diff --git a/.bowerrc b/.bowerrc
deleted file mode 100644
index 49a14a942b..0000000000
--- a/.bowerrc
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "directory": "awx/ui/client/lib"
-}
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..f5faf1f0e3
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+.git
+awx/ui/node_modules
diff --git a/.gitignore b/.gitignore
index 5095c8d699..57e2baf042 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,7 @@
 .tags1
 
 # Tower
-awx/settings/local_settings.py*
+awx/settings/local_*.py*
 awx/*.sqlite3
 awx/*.sqlite3_*
 awx/job_status
@@ -22,6 +22,7 @@ tower/tower_warnings.log
 celerybeat-schedule
 awx/ui/static
 awx/ui/build_test
+awx/ui/client/languages
 
 # Tower setup playbook testing
 setup/test/roles/postgresql
@@ -31,6 +32,7 @@ setup/test/roles/postgresql
 __pycache__
 /build
 /deb-build
+/reprepro
 /rpm-build
 /tar-build
 /setup-bundle-build
@@ -44,14 +46,15 @@ __pycache__
 /bower.json
 /package.json
 /testem.yml
-/coverage
+**/coverage
 /.istanbul.yml
-node_modules/**
+**/node_modules/**
 /tmp
 npm-debug.log
 
-# UI build debugging
-/DEBUG
+# UI build flag files
+awx/ui/.deps_built
+awx/ui/.release_built
 
 # Testing
 .cache
@@ -104,8 +107,10 @@ reports
 *.log.[0-9]
 *.results
 local/
+*.mo
 
 # AWX python libs populated by requirements.txt
 awx/lib/.deps_built
 awx/lib/site-packages
 venv/*
+use_dev_supervisor.txt
diff --git a/.jshintrc b/.jshintrc
deleted file mode 100644
index e44155d984..0000000000
--- a/.jshintrc
+++ /dev/null
@@ -1,41 +0,0 @@
-{
-    "browser": true,
-    "jquery": true,
-    "esnext": true,
-    "globalstrict": true,
-    "curly": true,
-    "immed": true,
-    "latedef": "nofunc",
-    "noarg": true,
-    "nonew": true,
-    "maxerr": 10000,
-    "notypeof": true,
-    "globals": {
-        "beforeEach": false,
-        "inject": false,
-        "module": false,
-        "angular":false,
-        "alert":false,
-        "$AnsibleConfig":true,
-        "$basePath":true,
-        "jsyaml":false,
-        "_":false,
-        "d3":false,
-        "Donut3D":false,
-        "nv":false,
-        "it": false,
-        "xit": false,
-        "expect": false,
-        "context": false,
-        "describe": false,
-        "moment": false
-    },
-    "strict": false,
-    "quotmark": false,
-    "trailing": true,
-    "undef": true,
-    "unused": true,
-    "eqeqeq": true,
-    "indent": 4,
-    "newcap": false
-}
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000000..8e00dc4371
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,31 @@
+sudo: false
+language: python
+python:
+  - '2.7'
+env:
+  - TOXENV=api-lint
+  - TOXENV=api
+  - TOXENV=ui-lint
+  - TOXENV=ui
+install:
+  - pip install tox
+script:
+  - tox
+# after_success:
+#   - TOXENV=coveralls tox
+addons:
+  apt:
+    packages:
+      - swig
+      - libxmlsec1-dev
+      - postgresql-9.5
+      - libssl-dev
+cache:
+  pip: true
+  directories:
+    - node_modules
+    - .tox
+services:
+  - mongodb
+  # Enable when we stop using sqlite for API tests
+  # - postgresql
diff --git a/COPYING b/COPYING
index 991895d074..83fb3ccb6e 100644
--- a/COPYING
+++ b/COPYING
@@ -1,5 +1,19 @@
-The Ansible Tower Software is a commercial software licensed to you pursuant to the Ansible Software Subscription and Services Agreement (“EULA”) located at www.ansible.com/subscription-agreement and an annual Order/Agreement with Ansible, Inc.
+ANSIBLE TOWER BY RED HAT END USER LICENSE AGREEMENT
 
-The Ansible Tower Software is free for use up to ten (10) Nodes, any additional Nodes shall be purchased.
+This end user license agreement (“EULA”) governs the use of the Ansible Tower software and any related updates, upgrades, versions, appearance, structure and organization (the “Ansible Tower Software”), regardless of the delivery mechanism.
 
-Ansible and Ansible Tower are registered Trademarks of Ansible, Inc.
+1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. and its affiliates (“Red Hat”) grant to you (“You”) a non-transferable, non-exclusive, worldwide, non-sublicensable, limited, revocable license to use the Ansible Tower Software for the term of the associated Red Hat Software Subscription(s) and in a quantity equal to the number of Red Hat Software Subscriptions purchased from Red Hat for the Ansible Tower Software (“License”), each as set forth on the applicable Red Hat ordering document. You acquire only the right to use the Ansible Tower Software and do not acquire any rights of ownership. Red Hat reserves all rights to the Ansible Tower Software not expressly granted to You. This License grant pertains solely to Your use of the Ansible Tower Software and is not intended to limit Your rights under, or grant You rights that supersede, the license terms of any software packages which may be made available with the Ansible Tower Software that are subject to an open source software license.
+
+2. Intellectual Property Rights. Title to the Ansible Tower Software and each component, copy and modification, including all derivative works whether made by Red Hat, You or on Red Hat's behalf, including those made at Your suggestion and all associated intellectual property rights, are and shall remain the sole and exclusive property of Red Hat and/or its licensors. The License does not authorize You (nor may You allow any third party, specifically non-employees of Yours) to: (a) copy, distribute, reproduce, use or allow third party access to the Ansible Tower Software except as expressly authorized hereunder; (b) decompile, disassemble, reverse engineer, translate, modify, convert or apply any procedure or process to the Ansible Tower Software in order to ascertain, derive, and/or appropriate for any reason or purpose, including the Ansible Tower Software source code or source listings or any trade secret information or process contained in the Ansible Tower Software (except as permitted under applicable law); (c) execute or incorporate other software (except for approved software as appears in the Ansible Tower Software documentation or specifically approved by Red Hat in writing) into Ansible Tower Software, or create a derivative work of any part of the Ansible Tower Software; (d) remove any trademarks, trade names or titles, copyrights legends or any other proprietary marking on the Ansible Tower Software; (e) disclose the results of any benchmarking of the Ansible Tower Software (whether or not obtained with Red Hat’s assistance) to any third party; (f) attempt to circumvent any user limits or other license, timing or use restrictions that are built into, defined or agreed upon, regarding the Ansible Tower Software. You are hereby notified that the Ansible Tower Software may contain time-out devices, counter devices, and/or other devices intended to ensure the limits of the License will not be exceeded (“Limiting Devices”). If the Ansible Tower Software contains Limiting Devices, Red Hat will provide You materials necessary to use the Ansible Tower Software to the extent permitted. You may not tamper with or otherwise take any action to defeat or circumvent a Limiting Device or other control measure, including but not limited to, resetting the unit amount or using false host identification number for the purpose of extending any term of the License.
+
+3. Evaluation Licenses. Unless You have purchased Ansible Tower Software Subscriptions from Red Hat or an authorized reseller under the terms of a commercial agreement with Red Hat, all use of the Ansible Tower Software shall be limited to testing purposes and not for production use (“Evaluation”). Unless otherwise agreed by Red Hat, Evaluation of the Ansible Tower Software shall be limited to an evaluation environment and the Ansible Tower Software shall not be used to manage any systems or virtual machines on networks being used in the operation of Your business or any other non-evaluation purpose. Unless otherwise agreed by Red Hat, You shall limit all Evaluation use to a single 30 day evaluation period and shall not download or otherwise obtain additional copies of the Ansible Tower Software or license keys for Evaluation.
+
+4. Limited Warranty. Except as specifically stated in this Section 4, to the maximum extent permitted under applicable law, the Ansible Tower Software and the components are provided and licensed “as is” without warranty of any kind, expressed or implied, including the implied warranties of merchantability, non-infringement or fitness for a particular purpose. Red Hat warrants solely to You that the media on which the Ansible Tower Software may be furnished will be free from defects in materials and manufacture under normal use for a period of thirty (30) days from the date of delivery to You. Red Hat does not warrant that the functions contained in the Ansible Tower Software will meet Your requirements or that the operation of the Ansible Tower Software will be entirely error free, appear precisely as described in the accompanying documentation, or comply with regulatory requirements.
+
+5. Limitation of Remedies and Liability. To the maximum extent permitted by applicable law, Your exclusive remedy under this EULA is to return any defective media within thirty (30) days of delivery along with a copy of Your payment receipt and Red Hat, at its option, will replace it or refund the money paid by You for the media. To the maximum extent permitted under applicable law, neither Red Hat nor any Red Hat authorized distributor will be liable to You for any incidental or consequential damages, including lost profits or lost savings arising out of the use or inability to use the Ansible Tower Software or any component, even if Red Hat or the authorized distributor has been advised of the possibility of such damages. In no event shall Red Hat's liability or an authorized distributor’s liability exceed the amount that You paid to Red Hat for the Ansible Tower Software during the twelve months preceding the first event giving rise to liability.
+
+6. Export Control. In accordance with the laws of the United States and other countries, You represent and warrant that You: (a) understand that the Ansible Tower Software and its components may be subject to export controls under the U.S. Commerce Department’s Export Administration Regulations (“EAR”); (b) are not located in any country listed in Country Group E:1 in Supplement No. 1 to part 740 of the EAR; (c) will not export, re-export, or transfer the Ansible Tower Software to any prohibited destination or to any end user who has been prohibited from participating in US export transactions by any federal agency of the US government; (d) will not use or transfer the Ansible Tower Software for use in connection with the design, development or production of nuclear, chemical or biological weapons, or rocket systems, space launch vehicles, or sounding rockets or unmanned air vehicle systems; (e) understand and agree that if you are in the United States and you export or transfer the Ansible Tower Software to eligible end users, you will, to the extent required by EAR Section 740.17 obtain a license for such export or transfer and will submit semi-annual reports to the Commerce Department’s Bureau of Industry and Security, which include the name and address (including country) of each transferee; and (f) understand that countries including the United States may restrict the import, use, or export of encryption products (which may include the Ansible Tower Software) and agree that you shall be solely responsible for compliance with any such import, use, or export restrictions.
+
+7. General. If any provision of this EULA is held to be unenforceable, that shall not affect the enforceability of the remaining provisions. This agreement shall be governed by the laws of the State of New York and of the United States, without regard to any conflict of laws provisions. The rights and obligations of the parties to this EULA shall not be governed by the United Nations Convention on the International Sale of Goods.
+
+Copyright © 2015 Red Hat, Inc. All rights reserved. "Red Hat" and “Ansible Tower” are registered trademarks of Red Hat, Inc. All other trademarks are the property of their respective owners.
diff --git a/MANIFEST.in b/MANIFEST.in
index b52764c7e8..27ad03a35d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -6,6 +6,7 @@ recursive-include awx/ui/templates *.html
 recursive-include awx/ui/static *
 recursive-include awx/playbooks *.yml
 recursive-include awx/lib/site-packages *
+recursive-include awx/plugins *.ps1
 recursive-include requirements *.txt
 recursive-include config *
 recursive-include docs/licenses *
@@ -17,6 +18,7 @@ recursive-exclude awx/settings local_settings.py*
 include tools/scripts/request_tower_configuration.sh
 include tools/scripts/request_tower_configuration.ps1
 include tools/scripts/ansible-tower-service
+include tools/scripts/failure-event-handler
 include tools/scripts/tower-python
 include tools/sosreport/*
 include COPYING
diff --git a/Makefile b/Makefile
index bc628758a1..836f96da9c 100644
--- a/Makefile
+++ b/Makefile
@@ -4,23 +4,21 @@ SITELIB=$(shell $(PYTHON) -c "from distutils.sysconfig import get_python_lib; pr
 OFFICIAL ?= no
 PACKER ?= packer
 PACKER_BUILD_OPTS ?= -var 'official=$(OFFICIAL)' -var 'aw_repo_url=$(AW_REPO_URL)'
-GRUNT ?= $(shell [ -t 0 ] && echo "grunt" || echo "grunt --no-color")
-TESTEM ?= ./node_modules/.bin/testem
-BROCCOLI_BIN ?= ./node_modules/.bin/broccoli
-MOCHA_BIN ?= ./node_modules/.bin/_mocha
-ISTANBUL_BIN ?= ./node_modules/.bin/istanbul
-BROWSER_SYNC_BIN ?= ./node_modules/.bin/browser-sync
 NODE ?= node
 NPM_BIN ?= npm
 DEPS_SCRIPT ?= packaging/bundle/deps.py
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
-VENV_BASE ?= /tower_devel/venv
+GCLOUD_AUTH ?= $(shell gcloud auth print-access-token)
+# NOTE: This defaults the container image version to the branch that's active
+COMPOSE_TAG ?= $(GIT_BRANCH)
+
+COMPOSE_HOST ?= $(shell hostname)
+
+VENV_BASE ?= /venv
 SCL_PREFIX ?=
 CELERY_SCHEDULE_FILE ?= /celerybeat-schedule
 
-CLIENT_TEST_DIR ?= build_test
-
 # Python packages to install only from source (not from binary wheels)
 # Comma separated list
 SRC_ONLY_PKGS ?= cffi,pycparser
@@ -47,7 +45,7 @@ ifeq ($(OFFICIAL),yes)
     AW_REPO_URL ?= http://releases.ansible.com/ansible-tower
 else
     RELEASE ?= $(BUILD)
-    AW_REPO_URL ?= http://jenkins.testing.ansible.com/ansible-tower_nightlies_RTYUIOPOIUYTYU/$(GIT_BRANCH)
+    AW_REPO_URL ?= http://jenkins.testing.ansible.com/ansible-tower_nightlies_f8b8c5588b2505970227a7b0900ef69040ad5a00/$(GIT_BRANCH)
 endif
 
 # Allow AMI license customization
@@ -81,7 +79,7 @@ SETUP_TAR_CHECKSUM=$(NAME)-setup-CHECKSUM
 
 # DEB build parameters
 DEBUILD_BIN ?= debuild
-DEBUILD_OPTS = --source-option="-I"
+DEBUILD_OPTS =
 DPUT_BIN ?= dput
 DPUT_OPTS ?= -c .dput.cf -u
 REPREPRO_BIN ?= reprepro
@@ -170,20 +168,22 @@ ifeq ($(DISTRO),ubuntu)
     SETUP_INSTALL_ARGS += --install-layout=deb
 endif
 
+# UI flag files
+UI_DEPS_FLAG_FILE = awx/ui/.deps_built
+UI_RELEASE_FLAG_FILE = awx/ui/.release_built
+
 .DEFAULT_GOAL := build
 
 .PHONY: clean clean-tmp clean-venv rebase push requirements requirements_dev \
-	requirements_jenkins \
 	develop refresh adduser migrate dbchange dbshell runserver celeryd \
 	receiver test test_unit test_coverage coverage_html test_jenkins dev_build \
 	release_build release_clean sdist rpmtar mock-rpm mock-srpm rpm-sign \
-	build-ui sync-ui test-ui build-ui-for-coverage test-ui-for-coverage \
-	build-ui-for-browser-tests test-ui-debug jshint ngdocs \
-	websocket-proxy browser-sync browser-sync-reload brocolli-watcher \
-	devjs minjs testjs_ci \
 	deb deb-src debian debsign pbuilder reprepro setup_tarball \
-	vagrant-virtualbox virtualbox-centos-7 virtualbox-centos-6 \
-	vagrant-vmware clean-bundle setup_bundle_tarball
+	virtualbox-ovf virtualbox-centos-7 virtualbox-centos-6 \
+	clean-bundle setup_bundle_tarball \
+	ui-docker-machine ui-docker ui-release ui-devel \
+	ui-test ui-deps ui-test-ci ui-test-saucelabs jlaska
 
 # Remove setup build files
 clean-tar:
@@ -197,11 +197,6 @@ clean-rpm:
 clean-deb:
 	rm -rf deb-build reprepro
 
-# Remove grunt build files
-clean-grunt:
-	rm -f package.json Gruntfile.js Brocfile.js bower.json
-	rm -rf node_modules
-
 # Remove packer artifacts
 clean-packer:
 	rm -rf packer_cache
@@ -218,13 +213,10 @@ clean-bundle:
 
 # remove ui build artifacts
 clean-ui:
-	rm -rf DEBUG
-
-clean-static:
 	rm -rf awx/ui/static/
-
-clean-build-test:
-	rm -rf awx/ui/build_test/
+	rm -rf awx/ui/node_modules/
+	rm -f $(UI_DEPS_FLAG_FILE)
+	rm -f $(UI_RELEASE_FLAG_FILE)
 
 clean-tmp:
 	rm -rf tmp/
@@ -233,18 +225,23 @@ clean-venv:
 	rm -rf venv/
 
 # Remove temporary build files, compiled Python files.
-clean: clean-rpm clean-deb clean-grunt clean-ui clean-static clean-build-test clean-tar clean-packer clean-bundle clean-venv
+clean: clean-rpm clean-deb clean-ui clean-tar clean-packer clean-bundle
+	rm -rf awx/public
 	rm -rf awx/lib/site-packages
-	rm -rf awx/lib/.deps_built
 	rm -rf dist/*
+	rm -rf awx/job_status
+	rm -rf awx/job_output
+	rm -rf reports
+	rm -f awx/awx_test.sqlite3
 	rm -rf tmp
 	mkdir tmp
 	rm -rf build $(NAME)-$(VERSION) *.egg-info
 	find . -type f -regex ".*\.py[co]$$" -delete
+	find . -type d -name "__pycache__" -delete
 
 # convenience target to assert environment variables are defined
 guard-%:
-	@if [ "${${*}}" == "" ]; then \
+	@if [ "$${$*}" = "" ]; then \
	    echo "The required environment variable '$*' is not set"; \
	    exit 1; \
	fi
@@ -267,7 +264,7 @@ virtualenv_ansible:
	if [ ! -d "$(VENV_BASE)/ansible" ]; then \
		virtualenv --system-site-packages --setuptools $(VENV_BASE)/ansible && \
		$(VENV_BASE)/ansible/bin/pip install -I setuptools==23.0.0 && \
-		$(VENV_BASE)/ansible/bin/pip install -I pip==8.1.1; \
+		$(VENV_BASE)/ansible/bin/pip install -I pip==8.1.2; \
	fi; \
	fi
 
@@ -279,48 +276,43 @@ virtualenv_tower:
	if [ ! -d "$(VENV_BASE)/tower" ]; then \
		virtualenv --system-site-packages --setuptools $(VENV_BASE)/tower && \
		$(VENV_BASE)/tower/bin/pip install -I setuptools==23.0.0 && \
-		$(VENV_BASE)/tower/bin/pip install -I pip==8.1.1; \
+		$(VENV_BASE)/tower/bin/pip install -I pip==8.1.2; \
	fi; \
	fi
 
 requirements_ansible: virtualenv_ansible
	if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/ansible/bin/activate; \
-		$(VENV_BASE)/ansible/bin/pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ;\
+		$(VENV_BASE)/ansible/bin/pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ;\
+		$(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt; \
	else \
-		pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ; \
+		pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ; \
+		pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt; \
	fi
 
 # Install third-party requirements needed for Tower's environment.
 requirements_tower: virtualenv_tower
	if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/tower/bin/activate; \
-		$(VENV_BASE)/tower/bin/pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ;\
+		$(VENV_BASE)/tower/bin/pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ;\
+		$(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt; \
	else \
-		pip install --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ; \
+		pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ; \
+		pip uninstall --yes -r requirements/requirements_tower_uninstall.txt; \
	fi
 
 requirements_tower_dev:
	if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/tower/bin/activate; \
		$(VENV_BASE)/tower/bin/pip install -r requirements/requirements_dev.txt; \
+		$(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_dev_uninstall.txt; \
	fi
 
-# Install third-party requirements needed for running unittests in jenkins
-requirements_jenkins:
-	if [ "$(VENV_BASE)" ]; then \
-		. $(VENV_BASE)/tower/bin/activate; \
-		$(VENV_BASE)/tower/bin/pip install -Ir requirements/requirements_jenkins.txt; \
-	else \
-		pip install -Ir requirements/requirements_jenkins..txt; \
-	fi && \
-	$(NPM_BIN) install csslint jshint
-
 requirements: requirements_ansible requirements_tower
 
 requirements_dev: requirements requirements_tower_dev
 
-requirements_test: requirements requirements_jenkins
+requirements_test: requirements
 
 # "Install" ansible-tower package in development mode.
 develop:
@@ -328,8 +320,8 @@ develop:
		pip uninstall -y awx; \
		$(PYTHON) setup.py develop; \
	else \
-		sudo pip uninstall -y awx; \
-		sudo $(PYTHON) setup.py develop; \
+		pip uninstall -y awx; \
+		$(PYTHON) setup.py develop; \
	fi
 
 version_file:
@@ -341,7 +333,7 @@ init:
	if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/tower/bin/activate; \
	fi; \
-	tower-manage register_instance --primary --hostname=127.0.0.1; \
+	tower-manage register_instance --hostname=$(COMPOSE_HOST); \
 
 # Refresh development environment after pulling new code.
 refresh: clean requirements_dev version_file develop migrate
@@ -366,15 +358,17 @@ dbshell:
	sudo -u postgres psql -d awx-dev
 
 server_noattach:
-	tmux new-session -d -s tower 'exec make runserver'
+	tmux new-session -d -s tower 'exec make uwsgi'
	tmux rename-window 'Tower'
	tmux select-window -t tower:0
	tmux split-window -v 'exec make celeryd'
-	tmux split-window -h 'exec make taskmanager'
-	tmux new-window 'exec make receiver'
+	tmux new-window 'exec make daphne'
	tmux select-window -t tower:1
+	tmux rename-window 'WebSockets'
+	tmux split-window -h 'exec make runworker'
+	tmux new-window 'exec make receiver'
+	tmux select-window -t tower:2
	tmux rename-window 'Extra Services'
-	tmux split-window -v 'exec make socketservice'
	tmux split-window -h 'exec make factcacher'
 
 server: server_noattach
@@ -384,6 +378,12 @@ server: server_noattach
 servercc: server_noattach
	tmux -2 -CC attach-session -t tower
 
+supervisor:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	supervisord --configuration /supervisor.conf --pidfile=/tmp/supervisor_pid
+
 # Alternate approach to tmux to run all development tasks specified in
 # Procfile. https://youtu.be/OPMgaibszjk
 honcho:
@@ -392,6 +392,36 @@ honcho:
	fi; \
	honcho start
 
+flower:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//
+
+collectstatic:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
+
+uwsgi: collectstatic
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	uwsgi -b 32768 --socket :8050 --module=awx.wsgi:application --home=/venv/tower --chdir=/tower_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --master-fifo=/awxfifo --lazy-apps
+
+daphne:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	daphne -b 0.0.0.0 -p 8051 awx.asgi:channel_layer
+
+runworker:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py runworker --only-channels websocket.*
+
 # Run the built-in development webserver (by default on http://localhost:8013).
 runserver:
	@if [ "$(VENV_BASE)" ]; then \
@@ -404,7 +434,8 @@ celeryd:
	@if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/tower/bin/activate; \
	fi; \
-	$(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,2 -Ofair --schedule=$(CELERY_SCHEDULE_FILE)
+	$(PYTHON) manage.py celeryd -l DEBUG -B --autoreload --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default,scheduler,broadcast_all,$(COMPOSE_HOST) -n celery@$(COMPOSE_HOST)
+	#$(PYTHON) manage.py celery multi show projects jobs default -l DEBUG -Q:projects projects -Q:jobs jobs -Q:default default -c:projects 1 -c:jobs 3 -c:default 3 -Ofair -B --schedule=$(CELERY_SCHEDULE_FILE)
 
 # Run to start the zeromq callback receiver
 receiver:
@@ -413,12 +444,6 @@ receiver:
	fi; \
	$(PYTHON) manage.py run_callback_receiver
 
-taskmanager:
-	@if [ "$(VENV_BASE)" ]; then \
-		. $(VENV_BASE)/tower/bin/activate; \
-	fi; \
-	$(PYTHON) manage.py run_task_system
-
 socketservice:
	@if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/tower/bin/activate; \
@@ -431,6 +456,9 @@ factcacher:
	fi; \
	$(PYTHON) manage.py run_fact_cache_receiver
 
+nginx:
+	nginx -g "daemon off;"
+
 reports:
	mkdir -p $@
 
@@ -438,7 +466,10 @@ pep8: reports
	@(set -o pipefail && $@ | tee reports/$@.report)
 
 flake8: reports
-	@$@ --output-file=reports/$@.report
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	(set -o pipefail && $@ | tee reports/$@.report)
 
 pyflakes: reports
	@(set -o pipefail && $@ | tee reports/$@.report)
@@ -448,7 +479,7 @@ pylint: reports
 
 check: flake8 pep8 # pyflakes pylint
 
-TEST_DIRS=awx/main/tests
+TEST_DIRS ?= awx/main/tests awx/conf/tests awx/sso/tests
 
 # Run all API unit tests.
 test:
	@if [ "$(VENV_BASE)" ]; then \
@@ -460,7 +491,7 @@ test_unit:
	@if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/tower/bin/activate; \
	fi; \
-	py.test awx/main/tests/unit
+	py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit
 
 # Run all API unit tests with coverage enabled.
 test_coverage:
@@ -481,173 +512,107 @@ test_tox:
 
 # Alias existing make target so old versions run against Jekins the same way
 test_jenkins : test_coverage
 
+# Make fake data
+DATA_GEN_PRESET = ""
+bulk_data:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	$(PYTHON) tools/data_generators/rbac_dummy_data_generator.py --preset=$(DATA_GEN_PRESET)
+
+# l10n TASKS
+# --------------------------------------
+
+# check for UI po files
+HAVE_PO := $(shell ls awx/ui/po/*.po 2>/dev/null)
+check-po:
+ifdef HAVE_PO
+	# Should be 'Language: zh-CN' but not 'Language: zh_CN' in zh_CN.po
+	for po in awx/ui/po/*.po ; do \
+		echo $$po; \
+		mo="awx/ui/po/`basename $$po .po`.mo"; \
+		msgfmt --check --verbose $$po -o $$mo; \
+		if test "$$?" -ne 0 ; then \
+			exit -1; \
+		fi; \
+		rm $$mo; \
+		name=`echo "$$po" | grep '-'`; \
+		if test "x$$name" != x ; then \
+			right_name=`echo $$language | sed -e 's/-/_/'`; \
+			echo "ERROR: WRONG $$name CORRECTION: $$right_name"; \
+			exit -1; \
+		fi; \
+		language=`grep '^"Language:' "$$po" | grep '_'`; \
+		if test "x$$language" != x ; then \
+			right_language=`echo $$language | sed -e 's/_/-/'`; \
+			echo "ERROR: WRONG $$language CORRECTION: $$right_language in $$po"; \
+			exit -1; \
+		fi; \
+	done;
+else
+	@echo No PO files
+endif
+
+# generate UI .pot
+pot: $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run pot
+
+# generate django .pot .po
+LANG = "en-us"
+messages:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py makemessages -l $(LANG) --keep-pot
+
+# generate l10n .json .mo
+languages: $(UI_DEPS_FLAG_FILE) check-po
+	$(NPM_BIN) --prefix awx/ui run languages
+	$(PYTHON) tools/scripts/compilemessages.py
+
+# End l10n TASKS
+# --------------------------------------
+
 # UI TASKS
 # --------------------------------------
 
-# begin targets that pull ui files from packaging to the root of the app
-Gruntfile.js: packaging/node/Gruntfile.js
-	cp $< $@
+ui-deps: $(UI_DEPS_FLAG_FILE)
 
-Brocfile.js: packaging/node/Brocfile.js
-	cp $< $@
+$(UI_DEPS_FLAG_FILE):
+	$(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui
+	touch $(UI_DEPS_FLAG_FILE)
 
-bower.json: packaging/node/bower.json
-	cp $< $@
+ui-docker-machine: $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run ui-docker-machine -- $(MAKEFLAGS)
 
-package.json: packaging/node/package.template
-	sed -e 's#%NAME%#$(NAME)#;s#%VERSION%#$(VERSION)#;s#%GIT_REMOTE_URL%#$(GIT_REMOTE_URL)#;' $< > $@
+# Native docker. Builds UI and raises BrowserSync & filesystem polling.
+ui-docker: $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run ui-docker -- $(MAKEFLAGS)
 
-testem.yml: packaging/node/testem.yml
-	cp $< $@
+# Builds UI in development mode without raising browser-sync or filesystem polling.
+ui-devel: $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run build-devel -- $(MAKEFLAGS)
 
-.istanbul.yml: packaging/node/.istanbul.yml
-	cp $< $@
+ui-release: $(UI_RELEASE_FLAG_FILE)
 
-# end targets that pull ui files from packaging to the root of the app
+$(UI_RELEASE_FLAG_FILE): languages $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run build-release
+	touch $(UI_RELEASE_FLAG_FILE)
 
-# update package.json and install npm dependencies
-node_modules: package.json
-	$(NPM_BIN) install
-	touch $@
+ui-test: $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run test
 
-# helper tasks to run broccoli build process at awx/ui/,
-# to build the ui, use the build-ui target instead:
-#   UI_FLAGS=: additional parameters to pass broccoli
-#     for building
-awx/ui/static: node_modules clean-ui clean-static Brocfile.js bower.json
-	$(BROCCOLI_BIN) build awx/ui/static -- $(UI_FLAGS)
+ui-test-ci: $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run test:ci
 
-awx/ui/build_test: node_modules clean-ui clean-build-test Brocfile.js bower.json
-	$(BROCCOLI_BIN) build awx/ui/build_test -- $(UI_FLAGS)
+testjs_ci:
+	echo "Update UI unittests later" #ui-test-ci
 
-# build the ui to awx/ui/static:
-#   defaults to standard dev build (concatenated, non-minified, sourcemaps, no
-#     tests)
-#   PROD=true: standard prod build (concatenated, minified, no sourcemaps,
-#     compressed, no tests)
-#   EXTRA_UI_FLAGS=: additional parameters to pass broccoli
-#     for building
-PROD ?= false
+jshint:
+	$(NPM_BIN) run --prefix awx/ui jshint
 
-# TODO: Remove after 2.4 (alias for devjs/minjs)
-devjs: build-ui
-minjs: build-ui
-ifeq ($(MAKECMDGOALS),minjs)
-	PROD = true
-endif
+ui-test-saucelabs: $(UI_DEPS_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui run test:saucelabs
 
-ifeq ($(PROD),true)
-	UI_FLAGS=--silent --compress --no-docs --no-debug --no-sourcemaps \
-		$(EXTRA_UI_FLAGS)
-else
-	UI_FLAGS=$(EXTRA_UI_FLAGS)
-endif
-
-build-ui: awx/ui/static
-
-# launch watcher to continuously build the ui to awx/ui/static and run tests
-# after changes are made:
-#   WATCHER_FLAGS: options to be utilized by broccoli timepiece
-#   UI_FLAGS=: additional parameters to pass broccoli
-#     for building
-#   DOCKER_MACHINE_NAME=<name of your docker machine>: when
-#     passed, not only will brocolli rebuild, but browser-sync will
-#     proxy tower and refresh the ui when a change is made.
-DOCKER_MACHINE_NAME ?= none
-ifeq ($(DOCKER_MACHINE_NAME),none)
-    sync-ui: node_modules clean-tmp brocolli-watcher
-else
-    sync-ui: node_modules clean-tmp
-	tmux new-session -d -s ui_sync 'exec make brocolli-watcher'
-	tmux rename-window 'UI Sync'
-	tmux select-window -t ui_sync:0
-	tmux split-window -v 'exec make browser-sync'
-	tmux split-window -h 'exec make websocket-proxy'
-	tmux select-layout main-vertical
-	tmux attach-session -t ui_sync
-endif
-
-websocket-proxy:
-	docker-machine ssh $(DOCKER_MACHINE_NAME) -L 8080:localhost:8080
-
-browser-sync:
-	$(BROWSER_SYNC_BIN) start --proxy $(shell docker-machine ip $(DOCKER_MACHINE_NAME)):8013 --ws
-
-browser-sync-reload:
-	$(BROWSER_SYNC_BIN) reload
-
-brocolli-watcher: Brocfile.js testem.yml
-	$(NODE) tools/ui/timepiece.js awx/ui/static $(WATCHER_FLAGS) -- $(UI_FLAGS)
-
-# run ui unit-tests:
-#   defaults to a useful dev testing run. Builds the ui to awx/ui/build_test
-#     and runs mocha (node.js) tests with istanbul coverage (and an html
-#     coverage report)
-#   UI_TESTS_TO_RUN=<name>-test.js: Set this to only run a specific test file
-#   CI=true: Builds the ui to awx/ui/build_test
-#     and runs mocha (node.js) tests with istanbul coverage (and a cobertura
-#     coverage report). Also builds the ui to awx/ui/static and runs the
-#     testem (phantomjs) tests. Outputs these to XUNIT format to be consumed
-#     and displayed in jenkins
-#   DEBUG=true: Builds the ui to awx/ui/static and runs testem tests in Chrome
-#     so you can breakpoint the tests and underlying code to figure out why
-#     tests are failing.
-#   TESTEM_DEBUG_BROWSER: the browser to run tests in, default to Chrome
-
-# TODO: deprecated past 2.4
-testjs_ci: test-ui # w var UI_TEST_MODE=CI
-
-UI_TEST_MODE ?= DEV
-ifeq ($(UI_TEST_MODE),CI)
-    # ci testing run
-    # this used to be testjs_ci, sort-of
-    REPORTER = xunit
-    test-ui: .istanbul.yml build-ui-for-coverage test-ui-for-coverage
-else
-ifeq ($(UI_TEST_MODE),DEV_DEBUG)
-    # debug (breakpoint) dev testing run
-    test-ui: build-ui-for-browser-tests test-ui-debug
-else
-    # default dev testing run
-    test-ui: .istanbul.yml build-ui-for-coverage test-ui-for-coverage
-endif
-endif
-
-# helper tasks to test ui, don't call directly
-build-ui-for-coverage: UI_FLAGS=--node-tests --no-concat --no-styles
-build-ui-for-coverage: awx/ui/build_test
-
-REPORTER ?= standard
-UI_TESTS_TO_RUN ?= all
-ifeq ($(REPORTER), xunit)
-    test-ui-for-coverage:
-	XUNIT_FILE=reports/test-results-ui.xml NODE_PATH=awx/ui/build_test $(ISTANBUL_BIN) cover --include-all-sources $(MOCHA_BIN) -- --full-trace --reporter xunit-file $(shell find awx/ui/build_test -name '*-test.js'); cp coverage/ui-coverage-report.xml reports/coverage-report-ui.xml
-else
-ifeq ($(UI_TESTS_TO_RUN), all)
-    test-ui-for-coverage:
-	NODE_PATH=awx/ui/build_test $(ISTANBUL_BIN) cover --include-all-sources $(MOCHA_BIN) -- --full-trace $(shell find awx/ui/build_test -name '*-test.js')
-else
-    test-ui-for-coverage:
-	NODE_PATH=awx/ui/build_test $(ISTANBUL_BIN) cover $(MOCHA_BIN) -- --full-trace $(shell find awx/ui/build_test -name '$(UI_TESTS_TO_RUN)')
-endif
-endif
-
-build-ui-for-browser-tests: UI_FLAGS=--no-styles --no-compress --browser-tests --no-node-tests
-build-ui-for-browser-tests: awx/ui/static
-
-TESTEM_DEBUG_BROWSER ?= Chrome
-test-ui-debug:
-	PATH=./node_modules/.bin:$(PATH) $(TESTEM) --file testem.yml -l $(TESTEM_DEBUG_BROWSER)
-
-# lint .js files
-jshint: node_modules Gruntfile.js
-	$(GRUNT) $@
-
-# generate ui docs
-ngdocs: build-ui Gruntfile.js
-	$(GRUNT) $@
 
 # END UI TASKS
 # --------------------------------------
@@ -688,7 +653,7 @@ release_clean:
	-(rm *.tar)
	-(rm -rf ($RELEASE))
 
-dist/$(SDIST_TAR_FILE): minjs
+dist/$(SDIST_TAR_FILE): ui-release
	BUILD="$(BUILD)" $(PYTHON) setup.py sdist
 
 sdist: dist/$(SDIST_TAR_FILE)
@@ -730,7 +695,8 @@ rpm-build:
 
 rpm-build/$(SDIST_TAR_FILE): rpm-build dist/$(SDIST_TAR_FILE)
	cp packaging/rpm/$(NAME).spec rpm-build/
-	cp packaging/rpm/$(NAME).te rpm-build/
+	cp packaging/rpm/tower.te rpm-build/
+	cp packaging/rpm/tower.fc rpm-build/
	cp packaging/rpm/$(NAME).sysconfig rpm-build/
	cp packaging/remove_tower_source.py rpm-build/
	cp packaging/bytecompile.sh rpm-build/
@@ -800,7 +766,9 @@ debian: deb-build/$(DEB_TAR_NAME)
 endif
 
 deb-build/$(DEB_NVR).dsc: deb-build/$(DEB_TAR_NAME)
-	cd deb-build/$(DEB_TAR_NAME) && $(DEBUILD) -S
+	cd deb-build/$(DEB_TAR_NAME) && \
+		cp debian/control.$(DEB_DIST) debian/control && \
+		$(DEBUILD) -S
 
 deb-src: deb-build/$(DEB_NVR).dsc
	@echo "#############################################"
@@ -890,28 +858,46 @@ install:
	export SCL_PREFIX HTTPD_SCL_PREFIX
	$(PYTHON) setup.py install $(SETUP_INSTALL_ARGS)
 
+docker-auth:
+	docker login -e 1234@5678.com -u oauth2accesstoken -p "$(GCLOUD_AUTH)" https://gcr.io
+
 # Docker Compose Development environment
-docker-compose:
-	docker-compose -f tools/docker-compose.yml up --no-recreate
+docker-compose: docker-auth
+	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml up --no-recreate tower
+
+docker-compose-cluster: docker-auth
+	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml up
 
-docker-compose-test:
-	cd tools && docker-compose run --rm --service-ports tower /bin/bash
+docker-compose-test: docker-auth
+	cd tools && TAG=$(COMPOSE_TAG) docker-compose run --rm --service-ports tower /bin/bash
+
+docker-compose-build:
+	docker build -t ansible/tower_devel -f tools/docker-compose/Dockerfile .
+	docker tag ansible/tower_devel gcr.io/ansible-tower-engineering/tower_devel:$(COMPOSE_TAG)
+	#docker push gcr.io/ansible-tower-engineering/tower_devel:$(COMPOSE_TAG)
 
 MACHINE?=default
 docker-clean:
-	rm -f awx/lib/.deps_built
	eval $$(docker-machine env $(MACHINE))
-	docker stop $$(docker ps -a -q)
-	-docker rm $$(docker ps -f name=tools_tower -a -q)
-	-docker rmi tools_tower
+	$(foreach container_id,$(shell docker ps -f name=tools_tower -aq),docker stop $(container_id); docker rm -f $(container_id);)
+	-docker images | grep "tower_devel" | awk '{print $$1 ":" $$2}' | xargs docker rmi
 
 docker-refresh: docker-clean docker-compose
 
-mongo-debug-ui:
-	docker run -it --rm --name mongo-express --link tools_mongo_1:mongo -e ME_CONFIG_OPTIONS_EDITORTHEME=ambiance -e ME_CONFIG_BASICAUTH_USERNAME=admin -e ME_CONFIG_BASICAUTH_PASSWORD=password -p 8081:8081 knickers/mongo-express
+# Docker Development Environment with Elastic Stack Connected
+docker-compose-elk: docker-auth
+	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
 
-mongo-container:
-	docker run -it --link tools_mongo_1:mongo --rm mongo sh -c 'exec mongo "$MONGO_PORT_27017_TCP_ADDR:$MONGO_PORT_27017_TCP_PORT/system_tracking_dev"'
+docker-compose-cluster-elk: docker-auth
+	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
+
+clean-elk:
+	docker stop tools_kibana_1
+	docker stop tools_logstash_1
+	docker stop tools_elasticsearch_1
+	docker rm tools_logstash_1
+	docker rm tools_elasticsearch_1
+	docker rm tools_kibana_1
 
 psql-container:
-	docker run -it --link tools_postgres_1:postgres --rm postgres:9.4.1 sh -c 'exec psql -h "$$POSTGRES_PORT_5432_TCP_ADDR" -p "$$POSTGRES_PORT_5432_TCP_PORT" -U postgres'
+	docker run -it --net tools_default --rm postgres:9.4.1 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
diff --git a/Procfile b/Procfile
index a301a6aa1a..b30dfcad2b 100644
--- a/Procfile
+++ b/Procfile
@@ -1,6 +1,8 @@
-runserver: make runserver
+nginx: make nginx
+runworker: make runworker
+daphne: make daphne
 celeryd: make celeryd
-taskmanager: make taskmanager
 receiver: make receiver
-socketservice: make socketservice
 factcacher: make factcacher
+flower: make flower
+uwsgi: make uwsgi
diff --git a/awx/__init__.py b/awx/__init__.py
index fe3644cf53..f5457b5caa 100644
--- a/awx/__init__.py
+++ b/awx/__init__.py
@@ -5,7 +5,7 @@ import os
 import sys
 import warnings
 
-__version__ = '3.0.3'
+__version__ = '3.1.0'
 
 __all__ = ['__version__']
 
@@ -17,6 +17,7 @@ try:
 except ImportError:  # pragma: no cover
     MODE = 'production'
 
+
 def find_commands(management_dir):
     # Modified version of function from django/core/management/__init__.py.
     command_dir = os.path.join(management_dir, 'commands')
@@ -33,6 +34,7 @@ def find_commands(management_dir):
         pass
     return commands
 
+
 def prepare_env():
     # Update the default settings environment variable based on current mode.
     os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)
diff --git a/awx/api/authentication.py b/awx/api/authentication.py
index c8143facbd..1086778897 100644
--- a/awx/api/authentication.py
+++ b/awx/api/authentication.py
@@ -6,8 +6,10 @@ import urllib
 import logging
 
 # Django
+from django.conf import settings
 from django.utils.timezone import now as tz_now
 from django.utils.encoding import smart_text
+from django.utils.translation import ugettext_lazy as _
 
 # Django REST Framework
 from rest_framework import authentication
@@ -16,10 +18,10 @@ from rest_framework import HTTP_HEADER_ENCODING
 
 # AWX
 from awx.main.models import UnifiedJob, AuthToken
-from awx.main.conf import tower_settings
 
 logger = logging.getLogger('awx.api.authentication')
 
+
 class TokenAuthentication(authentication.TokenAuthentication):
     '''
     Custom token authentication using tokens that expire and are associated
@@ -62,10 +64,10 @@ class TokenAuthentication(authentication.TokenAuthentication):
             return None
 
         if len(auth) == 1:
-            msg = 'Invalid token header. No credentials provided.'
+            msg = _('Invalid token header. No credentials provided.')
             raise exceptions.AuthenticationFailed(msg)
         elif len(auth) > 2:
-            msg = 'Invalid token header. Token string should not contain spaces.'
+            msg = _('Invalid token header. Token string should not contain spaces.')
             raise exceptions.AuthenticationFailed(msg)
 
         return self.authenticate_credentials(auth[1])
@@ -93,14 +95,14 @@ class TokenAuthentication(authentication.TokenAuthentication):
 
         # Token invalidated due to session limit config being reduced
         # Session limit reached invalidation will also take place on authentication
-        if tower_settings.AUTH_TOKEN_PER_USER != -1:
+        if settings.AUTH_TOKEN_PER_USER != -1:
             if not token.in_valid_tokens(now=now):
                 token.invalidate(reason='limit_reached')
                 raise exceptions.AuthenticationFailed(AuthToken.reason_long('limit_reached'))
 
         # If the user is inactive, then return an error.
         if not token.user.is_active:
-            raise exceptions.AuthenticationFailed('User inactive or deleted')
+            raise exceptions.AuthenticationFailed(_('User inactive or deleted'))
 
         # Refresh the token.
         # The token is extended from "right now" + configurable setting amount.
@@ -123,12 +125,19 @@ class TokenGetAuthentication(TokenAuthentication):
 
 class LoggedBasicAuthentication(authentication.BasicAuthentication):
 
     def authenticate(self, request):
+        if not settings.AUTH_BASIC_ENABLED:
+            return
         ret = super(LoggedBasicAuthentication, self).authenticate(request)
         if ret:
             username = ret[0].username if ret[0] else '<none>'
             logger.debug(smart_text(u"User {} performed a {} to {} through the API".format(username, request.method, request.path)))
         return ret
 
+    def authenticate_header(self, request):
+        if not settings.AUTH_BASIC_ENABLED:
+            return
+        return super(LoggedBasicAuthentication, self).authenticate_header(request)
+
 
 class TaskAuthentication(authentication.BaseAuthentication):
     '''
@@ -149,7 +158,7 @@ class TaskAuthentication(authentication.BaseAuthentication):
             return None
         token = unified_job.task_auth_token
         if auth[1] != token:
-            raise exceptions.AuthenticationFailed('Invalid task token')
+            raise exceptions.AuthenticationFailed(_('Invalid task token'))
         return (None, token)
 
     def authenticate_header(self, request):
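The authentication changes above swap the old `tower_settings` accessor for Django's `settings`, which the diff wires up to the new database-backed registry in `awx/api/conf.py` below. A rough sketch of the per-user session cap this enables — `in_valid_tokens()` and `invalidate()` are the real `AuthToken` methods called above, but the wrapper function itself is illustrative, not shipped code:

```python
# Illustrative only: behavior of the per-user token cap enforced above.
# AUTH_TOKEN_PER_USER == -1 disables the cap; otherwise a token that has
# fallen out of the user's set of valid tokens is invalidated with the
# machine-readable reason 'limit_reached'.
from django.conf import settings


def token_still_valid(token, now):
    if settings.AUTH_TOKEN_PER_USER != -1:
        if not token.in_valid_tokens(now=now):
            token.invalidate(reason='limit_reached')
            return False
    return True
```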
diff --git a/awx/api/conf.py b/awx/api/conf.py
new file mode 100644
index 0000000000..6bbfee1d3d
--- /dev/null
+++ b/awx/api/conf.py
@@ -0,0 +1,35 @@
+# Django
+from django.utils.translation import ugettext_lazy as _
+
+# Tower
+from awx.conf import fields, register
+
+
+register(
+    'AUTH_TOKEN_EXPIRATION',
+    field_class=fields.IntegerField,
+    min_value=60,
+    label=_('Idle Time Force Log Out'),
+    help_text=_('Number of seconds that a user is inactive before they will need to login again.'),
+    category=_('Authentication'),
+    category_slug='authentication',
+)
+
+register(
+    'AUTH_TOKEN_PER_USER',
+    field_class=fields.IntegerField,
+    min_value=-1,
+    label=_('Maximum number of simultaneous logins'),
+    help_text=_('Maximum number of simultaneous logins a user may have. To disable enter -1.'),
+    category=_('Authentication'),
+    category_slug='authentication',
+)
+
+register(
+    'AUTH_BASIC_ENABLED',
+    field_class=fields.BooleanField,
+    label=_('Enable HTTP Basic Auth'),
+    help_text=_('Enable HTTP Basic Auth for the API Browser.'),
+    category=_('Authentication'),
+    category_slug='authentication',
+)
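The new `awx/api/conf.py` registers these authentication settings with the `awx.conf` registry, making them database-configurable and exposing their metadata (label, help text, category) through the API. A hedged sketch of registering one more setting in the same style — the setting name below is hypothetical, only the `register()` call pattern comes from the diff:

```python
# Hypothetical example following the registration pattern above.
from django.utils.translation import ugettext_lazy as _

from awx.conf import fields, register

register(
    'AUTH_TOKEN_WARNING_SECONDS',  # hypothetical setting name
    field_class=fields.IntegerField,
    min_value=0,
    label=_('Session Expiration Warning'),
    help_text=_('Seconds before expiration at which the UI warns the user.'),
    category=_('Authentication'),
    category_slug='authentication',
)
```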
diff --git a/awx/api/filters.py b/awx/api/filters.py
index 08a26735d2..e5c9c39264 100644
--- a/awx/api/filters.py
+++ b/awx/api/filters.py
@@ -9,9 +9,11 @@ from django.core.exceptions import FieldError, ValidationError
 from django.db import models
 from django.db.models import Q
 from django.db.models.fields import FieldDoesNotExist
-from django.db.models.fields.related import ForeignObjectRel
+from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
 from django.contrib.contenttypes.models import ContentType
+from django.contrib.contenttypes.fields import GenericForeignKey
 from django.utils.encoding import force_text
+from django.utils.translation import ugettext_lazy as _
 
 # Django REST Framework
 from rest_framework.exceptions import ParseError, PermissionDenied
@@ -19,6 +21,8 @@ from rest_framework.filters import BaseFilterBackend
 
 # Ansible Tower
 from awx.main.utils import get_type_for_model, to_python_boolean
+from awx.main.models.rbac import RoleAncestorEntry
+
 
 class MongoFilterBackend(BaseFilterBackend):
 
@@ -26,6 +30,7 @@ class MongoFilterBackend(BaseFilterBackend):
     def filter_queryset(self, request, queryset, view):
         return queryset
 
+
 class TypeFilterBackend(BaseFilterBackend):
     '''
     Filter on type field now returned with all objects.
@@ -62,6 +67,7 @@ class TypeFilterBackend(BaseFilterBackend):
             # Return a 400 for invalid field names.
             raise ParseError(*e.args)
 
+
 class FieldLookupBackend(BaseFilterBackend):
     '''
     Filter using field lookups provided via query string parameters.
@@ -73,7 +79,7 @@ class FieldLookupBackend(BaseFilterBackend):
     SUPPORTED_LOOKUPS = ('exact', 'iexact', 'contains', 'icontains',
                          'startswith', 'istartswith', 'endswith', 'iendswith',
                          'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
-                         'isnull')
+                         'isnull', 'search')
 
     def get_field_from_lookup(self, model, lookup):
         field = None
@@ -84,8 +90,8 @@ class FieldLookupBackend(BaseFilterBackend):
         # those lookups combined with request.user.get_queryset(Model) to make
         # sure user cannot query using objects he could not view.
         new_parts = []
-        for n, name in enumerate(parts[:-1]):
+        for name in parts[:-1]:
 
             # HACK: Make project and inventory source filtering by old field names work for backwards compatibility.
             if model._meta.object_name in ('Project', 'InventorySource'):
                 name = {
@@ -95,15 +101,28 @@ class FieldLookupBackend(BaseFilterBackend):
                     'last_updated': 'last_job_run',
                 }.get(name, name)
 
-            new_parts.append(name)
+            if name == 'type' and 'polymorphic_ctype' in model._meta.get_all_field_names():
+                name = 'polymorphic_ctype'
+                new_parts.append('polymorphic_ctype__model')
+            else:
+                new_parts.append(name)
 
             if name in getattr(model, 'PASSWORD_FIELDS', ()):
-                raise PermissionDenied('Filtering on password fields is not allowed.')
+                raise PermissionDenied(_('Filtering on password fields is not allowed.'))
             elif name == 'pk':
                 field = model._meta.pk
             else:
-                field = model._meta.get_field_by_name(name)[0]
+                name_alt = name.replace("_", "")
+                if name_alt in model._meta.fields_map.keys():
+                    field = model._meta.fields_map[name_alt]
+                    new_parts.pop()
+                    new_parts.append(name_alt)
+                else:
+                    field = model._meta.get_field_by_name(name)[0]
+                if isinstance(field, ForeignObjectRel) and getattr(field.field, '__prevent_search__', False):
+                    raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
+                elif getattr(field, '__prevent_search__', False):
+                    raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
             model = getattr(field, 'related_model', None) or field.model
 
         if parts:
@@ -123,14 +142,20 @@ class FieldLookupBackend(BaseFilterBackend):
             return to_python_boolean(value, allow_none=True)
         elif isinstance(field, models.BooleanField):
             return to_python_boolean(value)
-        elif isinstance(field, ForeignObjectRel):
+        elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
             return self.to_python_related(value)
         else:
             return field.to_python(value)
 
     def value_to_python(self, model, lookup, value):
         field, new_lookup = self.get_field_from_lookup(model, lookup)
-        if new_lookup.endswith('__isnull'):
+
+        # Type names are stored without underscores internally, but are presented
+        # and serialized over the API containing underscores so we remove `_`
+        # for polymorphic_ctype__model lookups.
+        if new_lookup.startswith('polymorphic_ctype__model'):
+            value = value.replace('_', '')
+        elif new_lookup.endswith('__isnull'):
             value = to_python_boolean(value)
         elif new_lookup.endswith('__in'):
             items = []
@@ -144,6 +169,15 @@ class FieldLookupBackend(BaseFilterBackend):
                 re.compile(value)
             except re.error as e:
                 raise ValueError(e.args[0])
+        elif new_lookup.endswith('__search'):
+            related_model = getattr(field, 'related_model', None)
+            if not related_model:
+                raise ValueError('%s is not searchable' % new_lookup[:-8])
+            new_lookups = []
+            for rm_field in related_model._meta.fields:
+                if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description'):
+                    new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
+            return value, new_lookups
         else:
             value = self.value_to_python_for_field(field, value)
         return value, new_lookup
@@ -155,6 +189,8 @@ class FieldLookupBackend(BaseFilterBackend):
             and_filters = []
             or_filters = []
             chain_filters = []
+            role_filters = []
+            search_filters = []
             for key, values in request.query_params.lists():
                 if key in self.RESERVED_NAMES:
                     continue
@@ -171,6 +207,21 @@ class FieldLookupBackend(BaseFilterBackend):
                     key = key[:-5]
                     q_int = True
 
+                # RBAC filtering
+                if key == 'role_level':
+                    role_filters.append(values[0])
+                    continue
+
+                # Search across related objects.
+                if key.endswith('__search'):
+                    for value in values:
+                        for search_term in force_text(value).replace(',', ' ').split():
+                            search_value, new_keys = self.value_to_python(queryset.model, key, search_term)
+                            assert isinstance(new_keys, list)
+                            for new_key in new_keys:
+                                search_filters.append((new_key, search_value))
+                    continue
+
                 # Custom chain__ and or__ filters, mutually exclusive (both can
                 # precede not__).
                 q_chain = False
@@ -201,13 +252,21 @@ class FieldLookupBackend(BaseFilterBackend):
                     and_filters.append((q_not, new_key, value))
 
             # Now build Q objects for database query filter.
-            if and_filters or or_filters or chain_filters:
+            if and_filters or or_filters or chain_filters or role_filters or search_filters:
                 args = []
                 for n, k, v in and_filters:
                     if n:
                         args.append(~Q(**{k:v}))
                     else:
                         args.append(Q(**{k:v}))
+                for role_name in role_filters:
+                    args.append(
+                        Q(pk__in=RoleAncestorEntry.objects.filter(
+                            ancestor__in=request.user.roles.all(),
+                            content_type_id=ContentType.objects.get_for_model(queryset.model).id,
+                            role_field=role_name
+                        ).values_list('object_id').distinct())
+                    )
                 if or_filters:
                     q = Q()
                     for n,k,v in or_filters:
@@ -216,6 +275,11 @@ class FieldLookupBackend(BaseFilterBackend):
                         else:
                             q |= Q(**{k:v})
                     args.append(q)
+                if search_filters:
+                    q = Q()
+                    for k,v in search_filters:
+                        q |= Q(**{k:v})
+                    args.append(q)
                 for n,k,v in chain_filters:
                     if n:
                         q = ~Q(**{k:v})
@@ -224,11 +288,12 @@ class FieldLookupBackend(BaseFilterBackend):
                         queryset = queryset.filter(q)
                 queryset = queryset.filter(*args).distinct()
             return queryset
-        except (FieldError, FieldDoesNotExist, ValueError) as e:
+        except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
             raise ParseError(e.args[0])
         except ValidationError as e:
             raise ParseError(e.messages)
 
+
 class OrderByBackend(BaseFilterBackend):
     '''
     Filter to apply ordering based on query string parameters.
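The `__search` lookup added above fans a single query parameter out into `icontains` matches on common identifying fields of the related model, and `filter_queryset` then ORs the resulting `search_filters` into one Q object. A standalone sketch of that expansion — the helper name is mine; the field list comes from `value_to_python` above, which in the real code only keeps fields actually present on the related model:

```python
# Sketch: expand ?created_by__search=admin into OR'd icontains lookups.
from django.db.models import Q

SEARCHABLE_FIELDS = ('username', 'first_name', 'last_name',
                     'email', 'name', 'description')


def expand_search(prefix, term):
    q = Q()
    for field_name in SEARCHABLE_FIELDS:
        q |= Q(**{'%s__%s__icontains' % (prefix, field_name): term})
    return q

# e.g. queryset.filter(expand_search('created_by', 'admin')).distinct()
```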
diff --git a/awx/api/generics.py b/awx/api/generics.py
index 51598979d8..fca4ce3582 100644
--- a/awx/api/generics.py
+++ b/awx/api/generics.py
@@ -9,12 +9,14 @@ import time
 # Django
 from django.conf import settings
 from django.db import connection
+from django.db.models.fields import FieldDoesNotExist
 from django.http import QueryDict
 from django.shortcuts import get_object_or_404
 from django.template.loader import render_to_string
 from django.utils.encoding import smart_text
 from django.utils.safestring import mark_safe
 from django.contrib.contenttypes.models import ContentType
+from django.utils.translation import ugettext_lazy as _
 
 # Django REST Framework
 from rest_framework.authentication import get_authorization_header
@@ -25,6 +27,7 @@ from rest_framework import status
 from rest_framework import views
 
 # AWX
+from awx.api.filters import FieldLookupBackend
 from awx.main.models import *  # noqa
 from awx.main.utils import *  # noqa
 from awx.api.serializers import ResourceAccessListElementSerializer
@@ -40,6 +43,8 @@ __all__ = ['APIView', 'GenericAPIView', 'ListAPIView', 'SimpleListAPIView',
            'DeleteLastUnattachLabelMixin',]
 
 logger = logging.getLogger('awx.api.generics')
+analytics_logger = logging.getLogger('awx.analytics.performance')
+
 
 def get_view_name(cls, suffix=None):
     '''
@@ -58,6 +63,7 @@ def get_view_name(cls, suffix=None):
         return ('%s %s' % (name, suffix)) if suffix else name
     return views.get_view_name(cls, suffix=None)
 
+
 def get_view_description(cls, html=False):
     '''
     Wrapper around REST framework get_view_description() to support
@@ -77,6 +83,7 @@ def get_view_description(cls, html=False):
         desc = '<div class="description">%s</div>' % desc
     return mark_safe(desc)
 
+
 class APIView(views.APIView):
 
     def initialize_request(self, request, *args, **kwargs):
@@ -104,6 +111,7 @@ class APIView(views.APIView):
             logger.warn(status_msg)
         response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
         time_started = getattr(self, 'time_started', None)
+        response['X-API-Node'] = settings.CLUSTER_HOST_ID
         if time_started:
             time_elapsed = time.time() - self.time_started
             response['X-API-Time'] = '%0.3fs' % time_elapsed
@@ -112,6 +120,8 @@ class APIView(views.APIView):
             q_times = [float(q['time']) for q in connection.queries[queries_before:]]
             response['X-API-Query-Count'] = len(q_times)
             response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)
+
+        analytics_logger.info("api response", extra=dict(python_objects=dict(request=request, response=response)))
         return response
 
     def get_authenticate_header(self, request):
@@ -150,6 +160,8 @@ class APIView(views.APIView):
             'new_in_230': getattr(self, 'new_in_230', False),
             'new_in_240': getattr(self, 'new_in_240', False),
             'new_in_300': getattr(self, 'new_in_300', False),
+            'new_in_310': getattr(self, 'new_in_310', False),
+            'deprecated': getattr(self, 'deprecated', False),
         }
 
     def get_description(self, html=False):
@@ -224,17 +236,26 @@ class GenericAPIView(generics.GenericAPIView, APIView):
             d['settings'] = settings
         return d
 
+
 class SimpleListAPIView(generics.ListAPIView, GenericAPIView):
 
     def get_queryset(self):
         return self.request.user.get_queryset(self.model)
 
+
 class ListAPIView(generics.ListAPIView, GenericAPIView):
     # Base class for a read-only list view.
 
     def get_queryset(self):
         return self.request.user.get_queryset(self.model)
 
+    def paginate_queryset(self, queryset):
+        page = super(ListAPIView, self).paginate_queryset(queryset)
+        # Queries RBAC info & stores into list objects
+        if hasattr(self, 'capabilities_prefetch') and page is not None:
+            cache_list_capabilities(page, self.capabilities_prefetch, self.model, self.request.user)
+        return page
+
     def get_description_context(self):
         opts = self.model._meta
         if 'username' in opts.get_all_field_names():
@@ -252,14 +273,61 @@ class ListAPIView(generics.ListAPIView, GenericAPIView):
         fields = []
         for field in self.model._meta.fields:
             if field.name in ('username', 'first_name', 'last_name', 'email',
-                              'name', 'description', 'email'):
+                              'name', 'description'):
                 fields.append(field.name)
         return fields
 
+    @property
+    def related_search_fields(self):
+        def skip_related_name(name):
+            return (
+                name is None or name.endswith('_role') or name.startswith('_') or
+                name.startswith('deprecated_') or name.endswith('_set') or
+                name == 'polymorphic_ctype')
+
+        fields = set([])
+        for field in self.model._meta.fields:
+            if skip_related_name(field.name):
+                continue
+            if getattr(field, 'related_model', None):
+                fields.add('{}__search'.format(field.name))
+        for rel in self.model._meta.related_objects:
+            name = rel.related_model._meta.verbose_name.replace(" ", "_")
+            if skip_related_name(name):
+                continue
+            fields.add('{}__search'.format(name))
+        m2m_rel = []
+        m2m_rel += self.model._meta.local_many_to_many
+        if issubclass(self.model, UnifiedJobTemplate) and self.model != UnifiedJobTemplate:
+            m2m_rel += UnifiedJobTemplate._meta.local_many_to_many
+        if issubclass(self.model, UnifiedJob) and self.model != UnifiedJob:
+            m2m_rel += UnifiedJob._meta.local_many_to_many
+        for relationship in m2m_rel:
+            if skip_related_name(relationship.name):
+                continue
+            if relationship.related_model._meta.app_label != 'main':
+                continue
+            fields.add('{}__search'.format(relationship.name))
+        fields = list(fields)
+
+        allowed_fields = []
+        for field in fields:
+            try:
+                FieldLookupBackend().get_field_from_lookup(self.model, field)
+            except PermissionDenied:
+                pass
+            except FieldDoesNotExist:
+                allowed_fields.append(field)
+            else:
+                allowed_fields.append(field)
+        return allowed_fields
+
fields.add('{}__search'.format(relationship.name)) + fields = list(fields) + + allowed_fields = [] + for field in fields: + try: + FieldLookupBackend().get_field_from_lookup(self.model, field) + except PermissionDenied: + pass + except FieldDoesNotExist: + allowed_fields.append(field) + else: + allowed_fields.append(field) + return allowed_fields + + class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView): # Base class for a list view that allows creating new objects. pass + class ParentMixin(object): def get_parent_object(self): @@ -278,7 +346,8 @@ class ParentMixin(object): if not self.request.user.can_access(*args): raise PermissionDenied() -class SubListAPIView(ListAPIView, ParentMixin): + +class SubListAPIView(ParentMixin, ListAPIView): # Base class for a read-only sublist view. # Subclasses should define at least: @@ -305,6 +374,7 @@ class SubListAPIView(ListAPIView, ParentMixin): sublist_qs = getattrd(parent, self.relationship).distinct() return qs & sublist_qs + class SubListCreateAPIView(SubListAPIView, ListCreateAPIView): # Base class for a sublist view that allows for creating subobjects # associated with the parent object. @@ -357,10 +427,14 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView): headers = {'Location': obj.get_absolute_url()} return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) + class SubListCreateAttachDetachAPIView(SubListCreateAPIView): # Base class for a sublist view that allows for creating subobjects and # attaching/detaching them from the parent. + def is_valid_relation(self, parent, sub, created=False): + return None + def get_description_context(self): d = super(SubListCreateAttachDetachAPIView, self).get_description_context() d.update({ @@ -397,6 +471,13 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView): skip_sub_obj_read_check=created): raise PermissionDenied() + # Verify that the relationship to be added is valid. + attach_errors = self.is_valid_relation(parent, sub, created=created) + if attach_errors is not None: + if created: + sub.delete() + return Response(attach_errors, status=status.HTTP_400_BAD_REQUEST) + # Attach the object to the collection. if sub not in relationship.all(): relationship.add(sub) @@ -413,7 +494,7 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView): sub_id = request.data.get('id', None) res = None if not sub_id: - data = dict(msg='"id" is required to disassociate') + data = dict(msg=_('"id" is required to disassociate')) res = Response(data, status=status.HTTP_400_BAD_REQUEST) return (sub_id, res) @@ -449,12 +530,13 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView): else: return self.attach(request, *args, **kwargs) -''' -Models for which you want the last instance to be deleted from the database -when the last disassociate is called should inherit from this class. Further, -the model should implement is_detached() -''' + class DeleteLastUnattachLabelMixin(object): + ''' + Models for which you want the last instance to be deleted from the database + when the last disassociate is called should inherit from this class. 
Further, + the model should implement is_detached() + ''' def unattach(self, request, *args, **kwargs): (sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request) if res: @@ -469,12 +551,15 @@ class DeleteLastUnattachLabelMixin(object): return res -class SubDetailAPIView(generics.RetrieveAPIView, GenericAPIView, ParentMixin): + +class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView): pass + class RetrieveAPIView(generics.RetrieveAPIView, GenericAPIView): pass + class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): @@ -489,6 +574,7 @@ class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView): ''' scrub any fields the user cannot/should not put/patch, based on user context. This runs after read-only serialization filtering ''' pass + class RetrieveDestroyAPIView(RetrieveAPIView, generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): @@ -499,21 +585,21 @@ class RetrieveDestroyAPIView(RetrieveAPIView, generics.RetrieveDestroyAPIView): obj.delete() return Response(status=status.HTTP_204_NO_CONTENT) + class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, RetrieveDestroyAPIView): pass + class DestroyAPIView(GenericAPIView, generics.DestroyAPIView): pass -class ResourceAccessList(ListAPIView): +class ResourceAccessList(ParentMixin, ListAPIView): serializer_class = ResourceAccessListElementSerializer def get_queryset(self): - self.object_id = self.kwargs['pk'] - resource_model = getattr(self, 'resource_model') - obj = get_object_or_404(resource_model, pk=self.object_id) + obj = self.get_parent_object() content_type = ContentType.objects.get_for_model(obj) roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id)) diff --git a/awx/api/license.py b/awx/api/license.py deleted file mode 100644 index 55706364f8..0000000000 --- a/awx/api/license.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -from rest_framework.exceptions import APIException - -from awx.main.task_engine import TaskSerializer - - -class LicenseForbids(APIException): - status_code = 402 - default_detail = 'Your Tower license does not allow that.' - - -def get_license(show_key=False, bypass_database=False): - """Return a dictionary representing the license currently in - place on this Tower instance. - """ - license_reader = TaskSerializer() - if bypass_database: - return license_reader.from_file(show_key=show_key) - return license_reader.from_database(show_key=show_key) - - -def feature_enabled(name, bypass_database=False): - """Return True if the requested feature is enabled, False otherwise. - If the feature does not exist, raise KeyError. - """ - license = get_license(bypass_database=bypass_database) - - # Sanity check: If there is no license, the feature is considered - # to be off. - if 'features' not in license: - return False - - # Return the correct feature flag. - return license['features'].get(name, False) - -def feature_exists(name): - """Return True if the requested feature is enabled, False otherwise. - If the feature does not exist, raise KeyError. - """ - license = get_license() - - # Sanity check: If there is no license, the feature is considered - # to be off. 
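
The license helpers removed in this file do not disappear: the serializers hunk further below switches its import to awx.conf.license. A minimal sketch of the gating pattern they support, assuming the relocated feature_enabled keeps the semantics shown here; the LicenseForbids body is copied from the removed file, while assert_feature and the feature name are illustrative:

    from rest_framework.exceptions import APIException
    from awx.conf.license import feature_enabled  # new import path used below

    class LicenseForbids(APIException):
        status_code = 402
        default_detail = 'Your Tower license does not allow that.'

    def assert_feature(name):
        # Hypothetical helper: feature_enabled() returns False when the license
        # carries no 'features' dict, so a missing license fails closed.
        if not feature_enabled(name):
            raise LicenseForbids()

    assert_feature('system_tracking')
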
- if 'features' not in license: - return False - - return name in license['features'] diff --git a/awx/api/management/commands/uses_mongo.py b/awx/api/management/commands/uses_mongo.py deleted file mode 100644 index 8ea6404f4a..0000000000 --- a/awx/api/management/commands/uses_mongo.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved - -import sys - -from optparse import make_option -from django.core.management.base import BaseCommand -from awx.main.ha import is_ha_environment -from awx.main.task_engine import TaskSerializer - - -class Command(BaseCommand): - """Return a exit status of 0 if MongoDB should be active, and an - exit status of 1 otherwise. - - This script is intended to be used by bash and init scripts to - conditionally start MongoDB, so its focus is on being bash-friendly. - """ - - def __init__(self): - super(Command, self).__init__() - BaseCommand.option_list += (make_option('--local', - dest='local', - default=False, - action="store_true", - help="Only check if mongo should be running locally"),) - - def handle(self, *args, **kwargs): - # Get the license data. - license_reader = TaskSerializer() - license_data = license_reader.from_database() - - # Does the license have features, at all? - # If there is no license yet, then all features are clearly off. - if 'features' not in license_data: - print('No license available.') - sys.exit(2) - - # Does the license contain the system tracking feature? - # If and only if it does, MongoDB should run. - system_tracking = license_data['features']['system_tracking'] - - # Okay, do we need MongoDB to be turned on? - # This is a silly variable assignment right now, but I expect the - # rules here will grow more complicated over time. - uses_mongo = system_tracking # noqa - - if is_ha_environment() and kwargs['local'] and uses_mongo: - print("HA Configuration detected. Database should be remote") - uses_mongo = False - - # If we do not need Mongo, return a non-zero exit status. - if not uses_mongo: - print('MongoDB NOT required') - sys.exit(1) - - # We do need Mongo, return zero. - print('MongoDB required') - sys.exit(0) diff --git a/awx/api/metadata.py b/awx/api/metadata.py index c326a4a875..910517a3fd 100644 --- a/awx/api/metadata.py +++ b/awx/api/metadata.py @@ -7,12 +7,13 @@ from collections import OrderedDict from django.core.exceptions import PermissionDenied from django.http import Http404 from django.utils.encoding import force_text, smart_text +from django.utils.translation import ugettext_lazy as _ # Django REST Framework from rest_framework import exceptions from rest_framework import metadata from rest_framework import serializers -from rest_framework.relations import RelatedField +from rest_framework.relations import RelatedField, ManyRelatedField from rest_framework.request import clone_request # Ansible Tower @@ -29,7 +30,9 @@ class Metadata(metadata.SimpleMetadata): text_attrs = [ 'read_only', 'label', 'help_text', 'min_length', 'max_length', - 'min_value', 'max_value' + 'min_value', 'max_value', + 'category', 'category_slug', + 'defined_in_file' ] for attr in text_attrs: @@ -37,29 +40,37 @@ class Metadata(metadata.SimpleMetadata): if value is not None and value != '': field_info[attr] = force_text(value, strings_only=True) + placeholder = getattr(field, 'placeholder', serializers.empty) + if placeholder is not serializers.empty: + field_info['placeholder'] = placeholder + # Update help text for common fields. 
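
The common-field help text handled just below is templated with the model's verbose_name, so one translatable string serves every model. Illustrative only; the model name is an example, not taken from this diff:

    field_help_text = {'id': 'Database ID for this {}.'}
    verbose_name = 'job template'   # e.g. smart_text(opts.verbose_name)
    assert (field_help_text['id'].format(verbose_name)
            == 'Database ID for this job template.')
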
serializer = getattr(field, 'parent', None) if serializer: field_help_text = { - 'id': 'Database ID for this {}.', - 'name': 'Name of this {}.', - 'description': 'Optional description of this {}.', - 'type': 'Data type for this {}.', - 'url': 'URL for this {}.', - 'related': 'Data structure with URLs of related resources.', - 'summary_fields': 'Data structure with name/description for related resources.', - 'created': 'Timestamp when this {} was created.', - 'modified': 'Timestamp when this {} was last modified.', + 'id': _('Database ID for this {}.'), + 'name': _('Name of this {}.'), + 'description': _('Optional description of this {}.'), + 'type': _('Data type for this {}.'), + 'url': _('URL for this {}.'), + 'related': _('Data structure with URLs of related resources.'), + 'summary_fields': _('Data structure with name/description for related resources.'), + 'created': _('Timestamp when this {} was created.'), + 'modified': _('Timestamp when this {} was last modified.'), } if field.field_name in field_help_text: - opts = serializer.Meta.model._meta.concrete_model._meta - verbose_name = smart_text(opts.verbose_name) - field_info['help_text'] = field_help_text[field.field_name].format(verbose_name) + if hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'): + opts = serializer.Meta.model._meta.concrete_model._meta + verbose_name = smart_text(opts.verbose_name) + field_info['help_text'] = field_help_text[field.field_name].format(verbose_name) # Indicate if a field has a default value. # FIXME: Still isn't showing all default values? try: - field_info['default'] = field.get_default() + default = field.get_default() + if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost': + default = '{}://{}'.format(self.request.scheme, self.request.get_host()) + field_info['default'] = default except serializers.SkipField: pass @@ -68,7 +79,7 @@ class Metadata(metadata.SimpleMetadata): elif getattr(field, 'fields', None): field_info['children'] = self.get_serializer_info(field) - if hasattr(field, 'choices') and not isinstance(field, RelatedField): + if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'): field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()] # Indicate if a field is write-only. @@ -112,19 +123,20 @@ class Metadata(metadata.SimpleMetadata): actions = {} for method in {'GET', 'PUT', 'POST'} & set(view.allowed_methods): view.request = clone_request(request, method) + obj = None try: # Test global permissions if hasattr(view, 'check_permissions'): view.check_permissions(view.request) # Test object permissions if method == 'PUT' and hasattr(view, 'get_object'): - view.get_object() + obj = view.get_object() except (exceptions.APIException, PermissionDenied, Http404): continue else: # If user has appropriate permissions for the view, include # appropriate metadata about the fields that should be supplied. - serializer = view.get_serializer() + serializer = view.get_serializer(instance=obj) actions[method] = self.get_serializer_info(serializer) finally: view.request = request @@ -140,27 +152,34 @@ class Metadata(metadata.SimpleMetadata): # For GET method, remove meta attributes that aren't relevant # when reading a field and remove write-only fields. 
if method == 'GET': - meta.pop('required', None) - meta.pop('read_only', None) - meta.pop('default', None) - meta.pop('min_length', None) - meta.pop('max_length', None) + attrs_to_remove = ('required', 'read_only', 'default', 'min_length', 'max_length', 'placeholder') + for attr in attrs_to_remove: + meta.pop(attr, None) + meta.get('child', {}).pop(attr, None) if meta.pop('write_only', False): actions['GET'].pop(field) # For PUT/POST methods, remove read-only fields. if method in ('PUT', 'POST'): + # This value should always be False for PUT/POST, so don't + # show it (file-based read-only settings can't be updated) + meta.pop('defined_in_file', False) + if meta.pop('read_only', False): actions[method].pop(field) return actions def determine_metadata(self, request, view): + # store request on self so we can use it to generate field defaults + # (such as TOWER_URL_BASE) + self.request = request + metadata = super(Metadata, self).determine_metadata(request, view) # Add version number in which view was added to Tower. added_in_version = '1.2' - for version in ('3.0.0', '2.4.0', '2.3.0', '2.2.0', '2.1.0', '2.0.0', '1.4.8', '1.4.5', '1.4', '1.3'): + for version in ('3.1.0', '3.0.0', '2.4.0', '2.3.0', '2.2.0', '2.1.0', '2.0.0', '1.4.8', '1.4.5', '1.4', '1.3'): if getattr(view, 'new_in_%s' % version.replace('.', ''), False): added_in_version = version break @@ -176,8 +195,17 @@ class Metadata(metadata.SimpleMetadata): if getattr(view, 'search_fields', None): metadata['search_fields'] = view.search_fields + # Add related search fields if available from the view. + if getattr(view, 'related_search_fields', None): + metadata['related_search_fields'] = view.related_search_fields + + from rest_framework import generics + if isinstance(view, generics.ListAPIView) and hasattr(view, 'paginator'): + metadata['max_page_size'] = view.paginator.max_page_size + return metadata + class RoleMetadata(Metadata): def determine_metadata(self, request, view): metadata = super(RoleMetadata, self).determine_metadata(request, view) diff --git a/awx/api/pagination.py b/awx/api/pagination.py index ee17aee0e1..9a416e9995 100644 --- a/awx/api/pagination.py +++ b/awx/api/pagination.py @@ -2,6 +2,7 @@ # All Rights Reserved. 
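
The pagination change below adds a hard cap: page_size stays client-controllable, but DRF clamps it to max_page_size. A minimal sketch of the clamping behavior, with a stand-in value for settings.MAX_PAGE_SIZE:

    from rest_framework.pagination import PageNumberPagination

    class CappedPagination(PageNumberPagination):
        page_size_query_param = 'page_size'
        max_page_size = 200   # stand-in for settings.MAX_PAGE_SIZE

    # GET /api/v1/jobs/?page_size=100000 now returns at most 200 results per
    # page; DRF's get_page_size() applies max_page_size as a cutoff.
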
# Django REST Framework +from django.conf import settings from rest_framework import pagination from rest_framework.utils.urls import replace_query_param @@ -9,11 +10,13 @@ from rest_framework.utils.urls import replace_query_param class Pagination(pagination.PageNumberPagination): page_size_query_param = 'page_size' + max_page_size = settings.MAX_PAGE_SIZE def get_next_link(self): if not self.page.has_next(): return None url = self.request and self.request.get_full_path() or '' + url = url.encode('utf-8') page_number = self.page.next_page_number() return replace_query_param(url, self.page_query_param, page_number) @@ -21,5 +24,6 @@ class Pagination(pagination.PageNumberPagination): if not self.page.has_previous(): return None url = self.request and self.request.get_full_path() or '' + url = url.encode('utf-8') page_number = self.page.previous_page_number() return replace_query_param(url, self.page_query_param, page_number) diff --git a/awx/api/parsers.py b/awx/api/parsers.py index 94ddbec561..8c720201a2 100644 --- a/awx/api/parsers.py +++ b/awx/api/parsers.py @@ -5,6 +5,7 @@ import json # Django from django.conf import settings from django.utils import six +from django.utils.translation import ugettext_lazy as _ # Django REST Framework from rest_framework import parsers @@ -27,4 +28,4 @@ class JSONParser(parsers.JSONParser): data = stream.read().decode(encoding) return json.loads(data, object_pairs_hook=OrderedDict) except ValueError as exc: - raise ParseError('JSON parse error - %s' % six.text_type(exc)) + raise ParseError(_('JSON parse error - %s') % six.text_type(exc)) diff --git a/awx/api/permissions.py b/awx/api/permissions.py index 285441421d..8ec26a2cc8 100644 --- a/awx/api/permissions.py +++ b/awx/api/permissions.py @@ -4,9 +4,6 @@ # Python import logging -# Django -from django.http import Http404 - # Django REST Framework from rest_framework.exceptions import MethodNotAllowed, PermissionDenied from rest_framework import permissions @@ -19,7 +16,8 @@ from awx.main.utils import get_object_or_400 logger = logging.getLogger('awx.api.permissions') __all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission', - 'TaskPermission', 'ProjectUpdatePermission', 'UserPermission'] + 'TaskPermission', 'ProjectUpdatePermission', 'UserPermission',] + class ModelAccessPermission(permissions.BasePermission): ''' @@ -49,6 +47,9 @@ class ModelAccessPermission(permissions.BasePermission): if not check_user_access(request.user, view.parent_model, 'read', parent_obj): return False + if hasattr(view, 'parent_key'): + if not check_user_access(request.user, view.model, 'add', {view.parent_key: parent_obj.pk}): + return False return True elif getattr(view, 'is_job_start', False): if not obj: @@ -92,13 +93,6 @@ class ModelAccessPermission(permissions.BasePermission): method based on the request method. ''' - # Check that obj (if given) is active, otherwise raise a 404. - active = getattr(obj, 'active', getattr(obj, 'is_active', True)) - if callable(active): - active = active() - if not active: - raise Http404() - # Don't allow anonymous users. 401, not 403, hence no raised exception. 
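
The parent_key check added above lets a sublist view opt in to stricter creation checks: a POST is treated as an 'add' of the child model scoped to the parent object. A sketch of the view-side declaration this expects; the concrete view and field names are examples, not part of this diff:

    class InventoryHostsList(SubListCreateAttachDetachAPIView):
        model = Host
        parent_model = Inventory
        relationship = 'hosts'
        parent_key = 'inventory'   # POST requires can_access(user, Host, 'add',
                                   #     {'inventory': parent.pk}) to succeed
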
if not request.user or request.user.is_anonymous(): return False @@ -137,6 +131,7 @@ class ModelAccessPermission(permissions.BasePermission): def has_object_permission(self, request, view, obj): return self.has_permission(request, view, obj) + class JobTemplateCallbackPermission(ModelAccessPermission): ''' Permission check used by job template callback view for requests from @@ -162,6 +157,7 @@ class JobTemplateCallbackPermission(ModelAccessPermission): else: return True + class TaskPermission(ModelAccessPermission): ''' Permission checks used for API callbacks from running a task. @@ -186,11 +182,10 @@ class TaskPermission(ModelAccessPermission): # token. if view.model == Inventory and request.method.lower() in ('head', 'get'): return bool(not obj or obj.pk == unified_job.inventory_id) - elif view.model in (JobEvent, AdHocCommandEvent) and request.method.lower() == 'post': - return bool(not obj or obj.pk == unified_job.pk) else: return False + class ProjectUpdatePermission(ModelAccessPermission): ''' Permission check used by ProjectUpdateView to determine who can update projects @@ -206,6 +201,10 @@ class ProjectUpdatePermission(ModelAccessPermission): class UserPermission(ModelAccessPermission): def check_post_permissions(self, request, view, obj=None): - if request.user.is_superuser: + if not request.data: + return request.user.admin_of_organizations.exists() + elif request.user.is_superuser: return True raise PermissionDenied() + + diff --git a/awx/api/renderers.py b/awx/api/renderers.py index 348a8220c4..fa039a2226 100644 --- a/awx/api/renderers.py +++ b/awx/api/renderers.py @@ -3,6 +3,7 @@ # Django REST Framework from rest_framework import renderers +from rest_framework.request import override_method class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer): @@ -30,6 +31,8 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer): # Set a flag on the view to indiciate to the view/serializer that we're # creating a raw data form for the browsable API. Store the original # request method to determine how to populate the raw data form. + if request.method in {'OPTIONS', 'DELETE'}: + return try: setattr(view, '_raw_data_form_marker', True) setattr(view, '_raw_data_request_method', request.method) @@ -41,10 +44,16 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer): def get_rendered_html_form(self, data, view, method, request): # Never show auto-generated form (only raw form). obj = getattr(view, 'object', None) - if not self.show_form_for_method(view, method, request, obj): - return - if method in ('DELETE', 'OPTIONS'): - return True # Don't actually need to return a form + if obj is None and hasattr(view, 'get_object') and hasattr(view, 'retrieve'): + try: + obj = view.get_object() + except Exception: + obj = None + with override_method(view, request, method) as request: + if not self.show_form_for_method(view, method, request, obj): + return + if method in ('DELETE', 'OPTIONS'): + return True # Don't actually need to return a form def get_filter_form(self, data, view, request): # Don't show filter form in browsable API. 
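
The get_rendered_html_form change above leans on DRF's override_method context manager: the request temporarily reports the form's method so show_form_for_method evaluates the right permissions, then reverts on exit. A standalone sketch of the pattern, with a hypothetical wrapper name:

    from rest_framework.request import override_method

    def form_allowed(renderer, view, request, obj, method='PUT'):
        # Evaluate form visibility as if the request used `method`;
        # request.method is restored when the with-block exits.
        with override_method(view, request, method) as patched:
            return renderer.show_form_for_method(view, method, patched, obj)
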
@@ -71,3 +80,8 @@ class AnsiTextRenderer(PlainTextRenderer): media_type = 'text/plain' format = 'ansi' + + +class AnsiDownloadRenderer(PlainTextRenderer): + + format = "ansi_download" diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 2eb4e3d7a1..7c6698ccdd 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -5,6 +5,7 @@ import copy import json import re +import six import logging from collections import OrderedDict from dateutil import rrule @@ -20,9 +21,11 @@ from django.contrib.contenttypes.models import ContentType from django.core.urlresolvers import reverse from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError from django.db import models -# from django.utils.translation import ugettext_lazy as _ +from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import force_text from django.utils.text import capfirst +from django.utils.timezone import now +from django.utils.functional import cached_property # Django REST Framework from rest_framework.exceptions import ValidationError @@ -37,11 +40,14 @@ from polymorphic import PolymorphicModel # AWX from awx.main.constants import SCHEDULEABLE_PROVIDERS from awx.main.models import * # noqa +from awx.main.access import get_user_capabilities from awx.main.fields import ImplicitRoleField -from awx.main.utils import get_type_for_model, get_model_for_type, build_url, timestamp_apiformat, camelcase_to_underscore, getattrd -from awx.main.conf import tower_settings +from awx.main.utils import ( + get_type_for_model, get_model_for_type, build_url, timestamp_apiformat, + camelcase_to_underscore, getattrd, parse_yaml_or_json) +from awx.main.validators import vars_validate_or_raise -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, EncryptedPasswordField, VerbatimField logger = logging.getLogger('awx.api.serializers') @@ -73,12 +79,15 @@ SUMMARIZABLE_FK_FIELDS = { 'total_groups', 'groups_with_active_failures', 'has_inventory_sources'), - 'project': DEFAULT_SUMMARY_FIELDS + ('status',), + 'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'), + 'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',), 'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'), 'cloud_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'), 'network_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'net'), - 'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',), + 'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed'), 'job_template': DEFAULT_SUMMARY_FIELDS, + 'workflow_job_template': DEFAULT_SUMMARY_FIELDS, + 'workflow_job': DEFAULT_SUMMARY_FIELDS, 'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',), 'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',), 'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'), @@ -241,11 +250,13 @@ class BaseSerializer(serializers.ModelSerializer): def get_type_choices(self): type_name_map = { - 'job': 'Playbook Run', - 'ad_hoc_command': 'Command', - 'project_update': 'SCM Update', - 'inventory_update': 'Inventory Sync', - 'system_job': 'Management Job', + 'job': _('Playbook Run'), + 'ad_hoc_command': _('Command'), + 'project_update': _('SCM Update'), + 'inventory_update': _('Inventory Sync'), + 'system_job': _('Management Job'), + 'workflow_job': _('Workflow Job'), + 'workflow_job_template': _('Workflow Template'), } choices = [] for t in self.get_types(): @@ 
-285,7 +296,8 @@ class BaseSerializer(serializers.ModelSerializer): # because it results in additional queries. if fk == 'job' and isinstance(obj, UnifiedJob): continue - if fk == 'project' and isinstance(obj, InventorySource): + if fk == 'project' and (isinstance(obj, InventorySource) or + isinstance(obj, Project)): continue fkval = getattr(obj, fk, None) @@ -321,15 +333,22 @@ class BaseSerializer(serializers.ModelSerializer): roles = {} for field in obj._meta.get_fields(): if type(field) is ImplicitRoleField: - role = getattr(obj, field.name) - #roles[field.name] = RoleSerializer(data=role).to_representation(role) - roles[field.name] = { - 'id': role.id, - 'name': role.name, - 'description': role.description, - } + roles[field.name] = role_summary_fields_generator(obj, field.name) if len(roles) > 0: summary_fields['object_roles'] = roles + + # Advance display of RBAC capabilities + if hasattr(self, 'show_capabilities'): + view = self.context.get('view', None) + parent_obj = None + if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'): + parent_obj = view.get_parent_object() + if view and view.request and view.request.user: + user_capabilities = get_user_capabilities( + view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj) + if user_capabilities: + summary_fields['user_capabilities'] = user_capabilities + return summary_fields def get_created(self, obj): @@ -481,6 +500,7 @@ class BaseSerializer(serializers.ModelSerializer): class EmptySerializer(serializers.Serializer): pass + class BaseFactSerializer(BaseSerializer): __metaclass__ = BaseSerializerMetaclass @@ -494,11 +514,12 @@ class BaseFactSerializer(BaseSerializer): ret['module'] = serializers.ChoiceField(choices=choices, read_only=True, required=False) return ret + class UnifiedJobTemplateSerializer(BaseSerializer): class Meta: model = UnifiedJobTemplate - fields = ('*', 'last_job_run', 'last_job_failed', 'has_schedules', + fields = ('*', 'last_job_run', 'last_job_failed', 'next_job_run', 'status') def get_related(self, obj): @@ -513,7 +534,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer): def get_types(self): if type(self) is UnifiedJobTemplateSerializer: - return ['project', 'inventory_source', 'job_template', 'system_job_template'] + return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',] else: return super(UnifiedJobTemplateSerializer, self).get_types() @@ -528,6 +549,8 @@ class UnifiedJobTemplateSerializer(BaseSerializer): serializer_class = JobTemplateSerializer elif isinstance(obj, SystemJobTemplate): serializer_class = SystemJobTemplateSerializer + elif isinstance(obj, WorkflowJobTemplate): + serializer_class = WorkflowJobTemplateSerializer if serializer_class: serializer = serializer_class(instance=obj, context=self.context) return serializer.to_representation(obj) @@ -536,6 +559,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer): class UnifiedJobSerializer(BaseSerializer): + show_capabilities = ['start', 'delete'] result_stdout = serializers.SerializerMethodField() @@ -544,7 +568,7 @@ class UnifiedJobSerializer(BaseSerializer): fields = ('*', 'unified_job_template', 'launch_type', 'status', 'failed', 'started', 'finished', 'elapsed', 'job_args', 'job_cwd', 'job_env', 'job_explanation', 'result_stdout', - 'result_traceback') + 'execution_node', 'result_traceback') extra_kwargs = { 'unified_job_template': { 'source': 'unified_job_template_id', @@ -558,7 +582,7 @@ class UnifiedJobSerializer(BaseSerializer): def 
get_types(self): if type(self) is UnifiedJobSerializer: - return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job'] + return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',] else: return super(UnifiedJobSerializer, self).get_types() @@ -576,8 +600,26 @@ class UnifiedJobSerializer(BaseSerializer): res['stdout'] = reverse('api:job_stdout', args=(obj.pk,)) elif isinstance(obj, AdHocCommand): res['stdout'] = reverse('api:ad_hoc_command_stdout', args=(obj.pk,)) + if obj.workflow_job_id: + res['source_workflow_job'] = reverse('api:workflow_job_detail', args=(obj.workflow_job_id,)) return res + def get_summary_fields(self, obj): + summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj) + if obj.spawned_by_workflow: + summary_fields['source_workflow_job'] = {} + try: + summary_obj = obj.unified_job_node.workflow_job + except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist: + return summary_fields + + for field in SUMMARIZABLE_FK_FIELDS['job']: + val = getattr(summary_obj, field, None) + if val is not None: + summary_fields['source_workflow_job'][field] = val + + return summary_fields + def to_representation(self, obj): serializer_class = None if type(self) is UnifiedJobSerializer: @@ -591,20 +633,28 @@ class UnifiedJobSerializer(BaseSerializer): serializer_class = AdHocCommandSerializer elif isinstance(obj, SystemJob): serializer_class = SystemJobSerializer + elif isinstance(obj, WorkflowJob): + serializer_class = WorkflowJobSerializer if serializer_class: serializer = serializer_class(instance=obj, context=self.context) ret = serializer.to_representation(obj) else: ret = super(UnifiedJobSerializer, self).to_representation(obj) + if 'elapsed' in ret: + if obj and obj.pk and obj.started and not obj.finished: + td = now() - obj.started + ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0) ret['elapsed'] = float(ret['elapsed']) + return ret def get_result_stdout(self, obj): obj_size = obj.result_stdout_size - if obj_size > tower_settings.STDOUT_MAX_BYTES_DISPLAY: - return "Standard Output too large to display (%d bytes), only download supported for sizes over %d bytes" % (obj_size, - tower_settings.STDOUT_MAX_BYTES_DISPLAY) + if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY: + return _("Standard Output too large to display (%(text_size)d bytes), " + "only download supported for sizes over %(supported_size)d bytes") % { + 'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY} return obj.result_stdout @@ -621,7 +671,7 @@ class UnifiedJobListSerializer(UnifiedJobSerializer): def get_types(self): if type(self) is UnifiedJobListSerializer: - return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job'] + return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job'] else: return super(UnifiedJobListSerializer, self).get_types() @@ -638,6 +688,8 @@ class UnifiedJobListSerializer(UnifiedJobSerializer): serializer_class = AdHocCommandListSerializer elif isinstance(obj, SystemJob): serializer_class = SystemJobListSerializer + elif isinstance(obj, WorkflowJob): + serializer_class = WorkflowJobSerializer if serializer_class: serializer = serializer_class(instance=obj, context=self.context) ret = serializer.to_representation(obj) @@ -657,9 +709,10 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer): def get_result_stdout(self, obj): obj_size = obj.result_stdout_size - if obj_size > 
tower_settings.STDOUT_MAX_BYTES_DISPLAY: - return "Standard Output too large to display (%d bytes), only download supported for sizes over %d bytes" % (obj_size, - tower_settings.STDOUT_MAX_BYTES_DISPLAY) + if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY: + return _("Standard Output too large to display (%(text_size)d bytes), " + "only download supported for sizes over %(supported_size)d bytes") % { + 'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY} return obj.result_stdout def get_types(self): @@ -672,15 +725,16 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer): class UserSerializer(BaseSerializer): password = serializers.CharField(required=False, default='', write_only=True, - help_text='Write-only field used to change the password.') + help_text=_('Write-only field used to change the password.')) ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True) - external_account = serializers.SerializerMethodField(help_text='Set if the account is managed by an external service') + external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service')) is_system_auditor = serializers.BooleanField(default=False) + show_capabilities = ['edit', 'delete'] class Meta: model = User fields = ('*', '-name', '-description', '-modified', - '-summary_fields', 'username', 'first_name', 'last_name', + 'username', 'first_name', 'last_name', 'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'external_account') def to_representation(self, obj): @@ -697,7 +751,7 @@ class UserSerializer(BaseSerializer): def validate_password(self, value): if not self.instance and value in (None, ''): - raise serializers.ValidationError('Password required for new User.') + raise serializers.ValidationError(_('Password required for new User.')) return value def _update_password(self, obj, new_password): @@ -781,7 +835,7 @@ class UserSerializer(BaseSerializer): ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys()) if field_name in ldap_managed_fields: if value != getattr(self.instance, field_name): - raise serializers.ValidationError('Unable to change %s on user managed by LDAP.' 
% field_name) + raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name) return value def validate_username(self, value): @@ -801,6 +855,7 @@ class UserSerializer(BaseSerializer): class OrganizationSerializer(BaseSerializer): + show_capabilities = ['edit', 'delete'] class Meta: model = Organization @@ -811,6 +866,7 @@ class OrganizationSerializer(BaseSerializer): res.update(dict( projects = reverse('api:organization_projects_list', args=(obj.pk,)), inventories = reverse('api:organization_inventories_list', args=(obj.pk,)), + workflow_job_templates = reverse('api:organization_workflow_job_templates_list', args=(obj.pk,)), users = reverse('api:organization_users_list', args=(obj.pk,)), admins = reverse('api:organization_admins_list', args=(obj.pk,)), teams = reverse('api:organization_teams_list', args=(obj.pk,)), @@ -842,7 +898,7 @@ class ProjectOptionsSerializer(BaseSerializer): class Meta: fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch', - 'scm_clean', 'scm_delete_on_update', 'credential') + 'scm_clean', 'scm_delete_on_update', 'credential', 'timeout',) def get_related(self, obj): res = super(ProjectOptionsSerializer, self).get_related(obj) @@ -885,11 +941,12 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer): status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True) last_update_failed = serializers.BooleanField(read_only=True) last_updated = serializers.DateTimeField(read_only=True) + show_capabilities = ['start', 'schedule', 'edit', 'delete'] class Meta: model = Project fields = ('*', 'organization', 'scm_delete_on_next_update', 'scm_update_on_launch', - 'scm_update_cache_timeout') + \ + 'scm_update_cache_timeout', 'scm_revision',) + \ ('last_update_failed', 'last_updated') # Backwards compatibility read_only_fields = ('scm_delete_on_next_update',) @@ -920,7 +977,16 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer): args=(obj.last_update.pk,)) return res + def to_representation(self, obj): + ret = super(ProjectSerializer, self).to_representation(obj) + if 'scm_revision' in ret and obj.scm_type == '': + ret['scm_revision'] = '' + return ret + def validate(self, attrs): + def get_field_from_model_or_attrs(fd): + return attrs.get(fd, self.instance and getattr(self.instance, fd) or None) + organization = None if 'organization' in attrs: organization = attrs['organization'] @@ -930,18 +996,25 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer): view = self.context.get('view', None) if not organization and not view.request.user.is_superuser: # Only allow super users to create orgless projects - raise serializers.ValidationError('Organization is missing') + raise serializers.ValidationError(_('Organization is missing')) + elif get_field_from_model_or_attrs('scm_type') == '': + for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'): + if get_field_from_model_or_attrs(fd): + raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')}) return super(ProjectSerializer, self).validate(attrs) class ProjectPlaybooksSerializer(ProjectSerializer): - playbooks = serializers.ReadOnlyField(help_text='Array of playbooks available within this project.') + playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.')) class Meta: model = Project fields = ('playbooks',) + def get_playbooks(self, obj): + return 
obj.playbook_files if obj.scm_type else obj.playbooks + @property def data(self): ret = super(ProjectPlaybooksSerializer, self).data @@ -961,7 +1034,7 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer): class Meta: model = ProjectUpdate - fields = ('*', 'project') + fields = ('*', 'project', 'job_type') def get_related(self, obj): res = super(ProjectUpdateSerializer, self).get_related(obj) @@ -989,17 +1062,11 @@ class ProjectUpdateCancelSerializer(ProjectUpdateSerializer): class BaseSerializerWithVariables(BaseSerializer): def validate_variables(self, value): - try: - json.loads(value.strip() or '{}') - except ValueError: - try: - yaml.safe_load(value) - except yaml.YAMLError: - raise serializers.ValidationError('Must be valid JSON or YAML.') - return value + return vars_validate_or_raise(value) class InventorySerializer(BaseSerializerWithVariables): + show_capabilities = ['edit', 'delete', 'adhoc'] class Meta: model = Inventory @@ -1056,6 +1123,7 @@ class InventoryScriptSerializer(InventorySerializer): class HostSerializer(BaseSerializerWithVariables): + show_capabilities = ['edit', 'delete'] class Meta: model = Host @@ -1120,7 +1188,7 @@ class HostSerializer(BaseSerializerWithVariables): if port < 1 or port > 65535: raise ValueError except ValueError: - raise serializers.ValidationError(u'Invalid port specification: %s' % force_text(port)) + raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port)) return name, port def validate_name(self, value): @@ -1148,7 +1216,7 @@ class HostSerializer(BaseSerializerWithVariables): vars_dict['ansible_ssh_port'] = port attrs['variables'] = yaml.dump(vars_dict) except (yaml.YAMLError, TypeError): - raise serializers.ValidationError('Must be valid JSON or YAML.') + raise serializers.ValidationError(_('Must be valid JSON or YAML.')) return super(HostSerializer, self).validate(attrs) @@ -1166,6 +1234,7 @@ class HostSerializer(BaseSerializerWithVariables): class GroupSerializer(BaseSerializerWithVariables): + show_capabilities = ['start', 'copy', 'schedule', 'edit', 'delete'] class Meta: model = Group @@ -1204,7 +1273,7 @@ class GroupSerializer(BaseSerializerWithVariables): def validate_name(self, value): if value in ('all', '_meta'): - raise serializers.ValidationError('Invalid group name.') + raise serializers.ValidationError(_('Invalid group name.')) return value def to_representation(self, obj): @@ -1240,10 +1309,7 @@ class BaseVariableDataSerializer(BaseSerializer): if obj is None: return {} ret = super(BaseVariableDataSerializer, self).to_representation(obj) - try: - return json.loads(ret.get('variables', '') or '{}') - except ValueError: - return yaml.safe_load(ret.get('variables', '')) + return parse_yaml_or_json(ret.get('variables', '') or '{}') def to_internal_value(self, data): data = {'variables': json.dumps(data)} @@ -1267,9 +1333,11 @@ class GroupVariableDataSerializer(BaseVariableDataSerializer): class Meta: model = Group + class CustomInventoryScriptSerializer(BaseSerializer): script = serializers.CharField(trim_whitespace=False) + show_capabilities = ['edit', 'delete'] class Meta: model = CustomInventoryScript @@ -1277,7 +1345,7 @@ class CustomInventoryScriptSerializer(BaseSerializer): def validate_script(self, value): if not value.startswith("#!"): - raise serializers.ValidationError('Script must begin with a hashbang sequence: i.e.... #!/usr/bin/env python') + raise serializers.ValidationError(_('Script must begin with a hashbang sequence: i.e.... 
#!/usr/bin/env python')) return value def to_representation(self, obj): @@ -1307,7 +1375,8 @@ class InventorySourceOptionsSerializer(BaseSerializer): class Meta: fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential', - 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars') + 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars', + 'timeout') def get_related(self, obj): res = super(InventorySourceOptionsSerializer, self).get_related(obj) @@ -1319,18 +1388,7 @@ class InventorySourceOptionsSerializer(BaseSerializer): return res def validate_source_vars(self, value): - # source_env must be blank, a valid JSON or YAML dict, or ... - try: - json.loads((value or '').strip() or '{}') - return value - except ValueError: - pass - try: - yaml.safe_load(value) - return value - except yaml.YAMLError: - pass - raise serializers.ValidationError('Must be valid JSON or YAML.') + return vars_validate_or_raise(value) def validate(self, attrs): # TODO: Validate source, validate source_regions @@ -1340,13 +1398,13 @@ class InventorySourceOptionsSerializer(BaseSerializer): source_script = attrs.get('source_script', self.instance and self.instance.source_script or '') if source == 'custom': if source_script is None or source_script == '': - errors['source_script'] = "If 'source' is 'custom', 'source_script' must be provided." + errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.") else: try: if source_script.organization != self.instance.inventory.organization: - errors['source_script'] = "The 'source_script' does not belong to the same organization as the inventory." + errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.") except Exception as exc: - errors['source_script'] = "'source_script' doesn't exist." + errors['source_script'] = _("'source_script' doesn't exist.") logger.error(str(exc)) if errors: @@ -1451,6 +1509,7 @@ class InventoryUpdateCancelSerializer(InventoryUpdateSerializer): class TeamSerializer(BaseSerializer): + show_capabilities = ['edit', 'delete'] class Meta: model = Team @@ -1478,7 +1537,6 @@ class TeamSerializer(BaseSerializer): return ret - class RoleSerializer(BaseSerializer): class Meta: @@ -1519,8 +1577,12 @@ class RoleSerializer(BaseSerializer): return ret +class RoleSerializerWithParentAccess(RoleSerializer): + show_capabilities = ['unattach'] + class ResourceAccessListElementSerializer(UserSerializer): + show_capabilities = [] # Clear fields from UserSerializer parent class def to_representation(self, user): ''' @@ -1534,8 +1596,11 @@ class ResourceAccessListElementSerializer(UserSerializer): the resource. 
''' ret = super(ResourceAccessListElementSerializer, self).to_representation(user) - object_id = self.context['view'].object_id - obj = self.context['view'].resource_model.objects.get(pk=object_id) + obj = self.context['view'].get_parent_object() + if self.context['view'].request is not None: + requesting_user = self.context['view'].request.user + else: + requesting_user = None if 'summary_fields' not in ret: ret['summary_fields'] = {} @@ -1546,34 +1611,45 @@ class ResourceAccessListElementSerializer(UserSerializer): role_dict['resource_name'] = role.content_object.name role_dict['resource_type'] = role.content_type.name role_dict['related'] = reverse_gfk(role.content_object) - except: + except AttributeError: pass + if role.content_type is not None: + role_dict['user_capabilities'] = {'unattach': requesting_user.can_access( + Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)} + else: + # Singleton roles should not be managed from this view, as per copy/edit rework spec + role_dict['user_capabilities'] = {'unattach': False} return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)} - def format_team_role_perm(team_role, permissive_role_ids): + def format_team_role_perm(naive_team_role, permissive_role_ids): ret = [] + team_role = naive_team_role + if naive_team_role.role_field == 'admin_role': + team_role = naive_team_role.content_object.member_role for role in team_role.children.filter(id__in=permissive_role_ids).all(): role_dict = { 'id': role.id, 'name': role.name, 'description': role.description, 'team_id': team_role.object_id, - 'team_name': team_role.content_object.name + 'team_name': team_role.content_object.name, + 'team_organization_name': team_role.content_object.organization.name, } - try: + if role.content_type is not None: role_dict['resource_name'] = role.content_object.name role_dict['resource_type'] = role.content_type.name role_dict['related'] = reverse_gfk(role.content_object) - except: - pass + role_dict['user_capabilities'] = {'unattach': requesting_user.can_access( + Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)} + else: + # Singleton roles should not be managed from this view, as per copy/edit rework spec + role_dict['user_capabilities'] = {'unattach': False} ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)}) return ret team_content_type = ContentType.objects.get_for_model(Team) content_type = ContentType.objects.get_for_model(obj) - - content_type = ContentType.objects.get_for_model(obj) direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True) all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True) @@ -1608,16 +1684,17 @@ class ResourceAccessListElementSerializer(UserSerializer): ret['summary_fields']['direct_access'] \ = [format_role_perm(r) for r in direct_access_roles.distinct()] \ - + [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] + + [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \ + + [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x] ret['summary_fields']['indirect_access'] \ - = [format_role_perm(r) for r in indirect_access_roles.distinct()] \ - + [y for x in (format_team_role_perm(r, 
all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x] + = [format_role_perm(r) for r in indirect_access_roles.distinct()] return ret class CredentialSerializer(BaseSerializer): + show_capabilities = ['edit', 'delete'] class Meta: model = Credential @@ -1650,11 +1727,11 @@ class CredentialSerializer(BaseSerializer): owner_teams = reverse('api:credential_owner_teams_list', args=(obj.pk,)), )) - parents = obj.admin_role.parents.exclude(object_id__isnull=True) - if parents.count() > 0: + parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None] + if parents: res.update({parents[0].content_type.name:parents[0].content_object.get_absolute_url()}) - elif obj.admin_role.members.count() > 0: - user = obj.admin_role.members.first() + elif len(obj.admin_role.members.all()) > 0: + user = obj.admin_role.members.all()[0] res.update({'user': reverse('api:user_detail', args=(user.pk,))}) return res @@ -1672,7 +1749,7 @@ class CredentialSerializer(BaseSerializer): 'url': reverse('api:user_detail', args=(user.pk,)), }) - for parent in obj.admin_role.parents.exclude(object_id__isnull=True).all(): + for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]: summary_dict['owners'].append({ 'id': parent.content_object.pk, 'type': camelcase_to_underscore(parent.content_object.__class__.__name__), @@ -1689,18 +1766,18 @@ class CredentialSerializerCreate(CredentialSerializer): user = serializers.PrimaryKeyRelatedField( queryset=User.objects.all(), required=False, default=None, write_only=True, allow_null=True, - help_text='Write-only field used to add user to owner role. If provided, ' - 'do not give either team or organization. Only valid for creation.') + help_text=_('Write-only field used to add user to owner role. If provided, ' + 'do not give either team or organization. Only valid for creation.')) team = serializers.PrimaryKeyRelatedField( queryset=Team.objects.all(), required=False, default=None, write_only=True, allow_null=True, - help_text='Write-only field used to add team to owner role. If provided, ' - 'do not give either user or organization. Only valid for creation.') + help_text=_('Write-only field used to add team to owner role. If provided, ' + 'do not give either user or organization. Only valid for creation.')) organization = serializers.PrimaryKeyRelatedField( queryset=Organization.objects.all(), - required=False, default=None, write_only=True, allow_null=True, - help_text='Write-only field used to add organization to owner role. If provided, ' - 'do not give either team or team. Only valid for creation.') + required=False, default=None, allow_null=True, + help_text=_('Inherit permissions from organization roles. 
If provided on creation, ' + 'do not give either user or team.')) class Meta: model = Credential @@ -1715,7 +1792,7 @@ class CredentialSerializerCreate(CredentialSerializer): else: attrs.pop(field) if not owner_fields: - raise serializers.ValidationError({"detail": "Missing 'user', 'team', or 'organization'."}) + raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")}) return super(CredentialSerializerCreate, self).validate(attrs) def create(self, validated_data): @@ -1728,7 +1805,7 @@ class CredentialSerializerCreate(CredentialSerializer): credential.admin_role.members.add(user) if team: if not credential.organization or team.organization.id != credential.organization.id: - raise serializers.ValidationError({"detail": "Credential organization must be set and match before assigning to a team"}) + raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")}) credential.admin_role.parents.add(team.admin_role) credential.use_role.parents.add(team.member_role) return credential @@ -1755,13 +1832,33 @@ class OrganizationCredentialSerializerCreate(CredentialSerializerCreate): fields = ('*', '-user', '-team') -class JobOptionsSerializer(BaseSerializer): +class LabelsListMixin(object): + + def _summary_field_labels(self, obj): + if hasattr(obj, '_prefetched_objects_cache') and obj.labels.prefetch_cache_name in obj._prefetched_objects_cache: + label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]] + label_ct = len(obj.labels.all()) + else: + label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all().order_by('name')[:10]] + if len(label_list) < 10: + label_ct = len(label_list) + else: + label_ct = obj.labels.count() + return {'count': label_ct, 'results': label_list} + + def get_summary_fields(self, obj): + res = super(LabelsListMixin, self).get_summary_fields(obj) + res['labels'] = self._summary_field_labels(obj) + return res + + +class JobOptionsSerializer(LabelsListMixin, BaseSerializer): class Meta: fields = ('*', 'job_type', 'inventory', 'project', 'playbook', 'credential', 'cloud_credential', 'network_credential', 'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags', 'force_handlers', - 'skip_tags', 'start_at_task',) + 'skip_tags', 'start_at_task', 'timeout') def get_related(self, obj): res = super(JobOptionsSerializer, self).get_related(obj) @@ -1780,14 +1877,6 @@ class JobOptionsSerializer(BaseSerializer): args=(obj.network_credential.pk,)) return res - def _summary_field_labels(self, obj): - return {'count': obj.labels.count(), 'results': [{'id': x.id, 'name': x.name} for x in obj.labels.all().order_by('name')[:10]]} - - def get_summary_fields(self, obj): - res = super(JobOptionsSerializer, self).get_summary_fields(obj) - res['labels'] = self._summary_field_labels(obj) - return res - def to_representation(self, obj): ret = super(JobOptionsSerializer, self).to_representation(obj) if obj is None: @@ -1812,16 +1901,40 @@ class JobOptionsSerializer(BaseSerializer): playbook = attrs.get('playbook', self.instance and self.instance.playbook or '') job_type = attrs.get('job_type', self.instance and self.instance.job_type or None) if not project and job_type != PERM_INVENTORY_SCAN: - raise serializers.ValidationError({'project': 'This field is required.'}) - if project and playbook and force_text(playbook) not in project.playbooks: - raise serializers.ValidationError({'playbook': 'Playbook not found for project.'}) + raise serializers.ValidationError({'project': 
_('This field is required.')}) + if project and project.scm_type and playbook and force_text(playbook) not in project.playbook_files: + raise serializers.ValidationError({'playbook': _('Playbook not found for project.')}) + if project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks: + raise serializers.ValidationError({'playbook': _('Playbook not found for project.')}) if project and not playbook: - raise serializers.ValidationError({'playbook': 'Must select playbook for project.'}) + raise serializers.ValidationError({'playbook': _('Must select playbook for project.')}) return super(JobOptionsSerializer, self).validate(attrs) -class JobTemplateSerializer(UnifiedJobTemplateSerializer, JobOptionsSerializer): +class JobTemplateMixin(object): + ''' + Provide recent jobs and survey details in summary_fields + ''' + + def _recent_jobs(self, obj): + if hasattr(obj, 'workflow_jobs'): + job_mgr = obj.workflow_jobs + else: + job_mgr = obj.jobs + return [{'id': x.id, 'status': x.status, 'finished': x.finished} + for x in job_mgr.all().order_by('-created')[:10]] + + def get_summary_fields(self, obj): + d = super(JobTemplateMixin, self).get_summary_fields(obj) + if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec): + d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description']) + d['recent_jobs'] = self._recent_jobs(obj) + return d + + +class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer): + show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete'] status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False) @@ -1850,61 +1963,34 @@ class JobTemplateSerializer(UnifiedJobTemplateSerializer, JobOptionsSerializer): res['callback'] = reverse('api:job_template_callback', args=(obj.pk,)) return res - def _recent_jobs(self, obj): - return [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in obj.jobs.all().order_by('-created')[:10]] - - def get_summary_fields(self, obj): - d = super(JobTemplateSerializer, self).get_summary_fields(obj) - if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec): - d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description']) - request = self.context.get('request', None) - - # Check for conditions that would create a validation error if coppied - validation_errors, resources_needed_to_start = obj.resource_validation_data() - - if request is None or request.user is None: - d['can_copy'] = False - d['can_edit'] = False - elif request.user.is_superuser: - d['can_copy'] = not validation_errors - d['can_edit'] = True - else: - d['can_copy'] = (not validation_errors) and request.user.can_access(JobTemplate, 'add', {"reference_obj": obj}) - d['can_edit'] = request.user.can_access(JobTemplate, 'change', obj, {}) - - d['recent_jobs'] = self._recent_jobs(obj) - return d - def validate(self, attrs): - survey_enabled = attrs.get('survey_enabled', self.instance and self.instance.survey_enabled or False) - job_type = attrs.get('job_type', self.instance and self.instance.job_type or None) - inventory = attrs.get('inventory', self.instance and self.instance.inventory or None) - project = attrs.get('project', self.instance and self.instance.project or None) + def get_field_from_model_or_attrs(fd): + return attrs.get(fd, self.instance and getattr(self.instance, fd) or None) + 
survey_enabled = get_field_from_model_or_attrs('survey_enabled') + job_type = get_field_from_model_or_attrs('job_type') + inventory = get_field_from_model_or_attrs('inventory') + credential = get_field_from_model_or_attrs('credential') + project = get_field_from_model_or_attrs('project') + + prompting_error_message = _("Must either set a default value or ask to prompt on launch.") if job_type == "scan": if inventory is None or attrs.get('ask_inventory_on_launch', False): - raise serializers.ValidationError({'inventory': 'Scan jobs must be assigned a fixed inventory.'}) + raise serializers.ValidationError({'inventory': _('Scan jobs must be assigned a fixed inventory.')}) elif project is None: - raise serializers.ValidationError({'project': "Job types 'run' and 'check' must have assigned a project."}) + raise serializers.ValidationError({'project': _("Job types 'run' and 'check' must have assigned a project.")}) + elif credential is None and not get_field_from_model_or_attrs('ask_credential_on_launch'): + raise serializers.ValidationError({'credential': prompting_error_message}) + elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'): + raise serializers.ValidationError({'inventory': prompting_error_message}) if survey_enabled and job_type == PERM_INVENTORY_SCAN: - raise serializers.ValidationError({'survey_enabled': 'Survey Enabled can not be used with scan jobs.'}) + raise serializers.ValidationError({'survey_enabled': _('Survey Enabled cannot be used with scan jobs.')}) return super(JobTemplateSerializer, self).validate(attrs) def validate_extra_vars(self, value): - # extra_vars must be blank, a valid JSON or YAML dict, or ... - try: - json.loads((value or '').strip() or '{}') - return value - except ValueError: - pass - try: - yaml.safe_load(value) - return value - except yaml.YAMLError: - pass - raise serializers.ValidationError('Must be valid JSON or YAML.') + return vars_validate_or_raise(value) class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer): @@ -1917,19 +2003,19 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer): ask_job_type_on_launch = serializers.ReadOnlyField() ask_inventory_on_launch = serializers.ReadOnlyField() ask_credential_on_launch = serializers.ReadOnlyField() + artifacts = serializers.SerializerMethodField() class Meta: model = Job fields = ('*', 'job_template', 'passwords_needed_to_start', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch', - 'ask_job_type_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch') + 'ask_job_type_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', + 'allow_simultaneous', 'artifacts', 'scm_revision',) def get_related(self, obj): res = super(JobSerializer, self).get_related(obj) res.update(dict( job_events = reverse('api:job_job_events_list', args=(obj.pk,)), - job_plays = reverse('api:job_job_plays_list', args=(obj.pk,)), - job_tasks = reverse('api:job_job_tasks_list', args=(obj.pk,)), job_host_summaries = reverse('api:job_job_host_summaries_list', args=(obj.pk,)), activity_stream = reverse('api:job_activity_stream_list', args=(obj.pk,)), notifications = reverse('api:job_notifications_list', args=(obj.pk,)), @@ -1942,9 +2028,16 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer): res['start'] = reverse('api:job_start', args=(obj.pk,)) if obj.can_cancel or True: res['cancel'] = reverse('api:job_cancel', args=(obj.pk,)) + if obj.project_update: + res['project_update'] = 
reverse('api:project_update_detail', args=(obj.project_update.pk,)) res['relaunch'] = reverse('api:job_relaunch', args=(obj.pk,)) return res + def get_artifacts(self, obj): + if obj: + return obj.display_artifacts() + return {} + def to_internal_value(self, data): # When creating a new job and a job template is specified, populate any # fields not provided in data from the job template. @@ -1952,7 +2045,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer): try: job_template = JobTemplate.objects.get(pk=data['job_template']) except JobTemplate.DoesNotExist: - raise serializers.ValidationError({'job_template': 'Invalid job template.'}) + raise serializers.ValidationError({'job_template': _('Invalid job template.')}) data.setdefault('name', job_template.name) data.setdefault('description', job_template.description) data.setdefault('job_type', job_template.job_type) @@ -2037,14 +2130,15 @@ class JobRelaunchSerializer(JobSerializer): def validate(self, attrs): obj = self.context.get('obj') if not obj.credential: - raise serializers.ValidationError(dict(credential=["Credential not found or deleted."])) + raise serializers.ValidationError(dict(credential=[_("Credential not found or deleted.")])) if obj.job_type != PERM_INVENTORY_SCAN and obj.project is None: - raise serializers.ValidationError(dict(errors=["Job Template Project is missing or undefined."])) + raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")])) if obj.inventory is None: - raise serializers.ValidationError(dict(errors=["Job Template Inventory is missing or undefined."])) + raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")])) attrs = super(JobRelaunchSerializer, self).validate(attrs) return attrs + class AdHocCommandSerializer(UnifiedJobSerializer): class Meta: @@ -2069,7 +2163,7 @@ class AdHocCommandSerializer(UnifiedJobSerializer): # Load module name choices dynamically from DB settings. 
if field_name == 'module_name': field_class = serializers.ChoiceField - module_name_choices = [(x, x) for x in tower_settings.AD_HOC_COMMANDS] + module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS] module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else '' field_kwargs['choices'] = module_name_choices field_kwargs['required'] = bool(not module_name_default) @@ -2087,6 +2181,7 @@ class AdHocCommandSerializer(UnifiedJobSerializer): res.update(dict( events = reverse('api:ad_hoc_command_ad_hoc_command_events_list', args=(obj.pk,)), activity_stream = reverse('api:ad_hoc_command_activity_stream_list', args=(obj.pk,)), + notifications = reverse('api:ad_hoc_command_notifications_list', args=(obj.pk,)), )) res['cancel'] = reverse('api:ad_hoc_command_cancel', args=(obj.pk,)) res['relaunch'] = reverse('api:ad_hoc_command_relaunch', args=(obj.pk,)) @@ -2144,6 +2239,7 @@ class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer): )) return res + class SystemJobSerializer(UnifiedJobSerializer): class Meta: @@ -2160,6 +2256,7 @@ class SystemJobSerializer(UnifiedJobSerializer): res['cancel'] = reverse('api:system_job_cancel', args=(obj.pk,)) return res + class SystemJobCancelSerializer(SystemJobSerializer): can_cancel = serializers.BooleanField(read_only=True) @@ -2167,15 +2264,232 @@ class SystemJobCancelSerializer(SystemJobSerializer): class Meta: fields = ('can_cancel',) + +class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer): + show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete'] + + class Meta: + model = WorkflowJobTemplate + fields = ('*', 'extra_vars', 'organization', 'survey_enabled',) + + def get_related(self, obj): + res = super(WorkflowJobTemplateSerializer, self).get_related(obj) + res.update(dict( + workflow_jobs = reverse('api:workflow_job_template_jobs_list', args=(obj.pk,)), + schedules = reverse('api:workflow_job_template_schedules_list', args=(obj.pk,)), + launch = reverse('api:workflow_job_template_launch', args=(obj.pk,)), + copy = reverse('api:workflow_job_template_copy', args=(obj.pk,)), + workflow_nodes = reverse('api:workflow_job_template_workflow_nodes_list', args=(obj.pk,)), + labels = reverse('api:workflow_job_template_label_list', args=(obj.pk,)), + activity_stream = reverse('api:workflow_job_template_activity_stream_list', args=(obj.pk,)), + notification_templates_any = reverse('api:workflow_job_template_notification_templates_any_list', args=(obj.pk,)), + notification_templates_success = reverse('api:workflow_job_template_notification_templates_success_list', args=(obj.pk,)), + notification_templates_error = reverse('api:workflow_job_template_notification_templates_error_list', args=(obj.pk,)), + access_list = reverse('api:workflow_job_template_access_list', args=(obj.pk,)), + object_roles = reverse('api:workflow_job_template_object_roles_list', args=(obj.pk,)), + survey_spec = reverse('api:workflow_job_template_survey_spec', args=(obj.pk,)), + )) + if obj.organization: + res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,)) + return res + + def validate_extra_vars(self, value): + return vars_validate_or_raise(value) + + +# TODO: +class WorkflowJobTemplateListSerializer(WorkflowJobTemplateSerializer): + pass + + +# TODO: +class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer): + + class Meta: + model = WorkflowJob + fields = ('*', 'workflow_job_template', 'extra_vars') + + def get_related(self, obj): + res = 
super(WorkflowJobSerializer, self).get_related(obj) + if obj.workflow_job_template: + res['workflow_job_template'] = reverse('api:workflow_job_template_detail', + args=(obj.workflow_job_template.pk,)) + res['notifications'] = reverse('api:workflow_job_notifications_list', args=(obj.pk,)) + res['workflow_nodes'] = reverse('api:workflow_job_workflow_nodes_list', args=(obj.pk,)) + res['labels'] = reverse('api:workflow_job_label_list', args=(obj.pk,)) + res['activity_stream'] = reverse('api:workflow_job_activity_stream_list', args=(obj.pk,)) + res['relaunch'] = reverse('api:workflow_job_relaunch', args=(obj.pk,)) + if obj.can_cancel or True: + res['cancel'] = reverse('api:workflow_job_cancel', args=(obj.pk,)) + return res + + def to_representation(self, obj): + ret = super(WorkflowJobSerializer, self).to_representation(obj) + if obj is None: + return ret + if 'extra_vars' in ret: + ret['extra_vars'] = obj.display_extra_vars() + return ret + + +# TODO: +class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer): + pass + + +class WorkflowJobCancelSerializer(WorkflowJobSerializer): + + can_cancel = serializers.BooleanField(read_only=True) + + class Meta: + fields = ('can_cancel',) + + +class WorkflowNodeBaseSerializer(BaseSerializer): + job_type = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True) + failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True) + always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True) + + class Meta: + fields = ('*', '-name', '-description', 'id', 'url', 'related', + 'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', + 'inventory', 'credential', 'job_type', 'job_tags', 'skip_tags', 'limit') + + def get_related(self, obj): + res = super(WorkflowNodeBaseSerializer, self).get_related(obj) + if obj.unified_job_template: + res['unified_job_template'] = obj.unified_job_template.get_absolute_url() + return res + + def validate(self, attrs): + # char_prompts go through different validation, so remove them here + for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']: + if fd in attrs: + attrs.pop(fd) + return super(WorkflowNodeBaseSerializer, self).validate(attrs) + + +class WorkflowJobTemplateNodeSerializer(WorkflowNodeBaseSerializer): + class Meta: + model = WorkflowJobTemplateNode + fields = ('*', 'workflow_job_template',) + + def get_related(self, obj): + res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj) + res['success_nodes'] = reverse('api:workflow_job_template_node_success_nodes_list', args=(obj.pk,)) + res['failure_nodes'] = reverse('api:workflow_job_template_node_failure_nodes_list', args=(obj.pk,)) + res['always_nodes'] = reverse('api:workflow_job_template_node_always_nodes_list', args=(obj.pk,)) + if obj.workflow_job_template: + res['workflow_job_template'] = reverse('api:workflow_job_template_detail', args=(obj.workflow_job_template.pk,)) + return res + + def to_internal_value(self, data): + internal_value = super(WorkflowNodeBaseSerializer, self).to_internal_value(data) + view = self.context.get('view', None) + request_method = None + if
view and view.request: + request_method = view.request.method + if request_method in ['PATCH']: + obj = self.instance + char_prompts = copy.copy(obj.char_prompts) + char_prompts.update(self.extract_char_prompts(data)) + else: + char_prompts = self.extract_char_prompts(data) + for fd in copy.copy(char_prompts): + if char_prompts[fd] is None: + char_prompts.pop(fd) + internal_value['char_prompts'] = char_prompts + return internal_value + + def extract_char_prompts(self, data): + char_prompts = {} + for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']: + # Accept null values, if given + if fd in data: + char_prompts[fd] = data[fd] + return char_prompts + + def validate(self, attrs): + if 'char_prompts' in attrs: + if 'job_type' in attrs['char_prompts']: + job_types = [t for t, v in JOB_TYPE_CHOICES] + if attrs['char_prompts']['job_type'] not in job_types: + raise serializers.ValidationError({ + "job_type": _("%(job_type)s is not a valid job type. The choices are %(choices)s.") % { + 'job_type': attrs['char_prompts']['job_type'], 'choices': job_types}}) + if self.instance is None and ('workflow_job_template' not in attrs or + attrs['workflow_job_template'] is None): + raise serializers.ValidationError({ + "workflow_job_template": _("Workflow job template is missing during creation.") + }) + ujt_obj = attrs.get('unified_job_template', None) + if isinstance(ujt_obj, (WorkflowJobTemplate, SystemJobTemplate)): + raise serializers.ValidationError({ + "unified_job_template": _("Cannot nest a %s inside a WorkflowJobTemplate.") % ujt_obj.__class__.__name__}) + return super(WorkflowJobTemplateNodeSerializer, self).validate(attrs) + + +class WorkflowJobNodeSerializer(WorkflowNodeBaseSerializer): + class Meta: + model = WorkflowJobNode + fields = ('*', 'job', 'workflow_job',) + + def get_related(self, obj): + res = super(WorkflowJobNodeSerializer, self).get_related(obj) + res['success_nodes'] = reverse('api:workflow_job_node_success_nodes_list', args=(obj.pk,)) + res['failure_nodes'] = reverse('api:workflow_job_node_failure_nodes_list', args=(obj.pk,)) + res['always_nodes'] = reverse('api:workflow_job_node_always_nodes_list', args=(obj.pk,)) + if obj.job: + res['job'] = obj.job.get_absolute_url() + if obj.workflow_job: + res['workflow_job'] = reverse('api:workflow_job_detail', args=(obj.workflow_job.pk,)) + return res + + +class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer): + pass + + +class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer): + pass + + +class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer): + ''' + Influence the api browser sample data to not include workflow_job_template + when editing a WorkflowNode. + + Note: I was not able to accomplish this through the use of extra_kwargs. + Maybe something to do with workflow_job_template being a relational field?
+ ''' + def build_relational_field(self, field_name, relation_info): + field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info) + if self.instance and field_name == 'workflow_job_template': + field_kwargs['read_only'] = True + field_kwargs.pop('queryset', None) + return field_class, field_kwargs + + +class WorkflowJobTemplateNodeListSerializer(WorkflowJobTemplateNodeSerializer): + pass + + class JobListSerializer(JobSerializer, UnifiedJobListSerializer): pass + class AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer): pass + class SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer): pass + class JobHostSummarySerializer(BaseSerializer): class Meta: @@ -2212,21 +2526,22 @@ class JobEventSerializer(BaseSerializer): model = JobEvent fields = ('*', '-name', '-description', 'job', 'event', 'counter', 'event_display', 'event_data', 'event_level', 'failed', - 'changed', 'host', 'host_name', 'parent', 'play', 'task', 'role') + 'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent', + 'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line', + 'verbosity') def get_related(self, obj): res = super(JobEventSerializer, self).get_related(obj) res.update(dict( - job = reverse('api:job_detail', args=(obj.job.pk,)), - #children = reverse('api:job_event_children_list', args=(obj.pk,)), + job = reverse('api:job_detail', args=(obj.job_id,)), )) - if obj.parent: - res['parent'] = reverse('api:job_event_detail', args=(obj.parent.pk,)) - if obj.children.count(): + if obj.parent_id: + res['parent'] = reverse('api:job_event_detail', args=(obj.parent_id,)) + if obj.children.exists(): res['children'] = reverse('api:job_event_children_list', args=(obj.pk,)) - if obj.host: - res['host'] = reverse('api:host_detail', args=(obj.host.pk,)) - if obj.hosts.count(): + if obj.host_id: + res['host'] = reverse('api:host_detail', args=(obj.host_id,)) + if obj.hosts.exists(): res['hosts'] = reverse('api:job_event_hosts_list', args=(obj.pk,)) return res @@ -2239,6 +2554,19 @@ class JobEventSerializer(BaseSerializer): pass return d + def to_representation(self, obj): + ret = super(JobEventSerializer, self).to_representation(obj) + # Show full stdout for event detail view, truncate only for list view. + if hasattr(self.context.get('view', None), 'retrieve'): + return ret + # Show full stdout for playbook_on_* events. + if obj and obj.event.startswith('playbook_on'): + return ret + max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY + if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes: + ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026' + return ret + class AdHocCommandEventSerializer(BaseSerializer): @@ -2248,16 +2576,8 @@ class AdHocCommandEventSerializer(BaseSerializer): model = AdHocCommandEvent fields = ('*', '-name', '-description', 'ad_hoc_command', 'event', 'counter', 'event_display', 'event_data', 'failed', - 'changed', 'host', 'host_name') - - def to_internal_value(self, data): - ret = super(AdHocCommandEventSerializer, self).to_internal_value(data) - # AdHocCommandAdHocCommandEventsList should be the only view creating - # AdHocCommandEvent instances, so keep the ad_hoc_command it sets, even - # though ad_hoc_command is a read-only field. 
- if 'ad_hoc_command' in data: - ret['ad_hoc_command'] = data['ad_hoc_command'] - return ret + 'changed', 'uuid', 'host', 'host_name', 'stdout', + 'start_line', 'end_line', 'verbosity') def get_related(self, obj): res = super(AdHocCommandEventSerializer, self).get_related(obj) @@ -2268,6 +2588,16 @@ class AdHocCommandEventSerializer(BaseSerializer): res['host'] = reverse('api:host_detail', args=(obj.host.pk,)) return res + def to_representation(self, obj): + ret = super(AdHocCommandEventSerializer, self).to_representation(obj) + # Show full stdout for event detail view, truncate only for list view. + if hasattr(self.context.get('view', None), 'retrieve'): + return ret + max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY + if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes: + ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026' + return ret + class JobLaunchSerializer(BaseSerializer): @@ -2337,7 +2667,7 @@ class JobLaunchSerializer(BaseSerializer): for field in obj.resources_needed_to_start: if not (attrs.get(field, False) and obj._ask_for_vars_dict().get(field, False)): - errors[field] = "Job Template '%s' is missing or undefined." % field + errors[field] = _("Job Template '%s' is missing or undefined.") % field if (not obj.ask_credential_on_launch) or (not attrs.get('credential', None)): credential = obj.credential @@ -2363,7 +2693,7 @@ class JobLaunchSerializer(BaseSerializer): extra_vars = yaml.safe_load(extra_vars) assert isinstance(extra_vars, dict) except (yaml.YAMLError, TypeError, AttributeError, AssertionError): - errors['extra_vars'] = 'Must be a valid JSON or YAML dictionary.' + errors['extra_vars'] = _('Must be a valid JSON or YAML dictionary.') if not isinstance(extra_vars, dict): extra_vars = {} @@ -2374,13 +2704,7 @@ class JobLaunchSerializer(BaseSerializer): errors['variables_needed_to_start'] = validation_errors # Special prohibited cases for scan jobs - if 'job_type' in data and obj.ask_job_type_on_launch: - if ((obj.job_type == PERM_INVENTORY_SCAN and not data['job_type'] == PERM_INVENTORY_SCAN) or - (data['job_type'] == PERM_INVENTORY_SCAN and not obj.job_type == PERM_INVENTORY_SCAN)): - errors['job_type'] = 'Can not override job_type to or from a scan job.' - if (obj.job_type == PERM_INVENTORY_SCAN and ('inventory' in data) and obj.ask_inventory_on_launch and - obj.inventory != data['inventory']): - errors['inventory'] = 'Inventory can not be changed at runtime for scan jobs.' 
+ errors.update(obj._extra_job_type_errors(data)) if errors: raise serializers.ValidationError(errors) @@ -2402,7 +2726,65 @@ class JobLaunchSerializer(BaseSerializer): obj.credential = JT_credential return attrs + +class WorkflowJobLaunchSerializer(BaseSerializer): + + can_start_without_user_input = serializers.BooleanField(read_only=True) + variables_needed_to_start = serializers.ReadOnlyField() + survey_enabled = serializers.SerializerMethodField() + extra_vars = VerbatimField(required=False, write_only=True) + workflow_job_template_data = serializers.SerializerMethodField() + + class Meta: + model = WorkflowJobTemplate + fields = ('can_start_without_user_input', 'extra_vars', + 'survey_enabled', 'variables_needed_to_start', + 'node_templates_missing', 'node_prompts_rejected', + 'workflow_job_template_data') + + def get_survey_enabled(self, obj): + if obj: + return obj.survey_enabled and 'spec' in obj.survey_spec + return False + + def get_workflow_job_template_data(self, obj): + return dict(name=obj.name, id=obj.id, description=obj.description) + + def validate(self, attrs): + errors = {} + obj = self.instance + + extra_vars = attrs.get('extra_vars', {}) + + if isinstance(extra_vars, basestring): + try: + extra_vars = json.loads(extra_vars) + except (ValueError, TypeError): + try: + extra_vars = yaml.safe_load(extra_vars) + assert isinstance(extra_vars, dict) + except (yaml.YAMLError, TypeError, AttributeError, AssertionError): + errors['extra_vars'] = _('Must be a valid JSON or YAML dictionary.') + + if not isinstance(extra_vars, dict): + extra_vars = {} + + if self.get_survey_enabled(obj): + validation_errors = obj.survey_variable_validation(extra_vars) + if validation_errors: + errors['variables_needed_to_start'] = validation_errors + + if errors: + raise serializers.ValidationError(errors) + + WFJT_extra_vars = obj.extra_vars + attrs = super(WorkflowJobLaunchSerializer, self).validate(attrs) + obj.extra_vars = WFJT_extra_vars + return attrs + + class NotificationTemplateSerializer(BaseSerializer): + show_capabilities = ['edit', 'delete'] class Meta: model = NotificationTemplate @@ -2418,9 +2800,10 @@ class NotificationTemplateSerializer(BaseSerializer): def to_representation(self, obj): ret = super(NotificationTemplateSerializer, self).to_representation(obj) for field in obj.notification_class.init_parameters: - if field in ret['notification_configuration'] and \ - force_text(ret['notification_configuration'][field]).startswith('$encrypted$'): - ret['notification_configuration'][field] = '$encrypted$' + config = obj.notification_configuration + if field in config and force_text(config[field]).startswith('$encrypted$'): + config[field] = '$encrypted$' + ret['notification_configuration'] = config return ret def get_related(self, obj): @@ -2452,7 +2835,7 @@ class NotificationTemplateSerializer(BaseSerializer): else: notification_type = None if not notification_type: - raise serializers.ValidationError('Missing required fields for Notification Configuration: notification_type') + raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type')) notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type] missing_fields = [] @@ -2475,20 +2858,21 @@ class NotificationTemplateSerializer(BaseSerializer): incorrect_type_fields.append((field, field_type)) continue if field_type == "list" and len(field_val) < 1: - error_list.append("No values specified for field '{}'".format(field)) + error_list.append(_("No values
specified for field '{}'").format(field)) continue if field_type == "password" and field_val == "$encrypted$" and object_actual is not None: attrs['notification_configuration'][field] = object_actual.notification_configuration[field] if missing_fields: - error_list.append("Missing required fields for Notification Configuration: {}.".format(missing_fields)) + error_list.append(_("Missing required fields for Notification Configuration: {}.").format(missing_fields)) if incorrect_type_fields: for type_field_error in incorrect_type_fields: - error_list.append("Configuration field '{}' incorrect type, expected {}.".format(type_field_error[0], - type_field_error[1])) + error_list.append(_("Configuration field '{}' incorrect type, expected {}.").format(type_field_error[0], + type_field_error[1])) if error_list: raise serializers.ValidationError(error_list) return attrs + class NotificationSerializer(BaseSerializer): class Meta: @@ -2503,6 +2887,7 @@ class NotificationSerializer(BaseSerializer): )) return res + class LabelSerializer(BaseSerializer): class Meta: @@ -2515,7 +2900,9 @@ class LabelSerializer(BaseSerializer): res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,)) return res + class ScheduleSerializer(BaseSerializer): + show_capabilities = ['edit', 'delete'] class Meta: model = Schedule @@ -2532,7 +2919,9 @@ class ScheduleSerializer(BaseSerializer): def validate_unified_job_template(self, value): if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS: - raise serializers.ValidationError('Inventory Source must be a cloud resource.') + raise serializers.ValidationError(_('Inventory Source must be a cloud resource.')) + elif type(value) == Project and value.scm_type == '': + raise serializers.ValidationError(_('Manual Project cannot have a schedule set.')) return value # We reject rrules if: @@ -2554,44 +2943,62 @@ class ScheduleSerializer(BaseSerializer): match_multiple_dtstart = re.findall(".*?(DTSTART\:[0-9]+T[0-9]+Z)", rrule_value) match_multiple_rrule = re.findall(".*?(RRULE\:)", rrule_value) if not len(match_multiple_dtstart): - raise serializers.ValidationError('DTSTART required in rrule. Value should match: DTSTART:YYYYMMDDTHHMMSSZ') + raise serializers.ValidationError(_('DTSTART required in rrule.
Value should match: DTSTART:YYYYMMDDTHHMMSSZ')) if len(match_multiple_dtstart) > 1: - raise serializers.ValidationError('Multiple DTSTART is not supported.') + raise serializers.ValidationError(_('Multiple DTSTART is not supported.')) if not len(match_multiple_rrule): - raise serializers.ValidationError('RRULE require in rrule.') + raise serializers.ValidationError(_('RRULE required in rrule.')) if len(match_multiple_rrule) > 1: - raise serializers.ValidationError('Multiple RRULE is not supported.') + raise serializers.ValidationError(_('Multiple RRULE is not supported.')) if 'interval' not in rrule_value.lower(): - raise serializers.ValidationError('INTERVAL required in rrule.') + raise serializers.ValidationError(_('INTERVAL required in rrule.')) if 'tzid' in rrule_value.lower(): - raise serializers.ValidationError('TZID is not supported.') + raise serializers.ValidationError(_('TZID is not supported.')) if 'secondly' in rrule_value.lower(): - raise serializers.ValidationError('SECONDLY is not supported.') + raise serializers.ValidationError(_('SECONDLY is not supported.')) if re.match(multi_by_month_day, rrule_value): - raise serializers.ValidationError('Multiple BYMONTHDAYs not supported.') + raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.')) if re.match(multi_by_month, rrule_value): - raise serializers.ValidationError('Multiple BYMONTHs not supported.') + raise serializers.ValidationError(_('Multiple BYMONTHs not supported.')) if re.match(by_day_with_numeric_prefix, rrule_value): - raise serializers.ValidationError("BYDAY with numeric prefix not supported.") + raise serializers.ValidationError(_("BYDAY with numeric prefix not supported.")) if 'byyearday' in rrule_value.lower(): - raise serializers.ValidationError("BYYEARDAY not supported.") + raise serializers.ValidationError(_("BYYEARDAY not supported.")) if 'byweekno' in rrule_value.lower(): - raise serializers.ValidationError("BYWEEKNO not supported.") + raise serializers.ValidationError(_("BYWEEKNO not supported.")) if match_count: count_val = match_count.groups()[0].strip().split("=") if int(count_val[1]) > 999: - raise serializers.ValidationError("COUNT > 999 is unsupported.") + raise serializers.ValidationError(_("COUNT > 999 is unsupported.")) try: rrule.rrulestr(rrule_value) except Exception: - raise serializers.ValidationError("rrule parsing failed validation.") + raise serializers.ValidationError(_("rrule parsing failed validation.")) return value + class ActivityStreamSerializer(BaseSerializer): changes = serializers.SerializerMethodField() object_association = serializers.SerializerMethodField() + @cached_property + def _local_summarizable_fk_fields(self): + summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS) + # Special requests + summary_dict['group'] = summary_dict['group'] + ('inventory_id',) + for key in summary_dict.keys(): + if 'id' not in summary_dict[key]: + summary_dict[key] = summary_dict[key] + ('id',) + field_list = summary_dict.items() + # Needed related fields that are not in the default summary fields + field_list += [ + ('workflow_job_template_node', ('id', 'unified_job_template_id')), + ('label', ('id', 'name', 'organization_id')), + ('notification', ('id', 'status', 'notification_type', 'notification_template_id')) + ] + return field_list + class Meta: model = ActivityStream fields = ('*', '-name', '-description', '-created', '-modified', @@ -2601,15 +3008,15 @@ class ActivityStreamSerializer(BaseSerializer): ret = super(ActivityStreamSerializer, self).get_fields() for key, field in
ret.items(): if key == 'changes': - field.help_text = 'A summary of the new and changed values when an object is created, updated, or deleted' + field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted') if key == 'object1': - field.help_text = ('For create, update, and delete events this is the object type that was affected. ' - 'For associate and disassociate events this is the object type associated or disassociated with object2.') + field.help_text = _('For create, update, and delete events this is the object type that was affected. ' + 'For associate and disassociate events this is the object type associated or disassociated with object2.') if key == 'object2': - field.help_text = ('Unpopulated for create, update, and delete events. For associate and disassociate ' - 'events this is the object type that object1 is being associated with.') + field.help_text = _('Unpopulated for create, update, and delete events. For associate and disassociate ' + 'events this is the object type that object1 is being associated with.') if key == 'operation': - field.help_text = 'The action taken with respect to the given object(s).' + field.help_text = _('The action taken with respect to the given object(s).') return ret def get_changes(self, obj): @@ -2632,13 +3039,17 @@ class ActivityStreamSerializer(BaseSerializer): rel = {} if obj.actor is not None: rel['actor'] = reverse('api:user_detail', args=(obj.actor.pk,)) - for fk, _ in SUMMARIZABLE_FK_FIELDS.items(): + for fk, __ in self._local_summarizable_fk_fields: if not hasattr(obj, fk): continue - allm2m = getattr(obj, fk).distinct() + allm2m = getattr(obj, fk).all() if getattr(obj, fk).exists(): rel[fk] = [] + id_list = [] for thisItem in allm2m: + if getattr(thisItem, 'id', None) in id_list: + continue + id_list.append(getattr(thisItem, 'id', None)) if fk == 'custom_inventory_script': rel[fk].append(reverse('api:inventory_script_detail', args=(thisItem.id,))) else: @@ -2650,11 +3061,11 @@ class ActivityStreamSerializer(BaseSerializer): def get_summary_fields(self, obj): summary_fields = OrderedDict() - for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items(): + for fk, related_fields in self._local_summarizable_fk_fields: try: if not hasattr(obj, fk): continue - allm2m = getattr(obj, fk).distinct() + allm2m = getattr(obj, fk).all() if getattr(obj, fk).exists(): summary_fields[fk] = [] for thisItem in allm2m: @@ -2675,14 +3086,13 @@ class ActivityStreamSerializer(BaseSerializer): summary_fields[get_type_for_model(unified_job_template)] = {'id': unified_job_template.id, 'name': unified_job_template.name} thisItemDict = {} - if 'id' not in related_fields: - related_fields = related_fields + ('id',) for field in related_fields: fval = getattr(thisItem, field, None) if fval is not None: thisItemDict[field] = fval - if fk == 'group': - thisItemDict['inventory_id'] = getattr(thisItem, 'inventory_id', None) + if thisItemDict.get('id', None): + if thisItemDict.get('id', None) in [obj_dict.get('id', None) for obj_dict in summary_fields[fk]]: + continue summary_fields[fk].append(thisItemDict) except ObjectDoesNotExist: pass @@ -2694,58 +3104,6 @@ class ActivityStreamSerializer(BaseSerializer): return summary_fields -class TowerSettingsSerializer(BaseSerializer): - - value = VerbatimField() - - class Meta: - model = TowerSettings - fields = ('key', 'description', 'category', 'value', 'value_type', 'user') - read_only_fields = ('description', 'category', 'value_type', 'user') - - def __init__(self, instance=None, 
data=serializers.empty, **kwargs): - if instance is None and data is not serializers.empty and 'key' in data: - try: - instance = TowerSettings.objects.get(key=data['key']) - except TowerSettings.DoesNotExist: - pass - super(TowerSettingsSerializer, self).__init__(instance, data, **kwargs) - - def to_representation(self, obj): - ret = super(TowerSettingsSerializer, self).to_representation(obj) - ret['value'] = getattr(obj, 'value_converted', obj.value) - return ret - - def to_internal_value(self, data): - if data['key'] not in settings.TOWER_SETTINGS_MANIFEST: - raise serializers.ValidationError({'key': ['Key {0} is not a valid settings key.'.format(data['key'])]}) - ret = super(TowerSettingsSerializer, self).to_internal_value(data) - manifest_val = settings.TOWER_SETTINGS_MANIFEST[data['key']] - ret['description'] = manifest_val['description'] - ret['category'] = manifest_val['category'] - ret['value_type'] = manifest_val['type'] - return ret - - def validate(self, attrs): - manifest = settings.TOWER_SETTINGS_MANIFEST - if attrs['key'] not in manifest: - raise serializers.ValidationError(dict(key=["Key {0} is not a valid settings key.".format(attrs['key'])])) - - if attrs['value_type'] == 'json': - attrs['value'] = json.dumps(attrs['value']) - elif attrs['value_type'] == 'list': - try: - attrs['value'] = ','.join(map(force_text, attrs['value'])) - except TypeError: - attrs['value'] = force_text(attrs['value']) - elif attrs['value_type'] == 'bool': - attrs['value'] = force_text(bool(attrs['value'])) - else: - attrs['value'] = force_text(attrs['value']) - - return super(TowerSettingsSerializer, self).validate(attrs) - - class AuthTokenSerializer(serializers.Serializer): username = serializers.CharField() @@ -2761,9 +3119,9 @@ class AuthTokenSerializer(serializers.Serializer): attrs['user'] = user return attrs else: - raise serializers.ValidationError('Unable to login with provided credentials.') + raise serializers.ValidationError(_('Unable to login with provided credentials.')) else: - raise serializers.ValidationError('Must include "username" and "password".') + raise serializers.ValidationError(_('Must include "username" and "password".')) class FactVersionSerializer(BaseFactSerializer): @@ -2782,6 +3140,7 @@ class FactVersionSerializer(BaseFactSerializer): res['fact_view'] = build_url('api:host_fact_compare_view', args=(obj.host.pk,), get=params) return res + class FactSerializer(BaseFactSerializer): class Meta: @@ -2794,3 +3153,11 @@ class FactSerializer(BaseFactSerializer): res = super(FactSerializer, self).get_related(obj) res['host'] = obj.host.get_absolute_url() return res + + def to_representation(self, obj): + ret = super(FactSerializer, self).to_representation(obj) + if obj is None: + return ret + if 'facts' in ret and isinstance(ret['facts'], six.string_types): + ret['facts'] = json.loads(ret['facts']) + return ret diff --git a/awx/api/templates/api/_list_common.md b/awx/api/templates/api/_list_common.md index e355421de3..706ae732a5 100644 --- a/awx/api/templates/api/_list_common.md +++ b/awx/api/templates/api/_list_common.md @@ -56,6 +56,10 @@ within all designated text fields of a model. _Added in AWX 1.4_ +(_Added in Ansible Tower 3.1.0_) Search across related fields: + + ?related__search=findme + ## Filtering Any additional query string parameters may be used to filter the list of @@ -132,3 +136,8 @@ values. Lists (for the `in` lookup) may be specified as a comma-separated list of values. 
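For example, a sketch of `in` lookups expressed as comma-separated query string values, per the rules above (the field names are assumed for illustration):

    ?id__in=1,2,3
    ?name__in=deploy-app,run-tests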
+ +(_Added in Ansible Tower 3.1.0_) Filtering based on the requesting user's +level of access by query string parameter. + +* `role_level`: Level of role to filter on, such as `admin_role` diff --git a/awx/api/templates/api/_new_in_awx.md b/awx/api/templates/api/_new_in_awx.md index 4df45be686..a113b9d5fa 100644 --- a/awx/api/templates/api/_new_in_awx.md +++ b/awx/api/templates/api/_new_in_awx.md @@ -1,9 +1,13 @@ +{% if not version_label_flag or version_label_flag == 'true' %} {% if new_in_13 %}> _Added in AWX 1.3_{% endif %} {% if new_in_14 %}> _Added in AWX 1.4_{% endif %} {% if new_in_145 %}> _Added in Ansible Tower 1.4.5_{% endif %} {% if new_in_148 %}> _Added in Ansible Tower 1.4.8_{% endif %} -{% if new_in_200 %}> _New in Ansible Tower 2.0.0_{% endif %} -{% if new_in_220 %}> _New in Ansible Tower 2.2.0_{% endif %} -{% if new_in_230 %}> _New in Ansible Tower 2.3.0_{% endif %} -{% if new_in_240 %}> _New in Ansible Tower 2.4.0_{% endif %} -{% if new_in_300 %}> _New in Ansible Tower 3.0.0_{% endif %} +{% if new_in_200 %}> _Added in Ansible Tower 2.0.0_{% endif %} +{% if new_in_220 %}> _Added in Ansible Tower 2.2.0_{% endif %} +{% if new_in_230 %}> _Added in Ansible Tower 2.3.0_{% endif %} +{% if new_in_240 %}> _Added in Ansible Tower 2.4.0_{% endif %} +{% if new_in_300 %}> _Added in Ansible Tower 3.0.0_{% endif %} +{% if new_in_310 %}> _New in Ansible Tower 3.1.0_{% endif %} +{% if deprecated %}> _This resource has been deprecated and will be removed in a future release_{% endif %} +{% endif %} diff --git a/awx/api/templates/api/auth_token_view.md b/awx/api/templates/api/auth_token_view.md index c25c658aef..69078842d4 100644 --- a/awx/api/templates/api/auth_token_view.md +++ b/awx/api/templates/api/auth_token_view.md @@ -32,3 +32,6 @@ agent that originally obtained it. Each request that uses the token for authentication will refresh its expiration timestamp and keep it from expiring. A token only expires when it is not used for the configured timeout interval (default 1800 seconds). + +A DELETE request with the token set will cause the token to be invalidated and +no further requests can be made with it. diff --git a/awx/api/templates/api/job_template_label_list.md b/awx/api/templates/api/job_template_label_list.md index 9d503e9c65..fa2163141b 100644 --- a/awx/api/templates/api/job_template_label_list.md +++ b/awx/api/templates/api/job_template_label_list.md @@ -1,4 +1,6 @@ +{% with 'false' as version_label_flag %} {% include "api/sub_list_create_api_view.md" %} +{% endwith %} Labels not associated with any other resources are deleted. A label can become disassociated with a resource as a result of 3 events. @@ -6,4 +8,6 @@ Labels not associated with any other resources are deleted. A label can become d 2. A job is deleted with labels 3. 
A cleanup job deletes a job with labels +{% with 'true' as version_label_flag %} {% include "api/_new_in_awx.md" %} +{% endwith %} diff --git a/awx/api/templates/api/job_template_survey_spec.md b/awx/api/templates/api/job_template_survey_spec.md index d1a222b31f..d4c98bac3f 100644 --- a/awx/api/templates/api/job_template_survey_spec.md +++ b/awx/api/templates/api/job_template_survey_spec.md @@ -1,4 +1,4 @@ -POST requests to this resource should include the full specification for a Job Template Survey +POST requests to this resource should include the full specification for a {{ model_verbose_name|title }}'s Survey Here is an example survey specification: @@ -30,7 +30,7 @@ Within each survey item `type` must be one of: * multiselect: For survey questions where multiple items from a presented list can be selected Each item must contain a `question_name` and `question_description` field that describes the survey question itself. -The `variable` elements of each survey items represents the key that will be given to the playbook when the job template +The `variable` element of each survey item represents the key that will be given to the playbook when the {{model_verbose_name}} is launched. It will contain the value as a result of the survey. Here is a more comprehensive example showing the various question types and their acceptable parameters: diff --git a/awx/api/templates/api/system_job_template_launch.md b/awx/api/templates/api/system_job_template_launch.md index 4543014005..3a5d2d3b7a 100644 --- a/awx/api/templates/api/system_job_template_launch.md +++ b/awx/api/templates/api/system_job_template_launch.md @@ -2,20 +2,33 @@ Launch a Job Template: Make a POST request to this resource to launch the system job template. -An extra parameter `extra_vars` is suggested in order to pass extra parameters -to the system job task. +Variables specified inside of the parameter `extra_vars` are passed to the +system job task as command line parameters. These tasks can be run manually +on the host system via the `tower-manage` command. For example on `cleanup_jobs` and `cleanup_activitystream`: -`{"days": 30}` +`{"extra_vars": {"days": 30}}` Which will act on data older than 30 days. For `cleanup_facts`: -`{"older_than": "4w", `granularity`: "3d"}` +`{"extra_vars": {"older_than": "4w", "granularity": "3d"}}` Which will reduce the granularity of scan data to one scan per 3 days when the data is older than 4w. +For `cleanup_activitystream` and `cleanup_jobs` commands, providing +`"dry_run": true` inside of `extra_vars` will show items that will be +removed without deleting them. + +Each individual system job task has its own default values, which are +applicable either when running it from the command line or launching its +system job template with empty `extra_vars`. + + - Defaults for `cleanup_activitystream`: days=90 + - Defaults for `cleanup_facts`: older_than="30d", granularity="1w" + - Defaults for `cleanup_jobs`: days=90 + If successful, the response status code will be 202. If the job cannot be launched, a 405 status code will be returned. diff --git a/awx/api/templates/api/unified_job_stdout.md b/awx/api/templates/api/unified_job_stdout.md index 63f7acea8e..d86c6e2378 100644 --- a/awx/api/templates/api/unified_job_stdout.md +++ b/awx/api/templates/api/unified_job_stdout.md @@ -13,6 +13,7 @@ Use the `format` query string parameter to specify the output format.
* Plain Text with ANSI color codes: `?format=ansi` * JSON structure: `?format=json` * Downloaded Plain Text: `?format=txt_download` +* Downloaded Plain Text with ANSI color codes: `?format=ansi_download` (_New in Ansible Tower 2.0.0_) When using the Browsable API, HTML and JSON formats, the `start_line` and `end_line` query string parameters can be used @@ -21,7 +22,8 @@ to specify a range of line numbers to retrieve. Use `dark=1` or `dark=0` as a query string parameter to force or disable a dark background. -+Files over {{ settings.STDOUT_MAX_BYTES_DISPLAY|filesizeformat }} (configurable) will not display in the browser. Use the `txt_download` -+format to download the file directly to view it. +Files over {{ settings.STDOUT_MAX_BYTES_DISPLAY|filesizeformat }} (configurable) +will not display in the browser. Use the `txt_download` or `ansi_download` +formats to download the file directly to view it. {% include "api/_new_in_awx.md" %} diff --git a/awx/api/templates/api/workflow_job_cancel.md b/awx/api/templates/api/workflow_job_cancel.md new file mode 100644 index 0000000000..bcdd347d36 --- /dev/null +++ b/awx/api/templates/api/workflow_job_cancel.md @@ -0,0 +1,12 @@ +# Cancel Workflow Job + +Make a GET request to this resource to determine if the workflow job can be +canceled. The response will include the following field: + +* `can_cancel`: Indicates whether this workflow job is in a state that can + be canceled (boolean, read-only) + +Make a POST request to this endpoint to submit a request to cancel a pending +or running workflow job. The response status code will be 202 if the +request to cancel was successfully submitted, or 405 if the workflow job +cannot be canceled. diff --git a/awx/api/templates/api/workflow_job_relaunch.md b/awx/api/templates/api/workflow_job_relaunch.md new file mode 100644 index 0000000000..f9a9b2c31c --- /dev/null +++ b/awx/api/templates/api/workflow_job_relaunch.md @@ -0,0 +1,5 @@ +Relaunch a workflow job: + +Make a POST request to this endpoint to launch a workflow job identical to the parent workflow job. This will spawn jobs, project updates, or inventory updates based on the unified job templates referenced in the workflow nodes in the workflow job. No POST data is accepted for this action. + +If successful, the response status code will be 201 and serialized data of the new workflow job will be returned. \ No newline at end of file diff --git a/awx/api/templates/api/workflow_job_template_copy.md b/awx/api/templates/api/workflow_job_template_copy.md new file mode 100644 index 0000000000..86944ed707 --- /dev/null +++ b/awx/api/templates/api/workflow_job_template_copy.md @@ -0,0 +1,34 @@ +Copy a Workflow Job Template: + +Make a GET request to this resource to determine if the current user has +permission to copy the {{model_verbose_name}} and whether any linked +templates or prompted fields will be ignored due to permissions problems. 
+The response will include the following fields: + +* `can_copy`: Flag indicating whether the active user has permission to make + a copy of this {{model_verbose_name}}, provides same content as the + {{model_verbose_name}} detail view summary_fields.user_capabilities.copy + (boolean, read-only) +* `can_copy_without_user_input`: Flag indicating if the user should be + prompted for confirmation before the copy is executed (boolean, read-only) +* `templates_unable_to_copy`: List of node ids of nodes that have a related + job template, project, or inventory that the current user lacks permission + to use and will be missing in workflow nodes of the copy (array, read-only) +* `inventories_unable_to_copy`: List of node ids of nodes that have a related + prompted inventory that the current user lacks permission + to use and will be missing in workflow nodes of the copy (array, read-only) +* `credentials_unable_to_copy`: List of node ids of nodes that have a related + prompted credential that the current user lacks permission + to use and will be missing in workflow nodes of the copy (array, read-only) + +Make a POST request to this endpoint to save a copy of this +{{model_verbose_name}}. No POST data is accepted for this action. + +If successful, the response status code will be 201. The response body will +contain serialized data about the new {{model_verbose_name}}, which will be +similar to the original {{model_verbose_name}}, but with an additional `@` +and a timestamp in the name. + +All workflow nodes and connections in the original will also exist in the +copy. The nodes will be missing related resources if the user did not have +access to use them. diff --git a/awx/api/templates/api/workflow_job_template_launch.md b/awx/api/templates/api/workflow_job_template_launch.md new file mode 100644 index 0000000000..dca08c59d9 --- /dev/null +++ b/awx/api/templates/api/workflow_job_template_launch.md @@ -0,0 +1,34 @@ +Launch a Workflow Job Template: + +Make a GET request to this resource to determine if the workflow_job_template +can be launched and whether any passwords are required to launch the +workflow_job_template. The response will include the following fields: + +* `can_start_without_user_input`: Flag indicating if the workflow_job_template + can be launched without user-input (boolean, read-only) +* `variables_needed_to_start`: Required variable names required to launch the + workflow_job_template (array, read-only) +* `survey_enabled`: Flag indicating whether the workflow_job_template has an + enabled survey (boolean, read-only) +* `extra_vars`: Text which is the `extra_vars` field of this workflow_job_template + (text, read-only) +* `node_templates_missing`: List of node ids of all nodes that have a + null `unified_job_template`, which will cause their branches to stop + execution (list, read-only) +* `node_prompts_rejected`: List of node ids of all nodes that have + specified a field that will be rejected because its `unified_job_template` + does not allow prompting for this field, this will not halt execution of + the branch but the field will be ignored (list, read-only) +* `workflow_job_template_data`: JSON object listing general information of + this workflow_job_template (JSON object, read-only) + +Make a POST request to this resource to launch the workflow_job_template. If any +credential, inventory, project or extra variables (extra_vars) are required, they +must be passed via POST data, with extra_vars given as a YAML or JSON string and +escaped parentheses. 
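For example, a minimal launch request; the endpoint path follows the URL patterns added in this change, and the variable names inside `extra_vars` are illustrative:

    POST /api/v1/workflow_job_templates/42/launch/
    Content-Type: application/json

    {"extra_vars": {"target_env": "staging", "release_tag": "1.2.3"}}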
+ +If successful, the response status code will be 201. If any required passwords +are not provided, a 400 status code will be returned. If the workflow job cannot +be launched, a 405 status code will be returned. If the provided credential or +inventory are not allowed to be used by the user, then a 403 status code will +be returned. diff --git a/awx/api/templates/api/workflow_job_template_workflow_nodes_list.md b/awx/api/templates/api/workflow_job_template_workflow_nodes_list.md new file mode 100644 index 0000000000..9e5d0f688f --- /dev/null +++ b/awx/api/templates/api/workflow_job_template_workflow_nodes_list.md @@ -0,0 +1,15 @@ +# Workflow Job Template Workflow Node List + +Workflow nodes reference templates to execute and define the ordering +in which to execute them. After a job in this workflow finishes, +the subsequent actions are to: + + - run nodes contained in "failure_nodes" or "always_nodes" if job failed + - run nodes contained in "success_nodes" or "always_nodes" if job succeeded + +The workflow job is marked as `successful` if all of the jobs running as +a part of the workflow job have completed, and the workflow job has not +been canceled. Even if a job within the workflow has failed, the workflow +job will not be marked as failed. + +{% include "api/sub_list_create_api_view.md" %} \ No newline at end of file diff --git a/awx/api/urls.py b/awx/api/urls.py index 19ecdd3e1b..f3abfae3fe 100644 --- a/awx/api/urls.py +++ b/awx/api/urls.py @@ -18,6 +18,7 @@ organization_urls = patterns('awx.api.views', url(r'^(?P<pk>[0-9]+)/admins/$', 'organization_admins_list'), url(r'^(?P<pk>[0-9]+)/inventories/$', 'organization_inventories_list'), url(r'^(?P<pk>[0-9]+)/projects/$', 'organization_projects_list'), + url(r'^(?P<pk>[0-9]+)/workflow_job_templates/$', 'organization_workflow_job_templates_list'), url(r'^(?P<pk>[0-9]+)/teams/$', 'organization_teams_list'), url(r'^(?P<pk>[0-9]+)/credentials/$', 'organization_credential_list'), url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'organization_activity_stream_list'), @@ -60,6 +61,7 @@ project_urls = patterns('awx.api.views', ) project_update_urls = patterns('awx.api.views', + url(r'^$', 'project_update_list'), url(r'^(?P<pk>[0-9]+)/$', 'project_update_detail'), url(r'^(?P<pk>[0-9]+)/cancel/$', 'project_update_cancel'), url(r'^(?P<pk>[0-9]+)/stdout/$', 'project_update_stdout'), @@ -145,6 +147,7 @@ inventory_source_urls = patterns('awx.api.views', ) inventory_update_urls = patterns('awx.api.views', + url(r'^$', 'inventory_update_list'), url(r'^(?P<pk>[0-9]+)/$', 'inventory_update_detail'), url(r'^(?P<pk>[0-9]+)/cancel/$', 'inventory_update_cancel'), url(r'^(?P<pk>[0-9]+)/stdout/$', 'inventory_update_stdout'), @@ -202,8 +205,6 @@ job_urls = patterns('awx.api.views', url(r'^(?P<pk>[0-9]+)/relaunch/$', 'job_relaunch'), url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', 'job_job_host_summaries_list'), url(r'^(?P<pk>[0-9]+)/job_events/$', 'job_job_events_list'), - url(r'^(?P<pk>[0-9]+)/job_plays/$', 'job_job_plays_list'), - url(r'^(?P<pk>[0-9]+)/job_tasks/$', 'job_job_tasks_list'), url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'job_activity_stream_list'), url(r'^(?P<pk>[0-9]+)/stdout/$', 'job_stdout'), url(r'^(?P<pk>[0-9]+)/notifications/$', 'job_notifications_list'), @@ -228,6 +229,7 @@ ad_hoc_command_urls = patterns('awx.api.views', url(r'^(?P<pk>[0-9]+)/relaunch/$', 'ad_hoc_command_relaunch'), url(r'^(?P<pk>[0-9]+)/events/$', 'ad_hoc_command_ad_hoc_command_events_list'), url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'ad_hoc_command_activity_stream_list'), + url(r'^(?P<pk>[0-9]+)/notifications/$', 'ad_hoc_command_notifications_list'), url(r'^(?P<pk>[0-9]+)/stdout/$',
'ad_hoc_command_stdout'), ) @@ -254,6 +256,36 @@ system_job_urls = patterns('awx.api.views', url(r'^(?P<pk>[0-9]+)/notifications/$', 'system_job_notifications_list'), ) +workflow_job_template_urls = patterns('awx.api.views', + url(r'^$', 'workflow_job_template_list'), + url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_template_detail'), + url(r'^(?P<pk>[0-9]+)/workflow_jobs/$', 'workflow_job_template_jobs_list'), + url(r'^(?P<pk>[0-9]+)/launch/$', 'workflow_job_template_launch'), + url(r'^(?P<pk>[0-9]+)/copy/$', 'workflow_job_template_copy'), + url(r'^(?P<pk>[0-9]+)/schedules/$', 'workflow_job_template_schedules_list'), + url(r'^(?P<pk>[0-9]+)/survey_spec/$', 'workflow_job_template_survey_spec'), + url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_template_workflow_nodes_list'), + url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'workflow_job_template_activity_stream_list'), + url(r'^(?P<pk>[0-9]+)/notification_templates_any/$', 'workflow_job_template_notification_templates_any_list'), + url(r'^(?P<pk>[0-9]+)/notification_templates_error/$', 'workflow_job_template_notification_templates_error_list'), + url(r'^(?P<pk>[0-9]+)/notification_templates_success/$', 'workflow_job_template_notification_templates_success_list'), + url(r'^(?P<pk>[0-9]+)/access_list/$', 'workflow_job_template_access_list'), + url(r'^(?P<pk>[0-9]+)/object_roles/$', 'workflow_job_template_object_roles_list'), + url(r'^(?P<pk>[0-9]+)/labels/$', 'workflow_job_template_label_list'), +) + +workflow_job_urls = patterns('awx.api.views', + url(r'^$', 'workflow_job_list'), + url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_detail'), + url(r'^(?P<pk>[0-9]+)/workflow_nodes/$', 'workflow_job_workflow_nodes_list'), + url(r'^(?P<pk>[0-9]+)/labels/$', 'workflow_job_label_list'), + url(r'^(?P<pk>[0-9]+)/cancel/$', 'workflow_job_cancel'), + url(r'^(?P<pk>[0-9]+)/relaunch/$', 'workflow_job_relaunch'), + url(r'^(?P<pk>[0-9]+)/notifications/$', 'workflow_job_notifications_list'), + url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'workflow_job_activity_stream_list'), +) + + notification_template_urls = patterns('awx.api.views', url(r'^$', 'notification_template_list'), url(r'^(?P<pk>[0-9]+)/$', 'notification_template_detail'), @@ -271,6 +303,22 @@ label_urls = patterns('awx.api.views', url(r'^(?P<pk>[0-9]+)/$', 'label_detail'), ) +workflow_job_template_node_urls = patterns('awx.api.views', + url(r'^$', 'workflow_job_template_node_list'), + url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_template_node_detail'), + url(r'^(?P<pk>[0-9]+)/success_nodes/$', 'workflow_job_template_node_success_nodes_list'), + url(r'^(?P<pk>[0-9]+)/failure_nodes/$', 'workflow_job_template_node_failure_nodes_list'), + url(r'^(?P<pk>[0-9]+)/always_nodes/$', 'workflow_job_template_node_always_nodes_list'), +) + +workflow_job_node_urls = patterns('awx.api.views', + url(r'^$', 'workflow_job_node_list'), + url(r'^(?P<pk>[0-9]+)/$', 'workflow_job_node_detail'), + url(r'^(?P<pk>[0-9]+)/success_nodes/$', 'workflow_job_node_success_nodes_list'), + url(r'^(?P<pk>[0-9]+)/failure_nodes/$', 'workflow_job_node_failure_nodes_list'), + url(r'^(?P<pk>[0-9]+)/always_nodes/$', 'workflow_job_node_always_nodes_list'), +) + schedule_urls = patterns('awx.api.views', url(r'^$', 'schedule_list'), url(r'^(?P<pk>[0-9]+)/$', 'schedule_detail'), @@ -282,10 +330,6 @@ activity_stream_urls = patterns('awx.api.views', url(r'^(?P<pk>[0-9]+)/$', 'activity_stream_detail'), ) -settings_urls = patterns('awx.api.views', - url(r'^$', 'settings_list'), - url(r'^reset/$', 'settings_reset')) - v1_urls = patterns('awx.api.views', url(r'^$', 'api_v1_root_view'), url(r'^ping/$', 'api_v1_ping_view'), @@ -295,8 +339,7 @@ v1_urls = patterns('awx.api.views', url(r'^me/$',
'user_me_list'), url(r'^dashboard/$', 'dashboard_view'), url(r'^dashboard/graphs/jobs/$','dashboard_jobs_graph_view'), - # TODO: Uncomment aftger 3.0 when we bring database settings endpoints back - # url(r'^settings/', include(settings_urls)), + url(r'^settings/', include('awx.conf.urls')), url(r'^schedules/', include(schedule_urls)), url(r'^organizations/', include(organization_urls)), url(r'^users/', include(user_urls)), @@ -321,7 +364,11 @@ v1_urls = patterns('awx.api.views', url(r'^system_jobs/', include(system_job_urls)), url(r'^notification_templates/', include(notification_template_urls)), url(r'^notifications/', include(notification_urls)), + url(r'^workflow_job_templates/',include(workflow_job_template_urls)), + url(r'^workflow_jobs/' ,include(workflow_job_urls)), url(r'^labels/', include(label_urls)), + url(r'^workflow_job_template_nodes/', include(workflow_job_template_node_urls)), + url(r'^workflow_job_nodes/', include(workflow_job_node_urls)), url(r'^unified_job_templates/$','unified_job_template_list'), url(r'^unified_jobs/$', 'unified_job_list'), url(r'^activity_stream/', include(activity_stream_urls)), diff --git a/awx/api/utils/decorators.py b/awx/api/utils/decorators.py deleted file mode 100644 index 223125597c..0000000000 --- a/awx/api/utils/decorators.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -from collections import OrderedDict -import copy -import functools - -from rest_framework.response import Response -from rest_framework.settings import api_settings -from rest_framework import status - -def paginated(method): - """Given an method with a Django REST Framework API method signature - (e.g. `def get(self, request, ...):`), abstract out boilerplate pagination - duties. - - This causes the method to receive two additional keyword arguments: - `limit`, and `offset`. The method expects a two-tuple to be - returned, with a result list as the first item, and the total number - of results (across all pages) as the second item. - """ - @functools.wraps(method) - def func(self, request, *args, **kwargs): - # Manually spin up pagination. - # How many results do we show? - paginator_class = api_settings.DEFAULT_PAGINATION_CLASS - limit = paginator_class.page_size - if request.query_params.get(paginator_class.page_size_query_param, False): - limit = request.query_params[paginator_class.page_size_query_param] - if paginator_class.max_page_size: - limit = min(paginator_class.max_page_size, limit) - limit = int(limit) - - # Get the order parameter if it's given - if request.query_params.get("ordering", False): - ordering = request.query_params["ordering"] - else: - ordering = None - - # What page are we on? - page = int(request.query_params.get('page', 1)) - offset = (page - 1) * limit - - # Add the limit, offset, page, and order variables to the keyword arguments - # being sent to the underlying method. - kwargs['limit'] = limit - kwargs['offset'] = offset - kwargs['ordering'] = ordering - - # Okay, call the underlying method. - results, count, stat = method(self, request, *args, **kwargs) - if stat is None: - stat = status.HTTP_200_OK - - if stat == status.HTTP_200_OK: - # Determine the next and previous pages, if any. 
- prev, next_ = None, None - if page > 1: - get_copy = copy.copy(request.GET) - get_copy['page'] = page - 1 - prev = '%s?%s' % (request.path, get_copy.urlencode()) - if count > offset + limit: - get_copy = copy.copy(request.GET) - get_copy['page'] = page + 1 - next_ = '%s?%s' % (request.path, get_copy.urlencode()) - - # Compile the results into a dictionary with pagination - # information. - answer = OrderedDict(( - ('count', count), - ('next', next_), - ('previous', prev), - ('results', results), - )) - else: - answer = results - - # Okay, we're done; return response data. - return Response(answer, status=stat) - return func - diff --git a/awx/api/views.py b/awx/api/views.py index 4496122bf4..e69dc57fd4 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -4,24 +4,26 @@ # Python import os +import re import cgi import datetime import dateutil import time import socket +import subprocess import sys -import errno import logging from base64 import b64encode from collections import OrderedDict # Django from django.conf import settings -from django.contrib.auth.models import User +from django.contrib.auth.models import User, AnonymousUser +from django.core.cache import cache from django.core.urlresolvers import reverse from django.core.exceptions import FieldError -from django.db.models import Q, Count -from django.db import IntegrityError, transaction +from django.db.models import Q, Count, F +from django.db import IntegrityError, transaction, connection from django.shortcuts import get_object_or_404 from django.utils.encoding import smart_text, force_text from django.utils.safestring import mark_safe @@ -31,6 +33,7 @@ from django.template.loader import render_to_string from django.core.servers.basehttp import FileWrapper from django.http import HttpResponse from django.contrib.contenttypes.models import ContentType +from django.utils.translation import ugettext_lazy as _ # Django REST Framework @@ -56,26 +59,29 @@ import ansiconv from social.backends.utils import load_backends # AWX -from awx.main.task_engine import TaskSerializer, TASK_FILE, TEMPORARY_TASK_FILE from awx.main.tasks import send_notifications from awx.main.access import get_user_queryset from awx.main.ha import is_ha_environment from awx.api.authentication import TaskAuthentication, TokenGetAuthentication -from awx.api.utils.decorators import paginated from awx.api.generics import get_view_name from awx.api.generics import * # noqa -from awx.api.license import feature_enabled, feature_exists, LicenseForbids +from awx.conf.license import get_license, feature_enabled, feature_exists, LicenseForbids from awx.main.models import * # noqa from awx.main.utils import * # noqa +from awx.main.utils import ( + callback_filter_out_ansible_extra_vars +) from awx.api.permissions import * # noqa from awx.api.renderers import * # noqa from awx.api.serializers import * # noqa from awx.api.metadata import RoleMetadata -from awx.main.utils import emit_websocket_notification -from awx.main.conf import tower_settings +from awx.main.consumers import emit_channel_notification +from awx.main.models.unified_jobs import ACTIVE_STATES +from awx.main.scheduler.tasks import run_job_complete logger = logging.getLogger('awx.api.views') + def api_exception_handler(exc, context): ''' Override default API exception handler to catch IntegrityError exceptions. 
@@ -86,30 +92,65 @@ def api_exception_handler(exc, context): exc = ParseError(exc.args[0]) return exception_handler(exc, context) + +class ActivityStreamEnforcementMixin(object): + ''' + Mixin to check that license supports activity streams. + ''' + def check_permissions(self, request): + if not feature_enabled('activity_streams'): + raise LicenseForbids(_('Your license does not allow use of the activity stream.')) + return super(ActivityStreamEnforcementMixin, self).check_permissions(request) + + +class SystemTrackingEnforcementMixin(object): + ''' + Mixin to check that license supports system tracking. + ''' + def check_permissions(self, request): + if not feature_enabled('system_tracking'): + raise LicenseForbids(_('Your license does not permit use of system tracking.')) + return super(SystemTrackingEnforcementMixin, self).check_permissions(request) + + +class WorkflowsEnforcementMixin(object): + ''' + Mixin to check that license supports workflows. + ''' + def check_permissions(self, request): + if not feature_enabled('workflows'): + raise LicenseForbids(_('Your license does not allow use of workflows.')) + return super(WorkflowsEnforcementMixin, self).check_permissions(request) + + class ApiRootView(APIView): authentication_classes = [] permission_classes = (AllowAny,) - view_name = 'REST API' + view_name = _('REST API') def get(self, request, format=None): ''' list supported API versions ''' current = reverse('api:api_v1_root_view', args=[]) data = dict( - description = 'Ansible Tower REST API', + description = _('Ansible Tower REST API'), current_version = current, available_versions = dict( v1 = current - ) + ), ) + if feature_enabled('rebranding'): + data['custom_logo'] = settings.CUSTOM_LOGO + data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO return Response(data) + class ApiV1RootView(APIView): authentication_classes = [] permission_classes = (AllowAny,) - view_name = 'Version 1' + view_name = _('Version 1') def get(self, request, format=None): ''' list top level resources ''' @@ -118,18 +159,19 @@ class ApiV1RootView(APIView): data['authtoken'] = reverse('api:auth_token_view') data['ping'] = reverse('api:api_v1_ping_view') data['config'] = reverse('api:api_v1_config_view') - # TODO: Uncomment after 3.0 when we bring database settings endpoints back - # data['settings'] = reverse('api:settings_list') + data['settings'] = reverse('api:setting_category_list') data['me'] = reverse('api:user_me_list') data['dashboard'] = reverse('api:dashboard_view') data['organizations'] = reverse('api:organization_list') data['users'] = reverse('api:user_list') data['projects'] = reverse('api:project_list') + data['project_updates'] = reverse('api:project_update_list') data['teams'] = reverse('api:team_list') data['credentials'] = reverse('api:credential_list') data['inventory'] = reverse('api:inventory_list') data['inventory_scripts'] = reverse('api:inventory_script_list') data['inventory_sources'] = reverse('api:inventory_source_list') + data['inventory_updates'] = reverse('api:inventory_update_list') data['groups'] = reverse('api:group_list') data['hosts'] = reverse('api:host_list') data['job_templates'] = reverse('api:job_template_list') @@ -146,6 +188,10 @@ class ApiV1RootView(APIView): data['unified_job_templates'] = reverse('api:unified_job_template_list') data['unified_jobs'] = reverse('api:unified_job_list') data['activity_stream'] = reverse('api:activity_stream_list') + data['workflow_job_templates'] = reverse('api:workflow_job_template_list') + data['workflow_jobs'] = 
reverse('api:workflow_job_list') + data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list') + data['workflow_job_nodes'] = reverse('api:workflow_job_node_list') return Response(data) @@ -155,7 +201,7 @@ class ApiV1PingView(APIView): """ permission_classes = (AllowAny,) authentication_classes = () - view_name = 'Ping' + view_name = _('Ping') new_in_210 = True def get(self, request, format=None): @@ -164,48 +210,47 @@ class ApiV1PingView(APIView): Everything returned here should be considered public / insecure, as this requires no auth and is intended for use by the installer process. """ - # Most of this response is canned; just build the dictionary. + active_tasks = cache.get("active_celery_tasks", None) response = { 'ha': is_ha_environment(), - 'role': Instance.objects.my_role(), 'version': get_awx_version(), + 'active_node': settings.CLUSTER_HOST_ID, } - # If this is an HA environment, we also include the IP address of - # all of the instances. - # - # Set up a default structure. - response['instances'] = { - 'primary': None, - 'secondaries': [], - } + if not isinstance(request.user, AnonymousUser): + response['celery_active_tasks'] = json.loads(active_tasks) if active_tasks is not None else None - # Add all of the instances into the structure. + response['instances'] = [] for instance in Instance.objects.all(): - if instance.primary: - response['instances']['primary'] = instance.hostname - else: - response['instances']['secondaries'].append(instance.hostname) - response['instances']['secondaries'].sort() - - # Done; return the response. + response['instances'].append(dict(node=instance.hostname, heartbeat=instance.modified, capacity=instance.capacity)) + response['instances'].sort() return Response(response) class ApiV1ConfigView(APIView): permission_classes = (IsAuthenticated,) - view_name = 'Configuration' + view_name = _('Configuration') + + def check_permissions(self, request): + super(ApiV1ConfigView, self).check_permissions(request) + if not request.user.is_superuser and request.method.lower() not in {'options', 'head', 'get'}: + self.permission_denied(request) # Raises PermissionDenied exception. def get(self, request, format=None): '''Return various sitewide configuration settings.''' - license_reader = TaskSerializer() - license_data = license_reader.from_database(show_key=request.user.is_superuser or request.user.is_system_auditor) + if request.user.is_superuser or request.user.is_system_auditor: + license_data = get_license(show_key=True) + else: + license_data = get_license(show_key=False) + if not license_data.get('valid_key', False): + license_data = {} if license_data and 'features' in license_data and 'activity_streams' in license_data['features']: - license_data['features']['activity_streams'] &= tower_settings.ACTIVITY_STREAM_ENABLED + # FIXME: Make the final setting value dependent on the feature? 
+ license_data['features']['activity_streams'] &= settings.ACTIVITY_STREAM_ENABLED - pendo_state = tower_settings.PENDO_TRACKING_STATE if tower_settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off' + pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off' data = dict( time_zone=settings.TIME_ZONE, @@ -237,65 +282,55 @@ class ApiV1ConfigView(APIView): return Response(data) def post(self, request): - if not request.user.is_superuser: - return Response(None, status=status.HTTP_404_NOT_FOUND) if not isinstance(request.data, dict): - return Response({"error": "Invalid license data"}, status=status.HTTP_400_BAD_REQUEST) + return Response({"error": _("Invalid license data")}, status=status.HTTP_400_BAD_REQUEST) if "eula_accepted" not in request.data: - return Response({"error": "Missing 'eula_accepted' property"}, status=status.HTTP_400_BAD_REQUEST) + return Response({"error": _("Missing 'eula_accepted' property")}, status=status.HTTP_400_BAD_REQUEST) try: eula_accepted = to_python_boolean(request.data["eula_accepted"]) except ValueError: - return Response({"error": "'eula_accepted' value is invalid"}, status=status.HTTP_400_BAD_REQUEST) + return Response({"error": _("'eula_accepted' value is invalid")}, status=status.HTTP_400_BAD_REQUEST) if not eula_accepted: - return Response({"error": "'eula_accepted' must be True"}, status=status.HTTP_400_BAD_REQUEST) + return Response({"error": _("'eula_accepted' must be True")}, status=status.HTTP_400_BAD_REQUEST) request.data.pop("eula_accepted") try: data_actual = json.dumps(request.data) except Exception: - # FIX: Log - return Response({"error": "Invalid JSON"}, status=status.HTTP_400_BAD_REQUEST) - license_reader = TaskSerializer() + logger.info(smart_text(u"Invalid JSON submitted for Tower license."), + extra=dict(actor=request.user.username)) + return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST) try: - license_data = license_reader.from_string(data_actual) + from awx.main.task_engine import TaskEnhancer + license_data = json.loads(data_actual) + license_data_validated = TaskEnhancer(**license_data).validate_enhancements() except Exception: - # FIX: Log - return Response({"error": "Invalid License"}, status=status.HTTP_400_BAD_REQUEST) + logger.warning(smart_text(u"Invalid Tower license submitted."), + extra=dict(actor=request.user.username)) + return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST) - # If the license is valid, write it to disk. - if license_data['valid_key']: - tower_settings.LICENSE = data_actual - tower_settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host()) - return Response(license_data) + # If the license is valid, write it to the database. 
+ if license_data_validated['valid_key']: + settings.LICENSE = license_data + settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host()) + return Response(license_data_validated) - return Response({"error": "Invalid license"}, status=status.HTTP_400_BAD_REQUEST) + logger.warning(smart_text(u"Invalid Tower license submitted."), + extra=dict(actor=request.user.username)) + return Response({"error": _("Invalid license")}, status=status.HTTP_400_BAD_REQUEST) def delete(self, request): - if not request.user.is_superuser: - return Response(None, status=status.HTTP_404_NOT_FOUND) - - # Remove license file - has_error = None - for fname in (TEMPORARY_TASK_FILE, TASK_FILE): - try: - os.remove(fname) - except OSError as e: - if e.errno != errno.ENOENT: - has_error = e.errno - break - - TowerSettings.objects.filter(key="LICENSE").delete() - - # Only stop mongod if license removal succeeded - if has_error is None: + try: + settings.LICENSE = {} return Response(status=status.HTTP_204_NO_CONTENT) - else: - return Response({"error": "Failed to remove license (%s)" % has_error}, status=status.HTTP_400_BAD_REQUEST) + except Exception as e: + # FIX: Log + return Response({"error": _("Failed to remove license (%s)") % e}, status=status.HTTP_400_BAD_REQUEST) + class DashboardView(APIView): - view_name = "Dashboard" + view_name = _("Dashboard") new_in_14 = True def get(self, request, format=None): @@ -398,9 +433,10 @@ class DashboardView(APIView): 'total': job_template_list.count()} return Response(data) + class DashboardJobsGraphView(APIView): - view_name = "Dashboard Jobs Graphs" + view_name = _("Dashboard Jobs Graphs") new_in_200 = True def get(self, request, format=None): @@ -436,7 +472,7 @@ class DashboardJobsGraphView(APIView): end_date = start_date - dateutil.relativedelta.relativedelta(days=1) interval = 'hours' else: - return Response({'error': 'Unknown period "%s"' % str(period)}, status=status.HTTP_400_BAD_REQUEST) + return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST) dashboard_data = {"jobs": {"successful": [], "failed": []}} for element in success_qss.time_series(end_date, start_date, interval=interval): @@ -450,26 +486,29 @@ class DashboardJobsGraphView(APIView): class ScheduleList(ListAPIView): - view_name = "Schedules" + view_name = _("Schedules") model = Schedule serializer_class = ScheduleSerializer new_in_148 = True + class ScheduleDetail(RetrieveUpdateDestroyAPIView): model = Schedule serializer_class = ScheduleSerializer new_in_148 = True + class ScheduleUnifiedJobsList(SubListAPIView): model = UnifiedJob serializer_class = UnifiedJobSerializer parent_model = Schedule relationship = 'unifiedjob_set' - view_name = 'Schedule Jobs List' + view_name = _('Schedule Jobs List') new_in_148 = True + class AuthView(APIView): authentication_classes = [] @@ -479,7 +518,7 @@ class AuthView(APIView): def get(self, request): data = OrderedDict() err_backend, err_message = request.session.get('social_auth_error', (None, None)) - auth_backends = load_backends(settings.AUTHENTICATION_BACKENDS).items() + auth_backends = load_backends(settings.AUTHENTICATION_BACKENDS, force_load=True).items() # Return auth backends in consistent order: Google, GitHub, SAML.
auth_backends.sort(key=lambda x: 'g' if x[0] == 'google-oauth2' else x[0]) for name, backend in auth_backends: @@ -500,7 +539,7 @@ class AuthView(APIView): saml_backend_data = dict(backend_data.items()) saml_backend_data['login_url'] = '%s?idp=%s' % (login_url, idp) full_backend_name = '%s:%s' % (name, idp) - if err_backend == full_backend_name and err_message: + if (err_backend == full_backend_name or err_backend == name) and err_message: saml_backend_data['error'] = err_message data[full_backend_name] = saml_backend_data else: @@ -509,6 +548,7 @@ class AuthView(APIView): data[name] = backend_data return Response(data) + class AuthTokenView(APIView): authentication_classes = [] @@ -540,34 +580,45 @@ class AuthTokenView(APIView): reason='')[0] token.refresh() if 'username' in request.data: - logger.info(smart_text(u"User {} logged in".format(request.data['username']))) + logger.info(smart_text(u"User {} logged in".format(request.data['username'])), + extra=dict(actor=request.data['username'])) except IndexError: token = AuthToken.objects.create(user=serializer.validated_data['user'], request_hash=request_hash) if 'username' in request.data: - logger.info(smart_text(u"User {} logged in".format(request.data['username']))) + logger.info(smart_text(u"User {} logged in".format(request.data['username'])), + extra=dict(actor=request.data['username'])) # Get user un-expired tokens that are not invalidated that are # over the configured limit. # Mark them as invalid and inform the user invalid_tokens = AuthToken.get_tokens_over_limit(serializer.validated_data['user']) for t in invalid_tokens: - # TODO: send socket notification - emit_websocket_notification('/socket.io/control', - 'limit_reached', - dict(reason=force_text(AuthToken.reason_long('limit_reached'))), - token_key=t.key) + emit_channel_notification('control-limit_reached', dict(group_name='control', + reason=force_text(AuthToken.reason_long('limit_reached')), + token_key=t.key)) t.invalidate(reason='limit_reached') # Note: This header is normally added in the middleware whenever an # auth token is included in the request header. 
headers = { - 'Auth-Token-Timeout': int(tower_settings.AUTH_TOKEN_EXPIRATION) + 'Auth-Token-Timeout': int(settings.AUTH_TOKEN_EXPIRATION) } return Response({'token': token.key, 'expires': token.expires}, headers=headers) if 'username' in request.data: - logger.warning(smart_text(u"Login failed for user {}".format(request.data['username']))) + logger.warning(smart_text(u"Login failed for user {}".format(request.data['username'])), + extra=dict(actor=request.data['username'])) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + def delete(self, request): + if 'HTTP_AUTHORIZATION' in request.META: + token_match = re.match("Token\s(.+)", request.META['HTTP_AUTHORIZATION']) + if token_match: + filter_tokens = AuthToken.objects.filter(key=token_match.groups()[0]) + if filter_tokens.exists(): + filter_tokens[0].invalidate() + return Response(status=status.HTTP_204_NO_CONTENT) + + class OrganizationCountsMixin(object): def get_serializer_context(self, *args, **kwargs): @@ -595,15 +646,16 @@ class OrganizationCountsMixin(object): self.request.user, 'read_role').values('organization').annotate( Count('organization')).order_by('organization') - JT_reference = 'project__organization' - db_results['job_templates'] = JobTemplate.accessible_objects( - self.request.user, 'read_role').exclude(job_type='scan').values(JT_reference).annotate( - Count(JT_reference)).order_by(JT_reference) + JT_project_reference = 'project__organization' + JT_inventory_reference = 'inventory__organization' + db_results['job_templates_project'] = JobTemplate.accessible_objects( + self.request.user, 'read_role').exclude( + project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate( + Count(JT_project_reference)).order_by(JT_project_reference) - JT_scan_reference = 'inventory__organization' - db_results['job_templates_scan'] = JobTemplate.accessible_objects( - self.request.user, 'read_role').filter(job_type='scan').values(JT_scan_reference).annotate( - Count(JT_scan_reference)).order_by(JT_scan_reference) + db_results['job_templates_inventory'] = JobTemplate.accessible_objects( + self.request.user, 'read_role').values(JT_inventory_reference).annotate( + Count(JT_inventory_reference)).order_by(JT_inventory_reference) db_results['projects'] = project_qs\ .values('organization').annotate(Count('organization')).order_by('organization') @@ -621,16 +673,16 @@ class OrganizationCountsMixin(object): 'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0, 'admins': 0, 'projects': 0} - for res in db_results: - if res == 'job_templates': - org_reference = JT_reference - elif res == 'job_templates_scan': - org_reference = JT_scan_reference + for res, count_qs in db_results.items(): + if res == 'job_templates_project': + org_reference = JT_project_reference + elif res == 'job_templates_inventory': + org_reference = JT_inventory_reference elif res == 'users': org_reference = 'id' else: org_reference = 'organization' - for entry in db_results[res]: + for entry in count_qs: org_id = entry[org_reference] if org_id in count_context: if res == 'users': @@ -639,16 +691,19 @@ class OrganizationCountsMixin(object): continue count_context[org_id][res] = entry['%s__count' % org_reference] - # Combine the counts for job templates with scan job templates + # Combine the counts for job templates by project and inventory for org in org_id_list: org_id = org['id'] - if 'job_templates_scan' in count_context[org_id]: - count_context[org_id]['job_templates'] += count_context[org_id].pop('job_templates_scan') 
+ count_context[org_id]['job_templates'] = 0 + for related_path in ['job_templates_project', 'job_templates_inventory']: + if related_path in count_context[org_id]: + count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path) full_context['related_field_counts'] = count_context return full_context + class OrganizationList(OrganizationCountsMixin, ListCreateAPIView): model = Organization @@ -657,6 +712,7 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView): def get_queryset(self): qs = Organization.accessible_objects(self.request.user, 'read_role') qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role') + qs = qs.prefetch_related('created_by', 'modified_by') return qs def create(self, request, *args, **kwargs): @@ -671,12 +727,13 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView): # if no organizations exist in the system. if (not feature_enabled('multiple_organizations') and self.model.objects.exists()): - raise LicenseForbids('Your Tower license only permits a single ' - 'organization to exist.') + raise LicenseForbids(_('Your Tower license only permits a single ' + 'organization to exist.')) # Okay, create the organization as usual. return super(OrganizationList, self).create(request, *args, **kwargs) + class OrganizationDetail(RetrieveUpdateDestroyAPIView): model = Organization @@ -716,6 +773,7 @@ class OrganizationDetail(RetrieveUpdateDestroyAPIView): return full_context + class OrganizationInventoriesList(SubListAPIView): model = Inventory @@ -728,7 +786,7 @@ class BaseUsersList(SubListCreateAttachDetachAPIView): def post(self, request, *args, **kwargs): ret = super(BaseUsersList, self).post( request, *args, **kwargs) try: - if request.data.get('is_system_auditor', False): + if ret.data is not None and request.data.get('is_system_auditor', False): # This is a faux-field that just maps to checking the system # auditor role member list.. unfortunately this means we can't # set it on creation, and thus needs to be set here. 
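The three license-gate mixins added earlier in this patch (ActivityStreamEnforcementMixin, SystemTrackingEnforcementMixin, WorkflowsEnforcementMixin) all hook DRF's check_permissions() instead of overriding get(): the check fires for every HTTP verb, OPTIONS included, then defers to super() so the normal permission chain still runs. A minimal, self-contained sketch of that pattern, with a stub feature flag standing in for awx.conf.license.feature_enabled and a plain PermissionDenied standing in for LicenseForbids (FeatureGateMixin and DemoWorkflowsView are illustrative names, not AWX code):

    from rest_framework.exceptions import PermissionDenied
    from rest_framework.permissions import AllowAny
    from rest_framework.response import Response
    from rest_framework.views import APIView

    ENABLED_FEATURES = {'activity_streams'}  # stand-in for the license data

    def feature_enabled(name):
        # Stub for awx.conf.license.feature_enabled().
        return name in ENABLED_FEATURES

    class FeatureGateMixin(object):
        # Gate every HTTP method on a licensed feature, then fall through
        # to the regular DRF permission checks via super().
        feature = None

        def check_permissions(self, request):
            if not feature_enabled(self.feature):
                raise PermissionDenied('Your license does not allow use of %s.' % self.feature)
            return super(FeatureGateMixin, self).check_permissions(request)

    class DemoWorkflowsView(FeatureGateMixin, APIView):
        permission_classes = (AllowAny,)
        feature = 'workflows'

        def get(self, request, format=None):
            return Response({'workflows': []})

Because the mixin precedes the view base class in the MRO, a subclass only declares which feature it needs; the per-view get() overrides deleted throughout this patch repeated the same check in each list view and, as the removed docstring notes, never ran for OPTIONS requests.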
@@ -740,6 +798,7 @@ class BaseUsersList(SubListCreateAttachDetachAPIView): pass return ret + class OrganizationUsersList(BaseUsersList): model = User @@ -747,6 +806,7 @@ class OrganizationUsersList(BaseUsersList): parent_model = Organization relationship = 'member_role.members' + class OrganizationAdminsList(BaseUsersList): model = User @@ -754,6 +814,7 @@ class OrganizationAdminsList(BaseUsersList): parent_model = Organization relationship = 'admin_role.members' + class OrganizationProjectsList(SubListCreateAttachDetachAPIView): model = Project @@ -762,6 +823,17 @@ class OrganizationProjectsList(SubListCreateAttachDetachAPIView): relationship = 'projects' parent_key = 'organization' + +class OrganizationWorkflowJobTemplatesList(SubListCreateAttachDetachAPIView): + + model = WorkflowJobTemplate + serializer_class = WorkflowJobTemplateListSerializer + parent_model = Organization + relationship = 'workflows' + parent_key = 'organization' + new_in_310 = True + + class OrganizationTeamsList(SubListCreateAttachDetachAPIView): model = Team @@ -770,7 +842,8 @@ class OrganizationTeamsList(SubListCreateAttachDetachAPIView): relationship = 'teams' parent_key = 'organization' -class OrganizationActivityStreamList(SubListAPIView): + +class OrganizationActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -778,15 +851,6 @@ class OrganizationActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
- return super(OrganizationActivityStreamList, self).get(request, *args, **kwargs) class OrganizationNotificationTemplatesList(SubListCreateAttachDetachAPIView): @@ -796,12 +860,15 @@ class OrganizationNotificationTemplatesList(SubListCreateAttachDetachAPIView): relationship = 'notification_templates' parent_key = 'organization' + class OrganizationNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView): model = NotificationTemplate serializer_class = NotificationTemplateSerializer parent_model = Organization relationship = 'notification_templates_any' + new_in_300 = True + class OrganizationNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView): @@ -809,6 +876,8 @@ class OrganizationNotificationTemplatesErrorList(SubListCreateAttachDetachAPIVie serializer_class = NotificationTemplateSerializer parent_model = Organization relationship = 'notification_templates_error' + new_in_300 = True + class OrganizationNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView): @@ -816,13 +885,16 @@ class OrganizationNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIV serializer_class = NotificationTemplateSerializer parent_model = Organization relationship = 'notification_templates_success' + new_in_300 = True + class OrganizationAccessList(ResourceAccessList): model = User # needs to be User for AccessLists's - resource_model = Organization + parent_model = Organization new_in_300 = True + class OrganizationObjectRolesList(SubListAPIView): model = Role @@ -835,6 +907,7 @@ class OrganizationObjectRolesList(SubListAPIView): content_type = ContentType.objects.get_for_model(self.parent_model) return Role.objects.filter(content_type=content_type, object_id=po.pk) + class TeamList(ListCreateAPIView): model = Team @@ -845,11 +918,13 @@ class TeamList(ListCreateAPIView): qs = qs.select_related('admin_role', 'read_role', 'member_role', 'organization') return qs + class TeamDetail(RetrieveUpdateDestroyAPIView): model = Team serializer_class = TeamSerializer + class TeamUsersList(BaseUsersList): model = User @@ -861,10 +936,11 @@ class TeamUsersList(BaseUsersList): class TeamRolesList(SubListCreateAttachDetachAPIView): model = Role - serializer_class = RoleSerializer + serializer_class = RoleSerializerWithParentAccess metadata_class = RoleMetadata parent_model = Team relationship='member_role.children' + new_in_300 = True def get_queryset(self): team = get_object_or_404(Team, pk=self.kwargs['pk']) @@ -876,24 +952,29 @@ class TeamRolesList(SubListCreateAttachDetachAPIView): # Forbid implicit role creation here sub_id = request.data.get('id', None) if not sub_id: - data = dict(msg="Role 'id' field is missing.") + data = dict(msg=_("Role 'id' field is missing.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) role = get_object_or_400(Role, pk=sub_id) org_content_type = ContentType.objects.get_for_model(Organization) if role.content_type == org_content_type: - data = dict(msg="You cannot assign an Organization role as a child role for a Team.") + data = dict(msg=_("You cannot assign an Organization role as a child role for a Team.")) + return Response(data, status=status.HTTP_400_BAD_REQUEST) + + if role.is_singleton(): + data = dict(msg=_("You cannot grant system-level permissions to a team.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) team = get_object_or_404(Team, pk=self.kwargs['pk']) credential_content_type = ContentType.objects.get_for_model(Credential) if role.content_type == credential_content_type: if not role.content_object.organization or 
role.content_object.organization.id != team.organization.id: - data = dict(msg="You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization") + data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization")) return Response(data, status=status.HTTP_400_BAD_REQUEST) return super(TeamRolesList, self).post(request, *args, **kwargs) + class TeamObjectRolesList(SubListAPIView): model = Role @@ -906,6 +987,7 @@ class TeamObjectRolesList(SubListAPIView): content_type = ContentType.objects.get_for_model(self.parent_model) return Role.objects.filter(content_type=content_type, object_id=po.pk) + class TeamProjectsList(SubListAPIView): model = Project @@ -923,7 +1005,8 @@ class TeamProjectsList(SubListAPIView): ) return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in proj_roles]) -class TeamActivityStreamList(SubListAPIView): + +class TeamActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -931,16 +1014,6 @@ class TeamActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. - return super(TeamActivityStreamList, self).get(request, *args, **kwargs) - def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) @@ -950,16 +1023,19 @@ class TeamActivityStreamList(SubListAPIView): Q(project__in=Project.accessible_objects(parent, 'read_role')) | Q(credential__in=Credential.accessible_objects(parent, 'read_role'))) + class TeamAccessList(ResourceAccessList): model = User # needs to be User for AccessLists's - resource_model = Team + parent_model = Team new_in_300 = True + class ProjectList(ListCreateAPIView): model = Project serializer_class = ProjectSerializer + capabilities_prefetch = ['admin', 'update'] def get_queryset(self): projects_qs = Project.accessible_objects(self.request.user, 'read_role') @@ -970,16 +1046,9 @@ class ProjectList(ListCreateAPIView): 'update_role', 'read_role', ) + projects_qs = projects_qs.prefetch_related('last_job', 'created_by') return projects_qs - def get(self, request, *args, **kwargs): - # Not optimal, but make sure the project status and last_updated fields - # are up to date here... 
- projects_qs = Project.objects - projects_qs = projects_qs.select_related('current_job', 'last_job') - for project in projects_qs: - project._set_status_and_last_job_run() - return super(ProjectList, self).get(request, *args, **kwargs) class ProjectDetail(RetrieveUpdateDestroyAPIView): @@ -990,16 +1059,18 @@ class ProjectDetail(RetrieveUpdateDestroyAPIView): obj = self.get_object() can_delete = request.user.can_access(Project, 'delete', obj) if not can_delete: - raise PermissionDenied("Cannot delete project.") + raise PermissionDenied(_("Cannot delete project.")) for pu in obj.project_updates.filter(status__in=['new', 'pending', 'waiting', 'running']): pu.cancel() return super(ProjectDetail, self).destroy(request, *args, **kwargs) + class ProjectPlaybooks(RetrieveAPIView): model = Project serializer_class = ProjectPlaybooksSerializer + class ProjectTeamsList(ListAPIView): model = Team @@ -1014,9 +1085,10 @@ class ProjectTeamsList(ListAPIView): all_roles = Role.objects.filter(Q(descendents__content_type=project_ct) & Q(descendents__object_id=p.pk), content_type=team_ct) return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in all_roles]) -class ProjectSchedulesList(SubListCreateAttachDetachAPIView): - view_name = "Project Schedules" +class ProjectSchedulesList(SubListCreateAPIView): + + view_name = _("Project Schedules") model = Schedule serializer_class = ScheduleSerializer @@ -1025,7 +1097,8 @@ class ProjectSchedulesList(SubListCreateAttachDetachAPIView): parent_key = 'unified_job_template' new_in_148 = True -class ProjectActivityStreamList(SubListAPIView): + +class ProjectActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -1033,16 +1106,6 @@ class ProjectActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
- return super(ProjectActivityStreamList, self).get(request, *args, **kwargs) - def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) @@ -1053,12 +1116,15 @@ class ProjectActivityStreamList(SubListAPIView): return qs.filter(project=parent) return qs.filter(Q(project=parent) | Q(credential=parent.credential)) + class ProjectNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView): model = NotificationTemplate serializer_class = NotificationTemplateSerializer parent_model = Project relationship = 'notification_templates_any' + new_in_300 = True + class ProjectNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView): @@ -1066,6 +1132,8 @@ class ProjectNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView): serializer_class = NotificationTemplateSerializer parent_model = Project relationship = 'notification_templates_error' + new_in_300 = True + class ProjectNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView): @@ -1073,6 +1141,8 @@ class ProjectNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView): serializer_class = NotificationTemplateSerializer parent_model = Project relationship = 'notification_templates_success' + new_in_300 = True + class ProjectUpdatesList(SubListAPIView): @@ -1082,6 +1152,7 @@ class ProjectUpdatesList(SubListAPIView): relationship = 'project_updates' new_in_13 = True + class ProjectUpdateView(RetrieveAPIView): model = Project @@ -1103,12 +1174,30 @@ class ProjectUpdateView(RetrieveAPIView): else: return self.http_method_not_allowed(request, *args, **kwargs) + +class ProjectUpdateList(ListAPIView): + + model = ProjectUpdate + serializer_class = ProjectUpdateListSerializer + new_in_13 = True + + class ProjectUpdateDetail(RetrieveDestroyAPIView): model = ProjectUpdate serializer_class = ProjectUpdateSerializer new_in_13 = True + def destroy(self, request, *args, **kwargs): + obj = self.get_object() + try: + if obj.unified_job_node.workflow_job.status in ACTIVE_STATES: + raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.')) + except ProjectUpdate.unified_job_node.RelatedObjectDoesNotExist: + pass + return super(ProjectUpdateDetail, self).destroy(request, *args, **kwargs) + + class ProjectUpdateCancel(RetrieveAPIView): model = ProjectUpdate @@ -1124,19 +1213,23 @@ class ProjectUpdateCancel(RetrieveAPIView): else: return self.http_method_not_allowed(request, *args, **kwargs) + class ProjectUpdateNotificationsList(SubListAPIView): model = Notification serializer_class = NotificationSerializer parent_model = ProjectUpdate relationship = 'notifications' + new_in_300 = True + class ProjectAccessList(ResourceAccessList): model = User # needs to be User for AccessLists's - resource_model = Project + parent_model = Project new_in_300 = True + class ProjectObjectRolesList(SubListAPIView): model = Role @@ -1149,6 +1242,7 @@ class ProjectObjectRolesList(SubListAPIView): content_type = ContentType.objects.get_for_model(self.parent_model) return Role.objects.filter(content_type=content_type, object_id=po.pk) + class UserList(ListCreateAPIView): model = User @@ -1170,15 +1264,17 @@ class UserList(ListCreateAPIView): pass return ret + class UserMeList(ListAPIView): model = User serializer_class = UserSerializer - view_name = 'Me' + view_name = _('Me') def get_queryset(self): return self.model.objects.filter(pk=self.request.user.pk) + class UserTeamsList(ListAPIView): model = User @@ -1188,16 +1284,19 @@ class UserTeamsList(ListAPIView): u = 
get_object_or_404(User, pk=self.kwargs['pk']) if not self.request.user.can_access(User, 'read', u): raise PermissionDenied() - return Team.accessible_objects(self.request.user, 'read_role').filter(member_role__members=u) + return Team.accessible_objects(self.request.user, 'read_role').filter( + Q(member_role__members=u) | Q(admin_role__members=u)).distinct() + class UserRolesList(SubListCreateAttachDetachAPIView): model = Role - serializer_class = RoleSerializer + serializer_class = RoleSerializerWithParentAccess metadata_class = RoleMetadata parent_model = User relationship='roles' permission_classes = (IsAuthenticated,) + new_in_300 = True def get_queryset(self): u = get_object_or_404(User, pk=self.kwargs['pk']) @@ -1212,26 +1311,26 @@ class UserRolesList(SubListCreateAttachDetachAPIView): # Forbid implicit role creation here sub_id = request.data.get('id', None) if not sub_id: - data = dict(msg="Role 'id' field is missing.") + data = dict(msg=_("Role 'id' field is missing.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) if sub_id == self.request.user.admin_role.pk: - raise PermissionDenied('You may not perform any action with your own admin_role.') + raise PermissionDenied(_('You may not perform any action with your own admin_role.')) user = get_object_or_400(User, pk=self.kwargs['pk']) role = get_object_or_400(Role, pk=sub_id) user_content_type = ContentType.objects.get_for_model(User) if role.content_type == user_content_type: - raise PermissionDenied('You may not change the membership of a users admin_role') + raise PermissionDenied(_('You may not change the membership of a users admin_role')) credential_content_type = ContentType.objects.get_for_model(Credential) if role.content_type == credential_content_type: if role.content_object.organization and user not in role.content_object.organization.member_role: - data = dict(msg="You cannot grant credential access to a user not in the credentials' organization") + data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization")) return Response(data, status=status.HTTP_400_BAD_REQUEST) if not role.content_object.organization and not request.user.is_superuser: - data = dict(msg="You cannot grant private credential access to another user") + data = dict(msg=_("You cannot grant private credential access to another user")) return Response(data, status=status.HTTP_400_BAD_REQUEST) @@ -1241,6 +1340,7 @@ class UserRolesList(SubListCreateAttachDetachAPIView): # We hide roles that shouldn't be seen in our queryset return True + class UserProjectsList(SubListAPIView): model = Project @@ -1254,6 +1354,7 @@ class UserProjectsList(SubListAPIView): user_qs = Project.accessible_objects(parent, 'read_role') return my_qs & user_qs + class UserOrganizationsList(OrganizationCountsMixin, SubListAPIView): model = Organization @@ -1268,6 +1369,7 @@ class UserOrganizationsList(OrganizationCountsMixin, SubListAPIView): user_qs = Organization.objects.filter(member_role__members=parent) return my_qs & user_qs + class UserAdminOfOrganizationsList(OrganizationCountsMixin, SubListAPIView): model = Organization @@ -1282,7 +1384,8 @@ class UserAdminOfOrganizationsList(OrganizationCountsMixin, SubListAPIView): user_qs = Organization.objects.filter(admin_role__members=parent) return my_qs & user_qs -class UserActivityStreamList(SubListAPIView): + +class UserActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -1290,16 +1393,6 @@ class 
UserActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. - return super(UserActivityStreamList, self).get(request, *args, **kwargs) - def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) @@ -1335,19 +1428,20 @@ class UserDetail(RetrieveUpdateDestroyAPIView): if left is not None and right is not None and left != right: bad_changes[field] = (left, right) if bad_changes: - raise PermissionDenied('Cannot change %s.' % ', '.join(bad_changes.keys())) + raise PermissionDenied(_('Cannot change %s.') % ', '.join(bad_changes.keys())) def destroy(self, request, *args, **kwargs): obj = self.get_object() can_delete = request.user.can_access(User, 'delete', obj) if not can_delete: - raise PermissionDenied('Cannot delete user.') + raise PermissionDenied(_('Cannot delete user.')) return super(UserDetail, self).destroy(request, *args, **kwargs) + class UserAccessList(ResourceAccessList): model = User # needs to be User for AccessLists's - resource_model = User + parent_model = User new_in_300 = True @@ -1355,6 +1449,7 @@ class CredentialList(ListCreateAPIView): model = Credential serializer_class = CredentialSerializerCreate + capabilities_prefetch = ['admin', 'use'] class CredentialOwnerUsersList(SubListAPIView): @@ -1441,7 +1536,8 @@ class CredentialDetail(RetrieveUpdateDestroyAPIView): model = Credential serializer_class = CredentialSerializer -class CredentialActivityStreamList(SubListAPIView): + +class CredentialActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -1449,22 +1545,14 @@ class CredentialActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
- return super(CredentialActivityStreamList, self).get(request, *args, **kwargs) class CredentialAccessList(ResourceAccessList): model = User # needs to be User for AccessLists's - resource_model = Credential + parent_model = Credential new_in_300 = True + class CredentialObjectRolesList(SubListAPIView): model = Role @@ -1477,26 +1565,31 @@ class CredentialObjectRolesList(SubListAPIView): content_type = ContentType.objects.get_for_model(self.parent_model) return Role.objects.filter(content_type=content_type, object_id=po.pk) + class InventoryScriptList(ListCreateAPIView): model = CustomInventoryScript serializer_class = CustomInventoryScriptSerializer + new_in_210 = True + class InventoryScriptDetail(RetrieveUpdateDestroyAPIView): model = CustomInventoryScript serializer_class = CustomInventoryScriptSerializer + new_in_210 = True def destroy(self, request, *args, **kwargs): instance = self.get_object() can_delete = request.user.can_access(self.model, 'delete', instance) if not can_delete: - raise PermissionDenied("Cannot delete inventory script.") + raise PermissionDenied(_("Cannot delete inventory script.")) for inv_src in InventorySource.objects.filter(source_script=instance): inv_src.source_script = None inv_src.save() return super(InventoryScriptDetail, self).destroy(request, *args, **kwargs) + class InventoryScriptObjectRolesList(SubListAPIView): model = Role @@ -1509,16 +1602,20 @@ class InventoryScriptObjectRolesList(SubListAPIView): content_type = ContentType.objects.get_for_model(self.parent_model) return Role.objects.filter(content_type=content_type, object_id=po.pk) + class InventoryList(ListCreateAPIView): model = Inventory serializer_class = InventorySerializer + capabilities_prefetch = ['admin', 'adhoc'] def get_queryset(self): qs = Inventory.accessible_objects(self.request.user, 'read_role') qs = qs.select_related('admin_role', 'read_role', 'update_role', 'use_role', 'adhoc_role') + qs = qs.prefetch_related('created_by', 'modified_by', 'organization') return qs + class InventoryDetail(RetrieveUpdateDestroyAPIView): model = Inventory @@ -1529,7 +1626,8 @@ class InventoryDetail(RetrieveUpdateDestroyAPIView): with ignore_inventory_group_removal(): return super(InventoryDetail, self).destroy(request, *args, **kwargs) -class InventoryActivityStreamList(SubListAPIView): + +class InventoryActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -1537,28 +1635,20 @@ class InventoryActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
- return super(InventoryActivityStreamList, self).get(request, *args, **kwargs) - def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) qs = self.request.user.get_queryset(self.model) return qs.filter(Q(inventory=parent) | Q(host__in=parent.hosts.all()) | Q(group__in=parent.groups.all())) + class InventoryAccessList(ResourceAccessList): model = User # needs to be User for AccessLists's - resource_model = Inventory + parent_model = Inventory new_in_300 = True + class InventoryObjectRolesList(SubListAPIView): model = Role @@ -1571,6 +1661,7 @@ class InventoryObjectRolesList(SubListAPIView): content_type = ContentType.objects.get_for_model(self.parent_model) return Role.objects.filter(content_type=content_type, object_id=po.pk) + class InventoryJobTemplateList(SubListAPIView): model = JobTemplate @@ -1585,6 +1676,7 @@ class InventoryJobTemplateList(SubListAPIView): qs = self.request.user.get_queryset(self.model) return qs.filter(inventory=parent) + class InventoryScanJobTemplateList(SubListAPIView): model = JobTemplate @@ -1599,17 +1691,22 @@ class InventoryScanJobTemplateList(SubListAPIView): qs = self.request.user.get_queryset(self.model) return qs.filter(job_type=PERM_INVENTORY_SCAN, inventory=parent) + class HostList(ListCreateAPIView): always_allow_superuser = False model = Host serializer_class = HostSerializer + capabilities_prefetch = ['inventory.admin'] + class HostDetail(RetrieveUpdateDestroyAPIView): + always_allow_superuser = False model = Host serializer_class = HostSerializer + class InventoryHostsList(SubListCreateAttachDetachAPIView): model = Host @@ -1618,6 +1715,7 @@ class InventoryHostsList(SubListCreateAttachDetachAPIView): relationship = 'hosts' parent_key = 'inventory' + class HostGroupsList(SubListCreateAttachDetachAPIView): ''' the list of groups a host is directly a member of ''' @@ -1639,6 +1737,7 @@ class HostGroupsList(SubListCreateAttachDetachAPIView): data['inventory'] = self.get_parent_object().inventory_id return super(HostGroupsList, self).create(request, *args, **kwargs) + class HostAllGroupsList(SubListAPIView): ''' the list of all groups of which the host is directly or indirectly a member ''' @@ -1654,6 +1753,7 @@ class HostAllGroupsList(SubListAPIView): sublist_qs = parent.all_groups.distinct() return qs & sublist_qs + class HostInventorySourcesList(SubListAPIView): model = InventorySource @@ -1662,7 +1762,8 @@ class HostInventorySourcesList(SubListAPIView): relationship = 'inventory_sources' new_in_148 = True -class HostActivityStreamList(SubListAPIView): + +class HostActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -1670,33 +1771,14 @@ class HostActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
- return super(HostActivityStreamList, self).get(request, *args, **kwargs) - def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) qs = self.request.user.get_queryset(self.model) return qs.filter(Q(host=parent) | Q(inventory=parent.inventory)) -class SystemTrackingEnforcementMixin(APIView): - ''' - Use check_permissions instead of initial() because it's in the OPTION's path as well - ''' - def check_permissions(self, request): - if not feature_enabled("system_tracking"): - raise LicenseForbids("Your license does not permit use " - "of system tracking.") - return super(SystemTrackingEnforcementMixin, self).check_permissions(request) -class HostFactVersionsList(ListAPIView, ParentMixin, SystemTrackingEnforcementMixin): +class HostFactVersionsList(SystemTrackingEnforcementMixin, ParentMixin, ListAPIView): model = Fact serializer_class = FactVersionSerializer @@ -1721,7 +1803,8 @@ class HostFactVersionsList(ListAPIView, ParentMixin, SystemTrackingEnforcementMi queryset = self.get_queryset() or [] return Response(dict(results=self.serializer_class(queryset, many=True).data)) -class HostFactCompareView(SubDetailAPIView, SystemTrackingEnforcementMixin): + +class HostFactCompareView(SystemTrackingEnforcementMixin, SubDetailAPIView): model = Fact new_in_220 = True @@ -1737,24 +1820,35 @@ class HostFactCompareView(SubDetailAPIView, SystemTrackingEnforcementMi fact_entry = Fact.get_host_fact(host_obj.id, module_spec, datetime_actual) if not fact_entry: - return Response({'detail': 'Fact not found.'}, status=status.HTTP_404_NOT_FOUND) + return Response({'detail': _('Fact not found.')}, status=status.HTTP_404_NOT_FOUND) return Response(self.serializer_class(instance=fact_entry).data) + class GroupList(ListCreateAPIView): model = Group serializer_class = GroupSerializer + capabilities_prefetch = ['inventory.admin', 'inventory.adhoc', 'inventory.update'] -class GroupChildrenList(SubListCreateAttachDetachAPIView): - model = Group - serializer_class = GroupSerializer - parent_model = Group - relationship = 'children' +class EnforceParentRelationshipMixin(object): + ''' + Useful when you have a self-referring ManyToManyRelationship. + * Tower uses a shallow (2-deep only) url pattern. For example: + + When an object hangs off of a parent object you would have the url of the + form /api/v1/parent_model/34/child_model. If you then wanted a child of the + child model you would NOT do /api/v1/parent_model/34/child_model/87/child_child_model + Instead, you would access the child_child_model via /api/v1/child_child_model/87/ + and you would create child_child_models off of /api/v1/child_model/87/child_child_model_set + Now, when creating child_child_model related to child_model you still want to + link child_child_model to parent_model. That's what this class is for. + ''' + enforce_parent_relationship = '' def update_raw_data(self, data): - data.pop('inventory', None) - return super(GroupChildrenList, self).update_raw_data(data) + data.pop(self.enforce_parent_relationship, None) + return super(EnforceParentRelationshipMixin, self).update_raw_data(data) def create(self, request, *args, **kwargs): # Inject parent group inventory ID into new group data. @@ -1762,17 +1856,29 @@ # HACK: Make request data mutable.
if getattr(data, '_mutable', None) is False: data._mutable = True - data['inventory'] = self.get_parent_object().inventory_id - return super(GroupChildrenList, self).create(request, *args, **kwargs) + data[self.enforce_parent_relationship] = getattr(self.get_parent_object(), '%s_id' % self.enforce_parent_relationship) + return super(EnforceParentRelationshipMixin, self).create(request, *args, **kwargs) + + +class GroupChildrenList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView): + + model = Group + serializer_class = GroupSerializer + parent_model = Group + relationship = 'children' + enforce_parent_relationship = 'inventory' def unattach(self, request, *args, **kwargs): sub_id = request.data.get('id', None) if sub_id is not None: return super(GroupChildrenList, self).unattach(request, *args, **kwargs) parent = self.get_parent_object() + if not request.user.can_access(self.model, 'delete', parent): + raise PermissionDenied() parent.delete() return Response(status=status.HTTP_204_NO_CONTENT) + class GroupPotentialChildrenList(SubListAPIView): model = Group @@ -1790,6 +1896,7 @@ class GroupPotentialChildrenList(SubListAPIView): except_pks.update(parent.all_children.values_list('pk', flat=True)) return qs.exclude(pk__in=except_pks) + class GroupHostsList(SubListCreateAttachDetachAPIView): ''' the list of hosts directly below a group ''' @@ -1815,6 +1922,7 @@ class GroupHostsList(SubListCreateAttachDetachAPIView): return self.attach(request, *args, **kwargs) return super(GroupHostsList, self).create(request, *args, **kwargs) + class GroupAllHostsList(SubListAPIView): ''' the list of all hosts below a group, even including subgroups ''' @@ -1830,6 +1938,7 @@ class GroupAllHostsList(SubListAPIView): sublist_qs = parent.all_hosts.distinct() return qs & sublist_qs + class GroupInventorySourcesList(SubListAPIView): model = InventorySource @@ -1838,7 +1947,8 @@ class GroupInventorySourcesList(SubListAPIView): relationship = 'inventory_sources' new_in_148 = True -class GroupActivityStreamList(SubListAPIView): + +class GroupActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -1846,22 +1956,13 @@ class GroupActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
- return super(GroupActivityStreamList, self).get(request, *args, **kwargs) - def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) qs = self.request.user.get_queryset(self.model) return qs.filter(Q(group=parent) | Q(host__in=parent.hosts.all())) + class GroupDetail(RetrieveUpdateDestroyAPIView): model = Group @@ -1874,6 +1975,7 @@ class GroupDetail(RetrieveUpdateDestroyAPIView): obj.delete_recursive() return Response(status=status.HTTP_204_NO_CONTENT) + class InventoryGroupsList(SubListCreateAttachDetachAPIView): model = Group @@ -1882,6 +1984,7 @@ class InventoryGroupsList(SubListCreateAttachDetachAPIView): relationship = 'groups' parent_key = 'inventory' + class InventoryRootGroupsList(SubListCreateAttachDetachAPIView): model = Group @@ -1896,27 +1999,32 @@ class InventoryRootGroupsList(SubListCreateAttachDetachAPIView): qs = self.request.user.get_queryset(self.model).distinct() # need distinct for '&' operator return qs & parent.root_groups + class BaseVariableData(RetrieveUpdateAPIView): parser_classes = api_settings.DEFAULT_PARSER_CLASSES + [YAMLParser] renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [YAMLRenderer] is_variable_data = True # Special flag for permissions check. + class InventoryVariableData(BaseVariableData): model = Inventory serializer_class = InventoryVariableDataSerializer + class HostVariableData(BaseVariableData): model = Host serializer_class = HostVariableDataSerializer + class GroupVariableData(BaseVariableData): model = Group serializer_class = GroupVariableDataSerializer + class InventoryScriptView(RetrieveAPIView): model = Inventory @@ -2002,6 +2110,7 @@ class InventoryScriptView(RetrieveAPIView): return Response(data) + class InventoryTreeView(RetrieveAPIView): model = Inventory @@ -2034,13 +2143,14 @@ class InventoryTreeView(RetrieveAPIView): group_children_map) return Response(tree_data) + class InventoryInventorySourcesList(SubListAPIView): model = InventorySource serializer_class = InventorySourceSerializer parent_model = Inventory relationship = None # Not defined since using get_queryset(). 
- view_name = 'Inventory Source List' + view_name = _('Inventory Source List') new_in_14 = True def get_queryset(self): @@ -2050,12 +2160,14 @@ class InventoryInventorySourcesList(SubListAPIView): return qs.filter(Q(inventory__pk=parent.pk) | Q(group__inventory__pk=parent.pk)) + class InventorySourceList(ListAPIView): model = InventorySource serializer_class = InventorySourceSerializer new_in_14 = True + class InventorySourceDetail(RetrieveUpdateAPIView): model = InventorySource @@ -2066,14 +2178,15 @@ class InventorySourceDetail(RetrieveUpdateAPIView): obj = self.get_object() can_delete = request.user.can_access(InventorySource, 'delete', obj) if not can_delete: - raise PermissionDenied("Cannot delete inventory source.") + raise PermissionDenied(_("Cannot delete inventory source.")) for pu in obj.inventory_updates.filter(status__in=['new', 'pending', 'waiting', 'running']): pu.cancel() return super(InventorySourceDetail, self).destroy(request, *args, **kwargs) -class InventorySourceSchedulesList(SubListCreateAttachDetachAPIView): - view_name = "Inventory Source Schedules" +class InventorySourceSchedulesList(SubListCreateAPIView): + + view_name = _("Inventory Source Schedules") model = Schedule serializer_class = ScheduleSerializer @@ -2082,7 +2195,8 @@ class InventorySourceSchedulesList(SubListCreateAttachDetachAPIView): parent_key = 'unified_job_template' new_in_148 = True -class InventorySourceActivityStreamList(SubListAPIView): + +class InventorySourceActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -2090,15 +2204,6 @@ class InventorySourceActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. - return super(InventorySourceActivityStreamList, self).get(request, *args, **kwargs) class InventorySourceNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView): @@ -2106,23 +2211,27 @@ class InventorySourceNotificationTemplatesAnyList(SubListCreateAttachDetachAPIVi serializer_class = NotificationTemplateSerializer parent_model = InventorySource relationship = 'notification_templates_any' + new_in_300 = True def post(self, request, *args, **kwargs): parent = self.get_parent_object() if parent.source not in CLOUD_INVENTORY_SOURCES: - return Response(dict(msg="Notification Templates can only be assigned when source is one of {}." 
+ return Response(dict(msg=_("Notification Templates can only be assigned when source is one of {}.") .format(CLOUD_INVENTORY_SOURCES, parent.source)), status=status.HTTP_400_BAD_REQUEST) return super(InventorySourceNotificationTemplatesAnyList, self).post(request, *args, **kwargs) + class InventorySourceNotificationTemplatesErrorList(InventorySourceNotificationTemplatesAnyList): relationship = 'notification_templates_error' + class InventorySourceNotificationTemplatesSuccessList(InventorySourceNotificationTemplatesAnyList): relationship = 'notification_templates_success' + class InventorySourceHostsList(SubListAPIView): model = Host @@ -2131,6 +2240,7 @@ class InventorySourceHostsList(SubListAPIView): relationship = 'hosts' new_in_148 = True + class InventorySourceGroupsList(SubListAPIView): model = Group @@ -2139,6 +2249,7 @@ class InventorySourceGroupsList(SubListAPIView): relationship = 'groups' new_in_148 = True + class InventorySourceUpdatesList(SubListAPIView): model = InventoryUpdate @@ -2147,6 +2258,7 @@ class InventorySourceUpdatesList(SubListAPIView): relationship = 'inventory_updates' new_in_14 = True + class InventorySourceUpdateView(RetrieveAPIView): model = InventorySource @@ -2166,12 +2278,29 @@ class InventorySourceUpdateView(RetrieveAPIView): else: return self.http_method_not_allowed(request, *args, **kwargs) + +class InventoryUpdateList(ListAPIView): + + model = InventoryUpdate + serializer_class = InventoryUpdateListSerializer + + class InventoryUpdateDetail(RetrieveDestroyAPIView): model = InventoryUpdate serializer_class = InventoryUpdateSerializer new_in_14 = True + def destroy(self, request, *args, **kwargs): + obj = self.get_object() + try: + if obj.unified_job_node.workflow_job.status in ACTIVE_STATES: + raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.')) + except InventoryUpdate.unified_job_node.RelatedObjectDoesNotExist: + pass + return super(InventoryUpdateDetail, self).destroy(request, *args, **kwargs) + + class InventoryUpdateCancel(RetrieveAPIView): model = InventoryUpdate @@ -2187,18 +2316,25 @@ class InventoryUpdateCancel(RetrieveAPIView): else: return self.http_method_not_allowed(request, *args, **kwargs) + class InventoryUpdateNotificationsList(SubListAPIView): model = Notification serializer_class = NotificationSerializer parent_model = InventoryUpdate relationship = 'notifications' + new_in_300 = True + class JobTemplateList(ListCreateAPIView): model = JobTemplate serializer_class = JobTemplateSerializer always_allow_superuser = False + capabilities_prefetch = [ + 'admin', 'execute', + {'copy': ['project.use', 'inventory.use', 'credential.use', 'cloud_credential.use', 'network_credential.use']} + ] def post(self, request, *args, **kwargs): ret = super(JobTemplateList, self).post(request, *args, **kwargs) @@ -2207,12 +2343,14 @@ class JobTemplateList(ListCreateAPIView): job_template.admin_role.members.add(request.user) return ret + class JobTemplateDetail(RetrieveUpdateDestroyAPIView): model = JobTemplate serializer_class = JobTemplateSerializer always_allow_superuser = False + class JobTemplateLaunch(RetrieveAPIView, GenericAPIView): model = JobTemplate @@ -2221,7 +2359,10 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView): always_allow_superuser = False def update_raw_data(self, data): - obj = self.get_object() + try: + obj = self.get_object() + except PermissionDenied: + return data extra_vars = data.pop('extra_vars', None) or {} if obj: for p in obj.passwords_needed_to_start: @@ -2243,8 
+2384,6 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView): def post(self, request, *args, **kwargs): obj = self.get_object() - if not request.user.can_access(self.model, 'start', obj): - raise PermissionDenied() if 'credential' not in request.data and 'credential_id' in request.data: request.data['credential'] = request.data['credential_id'] @@ -2268,11 +2407,9 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView): if request.user not in new_inventory.use_role: raise PermissionDenied() - kv = prompted_fields - kv.update(passwords) + new_job = obj.create_unified_job(**prompted_fields) + result = new_job.signal_start(**passwords) - new_job = obj.create_unified_job(**kv) - result = new_job.signal_start(**kv) if not result: data = dict(passwords_needed_to_start=new_job.passwords_needed_to_start) new_job.delete() @@ -2280,13 +2417,14 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView): else: data = OrderedDict() data['ignored_fields'] = ignored_fields - data.update(JobSerializer(new_job).to_representation(new_job)) + data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job)) data['job'] = new_job.id return Response(data, status=status.HTTP_201_CREATED) -class JobTemplateSchedulesList(SubListCreateAttachDetachAPIView): - view_name = "Job Template Schedules" +class JobTemplateSchedulesList(SubListCreateAPIView): + + view_name = _("Job Template Schedules") model = Schedule serializer_class = ScheduleSerializer @@ -2295,18 +2433,26 @@ class JobTemplateSchedulesList(SubListCreateAttachDetachAPIView): parent_key = 'unified_job_template' new_in_148 = True + class JobTemplateSurveySpec(GenericAPIView): model = JobTemplate parent_model = JobTemplate serializer_class = EmptySerializer + new_in_210 = True def get(self, request, *args, **kwargs): obj = self.get_object() if not feature_enabled('surveys'): - raise LicenseForbids('Your license does not allow ' - 'adding surveys.') - return Response(obj.survey_spec) + raise LicenseForbids(_('Your license does not allow ' + 'adding surveys.')) + survey_spec = obj.survey_spec + for pos, field in enumerate(survey_spec.get('spec', [])): + if field.get('type') == 'password': + if 'default' in field and field['default']: + field['default'] = '$encrypted$' + + return Response(survey_spec) def post(self, request, *args, **kwargs): obj = self.get_object() @@ -2314,44 +2460,52 @@ class JobTemplateSurveySpec(GenericAPIView): # Sanity check: Are surveys available on this license? # If not, do not allow them to be used. 
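        # For reference, a minimal survey spec that satisfies the validation
        # in this view might look like (hypothetical values, not part of this
        # changeset):
        #     {"name": "example", "description": "example survey",
        #      "spec": [{"type": "text", "question_name": "Greeting",
        #                "variable": "greeting", "required": False}]}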
if not feature_enabled('surveys'): - raise LicenseForbids('Your license does not allow ' - 'adding surveys.') + raise LicenseForbids(_('Your license does not allow ' + 'adding surveys.')) if not request.user.can_access(self.model, 'change', obj, None): raise PermissionDenied() - try: - obj.survey_spec = json.dumps(request.data) - except ValueError: - return Response(dict(error="Invalid JSON when parsing survey spec."), status=status.HTTP_400_BAD_REQUEST) - if "name" not in obj.survey_spec: - return Response(dict(error="'name' missing from survey spec."), status=status.HTTP_400_BAD_REQUEST) - if "description" not in obj.survey_spec: - return Response(dict(error="'description' missing from survey spec."), status=status.HTTP_400_BAD_REQUEST) - if "spec" not in obj.survey_spec: - return Response(dict(error="'spec' missing from survey spec."), status=status.HTTP_400_BAD_REQUEST) - if not isinstance(obj.survey_spec["spec"], list): - return Response(dict(error="'spec' must be a list of items."), status=status.HTTP_400_BAD_REQUEST) - if len(obj.survey_spec["spec"]) < 1: - return Response(dict(error="'spec' doesn't contain any items."), status=status.HTTP_400_BAD_REQUEST) + new_spec = request.data + if "name" not in new_spec: + return Response(dict(error=_("'name' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST) + if "description" not in new_spec: + return Response(dict(error=_("'description' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST) + if "spec" not in new_spec: + return Response(dict(error=_("'spec' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST) + if not isinstance(new_spec["spec"], list): + return Response(dict(error=_("'spec' must be a list of items.")), status=status.HTTP_400_BAD_REQUEST) + if len(new_spec["spec"]) < 1: + return Response(dict(error=_("'spec' doesn't contain any items.")), status=status.HTTP_400_BAD_REQUEST) + idx = 0 variable_set = set() - for survey_item in obj.survey_spec["spec"]: + for survey_item in new_spec["spec"]: if not isinstance(survey_item, dict): - return Response(dict(error="Survey question %s is not a json object." % str(idx)), status=status.HTTP_400_BAD_REQUEST) + return Response(dict(error=_("Survey question %s is not a json object.") % str(idx)), status=status.HTTP_400_BAD_REQUEST) if "type" not in survey_item: - return Response(dict(error="'type' missing from survey question %s." % str(idx)), status=status.HTTP_400_BAD_REQUEST) + return Response(dict(error=_("'type' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST) if "question_name" not in survey_item: - return Response(dict(error="'question_name' missing from survey question %s." % str(idx)), status=status.HTTP_400_BAD_REQUEST) + return Response(dict(error=_("'question_name' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST) if "variable" not in survey_item: - return Response(dict(error="'variable' missing from survey question %s." % str(idx)), status=status.HTTP_400_BAD_REQUEST) + return Response(dict(error=_("'variable' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST) if survey_item['variable'] in variable_set: - return Response(dict(error="'variable' '%s' duplicated in survey question %s." 
% (survey_item['variable'], str(idx))), status=status.HTTP_400_BAD_REQUEST) + return Response(dict(error=_("'variable' '%(item)s' duplicated in survey question %(survey)s.") % { + 'item': survey_item['variable'], 'survey': str(idx)}), status=status.HTTP_400_BAD_REQUEST) else: variable_set.add(survey_item['variable']) if "required" not in survey_item: - return Response(dict(error="'required' missing from survey question %s." % str(idx)), status=status.HTTP_400_BAD_REQUEST) + return Response(dict(error=_("'required' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST) + + if survey_item["type"] == "password": + if survey_item.get("default") and survey_item["default"].startswith('$encrypted$'): + old_spec = obj.survey_spec + for old_item in old_spec['spec']: + if old_item['variable'] == survey_item['variable']: + survey_item['default'] = old_item['default'] idx += 1 - obj.save() + + obj.survey_spec = new_spec + obj.save(update_fields=['survey_spec']) return Response() def delete(self, request, *args, **kwargs): @@ -2362,7 +2516,15 @@ class JobTemplateSurveySpec(GenericAPIView): obj.save() return Response() -class JobTemplateActivityStreamList(SubListAPIView): + +class WorkflowJobTemplateSurveySpec(WorkflowsEnforcementMixin, JobTemplateSurveySpec): + + model = WorkflowJobTemplate + parent_model = WorkflowJobTemplate + new_in_310 = True + + +class JobTemplateActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -2370,15 +2532,6 @@ class JobTemplateActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
-        return super(JobTemplateActivityStreamList, self).get(request, *args, **kwargs)

 class JobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):

@@ -2386,6 +2539,8 @@ class JobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
     serializer_class = NotificationTemplateSerializer
     parent_model = JobTemplate
     relationship = 'notification_templates_any'
+    new_in_300 = True
+

 class JobTemplateNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView):

@@ -2393,6 +2548,8 @@ class JobTemplateNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView
     serializer_class = NotificationTemplateSerializer
     parent_model = JobTemplate
     relationship = 'notification_templates_error'
+    new_in_300 = True
+

 class JobTemplateNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView):

@@ -2400,6 +2557,8 @@ class JobTemplateNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIVi
     serializer_class = NotificationTemplateSerializer
     parent_model = JobTemplate
     relationship = 'notification_templates_success'
+    new_in_300 = True
+

 class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):

@@ -2407,6 +2566,7 @@ class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDeta
     serializer_class = LabelSerializer
     parent_model = JobTemplate
     relationship = 'labels'
+    new_in_300 = True

     def post(self, request, *args, **kwargs):
         # If a label already exists in the database, attach it instead of erroring out
@@ -2418,8 +2578,12 @@ class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDeta
             request.data['id'] = existing.id
             del request.data['name']
             del request.data['organization']
+        if Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
+            return Response(dict(msg=_('Maximum number of labels for {} reached.').format(
+                self.parent_model._meta.verbose_name_raw)), status=status.HTTP_400_BAD_REQUEST)
         return super(JobTemplateLabelList, self).post(request, *args, **kwargs)

+
 class JobTemplateCallback(GenericAPIView):

     model = JobTemplate
@@ -2468,23 +2632,25 @@ class JobTemplateCallback(GenericAPIView):
                 return set([hosts.get(name__in=remote_hosts)])
             except (Host.DoesNotExist, Host.MultipleObjectsReturned):
                 pass
-        # Next, try matching based on name or ansible_ssh_host variable.
+        # Next, try matching based on name or ansible_host variables.
         matches = set()
         for host in hosts:
-            ansible_ssh_host = host.variables_dict.get('ansible_ssh_host', '')
-            if ansible_ssh_host in remote_hosts:
-                matches.add(host)
-            if host.name != ansible_ssh_host and host.name in remote_hosts:
-                matches.add(host)
+            for host_var in ['ansible_ssh_host', 'ansible_host']:
+                ansible_host = host.variables_dict.get(host_var, '')
+                if ansible_host in remote_hosts:
+                    matches.add(host)
+                if host.name != ansible_host and host.name in remote_hosts:
+                    matches.add(host)
         if len(matches) == 1:
             return matches

         # Try to resolve forward addresses for each host to find matches.
         for host in hosts:
             hostnames = set([host.name])
-            ansible_ssh_host = host.variables_dict.get('ansible_ssh_host', '')
-            if ansible_ssh_host:
-                hostnames.add(ansible_ssh_host)
+            for host_var in ['ansible_ssh_host', 'ansible_host']:
+                ansible_host = host.variables_dict.get(host_var, '')
+                if ansible_host:
+                    hostnames.add(ansible_host)
             for hostname in hostnames:
                 try:
                     result = socket.getaddrinfo(hostname, None)
@@ -2542,22 +2708,22 @@ class JobTemplateCallback(GenericAPIView):
         matching_hosts = self.find_matching_hosts()
         # Check matching hosts.
         if not matching_hosts:
-            data = dict(msg='No matching host could be found!')
+            data = dict(msg=_('No matching host could be found!'))
             return Response(data, status=status.HTTP_400_BAD_REQUEST)
         elif len(matching_hosts) > 1:
-            data = dict(msg='Multiple hosts matched the request!')
+            data = dict(msg=_('Multiple hosts matched the request!'))
             return Response(data, status=status.HTTP_400_BAD_REQUEST)
         else:
             host = list(matching_hosts)[0]
-        if not job_template.can_start_without_user_input():
-            data = dict(msg='Cannot start automatically, user input required!')
+        if not job_template.can_start_without_user_input(callback_extra_vars=extra_vars):
+            data = dict(msg=_('Cannot start automatically, user input required!'))
             return Response(data, status=status.HTTP_400_BAD_REQUEST)
         limit = host.name

         # NOTE: We limit this to one job waiting per host per callback to keep them from stacking crazily
         if Job.objects.filter(status__in=['pending', 'waiting', 'running'],
                               job_template=job_template, limit=limit).count() > 0:
-            data = dict(msg='Host callback job already pending.')
+            data = dict(msg=_('Host callback job already pending.'))
             return Response(data, status=status.HTTP_400_BAD_REQUEST)

         # Everything is fine; actually create the job.
@@ -2567,10 +2733,10 @@ class JobTemplateCallback(GenericAPIView):
         # Send a signal to celery that the job should be started.
         kv = {"inventory_sources_already_updated": inventory_sources_already_updated}
         if extra_vars is not None:
-            kv['extra_vars'] = extra_vars
+            kv['extra_vars'] = callback_filter_out_ansible_extra_vars(extra_vars)
         result = job.signal_start(**kv)
         if not result:
-            data = dict(msg='Error starting job!')
+            data = dict(msg=_('Error starting job!'))
             return Response(data, status=status.HTTP_400_BAD_REQUEST)

         # Return the location of the new job.
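Aside: a minimal, self-contained sketch of the matching rule the callback hunk above adds. It assumes only that each host exposes a `name` attribute and a `variables_dict` mapping; `match_callback_hosts` is a hypothetical helper, not part of this changeset:

    # Match a callback requester against inventory hosts by host name or by
    # the legacy (ansible_ssh_host) and current (ansible_host) variables.
    def match_callback_hosts(hosts, remote_hosts):
        matches = set()
        for host in hosts:
            for host_var in ('ansible_ssh_host', 'ansible_host'):
                ansible_host = host.variables_dict.get(host_var, '')
                if ansible_host in remote_hosts:
                    matches.add(host)
                if host.name != ansible_host and host.name in remote_hosts:
                    matches.add(host)
        return matches

As the post() handler above shows, the caller rejects ambiguous requests: zero matches or more than one match both return HTTP 400.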
@@ -2586,12 +2752,14 @@ class JobTemplateJobsList(SubListCreateAPIView):
     relationship = 'jobs'
     parent_key = 'job_template'

+
 class JobTemplateAccessList(ResourceAccessList):

     model = User # needs to be User for AccessLists's
-    resource_model = JobTemplate
+    parent_model = JobTemplate
     new_in_300 = True

+
 class JobTemplateObjectRolesList(SubListAPIView):

     model = Role
@@ -2604,48 +2772,488 @@ class JobTemplateObjectRolesList(SubListAPIView):
         content_type = ContentType.objects.get_for_model(self.parent_model)
         return Role.objects.filter(content_type=content_type, object_id=po.pk)

-class SystemJobTemplateList(ListAPIView):

-    model = SystemJobTemplate
-    serializer_class = SystemJobTemplateSerializer
+class WorkflowJobNodeList(WorkflowsEnforcementMixin, ListAPIView):

-    def get(self, request, *args, **kwargs):
-        if not request.user.is_superuser and not request.user.is_system_auditor:
-            raise PermissionDenied("Superuser privileges needed.")
-        return super(SystemJobTemplateList, self).get(request, *args, **kwargs)
+    model = WorkflowJobNode
+    serializer_class = WorkflowJobNodeListSerializer
+    new_in_310 = True

-class SystemJobTemplateDetail(RetrieveAPIView):

-    model = SystemJobTemplate
-    serializer_class = SystemJobTemplateSerializer
+class WorkflowJobNodeDetail(WorkflowsEnforcementMixin, RetrieveAPIView):

-class SystemJobTemplateLaunch(GenericAPIView):
+    model = WorkflowJobNode
+    serializer_class = WorkflowJobNodeDetailSerializer
+    new_in_310 = True

-    model = SystemJobTemplate
+
+class WorkflowJobTemplateNodeList(WorkflowsEnforcementMixin, ListCreateAPIView):
+
+    model = WorkflowJobTemplateNode
+    serializer_class = WorkflowJobTemplateNodeListSerializer
+    new_in_310 = True
+
+
+class WorkflowJobTemplateNodeDetail(WorkflowsEnforcementMixin, RetrieveUpdateDestroyAPIView):
+
+    model = WorkflowJobTemplateNode
+    serializer_class = WorkflowJobTemplateNodeDetailSerializer
+    new_in_310 = True
+
+    def update_raw_data(self, data):
+        for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']:
+            data[fd] = None
+        try:
+            obj = self.get_object()
+            data.update(obj.char_prompts)
+        except Exception:
+            pass
+        return super(WorkflowJobTemplateNodeDetail, self).update_raw_data(data)
+
+
+class WorkflowJobTemplateNodeChildrenBaseList(WorkflowsEnforcementMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
+
+    model = WorkflowJobTemplateNode
+    serializer_class = WorkflowJobTemplateNodeListSerializer
+    always_allow_superuser = True
+    parent_model = WorkflowJobTemplateNode
+    relationship = ''
+    enforce_parent_relationship = 'workflow_job_template'
+    new_in_310 = True
+
+    '''
+    Limit the set of WorkflowJobTemplateNodes to the related nodes specified by
+    'relationship'
+    '''
+    def get_queryset(self):
+        parent = self.get_parent_object()
+        self.check_parent_access(parent)
+        return getattr(parent, self.relationship).all()
+
+    def is_valid_relation(self, parent, sub, created=False):
+        # success/failure links and always links are mutually exclusive on a node.
+        mutex_list = ('success_nodes', 'failure_nodes') if self.relationship == 'always_nodes' else ('always_nodes',)
+        for relation in mutex_list:
+            if getattr(parent, relation).all().exists():
+                return {'Error': _('Cannot associate {0} when {1} have been associated.').format(self.relationship, relation)}
+
+        if created:
+            return None
+
+        # Build an in-memory graph of every node in the workflow so the new
+        # edge can be checked for multiple parents and for cycles.
+        workflow_nodes = parent.workflow_job_template.workflow_job_template_nodes.all().\
+            prefetch_related('success_nodes', 'failure_nodes', 'always_nodes')
+        graph = {}
+        for workflow_node in workflow_nodes:
+            graph[workflow_node.pk] = dict(node_object=workflow_node, metadata={'parent': None, 'traversed': False})
+
+        find = False
+        for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']:
+            for workflow_node in workflow_nodes:
+                parent_node = graph[workflow_node.pk]
+                related_nodes = getattr(parent_node['node_object'], node_type).all()
+                for related_node in related_nodes:
+                    sub_node = graph[related_node.pk]
+                    sub_node['metadata']['parent'] = parent_node
+                    if not find and parent == workflow_node and sub == related_node and self.relationship == node_type:
+                        find = True
+        if not find:
+            # The edge is new: adding it must not give the child a second
+            # parent, and walking up from the child must never revisit a node.
+            sub_node = graph[sub.pk]
+            parent_node = graph[parent.pk]
+            if sub_node['metadata']['parent'] is not None:
+                return {"Error": _("Multiple parent relationships are not allowed.")}
+            sub_node['metadata']['parent'] = parent_node
+            iter_node = sub_node
+            while iter_node is not None:
+                if iter_node['metadata']['traversed']:
+                    return {"Error": _("Cycle detected.")}
+                iter_node['metadata']['traversed'] = True
+                iter_node = iter_node['metadata']['parent']
+
+        return None
+
+
+class WorkflowJobTemplateNodeSuccessNodesList(WorkflowJobTemplateNodeChildrenBaseList):
+    relationship = 'success_nodes'
+
+
+class WorkflowJobTemplateNodeFailureNodesList(WorkflowJobTemplateNodeChildrenBaseList):
+    relationship = 'failure_nodes'
+
+
+class WorkflowJobTemplateNodeAlwaysNodesList(WorkflowJobTemplateNodeChildrenBaseList):
+    relationship = 'always_nodes'
+
+
+class WorkflowJobNodeChildrenBaseList(WorkflowsEnforcementMixin, SubListAPIView):
+
+    model = WorkflowJobNode
+    serializer_class = WorkflowJobNodeListSerializer
+    parent_model = WorkflowJobNode
+    relationship = ''
+    new_in_310 = True
+
+    #
+    # Limit the set of WorkflowJobNodes to the related nodes specified by
+    # 'relationship'
+    #
+    def get_queryset(self):
+        parent = self.get_parent_object()
+        self.check_parent_access(parent)
+        return getattr(parent, self.relationship).all()
+
+
+class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
+    relationship = 'success_nodes'
+
+
+class WorkflowJobNodeFailureNodesList(WorkflowJobNodeChildrenBaseList):
+    relationship = 'failure_nodes'
+
+
+class WorkflowJobNodeAlwaysNodesList(WorkflowJobNodeChildrenBaseList):
+    relationship = 'always_nodes'
+
+
+class WorkflowJobTemplateList(WorkflowsEnforcementMixin, ListCreateAPIView):
+
+    model = WorkflowJobTemplate
+    serializer_class = WorkflowJobTemplateListSerializer
+    always_allow_superuser = False
+    new_in_310 = True
+
+
+class WorkflowJobTemplateDetail(WorkflowsEnforcementMixin, RetrieveUpdateDestroyAPIView):
+
+    model = WorkflowJobTemplate
+    serializer_class = WorkflowJobTemplateSerializer
+    always_allow_superuser = False
+    new_in_310 = True
+
+
+class WorkflowJobTemplateCopy(WorkflowsEnforcementMixin, GenericAPIView):
+
+    model = WorkflowJobTemplate
+    parent_model = WorkflowJobTemplate
     serializer_class = EmptySerializer
+    new_in_310 = True

     def get(self, request, *args, **kwargs):
-        return Response({})
+        obj = self.get_object()
+        can_copy, messages = request.user.can_access_with_errors(self.model, 'copy', obj)
+        data = OrderedDict([
+            ('can_copy', can_copy), ('can_copy_without_user_input', can_copy),
+            ('templates_unable_to_copy', [] if can_copy else ['all']),
+            ('credentials_unable_to_copy', [] if can_copy else ['all']),
+            ('inventories_unable_to_copy', [] if can_copy else ['all'])
+        ])
+        if messages and can_copy:
+            data['can_copy_without_user_input'] = False
+            data.update(messages)
+        return Response(data)
+
+    def post(self, request, *args, **kwargs):
+        obj = self.get_object()
+        if not request.user.can_access(self.model, 'copy', obj):
+            raise PermissionDenied()
+        new_obj = obj.user_copy(request.user)
+        if request.user not in new_obj.admin_role:
+            new_obj.admin_role.members.add(request.user)
+        data = OrderedDict()
+        data.update(WorkflowJobTemplateSerializer(
+            new_obj, context=self.get_serializer_context()).to_representation(new_obj))
+        return Response(data, status=status.HTTP_201_CREATED)
+
+
+class WorkflowJobTemplateLabelList(WorkflowsEnforcementMixin, JobTemplateLabelList):
+    parent_model = WorkflowJobTemplate
+    new_in_310 = True
+
+
+class WorkflowJobTemplateLaunch(WorkflowsEnforcementMixin, RetrieveAPIView):
+
+    model = WorkflowJobTemplate
+    serializer_class = WorkflowJobLaunchSerializer
+    new_in_310 = True
+    is_job_start = True
+    always_allow_superuser = False
+
+    def update_raw_data(self, data):
+        try:
+            obj = self.get_object()
+        except PermissionDenied:
+            return data
+        extra_vars = data.pop('extra_vars', None) or {}
+        if obj:
+            for v in obj.variables_needed_to_start:
+                extra_vars.setdefault(v, u'')
+            if extra_vars:
+                data['extra_vars'] = extra_vars
+        return data

     def post(self, request, *args, **kwargs):
         obj = self.get_object()
         if not request.user.can_access(self.model, 'start', obj):
             raise PermissionDenied()
-        new_job = obj.create_unified_job(**request.data)
-        new_job.signal_start(**request.data)
+        serializer = self.serializer_class(instance=obj, data=request.data)
+        if not serializer.is_valid():
+            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+
+        prompted_fields, ignored_fields = obj._accept_or_ignore_job_kwargs(**request.data)
+
+        new_job = obj.create_unified_job(**prompted_fields)
+        new_job.signal_start()
+
+        data = OrderedDict()
+        data['ignored_fields'] = ignored_fields
+        data.update(WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
+        data['workflow_job'] = new_job.id
+        return Response(data, status=status.HTTP_201_CREATED)
+
+
+class WorkflowJobRelaunch(WorkflowsEnforcementMixin, GenericAPIView):
+
+    model = WorkflowJob
+    serializer_class = EmptySerializer
+    is_job_start = True
+    new_in_310 = True
+
+    def check_object_permissions(self, request, obj):
+        if request.method == 'POST' and obj:
+            relaunch_perm, messages = request.user.can_access_with_errors(self.model, 'start', obj)
+            if not relaunch_perm and 'workflow_job_template' in messages:
+                self.permission_denied(request, message=messages['workflow_job_template'])
+        return super(WorkflowJobRelaunch, self).check_object_permissions(request, obj)
+
+    def get(self, request, *args, **kwargs):
+        return Response({})
+
+    def post(self, request, *args, **kwargs):
+        obj = self.get_object()
+        new_workflow_job = obj.create_relaunch_workflow_job()
+        new_workflow_job.signal_start()
+
+        data = WorkflowJobSerializer(new_workflow_job, context=self.get_serializer_context()).data
+        headers = {'Location': new_workflow_job.get_absolute_url()}
+        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
+
+
+class WorkflowJobTemplateWorkflowNodesList(WorkflowsEnforcementMixin, SubListCreateAPIView):
+
+    model = WorkflowJobTemplateNode
+    serializer_class = WorkflowJobTemplateNodeListSerializer
+    parent_model = WorkflowJobTemplate
+    relationship = 'workflow_job_template_nodes'
+    parent_key = 'workflow_job_template'
+    new_in_310 = True
+
+    def update_raw_data(self, data):
+        for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']:
+            data[fd] = None
+        return super(WorkflowJobTemplateWorkflowNodesList, self).update_raw_data(data)
+
+    def get_queryset(self):
+        return super(WorkflowJobTemplateWorkflowNodesList,
self).get_queryset().order_by('id') + + +class WorkflowJobTemplateJobsList(WorkflowsEnforcementMixin, SubListAPIView): + + model = WorkflowJob + serializer_class = WorkflowJobListSerializer + parent_model = WorkflowJobTemplate + relationship = 'workflow_jobs' + parent_key = 'workflow_job_template' + new_in_310 = True + + +class WorkflowJobTemplateSchedulesList(WorkflowsEnforcementMixin, SubListCreateAPIView): + + view_name = _("Workflow Job Template Schedules") + + model = Schedule + serializer_class = ScheduleSerializer + parent_model = WorkflowJobTemplate + relationship = 'schedules' + parent_key = 'unified_job_template' + new_in_310 = True + + +class WorkflowJobTemplateNotificationTemplatesAnyList(WorkflowsEnforcementMixin, SubListCreateAttachDetachAPIView): + + model = NotificationTemplate + serializer_class = NotificationTemplateSerializer + parent_model = WorkflowJobTemplate + relationship = 'notification_templates_any' + new_in_310 = True + + +class WorkflowJobTemplateNotificationTemplatesErrorList(WorkflowsEnforcementMixin, SubListCreateAttachDetachAPIView): + + model = NotificationTemplate + serializer_class = NotificationTemplateSerializer + parent_model = WorkflowJobTemplate + relationship = 'notification_templates_error' + new_in_310 = True + + +class WorkflowJobTemplateNotificationTemplatesSuccessList(WorkflowsEnforcementMixin, SubListCreateAttachDetachAPIView): + + model = NotificationTemplate + serializer_class = NotificationTemplateSerializer + parent_model = WorkflowJobTemplate + relationship = 'notification_templates_success' + new_in_310 = True + + +class WorkflowJobTemplateAccessList(WorkflowsEnforcementMixin, ResourceAccessList): + + model = User # needs to be User for AccessLists's + parent_model = WorkflowJobTemplate + new_in_310 = True + + +class WorkflowJobTemplateObjectRolesList(WorkflowsEnforcementMixin, SubListAPIView): + + model = Role + serializer_class = RoleSerializer + parent_model = WorkflowJobTemplate + new_in_310 = True + + def get_queryset(self): + po = self.get_parent_object() + content_type = ContentType.objects.get_for_model(self.parent_model) + return Role.objects.filter(content_type=content_type, object_id=po.pk) + + +class WorkflowJobTemplateActivityStreamList(WorkflowsEnforcementMixin, ActivityStreamEnforcementMixin, SubListAPIView): + + model = ActivityStream + serializer_class = ActivityStreamSerializer + parent_model = WorkflowJobTemplate + relationship = 'activitystream_set' + new_in_310 = True + + def get_queryset(self): + parent = self.get_parent_object() + self.check_parent_access(parent) + qs = self.request.user.get_queryset(self.model) + return qs.filter(Q(workflow_job_template=parent) | + Q(workflow_job_template_node__workflow_job_template=parent)).distinct() + + +class WorkflowJobList(WorkflowsEnforcementMixin, ListCreateAPIView): + + model = WorkflowJob + serializer_class = WorkflowJobListSerializer + new_in_310 = True + + +class WorkflowJobDetail(WorkflowsEnforcementMixin, RetrieveDestroyAPIView): + + model = WorkflowJob + serializer_class = WorkflowJobSerializer + new_in_310 = True + + +class WorkflowJobWorkflowNodesList(WorkflowsEnforcementMixin, SubListAPIView): + + model = WorkflowJobNode + serializer_class = WorkflowJobNodeListSerializer + always_allow_superuser = True + parent_model = WorkflowJob + relationship = 'workflow_job_nodes' + parent_key = 'workflow_job' + new_in_310 = True + + def get_queryset(self): + return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id') + + +class 
WorkflowJobCancel(WorkflowsEnforcementMixin, RetrieveAPIView): + + model = WorkflowJob + serializer_class = WorkflowJobCancelSerializer + is_job_cancel = True + new_in_310 = True + + def post(self, request, *args, **kwargs): + obj = self.get_object() + if obj.can_cancel: + obj.cancel() + #TODO: Figure out whether an immediate schedule is needed. + run_job_complete.delay(obj.id) + return Response(status=status.HTTP_202_ACCEPTED) + else: + return self.http_method_not_allowed(request, *args, **kwargs) + + +class WorkflowJobNotificationsList(WorkflowsEnforcementMixin, SubListAPIView): + + model = Notification + serializer_class = NotificationSerializer + parent_model = WorkflowJob + relationship = 'notifications' + new_in_310 = True + + +class WorkflowJobActivityStreamList(WorkflowsEnforcementMixin, ActivityStreamEnforcementMixin, SubListAPIView): + + model = ActivityStream + serializer_class = ActivityStreamSerializer + parent_model = WorkflowJob + relationship = 'activitystream_set' + new_in_310 = True + + +class SystemJobTemplateList(ListAPIView): + + model = SystemJobTemplate + serializer_class = SystemJobTemplateSerializer + new_in_210 = True + + def get(self, request, *args, **kwargs): + if not request.user.is_superuser and not request.user.is_system_auditor: + raise PermissionDenied(_("Superuser privileges needed.")) + return super(SystemJobTemplateList, self).get(request, *args, **kwargs) + + +class SystemJobTemplateDetail(RetrieveAPIView): + + model = SystemJobTemplate + serializer_class = SystemJobTemplateSerializer + new_in_210 = True + + +class SystemJobTemplateLaunch(GenericAPIView): + + model = SystemJobTemplate + serializer_class = EmptySerializer + is_job_start = True + new_in_210 = True + + def get(self, request, *args, **kwargs): + return Response({}) + + def post(self, request, *args, **kwargs): + obj = self.get_object() + + new_job = obj.create_unified_job(extra_vars=request.data.get('extra_vars', {})) + new_job.signal_start() data = dict(system_job=new_job.id) return Response(data, status=status.HTTP_201_CREATED) -class SystemJobTemplateSchedulesList(SubListCreateAttachDetachAPIView): - view_name = "System Job Template Schedules" +class SystemJobTemplateSchedulesList(SubListCreateAPIView): + + view_name = _("System Job Template Schedules") model = Schedule serializer_class = ScheduleSerializer parent_model = SystemJobTemplate relationship = 'schedules' parent_key = 'unified_job_template' + new_in_210 = True + class SystemJobTemplateJobsList(SubListAPIView): @@ -2654,6 +3262,8 @@ class SystemJobTemplateJobsList(SubListAPIView): parent_model = SystemJobTemplate relationship = 'jobs' parent_key = 'system_job_template' + new_in_210 = True + class SystemJobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView): @@ -2661,6 +3271,8 @@ class SystemJobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPI serializer_class = NotificationTemplateSerializer parent_model = SystemJobTemplate relationship = 'notification_templates_any' + new_in_300 = True + class SystemJobTemplateNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView): @@ -2668,6 +3280,8 @@ class SystemJobTemplateNotificationTemplatesErrorList(SubListCreateAttachDetachA serializer_class = NotificationTemplateSerializer parent_model = SystemJobTemplate relationship = 'notification_templates_error' + new_in_300 = True + class SystemJobTemplateNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView): @@ -2675,12 +3289,15 @@ class 
SystemJobTemplateNotificationTemplatesSuccessList(SubListCreateAttachDetac serializer_class = NotificationTemplateSerializer parent_model = SystemJobTemplate relationship = 'notification_templates_success' + new_in_300 = True + class JobList(ListCreateAPIView): model = Job serializer_class = JobListSerializer + class JobDetail(RetrieveUpdateDestroyAPIView): model = Job @@ -2693,6 +3310,16 @@ class JobDetail(RetrieveUpdateDestroyAPIView): return self.http_method_not_allowed(request, *args, **kwargs) return super(JobDetail, self).update(request, *args, **kwargs) + def destroy(self, request, *args, **kwargs): + obj = self.get_object() + try: + if obj.unified_job_node.workflow_job.status in ACTIVE_STATES: + raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.')) + except Job.unified_job_node.RelatedObjectDoesNotExist: + pass + return super(JobDetail, self).destroy(request, *args, **kwargs) + + class JobLabelList(SubListAPIView): model = Label @@ -2700,8 +3327,15 @@ class JobLabelList(SubListAPIView): parent_model = Job relationship = 'labels' parent_key = 'job' + new_in_300 = True -class JobActivityStreamList(SubListAPIView): + +class WorkflowJobLabelList(WorkflowsEnforcementMixin, JobLabelList): + parent_model = WorkflowJob + new_in_310 = True + + +class JobActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -2709,21 +3343,13 @@ class JobActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. 
- return super(JobActivityStreamList, self).get(request, *args, **kwargs) class JobStart(GenericAPIView): model = Job serializer_class = EmptySerializer is_job_start = True + deprecated = True def get(self, request, *args, **kwargs): obj = self.get_object() @@ -2737,8 +3363,6 @@ class JobStart(GenericAPIView): def post(self, request, *args, **kwargs): obj = self.get_object() - if not request.user.can_access(self.model, 'start', obj): - raise PermissionDenied() if obj.can_start: result = obj.signal_start(**request.data) if not result: @@ -2749,6 +3373,7 @@ class JobStart(GenericAPIView): else: return self.http_method_not_allowed(request, *args, **kwargs) + class JobCancel(RetrieveAPIView): model = Job @@ -2763,6 +3388,7 @@ class JobCancel(RetrieveAPIView): else: return self.http_method_not_allowed(request, *args, **kwargs) + class JobRelaunch(RetrieveAPIView, GenericAPIView): model = Job @@ -2776,8 +3402,6 @@ class JobRelaunch(RetrieveAPIView, GenericAPIView): def post(self, request, *args, **kwargs): obj = self.get_object() - if not request.user.can_access(self.model, 'start', obj): - raise PermissionDenied() # Note: is_valid() may modify request.data # It will remove any key/value pair who's key is not in the 'passwords_needed_to_start' list @@ -2785,25 +3409,27 @@ class JobRelaunch(RetrieveAPIView, GenericAPIView): if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) - obj.launch_type = 'relaunch' - new_job = obj.copy() + new_job = obj.copy_unified_job() result = new_job.signal_start(**request.data) if not result: data = dict(passwords_needed_to_start=new_job.passwords_needed_to_start) return Response(data, status=status.HTTP_400_BAD_REQUEST) else: - data = JobSerializer(new_job).data + data = JobSerializer(new_job, context=self.get_serializer_context()).data # Add job key to match what old relaunch returned. data['job'] = new_job.id headers = {'Location': new_job.get_absolute_url()} return Response(data, status=status.HTTP_201_CREATED, headers=headers) + class JobNotificationsList(SubListAPIView): model = Notification serializer_class = NotificationSerializer parent_model = Job relationship = 'notifications' + new_in_300 = True + class BaseJobHostSummariesList(SubListAPIView): @@ -2811,42 +3437,55 @@ class BaseJobHostSummariesList(SubListAPIView): serializer_class = JobHostSummarySerializer parent_model = None # Subclasses must define this attribute. 
relationship = 'job_host_summaries' - view_name = 'Job Host Summaries List' + view_name = _('Job Host Summaries List') + + def get_queryset(self): + parent = self.get_parent_object() + self.check_parent_access(parent) + return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host') + class HostJobHostSummariesList(BaseJobHostSummariesList): parent_model = Host + class GroupJobHostSummariesList(BaseJobHostSummariesList): parent_model = Group + class JobJobHostSummariesList(BaseJobHostSummariesList): parent_model = Job + class JobHostSummaryDetail(RetrieveAPIView): model = JobHostSummary serializer_class = JobHostSummarySerializer + class JobEventList(ListAPIView): model = JobEvent serializer_class = JobEventSerializer + class JobEventDetail(RetrieveAPIView): model = JobEvent serializer_class = JobEventSerializer + class JobEventChildrenList(SubListAPIView): model = JobEvent serializer_class = JobEventSerializer parent_model = JobEvent relationship = 'children' - view_name = 'Job Event Children List' + view_name = _('Job Event Children List') + class JobEventHostsList(SubListAPIView): @@ -2854,7 +3493,8 @@ class JobEventHostsList(SubListAPIView): serializer_class = HostSerializer parent_model = JobEvent relationship = 'hosts' - view_name = 'Job Event Hosts List' + view_name = _('Job Event Hosts List') + class BaseJobEventsList(SubListAPIView): @@ -2862,243 +3502,41 @@ class BaseJobEventsList(SubListAPIView): serializer_class = JobEventSerializer parent_model = None # Subclasses must define this attribute. relationship = 'job_events' - view_name = 'Job Events List' + view_name = _('Job Events List') + + def finalize_response(self, request, response, *args, **kwargs): + response['X-UI-Max-Events'] = settings.RECOMMENDED_MAX_EVENTS_DISPLAY_HEADER + return super(BaseJobEventsList, self).finalize_response(request, response, *args, **kwargs) + class HostJobEventsList(BaseJobEventsList): parent_model = Host + def get_queryset(self): + parent_obj = self.get_parent_object() + self.check_parent_access(parent_obj) + qs = self.request.user.get_queryset(self.model).filter( + Q(host=parent_obj) | Q(hosts=parent_obj)).distinct() + return qs + + class GroupJobEventsList(BaseJobEventsList): parent_model = Group + class JobJobEventsList(BaseJobEventsList): parent_model = Job - authentication_classes = [TaskAuthentication] + api_settings.DEFAULT_AUTHENTICATION_CLASSES - permission_classes = (TaskPermission,) - # Post allowed for job event callback only. - def post(self, request, *args, **kwargs): - parent_obj = get_object_or_404(self.parent_model, pk=self.kwargs['pk']) - data = request.data.copy() - data['job'] = parent_obj.pk - serializer = self.get_serializer(data=data) - if serializer.is_valid(): - self.instance = serializer.save() - headers = {'Location': serializer.data['url']} - return Response(serializer.data, status=status.HTTP_201_CREATED, - headers=headers) - return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) - -class JobJobPlaysList(BaseJobEventsList): - - parent_model = Job - view_name = 'Job Plays List' - new_in_200 = True - - @paginated - def get(self, request, limit, offset, ordering, *args, **kwargs): - all_plays = [] - job = Job.objects.filter(pk=self.kwargs['pk']) - if not job.exists(): - return ({'detail': 'Job not found.'}, -1, status.HTTP_404_NOT_FOUND) - job = job[0] - - # Put together a queryset for relevant job events. 
- qs = job.job_events.filter(event='playbook_on_play_start') - if ordering is not None: - qs = qs.order_by(ordering) - - # This is a bit of a special case for filtering requested by the UI - # doing this here for the moment until/unless we need to implement more - # complex filtering (since we aren't under a serializer) - - if "id__in" in request.query_params: - qs = qs.filter(id__in=[int(filter_id) for filter_id in request.query_params["id__in"].split(",")]) - elif "id__gt" in request.query_params: - qs = qs.filter(id__gt=request.query_params['id__gt']) - elif "id__lt" in request.query_params: - qs = qs.filter(id__lt=request.query_params['id__lt']) - if "failed" in request.query_params: - qs = qs.filter(failed=(request.query_params['failed'].lower() == 'true')) - if "play__icontains" in request.query_params: - qs = qs.filter(play__icontains=request.query_params['play__icontains']) - - count = qs.count() - - # Iterate over the relevant play events and get the details. - for play_event in qs[offset:offset + limit]: - play_details = dict(id=play_event.id, play=play_event.play, started=play_event.created, failed=play_event.failed, changed=play_event.changed) - event_aggregates = JobEvent.objects.filter(parent__in=play_event.children.all()).values("event").annotate(Count("id")).order_by() - change_aggregates = JobEvent.objects.filter(parent__in=play_event.children.all(), event='runner_on_ok').values("changed").annotate(Count("id")).order_by() - failed_count = 0 - ok_count = 0 - changed_count = 0 - skipped_count = 0 - unreachable_count = 0 - for event_aggregate in event_aggregates: - if event_aggregate['event'] == 'runner_on_failed': - failed_count += event_aggregate['id__count'] - elif event_aggregate['event'] == 'runner_on_error': - failed_count += event_aggregate['id_count'] - elif event_aggregate['event'] == 'runner_on_skipped': - skipped_count = event_aggregate['id__count'] - elif event_aggregate['event'] == 'runner_on_unreachable': - unreachable_count = event_aggregate['id__count'] - for change_aggregate in change_aggregates: - if not change_aggregate['changed']: - ok_count = change_aggregate['id__count'] - else: - changed_count = change_aggregate['id__count'] - play_details['related'] = {'job_event': reverse('api:job_event_detail', args=(play_event.pk,))} - play_details['type'] = 'job_event' - play_details['ok_count'] = ok_count - play_details['failed_count'] = failed_count - play_details['changed_count'] = changed_count - play_details['skipped_count'] = skipped_count - play_details['unreachable_count'] = unreachable_count - all_plays.append(play_details) - - # Done; return the plays and the total count. - return all_plays, count, None - - -class JobJobTasksList(BaseJobEventsList): - """A view for displaying aggregate data about tasks within a job - and their completion status. - """ - parent_model = Job - view_name = 'Job Play Tasks List' - new_in_200 = True - - @paginated - def get(self, request, limit, offset, ordering, *args, **kwargs): - """Return aggregate data about each of the job tasks that is: - - an immediate child of the job event - - corresponding to the spinning up of a new task or playbook - """ - results = [] - - # Get the job and the parent task. - # If there's no event ID specified, this will return a 404. 
- job = Job.objects.filter(pk=self.kwargs['pk']) - if not job.exists(): - return ({'detail': 'Job not found.'}, -1, status.HTTP_404_NOT_FOUND) - job = job[0] - - if 'event_id' not in request.query_params: - return ({"detail": "'event_id' not provided."}, -1, status.HTTP_400_BAD_REQUEST) - - parent_task = job.job_events.filter(pk=int(request.query_params.get('event_id', -1))) - if not parent_task.exists(): - return ({'detail': 'Parent event not found.'}, -1, status.HTTP_404_NOT_FOUND) - parent_task = parent_task[0] - - # Some events correspond to a playbook or task starting up, - # and these are what we're interested in here. - STARTING_EVENTS = ('playbook_on_task_start', 'playbook_on_setup') - - # We need to pull information about each start event. - # - # This is super tricky, because this table has a one-to-many - # relationship with itself (parent-child), and we're getting - # information for an arbitrary number of children. This means we - # need stats on grandchildren, sorted by child. - queryset = (JobEvent.objects.filter(parent__parent=parent_task, - parent__event__in=STARTING_EVENTS) - .values('parent__id', 'event', 'changed') - .annotate(num=Count('event')) - .order_by('parent__id')) - - # The data above will come back in a list, but we are going to - # want to access it based on the parent id, so map it into a - # dictionary. - data = {} - for line in queryset[offset:offset + limit]: - parent_id = line.pop('parent__id') - data.setdefault(parent_id, []) - data[parent_id].append(line) - - # Iterate over the start events and compile information about each one - # using their children. - qs = parent_task.children.filter(event__in=STARTING_EVENTS, - id__in=data.keys()) - - # This is a bit of a special case for id filtering requested by the UI - # doing this here for the moment until/unless we need to implement more - # complex filtering (since we aren't under a serializer) - - if "id__in" in request.query_params: - qs = qs.filter(id__in=[int(filter_id) for filter_id in request.query_params["id__in"].split(",")]) - elif "id__gt" in request.query_params: - qs = qs.filter(id__gt=request.query_params['id__gt']) - elif "id__lt" in request.query_params: - qs = qs.filter(id__lt=request.query_params['id__lt']) - if "failed" in request.query_params: - qs = qs.filter(failed=(request.query_params['failed'].lower() == 'true')) - if "task__icontains" in request.query_params: - qs = qs.filter(task__icontains=request.query_params['task__icontains']) - - if ordering is not None: - qs = qs.order_by(ordering) - - count = 0 - for task_start_event in qs: - # Create initial task data. - task_data = { - 'related': {'job_event': reverse('api:job_event_detail', args=(task_start_event.pk,))}, - 'type': 'job_event', - 'changed': task_start_event.changed, - 'changed_count': 0, - 'created': task_start_event.created, - 'failed': task_start_event.failed, - 'failed_count': 0, - 'host_count': 0, - 'id': task_start_event.id, - 'modified': task_start_event.modified, - 'name': 'Gathering Facts' if task_start_event.event == 'playbook_on_setup' else task_start_event.task, - 'reported_hosts': 0, - 'skipped_count': 0, - 'unreachable_count': 0, - 'successful_count': 0, - } - - # Iterate over the data compiled for this child event, and - # make appropriate changes to the task data. 
- for child_data in data.get(task_start_event.id, []): - if child_data['event'] == 'runner_on_failed': - task_data['failed'] = True - task_data['host_count'] += child_data['num'] - task_data['reported_hosts'] += child_data['num'] - task_data['failed_count'] += child_data['num'] - elif child_data['event'] == 'runner_on_ok': - task_data['host_count'] += child_data['num'] - task_data['reported_hosts'] += child_data['num'] - if child_data['changed']: - task_data['changed_count'] += child_data['num'] - task_data['changed'] = True - else: - task_data['successful_count'] += child_data['num'] - elif child_data['event'] == 'runner_on_unreachable': - task_data['host_count'] += child_data['num'] - task_data['unreachable_count'] += child_data['num'] - elif child_data['event'] == 'runner_on_skipped': - task_data['host_count'] += child_data['num'] - task_data['reported_hosts'] += child_data['num'] - task_data['skipped_count'] += child_data['num'] - elif child_data['event'] == 'runner_on_error': - task_data['host_count'] += child_data['num'] - task_data['reported_hosts'] += child_data['num'] - task_data['failed'] = True - task_data['failed_count'] += child_data['num'] - elif child_data['event'] == 'runner_on_no_hosts': - task_data['host_count'] += child_data['num'] - count += 1 - results.append(task_data) - - # Done; return the results and count. - return (results, count, None) + def get_queryset(self): + job = self.get_parent_object() + self.check_parent_access(job) + qs = job.job_events + qs = qs.select_related('host') + qs = qs.prefetch_related('hosts', 'children') + return qs.all() class AdHocCommandList(ListCreateAPIView): @@ -3220,8 +3658,6 @@ class AdHocCommandRelaunch(GenericAPIView): def post(self, request, *args, **kwargs): obj = self.get_object() - if not request.user.can_access(self.model, 'start', obj): - raise PermissionDenied() # Re-validate ad hoc command against serializer to check if module is # still allowed. @@ -3252,7 +3688,7 @@ class AdHocCommandRelaunch(GenericAPIView): data = dict(passwords_needed_to_start=new_ad_hoc_command.passwords_needed_to_start) return Response(data, status=status.HTTP_400_BAD_REQUEST) else: - data = AdHocCommandSerializer(new_ad_hoc_command).data + data = AdHocCommandSerializer(new_ad_hoc_command, context=self.get_serializer_context()).data # Add ad_hoc_command key to match what was previously returned. data['ad_hoc_command'] = new_ad_hoc_command.id headers = {'Location': new_ad_hoc_command.get_absolute_url()} @@ -3279,7 +3715,7 @@ class BaseAdHocCommandEventsList(SubListAPIView): serializer_class = AdHocCommandEventSerializer parent_model = None # Subclasses must define this attribute. relationship = 'ad_hoc_command_events' - view_name = 'Ad Hoc Command Events List' + view_name = _('Ad Hoc Command Events List') new_in_220 = True @@ -3288,6 +3724,7 @@ class HostAdHocCommandEventsList(BaseAdHocCommandEventsList): parent_model = Host new_in_220 = True + #class GroupJobEventsList(BaseJobEventsList): # parent_model = Group @@ -3295,27 +3732,10 @@ class HostAdHocCommandEventsList(BaseAdHocCommandEventsList): class AdHocCommandAdHocCommandEventsList(BaseAdHocCommandEventsList): parent_model = AdHocCommand - authentication_classes = [TaskAuthentication] + api_settings.DEFAULT_AUTHENTICATION_CLASSES - permission_classes = (TaskPermission,) new_in_220 = True - # Post allowed for ad hoc event callback only. 
- def post(self, request, *args, **kwargs): - if request.user: - raise PermissionDenied() - parent_obj = get_object_or_404(self.parent_model, pk=self.kwargs['pk']) - data = request.data.copy() - data['ad_hoc_command'] = parent_obj - serializer = self.get_serializer(data=data) - if serializer.is_valid(): - self.instance = serializer.save() - headers = {'Location': serializer.data['url']} - return Response(serializer.data, status=status.HTTP_201_CREATED, - headers=headers) - return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) - -class AdHocCommandActivityStreamList(SubListAPIView): +class AdHocCommandActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer @@ -3323,25 +3743,25 @@ class AdHocCommandActivityStreamList(SubListAPIView): relationship = 'activitystream_set' new_in_220 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - # Okay, let it through. - return super(AdHocCommandActivityStreamList, self).get(request, *args, **kwargs) +class AdHocCommandNotificationsList(SubListAPIView): + + model = Notification + serializer_class = NotificationSerializer + parent_model = AdHocCommand + relationship = 'notifications' + new_in_300 = True class SystemJobList(ListCreateAPIView): model = SystemJob serializer_class = SystemJobListSerializer + new_in_210 = True def get(self, request, *args, **kwargs): if not request.user.is_superuser and not request.user.is_system_auditor: - raise PermissionDenied("Superuser privileges needed.") + raise PermissionDenied(_("Superuser privileges needed.")) return super(SystemJobList, self).get(request, *args, **kwargs) @@ -3349,12 +3769,15 @@ class SystemJobDetail(RetrieveDestroyAPIView): model = SystemJob serializer_class = SystemJobSerializer + new_in_210 = True + class SystemJobCancel(RetrieveAPIView): model = SystemJob serializer_class = SystemJobCancelSerializer is_job_cancel = True + new_in_210 = True def post(self, request, *args, **kwargs): obj = self.get_object() @@ -3364,18 +3787,28 @@ class SystemJobCancel(RetrieveAPIView): else: return self.http_method_not_allowed(request, *args, **kwargs) + class SystemJobNotificationsList(SubListAPIView): model = Notification serializer_class = NotificationSerializer parent_model = SystemJob relationship = 'notifications' + new_in_300 = True + class UnifiedJobTemplateList(ListAPIView): model = UnifiedJobTemplate serializer_class = UnifiedJobTemplateSerializer new_in_148 = True + capabilities_prefetch = [ + 'admin', 'execute', + {'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use', 'jobtemplate.credential.use', + 'jobtemplate.cloud_credential.use', 'jobtemplate.network_credential.use', + 'workflowjobtemplate.organization.admin']} + ] + class UnifiedJobList(ListAPIView): @@ -3383,22 +3816,51 @@ class UnifiedJobList(ListAPIView): serializer_class = UnifiedJobListSerializer new_in_148 = True + +class StdoutANSIFilter(object): + + def __init__(self, fileobj): + self.fileobj = fileobj + self.extra_data = '' + if hasattr(fileobj,'close'): + self.close = fileobj.close + + def read(self, size=-1): + data = self.extra_data + while size > 0 and len(data) < size: + line = self.fileobj.readline(size) + if not line: + break + # Remove ANSI escape sequences used to embed event data. 
+ line = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', line) + # Remove ANSI color escape sequences. + line = re.sub(r'\x1b[^m]*m', '', line) + data += line + if size > 0 and len(data) > size: + self.extra_data = data[size:] + data = data[:size] + else: + self.extra_data = '' + return data + + class UnifiedJobStdout(RetrieveAPIView): authentication_classes = [TokenGetAuthentication] + api_settings.DEFAULT_AUTHENTICATION_CLASSES serializer_class = UnifiedJobStdoutSerializer renderer_classes = [BrowsableAPIRenderer, renderers.StaticHTMLRenderer, PlainTextRenderer, AnsiTextRenderer, - renderers.JSONRenderer, DownloadTextRenderer] + renderers.JSONRenderer, DownloadTextRenderer, AnsiDownloadRenderer] filter_backends = () new_in_148 = True def retrieve(self, request, *args, **kwargs): unified_job = self.get_object() obj_size = unified_job.result_stdout_size - if request.accepted_renderer.format != 'txt_download' and obj_size > tower_settings.STDOUT_MAX_BYTES_DISPLAY: - response_message = "Standard Output too large to display (%d bytes), only download supported for sizes over %d bytes" % (obj_size, - tower_settings.STDOUT_MAX_BYTES_DISPLAY) + if request.accepted_renderer.format not in {'txt_download', 'ansi_download'} and obj_size > settings.STDOUT_MAX_BYTES_DISPLAY: + response_message = _("Standard Output too large to display (%(text_size)d bytes), " + "only download supported for sizes over %(supported_size)d bytes") % { + 'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY} if request.accepted_renderer.format == 'json': return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message}) else: @@ -3415,7 +3877,11 @@ class UnifiedJobStdout(RetrieveAPIView): dark_bg = (content_only and dark) or (not content_only and (dark or not dark_val)) content, start, end, absolute_end = unified_job.result_stdout_raw_limited(start_line, end_line) + # Remove any ANSI escape sequences containing job event data. 
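Those substitutions (the same two patterns reused below for HTML rendering) are easiest to see on an invented sample line: the first strips the \x1b[K ... \x1b[K frames used to smuggle base64-encoded event data into stdout, the second strips ordinary color codes:

    import re

    # 'eyJldmVudCI6MX0=' is base64 for '{"event":1}'; the sample line is made up.
    sample = '\x1b[KeyJldmVudCI6MX0=\x1b[14D\x1b[K\x1b[0;32mok: [host1]\x1b[0m\n'
    sample = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', sample)
    sample = re.sub(r'\x1b[^m]*m', '', sample)
    assert sample == 'ok: [host1]\n'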
+ content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content) + body = ansiconv.to_html(cgi.escape(content)) + context = { 'title': get_view_name(self.__class__), 'body': mark_safe(body), @@ -3432,44 +3898,68 @@ class UnifiedJobStdout(RetrieveAPIView): elif content_format == 'html': return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': body}) return Response(data) - elif request.accepted_renderer.format == 'ansi': - return Response(unified_job.result_stdout_raw) - elif request.accepted_renderer.format == 'txt_download': - try: - content_fd = open(unified_job.result_stdout_file, 'r') - response = HttpResponse(FileWrapper(content_fd), content_type='text/plain') - response["Content-Disposition"] = 'attachment; filename="job_%s.txt"' % str(unified_job.id) - return response - except Exception as e: - return Response({"error": "Error generating stdout download file: %s" % str(e)}, status=status.HTTP_400_BAD_REQUEST) elif request.accepted_renderer.format == 'txt': return Response(unified_job.result_stdout) + elif request.accepted_renderer.format == 'ansi': + return Response(unified_job.result_stdout_raw) + elif request.accepted_renderer.format in {'txt_download', 'ansi_download'}: + if not os.path.exists(unified_job.result_stdout_file): + write_fd = open(unified_job.result_stdout_file, 'w') + with connection.cursor() as cursor: + try: + cursor.copy_expert("copy (select stdout from main_jobevent where job_id={} order by start_line) to stdout".format(unified_job.id), + write_fd) + write_fd.close() + subprocess.Popen("sed -i 's/\\\\r\\\\n/\\n/g' {}".format(unified_job.result_stdout_file), + shell=True).wait() + except Exception as e: + return Response({"error": _("Error generating stdout download file: {}".format(e))}) + try: + content_fd = open(unified_job.result_stdout_file, 'r') + if request.accepted_renderer.format == 'txt_download': + # For txt downloads, filter out ANSI escape sequences. 
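That filtering is done by wrapping the open file in the StdoutANSIFilter defined above; a small sketch of its behavior with invented contents (Python 2, like the surrounding code):

    from StringIO import StringIO

    filtered = StdoutANSIFilter(StringIO('\x1b[0;32mok: [host1]\x1b[0m\n'))
    filtered.read(8192)  # -> 'ok: [host1]\n', color codes stripped

Note that read() is only ever called with a positive block size in this code path (FileWrapper streams in fixed-size chunks), which is what the `size > 0` loop condition relies on.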
+ content_fd = StdoutANSIFilter(content_fd) + suffix = '' + else: + suffix = '_ansi' + response = HttpResponse(FileWrapper(content_fd), content_type='text/plain') + response["Content-Disposition"] = 'attachment; filename="job_%s%s.txt"' % (str(unified_job.id), suffix) + return response + except Exception as e: + return Response({"error": _("Error generating stdout download file: %s") % str(e)}, status=status.HTTP_400_BAD_REQUEST) else: return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs) + class ProjectUpdateStdout(UnifiedJobStdout): model = ProjectUpdate + new_in_13 = True + class InventoryUpdateStdout(UnifiedJobStdout): model = InventoryUpdate + class JobStdout(UnifiedJobStdout): model = Job + class AdHocCommandStdout(UnifiedJobStdout): model = AdHocCommand new_in_220 = True + class NotificationTemplateList(ListCreateAPIView): model = NotificationTemplate serializer_class = NotificationTemplateSerializer new_in_300 = True + class NotificationTemplateDetail(RetrieveUpdateDestroyAPIView): model = NotificationTemplate @@ -3481,21 +3971,23 @@ class NotificationTemplateDetail(RetrieveUpdateDestroyAPIView): if not request.user.can_access(self.model, 'delete', obj): return Response(status=status.HTTP_404_NOT_FOUND) if obj.notifications.filter(status='pending').exists(): - return Response({"error": "Delete not allowed while there are pending notifications"}, + return Response({"error": _("Delete not allowed while there are pending notifications")}, status=status.HTTP_405_METHOD_NOT_ALLOWED) return super(NotificationTemplateDetail, self).delete(request, *args, **kwargs) + class NotificationTemplateTest(GenericAPIView): - view_name = 'NotificationTemplate Test' + view_name = _('Notification Template Test') model = NotificationTemplate serializer_class = EmptySerializer new_in_300 = True + is_job_start = True def post(self, request, *args, **kwargs): obj = self.get_object() - notification = obj.generate_notification("Tower Notification Test {} {}".format(obj.id, tower_settings.TOWER_URL_BASE), - {"body": "Ansible Tower Test Notification {} {}".format(obj.id, tower_settings.TOWER_URL_BASE)}) + notification = obj.generate_notification("Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE), + {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}) if not notification: return Response({}, status=status.HTTP_400_BAD_REQUEST) else: @@ -3505,6 +3997,7 @@ class NotificationTemplateTest(GenericAPIView): headers=headers, status=status.HTTP_202_ACCEPTED) + class NotificationTemplateNotificationList(SubListAPIView): model = Notification @@ -3512,6 +4005,8 @@ class NotificationTemplateNotificationList(SubListAPIView): parent_model = NotificationTemplate relationship = 'notifications' parent_key = 'notification_template' + new_in_300 = True + class NotificationList(ListAPIView): @@ -3519,122 +4014,41 @@ class NotificationList(ListAPIView): serializer_class = NotificationSerializer new_in_300 = True + class NotificationDetail(RetrieveAPIView): model = Notification serializer_class = NotificationSerializer new_in_300 = True + class LabelList(ListCreateAPIView): model = Label serializer_class = LabelSerializer new_in_300 = True + class LabelDetail(RetrieveUpdateAPIView): model = Label serializer_class = LabelSerializer new_in_300 = True -class ActivityStreamList(SimpleListAPIView): + +class ActivityStreamList(ActivityStreamEnforcementMixin, SimpleListAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer new_in_145 = True - 
def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - # Okay, let it through. - return super(ActivityStreamList, self).get(request, *args, **kwargs) - - -class ActivityStreamDetail(RetrieveAPIView): +class ActivityStreamDetail(ActivityStreamEnforcementMixin, RetrieveAPIView): model = ActivityStream serializer_class = ActivityStreamSerializer new_in_145 = True - def get(self, request, *args, **kwargs): - # Sanity check: Does this license allow activity streams? - # If not, forbid this request. - if not feature_enabled('activity_streams'): - raise LicenseForbids('Your license does not allow use of ' - 'the activity stream.') - - # Okay, let it through. - return super(ActivityStreamDetail, self).get(request, *args, **kwargs) - -class SettingsList(ListCreateAPIView): - - model = TowerSettings - serializer_class = TowerSettingsSerializer - authentication_classes = [TokenGetAuthentication] + api_settings.DEFAULT_AUTHENTICATION_CLASSES - new_in_300 = True - filter_backends = () - - def get_queryset(self): - class SettingsIntermediary(object): - def __init__(self, key, description, category, value, - value_type, user=None): - self.key = key - self.description = description - self.category = category - self.value = value - self.value_type = value_type - self.user = user - - if not self.request.user.is_superuser: - # NOTE: Shortcutting the rbac class due to the merging of the settings manifest and the database - # we'll need to extend this more in the future when we have user settings - return [] - all_defined_settings = {} - for s in TowerSettings.objects.all(): - all_defined_settings[s.key] = SettingsIntermediary(s.key, - s.description, - s.category, - s.value_converted, - s.value_type, - s.user) - manifest_settings = settings.TOWER_SETTINGS_MANIFEST - settings_actual = [] - for settings_key in manifest_settings: - if settings_key in all_defined_settings: - settings_actual.append(all_defined_settings[settings_key]) - else: - m_entry = manifest_settings[settings_key] - settings_actual.append(SettingsIntermediary(settings_key, - m_entry['description'], - m_entry['category'], - m_entry['default'], - m_entry['type'])) - return settings_actual - - def delete(self, request, *args, **kwargs): - if not request.user.can_access(self.model, 'delete', None): - raise PermissionDenied() - TowerSettings.objects.all().delete() - return Response() - -class SettingsReset(APIView): - - view_name = "Reset a settings value" - new_in_300 = True - - def post(self, request): - # NOTE: Extend more with user settings - if not request.user.can_access(TowerSettings, 'delete', None): - raise PermissionDenied() - settings_key = request.data.get('key', None) - if settings_key is not None: - TowerSettings.objects.filter(key=settings_key).delete() - return Response(status=status.HTTP_204_NO_CONTENT) - class RoleList(ListAPIView): @@ -3644,7 +4058,16 @@ class RoleList(ListAPIView): new_in_300 = True def get_queryset(self): - return Role.visible_roles(self.request.user) + result = Role.visible_roles(self.request.user) + # Sanity check: is the requesting user an orphaned non-admin/auditor? + # if yes, make system admin/auditor mandatorily visible. 
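Restated outside the view for clarity, the fix below simply ORs the two singleton roles into the visible set (a sketch; `user` stands in for request.user):

    mandatories = ('system_administrator', 'system_auditor')
    super_qs = Role.objects.filter(singleton_name__in=mandatories)
    visible = Role.visible_roles(user) | super_qs  # one combined query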
+ if not self.request.user.organizations.exists() and\ + not self.request.user.is_superuser and\ + not self.request.user.is_system_auditor: + mandatories = ('system_administrator', 'system_auditor') + super_qs = Role.objects.filter(singleton_name__in=mandatories) + result = result | super_qs + return result class RoleDetail(RetrieveAPIView): @@ -3671,26 +4094,26 @@ class RoleUsersList(SubListCreateAttachDetachAPIView): # Forbid implicit user creation here sub_id = request.data.get('id', None) if not sub_id: - data = dict(msg="User 'id' field is missing.") + data = dict(msg=_("User 'id' field is missing.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) user = get_object_or_400(User, pk=sub_id) role = self.get_parent_object() if role == self.request.user.admin_role: - raise PermissionDenied('You may not perform any action with your own admin_role.') + raise PermissionDenied(_('You may not perform any action with your own admin_role.')) user_content_type = ContentType.objects.get_for_model(User) if role.content_type == user_content_type: - raise PermissionDenied('You may not change the membership of a users admin_role') + raise PermissionDenied(_('You may not change the membership of a users admin_role')) credential_content_type = ContentType.objects.get_for_model(Credential) if role.content_type == credential_content_type: if role.content_object.organization and user not in role.content_object.organization.member_role: - data = dict(msg="You cannot grant credential access to a user not in the credentials' organization") + data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization")) return Response(data, status=status.HTTP_400_BAD_REQUEST) if not role.content_object.organization and not request.user.is_superuser: - data = dict(msg="You cannot grant private credential access to another user") + data = dict(msg=_("You cannot grant private credential access to another user")) return Response(data, status=status.HTTP_400_BAD_REQUEST) return super(RoleUsersList, self).post(request, *args, **kwargs) @@ -3714,7 +4137,7 @@ class RoleTeamsList(SubListAPIView): # Forbid implicit team creation here sub_id = request.data.get('id', None) if not sub_id: - data = dict(msg="Team 'id' field is missing.") + data = dict(msg=_("Team 'id' field is missing.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) team = get_object_or_400(Team, pk=sub_id) @@ -3722,18 +4145,23 @@ class RoleTeamsList(SubListAPIView): organization_content_type = ContentType.objects.get_for_model(Organization) if role.content_type == organization_content_type: - data = dict(msg="You cannot assign an Organization role as a child role for a Team.") + data = dict(msg=_("You cannot assign an Organization role as a child role for a Team.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) credential_content_type = ContentType.objects.get_for_model(Credential) if role.content_type == credential_content_type: if not role.content_object.organization or role.content_object.organization.id != team.organization.id: - data = dict(msg="You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization") + data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization")) return Response(data, status=status.HTTP_400_BAD_REQUEST) action = 'attach' if request.data.get('disassociate', None): action = 'unattach' + + if role.is_singleton() and action == 
'attach': + data = dict(msg=_("You cannot grant system-level permissions to a team.")) + return Response(data, status=status.HTTP_400_BAD_REQUEST) + if not request.user.can_access(self.parent_model, action, role, team, self.relationship, request.data, skip_sub_obj_read_check=False): @@ -3773,7 +4201,6 @@ class RoleChildrenList(SubListAPIView): return Role.filter_visible_roles(self.request.user, role.children.all()) - # Create view functions for all of the class-based views to simplify inclusion # in URL patterns and reverse URL lookups, converting CamelCase names to # lowercase_with_underscore (e.g. MyView.as_view() becomes my_view). diff --git a/awx/asgi.py b/awx/asgi.py new file mode 100644 index 0000000000..3190a7032c --- /dev/null +++ b/awx/asgi.py @@ -0,0 +1,37 @@ +# Copyright (c) 2015 Ansible, Inc. +# All Rights Reserved. +import os +import logging +from awx import __version__ as tower_version + +# Prepare the AWX environment. +from awx import prepare_env, MODE +prepare_env() # NOQA + +from django.core.wsgi import get_wsgi_application # NOQA +from channels.asgi import get_channel_layer + +""" +ASGI config for AWX project. + +It exposes the ASGI callable as a module-level variable named ``channel_layer``. + +For more information on this file, see +https://channels.readthedocs.io/en/latest/deploying.html +""" + +if MODE == 'production': + logger = logging.getLogger('awx.main.models.jobs') + try: + fd = open("/var/lib/awx/.tower_version", "r") + if fd.read().strip() != tower_version: + raise Exception() + except Exception: + logger.error("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.") + raise Exception("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.") + + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings") + + +channel_layer = get_channel_layer() diff --git a/awx/conf/__init__.py b/awx/conf/__init__.py new file mode 100644 index 0000000000..8f00d64865 --- /dev/null +++ b/awx/conf/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Django +from django.utils.module_loading import autodiscover_modules + +# Tower +from .registry import settings_registry + +default_app_config = 'awx.conf.apps.ConfConfig' + + +def register(setting, **kwargs): + settings_registry.register(setting, **kwargs) + + +def autodiscover(): + autodiscover_modules('conf', register_to=settings_registry) diff --git a/awx/conf/access.py b/awx/conf/access.py new file mode 100644 index 0000000000..84f4ca348c --- /dev/null +++ b/awx/conf/access.py @@ -0,0 +1,45 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Django +from django.db.models import Q + +# Tower +from awx.main.access import BaseAccess, register_access +from awx.conf.models import Setting + + +class SettingAccess(BaseAccess): + ''' + - I can see settings when I am a super user or system auditor. + - I can edit settings when I am a super user. + - I can clear settings when I am a super user. + - I can always see/edit/clear my own user settings. + ''' + + model = Setting + + # For the checks below, obj will be an instance of a "Settings" class with + # an attribute for each setting and a "user" attribute (set to None unless + # it is a user setting). 
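A hypothetical caller of the access class below, following the BaseAccess convention used elsewhere in AWX (`user` is any User instance, `obj` a Setting row):

    access = SettingAccess(user)
    access.get_queryset()       # global settings plus the user's own, for superusers/auditors
    access.can_change(obj, {})  # True only for superusers or the owning user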
+ + def get_queryset(self): + if self.user.is_superuser or self.user.is_system_auditor: + return self.model.objects.filter(Q(user__isnull=True) | Q(user=self.user)) + else: + return self.model.objects.filter(user=self.user) + + def can_read(self, obj): + return bool(self.user.is_superuser or self.user.is_system_auditor or (obj and obj.user == self.user)) + + def can_add(self, data): + return False # There is no API endpoint to POST new settings. + + def can_change(self, obj, data): + return bool(self.user.is_superuser or (obj and obj.user == self.user)) + + def can_delete(self, obj): + return bool(self.user.is_superuser or (obj and obj.user == self.user)) + + +register_access(Setting, SettingAccess) diff --git a/awx/conf/apps.py b/awx/conf/apps.py new file mode 100644 index 0000000000..a70d21326c --- /dev/null +++ b/awx/conf/apps.py @@ -0,0 +1,18 @@ +# Django +from django.apps import AppConfig +# from django.core import checks +from django.utils.translation import ugettext_lazy as _ +from awx.main.utils.handlers import configure_external_logger +from django.conf import settings + + +class ConfConfig(AppConfig): + + name = 'awx.conf' + verbose_name = _('Configuration') + + def ready(self): + self.module.autodiscover() + from .settings import SettingsWrapper + SettingsWrapper.initialize() + configure_external_logger(settings) diff --git a/awx/conf/conf.py b/awx/conf/conf.py new file mode 100644 index 0000000000..8ba0d072b0 --- /dev/null +++ b/awx/conf/conf.py @@ -0,0 +1,103 @@ +# Django +from django.conf import settings +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx.conf import fields, register +from awx.conf import settings_registry + +# Define a conf.py file within your app and register each setting similarly to +# the example below. Any field class from Django REST Framework or subclass +# thereof can be used for validation/conversion of the setting. All keyword +# arguments to the register function (except field_class, category, +# category_slug, depends_on, placeholder) will be used to initialize +# the field_class. + +register( + 'ANSIBLE_COW_SELECTION', + field_class=fields.ChoiceField, + choices=[ + ('bud-frogs', _('Bud Frogs')), + ('bunny', _('Bunny')), + ('cheese', _('Cheese')), + ('daemon', _('Daemon')), + ('default', _('Default Cow')), + ('dragon', _('Dragon')), + ('elephant-in-snake', _('Elephant in Snake')), + ('elephant', _('Elephant')), + ('eyes', _('Eyes')), + ('hellokitty', _('Hello Kitty')), + ('kitty', _('Kitty')), + ('luke-koala', _('Luke Koala')), + ('meow', _('Meow')), + ('milk', _('Milk')), + ('moofasa', _('Moofasa')), + ('moose', _('Moose')), + ('ren', _('Ren')), + ('sheep', _('Sheep')), + ('small', _('Small Cow')), + ('stegosaurus', _('Stegosaurus')), + ('stimpy', _('Stimpy')), + ('supermilker', _('Super Milker')), + ('three-eyes', _('Three Eyes')), + ('turkey', _('Turkey')), + ('turtle', _('Turtle')), + ('tux', _('Tux')), + ('udder', _('Udder')), + ('vader-koala', _('Vader Koala')), + ('vader', _('Vader')), + ('www', _('WWW')), + ], + default='default', + label=_('Cow Selection'), + help_text=_('Select which cow to use with cowsay when running jobs.'), + category=_('Cows'), + # Optional; category_slug will be slugified version of category if not + # explicitly provided. 
+    category_slug='cows',
+)
+
+
+def _get_read_only_ansible_cow_selection_default():
+    return getattr(settings, 'ANSIBLE_COW_SELECTION', 'No default cow!')
+
+
+register(
+    'READONLY_ANSIBLE_COW_SELECTION',
+    field_class=fields.CharField,
+    # read_only must be set via kwargs even if field_class sets it.
+    read_only=True,
+    # default can be a callable to dynamically compute the value; should be in
+    # the plain JSON format stored in the DB and used in the API.
+    default=_get_read_only_ansible_cow_selection_default,
+    label=_('Example Read-Only Setting'),
+    help_text=_('Example setting that cannot be changed.'),
+    category=_('Cows'),
+    category_slug='cows',
+    # Optional; list of other settings this read-only setting depends on. When
+    # the other settings change, the cached value for this setting will be
+    # cleared to require it to be recomputed.
+    depends_on=['ANSIBLE_COW_SELECTION'],
+    # Optional; licensed feature required to be able to view or modify this
+    # setting.
+    feature_required='rebranding',
+    # Optional; field is stored encrypted in the database and only $encrypted$
+    # is returned via the API.
+    encrypted=True,
+)
+
+register(
+    'EXAMPLE_USER_SETTING',
+    field_class=fields.CharField,
+    allow_blank=True,
+    label=_('Example Setting'),
+    help_text=_('Example setting which can be different for each user.'),
+    category=_('User'),
+    category_slug='user',
+    default='',
+)
+
+# Unregister the example settings above.
+settings_registry.unregister('ANSIBLE_COW_SELECTION')
+settings_registry.unregister('READONLY_ANSIBLE_COW_SELECTION')
+settings_registry.unregister('EXAMPLE_USER_SETTING')
diff --git a/awx/conf/fields.py b/awx/conf/fields.py
new file mode 100644
index 0000000000..f8d012a3aa
--- /dev/null
+++ b/awx/conf/fields.py
@@ -0,0 +1,76 @@
+# Python
+import logging
+import urlparse
+
+# Django
+from django.core.validators import URLValidator
+from django.utils.translation import ugettext_lazy as _
+
+# Django REST Framework
+from rest_framework.fields import *  # noqa
+
+logger = logging.getLogger('awx.conf.fields')
+
+# Use DRF fields to convert/validate settings:
+# - to_representation(obj) should convert a native Python object to a primitive
+#   serializable type. This primitive type will be what is presented in the API
+#   and stored in the JSON field in the database.
+# - to_internal_value(data) should convert the primitive type back into the
+#   appropriate Python type to be used in settings.
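Concretely, that contract means a field can round-trip a value between the JSON stored in the database and the Python value exposed via settings; a short sketch using the classes defined below (the values are illustrative):

    field = StringListField(allow_null=True)
    primitive = field.to_representation(['10.0.0.1', '10.0.0.2'])  # JSON-safe list
    field.to_internal_value(primitive)                             # back to a Python list
    field.to_representation(None)                                  # None, thanks to allow_null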
+
+
+class CharField(CharField):
+
+    def to_representation(self, value):
+        # django-rest-framework's default CharField implementation casts `None`
+        # to a string `"None"`:
+        #
+        # https://github.com/tomchristie/django-rest-framework/blob/cbad236f6d817d992873cd4df6527d46ab243ed1/rest_framework/fields.py#L761
+        if value is None:
+            return None
+        return super(CharField, self).to_representation(value)
+
+
+class StringListField(ListField):
+
+    child = CharField()
+
+    def to_representation(self, value):
+        if value is None and self.allow_null:
+            return None
+        return super(StringListField, self).to_representation(value)
+
+
+class URLField(CharField):
+
+    def __init__(self, **kwargs):
+        schemes = kwargs.pop('schemes', None)
+        self.allow_plain_hostname = kwargs.pop('allow_plain_hostname', False)
+        super(URLField, self).__init__(**kwargs)
+        validator_kwargs = dict(message=_('Enter a valid URL'))
+        if schemes is not None:
+            validator_kwargs['schemes'] = schemes
+        self.validators.append(URLValidator(**validator_kwargs))
+
+    def to_representation(self, value):
+        if value is None:
+            return ''
+        return super(URLField, self).to_representation(value)
+
+    def run_validators(self, value):
+        if self.allow_plain_hostname:
+            try:
+                url_parts = urlparse.urlsplit(value)
+                if url_parts.hostname and '.' not in url_parts.hostname:
+                    netloc = '{}.local'.format(url_parts.hostname)
+                    if url_parts.port:
+                        netloc = '{}:{}'.format(netloc, url_parts.port)
+                    if url_parts.username:
+                        if url_parts.password:
+                            netloc = '{}:{}@{}'.format(url_parts.username, url_parts.password, netloc)
+                        else:
+                            netloc = '{}@{}'.format(url_parts.username, netloc)
+                    value = urlparse.urlunsplit([url_parts.scheme, netloc, url_parts.path, url_parts.query, url_parts.fragment])
+            except:
+                pass  # If something fails here, just fall through and let the validators check it.
+        super(URLField, self).run_validators(value)
diff --git a/awx/conf/license.py b/awx/conf/license.py
new file mode 100644
index 0000000000..0df047caaa
--- /dev/null
+++ b/awx/conf/license.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2016 Ansible, Inc.
+# All Rights Reserved.
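Stepping back to URLField above: with allow_plain_hostname=True, a dotless hostname is rewritten with a .local suffix purely so the URLValidator accepts it; the value actually stored is unchanged. A sketch with an invented host and port:

    field = URLField(schemes=['http', 'https'], allow_plain_hostname=True)
    field.run_validation('https://towerhost:8043')  # validated as https://towerhost.local:8043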
+ +# Django +from django.utils.translation import ugettext_lazy as _ + +# Django REST Framework +from rest_framework.exceptions import APIException + +# Tower +from awx.main.task_engine import TaskEnhancer + +__all__ = ['LicenseForbids', 'get_license', 'get_licensed_features', + 'feature_enabled', 'feature_exists'] + + +class LicenseForbids(APIException): + status_code = 402 + default_detail = _('Your Tower license does not allow that.') + + +def _get_validated_license_data(): + return TaskEnhancer().validate_enhancements() + + +def get_license(show_key=False): + """Return a dictionary representing the active license on this Tower instance.""" + license_data = _get_validated_license_data() + if not show_key: + license_data.pop('license_key', None) + return license_data + + +def get_licensed_features(): + """Return a set of all features enabled by the active license.""" + features = set() + for feature, enabled in _get_validated_license_data().get('features', {}).items(): + if enabled: + features.add(feature) + return features + + +def feature_enabled(name): + """Return True if the requested feature is enabled, False otherwise.""" + return _get_validated_license_data().get('features', {}).get(name, False) + + +def feature_exists(name): + """Return True if the requested feature name exists, False otherwise.""" + return bool(name in _get_validated_license_data().get('features', {})) diff --git a/awx/api/utils/__init__.py b/awx/conf/management/__init__.py similarity index 100% rename from awx/api/utils/__init__.py rename to awx/conf/management/__init__.py diff --git a/awx/main/tests/functional/migrations/__init__.py b/awx/conf/management/commands/__init__.py similarity index 100% rename from awx/main/tests/functional/migrations/__init__.py rename to awx/conf/management/commands/__init__.py diff --git a/awx/conf/management/commands/migrate_to_database_settings.py b/awx/conf/management/commands/migrate_to_database_settings.py new file mode 100644 index 0000000000..30bb922704 --- /dev/null +++ b/awx/conf/management/commands/migrate_to_database_settings.py @@ -0,0 +1,459 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. 
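The helpers in awx/conf/license.py above are what the ActivityStreamEnforcementMixin seen earlier builds on; a typical guard mirrors the checks that mixin replaced in the views:

    from awx.conf.license import LicenseForbids, feature_enabled

    if not feature_enabled('activity_streams'):
        raise LicenseForbids('Your license does not allow use of the activity stream.')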
+ +# Python +import base64 +import collections +import difflib +import json +import os +import shutil + +# Django +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError +from django.db import transaction +from django.utils.text import slugify +from django.utils.timezone import now +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx import MODE +from awx.conf import settings_registry +from awx.conf.fields import empty, SkipField +from awx.conf.models import Setting +from awx.conf.utils import comment_assignments + + +class Command(BaseCommand): + + def add_arguments(self, parser): + parser.add_argument( + 'category', + nargs='*', + type=str, + ) + parser.add_argument( + '--dry-run', + action='store_true', + dest='dry_run', + default=False, + help=_('Only show which settings would be commented/migrated.'), + ) + parser.add_argument( + '--skip-errors', + action='store_true', + dest='skip_errors', + default=False, + help=_('Skip over settings that would raise an error when commenting/migrating.'), + ) + parser.add_argument( + '--no-comment', + action='store_true', + dest='no_comment', + default=False, + help=_('Skip commenting out settings in files.'), + ) + parser.add_argument( + '--backup-suffix', + dest='backup_suffix', + default=now().strftime('.%Y%m%d%H%M%S'), + help=_('Backup existing settings files with this suffix.'), + ) + + @transaction.atomic + def handle(self, *args, **options): + self.verbosity = int(options.get('verbosity', 1)) + self.dry_run = bool(options.get('dry_run', False)) + self.skip_errors = bool(options.get('skip_errors', False)) + self.no_comment = bool(options.get('no_comment', False)) + self.backup_suffix = options.get('backup_suffix', '') + self.categories = options.get('category', None) or ['all'] + self.style.HEADING = self.style.MIGRATE_HEADING + self.style.LABEL = self.style.MIGRATE_LABEL + self.style.OK = self.style.SQL_FIELD + self.style.SKIP = self.style.WARNING + self.style.VALUE = self.style.SQL_KEYWORD + + # Determine if any categories provided are invalid. + category_slugs = [] + invalid_categories = [] + for category in self.categories: + category_slug = slugify(category) + if category_slug in settings_registry.get_registered_categories(): + if category_slug not in category_slugs: + category_slugs.append(category_slug) + else: + if category not in invalid_categories: + invalid_categories.append(category) + if len(invalid_categories) == 1: + raise CommandError('Invalid setting category: {}'.format(invalid_categories[0])) + elif len(invalid_categories) > 1: + raise CommandError('Invalid setting categories: {}'.format(', '.join(invalid_categories))) + + # Build a list of all settings to be migrated. 
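The options parsed above translate directly into management-command invocations; a safe preview run might look like this (the category name is illustrative):

    from django.core.management import call_command

    # Dry run: report what would be migrated/commented without touching anything.
    call_command('migrate_to_database_settings', 'all', dry_run=True, verbosity=2)

    # Real run, skipping settings that fail validation instead of aborting.
    call_command('migrate_to_database_settings', 'all', skip_errors=True)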
+ registered_settings = [] + for category_slug in category_slugs: + for registered_setting in settings_registry.get_registered_settings(category_slug=category_slug, read_only=False): + if registered_setting not in registered_settings: + registered_settings.append(registered_setting) + + self._migrate_settings(registered_settings) + + def _get_settings_file_patterns(self): + if MODE == 'development': + return [ + '/etc/tower/settings.py', + '/etc/tower/conf.d/*.py', + os.path.join(os.path.dirname(__file__), '..', '..', '..', 'settings', 'local_*.py') + ] + else: + return [ + os.environ.get('AWX_SETTINGS_FILE', '/etc/tower/settings.py'), + os.path.join(os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/'), '*.py'), + ] + + def _get_license_file(self): + return os.environ.get('AWX_LICENSE_FILE', '/etc/tower/license') + + def _comment_license_file(self, dry_run=True): + license_file = self._get_license_file() + diff_lines = [] + if os.path.exists(license_file): + try: + raw_license_data = open(license_file).read() + json.loads(raw_license_data) + except Exception as e: + raise CommandError('Error reading license from {0}: {1!r}'.format(license_file, e)) + if self.backup_suffix: + backup_license_file = '{}{}'.format(license_file, self.backup_suffix) + else: + backup_license_file = '{}.old'.format(license_file) + diff_lines = list(difflib.unified_diff( + raw_license_data.splitlines(), + [], + fromfile=backup_license_file, + tofile=license_file, + lineterm='', + )) + if not dry_run: + if self.backup_suffix: + shutil.copy2(license_file, backup_license_file) + os.remove(license_file) + return diff_lines + + def _get_local_settings_file(self): + if MODE == 'development': + static_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'ui', 'static') + else: + static_root = settings.STATIC_ROOT + return os.path.join(static_root, 'local_settings.json') + + def _comment_local_settings_file(self, dry_run=True): + local_settings_file = self._get_local_settings_file() + diff_lines = [] + if os.path.exists(local_settings_file): + try: + raw_local_settings_data = open(local_settings_file).read() + json.loads(raw_local_settings_data) + except Exception as e: + if not self.skip_errors: + raise CommandError('Error reading local settings from {0}: {1!r}'.format(local_settings_file, e)) + return diff_lines + if self.backup_suffix: + backup_local_settings_file = '{}{}'.format(local_settings_file, self.backup_suffix) + else: + backup_local_settings_file = '{}.old'.format(local_settings_file) + diff_lines = list(difflib.unified_diff( + raw_local_settings_data.splitlines(), + [], + fromfile=backup_local_settings_file, + tofile=local_settings_file, + lineterm='', + )) + if not dry_run: + if self.backup_suffix: + shutil.copy2(local_settings_file, backup_local_settings_file) + os.remove(local_settings_file) + return diff_lines + + def _get_custom_logo_file(self): + if MODE == 'development': + static_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'ui', 'static') + else: + static_root = settings.STATIC_ROOT + return os.path.join(static_root, 'assets', 'custom_console_logo.png') + + def _comment_custom_logo_file(self, dry_run=True): + custom_logo_file = self._get_custom_logo_file() + diff_lines = [] + if os.path.exists(custom_logo_file): + try: + raw_custom_logo_data = open(custom_logo_file).read() + except Exception as e: + if not self.skip_errors: + raise CommandError('Error reading custom logo from {0}: {1!r}'.format(custom_logo_file, e)) + return diff_lines + if self.backup_suffix: + 
backup_custom_logo_file = '{}{}'.format(custom_logo_file, self.backup_suffix)
+            else:
+                backup_custom_logo_file = '{}.old'.format(custom_logo_file)
+            diff_lines = list(difflib.unified_diff(
+                ['<custom logo data: {} bytes>'.format(len(raw_custom_logo_data))],
+                [],
+                fromfile=backup_custom_logo_file,
+                tofile=custom_logo_file,
+                lineterm='',
+            ))
+            if not dry_run:
+                if self.backup_suffix:
+                    shutil.copy2(custom_logo_file, backup_custom_logo_file)
+                os.remove(custom_logo_file)
+        return diff_lines
+
+    def _check_if_needs_comment(self, patterns, setting):
+        files_to_comment = []
+        # If any diffs are returned, this setting needs to be commented.
+        diffs = comment_assignments(patterns, setting, dry_run=True)
+        if setting == 'LICENSE':
+            diffs.extend(self._comment_license_file(dry_run=True))
+        elif setting == 'CUSTOM_LOGIN_INFO':
+            diffs.extend(self._comment_local_settings_file(dry_run=True))
+        elif setting == 'CUSTOM_LOGO':
+            diffs.extend(self._comment_custom_logo_file(dry_run=True))
+        for diff in diffs:
+            for line in diff.splitlines():
+                if line.startswith('+++ '):
+                    files_to_comment.append(line[4:])
+        return files_to_comment
+
+    def _check_if_needs_migration(self, setting):
+        # Check whether the current value differs from the default.
+        default_value = settings.DEFAULTS_SNAPSHOT.get(setting, empty)
+        if default_value is empty and setting != 'LICENSE':
+            field = settings_registry.get_setting_field(setting, read_only=True)
+            try:
+                default_value = field.get_default()
+            except SkipField:
+                pass
+        current_value = getattr(settings, setting, empty)
+        if setting == 'CUSTOM_LOGIN_INFO' and current_value in {empty, ''}:
+            local_settings_file = self._get_local_settings_file()
+            try:
+                if os.path.exists(local_settings_file):
+                    local_settings = json.load(open(local_settings_file))
+                    current_value = local_settings.get('custom_login_info', '')
+            except Exception as e:
+                if not self.skip_errors:
+                    raise CommandError('Error reading custom login info from {0}: {1!r}'.format(local_settings_file, e))
+        if setting == 'CUSTOM_LOGO' and current_value in {empty, ''}:
+            custom_logo_file = self._get_custom_logo_file()
+            try:
+                if os.path.exists(custom_logo_file):
+                    custom_logo_data = open(custom_logo_file).read()
+                    if custom_logo_data:
+                        current_value = 'data:image/png;base64,{}'.format(base64.b64encode(custom_logo_data))
+                    else:
+                        current_value = ''
+            except Exception as e:
+                if not self.skip_errors:
+                    raise CommandError('Error reading custom logo from {0}: {1!r}'.format(custom_logo_file, e))
+        if current_value != default_value:
+            if current_value is empty:
+                current_value = None
+            return current_value
+        return empty
+
+    def _display_tbd(self, setting, files_to_comment, migrate_value, comment_error=None, migrate_error=None):
+        if self.verbosity >= 1:
+            if files_to_comment:
+                if migrate_value is not empty:
+                    action = 'Migrate + Comment'
+                else:
+                    action = 'Comment'
+                if comment_error or migrate_error:
+                    action = self.style.ERROR('{} (skipped)'.format(action))
+                else:
+                    action = self.style.OK(action)
+                self.stdout.write(' {}: {}'.format(
+                    self.style.LABEL(setting),
+                    action,
+                ))
+                if self.verbosity >= 2:
+                    if migrate_error:
+                        self.stdout.write(' - Migrate value: {}'.format(
+                            self.style.ERROR(migrate_error),
+                        ))
+                    elif migrate_value is not empty:
+                        self.stdout.write(' - Migrate value: {}'.format(
+                            self.style.VALUE(repr(migrate_value)),
+                        ))
+                    if comment_error:
+                        self.stdout.write(' - Comment: {}'.format(
+                            self.style.ERROR(comment_error),
+                        ))
+                    elif files_to_comment:
+                        for file_to_comment in files_to_comment:
+                            self.stdout.write(' - Comment 
in: {}'.format( + self.style.VALUE(file_to_comment), + )) + else: + if self.verbosity >= 2: + self.stdout.write(' {}: {}'.format( + self.style.LABEL(setting), + self.style.SKIP('No Migration'), + )) + + def _display_migrate(self, setting, action, display_value): + if self.verbosity >= 1: + if action == 'No Change': + action = self.style.SKIP(action) + else: + action = self.style.OK(action) + self.stdout.write(' {}: {}'.format( + self.style.LABEL(setting), + action, + )) + if self.verbosity >= 2: + for line in display_value.splitlines(): + self.stdout.write(' {}'.format( + self.style.VALUE(line), + )) + + def _display_diff_summary(self, filename, added, removed): + self.stdout.write(' {} {}{} {}{}'.format( + self.style.LABEL(filename), + self.style.ERROR('-'), + self.style.ERROR(int(removed)), + self.style.OK('+'), + self.style.OK(str(added)), + )) + + def _display_comment(self, diffs): + for diff in diffs: + if self.verbosity >= 2: + for line in diff.splitlines(): + display_line = line + if line.startswith('--- ') or line.startswith('+++ '): + display_line = self.style.LABEL(line) + elif line.startswith('-'): + display_line = self.style.ERROR(line) + elif line.startswith('+'): + display_line = self.style.OK(line) + elif line.startswith('@@'): + display_line = self.style.VALUE(line) + if line.startswith('--- ') or line.startswith('+++ '): + self.stdout.write(' ' + display_line) + else: + self.stdout.write(' ' + display_line) + elif self.verbosity >= 1: + filename, lines_added, lines_removed = None, 0, 0 + for line in diff.splitlines(): + if line.startswith('+++ '): + if filename: + self._display_diff_summary(filename, lines_added, lines_removed) + filename, lines_added, lines_removed = line[4:], 0, 0 + elif line.startswith('+'): + lines_added += 1 + elif line.startswith('-'): + lines_removed += 1 + if filename: + self._display_diff_summary(filename, lines_added, lines_removed) + + def _migrate_settings(self, registered_settings): + patterns = self._get_settings_file_patterns() + + # Determine which settings need to be commented/migrated. + if self.verbosity >= 1: + self.stdout.write(self.style.HEADING('Discovering settings to be migrated and commented:')) + to_migrate = collections.OrderedDict() + to_comment = collections.OrderedDict() + for name in registered_settings: + comment_error, migrate_error = None, None + files_to_comment = [] + try: + files_to_comment = self._check_if_needs_comment(patterns, name) + except Exception as e: + comment_error = 'Error commenting {0}: {1!r}'.format(name, e) + if not self.skip_errors: + raise CommandError(comment_error) + if files_to_comment: + to_comment[name] = files_to_comment + migrate_value = empty + if files_to_comment: + migrate_value = self._check_if_needs_migration(name) + if migrate_value is not empty: + field = settings_registry.get_setting_field(name) + assert not field.read_only + try: + data = field.to_representation(migrate_value) + setting_value = field.run_validation(data) + db_value = field.to_representation(setting_value) + to_migrate[name] = db_value + except Exception as e: + to_comment.pop(name) + migrate_error = 'Unable to assign value {0!r} to setting "{1}: {2!s}".'.format(migrate_value, name, e) + if not self.skip_errors: + raise CommandError(migrate_error) + self._display_tbd(name, files_to_comment, migrate_value, comment_error, migrate_error) + if self.verbosity == 1 and not to_migrate and not to_comment: + self.stdout.write(' No settings found to migrate or comment!') + + # Now migrate those settings to the database. 
+ if self.verbosity >= 1: + if self.dry_run: + self.stdout.write(self.style.HEADING('Migrating settings to database (dry-run):')) + else: + self.stdout.write(self.style.HEADING('Migrating settings to database:')) + if not to_migrate: + self.stdout.write(' No settings to migrate!') + for name, db_value in to_migrate.items(): + display_value = json.dumps(db_value, indent=4) + setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first() + action = 'No Change' + if not setting: + action = 'Migrated' + if not self.dry_run: + Setting.objects.create(key=name, user=None, value=db_value) + elif setting.value != db_value or type(setting.value) != type(db_value): + action = 'Updated' + if not self.dry_run: + setting.value = db_value + setting.save(update_fields=['value']) + self._display_migrate(name, action, display_value) + + # Now comment settings in settings files. + if self.verbosity >= 1: + if bool(self.dry_run or self.no_comment): + self.stdout.write(self.style.HEADING('Commenting settings in files (dry-run):')) + else: + self.stdout.write(self.style.HEADING('Commenting settings in files:')) + if not to_comment: + self.stdout.write(' No settings to comment!') + if to_comment: + to_comment_patterns = [] + license_file_to_comment = None + local_settings_file_to_comment = None + custom_logo_file_to_comment = None + for files_to_comment in to_comment.values(): + for file_to_comment in files_to_comment: + if file_to_comment == self._get_license_file(): + license_file_to_comment = file_to_comment + elif file_to_comment == self._get_local_settings_file(): + local_settings_file_to_comment = file_to_comment + elif file_to_comment == self._get_custom_logo_file(): + custom_logo_file_to_comment = file_to_comment + elif file_to_comment not in to_comment_patterns: + to_comment_patterns.append(file_to_comment) + # Run once in dry-run mode to catch any errors from updating the files. + diffs = comment_assignments(to_comment_patterns, to_comment.keys(), dry_run=True, backup_suffix=self.backup_suffix) + # Then, if really updating, run again. 
+ if not self.dry_run and not self.no_comment: + diffs = comment_assignments(to_comment_patterns, to_comment.keys(), dry_run=False, backup_suffix=self.backup_suffix) + if license_file_to_comment: + diffs.extend(self._comment_license_file(dry_run=False)) + if local_settings_file_to_comment: + diffs.extend(self._comment_local_settings_file(dry_run=False)) + if custom_logo_file_to_comment: + diffs.extend(self._comment_custom_logo_file(dry_run=False)) + self._display_comment(diffs) diff --git a/awx/conf/migrations/0001_initial.py b/awx/conf/migrations/0001_initial.py new file mode 100644 index 0000000000..f9613b15d1 --- /dev/null +++ b/awx/conf/migrations/0001_initial.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models +import jsonfield.fields +from django.conf import settings + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.CreateModel( + name='Setting', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('created', models.DateTimeField(default=None, editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('key', models.CharField(max_length=255)), + ('value', jsonfield.fields.JSONField(null=True)), + ('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/awx/conf/migrations/0002_v310_copy_tower_settings.py b/awx/conf/migrations/0002_v310_copy_tower_settings.py new file mode 100644 index 0000000000..7cf24b7061 --- /dev/null +++ b/awx/conf/migrations/0002_v310_copy_tower_settings.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +import json + +from django.db import migrations + + +def copy_tower_settings(apps, schema_editor): + TowerSettings = apps.get_model('main', 'TowerSettings') + Setting = apps.get_model('conf', 'Setting') + for tower_setting in TowerSettings.objects.all().iterator(): + try: + value = tower_setting.value + # LICENSE is stored as a string; convert it to a dict. + if tower_setting.key == 'LICENSE': + value = json.loads(value) + setting, created = Setting.objects.get_or_create( + key=tower_setting.key, + user=tower_setting.user, + created=tower_setting.created, + modified=tower_setting.modified, + defaults=dict(value=value), + ) + if not created and setting.value != value: + setting.value = value + setting.save(update_fields=['value']) + except Setting.MultipleObjectsReturned: + pass + + +def revert_tower_settings(apps, schema_editor): + TowerSettings = apps.get_model('main', 'TowerSettings') + Setting = apps.get_model('conf', 'Setting') + for setting in Setting.objects.all().iterator(): + value = setting.value + # LICENSE is stored as a JSON object; convert it back to a string. 
+        if setting.key == 'LICENSE':
+            value = json.dumps(value)
+        defaults = dict(
+            value=value,
+            value_type='string',
+            description='',
+            category='',
+        )
+        try:
+            tower_setting, created = TowerSettings.objects.get_or_create(
+                key=setting.key,
+                user=setting.user,
+                defaults=defaults,
+            )
+            if not created:
+                update_fields = []
+                for k, v in defaults.items():
+                    if getattr(tower_setting, k) != v:
+                        setattr(tower_setting, k, v)
+                        update_fields.append(k)
+                if update_fields:
+                    tower_setting.save(update_fields=update_fields)
+        except TowerSettings.MultipleObjectsReturned:
+            pass
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('conf', '0001_initial'),
+        ('main', '0034_v310_release'),
+    ]
+
+    run_before = [
+        ('main', '0035_v310_remove_tower_settings'),
+    ]
+
+    operations = [
+        migrations.RunPython(copy_tower_settings, revert_tower_settings),
+    ]
diff --git a/awx/conf/migrations/0003_v310_JSONField_changes.py b/awx/conf/migrations/0003_v310_JSONField_changes.py
new file mode 100644
index 0000000000..78a4c02de7
--- /dev/null
+++ b/awx/conf/migrations/0003_v310_JSONField_changes.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations
+import awx.main.fields
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('conf', '0002_v310_copy_tower_settings'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='setting',
+            name='value',
+            field=awx.main.fields.JSONField(null=True),
+        ),
+    ]
diff --git a/awx/ui/client/lib/sizzle/test/data/empty.js b/awx/conf/migrations/__init__.py
similarity index 100%
rename from awx/ui/client/lib/sizzle/test/data/empty.js
rename to awx/conf/migrations/__init__.py
diff --git a/awx/conf/models.py b/awx/conf/models.py
new file mode 100644
index 0000000000..5c26e17c54
--- /dev/null
+++ b/awx/conf/models.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2016 Ansible, Inc.
+# All Rights Reserved.
+
+# Python
+import json
+
+# Django
+from django.db import models
+
+# Tower
+from awx.main.models.base import CreatedModifiedModel, prevent_search
+from awx.main.fields import JSONField
+from awx.main.utils import encrypt_field
+from awx.conf import settings_registry
+
+__all__ = ['Setting']
+
+
+class Setting(CreatedModifiedModel):
+
+    key = models.CharField(
+        max_length=255,
+    )
+    value = JSONField(
+        null=True,
+    )
+    user = prevent_search(models.ForeignKey(
+        'auth.User',
+        related_name='settings',
+        default=None,
+        null=True,
+        editable=False,
+        on_delete=models.CASCADE,
+    ))
+
+    def __unicode__(self):
+        try:
+            json_value = json.dumps(self.value)
+        except ValueError:
+            # In the rare case the DB value is invalid JSON.
+            json_value = u'<invalid JSON>'
+        if self.user:
+            return u'{} ({}) = {}'.format(self.key, self.user, json_value)
+        else:
+            return u'{} = {}'.format(self.key, json_value)
+
+    def save(self, *args, **kwargs):
+        encrypted = settings_registry.is_setting_encrypted(self.key)
+        new_instance = not bool(self.pk)
+        # If update_fields has been specified, add our field names to it,
+        # if it hasn't been specified, then we're just doing a normal save.
+        update_fields = kwargs.get('update_fields', [])
+        # When first saving to the database, don't store any encrypted field
+        # value, but instead save it until after the instance is created.
+        # Otherwise, store encrypted value to the database.
+ if encrypted: + if new_instance: + self._saved_value = self.value + self.value = '' + else: + self.value = encrypt_field(self, 'value') + if 'value' not in update_fields: + update_fields.append('value') + super(Setting, self).save(*args, **kwargs) + # After saving a new instance for the first time, set the encrypted + # field and save again. + if encrypted and new_instance: + self.value = self._saved_value + self.save(update_fields=['value']) + + @classmethod + def get_cache_key(self, key): + return key + + +import awx.conf.signals # noqa + +from awx.main.registrar import activity_stream_registrar # noqa +activity_stream_registrar.connect(Setting) + +import awx.conf.access # noqa diff --git a/awx/conf/registry.py b/awx/conf/registry.py new file mode 100644 index 0000000000..faf210fbb0 --- /dev/null +++ b/awx/conf/registry.py @@ -0,0 +1,156 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Python +from collections import OrderedDict +import logging + +# Django +from django.core.exceptions import ImproperlyConfigured +from django.utils.text import slugify +from django.utils.translation import ugettext_lazy as _ + +logger = logging.getLogger('awx.conf.registry') + +__all__ = ['settings_registry'] + + +class SettingsRegistry(object): + """Registry of all API-configurable settings and categories.""" + + def __init__(self, settings=None): + """ + :param settings: a ``django.conf.LazySettings`` object used to lookup + file-based field values (e.g., ``local_settings.py`` + and ``/etc/tower/conf.d/example.py``). If unspecified, + defaults to ``django.conf.settings``. + """ + if settings is None: + from django.conf import settings + self._registry = OrderedDict() + self._dependent_settings = {} + self.settings = settings + + def register(self, setting, **kwargs): + if setting in self._registry: + raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting)) + category = kwargs.setdefault('category', None) + category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None) + if category_slug in {'all', 'changed', 'user-defaults'}: + raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug)) + if 'field_class' not in kwargs: + raise ImproperlyConfigured('Setting must provide a field_class keyword argument.') + self._registry[setting] = kwargs + + # Normally for read-only/dynamic settings, depends_on will specify other + # settings whose changes may affect the value of this setting. Store + # this setting as a dependent for the other settings, so we can know + # which extra cache keys to clear when a setting changes. 
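In practice that dependency map is populated by register() calls like the read-only example in conf.py earlier; a compressed sketch (both setting names are hypothetical):

    register(
        'DERIVED_EXAMPLE',
        field_class=fields.CharField,
        read_only=True,
        default=lambda: getattr(settings, 'SOURCE_EXAMPLE', ''),
        depends_on=['SOURCE_EXAMPLE'],  # cached DERIVED_EXAMPLE cleared when this changes
        label=_('Derived Example'),
        help_text=_('Recomputed whenever SOURCE_EXAMPLE changes.'),
        category=_('System'),
        category_slug='system',
    )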
+ depends_on = kwargs.setdefault('depends_on', None) or set() + for depends_on_setting in depends_on: + dependent_settings = self._dependent_settings.setdefault(depends_on_setting, set()) + dependent_settings.add(setting) + + def unregister(self, setting): + self._registry.pop(setting, None) + for dependent_settings in self._dependent_settings.values(): + dependent_settings.discard(setting) + + def get_dependent_settings(self, setting): + return self._dependent_settings.get(setting, set()) + + def get_registered_categories(self, features_enabled=None): + categories = { + 'all': _('All'), + 'changed': _('Changed'), + } + for setting, kwargs in self._registry.items(): + category_slug = kwargs.get('category_slug', None) + if category_slug is None or category_slug in categories: + continue + if features_enabled is not None: + feature_required = kwargs.get('feature_required', None) + if feature_required and feature_required not in features_enabled: + continue + if category_slug == 'user': + categories['user'] = _('User') + categories['user-defaults'] = _('User-Defaults') + else: + categories[category_slug] = kwargs.get('category', None) or category_slug + return categories + + def get_registered_settings(self, category_slug=None, read_only=None, features_enabled=None): + setting_names = [] + if category_slug == 'user-defaults': + category_slug = 'user' + if category_slug == 'changed': + category_slug = 'all' + for setting, kwargs in self._registry.items(): + if category_slug not in {None, 'all', kwargs.get('category_slug', None)}: + continue + if read_only in {True, False} and kwargs.get('read_only', False) != read_only: + # Note: Doesn't catch fields that set read_only via __init__; + # read-only field kwargs should always include read_only=True. + continue + if features_enabled is not None: + feature_required = kwargs.get('feature_required', None) + if feature_required and feature_required not in features_enabled: + continue + setting_names.append(setting) + return setting_names + + def is_setting_encrypted(self, setting): + return bool(self._registry.get(setting, {}).get('encrypted', False)) + + def get_setting_field(self, setting, mixin_class=None, for_user=False, **kwargs): + from rest_framework.fields import empty + field_kwargs = {} + field_kwargs.update(self._registry[setting]) + field_kwargs.update(kwargs) + field_class = original_field_class = field_kwargs.pop('field_class') + if mixin_class: + field_class = type(field_class.__name__, (mixin_class, field_class), {}) + category_slug = field_kwargs.pop('category_slug', None) + category = field_kwargs.pop('category', None) + depends_on = frozenset(field_kwargs.pop('depends_on', None) or []) + placeholder = field_kwargs.pop('placeholder', empty) + feature_required = field_kwargs.pop('feature_required', empty) + encrypted = bool(field_kwargs.pop('encrypted', False)) + defined_in_file = bool(field_kwargs.pop('defined_in_file', False)) + if getattr(field_kwargs.get('child', None), 'source', None) is not None: + field_kwargs['child'].source = None + field_instance = field_class(**field_kwargs) + field_instance.category_slug = category_slug + field_instance.category = category + field_instance.depends_on = depends_on + if placeholder is not empty: + field_instance.placeholder = placeholder + if feature_required is not empty: + field_instance.feature_required = feature_required + field_instance.defined_in_file = defined_in_file + if field_instance.defined_in_file: + field_instance.help_text = ( + str(_('This value has been set manually in a 
settings file.')) + + '\n\n' + + str(field_instance.help_text) + ) + field_instance.encrypted = encrypted + original_field_instance = field_instance + if field_class != original_field_class: + original_field_instance = original_field_class(**field_kwargs) + if category_slug == 'user' and for_user: + try: + field_instance.default = original_field_instance.to_representation(getattr(self.settings, setting)) + except: + logger.warning('Unable to retrieve default value for user setting "%s".', setting, exc_info=True) + elif not field_instance.read_only or field_instance.default is empty or field_instance.defined_in_file: + try: + field_instance.default = original_field_instance.to_representation(self.settings._awx_conf_settings._get_default(setting)) + except AttributeError: + pass + except: + logger.warning('Unable to retrieve default value for setting "%s".', setting, exc_info=True) + return field_instance + + +settings_registry = SettingsRegistry() diff --git a/awx/conf/serializers.py b/awx/conf/serializers.py new file mode 100644 index 0000000000..4c2dd4748d --- /dev/null +++ b/awx/conf/serializers.py @@ -0,0 +1,83 @@ +# Django REST Framework +from rest_framework import serializers + +# Tower +from awx.api.fields import VerbatimField +from awx.api.serializers import BaseSerializer +from awx.conf.models import Setting +from awx.conf import settings_registry + + +class SettingSerializer(BaseSerializer): + """Read-only serializer for activity stream.""" + + value = VerbatimField(allow_null=True) + + class Meta: + model = Setting + fields = ('id', 'key', 'value') + readonly_fields = ('id', 'key', 'value') + + def __init__(self, instance=None, data=serializers.empty, **kwargs): + if instance is None and data is not serializers.empty and 'key' in data: + try: + instance = Setting.objects.get(key=data['key']) + except Setting.DoesNotExist: + pass + super(SettingSerializer, self).__init__(instance, data, **kwargs) + + +class SettingCategorySerializer(serializers.Serializer): + """Serialize setting category """ + + url = serializers.CharField( + read_only=True, + ) + slug = serializers.CharField( + read_only=True, + ) + name = serializers.CharField( + read_only=True, + ) + + +class SettingFieldMixin(object): + """Mixin to use a registered setting field class for API display/validation.""" + + def to_representation(self, obj): + if getattr(self, 'encrypted', False) and isinstance(obj, basestring) and obj: + return '$encrypted$' + return obj + + def to_internal_value(self, value): + if getattr(self, 'encrypted', False) and isinstance(value, basestring) and value.startswith('$encrypted$'): + raise serializers.SkipField() + obj = super(SettingFieldMixin, self).to_internal_value(value) + return super(SettingFieldMixin, self).to_representation(obj) + + +class SettingSingletonSerializer(serializers.Serializer): + """Present a group of settings (by category) as a single object.""" + + def __init__(self, instance=None, data=serializers.empty, **kwargs): + # Instance (if given) should be an object with attributes for all of the + # settings in the category; never an actual Setting model instance. 
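For example, a caller would hand this serializer a plain namespace object rather than a model instance (a sketch; the setting name is made up):

    from collections import namedtuple

    FakeSettings = namedtuple('FakeSettings', ['AWX_EXAMPLE'])
    serializer = SettingSingletonSerializer(instance=FakeSettings(AWX_EXAMPLE='x'))
    # get_fields() then builds one field per registered setting in the
    # view's category, skipping attributes the instance doesn't have.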
+ assert instance is None or not hasattr(instance, 'pk') + super(SettingSingletonSerializer, self).__init__(instance, data, **kwargs) + + def get_fields(self): + fields = super(SettingSingletonSerializer, self).get_fields() + try: + category_slug = self.context['view'].kwargs.get('category_slug', 'all') + except (KeyError, AttributeError): + category_slug = '' + for key in settings_registry.get_registered_settings(category_slug=category_slug): + if self.instance and not hasattr(self.instance, key): + continue + extra_kwargs = {} + # Make LICENSE read-only here; update via /api/v1/config/ only. + if key == 'LICENSE': + extra_kwargs['read_only'] = True + field = settings_registry.get_setting_field(key, mixin_class=SettingFieldMixin, for_user=bool(category_slug == 'user'), **extra_kwargs) + fields[key] = field + return fields diff --git a/awx/conf/settings.py b/awx/conf/settings.py new file mode 100644 index 0000000000..616ed1fcd3 --- /dev/null +++ b/awx/conf/settings.py @@ -0,0 +1,406 @@ +# Python +from collections import namedtuple +import contextlib +import logging +import sys +import threading +import time + +import six + +# Django +from django.conf import settings, UserSettingsHolder +from django.core.cache import cache as django_cache +from django.core.exceptions import ImproperlyConfigured +from django.db import ProgrammingError, OperationalError + +# Django REST Framework +from rest_framework.fields import empty, SkipField + +# Tower +from awx.main.utils import encrypt_field, decrypt_field +from awx.main.utils.db import get_tower_migration_version +from awx.conf import settings_registry +from awx.conf.models import Setting + +# FIXME: Gracefully handle when settings are accessed before the database is +# ready (or during migrations). + +logger = logging.getLogger('awx.conf.settings') + +# Store a special value to indicate when a setting is not set in the database. +SETTING_CACHE_NOTSET = '___notset___' + +# Cannot store None in memcached; use a special value instead to indicate None. +# If the special value for None is the same as the "not set" value, then a value +# of None will be equivalent to the setting not being set (and will raise an +# AttributeError if there is no other default defined). +# SETTING_CACHE_NONE = '___none___' +SETTING_CACHE_NONE = SETTING_CACHE_NOTSET + +# Cannot store empty list/tuple in memcached; use a special value instead to +# indicate an empty list. +SETTING_CACHE_EMPTY_LIST = '___[]___' + +# Cannot store empty dict in memcached; use a special value instead to indicate +# an empty dict. +SETTING_CACHE_EMPTY_DICT = '___{}___' + +# Expire settings from cache after this many seconds. +SETTING_CACHE_TIMEOUT = 60 + +# Flag indicating whether to store field default values in the cache. +SETTING_CACHE_DEFAULTS = True + +__all__ = ['SettingsWrapper'] + + +@contextlib.contextmanager +def _log_database_error(): + try: + yield + except (ProgrammingError, OperationalError) as e: + if get_tower_migration_version() < '310': + logger.info('Using default settings until version 3.1 migration.') + else: + logger.warning('Database settings are not available, using defaults (%s)', e, exc_info=True) + finally: + pass + + +class EncryptedCacheProxy(object): + + def __init__(self, cache, registry, encrypter=None, decrypter=None): + """ + This proxy wraps a Django cache backend and overwrites the + `get`/`set`/`set_many` methods to handle field encryption/decryption + for sensitive values. 
+ + :param cache: the Django cache backend to proxy to + :param registry: the settings registry instance used to determine if + a field is encrypted or not. + :param encrypter: a callable used to encrypt field values; defaults to + ``awx.main.utils.encrypt_field`` + :param decrypter: a callable used to decrypt field values; defaults to + ``awx.main.utils.decrypt_field`` + """ + + # These values have to be stored via self.__dict__ in this way to get + # around the magic __setattr__ method on this class. + self.__dict__['cache'] = cache + self.__dict__['registry'] = registry + self.__dict__['encrypter'] = encrypter or encrypt_field + self.__dict__['decrypter'] = decrypter or decrypt_field + + def get(self, key, **kwargs): + value = self.cache.get(key, **kwargs) + value = self._handle_encryption(self.decrypter, key, value) + + # python-memcached auto-encodes unicode on cache set in python2 + # https://github.com/linsomniac/python-memcached/issues/79 + # https://github.com/linsomniac/python-memcached/blob/288c159720eebcdf667727a859ef341f1e908308/memcache.py#L961 + if six.PY2 and isinstance(value, six.binary_type): + try: + six.text_type(value) + except UnicodeDecodeError: + value = value.decode('utf-8') + return value + + def set(self, key, value, **kwargs): + self.cache.set( + key, + self._handle_encryption(self.encrypter, key, value), + **kwargs + ) + + def set_many(self, data, **kwargs): + for key, value in data.items(): + self.set(key, value, **kwargs) + + def _handle_encryption(self, method, key, value): + TransientSetting = namedtuple('TransientSetting', ['pk', 'value']) + + if value is not empty and self.registry.is_setting_encrypted(key): + # If the setting exists in the database, we'll use its primary key + # as part of the AES key when encrypting/decrypting + return method( + TransientSetting( + pk=getattr(self._get_setting_from_db(key), 'pk', None), + value=value + ), + 'value' + ) + + # If the field in question isn't an "encrypted" field, this function is + # a no-op; it just returns the provided value + return value + + def _get_setting_from_db(self, key): + field = self.registry.get_setting_field(key) + if not field.read_only: + return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first() + + def __getattr__(self, name): + return getattr(self.cache, name) + + def __setattr__(self, name, value): + setattr(self.cache, name, value) + + +class SettingsWrapper(UserSettingsHolder): + + @classmethod + def initialize(cls, cache=None, registry=None): + """ + Used to initialize and wrap the Django settings context. + + :param cache: the Django cache backend to use for caching setting + values. ``django.core.cache`` is used by default. + :param registry: the settings registry instance used. The global + ``awx.conf.settings_registry`` is used by default. + """ + if not getattr(settings, '_awx_conf_settings', False): + settings_wrapper = cls( + settings._wrapped, + cache=cache or django_cache, + registry=registry or settings_registry + ) + settings._wrapped = settings_wrapper + + def __init__(self, default_settings, cache, registry): + """ + This constructor is generally not called directly, but by + ``SettingsWrapper.initialize`` at app startup time when settings are + parsed. + """ + + # These values have to be stored via self.__dict__ in this way to get + # around the magic __setattr__ method on this class (which is used to + # store API-assigned settings in the database). 
+        self.__dict__['default_settings'] = default_settings
+        self.__dict__['_awx_conf_settings'] = self
+        self.__dict__['_awx_conf_preload_expires'] = None
+        self.__dict__['_awx_conf_preload_lock'] = threading.RLock()
+        self.__dict__['_awx_conf_init_readonly'] = False
+        self.__dict__['cache'] = EncryptedCacheProxy(cache, registry)
+        self.__dict__['registry'] = registry
+
+    def _get_supported_settings(self):
+        return self.registry.get_registered_settings()
+
+    def _get_writeable_settings(self):
+        return self.registry.get_registered_settings(read_only=False)
+
+    def _get_cache_value(self, value):
+        if value is None:
+            value = SETTING_CACHE_NONE
+        elif isinstance(value, (list, tuple)) and len(value) == 0:
+            value = SETTING_CACHE_EMPTY_LIST
+        elif isinstance(value, (dict,)) and len(value) == 0:
+            value = SETTING_CACHE_EMPTY_DICT
+        return value
+
+    def _preload_cache(self):
+        # Ensure we're only modifying local preload timeout from one thread.
+        with self._awx_conf_preload_lock:
+            # If local preload timeout has not expired, skip preloading.
+            if self._awx_conf_preload_expires and self._awx_conf_preload_expires > time.time():
+                return
+            # Otherwise update local preload timeout.
+            self.__dict__['_awx_conf_preload_expires'] = time.time() + SETTING_CACHE_TIMEOUT
+            # Check for any settings that have been defined in Python files and
+            # make those read-only to avoid overriding in the database.
+            if not self._awx_conf_init_readonly and 'migrate_to_database_settings' not in sys.argv:
+                defaults_snapshot = self._get_default('DEFAULTS_SNAPSHOT')
+                for key in self._get_writeable_settings():
+                    init_default = defaults_snapshot.get(key, None)
+                    try:
+                        file_default = self._get_default(key)
+                    except AttributeError:
+                        file_default = None
+                    if file_default != init_default and file_default is not None:
+                        logger.debug('Setting %s has been marked read-only!', key)
+                        self.registry._registry[key]['read_only'] = True
+                        self.registry._registry[key]['defined_in_file'] = True
+                self.__dict__['_awx_conf_init_readonly'] = True
+        # If local preload timer has expired, check to see if another process
+        # has already preloaded the cache and skip preloading if so.
+        if self.cache.get('_awx_conf_preload_expires', default=empty) is not empty:
+            return
+        # Initialize all database-configurable settings with a marker value so
+        # that a cache hit can indicate the setting is not configured, without
+        # requiring a database lookup.
+        settings_to_cache = dict([(key, SETTING_CACHE_NOTSET) for key in self._get_writeable_settings()])
+        # Load all settings defined in the database.
+        for setting in Setting.objects.filter(key__in=settings_to_cache.keys(), user__isnull=True).order_by('pk'):
+            if settings_to_cache[setting.key] != SETTING_CACHE_NOTSET:
+                continue
+            if self.registry.is_setting_encrypted(setting.key):
+                value = decrypt_field(setting, 'value')
+            else:
+                value = setting.value
+            settings_to_cache[setting.key] = self._get_cache_value(value)
+        # Load field default value for any settings not found in the database.
+        if SETTING_CACHE_DEFAULTS:
+            for key, value in settings_to_cache.items():
+                if value != SETTING_CACHE_NOTSET:
+                    continue
+                field = self.registry.get_setting_field(key)
+                try:
+                    settings_to_cache[key] = self._get_cache_value(field.get_default())
+                except SkipField:
+                    pass
+        # Generate a cache key for each setting and store them all at once.
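+        # (Illustrative) each plain setting name is mapped through
+        # Setting.get_cache_key(), and a '_awx_conf_preload_expires' entry is
+        # included so other processes can see the cache was already preloaded.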
+        settings_to_cache = dict([(Setting.get_cache_key(k), v) for k, v in settings_to_cache.items()])
+        settings_to_cache['_awx_conf_preload_expires'] = self._awx_conf_preload_expires
+        logger.debug('cache set_many(%r, %r)', settings_to_cache, SETTING_CACHE_TIMEOUT)
+        self.cache.set_many(settings_to_cache, timeout=SETTING_CACHE_TIMEOUT)
+
+    def _get_local(self, name):
+        self._preload_cache()
+        cache_key = Setting.get_cache_key(name)
+        try:
+            cache_value = self.cache.get(cache_key, default=empty)
+        except ValueError:
+            cache_value = empty
+        logger.debug('cache get(%r, %r) -> %r', cache_key, empty, cache_value)
+        if cache_value == SETTING_CACHE_NOTSET:
+            value = empty
+        elif cache_value == SETTING_CACHE_NONE:
+            value = None
+        elif cache_value == SETTING_CACHE_EMPTY_LIST:
+            value = []
+        elif cache_value == SETTING_CACHE_EMPTY_DICT:
+            value = {}
+        else:
+            value = cache_value
+        field = self.registry.get_setting_field(name)
+        if value is empty:
+            setting = None
+            if not field.read_only:
+                setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
+            if setting:
+                if getattr(field, 'encrypted', False):
+                    value = decrypt_field(setting, 'value')
+                else:
+                    value = setting.value
+            else:
+                value = SETTING_CACHE_NOTSET
+                if SETTING_CACHE_DEFAULTS:
+                    try:
+                        value = field.get_default()
+                    except SkipField:
+                        pass
+            # If None implies not set, convert when reading the value.
+            if value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
+                value = SETTING_CACHE_NOTSET
+            if cache_value != value:
+                logger.debug('cache set(%r, %r, %r)', cache_key,
+                             self._get_cache_value(value),
+                             SETTING_CACHE_TIMEOUT)
+                self.cache.set(cache_key, self._get_cache_value(value), timeout=SETTING_CACHE_TIMEOUT)
+        if value == SETTING_CACHE_NOTSET and not SETTING_CACHE_DEFAULTS:
+            try:
+                value = field.get_default()
+            except SkipField:
+                pass
+        if value not in (empty, SETTING_CACHE_NOTSET):
+            try:
+                if field.read_only:
+                    internal_value = field.to_internal_value(value)
+                    field.run_validators(internal_value)
+                    return internal_value
+                else:
+                    return field.run_validation(value)
+            except Exception:
+                logger.warning(
+                    'The current value "%r" for setting "%s" is invalid.',
+                    value, name, exc_info=True)
+        return empty
+
+    def _get_default(self, name):
+        return getattr(self.default_settings, name)
+
+    @property
+    def SETTINGS_MODULE(self):
+        return self._get_default('SETTINGS_MODULE')
+
+    def __getattr__(self, name):
+        value = empty
+        if name in self._get_supported_settings():
+            with _log_database_error():
+                value = self._get_local(name)
+            if value is not empty:
+                return value
+        return self._get_default(name)
+
+    def _set_local(self, name, value):
+        field = self.registry.get_setting_field(name)
+        if field.read_only:
+            logger.warning('Attempt to set read only setting "%s".', name)
+            raise ImproperlyConfigured('Setting "%s" is read only.' % name)
+
+        try:
+            data = field.to_representation(value)
+            setting_value = field.run_validation(data)
+            db_value = field.to_representation(setting_value)
+        except Exception:
+            logger.exception('Unable to assign value "%r" to setting "%s".',
+                             value, name)
+            raise
+
+        setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
+        if not setting:
+            setting = Setting.objects.create(key=name, user=None, value=db_value)
+            # post_save handler will delete from cache when added.
+        elif setting.value != db_value or type(setting.value) != type(db_value):
+            setting.value = db_value
+            setting.save(update_fields=['value'])
+            # post_save handler will delete from cache when changed.
+
+    def __setattr__(self, name, value):
+        if name in self._get_supported_settings():
+            with _log_database_error():
+                self._set_local(name, value)
+        else:
+            setattr(self.default_settings, name, value)
+
+    def _del_local(self, name):
+        field = self.registry.get_setting_field(name)
+        if field.read_only:
+            logger.warning('Attempt to delete read only setting "%s".', name)
+            raise ImproperlyConfigured('Setting "%s" is read only.' % name)
+        for setting in Setting.objects.filter(key=name, user__isnull=True):
+            setting.delete()
+            # pre_delete handler will delete from cache.
+
+    def __delattr__(self, name):
+        if name in self._get_supported_settings():
+            with _log_database_error():
+                self._del_local(name)
+        else:
+            delattr(self.default_settings, name)
+
+    def __dir__(self):
+        keys = []
+        with _log_database_error():
+            for setting in Setting.objects.filter(
+                    key__in=self._get_supported_settings(), user__isnull=True):
+                # Skip returning settings that have been overridden but are
+                # considered to be "not set".
+                if setting.value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
+                    continue
+                if setting.key not in keys:
+                    keys.append(str(setting.key))
+        for key in dir(self.default_settings):
+            if key not in keys:
+                keys.append(key)
+        return keys
+
+    def is_overridden(self, setting):
+        set_locally = False
+        if setting in self._get_supported_settings():
+            with _log_database_error():
+                set_locally = Setting.objects.filter(key=setting, user__isnull=True).exists()
+        set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
+        return (set_locally or set_on_default)
diff --git a/awx/conf/signals.py b/awx/conf/signals.py new file mode 100644 index 0000000000..b077cc5a18 --- /dev/null +++ b/awx/conf/signals.py @@ -0,0 +1,73 @@
+# Python
+import logging
+import sys
+
+# Django
+from django.conf import settings
+from django.core.signals import setting_changed
+from django.db.models.signals import post_save, pre_delete, post_delete
+from django.core.cache import cache
+from django.dispatch import receiver
+
+# Tower
+import awx.main.signals
+from awx.conf import settings_registry
+from awx.conf.models import Setting
+from awx.conf.serializers import SettingSerializer
+from awx.main.tasks import process_cache_changes
+
+logger = logging.getLogger('awx.conf.signals')
+
+awx.main.signals.model_serializer_mapping[Setting] = SettingSerializer
+
+__all__ = []
+
+
+def handle_setting_change(key, for_delete=False):
+    # When a setting changes or is deleted, remove its value from cache along
+    # with any other settings that depend on it.
+    setting_keys = [key]
+    for dependent_key in settings_registry.get_dependent_settings(key):
+        # Note: Doesn't handle multiple levels of dependencies!
+        setting_keys.append(dependent_key)
+    cache_keys = set([Setting.get_cache_key(k) for k in setting_keys])
+    logger.debug('deleting cache keys: %r', cache_keys)
+    cache.delete_many(cache_keys)
+    if 'migrate_to_database_settings' not in sys.argv:
+        process_cache_changes.delay(list(cache_keys))
+
+    # Send setting_changed signal with new value for each setting.
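+    # (Illustrative) e.g. a change to AWX_SOME_SETTING_ENABLED fires
+    # setting_changed for itself and for AWX_SOME_DEPENDENT_SETTING, which the
+    # unit tests register with depends_on=['AWX_SOME_SETTING_ENABLED'].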
+ for setting_key in setting_keys: + setting_changed.send( + sender=Setting, + setting=setting_key, + value=getattr(settings, setting_key, None), + enter=not bool(for_delete), + ) + + +@receiver(post_save, sender=Setting) +def on_post_save_setting(sender, **kwargs): + instance = kwargs['instance'] + # Skip for user-specific settings. + if instance.user: + return + handle_setting_change(instance.key) + + +@receiver(pre_delete, sender=Setting) +def on_pre_delete_setting(sender, **kwargs): + instance = kwargs['instance'] + # Skip for user-specific settings. + if instance.user: + return + # Save instance key (setting name) for post_delete. + instance._saved_key_ = instance.key + + +@receiver(post_delete, sender=Setting) +def on_post_delete_setting(sender, **kwargs): + instance = kwargs['instance'] + key = getattr(instance, '_saved_key_', None) + if key: + handle_setting_change(key, True) diff --git a/awx/conf/tests/__init__.py b/awx/conf/tests/__init__.py new file mode 100644 index 0000000000..46176c348f --- /dev/null +++ b/awx/conf/tests/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2017 Ansible, Inc. +# All Rights Reserved. diff --git a/awx/conf/tests/unit/test_registry.py b/awx/conf/tests/unit/test_registry.py new file mode 100644 index 0000000000..e8fc5a477b --- /dev/null +++ b/awx/conf/tests/unit/test_registry.py @@ -0,0 +1,311 @@ +# Copyright (c) 2017 Ansible, Inc. +# All Rights Reserved. + +from uuid import uuid4 + +from django.conf import LazySettings +from django.core.cache.backends.locmem import LocMemCache +from django.core.exceptions import ImproperlyConfigured +from django.utils.translation import ugettext_lazy as _ +from rest_framework.fields import empty +import pytest + +from awx.conf import fields +from awx.conf.settings import SettingsWrapper +from awx.conf.registry import SettingsRegistry + + +@pytest.fixture() +def reg(request): + """ + This fixture initializes an awx settings registry object and passes it as + an argument into the test function. + """ + cache = LocMemCache(str(uuid4()), {}) # make a new random cache each time + settings = LazySettings() + registry = SettingsRegistry(settings) + + # @pytest.mark.defined_in_file can be used to mark specific setting values + # as "defined in a settings file". This is analogous to manually + # specifying a setting on the filesystem (e.g., in a local_settings.py in + # development, or in /etc/tower/conf.d/.py) + defaults = request.node.get_marker('defined_in_file') + if defaults: + settings.configure(**defaults.kwargs) + settings._wrapped = SettingsWrapper(settings._wrapped, + cache, + registry) + return registry + + +def test_simple_setting_registration(reg): + assert reg.get_registered_settings() == [] + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + ) + assert reg.get_registered_settings() == ['AWX_SOME_SETTING_ENABLED'] + + +def test_simple_setting_unregistration(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + ) + assert reg.get_registered_settings() == ['AWX_SOME_SETTING_ENABLED'] + + reg.unregister('AWX_SOME_SETTING_ENABLED') + assert reg.get_registered_settings() == [] + + +def test_duplicate_setting_registration(reg): + "ensure that settings cannot be registered twice." 
+ with pytest.raises(ImproperlyConfigured): + for i in range(2): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + ) + + +def test_field_class_required_for_registration(reg): + "settings must specify a field class to register" + with pytest.raises(ImproperlyConfigured): + reg.register('AWX_SOME_SETTING_ENABLED') + + +def test_get_registered_settings_by_slug(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + ) + assert reg.get_registered_settings(category_slug='system') == [ + 'AWX_SOME_SETTING_ENABLED' + ] + assert reg.get_registered_settings(category_slug='other') == [] + + +def test_get_registered_read_only_settings(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system' + ) + reg.register( + 'AWX_SOME_READ_ONLY', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + read_only=True + ) + assert reg.get_registered_settings(read_only=True) ==[ + 'AWX_SOME_READ_ONLY' + ] + assert reg.get_registered_settings(read_only=False) == [ + 'AWX_SOME_SETTING_ENABLED' + ] + assert reg.get_registered_settings() == [ + 'AWX_SOME_SETTING_ENABLED', + 'AWX_SOME_READ_ONLY' + ] + + +def test_get_registered_settings_with_required_features(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + feature_required='superpowers', + ) + assert reg.get_registered_settings(features_enabled=[]) == [] + assert reg.get_registered_settings(features_enabled=['superpowers']) == [ + 'AWX_SOME_SETTING_ENABLED' + ] + + +def test_get_dependent_settings(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system' + ) + reg.register( + 'AWX_SOME_DEPENDENT_SETTING', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + depends_on=['AWX_SOME_SETTING_ENABLED'] + ) + assert reg.get_dependent_settings('AWX_SOME_SETTING_ENABLED') == set([ + 'AWX_SOME_DEPENDENT_SETTING' + ]) + + +def test_get_registered_categories(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system' + ) + reg.register( + 'AWX_SOME_OTHER_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('OtherSystem'), + category_slug='other-system' + ) + assert reg.get_registered_categories() == { + 'all': _('All'), + 'changed': _('Changed'), + 'system': _('System'), + 'other-system': _('OtherSystem'), + } + + +def test_get_registered_categories_with_required_features(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('System'), + category_slug='system', + feature_required='superpowers' + ) + reg.register( + 'AWX_SOME_OTHER_SETTING_ENABLED', + field_class=fields.BooleanField, + category=_('OtherSystem'), + category_slug='other-system', + feature_required='sortapowers' + ) + assert reg.get_registered_categories(features_enabled=[]) == { + 'all': _('All'), + 'changed': _('Changed'), + } + assert reg.get_registered_categories(features_enabled=['superpowers']) == { + 'all': _('All'), + 'changed': _('Changed'), + 'system': _('System'), + } + assert reg.get_registered_categories(features_enabled=['sortapowers']) == { + 'all': _('All'), + 'changed': _('Changed'), + 'other-system': 
_('OtherSystem'), + } + assert reg.get_registered_categories( + features_enabled=['superpowers', 'sortapowers'] + ) == { + 'all': _('All'), + 'changed': _('Changed'), + 'system': _('System'), + 'other-system': _('OtherSystem'), + } + + +def test_is_setting_encrypted(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + reg.register( + 'AWX_SOME_ENCRYPTED_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + encrypted=True + ) + assert reg.is_setting_encrypted('AWX_SOME_SETTING_ENABLED') is False + assert reg.is_setting_encrypted('AWX_SOME_ENCRYPTED_SETTING') is True + + +def test_simple_field(reg): + reg.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + placeholder='Example Value', + feature_required='superpowers' + ) + + field = reg.get_setting_field('AWX_SOME_SETTING') + assert isinstance(field, fields.CharField) + assert field.category == _('System') + assert field.category_slug == 'system' + assert field.default is empty + assert field.placeholder == 'Example Value' + assert field.feature_required == 'superpowers' + + +def test_field_with_custom_attribute(reg): + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category_slug='system', + ) + + field = reg.get_setting_field('AWX_SOME_SETTING_ENABLED', + category_slug='other-system') + assert field.category_slug == 'other-system' + + +def test_field_with_custom_mixin(reg): + class GreatMixin(object): + + def is_great(self): + return True + + reg.register( + 'AWX_SOME_SETTING_ENABLED', + field_class=fields.BooleanField, + category_slug='system', + ) + + field = reg.get_setting_field('AWX_SOME_SETTING_ENABLED', + mixin_class=GreatMixin) + assert isinstance(field, fields.BooleanField) + assert isinstance(field, GreatMixin) + assert field.is_great() is True + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_default_value_from_settings(reg): + reg.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + ) + + field = reg.get_setting_field('AWX_SOME_SETTING') + assert field.default == 'DEFAULT' + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_default_value_from_settings_with_custom_representation(reg): + class LowercaseCharField(fields.CharField): + + def to_representation(self, value): + return value.lower() + + reg.register( + 'AWX_SOME_SETTING', + field_class=LowercaseCharField, + category=_('System'), + category_slug='system', + ) + + field = reg.get_setting_field('AWX_SOME_SETTING') + assert field.default == 'default' diff --git a/awx/conf/tests/unit/test_settings.py b/awx/conf/tests/unit/test_settings.py new file mode 100644 index 0000000000..f7f1540108 --- /dev/null +++ b/awx/conf/tests/unit/test_settings.py @@ -0,0 +1,467 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible, Inc. +# All Rights Reserved. 
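+
+# These tests exercise SettingsWrapper and EncryptedCacheProxy in isolation;
+# the `settings` fixture below wraps a fresh LazySettings object around a
+# randomly-named LocMemCache for each test.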
+ +from contextlib import contextmanager +from uuid import uuid4 +import time + +from django.conf import LazySettings +from django.core.cache.backends.locmem import LocMemCache +from django.core.exceptions import ImproperlyConfigured +from django.utils.translation import ugettext_lazy as _ +import pytest +import six + +from awx.conf import models, fields +from awx.conf.settings import SettingsWrapper, EncryptedCacheProxy, SETTING_CACHE_NOTSET +from awx.conf.registry import SettingsRegistry + +from awx.main.utils import encrypt_field, decrypt_field + + +@contextmanager +def apply_patches(_patches): + [p.start() for p in _patches] + yield + [p.stop() for p in _patches] + + +@pytest.fixture() +def settings(request): + """ + This fixture initializes a Django settings object that wraps our + `awx.conf.settings.SettingsWrapper` and passes it as an argument into the + test function. + + This mimics the work done by `awx.conf.settings.SettingsWrapper.initialize` + on `django.conf.settings`. + """ + cache = LocMemCache(str(uuid4()), {}) # make a new random cache each time + settings = LazySettings() + registry = SettingsRegistry(settings) + + # @pytest.mark.defined_in_file can be used to mark specific setting values + # as "defined in a settings file". This is analogous to manually + # specifying a setting on the filesystem (e.g., in a local_settings.py in + # development, or in /etc/tower/conf.d/.py) + in_file_marker = request.node.get_marker('defined_in_file') + defaults = in_file_marker.kwargs if in_file_marker else {} + defaults['DEFAULTS_SNAPSHOT'] = {} + settings.configure(**defaults) + settings._wrapped = SettingsWrapper(settings._wrapped, + cache, + registry) + return settings + + +@pytest.mark.defined_in_file(DEBUG=True) +def test_unregistered_setting(settings): + "native Django settings are not stored in DB, and aren't cached" + assert settings.DEBUG is True + assert settings.cache.get('DEBUG') is None + + +def test_cached_settings_unicode_is_auto_decoded(settings): + # https://github.com/linsomniac/python-memcached/issues/79 + # https://github.com/linsomniac/python-memcached/blob/288c159720eebcdf667727a859ef341f1e908308/memcache.py#L961 + + value = six.u('Iñtërnâtiônàlizætiøn').encode('utf-8') # this simulates what python-memcached does on cache.set() + settings.cache.set('DEBUG', value) + assert settings.cache.get('DEBUG') == six.u('Iñtërnâtiônàlizætiøn') + + +def test_read_only_setting(settings): + settings.registry.register( + 'AWX_READ_ONLY', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + default='NO-EDITS', + read_only=True + ) + assert settings.AWX_READ_ONLY == 'NO-EDITS' + assert len(settings.registry.get_registered_settings(read_only=False)) == 0 + settings = settings.registry.get_registered_settings(read_only=True) + assert settings == ['AWX_READ_ONLY'] + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +@pytest.mark.parametrize('read_only', [True, False]) +def test_setting_defined_in_file(settings, read_only): + kwargs = {'read_only': True} if read_only else {} + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + **kwargs + ) + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert len(settings.registry.get_registered_settings(read_only=False)) == 0 + settings = settings.registry.get_registered_settings(read_only=True) + assert settings == ['AWX_SOME_SETTING'] + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def 
test_setting_defined_in_file_with_empty_default(settings): + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + default='', + ) + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert len(settings.registry.get_registered_settings(read_only=False)) == 0 + settings = settings.registry.get_registered_settings(read_only=True) + assert settings == ['AWX_SOME_SETTING'] + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_setting_defined_in_file_with_specific_default(settings): + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + default=123 + ) + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert len(settings.registry.get_registered_settings(read_only=False)) == 0 + settings = settings.registry.get_registered_settings(read_only=True) + assert settings == ['AWX_SOME_SETTING'] + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_read_only_defaults_are_cached(settings): + "read-only settings are stored in the cache" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert settings.cache.get('AWX_SOME_SETTING') == 'DEFAULT' + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_cache_respects_timeout(settings): + "only preload the cache every SETTING_CACHE_TIMEOUT settings" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + + assert settings.AWX_SOME_SETTING == 'DEFAULT' + cache_expiration = settings.cache.get('_awx_conf_preload_expires') + assert cache_expiration > time.time() + + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert settings.cache.get('_awx_conf_preload_expires') == cache_expiration + + +def test_default_setting(settings, mocker): + "settings that specify a default are inserted into the cache" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + default='DEFAULT' + ) + + settings_to_cache = mocker.Mock(**{'order_by.return_value': []}) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache): + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert settings.cache.get('AWX_SOME_SETTING') == 'DEFAULT' + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_setting_is_from_setting_file(settings, mocker): + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert settings.registry.get_setting_field('AWX_SOME_SETTING').defined_in_file is True + + +def test_setting_is_not_from_setting_file(settings, mocker): + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + default='DEFAULT' + ) + + settings_to_cache = mocker.Mock(**{'order_by.return_value': []}) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache): + assert settings.AWX_SOME_SETTING == 'DEFAULT' + assert settings.registry.get_setting_field('AWX_SOME_SETTING').defined_in_file is False + + +def test_empty_setting(settings, mocker): + "settings with no default and no defined value are not valid" + 
settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + + mocks = mocker.Mock(**{ + 'order_by.return_value': mocker.Mock(**{ + '__iter__': lambda self: iter([]), + 'first.return_value': None + }), + }) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks): + with pytest.raises(AttributeError): + settings.AWX_SOME_SETTING + assert settings.cache.get('AWX_SOME_SETTING') == SETTING_CACHE_NOTSET + + +def test_setting_from_db(settings, mocker): + "settings can be loaded from the database" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + default='DEFAULT' + ) + + setting_from_db = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB') + mocks = mocker.Mock(**{ + 'order_by.return_value': mocker.Mock(**{ + '__iter__': lambda self: iter([setting_from_db]), + 'first.return_value': setting_from_db + }), + }) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks): + assert settings.AWX_SOME_SETTING == 'FROM_DB' + assert settings.cache.get('AWX_SOME_SETTING') == 'FROM_DB' + + +@pytest.mark.parametrize('encrypted', (True, False)) +def test_setting_from_db_with_unicode(settings, mocker, encrypted): + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + default='DEFAULT', + encrypted=encrypted + ) + # this simulates a bug in python-memcached; see https://github.com/linsomniac/python-memcached/issues/79 + value = six.u('Iñtërnâtiônàlizætiøn').encode('utf-8') + + setting_from_db = mocker.Mock(key='AWX_SOME_SETTING', value=value) + mocks = mocker.Mock(**{ + 'order_by.return_value': mocker.Mock(**{ + '__iter__': lambda self: iter([setting_from_db]), + 'first.return_value': setting_from_db + }), + }) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks): + assert settings.AWX_SOME_SETTING == six.u('Iñtërnâtiônàlizætiøn') + assert settings.cache.get('AWX_SOME_SETTING') == six.u('Iñtërnâtiônàlizætiøn') + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_read_only_setting_assignment(settings): + "read-only settings cannot be overwritten" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + assert settings.AWX_SOME_SETTING == 'DEFAULT' + with pytest.raises(ImproperlyConfigured): + settings.AWX_SOME_SETTING = 'CHANGED' + assert settings.AWX_SOME_SETTING == 'DEFAULT' + + +def test_db_setting_create(settings, mocker): + "settings are stored in the database when set for the first time" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + + setting_list = mocker.Mock(**{'order_by.return_value.first.return_value': None}) + with apply_patches([ + mocker.patch('awx.conf.models.Setting.objects.filter', + return_value=setting_list), + mocker.patch('awx.conf.models.Setting.objects.create', mocker.Mock()) + ]): + settings.AWX_SOME_SETTING = 'NEW-VALUE' + + models.Setting.objects.create.assert_called_with( + key='AWX_SOME_SETTING', + user=None, + value='NEW-VALUE' + ) + + +def test_db_setting_update(settings, mocker): + "settings are updated in the database when their value changes" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + 
) + + existing_setting = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB') + setting_list = mocker.Mock(**{ + 'order_by.return_value.first.return_value': existing_setting + }) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=setting_list): + settings.AWX_SOME_SETTING = 'NEW-VALUE' + + assert existing_setting.value == 'NEW-VALUE' + existing_setting.save.assert_called_with(update_fields=['value']) + + +def test_db_setting_deletion(settings, mocker): + "settings are auto-deleted from the database" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + + existing_setting = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB') + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=[existing_setting]): + del settings.AWX_SOME_SETTING + + assert existing_setting.delete.call_count == 1 + + +@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT') +def test_read_only_setting_deletion(settings): + "read-only settings cannot be deleted" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system' + ) + assert settings.AWX_SOME_SETTING == 'DEFAULT' + with pytest.raises(ImproperlyConfigured): + del settings.AWX_SOME_SETTING + assert settings.AWX_SOME_SETTING == 'DEFAULT' + + +def test_charfield_properly_sets_none(settings, mocker): + "see: https://github.com/ansible/ansible-tower/issues/5322" + settings.registry.register( + 'AWX_SOME_SETTING', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + allow_null=True + ) + + setting_list = mocker.Mock(**{'order_by.return_value.first.return_value': None}) + with apply_patches([ + mocker.patch('awx.conf.models.Setting.objects.filter', + return_value=setting_list), + mocker.patch('awx.conf.models.Setting.objects.create', mocker.Mock()) + ]): + settings.AWX_SOME_SETTING = None + + models.Setting.objects.create.assert_called_with( + key='AWX_SOME_SETTING', + user=None, + value=None + ) + + +def test_settings_use_an_encrypted_cache(settings): + settings.registry.register( + 'AWX_ENCRYPTED', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + encrypted=True + ) + assert isinstance(settings.cache, EncryptedCacheProxy) + assert settings.cache.__dict__['encrypter'] == encrypt_field + assert settings.cache.__dict__['decrypter'] == decrypt_field + + +def test_sensitive_cache_data_is_encrypted(settings, mocker): + "fields marked as `encrypted` are stored in the cache with encryption" + settings.registry.register( + 'AWX_ENCRYPTED', + field_class=fields.CharField, + category=_('System'), + category_slug='system', + encrypted=True + ) + + def rot13(obj, attribute): + assert obj.pk == 123 + return getattr(obj, attribute).encode('rot13') + + native_cache = LocMemCache(str(uuid4()), {}) + cache = EncryptedCacheProxy( + native_cache, + settings.registry, + encrypter=rot13, + decrypter=rot13 + ) + # Insert the setting value into the database; the encryption process will + # use its primary key as part of the encryption key + setting_from_db = mocker.Mock(pk=123, key='AWX_ENCRYPTED', value='SECRET!') + mocks = mocker.Mock(**{ + 'order_by.return_value': mocker.Mock(**{ + '__iter__': lambda self: iter([setting_from_db]), + 'first.return_value': setting_from_db + }), + }) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks): + cache.set('AWX_ENCRYPTED', 'SECRET!') + assert 
cache.get('AWX_ENCRYPTED') == 'SECRET!'
+        assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'
+
+
+def test_readonly_sensitive_cache_data_is_encrypted(settings):
+    "readonly fields marked as `encrypted` are stored in the cache with encryption"
+    settings.registry.register(
+        'AWX_ENCRYPTED',
+        field_class=fields.CharField,
+        category=_('System'),
+        category_slug='system',
+        read_only=True,
+        encrypted=True
+    )
+
+    def rot13(obj, attribute):
+        assert obj.pk is None
+        return getattr(obj, attribute).encode('rot13')
+
+    native_cache = LocMemCache(str(uuid4()), {})
+    cache = EncryptedCacheProxy(
+        native_cache,
+        settings.registry,
+        encrypter=rot13,
+        decrypter=rot13
+    )
+    cache.set('AWX_ENCRYPTED', 'SECRET!')
+    assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
+    assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'
diff --git a/awx/conf/urls.py b/awx/conf/urls.py new file mode 100644 index 0000000000..15505f4c3c --- /dev/null +++ b/awx/conf/urls.py @@ -0,0 +1,15 @@
+# Copyright (c) 2016 Ansible, Inc.
+# All Rights Reserved.
+
+# Django
+from django.conf.urls import patterns
+
+# Tower
+from awx.api.urls import url
+
+
+urlpatterns = patterns(
+    'awx.conf.views',
+    url(r'^$', 'setting_category_list'),
+    url(r'^(?P<category_slug>[a-z0-9-]+)/$', 'setting_singleton_detail'),
+)
diff --git a/awx/conf/utils.py b/awx/conf/utils.py new file mode 100755 index 0000000000..b780038e9f --- /dev/null +++ b/awx/conf/utils.py @@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+# Python
+import difflib
+import glob
+import os
+import shutil
+
+# RedBaron
+from redbaron import RedBaron, indent
+
+__all__ = ['comment_assignments']
+
+
+def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
+    if isinstance(patterns, basestring):
+        patterns = [patterns]
+    diffs = []
+    for pattern in patterns:
+        for filename in sorted(glob.glob(pattern)):
+            filename = os.path.abspath(os.path.normpath(filename))
+            if backup_suffix:
+                backup_filename = '{}{}'.format(filename, backup_suffix)
+            else:
+                backup_filename = None
+            diff = comment_assignments_in_file(filename, assignment_names, dry_run, backup_filename)
+            if diff:
+                diffs.append(diff)
+    return diffs
+
+
+def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
+    if isinstance(assignment_names, basestring):
+        assignment_names = [assignment_names]
+    else:
+        assignment_names = assignment_names[:]
+    current_file_data = open(filename).read()
+
+    for assignment_name in assignment_names[:]:
+        if assignment_name in current_file_data:
+            continue
+        if assignment_name in assignment_names:
+            assignment_names.remove(assignment_name)
+    if not assignment_names:
+        return ''
+
+    replace_lines = {}
+    rb = RedBaron(current_file_data)
+    for assignment_node in rb.find_all('assignment'):
+        for assignment_name in assignment_names:
+
+            # Only target direct assignments to a variable.
+            name_node = assignment_node.find('name', value=assignment_name)
+            if not name_node:
+                continue
+            if assignment_node.target.type != 'name':
+                continue
+
+            # Build a new node that comments out the existing assignment node.
+            indentation = '{}# '.format(assignment_node.indentation or '')
+            new_node_content = indent(assignment_node.dumps(), indentation)
+            new_node_lines = new_node_content.splitlines()
+            # Add a pass statement in case the assignment block is the only
+            # child in a parent code block to prevent a syntax error.
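+            # (Illustrative) an assignment nested inside another block, e.g.
+            #     FOO = 'bar'
+            # under an `if` statement, is rewritten as:
+            #     # This setting is now configured via the Tower API.
+            #     pass # FOO = 'bar'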
+ if assignment_node.indentation: + new_node_lines[0] = new_node_lines[0].replace(indentation, '{}pass # '.format(assignment_node.indentation or ''), 1) + new_node_lines[0] = '{0}This setting is now configured via the Tower API.\n{1}'.format(indentation, new_node_lines[0]) + + # Store new node lines in dictionary to be replaced in file. + start_lineno = assignment_node.absolute_bounding_box.top_left.line + end_lineno = assignment_node.absolute_bounding_box.bottom_right.line + for n, new_node_line in enumerate(new_node_lines): + new_lineno = start_lineno + n + assert new_lineno <= end_lineno + replace_lines[new_lineno] = new_node_line + + if not replace_lines: + return '' + + # Iterate through all lines in current file and replace as needed. + current_file_lines = current_file_data.splitlines() + new_file_lines = [] + for n, line in enumerate(current_file_lines): + new_file_lines.append(replace_lines.get(n + 1, line)) + new_file_data = '\n'.join(new_file_lines) + new_file_lines = new_file_data.splitlines() + + # If changed, syntax check and write the new file; return a diff of changes. + diff_lines = [] + if new_file_data != current_file_data: + compile(new_file_data, filename, 'exec') + if backup_filename: + from_file = backup_filename + else: + from_file = '{}.old'.format(filename) + to_file = filename + diff_lines = list(difflib.unified_diff(current_file_lines, new_file_lines, fromfile=from_file, tofile=to_file, lineterm='')) + if not dry_run: + if backup_filename: + shutil.copy2(filename, backup_filename) + with open(filename, 'wb') as fileobj: + fileobj.write(new_file_data) + return '\n'.join(diff_lines) + + +if __name__ == '__main__': + pattern = os.path.join(os.path.dirname(__file__), '..', 'settings', 'local_*.py') + diffs = comment_assignments(pattern, ['AUTH_LDAP_ORGANIZATION_MAP']) + for diff in diffs: + print(diff) diff --git a/awx/conf/views.py b/awx/conf/views.py new file mode 100644 index 0000000000..99a3daab99 --- /dev/null +++ b/awx/conf/views.py @@ -0,0 +1,141 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Python +import collections +import sys + +# Django +from django.conf import settings +from django.core.urlresolvers import reverse +from django.http import Http404 +from django.utils.translation import ugettext_lazy as _ + +# Django REST Framework +from rest_framework.exceptions import PermissionDenied +from rest_framework.response import Response +from rest_framework import serializers +from rest_framework import status + +# Tower +from awx.api.generics import * # noqa +from awx.main.utils import * # noqa +from awx.conf.license import get_licensed_features +from awx.conf.models import Setting +from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer +from awx.conf import settings_registry + + +SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name')) + + +class SettingCategoryList(ListAPIView): + + model = Setting # Not exactly, but needed for the view. 
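+    # Rather than Setting model instances, the queryset below yields
+    # SettingCategory namedtuples of (url, slug, name).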
+    serializer_class = SettingCategorySerializer
+    filter_backends = []
+    new_in_310 = True
+    view_name = _('Setting Categories')
+
+    def get_queryset(self):
+        setting_categories = []
+        categories = settings_registry.get_registered_categories(features_enabled=get_licensed_features())
+        if self.request.user.is_superuser or self.request.user.is_system_auditor:
+            pass  # Superusers and system auditors see all categories.
+        elif 'user' in categories:
+            categories = {'user': _('User')}
+        else:
+            categories = {}
+        for category_slug in sorted(categories.keys()):
+            url = reverse('api:setting_singleton_detail', args=(category_slug,))
+            setting_categories.append(SettingCategory(url, category_slug, categories[category_slug]))
+        return setting_categories
+
+
+class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
+
+    model = Setting  # Not exactly, but needed for the view.
+    serializer_class = SettingSingletonSerializer
+    filter_backends = []
+    new_in_310 = True
+    view_name = _('Setting Detail')
+
+    def get_queryset(self):
+        self.category_slug = self.kwargs.get('category_slug', 'all')
+        all_category_slugs = settings_registry.get_registered_categories(features_enabled=get_licensed_features()).keys()
+        if self.request.user.is_superuser or getattr(self.request.user, 'is_system_auditor', False):
+            category_slugs = all_category_slugs
+        else:
+            category_slugs = {'user'}
+        if self.category_slug not in all_category_slugs:
+            raise Http404
+        if self.category_slug not in category_slugs:
+            raise PermissionDenied()
+
+        registered_settings = settings_registry.get_registered_settings(category_slug=self.category_slug, read_only=False, features_enabled=get_licensed_features())
+        if self.category_slug == 'user':
+            return Setting.objects.filter(key__in=registered_settings, user=self.request.user)
+        else:
+            return Setting.objects.filter(key__in=registered_settings, user__isnull=True)
+
+    def get_object(self):
+        settings_qs = self.get_queryset()
+        registered_settings = settings_registry.get_registered_settings(category_slug=self.category_slug, features_enabled=get_licensed_features())
+        all_settings = {}
+        for setting in settings_qs:
+            all_settings[setting.key] = setting.value
+        for key in registered_settings:
+            if key in all_settings or self.category_slug == 'changed':
+                continue
+            try:
+                field = settings_registry.get_setting_field(key, for_user=bool(self.category_slug == 'user'))
+                all_settings[key] = field.get_default()
+            except serializers.SkipField:
+                all_settings[key] = None
+        all_settings['user'] = self.request.user if self.category_slug == 'user' else None
+        obj = type('Settings', (object,), all_settings)()
+        self.check_object_permissions(self.request, obj)
+        return obj
+
+    def perform_update(self, serializer):
+        settings_qs = self.get_queryset()
+        user = self.request.user if self.category_slug == 'user' else None
+        for key, value in serializer.validated_data.items():
+            if key == 'LICENSE':
+                continue
+            if settings_registry.is_setting_encrypted(key) and isinstance(value, basestring) and value.startswith('$encrypted$'):
+                continue
+            setattr(serializer.instance, key, value)
+            setting = settings_qs.filter(key=key).order_by('pk').first()
+            if not setting:
+                setting = Setting.objects.create(key=key, user=user, value=value)
+            elif setting.value != value or type(setting.value) != type(value):
+                setting.value = value
+                setting.save(update_fields=['value'])
+
+    def destroy(self, request, *args, **kwargs):
+        instance = self.get_object()
+        self.perform_destroy(instance)
+        return Response(status=status.HTTP_204_NO_CONTENT)
+
+    def 
perform_destroy(self, instance): + for setting in self.get_queryset().exclude(key='LICENSE'): + setting.delete() + + # When TOWER_URL_BASE is deleted from the API, reset it to the hostname + # used to make the request as a default. + if hasattr(instance, 'TOWER_URL_BASE'): + url = '{}://{}'.format(self.request.scheme, self.request.get_host()) + if settings.TOWER_URL_BASE != url: + settings.TOWER_URL_BASE = url + + +# Create view functions for all of the class-based views to simplify inclusion +# in URL patterns and reverse URL lookups, converting CamelCase names to +# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view). +this_module = sys.modules[__name__] +for attr, value in locals().items(): + if isinstance(value, type) and issubclass(value, APIView): + name = camelcase_to_underscore(attr) + view = value.as_view() + setattr(this_module, name, view) diff --git a/awx/fact/__init__.py b/awx/fact/__init__.py deleted file mode 100644 index e484e62be1..0000000000 --- a/awx/fact/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. diff --git a/awx/fact/management/__init__.py b/awx/fact/management/__init__.py deleted file mode 100644 index 3a75c16036..0000000000 --- a/awx/fact/management/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved diff --git a/awx/fact/models/__init__.py b/awx/fact/models/__init__.py deleted file mode 100644 index 049720a11a..0000000000 --- a/awx/fact/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved - -from __future__ import absolute_import - -from .fact import * # noqa diff --git a/awx/fact/models/fact.py b/awx/fact/models/fact.py deleted file mode 100644 index f52abe1106..0000000000 --- a/awx/fact/models/fact.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved - -from mongoengine import connect -from mongoengine.base import BaseField -from mongoengine import Document, DateTimeField, ReferenceField, StringField, IntField -from mongoengine.connection import get_db, ConnectionError -from awx.fact.utils.dbtransform import register_key_transform, KeyTransform - -from django.conf import settings - -import logging -logger = logging.getLogger('awx.fact.models.fact') - - -key_transform = KeyTransform([('.', '\uff0E'), ('$', '\uff04')]) - -# NOTE: I think it might be better to use register_connection here: https://github.com/MongoEngine/mongoengine/blob/0.9/mongoengine/connection.py#L21 -# but I'm not doing that because I don't see how we can also register the key transform as needed or set the tz_aware preference -@classmethod -def _get_db_monkeypatched(cls): - """ Override the default _get_db mechanism to start a connection to the database """ - # Connect to Mongo - try: - # Sanity check: If we have intentionally invalid settings, then we - # know we cannot connect. - if settings.MONGO_HOST == NotImplemented: - raise ConnectionError - - # Attempt to connect to the MongoDB database. 
- connect(settings.MONGO_DB, - host=settings.MONGO_HOST, - port=int(settings.MONGO_PORT), - username=settings.MONGO_USERNAME, - password=settings.MONGO_PASSWORD, - tz_aware=settings.USE_TZ) - register_key_transform(get_db()) - except (ConnectionError, AttributeError): - logger.info('Failed to establish connect to MongoDB') - return get_db(cls._meta.get("db_alias", "default")) - -Document._get_db = _get_db_monkeypatched - -class TransformField(BaseField): - def to_python(self, value): - return key_transform.transform_outgoing(value, None) - - def prepare_query_value(self, op, value): - if op == 'set': - value = key_transform.transform_incoming(value, None) - return super(TransformField, self).prepare_query_value(op, value) - - def to_mongo(self, value): - value = key_transform.transform_incoming(value, None) - return value - -class FactHost(Document): - hostname = StringField(max_length=100, required=True, unique_with='inventory_id') - inventory_id = IntField(required=True, unique_with='hostname') - - # TODO: Consider using hashed index on hostname. django-mongo may not support this but - # executing raw js will - meta = { - 'indexes': [ - ('hostname', 'inventory_id') - ] - } - -class Fact(Document): - timestamp = DateTimeField(required=True) - host = ReferenceField(FactHost, required=True) - module = StringField(max_length=50, required=True) - fact = TransformField(required=True) - - # TODO: Consider using hashed index on host. django-mongo may not support this but - # executing raw js will - meta = { - 'indexes': [ - '-timestamp', - 'host', - ] - } - - @staticmethod - def add_fact(timestamp, fact, host, module): - fact_obj = Fact(timestamp=timestamp, host=host, module=module, fact=fact) - fact_obj.save() - version_obj = FactVersion(timestamp=timestamp, host=host, module=module, fact=fact_obj) - version_obj.save() - return (fact_obj, version_obj) - - # TODO: if we want to relax the need to include module... - # If module not specified then filter query may return more than 1 result. - # Thus, the resulting facts must somehow be unioned/concated/ or kept as an array. - @staticmethod - def get_host_version(hostname, inventory_id, timestamp, module): - try: - host = FactHost.objects.get(hostname=hostname, inventory_id=inventory_id) - except FactHost.DoesNotExist: - return None - - kv = { - 'host' : host.id, - 'timestamp__lte': timestamp, - 'module': module, - } - - try: - facts = Fact.objects.filter(**kv).order_by("-timestamp") - if not facts: - return None - return facts[0] - except Fact.DoesNotExist: - return None - - @staticmethod - def get_host_timeline(hostname, inventory_id, module): - try: - host = FactHost.objects.get(hostname=hostname, inventory_id=inventory_id) - except FactHost.DoesNotExist: - return None - - kv = { - 'host': host.id, - 'module': module, - } - - return FactVersion.objects.filter(**kv).order_by("-timestamp").values_list('timestamp') - - # FIXME: single facts no longer works with the addition of the inventory_id field to the FactHost document - @staticmethod - def get_single_facts(hostnames, fact_key, fact_value, timestamp, module): - kv = { - 'hostname': { - '$in': hostnames, - } - } - fields = { - '_id': 1 - } - host_ids = FactHost._get_collection().find(kv, fields) - if not host_ids or host_ids.count() == 0: - return None - # TODO: use mongo to transform [{_id: <>}, {_id: <>},...] into [_id, _id,...] 
- host_ids = [e['_id'] for e in host_ids] - - pipeline = [] - match = { - 'host': { - '$in': host_ids - }, - 'timestamp': { - '$lte': timestamp - }, - 'module': module - } - sort = { - 'timestamp': -1 - } - group = { - '_id': '$host', - 'timestamp': { - '$first': '$timestamp' - }, - 'fact': { - '$first': '$fact' - } - } - project = { - '_id': 0, - 'fact': 1, - } - pipeline.append({'$match': match}) # noqa - pipeline.append({'$sort': sort}) # noqa - pipeline.append({'$group': group}) # noqa - pipeline.append({'$project': project}) # noqa - q = FactVersion._get_collection().aggregate(pipeline) - if not q or 'result' not in q or len(q['result']) == 0: - return None - # TODO: use mongo to transform [{fact: <>}, {fact: <>},...] into [fact, fact,...] - fact_ids = [fact['fact'] for fact in q['result']] - - kv = { - 'fact.%s' % fact_key : fact_value, - '_id': { - '$in': fact_ids - } - } - fields = { - 'fact.%s.$' % fact_key : 1, - 'host': 1, - 'timestamp': 1, - 'module': 1, - } - facts = Fact._get_collection().find(kv, fields) - #fact_objs = [Fact(**f) for f in facts] - # Translate pymongo python structure to mongoengine Fact object - fact_objs = [] - for f in facts: - f['id'] = f.pop('_id') - fact_objs.append(Fact(**f)) - return fact_objs - -class FactVersion(Document): - timestamp = DateTimeField(required=True) - host = ReferenceField(FactHost, required=True) - module = StringField(max_length=50, required=True) - fact = ReferenceField(Fact, required=True) - # TODO: Consider using hashed index on module. django-mongo may not support this but - # executing raw js will - meta = { - 'indexes': [ - '-timestamp', - 'module', - 'host', - ] - } diff --git a/awx/fact/utils/__init__.py b/awx/fact/utils/__init__.py deleted file mode 100644 index 3a75c16036..0000000000 --- a/awx/fact/utils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved diff --git a/awx/fact/utils/dbtransform.py b/awx/fact/utils/dbtransform.py deleted file mode 100644 index 0541c32fe0..0000000000 --- a/awx/fact/utils/dbtransform.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. 
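Context for the `KeyTransform` manipulator defined below (removed in this diff along with the rest of the fact code): MongoDB historically (pre-3.6) rejects `.` and `$` inside document keys, and fact keys can contain dots (VLAN interface names like `eth0.100`, for instance), so Tower swapped the offending characters for their fullwidth Unicode lookalikes on write and restored them on read. A standalone round-trip of that scheme:

```python
# Escape/unescape sketch of the scheme KeyTransform implements below:
# '.' and '$' become the fullwidth lookalikes U+FF0E and U+FF04.
REPLACEMENTS = [(u'.', u'\uff0e'), (u'$', u'\uff04')]

def escape_key(key):
    for char, lookalike in REPLACEMENTS:
        key = key.replace(char, lookalike)
    return key

def unescape_key(key):
    for char, lookalike in REPLACEMENTS:
        key = key.replace(lookalike, char)
    return key

key = u'ansible_eth0.ipv4'
assert u'.' not in escape_key(key)
assert unescape_key(escape_key(key)) == key
```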
- -# Pymongo -from pymongo.son_manipulator import SONManipulator - -class KeyTransform(SONManipulator): - - def __init__(self, replace): - self.replace = replace - - def replace_key(self, key): - for (replace, replacement) in self.replace: - key = key.replace(replace, replacement) - return key - - def revert_key(self, key): - for (replacement, replace) in self.replace: - key = key.replace(replace, replacement) - return key - - def replace_incoming(self, obj): - if isinstance(obj, dict): - value = {} - for k, v in obj.items(): - value[self.replace_key(k)] = self.replace_incoming(v) - elif isinstance(obj, list): - value = [self.replace_incoming(elem) - for elem in obj] - else: - value = obj - - return value - - def replace_outgoing(self, obj): - if isinstance(obj, dict): - value = {} - for k, v in obj.items(): - value[self.revert_key(k)] = self.replace_outgoing(v) - elif isinstance(obj, list): - value = [self.replace_outgoing(elem) - for elem in obj] - else: - value = obj - - return value - - def transform_incoming(self, son, collection): - return self.replace_incoming(son) - - def transform_outgoing(self, son, collection): - if not collection or collection.name != 'fact': - return son - return self.replace_outgoing(son) - -def register_key_transform(db): - #db.add_son_manipulator(KeyTransform([('.', '\uff0E'), ('$', '\uff04')])) - pass diff --git a/awx/lib/__init__.py b/awx/lib/__init__.py deleted file mode 100644 index e484e62be1..0000000000 --- a/awx/lib/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. diff --git a/awx/lib/compat.py b/awx/lib/compat.py deleted file mode 100644 index fb686dd11c..0000000000 --- a/awx/lib/compat.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -''' -Compability library for support of both Django 1.4.x and Django 1.5.x. -''' - -try: - from django.utils.html import format_html -except ImportError: - from django.utils.html import conditional_escape - from django.utils.safestring import mark_safe - - def format_html(format_string, *args, **kwargs): - args_safe = map(conditional_escape, args) - kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in - kwargs.items()]) - return mark_safe(format_string.format(*args_safe, **kwargs_safe)) - -try: - from django.utils.log import RequireDebugTrue -except ImportError: - import logging - from django.conf import settings - - class RequireDebugTrue(logging.Filter): - def filter(self, record): - return settings.DEBUG - -try: - from django.utils.text import slugify # noqa -except ImportError: - from django.template.defaultfilters import slugify # noqa diff --git a/awx/lib/sitecustomize.py b/awx/lib/sitecustomize.py new file mode 100644 index 0000000000..224840aae7 --- /dev/null +++ b/awx/lib/sitecustomize.py @@ -0,0 +1,26 @@ +# Python +import os +import sys + +# Based on http://stackoverflow.com/a/6879344/131141 -- Initialize tower display +# callback as early as possible to wrap ansible.display.Display methods. 
+ + +def argv_ready(argv): + if argv and os.path.basename(argv[0]) in {'ansible', 'ansible-playbook'}: + import tower_display_callback # noqa + + +class argv_placeholder(object): + + def __del__(self): + try: + argv_ready(sys.argv) + except: + pass + + +if hasattr(sys, 'argv'): + argv_ready(sys.argv) +else: + sys.argv = argv_placeholder() diff --git a/awx/lib/tower_display_callback/__init__.py b/awx/lib/tower_display_callback/__init__.py new file mode 100644 index 0000000000..d984956c7f --- /dev/null +++ b/awx/lib/tower_display_callback/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Tower Display Callback +from . import cleanup # noqa (registers control persistent cleanup) +from . import display # noqa (wraps ansible.display.Display methods) +from .module import TowerDefaultCallbackModule, TowerMinimalCallbackModule + +__all__ = ['TowerDefaultCallbackModule', 'TowerMinimalCallbackModule'] diff --git a/awx/lib/tower_display_callback/cleanup.py b/awx/lib/tower_display_callback/cleanup.py new file mode 100644 index 0000000000..8926e54f72 --- /dev/null +++ b/awx/lib/tower_display_callback/cleanup.py @@ -0,0 +1,80 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Python +import atexit +import glob +import os +import pwd + +# PSUtil +import psutil + +__all__ = [] + +main_pid = os.getpid() + + +@atexit.register +def terminate_ssh_control_masters(): + # Only run this cleanup from the main process. + if os.getpid() != main_pid: + return + # Determine if control persist is being used and if any open sockets + # exist after running the playbook. + cp_path = os.environ.get('ANSIBLE_SSH_CONTROL_PATH', '') + if not cp_path: + return + cp_dir = os.path.dirname(cp_path) + if not os.path.exists(cp_dir): + return + cp_pattern = os.path.join(cp_dir, 'ansible-ssh-*') + cp_files = glob.glob(cp_pattern) + if not cp_files: + return + + # Attempt to find any running control master processes. 
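Worth noting on the hunk above: `atexit` handlers are inherited across `fork()`, which is why `terminate_ssh_control_masters` starts with the `os.getpid() != main_pid` guard. A runnable (POSIX-only) illustration of the guard pattern:

```python
import atexit
import os
import sys

MAIN_PID = os.getpid()

@atexit.register
def cleanup():
    # Forked workers inherit this handler; without the PID guard each child
    # exiting normally would also run the process-wide cleanup.
    if os.getpid() != MAIN_PID:
        return
    sys.stdout.write('cleanup ran once, in pid %d\n' % os.getpid())

pid = os.fork()
if pid == 0:
    sys.exit(0)      # child: atexit fires here, but the guard skips the work
os.waitpid(pid, 0)   # parent: cleanup runs once at interpreter exit
```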
+ username = pwd.getpwuid(os.getuid())[0] + ssh_cm_procs = [] + for proc in psutil.process_iter(): + try: + pname = proc.name() + pcmdline = proc.cmdline() + pusername = proc.username() + except psutil.NoSuchProcess: + continue + if pusername != username: + continue + if pname != 'ssh': + continue + for cp_file in cp_files: + if pcmdline and cp_file in pcmdline[0]: + ssh_cm_procs.append(proc) + break + + # Terminate then kill control master processes. Workaround older + # version of psutil that may not have wait_procs implemented. + for proc in ssh_cm_procs: + try: + proc.terminate() + except psutil.NoSuchProcess: + continue + procs_gone, procs_alive = psutil.wait_procs(ssh_cm_procs, timeout=5) + for proc in procs_alive: + proc.kill() diff --git a/awx/lib/tower_display_callback/display.py b/awx/lib/tower_display_callback/display.py new file mode 100644 index 0000000000..ad5e8ba37a --- /dev/null +++ b/awx/lib/tower_display_callback/display.py @@ -0,0 +1,98 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Python +import functools +import sys +import uuid + +# Ansible +from ansible.utils.display import Display + +# Tower Display Callback +from .events import event_context + +__all__ = [] + + +def with_context(**context): + global event_context + + def wrap(f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + with event_context.set_local(**context): + return f(*args, **kwargs) + return wrapper + return wrap + + +for attr in dir(Display): + if attr.startswith('_') or 'cow' in attr or 'prompt' in attr: + continue + if attr in ('display', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv', 'verbose'): + continue + if not callable(getattr(Display, attr)): + continue + setattr(Display, attr, with_context(**{attr: True})(getattr(Display, attr))) + + +def with_verbosity(f): + global event_context + + @functools.wraps(f) + def wrapper(*args, **kwargs): + host = args[2] if len(args) >= 3 else kwargs.get('host', None) + caplevel = args[3] if len(args) >= 4 else kwargs.get('caplevel', 2) + context = dict(verbose=True, verbosity=(caplevel + 1)) + if host is not None: + context['remote_addr'] = host + with event_context.set_local(**context): + return f(*args, **kwargs) + return wrapper + + +Display.verbose = with_verbosity(Display.verbose) + + +def display_with_context(f): + + @functools.wraps(f) + def wrapper(*args, **kwargs): + log_only = args[5] if len(args) >= 6 else kwargs.get('log_only', False) + stderr = args[3] if len(args) >= 4 else kwargs.get('stderr', False) + event_uuid = event_context.get().get('uuid', None) + with event_context.display_lock: + # If writing only to a log file or there is already an event UUID + # set (from a callback module method), skip dumping the event data. 
+ if log_only or event_uuid: + return f(*args, **kwargs) + try: + fileobj = sys.stderr if stderr else sys.stdout + event_context.add_local(uuid=str(uuid.uuid4())) + event_context.dump_begin(fileobj) + return f(*args, **kwargs) + finally: + event_context.dump_end(fileobj) + event_context.remove_local(uuid=None) + + return wrapper + + +Display.display = display_with_context(Display.display) diff --git a/awx/lib/tower_display_callback/events.py b/awx/lib/tower_display_callback/events.py new file mode 100644 index 0000000000..a419b33e85 --- /dev/null +++ b/awx/lib/tower_display_callback/events.py @@ -0,0 +1,214 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Python +import base64 +import contextlib +import datetime +import json +import logging +import multiprocessing +import os +import threading +import uuid +import memcache + +# Kombu +from kombu import Connection, Exchange, Producer + +__all__ = ['event_context'] + + +class CallbackQueueEventDispatcher(object): + + def __init__(self): + self.callback_connection = os.getenv('CALLBACK_CONNECTION', None) + self.connection_queue = os.getenv('CALLBACK_QUEUE', '') + self.connection = None + self.exchange = None + self._init_logging() + + def _init_logging(self): + try: + self.job_callback_debug = int(os.getenv('JOB_CALLBACK_DEBUG', '0')) + except ValueError: + self.job_callback_debug = 0 + self.logger = logging.getLogger('awx.plugins.callback.job_event_callback') + if self.job_callback_debug >= 2: + self.logger.setLevel(logging.DEBUG) + elif self.job_callback_debug >= 1: + self.logger.setLevel(logging.INFO) + else: + self.logger.setLevel(logging.WARNING) + handler = logging.StreamHandler() + formatter = logging.Formatter('%(levelname)-8s %(process)-8d %(message)s') + handler.setFormatter(formatter) + self.logger.addHandler(handler) + self.logger.propagate = False + + def dispatch(self, obj): + if not self.callback_connection or not self.connection_queue: + return + active_pid = os.getpid() + for retry_count in xrange(4): + try: + if not hasattr(self, 'connection_pid'): + self.connection_pid = active_pid + if self.connection_pid != active_pid: + self.connection = None + if self.connection is None: + self.connection = Connection(self.callback_connection) + self.exchange = Exchange(self.connection_queue, type='direct') + + producer = Producer(self.connection) + producer.publish(obj, + serializer='json', + compression='bzip2', + exchange=self.exchange, + declare=[self.exchange], + routing_key=self.connection_queue) + return + except Exception, e: + self.logger.info('Publish Job Event Exception: %r, retry=%d', e, + retry_count, exc_info=True) + retry_count += 1 + if retry_count >= 3: + break + + +class EventContext(object): + ''' + Store global and local (per thread/process) data associated with 
callback + events and other display output methods. + ''' + + def __init__(self): + self.display_lock = multiprocessing.RLock() + self.dispatcher = CallbackQueueEventDispatcher() + cache_actual = os.getenv('CACHE', '127.0.0.1:11211') + self.cache = memcache.Client([cache_actual], debug=0) + + def add_local(self, **kwargs): + if not hasattr(self, '_local'): + self._local = threading.local() + self._local._ctx = {} + self._local._ctx.update(kwargs) + + def remove_local(self, **kwargs): + if hasattr(self, '_local'): + for key in kwargs.keys(): + self._local._ctx.pop(key, None) + + @contextlib.contextmanager + def set_local(self, **kwargs): + try: + self.add_local(**kwargs) + yield + finally: + self.remove_local(**kwargs) + + def get_local(self): + return getattr(getattr(self, '_local', None), '_ctx', {}) + + def add_global(self, **kwargs): + if not hasattr(self, '_global_ctx'): + self._global_ctx = {} + self._global_ctx.update(kwargs) + + def remove_global(self, **kwargs): + if hasattr(self, '_global_ctx'): + for key in kwargs.keys(): + self._global_ctx.pop(key, None) + + @contextlib.contextmanager + def set_global(self, **kwargs): + try: + self.add_global(**kwargs) + yield + finally: + self.remove_global(**kwargs) + + def get_global(self): + return getattr(self, '_global_ctx', {}) + + def get(self): + ctx = {} + ctx.update(self.get_global()) + ctx.update(self.get_local()) + return ctx + + def get_begin_dict(self): + event_data = self.get() + if os.getenv('JOB_ID', ''): + event_data['job_id'] = int(os.getenv('JOB_ID', '0')) + if os.getenv('AD_HOC_COMMAND_ID', ''): + event_data['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0')) + event_data.setdefault('pid', os.getpid()) + event_data.setdefault('uuid', str(uuid.uuid4())) + event_data.setdefault('created', datetime.datetime.utcnow().isoformat()) + if not event_data.get('parent_uuid', None) and event_data.get('job_id', None): + for key in ('task_uuid', 'play_uuid', 'playbook_uuid'): + parent_uuid = event_data.get(key, None) + if parent_uuid and parent_uuid != event_data.get('uuid', None): + event_data['parent_uuid'] = parent_uuid + break + + event = event_data.pop('event', None) + if not event: + event = 'verbose' + for key in ('debug', 'verbose', 'deprecated', 'warning', 'system_warning', 'error'): + if event_data.get(key, False): + event = key + break + max_res = int(os.getenv("MAX_EVENT_RES", 700000)) + if event not in ('playbook_on_stats',) and "res" in event_data and len(str(event_data['res'])) > max_res: + event_data['res'] = {} + event_dict = dict(event=event, event_data=event_data) + for key in event_data.keys(): + if key in ('job_id', 'ad_hoc_command_id', 'uuid', 'parent_uuid', 'created',): + event_dict[key] = event_data.pop(key) + elif key in ('verbosity', 'pid'): + event_dict[key] = event_data[key] + return event_dict + + def get_end_dict(self): + return {} + + def dump(self, fileobj, data, max_width=78, flush=False): + b64data = base64.b64encode(json.dumps(data)) + with self.display_lock: + fileobj.write(u'\x1b[K') + for offset in xrange(0, len(b64data), max_width): + chunk = b64data[offset:offset + max_width] + escaped_chunk = u'{}\x1b[{}D'.format(chunk, len(chunk)) + fileobj.write(escaped_chunk) + fileobj.write(u'\x1b[K') + if flush: + fileobj.flush() + + def dump_begin(self, fileobj): + begin_dict = self.get_begin_dict() + self.cache.set(":1:ev-{}".format(begin_dict['uuid']), begin_dict) + self.dump(fileobj, {'uuid': begin_dict['uuid']}) + + def dump_end(self, fileobj): + self.dump(fileobj, self.get_end_dict(), 
flush=True) + + +event_context = EventContext() diff --git a/awx/lib/tower_display_callback/minimal.py b/awx/lib/tower_display_callback/minimal.py new file mode 100644 index 0000000000..de7694213e --- /dev/null +++ b/awx/lib/tower_display_callback/minimal.py @@ -0,0 +1,28 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Python +import os + +# Ansible +import ansible + +# Because of the way Ansible loads plugins, it's not possible to import +# ansible.plugins.callback.minimal when being loaded as the minimal plugin. Ugh. +execfile(os.path.join(os.path.dirname(ansible.__file__), 'plugins', 'callback', 'minimal.py')) diff --git a/awx/lib/tower_display_callback/module.py b/awx/lib/tower_display_callback/module.py new file mode 100644 index 0000000000..c553b08853 --- /dev/null +++ b/awx/lib/tower_display_callback/module.py @@ -0,0 +1,461 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Python +import contextlib +import sys +import uuid + +# Ansible +from ansible.plugins.callback import CallbackBase +from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule + +# Tower Display Callback +from .events import event_context +from .minimal import CallbackModule as MinimalCallbackModule + + +class BaseCallbackModule(CallbackBase): + ''' + Callback module for logging ansible/ansible-playbook events. + ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + + # These events should never have an associated play. + EVENTS_WITHOUT_PLAY = [ + 'playbook_on_start', + 'playbook_on_stats', + ] + + # These events should never have an associated task. 
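For readers tracing the event flow: `EventContext.dump()` in events.py above frames each base64/JSON payload in `\x1b[K` (erase-line) and cursor-back escapes, so a human tailing stdout never sees the payload while a machine consumer can recover it by stripping the escapes. A consumer-side sketch of that decoding (not code from this diff; Tower's real receiver may differ):

```python
import base64
import json
import re

ANSI_ESCAPE = re.compile(r'\x1b\[[0-9]*[A-Za-z]')

def decode_event(raw):
    # Strip the cursor-control escapes that hide the payload, then undo the
    # base64/JSON framing applied by EventContext.dump().
    b64 = ANSI_ESCAPE.sub('', raw)
    return json.loads(base64.b64decode(b64).decode('utf-8'))

# Frame a payload the way dump() frames it, then round-trip it:
data = base64.b64encode(json.dumps({'uuid': 'abc'}).encode('utf-8')).decode('ascii')
framed = u'\x1b[K' + data + u'\x1b[%dD' % len(data) + u'\x1b[K'
print(decode_event(framed))  # {'uuid': 'abc'}
```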
+ EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [ + 'playbook_on_setup', + 'playbook_on_notify', + 'playbook_on_import_for_host', + 'playbook_on_not_import_for_host', + 'playbook_on_no_hosts_matched', + 'playbook_on_no_hosts_remaining', + ] + + CENSOR_FIELD_WHITELIST = [ + 'msg', + 'failed', + 'changed', + 'results', + 'start', + 'end', + 'delta', + 'cmd', + '_ansible_no_log', + 'rc', + 'failed_when_result', + 'skipped', + 'skip_reason', + ] + + def __init__(self): + super(BaseCallbackModule, self).__init__() + self.task_uuids = set() + + @contextlib.contextmanager + def capture_event_data(self, event, **event_data): + + event_data.setdefault('uuid', str(uuid.uuid4())) + + if event not in self.EVENTS_WITHOUT_TASK: + task = event_data.pop('task', None) + else: + task = None + + with event_context.display_lock: + try: + event_context.add_local(event=event, **event_data) + if task: + self.set_task(task, local=True) + event_context.dump_begin(sys.stdout) + yield + finally: + event_context.dump_end(sys.stdout) + if task: + self.clear_task(local=True) + event_context.remove_local(event=None, **event_data) + + def set_playbook(self, playbook): + # NOTE: Ansible doesn't generate a UUID for playbook_on_start so do it for them. + self.playbook_uuid = str(uuid.uuid4()) + file_name = getattr(playbook, '_file_name', '???') + event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid) + self.clear_play() + + def set_play(self, play): + if hasattr(play, 'hosts'): + if isinstance(play.hosts, list): + pattern = ','.join(play.hosts) + else: + pattern = play.hosts + else: + pattern = '' + name = play.get_name().strip() or pattern + event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern) + self.clear_task() + + def clear_play(self): + event_context.remove_global(play=None, play_uuid=None, play_pattern=None) + self.clear_task() + + def set_task(self, task, local=False): + # FIXME: Task is "global" unless using free strategy! 
+ task_ctx = dict( + task=(task.name or task.action), + task_uuid=str(task._uuid), + task_action=task.action, + ) + try: + task_ctx['task_path'] = task.get_path() + except AttributeError: + pass + if not task.no_log: + task_args = ', '.join(('%s=%s' % a for a in task.args.items())) + task_ctx['task_args'] = task_args + if getattr(task, '_role', None): + task_role = task._role._role_name + else: + task_role = getattr(task, 'role_name', '') + if task_role: + task_ctx['role'] = task_role + if local: + event_context.add_local(**task_ctx) + else: + event_context.add_global(**task_ctx) + + def clear_task(self, local=False): + task_ctx = dict(task=None, task_path=None, task_uuid=None, task_action=None, task_args=None, role=None) + if local: + event_context.remove_local(**task_ctx) + else: + event_context.remove_global(**task_ctx) + + def v2_playbook_on_start(self, playbook): + self.set_playbook(playbook) + event_data = dict( + uuid=self.playbook_uuid, + ) + with self.capture_event_data('playbook_on_start', **event_data): + super(BaseCallbackModule, self).v2_playbook_on_start(playbook) + + def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, + encrypt=None, confirm=False, salt_size=None, + salt=None, default=None): + event_data = dict( + varname=varname, + private=private, + prompt=prompt, + encrypt=encrypt, + confirm=confirm, + salt_size=salt_size, + salt=salt, + default=default, + ) + with self.capture_event_data('playbook_on_vars_prompt', **event_data): + super(BaseCallbackModule, self).v2_playbook_on_vars_prompt( + varname, private, prompt, encrypt, confirm, salt_size, salt, + default, + ) + + def v2_playbook_on_include(self, included_file): + event_data = dict( + included_file=included_file._filename if included_file is not None else None, + ) + with self.capture_event_data('playbook_on_include', **event_data): + super(BaseCallbackModule, self).v2_playbook_on_include(included_file) + + def v2_playbook_on_play_start(self, play): + self.set_play(play) + if hasattr(play, 'hosts'): + if isinstance(play.hosts, list): + pattern = ','.join(play.hosts) + else: + pattern = play.hosts + else: + pattern = '' + name = play.get_name().strip() or pattern + event_data = dict( + name=name, + pattern=pattern, + uuid=str(play._uuid), + ) + with self.capture_event_data('playbook_on_play_start', **event_data): + super(BaseCallbackModule, self).v2_playbook_on_play_start(play) + + def v2_playbook_on_import_for_host(self, result, imported_file): + # NOTE: Not used by Ansible 2.x. + with self.capture_event_data('playbook_on_import_for_host'): + super(BaseCallbackModule, self).v2_playbook_on_import_for_host(result, imported_file) + + def v2_playbook_on_not_import_for_host(self, result, missing_file): + # NOTE: Not used by Ansible 2.x. + with self.capture_event_data('playbook_on_not_import_for_host'): + super(BaseCallbackModule, self).v2_playbook_on_not_import_for_host(result, missing_file) + + def v2_playbook_on_setup(self): + # NOTE: Not used by Ansible 2.x. + with self.capture_event_data('playbook_on_setup'): + super(BaseCallbackModule, self).v2_playbook_on_setup() + + def v2_playbook_on_task_start(self, task, is_conditional): + # FIXME: Flag task path output as vv. + task_uuid = str(task._uuid) + if task_uuid in self.task_uuids: + # FIXME: When this task UUID repeats, it means the play is using the + # free strategy, so different hosts may be running different tasks + # within a play. 
+            return
+        self.task_uuids.add(task_uuid)
+        self.set_task(task)
+        event_data = dict(
+            task=task,
+            name=task.get_name(),
+            is_conditional=is_conditional,
+            uuid=task_uuid,
+        )
+        with self.capture_event_data('playbook_on_task_start', **event_data):
+            super(BaseCallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
+
+    def v2_playbook_on_cleanup_task_start(self, task):
+        # NOTE: Not used by Ansible 2.x.
+        self.set_task(task)
+        event_data = dict(
+            task=task,
+            name=task.get_name(),
+            uuid=str(task._uuid),
+            is_conditional=True,
+        )
+        with self.capture_event_data('playbook_on_task_start', **event_data):
+            super(BaseCallbackModule, self).v2_playbook_on_cleanup_task_start(task)
+
+    def v2_playbook_on_handler_task_start(self, task):
+        # NOTE: Re-using playbook_on_task_start event for this v2-specific
+        # event, but setting is_conditional=True, which is how v1 identified a
+        # task run as a handler.
+        self.set_task(task)
+        event_data = dict(
+            task=task,
+            name=task.get_name(),
+            uuid=str(task._uuid),
+            is_conditional=True,
+        )
+        with self.capture_event_data('playbook_on_task_start', **event_data):
+            super(BaseCallbackModule, self).v2_playbook_on_handler_task_start(task)
+
+    def v2_playbook_on_no_hosts_matched(self):
+        with self.capture_event_data('playbook_on_no_hosts_matched'):
+            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_matched()
+
+    def v2_playbook_on_no_hosts_remaining(self):
+        with self.capture_event_data('playbook_on_no_hosts_remaining'):
+            super(BaseCallbackModule, self).v2_playbook_on_no_hosts_remaining()
+
+    def v2_playbook_on_notify(self, result, handler):
+        # NOTE: Not used by Ansible 2.x.
+        event_data = dict(
+            host=result._host.get_name(),
+            task=result._task,
+            handler=handler,
+        )
+        with self.capture_event_data('playbook_on_notify', **event_data):
+            super(BaseCallbackModule, self).v2_playbook_on_notify(result, handler)
+
+    '''
+    ansible_stats was retroactively added in Ansible 2.2.
+    '''
+    def v2_playbook_on_stats(self, stats):
+        self.clear_play()
+        # FIXME: Add count of plays/tasks.
+        event_data = dict(
+            changed=stats.changed,
+            dark=stats.dark,
+            failures=stats.failures,
+            ok=stats.ok,
+            processed=stats.processed,
+            skipped=stats.skipped,
+            artifact_data=stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
+        )
+
+        with self.capture_event_data('playbook_on_stats', **event_data):
+            super(BaseCallbackModule, self).v2_playbook_on_stats(stats)
+
+    def v2_runner_on_ok(self, result):
+        # FIXME: Display detailed results or not based on verbosity.
+        event_data = dict(
+            host=result._host.get_name(),
+            remote_addr=result._host.address,
+            task=result._task,
+            res=result._result,
+            event_loop=result._task.loop if hasattr(result._task, 'loop') else None,
+        )
+        with self.capture_event_data('runner_on_ok', **event_data):
+            super(BaseCallbackModule, self).v2_runner_on_ok(result)
+
+    def v2_runner_on_failed(self, result, ignore_errors=False):
+        # FIXME: Add verbosity for exception/results output.
+ event_data = dict( + host=result._host.get_name(), + remote_addr=result._host.address, + res=result._result, + task=result._task, + ignore_errors=ignore_errors, + event_loop=result._task.loop if hasattr(result._task, 'loop') else None, + ) + with self.capture_event_data('runner_on_failed', **event_data): + super(BaseCallbackModule, self).v2_runner_on_failed(result, ignore_errors) + + def v2_runner_on_skipped(self, result): + event_data = dict( + host=result._host.get_name(), + remote_addr=result._host.address, + task=result._task, + event_loop=result._task.loop if hasattr(result._task, 'loop') else None, + ) + with self.capture_event_data('runner_on_skipped', **event_data): + super(BaseCallbackModule, self).v2_runner_on_skipped(result) + + def v2_runner_on_unreachable(self, result): + event_data = dict( + host=result._host.get_name(), + remote_addr=result._host.address, + task=result._task, + res=result._result, + ) + with self.capture_event_data('runner_on_unreachable', **event_data): + super(BaseCallbackModule, self).v2_runner_on_unreachable(result) + + def v2_runner_on_no_hosts(self, task): + # NOTE: Not used by Ansible 2.x. + event_data = dict( + task=task, + ) + with self.capture_event_data('runner_on_no_hosts', **event_data): + super(BaseCallbackModule, self).v2_runner_on_no_hosts(task) + + def v2_runner_on_async_poll(self, result): + # NOTE: Not used by Ansible 2.x. + event_data = dict( + host=result._host.get_name(), + task=result._task, + res=result._result, + jid=result._result.get('ansible_job_id'), + ) + with self.capture_event_data('runner_on_async_poll', **event_data): + super(BaseCallbackModule, self).v2_runner_on_async_poll(result) + + def v2_runner_on_async_ok(self, result): + # NOTE: Not used by Ansible 2.x. + event_data = dict( + host=result._host.get_name(), + task=result._task, + res=result._result, + jid=result._result.get('ansible_job_id'), + ) + with self.capture_event_data('runner_on_async_ok', **event_data): + super(BaseCallbackModule, self).v2_runner_on_async_ok(result) + + def v2_runner_on_async_failed(self, result): + # NOTE: Not used by Ansible 2.x. + event_data = dict( + host=result._host.get_name(), + task=result._task, + res=result._result, + jid=result._result.get('ansible_job_id'), + ) + with self.capture_event_data('runner_on_async_failed', **event_data): + super(BaseCallbackModule, self).v2_runner_on_async_failed(result) + + def v2_runner_on_file_diff(self, result, diff): + # NOTE: Not used by Ansible 2.x. + event_data = dict( + host=result._host.get_name(), + task=result._task, + diff=diff, + ) + with self.capture_event_data('runner_on_file_diff', **event_data): + super(BaseCallbackModule, self).v2_runner_on_file_diff(result, diff) + + def v2_on_file_diff(self, result): + # NOTE: Logged as runner_on_file_diff. 
+ event_data = dict( + host=result._host.get_name(), + task=result._task, + diff=result._result.get('diff'), + ) + with self.capture_event_data('runner_on_file_diff', **event_data): + super(BaseCallbackModule, self).v2_on_file_diff(result) + + def v2_runner_item_on_ok(self, result): + event_data = dict( + host=result._host.get_name(), + task=result._task, + res=result._result, + ) + with self.capture_event_data('runner_item_on_ok', **event_data): + super(BaseCallbackModule, self).v2_runner_item_on_ok(result) + + def v2_runner_item_on_failed(self, result): + event_data = dict( + host=result._host.get_name(), + task=result._task, + res=result._result, + ) + with self.capture_event_data('runner_item_on_failed', **event_data): + super(BaseCallbackModule, self).v2_runner_item_on_failed(result) + + def v2_runner_item_on_skipped(self, result): + event_data = dict( + host=result._host.get_name(), + task=result._task, + res=result._result, + ) + with self.capture_event_data('runner_item_on_skipped', **event_data): + super(BaseCallbackModule, self).v2_runner_item_on_skipped(result) + + def v2_runner_retry(self, result): + event_data = dict( + host=result._host.get_name(), + task=result._task, + res=result._result, + ) + with self.capture_event_data('runner_retry', **event_data): + super(BaseCallbackModule, self).v2_runner_retry(result) + + +class TowerDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule): + + CALLBACK_NAME = 'tower_display' + + +class TowerMinimalCallbackModule(BaseCallbackModule, MinimalCallbackModule): + + CALLBACK_NAME = 'minimal' + + def v2_playbook_on_play_start(self, play): + pass + + def v2_playbook_on_task_start(self, task, is_conditional): + self.set_task(task) diff --git a/awx/locale/django.pot b/awx/locale/django.pot new file mode 100644 index 0000000000..9a47e8d755 --- /dev/null +++ b/awx/locale/django.pot @@ -0,0 +1,3833 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2017-02-09 14:32+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: awx/api/authentication.py:67 +msgid "Invalid token header. No credentials provided." +msgstr "" + +#: awx/api/authentication.py:70 +msgid "Invalid token header. Token string should not contain spaces." +msgstr "" + +#: awx/api/authentication.py:105 +msgid "User inactive or deleted" +msgstr "" + +#: awx/api/authentication.py:161 +msgid "Invalid task token" +msgstr "" + +#: awx/api/conf.py:12 +msgid "Idle Time Force Log Out" +msgstr "" + +#: awx/api/conf.py:13 +msgid "" +"Number of seconds that a user is inactive before they will need to login " +"again." +msgstr "" + +#: awx/api/conf.py:14 awx/api/conf.py:24 awx/api/conf.py:33 +#: awx/sso/conf.py:124 awx/sso/conf.py:135 awx/sso/conf.py:147 +#: awx/sso/conf.py:162 +msgid "Authentication" +msgstr "" + +#: awx/api/conf.py:22 +msgid "Maximum number of simultaneous logins" +msgstr "" + +#: awx/api/conf.py:23 +msgid "" +"Maximum number of simultaneous logins a user may have. To disable enter -1." 
+msgstr "" + +#: awx/api/conf.py:31 +msgid "Enable HTTP Basic Auth" +msgstr "" + +#: awx/api/conf.py:32 +msgid "Enable HTTP Basic Auth for the API Browser." +msgstr "" + +#: awx/api/generics.py:466 +msgid "\"id\" is required to disassociate" +msgstr "" + +#: awx/api/metadata.py:51 +msgid "Database ID for this {}." +msgstr "" + +#: awx/api/metadata.py:52 +msgid "Name of this {}." +msgstr "" + +#: awx/api/metadata.py:53 +msgid "Optional description of this {}." +msgstr "" + +#: awx/api/metadata.py:54 +msgid "Data type for this {}." +msgstr "" + +#: awx/api/metadata.py:55 +msgid "URL for this {}." +msgstr "" + +#: awx/api/metadata.py:56 +msgid "Data structure with URLs of related resources." +msgstr "" + +#: awx/api/metadata.py:57 +msgid "Data structure with name/description for related resources." +msgstr "" + +#: awx/api/metadata.py:58 +msgid "Timestamp when this {} was created." +msgstr "" + +#: awx/api/metadata.py:59 +msgid "Timestamp when this {} was last modified." +msgstr "" + +#: awx/api/parsers.py:31 +#, python-format +msgid "JSON parse error - %s" +msgstr "" + +#: awx/api/serializers.py:251 +msgid "Playbook Run" +msgstr "" + +#: awx/api/serializers.py:252 +msgid "Command" +msgstr "" + +#: awx/api/serializers.py:253 +msgid "SCM Update" +msgstr "" + +#: awx/api/serializers.py:254 +msgid "Inventory Sync" +msgstr "" + +#: awx/api/serializers.py:255 +msgid "Management Job" +msgstr "" + +#: awx/api/serializers.py:256 +msgid "Workflow Job" +msgstr "" + +#: awx/api/serializers.py:257 +msgid "Workflow Template" +msgstr "" + +#: awx/api/serializers.py:653 awx/api/serializers.py:711 awx/api/views.py:3842 +#, python-format +msgid "" +"Standard Output too large to display (%(text_size)d bytes), only download " +"supported for sizes over %(supported_size)d bytes" +msgstr "" + +#: awx/api/serializers.py:726 +msgid "Write-only field used to change the password." +msgstr "" + +#: awx/api/serializers.py:728 +msgid "Set if the account is managed by an external service" +msgstr "" + +#: awx/api/serializers.py:752 +msgid "Password required for new User." +msgstr "" + +#: awx/api/serializers.py:836 +#, python-format +msgid "Unable to change %s on user managed by LDAP." +msgstr "" + +#: awx/api/serializers.py:997 +msgid "Organization is missing" +msgstr "" + +#: awx/api/serializers.py:1001 +msgid "Update options must be set to false for manual projects." +msgstr "" + +#: awx/api/serializers.py:1007 +msgid "Array of playbooks available within this project." +msgstr "" + +#: awx/api/serializers.py:1189 +#, python-format +msgid "Invalid port specification: %s" +msgstr "" + +#: awx/api/serializers.py:1217 awx/main/validators.py:193 +msgid "Must be valid JSON or YAML." +msgstr "" + +#: awx/api/serializers.py:1274 +msgid "Invalid group name." +msgstr "" + +#: awx/api/serializers.py:1349 +msgid "" +"Script must begin with a hashbang sequence: i.e.... #!/usr/bin/env python" +msgstr "" + +#: awx/api/serializers.py:1402 +msgid "If 'source' is 'custom', 'source_script' must be provided." +msgstr "" + +#: awx/api/serializers.py:1406 +msgid "" +"The 'source_script' does not belong to the same organization as the " +"inventory." +msgstr "" + +#: awx/api/serializers.py:1408 +msgid "'source_script' doesn't exist." +msgstr "" + +#: awx/api/serializers.py:1770 +msgid "" +"Write-only field used to add user to owner role. If provided, do not give " +"either team or organization. Only valid for creation." +msgstr "" + +#: awx/api/serializers.py:1775 +msgid "" +"Write-only field used to add team to owner role. 
If provided, do not give " +"either user or organization. Only valid for creation." +msgstr "" + +#: awx/api/serializers.py:1780 +msgid "" +"Inherit permissions from organization roles. If provided on creation, do not " +"give either user or team." +msgstr "" + +#: awx/api/serializers.py:1796 +msgid "Missing 'user', 'team', or 'organization'." +msgstr "" + +#: awx/api/serializers.py:1809 +msgid "" +"Credential organization must be set and match before assigning to a team" +msgstr "" + +#: awx/api/serializers.py:1905 +msgid "This field is required." +msgstr "" + +#: awx/api/serializers.py:1907 awx/api/serializers.py:1909 +msgid "Playbook not found for project." +msgstr "" + +#: awx/api/serializers.py:1911 +msgid "Must select playbook for project." +msgstr "" + +#: awx/api/serializers.py:1977 +msgid "Must either set a default value or ask to prompt on launch." +msgstr "" + +#: awx/api/serializers.py:1980 awx/main/models/jobs.py:277 +msgid "Scan jobs must be assigned a fixed inventory." +msgstr "" + +#: awx/api/serializers.py:1982 awx/main/models/jobs.py:280 +msgid "Job types 'run' and 'check' must have assigned a project." +msgstr "" + +#: awx/api/serializers.py:1989 +msgid "Survey Enabled cannot be used with scan jobs." +msgstr "" + +#: awx/api/serializers.py:2049 +msgid "Invalid job template." +msgstr "" + +#: awx/api/serializers.py:2134 +msgid "Credential not found or deleted." +msgstr "" + +#: awx/api/serializers.py:2136 +msgid "Job Template Project is missing or undefined." +msgstr "" + +#: awx/api/serializers.py:2138 +msgid "Job Template Inventory is missing or undefined." +msgstr "" + +#: awx/api/serializers.py:2423 +#, python-format +msgid "%(job_type)s is not a valid job type. The choices are %(choices)s." +msgstr "" + +#: awx/api/serializers.py:2428 +msgid "Workflow job template is missing during creation." +msgstr "" + +#: awx/api/serializers.py:2433 +#, python-format +msgid "Cannot nest a %s inside a WorkflowJobTemplate" +msgstr "" + +#: awx/api/serializers.py:2671 +#, python-format +msgid "Job Template '%s' is missing or undefined." +msgstr "" + +#: awx/api/serializers.py:2697 +msgid "Must be a valid JSON or YAML dictionary." +msgstr "" + +#: awx/api/serializers.py:2839 +msgid "" +"Missing required fields for Notification Configuration: notification_type" +msgstr "" + +#: awx/api/serializers.py:2862 +msgid "No values specified for field '{}'" +msgstr "" + +#: awx/api/serializers.py:2867 +msgid "Missing required fields for Notification Configuration: {}." +msgstr "" + +#: awx/api/serializers.py:2870 +msgid "Configuration field '{}' incorrect type, expected {}." +msgstr "" + +#: awx/api/serializers.py:2923 +msgid "Inventory Source must be a cloud resource." +msgstr "" + +#: awx/api/serializers.py:2925 +msgid "Manual Project can not have a schedule set." +msgstr "" + +#: awx/api/serializers.py:2947 +msgid "DTSTART required in rrule. Value should match: DTSTART:YYYYMMDDTHHMMSSZ" +msgstr "" + +#: awx/api/serializers.py:2949 +msgid "Multiple DTSTART is not supported." +msgstr "" + +#: awx/api/serializers.py:2951 +msgid "RRULE require in rrule." +msgstr "" + +#: awx/api/serializers.py:2953 +msgid "Multiple RRULE is not supported." +msgstr "" + +#: awx/api/serializers.py:2955 +msgid "INTERVAL required in rrule." +msgstr "" + +#: awx/api/serializers.py:2957 +msgid "TZID is not supported." +msgstr "" + +#: awx/api/serializers.py:2959 +msgid "SECONDLY is not supported." +msgstr "" + +#: awx/api/serializers.py:2961 +msgid "Multiple BYMONTHDAYs not supported." 
+msgstr "" + +#: awx/api/serializers.py:2963 +msgid "Multiple BYMONTHs not supported." +msgstr "" + +#: awx/api/serializers.py:2965 +msgid "BYDAY with numeric prefix not supported." +msgstr "" + +#: awx/api/serializers.py:2967 +msgid "BYYEARDAY not supported." +msgstr "" + +#: awx/api/serializers.py:2969 +msgid "BYWEEKNO not supported." +msgstr "" + +#: awx/api/serializers.py:2973 +msgid "COUNT > 999 is unsupported." +msgstr "" + +#: awx/api/serializers.py:2977 +msgid "rrule parsing failed validation." +msgstr "" + +#: awx/api/serializers.py:3012 +msgid "" +"A summary of the new and changed values when an object is created, updated, " +"or deleted" +msgstr "" + +#: awx/api/serializers.py:3014 +msgid "" +"For create, update, and delete events this is the object type that was " +"affected. For associate and disassociate events this is the object type " +"associated or disassociated with object2." +msgstr "" + +#: awx/api/serializers.py:3017 +msgid "" +"Unpopulated for create, update, and delete events. For associate and " +"disassociate events this is the object type that object1 is being associated " +"with." +msgstr "" + +#: awx/api/serializers.py:3020 +msgid "The action taken with respect to the given object(s)." +msgstr "" + +#: awx/api/serializers.py:3123 +msgid "Unable to login with provided credentials." +msgstr "" + +#: awx/api/serializers.py:3125 +msgid "Must include \"username\" and \"password\"." +msgstr "" + +#: awx/api/views.py:102 +msgid "Your license does not allow use of the activity stream." +msgstr "" + +#: awx/api/views.py:112 +msgid "Your license does not permit use of system tracking." +msgstr "" + +#: awx/api/views.py:122 +msgid "Your license does not allow use of workflows." +msgstr "" + +#: awx/api/views.py:130 awx/templates/rest_framework/api.html:28 +msgid "REST API" +msgstr "" + +#: awx/api/views.py:137 awx/templates/rest_framework/api.html:4 +msgid "Ansible Tower REST API" +msgstr "" + +#: awx/api/views.py:153 +msgid "Version 1" +msgstr "" + +#: awx/api/views.py:204 +msgid "Ping" +msgstr "" + +#: awx/api/views.py:233 awx/conf/apps.py:12 +msgid "Configuration" +msgstr "" + +#: awx/api/views.py:286 +msgid "Invalid license data" +msgstr "" + +#: awx/api/views.py:288 +msgid "Missing 'eula_accepted' property" +msgstr "" + +#: awx/api/views.py:292 +msgid "'eula_accepted' value is invalid" +msgstr "" + +#: awx/api/views.py:295 +msgid "'eula_accepted' must be True" +msgstr "" + +#: awx/api/views.py:302 +msgid "Invalid JSON" +msgstr "" + +#: awx/api/views.py:310 +msgid "Invalid License" +msgstr "" + +#: awx/api/views.py:320 +msgid "Invalid license" +msgstr "" + +#: awx/api/views.py:328 +#, python-format +msgid "Failed to remove license (%s)" +msgstr "" + +#: awx/api/views.py:333 +msgid "Dashboard" +msgstr "" + +#: awx/api/views.py:439 +msgid "Dashboard Jobs Graphs" +msgstr "" + +#: awx/api/views.py:475 +#, python-format +msgid "Unknown period \"%s\"" +msgstr "" + +#: awx/api/views.py:489 +msgid "Schedules" +msgstr "" + +#: awx/api/views.py:508 +msgid "Schedule Jobs List" +msgstr "" + +#: awx/api/views.py:727 +msgid "Your Tower license only permits a single organization to exist." +msgstr "" + +#: awx/api/views.py:952 awx/api/views.py:1311 +msgid "Role 'id' field is missing." +msgstr "" + +#: awx/api/views.py:958 awx/api/views.py:4129 +msgid "You cannot assign an Organization role as a child role for a Team." +msgstr "" + +#: awx/api/views.py:962 awx/api/views.py:4143 +msgid "You cannot grant system-level permissions to a team." 
+msgstr "" + +#: awx/api/views.py:969 awx/api/views.py:4135 +msgid "" +"You cannot grant credential access to a team when the Organization field " +"isn't set, or belongs to a different organization" +msgstr "" + +#: awx/api/views.py:1059 +msgid "Cannot delete project." +msgstr "" + +#: awx/api/views.py:1088 +msgid "Project Schedules" +msgstr "" + +#: awx/api/views.py:1192 awx/api/views.py:2285 awx/api/views.py:3298 +msgid "Cannot delete job resource when associated workflow job is running." +msgstr "" + +#: awx/api/views.py:1269 +msgid "Me" +msgstr "" + +#: awx/api/views.py:1315 awx/api/views.py:4084 +msgid "You may not perform any action with your own admin_role." +msgstr "" + +#: awx/api/views.py:1321 awx/api/views.py:4088 +msgid "You may not change the membership of a users admin_role" +msgstr "" + +#: awx/api/views.py:1326 awx/api/views.py:4093 +msgid "" +"You cannot grant credential access to a user not in the credentials' " +"organization" +msgstr "" + +#: awx/api/views.py:1330 awx/api/views.py:4097 +msgid "You cannot grant private credential access to another user" +msgstr "" + +#: awx/api/views.py:1428 +#, python-format +msgid "Cannot change %s." +msgstr "" + +#: awx/api/views.py:1434 +msgid "Cannot delete user." +msgstr "" + +#: awx/api/views.py:1583 +msgid "Cannot delete inventory script." +msgstr "" + +#: awx/api/views.py:1820 +msgid "Fact not found." +msgstr "" + +#: awx/api/views.py:2140 +msgid "Inventory Source List" +msgstr "" + +#: awx/api/views.py:2168 +msgid "Cannot delete inventory source." +msgstr "" + +#: awx/api/views.py:2176 +msgid "Inventory Source Schedules" +msgstr "" + +#: awx/api/views.py:2206 +msgid "Notification Templates can only be assigned when source is one of {}." +msgstr "" + +#: awx/api/views.py:2414 +msgid "Job Template Schedules" +msgstr "" + +#: awx/api/views.py:2434 awx/api/views.py:2450 +msgid "Your license does not allow adding surveys." +msgstr "" + +#: awx/api/views.py:2457 +msgid "'name' missing from survey spec." +msgstr "" + +#: awx/api/views.py:2459 +msgid "'description' missing from survey spec." +msgstr "" + +#: awx/api/views.py:2461 +msgid "'spec' missing from survey spec." +msgstr "" + +#: awx/api/views.py:2463 +msgid "'spec' must be a list of items." +msgstr "" + +#: awx/api/views.py:2465 +msgid "'spec' doesn't contain any items." +msgstr "" + +#: awx/api/views.py:2471 +#, python-format +msgid "Survey question %s is not a json object." +msgstr "" + +#: awx/api/views.py:2473 +#, python-format +msgid "'type' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2475 +#, python-format +msgid "'question_name' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2477 +#, python-format +msgid "'variable' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2479 +#, python-format +msgid "'variable' '%(item)s' duplicated in survey question %(survey)s." +msgstr "" + +#: awx/api/views.py:2484 +#, python-format +msgid "'required' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2569 +msgid "Maximum number of labels for {} reached." +msgstr "" + +#: awx/api/views.py:2698 +msgid "No matching host could be found!" +msgstr "" + +#: awx/api/views.py:2701 +msgid "Multiple hosts matched the request!" +msgstr "" + +#: awx/api/views.py:2706 +msgid "Cannot start automatically, user input required!" +msgstr "" + +#: awx/api/views.py:2713 +msgid "Host callback job already pending." +msgstr "" + +#: awx/api/views.py:2726 +msgid "Error starting job!" 
+msgstr "" + +#: awx/api/views.py:3055 +msgid "Workflow Job Template Schedules" +msgstr "" + +#: awx/api/views.py:3197 awx/api/views.py:3745 +msgid "Superuser privileges needed." +msgstr "" + +#: awx/api/views.py:3229 +msgid "System Job Template Schedules" +msgstr "" + +#: awx/api/views.py:3421 +msgid "Job Host Summaries List" +msgstr "" + +#: awx/api/views.py:3468 +msgid "Job Event Children List" +msgstr "" + +#: awx/api/views.py:3477 +msgid "Job Event Hosts List" +msgstr "" + +#: awx/api/views.py:3486 +msgid "Job Events List" +msgstr "" + +#: awx/api/views.py:3699 +msgid "Ad Hoc Command Events List" +msgstr "" + +#: awx/api/views.py:3897 +msgid "Error generating stdout download file: {}" +msgstr "" + +#: awx/api/views.py:3910 +#, python-format +msgid "Error generating stdout download file: %s" +msgstr "" + +#: awx/api/views.py:3955 +msgid "Delete not allowed while there are pending notifications" +msgstr "" + +#: awx/api/views.py:3962 +msgid "Notification Template Test" +msgstr "" + +#: awx/api/views.py:4078 +msgid "User 'id' field is missing." +msgstr "" + +#: awx/api/views.py:4121 +msgid "Team 'id' field is missing." +msgstr "" + +#: awx/conf/conf.py:20 +msgid "Bud Frogs" +msgstr "" + +#: awx/conf/conf.py:21 +msgid "Bunny" +msgstr "" + +#: awx/conf/conf.py:22 +msgid "Cheese" +msgstr "" + +#: awx/conf/conf.py:23 +msgid "Daemon" +msgstr "" + +#: awx/conf/conf.py:24 +msgid "Default Cow" +msgstr "" + +#: awx/conf/conf.py:25 +msgid "Dragon" +msgstr "" + +#: awx/conf/conf.py:26 +msgid "Elephant in Snake" +msgstr "" + +#: awx/conf/conf.py:27 +msgid "Elephant" +msgstr "" + +#: awx/conf/conf.py:28 +msgid "Eyes" +msgstr "" + +#: awx/conf/conf.py:29 +msgid "Hello Kitty" +msgstr "" + +#: awx/conf/conf.py:30 +msgid "Kitty" +msgstr "" + +#: awx/conf/conf.py:31 +msgid "Luke Koala" +msgstr "" + +#: awx/conf/conf.py:32 +msgid "Meow" +msgstr "" + +#: awx/conf/conf.py:33 +msgid "Milk" +msgstr "" + +#: awx/conf/conf.py:34 +msgid "Moofasa" +msgstr "" + +#: awx/conf/conf.py:35 +msgid "Moose" +msgstr "" + +#: awx/conf/conf.py:36 +msgid "Ren" +msgstr "" + +#: awx/conf/conf.py:37 +msgid "Sheep" +msgstr "" + +#: awx/conf/conf.py:38 +msgid "Small Cow" +msgstr "" + +#: awx/conf/conf.py:39 +msgid "Stegosaurus" +msgstr "" + +#: awx/conf/conf.py:40 +msgid "Stimpy" +msgstr "" + +#: awx/conf/conf.py:41 +msgid "Super Milker" +msgstr "" + +#: awx/conf/conf.py:42 +msgid "Three Eyes" +msgstr "" + +#: awx/conf/conf.py:43 +msgid "Turkey" +msgstr "" + +#: awx/conf/conf.py:44 +msgid "Turtle" +msgstr "" + +#: awx/conf/conf.py:45 +msgid "Tux" +msgstr "" + +#: awx/conf/conf.py:46 +msgid "Udder" +msgstr "" + +#: awx/conf/conf.py:47 +msgid "Vader Koala" +msgstr "" + +#: awx/conf/conf.py:48 +msgid "Vader" +msgstr "" + +#: awx/conf/conf.py:49 +msgid "WWW" +msgstr "" + +#: awx/conf/conf.py:52 +msgid "Cow Selection" +msgstr "" + +#: awx/conf/conf.py:53 +msgid "Select which cow to use with cowsay when running jobs." +msgstr "" + +#: awx/conf/conf.py:54 awx/conf/conf.py:75 +msgid "Cows" +msgstr "" + +#: awx/conf/conf.py:73 +msgid "Example Read-Only Setting" +msgstr "" + +#: awx/conf/conf.py:74 +msgid "Example setting that cannot be changed." +msgstr "" + +#: awx/conf/conf.py:93 +msgid "Example Setting" +msgstr "" + +#: awx/conf/conf.py:94 +msgid "Example setting which can be different for each user." 
+msgstr "" + +#: awx/conf/conf.py:95 awx/conf/registry.py:76 awx/conf/views.py:46 +msgid "User" +msgstr "" + +#: awx/conf/fields.py:38 +msgid "Enter a valid URL" +msgstr "" + +#: awx/conf/license.py:19 +msgid "Your Tower license does not allow that." +msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:41 +msgid "Only show which settings would be commented/migrated." +msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:48 +msgid "Skip over settings that would raise an error when commenting/migrating." +msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:55 +msgid "Skip commenting out settings in files." +msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:61 +msgid "Backup existing settings files with this suffix." +msgstr "" + +#: awx/conf/registry.py:64 awx/conf/tests/unit/test_registry.py:169 +#: awx/conf/tests/unit/test_registry.py:192 +#: awx/conf/tests/unit/test_registry.py:196 +#: awx/conf/tests/unit/test_registry.py:201 +#: awx/conf/tests/unit/test_registry.py:208 +msgid "All" +msgstr "" + +#: awx/conf/registry.py:65 awx/conf/tests/unit/test_registry.py:170 +#: awx/conf/tests/unit/test_registry.py:193 +#: awx/conf/tests/unit/test_registry.py:197 +#: awx/conf/tests/unit/test_registry.py:202 +#: awx/conf/tests/unit/test_registry.py:209 +msgid "Changed" +msgstr "" + +#: awx/conf/registry.py:77 +msgid "User-Defaults" +msgstr "" + +#: awx/conf/registry.py:133 +msgid "This value has been set manually in a settings file." +msgstr "" + +#: awx/conf/tests/unit/test_registry.py:46 +#: awx/conf/tests/unit/test_registry.py:56 +#: awx/conf/tests/unit/test_registry.py:72 +#: awx/conf/tests/unit/test_registry.py:87 +#: awx/conf/tests/unit/test_registry.py:100 +#: awx/conf/tests/unit/test_registry.py:106 +#: awx/conf/tests/unit/test_registry.py:126 +#: awx/conf/tests/unit/test_registry.py:140 +#: awx/conf/tests/unit/test_registry.py:146 +#: awx/conf/tests/unit/test_registry.py:159 +#: awx/conf/tests/unit/test_registry.py:171 +#: awx/conf/tests/unit/test_registry.py:180 +#: awx/conf/tests/unit/test_registry.py:198 +#: awx/conf/tests/unit/test_registry.py:210 +#: awx/conf/tests/unit/test_registry.py:219 +#: awx/conf/tests/unit/test_registry.py:225 +#: awx/conf/tests/unit/test_registry.py:237 +#: awx/conf/tests/unit/test_registry.py:245 +#: awx/conf/tests/unit/test_registry.py:288 +#: awx/conf/tests/unit/test_registry.py:306 +#: awx/conf/tests/unit/test_settings.py:68 +#: awx/conf/tests/unit/test_settings.py:86 +#: awx/conf/tests/unit/test_settings.py:101 +#: awx/conf/tests/unit/test_settings.py:116 +#: awx/conf/tests/unit/test_settings.py:132 +#: awx/conf/tests/unit/test_settings.py:145 +#: awx/conf/tests/unit/test_settings.py:162 +#: awx/conf/tests/unit/test_settings.py:178 +#: awx/conf/tests/unit/test_settings.py:189 +#: awx/conf/tests/unit/test_settings.py:205 +#: awx/conf/tests/unit/test_settings.py:226 +#: awx/conf/tests/unit/test_settings.py:249 +#: awx/conf/tests/unit/test_settings.py:263 +#: awx/conf/tests/unit/test_settings.py:287 +#: awx/conf/tests/unit/test_settings.py:307 +#: awx/conf/tests/unit/test_settings.py:324 +#: awx/conf/tests/unit/test_settings.py:337 +#: awx/conf/tests/unit/test_settings.py:351 +#: awx/conf/tests/unit/test_settings.py:387 awx/main/conf.py:19 +#: awx/main/conf.py:29 awx/main/conf.py:39 awx/main/conf.py:48 +#: awx/main/conf.py:60 awx/main/conf.py:78 awx/main/conf.py:103 +msgid "System" +msgstr "" + +#: awx/conf/tests/unit/test_registry.py:165 +#: 
awx/conf/tests/unit/test_registry.py:172 +#: awx/conf/tests/unit/test_registry.py:187 +#: awx/conf/tests/unit/test_registry.py:203 +#: awx/conf/tests/unit/test_registry.py:211 +msgid "OtherSystem" +msgstr "" + +#: awx/conf/views.py:38 +msgid "Setting Categories" +msgstr "" + +#: awx/conf/views.py:61 +msgid "Setting Detail" +msgstr "" + +#: awx/main/access.py:266 +#, python-format +msgid "Bad data found in related field %s." +msgstr "" + +#: awx/main/access.py:307 +msgid "License is missing." +msgstr "" + +#: awx/main/access.py:309 +msgid "License has expired." +msgstr "" + +#: awx/main/access.py:317 +#, python-format +msgid "License count of %s instances has been reached." +msgstr "" + +#: awx/main/access.py:319 +#, python-format +msgid "License count of %s instances has been exceeded." +msgstr "" + +#: awx/main/access.py:321 +msgid "Host count exceeds available instances." +msgstr "" + +#: awx/main/access.py:325 +#, python-format +msgid "Feature %s is not enabled in the active license." +msgstr "" + +#: awx/main/access.py:327 +msgid "Features not found in active license." +msgstr "" + +#: awx/main/access.py:525 awx/main/access.py:592 awx/main/access.py:717 +#: awx/main/access.py:987 awx/main/access.py:1222 awx/main/access.py:1619 +msgid "Resource is being used by running jobs" +msgstr "" + +#: awx/main/access.py:636 +msgid "Unable to change inventory on a host." +msgstr "" + +#: awx/main/access.py:653 awx/main/access.py:698 +msgid "Cannot associate two items from different inventories." +msgstr "" + +#: awx/main/access.py:686 +msgid "Unable to change inventory on a group." +msgstr "" + +#: awx/main/access.py:907 +msgid "Unable to change organization on a team." +msgstr "" + +#: awx/main/access.py:920 +msgid "The {} role cannot be assigned to a team" +msgstr "" + +#: awx/main/access.py:922 +msgid "The admin_role for a User cannot be assigned to a team" +msgstr "" + +#: awx/main/access.py:1692 +msgid "" +"You do not have permission to access the workflow job resources required " +"for relaunch." +msgstr "" + +#: awx/main/apps.py:9 +msgid "Main" +msgstr "" + +#: awx/main/conf.py:17 +msgid "Enable Activity Stream" +msgstr "" + +#: awx/main/conf.py:18 +msgid "Enable capturing activity for the Tower activity stream." +msgstr "" + +#: awx/main/conf.py:27 +msgid "Enable Activity Stream for Inventory Sync" +msgstr "" + +#: awx/main/conf.py:28 +msgid "" +"Enable capturing activity for the Tower activity stream when running " +"inventory sync." +msgstr "" + +#: awx/main/conf.py:37 +msgid "All Users Visible to Organization Admins" +msgstr "" + +#: awx/main/conf.py:38 +msgid "" +"Controls whether any Organization Admin can view all users, even those not " +"associated with their Organization." +msgstr "" + +#: awx/main/conf.py:46 +msgid "Enable Tower Administrator Alerts" +msgstr "" + +#: awx/main/conf.py:47 +msgid "" +"Allow Tower to email Admin users for system events that may require " +"attention." +msgstr "" + +#: awx/main/conf.py:57 +msgid "Base URL of the Tower host" +msgstr "" + +#: awx/main/conf.py:58 +msgid "" +"This setting is used by services like notifications to render a valid URL " +"to the Tower host." +msgstr "" + +#: awx/main/conf.py:67 +msgid "Remote Host Headers" +msgstr "" + +#: awx/main/conf.py:68 +msgid "" +"HTTP headers and meta keys to search to determine remote host name or IP. 
" +"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if " +"behind a reverse proxy.\n" +"\n" +"Note: The headers will be searched in order and the first found remote host " +"name or IP will be used.\n" +"\n" +"In the example below, 8.8.8.7 would be the chosen IP address.\n" +"X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n" +"Host: 127.0.0.1\n" +"REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR', 'REMOTE_HOST']" +msgstr "" + +#: awx/main/conf.py:99 +msgid "Tower License" +msgstr "" + +#: awx/main/conf.py:100 +msgid "" +"The license controls which features and functionality are enabled in Tower. " +"Use /api/v1/config/ to update or change the license." +msgstr "" + +#: awx/main/conf.py:110 +msgid "Ansible Modules Allowed for Ad Hoc Jobs" +msgstr "" + +#: awx/main/conf.py:111 +msgid "List of modules allowed to be used by ad-hoc jobs." +msgstr "" + +#: awx/main/conf.py:112 awx/main/conf.py:121 awx/main/conf.py:130 +#: awx/main/conf.py:140 awx/main/conf.py:150 awx/main/conf.py:160 +#: awx/main/conf.py:170 awx/main/conf.py:180 awx/main/conf.py:190 +#: awx/main/conf.py:202 awx/main/conf.py:214 awx/main/conf.py:226 +msgid "Jobs" +msgstr "" + +#: awx/main/conf.py:119 +msgid "Enable job isolation" +msgstr "" + +#: awx/main/conf.py:120 +msgid "" +"Isolates an Ansible job from protected parts of the Tower system to prevent " +"exposing sensitive information." +msgstr "" + +#: awx/main/conf.py:128 +msgid "Job isolation execution path" +msgstr "" + +#: awx/main/conf.py:129 +msgid "" +"Create temporary working directories for isolated jobs in this location." +msgstr "" + +#: awx/main/conf.py:138 +msgid "Paths to hide from isolated jobs" +msgstr "" + +#: awx/main/conf.py:139 +msgid "Additional paths to hide from isolated processes." +msgstr "" + +#: awx/main/conf.py:148 +msgid "Paths to expose to isolated jobs" +msgstr "" + +#: awx/main/conf.py:149 +msgid "" +"Whitelist of paths to expose to isolated jobs that would otherwise be hidden." +msgstr "" + +#: awx/main/conf.py:158 +msgid "Standard Output Maximum Display Size" +msgstr "" + +#: awx/main/conf.py:159 +msgid "" +"Maximum Size of Standard Output in bytes to display before requiring the " +"output be downloaded." +msgstr "" + +#: awx/main/conf.py:168 +msgid "Job Event Standard Output Maximum Display Size" +msgstr "" + +#: awx/main/conf.py:169 +msgid "" +"Maximum Size of Standard Output in bytes to display for a single job or ad " +"hoc command event. `stdout` will end with `…` when truncated." +msgstr "" + +#: awx/main/conf.py:178 +msgid "Maximum Scheduled Jobs" +msgstr "" + +#: awx/main/conf.py:179 +msgid "" +"Maximum number of the same job template that can be waiting to run when " +"launching from a schedule before no more are created." +msgstr "" + +#: awx/main/conf.py:188 +msgid "Ansible Callback Plugins" +msgstr "" + +#: awx/main/conf.py:189 +msgid "" +"List of paths to search for extra callback plugins to be used when running " +"jobs." +msgstr "" + +#: awx/main/conf.py:199 +msgid "Default Job Timeout" +msgstr "" + +#: awx/main/conf.py:200 +msgid "" +"Maximum time to allow jobs to run. Use a value of 0 to indicate that no " +"timeout should be imposed. A timeout set on an individual job template will " +"override this." +msgstr "" + +#: awx/main/conf.py:211 +msgid "Default Inventory Update Timeout" +msgstr "" + +#: awx/main/conf.py:212 +msgid "" +"Maximum time to allow inventory updates to run. Use a value of 0 to indicate " +"that no timeout should be imposed. 
A timeout set on an individual inventory " +"source will override this." +msgstr "" + +#: awx/main/conf.py:223 +msgid "Default Project Update Timeout" +msgstr "" + +#: awx/main/conf.py:224 +msgid "" +"Maximum time to allow project updates to run. Use a value of 0 to indicate " +"that no timeout should be imposed. A timeout set on an individual project " +"will override this." +msgstr "" + +#: awx/main/conf.py:234 +msgid "Logging Aggregator" +msgstr "" + +#: awx/main/conf.py:235 +msgid "Hostname/IP where external logs will be sent." +msgstr "" + +#: awx/main/conf.py:236 awx/main/conf.py:245 awx/main/conf.py:255 +#: awx/main/conf.py:264 awx/main/conf.py:275 awx/main/conf.py:290 +#: awx/main/conf.py:302 awx/main/conf.py:311 +msgid "Logging" +msgstr "" + +#: awx/main/conf.py:243 +msgid "Logging Aggregator Port" +msgstr "" + +#: awx/main/conf.py:244 +msgid "Port on Logging Aggregator to send logs to (if required)." +msgstr "" + +#: awx/main/conf.py:253 +msgid "Logging Aggregator Type" +msgstr "" + +#: awx/main/conf.py:254 +msgid "Format messages for the chosen log aggregator." +msgstr "" + +#: awx/main/conf.py:262 +msgid "Logging Aggregator Username" +msgstr "" + +#: awx/main/conf.py:263 +msgid "Username for external log aggregator (if required)." +msgstr "" + +#: awx/main/conf.py:273 +msgid "Logging Aggregator Password/Token" +msgstr "" + +#: awx/main/conf.py:274 +msgid "" +"Password or authentication token for external log aggregator (if required)." +msgstr "" + +#: awx/main/conf.py:283 +msgid "Loggers to send data to the log aggregator from" +msgstr "" + +#: awx/main/conf.py:284 +msgid "" +"List of loggers that will send HTTP logs to the collector; these can include " +"any or all of: \n" +"awx - Tower service logs\n" +"activity_stream - activity stream records\n" +"job_events - callback data from Ansible job events\n" +"system_tracking - facts gathered from scan jobs." +msgstr "" + +#: awx/main/conf.py:297 +msgid "Log System Tracking Facts Individually" +msgstr "" + +#: awx/main/conf.py:298 +msgid "" +"If set, system tracking facts will be sent for each package, service, " +"or other item found in a scan, allowing for greater search query granularity. " +"If unset, facts will be sent as a single dictionary, allowing for greater " +"efficiency in fact processing." +msgstr "" + +#: awx/main/conf.py:309 +msgid "Enable External Logging" +msgstr "" + +#: awx/main/conf.py:310 +msgid "Enable sending logs to external log aggregator." +msgstr "" + +#: awx/main/models/activity_stream.py:22 +msgid "Entity Created" +msgstr "" + +#: awx/main/models/activity_stream.py:23 +msgid "Entity Updated" +msgstr "" + +#: awx/main/models/activity_stream.py:24 +msgid "Entity Deleted" +msgstr "" + +#: awx/main/models/activity_stream.py:25 +msgid "Entity Associated with another Entity" +msgstr "" + +#: awx/main/models/activity_stream.py:26 +msgid "Entity was Disassociated from another Entity" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:96 +msgid "No valid inventory." +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:103 awx/main/models/jobs.py:160 +msgid "You must provide a machine / SSH credential." +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:114 +#: awx/main/models/ad_hoc_commands.py:122 +msgid "Invalid type for ad hoc command" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:117 +msgid "Unsupported module for ad hoc commands." +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:125 +#, python-format +msgid "No argument passed to %s module." 
+msgstr "" + +#: awx/main/models/ad_hoc_commands.py:222 awx/main/models/jobs.py:752 +msgid "Host Failed" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:223 awx/main/models/jobs.py:753 +msgid "Host OK" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:224 awx/main/models/jobs.py:756 +msgid "Host Unreachable" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:229 awx/main/models/jobs.py:755 +msgid "Host Skipped" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:239 awx/main/models/jobs.py:783 +msgid "Debug" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:240 awx/main/models/jobs.py:784 +msgid "Verbose" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:241 awx/main/models/jobs.py:785 +msgid "Deprecated" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:242 awx/main/models/jobs.py:786 +msgid "Warning" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:243 awx/main/models/jobs.py:787 +msgid "System Warning" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:244 awx/main/models/jobs.py:788 +#: awx/main/models/unified_jobs.py:64 +msgid "Error" +msgstr "" + +#: awx/main/models/base.py:45 awx/main/models/base.py:51 +#: awx/main/models/base.py:56 +msgid "Run" +msgstr "" + +#: awx/main/models/base.py:46 awx/main/models/base.py:52 +#: awx/main/models/base.py:57 +msgid "Check" +msgstr "" + +#: awx/main/models/base.py:47 +msgid "Scan" +msgstr "" + +#: awx/main/models/base.py:61 +msgid "Read Inventory" +msgstr "" + +#: awx/main/models/base.py:62 +msgid "Edit Inventory" +msgstr "" + +#: awx/main/models/base.py:63 +msgid "Administrate Inventory" +msgstr "" + +#: awx/main/models/base.py:64 +msgid "Deploy To Inventory" +msgstr "" + +#: awx/main/models/base.py:65 +msgid "Deploy To Inventory (Dry Run)" +msgstr "" + +#: awx/main/models/base.py:66 +msgid "Scan an Inventory" +msgstr "" + +#: awx/main/models/base.py:67 +msgid "Create a Job Template" +msgstr "" + +#: awx/main/models/credential.py:33 +msgid "Machine" +msgstr "" + +#: awx/main/models/credential.py:34 +msgid "Network" +msgstr "" + +#: awx/main/models/credential.py:35 +msgid "Source Control" +msgstr "" + +#: awx/main/models/credential.py:36 +msgid "Amazon Web Services" +msgstr "" + +#: awx/main/models/credential.py:37 +msgid "Rackspace" +msgstr "" + +#: awx/main/models/credential.py:38 awx/main/models/inventory.py:713 +msgid "VMware vCenter" +msgstr "" + +#: awx/main/models/credential.py:39 awx/main/models/inventory.py:714 +msgid "Red Hat Satellite 6" +msgstr "" + +#: awx/main/models/credential.py:40 awx/main/models/inventory.py:715 +msgid "Red Hat CloudForms" +msgstr "" + +#: awx/main/models/credential.py:41 awx/main/models/inventory.py:710 +msgid "Google Compute Engine" +msgstr "" + +#: awx/main/models/credential.py:42 awx/main/models/inventory.py:711 +msgid "Microsoft Azure Classic (deprecated)" +msgstr "" + +#: awx/main/models/credential.py:43 awx/main/models/inventory.py:712 +msgid "Microsoft Azure Resource Manager" +msgstr "" + +#: awx/main/models/credential.py:44 awx/main/models/inventory.py:716 +msgid "OpenStack" +msgstr "" + +#: awx/main/models/credential.py:48 +msgid "None" +msgstr "" + +#: awx/main/models/credential.py:49 +msgid "Sudo" +msgstr "" + +#: awx/main/models/credential.py:50 +msgid "Su" +msgstr "" + +#: awx/main/models/credential.py:51 +msgid "Pbrun" +msgstr "" + +#: awx/main/models/credential.py:52 +msgid "Pfexec" +msgstr "" + +#: awx/main/models/credential.py:53 +msgid "DZDO" +msgstr "" + +#: awx/main/models/credential.py:54 +msgid "Pmrun" +msgstr "" + +#: awx/main/models/credential.py:103 +msgid "Host" 
+msgstr "" + +#: awx/main/models/credential.py:104 +msgid "The hostname or IP address to use." +msgstr "" + +#: awx/main/models/credential.py:110 +msgid "Username" +msgstr "" + +#: awx/main/models/credential.py:111 +msgid "Username for this credential." +msgstr "" + +#: awx/main/models/credential.py:117 +msgid "Password" +msgstr "" + +#: awx/main/models/credential.py:118 +msgid "" +"Password for this credential (or \"ASK\" to prompt the user for machine " +"credentials)." +msgstr "" + +#: awx/main/models/credential.py:125 +msgid "Security Token" +msgstr "" + +#: awx/main/models/credential.py:126 +msgid "Security Token for this credential" +msgstr "" + +#: awx/main/models/credential.py:132 +msgid "Project" +msgstr "" + +#: awx/main/models/credential.py:133 +msgid "The identifier for the project." +msgstr "" + +#: awx/main/models/credential.py:139 +msgid "Domain" +msgstr "" + +#: awx/main/models/credential.py:140 +msgid "The identifier for the domain." +msgstr "" + +#: awx/main/models/credential.py:145 +msgid "SSH private key" +msgstr "" + +#: awx/main/models/credential.py:146 +msgid "RSA or DSA private key to be used instead of password." +msgstr "" + +#: awx/main/models/credential.py:152 +msgid "SSH key unlock" +msgstr "" + +#: awx/main/models/credential.py:153 +msgid "" +"Passphrase to unlock SSH private key if encrypted (or \"ASK\" to prompt the " +"user for machine credentials)." +msgstr "" + +#: awx/main/models/credential.py:161 +msgid "Privilege escalation method." +msgstr "" + +#: awx/main/models/credential.py:167 +msgid "Privilege escalation username." +msgstr "" + +#: awx/main/models/credential.py:173 +msgid "Password for privilege escalation method." +msgstr "" + +#: awx/main/models/credential.py:179 +msgid "Vault password (or \"ASK\" to prompt the user)." +msgstr "" + +#: awx/main/models/credential.py:183 +msgid "Whether to use the authorize mechanism." +msgstr "" + +#: awx/main/models/credential.py:189 +msgid "Password used by the authorize mechanism." +msgstr "" + +#: awx/main/models/credential.py:195 +msgid "Client Id or Application Id for the credential" +msgstr "" + +#: awx/main/models/credential.py:201 +msgid "Secret Token for this credential" +msgstr "" + +#: awx/main/models/credential.py:207 +msgid "Subscription identifier for this credential" +msgstr "" + +#: awx/main/models/credential.py:213 +msgid "Tenant identifier for this credential" +msgstr "" + +#: awx/main/models/credential.py:283 +msgid "Host required for VMware credential." +msgstr "" + +#: awx/main/models/credential.py:285 +msgid "Host required for OpenStack credential." +msgstr "" + +#: awx/main/models/credential.py:294 +msgid "Access key required for AWS credential." +msgstr "" + +#: awx/main/models/credential.py:296 +msgid "Username required for Rackspace credential." +msgstr "" + +#: awx/main/models/credential.py:299 +msgid "Username required for VMware credential." +msgstr "" + +#: awx/main/models/credential.py:301 +msgid "Username required for OpenStack credential." +msgstr "" + +#: awx/main/models/credential.py:307 +msgid "Secret key required for AWS credential." +msgstr "" + +#: awx/main/models/credential.py:309 +msgid "API key required for Rackspace credential." +msgstr "" + +#: awx/main/models/credential.py:311 +msgid "Password required for VMware credential." +msgstr "" + +#: awx/main/models/credential.py:313 +msgid "Password or API key required for OpenStack credential." +msgstr "" + +#: awx/main/models/credential.py:319 +msgid "Project name required for OpenStack credential." 
+msgstr "" + +#: awx/main/models/credential.py:346 +msgid "SSH key unlock must be set when SSH key is encrypted." +msgstr "" + +#: awx/main/models/credential.py:352 +msgid "Credential cannot be assigned to both a user and team." +msgstr "" + +#: awx/main/models/fact.py:21 +msgid "Host for the facts that the fact scan captured." +msgstr "" + +#: awx/main/models/fact.py:26 +msgid "Date and time of the corresponding fact scan gathering time." +msgstr "" + +#: awx/main/models/fact.py:29 +msgid "" +"Arbitrary JSON structure of module facts captured at timestamp for a single " +"host." +msgstr "" + +#: awx/main/models/inventory.py:45 +msgid "inventories" +msgstr "" + +#: awx/main/models/inventory.py:52 +msgid "Organization containing this inventory." +msgstr "" + +#: awx/main/models/inventory.py:58 +msgid "Inventory variables in JSON or YAML format." +msgstr "" + +#: awx/main/models/inventory.py:63 +msgid "Flag indicating whether any hosts in this inventory have failed." +msgstr "" + +#: awx/main/models/inventory.py:68 +msgid "Total number of hosts in this inventory." +msgstr "" + +#: awx/main/models/inventory.py:73 +msgid "Number of hosts in this inventory with active failures." +msgstr "" + +#: awx/main/models/inventory.py:78 +msgid "Total number of groups in this inventory." +msgstr "" + +#: awx/main/models/inventory.py:83 +msgid "Number of groups in this inventory with active failures." +msgstr "" + +#: awx/main/models/inventory.py:88 +msgid "" +"Flag indicating whether this inventory has any external inventory sources." +msgstr "" + +#: awx/main/models/inventory.py:93 +msgid "" +"Total number of external inventory sources configured within this inventory." +msgstr "" + +#: awx/main/models/inventory.py:98 +msgid "Number of external inventory sources in this inventory with failures." +msgstr "" + +#: awx/main/models/inventory.py:339 +msgid "Is this host online and available for running jobs?" +msgstr "" + +#: awx/main/models/inventory.py:345 +msgid "" +"The value used by the remote inventory source to uniquely identify the host" +msgstr "" + +#: awx/main/models/inventory.py:350 +msgid "Host variables in JSON or YAML format." +msgstr "" + +#: awx/main/models/inventory.py:372 +msgid "Flag indicating whether the last job failed for this host." +msgstr "" + +#: awx/main/models/inventory.py:377 +msgid "" +"Flag indicating whether this host was created/updated from any external " +"inventory sources." +msgstr "" + +#: awx/main/models/inventory.py:383 +msgid "Inventory source(s) that created or modified this host." +msgstr "" + +#: awx/main/models/inventory.py:474 +msgid "Group variables in JSON or YAML format." +msgstr "" + +#: awx/main/models/inventory.py:480 +msgid "Hosts associated directly with this group." +msgstr "" + +#: awx/main/models/inventory.py:485 +msgid "Total number of hosts directly or indirectly in this group." +msgstr "" + +#: awx/main/models/inventory.py:490 +msgid "Flag indicating whether this group has any hosts with active failures." +msgstr "" + +#: awx/main/models/inventory.py:495 +msgid "Number of hosts in this group with active failures." +msgstr "" + +#: awx/main/models/inventory.py:500 +msgid "Total number of child groups contained within this group." +msgstr "" + +#: awx/main/models/inventory.py:505 +msgid "Number of child groups within this group that have active failures." +msgstr "" + +#: awx/main/models/inventory.py:510 +msgid "" +"Flag indicating whether this group was created/updated from any external " +"inventory sources." 
+msgstr "" + +#: awx/main/models/inventory.py:516 +msgid "Inventory source(s) that created or modified this group." +msgstr "" + +#: awx/main/models/inventory.py:706 awx/main/models/projects.py:42 +#: awx/main/models/unified_jobs.py:411 +msgid "Manual" +msgstr "" + +#: awx/main/models/inventory.py:707 +msgid "Local File, Directory or Script" +msgstr "" + +#: awx/main/models/inventory.py:708 +msgid "Rackspace Cloud Servers" +msgstr "" + +#: awx/main/models/inventory.py:709 +msgid "Amazon EC2" +msgstr "" + +#: awx/main/models/inventory.py:717 +msgid "Custom Script" +msgstr "" + +#: awx/main/models/inventory.py:828 +msgid "Inventory source variables in YAML or JSON format." +msgstr "" + +#: awx/main/models/inventory.py:847 +msgid "" +"Comma-separated list of filter expressions (EC2 only). Hosts are imported " +"when ANY of the filters match." +msgstr "" + +#: awx/main/models/inventory.py:853 +msgid "Limit groups automatically created from inventory source (EC2 only)." +msgstr "" + +#: awx/main/models/inventory.py:857 +msgid "Overwrite local groups and hosts from remote inventory source." +msgstr "" + +#: awx/main/models/inventory.py:861 +msgid "Overwrite local variables from remote inventory source." +msgstr "" + +#: awx/main/models/inventory.py:893 +msgid "Availability Zone" +msgstr "" + +#: awx/main/models/inventory.py:894 +msgid "Image ID" +msgstr "" + +#: awx/main/models/inventory.py:895 +msgid "Instance ID" +msgstr "" + +#: awx/main/models/inventory.py:896 +msgid "Instance Type" +msgstr "" + +#: awx/main/models/inventory.py:897 +msgid "Key Name" +msgstr "" + +#: awx/main/models/inventory.py:898 +msgid "Region" +msgstr "" + +#: awx/main/models/inventory.py:899 +msgid "Security Group" +msgstr "" + +#: awx/main/models/inventory.py:900 +msgid "Tags" +msgstr "" + +#: awx/main/models/inventory.py:901 +msgid "VPC ID" +msgstr "" + +#: awx/main/models/inventory.py:902 +msgid "Tag None" +msgstr "" + +#: awx/main/models/inventory.py:973 +#, python-format +msgid "" +"Cloud-based inventory sources (such as %s) require credentials for the " +"matching cloud service." +msgstr "" + +#: awx/main/models/inventory.py:980 +msgid "Credential is required for a cloud source." +msgstr "" + +#: awx/main/models/inventory.py:1005 +#, python-format +msgid "Invalid %(source)s region: %(region)s" +msgstr "" + +#: awx/main/models/inventory.py:1030 +#, python-format +msgid "Invalid filter expression: %(filter)s" +msgstr "" + +#: awx/main/models/inventory.py:1048 +#, python-format +msgid "Invalid group by choice: %(choice)s" +msgstr "" + +#: awx/main/models/inventory.py:1195 +#, python-format +msgid "" +"Unable to configure this item for cloud sync. It is already managed by %s." +msgstr "" + +#: awx/main/models/inventory.py:1290 +msgid "Inventory script contents" +msgstr "" + +#: awx/main/models/inventory.py:1295 +msgid "Organization owning this inventory script" +msgstr "" + +#: awx/main/models/jobs.py:168 +msgid "You must provide a network credential." +msgstr "" + +#: awx/main/models/jobs.py:176 +msgid "" +"Must provide a credential for a cloud provider, such as Amazon Web Services " +"or Rackspace." +msgstr "" + +#: awx/main/models/jobs.py:268 +msgid "Job Template must provide 'inventory' or allow prompting for it." +msgstr "" + +#: awx/main/models/jobs.py:272 +msgid "Job Template must provide 'credential' or allow prompting for it." +msgstr "" + +#: awx/main/models/jobs.py:370 +msgid "Cannot override job_type to or from a scan job." 
+msgstr "" + +#: awx/main/models/jobs.py:373 +msgid "Inventory cannot be changed at runtime for scan jobs." +msgstr "" + +#: awx/main/models/jobs.py:439 awx/main/models/projects.py:243 +msgid "SCM Revision" +msgstr "" + +#: awx/main/models/jobs.py:440 +msgid "The SCM Revision from the Project used for this job, if available" +msgstr "" + +#: awx/main/models/jobs.py:448 +msgid "" +"The SCM Refresh task used to make sure the playbooks were available for the " +"job run" +msgstr "" + +#: awx/main/models/jobs.py:651 +msgid "job host summaries" +msgstr "" + +#: awx/main/models/jobs.py:754 +msgid "Host Failure" +msgstr "" + +#: awx/main/models/jobs.py:757 awx/main/models/jobs.py:771 +msgid "No Hosts Remaining" +msgstr "" + +#: awx/main/models/jobs.py:758 +msgid "Host Polling" +msgstr "" + +#: awx/main/models/jobs.py:759 +msgid "Host Async OK" +msgstr "" + +#: awx/main/models/jobs.py:760 +msgid "Host Async Failure" +msgstr "" + +#: awx/main/models/jobs.py:761 +msgid "Item OK" +msgstr "" + +#: awx/main/models/jobs.py:762 +msgid "Item Failed" +msgstr "" + +#: awx/main/models/jobs.py:763 +msgid "Item Skipped" +msgstr "" + +#: awx/main/models/jobs.py:764 +msgid "Host Retry" +msgstr "" + +#: awx/main/models/jobs.py:766 +msgid "File Difference" +msgstr "" + +#: awx/main/models/jobs.py:767 +msgid "Playbook Started" +msgstr "" + +#: awx/main/models/jobs.py:768 +msgid "Running Handlers" +msgstr "" + +#: awx/main/models/jobs.py:769 +msgid "Including File" +msgstr "" + +#: awx/main/models/jobs.py:770 +msgid "No Hosts Matched" +msgstr "" + +#: awx/main/models/jobs.py:772 +msgid "Task Started" +msgstr "" + +#: awx/main/models/jobs.py:774 +msgid "Variables Prompted" +msgstr "" + +#: awx/main/models/jobs.py:775 +msgid "Gathering Facts" +msgstr "" + +#: awx/main/models/jobs.py:776 +msgid "internal: on Import for Host" +msgstr "" + +#: awx/main/models/jobs.py:777 +msgid "internal: on Not Import for Host" +msgstr "" + +#: awx/main/models/jobs.py:778 +msgid "Play Started" +msgstr "" + +#: awx/main/models/jobs.py:779 +msgid "Playbook Complete" +msgstr "" + +#: awx/main/models/jobs.py:1189 +msgid "Remove jobs older than a certain number of days" +msgstr "" + +#: awx/main/models/jobs.py:1190 +msgid "Remove activity stream entries older than a certain number of days" +msgstr "" + +#: awx/main/models/jobs.py:1191 +msgid "Purge and/or reduce the granularity of system tracking data" +msgstr "" + +#: awx/main/models/label.py:29 +msgid "Organization this label belongs to." 
+msgstr "" + +#: awx/main/models/notifications.py:31 +msgid "Email" +msgstr "" + +#: awx/main/models/notifications.py:32 +msgid "Slack" +msgstr "" + +#: awx/main/models/notifications.py:33 +msgid "Twilio" +msgstr "" + +#: awx/main/models/notifications.py:34 +msgid "Pagerduty" +msgstr "" + +#: awx/main/models/notifications.py:35 +msgid "HipChat" +msgstr "" + +#: awx/main/models/notifications.py:36 +msgid "Webhook" +msgstr "" + +#: awx/main/models/notifications.py:37 +msgid "IRC" +msgstr "" + +#: awx/main/models/notifications.py:127 awx/main/models/unified_jobs.py:59 +msgid "Pending" +msgstr "" + +#: awx/main/models/notifications.py:128 awx/main/models/unified_jobs.py:62 +msgid "Successful" +msgstr "" + +#: awx/main/models/notifications.py:129 awx/main/models/unified_jobs.py:63 +msgid "Failed" +msgstr "" + +#: awx/main/models/organization.py:157 +msgid "Execute Commands on the Inventory" +msgstr "" + +#: awx/main/models/organization.py:211 +msgid "Token not invalidated" +msgstr "" + +#: awx/main/models/organization.py:212 +msgid "Token is expired" +msgstr "" + +#: awx/main/models/organization.py:213 +msgid "The maximum number of allowed sessions for this user has been exceeded." +msgstr "" + +#: awx/main/models/organization.py:216 +msgid "Invalid token" +msgstr "" + +#: awx/main/models/organization.py:233 +msgid "Reason the auth token was invalidated." +msgstr "" + +#: awx/main/models/organization.py:272 +msgid "Invalid reason specified" +msgstr "" + +#: awx/main/models/projects.py:43 +msgid "Git" +msgstr "" + +#: awx/main/models/projects.py:44 +msgid "Mercurial" +msgstr "" + +#: awx/main/models/projects.py:45 +msgid "Subversion" +msgstr "" + +#: awx/main/models/projects.py:71 +msgid "" +"Local path (relative to PROJECTS_ROOT) containing playbooks and related " +"files for this project." +msgstr "" + +#: awx/main/models/projects.py:80 +msgid "SCM Type" +msgstr "" + +#: awx/main/models/projects.py:81 +msgid "Specifies the source control system used to store the project." +msgstr "" + +#: awx/main/models/projects.py:87 +msgid "SCM URL" +msgstr "" + +#: awx/main/models/projects.py:88 +msgid "The location where the project is stored." +msgstr "" + +#: awx/main/models/projects.py:94 +msgid "SCM Branch" +msgstr "" + +#: awx/main/models/projects.py:95 +msgid "Specific branch, tag or commit to checkout." +msgstr "" + +#: awx/main/models/projects.py:99 +msgid "Discard any local changes before syncing the project." +msgstr "" + +#: awx/main/models/projects.py:103 +msgid "Delete the project before syncing." +msgstr "" + +#: awx/main/models/projects.py:116 +msgid "The amount of time to run before the task is canceled." +msgstr "" + +#: awx/main/models/projects.py:130 +msgid "Invalid SCM URL." +msgstr "" + +#: awx/main/models/projects.py:133 +msgid "SCM URL is required." +msgstr "" + +#: awx/main/models/projects.py:142 +msgid "Credential kind must be 'scm'." +msgstr "" + +#: awx/main/models/projects.py:157 +msgid "Invalid credential." +msgstr "" + +#: awx/main/models/projects.py:229 +msgid "Update the project when a job is launched that uses the project." +msgstr "" + +#: awx/main/models/projects.py:234 +msgid "" +"The number of seconds after the last project update ran that a newproject " +"update will be launched as a job dependency." 
+msgstr "" + +#: awx/main/models/projects.py:244 +msgid "The last revision fetched by a project update" +msgstr "" + +#: awx/main/models/projects.py:251 +msgid "Playbook Files" +msgstr "" + +#: awx/main/models/projects.py:252 +msgid "List of playbooks found in the project" +msgstr "" + +#: awx/main/models/rbac.py:37 +msgid "System Administrator" +msgstr "" + +#: awx/main/models/rbac.py:38 +msgid "System Auditor" +msgstr "" + +#: awx/main/models/rbac.py:39 +msgid "Ad Hoc" +msgstr "" + +#: awx/main/models/rbac.py:40 +msgid "Admin" +msgstr "" + +#: awx/main/models/rbac.py:41 +msgid "Auditor" +msgstr "" + +#: awx/main/models/rbac.py:42 +msgid "Execute" +msgstr "" + +#: awx/main/models/rbac.py:43 +msgid "Member" +msgstr "" + +#: awx/main/models/rbac.py:44 +msgid "Read" +msgstr "" + +#: awx/main/models/rbac.py:45 +msgid "Update" +msgstr "" + +#: awx/main/models/rbac.py:46 +msgid "Use" +msgstr "" + +#: awx/main/models/rbac.py:50 +msgid "Can manage all aspects of the system" +msgstr "" + +#: awx/main/models/rbac.py:51 +msgid "Can view all settings on the system" +msgstr "" + +#: awx/main/models/rbac.py:52 +msgid "May run ad hoc commands on an inventory" +msgstr "" + +#: awx/main/models/rbac.py:53 +#, python-format +msgid "Can manage all aspects of the %s" +msgstr "" + +#: awx/main/models/rbac.py:54 +#, python-format +msgid "Can view all settings for the %s" +msgstr "" + +#: awx/main/models/rbac.py:55 +#, python-format +msgid "May run the %s" +msgstr "" + +#: awx/main/models/rbac.py:56 +#, python-format +msgid "User is a member of the %s" +msgstr "" + +#: awx/main/models/rbac.py:57 +#, python-format +msgid "May view settings for the %s" +msgstr "" + +#: awx/main/models/rbac.py:58 +msgid "" +"May update project or inventory or group using the configured source update " +"system" +msgstr "" + +#: awx/main/models/rbac.py:59 +#, python-format +msgid "Can use the %s in a job template" +msgstr "" + +#: awx/main/models/rbac.py:123 +msgid "roles" +msgstr "" + +#: awx/main/models/rbac.py:435 +msgid "role_ancestors" +msgstr "" + +#: awx/main/models/schedules.py:69 +msgid "Enables processing of this schedule by Tower." +msgstr "" + +#: awx/main/models/schedules.py:75 +msgid "The first occurrence of the schedule occurs on or after this time." +msgstr "" + +#: awx/main/models/schedules.py:81 +msgid "" +"The last occurrence of the schedule occurs before this time, aftewards the " +"schedule expires." +msgstr "" + +#: awx/main/models/schedules.py:85 +msgid "A value representing the schedules iCal recurrence rule." +msgstr "" + +#: awx/main/models/schedules.py:91 +msgid "The next time that the scheduled action will run." 
+msgstr "" + +#: awx/main/models/unified_jobs.py:58 +msgid "New" +msgstr "" + +#: awx/main/models/unified_jobs.py:60 +msgid "Waiting" +msgstr "" + +#: awx/main/models/unified_jobs.py:61 +msgid "Running" +msgstr "" + +#: awx/main/models/unified_jobs.py:65 +msgid "Canceled" +msgstr "" + +#: awx/main/models/unified_jobs.py:69 +msgid "Never Updated" +msgstr "" + +#: awx/main/models/unified_jobs.py:73 awx/ui/templates/ui/index.html:85 +#: awx/ui/templates/ui/index.html.py:104 +msgid "OK" +msgstr "" + +#: awx/main/models/unified_jobs.py:74 +msgid "Missing" +msgstr "" + +#: awx/main/models/unified_jobs.py:78 +msgid "No External Source" +msgstr "" + +#: awx/main/models/unified_jobs.py:85 +msgid "Updating" +msgstr "" + +#: awx/main/models/unified_jobs.py:412 +msgid "Relaunch" +msgstr "" + +#: awx/main/models/unified_jobs.py:413 +msgid "Callback" +msgstr "" + +#: awx/main/models/unified_jobs.py:414 +msgid "Scheduled" +msgstr "" + +#: awx/main/models/unified_jobs.py:415 +msgid "Dependency" +msgstr "" + +#: awx/main/models/unified_jobs.py:416 +msgid "Workflow" +msgstr "" + +#: awx/main/models/unified_jobs.py:417 +msgid "Sync" +msgstr "" + +#: awx/main/models/unified_jobs.py:463 +msgid "The Tower node the job executed on." +msgstr "" + +#: awx/main/models/unified_jobs.py:489 +msgid "The date and time the job was queued for starting." +msgstr "" + +#: awx/main/models/unified_jobs.py:495 +msgid "The date and time the job finished execution." +msgstr "" + +#: awx/main/models/unified_jobs.py:501 +msgid "Elapsed time in seconds that the job ran." +msgstr "" + +#: awx/main/models/unified_jobs.py:523 +msgid "" +"A status field to indicate the state of the job if it wasn't able to run and " +"capture stdout" +msgstr "" + +#: awx/main/notifications/base.py:17 +#: awx/main/notifications/email_backend.py:28 +msgid "" +"{} #{} had status {} on Ansible Tower, view details at {}\n" +"\n" +msgstr "" + +#: awx/main/notifications/hipchat_backend.py:46 +msgid "Error sending messages: {}" +msgstr "" + +#: awx/main/notifications/hipchat_backend.py:48 +msgid "Error sending message to hipchat: {}" +msgstr "" + +#: awx/main/notifications/irc_backend.py:54 +msgid "Exception connecting to irc server: {}" +msgstr "" + +#: awx/main/notifications/pagerduty_backend.py:39 +msgid "Exception connecting to PagerDuty: {}" +msgstr "" + +#: awx/main/notifications/pagerduty_backend.py:48 +#: awx/main/notifications/slack_backend.py:52 +#: awx/main/notifications/twilio_backend.py:46 +msgid "Exception sending messages: {}" +msgstr "" + +#: awx/main/notifications/twilio_backend.py:36 +msgid "Exception connecting to Twilio: {}" +msgstr "" + +#: awx/main/notifications/webhook_backend.py:38 +#: awx/main/notifications/webhook_backend.py:40 +msgid "Error sending notification webhook: {}" +msgstr "" + +#: awx/main/scheduler/__init__.py:127 +msgid "" +"Job spawned from workflow could not start because it was not in the right " +"state or required manual credentials" +msgstr "" + +#: awx/main/scheduler/__init__.py:131 +msgid "" +"Job spawned from workflow could not start because it was missing a related " +"resource such as project or inventory" +msgstr "" + +#: awx/main/tasks.py:180 +msgid "Ansible Tower host usage over 90%" +msgstr "" + +#: awx/main/tasks.py:185 +msgid "Ansible Tower license will expire soon" +msgstr "" + +#: awx/main/tasks.py:249 +msgid "status_str must be either succeeded or failed" +msgstr "" + +#: awx/main/utils/common.py:89 +#, python-format +msgid "Unable to convert \"%s\" to boolean" +msgstr "" + +#: 
awx/main/utils/common.py:245 +#, python-format +msgid "Unsupported SCM type \"%s\"" +msgstr "" + +#: awx/main/utils/common.py:252 awx/main/utils/common.py:264 +#: awx/main/utils/common.py:283 +#, python-format +msgid "Invalid %s URL" +msgstr "" + +#: awx/main/utils/common.py:254 awx/main/utils/common.py:292 +#, python-format +msgid "Unsupported %s URL" +msgstr "" + +#: awx/main/utils/common.py:294 +#, python-format +msgid "Unsupported host \"%s\" for file:// URL" +msgstr "" + +#: awx/main/utils/common.py:296 +#, python-format +msgid "Host is required for %s URL" +msgstr "" + +#: awx/main/utils/common.py:314 +#, python-format +msgid "Username must be \"git\" for SSH access to %s." +msgstr "" + +#: awx/main/utils/common.py:320 +#, python-format +msgid "Username must be \"hg\" for SSH access to %s." +msgstr "" + +#: awx/main/validators.py:60 +#, python-format +msgid "Invalid certificate or key: %r..." +msgstr "" + +#: awx/main/validators.py:74 +#, python-format +msgid "Invalid private key: unsupported type \"%s\"" +msgstr "" + +#: awx/main/validators.py:78 +#, python-format +msgid "Unsupported PEM object type: \"%s\"" +msgstr "" + +#: awx/main/validators.py:103 +msgid "Invalid base64-encoded data" +msgstr "" + +#: awx/main/validators.py:122 +msgid "Exactly one private key is required." +msgstr "" + +#: awx/main/validators.py:124 +msgid "At least one private key is required." +msgstr "" + +#: awx/main/validators.py:126 +#, python-format +msgid "" +"At least %(min_keys)d private keys are required, only %(key_count)d provided." +msgstr "" + +#: awx/main/validators.py:129 +#, python-format +msgid "Only one private key is allowed, %(key_count)d provided." +msgstr "" + +#: awx/main/validators.py:131 +#, python-format +msgid "" +"No more than %(max_keys)d private keys are allowed, %(key_count)d provided." +msgstr "" + +#: awx/main/validators.py:136 +msgid "Exactly one certificate is required." +msgstr "" + +#: awx/main/validators.py:138 +msgid "At least one certificate is required." +msgstr "" + +#: awx/main/validators.py:140 +#, python-format +msgid "" +"At least %(min_certs)d certificates are required, only %(cert_count)d " +"provided." +msgstr "" + +#: awx/main/validators.py:143 +#, python-format +msgid "Only one certificate is allowed, %(cert_count)d provided." +msgstr "" + +#: awx/main/validators.py:145 +#, python-format +msgid "" +"No more than %(max_certs)d certificates are allowed, %(cert_count)d provided." +msgstr "" + +#: awx/main/views.py:20 +msgid "API Error" +msgstr "" + +#: awx/main/views.py:49 +msgid "Bad Request" +msgstr "" + +#: awx/main/views.py:50 +msgid "The request could not be understood by the server." +msgstr "" + +#: awx/main/views.py:57 +msgid "Forbidden" +msgstr "" + +#: awx/main/views.py:58 +msgid "You don't have permission to access the requested resource." +msgstr "" + +#: awx/main/views.py:65 +msgid "Not Found" +msgstr "" + +#: awx/main/views.py:66 +msgid "The requested resource could not be found." +msgstr "" + +#: awx/main/views.py:73 +msgid "Server Error" +msgstr "" + +#: awx/main/views.py:74 +msgid "A server error has occurred." +msgstr "" + +#: awx/settings/defaults.py:625 +msgid "Chicago" +msgstr "" + +#: awx/settings/defaults.py:626 +msgid "Dallas/Ft. 
Worth" +msgstr "" + +#: awx/settings/defaults.py:627 +msgid "Northern Virginia" +msgstr "" + +#: awx/settings/defaults.py:628 +msgid "London" +msgstr "" + +#: awx/settings/defaults.py:629 +msgid "Sydney" +msgstr "" + +#: awx/settings/defaults.py:630 +msgid "Hong Kong" +msgstr "" + +#: awx/settings/defaults.py:657 +msgid "US East (Northern Virginia)" +msgstr "" + +#: awx/settings/defaults.py:658 +msgid "US East (Ohio)" +msgstr "" + +#: awx/settings/defaults.py:659 +msgid "US West (Oregon)" +msgstr "" + +#: awx/settings/defaults.py:660 +msgid "US West (Northern California)" +msgstr "" + +#: awx/settings/defaults.py:661 +msgid "Canada (Central)" +msgstr "" + +#: awx/settings/defaults.py:662 +msgid "EU (Frankfurt)" +msgstr "" + +#: awx/settings/defaults.py:663 +msgid "EU (Ireland)" +msgstr "" + +#: awx/settings/defaults.py:664 +msgid "EU (London)" +msgstr "" + +#: awx/settings/defaults.py:665 +msgid "Asia Pacific (Singapore)" +msgstr "" + +#: awx/settings/defaults.py:666 +msgid "Asia Pacific (Sydney)" +msgstr "" + +#: awx/settings/defaults.py:667 +msgid "Asia Pacific (Tokyo)" +msgstr "" + +#: awx/settings/defaults.py:668 +msgid "Asia Pacific (Seoul)" +msgstr "" + +#: awx/settings/defaults.py:669 +msgid "Asia Pacific (Mumbai)" +msgstr "" + +#: awx/settings/defaults.py:670 +msgid "South America (Sao Paulo)" +msgstr "" + +#: awx/settings/defaults.py:671 +msgid "US West (GovCloud)" +msgstr "" + +#: awx/settings/defaults.py:672 +msgid "China (Beijing)" +msgstr "" + +#: awx/settings/defaults.py:721 +msgid "US East (B)" +msgstr "" + +#: awx/settings/defaults.py:722 +msgid "US East (C)" +msgstr "" + +#: awx/settings/defaults.py:723 +msgid "US East (D)" +msgstr "" + +#: awx/settings/defaults.py:724 +msgid "US Central (A)" +msgstr "" + +#: awx/settings/defaults.py:725 +msgid "US Central (B)" +msgstr "" + +#: awx/settings/defaults.py:726 +msgid "US Central (C)" +msgstr "" + +#: awx/settings/defaults.py:727 +msgid "US Central (F)" +msgstr "" + +#: awx/settings/defaults.py:728 +msgid "Europe West (B)" +msgstr "" + +#: awx/settings/defaults.py:729 +msgid "Europe West (C)" +msgstr "" + +#: awx/settings/defaults.py:730 +msgid "Europe West (D)" +msgstr "" + +#: awx/settings/defaults.py:731 +msgid "Asia East (A)" +msgstr "" + +#: awx/settings/defaults.py:732 +msgid "Asia East (B)" +msgstr "" + +#: awx/settings/defaults.py:733 +msgid "Asia East (C)" +msgstr "" + +#: awx/settings/defaults.py:757 +msgid "US Central" +msgstr "" + +#: awx/settings/defaults.py:758 +msgid "US East" +msgstr "" + +#: awx/settings/defaults.py:759 +msgid "US East 2" +msgstr "" + +#: awx/settings/defaults.py:760 +msgid "US North Central" +msgstr "" + +#: awx/settings/defaults.py:761 +msgid "US South Central" +msgstr "" + +#: awx/settings/defaults.py:762 +msgid "US West" +msgstr "" + +#: awx/settings/defaults.py:763 +msgid "Europe North" +msgstr "" + +#: awx/settings/defaults.py:764 +msgid "Europe West" +msgstr "" + +#: awx/settings/defaults.py:765 +msgid "Asia Pacific East" +msgstr "" + +#: awx/settings/defaults.py:766 +msgid "Asia Pacific Southeast" +msgstr "" + +#: awx/settings/defaults.py:767 +msgid "Japan East" +msgstr "" + +#: awx/settings/defaults.py:768 +msgid "Japan West" +msgstr "" + +#: awx/settings/defaults.py:769 +msgid "Brazil South" +msgstr "" + +#: awx/sso/apps.py:9 +msgid "Single Sign-On" +msgstr "" + +#: awx/sso/conf.py:27 +msgid "" +"Mapping to organization admins/users from social auth accounts. 
This " +"setting\n" +"controls which users are placed into which Tower organizations based on\n" +"their username and email address. Dictionary keys are organization names.\n" +"Organizations will be created if not present, if the license allows for\n" +"multiple organizations; otherwise the single default organization is used\n" +"regardless of the key. Values are dictionaries defining the options for\n" +"each organization's membership. For each organization it is possible to\n" +"specify which users are automatically users of the organization and also\n" +"which users can administer the organization. \n" +"\n" +"- admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated.\n" +" If True, all users using social auth will automatically be added as " +"admins\n" +" of the organization.\n" +" If False, no social auth users will be automatically added as admins of\n" +" the organization.\n" +" If a string or list of strings, specifies the usernames and emails for\n" +" users who will be added to the organization. Strings in the format\n" +" \"//\" will be interpreted as JavaScript regular " +"expressions and\n" +" may also be used instead of string literals; only \"i\" and \"m\" are " +"supported\n" +" for flags.\n" +"- remove_admins: True/False. Defaults to True.\n" +" If True, a user who does not match will be removed from the " +"organization's\n" +" administrative list.\n" +"- users: None, True/False, string or list of strings. Same rules apply as " +"for\n" +" admins.\n" +"- remove_users: True/False. Defaults to True. Same rules apply as for\n" +" remove_admins." +msgstr "" + +#: awx/sso/conf.py:76 +msgid "" +"Mapping of team members (users) from social auth accounts. Keys are team\n" +"names (will be created if not present). Values are dictionaries of options\n" +"for each team's membership, where each can contain the following " +"parameters:\n" +"\n" +"- organization: string. The name of the organization to which the team\n" +" belongs. The team will be created if the combination of organization and\n" +" team name does not exist. The organization will first be created if it\n" +" does not exist. If the license does not allow for multiple " +"organizations,\n" +" the team will always be assigned to the single default organization.\n" +"- users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all social auth users will be added/removed as team\n" +" members.\n" +" If a string or list of strings, specifies expressions used to match " +"users.\n" +" User will be added as a team member if the username or email matches.\n" +" Strings in the format \"//\" will be interpreted as " +"JavaScript\n" +" regular expressions and may also be used instead of string literals; only " +"\"i\"\n" +" and \"m\" are supported for flags.\n" +"- remove: True/False. Defaults to True. If True, a user who does not match\n" +" the rules above will be removed from the team." +msgstr "" + +#: awx/sso/conf.py:119 +msgid "Authentication Backends" +msgstr "" + +#: awx/sso/conf.py:120 +msgid "" +"List of authentication backends that are enabled based on license features " +"and other authentication settings." 
+msgstr "" + +#: awx/sso/conf.py:133 +msgid "Social Auth Organization Map" +msgstr "" + +#: awx/sso/conf.py:145 +msgid "Social Auth Team Map" +msgstr "" + +#: awx/sso/conf.py:157 +msgid "Social Auth User Fields" +msgstr "" + +#: awx/sso/conf.py:158 +msgid "" +"When set to an empty list `[]`, this setting prevents new user accounts from " +"being created. Only users who have previously logged in using social auth or " +"have a user account with a matching email address will be able to log in." +msgstr "" + +#: awx/sso/conf.py:176 +msgid "LDAP Server URI" +msgstr "" + +#: awx/sso/conf.py:177 +msgid "" +"URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-" +"SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be " +"specified by separating with spaces or commas. LDAP authentication is " +"disabled if this parameter is empty." +msgstr "" + +#: awx/sso/conf.py:181 awx/sso/conf.py:199 awx/sso/conf.py:211 +#: awx/sso/conf.py:223 awx/sso/conf.py:239 awx/sso/conf.py:259 +#: awx/sso/conf.py:281 awx/sso/conf.py:297 awx/sso/conf.py:316 +#: awx/sso/conf.py:333 awx/sso/conf.py:350 awx/sso/conf.py:366 +#: awx/sso/conf.py:383 awx/sso/conf.py:421 awx/sso/conf.py:462 +msgid "LDAP" +msgstr "" + +#: awx/sso/conf.py:193 +msgid "LDAP Bind DN" +msgstr "" + +#: awx/sso/conf.py:194 +msgid "" +"DN (Distinguished Name) of user to bind for all search queries. Normally in " +"the format \"CN=Some User,OU=Users,DC=example,DC=com\" but may also be " +"specified as \"DOMAIN\\username\" for Active Directory. This is the system " +"user account we will use to log in to query LDAP for other user information." +msgstr "" + +#: awx/sso/conf.py:209 +msgid "LDAP Bind Password" +msgstr "" + +#: awx/sso/conf.py:210 +msgid "Password used to bind LDAP user account." +msgstr "" + +#: awx/sso/conf.py:221 +msgid "LDAP Start TLS" +msgstr "" + +#: awx/sso/conf.py:222 +msgid "Whether to enable TLS when the LDAP connection is not using SSL." +msgstr "" + +#: awx/sso/conf.py:232 +msgid "LDAP Connection Options" +msgstr "" + +#: awx/sso/conf.py:233 +msgid "" +"Additional options to set for the LDAP connection. LDAP referrals are " +"disabled by default (to prevent certain LDAP queries from hanging with AD). " +"Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://" +"www.python-ldap.org/doc/html/ldap.html#options for possible options and " +"values that can be set." +msgstr "" + +#: awx/sso/conf.py:252 +msgid "LDAP User Search" +msgstr "" + +#: awx/sso/conf.py:253 +msgid "" +"LDAP search query to find users. Any user that matches the given pattern " +"will be able to log in to Tower. The user should also be mapped into a " +"Tower organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). " +"If multiple search queries need to be supported, use of \"LDAPSearchUnion\" " +"is possible. See python-ldap documentation as linked at the top of this section." +msgstr "" + +#: awx/sso/conf.py:275 +msgid "LDAP User DN Template" +msgstr "" + +#: awx/sso/conf.py:276 +msgid "" +"Alternative to user search, if user DNs are all of the same format. This " +"approach will be more efficient for user lookups than searching if it is " +"usable in your organizational environment. If this setting has a value it " +"will be used instead of AUTH_LDAP_USER_SEARCH." 
+msgstr "" + +#: awx/sso/conf.py:291 +msgid "LDAP User Attribute Map" +msgstr "" + +#: awx/sso/conf.py:292 +msgid "" +"Mapping of LDAP user schema to Tower API user attributes (key is user " +"attribute name, value is LDAP attribute name). The default setting is valid " +"for ActiveDirectory but users with other LDAP configurations may need to " +"change the values (not the keys) of the dictionary/hash-table." +msgstr "" + +#: awx/sso/conf.py:311 +msgid "LDAP Group Search" +msgstr "" + +#: awx/sso/conf.py:312 +msgid "" +"Users in Tower are mapped to organizations based on their membership in LDAP " +"groups. This setting defines the LDAP search query to find groups. Note that " +"this, unlike the user search above, does not support LDAPSearchUnion." +msgstr "" + +#: awx/sso/conf.py:329 +msgid "LDAP Group Type" +msgstr "" + +#: awx/sso/conf.py:330 +msgid "" +"The group type may need to be changed based on the type of the LDAP server. " +"Values are listed at: http://pythonhosted.org/django-auth-ldap/groups." +"html#types-of-groups" +msgstr "" + +#: awx/sso/conf.py:345 +msgid "LDAP Require Group" +msgstr "" + +#: awx/sso/conf.py:346 +msgid "" +"Group DN required to log in. If specified, user must be a member of this " +"group to log in via LDAP. If not set, everyone in LDAP that matches the " +"user search will be able to log in via Tower. Only one require group is " +"supported." +msgstr "" + +#: awx/sso/conf.py:362 +msgid "LDAP Deny Group" +msgstr "" + +#: awx/sso/conf.py:363 +msgid "" +"Group DN denied from login. If specified, user will not be allowed to log " +"in if a member of this group. Only one deny group is supported." +msgstr "" + +#: awx/sso/conf.py:376 +msgid "LDAP User Flags By Group" +msgstr "" + +#: awx/sso/conf.py:377 +msgid "" +"User profile flags updated from group membership (key is user attribute " +"name, value is group DN). These are boolean fields that are matched based " +"on whether the user is a member of the given group. So far only " +"is_superuser is settable via this method. This flag is set both true and " +"false at login time based on current LDAP settings." +msgstr "" + +#: awx/sso/conf.py:395 +msgid "LDAP Organization Map" +msgstr "" + +#: awx/sso/conf.py:396 +msgid "" +"Mapping between organization admins/users and LDAP groups. This controls " +"what users are placed into what Tower organizations relative to their LDAP " +"group memberships. Keys are organization names. Organizations will be " +"created if not present. Values are dictionaries defining the options for " +"each organization's membership. For each organization it is possible to " +"specify what groups are automatically users of the organization and also " +"what groups can administer the organization.\n" +"\n" +" - admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated based on LDAP values.\n" +" If True, all users in LDAP will automatically be added as admins of the " +"organization.\n" +" If False, no LDAP users will be automatically added as admins of the " +"organization.\n" +" If a string or list of strings, specifies the group DN(s); users will be " +"added as admins of the organization if they match any of the specified " +"groups.\n" +" - remove_admins: True/False. Defaults to True.\n" +" If True, a user who is not a member of the given groups will be removed " +"from the organization's administrative list.\n" +" - users: None, True/False, string or list of strings. Same rules apply as " +"for admins.\n" +" - remove_users: True/False. Defaults to True. 
Same rules apply as for " +"remove_admins." +msgstr "" + +#: awx/sso/conf.py:444 +msgid "LDAP Team Map" +msgstr "" + +#: awx/sso/conf.py:445 +msgid "" +"Mapping between team members (users) and LDAP groups. Keys are team names " +"(will be created if not present). Values are dictionaries of options for " +"each team's membership, where each can contain the following parameters:\n" +"\n" +" - organization: string. The name of the organization to which the team " +"belongs. The team will be created if the combination of organization and " +"team name does not exist. The organization will first be created if it does " +"not exist.\n" +" - users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all LDAP users will be added/removed as team members.\n" +" If a string or list of strings, specifies the group DN(s). User will be " +"added as a team member if the user is a member of ANY of these groups.\n" +"- remove: True/False. Defaults to True. If True, a user who is not a member " +"of the given groups will be removed from the team." +msgstr "" + +#: awx/sso/conf.py:488 +msgid "RADIUS Server" +msgstr "" + +#: awx/sso/conf.py:489 +msgid "" +"Hostname/IP of RADIUS server. RADIUS authentication will be disabled if this " +"setting is empty." +msgstr "" + +#: awx/sso/conf.py:491 awx/sso/conf.py:505 awx/sso/conf.py:517 +msgid "RADIUS" +msgstr "" + +#: awx/sso/conf.py:503 +msgid "RADIUS Port" +msgstr "" + +#: awx/sso/conf.py:504 +msgid "Port of RADIUS server." +msgstr "" + +#: awx/sso/conf.py:515 +msgid "RADIUS Secret" +msgstr "" + +#: awx/sso/conf.py:516 +msgid "Shared secret for authenticating to RADIUS server." +msgstr "" + +#: awx/sso/conf.py:532 +msgid "Google OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:533 +msgid "" +"Create a project at https://console.developers.google.com/ to obtain an " +"OAuth2 key and secret for a web application. Ensure that the Google+ API is " +"enabled. Provide this URL as the callback URL for your application." +msgstr "" + +#: awx/sso/conf.py:537 awx/sso/conf.py:548 awx/sso/conf.py:559 +#: awx/sso/conf.py:572 awx/sso/conf.py:586 awx/sso/conf.py:598 +#: awx/sso/conf.py:610 +msgid "Google OAuth2" +msgstr "" + +#: awx/sso/conf.py:546 +msgid "Google OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:547 +msgid "" +"The OAuth2 key from your web application at https://console.developers." +"google.com/." +msgstr "" + +#: awx/sso/conf.py:557 +msgid "Google OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:558 +msgid "" +"The OAuth2 secret from your web application at https://console.developers." +"google.com/." +msgstr "" + +#: awx/sso/conf.py:569 +msgid "Google OAuth2 Whitelisted Domains" +msgstr "" + +#: awx/sso/conf.py:570 +msgid "" +"Update this setting to restrict the domains that are allowed to log in " +"using Google OAuth2." +msgstr "" + +#: awx/sso/conf.py:581 +msgid "Google OAuth2 Extra Arguments" +msgstr "" + +#: awx/sso/conf.py:582 +msgid "" +"Extra arguments for Google OAuth2 login. When only allowing a single domain " +"to authenticate, set to `{\"hd\": \"yourdomain.com\"}` and Google will not " +"display any other accounts even if the user is logged in with multiple " +"Google accounts." 
+msgstr "" + +#: awx/sso/conf.py:596 +msgid "Google OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:608 +msgid "Google OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:624 +msgid "GitHub OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:625 +msgid "" +"Create a developer application at https://github.com/settings/developers to " +"obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this " +"URL as the callback URL for your application." +msgstr "" + +#: awx/sso/conf.py:629 awx/sso/conf.py:640 awx/sso/conf.py:650 +#: awx/sso/conf.py:662 awx/sso/conf.py:674 +msgid "GitHub OAuth2" +msgstr "" + +#: awx/sso/conf.py:638 +msgid "GitHub OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:639 +msgid "The OAuth2 key (Client ID) from your GitHub developer application." +msgstr "" + +#: awx/sso/conf.py:648 +msgid "GitHub OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:649 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub developer application." +msgstr "" + +#: awx/sso/conf.py:660 +msgid "GitHub OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:672 +msgid "GitHub OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:688 +msgid "GitHub Organization OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:689 awx/sso/conf.py:764 +msgid "" +"Create an organization-owned application at https://github.com/organizations/" +"/settings/applications and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). Provide this URL as the callback URL for your " +"application." +msgstr "" + +#: awx/sso/conf.py:693 awx/sso/conf.py:704 awx/sso/conf.py:714 +#: awx/sso/conf.py:726 awx/sso/conf.py:737 awx/sso/conf.py:749 +msgid "GitHub Organization OAuth2" +msgstr "" + +#: awx/sso/conf.py:702 +msgid "GitHub Organization OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:703 awx/sso/conf.py:778 +msgid "The OAuth2 key (Client ID) from your GitHub organization application." +msgstr "" + +#: awx/sso/conf.py:712 +msgid "GitHub Organization OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:713 awx/sso/conf.py:788 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub organization application." +msgstr "" + +#: awx/sso/conf.py:723 +msgid "GitHub Organization Name" +msgstr "" + +#: awx/sso/conf.py:724 +msgid "" +"The name of your GitHub organization, as used in your organization's URL: " +"https://github.com//." +msgstr "" + +#: awx/sso/conf.py:735 +msgid "GitHub Organization OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:747 +msgid "GitHub Organization OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:763 +msgid "GitHub Team OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:768 awx/sso/conf.py:779 awx/sso/conf.py:789 +#: awx/sso/conf.py:801 awx/sso/conf.py:812 awx/sso/conf.py:824 +msgid "GitHub Team OAuth2" +msgstr "" + +#: awx/sso/conf.py:777 +msgid "GitHub Team OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:787 +msgid "GitHub Team OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:798 +msgid "GitHub Team ID" +msgstr "" + +#: awx/sso/conf.py:799 +msgid "" +"Find the numeric team ID using the Github API: http://fabian-kostadinov." +"github.io/2015/01/16/how-to-find-a-github-team-id/." 
+msgstr "" + +#: awx/sso/conf.py:810 +msgid "GitHub Team OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:822 +msgid "GitHub Team OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:838 +msgid "Azure AD OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:839 +msgid "" +"Register an Azure AD application as described by https://msdn.microsoft.com/" +"en-us/library/azure/dn132599.aspx and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). Provide this URL as the callback URL for your " +"application." +msgstr "" + +#: awx/sso/conf.py:843 awx/sso/conf.py:854 awx/sso/conf.py:864 +#: awx/sso/conf.py:876 awx/sso/conf.py:888 +msgid "Azure AD OAuth2" +msgstr "" + +#: awx/sso/conf.py:852 +msgid "Azure AD OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:853 +msgid "The OAuth2 key (Client ID) from your Azure AD application." +msgstr "" + +#: awx/sso/conf.py:862 +msgid "Azure AD OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:863 +msgid "The OAuth2 secret (Client Secret) from your Azure AD application." +msgstr "" + +#: awx/sso/conf.py:874 +msgid "Azure AD OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:886 +msgid "Azure AD OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:907 +msgid "SAML Service Provider Callback URL" +msgstr "" + +#: awx/sso/conf.py:908 +msgid "" +"Register Tower as a service provider (SP) with each identity provider (IdP) " +"you have configured. Provide your SP Entity ID and this callback URL for " +"your application." +msgstr "" + +#: awx/sso/conf.py:911 awx/sso/conf.py:925 awx/sso/conf.py:938 +#: awx/sso/conf.py:952 awx/sso/conf.py:966 awx/sso/conf.py:984 +#: awx/sso/conf.py:1006 awx/sso/conf.py:1025 awx/sso/conf.py:1045 +#: awx/sso/conf.py:1079 awx/sso/conf.py:1092 +msgid "SAML" +msgstr "" + +#: awx/sso/conf.py:922 +msgid "SAML Service Provider Metadata URL" +msgstr "" + +#: awx/sso/conf.py:923 +msgid "" +"If your identity provider (IdP) allows uploading an XML metadata file, you " +"can download one from this URL." +msgstr "" + +#: awx/sso/conf.py:935 +msgid "SAML Service Provider Entity ID" +msgstr "" + +#: awx/sso/conf.py:936 +msgid "" +"The application-defined unique identifier used as the audience of the SAML " +"service provider (SP) configuration." +msgstr "" + +#: awx/sso/conf.py:949 +msgid "SAML Service Provider Public Certificate" +msgstr "" + +#: awx/sso/conf.py:950 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"certificate content here." +msgstr "" + +#: awx/sso/conf.py:963 +msgid "SAML Service Provider Private Key" +msgstr "" + +#: awx/sso/conf.py:964 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"private key content here." +msgstr "" + +#: awx/sso/conf.py:982 +msgid "SAML Service Provider Organization Info" +msgstr "" + +#: awx/sso/conf.py:983 +msgid "Configure this setting with information about your app." +msgstr "" + +#: awx/sso/conf.py:1004 +msgid "SAML Service Provider Technical Contact" +msgstr "" + +#: awx/sso/conf.py:1005 awx/sso/conf.py:1024 +msgid "Configure this setting with your contact information." +msgstr "" + +#: awx/sso/conf.py:1023 +msgid "SAML Service Provider Support Contact" +msgstr "" + +#: awx/sso/conf.py:1038 +msgid "SAML Enabled Identity Providers" +msgstr "" + +#: awx/sso/conf.py:1039 +msgid "" +"Configure the Entity ID, SSO URL and certificate for each identity provider " +"(IdP) in use. Multiple SAML IdPs are supported. 
Some IdPs may provide user " +"data using attribute names that differ from the default OIDs (https://github." +"com/omab/python-social-auth/blob/master/social/backends/saml.py#L16). " +"Attribute names may be overridden for each IdP." +msgstr "" + +#: awx/sso/conf.py:1077 +msgid "SAML Organization Map" +msgstr "" + +#: awx/sso/conf.py:1090 +msgid "SAML Team Map" +msgstr "" + +#: awx/sso/fields.py:123 +msgid "Invalid connection option(s): {invalid_options}." +msgstr "" + +#: awx/sso/fields.py:194 +msgid "Base" +msgstr "" + +#: awx/sso/fields.py:195 +msgid "One Level" +msgstr "" + +#: awx/sso/fields.py:196 +msgid "Subtree" +msgstr "" + +#: awx/sso/fields.py:214 +msgid "Expected a list of three items but got {length} instead." +msgstr "" + +#: awx/sso/fields.py:215 +msgid "Expected an instance of LDAPSearch but got {input_type} instead." +msgstr "" + +#: awx/sso/fields.py:251 +msgid "" +"Expected an instance of LDAPSearch or LDAPSearchUnion but got {input_type} " +"instead." +msgstr "" + +#: awx/sso/fields.py:278 +msgid "Invalid user attribute(s): {invalid_attrs}." +msgstr "" + +#: awx/sso/fields.py:295 +msgid "Expected an instance of LDAPGroupType but got {input_type} instead." +msgstr "" + +#: awx/sso/fields.py:323 +msgid "Invalid user flag: \"{invalid_flag}\"." +msgstr "" + +#: awx/sso/fields.py:339 awx/sso/fields.py:506 +msgid "" +"Expected None, True, False, a string or list of strings but got {input_type} " +"instead." +msgstr "" + +#: awx/sso/fields.py:375 +msgid "Missing key(s): {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:376 +msgid "Invalid key(s): {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:425 awx/sso/fields.py:542 +msgid "Invalid key(s) for organization map: {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:443 +msgid "Missing required key for team map: {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:444 awx/sso/fields.py:561 +msgid "Invalid key(s) for team map: {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:560 +msgid "Missing required key for team map: {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:578 +msgid "Missing required key(s) for org info record: {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:591 +msgid "Invalid language code(s) for org info: {invalid_lang_codes}." +msgstr "" + +#: awx/sso/fields.py:610 +msgid "Missing required key(s) for contact: {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:622 +msgid "Missing required key(s) for IdP: {missing_keys}." 
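+#. Illustrative sketch of one enabled-IdP entry of the shape described in the
+#. "SAML Enabled Identity Providers" help text above; the key names follow the
+#. python-social-auth SAML backend, and all values are hypothetical:
+#.   {"myidp": {"entity_id": "https://idp.example.com",
+#.              "url": "https://idp.example.com/sso",
+#.              "x509cert": "MIIC..."}}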
+msgstr "" + +#: awx/sso/pipeline.py:24 +msgid "An account cannot be found for {0}" +msgstr "" + +#: awx/sso/pipeline.py:30 +msgid "Your account is inactive" +msgstr "" + +#: awx/sso/validators.py:19 awx/sso/validators.py:44 +#, python-format +msgid "DN must include \"%%(user)s\" placeholder for username: %s" +msgstr "" + +#: awx/sso/validators.py:26 +#, python-format +msgid "Invalid DN: %s" +msgstr "" + +#: awx/sso/validators.py:56 +#, python-format +msgid "Invalid filter: %s" +msgstr "" + +#: awx/templates/error.html:4 awx/ui/templates/ui/index.html:8 +msgid "Ansible Tower" +msgstr "" + +#: awx/templates/rest_framework/api.html:39 +msgid "Ansible Tower API Guide" +msgstr "" + +#: awx/templates/rest_framework/api.html:40 +msgid "Back to Ansible Tower" +msgstr "" + +#: awx/templates/rest_framework/api.html:41 +msgid "Resize" +msgstr "" + +#: awx/templates/rest_framework/base.html:78 +#: awx/templates/rest_framework/base.html:92 +#, python-format +msgid "Make a GET request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:80 +msgid "Specify a format for the GET request" +msgstr "" + +#: awx/templates/rest_framework/base.html:86 +#, python-format +msgid "" +"Make a GET request on the %(name)s resource with the format set to `" +"%(format)s`" +msgstr "" + +#: awx/templates/rest_framework/base.html:100 +#, python-format +msgid "Make an OPTIONS request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:106 +#, python-format +msgid "Make a DELETE request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:113 +msgid "Filters" +msgstr "" + +#: awx/templates/rest_framework/base.html:172 +#: awx/templates/rest_framework/base.html:186 +#, python-format +msgid "Make a POST request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:216 +#: awx/templates/rest_framework/base.html:230 +#, python-format +msgid "Make a PUT request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:233 +#, python-format +msgid "Make a PATCH request on the %(name)s resource" +msgstr "" + +#: awx/ui/apps.py:9 awx/ui/conf.py:22 awx/ui/conf.py:38 awx/ui/conf.py:53 +msgid "UI" +msgstr "" + +#: awx/ui/conf.py:16 +msgid "Off" +msgstr "" + +#: awx/ui/conf.py:17 +msgid "Anonymous" +msgstr "" + +#: awx/ui/conf.py:18 +msgid "Detailed" +msgstr "" + +#: awx/ui/conf.py:20 +msgid "Analytics Tracking State" +msgstr "" + +#: awx/ui/conf.py:21 +msgid "Enable or Disable Analytics Tracking." +msgstr "" + +#: awx/ui/conf.py:31 +msgid "Custom Login Info" +msgstr "" + +#: awx/ui/conf.py:32 +msgid "" +"If needed, you can add specific information (such as a legal notice or a " +"disclaimer) to a text box in the login modal using this setting. Any content " +"added must be in plain text, as custom HTML or other markup languages are " +"not supported. If multiple paragraphs of text are needed, new lines " +"(paragraphs) must be escaped as `\\n` within the block of text." +msgstr "" + +#: awx/ui/conf.py:48 +msgid "Custom Logo" +msgstr "" + +#: awx/ui/conf.py:49 +msgid "" +"To set up a custom logo, provide a file that you create. For the custom logo " +"to look its best, use a .png file with a transparent background. GIF, PNG " +"and JPEG formats are supported." +msgstr "" + +#: awx/ui/fields.py:29 +msgid "" +"Invalid format for custom logo. Must be a data URL with a base64-encoded " +"GIF, PNG or JPEG image." +msgstr "" + +#: awx/ui/fields.py:30 +msgid "Invalid base64-encoded data in data URL." 
+msgstr "" + +#: awx/ui/templates/ui/index.html:49 +msgid "" +"Your session will expire in 60 seconds, would you like to continue?" +msgstr "" + +#: awx/ui/templates/ui/index.html:64 +msgid "CANCEL" +msgstr "" + +#: awx/ui/templates/ui/index.html:116 +msgid "Set how many days of data should be retained." +msgstr "" + +#: awx/ui/templates/ui/index.html:122 +msgid "" +"Please enter an integer that is not " +"negative that is lower than 9999." +msgstr "" + +#: awx/ui/templates/ui/index.html:127 +msgid "" +"For facts collected older than the time period specified, save one fact scan " +"(snapshot) per time window (frequency). For example, facts older than 30 " +"days are purged, while one weekly fact scan is kept.\n" +"
\n" +"
CAUTION: Setting both numerical variables to \"0\" " +"will delete all facts.\n" +"
\n" +"
" +msgstr "" + +#: awx/ui/templates/ui/index.html:136 +msgid "Select a time period after which to remove old facts" +msgstr "" + +#: awx/ui/templates/ui/index.html:150 +msgid "" +"Please enter an integer " +"that is not negative " +"that is lower than 9999." +msgstr "" + +#: awx/ui/templates/ui/index.html:155 +msgid "Select a frequency for snapshot retention" +msgstr "" + +#: awx/ui/templates/ui/index.html:169 +msgid "" +"Please enter an integer that is not negative that is " +"lower than 9999." +msgstr "" + +#: awx/ui/templates/ui/index.html:175 +msgid "working..." +msgstr "" diff --git a/awx/locale/en-us/LC_MESSAGES/django.po b/awx/locale/en-us/LC_MESSAGES/django.po new file mode 100644 index 0000000000..56771e6f92 --- /dev/null +++ b/awx/locale/en-us/LC_MESSAGES/django.po @@ -0,0 +1,3812 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2017-01-31 20:58+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: awx/api/authentication.py:67 +msgid "Invalid token header. No credentials provided." +msgstr "" + +#: awx/api/authentication.py:70 +msgid "Invalid token header. Token string should not contain spaces." +msgstr "" + +#: awx/api/authentication.py:105 +msgid "User inactive or deleted" +msgstr "" + +#: awx/api/authentication.py:161 +msgid "Invalid task token" +msgstr "" + +#: awx/api/conf.py:12 +msgid "Idle Time Force Log Out" +msgstr "" + +#: awx/api/conf.py:13 +msgid "" +"Number of seconds that a user is inactive before they will need to login " +"again." +msgstr "" + +#: awx/api/conf.py:14 awx/api/conf.py:24 awx/api/conf.py:33 +#: awx/sso/conf.py:124 awx/sso/conf.py:135 awx/sso/conf.py:147 +#: awx/sso/conf.py:162 +msgid "Authentication" +msgstr "" + +#: awx/api/conf.py:22 +msgid "Maximum number of simultaneous logins" +msgstr "" + +#: awx/api/conf.py:23 +msgid "" +"Maximum number of simultaneous logins a user may have. To disable enter -1." +msgstr "" + +#: awx/api/conf.py:31 +msgid "Enable HTTP Basic Auth" +msgstr "" + +#: awx/api/conf.py:32 +msgid "Enable HTTP Basic Auth for the API Browser." +msgstr "" + +#: awx/api/generics.py:466 +msgid "\"id\" is required to disassociate" +msgstr "" + +#: awx/api/metadata.py:50 +msgid "Database ID for this {}." +msgstr "" + +#: awx/api/metadata.py:51 +msgid "Name of this {}." +msgstr "" + +#: awx/api/metadata.py:52 +msgid "Optional description of this {}." +msgstr "" + +#: awx/api/metadata.py:53 +msgid "Data type for this {}." +msgstr "" + +#: awx/api/metadata.py:54 +msgid "URL for this {}." +msgstr "" + +#: awx/api/metadata.py:55 +msgid "Data structure with URLs of related resources." +msgstr "" + +#: awx/api/metadata.py:56 +msgid "Data structure with name/description for related resources." +msgstr "" + +#: awx/api/metadata.py:57 +msgid "Timestamp when this {} was created." +msgstr "" + +#: awx/api/metadata.py:58 +msgid "Timestamp when this {} was last modified." 
+msgstr "" + +#: awx/api/parsers.py:31 +#, python-format +msgid "JSON parse error - %s" +msgstr "" + +#: awx/api/serializers.py:250 +msgid "Playbook Run" +msgstr "" + +#: awx/api/serializers.py:251 +msgid "Command" +msgstr "" + +#: awx/api/serializers.py:252 +msgid "SCM Update" +msgstr "" + +#: awx/api/serializers.py:253 +msgid "Inventory Sync" +msgstr "" + +#: awx/api/serializers.py:254 +msgid "Management Job" +msgstr "" + +#: awx/api/serializers.py:255 +msgid "Workflow Job" +msgstr "" + +#: awx/api/serializers.py:256 +msgid "Workflow Template" +msgstr "" + +#: awx/api/serializers.py:658 awx/api/serializers.py:716 awx/api/views.py:3819 +#, python-format +msgid "" +"Standard Output too large to display (%(text_size)d bytes), only download " +"supported for sizes over %(supported_size)d bytes" +msgstr "" + +#: awx/api/serializers.py:731 +msgid "Write-only field used to change the password." +msgstr "" + +#: awx/api/serializers.py:733 +msgid "Set if the account is managed by an external service" +msgstr "" + +#: awx/api/serializers.py:757 +msgid "Password required for new User." +msgstr "" + +#: awx/api/serializers.py:841 +#, python-format +msgid "Unable to change %s on user managed by LDAP." +msgstr "" + +#: awx/api/serializers.py:1002 +msgid "Organization is missing" +msgstr "" + +#: awx/api/serializers.py:1006 +msgid "Update options must be set to false for manual projects." +msgstr "" + +#: awx/api/serializers.py:1012 +msgid "Array of playbooks available within this project." +msgstr "" + +#: awx/api/serializers.py:1194 +#, python-format +msgid "Invalid port specification: %s" +msgstr "" + +#: awx/api/serializers.py:1222 awx/main/validators.py:193 +msgid "Must be valid JSON or YAML." +msgstr "" + +#: awx/api/serializers.py:1279 +msgid "Invalid group name." +msgstr "" + +#: awx/api/serializers.py:1354 +msgid "" +"Script must begin with a hashbang sequence: i.e.... #!/usr/bin/env python" +msgstr "" + +#: awx/api/serializers.py:1407 +msgid "If 'source' is 'custom', 'source_script' must be provided." +msgstr "" + +#: awx/api/serializers.py:1411 +msgid "" +"The 'source_script' does not belong to the same organization as the " +"inventory." +msgstr "" + +#: awx/api/serializers.py:1413 +msgid "'source_script' doesn't exist." +msgstr "" + +#: awx/api/serializers.py:1772 +msgid "" +"Write-only field used to add user to owner role. If provided, do not give " +"either team or organization. Only valid for creation." +msgstr "" + +#: awx/api/serializers.py:1777 +msgid "" +"Write-only field used to add team to owner role. If provided, do not give " +"either user or organization. Only valid for creation." +msgstr "" + +#: awx/api/serializers.py:1782 +msgid "" +"Inherit permissions from organization roles. If provided on creation, do not " +"give either user or team." +msgstr "" + +#: awx/api/serializers.py:1798 +msgid "Missing 'user', 'team', or 'organization'." +msgstr "" + +#: awx/api/serializers.py:1811 +msgid "" +"Credential organization must be set and match before assigning to a team" +msgstr "" + +#: awx/api/serializers.py:1903 +msgid "This field is required." +msgstr "" + +#: awx/api/serializers.py:1905 awx/api/serializers.py:1907 +msgid "Playbook not found for project." +msgstr "" + +#: awx/api/serializers.py:1909 +msgid "Must select playbook for project." +msgstr "" + +#: awx/api/serializers.py:1975 +msgid "Must either set a default value or ask to prompt on launch." +msgstr "" + +#: awx/api/serializers.py:1978 awx/main/models/jobs.py:278 +msgid "Scan jobs must be assigned a fixed inventory." 
+msgstr "" + +#: awx/api/serializers.py:1980 awx/main/models/jobs.py:281 +msgid "Job types 'run' and 'check' must have assigned a project." +msgstr "" + +#: awx/api/serializers.py:1987 +msgid "Survey Enabled cannot be used with scan jobs." +msgstr "" + +#: awx/api/serializers.py:2047 +msgid "Invalid job template." +msgstr "" + +#: awx/api/serializers.py:2132 +msgid "Credential not found or deleted." +msgstr "" + +#: awx/api/serializers.py:2134 +msgid "Job Template Project is missing or undefined." +msgstr "" + +#: awx/api/serializers.py:2136 +msgid "Job Template Inventory is missing or undefined." +msgstr "" + +#: awx/api/serializers.py:2421 +#, python-format +msgid "%(job_type)s is not a valid job type. The choices are %(choices)s." +msgstr "" + +#: awx/api/serializers.py:2426 +msgid "Workflow job template is missing during creation." +msgstr "" + +#: awx/api/serializers.py:2431 +#, python-format +msgid "Cannot nest a %s inside a WorkflowJobTemplate" +msgstr "" + +#: awx/api/serializers.py:2669 +#, python-format +msgid "Job Template '%s' is missing or undefined." +msgstr "" + +#: awx/api/serializers.py:2695 +msgid "Must be a valid JSON or YAML dictionary." +msgstr "" + +#: awx/api/serializers.py:2837 +msgid "" +"Missing required fields for Notification Configuration: notification_type" +msgstr "" + +#: awx/api/serializers.py:2860 +msgid "No values specified for field '{}'" +msgstr "" + +#: awx/api/serializers.py:2865 +msgid "Missing required fields for Notification Configuration: {}." +msgstr "" + +#: awx/api/serializers.py:2868 +msgid "Configuration field '{}' incorrect type, expected {}." +msgstr "" + +#: awx/api/serializers.py:2921 +msgid "Inventory Source must be a cloud resource." +msgstr "" + +#: awx/api/serializers.py:2923 +msgid "Manual Project can not have a schedule set." +msgstr "" + +#: awx/api/serializers.py:2945 +msgid "DTSTART required in rrule. Value should match: DTSTART:YYYYMMDDTHHMMSSZ" +msgstr "" + +#: awx/api/serializers.py:2947 +msgid "Multiple DTSTART is not supported." +msgstr "" + +#: awx/api/serializers.py:2949 +msgid "RRULE require in rrule." +msgstr "" + +#: awx/api/serializers.py:2951 +msgid "Multiple RRULE is not supported." +msgstr "" + +#: awx/api/serializers.py:2953 +msgid "INTERVAL required in rrule." +msgstr "" + +#: awx/api/serializers.py:2955 +msgid "TZID is not supported." +msgstr "" + +#: awx/api/serializers.py:2957 +msgid "SECONDLY is not supported." +msgstr "" + +#: awx/api/serializers.py:2959 +msgid "Multiple BYMONTHDAYs not supported." +msgstr "" + +#: awx/api/serializers.py:2961 +msgid "Multiple BYMONTHs not supported." +msgstr "" + +#: awx/api/serializers.py:2963 +msgid "BYDAY with numeric prefix not supported." +msgstr "" + +#: awx/api/serializers.py:2965 +msgid "BYYEARDAY not supported." +msgstr "" + +#: awx/api/serializers.py:2967 +msgid "BYWEEKNO not supported." +msgstr "" + +#: awx/api/serializers.py:2971 +msgid "COUNT > 999 is unsupported." +msgstr "" + +#: awx/api/serializers.py:2975 +msgid "rrule parsing failed validation." +msgstr "" + +#: awx/api/serializers.py:2997 +msgid "" +"A summary of the new and changed values when an object is created, updated, " +"or deleted" +msgstr "" + +#: awx/api/serializers.py:2999 +msgid "" +"For create, update, and delete events this is the object type that was " +"affected. For associate and disassociate events this is the object type " +"associated or disassociated with object2." +msgstr "" + +#: awx/api/serializers.py:3002 +msgid "" +"Unpopulated for create, update, and delete events. 
For associate and " +"disassociate events this is the object type that object1 is being associated " +"with." +msgstr "" + +#: awx/api/serializers.py:3005 +msgid "The action taken with respect to the given object(s)." +msgstr "" + +#: awx/api/serializers.py:3112 +msgid "Unable to login with provided credentials." +msgstr "" + +#: awx/api/serializers.py:3114 +msgid "Must include \"username\" and \"password\"." +msgstr "" + +#: awx/api/views.py:101 +msgid "Your license does not allow use of the activity stream." +msgstr "" + +#: awx/api/views.py:111 +msgid "Your license does not permit use of system tracking." +msgstr "" + +#: awx/api/views.py:121 +msgid "Your license does not allow use of workflows." +msgstr "" + +#: awx/api/views.py:129 awx/templates/rest_framework/api.html:28 +msgid "REST API" +msgstr "" + +#: awx/api/views.py:136 awx/templates/rest_framework/api.html:4 +msgid "Ansible Tower REST API" +msgstr "" + +#: awx/api/views.py:152 +msgid "Version 1" +msgstr "" + +#: awx/api/views.py:203 +msgid "Ping" +msgstr "" + +#: awx/api/views.py:232 awx/conf/apps.py:12 +msgid "Configuration" +msgstr "" + +#: awx/api/views.py:285 +msgid "Invalid license data" +msgstr "" + +#: awx/api/views.py:287 +msgid "Missing 'eula_accepted' property" +msgstr "" + +#: awx/api/views.py:291 +msgid "'eula_accepted' value is invalid" +msgstr "" + +#: awx/api/views.py:294 +msgid "'eula_accepted' must be True" +msgstr "" + +#: awx/api/views.py:301 +msgid "Invalid JSON" +msgstr "" + +#: awx/api/views.py:309 +msgid "Invalid License" +msgstr "" + +#: awx/api/views.py:319 +msgid "Invalid license" +msgstr "" + +#: awx/api/views.py:327 +#, python-format +msgid "Failed to remove license (%s)" +msgstr "" + +#: awx/api/views.py:332 +msgid "Dashboard" +msgstr "" + +#: awx/api/views.py:438 +msgid "Dashboard Jobs Graphs" +msgstr "" + +#: awx/api/views.py:474 +#, python-format +msgid "Unknown period \"%s\"" +msgstr "" + +#: awx/api/views.py:488 +msgid "Schedules" +msgstr "" + +#: awx/api/views.py:507 +msgid "Schedule Jobs List" +msgstr "" + +#: awx/api/views.py:717 +msgid "Your Tower license only permits a single organization to exist." +msgstr "" + +#: awx/api/views.py:942 awx/api/views.py:1301 +msgid "Role 'id' field is missing." +msgstr "" + +#: awx/api/views.py:948 awx/api/views.py:4106 +msgid "You cannot assign an Organization role as a child role for a Team." +msgstr "" + +#: awx/api/views.py:952 awx/api/views.py:4120 +msgid "You cannot grant system-level permissions to a team." +msgstr "" + +#: awx/api/views.py:959 awx/api/views.py:4112 +msgid "" +"You cannot grant credential access to a team when the Organization field " +"isn't set, or belongs to a different organization" +msgstr "" + +#: awx/api/views.py:1049 +msgid "Cannot delete project." +msgstr "" + +#: awx/api/views.py:1078 +msgid "Project Schedules" +msgstr "" + +#: awx/api/views.py:1182 awx/api/views.py:2273 awx/api/views.py:3286 +msgid "Cannot delete job resource when associated workflow job is running." +msgstr "" + +#: awx/api/views.py:1259 +msgid "Me" +msgstr "" + +#: awx/api/views.py:1305 awx/api/views.py:4061 +msgid "You may not perform any action with your own admin_role." 
+msgstr "" + +#: awx/api/views.py:1311 awx/api/views.py:4065 +msgid "You may not change the membership of a users admin_role" +msgstr "" + +#: awx/api/views.py:1316 awx/api/views.py:4070 +msgid "" +"You cannot grant credential access to a user not in the credentials' " +"organization" +msgstr "" + +#: awx/api/views.py:1320 awx/api/views.py:4074 +msgid "You cannot grant private credential access to another user" +msgstr "" + +#: awx/api/views.py:1418 +#, python-format +msgid "Cannot change %s." +msgstr "" + +#: awx/api/views.py:1424 +msgid "Cannot delete user." +msgstr "" + +#: awx/api/views.py:1572 +msgid "Cannot delete inventory script." +msgstr "" + +#: awx/api/views.py:1808 +msgid "Fact not found." +msgstr "" + +#: awx/api/views.py:2128 +msgid "Inventory Source List" +msgstr "" + +#: awx/api/views.py:2156 +msgid "Cannot delete inventory source." +msgstr "" + +#: awx/api/views.py:2164 +msgid "Inventory Source Schedules" +msgstr "" + +#: awx/api/views.py:2194 +msgid "Notification Templates can only be assigned when source is one of {}." +msgstr "" + +#: awx/api/views.py:2405 +msgid "Job Template Schedules" +msgstr "" + +#: awx/api/views.py:2425 awx/api/views.py:2441 +msgid "Your license does not allow adding surveys." +msgstr "" + +#: awx/api/views.py:2448 +msgid "'name' missing from survey spec." +msgstr "" + +#: awx/api/views.py:2450 +msgid "'description' missing from survey spec." +msgstr "" + +#: awx/api/views.py:2452 +msgid "'spec' missing from survey spec." +msgstr "" + +#: awx/api/views.py:2454 +msgid "'spec' must be a list of items." +msgstr "" + +#: awx/api/views.py:2456 +msgid "'spec' doesn't contain any items." +msgstr "" + +#: awx/api/views.py:2462 +#, python-format +msgid "Survey question %s is not a json object." +msgstr "" + +#: awx/api/views.py:2464 +#, python-format +msgid "'type' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2466 +#, python-format +msgid "'question_name' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2468 +#, python-format +msgid "'variable' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2470 +#, python-format +msgid "'variable' '%(item)s' duplicated in survey question %(survey)s." +msgstr "" + +#: awx/api/views.py:2475 +#, python-format +msgid "'required' missing from survey question %s." +msgstr "" + +#: awx/api/views.py:2686 +msgid "No matching host could be found!" +msgstr "" + +#: awx/api/views.py:2689 +msgid "Multiple hosts matched the request!" +msgstr "" + +#: awx/api/views.py:2694 +msgid "Cannot start automatically, user input required!" +msgstr "" + +#: awx/api/views.py:2701 +msgid "Host callback job already pending." +msgstr "" + +#: awx/api/views.py:2714 +msgid "Error starting job!" +msgstr "" + +#: awx/api/views.py:3043 +msgid "Workflow Job Template Schedules" +msgstr "" + +#: awx/api/views.py:3185 awx/api/views.py:3728 +msgid "Superuser privileges needed." 
+msgstr "" + +#: awx/api/views.py:3217 +msgid "System Job Template Schedules" +msgstr "" + +#: awx/api/views.py:3409 +msgid "Job Host Summaries List" +msgstr "" + +#: awx/api/views.py:3451 +msgid "Job Event Children List" +msgstr "" + +#: awx/api/views.py:3460 +msgid "Job Event Hosts List" +msgstr "" + +#: awx/api/views.py:3469 +msgid "Job Events List" +msgstr "" + +#: awx/api/views.py:3682 +msgid "Ad Hoc Command Events List" +msgstr "" + +#: awx/api/views.py:3874 +msgid "Error generating stdout download file: {}" +msgstr "" + +#: awx/api/views.py:3887 +#, python-format +msgid "Error generating stdout download file: %s" +msgstr "" + +#: awx/api/views.py:3932 +msgid "Delete not allowed while there are pending notifications" +msgstr "" + +#: awx/api/views.py:3939 +msgid "Notification Template Test" +msgstr "" + +#: awx/api/views.py:4055 +msgid "User 'id' field is missing." +msgstr "" + +#: awx/api/views.py:4098 +msgid "Team 'id' field is missing." +msgstr "" + +#: awx/conf/conf.py:20 +msgid "Bud Frogs" +msgstr "" + +#: awx/conf/conf.py:21 +msgid "Bunny" +msgstr "" + +#: awx/conf/conf.py:22 +msgid "Cheese" +msgstr "" + +#: awx/conf/conf.py:23 +msgid "Daemon" +msgstr "" + +#: awx/conf/conf.py:24 +msgid "Default Cow" +msgstr "" + +#: awx/conf/conf.py:25 +msgid "Dragon" +msgstr "" + +#: awx/conf/conf.py:26 +msgid "Elephant in Snake" +msgstr "" + +#: awx/conf/conf.py:27 +msgid "Elephant" +msgstr "" + +#: awx/conf/conf.py:28 +msgid "Eyes" +msgstr "" + +#: awx/conf/conf.py:29 +msgid "Hello Kitty" +msgstr "" + +#: awx/conf/conf.py:30 +msgid "Kitty" +msgstr "" + +#: awx/conf/conf.py:31 +msgid "Luke Koala" +msgstr "" + +#: awx/conf/conf.py:32 +msgid "Meow" +msgstr "" + +#: awx/conf/conf.py:33 +msgid "Milk" +msgstr "" + +#: awx/conf/conf.py:34 +msgid "Moofasa" +msgstr "" + +#: awx/conf/conf.py:35 +msgid "Moose" +msgstr "" + +#: awx/conf/conf.py:36 +msgid "Ren" +msgstr "" + +#: awx/conf/conf.py:37 +msgid "Sheep" +msgstr "" + +#: awx/conf/conf.py:38 +msgid "Small Cow" +msgstr "" + +#: awx/conf/conf.py:39 +msgid "Stegosaurus" +msgstr "" + +#: awx/conf/conf.py:40 +msgid "Stimpy" +msgstr "" + +#: awx/conf/conf.py:41 +msgid "Super Milker" +msgstr "" + +#: awx/conf/conf.py:42 +msgid "Three Eyes" +msgstr "" + +#: awx/conf/conf.py:43 +msgid "Turkey" +msgstr "" + +#: awx/conf/conf.py:44 +msgid "Turtle" +msgstr "" + +#: awx/conf/conf.py:45 +msgid "Tux" +msgstr "" + +#: awx/conf/conf.py:46 +msgid "Udder" +msgstr "" + +#: awx/conf/conf.py:47 +msgid "Vader Koala" +msgstr "" + +#: awx/conf/conf.py:48 +msgid "Vader" +msgstr "" + +#: awx/conf/conf.py:49 +msgid "WWW" +msgstr "" + +#: awx/conf/conf.py:52 +msgid "Cow Selection" +msgstr "" + +#: awx/conf/conf.py:53 +msgid "Select which cow to use with cowsay when running jobs." +msgstr "" + +#: awx/conf/conf.py:54 awx/conf/conf.py:75 +msgid "Cows" +msgstr "" + +#: awx/conf/conf.py:73 +msgid "Example Read-Only Setting" +msgstr "" + +#: awx/conf/conf.py:74 +msgid "Example setting that cannot be changed." +msgstr "" + +#: awx/conf/conf.py:93 +msgid "Example Setting" +msgstr "" + +#: awx/conf/conf.py:94 +msgid "Example setting which can be different for each user." +msgstr "" + +#: awx/conf/conf.py:95 awx/conf/registry.py:76 awx/conf/views.py:46 +msgid "User" +msgstr "" + +#: awx/conf/fields.py:38 +msgid "Enter a valid URL" +msgstr "" + +#: awx/conf/license.py:19 +msgid "Your Tower license does not allow that." +msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:41 +msgid "Only show which settings would be commented/migrated." 
+msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:48 +msgid "Skip over settings that would raise an error when commenting/migrating." +msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:55 +msgid "Skip commenting out settings in files." +msgstr "" + +#: awx/conf/management/commands/migrate_to_database_settings.py:61 +msgid "Backup existing settings files with this suffix." +msgstr "" + +#: awx/conf/registry.py:64 awx/conf/tests/unit/test_registry.py:169 +#: awx/conf/tests/unit/test_registry.py:192 +#: awx/conf/tests/unit/test_registry.py:196 +#: awx/conf/tests/unit/test_registry.py:201 +#: awx/conf/tests/unit/test_registry.py:208 +msgid "All" +msgstr "" + +#: awx/conf/registry.py:65 awx/conf/tests/unit/test_registry.py:170 +#: awx/conf/tests/unit/test_registry.py:193 +#: awx/conf/tests/unit/test_registry.py:197 +#: awx/conf/tests/unit/test_registry.py:202 +#: awx/conf/tests/unit/test_registry.py:209 +msgid "Changed" +msgstr "" + +#: awx/conf/registry.py:77 +msgid "User-Defaults" +msgstr "" + +#: awx/conf/tests/unit/test_registry.py:46 +#: awx/conf/tests/unit/test_registry.py:56 +#: awx/conf/tests/unit/test_registry.py:72 +#: awx/conf/tests/unit/test_registry.py:87 +#: awx/conf/tests/unit/test_registry.py:100 +#: awx/conf/tests/unit/test_registry.py:106 +#: awx/conf/tests/unit/test_registry.py:126 +#: awx/conf/tests/unit/test_registry.py:140 +#: awx/conf/tests/unit/test_registry.py:146 +#: awx/conf/tests/unit/test_registry.py:159 +#: awx/conf/tests/unit/test_registry.py:171 +#: awx/conf/tests/unit/test_registry.py:180 +#: awx/conf/tests/unit/test_registry.py:198 +#: awx/conf/tests/unit/test_registry.py:210 +#: awx/conf/tests/unit/test_registry.py:219 +#: awx/conf/tests/unit/test_registry.py:225 +#: awx/conf/tests/unit/test_registry.py:237 +#: awx/conf/tests/unit/test_registry.py:245 +#: awx/conf/tests/unit/test_registry.py:288 +#: awx/conf/tests/unit/test_registry.py:306 +#: awx/conf/tests/unit/test_settings.py:67 +#: awx/conf/tests/unit/test_settings.py:81 +#: awx/conf/tests/unit/test_settings.py:97 +#: awx/conf/tests/unit/test_settings.py:110 +#: awx/conf/tests/unit/test_settings.py:127 +#: awx/conf/tests/unit/test_settings.py:143 +#: awx/conf/tests/unit/test_settings.py:162 +#: awx/conf/tests/unit/test_settings.py:183 +#: awx/conf/tests/unit/test_settings.py:197 +#: awx/conf/tests/unit/test_settings.py:221 +#: awx/conf/tests/unit/test_settings.py:241 +#: awx/conf/tests/unit/test_settings.py:258 awx/main/conf.py:19 +#: awx/main/conf.py:29 awx/main/conf.py:39 awx/main/conf.py:48 +#: awx/main/conf.py:60 awx/main/conf.py:78 awx/main/conf.py:103 +msgid "System" +msgstr "" + +#: awx/conf/tests/unit/test_registry.py:165 +#: awx/conf/tests/unit/test_registry.py:172 +#: awx/conf/tests/unit/test_registry.py:187 +#: awx/conf/tests/unit/test_registry.py:203 +#: awx/conf/tests/unit/test_registry.py:211 +msgid "OtherSystem" +msgstr "" + +#: awx/conf/views.py:38 +msgid "Setting Categories" +msgstr "" + +#: awx/conf/views.py:61 +msgid "Setting Detail" +msgstr "" + +#: awx/main/access.py:255 +#, python-format +msgid "Bad data found in related field %s." +msgstr "" + +#: awx/main/access.py:296 +msgid "License is missing." +msgstr "" + +#: awx/main/access.py:298 +msgid "License has expired." +msgstr "" + +#: awx/main/access.py:306 +#, python-format +msgid "License count of %s instances has been reached." +msgstr "" + +#: awx/main/access.py:308 +#, python-format +msgid "License count of %s instances has been exceeded." 
+msgstr "" + +#: awx/main/access.py:310 +msgid "Host count exceeds available instances." +msgstr "" + +#: awx/main/access.py:314 +#, python-format +msgid "Feature %s is not enabled in the active license." +msgstr "" + +#: awx/main/access.py:316 +msgid "Features not found in active license." +msgstr "" + +#: awx/main/access.py:514 awx/main/access.py:581 awx/main/access.py:706 +#: awx/main/access.py:969 awx/main/access.py:1208 awx/main/access.py:1605 +msgid "Resource is being used by running jobs" +msgstr "" + +#: awx/main/access.py:625 +msgid "Unable to change inventory on a host." +msgstr "" + +#: awx/main/access.py:642 awx/main/access.py:687 +msgid "Cannot associate two items from different inventories." +msgstr "" + +#: awx/main/access.py:675 +msgid "Unable to change inventory on a group." +msgstr "" + +#: awx/main/access.py:889 +msgid "Unable to change organization on a team." +msgstr "" + +#: awx/main/access.py:902 +msgid "The {} role cannot be assigned to a team" +msgstr "" + +#: awx/main/access.py:904 +msgid "The admin_role for a User cannot be assigned to a team" +msgstr "" + +#: awx/main/access.py:1678 +msgid "" +"You do not have permission to the workflow job resources required for " +"relaunch." +msgstr "" + +#: awx/main/apps.py:9 +msgid "Main" +msgstr "" + +#: awx/main/conf.py:17 +msgid "Enable Activity Stream" +msgstr "" + +#: awx/main/conf.py:18 +msgid "Enable capturing activity for the Tower activity stream." +msgstr "" + +#: awx/main/conf.py:27 +msgid "Enable Activity Stream for Inventory Sync" +msgstr "" + +#: awx/main/conf.py:28 +msgid "" +"Enable capturing activity for the Tower activity stream when running " +"inventory sync." +msgstr "" + +#: awx/main/conf.py:37 +msgid "All Users Visible to Organization Admins" +msgstr "" + +#: awx/main/conf.py:38 +msgid "" +"Controls whether any Organization Admin can view all users, even those not " +"associated with their Organization." +msgstr "" + +#: awx/main/conf.py:46 +msgid "Enable Tower Administrator Alerts" +msgstr "" + +#: awx/main/conf.py:47 +msgid "" +"Allow Tower to email Admin users for system events that may require " +"attention." +msgstr "" + +#: awx/main/conf.py:57 +msgid "Base URL of the Tower host" +msgstr "" + +#: awx/main/conf.py:58 +msgid "" +"This setting is used by services like notifications to render a valid url to " +"the Tower host." +msgstr "" + +#: awx/main/conf.py:67 +msgid "Remote Host Headers" +msgstr "" + +#: awx/main/conf.py:68 +msgid "" +"HTTP headers and meta keys to search to determine remote host name or IP. " +"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if " +"behind a reverse proxy.\n" +"\n" +"Note: The headers will be searched in order and the first found remote host " +"name or IP will be used.\n" +"\n" +"In the below example 8.8.8.7 would be the chosen IP address.\n" +"X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n" +"Host: 127.0.0.1\n" +"REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR', 'REMOTE_HOST']" +msgstr "" + +#: awx/main/conf.py:99 +msgid "Tower License" +msgstr "" + +#: awx/main/conf.py:100 +msgid "" +"The license controls which features and functionality are enabled in Tower. " +"Use /api/v1/config/ to update or change the license." +msgstr "" + +#: awx/main/conf.py:110 +msgid "Ansible Modules Allowed for Ad Hoc Jobs" +msgstr "" + +#: awx/main/conf.py:111 +msgid "List of modules allowed to be used by ad-hoc jobs." 
+msgstr "" + +#: awx/main/conf.py:112 awx/main/conf.py:121 awx/main/conf.py:130 +#: awx/main/conf.py:140 awx/main/conf.py:150 awx/main/conf.py:160 +#: awx/main/conf.py:170 awx/main/conf.py:180 awx/main/conf.py:190 +#: awx/main/conf.py:202 awx/main/conf.py:214 awx/main/conf.py:226 +msgid "Jobs" +msgstr "" + +#: awx/main/conf.py:119 +msgid "Enable job isolation" +msgstr "" + +#: awx/main/conf.py:120 +msgid "" +"Isolates an Ansible job from protected parts of the Tower system to prevent " +"exposing sensitive information." +msgstr "" + +#: awx/main/conf.py:128 +msgid "Job isolation execution path" +msgstr "" + +#: awx/main/conf.py:129 +msgid "" +"Create temporary working directories for isolated jobs in this location." +msgstr "" + +#: awx/main/conf.py:138 +msgid "Paths to hide from isolated jobs" +msgstr "" + +#: awx/main/conf.py:139 +msgid "Additional paths to hide from isolated processes." +msgstr "" + +#: awx/main/conf.py:148 +msgid "Paths to expose to isolated jobs" +msgstr "" + +#: awx/main/conf.py:149 +msgid "" +"Whitelist of paths that would otherwise be hidden to expose to isolated jobs." +msgstr "" + +#: awx/main/conf.py:158 +msgid "Standard Output Maximum Display Size" +msgstr "" + +#: awx/main/conf.py:159 +msgid "" +"Maximum Size of Standard Output in bytes to display before requiring the " +"output be downloaded." +msgstr "" + +#: awx/main/conf.py:168 +msgid "Job Event Standard Output Maximum Display Size" +msgstr "" + +#: awx/main/conf.py:169 +msgid "" +"Maximum Size of Standard Output in bytes to display for a single job or ad " +"hoc command event. `stdout` will end with `…` when truncated." +msgstr "" + +#: awx/main/conf.py:178 +msgid "Maximum Scheduled Jobs" +msgstr "" + +#: awx/main/conf.py:179 +msgid "" +"Maximum number of the same job template that can be waiting to run when " +"launching from a schedule before no more are created." +msgstr "" + +#: awx/main/conf.py:188 +msgid "Ansible Callback Plugins" +msgstr "" + +#: awx/main/conf.py:189 +msgid "" +"List of paths to search for extra callback plugins to be used when running " +"jobs." +msgstr "" + +#: awx/main/conf.py:199 +msgid "Default Job Timeout" +msgstr "" + +#: awx/main/conf.py:200 +msgid "" +"Maximum time to allow jobs to run. Use value of 0 to indicate that no " +"timeout should be imposed. A timeout set on an individual job template will " +"override this." +msgstr "" + +#: awx/main/conf.py:211 +msgid "Default Inventory Update Timeout" +msgstr "" + +#: awx/main/conf.py:212 +msgid "" +"Maximum time to allow inventory updates to run. Use value of 0 to indicate " +"that no timeout should be imposed. A timeout set on an individual inventory " +"source will override this." +msgstr "" + +#: awx/main/conf.py:223 +msgid "Default Project Update Timeout" +msgstr "" + +#: awx/main/conf.py:224 +msgid "" +"Maximum time to allow project updates to run. Use value of 0 to indicate " +"that no timeout should be imposed. A timeout set on an individual project " +"will override this." +msgstr "" + +#: awx/main/conf.py:234 +msgid "Logging Aggregator" +msgstr "" + +#: awx/main/conf.py:235 +msgid "Hostname/IP where external logs will be sent to." +msgstr "" + +#: awx/main/conf.py:236 awx/main/conf.py:245 awx/main/conf.py:255 +#: awx/main/conf.py:264 awx/main/conf.py:274 awx/main/conf.py:288 +#: awx/main/conf.py:300 awx/main/conf.py:309 +msgid "Logging" +msgstr "" + +#: awx/main/conf.py:243 +msgid "Logging Aggregator Port" +msgstr "" + +#: awx/main/conf.py:244 +msgid "Port on Logging Aggregator to send logs to (if required)." 
+msgstr "" + +#: awx/main/conf.py:253 +msgid "Logging Aggregator Type" +msgstr "" + +#: awx/main/conf.py:254 +msgid "Format messages for the chosen log aggregator." +msgstr "" + +#: awx/main/conf.py:262 +msgid "Logging Aggregator Username" +msgstr "" + +#: awx/main/conf.py:263 +msgid "Username for external log aggregator (if required)." +msgstr "" + +#: awx/main/conf.py:272 +msgid "Logging Aggregator Password/Token" +msgstr "" + +#: awx/main/conf.py:273 +msgid "" +"Password or authentication token for external log aggregator (if required)." +msgstr "" + +#: awx/main/conf.py:281 +msgid "Loggers to send data to the log aggregator from" +msgstr "" + +#: awx/main/conf.py:282 +msgid "" +"List of loggers that will send HTTP logs to the collector, these can include " +"any or all of: \n" +"awx - Tower service logs\n" +"activity_stream - activity stream records\n" +"job_events - callback data from Ansible job events\n" +"system_tracking - facts gathered from scan jobs." +msgstr "" + +#: awx/main/conf.py:295 +msgid "Log System Tracking Facts Individually" +msgstr "" + +#: awx/main/conf.py:296 +msgid "" +"If set, system tracking facts will be sent for each package, service, " +"orother item found in a scan, allowing for greater search query granularity. " +"If unset, facts will be sent as a single dictionary, allowing for greater " +"efficiency in fact processing." +msgstr "" + +#: awx/main/conf.py:307 +msgid "Enable External Logging" +msgstr "" + +#: awx/main/conf.py:308 +msgid "Enable sending logs to external log aggregator." +msgstr "" + +#: awx/main/models/activity_stream.py:22 +msgid "Entity Created" +msgstr "" + +#: awx/main/models/activity_stream.py:23 +msgid "Entity Updated" +msgstr "" + +#: awx/main/models/activity_stream.py:24 +msgid "Entity Deleted" +msgstr "" + +#: awx/main/models/activity_stream.py:25 +msgid "Entity Associated with another Entity" +msgstr "" + +#: awx/main/models/activity_stream.py:26 +msgid "Entity was Disassociated with another Entity" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:96 +msgid "No valid inventory." +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:103 awx/main/models/jobs.py:161 +msgid "You must provide a machine / SSH credential." +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:114 +#: awx/main/models/ad_hoc_commands.py:122 +msgid "Invalid type for ad hoc command" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:117 +msgid "Unsupported module for ad hoc commands." +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:125 +#, python-format +msgid "No argument passed to %s module." 
+msgstr "" + +#: awx/main/models/ad_hoc_commands.py:222 awx/main/models/jobs.py:766 +msgid "Host Failed" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:223 awx/main/models/jobs.py:767 +msgid "Host OK" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:224 awx/main/models/jobs.py:770 +msgid "Host Unreachable" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:229 awx/main/models/jobs.py:769 +msgid "Host Skipped" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:239 awx/main/models/jobs.py:797 +msgid "Debug" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:240 awx/main/models/jobs.py:798 +msgid "Verbose" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:241 awx/main/models/jobs.py:799 +msgid "Deprecated" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:242 awx/main/models/jobs.py:800 +msgid "Warning" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:243 awx/main/models/jobs.py:801 +msgid "System Warning" +msgstr "" + +#: awx/main/models/ad_hoc_commands.py:244 awx/main/models/jobs.py:802 +#: awx/main/models/unified_jobs.py:64 +msgid "Error" +msgstr "" + +#: awx/main/models/base.py:45 awx/main/models/base.py:51 +#: awx/main/models/base.py:56 +msgid "Run" +msgstr "" + +#: awx/main/models/base.py:46 awx/main/models/base.py:52 +#: awx/main/models/base.py:57 +msgid "Check" +msgstr "" + +#: awx/main/models/base.py:47 +msgid "Scan" +msgstr "" + +#: awx/main/models/base.py:61 +msgid "Read Inventory" +msgstr "" + +#: awx/main/models/base.py:62 +msgid "Edit Inventory" +msgstr "" + +#: awx/main/models/base.py:63 +msgid "Administrate Inventory" +msgstr "" + +#: awx/main/models/base.py:64 +msgid "Deploy To Inventory" +msgstr "" + +#: awx/main/models/base.py:65 +msgid "Deploy To Inventory (Dry Run)" +msgstr "" + +#: awx/main/models/base.py:66 +msgid "Scan an Inventory" +msgstr "" + +#: awx/main/models/base.py:67 +msgid "Create a Job Template" +msgstr "" + +#: awx/main/models/credential.py:33 +msgid "Machine" +msgstr "" + +#: awx/main/models/credential.py:34 +msgid "Network" +msgstr "" + +#: awx/main/models/credential.py:35 +msgid "Source Control" +msgstr "" + +#: awx/main/models/credential.py:36 +msgid "Amazon Web Services" +msgstr "" + +#: awx/main/models/credential.py:37 +msgid "Rackspace" +msgstr "" + +#: awx/main/models/credential.py:38 awx/main/models/inventory.py:713 +msgid "VMware vCenter" +msgstr "" + +#: awx/main/models/credential.py:39 awx/main/models/inventory.py:714 +msgid "Red Hat Satellite 6" +msgstr "" + +#: awx/main/models/credential.py:40 awx/main/models/inventory.py:715 +msgid "Red Hat CloudForms" +msgstr "" + +#: awx/main/models/credential.py:41 awx/main/models/inventory.py:710 +msgid "Google Compute Engine" +msgstr "" + +#: awx/main/models/credential.py:42 awx/main/models/inventory.py:711 +msgid "Microsoft Azure Classic (deprecated)" +msgstr "" + +#: awx/main/models/credential.py:43 awx/main/models/inventory.py:712 +msgid "Microsoft Azure Resource Manager" +msgstr "" + +#: awx/main/models/credential.py:44 awx/main/models/inventory.py:716 +msgid "OpenStack" +msgstr "" + +#: awx/main/models/credential.py:48 +msgid "None" +msgstr "" + +#: awx/main/models/credential.py:49 +msgid "Sudo" +msgstr "" + +#: awx/main/models/credential.py:50 +msgid "Su" +msgstr "" + +#: awx/main/models/credential.py:51 +msgid "Pbrun" +msgstr "" + +#: awx/main/models/credential.py:52 +msgid "Pfexec" +msgstr "" + +#: awx/main/models/credential.py:53 +msgid "DZDO" +msgstr "" + +#: awx/main/models/credential.py:54 +msgid "Pmrun" +msgstr "" + +#: awx/main/models/credential.py:103 +msgid "Host" 
+msgstr "" + +#: awx/main/models/credential.py:104 +msgid "The hostname or IP address to use." +msgstr "" + +#: awx/main/models/credential.py:110 +msgid "Username" +msgstr "" + +#: awx/main/models/credential.py:111 +msgid "Username for this credential." +msgstr "" + +#: awx/main/models/credential.py:117 +msgid "Password" +msgstr "" + +#: awx/main/models/credential.py:118 +msgid "" +"Password for this credential (or \"ASK\" to prompt the user for machine " +"credentials)." +msgstr "" + +#: awx/main/models/credential.py:125 +msgid "Security Token" +msgstr "" + +#: awx/main/models/credential.py:126 +msgid "Security Token for this credential" +msgstr "" + +#: awx/main/models/credential.py:132 +msgid "Project" +msgstr "" + +#: awx/main/models/credential.py:133 +msgid "The identifier for the project." +msgstr "" + +#: awx/main/models/credential.py:139 +msgid "Domain" +msgstr "" + +#: awx/main/models/credential.py:140 +msgid "The identifier for the domain." +msgstr "" + +#: awx/main/models/credential.py:145 +msgid "SSH private key" +msgstr "" + +#: awx/main/models/credential.py:146 +msgid "RSA or DSA private key to be used instead of password." +msgstr "" + +#: awx/main/models/credential.py:152 +msgid "SSH key unlock" +msgstr "" + +#: awx/main/models/credential.py:153 +msgid "" +"Passphrase to unlock SSH private key if encrypted (or \"ASK\" to prompt the " +"user for machine credentials)." +msgstr "" + +#: awx/main/models/credential.py:161 +msgid "Privilege escalation method." +msgstr "" + +#: awx/main/models/credential.py:167 +msgid "Privilege escalation username." +msgstr "" + +#: awx/main/models/credential.py:173 +msgid "Password for privilege escalation method." +msgstr "" + +#: awx/main/models/credential.py:179 +msgid "Vault password (or \"ASK\" to prompt the user)." +msgstr "" + +#: awx/main/models/credential.py:183 +msgid "Whether to use the authorize mechanism." +msgstr "" + +#: awx/main/models/credential.py:189 +msgid "Password used by the authorize mechanism." +msgstr "" + +#: awx/main/models/credential.py:195 +msgid "Client Id or Application Id for the credential" +msgstr "" + +#: awx/main/models/credential.py:201 +msgid "Secret Token for this credential" +msgstr "" + +#: awx/main/models/credential.py:207 +msgid "Subscription identifier for this credential" +msgstr "" + +#: awx/main/models/credential.py:213 +msgid "Tenant identifier for this credential" +msgstr "" + +#: awx/main/models/credential.py:283 +msgid "Host required for VMware credential." +msgstr "" + +#: awx/main/models/credential.py:285 +msgid "Host required for OpenStack credential." +msgstr "" + +#: awx/main/models/credential.py:294 +msgid "Access key required for AWS credential." +msgstr "" + +#: awx/main/models/credential.py:296 +msgid "Username required for Rackspace credential." +msgstr "" + +#: awx/main/models/credential.py:299 +msgid "Username required for VMware credential." +msgstr "" + +#: awx/main/models/credential.py:301 +msgid "Username required for OpenStack credential." +msgstr "" + +#: awx/main/models/credential.py:307 +msgid "Secret key required for AWS credential." +msgstr "" + +#: awx/main/models/credential.py:309 +msgid "API key required for Rackspace credential." +msgstr "" + +#: awx/main/models/credential.py:311 +msgid "Password required for VMware credential." +msgstr "" + +#: awx/main/models/credential.py:313 +msgid "Password or API key required for OpenStack credential." +msgstr "" + +#: awx/main/models/credential.py:319 +msgid "Project name required for OpenStack credential." 
+msgstr "" + +#: awx/main/models/credential.py:346 +msgid "SSH key unlock must be set when SSH key is encrypted." +msgstr "" + +#: awx/main/models/credential.py:352 +msgid "Credential cannot be assigned to both a user and team." +msgstr "" + +#: awx/main/models/fact.py:21 +msgid "Host for the facts that the fact scan captured." +msgstr "" + +#: awx/main/models/fact.py:26 +msgid "Date and time of the corresponding fact scan gathering time." +msgstr "" + +#: awx/main/models/fact.py:29 +msgid "" +"Arbitrary JSON structure of module facts captured at timestamp for a single " +"host." +msgstr "" + +#: awx/main/models/inventory.py:45 +msgid "inventories" +msgstr "" + +#: awx/main/models/inventory.py:52 +msgid "Organization containing this inventory." +msgstr "" + +#: awx/main/models/inventory.py:58 +msgid "Inventory variables in JSON or YAML format." +msgstr "" + +#: awx/main/models/inventory.py:63 +msgid "Flag indicating whether any hosts in this inventory have failed." +msgstr "" + +#: awx/main/models/inventory.py:68 +msgid "Total number of hosts in this inventory." +msgstr "" + +#: awx/main/models/inventory.py:73 +msgid "Number of hosts in this inventory with active failures." +msgstr "" + +#: awx/main/models/inventory.py:78 +msgid "Total number of groups in this inventory." +msgstr "" + +#: awx/main/models/inventory.py:83 +msgid "Number of groups in this inventory with active failures." +msgstr "" + +#: awx/main/models/inventory.py:88 +msgid "" +"Flag indicating whether this inventory has any external inventory sources." +msgstr "" + +#: awx/main/models/inventory.py:93 +msgid "" +"Total number of external inventory sources configured within this inventory." +msgstr "" + +#: awx/main/models/inventory.py:98 +msgid "Number of external inventory sources in this inventory with failures." +msgstr "" + +#: awx/main/models/inventory.py:339 +msgid "Is this host online and available for running jobs?" +msgstr "" + +#: awx/main/models/inventory.py:345 +msgid "" +"The value used by the remote inventory source to uniquely identify the host" +msgstr "" + +#: awx/main/models/inventory.py:350 +msgid "Host variables in JSON or YAML format." +msgstr "" + +#: awx/main/models/inventory.py:372 +msgid "Flag indicating whether the last job failed for this host." +msgstr "" + +#: awx/main/models/inventory.py:377 +msgid "" +"Flag indicating whether this host was created/updated from any external " +"inventory sources." +msgstr "" + +#: awx/main/models/inventory.py:383 +msgid "Inventory source(s) that created or modified this host." +msgstr "" + +#: awx/main/models/inventory.py:474 +msgid "Group variables in JSON or YAML format." +msgstr "" + +#: awx/main/models/inventory.py:480 +msgid "Hosts associated directly with this group." +msgstr "" + +#: awx/main/models/inventory.py:485 +msgid "Total number of hosts directly or indirectly in this group." +msgstr "" + +#: awx/main/models/inventory.py:490 +msgid "Flag indicating whether this group has any hosts with active failures." +msgstr "" + +#: awx/main/models/inventory.py:495 +msgid "Number of hosts in this group with active failures." +msgstr "" + +#: awx/main/models/inventory.py:500 +msgid "Total number of child groups contained within this group." +msgstr "" + +#: awx/main/models/inventory.py:505 +msgid "Number of child groups within this group that have active failures." +msgstr "" + +#: awx/main/models/inventory.py:510 +msgid "" +"Flag indicating whether this group was created/updated from any external " +"inventory sources." 
+msgstr "" + +#: awx/main/models/inventory.py:516 +msgid "Inventory source(s) that created or modified this group." +msgstr "" + +#: awx/main/models/inventory.py:706 awx/main/models/projects.py:42 +#: awx/main/models/unified_jobs.py:402 +msgid "Manual" +msgstr "" + +#: awx/main/models/inventory.py:707 +msgid "Local File, Directory or Script" +msgstr "" + +#: awx/main/models/inventory.py:708 +msgid "Rackspace Cloud Servers" +msgstr "" + +#: awx/main/models/inventory.py:709 +msgid "Amazon EC2" +msgstr "" + +#: awx/main/models/inventory.py:717 +msgid "Custom Script" +msgstr "" + +#: awx/main/models/inventory.py:828 +msgid "Inventory source variables in YAML or JSON format." +msgstr "" + +#: awx/main/models/inventory.py:847 +msgid "" +"Comma-separated list of filter expressions (EC2 only). Hosts are imported " +"when ANY of the filters match." +msgstr "" + +#: awx/main/models/inventory.py:853 +msgid "Limit groups automatically created from inventory source (EC2 only)." +msgstr "" + +#: awx/main/models/inventory.py:857 +msgid "Overwrite local groups and hosts from remote inventory source." +msgstr "" + +#: awx/main/models/inventory.py:861 +msgid "Overwrite local variables from remote inventory source." +msgstr "" + +#: awx/main/models/inventory.py:893 +msgid "Availability Zone" +msgstr "" + +#: awx/main/models/inventory.py:894 +msgid "Image ID" +msgstr "" + +#: awx/main/models/inventory.py:895 +msgid "Instance ID" +msgstr "" + +#: awx/main/models/inventory.py:896 +msgid "Instance Type" +msgstr "" + +#: awx/main/models/inventory.py:897 +msgid "Key Name" +msgstr "" + +#: awx/main/models/inventory.py:898 +msgid "Region" +msgstr "" + +#: awx/main/models/inventory.py:899 +msgid "Security Group" +msgstr "" + +#: awx/main/models/inventory.py:900 +msgid "Tags" +msgstr "" + +#: awx/main/models/inventory.py:901 +msgid "VPC ID" +msgstr "" + +#: awx/main/models/inventory.py:902 +msgid "Tag None" +msgstr "" + +#: awx/main/models/inventory.py:973 +#, python-format +msgid "" +"Cloud-based inventory sources (such as %s) require credentials for the " +"matching cloud service." +msgstr "" + +#: awx/main/models/inventory.py:980 +msgid "Credential is required for a cloud source." +msgstr "" + +#: awx/main/models/inventory.py:1005 +#, python-format +msgid "Invalid %(source)s region: %(region)s" +msgstr "" + +#: awx/main/models/inventory.py:1030 +#, python-format +msgid "Invalid filter expression: %(filter)s" +msgstr "" + +#: awx/main/models/inventory.py:1048 +#, python-format +msgid "Invalid group by choice: %(choice)s" +msgstr "" + +#: awx/main/models/inventory.py:1195 +#, python-format +msgid "" +"Unable to configure this item for cloud sync. It is already managed by %s." +msgstr "" + +#: awx/main/models/inventory.py:1290 +msgid "Inventory script contents" +msgstr "" + +#: awx/main/models/inventory.py:1295 +msgid "Organization owning this inventory script" +msgstr "" + +#: awx/main/models/jobs.py:169 +msgid "You must provide a network credential." +msgstr "" + +#: awx/main/models/jobs.py:177 +msgid "" +"Must provide a credential for a cloud provider, such as Amazon Web Services " +"or Rackspace." +msgstr "" + +#: awx/main/models/jobs.py:269 +msgid "Job Template must provide 'inventory' or allow prompting for it." +msgstr "" + +#: awx/main/models/jobs.py:273 +msgid "Job Template must provide 'credential' or allow prompting for it." +msgstr "" + +#: awx/main/models/jobs.py:362 +msgid "Cannot override job_type to or from a scan job." 
+msgstr "" + +#: awx/main/models/jobs.py:365 +msgid "Inventory cannot be changed at runtime for scan jobs." +msgstr "" + +#: awx/main/models/jobs.py:431 awx/main/models/projects.py:243 +msgid "SCM Revision" +msgstr "" + +#: awx/main/models/jobs.py:432 +msgid "The SCM Revision from the Project used for this job, if available" +msgstr "" + +#: awx/main/models/jobs.py:440 +msgid "" +"The SCM Refresh task used to make sure the playbooks were available for the " +"job run" +msgstr "" + +#: awx/main/models/jobs.py:665 +msgid "job host summaries" +msgstr "" + +#: awx/main/models/jobs.py:768 +msgid "Host Failure" +msgstr "" + +#: awx/main/models/jobs.py:771 awx/main/models/jobs.py:785 +msgid "No Hosts Remaining" +msgstr "" + +#: awx/main/models/jobs.py:772 +msgid "Host Polling" +msgstr "" + +#: awx/main/models/jobs.py:773 +msgid "Host Async OK" +msgstr "" + +#: awx/main/models/jobs.py:774 +msgid "Host Async Failure" +msgstr "" + +#: awx/main/models/jobs.py:775 +msgid "Item OK" +msgstr "" + +#: awx/main/models/jobs.py:776 +msgid "Item Failed" +msgstr "" + +#: awx/main/models/jobs.py:777 +msgid "Item Skipped" +msgstr "" + +#: awx/main/models/jobs.py:778 +msgid "Host Retry" +msgstr "" + +#: awx/main/models/jobs.py:780 +msgid "File Difference" +msgstr "" + +#: awx/main/models/jobs.py:781 +msgid "Playbook Started" +msgstr "" + +#: awx/main/models/jobs.py:782 +msgid "Running Handlers" +msgstr "" + +#: awx/main/models/jobs.py:783 +msgid "Including File" +msgstr "" + +#: awx/main/models/jobs.py:784 +msgid "No Hosts Matched" +msgstr "" + +#: awx/main/models/jobs.py:786 +msgid "Task Started" +msgstr "" + +#: awx/main/models/jobs.py:788 +msgid "Variables Prompted" +msgstr "" + +#: awx/main/models/jobs.py:789 +msgid "Gathering Facts" +msgstr "" + +#: awx/main/models/jobs.py:790 +msgid "internal: on Import for Host" +msgstr "" + +#: awx/main/models/jobs.py:791 +msgid "internal: on Not Import for Host" +msgstr "" + +#: awx/main/models/jobs.py:792 +msgid "Play Started" +msgstr "" + +#: awx/main/models/jobs.py:793 +msgid "Playbook Complete" +msgstr "" + +#: awx/main/models/jobs.py:1203 +msgid "Remove jobs older than a certain number of days" +msgstr "" + +#: awx/main/models/jobs.py:1204 +msgid "Remove activity stream entries older than a certain number of days" +msgstr "" + +#: awx/main/models/jobs.py:1205 +msgid "Purge and/or reduce the granularity of system tracking data" +msgstr "" + +#: awx/main/models/label.py:29 +msgid "Organization this label belongs to." 
+msgstr "" + +#: awx/main/models/notifications.py:31 +msgid "Email" +msgstr "" + +#: awx/main/models/notifications.py:32 +msgid "Slack" +msgstr "" + +#: awx/main/models/notifications.py:33 +msgid "Twilio" +msgstr "" + +#: awx/main/models/notifications.py:34 +msgid "Pagerduty" +msgstr "" + +#: awx/main/models/notifications.py:35 +msgid "HipChat" +msgstr "" + +#: awx/main/models/notifications.py:36 +msgid "Webhook" +msgstr "" + +#: awx/main/models/notifications.py:37 +msgid "IRC" +msgstr "" + +#: awx/main/models/notifications.py:127 awx/main/models/unified_jobs.py:59 +msgid "Pending" +msgstr "" + +#: awx/main/models/notifications.py:128 awx/main/models/unified_jobs.py:62 +msgid "Successful" +msgstr "" + +#: awx/main/models/notifications.py:129 awx/main/models/unified_jobs.py:63 +msgid "Failed" +msgstr "" + +#: awx/main/models/organization.py:157 +msgid "Execute Commands on the Inventory" +msgstr "" + +#: awx/main/models/organization.py:211 +msgid "Token not invalidated" +msgstr "" + +#: awx/main/models/organization.py:212 +msgid "Token is expired" +msgstr "" + +#: awx/main/models/organization.py:213 +msgid "The maximum number of allowed sessions for this user has been exceeded." +msgstr "" + +#: awx/main/models/organization.py:216 +msgid "Invalid token" +msgstr "" + +#: awx/main/models/organization.py:233 +msgid "Reason the auth token was invalidated." +msgstr "" + +#: awx/main/models/organization.py:272 +msgid "Invalid reason specified" +msgstr "" + +#: awx/main/models/projects.py:43 +msgid "Git" +msgstr "" + +#: awx/main/models/projects.py:44 +msgid "Mercurial" +msgstr "" + +#: awx/main/models/projects.py:45 +msgid "Subversion" +msgstr "" + +#: awx/main/models/projects.py:71 +msgid "" +"Local path (relative to PROJECTS_ROOT) containing playbooks and related " +"files for this project." +msgstr "" + +#: awx/main/models/projects.py:80 +msgid "SCM Type" +msgstr "" + +#: awx/main/models/projects.py:81 +msgid "Specifies the source control system used to store the project." +msgstr "" + +#: awx/main/models/projects.py:87 +msgid "SCM URL" +msgstr "" + +#: awx/main/models/projects.py:88 +msgid "The location where the project is stored." +msgstr "" + +#: awx/main/models/projects.py:94 +msgid "SCM Branch" +msgstr "" + +#: awx/main/models/projects.py:95 +msgid "Specific branch, tag or commit to checkout." +msgstr "" + +#: awx/main/models/projects.py:99 +msgid "Discard any local changes before syncing the project." +msgstr "" + +#: awx/main/models/projects.py:103 +msgid "Delete the project before syncing." +msgstr "" + +#: awx/main/models/projects.py:116 +msgid "The amount of time to run before the task is canceled." +msgstr "" + +#: awx/main/models/projects.py:130 +msgid "Invalid SCM URL." +msgstr "" + +#: awx/main/models/projects.py:133 +msgid "SCM URL is required." +msgstr "" + +#: awx/main/models/projects.py:142 +msgid "Credential kind must be 'scm'." +msgstr "" + +#: awx/main/models/projects.py:157 +msgid "Invalid credential." +msgstr "" + +#: awx/main/models/projects.py:229 +msgid "Update the project when a job is launched that uses the project." +msgstr "" + +#: awx/main/models/projects.py:234 +msgid "" +"The number of seconds after the last project update ran that a newproject " +"update will be launched as a job dependency." 
+msgstr "" + +#: awx/main/models/projects.py:244 +msgid "The last revision fetched by a project update" +msgstr "" + +#: awx/main/models/projects.py:251 +msgid "Playbook Files" +msgstr "" + +#: awx/main/models/projects.py:252 +msgid "List of playbooks found in the project" +msgstr "" + +#: awx/main/models/rbac.py:36 +msgid "System Administrator" +msgstr "" + +#: awx/main/models/rbac.py:37 +msgid "System Auditor" +msgstr "" + +#: awx/main/models/rbac.py:38 +msgid "Ad Hoc" +msgstr "" + +#: awx/main/models/rbac.py:39 +msgid "Admin" +msgstr "" + +#: awx/main/models/rbac.py:40 +msgid "Auditor" +msgstr "" + +#: awx/main/models/rbac.py:41 +msgid "Execute" +msgstr "" + +#: awx/main/models/rbac.py:42 +msgid "Member" +msgstr "" + +#: awx/main/models/rbac.py:43 +msgid "Read" +msgstr "" + +#: awx/main/models/rbac.py:44 +msgid "Update" +msgstr "" + +#: awx/main/models/rbac.py:45 +msgid "Use" +msgstr "" + +#: awx/main/models/rbac.py:49 +msgid "Can manage all aspects of the system" +msgstr "" + +#: awx/main/models/rbac.py:50 +msgid "Can view all settings on the system" +msgstr "" + +#: awx/main/models/rbac.py:51 +msgid "May run ad hoc commands on an inventory" +msgstr "" + +#: awx/main/models/rbac.py:52 +#, python-format +msgid "Can manage all aspects of the %s" +msgstr "" + +#: awx/main/models/rbac.py:53 +#, python-format +msgid "Can view all settings for the %s" +msgstr "" + +#: awx/main/models/rbac.py:54 +#, python-format +msgid "May run the %s" +msgstr "" + +#: awx/main/models/rbac.py:55 +#, python-format +msgid "User is a member of the %s" +msgstr "" + +#: awx/main/models/rbac.py:56 +#, python-format +msgid "May view settings for the %s" +msgstr "" + +#: awx/main/models/rbac.py:57 +msgid "" +"May update project or inventory or group using the configured source update " +"system" +msgstr "" + +#: awx/main/models/rbac.py:58 +#, python-format +msgid "Can use the %s in a job template" +msgstr "" + +#: awx/main/models/rbac.py:122 +msgid "roles" +msgstr "" + +#: awx/main/models/rbac.py:438 +msgid "role_ancestors" +msgstr "" + +#: awx/main/models/schedules.py:69 +msgid "Enables processing of this schedule by Tower." +msgstr "" + +#: awx/main/models/schedules.py:75 +msgid "The first occurrence of the schedule occurs on or after this time." +msgstr "" + +#: awx/main/models/schedules.py:81 +msgid "" +"The last occurrence of the schedule occurs before this time, aftewards the " +"schedule expires." +msgstr "" + +#: awx/main/models/schedules.py:85 +msgid "A value representing the schedules iCal recurrence rule." +msgstr "" + +#: awx/main/models/schedules.py:91 +msgid "The next time that the scheduled action will run." 
+msgstr "" + +#: awx/main/models/unified_jobs.py:58 +msgid "New" +msgstr "" + +#: awx/main/models/unified_jobs.py:60 +msgid "Waiting" +msgstr "" + +#: awx/main/models/unified_jobs.py:61 +msgid "Running" +msgstr "" + +#: awx/main/models/unified_jobs.py:65 +msgid "Canceled" +msgstr "" + +#: awx/main/models/unified_jobs.py:69 +msgid "Never Updated" +msgstr "" + +#: awx/main/models/unified_jobs.py:73 awx/ui/templates/ui/index.html:85 +#: awx/ui/templates/ui/index.html.py:104 +msgid "OK" +msgstr "" + +#: awx/main/models/unified_jobs.py:74 +msgid "Missing" +msgstr "" + +#: awx/main/models/unified_jobs.py:78 +msgid "No External Source" +msgstr "" + +#: awx/main/models/unified_jobs.py:85 +msgid "Updating" +msgstr "" + +#: awx/main/models/unified_jobs.py:403 +msgid "Relaunch" +msgstr "" + +#: awx/main/models/unified_jobs.py:404 +msgid "Callback" +msgstr "" + +#: awx/main/models/unified_jobs.py:405 +msgid "Scheduled" +msgstr "" + +#: awx/main/models/unified_jobs.py:406 +msgid "Dependency" +msgstr "" + +#: awx/main/models/unified_jobs.py:407 +msgid "Workflow" +msgstr "" + +#: awx/main/models/unified_jobs.py:408 +msgid "Sync" +msgstr "" + +#: awx/main/models/unified_jobs.py:454 +msgid "The Tower node the job executed on." +msgstr "" + +#: awx/main/models/unified_jobs.py:480 +msgid "The date and time the job was queued for starting." +msgstr "" + +#: awx/main/models/unified_jobs.py:486 +msgid "The date and time the job finished execution." +msgstr "" + +#: awx/main/models/unified_jobs.py:492 +msgid "Elapsed time in seconds that the job ran." +msgstr "" + +#: awx/main/models/unified_jobs.py:514 +msgid "" +"A status field to indicate the state of the job if it wasn't able to run and " +"capture stdout" +msgstr "" + +#: awx/main/notifications/base.py:17 +#: awx/main/notifications/email_backend.py:28 +msgid "" +"{} #{} had status {} on Ansible Tower, view details at {}\n" +"\n" +msgstr "" + +#: awx/main/notifications/hipchat_backend.py:46 +msgid "Error sending messages: {}" +msgstr "" + +#: awx/main/notifications/hipchat_backend.py:48 +msgid "Error sending message to hipchat: {}" +msgstr "" + +#: awx/main/notifications/irc_backend.py:54 +msgid "Exception connecting to irc server: {}" +msgstr "" + +#: awx/main/notifications/pagerduty_backend.py:39 +msgid "Exception connecting to PagerDuty: {}" +msgstr "" + +#: awx/main/notifications/pagerduty_backend.py:48 +#: awx/main/notifications/slack_backend.py:52 +#: awx/main/notifications/twilio_backend.py:46 +msgid "Exception sending messages: {}" +msgstr "" + +#: awx/main/notifications/twilio_backend.py:36 +msgid "Exception connecting to Twilio: {}" +msgstr "" + +#: awx/main/notifications/webhook_backend.py:38 +#: awx/main/notifications/webhook_backend.py:40 +msgid "Error sending notification webhook: {}" +msgstr "" + +#: awx/main/scheduler/__init__.py:130 +msgid "" +"Job spawned from workflow could not start because it was not in the right " +"state or required manual credentials" +msgstr "" + +#: awx/main/tasks.py:180 +msgid "Ansible Tower host usage over 90%" +msgstr "" + +#: awx/main/tasks.py:185 +msgid "Ansible Tower license will expire soon" +msgstr "" + +#: awx/main/tasks.py:249 +msgid "status_str must be either succeeded or failed" +msgstr "" + +#: awx/main/utils/common.py:89 +#, python-format +msgid "Unable to convert \"%s\" to boolean" +msgstr "" + +#: awx/main/utils/common.py:243 +#, python-format +msgid "Unsupported SCM type \"%s\"" +msgstr "" + +#: awx/main/utils/common.py:250 awx/main/utils/common.py:262 +#: awx/main/utils/common.py:281 +#, 
python-format +msgid "Invalid %s URL" +msgstr "" + +#: awx/main/utils/common.py:252 awx/main/utils/common.py:290 +#, python-format +msgid "Unsupported %s URL" +msgstr "" + +#: awx/main/utils/common.py:292 +#, python-format +msgid "Unsupported host \"%s\" for file:// URL" +msgstr "" + +#: awx/main/utils/common.py:294 +#, python-format +msgid "Host is required for %s URL" +msgstr "" + +#: awx/main/utils/common.py:312 +#, python-format +msgid "Username must be \"git\" for SSH access to %s." +msgstr "" + +#: awx/main/utils/common.py:318 +#, python-format +msgid "Username must be \"hg\" for SSH access to %s." +msgstr "" + +#: awx/main/validators.py:60 +#, python-format +msgid "Invalid certificate or key: %r..." +msgstr "" + +#: awx/main/validators.py:74 +#, python-format +msgid "Invalid private key: unsupported type \"%s\"" +msgstr "" + +#: awx/main/validators.py:78 +#, python-format +msgid "Unsupported PEM object type: \"%s\"" +msgstr "" + +#: awx/main/validators.py:103 +msgid "Invalid base64-encoded data" +msgstr "" + +#: awx/main/validators.py:122 +msgid "Exactly one private key is required." +msgstr "" + +#: awx/main/validators.py:124 +msgid "At least one private key is required." +msgstr "" + +#: awx/main/validators.py:126 +#, python-format +msgid "" +"At least %(min_keys)d private keys are required, only %(key_count)d provided." +msgstr "" + +#: awx/main/validators.py:129 +#, python-format +msgid "Only one private key is allowed, %(key_count)d provided." +msgstr "" + +#: awx/main/validators.py:131 +#, python-format +msgid "" +"No more than %(max_keys)d private keys are allowed, %(key_count)d provided." +msgstr "" + +#: awx/main/validators.py:136 +msgid "Exactly one certificate is required." +msgstr "" + +#: awx/main/validators.py:138 +msgid "At least one certificate is required." +msgstr "" + +#: awx/main/validators.py:140 +#, python-format +msgid "" +"At least %(min_certs)d certificates are required, only %(cert_count)d " +"provided." +msgstr "" + +#: awx/main/validators.py:143 +#, python-format +msgid "Only one certificate is allowed, %(cert_count)d provided." +msgstr "" + +#: awx/main/validators.py:145 +#, python-format +msgid "" +"No more than %(max_certs)d certificates are allowed, %(cert_count)d provided." +msgstr "" + +#: awx/main/views.py:20 +msgid "API Error" +msgstr "" + +#: awx/main/views.py:49 +msgid "Bad Request" +msgstr "" + +#: awx/main/views.py:50 +msgid "The request could not be understood by the server." +msgstr "" + +#: awx/main/views.py:57 +msgid "Forbidden" +msgstr "" + +#: awx/main/views.py:58 +msgid "You don't have permission to access the requested resource." +msgstr "" + +#: awx/main/views.py:65 +msgid "Not Found" +msgstr "" + +#: awx/main/views.py:66 +msgid "The requested resource could not be found." +msgstr "" + +#: awx/main/views.py:73 +msgid "Server Error" +msgstr "" + +#: awx/main/views.py:74 +msgid "A server error has occurred." +msgstr "" + +#: awx/settings/defaults.py:624 +msgid "Chicago" +msgstr "" + +#: awx/settings/defaults.py:625 +msgid "Dallas/Ft. 
Worth" +msgstr "" + +#: awx/settings/defaults.py:626 +msgid "Northern Virginia" +msgstr "" + +#: awx/settings/defaults.py:627 +msgid "London" +msgstr "" + +#: awx/settings/defaults.py:628 +msgid "Sydney" +msgstr "" + +#: awx/settings/defaults.py:629 +msgid "Hong Kong" +msgstr "" + +#: awx/settings/defaults.py:656 +msgid "US East (Northern Virginia)" +msgstr "" + +#: awx/settings/defaults.py:657 +msgid "US East (Ohio)" +msgstr "" + +#: awx/settings/defaults.py:658 +msgid "US West (Oregon)" +msgstr "" + +#: awx/settings/defaults.py:659 +msgid "US West (Northern California)" +msgstr "" + +#: awx/settings/defaults.py:660 +msgid "Canada (Central)" +msgstr "" + +#: awx/settings/defaults.py:661 +msgid "EU (Frankfurt)" +msgstr "" + +#: awx/settings/defaults.py:662 +msgid "EU (Ireland)" +msgstr "" + +#: awx/settings/defaults.py:663 +msgid "EU (London)" +msgstr "" + +#: awx/settings/defaults.py:664 +msgid "Asia Pacific (Singapore)" +msgstr "" + +#: awx/settings/defaults.py:665 +msgid "Asia Pacific (Sydney)" +msgstr "" + +#: awx/settings/defaults.py:666 +msgid "Asia Pacific (Tokyo)" +msgstr "" + +#: awx/settings/defaults.py:667 +msgid "Asia Pacific (Seoul)" +msgstr "" + +#: awx/settings/defaults.py:668 +msgid "Asia Pacific (Mumbai)" +msgstr "" + +#: awx/settings/defaults.py:669 +msgid "South America (Sao Paulo)" +msgstr "" + +#: awx/settings/defaults.py:670 +msgid "US West (GovCloud)" +msgstr "" + +#: awx/settings/defaults.py:671 +msgid "China (Beijing)" +msgstr "" + +#: awx/settings/defaults.py:720 +msgid "US East (B)" +msgstr "" + +#: awx/settings/defaults.py:721 +msgid "US East (C)" +msgstr "" + +#: awx/settings/defaults.py:722 +msgid "US East (D)" +msgstr "" + +#: awx/settings/defaults.py:723 +msgid "US Central (A)" +msgstr "" + +#: awx/settings/defaults.py:724 +msgid "US Central (B)" +msgstr "" + +#: awx/settings/defaults.py:725 +msgid "US Central (C)" +msgstr "" + +#: awx/settings/defaults.py:726 +msgid "US Central (F)" +msgstr "" + +#: awx/settings/defaults.py:727 +msgid "Europe West (B)" +msgstr "" + +#: awx/settings/defaults.py:728 +msgid "Europe West (C)" +msgstr "" + +#: awx/settings/defaults.py:729 +msgid "Europe West (D)" +msgstr "" + +#: awx/settings/defaults.py:730 +msgid "Asia East (A)" +msgstr "" + +#: awx/settings/defaults.py:731 +msgid "Asia East (B)" +msgstr "" + +#: awx/settings/defaults.py:732 +msgid "Asia East (C)" +msgstr "" + +#: awx/settings/defaults.py:756 +msgid "US Central" +msgstr "" + +#: awx/settings/defaults.py:757 +msgid "US East" +msgstr "" + +#: awx/settings/defaults.py:758 +msgid "US East 2" +msgstr "" + +#: awx/settings/defaults.py:759 +msgid "US North Central" +msgstr "" + +#: awx/settings/defaults.py:760 +msgid "US South Central" +msgstr "" + +#: awx/settings/defaults.py:761 +msgid "US West" +msgstr "" + +#: awx/settings/defaults.py:762 +msgid "Europe North" +msgstr "" + +#: awx/settings/defaults.py:763 +msgid "Europe West" +msgstr "" + +#: awx/settings/defaults.py:764 +msgid "Asia Pacific East" +msgstr "" + +#: awx/settings/defaults.py:765 +msgid "Asia Pacific Southeast" +msgstr "" + +#: awx/settings/defaults.py:766 +msgid "Japan East" +msgstr "" + +#: awx/settings/defaults.py:767 +msgid "Japan West" +msgstr "" + +#: awx/settings/defaults.py:768 +msgid "Brazil South" +msgstr "" + +#: awx/sso/apps.py:9 +msgid "Single Sign-On" +msgstr "" + +#: awx/sso/conf.py:27 +msgid "" +"Mapping to organization admins/users from social auth accounts. 
This " +"setting\n" +"controls which users are placed into which Tower organizations based on\n" +"their username and email address. Dictionary keys are organization names.\n" +"organizations will be created if not present if the license allows for\n" +"multiple organizations, otherwise the single default organization is used\n" +"regardless of the key. Values are dictionaries defining the options for\n" +"each organization's membership. For each organization it is possible to\n" +"specify which users are automatically users of the organization and also\n" +"which users can administer the organization. \n" +"\n" +"- admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated.\n" +" If True, all users using social auth will automatically be added as " +"admins\n" +" of the organization.\n" +" If False, no social auth users will be automatically added as admins of\n" +" the organization.\n" +" If a string or list of strings, specifies the usernames and emails for\n" +" users who will be added to the organization. Strings in the format\n" +" \"//\" will be interpreted as JavaScript regular " +"expressions and\n" +" may also be used instead of string literals; only \"i\" and \"m\" are " +"supported\n" +" for flags.\n" +"- remove_admins: True/False. Defaults to True.\n" +" If True, a user who does not match will be removed from the " +"organization's\n" +" administrative list.\n" +"- users: None, True/False, string or list of strings. Same rules apply as " +"for\n" +" admins.\n" +"- remove_users: True/False. Defaults to True. Same rules as apply for \n" +" remove_admins." +msgstr "" + +#: awx/sso/conf.py:76 +msgid "" +"Mapping of team members (users) from social auth accounts. Keys are team\n" +"names (will be created if not present). Values are dictionaries of options\n" +"for each team's membership, where each can contain the following " +"parameters:\n" +"\n" +"- organization: string. The name of the organization to which the team\n" +" belongs. The team will be created if the combination of organization and\n" +" team name does not exist. The organization will first be created if it\n" +" does not exist. If the license does not allow for multiple " +"organizations,\n" +" the team will always be assigned to the single default organization.\n" +"- users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all social auth users will be added/removed as team\n" +" members.\n" +" If a string or list of strings, specifies expressions used to match " +"users.\n" +" User will be added as a team member if the username or email matches.\n" +" Strings in the format \"//\" will be interpreted as " +"JavaScript\n" +" regular expressions and may also be used instead of string literals; only " +"\"i\"\n" +" and \"m\" are supported for flags.\n" +"- remove: True/False. Defaults to True. If True, a user who does not match\n" +" the rules above will be removed from the team." +msgstr "" + +#: awx/sso/conf.py:119 +msgid "Authentication Backends" +msgstr "" + +#: awx/sso/conf.py:120 +msgid "" +"List of authentication backends that are enabled based on license features " +"and other authentication settings." 
+msgstr "" + +#: awx/sso/conf.py:133 +msgid "Social Auth Organization Map" +msgstr "" + +#: awx/sso/conf.py:145 +msgid "Social Auth Team Map" +msgstr "" + +#: awx/sso/conf.py:157 +msgid "Social Auth User Fields" +msgstr "" + +#: awx/sso/conf.py:158 +msgid "" +"When set to an empty list `[]`, this setting prevents new user accounts from " +"being created. Only users who have previously logged in using social auth or " +"have a user account with a matching email address will be able to login." +msgstr "" + +#: awx/sso/conf.py:176 +msgid "LDAP Server URI" +msgstr "" + +#: awx/sso/conf.py:177 +msgid "" +"URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-" +"SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be " +"specified by separating with spaces or commas. LDAP authentication is " +"disabled if this parameter is empty." +msgstr "" + +#: awx/sso/conf.py:181 awx/sso/conf.py:199 awx/sso/conf.py:211 +#: awx/sso/conf.py:223 awx/sso/conf.py:239 awx/sso/conf.py:258 +#: awx/sso/conf.py:280 awx/sso/conf.py:296 awx/sso/conf.py:315 +#: awx/sso/conf.py:332 awx/sso/conf.py:349 awx/sso/conf.py:365 +#: awx/sso/conf.py:382 awx/sso/conf.py:420 awx/sso/conf.py:461 +msgid "LDAP" +msgstr "" + +#: awx/sso/conf.py:193 +msgid "LDAP Bind DN" +msgstr "" + +#: awx/sso/conf.py:194 +msgid "" +"DN (Distinguished Name) of user to bind for all search queries. Normally in " +"the format \"CN=Some User,OU=Users,DC=example,DC=com\" but may also be " +"specified as \"DOMAIN\\username\" for Active Directory. This is the system " +"user account we will use to login to query LDAP for other user information." +msgstr "" + +#: awx/sso/conf.py:209 +msgid "LDAP Bind Password" +msgstr "" + +#: awx/sso/conf.py:210 +msgid "Password used to bind LDAP user account." +msgstr "" + +#: awx/sso/conf.py:221 +msgid "LDAP Start TLS" +msgstr "" + +#: awx/sso/conf.py:222 +msgid "Whether to enable TLS when the LDAP connection is not using SSL." +msgstr "" + +#: awx/sso/conf.py:232 +msgid "LDAP Connection Options" +msgstr "" + +#: awx/sso/conf.py:233 +msgid "" +"Additional options to set for the LDAP connection. LDAP referrals are " +"disabled by default (to prevent certain LDAP queries from hanging with AD). " +"Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://" +"www.python-ldap.org/doc/html/ldap.html#options for possible options and " +"values that can be set." +msgstr "" + +#: awx/sso/conf.py:251 +msgid "LDAP User Search" +msgstr "" + +#: awx/sso/conf.py:252 +msgid "" +"LDAP search query to find users. Any user that matches the given pattern " +"will be able to login to Tower. The user should also be mapped into an " +"Tower organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). " +"If multiple search queries need to be supported use of \"LDAPUnion\" is " +"possible. See python-ldap documentation as linked at the top of this section." +msgstr "" + +#: awx/sso/conf.py:274 +msgid "LDAP User DN Template" +msgstr "" + +#: awx/sso/conf.py:275 +msgid "" +"Alternative to user search, if user DNs are all of the same format. This " +"approach will be more efficient for user lookups than searching if it is " +"usable in your organizational environment. If this setting has a value it " +"will be used instead of AUTH_LDAP_USER_SEARCH." 
+msgstr "" + +#: awx/sso/conf.py:290 +msgid "LDAP User Attribute Map" +msgstr "" + +#: awx/sso/conf.py:291 +msgid "" +"Mapping of LDAP user schema to Tower API user attributes (key is user " +"attribute name, value is LDAP attribute name). The default setting is valid " +"for ActiveDirectory but users with other LDAP configurations may need to " +"change the values (not the keys) of the dictionary/hash-table." +msgstr "" + +#: awx/sso/conf.py:310 +msgid "LDAP Group Search" +msgstr "" + +#: awx/sso/conf.py:311 +msgid "" +"Users in Tower are mapped to organizations based on their membership in LDAP " +"groups. This setting defines the LDAP search query to find groups. Note that " +"this, unlike the user search above, does not support LDAPSearchUnion." +msgstr "" + +#: awx/sso/conf.py:328 +msgid "LDAP Group Type" +msgstr "" + +#: awx/sso/conf.py:329 +msgid "" +"The group type may need to be changed based on the type of the LDAP server. " +"Values are listed at: http://pythonhosted.org/django-auth-ldap/groups." +"html#types-of-groups" +msgstr "" + +#: awx/sso/conf.py:344 +msgid "LDAP Require Group" +msgstr "" + +#: awx/sso/conf.py:345 +msgid "" +"Group DN required to login. If specified, user must be a member of this " +"group to login via LDAP. If not set, everyone in LDAP that matches the user " +"search will be able to login via Tower. Only one require group is supported." +msgstr "" + +#: awx/sso/conf.py:361 +msgid "LDAP Deny Group" +msgstr "" + +#: awx/sso/conf.py:362 +msgid "" +"Group DN denied from login. If specified, user will not be allowed to login " +"if a member of this group. Only one deny group is supported." +msgstr "" + +#: awx/sso/conf.py:375 +msgid "LDAP User Flags By Group" +msgstr "" + +#: awx/sso/conf.py:376 +msgid "" +"User profile flags updated from group membership (key is user attribute " +"name, value is group DN). These are boolean fields that are matched based " +"on whether the user is a member of the given group. So far only " +"is_superuser is settable via this method. This flag is set both true and " +"false at login time based on current LDAP settings." +msgstr "" + +#: awx/sso/conf.py:394 +msgid "LDAP Organization Map" +msgstr "" + +#: awx/sso/conf.py:395 +msgid "" +"Mapping between organization admins/users and LDAP groups. This controls " +"what users are placed into what Tower organizations relative to their LDAP " +"group memberships. Keys are organization names. Organizations will be " +"created if not present. Values are dictionaries defining the options for " +"each organization's membership. For each organization it is possible to " +"specify what groups are automatically users of the organization and also " +"what groups can administer the organization.\n" +"\n" +" - admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated based on LDAP values.\n" +" If True, all users in LDAP will automatically be added as admins of the " +"organization.\n" +" If False, no LDAP users will be automatically added as admins of the " +"organization.\n" +" If a string or list of strings, specifies the group DN(s) that will be " +"added of the organization if they match any of the specified groups.\n" +" - remove_admins: True/False. Defaults to True.\n" +" If True, a user who is not an member of the given groups will be removed " +"from the organization's administrative list.\n" +" - users: None, True/False, string or list of strings. Same rules apply as " +"for admins.\n" +" - remove_users: True/False. Defaults to True. 
Same rules apply as for " +"remove_admins." +msgstr "" + +#: awx/sso/conf.py:443 +msgid "LDAP Team Map" +msgstr "" + +#: awx/sso/conf.py:444 +msgid "" +"Mapping between team members (users) and LDAP groups. Keys are team names " +"(will be created if not present). Values are dictionaries of options for " +"each team's membership, where each can contain the following parameters:\n" +"\n" +" - organization: string. The name of the organization to which the team " +"belongs. The team will be created if the combination of organization and " +"team name does not exist. The organization will first be created if it does " +"not exist.\n" +" - users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all LDAP users will be added/removed as team members.\n" +" If a string or list of strings, specifies the group DN(s). User will be " +"added as a team member if the user is a member of ANY of these groups.\n" +"- remove: True/False. Defaults to True. If True, a user who is not a member " +"of the given groups will be removed from the team." +msgstr "" + +#: awx/sso/conf.py:487 +msgid "RADIUS Server" +msgstr "" + +#: awx/sso/conf.py:488 +msgid "" +"Hostname/IP of RADIUS server. RADIUS authentication will be disabled if this " +"setting is empty." +msgstr "" + +#: awx/sso/conf.py:490 awx/sso/conf.py:504 awx/sso/conf.py:516 +msgid "RADIUS" +msgstr "" + +#: awx/sso/conf.py:502 +msgid "RADIUS Port" +msgstr "" + +#: awx/sso/conf.py:503 +msgid "Port of RADIUS server." +msgstr "" + +#: awx/sso/conf.py:514 +msgid "RADIUS Secret" +msgstr "" + +#: awx/sso/conf.py:515 +msgid "Shared secret for authenticating to RADIUS server." +msgstr "" + +#: awx/sso/conf.py:531 +msgid "Google OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:532 +msgid "" +"Create a project at https://console.developers.google.com/ to obtain an " +"OAuth2 key and secret for a web application. Ensure that the Google+ API is " +"enabled. Provide this URL as the callback URL for your application." +msgstr "" + +#: awx/sso/conf.py:536 awx/sso/conf.py:547 awx/sso/conf.py:558 +#: awx/sso/conf.py:571 awx/sso/conf.py:585 awx/sso/conf.py:597 +#: awx/sso/conf.py:609 +msgid "Google OAuth2" +msgstr "" + +#: awx/sso/conf.py:545 +msgid "Google OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:546 +msgid "" +"The OAuth2 key from your web application at https://console.developers." +"google.com/." +msgstr "" + +#: awx/sso/conf.py:556 +msgid "Google OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:557 +msgid "" +"The OAuth2 secret from your web application at https://console.developers." +"google.com/." +msgstr "" + +#: awx/sso/conf.py:568 +msgid "Google OAuth2 Whitelisted Domains" +msgstr "" + +#: awx/sso/conf.py:569 +msgid "" +"Update this setting to restrict the domains who are allowed to login using " +"Google OAuth2." +msgstr "" + +#: awx/sso/conf.py:580 +msgid "Google OAuth2 Extra Arguments" +msgstr "" + +#: awx/sso/conf.py:581 +msgid "" +"Extra arguments for Google OAuth2 login. When only allowing a single domain " +"to authenticate, set to `{\"hd\": \"yourdomain.com\"}` and Google will not " +"display any other accounts even if the user is logged in with multiple " +"Google accounts." 
+msgstr "" + +#: awx/sso/conf.py:595 +msgid "Google OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:607 +msgid "Google OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:623 +msgid "GitHub OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:624 +msgid "" +"Create a developer application at https://github.com/settings/developers to " +"obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this " +"URL as the callback URL for your application." +msgstr "" + +#: awx/sso/conf.py:628 awx/sso/conf.py:639 awx/sso/conf.py:649 +#: awx/sso/conf.py:661 awx/sso/conf.py:673 +msgid "GitHub OAuth2" +msgstr "" + +#: awx/sso/conf.py:637 +msgid "GitHub OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:638 +msgid "The OAuth2 key (Client ID) from your GitHub developer application." +msgstr "" + +#: awx/sso/conf.py:647 +msgid "GitHub OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:648 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub developer application." +msgstr "" + +#: awx/sso/conf.py:659 +msgid "GitHub OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:671 +msgid "GitHub OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:687 +msgid "GitHub Organization OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:688 awx/sso/conf.py:763 +msgid "" +"Create an organization-owned application at https://github.com/organizations/" +"/settings/applications and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). Provide this URL as the callback URL for your " +"application." +msgstr "" + +#: awx/sso/conf.py:692 awx/sso/conf.py:703 awx/sso/conf.py:713 +#: awx/sso/conf.py:725 awx/sso/conf.py:736 awx/sso/conf.py:748 +msgid "GitHub Organization OAuth2" +msgstr "" + +#: awx/sso/conf.py:701 +msgid "GitHub Organization OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:702 awx/sso/conf.py:777 +msgid "The OAuth2 key (Client ID) from your GitHub organization application." +msgstr "" + +#: awx/sso/conf.py:711 +msgid "GitHub Organization OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:712 awx/sso/conf.py:787 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub organization application." +msgstr "" + +#: awx/sso/conf.py:722 +msgid "GitHub Organization Name" +msgstr "" + +#: awx/sso/conf.py:723 +msgid "" +"The name of your GitHub organization, as used in your organization's URL: " +"https://github.com//." +msgstr "" + +#: awx/sso/conf.py:734 +msgid "GitHub Organization OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:746 +msgid "GitHub Organization OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:762 +msgid "GitHub Team OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:767 awx/sso/conf.py:778 awx/sso/conf.py:788 +#: awx/sso/conf.py:800 awx/sso/conf.py:811 awx/sso/conf.py:823 +msgid "GitHub Team OAuth2" +msgstr "" + +#: awx/sso/conf.py:776 +msgid "GitHub Team OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:786 +msgid "GitHub Team OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:797 +msgid "GitHub Team ID" +msgstr "" + +#: awx/sso/conf.py:798 +msgid "" +"Find the numeric team ID using the Github API: http://fabian-kostadinov." +"github.io/2015/01/16/how-to-find-a-github-team-id/." 
+msgstr "" + +#: awx/sso/conf.py:809 +msgid "GitHub Team OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:821 +msgid "GitHub Team OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:837 +msgid "Azure AD OAuth2 Callback URL" +msgstr "" + +#: awx/sso/conf.py:838 +msgid "" +"Register an Azure AD application as described by https://msdn.microsoft.com/" +"en-us/library/azure/dn132599.aspx and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). Provide this URL as the callback URL for your " +"application." +msgstr "" + +#: awx/sso/conf.py:842 awx/sso/conf.py:853 awx/sso/conf.py:863 +#: awx/sso/conf.py:875 awx/sso/conf.py:887 +msgid "Azure AD OAuth2" +msgstr "" + +#: awx/sso/conf.py:851 +msgid "Azure AD OAuth2 Key" +msgstr "" + +#: awx/sso/conf.py:852 +msgid "The OAuth2 key (Client ID) from your Azure AD application." +msgstr "" + +#: awx/sso/conf.py:861 +msgid "Azure AD OAuth2 Secret" +msgstr "" + +#: awx/sso/conf.py:862 +msgid "The OAuth2 secret (Client Secret) from your Azure AD application." +msgstr "" + +#: awx/sso/conf.py:873 +msgid "Azure AD OAuth2 Organization Map" +msgstr "" + +#: awx/sso/conf.py:885 +msgid "Azure AD OAuth2 Team Map" +msgstr "" + +#: awx/sso/conf.py:906 +msgid "SAML Service Provider Callback URL" +msgstr "" + +#: awx/sso/conf.py:907 +msgid "" +"Register Tower as a service provider (SP) with each identity provider (IdP) " +"you have configured. Provide your SP Entity ID and this callback URL for " +"your application." +msgstr "" + +#: awx/sso/conf.py:910 awx/sso/conf.py:924 awx/sso/conf.py:937 +#: awx/sso/conf.py:951 awx/sso/conf.py:965 awx/sso/conf.py:983 +#: awx/sso/conf.py:1005 awx/sso/conf.py:1024 awx/sso/conf.py:1044 +#: awx/sso/conf.py:1078 awx/sso/conf.py:1091 +msgid "SAML" +msgstr "" + +#: awx/sso/conf.py:921 +msgid "SAML Service Provider Metadata URL" +msgstr "" + +#: awx/sso/conf.py:922 +msgid "" +"If your identity provider (IdP) allows uploading an XML metadata file, you " +"can download one from this URL." +msgstr "" + +#: awx/sso/conf.py:934 +msgid "SAML Service Provider Entity ID" +msgstr "" + +#: awx/sso/conf.py:935 +msgid "" +"The application-defined unique identifier used as the audience of the SAML " +"service provider (SP) configuration." +msgstr "" + +#: awx/sso/conf.py:948 +msgid "SAML Service Provider Public Certificate" +msgstr "" + +#: awx/sso/conf.py:949 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"certificate content here." +msgstr "" + +#: awx/sso/conf.py:962 +msgid "SAML Service Provider Private Key" +msgstr "" + +#: awx/sso/conf.py:963 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"private key content here." +msgstr "" + +#: awx/sso/conf.py:981 +msgid "SAML Service Provider Organization Info" +msgstr "" + +#: awx/sso/conf.py:982 +msgid "Configure this setting with information about your app." +msgstr "" + +#: awx/sso/conf.py:1003 +msgid "SAML Service Provider Technical Contact" +msgstr "" + +#: awx/sso/conf.py:1004 awx/sso/conf.py:1023 +msgid "Configure this setting with your contact information." +msgstr "" + +#: awx/sso/conf.py:1022 +msgid "SAML Service Provider Support Contact" +msgstr "" + +#: awx/sso/conf.py:1037 +msgid "SAML Enabled Identity Providers" +msgstr "" + +#: awx/sso/conf.py:1038 +msgid "" +"Configure the Entity ID, SSO URL and certificate for each identity provider " +"(IdP) in use. Multiple SAML IdPs are supported. 
Some IdPs may provide user " +"data using attribute names that differ from the default OIDs (https://github." +"com/omab/python-social-auth/blob/master/social/backends/saml.py#L16). " +"Attribute names may be overridden for each IdP." +msgstr "" + +#: awx/sso/conf.py:1076 +msgid "SAML Organization Map" +msgstr "" + +#: awx/sso/conf.py:1089 +msgid "SAML Team Map" +msgstr "" + +#: awx/sso/fields.py:123 +msgid "Invalid connection option(s): {invalid_options}." +msgstr "" + +#: awx/sso/fields.py:194 +msgid "Base" +msgstr "" + +#: awx/sso/fields.py:195 +msgid "One Level" +msgstr "" + +#: awx/sso/fields.py:196 +msgid "Subtree" +msgstr "" + +#: awx/sso/fields.py:214 +msgid "Expected a list of three items but got {length} instead." +msgstr "" + +#: awx/sso/fields.py:215 +msgid "Expected an instance of LDAPSearch but got {input_type} instead." +msgstr "" + +#: awx/sso/fields.py:251 +msgid "" +"Expected an instance of LDAPSearch or LDAPSearchUnion but got {input_type} " +"instead." +msgstr "" + +#: awx/sso/fields.py:278 +msgid "Invalid user attribute(s): {invalid_attrs}." +msgstr "" + +#: awx/sso/fields.py:295 +msgid "Expected an instance of LDAPGroupType but got {input_type} instead." +msgstr "" + +#: awx/sso/fields.py:323 +msgid "Invalid user flag: \"{invalid_flag}\"." +msgstr "" + +#: awx/sso/fields.py:339 awx/sso/fields.py:506 +msgid "" +"Expected None, True, False, a string or list of strings but got {input_type} " +"instead." +msgstr "" + +#: awx/sso/fields.py:375 +msgid "Missing key(s): {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:376 +msgid "Invalid key(s): {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:425 awx/sso/fields.py:542 +msgid "Invalid key(s) for organization map: {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:443 +msgid "Missing required key for team map: {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:444 awx/sso/fields.py:561 +msgid "Invalid key(s) for team map: {invalid_keys}." +msgstr "" + +#: awx/sso/fields.py:560 +msgid "Missing required key for team map: {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:578 +msgid "Missing required key(s) for org info record: {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:591 +msgid "Invalid language code(s) for org info: {invalid_lang_codes}." +msgstr "" + +#: awx/sso/fields.py:610 +msgid "Missing required key(s) for contact: {missing_keys}." +msgstr "" + +#: awx/sso/fields.py:622 +msgid "Missing required key(s) for IdP: {missing_keys}." 
+msgstr "" + +#: awx/sso/pipeline.py:24 +msgid "An account cannot be found for {0}" +msgstr "" + +#: awx/sso/pipeline.py:30 +msgid "Your account is inactive" +msgstr "" + +#: awx/sso/validators.py:19 awx/sso/validators.py:44 +#, python-format +msgid "DN must include \"%%(user)s\" placeholder for username: %s" +msgstr "" + +#: awx/sso/validators.py:26 +#, python-format +msgid "Invalid DN: %s" +msgstr "" + +#: awx/sso/validators.py:56 +#, python-format +msgid "Invalid filter: %s" +msgstr "" + +#: awx/templates/error.html:4 awx/ui/templates/ui/index.html:8 +msgid "Ansible Tower" +msgstr "" + +#: awx/templates/rest_framework/api.html:39 +msgid "Ansible Tower API Guide" +msgstr "" + +#: awx/templates/rest_framework/api.html:40 +msgid "Back to Ansible Tower" +msgstr "" + +#: awx/templates/rest_framework/api.html:41 +msgid "Resize" +msgstr "" + +#: awx/templates/rest_framework/base.html:78 +#: awx/templates/rest_framework/base.html:92 +#, python-format +msgid "Make a GET request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:80 +msgid "Specify a format for the GET request" +msgstr "" + +#: awx/templates/rest_framework/base.html:86 +#, python-format +msgid "" +"Make a GET request on the %(name)s resource with the format set to `" +"%(format)s`" +msgstr "" + +#: awx/templates/rest_framework/base.html:100 +#, python-format +msgid "Make an OPTIONS request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:106 +#, python-format +msgid "Make a DELETE request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:113 +msgid "Filters" +msgstr "" + +#: awx/templates/rest_framework/base.html:172 +#: awx/templates/rest_framework/base.html:186 +#, python-format +msgid "Make a POST request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:216 +#: awx/templates/rest_framework/base.html:230 +#, python-format +msgid "Make a PUT request on the %(name)s resource" +msgstr "" + +#: awx/templates/rest_framework/base.html:233 +#, python-format +msgid "Make a PATCH request on the %(name)s resource" +msgstr "" + +#: awx/ui/apps.py:9 awx/ui/conf.py:22 awx/ui/conf.py:38 awx/ui/conf.py:53 +msgid "UI" +msgstr "" + +#: awx/ui/conf.py:16 +msgid "Off" +msgstr "" + +#: awx/ui/conf.py:17 +msgid "Anonymous" +msgstr "" + +#: awx/ui/conf.py:18 +msgid "Detailed" +msgstr "" + +#: awx/ui/conf.py:20 +msgid "Analytics Tracking State" +msgstr "" + +#: awx/ui/conf.py:21 +msgid "Enable or Disable Analytics Tracking." +msgstr "" + +#: awx/ui/conf.py:31 +msgid "Custom Login Info" +msgstr "" + +#: awx/ui/conf.py:32 +msgid "" +"If needed, you can add specific information (such as a legal notice or a " +"disclaimer) to a text box in the login modal using this setting. Any content " +"added must be in plain text, as custom HTML or other markup languages are " +"not supported. If multiple paragraphs of text are needed, new lines " +"(paragraphs) must be escaped as `\\n` within the block of text." +msgstr "" + +#: awx/ui/conf.py:48 +msgid "Custom Logo" +msgstr "" + +#: awx/ui/conf.py:49 +msgid "" +"To set up a custom logo, provide a file that you create. For the custom logo " +"to look its best, use a `.png` file with a transparent background. GIF, PNG " +"and JPEG formats are supported." +msgstr "" + +#: awx/ui/fields.py:29 +msgid "" +"Invalid format for custom logo. Must be a data URL with a base64-encoded " +"GIF, PNG or JPEG image." +msgstr "" + +#: awx/ui/fields.py:30 +msgid "Invalid base64-encoded data in data URL." 
+msgstr "" + +#: awx/ui/templates/ui/index.html:49 +msgid "" +"Your session will expire in 60 seconds, would you like to continue?" +msgstr "" + +#: awx/ui/templates/ui/index.html:64 +msgid "CANCEL" +msgstr "" + +#: awx/ui/templates/ui/index.html:116 +msgid "Set how many days of data should be retained." +msgstr "" + +#: awx/ui/templates/ui/index.html:122 +msgid "" +"Please enter an integer that is not " +"negative that is lower than 9999." +msgstr "" + +#: awx/ui/templates/ui/index.html:127 +msgid "" +"For facts collected older than the time period specified, save one fact scan " +"(snapshot) per time window (frequency). For example, facts older than 30 " +"days are purged, while one weekly fact scan is kept.\n" +"
\n" +"
CAUTION: Setting both numerical variables to \"0\" " +"will delete all facts.\n" +"
\n" +"
" +msgstr "" + +#: awx/ui/templates/ui/index.html:136 +msgid "Select a time period after which to remove old facts" +msgstr "" + +#: awx/ui/templates/ui/index.html:150 +msgid "" +"Please enter an integer " +"that is not negative " +"that is lower than 9999." +msgstr "" + +#: awx/ui/templates/ui/index.html:155 +msgid "Select a frequency for snapshot retention" +msgstr "" + +#: awx/ui/templates/ui/index.html:169 +msgid "" +"Please enter an integer that is not negative that is " +"lower than 9999." +msgstr "" + +#: awx/ui/templates/ui/index.html:175 +msgid "working..." +msgstr "" diff --git a/awx/locale/fr/LC_MESSAGES/django.po b/awx/locale/fr/LC_MESSAGES/django.po new file mode 100644 index 0000000000..23961c10bc --- /dev/null +++ b/awx/locale/fr/LC_MESSAGES/django.po @@ -0,0 +1,4453 @@ +# Corina Roe , 2017. #zanata +# Sam Friedmann , 2017. #zanata +# tfujiwar , 2017. #zanata +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2017-01-31 18:51+0000\n" +"PO-Revision-Date: 2017-02-02 01:42+0000\n" +"Last-Translator: Corina Roe \n" +"Language-Team: French\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fr\n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" +"X-Generator: Zanata 3.9.6\n" + +#: awx/api/authentication.py:67 +msgid "Invalid token header. No credentials provided." +msgstr "" +"En-tête de token non valide. Aucune information d'identification fournie." + +#: awx/api/authentication.py:70 +msgid "Invalid token header. Token string should not contain spaces." +msgstr "" +"En-tête de token non valide. La chaîne token ne doit pas contenir d'espaces." + +#: awx/api/authentication.py:105 +msgid "User inactive or deleted" +msgstr "Utilisateur inactif ou supprimé" + +#: awx/api/authentication.py:161 +msgid "Invalid task token" +msgstr "Token de tâche non valide" + +#: awx/api/conf.py:12 +msgid "Idle Time Force Log Out" +msgstr "Temps d'inactivité - Forcer la déconnexion" + +#: awx/api/conf.py:13 +msgid "" +"Number of seconds that a user is inactive before they will need to login " +"again." +msgstr "" +"Délai en secondes pendant lequel un utilisateur peut rester inactif avant de " +"devoir se reconnecter." + +#: awx/api/conf.py:14 +#: awx/api/conf.py:24 +#: awx/api/conf.py:33 +#: awx/sso/conf.py:124 +#: awx/sso/conf.py:135 +#: awx/sso/conf.py:147 +#: awx/sso/conf.py:162 +msgid "Authentication" +msgstr "Authentification" + +#: awx/api/conf.py:22 +msgid "Maximum number of simultaneous logins" +msgstr "Nombre maximal de connexions simultanées" + +#: awx/api/conf.py:23 +msgid "" +"Maximum number of simultaneous logins a user may have. To disable enter -1." +msgstr "" +"Nombre maximal de connexions simultanées dont un utilisateur peut disposer. " +"Pour désactiver cette option, entrez -1." + +#: awx/api/conf.py:31 +msgid "Enable HTTP Basic Auth" +msgstr "Activer l'authentification HTTP de base" + +#: awx/api/conf.py:32 +msgid "Enable HTTP Basic Auth for the API Browser." +msgstr "Activer l'authentification HTTP de base pour le navigateur d'API." + +#: awx/api/generics.py:466 +msgid "\"id\" is required to disassociate" +msgstr "\"id\" est nécessaire pour dissocier" + +#: awx/api/metadata.py:50 +msgid "Database ID for this {}." +msgstr "ID de base de données pour ce {}." + +#: awx/api/metadata.py:51 +msgid "Name of this {}." +msgstr "Nom de ce {}." + +#: awx/api/metadata.py:52 +msgid "Optional description of this {}." +msgstr "Description facultative de ce {}." 
+ +#: awx/api/metadata.py:53 +msgid "Data type for this {}." +msgstr "Type de données pour ce {}." + +#: awx/api/metadata.py:54 +msgid "URL for this {}." +msgstr "URL de ce {}." + +#: awx/api/metadata.py:55 +msgid "Data structure with URLs of related resources." +msgstr "Structure de données avec URL des ressources associées." + +#: awx/api/metadata.py:56 +msgid "Data structure with name/description for related resources." +msgstr "Structure de données avec nom/description des ressources associées." + +#: awx/api/metadata.py:57 +msgid "Timestamp when this {} was created." +msgstr "Horodatage lors de la création de ce {}." + +#: awx/api/metadata.py:58 +msgid "Timestamp when this {} was last modified." +msgstr "Horodatage lors de la modification de ce {}." + +#: awx/api/parsers.py:31 +#, python-format +msgid "JSON parse error - %s" +msgstr "Erreur d'analyse JSON - %s" + +#: awx/api/serializers.py:250 +msgid "Playbook Run" +msgstr "Exécution du playbook" + +#: awx/api/serializers.py:251 +msgid "Command" +msgstr "Commande" + +#: awx/api/serializers.py:252 +msgid "SCM Update" +msgstr "Mise à jour SCM" + +#: awx/api/serializers.py:253 +msgid "Inventory Sync" +msgstr "Synchronisation des inventaires" + +#: awx/api/serializers.py:254 +msgid "Management Job" +msgstr "Tâche de gestion" + +#: awx/api/serializers.py:255 +msgid "Workflow Job" +msgstr "Tâche de workflow" + +#: awx/api/serializers.py:256 +msgid "Workflow Template" +msgstr "Modèle de workflow" + +#: awx/api/serializers.py:658 +#: awx/api/serializers.py:716 +#: awx/api/views.py:3817 +#, python-format +msgid "" +"Standard Output too large to display (%(text_size)d bytes), only download " +"supported for sizes over %(supported_size)d bytes" +msgstr "" +"Sortie standard trop grande pour pouvoir s'afficher (%(text_size)d octets). " +"Le téléchargement est pris en charge seulement pour une taille supérieure à " +"%(supported_size)d octets" + +#: awx/api/serializers.py:731 +msgid "Write-only field used to change the password." +msgstr "Champ en écriture seule servant à modifier le mot de passe." + +#: awx/api/serializers.py:733 +msgid "Set if the account is managed by an external service" +msgstr "À définir si le compte est géré par un service externe" + +#: awx/api/serializers.py:757 +msgid "Password required for new User." +msgstr "Mot de passe requis pour le nouvel utilisateur." + +#: awx/api/serializers.py:841 +#, python-format +msgid "Unable to change %s on user managed by LDAP." +msgstr "Impossible de redéfinir %s sur un utilisateur géré par LDAP." + +#: awx/api/serializers.py:1002 +msgid "Organization is missing" +msgstr "L'organisation est manquante" + +#: awx/api/serializers.py:1006 +msgid "Update options must be set to false for manual projects." +msgstr "" +"La Mise à jour des options doit être définie à false pour les projets " +"manuels." + +#: awx/api/serializers.py:1012 +msgid "Array of playbooks available within this project." +msgstr "Tableau des playbooks disponibles dans ce projet." + +#: awx/api/serializers.py:1194 +#, python-format +msgid "Invalid port specification: %s" +msgstr "Spécification de port non valide : %s" + +#: awx/api/serializers.py:1222 +#: awx/main/validators.py:193 +msgid "Must be valid JSON or YAML." +msgstr "Syntaxe JSON ou YAML valide exigée." + +#: awx/api/serializers.py:1279 +msgid "Invalid group name." +msgstr "Nom de groupe incorrect." + +#: awx/api/serializers.py:1354 +msgid "" +"Script must begin with a hashbang sequence: i.e.... 
#!/usr/bin/env python" +msgstr "" +"Le script doit commencer par une séquence hashbang : c.-à-d. ... #!/usr/bin/" +"env python" + +#: awx/api/serializers.py:1407 +msgid "If 'source' is 'custom', 'source_script' must be provided." +msgstr "Si la valeur 'source' est 'custom', 'source_script' doit être défini." + +#: awx/api/serializers.py:1411 +msgid "" +"The 'source_script' does not belong to the same organization as the " +"inventory." +msgstr "" +"Le 'source_script' n'appartient pas à la même organisation que l'inventaire." + +#: awx/api/serializers.py:1413 +msgid "'source_script' doesn't exist." +msgstr "'source_script' n'existe pas." + +#: awx/api/serializers.py:1772 +msgid "" +"Write-only field used to add user to owner role. If provided, do not give " +"either team or organization. Only valid for creation." +msgstr "" +"Champ en écriture seule qui sert à ajouter un utilisateur au rôle de " +"propriétaire. Si vous le définissez, n'entrez ni équipe ni organisation. " +"Seulement valable pour la création." + +#: awx/api/serializers.py:1777 +msgid "" +"Write-only field used to add team to owner role. If provided, do not give " +"either user or organization. Only valid for creation." +msgstr "" +"Champ en écriture seule qui sert à ajouter une équipe au rôle de " +"propriétaire. Si vous le définissez, n'entrez ni utilisateur ni organisation." +" Seulement valable pour la création." + +#: awx/api/serializers.py:1782 +msgid "" +"Inherit permissions from organization roles. If provided on creation, do not " +"give either user or team." +msgstr "" +"Hériter des permissions à partir des rôles d'organisation. Si vous le " +"définissez lors de la création, n'entrez ni utilisateur ni équipe." + +#: awx/api/serializers.py:1798 +msgid "Missing 'user', 'team', or 'organization'." +msgstr "Valeur 'utilisateur', 'équipe' ou 'organisation' manquante." + +#: awx/api/serializers.py:1811 +msgid "" +"Credential organization must be set and match before assigning to a team" +msgstr "" +"L'organisation des informations d'identification doit être définie et mise " +"en correspondance avant de l'attribuer à une équipe" + +#: awx/api/serializers.py:1903 +msgid "This field is required." +msgstr "Ce champ est obligatoire." + +#: awx/api/serializers.py:1905 +#: awx/api/serializers.py:1907 +msgid "Playbook not found for project." +msgstr "Playbook introuvable pour le projet." + +#: awx/api/serializers.py:1909 +msgid "Must select playbook for project." +msgstr "Un playbook doit être sélectionné pour le project." + +#: awx/api/serializers.py:1975 +msgid "Must either set a default value or ask to prompt on launch." +msgstr "" +"Une valeur par défaut doit être définie ou bien demander une invite au " +"moment du lancement." + +#: awx/api/serializers.py:1978 +#: awx/main/models/jobs.py:278 +msgid "Scan jobs must be assigned a fixed inventory." +msgstr "Un inventaire fixe doit être assigné aux tâches de scan." + +#: awx/api/serializers.py:1980 +#: awx/main/models/jobs.py:281 +msgid "Job types 'run' and 'check' must have assigned a project." +msgstr "Un projet doit être assigné aux types de tâche 'run' et 'check'." + +#: awx/api/serializers.py:1987 +msgid "Survey Enabled cannot be used with scan jobs." +msgstr "" +"L'option Questionnaire activé ne peut pas être utilisée avec les tâches de " +"scan." + +#: awx/api/serializers.py:2047 +msgid "Invalid job template." +msgstr "Modèle de tâche non valide." + +#: awx/api/serializers.py:2132 +msgid "Credential not found or deleted." 
+msgstr "Informations d'identification introuvables ou supprimées." + +#: awx/api/serializers.py:2134 +msgid "Job Template Project is missing or undefined." +msgstr "Le projet de modèle de tâche est manquant ou non défini." + +#: awx/api/serializers.py:2136 +msgid "Job Template Inventory is missing or undefined." +msgstr "Le projet de modèle d'inventaire est manquant ou non défini." + +#: awx/api/serializers.py:2421 +#, python-format +msgid "%(job_type)s is not a valid job type. The choices are %(choices)s." +msgstr "" +"%(job_type)s n'est pas un type de tâche valide. Les choix sont %(choices)s." + +#: awx/api/serializers.py:2426 +msgid "Workflow job template is missing during creation." +msgstr "Le modèle de tâche Workflow est manquant lors de la création." + +#: awx/api/serializers.py:2431 +#, python-format +msgid "Cannot nest a %s inside a WorkflowJobTemplate" +msgstr "Impossible d'imbriquer %s dans un modèle de tâche Workflow." + +#: awx/api/serializers.py:2669 +#, python-format +msgid "Job Template '%s' is missing or undefined." +msgstr "Le modèle de tâche '%s' est manquant ou non défini." + +#: awx/api/serializers.py:2695 +msgid "Must be a valid JSON or YAML dictionary." +msgstr "Dictionnaire JSON ou YAML valide exigé." + +#: awx/api/serializers.py:2837 +msgid "" +"Missing required fields for Notification Configuration: notification_type" +msgstr "" +"Champs obligatoires manquants pour la configuration des notifications : " +"notification_type" + +#: awx/api/serializers.py:2860 +msgid "No values specified for field '{}'" +msgstr "Aucune valeur spécifiée pour le champ '{}'" + +#: awx/api/serializers.py:2865 +msgid "Missing required fields for Notification Configuration: {}." +msgstr "" +"Champs obligatoires manquants pour la configuration des notifications : {}." + +#: awx/api/serializers.py:2868 +msgid "Configuration field '{}' incorrect type, expected {}." +msgstr "Type de champ de configuration '{}' incorrect, {} attendu." + +#: awx/api/serializers.py:2921 +msgid "Inventory Source must be a cloud resource." +msgstr "La source d'inventaire doit être une ressource cloud." + +#: awx/api/serializers.py:2923 +msgid "Manual Project can not have a schedule set." +msgstr "Le projet manuel ne peut pas avoir de calendrier défini." + +#: awx/api/serializers.py:2945 +msgid "" +"DTSTART required in rrule. Value should match: DTSTART:YYYYMMDDTHHMMSSZ" +msgstr "" +"DTSTART obligatoire dans rrule. La valeur doit correspondre à : DTSTART:" +"YYYYMMDDTHHMMSSZ" + +#: awx/api/serializers.py:2947 +msgid "Multiple DTSTART is not supported." +msgstr "Une seule valeur DTSTART est prise en charge." + +#: awx/api/serializers.py:2949 +msgid "RRULE require in rrule." +msgstr "RRULE obligatoire dans rrule." + +#: awx/api/serializers.py:2951 +msgid "Multiple RRULE is not supported." +msgstr "Une seule valeur RRULE est prise en charge." + +#: awx/api/serializers.py:2953 +msgid "INTERVAL required in rrule." +msgstr "INTERVAL obligatoire dans rrule." + +#: awx/api/serializers.py:2955 +msgid "TZID is not supported." +msgstr "TZID n'est pas pris en charge." + +#: awx/api/serializers.py:2957 +msgid "SECONDLY is not supported." +msgstr "SECONDLY n'est pas pris en charge." + +#: awx/api/serializers.py:2959 +msgid "Multiple BYMONTHDAYs not supported." +msgstr "Une seule valeur BYMONTHDAY est prise en charge." + +#: awx/api/serializers.py:2961 +msgid "Multiple BYMONTHs not supported." +msgstr "Une seule valeur BYMONTH est prise en charge." 
+
+#: awx/api/serializers.py:2963
+msgid "BYDAY with numeric prefix not supported."
+msgstr "BYDAY avec un préfixe numérique non pris en charge."
+
+#: awx/api/serializers.py:2965
+msgid "BYYEARDAY not supported."
+msgstr "BYYEARDAY non pris en charge."
+
+#: awx/api/serializers.py:2967
+msgid "BYWEEKNO not supported."
+msgstr "BYWEEKNO non pris en charge."
+
+#: awx/api/serializers.py:2971
+msgid "COUNT > 999 is unsupported."
+msgstr "COUNT > 999 non pris en charge."
+
+#: awx/api/serializers.py:2975
+msgid "rrule parsing failed validation."
+msgstr "L'analyse rrule n'a pas pu être validée."
+
+#: awx/api/serializers.py:2997
+msgid ""
+"A summary of the new and changed values when an object is created, updated, "
+"or deleted"
+msgstr ""
+"Un récapitulatif des valeurs nouvelles et modifiées lorsqu'un objet est "
+"créé, mis à jour ou supprimé"
+
+#: awx/api/serializers.py:2999
+msgid ""
+"For create, update, and delete events this is the object type that was "
+"affected. For associate and disassociate events this is the object type "
+"associated or disassociated with object2."
+msgstr ""
+"Pour les événements de création, de mise à jour et de suppression, il "
+"s'agit du type d'objet qui a été affecté. Pour les événements d'association "
+"et de dissociation, il s'agit du type d'objet associé à ou dissocié de "
+"object2."
+
+#: awx/api/serializers.py:3002
+msgid ""
+"Unpopulated for create, update, and delete events. For associate and "
+"disassociate events this is the object type that object1 is being associated "
+"with."
+msgstr ""
+"Non renseigné pour les événements de création, de mise à jour et de "
+"suppression. Pour les événements d'association et de dissociation, il "
+"s'agit du type d'objet auquel object1 est associé."
+
+#: awx/api/serializers.py:3005
+msgid "The action taken with respect to the given object(s)."
+msgstr "Action appliquée par rapport à l'objet ou aux objets donnés."
+
+#: awx/api/serializers.py:3112
+msgid "Unable to login with provided credentials."
+msgstr "Connexion impossible avec les informations d'identification fournies."
+
+#: awx/api/serializers.py:3114
+msgid "Must include \"username\" and \"password\"."
+msgstr "Doit inclure \"username\" et \"password\"."
+
+#: awx/api/views.py:99
+msgid "Your license does not allow use of the activity stream."
+msgstr "Votre licence ne permet pas l'utilisation du flux d'activité."
+
+#: awx/api/views.py:109
+msgid "Your license does not permit use of system tracking."
+msgstr "Votre licence ne permet pas l'utilisation du suivi du système."
+
+#: awx/api/views.py:119
+msgid "Your license does not allow use of workflows."
+msgstr "Votre licence ne permet pas l'utilisation de workflows."
+ +#: awx/api/views.py:127 +#: awx/templates/rest_framework/api.html:28 +msgid "REST API" +msgstr "API REST" + +#: awx/api/views.py:134 +#: awx/templates/rest_framework/api.html:4 +msgid "Ansible Tower REST API" +msgstr "API REST Ansible Tower" + +#: awx/api/views.py:150 +msgid "Version 1" +msgstr "Version 1" + +#: awx/api/views.py:201 +msgid "Ping" +msgstr "Ping" + +#: awx/api/views.py:230 +#: awx/conf/apps.py:12 +msgid "Configuration" +msgstr "Configuration" + +#: awx/api/views.py:283 +msgid "Invalid license data" +msgstr "Données de licence non valides" + +#: awx/api/views.py:285 +msgid "Missing 'eula_accepted' property" +msgstr "Propriété 'eula_accepted' manquante" + +#: awx/api/views.py:289 +msgid "'eula_accepted' value is invalid" +msgstr "La valeur 'eula_accepted' n'est pas valide" + +#: awx/api/views.py:292 +msgid "'eula_accepted' must be True" +msgstr "La valeur 'eula_accepted' doit être True" + +#: awx/api/views.py:299 +msgid "Invalid JSON" +msgstr "Syntaxe JSON non valide" + +#: awx/api/views.py:307 +msgid "Invalid License" +msgstr "Licence non valide" + +#: awx/api/views.py:317 +msgid "Invalid license" +msgstr "Licence non valide" + +#: awx/api/views.py:325 +#, python-format +msgid "Failed to remove license (%s)" +msgstr "Suppression de la licence (%s) impossible" + +#: awx/api/views.py:330 +msgid "Dashboard" +msgstr "Tableau de bord" + +#: awx/api/views.py:436 +msgid "Dashboard Jobs Graphs" +msgstr "Graphiques de tâches du tableau de bord" + +#: awx/api/views.py:472 +#, python-format +msgid "Unknown period \"%s\"" +msgstr "Période \"%s\" inconnue" + +#: awx/api/views.py:486 +msgid "Schedules" +msgstr "Calendriers" + +#: awx/api/views.py:505 +msgid "Schedule Jobs List" +msgstr "Listes des tâches de planification" + +#: awx/api/views.py:715 +msgid "Your Tower license only permits a single organization to exist." +msgstr "Votre licence Tower permet l'existence d'une seule organisation." + +#: awx/api/views.py:940 +#: awx/api/views.py:1299 +msgid "Role 'id' field is missing." +msgstr "Le champ \"id\" du rôle est manquant." + +#: awx/api/views.py:946 +#: awx/api/views.py:4093 +msgid "You cannot assign an Organization role as a child role for a Team." +msgstr "" +"Vous ne pouvez pas attribuer un rôle Organisation en tant que rôle enfant " +"pour une équipe." + +#: awx/api/views.py:950 +#: awx/api/views.py:4107 +msgid "You cannot grant system-level permissions to a team." +msgstr "" +"Vous ne pouvez pas accorder de permissions au niveau système à une équipe." + +#: awx/api/views.py:957 +#: awx/api/views.py:4099 +msgid "" +"You cannot grant credential access to a team when the Organization field " +"isn't set, or belongs to a different organization" +msgstr "" +"Vous ne pouvez pas accorder d'accès par informations d'identification à une " +"équipe lorsque le champ Organisation n'est pas défini ou qu'elle appartient " +"à une organisation différente" + +#: awx/api/views.py:1047 +msgid "Cannot delete project." +msgstr "Suppression du projet impossible." + +#: awx/api/views.py:1076 +msgid "Project Schedules" +msgstr "Calendriers des projets" + +#: awx/api/views.py:1180 +#: awx/api/views.py:2271 +#: awx/api/views.py:3284 +msgid "Cannot delete job resource when associated workflow job is running." +msgstr "" +"Impossible de supprimer les ressources de tâche lorsqu'une tâche de workflow " +"associée est en cours d'exécution." 
+ +#: awx/api/views.py:1257 +msgid "Me" +msgstr "Moi-même" + +#: awx/api/views.py:1303 +#: awx/api/views.py:4048 +msgid "You may not perform any action with your own admin_role." +msgstr "Vous ne pouvez pas effectuer d'action avec votre propre admin_role." + +#: awx/api/views.py:1309 +#: awx/api/views.py:4052 +msgid "You may not change the membership of a users admin_role" +msgstr "" +"Vous ne pouvez pas modifier l'appartenance de l'admin_role d'un utilisateur" + +#: awx/api/views.py:1314 +#: awx/api/views.py:4057 +msgid "" +"You cannot grant credential access to a user not in the credentials' " +"organization" +msgstr "" +"Vous ne pouvez pas accorder d'accès par informations d'identification à un " +"utilisateur ne figurant pas dans l'organisation d'informations " +"d'identification." + +#: awx/api/views.py:1318 +#: awx/api/views.py:4061 +msgid "You cannot grant private credential access to another user" +msgstr "" +"Vous ne pouvez pas accorder d'accès privé par informations d'identification " +"à un autre utilisateur" + +#: awx/api/views.py:1416 +#, python-format +msgid "Cannot change %s." +msgstr "Impossible de modifier %s." + +#: awx/api/views.py:1422 +msgid "Cannot delete user." +msgstr "Impossible de supprimer l'utilisateur." + +#: awx/api/views.py:1570 +msgid "Cannot delete inventory script." +msgstr "Impossible de supprimer le script d'inventaire." + +#: awx/api/views.py:1806 +msgid "Fact not found." +msgstr "Fait introuvable." + +#: awx/api/views.py:2126 +msgid "Inventory Source List" +msgstr "Liste des sources d'inventaire" + +#: awx/api/views.py:2154 +msgid "Cannot delete inventory source." +msgstr "Impossible de supprimer la source d'inventaire." + +#: awx/api/views.py:2162 +msgid "Inventory Source Schedules" +msgstr "Calendriers des sources d'inventaire" + +#: awx/api/views.py:2192 +msgid "Notification Templates can only be assigned when source is one of {}." +msgstr "" +"Les modèles de notification ne peuvent être attribués que lorsque la source " +"est l'une des {}." + +#: awx/api/views.py:2403 +msgid "Job Template Schedules" +msgstr "Calendriers des modèles de tâche" + +#: awx/api/views.py:2423 +#: awx/api/views.py:2439 +msgid "Your license does not allow adding surveys." +msgstr "Votre licence ne permet pas l'ajout de questionnaires." + +#: awx/api/views.py:2446 +msgid "'name' missing from survey spec." +msgstr "'name' manquant dans la spécification du questionnaire." + +#: awx/api/views.py:2448 +msgid "'description' missing from survey spec." +msgstr "'description' manquante dans la spécification du questionnaire." + +#: awx/api/views.py:2450 +msgid "'spec' missing from survey spec." +msgstr "'spec' manquante dans la spécification du questionnaire." + +#: awx/api/views.py:2452 +msgid "'spec' must be a list of items." +msgstr "'spec' doit être une liste d'éléments" + +#: awx/api/views.py:2454 +msgid "'spec' doesn't contain any items." +msgstr "'spec' ne contient aucun élément." + +#: awx/api/views.py:2460 +#, python-format +msgid "Survey question %s is not a json object." +msgstr "La question %s n'est pas un objet json." + +#: awx/api/views.py:2462 +#, python-format +msgid "'type' missing from survey question %s." +msgstr "'type' est manquant dans la question %s." + +#: awx/api/views.py:2464 +#, python-format +msgid "'question_name' missing from survey question %s." +msgstr "'question_name' est manquant dans la question %s." + +#: awx/api/views.py:2466 +#, python-format +msgid "'variable' missing from survey question %s." 
+msgstr "'variable' est manquant dans la question %s." + +#: awx/api/views.py:2468 +#, python-format +msgid "'variable' '%(item)s' duplicated in survey question %(survey)s." +msgstr "'variable' '%(item)s' en double dans la question %(survey)s." + +#: awx/api/views.py:2473 +#, python-format +msgid "'required' missing from survey question %s." +msgstr "'required' est manquant dans la question %s." + +#: awx/api/views.py:2684 +msgid "No matching host could be found!" +msgstr "Aucun hôte correspondant n'a été trouvé." + +#: awx/api/views.py:2687 +msgid "Multiple hosts matched the request!" +msgstr "Plusieurs hôtes correspondent à la requête." + +#: awx/api/views.py:2692 +msgid "Cannot start automatically, user input required!" +msgstr "" +"Impossible de démarrer automatiquement, saisie de l'utilisateur obligatoire." + +#: awx/api/views.py:2699 +msgid "Host callback job already pending." +msgstr "La tâche de rappel de l'hôte est déjà en attente." + +#: awx/api/views.py:2712 +msgid "Error starting job!" +msgstr "Erreur lors du démarrage de la tâche." + +#: awx/api/views.py:3041 +msgid "Workflow Job Template Schedules" +msgstr "Calendriers des modèles de tâche Workflow" + +#: awx/api/views.py:3183 +#: awx/api/views.py:3726 +msgid "Superuser privileges needed." +msgstr "Privilèges de superutilisateur requis." + +#: awx/api/views.py:3215 +msgid "System Job Template Schedules" +msgstr "Calendriers des modèles de tâche Système" + +#: awx/api/views.py:3407 +msgid "Job Host Summaries List" +msgstr "Liste récapitulative des hôtes de la tâche" + +#: awx/api/views.py:3449 +msgid "Job Event Children List" +msgstr "Liste des enfants d'événement de la tâche" + +#: awx/api/views.py:3458 +msgid "Job Event Hosts List" +msgstr "Liste des hôtes d'événement de la tâche" + +#: awx/api/views.py:3467 +msgid "Job Events List" +msgstr "Liste des événements de la tâche" + +#: awx/api/views.py:3680 +msgid "Ad Hoc Command Events List" +msgstr "Liste d'événements de la commande ad hoc" + +#: awx/api/views.py:3874 +#, python-format +msgid "Error generating stdout download file: %s" +msgstr "Erreur lors de la génération du fichier de téléchargement stdout : %s" + +#: awx/api/views.py:3919 +msgid "Delete not allowed while there are pending notifications" +msgstr "Suppression non autorisée tant que des notifications sont en attente" + +#: awx/api/views.py:3926 +msgid "Notification Template Test" +msgstr "Test de modèle de notification" + +#: awx/api/views.py:4042 +msgid "User 'id' field is missing." +msgstr "Le champ \"id\" de l'utilisateur est manquant." + +#: awx/api/views.py:4085 +msgid "Team 'id' field is missing." +msgstr "Le champ \"id\" de l'équipe est manquant." 
+
+#: awx/conf/conf.py:20
+msgid "Bud Frogs"
+msgstr "Bud Frogs"
+
+#: awx/conf/conf.py:21
+msgid "Bunny"
+msgstr "Bunny"
+
+#: awx/conf/conf.py:22
+msgid "Cheese"
+msgstr "Cheese"
+
+#: awx/conf/conf.py:23
+msgid "Daemon"
+msgstr "Daemon"
+
+#: awx/conf/conf.py:24
+msgid "Default Cow"
+msgstr "Default Cow"
+
+#: awx/conf/conf.py:25
+msgid "Dragon"
+msgstr "Dragon"
+
+#: awx/conf/conf.py:26
+msgid "Elephant in Snake"
+msgstr "Elephant in Snake"
+
+#: awx/conf/conf.py:27
+msgid "Elephant"
+msgstr "Elephant"
+
+#: awx/conf/conf.py:28
+msgid "Eyes"
+msgstr "Eyes"
+
+#: awx/conf/conf.py:29
+msgid "Hello Kitty"
+msgstr "Hello Kitty"
+
+#: awx/conf/conf.py:30
+msgid "Kitty"
+msgstr "Kitty"
+
+#: awx/conf/conf.py:31
+msgid "Luke Koala"
+msgstr "Luke Koala"
+
+#: awx/conf/conf.py:32
+msgid "Meow"
+msgstr "Meow"
+
+#: awx/conf/conf.py:33
+msgid "Milk"
+msgstr "Milk"
+
+#: awx/conf/conf.py:34
+msgid "Moofasa"
+msgstr "Moofasa"
+
+#: awx/conf/conf.py:35
+msgid "Moose"
+msgstr "Moose"
+
+#: awx/conf/conf.py:36
+msgid "Ren"
+msgstr "Ren"
+
+#: awx/conf/conf.py:37
+msgid "Sheep"
+msgstr "Sheep"
+
+#: awx/conf/conf.py:38
+msgid "Small Cow"
+msgstr "Small Cow"
+
+#: awx/conf/conf.py:39
+msgid "Stegosaurus"
+msgstr "Stegosaurus"
+
+#: awx/conf/conf.py:40
+msgid "Stimpy"
+msgstr "Stimpy"
+
+#: awx/conf/conf.py:41
+msgid "Super Milker"
+msgstr "Super Milker"
+
+#: awx/conf/conf.py:42
+msgid "Three Eyes"
+msgstr "Three Eyes"
+
+#: awx/conf/conf.py:43
+msgid "Turkey"
+msgstr "Turkey"
+
+#: awx/conf/conf.py:44
+msgid "Turtle"
+msgstr "Turtle"
+
+#: awx/conf/conf.py:45
+msgid "Tux"
+msgstr "Tux"
+
+#: awx/conf/conf.py:46
+msgid "Udder"
+msgstr "Udder"
+
+#: awx/conf/conf.py:47
+msgid "Vader Koala"
+msgstr "Vader Koala"
+
+#: awx/conf/conf.py:48
+msgid "Vader"
+msgstr "Vader"
+
+#: awx/conf/conf.py:49
+msgid "WWW"
+msgstr "WWW"
+
+#: awx/conf/conf.py:52
+msgid "Cow Selection"
+msgstr "Sélection du cow"
+
+#: awx/conf/conf.py:53
+msgid "Select which cow to use with cowsay when running jobs."
+msgstr ""
+"Sélectionnez quel cow utiliser avec cowsay lors de l'exécution de tâches."
+
+#: awx/conf/conf.py:54
+#: awx/conf/conf.py:75
+msgid "Cows"
+msgstr "Cows"
+
+#: awx/conf/conf.py:73
+msgid "Example Read-Only Setting"
+msgstr "Exemple de paramètre en lecture seule"
+
+#: awx/conf/conf.py:74
+msgid "Example setting that cannot be changed."
+msgstr "Exemple de paramètre qui ne peut pas être modifié."
+
+#: awx/conf/conf.py:93
+msgid "Example Setting"
+msgstr "Exemple de paramètre"
+
+#: awx/conf/conf.py:94
+msgid "Example setting which can be different for each user."
+msgstr "Exemple de paramètre qui peut être différent pour chaque utilisateur."
+
+#: awx/conf/conf.py:95
+#: awx/conf/registry.py:76
+#: awx/conf/views.py:46
+msgid "User"
+msgstr "Utilisateur"
+
+#: awx/conf/fields.py:38
+msgid "Enter a valid URL"
+msgstr "Entrez une URL valide"
+
+#: awx/conf/license.py:19
+msgid "Your Tower license does not allow that."
+msgstr "Votre licence Tower ne vous y autorise pas."
+
+#: awx/conf/management/commands/migrate_to_database_settings.py:41
+msgid "Only show which settings would be commented/migrated."
+msgstr ""
+"Afficher seulement les paramètres qui seraient commentés/migrés."
+
+#: awx/conf/management/commands/migrate_to_database_settings.py:48
+msgid ""
+"Skip over settings that would raise an error when commenting/migrating."
+msgstr ""
+"Ignorer les paramètres qui provoqueraient une erreur lors de la mise en "
+"commentaire ou de la migration."
+ +#: awx/conf/management/commands/migrate_to_database_settings.py:55 +msgid "Skip commenting out settings in files." +msgstr "Ignorer la saisie de commentaires de paramètres dans les fichiers." + +#: awx/conf/management/commands/migrate_to_database_settings.py:61 +msgid "Backup existing settings files with this suffix." +msgstr "Sauvegardez les fichiers de paramètres existants avec ce suffixe." + +#: awx/conf/registry.py:64 +#: awx/conf/tests/unit/test_registry.py:169 +#: awx/conf/tests/unit/test_registry.py:192 +#: awx/conf/tests/unit/test_registry.py:196 +#: awx/conf/tests/unit/test_registry.py:201 +#: awx/conf/tests/unit/test_registry.py:208 +msgid "All" +msgstr "Tous" + +#: awx/conf/registry.py:65 +#: awx/conf/tests/unit/test_registry.py:170 +#: awx/conf/tests/unit/test_registry.py:193 +#: awx/conf/tests/unit/test_registry.py:197 +#: awx/conf/tests/unit/test_registry.py:202 +#: awx/conf/tests/unit/test_registry.py:209 +msgid "Changed" +msgstr "Modifié" + +#: awx/conf/registry.py:77 +msgid "User-Defaults" +msgstr "Paramètres utilisateur par défaut" + +#: awx/conf/tests/unit/test_registry.py:46 +#: awx/conf/tests/unit/test_registry.py:56 +#: awx/conf/tests/unit/test_registry.py:72 +#: awx/conf/tests/unit/test_registry.py:87 +#: awx/conf/tests/unit/test_registry.py:100 +#: awx/conf/tests/unit/test_registry.py:106 +#: awx/conf/tests/unit/test_registry.py:126 +#: awx/conf/tests/unit/test_registry.py:140 +#: awx/conf/tests/unit/test_registry.py:146 +#: awx/conf/tests/unit/test_registry.py:159 +#: awx/conf/tests/unit/test_registry.py:171 +#: awx/conf/tests/unit/test_registry.py:180 +#: awx/conf/tests/unit/test_registry.py:198 +#: awx/conf/tests/unit/test_registry.py:210 +#: awx/conf/tests/unit/test_registry.py:219 +#: awx/conf/tests/unit/test_registry.py:225 +#: awx/conf/tests/unit/test_registry.py:237 +#: awx/conf/tests/unit/test_registry.py:245 +#: awx/conf/tests/unit/test_registry.py:288 +#: awx/conf/tests/unit/test_registry.py:306 +#: awx/conf/tests/unit/test_settings.py:67 +#: awx/conf/tests/unit/test_settings.py:81 +#: awx/conf/tests/unit/test_settings.py:97 +#: awx/conf/tests/unit/test_settings.py:110 +#: awx/conf/tests/unit/test_settings.py:127 +#: awx/conf/tests/unit/test_settings.py:143 +#: awx/conf/tests/unit/test_settings.py:162 +#: awx/conf/tests/unit/test_settings.py:183 +#: awx/conf/tests/unit/test_settings.py:197 +#: awx/conf/tests/unit/test_settings.py:221 +#: awx/conf/tests/unit/test_settings.py:241 +#: awx/conf/tests/unit/test_settings.py:258 +#: awx/main/conf.py:19 +#: awx/main/conf.py:29 +#: awx/main/conf.py:39 +#: awx/main/conf.py:48 +#: awx/main/conf.py:60 +#: awx/main/conf.py:78 +#: awx/main/conf.py:103 +msgid "System" +msgstr "Système" + +#: awx/conf/tests/unit/test_registry.py:165 +#: awx/conf/tests/unit/test_registry.py:172 +#: awx/conf/tests/unit/test_registry.py:187 +#: awx/conf/tests/unit/test_registry.py:203 +#: awx/conf/tests/unit/test_registry.py:211 +msgid "OtherSystem" +msgstr "Autre Système" + +#: awx/conf/views.py:38 +msgid "Setting Categories" +msgstr "Catégories de paramètre" + +#: awx/conf/views.py:61 +msgid "Setting Detail" +msgstr "Détails du paramètre" + +#: awx/main/access.py:255 +#, python-format +msgid "Bad data found in related field %s." +msgstr "Données incorrectes trouvées dans le champ %s associé." + +#: awx/main/access.py:296 +msgid "License is missing." +msgstr "La licence est manquante." + +#: awx/main/access.py:298 +msgid "License has expired." +msgstr "La licence est arrivée à expiration." 
+
+#: awx/main/access.py:306
+#, python-format
+msgid "License count of %s instances has been reached."
+msgstr "Le nombre de %s instances autorisé par la licence a été atteint."
+
+#: awx/main/access.py:308
+#, python-format
+msgid "License count of %s instances has been exceeded."
+msgstr "Le nombre de %s instances autorisé par la licence a été dépassé."
+
+#: awx/main/access.py:310
+msgid "Host count exceeds available instances."
+msgstr "Le nombre d'hôtes dépasse celui des instances disponibles."
+
+#: awx/main/access.py:314
+#, python-format
+msgid "Feature %s is not enabled in the active license."
+msgstr "La fonctionnalité %s n'est pas activée dans la licence active."
+
+#: awx/main/access.py:316
+msgid "Features not found in active license."
+msgstr "Fonctionnalités introuvables dans la licence active."
+
+#: awx/main/access.py:514
+#: awx/main/access.py:581
+#: awx/main/access.py:706
+#: awx/main/access.py:969
+#: awx/main/access.py:1208
+#: awx/main/access.py:1605
+msgid "Resource is being used by running jobs"
+msgstr "La ressource est utilisée par des tâches en cours d'exécution"
+
+#: awx/main/access.py:625
+msgid "Unable to change inventory on a host."
+msgstr "Impossible de modifier l'inventaire sur un hôte."
+
+#: awx/main/access.py:642
+#: awx/main/access.py:687
+msgid "Cannot associate two items from different inventories."
+msgstr "Impossible d'associer deux éléments d'inventaires différents."
+
+#: awx/main/access.py:675
+msgid "Unable to change inventory on a group."
+msgstr "Impossible de modifier l'inventaire sur un groupe."
+
+#: awx/main/access.py:889
+msgid "Unable to change organization on a team."
+msgstr "Impossible de modifier l'organisation d'une équipe."
+
+#: awx/main/access.py:902
+msgid "The {} role cannot be assigned to a team"
+msgstr "Le rôle {} ne peut pas être attribué à une équipe"
+
+#: awx/main/access.py:904
+msgid "The admin_role for a User cannot be assigned to a team"
+msgstr "L'admin_role d'un utilisateur ne peut pas être attribué à une équipe"
+
+#: awx/main/access.py:1678
+msgid ""
+"You do not have permission to the workflow job resources required for "
+"relaunch."
+msgstr ""
+"Vous n'avez pas la permission d'accéder aux ressources de tâche de workflow "
+"requises pour la relance."
+
+#: awx/main/apps.py:9
+msgid "Main"
+msgstr "Principal"
+
+#: awx/main/conf.py:17
+msgid "Enable Activity Stream"
+msgstr "Activer le flux d'activité"
+
+#: awx/main/conf.py:18
+msgid "Enable capturing activity for the Tower activity stream."
+msgstr "Activer la capture d'activités pour le flux d'activité Tower."
+
+#: awx/main/conf.py:27
+msgid "Enable Activity Stream for Inventory Sync"
+msgstr "Activer le flux d'activité pour la synchronisation des inventaires"
+
+#: awx/main/conf.py:28
+msgid ""
+"Enable capturing activity for the Tower activity stream when running "
+"inventory sync."
+msgstr ""
+"Activer la capture d'activités pour le flux d'activité Tower lors de la "
+"synchronisation des inventaires."
+
+#: awx/main/conf.py:37
+msgid "All Users Visible to Organization Admins"
+msgstr ""
+"Tous les utilisateurs visibles pour les administrateurs de l'organisation"
+
+#: awx/main/conf.py:38
+msgid ""
+"Controls whether any Organization Admin can view all users, even those not "
+"associated with their Organization."
+msgstr ""
+"Contrôle si un administrateur d'organisation peut ou non afficher tous les "
+"utilisateurs, même ceux qui ne sont pas associés à son organisation."
+
+#: awx/main/conf.py:46
+msgid "Enable Tower Administrator Alerts"
+msgstr "Activer les alertes administrateur de Tower"
+
+#: awx/main/conf.py:47
+msgid ""
+"Allow Tower to email Admin users for system events that may require "
+"attention."
+msgstr ""
+"Autoriser Tower à alerter les administrateurs par email concernant des "
+"événements système susceptibles de mériter leur attention."
+
+#: awx/main/conf.py:57
+msgid "Base URL of the Tower host"
+msgstr "URL de base pour l'hôte Tower"
+
+#: awx/main/conf.py:58
+msgid ""
+"This setting is used by services like notifications to render a valid url to "
+"the Tower host."
+msgstr ""
+"Ce paramètre est utilisé par des services tels que les notifications pour "
+"générer une URL valide vers l'hôte Tower."
+
+#: awx/main/conf.py:67
+msgid "Remote Host Headers"
+msgstr "En-têtes d'hôte distant"
+
+#: awx/main/conf.py:68
+msgid ""
+"HTTP headers and meta keys to search to determine remote host name or IP. "
+"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if "
+"behind a reverse proxy.\n"
+"\n"
+"Note: The headers will be searched in order and the first found remote host "
+"name or IP will be used.\n"
+"\n"
+"In the below example 8.8.8.7 would be the chosen IP address.\n"
+"X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n"
+"Host: 127.0.0.1\n"
+"REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR', 'REMOTE_HOST']"
+msgstr ""
+"En-têtes HTTP et méta-clés à rechercher afin de déterminer le nom ou "
+"l'adresse IP d'un hôte distant. Ajoutez des éléments supplémentaires à cette "
+"liste, tels que \"HTTP_X_FORWARDED_FOR\", en présence d'un proxy inverse.\n"
+"\n"
+"Remarque : les en-têtes seront recherchés dans l'ordre, et le premier nom ou "
+"la première adresse IP d'hôte distant trouvé(e) sera utilisé(e).\n"
+"\n"
+"Dans l'exemple ci-dessous, 8.8.8.7 serait l'adresse IP choisie.\n"
+"X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n"
+"Host: 127.0.0.1\n"
+"REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR', 'REMOTE_HOST']"
+
+#: awx/main/conf.py:99
+msgid "Tower License"
+msgstr "Licence Tower"
+
+#: awx/main/conf.py:100
+msgid ""
+"The license controls which features and functionality are enabled in Tower. "
+"Use /api/v1/config/ to update or change the license."
+msgstr ""
+"La licence détermine les fonctionnalités et les fonctions qui sont activées "
+"dans Tower. Utilisez /api/v1/config/ pour mettre à jour ou modifier la "
+"licence."
+
+#: awx/main/conf.py:110
+msgid "Ansible Modules Allowed for Ad Hoc Jobs"
+msgstr "Modules Ansible autorisés pour des tâches ad hoc"
+
+#: awx/main/conf.py:111
+msgid "List of modules allowed to be used by ad-hoc jobs."
+msgstr "Liste des modules que des tâches ad hoc sont autorisées à utiliser."
+
+#: awx/main/conf.py:112
+#: awx/main/conf.py:121
+#: awx/main/conf.py:130
+#: awx/main/conf.py:140
+#: awx/main/conf.py:150
+#: awx/main/conf.py:160
+#: awx/main/conf.py:170
+#: awx/main/conf.py:180
+#: awx/main/conf.py:190
+#: awx/main/conf.py:202
+#: awx/main/conf.py:214
+#: awx/main/conf.py:226
+msgid "Jobs"
+msgstr "Tâches"
+
+#: awx/main/conf.py:119
+msgid "Enable job isolation"
+msgstr "Activer l'isolement des tâches"
+
+#: awx/main/conf.py:120
+msgid ""
+"Isolates an Ansible job from protected parts of the Tower system to prevent "
+"exposing sensitive information."
+msgstr ""
+"Permet d'isoler une tâche Ansible des parties protégées du système Tower "
+"pour éviter l'exposition d'informations sensibles."
+
+#: awx/main/conf.py:128
+msgid "Job isolation execution path"
+msgstr "Chemin d'exécution pour l'isolement des tâches"
+
+#: awx/main/conf.py:129
+msgid ""
+"Create temporary working directories for isolated jobs in this location."
+msgstr ""
+"Créer des répertoires de travail temporaires pour les tâches isolées à cet "
+"emplacement."
+
+#: awx/main/conf.py:138
+msgid "Paths to hide from isolated jobs"
+msgstr "Chemins à dissimuler aux tâches isolées"
+
+#: awx/main/conf.py:139
+msgid "Additional paths to hide from isolated processes."
+msgstr "Chemins supplémentaires à dissimuler aux processus isolés."
+
+#: awx/main/conf.py:148
+msgid "Paths to expose to isolated jobs"
+msgstr "Chemins à exposer aux tâches isolées"
+
+#: awx/main/conf.py:149
+msgid ""
+"Whitelist of paths that would otherwise be hidden to expose to isolated jobs."
+msgstr ""
+"Liste blanche des chemins à exposer aux tâches isolées, qui seraient "
+"autrement dissimulés."
+
+#: awx/main/conf.py:158
+msgid "Standard Output Maximum Display Size"
+msgstr "Taille d'affichage maximale pour une sortie standard"
+
+#: awx/main/conf.py:159
+msgid ""
+"Maximum Size of Standard Output in bytes to display before requiring the "
+"output be downloaded."
+msgstr ""
+"Taille maximale d'une sortie standard en octets à afficher avant de demander "
+"le téléchargement de la sortie."
+
+#: awx/main/conf.py:168
+msgid "Job Event Standard Output Maximum Display Size"
+msgstr ""
+"Taille d'affichage maximale pour une sortie standard d'événement de tâche"
+
+#: awx/main/conf.py:169
+msgid ""
+"Maximum Size of Standard Output in bytes to display for a single job or ad "
+"hoc command event. `stdout` will end with `…` when truncated."
+msgstr ""
+"Taille maximale de la sortie standard en octets à afficher pour une seule "
+"tâche ou pour un seul événement de commande ad hoc. `stdout` se terminera "
+"par `…` quand il sera tronqué."
+
+#: awx/main/conf.py:178
+msgid "Maximum Scheduled Jobs"
+msgstr "Nombre max. de tâches planifiées"
+
+#: awx/main/conf.py:179
+msgid ""
+"Maximum number of the same job template that can be waiting to run when "
+"launching from a schedule before no more are created."
+msgstr ""
+"Nombre maximal de tâches d'un même modèle pouvant être en attente "
+"d'exécution lors d'un lancement planifié, au-delà duquel aucune autre n'est "
+"créée."
+
+#: awx/main/conf.py:188
+msgid "Ansible Callback Plugins"
+msgstr "Plug-ins de rappel Ansible"
+
+#: awx/main/conf.py:189
+msgid ""
+"List of paths to search for extra callback plugins to be used when running "
+"jobs."
+msgstr ""
+"Liste des chemins servant à rechercher d'autres plug-ins de rappel qui "
+"serviront lors de l'exécution de tâches."
+
+#: awx/main/conf.py:199
+msgid "Default Job Timeout"
+msgstr "Délai d'attente par défaut des tâches"
+
+#: awx/main/conf.py:200
+msgid ""
+"Maximum time to allow jobs to run. Use value of 0 to indicate that no "
+"timeout should be imposed. A timeout set on an individual job template will "
+"override this."
+msgstr ""
+"Délai maximal d'exécution des tâches. Utilisez la valeur 0 pour indiquer "
+"qu'aucun délai ne doit être imposé. Un délai d'attente défini sur un modèle "
+"de tâche individuel remplacera cette valeur."
+
+#: awx/main/conf.py:211
+msgid "Default Inventory Update Timeout"
+msgstr "Délai d'attente par défaut pour la mise à jour d'inventaire"
+
+#: awx/main/conf.py:212
+msgid ""
+"Maximum time to allow inventory updates to run. Use value of 0 to indicate "
+"that no timeout should be imposed. A timeout set on an individual inventory "
+"source will override this."
+msgstr ""
+"Délai maximal d'exécution des mises à jour d'inventaire. Utilisez la valeur "
+"0 pour indiquer qu'aucun délai ne doit être imposé. Un délai d'attente "
+"défini sur une source d'inventaire individuelle remplacera cette valeur."
+
+#: awx/main/conf.py:223
+msgid "Default Project Update Timeout"
+msgstr "Délai d'attente par défaut pour la mise à jour de projet"
+
+#: awx/main/conf.py:224
+msgid ""
+"Maximum time to allow project updates to run. Use value of 0 to indicate "
+"that no timeout should be imposed. A timeout set on an individual project "
+"will override this."
+msgstr ""
+"Délai maximal d'exécution des mises à jour de projet. Utilisez la valeur 0 "
+"pour indiquer qu'aucun délai ne doit être imposé. Un délai d'attente défini "
+"sur un projet individuel remplacera cette valeur."
+
+#: awx/main/conf.py:234
+msgid "Logging Aggregator"
+msgstr "Agrégateur de journalisation"
+
+#: awx/main/conf.py:235
+msgid "Hostname/IP where external logs will be sent to."
+msgstr "Nom d'hôte / IP où les journaux externes seront envoyés."
+
+#: awx/main/conf.py:236
+#: awx/main/conf.py:245
+#: awx/main/conf.py:255
+#: awx/main/conf.py:264
+#: awx/main/conf.py:274
+#: awx/main/conf.py:288
+#: awx/main/conf.py:300
+#: awx/main/conf.py:309
+msgid "Logging"
+msgstr "Journalisation"
+
+#: awx/main/conf.py:243
+msgid "Logging Aggregator Port"
+msgstr "Port d'agrégateur de journalisation"
+
+#: awx/main/conf.py:244
+msgid "Port on Logging Aggregator to send logs to (if required)."
+msgstr ""
+"Port d'agrégateur de journalisation où envoyer les journaux (le cas échéant)."
+
+#: awx/main/conf.py:253
+msgid "Logging Aggregator Type"
+msgstr "Type d'agrégateur de journalisation"
+
+#: awx/main/conf.py:254
+msgid "Format messages for the chosen log aggregator."
+msgstr ""
+"Formater les messages pour l'agrégateur de journalisation que vous aurez "
+"choisi."
+
+#: awx/main/conf.py:262
+msgid "Logging Aggregator Username"
+msgstr "Nom d'utilisateur de l'agrégateur de journalisation"
+
+#: awx/main/conf.py:263
+msgid "Username for external log aggregator (if required)."
+msgstr ""
+"Nom d'utilisateur pour agrégateur de journalisation externe (le cas échéant)."
+
+#: awx/main/conf.py:272
+msgid "Logging Aggregator Password/Token"
+msgstr "Mot de passe / Jeton d'agrégateur de journalisation"
+
+#: awx/main/conf.py:273
+msgid ""
+"Password or authentication token for external log aggregator (if required)."
+msgstr ""
+"Mot de passe ou jeton d'authentification d'agrégateur de journalisation "
+"externe (le cas échéant)."
+
+#: awx/main/conf.py:281
+msgid "Loggers to send data to the log aggregator from"
+msgstr ""
+"Journaliseurs depuis lesquels envoyer des données à l'agrégateur de journaux"
+
+#: awx/main/conf.py:282
+msgid ""
+"List of loggers that will send HTTP logs to the collector, these can include "
+"any or all of: \n"
+"awx - Tower service logs\n"
+"activity_stream - activity stream records\n"
+"job_events - callback data from Ansible job events\n"
+"system_tracking - facts gathered from scan jobs."
+msgstr ""
+"Liste des journaliseurs qui enverront des journaux HTTP au collecteur, "
+"notamment (tous les types ou certains seulement) : \n"
+"awx - journaux de service Tower\n"
+"activity_stream - enregistrements de flux d'activité\n"
+"job_events - données de rappel issues d'événements de tâche Ansible\n"
+"system_tracking - données générées par des tâches de scan."
+
+#: awx/main/conf.py:295
+msgid "Log System Tracking Facts Individually"
+msgstr "Journaliser individuellement les facts de suivi du système"
+
+#: awx/main/conf.py:296
+msgid ""
+"If set, system tracking facts will be sent for each package, service, "
+"orother item found in a scan, allowing for greater search query granularity. "
+"If unset, facts will be sent as a single dictionary, allowing for greater "
+"efficiency in fact processing."
+msgstr ""
+"Si ce paramètre est défini, les facts de suivi du système seront envoyés "
+"pour chaque package, service ou autre élément trouvé lors d'un scan, ce qui "
+"permet une plus grande granularité des requêtes de recherche. S'il n'est "
+"pas défini, les facts seront envoyés sous forme de dictionnaire unique, ce "
+"qui permet un traitement des facts plus efficace."
+
+#: awx/main/conf.py:307
+msgid "Enable External Logging"
+msgstr "Activer la journalisation externe"
+
+#: awx/main/conf.py:308
+msgid "Enable sending logs to external log aggregator."
+msgstr "Activer l'envoi de journaux à un agrégateur de journaux externe."
+
+#: awx/main/models/activity_stream.py:22
+msgid "Entity Created"
+msgstr "Entité créée"
+
+#: awx/main/models/activity_stream.py:23
+msgid "Entity Updated"
+msgstr "Entité mise à jour"
+
+#: awx/main/models/activity_stream.py:24
+msgid "Entity Deleted"
+msgstr "Entité supprimée"
+
+#: awx/main/models/activity_stream.py:25
+msgid "Entity Associated with another Entity"
+msgstr "Entité associée à une autre entité"
+
+#: awx/main/models/activity_stream.py:26
+msgid "Entity was Disassociated with another Entity"
+msgstr "Entité dissociée d'une autre entité"
+
+#: awx/main/models/ad_hoc_commands.py:96
+msgid "No valid inventory."
+msgstr "Aucun inventaire valide."
+
+#: awx/main/models/ad_hoc_commands.py:103
+#: awx/main/models/jobs.py:161
+msgid "You must provide a machine / SSH credential."
+msgstr "Vous devez fournir des informations d'identification machine / SSH."
+
+#: awx/main/models/ad_hoc_commands.py:114
+#: awx/main/models/ad_hoc_commands.py:122
+msgid "Invalid type for ad hoc command"
+msgstr "Type non valide pour la commande ad hoc"
+
+#: awx/main/models/ad_hoc_commands.py:117
+msgid "Unsupported module for ad hoc commands."
+msgstr "Module non pris en charge pour les commandes ad hoc."
+
+#: awx/main/models/ad_hoc_commands.py:125
+#, python-format
+msgid "No argument passed to %s module."
+msgstr "Aucun argument transmis au module %s."
+ +#: awx/main/models/ad_hoc_commands.py:222 +#: awx/main/models/jobs.py:763 +msgid "Host Failed" +msgstr "Échec de l'hôte" + +#: awx/main/models/ad_hoc_commands.py:223 +#: awx/main/models/jobs.py:764 +msgid "Host OK" +msgstr "Hôte OK" + +#: awx/main/models/ad_hoc_commands.py:224 +#: awx/main/models/jobs.py:767 +msgid "Host Unreachable" +msgstr "Hôte inaccessible" + +#: awx/main/models/ad_hoc_commands.py:229 +#: awx/main/models/jobs.py:766 +msgid "Host Skipped" +msgstr "Hôte ignoré" + +#: awx/main/models/ad_hoc_commands.py:239 +#: awx/main/models/jobs.py:794 +msgid "Debug" +msgstr "Déboguer" + +#: awx/main/models/ad_hoc_commands.py:240 +#: awx/main/models/jobs.py:795 +msgid "Verbose" +msgstr "Verbeux" + +#: awx/main/models/ad_hoc_commands.py:241 +#: awx/main/models/jobs.py:796 +msgid "Deprecated" +msgstr "Obsolète" + +#: awx/main/models/ad_hoc_commands.py:242 +#: awx/main/models/jobs.py:797 +msgid "Warning" +msgstr "Avertissement" + +#: awx/main/models/ad_hoc_commands.py:243 +#: awx/main/models/jobs.py:798 +msgid "System Warning" +msgstr "Avertissement système" + +#: awx/main/models/ad_hoc_commands.py:244 +#: awx/main/models/jobs.py:799 +#: awx/main/models/unified_jobs.py:64 +msgid "Error" +msgstr "Erreur" + +#: awx/main/models/base.py:45 +#: awx/main/models/base.py:51 +#: awx/main/models/base.py:56 +msgid "Run" +msgstr "Exécuter" + +#: awx/main/models/base.py:46 +#: awx/main/models/base.py:52 +#: awx/main/models/base.py:57 +msgid "Check" +msgstr "Vérifier" + +#: awx/main/models/base.py:47 +msgid "Scan" +msgstr "Scanner" + +#: awx/main/models/base.py:61 +msgid "Read Inventory" +msgstr "Lire l'inventaire" + +#: awx/main/models/base.py:62 +msgid "Edit Inventory" +msgstr "Modifier l'inventaire" + +#: awx/main/models/base.py:63 +msgid "Administrate Inventory" +msgstr "Administrer l'inventaire" + +#: awx/main/models/base.py:64 +msgid "Deploy To Inventory" +msgstr "Déployer dans l'inventaire" + +#: awx/main/models/base.py:65 +msgid "Deploy To Inventory (Dry Run)" +msgstr "Déployer dans l'inventaire (test uniquement)" + +#: awx/main/models/base.py:66 +msgid "Scan an Inventory" +msgstr "Scanner un inventaire" + +#: awx/main/models/base.py:67 +msgid "Create a Job Template" +msgstr "Créer un modèle de tâche" + +#: awx/main/models/credential.py:33 +msgid "Machine" +msgstr "Machine" + +#: awx/main/models/credential.py:34 +msgid "Network" +msgstr "Réseau" + +#: awx/main/models/credential.py:35 +msgid "Source Control" +msgstr "Contrôle de la source" + +#: awx/main/models/credential.py:36 +msgid "Amazon Web Services" +msgstr "Amazon Web Services" + +#: awx/main/models/credential.py:37 +msgid "Rackspace" +msgstr "Rackspace" + +#: awx/main/models/credential.py:38 +#: awx/main/models/inventory.py:713 +msgid "VMware vCenter" +msgstr "VMware vCenter" + +#: awx/main/models/credential.py:39 +#: awx/main/models/inventory.py:714 +msgid "Red Hat Satellite 6" +msgstr "Red Hat Satellite 6" + +#: awx/main/models/credential.py:40 +#: awx/main/models/inventory.py:715 +msgid "Red Hat CloudForms" +msgstr "Red Hat CloudForms" + +#: awx/main/models/credential.py:41 +#: awx/main/models/inventory.py:710 +msgid "Google Compute Engine" +msgstr "Google Compute Engine" + +#: awx/main/models/credential.py:42 +#: awx/main/models/inventory.py:711 +msgid "Microsoft Azure Classic (deprecated)" +msgstr "Microsoft Azure Classic (obsolète)" + +#: awx/main/models/credential.py:43 +#: awx/main/models/inventory.py:712 +msgid "Microsoft Azure Resource Manager" +msgstr "Microsoft Azure Resource Manager" + +#: awx/main/models/credential.py:44 
+#: awx/main/models/inventory.py:716 +msgid "OpenStack" +msgstr "OpenStack" + +#: awx/main/models/credential.py:48 +msgid "None" +msgstr "Aucun" + +#: awx/main/models/credential.py:49 +msgid "Sudo" +msgstr "Sudo" + +#: awx/main/models/credential.py:50 +msgid "Su" +msgstr "Su" + +#: awx/main/models/credential.py:51 +msgid "Pbrun" +msgstr "Pbrun" + +#: awx/main/models/credential.py:52 +msgid "Pfexec" +msgstr "Pfexec" + +#: awx/main/models/credential.py:53 +msgid "DZDO" +msgstr "DZDO" + +#: awx/main/models/credential.py:54 +msgid "Pmrun" +msgstr "Pmrun" + +#: awx/main/models/credential.py:103 +msgid "Host" +msgstr "Hôte" + +#: awx/main/models/credential.py:104 +msgid "The hostname or IP address to use." +msgstr "Nom d'hôte ou adresse IP à utiliser." + +#: awx/main/models/credential.py:110 +msgid "Username" +msgstr "Nom d'utilisateur" + +#: awx/main/models/credential.py:111 +msgid "Username for this credential." +msgstr "Nom d'utilisateur pour ces informations d'identification." + +#: awx/main/models/credential.py:117 +msgid "Password" +msgstr "Mot de passe" + +#: awx/main/models/credential.py:118 +msgid "" +"Password for this credential (or \"ASK\" to prompt the user for machine " +"credentials)." +msgstr "" +"Mot de passe pour ces informations d'identification (ou \"ASK\" pour " +"demander à l'utilisateur les informations d'identification de la machine)." + +#: awx/main/models/credential.py:125 +msgid "Security Token" +msgstr "Token de sécurité" + +#: awx/main/models/credential.py:126 +msgid "Security Token for this credential" +msgstr "Token de sécurité pour ces informations d'identification" + +#: awx/main/models/credential.py:132 +msgid "Project" +msgstr "Projet" + +#: awx/main/models/credential.py:133 +msgid "The identifier for the project." +msgstr "Identifiant du projet." + +#: awx/main/models/credential.py:139 +msgid "Domain" +msgstr "Domaine" + +#: awx/main/models/credential.py:140 +msgid "The identifier for the domain." +msgstr "Identifiant du domaine." + +#: awx/main/models/credential.py:145 +msgid "SSH private key" +msgstr "Clé privée SSH" + +#: awx/main/models/credential.py:146 +msgid "RSA or DSA private key to be used instead of password." +msgstr "Clé privée RSA ou DSA à utiliser au lieu du mot de passe." + +#: awx/main/models/credential.py:152 +msgid "SSH key unlock" +msgstr "Déverrouillage de la clé SSH" + +#: awx/main/models/credential.py:153 +msgid "" +"Passphrase to unlock SSH private key if encrypted (or \"ASK\" to prompt the " +"user for machine credentials)." +msgstr "" +"Phrase de passe servant à déverrouiller la clé privée SSH si elle est " +"chiffrée (ou \"ASK\" pour demander à l'utilisateur les informations " +"d'identification de la machine)." + +#: awx/main/models/credential.py:161 +msgid "Privilege escalation method." +msgstr "Méthode d'élévation des privilèges." + +#: awx/main/models/credential.py:167 +msgid "Privilege escalation username." +msgstr "Nom d'utilisateur pour l'élévation des privilèges" + +#: awx/main/models/credential.py:173 +msgid "Password for privilege escalation method." +msgstr "Mot de passe pour la méthode d'élévation des privilèges." + +#: awx/main/models/credential.py:179 +msgid "Vault password (or \"ASK\" to prompt the user)." +msgstr "Mot de passe Vault (ou \"ASK\" pour le demander à l'utilisateur)." + +#: awx/main/models/credential.py:183 +msgid "Whether to use the authorize mechanism." +msgstr "Indique s'il faut ou non utiliser le mécanisme d'autorisation." 
+ +#: awx/main/models/credential.py:189 +msgid "Password used by the authorize mechanism." +msgstr "Mot de passe utilisé par le mécanisme d'autorisation." + +#: awx/main/models/credential.py:195 +msgid "Client Id or Application Id for the credential" +msgstr "" +"ID du client ou de l'application pour les informations d'identification" + +#: awx/main/models/credential.py:201 +msgid "Secret Token for this credential" +msgstr "Token secret pour ces informations d'identification" + +#: awx/main/models/credential.py:207 +msgid "Subscription identifier for this credential" +msgstr "ID d'abonnement pour ces informations d'identification" + +#: awx/main/models/credential.py:213 +msgid "Tenant identifier for this credential" +msgstr "ID de tenant pour ces informations d'identification" + +#: awx/main/models/credential.py:283 +msgid "Host required for VMware credential." +msgstr "Hôte requis pour les informations d'identification VMware." + +#: awx/main/models/credential.py:285 +msgid "Host required for OpenStack credential." +msgstr "Hôte requis pour les informations d'identification OpenStack." + +#: awx/main/models/credential.py:294 +msgid "Access key required for AWS credential." +msgstr "Clé d'accès requise pour les informations d'identification AWS." + +#: awx/main/models/credential.py:296 +msgid "Username required for Rackspace credential." +msgstr "" +"Nom d'utilisateur requis pour les informations d'identification Rackspace." + +#: awx/main/models/credential.py:299 +msgid "Username required for VMware credential." +msgstr "" +"Nom d'utilisateur requis pour les informations d'identification VMware." + +#: awx/main/models/credential.py:301 +msgid "Username required for OpenStack credential." +msgstr "" +"Nom d'utilisateur requis pour les informations d'identification OpenStack." + +#: awx/main/models/credential.py:307 +msgid "Secret key required for AWS credential." +msgstr "Clé secrète requise pour les informations d'identification AWS." + +#: awx/main/models/credential.py:309 +msgid "API key required for Rackspace credential." +msgstr "Clé API requise pour les informations d'identification Rackspace." + +#: awx/main/models/credential.py:311 +msgid "Password required for VMware credential." +msgstr "Mot de passe requis pour les informations d'identification VMware." + +#: awx/main/models/credential.py:313 +msgid "Password or API key required for OpenStack credential." +msgstr "" +"Mot de passe ou clé API requis(e) pour les informations d'identification " +"OpenStack." + +#: awx/main/models/credential.py:319 +msgid "Project name required for OpenStack credential." +msgstr "" +"Nom de projet requis pour les informations d'identification OpenStack." + +#: awx/main/models/credential.py:346 +msgid "SSH key unlock must be set when SSH key is encrypted." +msgstr "" +"Le déverrouillage de la clé SSH doit être défini lorsque la clé SSH est " +"chiffrée." + +#: awx/main/models/credential.py:352 +msgid "Credential cannot be assigned to both a user and team." +msgstr "" +"Les informations d'identification ne peuvent pas être attribuées à la fois à " +"un utilisateur et une équipe." + +#: awx/main/models/fact.py:21 +msgid "Host for the facts that the fact scan captured." +msgstr "Hôte pour les faits que le scan de faits a capturés." + +#: awx/main/models/fact.py:26 +msgid "Date and time of the corresponding fact scan gathering time." +msgstr "" +"Date et heure du scan de faits correspondant au moment de la collecte des " +"faits." 
+ +#: awx/main/models/fact.py:29 +msgid "" +"Arbitrary JSON structure of module facts captured at timestamp for a single " +"host." +msgstr "" +"Structure JSON arbitraire des faits de module capturés au moment de " +"l'horodatage pour un seul hôte." + +#: awx/main/models/inventory.py:45 +msgid "inventories" +msgstr "inventaires" + +#: awx/main/models/inventory.py:52 +msgid "Organization containing this inventory." +msgstr "Organisation contenant cet inventaire." + +#: awx/main/models/inventory.py:58 +msgid "Inventory variables in JSON or YAML format." +msgstr "Variables d'inventaire au format JSON ou YAML." + +#: awx/main/models/inventory.py:63 +msgid "Flag indicating whether any hosts in this inventory have failed." +msgstr "Marqueur indiquant si les hôtes de cet inventaire ont échoué." + +#: awx/main/models/inventory.py:68 +msgid "Total number of hosts in this inventory." +msgstr "Nombre total d'hôtes dans cet inventaire." + +#: awx/main/models/inventory.py:73 +msgid "Number of hosts in this inventory with active failures." +msgstr "Nombre d'hôtes dans cet inventaire avec des échecs non résolus." + +#: awx/main/models/inventory.py:78 +msgid "Total number of groups in this inventory." +msgstr "Nombre total de groupes dans cet inventaire." + +#: awx/main/models/inventory.py:83 +msgid "Number of groups in this inventory with active failures." +msgstr "Nombre de groupes dans cet inventaire avec des échecs non résolus." + +#: awx/main/models/inventory.py:88 +msgid "" +"Flag indicating whether this inventory has any external inventory sources." +msgstr "" +"Marqueur indiquant si cet inventaire contient des sources d'inventaire " +"externes." + +#: awx/main/models/inventory.py:93 +msgid "" +"Total number of external inventory sources configured within this inventory." +msgstr "" +"Nombre total de sources d'inventaire externes configurées dans cet " +"inventaire." + +#: awx/main/models/inventory.py:98 +msgid "Number of external inventory sources in this inventory with failures." +msgstr "" +"Nombre total de sources d'inventaire externes en échec dans cet inventaire." + +#: awx/main/models/inventory.py:339 +msgid "Is this host online and available for running jobs?" +msgstr "Cet hôte est-il en ligne et disponible pour exécuter des tâches ?" + +#: awx/main/models/inventory.py:345 +msgid "" +"The value used by the remote inventory source to uniquely identify the host" +msgstr "" +"Valeur utilisée par la source d'inventaire distante pour identifier l'hôte " +"de façon unique" + +#: awx/main/models/inventory.py:350 +msgid "Host variables in JSON or YAML format." +msgstr "Variables d'hôte au format JSON ou YAML." + +#: awx/main/models/inventory.py:372 +msgid "Flag indicating whether the last job failed for this host." +msgstr "Marqueur indiquant si la dernière tâche a échoué pour cet hôte." + +#: awx/main/models/inventory.py:377 +msgid "" +"Flag indicating whether this host was created/updated from any external " +"inventory sources." +msgstr "" +"Marqueur indiquant si cet hôte a été créé/mis à jour à partir de sources " +"d'inventaire externes." + +#: awx/main/models/inventory.py:383 +msgid "Inventory source(s) that created or modified this host." +msgstr "Sources d'inventaire qui ont créé ou modifié cet hôte." + +#: awx/main/models/inventory.py:474 +msgid "Group variables in JSON or YAML format." +msgstr "Variables de groupe au format JSON ou YAML." + +#: awx/main/models/inventory.py:480 +msgid "Hosts associated directly with this group." +msgstr "Hôtes associés directement à ce groupe." 
+
+#: awx/main/models/inventory.py:485
+msgid "Total number of hosts directly or indirectly in this group."
+msgstr ""
+"Nombre total d'hôtes associés directement ou indirectement à ce groupe."
+
+#: awx/main/models/inventory.py:490
+msgid "Flag indicating whether this group has any hosts with active failures."
+msgstr ""
+"Marqueur indiquant si ce groupe possède ou non des hôtes avec des échecs non "
+"résolus."
+
+#: awx/main/models/inventory.py:495
+msgid "Number of hosts in this group with active failures."
+msgstr "Nombre d'hôtes dans ce groupe avec des échecs non résolus."
+
+#: awx/main/models/inventory.py:500
+msgid "Total number of child groups contained within this group."
+msgstr "Nombre total de groupes enfants compris dans ce groupe."
+
+#: awx/main/models/inventory.py:505
+msgid "Number of child groups within this group that have active failures."
+msgstr "Nombre de groupes enfants dans ce groupe avec des échecs non résolus."
+
+#: awx/main/models/inventory.py:510
+msgid ""
+"Flag indicating whether this group was created/updated from any external "
+"inventory sources."
+msgstr ""
+"Marqueur indiquant si ce groupe a été créé/mis à jour à partir de sources "
+"d'inventaire externes."
+
+#: awx/main/models/inventory.py:516
+msgid "Inventory source(s) that created or modified this group."
+msgstr "Sources d'inventaire qui ont créé ou modifié ce groupe."
+
+#: awx/main/models/inventory.py:706
+#: awx/main/models/projects.py:42
+#: awx/main/models/unified_jobs.py:402
+msgid "Manual"
+msgstr "Manuel"
+
+#: awx/main/models/inventory.py:707
+msgid "Local File, Directory or Script"
+msgstr "Fichier local, répertoire ou script"
+
+#: awx/main/models/inventory.py:708
+msgid "Rackspace Cloud Servers"
+msgstr "Serveurs cloud Rackspace"
+
+#: awx/main/models/inventory.py:709
+msgid "Amazon EC2"
+msgstr "Amazon EC2"
+
+#: awx/main/models/inventory.py:717
+msgid "Custom Script"
+msgstr "Script personnalisé"
+
+#: awx/main/models/inventory.py:828
+msgid "Inventory source variables in YAML or JSON format."
+msgstr "Variables de source d'inventaire au format YAML ou JSON."
+
+#: awx/main/models/inventory.py:847
+msgid ""
+"Comma-separated list of filter expressions (EC2 only). Hosts are imported "
+"when ANY of the filters match."
+msgstr ""
+"Liste d'expressions de filtre séparées par des virgules (EC2 uniquement). "
+"Les hôtes sont importés lorsque l'UN des filtres correspond."
+
+#: awx/main/models/inventory.py:853
+msgid "Limit groups automatically created from inventory source (EC2 only)."
+msgstr ""
+"Limiter les groupes créés automatiquement à partir de la source "
+"d'inventaire (EC2 uniquement)."
+
+#: awx/main/models/inventory.py:857
+msgid "Overwrite local groups and hosts from remote inventory source."
+msgstr ""
+"Écraser les groupes locaux et les hôtes de la source d'inventaire distante."
+
+#: awx/main/models/inventory.py:861
+msgid "Overwrite local variables from remote inventory source."
+msgstr "Écraser les variables locales de la source d'inventaire distante."
+
+#: awx/main/models/inventory.py:893
+msgid "Availability Zone"
+msgstr "Zone de disponibilité"
+
+#: awx/main/models/inventory.py:894
+msgid "Image ID"
+msgstr "ID d'image"
+
+#: awx/main/models/inventory.py:895
+msgid "Instance ID"
+msgstr "ID d'instance"
+
+#: awx/main/models/inventory.py:896
+msgid "Instance Type"
+msgstr "Type d'instance"
+
+#: awx/main/models/inventory.py:897
+msgid "Key Name"
+msgstr "Nom de la clé"
+
+#: awx/main/models/inventory.py:898
+msgid "Region"
+msgstr "Région"
+
+#: awx/main/models/inventory.py:899
+msgid "Security Group"
+msgstr "Groupe de sécurité"
+
+#: awx/main/models/inventory.py:900
+msgid "Tags"
+msgstr "Balises"
+
+#: awx/main/models/inventory.py:901
+msgid "VPC ID"
+msgstr "ID VPC"
+
+#: awx/main/models/inventory.py:902
+msgid "Tag None"
+msgstr "Aucune balise"
+
+#: awx/main/models/inventory.py:973
+#, python-format
+msgid ""
+"Cloud-based inventory sources (such as %s) require credentials for the "
+"matching cloud service."
+msgstr ""
+"Les sources d'inventaire cloud (telles que %s) requièrent des informations "
+"d'identification pour le service cloud correspondant."
+
+#: awx/main/models/inventory.py:980
+msgid "Credential is required for a cloud source."
+msgstr ""
+"Les informations d'identification sont requises pour une source cloud."
+
+#: awx/main/models/inventory.py:1005
+#, python-format
+msgid "Invalid %(source)s region: %(region)s"
+msgstr "Région %(source)s non valide : %(region)s"
+
+#: awx/main/models/inventory.py:1030
+#, python-format
+msgid "Invalid filter expression: %(filter)s"
+msgstr "Expression de filtre non valide : %(filter)s"
+
+#: awx/main/models/inventory.py:1048
+#, python-format
+msgid "Invalid group by choice: %(choice)s"
+msgstr "Choix de regroupement non valide : %(choice)s"
+
+#: awx/main/models/inventory.py:1195
+#, python-format
+msgid ""
+"Unable to configure this item for cloud sync. It is already managed by %s."
+msgstr ""
+"Impossible de configurer cet élément pour la synchronisation dans le cloud. "
+"Il est déjà géré par %s."
+
+#: awx/main/models/inventory.py:1290
+msgid "Inventory script contents"
+msgstr "Contenu du script d'inventaire"
+
+#: awx/main/models/inventory.py:1295
+msgid "Organization owning this inventory script"
+msgstr "Organisation propriétaire de ce script d'inventaire"
+
+#: awx/main/models/jobs.py:169
+msgid "You must provide a network credential."
+msgstr "Vous devez fournir des informations d'identification réseau."
+
+#: awx/main/models/jobs.py:177
+msgid ""
+"Must provide a credential for a cloud provider, such as Amazon Web Services "
+"or Rackspace."
+msgstr ""
+"Vous devez fournir les informations d'identification d'un fournisseur de "
+"services cloud, comme Amazon Web Services ou Rackspace."
+
+#: awx/main/models/jobs.py:269
+msgid "Job Template must provide 'inventory' or allow prompting for it."
+msgstr ""
+"Le modèle de tâche doit fournir un inventaire ou permettre d'en demander un."
+
+#: awx/main/models/jobs.py:273
+msgid "Job Template must provide 'credential' or allow prompting for it."
+msgstr ""
+"Le modèle de tâche doit fournir des informations d'identification ou "
+"permettre d'en demander."
+
+#: awx/main/models/jobs.py:362
+msgid "Cannot override job_type to or from a scan job."
+msgstr "Impossible de remplacer job_type vers ou depuis une tâche de scan."
+
+#: awx/main/models/jobs.py:365
+msgid "Inventory cannot be changed at runtime for scan jobs."
+msgstr ""
+"L'inventaire ne peut pas être modifié à l'exécution pour les tâches de scan."
+
+#: awx/main/models/jobs.py:431
+#: awx/main/models/projects.py:243
+msgid "SCM Revision"
+msgstr "Révision SCM"
+
+#: awx/main/models/jobs.py:432
+msgid "The SCM Revision from the Project used for this job, if available"
+msgstr "Révision SCM du projet utilisé pour cette tâche, le cas échéant"
+
+#: awx/main/models/jobs.py:440
+msgid ""
+"The SCM Refresh task used to make sure the playbooks were available for the "
+"job run"
+msgstr ""
+"Tâche d'actualisation SCM qui permet de s'assurer que les playbooks étaient "
+"disponibles pour l'exécution de la tâche"
+
+#: awx/main/models/jobs.py:662
+msgid "job host summaries"
+msgstr "récapitulatifs des hôtes pour la tâche"
+
+#: awx/main/models/jobs.py:765
+msgid "Host Failure"
+msgstr "Échec de l'hôte"
+
+#: awx/main/models/jobs.py:768
+#: awx/main/models/jobs.py:782
+msgid "No Hosts Remaining"
+msgstr "Aucun hôte restant"
+
+#: awx/main/models/jobs.py:769
+msgid "Host Polling"
+msgstr "Interrogation de l'hôte"
+
+#: awx/main/models/jobs.py:770
+msgid "Host Async OK"
+msgstr "Hôte async OK"
+
+#: awx/main/models/jobs.py:771
+msgid "Host Async Failure"
+msgstr "Échec async de l'hôte"
+
+#: awx/main/models/jobs.py:772
+msgid "Item OK"
+msgstr "Élément OK"
+
+#: awx/main/models/jobs.py:773
+msgid "Item Failed"
+msgstr "Échec de l'élément"
+
+#: awx/main/models/jobs.py:774
+msgid "Item Skipped"
+msgstr "Élément ignoré"
+
+#: awx/main/models/jobs.py:775
+msgid "Host Retry"
+msgstr "Nouvel essai de l'hôte"
+
+#: awx/main/models/jobs.py:777
+msgid "File Difference"
+msgstr "Écart entre les fichiers"
+
+#: awx/main/models/jobs.py:778
+msgid "Playbook Started"
+msgstr "Playbook démarré"
+
+#: awx/main/models/jobs.py:779
+msgid "Running Handlers"
+msgstr "Exécution des handlers"
+
+#: awx/main/models/jobs.py:780
+msgid "Including File"
+msgstr "Inclusion de fichier"
+
+#: awx/main/models/jobs.py:781
+msgid "No Hosts Matched"
+msgstr "Aucun hôte correspondant"
+
+#: awx/main/models/jobs.py:783
+msgid "Task Started"
+msgstr "Tâche démarrée"
+
+#: awx/main/models/jobs.py:785
+msgid "Variables Prompted"
+msgstr "Variables demandées"
+
+#: awx/main/models/jobs.py:786
+msgid "Gathering Facts"
+msgstr "Collecte des faits"
+
+#: awx/main/models/jobs.py:787
+msgid "internal: on Import for Host"
+msgstr "interne : à l'importation pour l'hôte"
+
+#: awx/main/models/jobs.py:788
+msgid "internal: on Not Import for Host"
+msgstr "interne : à la non-importation pour l'hôte"
+
+#: awx/main/models/jobs.py:789
+msgid "Play Started"
+msgstr "Play démarré"
+
+#: awx/main/models/jobs.py:790
+msgid "Playbook Complete"
+msgstr "Playbook terminé"
+
+#: awx/main/models/jobs.py:1200
+msgid "Remove jobs older than a certain number of days"
+msgstr "Supprimer les tâches plus anciennes qu'un certain nombre de jours"
+
+#: awx/main/models/jobs.py:1201
+msgid "Remove activity stream entries older than a certain number of days"
+msgstr ""
+"Supprimer les entrées du flux d'activité plus anciennes qu'un certain nombre "
+"de jours"
+
+#: awx/main/models/jobs.py:1202
+msgid "Purge and/or reduce the granularity of system tracking data"
+msgstr "Purger et/ou réduire la granularité des données de suivi du système"
+
+#: awx/main/models/label.py:29
+msgid "Organization this label belongs to."
+msgstr "Organisation à laquelle appartient ce libellé."
+ +#: awx/main/models/notifications.py:31 +msgid "Email" +msgstr "Email" + +#: awx/main/models/notifications.py:32 +msgid "Slack" +msgstr "Slack" + +#: awx/main/models/notifications.py:33 +msgid "Twilio" +msgstr "Twilio" + +#: awx/main/models/notifications.py:34 +msgid "Pagerduty" +msgstr "Pagerduty" + +#: awx/main/models/notifications.py:35 +msgid "HipChat" +msgstr "HipChat" + +#: awx/main/models/notifications.py:36 +msgid "Webhook" +msgstr "Webhook" + +#: awx/main/models/notifications.py:37 +msgid "IRC" +msgstr "IRC" + +#: awx/main/models/notifications.py:127 +#: awx/main/models/unified_jobs.py:59 +msgid "Pending" +msgstr "En attente" + +#: awx/main/models/notifications.py:128 +#: awx/main/models/unified_jobs.py:62 +msgid "Successful" +msgstr "Réussi" + +#: awx/main/models/notifications.py:129 +#: awx/main/models/unified_jobs.py:63 +msgid "Failed" +msgstr "Échec" + +#: awx/main/models/organization.py:157 +msgid "Execute Commands on the Inventory" +msgstr "Exécuter des commandes sur l'inventaire" + +#: awx/main/models/organization.py:211 +msgid "Token not invalidated" +msgstr "Token non invalidé" + +#: awx/main/models/organization.py:212 +msgid "Token is expired" +msgstr "Token arrivé à expiration" + +#: awx/main/models/organization.py:213 +msgid "" +"The maximum number of allowed sessions for this user has been exceeded." +msgstr "" +"Le nombre maximum de sessions autorisées pour cet utilisateur a été dépassé." + +#: awx/main/models/organization.py:216 +msgid "Invalid token" +msgstr "Token non valide" + +#: awx/main/models/organization.py:233 +msgid "Reason the auth token was invalidated." +msgstr "" +"Raison pour laquelle le token d'authentification a été rendu non valide." + +#: awx/main/models/organization.py:272 +msgid "Invalid reason specified" +msgstr "Raison spécifiée non valide" + +#: awx/main/models/projects.py:43 +msgid "Git" +msgstr "Git" + +#: awx/main/models/projects.py:44 +msgid "Mercurial" +msgstr "Mercurial" + +#: awx/main/models/projects.py:45 +msgid "Subversion" +msgstr "Subversion" + +#: awx/main/models/projects.py:71 +msgid "" +"Local path (relative to PROJECTS_ROOT) containing playbooks and related " +"files for this project." +msgstr "" +"Chemin local (relatif à PROJECTS_ROOT) contenant des playbooks et des " +"fichiers associés pour ce projet." + +#: awx/main/models/projects.py:80 +msgid "SCM Type" +msgstr "Type de SCM" + +#: awx/main/models/projects.py:81 +msgid "Specifies the source control system used to store the project." +msgstr "" +"Spécifie le système de contrôle des sources utilisé pour stocker le projet." + +#: awx/main/models/projects.py:87 +msgid "SCM URL" +msgstr "URL du SCM" + +#: awx/main/models/projects.py:88 +msgid "The location where the project is stored." +msgstr "Emplacement où le projet est stocké." + +#: awx/main/models/projects.py:94 +msgid "SCM Branch" +msgstr "Branche SCM" + +#: awx/main/models/projects.py:95 +msgid "Specific branch, tag or commit to checkout." +msgstr "Branche, balise ou commit spécifique à extraire." + +#: awx/main/models/projects.py:99 +msgid "Discard any local changes before syncing the project." +msgstr "Abandonnez les modifications locales avant de synchroniser le projet." + +#: awx/main/models/projects.py:103 +msgid "Delete the project before syncing." +msgstr "Supprimez le projet avant la synchronisation." + +#: awx/main/models/projects.py:116 +msgid "The amount of time to run before the task is canceled." +msgstr "Durée d'exécution autorisée avant que la tâche ne soit annulée."
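The projects.py help strings above describe the fields of a Tower project (scm_type, scm_url, scm_branch, scm_clean, timeout). As a hedged illustration of how they fit together, a request against the v1 API might look like this; the endpoint, host, and credentials are placeholders, not taken from this diff:

    # Hypothetical example: only the field names mirror the help strings above.
    import requests

    payload = {
        "name": "demo-project",
        "scm_type": "git",          # "SCM Type": git, hg (Mercurial) or svn
        "scm_url": "https://github.com/ansible/ansible-examples.git",
        "scm_branch": "master",     # specific branch, tag or commit to checkout
        "scm_clean": True,          # discard local changes before syncing
        "timeout": 600,             # seconds to run before the task is canceled
    }
    response = requests.post(
        "https://tower.example.com/api/v1/projects/",
        json=payload,
        auth=("admin", "secret"),
    )
    response.raise_for_status()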
+ +#: awx/main/models/projects.py:130 +msgid "Invalid SCM URL." +msgstr "URL du SCM non valide." + +#: awx/main/models/projects.py:133 +msgid "SCM URL is required." +msgstr "L'URL du SCM est requise." + +#: awx/main/models/projects.py:142 +msgid "Credential kind must be 'scm'." +msgstr "Le type d'informations d'identification doit être 'scm'." + +#: awx/main/models/projects.py:157 +msgid "Invalid credential." +msgstr "Informations d'identification non valides." + +#: awx/main/models/projects.py:229 +msgid "Update the project when a job is launched that uses the project." +msgstr "Mettez à jour le projet lorsqu'une tâche qui l'utilise est lancée." + +#: awx/main/models/projects.py:234 +msgid "" +"The number of seconds after the last project update ran that a newproject " +"update will be launched as a job dependency." +msgstr "" +"Délai écoulé (en secondes) entre la dernière mise à jour du projet et le " +"lancement d'une nouvelle mise à jour en tant que dépendance de la tâche." + +#: awx/main/models/projects.py:244 +msgid "The last revision fetched by a project update" +msgstr "Dernière révision récupérée par une mise à jour du projet" + +#: awx/main/models/projects.py:251 +msgid "Playbook Files" +msgstr "Fichiers de playbook" + +#: awx/main/models/projects.py:252 +msgid "List of playbooks found in the project" +msgstr "Liste des playbooks trouvés dans le projet" + +#: awx/main/models/rbac.py:36 +msgid "System Administrator" +msgstr "Administrateur du système" + +#: awx/main/models/rbac.py:37 +msgid "System Auditor" +msgstr "Auditeur système" + +#: awx/main/models/rbac.py:38 +msgid "Ad Hoc" +msgstr "Ad Hoc" + +#: awx/main/models/rbac.py:39 +msgid "Admin" +msgstr "Admin" + +#: awx/main/models/rbac.py:40 +msgid "Auditor" +msgstr "Auditeur" + +#: awx/main/models/rbac.py:41 +msgid "Execute" +msgstr "Exécution" + +#: awx/main/models/rbac.py:42 +msgid "Member" +msgstr "Membre" + +#: awx/main/models/rbac.py:43 +msgid "Read" +msgstr "Lecture" + +#: awx/main/models/rbac.py:44 +msgid "Update" +msgstr "Mise à jour" + +#: awx/main/models/rbac.py:45 +msgid "Use" +msgstr "Utilisation" + +#: awx/main/models/rbac.py:49 +msgid "Can manage all aspects of the system" +msgstr "Peut gérer tous les aspects du système" + +#: awx/main/models/rbac.py:50 +msgid "Can view all settings on the system" +msgstr "Peut afficher tous les paramètres de configuration du système" + +#: awx/main/models/rbac.py:51 +msgid "May run ad hoc commands on an inventory" +msgstr "Peut exécuter des commandes ad hoc sur un inventaire" + +#: awx/main/models/rbac.py:52 +#, python-format +msgid "Can manage all aspects of the %s" +msgstr "Peut gérer tous les aspects de %s" + +#: awx/main/models/rbac.py:53 +#, python-format +msgid "Can view all settings for the %s" +msgstr "Peut afficher tous les paramètres de configuration du %s" + +#: awx/main/models/rbac.py:54 +#, python-format +msgid "May run the %s" +msgstr "Peut exécuter %s" + +#: awx/main/models/rbac.py:55 +#, python-format +msgid "User is a member of the %s" +msgstr "L'utilisateur est un membre de %s" + +#: awx/main/models/rbac.py:56 +#, python-format +msgid "May view settings for the %s" +msgstr "Peut afficher les paramètres de configuration de %s" + +#: awx/main/models/rbac.py:57 +msgid "" +"May update project or inventory or group using the configured source update " +"system" +msgstr "" +"Peut mettre à jour un projet, un inventaire ou un groupe en utilisant le " +"système de mise à jour de la source configuré"
+ +#: awx/main/models/rbac.py:58 +#, python-format +msgid "Can use the %s in a job template" +msgstr "Peut utiliser %s dans un modèle de tâche" + +#: awx/main/models/rbac.py:122 +msgid "roles" +msgstr "rôles" + +#: awx/main/models/rbac.py:438 +msgid "role_ancestors" +msgstr "role_ancestors" + +#: awx/main/models/schedules.py:69 +msgid "Enables processing of this schedule by Tower." +msgstr "Active le traitement de ce calendrier par Tower." + +#: awx/main/models/schedules.py:75 +msgid "The first occurrence of the schedule occurs on or after this time." +msgstr "" +"La première occurrence du calendrier se produit à ce moment précis ou " +"ultérieurement." + +#: awx/main/models/schedules.py:81 +msgid "" +"The last occurrence of the schedule occurs before this time, aftewards the " +"schedule expires." +msgstr "" +"La dernière occurrence du calendrier se produit avant ce moment précis. " +"Passé ce délai, le calendrier arrive à expiration." + +#: awx/main/models/schedules.py:85 +msgid "A value representing the schedules iCal recurrence rule." +msgstr "Valeur représentant la règle de récurrence iCal du calendrier." + +#: awx/main/models/schedules.py:91 +msgid "The next time that the scheduled action will run." +msgstr "La prochaine fois que l'action planifiée s'exécutera." + +#: awx/main/models/unified_jobs.py:58 +msgid "New" +msgstr "Nouveau" + +#: awx/main/models/unified_jobs.py:60 +msgid "Waiting" +msgstr "En attente" + +#: awx/main/models/unified_jobs.py:61 +msgid "Running" +msgstr "En cours d'exécution" + +#: awx/main/models/unified_jobs.py:65 +msgid "Canceled" +msgstr "Annulé" + +#: awx/main/models/unified_jobs.py:69 +msgid "Never Updated" +msgstr "Jamais mis à jour" + +#: awx/main/models/unified_jobs.py:73 +#: awx/ui/templates/ui/index.html:85 +#: awx/ui/templates/ui/index.html.py:104 +msgid "OK" +msgstr "OK" + +#: awx/main/models/unified_jobs.py:74 +msgid "Missing" +msgstr "Manquant" + +#: awx/main/models/unified_jobs.py:78 +msgid "No External Source" +msgstr "Aucune source externe" + +#: awx/main/models/unified_jobs.py:85 +msgid "Updating" +msgstr "Mise à jour en cours" + +#: awx/main/models/unified_jobs.py:403 +msgid "Relaunch" +msgstr "Relancer" + +#: awx/main/models/unified_jobs.py:404 +msgid "Callback" +msgstr "Rappel" + +#: awx/main/models/unified_jobs.py:405 +msgid "Scheduled" +msgstr "Planifié" + +#: awx/main/models/unified_jobs.py:406 +msgid "Dependency" +msgstr "Dépendance" + +#: awx/main/models/unified_jobs.py:407 +msgid "Workflow" +msgstr "Workflow" + +#: awx/main/models/unified_jobs.py:408 +msgid "Sync" +msgstr "Sync" + +#: awx/main/models/unified_jobs.py:454 +msgid "The Tower node the job executed on." +msgstr "Nœud Tower sur lequel la tâche s'est exécutée." + +#: awx/main/models/unified_jobs.py:480 +msgid "The date and time the job was queued for starting." +msgstr "" +"Date et heure auxquelles la tâche a été mise en file d'attente pour le " +"démarrage." + +#: awx/main/models/unified_jobs.py:486 +msgid "The date and time the job finished execution." +msgstr "Date et heure de fin d'exécution de la tâche." + +#: awx/main/models/unified_jobs.py:492 +msgid "Elapsed time in seconds that the job ran." +msgstr "Délai écoulé (en secondes) pendant lequel la tâche s'est exécutée."
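The schedules.py strings above (enabled flag, first and last occurrence, iCal recurrence rule, next run) all revolve around one rrule value. A sketch of what such a value can look like and how a next-run time falls out of it, using python-dateutil; the sample rule itself is invented:

    # Illustrative only: an iCal recurrence value of the kind the help
    # text above refers to, parsed with python-dateutil.
    import datetime
    from dateutil import rrule

    rule_text = "DTSTART:20170101T090000\nRRULE:FREQ=DAILY;INTERVAL=1"
    rule = rrule.rrulestr(rule_text)

    # "The next time that the scheduled action will run." is the first
    # occurrence after now (a fixed date here for reproducibility).
    now = datetime.datetime(2017, 6, 1)
    print(rule.after(now))  # 2017-06-01 09:00:00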
+ +#: awx/main/models/unified_jobs.py:514 +msgid "" +"A status field to indicate the state of the job if it wasn't able to run and " +"capture stdout" +msgstr "" +"Champ d'état indiquant l'état de la tâche si elle n'a pas pu s'exécuter et " +"capturer stdout" + +#: awx/main/notifications/base.py:17 +#: awx/main/notifications/email_backend.py:28 +msgid "{} #{} had status {} on Ansible Tower, view details at {}\n" +"\n" +msgstr "{} #{} était à l'état {} sur Ansible Tower, voir les détails sur {}\n" +"\n" + +#: awx/main/notifications/hipchat_backend.py:46 +msgid "Error sending messages: {}" +msgstr "Erreur lors de l'envoi de messages : {}" + +#: awx/main/notifications/hipchat_backend.py:48 +msgid "Error sending message to hipchat: {}" +msgstr "Erreur lors de l'envoi d'un message à hipchat : {}" + +#: awx/main/notifications/irc_backend.py:54 +msgid "Exception connecting to irc server: {}" +msgstr "Exception lors de la connexion au serveur irc : {}" + +#: awx/main/notifications/pagerduty_backend.py:39 +msgid "Exception connecting to PagerDuty: {}" +msgstr "Exception lors de la connexion à PagerDuty : {}" + +#: awx/main/notifications/pagerduty_backend.py:48 +#: awx/main/notifications/slack_backend.py:52 +#: awx/main/notifications/twilio_backend.py:46 +msgid "Exception sending messages: {}" +msgstr "Exception lors de l'envoi de messages : {}" + +#: awx/main/notifications/twilio_backend.py:36 +msgid "Exception connecting to Twilio: {}" +msgstr "Exception lors de la connexion à Twilio : {}" + +#: awx/main/notifications/webhook_backend.py:38 +#: awx/main/notifications/webhook_backend.py:40 +msgid "Error sending notification webhook: {}" +msgstr "Erreur lors de l'envoi d'un webhook de notification : {}" + +#: awx/main/scheduler/__init__.py:130 +msgid "" +"Job spawned from workflow could not start because it was not in the right " +"state or required manual credentials" +msgstr "" +"La tâche lancée à partir du workflow n'a pas pu démarrer car elle n'était " +"pas dans l'état requis ou nécessitait des informations d'identification " +"manuelles" + +#: awx/main/tasks.py:180 +msgid "Ansible Tower host usage over 90%" +msgstr "Utilisation d'hôtes Ansible Tower supérieure à 90 %" + +#: awx/main/tasks.py:185 +msgid "Ansible Tower license will expire soon" +msgstr "La licence Ansible Tower expirera bientôt" + +#: awx/main/tasks.py:240 +msgid "status_str must be either succeeded or failed" +msgstr "status_str doit être soit succeeded soit failed" + +#: awx/main/utils/common.py:89 +#, python-format +msgid "Unable to convert \"%s\" to boolean" +msgstr "Impossible de convertir \"%s\" en booléen" + +#: awx/main/utils/common.py:243 +#, python-format +msgid "Unsupported SCM type \"%s\"" +msgstr "Type de SCM \"%s\" non pris en charge" + +#: awx/main/utils/common.py:250 +#: awx/main/utils/common.py:262 +#: awx/main/utils/common.py:281 +#, python-format +msgid "Invalid %s URL" +msgstr "URL %s non valide" + +#: awx/main/utils/common.py:252 +#: awx/main/utils/common.py:290 +#, python-format +msgid "Unsupported %s URL" +msgstr "URL %s non prise en charge" + +#: awx/main/utils/common.py:292 +#, python-format +msgid "Unsupported host \"%s\" for file:// URL" +msgstr "Hôte \"%s\" non pris en charge pour l'URL file://" + +#: awx/main/utils/common.py:294 +#, python-format +msgid "Host is required for %s URL" +msgstr "L'hôte est requis pour l'URL %s" + +#: awx/main/utils/common.py:312 +#, python-format +msgid "Username must be \"git\" for SSH access to %s."
+msgstr "Le nom d'utilisateur doit être \"git\" pour l'accès SSH à %s." + +#: awx/main/utils/common.py:318 +#, python-format +msgid "Username must be \"hg\" for SSH access to %s." +msgstr "Le nom d'utilisateur doit être \"hg\" pour l'accès SSH à %s." + +#: awx/main/validators.py:60 +#, python-format +msgid "Invalid certificate or key: %r..." +msgstr "Certificat ou clé non valide : %r..." + +#: awx/main/validators.py:74 +#, python-format +msgid "Invalid private key: unsupported type \"%s\"" +msgstr "Clé privée non valide : type \"%s\" non pris en charge" + +#: awx/main/validators.py:78 +#, python-format +msgid "Unsupported PEM object type: \"%s\"" +msgstr "Type d'objet PEM non pris en charge : \"%s\"" + +#: awx/main/validators.py:103 +msgid "Invalid base64-encoded data" +msgstr "Données codées en base64 non valides" + +#: awx/main/validators.py:122 +msgid "Exactly one private key is required." +msgstr "Une clé privée uniquement est nécessaire." + +#: awx/main/validators.py:124 +msgid "At least one private key is required." +msgstr "Une clé privée au moins est nécessaire." + +#: awx/main/validators.py:126 +#, python-format +msgid "" +"At least %(min_keys)d private keys are required, only %(key_count)d provided." +"" +msgstr "" +"%(min_keys)d clés privées au moins sont requises, mais %(key_count)d " +"uniquement ont été fournies." + +#: awx/main/validators.py:129 +#, python-format +msgid "Only one private key is allowed, %(key_count)d provided." +msgstr "Une seule clé privée est autorisée, %(key_count)d ont été fournies." + +#: awx/main/validators.py:131 +#, python-format +msgid "" +"No more than %(max_keys)d private keys are allowed, %(key_count)d provided." +msgstr "" +"Pas plus de %(max_keys)d clés privées sont autorisées, %(key_count)d ont été " +"fournies." + +#: awx/main/validators.py:136 +msgid "Exactly one certificate is required." +msgstr "Un certificat uniquement est nécessaire." + +#: awx/main/validators.py:138 +msgid "At least one certificate is required." +msgstr "Un certificat au moins est nécessaire." + +#: awx/main/validators.py:140 +#, python-format +msgid "" +"At least %(min_certs)d certificates are required, only %(cert_count)d " +"provided." +msgstr "" +"%(min_certs)d certificats au moins sont requis, mais %(cert_count)d " +"uniquement ont été fournis." + +#: awx/main/validators.py:143 +#, python-format +msgid "Only one certificate is allowed, %(cert_count)d provided." +msgstr "Un seul certificat est autorisé, %(cert_count)d ont été fournis." + +#: awx/main/validators.py:145 +#, python-format +msgid "" +"No more than %(max_certs)d certificates are allowed, %(cert_count)d provided." +"" +msgstr "" +"Pas plus de %(max_certs)d certificats sont autorisés, %(cert_count)d ont été " +"fournis." + +#: awx/main/views.py:20 +msgid "API Error" +msgstr "Erreur API" + +#: awx/main/views.py:49 +msgid "Bad Request" +msgstr "Requête incorrecte" + +#: awx/main/views.py:50 +msgid "The request could not be understood by the server." +msgstr "La requête n'a pas pu être comprise par le serveur." + +#: awx/main/views.py:57 +msgid "Forbidden" +msgstr "Interdiction" + +#: awx/main/views.py:58 +msgid "You don't have permission to access the requested resource." +msgstr "Vous n'êtes pas autorisé à accéder à la ressource demandée." + +#: awx/main/views.py:65 +msgid "Not Found" +msgstr "Introuvable" + +#: awx/main/views.py:66 +msgid "The requested resource could not be found." +msgstr "Impossible de trouver la ressource demandée." 
+ +#: awx/main/views.py:73 +msgid "Server Error" +msgstr "Erreur serveur" + +#: awx/main/views.py:74 +msgid "A server error has occurred." +msgstr "Une erreur serveur s'est produite." + +#: awx/settings/defaults.py:611 +msgid "Chicago" +msgstr "Chicago" + +#: awx/settings/defaults.py:612 +msgid "Dallas/Ft. Worth" +msgstr "Dallas/Ft. Worth" + +#: awx/settings/defaults.py:613 +msgid "Northern Virginia" +msgstr "Virginie du Nord" + +#: awx/settings/defaults.py:614 +msgid "London" +msgstr "Londres" + +#: awx/settings/defaults.py:615 +msgid "Sydney" +msgstr "Sydney" + +#: awx/settings/defaults.py:616 +msgid "Hong Kong" +msgstr "Hong Kong" + +#: awx/settings/defaults.py:643 +msgid "US East (Northern Virginia)" +msgstr "Est des États-Unis (Virginie du Nord)" + +#: awx/settings/defaults.py:644 +msgid "US East (Ohio)" +msgstr "Est des États-Unis (Ohio)" + +#: awx/settings/defaults.py:645 +msgid "US West (Oregon)" +msgstr "Ouest des États-Unis (Oregon)" + +#: awx/settings/defaults.py:646 +msgid "US West (Northern California)" +msgstr "Ouest des États-Unis (Nord de la Californie)" + +#: awx/settings/defaults.py:647 +msgid "Canada (Central)" +msgstr "Canada (Central)" + +#: awx/settings/defaults.py:648 +msgid "EU (Frankfurt)" +msgstr "UE (Francfort)" + +#: awx/settings/defaults.py:649 +msgid "EU (Ireland)" +msgstr "UE (Irlande)" + +#: awx/settings/defaults.py:650 +msgid "EU (London)" +msgstr "UE (Londres)" + +#: awx/settings/defaults.py:651 +msgid "Asia Pacific (Singapore)" +msgstr "Asie-Pacifique (Singapour)" + +#: awx/settings/defaults.py:652 +msgid "Asia Pacific (Sydney)" +msgstr "Asie-Pacifique (Sydney)" + +#: awx/settings/defaults.py:653 +msgid "Asia Pacific (Tokyo)" +msgstr "Asie-Pacifique (Tokyo)" + +#: awx/settings/defaults.py:654 +msgid "Asia Pacific (Seoul)" +msgstr "Asie-Pacifique (Séoul)" + +#: awx/settings/defaults.py:655 +msgid "Asia Pacific (Mumbai)" +msgstr "Asie-Pacifique (Mumbai)" + +#: awx/settings/defaults.py:656 +msgid "South America (Sao Paulo)" +msgstr "Amérique du Sud (Sao Paulo)" + +#: awx/settings/defaults.py:657 +msgid "US West (GovCloud)" +msgstr "Ouest des États-Unis (GovCloud)" + +#: awx/settings/defaults.py:658 +msgid "China (Beijing)" +msgstr "Chine (Pékin)" + +#: awx/settings/defaults.py:707 +msgid "US East (B)" +msgstr "Est des États-Unis (B)" + +#: awx/settings/defaults.py:708 +msgid "US East (C)" +msgstr "Est des États-Unis (C)" + +#: awx/settings/defaults.py:709 +msgid "US East (D)" +msgstr "Est des États-Unis (D)" + +#: awx/settings/defaults.py:710 +msgid "US Central (A)" +msgstr "Centre des États-Unis (A)" + +#: awx/settings/defaults.py:711 +msgid "US Central (B)" +msgstr "Centre des États-Unis (B)" + +#: awx/settings/defaults.py:712 +msgid "US Central (C)" +msgstr "Centre des États-Unis (C)" + +#: awx/settings/defaults.py:713 +msgid "US Central (F)" +msgstr "Centre des États-Unis (F)" + +#: awx/settings/defaults.py:714 +msgid "Europe West (B)" +msgstr "Europe de l'Ouest (B)" + +#: awx/settings/defaults.py:715 +msgid "Europe West (C)" +msgstr "Europe de l'Ouest (C)" + +#: awx/settings/defaults.py:716 +msgid "Europe West (D)" +msgstr "Europe de l'Ouest (D)" + +#: awx/settings/defaults.py:717 +msgid "Asia East (A)" +msgstr "Asie de l'Est (A)" + +#: awx/settings/defaults.py:718 +msgid "Asia East (B)" +msgstr "Asie de l'Est (B)" + +#: awx/settings/defaults.py:719 +msgid "Asia East (C)" +msgstr "Asie de l'Est (C)" + +#: awx/settings/defaults.py:743 +msgid "US Central" +msgstr "Centre des États-Unis" + +#: awx/settings/defaults.py:744 +msgid "US East" +msgstr "Est des 
États-Unis" + +#: awx/settings/defaults.py:745 +msgid "US East 2" +msgstr "Est des États-Unis 2" + +#: awx/settings/defaults.py:746 +msgid "US North Central" +msgstr "Centre-Nord des États-Unis" + +#: awx/settings/defaults.py:747 +msgid "US South Central" +msgstr "Centre-Sud des États-Unis" + +#: awx/settings/defaults.py:748 +msgid "US West" +msgstr "Ouest des États-Unis" + +#: awx/settings/defaults.py:749 +msgid "Europe North" +msgstr "Europe du Nord" + +#: awx/settings/defaults.py:750 +msgid "Europe West" +msgstr "Europe de l'Ouest" + +#: awx/settings/defaults.py:751 +msgid "Asia Pacific East" +msgstr "Asie-Pacifique Est" + +#: awx/settings/defaults.py:752 +msgid "Asia Pacific Southeast" +msgstr "Asie-Pacifique Sud-Est" + +#: awx/settings/defaults.py:753 +msgid "Japan East" +msgstr "Est du Japon" + +#: awx/settings/defaults.py:754 +msgid "Japan West" +msgstr "Ouest du Japon" + +#: awx/settings/defaults.py:755 +msgid "Brazil South" +msgstr "Sud du Brésil" + +#: awx/sso/apps.py:9 +msgid "Single Sign-On" +msgstr "Single Sign-On" + +#: awx/sso/conf.py:27 +msgid "" +"Mapping to organization admins/users from social auth accounts. This setting\n" +"controls which users are placed into which Tower organizations based on\n" +"their username and email address. Dictionary keys are organization names.\n" +"organizations will be created if not present if the license allows for\n" +"multiple organizations, otherwise the single default organization is used\n" +"regardless of the key. Values are dictionaries defining the options for\n" +"each organization's membership. For each organization it is possible to\n" +"specify which users are automatically users of the organization and also\n" +"which users can administer the organization. \n" +"\n" +"- admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated.\n" +" If True, all users using social auth will automatically be added as admins\n" +" of the organization.\n" +" If False, no social auth users will be automatically added as admins of\n" +" the organization.\n" +" If a string or list of strings, specifies the usernames and emails for\n" +" users who will be added to the organization. Strings in the format\n" +" \"//\" will be interpreted as JavaScript regular " +"expressions and\n" +" may also be used instead of string literals; only \"i\" and \"m\" are " +"supported\n" +" for flags.\n" +"- remove_admins: True/False. Defaults to True.\n" +" If True, a user who does not match will be removed from the organization's\n" +" administrative list.\n" +"- users: None, True/False, string or list of strings. Same rules apply as " +"for\n" +" admins.\n" +"- remove_users: True/False. Defaults to True. Same rules as apply for \n" +" remove_admins." +msgstr "" +"Mappage avec des administrateurs/utilisateurs d'organisation appartenant à " +"des comptes d'authentification sociale. Ce paramètre\n" +"contrôle les utilisateurs qui sont placés dans les organisations Tower en " +"fonction de\n" +"leur nom d'utilisateur et adresse électronique. Les clés de dictionnaire " +"sont des noms d'organisation.\n" +"Des organisations seront créées si elles ne sont pas présentes dans le cas " +"où la licence autoriserait\n" +"plusieurs organisations, sinon l'organisation par défaut est utilisée\n" +"indépendamment de la clé. Les valeurs sont des dictionnaires définissant les " +"options\n" +"d'appartenance de chaque organisation. 
Pour chaque organisation, il est " +"possible de\n" +"préciser les utilisateurs qui sont automatiquement utilisateurs de " +"l'organisation et\n" +"ceux qui peuvent administrer l'organisation. \n" +"\n" +"- admins : None, True/False, chaîne ou liste de chaînes.\n" +" Si défini sur None, les administrateurs de l'organisation ne sont pas mis à " +"jour.\n" +" Si défini sur True, tous les utilisateurs se servant de l'authentification " +"sociale sont automatiquement ajoutés en tant qu'administrateurs\n" +" de l'organisation.\n" +" Si défini sur False, aucun utilisateur d'authentification sociale n'est " +"automatiquement ajouté en tant qu'administrateur de\n" +" l'organisation.\n" +" Si une chaîne ou une liste de chaînes est entrée, elle spécifie les noms " +"d'utilisateur et les adresses électroniques des\n" +" utilisateurs qui seront ajoutés à l'organisation. Les chaînes au format\n" +" \"//\" sont interprétées comme des expressions régulières " +"JavaScript et\n" +" peuvent également être utilisées à la place de littéraux de chaîne ; seuls " +"\"i\" et \"m\" sont pris en charge\n" +" pour les marqueurs.\n" +"- remove_admins : True/False. Par défaut défini sur True.\n" +" Si défini sur True, l'utilisateur qui ne correspond pas est supprimé de la " +"liste administrative\n" +" de l'organisation.\n" +"- users : None, True/False, chaîne ou liste de chaînes. Les mêmes règles " +"s'appliquent que pour\n" +" admins.\n" +"- remove_users : True/False. Par défaut défini sur True. Les mêmes règles " +"s'appliquent que pour \n" +" remove_admins." + +#: awx/sso/conf.py:76 +msgid "" +"Mapping of team members (users) from social auth accounts. Keys are team\n" +"names (will be created if not present). Values are dictionaries of options\n" +"for each team's membership, where each can contain the following parameters:\n" +"\n" +"- organization: string. The name of the organization to which the team\n" +" belongs. The team will be created if the combination of organization and\n" +" team name does not exist. The organization will first be created if it\n" +" does not exist. If the license does not allow for multiple organizations,\n" +" the team will always be assigned to the single default organization.\n" +"- users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all social auth users will be added/removed as team\n" +" members.\n" +" If a string or list of strings, specifies expressions used to match users.\n" +" User will be added as a team member if the username or email matches.\n" +" Strings in the format \"//\" will be interpreted as " +"JavaScript\n" +" regular expressions and may also be used instead of string literals; only " +"\"i\"\n" +" and \"m\" are supported for flags.\n" +"- remove: True/False. Defaults to True. If True, a user who does not match\n" +" the rules above will be removed from the team." +msgstr "" +"Mappage des membres d'équipe (utilisateurs) de compte d'authentification " +"sociale. Les clés sont des \n" +"noms d'équipe (seront créés s'ils ne sont pas présents). Les valeurs sont " +"des dictionnaires d'options\n" +"d'appartenance à chaque équipe, où chacune peut contenir les paramètres " +"suivants :\n" +"\n" +"- organization : chaîne. Nom de l'organisation à laquelle l'équipe\n" +" appartient. Une équipe est créée si la combinaison nom de l'organisation/" +"nom de \n" +"l'équipe n'existe pas. L'organisation sera d'abord créée \n" +"si elle n'existe pas.
Si la licence n'autorise pas plusieurs organisations, \n" +"l'équipe est toujours attribuée à l'organisation par défaut. \n" +"- users : None, True/False, chaîne ou liste de chaînes.\n" +" Si défini sur None, les membres de l'équipe ne sont pas mis à jour.\n" +" Si défini sur True/False, tous les utilisateurs d'authentification sociale " +"sont ajoutés/supprimés en tant que membres\n" +" d'équipe.\n" +" Si une chaîne ou une liste de chaînes est entrée, elle spécifie les " +"expressions utilisées pour comparer les utilisateurs. \n" +"L'utilisateur est ajouté en tant que membre d'équipe si son nom " +"d'utilisateur ou son adresse électronique correspond.\n" +" Les chaînes au format \"//\" sont interprétées comme des " +"expressions régulières JavaScript\n" +" et peuvent également être utilisées à la place de littéraux de " +"chaîne ; seuls \"i\"\n" +" et \"m\" sont pris en charge pour les marqueurs.\n" +"- remove : True/False. Par défaut défini sur True. Si défini sur True, tout " +"utilisateur qui ne correspond\n" +" pas aux règles ci-dessus est supprimé de l'équipe." + +#: awx/sso/conf.py:119 +msgid "Authentication Backends" +msgstr "Backends d'authentification" + +#: awx/sso/conf.py:120 +msgid "" +"List of authentication backends that are enabled based on license features " +"and other authentication settings." +msgstr "" +"Liste des backends d'authentification activés en fonction des " +"caractéristiques des licences et d'autres paramètres d'authentification." + +#: awx/sso/conf.py:133 +msgid "Social Auth Organization Map" +msgstr "Authentification sociale - Mappage des organisations" + +#: awx/sso/conf.py:145 +msgid "Social Auth Team Map" +msgstr "Authentification sociale - Mappage des équipes" + +#: awx/sso/conf.py:157 +msgid "Social Auth User Fields" +msgstr "Authentification sociale - Champs d'utilisateurs" + +#: awx/sso/conf.py:158 +msgid "" +"When set to an empty list `[]`, this setting prevents new user accounts from " +"being created. Only users who have previously logged in using social auth or " +"have a user account with a matching email address will be able to login." +msgstr "" +"Lorsqu'il est défini sur une liste vide `[]`, ce paramètre empêche la " +"création de nouveaux comptes d'utilisateur. Seuls les utilisateurs ayant " +"déjà ouvert une session au moyen de l'authentification sociale ou disposant " +"d'un compte utilisateur avec une adresse électronique correspondante " +"pourront se connecter." + +#: awx/sso/conf.py:176 +msgid "LDAP Server URI" +msgstr "URI du serveur LDAP" + +#: awx/sso/conf.py:177 +msgid "" +"URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-" +"SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be " +"specified by separating with spaces or commas. LDAP authentication is " +"disabled if this parameter is empty." +msgstr "" +"URI de connexion au serveur LDAP, tel que \"ldap://ldap.example.com:389\" " +"(non SSL) ou \"ldaps://ldap.example.com:636\" (SSL). Plusieurs serveurs LDAP " +"peuvent être définis en les séparant par des espaces ou des virgules. " +"L'authentification LDAP est désactivée si ce paramètre est vide."
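The two long social-auth help texts above (awx/sso/conf.py:27 and :76) describe a dictionary format for mapping logins into organizations and teams. A minimal sketch of settings that satisfy that format; the setting names follow the conf.py registry, and every organization, team, and address is invented:

    # Illustrative values only; the key names (admins, remove_admins,
    # users, remove_users, organization, remove) come from the help text.
    SOCIAL_AUTH_ORGANIZATION_MAP = {
        "Engineering": {
            "admins": ["admin@example.com"],
            "remove_admins": True,
            # "/.../" strings are treated as JavaScript regular expressions:
            "users": ["/.*@example\\.com$/i"],
            "remove_users": True,
        },
    }
    SOCIAL_AUTH_TEAM_MAP = {
        "Ops": {
            "organization": "Engineering",  # created first if missing
            "users": ["ops-lead@example.com"],
            "remove": True,
        },
    }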
+ +#: awx/sso/conf.py:181 +#: awx/sso/conf.py:199 +#: awx/sso/conf.py:211 +#: awx/sso/conf.py:223 +#: awx/sso/conf.py:239 +#: awx/sso/conf.py:258 +#: awx/sso/conf.py:280 +#: awx/sso/conf.py:296 +#: awx/sso/conf.py:315 +#: awx/sso/conf.py:332 +#: awx/sso/conf.py:349 +#: awx/sso/conf.py:365 +#: awx/sso/conf.py:382 +#: awx/sso/conf.py:420 +#: awx/sso/conf.py:461 +msgid "LDAP" +msgstr "LDAP" + +#: awx/sso/conf.py:193 +msgid "LDAP Bind DN" +msgstr "ND de la liaison LDAP" + +#: awx/sso/conf.py:194 +msgid "" +"DN (Distinguished Name) of user to bind for all search queries. Normally in " +"the format \"CN=Some User,OU=Users,DC=example,DC=com\" but may also be " +"specified as \"DOMAIN\\username\" for Active Directory. This is the system " +"user account we will use to login to query LDAP for other user information." +msgstr "" +"ND (nom distinctif) de l'utilisateur à lier pour toutes les requêtes de " +"recherche. Normalement, au format \"CN=Some User,OU=Users,DC=example,DC=com\" " +"mais peut aussi être entré au format " +"\"DOMAINE\\nom d'utilisateur\" pour Active Directory. Il s'agit du compte " +"utilisateur système que nous utiliserons pour nous connecter afin " +"d'interroger LDAP et obtenir d'autres informations utilisateur." + +#: awx/sso/conf.py:209 +msgid "LDAP Bind Password" +msgstr "Mot de passe de la liaison LDAP" + +#: awx/sso/conf.py:210 +msgid "Password used to bind LDAP user account." +msgstr "Mot de passe utilisé pour lier le compte utilisateur LDAP." + +#: awx/sso/conf.py:221 +msgid "LDAP Start TLS" +msgstr "LDAP - Lancer TLS" + +#: awx/sso/conf.py:222 +msgid "Whether to enable TLS when the LDAP connection is not using SSL." +msgstr "Indique s'il faut activer TLS lorsque la connexion LDAP n'utilise pas SSL." + +#: awx/sso/conf.py:232 +msgid "LDAP Connection Options" +msgstr "Options de connexion à LDAP" + +#: awx/sso/conf.py:233 +msgid "" +"Additional options to set for the LDAP connection. LDAP referrals are " +"disabled by default (to prevent certain LDAP queries from hanging with AD). " +"Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://" +"www.python-ldap.org/doc/html/ldap.html#options for possible options and " +"values that can be set." +msgstr "" +"Options supplémentaires à définir pour la connexion LDAP. Les références " +"LDAP sont désactivées par défaut (pour empêcher certaines requêtes LDAP de " +"se bloquer avec AD). Les noms d'options doivent être des chaînes (par " +"exemple \"OPT_REFERRALS\"). Reportez-vous à https://www.python-ldap.org/doc/" +"html/ldap.html#options afin de connaître les options possibles et les " +"valeurs que vous pouvez définir." + +#: awx/sso/conf.py:251 +msgid "LDAP User Search" +msgstr "Recherche d'utilisateurs LDAP" + +#: awx/sso/conf.py:252 +msgid "" +"LDAP search query to find users. Any user that matches the given pattern " +"will be able to login to Tower. The user should also be mapped into an " +"Tower organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). " +"If multiple search queries need to be supported use of \"LDAPUnion\" is " +"possible. See python-ldap documentation as linked at the top of this section." +"" +msgstr "" +"Requête de recherche LDAP servant à retrouver des utilisateurs. Tout " +"utilisateur qui correspond au modèle donné pourra se connecter à Tower. " +"L'utilisateur doit également être mappé dans une organisation Tower (tel que " +"défini dans le paramètre AUTH_LDAP_ORGANIZATION_MAP).
Si plusieurs requêtes " +"de recherche doivent être prises en charge, l'utilisation de \"LDAPUnion\" " +"est possible. Se reporter à la documentation sur python-ldap en suivant le " +"lien indiqué en haut de cette section." + +#: awx/sso/conf.py:274 +msgid "LDAP User DN Template" +msgstr "Modèle de ND pour les utilisateurs LDAP" + +#: awx/sso/conf.py:275 +msgid "" +"Alternative to user search, if user DNs are all of the same format. This " +"approach will be more efficient for user lookups than searching if it is " +"usable in your organizational environment. If this setting has a value it " +"will be used instead of AUTH_LDAP_USER_SEARCH." +msgstr "" +"Autre méthode de recherche d'utilisateurs, si les ND d'utilisateur se " +"présentent tous au même format. Cette approche est plus efficace qu'une " +"recherche d'utilisateurs si vous pouvez l'utiliser dans votre environnement " +"organisationnel. Si ce paramètre est défini, sa valeur sera utilisée à la " +"place de AUTH_LDAP_USER_SEARCH." + +#: awx/sso/conf.py:290 +msgid "LDAP User Attribute Map" +msgstr "Mappe des attributs d'utilisateurs LDAP" + +#: awx/sso/conf.py:291 +msgid "" +"Mapping of LDAP user schema to Tower API user attributes (key is user " +"attribute name, value is LDAP attribute name). The default setting is valid " +"for ActiveDirectory but users with other LDAP configurations may need to " +"change the values (not the keys) of the dictionary/hash-table." +msgstr "" +"Mappage du schéma utilisateur LDAP avec les attributs utilisateur d'API " +"Tower (la clé est le nom de l'attribut utilisateur, la valeur est le nom de " +"l'attribut LDAP). Le paramètre par défaut est valide pour ActiveDirectory, " +"mais les utilisateurs ayant d'autres configurations LDAP peuvent être amenés " +"à modifier les valeurs (et non les clés) du dictionnaire/de la table de " +"hachage." + +#: awx/sso/conf.py:310 +msgid "LDAP Group Search" +msgstr "Recherche de groupes LDAP" + +#: awx/sso/conf.py:311 +msgid "" +"Users in Tower are mapped to organizations based on their membership in LDAP " +"groups. This setting defines the LDAP search query to find groups. Note that " +"this, unlike the user search above, does not support LDAPSearchUnion." +msgstr "" +"Les utilisateurs de Tower sont mappés à des organisations en fonction de " +"leur appartenance à des groupes LDAP. Ce paramètre définit la requête de " +"recherche LDAP servant à rechercher des groupes. Notez que cette méthode, " +"contrairement à la recherche d'utilisateurs LDAP, ne prend pas en charge " +"LDAPSearchUnion." + +#: awx/sso/conf.py:328 +msgid "LDAP Group Type" +msgstr "Type de groupe LDAP" + +#: awx/sso/conf.py:329 +msgid "" +"The group type may need to be changed based on the type of the LDAP server. " +"Values are listed at: http://pythonhosted.org/django-auth-ldap/groups." +"html#types-of-groups" +msgstr "" +"Il convient parfois de modifier le type de groupe en fonction du type de " +"serveur LDAP. Les valeurs sont répertoriées à l'adresse suivante : http://" +"pythonhosted.org/django-auth-ldap/groups.html#types-of-groups" + +#: awx/sso/conf.py:344 +msgid "LDAP Require Group" +msgstr "Groupe LDAP obligatoire" + +#: awx/sso/conf.py:345 +msgid "" +"Group DN required to login. If specified, user must be a member of this " +"group to login via LDAP. If not set, everyone in LDAP that matches the user " +"search will be able to login via Tower. Only one require group is supported." +msgstr "" +"ND du groupe requis pour la connexion.
S'il est spécifié, " +"l'utilisateur doit être membre de ce groupe pour pouvoir se connecter via " +"LDAP. S'il n'est pas défini, tout utilisateur LDAP qui correspond à la " +"recherche d'utilisateurs pourra se connecter via Tower. Un seul groupe est " +"pris en charge." + +#: awx/sso/conf.py:361 +msgid "LDAP Deny Group" +msgstr "Groupe LDAP refusé" + +#: awx/sso/conf.py:362 +msgid "" +"Group DN denied from login. If specified, user will not be allowed to login " +"if a member of this group. Only one deny group is supported." +msgstr "" +"ND du groupe dont la connexion est refusée. S'il est spécifié, l'utilisateur " +"n'est pas autorisé à se connecter s'il est membre de ce groupe. Un seul " +"groupe refusé est pris en charge." + +#: awx/sso/conf.py:375 +msgid "LDAP User Flags By Group" +msgstr "Marqueurs d'utilisateur LDAP par groupe" + +#: awx/sso/conf.py:376 +msgid "" +"User profile flags updated from group membership (key is user attribute " +"name, value is group DN). These are boolean fields that are matched based " +"on whether the user is a member of the given group. So far only " +"is_superuser is settable via this method. This flag is set both true and " +"false at login time based on current LDAP settings." +msgstr "" +"Marqueurs de profil utilisateur mis à jour selon l'appartenance au groupe " +"(la clé est le nom de l'attribut utilisateur, la valeur est le ND du groupe)." +" Il s'agit de champs booléens qui sont associés selon que l'utilisateur est " +"ou non membre du groupe donné. Jusqu'à présent, seul is_superuser peut être " +"défini avec cette méthode. Ce marqueur est défini à la fois sur True et " +"False au moment de la connexion, en fonction des paramètres LDAP actifs." + +#: awx/sso/conf.py:394 +msgid "LDAP Organization Map" +msgstr "Mappe d'organisations LDAP" + +#: awx/sso/conf.py:395 +msgid "" +"Mapping between organization admins/users and LDAP groups. This controls " +"what users are placed into what Tower organizations relative to their LDAP " +"group memberships. Keys are organization names. Organizations will be " +"created if not present. Values are dictionaries defining the options for " +"each organization's membership. For each organization it is possible to " +"specify what groups are automatically users of the organization and also " +"what groups can administer the organization.\n" +"\n" +" - admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated based on LDAP values.\n" +" If True, all users in LDAP will automatically be added as admins of the " +"organization.\n" +" If False, no LDAP users will be automatically added as admins of the " +"organization.\n" +" If a string or list of strings, specifies the group DN(s) that will be " +"added of the organization if they match any of the specified groups.\n" +" - remove_admins: True/False. Defaults to True.\n" +" If True, a user who is not an member of the given groups will be removed " +"from the organization's administrative list.\n" +" - users: None, True/False, string or list of strings. Same rules apply as " +"for admins.\n" +" - remove_users: True/False. Defaults to True. Same rules apply as for " +"remove_admins." +msgstr "" +"Mappage entre les administrateurs/utilisateurs de l'organisation et les " +"groupes LDAP. Ce paramètre détermine les utilisateurs qui sont placés dans " +"les organisations Tower par rapport à leurs appartenances à un groupe LDAP. " +"Les clés sont les noms d'organisation. 
Les organisations seront créées si " +"elles ne sont pas présentes. Les valeurs sont des dictionnaires définissant " +"les options d'appartenance à chaque organisation. Pour chaque organisation, " +"il est possible de spécifier les groupes qui sont automatiquement des " +"utilisateurs de l'organisation et ceux qui peuvent administrer " +"l'organisation.\n" +"\n" +" - admins : None, True/False, chaîne ou liste de chaînes.\n" +"Si défini sur None, les administrateurs de l'organisation ne sont pas mis à " +"jour en fonction des valeurs LDAP.\n" +" Si défini sur True, tous les utilisateurs LDAP sont automatiquement ajoutés " +"en tant qu'administrateurs de l'organisation.\n" +" Si défini sur False, aucun utilisateur LDAP n'est automatiquement ajouté en " +"tant qu'administrateur de l'organisation.\n" +" Si une chaîne ou une liste de chaînes est entrée, elle spécifie le ou les " +"ND de groupe qui seront ajoutés à l'organisation s'ils correspondent à l'un " +"des groupes spécifiés.\n" +" - remove_admins : True/False. Par défaut défini sur True.\n" +" Si défini sur True, tout utilisateur qui n'est pas membre des groupes " +"donnés est supprimé de la liste administrative de l'organisation.\n" +" - users : None, True/False, chaîne ou liste de chaînes. Les mêmes règles " +"s'appliquent que pour admins.\n" +" - remove_users : True/False. Par défaut défini sur True. Les mêmes règles " +"s'appliquent que pour remove_admins." + +#: awx/sso/conf.py:443 +msgid "LDAP Team Map" +msgstr "Mappe d'équipes LDAP" + +#: awx/sso/conf.py:444 +msgid "" +"Mapping between team members (users) and LDAP groups. Keys are team names " +"(will be created if not present). Values are dictionaries of options for " +"each team's membership, where each can contain the following parameters:\n" +"\n" +" - organization: string. The name of the organization to which the team " +"belongs. The team will be created if the combination of organization and " +"team name does not exist. The organization will first be created if it does " +"not exist.\n" +" - users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all LDAP users will be added/removed as team members.\n" +" If a string or list of strings, specifies the group DN(s). User will be " +"added as a team member if the user is a member of ANY of these groups.\n" +"- remove: True/False. Defaults to True. If True, a user who is not a member " +"of the given groups will be removed from the team." +msgstr "" +"Mappage entre les membres d'équipe (utilisateurs) et les groupes LDAP. Les " +"clés sont des noms d'équipe (seront créés s'ils ne sont pas présents). Les " +"valeurs sont des dictionnaires d'options d'appartenance à chaque équipe, où " +"chacune peut contenir les paramètres suivants :\n" +"\n" +" - organization : chaîne. Nom de l'organisation à laquelle l'équipe " +"appartient. Une équipe est créée si la combinaison nom de l'organisation/nom " +"de l'équipe n'existe pas. L'organisation sera d'abord créée si elle n'existe " +"pas.\n" +" - users : None, True/False, chaîne ou liste de chaînes.\n" +" Si défini sur None, les membres de l'équipe ne sont pas mis à jour.\n" +" Si défini sur True/False, tous les utilisateurs LDAP seront ajoutés/" +"supprimés en tant que membres d'équipe.\n" +" Si une chaîne ou une liste de chaînes est entrée, elle spécifie les ND des " +"groupes. L'utilisateur est ajouté en tant que membre d'équipe s'il est " +"membre de l'UN de ces groupes.\n" +"- remove : True/False.
Par défaut défini sur True. Si défini sur True, tout " +"utilisateur qui n'est pas membre des groupes donnés est supprimé de l'équipe." +"" + +#: awx/sso/conf.py:487 +msgid "RADIUS Server" +msgstr "Serveur RADIUS" + +#: awx/sso/conf.py:488 +msgid "" +"Hostname/IP of RADIUS server. RADIUS authentication will be disabled if this " +"setting is empty." +msgstr "" +"Nom d'hôte/IP du serveur RADIUS. L'authentification RADIUS est désactivée si " +"ce paramètre est vide." + +#: awx/sso/conf.py:490 +#: awx/sso/conf.py:504 +#: awx/sso/conf.py:516 +msgid "RADIUS" +msgstr "RADIUS" + +#: awx/sso/conf.py:502 +msgid "RADIUS Port" +msgstr "Port RADIUS" + +#: awx/sso/conf.py:503 +msgid "Port of RADIUS server." +msgstr "Port du serveur RADIUS." + +#: awx/sso/conf.py:514 +msgid "RADIUS Secret" +msgstr "Secret RADIUS" + +#: awx/sso/conf.py:515 +msgid "Shared secret for authenticating to RADIUS server." +msgstr "Secret partagé pour l'authentification sur le serveur RADIUS." + +#: awx/sso/conf.py:531 +msgid "Google OAuth2 Callback URL" +msgstr "URL de rappel OAuth2 pour Google" + +#: awx/sso/conf.py:532 +msgid "" +"Create a project at https://console.developers.google.com/ to obtain an " +"OAuth2 key and secret for a web application. Ensure that the Google+ API is " +"enabled. Provide this URL as the callback URL for your application." +msgstr "" +"Créez un projet sur https://console.developers.google.com/ afin d'obtenir " +"une clé OAuth2 et un secret pour une application Web. Assurez-vous que l'API " +"Google+ est activée. Entrez cette URL comme URL de rappel de votre " +"application." + +#: awx/sso/conf.py:536 +#: awx/sso/conf.py:547 +#: awx/sso/conf.py:558 +#: awx/sso/conf.py:571 +#: awx/sso/conf.py:585 +#: awx/sso/conf.py:597 +#: awx/sso/conf.py:609 +msgid "Google OAuth2" +msgstr "OAuth2 pour Google" + +#: awx/sso/conf.py:545 +msgid "Google OAuth2 Key" +msgstr "Clé OAuth2 pour Google" + +#: awx/sso/conf.py:546 +msgid "" +"The OAuth2 key from your web application at https://console.developers." +"google.com/." +msgstr "" +"Clé OAuth2 de votre application Web sur https://console.developers.google." +"com/." + +#: awx/sso/conf.py:556 +msgid "Google OAuth2 Secret" +msgstr "Secret OAuth2 pour Google" + +#: awx/sso/conf.py:557 +msgid "" +"The OAuth2 secret from your web application at https://console.developers." +"google.com/." +msgstr "" +"Secret OAuth2 de votre application Web sur https://console.developers.google." +"com/." + +#: awx/sso/conf.py:568 +msgid "Google OAuth2 Whitelisted Domains" +msgstr "Domaines sur liste blanche OAuth2 pour Google" + +#: awx/sso/conf.py:569 +msgid "" +"Update this setting to restrict the domains who are allowed to login using " +"Google OAuth2." +msgstr "" +"Mettez à jour ce paramètre pour limiter les domaines qui sont autorisés à se " +"connecter à l'aide de l'authentification OAuth2 avec un compte Google." + +#: awx/sso/conf.py:580 +msgid "Google OAuth2 Extra Arguments" +msgstr "Arguments OAuth2 supplémentaires pour Google" + +#: awx/sso/conf.py:581 +msgid "" +"Extra arguments for Google OAuth2 login. When only allowing a single domain " +"to authenticate, set to `{\"hd\": \"yourdomain.com\"}` and Google will not " +"display any other accounts even if the user is logged in with multiple " +"Google accounts." +msgstr "" +"Arguments supplémentaires pour l'authentification OAuth2 avec un compte " +"Google. Lorsque vous autorisez un seul domaine à s'authentifier, définissez " +"ce paramètre sur `{\"hd\": \"yourdomain.com\"}`.
Google n'affichera aucun " +"autre compte même si l'utilisateur est connecté avec plusieurs comptes " +"Google." + +#: awx/sso/conf.py:595 +msgid "Google OAuth2 Organization Map" +msgstr "Mappe d'organisations OAuth2 pour Google" + +#: awx/sso/conf.py:607 +msgid "Google OAuth2 Team Map" +msgstr "Mappe d'équipes OAuth2 pour Google" + +#: awx/sso/conf.py:623 +msgid "GitHub OAuth2 Callback URL" +msgstr "URL de rappel OAuth2 pour GitHub" + +#: awx/sso/conf.py:624 +msgid "" +"Create a developer application at https://github.com/settings/developers to " +"obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this " +"URL as the callback URL for your application." +msgstr "" +"Créez une application de développeur sur https://github.com/settings/" +"developers pour obtenir une clé OAuth2 (ID client) et un secret (secret " +"client). Entrez cette URL comme URL de rappel de votre application." + +#: awx/sso/conf.py:628 +#: awx/sso/conf.py:639 +#: awx/sso/conf.py:649 +#: awx/sso/conf.py:661 +#: awx/sso/conf.py:673 +msgid "GitHub OAuth2" +msgstr "OAuth2 pour GitHub" + +#: awx/sso/conf.py:637 +msgid "GitHub OAuth2 Key" +msgstr "Clé OAuth2 pour GitHub" + +#: awx/sso/conf.py:638 +msgid "The OAuth2 key (Client ID) from your GitHub developer application." +msgstr "Clé OAuth2 (ID client) de votre application de développeur GitHub." + +#: awx/sso/conf.py:647 +msgid "GitHub OAuth2 Secret" +msgstr "Secret OAuth2 pour GitHub" + +#: awx/sso/conf.py:648 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub developer application." +msgstr "" +"Secret OAuth2 (secret client) de votre application de développeur GitHub." + +#: awx/sso/conf.py:659 +msgid "GitHub OAuth2 Organization Map" +msgstr "Mappe d'organisations OAuth2 pour GitHub" + +#: awx/sso/conf.py:671 +msgid "GitHub OAuth2 Team Map" +msgstr "Mappe d'équipes OAuth2 pour GitHub" + +#: awx/sso/conf.py:687 +msgid "GitHub Organization OAuth2 Callback URL" +msgstr "URL de rappel OAuth2 pour les organisations GitHub" + +#: awx/sso/conf.py:688 +#: awx/sso/conf.py:763 +msgid "" +"Create an organization-owned application at https://github.com/organizations/" +"/settings/applications and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). Provide this URL as the callback URL for your " +"application." +msgstr "" +"Créez une application appartenant à une organisation sur https://github.com/" +"organizations//settings/applications et obtenez une clé OAuth2 (ID " +"client) et un secret (secret client). Entrez cette URL comme URL de rappel " +"de votre application." + +#: awx/sso/conf.py:692 +#: awx/sso/conf.py:703 +#: awx/sso/conf.py:713 +#: awx/sso/conf.py:725 +#: awx/sso/conf.py:736 +#: awx/sso/conf.py:748 +msgid "GitHub Organization OAuth2" +msgstr "OAuth2 pour les organisations GitHub" + +#: awx/sso/conf.py:701 +msgid "GitHub Organization OAuth2 Key" +msgstr "Clé OAuth2 pour les organisations GitHub" + +#: awx/sso/conf.py:702 +#: awx/sso/conf.py:777 +msgid "The OAuth2 key (Client ID) from your GitHub organization application." +msgstr "Clé OAuth2 (ID client) de votre application d'organisation GitHub." + +#: awx/sso/conf.py:711 +msgid "GitHub Organization OAuth2 Secret" +msgstr "Secret OAuth2 pour les organisations GitHub" + +#: awx/sso/conf.py:712 +#: awx/sso/conf.py:787 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub organization application." +msgstr "" +"Secret OAuth2 (secret client) de votre application d'organisation GitHub." 
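Taken together, the Google OAuth2 entries above (callback URL, key, secret, whitelisted domains, extra arguments) correspond to a handful of python-social-auth settings. A hedged sample with placeholder values; the setting names follow the python-social-auth convention this file's strings describe:

    # Placeholder values only.
    SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = "1234567890-abcdef.apps.googleusercontent.com"
    SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = "client-secret-from-the-console"
    # "Whitelisted Domains": restrict which domains may log in.
    SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ["example.com"]
    # "Extra Arguments": hide other accounts in Google's account chooser
    # when only one domain is allowed to authenticate.
    SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS = {"hd": "example.com"}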
+ +#: awx/sso/conf.py:722 +msgid "GitHub Organization Name" +msgstr "Nom de l'organisation GitHub" + +#: awx/sso/conf.py:723 +msgid "" +"The name of your GitHub organization, as used in your organization's URL: " +"https://github.com//." +msgstr "" +"Nom de votre organisation GitHub, tel qu'utilisé dans l'URL de votre " +"organisation : https://github.com//." + +#: awx/sso/conf.py:734 +msgid "GitHub Organization OAuth2 Organization Map" +msgstr "Mappe d'organisations OAuth2 pour les organisations GitHub" + +#: awx/sso/conf.py:746 +msgid "GitHub Organization OAuth2 Team Map" +msgstr "Mappe d'équipes OAuth2 pour les organisations GitHub" + +#: awx/sso/conf.py:762 +msgid "GitHub Team OAuth2 Callback URL" +msgstr "URL de rappel OAuth2 pour les équipes GitHub" + +#: awx/sso/conf.py:767 +#: awx/sso/conf.py:778 +#: awx/sso/conf.py:788 +#: awx/sso/conf.py:800 +#: awx/sso/conf.py:811 +#: awx/sso/conf.py:823 +msgid "GitHub Team OAuth2" +msgstr "OAuth2 pour les équipes GitHub" + +#: awx/sso/conf.py:776 +msgid "GitHub Team OAuth2 Key" +msgstr "Clé OAuth2 pour les équipes GitHub" + +#: awx/sso/conf.py:786 +msgid "GitHub Team OAuth2 Secret" +msgstr "Secret OAuth2 pour les équipes GitHub" + +#: awx/sso/conf.py:797 +msgid "GitHub Team ID" +msgstr "ID d'équipe GitHub" + +#: awx/sso/conf.py:798 +msgid "" +"Find the numeric team ID using the Github API: http://fabian-kostadinov." +"github.io/2015/01/16/how-to-find-a-github-team-id/." +msgstr "" +"Recherchez votre ID d'équipe numérique à l'aide de l'API Github : http://" +"fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/." + +#: awx/sso/conf.py:809 +msgid "GitHub Team OAuth2 Organization Map" +msgstr "Mappe d'organisations OAuth2 pour les équipes GitHub" + +#: awx/sso/conf.py:821 +msgid "GitHub Team OAuth2 Team Map" +msgstr "Mappe d'équipes OAuth2 pour les équipes GitHub" + +#: awx/sso/conf.py:837 +msgid "Azure AD OAuth2 Callback URL" +msgstr "URL de rappel OAuth2 pour Azure AD" + +#: awx/sso/conf.py:838 +msgid "" +"Register an Azure AD application as described by https://msdn.microsoft.com/" +"en-us/library/azure/dn132599.aspx and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). Provide this URL as the callback URL for your " +"application." +msgstr "" +"Enregistrez une application AD Azure selon la procédure décrite sur https://" +"msdn.microsoft.com/en-us/library/azure/dn132599.aspx et obtenez une clé " +"OAuth2 (ID client) et un secret (secret client). Entrez cette URL comme URL " +"de rappel de votre application." + +#: awx/sso/conf.py:842 +#: awx/sso/conf.py:853 +#: awx/sso/conf.py:863 +#: awx/sso/conf.py:875 +#: awx/sso/conf.py:887 +msgid "Azure AD OAuth2" +msgstr "OAuth2 pour Azure AD" + +#: awx/sso/conf.py:851 +msgid "Azure AD OAuth2 Key" +msgstr "Clé OAuth2 pour Azure AD" + +#: awx/sso/conf.py:852 +msgid "The OAuth2 key (Client ID) from your Azure AD application." +msgstr "Clé OAuth2 (ID client) de votre application Azure AD." + +#: awx/sso/conf.py:861 +msgid "Azure AD OAuth2 Secret" +msgstr "Secret OAuth2 pour Azure AD" + +#: awx/sso/conf.py:862 +msgid "The OAuth2 secret (Client Secret) from your Azure AD application." +msgstr "Secret OAuth2 (secret client) de votre application Azure AD." 
+ +#: awx/sso/conf.py:873 +msgid "Azure AD OAuth2 Organization Map" +msgstr "Mappe d'organisations OAuth2 pour Azure AD" + +#: awx/sso/conf.py:885 +msgid "Azure AD OAuth2 Team Map" +msgstr "Mappe d'équipes OAuth2 pour Azure AD" + +#: awx/sso/conf.py:906 +msgid "SAML Service Provider Callback URL" +msgstr "URL de rappel du fournisseur de services SAML" + +#: awx/sso/conf.py:907 +msgid "" +"Register Tower as a service provider (SP) with each identity provider (IdP) " +"you have configured. Provide your SP Entity ID and this callback URL for " +"your application." +msgstr "" +"Enregistrez Tower en tant que fournisseur de services (SP) auprès de chaque " +"fournisseur d'identité (IdP) que vous avez configuré. Entrez votre ID " +"d'entité SP et cette URL de rappel pour votre application." + +#: awx/sso/conf.py:910 +#: awx/sso/conf.py:924 +#: awx/sso/conf.py:937 +#: awx/sso/conf.py:951 +#: awx/sso/conf.py:965 +#: awx/sso/conf.py:983 +#: awx/sso/conf.py:1005 +#: awx/sso/conf.py:1024 +#: awx/sso/conf.py:1044 +#: awx/sso/conf.py:1078 +#: awx/sso/conf.py:1091 +msgid "SAML" +msgstr "SAML" + +#: awx/sso/conf.py:921 +msgid "SAML Service Provider Metadata URL" +msgstr "URL de métadonnées du fournisseur de services SAML" + +#: awx/sso/conf.py:922 +msgid "" +"If your identity provider (IdP) allows uploading an XML metadata file, you " +"can download one from this URL." +msgstr "" +"Si votre fournisseur d'identité (IdP) permet d'importer un fichier de " +"métadonnées XML, vous pouvez en télécharger un à partir de cette URL." + +#: awx/sso/conf.py:934 +msgid "SAML Service Provider Entity ID" +msgstr "ID d'entité du fournisseur de services SAML" + +#: awx/sso/conf.py:935 +msgid "" +"The application-defined unique identifier used as the audience of the SAML " +"service provider (SP) configuration." +msgstr "" +"Identifiant unique défini par l'application utilisé comme audience dans la " +"configuration du fournisseur de services (SP) SAML." + +#: awx/sso/conf.py:948 +msgid "SAML Service Provider Public Certificate" +msgstr "Certificat public du fournisseur de services SAML" + +#: awx/sso/conf.py:949 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"certificate content here." +msgstr "" +"Créez une paire de clés pour que Tower puisse être utilisé comme fournisseur " +"de services (SP) et entrez le contenu du certificat ici." + +#: awx/sso/conf.py:962 +msgid "SAML Service Provider Private Key" +msgstr "Clé privée du fournisseur de services SAML" + +#: awx/sso/conf.py:963 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"private key content here." +msgstr "" +"Créez une paire de clés pour que Tower puisse être utilisé comme fournisseur " +"de services (SP) et entrez le contenu de la clé privée ici." + +#: awx/sso/conf.py:981 +msgid "SAML Service Provider Organization Info" +msgstr "Infos organisationnelles du fournisseur de services SAML" + +#: awx/sso/conf.py:982 +msgid "Configure this setting with information about your app." +msgstr "" +"Configurez ce paramètre en vous servant des informations de votre " +"application." + +#: awx/sso/conf.py:1003 +msgid "SAML Service Provider Technical Contact" +msgstr "Contact technique du fournisseur de services SAML" + +#: awx/sso/conf.py:1004 +#: awx/sso/conf.py:1023 +msgid "Configure this setting with your contact information." +msgstr "Configurez ce paramètre en vous servant de vos coordonnées."
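The SAML service-provider strings above reference an organization-info block and two contact blocks. The language-code-keyed layout below matches the "Invalid language code(s) for org info" validation message later in this file; names and addresses are invented:

    # Illustrative shape only, following the python-social-auth SAML settings.
    SOCIAL_AUTH_SAML_ORG_INFO = {
        "en-US": {
            "name": "example",
            "displayname": "Example Corp",
            "url": "https://www.example.com",
        },
    }
    SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
        "givenName": "Technical Contact",
        "emailAddress": "tech@example.com",
    }
    SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
        "givenName": "Support Contact",
        "emailAddress": "support@example.com",
    }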
+ +#: awx/sso/conf.py:1022 +msgid "SAML Service Provider Support Contact" +msgstr "Contact support du fournisseur de services SAML" + +#: awx/sso/conf.py:1037 +msgid "SAML Enabled Identity Providers" +msgstr "Fournisseurs d'identité compatibles SAML" + +#: awx/sso/conf.py:1038 +msgid "" +"Configure the Entity ID, SSO URL and certificate for each identity provider " +"(IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user " +"data using attribute names that differ from the default OIDs (https://github." +"com/omab/python-social-auth/blob/master/social/backends/saml.py#L16). " +"Attribute names may be overridden for each IdP." +msgstr "" +"Configurez l'ID d'entité, l'URL SSO et le certificat pour chaque fournisseur " +"d'identité (IdP) utilisé. Plusieurs IdP SAML sont pris en charge. Certains " +"IdP peuvent fournir des données utilisateur à l'aide de noms d'attributs qui " +"diffèrent des OID par défaut (https://github.com/omab/python-social-auth/" +"blob/master/social/backends/saml.py#L16). Les noms d'attributs peuvent être " +"remplacés pour chaque IdP." + +#: awx/sso/conf.py:1076 +msgid "SAML Organization Map" +msgstr "Mappe d'organisations SAML" + +#: awx/sso/conf.py:1089 +msgid "SAML Team Map" +msgstr "Mappe d'équipes SAML" + +#: awx/sso/fields.py:123 +msgid "Invalid connection option(s): {invalid_options}." +msgstr "Option(s) de connexion non valide(s) : {invalid_options}." + +#: awx/sso/fields.py:194 +msgid "Base" +msgstr "Base" + +#: awx/sso/fields.py:195 +msgid "One Level" +msgstr "Un niveau" + +#: awx/sso/fields.py:196 +msgid "Subtree" +msgstr "Sous-arborescence" + +#: awx/sso/fields.py:214 +msgid "Expected a list of three items but got {length} instead." +msgstr "" +"Une liste de trois éléments était attendue, mais {length} a été obtenu à la " +"place." + +#: awx/sso/fields.py:215 +msgid "Expected an instance of LDAPSearch but got {input_type} instead." +msgstr "" +"Une instance de LDAPSearch était attendue, mais {input_type} a été obtenu à " +"la place." + +#: awx/sso/fields.py:251 +msgid "" +"Expected an instance of LDAPSearch or LDAPSearchUnion but got {input_type} " +"instead." +msgstr "" +"Une instance de LDAPSearch ou de LDAPSearchUnion était attendue, mais " +"{input_type} a été obtenu à la place." + +#: awx/sso/fields.py:278 +msgid "Invalid user attribute(s): {invalid_attrs}." +msgstr "Attribut(s) d'utilisateur non valide(s) : {invalid_attrs}." + +#: awx/sso/fields.py:295 +msgid "Expected an instance of LDAPGroupType but got {input_type} instead." +msgstr "" +"Une instance de LDAPGroupType était attendue, mais {input_type} a été obtenu " +"à la place." + +#: awx/sso/fields.py:323 +msgid "Invalid user flag: \"{invalid_flag}\"." +msgstr "Marqueur d'utilisateur non valide : \"{invalid_flag}\"." + +#: awx/sso/fields.py:339 +#: awx/sso/fields.py:506 +msgid "" +"Expected None, True, False, a string or list of strings but got {input_type} " +"instead." +msgstr "" +"Les valeurs None, True, False, une chaîne ou une liste de chaînes étaient " +"attendues, mais {input_type} a été obtenu à la place." + +#: awx/sso/fields.py:375 +msgid "Missing key(s): {missing_keys}." +msgstr "Clé(s) manquante(s) : {missing_keys}." + +#: awx/sso/fields.py:376 +msgid "Invalid key(s): {invalid_keys}." +msgstr "Clé(s) non valide(s) : {invalid_keys}." + +#: awx/sso/fields.py:425 +#: awx/sso/fields.py:542 +msgid "Invalid key(s) for organization map: {invalid_keys}." +msgstr "Clé(s) non valide(s) pour la mappe d'organisations : {invalid_keys}." 
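The awx/sso/fields.py messages above embed named str.format-style placeholders ({invalid_options}, {input_type}, {length}, and so on). A translation must carry these tokens through unchanged, because the application formats the translated string at runtime; the sketch below illustrates that contract (the message text is taken from the entries above, the formatting call is illustrative):

    # The translated msgstr is used as a format template at runtime.
    msgstr = "Option(s) de connexion non valide(s) : {invalid_options}."
    print(msgstr.format(invalid_options="retry_delay=abc"))

    # Renaming the placeholder in translation breaks the message:
    broken = "Option(s) de connexion non valide(s) : {options}."
    try:
        broken.format(invalid_options="retry_delay=abc")
    except KeyError as exc:
        print("KeyError:", exc)  # the template asks for a key never supplied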
+ +#: awx/sso/fields.py:443 +msgid "Missing required key for team map: {invalid_keys}." +msgstr "Clé obligatoire manquante pour la mappe d'équipes : {invalid_keys}." + +#: awx/sso/fields.py:444 +#: awx/sso/fields.py:561 +msgid "Invalid key(s) for team map: {invalid_keys}." +msgstr "Clé(s) non valide(s) pour la mappe d'équipes : {invalid_keys}." + +#: awx/sso/fields.py:560 +msgid "Missing required key for team map: {missing_keys}." +msgstr "Clé obligatoire manquante pour la mappe d'équipes : {missing_keys}." + +#: awx/sso/fields.py:578 +msgid "Missing required key(s) for org info record: {missing_keys}." +msgstr "" +"Clé(s) obligatoire(s) manquante(s) pour l'enregistrement des infos organis. :" +" {missing_keys}." + +#: awx/sso/fields.py:591 +msgid "Invalid language code(s) for org info: {invalid_lang_codes}." +msgstr "" +"Code(s) de langue non valide(s) pour les infos organis. : {invalid_lang_codes}." +"" + +#: awx/sso/fields.py:610 +msgid "Missing required key(s) for contact: {missing_keys}." +msgstr "Clé(s) obligatoire(s) manquante(s) pour le contact : {missing_keys}." + +#: awx/sso/fields.py:622 +msgid "Missing required key(s) for IdP: {missing_keys}." +msgstr "Clé(s) obligatoire(s) manquante(s) pour l'IdP : {missing_keys}." + +#: awx/sso/pipeline.py:24 +msgid "An account cannot be found for {0}" +msgstr "Impossible de trouver un compte pour {0}" + +#: awx/sso/pipeline.py:30 +msgid "Your account is inactive" +msgstr "Votre compte est inactif" + +#: awx/sso/validators.py:19 +#: awx/sso/validators.py:44 +#, python-format +msgid "DN must include \"%%(user)s\" placeholder for username: %s" +msgstr "" +"Le ND doit inclure l'espace réservé \"%%(user)s\" pour le nom d'utilisateur :" +" %s" + +#: awx/sso/validators.py:26 +#, python-format +msgid "Invalid DN: %s" +msgstr "ND non valide : %s" + +#: awx/sso/validators.py:56 +#, python-format +msgid "Invalid filter: %s" +msgstr "Filtre non valide : %s" + +#: awx/templates/error.html:4 +#: awx/ui/templates/ui/index.html:8 +msgid "Ansible Tower" +msgstr "Ansible Tower" + +#: awx/templates/rest_framework/api.html:39 +msgid "Ansible Tower API Guide" +msgstr "Guide pour les API d'Ansible Tower" + +#: awx/templates/rest_framework/api.html:40 +msgid "Back to Ansible Tower" +msgstr "Retour à Ansible Tower" + +#: awx/templates/rest_framework/api.html:41 +msgid "Resize" +msgstr "Redimensionner" + +#: awx/templates/rest_framework/base.html:78 +#: awx/templates/rest_framework/base.html:92 +#, python-format +msgid "Make a GET request on the %(name)s resource" +msgstr "Appliquez une requête GET sur la ressource %(name)s" + +#: awx/templates/rest_framework/base.html:80 +msgid "Specify a format for the GET request" +msgstr "Spécifiez un format pour la requête GET" + +#: awx/templates/rest_framework/base.html:86 +#, python-format +msgid "" +"Make a GET request on the %(name)s resource with the format set to " +"`%(format)s`" +msgstr "" +"Appliquez une requête GET sur la ressource %(name)s avec un format défini " +"sur `%(format)s`" + +#: awx/templates/rest_framework/base.html:100 +#, python-format +msgid "Make an OPTIONS request on the %(name)s resource" +msgstr "Appliquez une requête OPTIONS sur la ressource %(name)s" + +#: awx/templates/rest_framework/base.html:106 +#, python-format +msgid "Make a DELETE request on the %(name)s resource" +msgstr "Appliquez une requête DELETE sur la ressource %(name)s" + +#: awx/templates/rest_framework/base.html:113 +msgid "Filters" +msgstr "Filtres" + +#: awx/templates/rest_framework/base.html:172 +#:
awx/templates/rest_framework/base.html:186 +#, python-format +msgid "Make a POST request on the %(name)s resource" +msgstr "Appliquez une requête POST sur la ressource %(name)s" + +#: awx/templates/rest_framework/base.html:216 +#: awx/templates/rest_framework/base.html:230 +#, python-format +msgid "Make a PUT request on the %(name)s resource" +msgstr "Appliquez une requête PUT sur la ressource %(name)s" + +#: awx/templates/rest_framework/base.html:233 +#, python-format +msgid "Make a PATCH request on the %(name)s resource" +msgstr "Appliquez une requête PATCH sur la ressource %(name)s" + +#: awx/ui/apps.py:9 +#: awx/ui/conf.py:22 +#: awx/ui/conf.py:38 +#: awx/ui/conf.py:53 +msgid "UI" +msgstr "IU" + +#: awx/ui/conf.py:16 +msgid "Off" +msgstr "Désactivé" + +#: awx/ui/conf.py:17 +msgid "Anonymous" +msgstr "Anonyme" + +#: awx/ui/conf.py:18 +msgid "Detailed" +msgstr "Détaillé" + +#: awx/ui/conf.py:20 +msgid "Analytics Tracking State" +msgstr "État du suivi analytique" + +#: awx/ui/conf.py:21 +msgid "Enable or Disable Analytics Tracking." +msgstr "Activez ou désactivez le suivi analytique." + +#: awx/ui/conf.py:31 +msgid "Custom Login Info" +msgstr "Infos de connexion personnalisées" + +#: awx/ui/conf.py:32 +msgid "" +"If needed, you can add specific information (such as a legal notice or a " +"disclaimer) to a text box in the login modal using this setting. Any content " +"added must be in plain text, as custom HTML or other markup languages are " +"not supported. If multiple paragraphs of text are needed, new lines " +"(paragraphs) must be escaped as `\\n` within the block of text." +msgstr "" +"Si nécessaire, vous pouvez ajouter des informations particulières (telles " +"qu'une mention légale ou une clause de non-responsabilité) à une zone de " +"texte dans la fenêtre modale de connexion, grâce à ce paramètre. Tout " +"contenu ajouté doit l'être en texte brut, dans la mesure où le langage HTML " +"personnalisé et les autres langages de balisage ne sont pas pris en charge. " +"Si plusieurs paragraphes de texte sont nécessaires, les nouvelles lignes " +"(paragraphes) doivent être échappées sous la forme `\\n` dans le bloc de " +"texte." + +#: awx/ui/conf.py:48 +msgid "Custom Logo" +msgstr "Logo personnalisé" + +#: awx/ui/conf.py:49 +msgid "" +"To set up a custom logo, provide a file that you create. For the custom logo " +"to look its best, use a `.png` file with a transparent background. GIF, PNG " +"and JPEG formats are supported." +msgstr "" +"Pour configurer un logo personnalisé, chargez un fichier que vous avez créé. " +"Pour optimiser l'affichage du logo personnalisé, utilisez un fichier `.png` " +"avec un fond transparent. Les formats GIF, PNG et JPEG sont pris en charge." + +#: awx/ui/fields.py:29 +msgid "" +"Invalid format for custom logo. Must be a data URL with a base64-encoded " +"GIF, PNG or JPEG image." +msgstr "" +"Format de logo personnalisé non valide. Entrez une URL de données avec une " +"image GIF, PNG ou JPEG codée en base64." + +#: awx/ui/fields.py:30 +msgid "Invalid base64-encoded data in data URL." +msgstr "Données codées en base64 non valides dans l'URL de données." + +#: awx/ui/templates/ui/index.html:49 +msgid "" +"Your session will expire in 60 seconds, would you like to continue?" +msgstr "" +"Votre session expirera dans 60 secondes, voulez-vous continuer ?" + +#: awx/ui/templates/ui/index.html:64 +msgid "CANCEL" +msgstr "ANNULER" + +#: awx/ui/templates/ui/index.html:116 +msgid "Set how many days of data should be retained."
+msgstr "" +"Définissez le nombre de jours pendant lesquels les données doivent être " +"conservées." + +#: awx/ui/templates/ui/index.html:122 +msgid "" +"Please enter an integer that is not " +"negative that is lower than 9999." +msgstr "" +"Entrez un entier non négatif et inférieur à 9999." + +#: awx/ui/templates/ui/index.html:127 +msgid "" +"For facts collected older than the time period specified, save one fact scan " +"(snapshot) per time window (frequency). For example, facts older than 30 " +"days are purged, while one weekly fact scan is kept.\n" +"
\n" +"
CAUTION: Setting both numerical variables to \"0\" " +"will delete all facts.\n" +"
\n" +"
" +msgstr "" +"Pour les faits collectés en amont de la période spécifiée, enregistrez un " +"scan des faits (instantané) par fenêtre temporelle (fréquence). Par exemple, " +"les faits antérieurs à 30 jours sont purgés, tandis qu'un scan de faits " +"hebdomadaire est conservé.\n" +"
\n" +"
ATTENTION : le paramétrage des deux variables numériques sur \"0\" " +"supprime l'ensemble des faits.\n" +"
\n" +"
" + +#: awx/ui/templates/ui/index.html:136 +msgid "Select a time period after which to remove old facts" +msgstr "" +"Sélectionnez un intervalle de temps après lequel les faits anciens pourront " +"être supprimés" + +#: awx/ui/templates/ui/index.html:150 +msgid "" +"Please enter an integer " +"that is not negative " +"that is lower than 9999." +msgstr "" +"Entrez un entier non négatif et inférieur à 9999." + +#: awx/ui/templates/ui/index.html:155 +msgid "Select a frequency for snapshot retention" +msgstr "Sélectionnez une fréquence pour la conservation des instantanés" + +#: awx/ui/templates/ui/index.html:169 +msgid "" +"Please enter an integer that is not negative that is " +"lower than 9999." +msgstr "" +"Entrez un entier non négatif et " +"inférieur à 9999." + +#: awx/ui/templates/ui/index.html:175 +msgid "working..." +msgstr "en cours..." diff --git a/awx/locale/ja/LC_MESSAGES/django.po b/awx/locale/ja/LC_MESSAGES/django.po new file mode 100644 index 0000000000..721de24e63 --- /dev/null +++ b/awx/locale/ja/LC_MESSAGES/django.po @@ -0,0 +1,4126 @@ +# asasaki , 2017. #zanata +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2016-12-15 12:05+0530\n" +"PO-Revision-Date: 2017-02-02 01:04+0000\n" +"Last-Translator: asasaki \n" +"Language-Team: Japanese\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ja\n" +"Plural-Forms: nplurals=1; plural=0\n" +"X-Generator: Zanata 3.9.6\n" + +#: awx/api/authentication.py:67 +msgid "Invalid token header. No credentials provided." +msgstr "無効なトークンヘッダーです。認証情報が提供されていません。" + +#: awx/api/authentication.py:70 +msgid "Invalid token header. Token string should not contain spaces." +msgstr "無効なトークンヘッダーです。トークン文字列にはスペースを含めることができません。" + +#: awx/api/authentication.py:105 +msgid "User inactive or deleted" +msgstr "ユーザーが非アクティブか、または削除されています" + +#: awx/api/authentication.py:161 +msgid "Invalid task token" +msgstr "無効なタスクトークン" + +#: awx/api/conf.py:12 +msgid "Idle Time Force Log Out" +msgstr "アイドル時間、強制ログアウト" + +#: awx/api/conf.py:13 +msgid "" +"Number of seconds that a user is inactive before they will need to login " +"again." +msgstr "ユーザーが再ログインするまでに非アクティブな状態になる秒数です。" + +#: awx/api/conf.py:14 +#: awx/api/conf.py:24 +#: awx/api/conf.py:33 +#: awx/sso/conf.py:124 +#: awx/sso/conf.py:135 +#: awx/sso/conf.py:147 +#: awx/sso/conf.py:162 +msgid "Authentication" +msgstr "認証" + +#: awx/api/conf.py:22 +msgid "Maximum number of simultaneous logins" +msgstr "同時ログインの最大数" + +#: awx/api/conf.py:23 +msgid "" +"Maximum number of simultaneous logins a user may have. To disable enter -1." +msgstr "ユーザーが実行できる同時ログインの最大数です。無効にするには -1 を入力します。" + +#: awx/api/conf.py:31 +msgid "Enable HTTP Basic Auth" +msgstr "HTTP Basic 認証の有効化" + +#: awx/api/conf.py:32 +msgid "Enable HTTP Basic Auth for the API Browser." +msgstr "API ブラウザーの HTTP Basic 認証を有効にします。" + +#: awx/api/generics.py:466 +msgid "\"id\" is required to disassociate" +msgstr "関連付けを解除するには 「id」が必要です" + +#: awx/api/metadata.py:50 +msgid "Database ID for this {}." +msgstr "この{}のデータベース ID。" + +#: awx/api/metadata.py:51 +msgid "Name of this {}." +msgstr "この{}の名前。" + +#: awx/api/metadata.py:52 +msgid "Optional description of this {}." +msgstr "この{}のオプションの説明。" + +#: awx/api/metadata.py:53 +msgid "Data type for this {}." +msgstr "この{}のデータタイプ。" + +#: awx/api/metadata.py:54 +msgid "URL for this {}." +msgstr "この{}の URL。" + +#: awx/api/metadata.py:55 +msgid "Data structure with URLs of related resources." 
+msgstr "関連リソースの URL のあるデータ構造。" + +#: awx/api/metadata.py:56 +msgid "Data structure with name/description for related resources." +msgstr "関連リソースの名前/説明のあるデータ構造。" + +#: awx/api/metadata.py:57 +msgid "Timestamp when this {} was created." +msgstr "この {} の作成時のタイムスタンプ。" + +#: awx/api/metadata.py:58 +msgid "Timestamp when this {} was last modified." +msgstr "この {} の最終変更時のタイムスタンプ。" + +#: awx/api/parsers.py:31 +#, python-format +msgid "JSON parse error - %s" +msgstr "JSON パースエラー: %s" + +#: awx/api/serializers.py:250 +msgid "Playbook Run" +msgstr "Playbook 実行" + +#: awx/api/serializers.py:251 +msgid "Command" +msgstr "コマンド" + +#: awx/api/serializers.py:252 +msgid "SCM Update" +msgstr "SCM 更新" + +#: awx/api/serializers.py:253 +msgid "Inventory Sync" +msgstr "インベントリーの同期" + +#: awx/api/serializers.py:254 +msgid "Management Job" +msgstr "管理ジョブ" + +#: awx/api/serializers.py:255 +msgid "Workflow Job" +msgstr "ワークフロージョブ" + +#: awx/api/serializers.py:256 +msgid "Workflow Template" +msgstr "ワークフローテンプレート" + +#: awx/api/serializers.py:658 +#: awx/api/serializers.py:716 +#: awx/api/views.py:3817 +#, python-format +msgid "" +"Standard Output too large to display (%(text_size)d bytes), only download " +"supported for sizes over %(supported_size)d bytes" +msgstr "" +"標準出力が大きすぎて表示できません (%(text_size)d バイト)。サイズが %(supported_size)d " +"バイトを超える場合はダウンロードのみがサポートされます。" + +#: awx/api/serializers.py:731 +msgid "Write-only field used to change the password." +msgstr "パスワードを変更するために使用される書き込み専用フィールド。" + +#: awx/api/serializers.py:733 +msgid "Set if the account is managed by an external service" +msgstr "アカウントが外部サービスで管理される場合に設定されます" + +#: awx/api/serializers.py:757 +msgid "Password required for new User." +msgstr "新規ユーザーのパスワードを入力してください。" + +#: awx/api/serializers.py:841 +#, python-format +msgid "Unable to change %s on user managed by LDAP." +msgstr "LDAP で管理されたユーザーの %s を変更できません。" + +#: awx/api/serializers.py:1002 +msgid "Organization is missing" +msgstr "組織がありません" + +#: awx/api/serializers.py:1006 +msgid "Update options must be set to false for manual projects." +msgstr "手動プロジェクトについては更新オプションを false に設定する必要があります。" + +#: awx/api/serializers.py:1012 +msgid "Array of playbooks available within this project." +msgstr "このプロジェクト内で利用可能な一連の Playbook。" + +#: awx/api/serializers.py:1194 +#, python-format +msgid "Invalid port specification: %s" +msgstr "無効なポート指定: %s" + +#: awx/api/serializers.py:1222 +#: awx/main/validators.py:193 +msgid "Must be valid JSON or YAML." +msgstr "有効な JSON または YAML である必要があります。" + +#: awx/api/serializers.py:1279 +msgid "Invalid group name." +msgstr "無効なグループ名。" + +#: awx/api/serializers.py:1354 +msgid "" +"Script must begin with a hashbang sequence: i.e.... #!/usr/bin/env python" +msgstr "スクリプトは hashbang シーケンスで開始する必要があります (例: .... #!/usr/bin/env python)" + +#: awx/api/serializers.py:1407 +msgid "If 'source' is 'custom', 'source_script' must be provided." +msgstr "「source」が「custom」である場合、「source_script」を指定する必要があります。" + +#: awx/api/serializers.py:1411 +msgid "" +"The 'source_script' does not belong to the same organization as the " +"inventory." +msgstr "「source_script」はインベントリーと同じ組織に属しません。" + +#: awx/api/serializers.py:1413 +msgid "'source_script' doesn't exist." +msgstr "「source_script」は存在しません。" + +#: awx/api/serializers.py:1772 +msgid "" +"Write-only field used to add user to owner role. If provided, do not give " +"either team or organization. Only valid for creation." 
+msgstr "" +"ユーザーを所有者ロールに追加するために使用される書き込み専用フィールドです。提供されている場合は、チームまたは組織のいずれも指定しないでください。作成時にのみ有効です。" + +#: awx/api/serializers.py:1777 +msgid "" +"Write-only field used to add team to owner role. If provided, do not give " +"either user or organization. Only valid for creation." +msgstr "" +"チームを所有者ロールに追加するために使用される書き込み専用フィールドです。提供されている場合は、ユーザーまたは組織のいずれも指定しないでください。作成時にのみ有効です。" + +#: awx/api/serializers.py:1782 +msgid "" +"Inherit permissions from organization roles. If provided on creation, do not " +"give either user or team." +msgstr "組織ロールからパーミッションを継承します。作成時に提供される場合は、ユーザーまたはチームのいずれも指定しないでください。" + +#: awx/api/serializers.py:1798 +msgid "Missing 'user', 'team', or 'organization'." +msgstr "「user」、「team」、または「organization」がありません。" + +#: awx/api/serializers.py:1811 +msgid "" +"Credential organization must be set and match before assigning to a team" +msgstr "認証情報の組織が設定され、一致している状態でチームに割り当てる必要があります。" + +#: awx/api/serializers.py:1903 +msgid "This field is required." +msgstr "このフィールドは必須です。" + +#: awx/api/serializers.py:1905 +#: awx/api/serializers.py:1907 +msgid "Playbook not found for project." +msgstr "プロジェクトの Playbook が見つかりません。" + +#: awx/api/serializers.py:1909 +msgid "Must select playbook for project." +msgstr "プロジェクトの Playbook を選択してください。" + +#: awx/api/serializers.py:1975 +msgid "Must either set a default value or ask to prompt on launch." +msgstr "起動時にプロントを出すには、デフォルト値を設定するか、またはプロンプトを出すよう指定する必要があります。" + +#: awx/api/serializers.py:1978 +#: awx/main/models/jobs.py:278 +msgid "Scan jobs must be assigned a fixed inventory." +msgstr "スキャンジョブに固定インベントリーが割り当てられている必要があります。" + +#: awx/api/serializers.py:1980 +#: awx/main/models/jobs.py:281 +msgid "Job types 'run' and 'check' must have assigned a project." +msgstr "ジョブタイプ「run」および「check」によりプロジェクトが割り当てられている必要があります。" + +#: awx/api/serializers.py:1987 +msgid "Survey Enabled cannot be used with scan jobs." +msgstr "Survey Enabled はスキャンジョブで使用できません。" + +#: awx/api/serializers.py:2047 +msgid "Invalid job template." +msgstr "無効なジョブテンプレート。" + +#: awx/api/serializers.py:2132 +msgid "Credential not found or deleted." +msgstr "認証情報が見つからないか、または削除されました。" + +#: awx/api/serializers.py:2134 +msgid "Job Template Project is missing or undefined." +msgstr "ジョブテンプレートプロジェクトが見つからないか、または定義されていません。" + +#: awx/api/serializers.py:2136 +msgid "Job Template Inventory is missing or undefined." +msgstr "ジョブテンプレートインベントリーが見つからないか、または定義されていません。" + +#: awx/api/serializers.py:2421 +#, python-format +msgid "%(job_type)s is not a valid job type. The choices are %(choices)s." +msgstr "%(job_type)s は有効なジョブタイプではありません。%(choices)s を選択できます。" + +#: awx/api/serializers.py:2426 +msgid "Workflow job template is missing during creation." +msgstr "ワークフロージョブテンプレートが作成時に見つかりません。" + +#: awx/api/serializers.py:2431 +#, python-format +msgid "Cannot nest a %s inside a WorkflowJobTemplate" +msgstr "ワークフロージョブテンプレート内に %s をネストできません" + +#: awx/api/serializers.py:2669 +#, python-format +msgid "Job Template '%s' is missing or undefined." +msgstr "ジョブテンプレート「%s」が見つからない、または定義されていません。" + +#: awx/api/serializers.py:2695 +msgid "Must be a valid JSON or YAML dictionary." +msgstr "有効な JSON または YAML 辞書でなければなりません。" + +#: awx/api/serializers.py:2837 +msgid "" +"Missing required fields for Notification Configuration: notification_type" +msgstr "通知設定の必須フィールドがありません: notification_type" + +#: awx/api/serializers.py:2860 +msgid "No values specified for field '{}'" +msgstr "フィールド '{}' に値が指定されていません" + +#: awx/api/serializers.py:2865 +msgid "Missing required fields for Notification Configuration: {}." 
+msgstr "通知設定の必須フィールドがありません: {}。" + +#: awx/api/serializers.py:2868 +msgid "Configuration field '{}' incorrect type, expected {}." +msgstr "設定フィールド '{}' のタイプが正しくありません。{} が予期されました。" + +#: awx/api/serializers.py:2921 +msgid "Inventory Source must be a cloud resource." +msgstr "インベントリーソースはクラウドリソースでなければなりません。" + +#: awx/api/serializers.py:2923 +msgid "Manual Project can not have a schedule set." +msgstr "手動プロジェクトにはスケジュールを設定できません。" + +#: awx/api/serializers.py:2945 +msgid "" +"DTSTART required in rrule. Value should match: DTSTART:YYYYMMDDTHHMMSSZ" +msgstr "DTSTART が rrule で必要です。値は、DSTART:YYYYMMDDTHHMMSSZ に一致する必要があります。" + +#: awx/api/serializers.py:2947 +msgid "Multiple DTSTART is not supported." +msgstr "複数の DTSTART はサポートされません。" + +#: awx/api/serializers.py:2949 +msgid "RRULE require in rrule." +msgstr "RRULE が rrule で必要です。" + +#: awx/api/serializers.py:2951 +msgid "Multiple RRULE is not supported." +msgstr "複数の RRULE はサポートされません。" + +#: awx/api/serializers.py:2953 +msgid "INTERVAL required in rrule." +msgstr "INTERVAL が rrule で必要です。" + +#: awx/api/serializers.py:2955 +msgid "TZID is not supported." +msgstr "TZID はサポートされません。" + +#: awx/api/serializers.py:2957 +msgid "SECONDLY is not supported." +msgstr "SECONDLY はサポートされません。" + +#: awx/api/serializers.py:2959 +msgid "Multiple BYMONTHDAYs not supported." +msgstr "複数の BYMONTHDAY はサポートされません。" + +#: awx/api/serializers.py:2961 +msgid "Multiple BYMONTHs not supported." +msgstr "複数の BYMONTH はサポートされません。" + +#: awx/api/serializers.py:2963 +msgid "BYDAY with numeric prefix not supported." +msgstr "数字の接頭辞のある BYDAY はサポートされません。" + +#: awx/api/serializers.py:2965 +msgid "BYYEARDAY not supported." +msgstr "BYYEARDAY はサポートされません。" + +#: awx/api/serializers.py:2967 +msgid "BYWEEKNO not supported." +msgstr "BYWEEKNO はサポートされません。" + +#: awx/api/serializers.py:2971 +msgid "COUNT > 999 is unsupported." +msgstr "COUNT > 999 はサポートされません。" + +#: awx/api/serializers.py:2975 +msgid "rrule parsing failed validation." +msgstr "rrule の構文解析で検証に失敗しました。" + +#: awx/api/serializers.py:2997 +msgid "" +"A summary of the new and changed values when an object is created, updated, " +"or deleted" +msgstr "オブジェクトの作成、更新または削除時の新規値および変更された値の概要" + +#: awx/api/serializers.py:2999 +msgid "" +"For create, update, and delete events this is the object type that was " +"affected. For associate and disassociate events this is the object type " +"associated or disassociated with object2." +msgstr "" +"作成、更新、および削除イベントの場合、これは影響を受けたオブジェクトタイプになります。関連付けおよび関連付け解除イベントの場合、これは object2 " +"に関連付けられたか、またはその関連付けが解除されたオブジェクトタイプになります。" + +#: awx/api/serializers.py:3002 +msgid "" +"Unpopulated for create, update, and delete events. For associate and " +"disassociate events this is the object type that object1 is being associated " +"with." +msgstr "" +"作成、更新、および削除イベントの場合は設定されません。関連付けおよび関連付け解除イベントの場合、これは object1 " +"が関連付けられるオブジェクトタイプになります。" + +#: awx/api/serializers.py:3005 +msgid "The action taken with respect to the given object(s)." +msgstr "指定されたオブジェクトについて実行されたアクション。" + +#: awx/api/serializers.py:3112 +msgid "Unable to login with provided credentials." +msgstr "提供される認証情報でログインできません。" + +#: awx/api/serializers.py:3114 +msgid "Must include \"username\" and \"password\"." +msgstr "「username」および「password」を含める必要があります。" + +#: awx/api/views.py:99 +msgid "Your license does not allow use of the activity stream." +msgstr "お使いのライセンスではアクティビティーストリームを使用できません。" + +#: awx/api/views.py:109 +msgid "Your license does not permit use of system tracking." 
+msgstr "お使いのライセンスではシステムトラッキングを使用できません。" + +#: awx/api/views.py:119 +msgid "Your license does not allow use of workflows." +msgstr "お使いのライセンスではワークフローを使用できません。" + +#: awx/api/views.py:127 +#: awx/templates/rest_framework/api.html:28 +msgid "REST API" +msgstr "REST API" + +#: awx/api/views.py:134 +#: awx/templates/rest_framework/api.html:4 +msgid "Ansible Tower REST API" +msgstr "Ansible Tower REST API" + +#: awx/api/views.py:150 +msgid "Version 1" +msgstr "バージョン 1" + +#: awx/api/views.py:201 +msgid "Ping" +msgstr "Ping" + +#: awx/api/views.py:230 +#: awx/conf/apps.py:12 +msgid "Configuration" +msgstr "設定" + +#: awx/api/views.py:283 +msgid "Invalid license data" +msgstr "無効なライセンスデータ" + +#: awx/api/views.py:285 +msgid "Missing 'eula_accepted' property" +msgstr "'eula_accepted' プロパティーがありません" + +#: awx/api/views.py:289 +msgid "'eula_accepted' value is invalid" +msgstr "'eula_accepted' 値は無効です。" + +#: awx/api/views.py:292 +msgid "'eula_accepted' must be True" +msgstr "'eula_accepted' は True でなければなりません" + +#: awx/api/views.py:299 +msgid "Invalid JSON" +msgstr "無効な JSON" + +#: awx/api/views.py:307 +msgid "Invalid License" +msgstr "無効なライセンス" + +#: awx/api/views.py:317 +msgid "Invalid license" +msgstr "無効なライセンス" + +#: awx/api/views.py:325 +#, python-format +msgid "Failed to remove license (%s)" +msgstr "ライセンスの削除に失敗しました (%s)" + +#: awx/api/views.py:330 +msgid "Dashboard" +msgstr "ダッシュボード" + +#: awx/api/views.py:436 +msgid "Dashboard Jobs Graphs" +msgstr "ダッシュボードのジョブグラフ" + +#: awx/api/views.py:472 +#, python-format +msgid "Unknown period \"%s\"" +msgstr "不明な期間 \"%s\"" + +#: awx/api/views.py:486 +msgid "Schedules" +msgstr "スケジュール" + +#: awx/api/views.py:505 +msgid "Schedule Jobs List" +msgstr "スケジュールジョブの一覧" + +#: awx/api/views.py:715 +msgid "Your Tower license only permits a single organization to exist." +msgstr "お使いの Tower ライセンスでは、単一組織のみの存在が許可されます。" + +#: awx/api/views.py:940 +#: awx/api/views.py:1299 +msgid "Role 'id' field is missing." +msgstr "ロール「id」フィールドがありません。" + +#: awx/api/views.py:946 +#: awx/api/views.py:4093 +msgid "You cannot assign an Organization role as a child role for a Team." +msgstr "組織ロールをチームの子ロールとして割り当てることができません。" + +#: awx/api/views.py:950 +#: awx/api/views.py:4107 +msgid "You cannot grant system-level permissions to a team." +msgstr "システムレベルのパーミッションをチームに付与できません。" + +#: awx/api/views.py:957 +#: awx/api/views.py:4099 +msgid "" +"You cannot grant credential access to a team when the Organization field " +"isn't set, or belongs to a different organization" +msgstr "組織フィールドが設定されていないか、または別の組織に属する場合に認証情報のアクセス権をチームに付与できません" + +#: awx/api/views.py:1047 +msgid "Cannot delete project." +msgstr "プロジェクトを削除できません。" + +#: awx/api/views.py:1076 +msgid "Project Schedules" +msgstr "プロジェクトのスケジュール" + +#: awx/api/views.py:1180 +#: awx/api/views.py:2271 +#: awx/api/views.py:3284 +msgid "Cannot delete job resource when associated workflow job is running." +msgstr "関連付けられたワークフロージョブが実行中の場合、ジョブリソースを削除できません。" + +#: awx/api/views.py:1257 +msgid "Me" +msgstr "自分" + +#: awx/api/views.py:1303 +#: awx/api/views.py:4048 +msgid "You may not perform any action with your own admin_role." 
+msgstr "独自の admin_role でアクションを実行することはできません。" + +#: awx/api/views.py:1309 +#: awx/api/views.py:4052 +msgid "You may not change the membership of a users admin_role" +msgstr "ユーザーの admin_role のメンバーシップを変更することはできません" + +#: awx/api/views.py:1314 +#: awx/api/views.py:4057 +msgid "" +"You cannot grant credential access to a user not in the credentials' " +"organization" +msgstr "認証情報の組織に属さないユーザーに認証情報のアクセス権を付与することはできません" + +#: awx/api/views.py:1318 +#: awx/api/views.py:4061 +msgid "You cannot grant private credential access to another user" +msgstr "非公開の認証情報のアクセス権を別のユーザーに付与することはできません" + +#: awx/api/views.py:1416 +#, python-format +msgid "Cannot change %s." +msgstr "%s を変更できません。" + +#: awx/api/views.py:1422 +msgid "Cannot delete user." +msgstr "ユーザーを削除できません。" + +#: awx/api/views.py:1570 +msgid "Cannot delete inventory script." +msgstr "インベントリースクリプトを削除できません。" + +#: awx/api/views.py:1806 +msgid "Fact not found." +msgstr "ファクトが見つかりませんでした。" + +#: awx/api/views.py:2126 +msgid "Inventory Source List" +msgstr "インベントリーソース一覧" + +#: awx/api/views.py:2154 +msgid "Cannot delete inventory source." +msgstr "インベントリーソースを削除できません。" + +#: awx/api/views.py:2162 +msgid "Inventory Source Schedules" +msgstr "インベントリーソースのスケジュール" + +#: awx/api/views.py:2192 +msgid "Notification Templates can only be assigned when source is one of {}." +msgstr "ソースが {} のいずれかである場合、通知テンプレートのみを割り当てることができます。" + +#: awx/api/views.py:2403 +msgid "Job Template Schedules" +msgstr "ジョブテンプレートスケジュール" + +#: awx/api/views.py:2423 +#: awx/api/views.py:2439 +msgid "Your license does not allow adding surveys." +msgstr "お使いのライセンスでは Survey を追加できません。" + +#: awx/api/views.py:2446 +msgid "'name' missing from survey spec." +msgstr "Survey の指定に「name」がありません。" + +#: awx/api/views.py:2448 +msgid "'description' missing from survey spec." +msgstr "Survey の指定に「description」がありません。" + +#: awx/api/views.py:2450 +msgid "'spec' missing from survey spec." +msgstr "Survey の指定に「spec」がありません。" + +#: awx/api/views.py:2452 +msgid "'spec' must be a list of items." +msgstr "「spec」は項目の一覧にする必要があります。" + +#: awx/api/views.py:2454 +msgid "'spec' doesn't contain any items." +msgstr "「spec」には項目が含まれません。" + +#: awx/api/views.py:2460 +#, python-format +msgid "Survey question %s is not a json object." +msgstr "Survey の質問 %s は json オブジェクトではありません。" + +#: awx/api/views.py:2462 +#, python-format +msgid "'type' missing from survey question %s." +msgstr "Survey の質問 %s に「type」がありません。" + +#: awx/api/views.py:2464 +#, python-format +msgid "'question_name' missing from survey question %s." +msgstr "Survey の質問 %s に「question_name」がありません。" + +#: awx/api/views.py:2466 +#, python-format +msgid "'variable' missing from survey question %s." +msgstr "Survey の質問 %s に「variable」がありません。" + +#: awx/api/views.py:2468 +#, python-format +msgid "'variable' '%(item)s' duplicated in survey question %(survey)s." +msgstr "Survey の質問%(survey)s で「variable」の「%(item)s」が重複しています。" + +#: awx/api/views.py:2473 +#, python-format +msgid "'required' missing from survey question %s." +msgstr "Survey の質問 %s に「required」がありません。" + +#: awx/api/views.py:2684 +msgid "No matching host could be found!" +msgstr "一致するホストが見つかりませんでした!" + +#: awx/api/views.py:2687 +msgid "Multiple hosts matched the request!" +msgstr "複数のホストが要求に一致しました!" + +#: awx/api/views.py:2692 +msgid "Cannot start automatically, user input required!" +msgstr "自動的に開始できません。ユーザー入力が必要です!" + +#: awx/api/views.py:2699 +msgid "Host callback job already pending." +msgstr "ホストのコールバックジョブがすでに保留中です。" + +#: awx/api/views.py:2712 +msgid "Error starting job!" +msgstr "ジョブの開始時にエラーが発生しました!" 
+ +#: awx/api/views.py:3041 +msgid "Workflow Job Template Schedules" +msgstr "ワークフロージョブテンプレートのスケジュール" + +#: awx/api/views.py:3183 +#: awx/api/views.py:3726 +msgid "Superuser privileges needed." +msgstr "スーパーユーザー権限が必要です。" + +#: awx/api/views.py:3215 +msgid "System Job Template Schedules" +msgstr "システムジョブテンプレートのスケジュール" + +#: awx/api/views.py:3407 +msgid "Job Host Summaries List" +msgstr "ジョブホスト概要一覧" + +#: awx/api/views.py:3449 +msgid "Job Event Children List" +msgstr "ジョブイベント子一覧" + +#: awx/api/views.py:3458 +msgid "Job Event Hosts List" +msgstr "ジョブイベントホスト一覧" + +#: awx/api/views.py:3467 +msgid "Job Events List" +msgstr "ジョブイベント一覧" + +#: awx/api/views.py:3680 +msgid "Ad Hoc Command Events List" +msgstr "アドホックコマンドイベント一覧" + +#: awx/api/views.py:3874 +#, python-format +msgid "Error generating stdout download file: %s" +msgstr "stdout ダウンロードファイルの生成中にエラーが発生しました: %s" + +#: awx/api/views.py:3919 +msgid "Delete not allowed while there are pending notifications" +msgstr "保留中の通知がある場合に削除は許可されません" + +#: awx/api/views.py:3926 +msgid "Notification Template Test" +msgstr "通知テンプレートテスト" + +#: awx/api/views.py:4042 +msgid "User 'id' field is missing." +msgstr "ユーザー「id」フィールドがありません。" + +#: awx/api/views.py:4085 +msgid "Team 'id' field is missing." +msgstr "チーム「id」フィールドがありません。" + +#: awx/conf/conf.py:20 +msgid "Bud Frogs" +msgstr "Bud Frogs" + +#: awx/conf/conf.py:21 +msgid "Bunny" +msgstr "Bunny" + +#: awx/conf/conf.py:22 +msgid "Cheese" +msgstr "Cheese" + +#: awx/conf/conf.py:23 +msgid "Daemon" +msgstr "Daemon" + +#: awx/conf/conf.py:24 +msgid "Default Cow" +msgstr "Default Cow" + +#: awx/conf/conf.py:25 +msgid "Dragon" +msgstr "Dragon" + +#: awx/conf/conf.py:26 +msgid "Elephant in Snake" +msgstr "Elephant in Snake" + +#: awx/conf/conf.py:27 +msgid "Elephant" +msgstr "Elephant" + +#: awx/conf/conf.py:28 +msgid "Eyes" +msgstr "Eyes" + +#: awx/conf/conf.py:29 +msgid "Hello Kitty" +msgstr "Hello Kitty" + +#: awx/conf/conf.py:30 +msgid "Kitty" +msgstr "Kitty" + +#: awx/conf/conf.py:31 +msgid "Luke Koala" +msgstr "Luke Koala" + +#: awx/conf/conf.py:32 +msgid "Meow" +msgstr "Meow" + +#: awx/conf/conf.py:33 +msgid "Milk" +msgstr "Milk" + +#: awx/conf/conf.py:34 +msgid "Moofasa" +msgstr "Moofasa" + +#: awx/conf/conf.py:35 +msgid "Moose" +msgstr "Moose" + +#: awx/conf/conf.py:36 +msgid "Ren" +msgstr "Ren" + +#: awx/conf/conf.py:37 +msgid "Sheep" +msgstr "Sheep" + +#: awx/conf/conf.py:38 +msgid "Small Cow" +msgstr "Small Cow" + +#: awx/conf/conf.py:39 +msgid "Stegosaurus" +msgstr "Stegosaurus" + +#: awx/conf/conf.py:40 +msgid "Stimpy" +msgstr "Stimpy" + +#: awx/conf/conf.py:41 +msgid "Super Milker" +msgstr "Super Milker" + +#: awx/conf/conf.py:42 +msgid "Three Eyes" +msgstr "Three Eyes" + +#: awx/conf/conf.py:43 +msgid "Turkey" +msgstr "Turkey" + +#: awx/conf/conf.py:44 +msgid "Turtle" +msgstr "Turtle" + +#: awx/conf/conf.py:45 +msgid "Tux" +msgstr "Tux" + +#: awx/conf/conf.py:46 +msgid "Udder" +msgstr "Udder" + +#: awx/conf/conf.py:47 +msgid "Vader Koala" +msgstr "Vader Koala" + +#: awx/conf/conf.py:48 +msgid "Vader" +msgstr "Vader" + +#: awx/conf/conf.py:49 +msgid "WWW" +msgstr "WWW" + +#: awx/conf/conf.py:52 +msgid "Cow Selection" +msgstr "Cow Selection" + +#: awx/conf/conf.py:53 +msgid "Select which cow to use with cowsay when running jobs." +msgstr "ジョブの実行時に cowsay で使用する cow を選択します。" + +#: awx/conf/conf.py:54 +#: awx/conf/conf.py:75 +msgid "Cows" +msgstr "Cows" + +#: awx/conf/conf.py:73 +msgid "Example Read-Only Setting" +msgstr "読み取り専用設定の例" + +#: awx/conf/conf.py:74 +msgid "Example setting that cannot be changed." 
+msgstr "変更不可能な設定例" + +#: awx/conf/conf.py:93 +msgid "Example Setting" +msgstr "設定例" + +#: awx/conf/conf.py:94 +msgid "Example setting which can be different for each user." +msgstr "ユーザーごとに異なる設定例" + +#: awx/conf/conf.py:95 +#: awx/conf/registry.py:76 +#: awx/conf/views.py:46 +msgid "User" +msgstr "ユーザー" + +#: awx/conf/fields.py:38 +msgid "Enter a valid URL" +msgstr "無効な URL の入力" + +#: awx/conf/license.py:19 +msgid "Your Tower license does not allow that." +msgstr "お使いの Tower ライセンスではこれを許可しません。" + +#: awx/conf/management/commands/migrate_to_database_settings.py:41 +msgid "Only show which settings would be commented/migrated." +msgstr "コメント/移行する設定についてのみ表示します。" + +#: awx/conf/management/commands/migrate_to_database_settings.py:48 +msgid "" +"Skip over settings that would raise an error when commenting/migrating." +msgstr "コメント/移行時にエラーを発生させる設定をスキップします。" + +#: awx/conf/management/commands/migrate_to_database_settings.py:55 +msgid "Skip commenting out settings in files." +msgstr "ファイル内の設定のコメント化をスキップします。" + +#: awx/conf/management/commands/migrate_to_database_settings.py:61 +msgid "Backup existing settings files with this suffix." +msgstr "この接尾辞を持つ既存の設定ファイルをバックアップします。" + +#: awx/conf/registry.py:64 +#: awx/conf/tests/unit/test_registry.py:169 +#: awx/conf/tests/unit/test_registry.py:192 +#: awx/conf/tests/unit/test_registry.py:196 +#: awx/conf/tests/unit/test_registry.py:201 +#: awx/conf/tests/unit/test_registry.py:208 +msgid "All" +msgstr "すべて" + +#: awx/conf/registry.py:65 +#: awx/conf/tests/unit/test_registry.py:170 +#: awx/conf/tests/unit/test_registry.py:193 +#: awx/conf/tests/unit/test_registry.py:197 +#: awx/conf/tests/unit/test_registry.py:202 +#: awx/conf/tests/unit/test_registry.py:209 +msgid "Changed" +msgstr "変更済み" + +#: awx/conf/registry.py:77 +msgid "User-Defaults" +msgstr "ユーザー設定" + +#: awx/conf/tests/unit/test_registry.py:46 +#: awx/conf/tests/unit/test_registry.py:56 +#: awx/conf/tests/unit/test_registry.py:72 +#: awx/conf/tests/unit/test_registry.py:87 +#: awx/conf/tests/unit/test_registry.py:100 +#: awx/conf/tests/unit/test_registry.py:106 +#: awx/conf/tests/unit/test_registry.py:126 +#: awx/conf/tests/unit/test_registry.py:140 +#: awx/conf/tests/unit/test_registry.py:146 +#: awx/conf/tests/unit/test_registry.py:159 +#: awx/conf/tests/unit/test_registry.py:171 +#: awx/conf/tests/unit/test_registry.py:180 +#: awx/conf/tests/unit/test_registry.py:198 +#: awx/conf/tests/unit/test_registry.py:210 +#: awx/conf/tests/unit/test_registry.py:219 +#: awx/conf/tests/unit/test_registry.py:225 +#: awx/conf/tests/unit/test_registry.py:237 +#: awx/conf/tests/unit/test_registry.py:245 +#: awx/conf/tests/unit/test_registry.py:288 +#: awx/conf/tests/unit/test_registry.py:306 +#: awx/conf/tests/unit/test_settings.py:67 +#: awx/conf/tests/unit/test_settings.py:81 +#: awx/conf/tests/unit/test_settings.py:97 +#: awx/conf/tests/unit/test_settings.py:110 +#: awx/conf/tests/unit/test_settings.py:127 +#: awx/conf/tests/unit/test_settings.py:143 +#: awx/conf/tests/unit/test_settings.py:162 +#: awx/conf/tests/unit/test_settings.py:183 +#: awx/conf/tests/unit/test_settings.py:197 +#: awx/conf/tests/unit/test_settings.py:221 +#: awx/conf/tests/unit/test_settings.py:241 +#: awx/conf/tests/unit/test_settings.py:258 +#: awx/main/conf.py:19 +#: awx/main/conf.py:29 +#: awx/main/conf.py:39 +#: awx/main/conf.py:48 +#: awx/main/conf.py:60 +#: awx/main/conf.py:78 +#: awx/main/conf.py:103 +msgid "System" +msgstr "システム" + +#: awx/conf/tests/unit/test_registry.py:165 +#: awx/conf/tests/unit/test_registry.py:172 +#: 
awx/conf/tests/unit/test_registry.py:187 +#: awx/conf/tests/unit/test_registry.py:203 +#: awx/conf/tests/unit/test_registry.py:211 +msgid "OtherSystem" +msgstr "他のシステム" + +#: awx/conf/views.py:38 +msgid "Setting Categories" +msgstr "設定カテゴリー" + +#: awx/conf/views.py:61 +msgid "Setting Detail" +msgstr "設定の詳細" + +#: awx/main/access.py:255 +#, python-format +msgid "Bad data found in related field %s." +msgstr "関連フィールド %s に不正データが見つかりました。" + +#: awx/main/access.py:296 +msgid "License is missing." +msgstr "ライセンスが見つかりません。" + +#: awx/main/access.py:298 +msgid "License has expired." +msgstr "ライセンスの有効期限が切れました。" + +#: awx/main/access.py:306 +#, python-format +msgid "License count of %s instances has been reached." +msgstr "%s インスタンスのライセンス数に達しました。" + +#: awx/main/access.py:308 +#, python-format +msgid "License count of %s instances has been exceeded." +msgstr "%s インスタンスのライセンス数を超えました。" + +#: awx/main/access.py:310 +msgid "Host count exceeds available instances." +msgstr "ホスト数が利用可能なインスタンスの上限を上回っています。" + +#: awx/main/access.py:314 +#, python-format +msgid "Feature %s is not enabled in the active license." +msgstr "機能 %s はアクティブなライセンスで有効にされていません。" + +#: awx/main/access.py:316 +msgid "Features not found in active license." +msgstr "各種機能はアクティブなライセンスにありません。" + +#: awx/main/access.py:514 +#: awx/main/access.py:581 +#: awx/main/access.py:706 +#: awx/main/access.py:969 +#: awx/main/access.py:1208 +#: awx/main/access.py:1605 +msgid "Resource is being used by running jobs" +msgstr "リソースが実行中のジョブで使用されています" + +#: awx/main/access.py:625 +msgid "Unable to change inventory on a host." +msgstr "ホストのインベントリーを変更できません。" + +#: awx/main/access.py:642 +#: awx/main/access.py:687 +msgid "Cannot associate two items from different inventories." +msgstr "異なるインベントリーの 2 つの項目を関連付けることはできません。" + +#: awx/main/access.py:675 +msgid "Unable to change inventory on a group." +msgstr "グループのインベントリーを変更できません。" + +#: awx/main/access.py:889 +msgid "Unable to change organization on a team." +msgstr "チームの組織を変更できません。" + +#: awx/main/access.py:902 +msgid "The {} role cannot be assigned to a team" +msgstr "{} ロールをチームに割り当てることができません" + +#: awx/main/access.py:904 +msgid "The admin_role for a User cannot be assigned to a team" +msgstr "ユーザーの admin_role をチームに割り当てることができません" + +#: awx/main/access.py:1678 +msgid "" +"You do not have permission to the workflow job resources required for " +"relaunch." +msgstr "再起動に必要なワークフロージョブリソースへのパーミッションがありません。" + +#: awx/main/apps.py:9 +msgid "Main" +msgstr "メイン" + +#: awx/main/conf.py:17 +msgid "Enable Activity Stream" +msgstr "アクティビティーストリームの有効化" + +#: awx/main/conf.py:18 +msgid "Enable capturing activity for the Tower activity stream." +msgstr "Tower アクティビティーストリームのアクティビティーのキャプチャーを有効にします。" + +#: awx/main/conf.py:27 +msgid "Enable Activity Stream for Inventory Sync" +msgstr "インベントリー同期のアクティビティーストリームの有効化" + +#: awx/main/conf.py:28 +msgid "" +"Enable capturing activity for the Tower activity stream when running " +"inventory sync." +msgstr "インベントリー同期の実行時に Tower アクティビティーストリームのアクティビティーのキャプチャーを有効にします。" + +#: awx/main/conf.py:37 +msgid "All Users Visible to Organization Admins" +msgstr "組織管理者に表示されるすべてのユーザー" + +#: awx/main/conf.py:38 +msgid "" +"Controls whether any Organization Admin can view all users, even those not " +"associated with their Organization." +msgstr "組織管理者が、それぞれの組織に関連付けられていないすべてのユーザーを閲覧できるかどうかを制御します。" + +#: awx/main/conf.py:46 +msgid "Enable Tower Administrator Alerts" +msgstr "Tower 管理者アラートの有効化" + +#: awx/main/conf.py:47 +msgid "" +"Allow Tower to email Admin users for system events that may require " +"attention."
+msgstr "Tower から管理者ユーザーに対し、注意を要する可能性のあるシステムイベントについてのメールを送信することを許可します。" + +#: awx/main/conf.py:57 +msgid "Base URL of the Tower host" +msgstr "Tower ホストのベース URL" + +#: awx/main/conf.py:58 +msgid "" +"This setting is used by services like notifications to render a valid url to " +"the Tower host." +msgstr "この設定は、有効な URL を Tower ホストにレンダリングする通知などのサービスで使用されます。" + +#: awx/main/conf.py:67 +msgid "Remote Host Headers" +msgstr "リモートホストヘッダー" + +#: awx/main/conf.py:68 +msgid "" +"HTTP headers and meta keys to search to determine remote host name or IP. " +"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if " +"behind a reverse proxy.\n" +"\n" +"Note: The headers will be searched in order and the first found remote host " +"name or IP will be used.\n" +"\n" +"In the below example 8.8.8.7 would be the chosen IP address.\n" +"X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n" +"Host: 127.0.0.1\n" +"REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR', 'REMOTE_HOST']" +msgstr "" +"リモートホスト名または IP を判別するために検索する HTTP " +"ヘッダーおよびメタキーです。リバースプロキシーの後ろの場合は、\"HTTP_X_FORWARDED_FOR\" のように項目をこの一覧に追加します。\n" +"\n" +"注: ヘッダーが順番に検索され、最初に検出されるリモートホスト名または IP が使用されます。\n" +"\n" +"以下の例では、8.8.8.7 が選択された IP アドレスになります。\n" +"X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n" +"Host: 127.0.0.1\n" +"REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR', 'REMOTE_HOST']" + +#: awx/main/conf.py:99 +msgid "Tower License" +msgstr "Tower ライセンス" + +#: awx/main/conf.py:100 +msgid "" +"The license controls which features and functionality are enabled in Tower. " +"Use /api/v1/config/ to update or change the license." +msgstr "" +"ライセンスによって、Tower で有効にされる特長および機能が制御されます。ライセンスを更新または変更するには、/api/v1/config/ " +"を使用します。" + +#: awx/main/conf.py:110 +msgid "Ansible Modules Allowed for Ad Hoc Jobs" +msgstr "アドホックジョブで許可される Ansible モジュール" + +#: awx/main/conf.py:111 +msgid "List of modules allowed to be used by ad-hoc jobs." +msgstr "アドホックジョブで使用できるモジュール一覧。" + +#: awx/main/conf.py:112 +#: awx/main/conf.py:121 +#: awx/main/conf.py:130 +#: awx/main/conf.py:140 +#: awx/main/conf.py:150 +#: awx/main/conf.py:160 +#: awx/main/conf.py:170 +#: awx/main/conf.py:180 +#: awx/main/conf.py:190 +#: awx/main/conf.py:202 +#: awx/main/conf.py:214 +#: awx/main/conf.py:226 +msgid "Jobs" +msgstr "ジョブ" + +#: awx/main/conf.py:119 +msgid "Enable job isolation" +msgstr "ジョブの分離の有効化" + +#: awx/main/conf.py:120 +msgid "" +"Isolates an Ansible job from protected parts of the Tower system to prevent " +"exposing sensitive information." +msgstr "機密情報の公開を防ぐために Tower システムの保護された部分から Ansible ジョブを分離します。" + +#: awx/main/conf.py:128 +msgid "Job isolation execution path" +msgstr "ジョブ分離の実行パス" + +#: awx/main/conf.py:129 +msgid "" +"Create temporary working directories for isolated jobs in this location." +msgstr "この場所に分離されたジョブの一時作業ディレクトリーを作成します。" + +#: awx/main/conf.py:138 +msgid "Paths to hide from isolated jobs" +msgstr "分離されたジョブから非表示にするパス" + +#: awx/main/conf.py:139 +msgid "Additional paths to hide from isolated processes." +msgstr "分離されたプロセスから非表示にする追加パス。" + +#: awx/main/conf.py:148 +msgid "Paths to expose to isolated jobs" +msgstr "分離されたジョブに公開するパス" + +#: awx/main/conf.py:149 +msgid "" +"Whitelist of paths that would otherwise be hidden to expose to isolated jobs." +"" +msgstr "分離されたジョブに公開されないように非表示にされることがあるパスのホワイトリスト。" + +#: awx/main/conf.py:158 +msgid "Standard Output Maximum Display Size" +msgstr "標準出力の最大表示サイズ" + +#: awx/main/conf.py:159 +msgid "" +"Maximum Size of Standard Output in bytes to display before requiring the " +"output be downloaded." 
+msgstr "出力のダウンロードを要求する前に表示される標準出力の最大サイズ (バイト単位)。" + +#: awx/main/conf.py:168 +msgid "Job Event Standard Output Maximum Display Size" +msgstr "ジョブイベントの標準出力の最大表示サイズ" + +#: awx/main/conf.py:169 +msgid "" +"Maximum Size of Standard Output in bytes to display for a single job or ad " +"hoc command event. `stdout` will end with `…` when truncated." +msgstr "" +"単一ジョブまたはアドホックコマンドイベントについて表示される標準出力の最大サイズ (バイト単位)。`stdout` は切り捨てが実行されると `…` " +"で終了します。" + +#: awx/main/conf.py:178 +msgid "Maximum Scheduled Jobs" +msgstr "スケジュール済みジョブの最大数" + +#: awx/main/conf.py:179 +msgid "" +"Maximum number of the same job template that can be waiting to run when " +"launching from a schedule before no more are created." +msgstr "スケジュールからの起動時に実行を待機している同じジョブテンプレートの最大数です (これ以上作成されることはありません)。" + +#: awx/main/conf.py:188 +msgid "Ansible Callback Plugins" +msgstr "Ansible コールバックプラグイン" + +#: awx/main/conf.py:189 +msgid "" +"List of paths to search for extra callback plugins to be used when running " +"jobs." +msgstr "ジョブの実行時に使用される追加のコールバックプラグインについて検索するパスの一覧。" + +#: awx/main/conf.py:199 +msgid "Default Job Timeout" +msgstr "デフォルトのジョブタイムアウト" + +#: awx/main/conf.py:200 +msgid "" +"Maximum time to allow jobs to run. Use value of 0 to indicate that no " +"timeout should be imposed. A timeout set on an individual job template will " +"override this." +msgstr "" +"ジョブの実行可能な最大時間。値 0 " +"が使用されている場合はタイムアウトを設定できないことを示します。個別のジョブテンプレートに設定されるタイムアウトはこれを上書きします。" + +#: awx/main/conf.py:211 +msgid "Default Inventory Update Timeout" +msgstr "デフォルトのインベントリー更新タイムアウト" + +#: awx/main/conf.py:212 +msgid "" +"Maximum time to allow inventory updates to run. Use value of 0 to indicate " +"that no timeout should be imposed. A timeout set on an individual inventory " +"source will override this." +msgstr "" +"インベントリー更新の実行可能な最大時間。値 0 " +"が設定されている場合はタイムアウトを設定できないことを示します。個別のインベントリーソースに設定されるタイムアウトはこれを上書きします。" + +#: awx/main/conf.py:223 +msgid "Default Project Update Timeout" +msgstr "デフォルトのプロジェクト更新タイムアウト" + +#: awx/main/conf.py:224 +msgid "" +"Maximum time to allow project updates to run. Use value of 0 to indicate " +"that no timeout should be imposed. A timeout set on an individual project " +"will override this." +msgstr "" +"プロジェクト更新の実行可能な最大時間。値 0 " +"が設定されている場合はタイムアウトを設定できないことを示します。個別のプロジェクトに設定されるタイムアウトはこれを上書きします。" + +#: awx/main/conf.py:234 +msgid "Logging Aggregator" +msgstr "ログアグリゲーター" + +#: awx/main/conf.py:235 +msgid "Hostname/IP where external logs will be sent to." +msgstr "外部ログの送信先のホスト名/IP" + +#: awx/main/conf.py:236 +#: awx/main/conf.py:245 +#: awx/main/conf.py:255 +#: awx/main/conf.py:264 +#: awx/main/conf.py:274 +#: awx/main/conf.py:288 +#: awx/main/conf.py:300 +#: awx/main/conf.py:309 +msgid "Logging" +msgstr "ロギング" + +#: awx/main/conf.py:243 +msgid "Logging Aggregator Port" +msgstr "ログアグリゲーターポート" + +#: awx/main/conf.py:244 +msgid "Port on Logging Aggregator to send logs to (if required)." +msgstr "ログの送信先のログアグリゲーターのポート (必要な場合)。" + +#: awx/main/conf.py:253 +msgid "Logging Aggregator Type" +msgstr "ログアグリゲーターのタイプ" + +#: awx/main/conf.py:254 +msgid "Format messages for the chosen log aggregator." +msgstr "選択されたログアグリゲーターのメッセージのフォーマット。" + +#: awx/main/conf.py:262 +msgid "Logging Aggregator Username" +msgstr "ログアグリゲーターのユーザー名" + +#: awx/main/conf.py:263 +msgid "Username for external log aggregator (if required)." 
+msgstr "外部ログアグリゲーターのユーザー名 (必要な場合)。" + +#: awx/main/conf.py:272 +msgid "Logging Aggregator Password/Token" +msgstr "ログアグリゲーターのパスワード/トークン" + +#: awx/main/conf.py:273 +msgid "" +"Password or authentication token for external log aggregator (if required)." +msgstr "外部ログアグリゲーターのパスワードまたは認証トークン (必要な場合)。" + +#: awx/main/conf.py:281 +msgid "Loggers to send data to the log aggregator from" +msgstr "ログアグリゲーターにデータを送信するロガー" + +#: awx/main/conf.py:282 +msgid "" +"List of loggers that will send HTTP logs to the collector, these can include " +"any or all of: \n" +"awx - Tower service logs\n" +"activity_stream - activity stream records\n" +"job_events - callback data from Ansible job events\n" +"system_tracking - facts gathered from scan jobs." +msgstr "" +"HTTP ログをコレクターに送信するロガーの一覧です。これらには以下のいずれか、またはすべてが含まれます。\n" +"awx - Tower サービスログ\n" +"activity_stream - アクティビティーストリームレコード\n" +"job_events - Ansible ジョブイベントからのコールバックデータ\n" +"system_tracking - スキャンジョブから生成されるファクト" + +#: awx/main/conf.py:295 +msgid "Log System Tracking Facts Individually" +msgstr "ログシステムトラッキングの個別ファクト" + +#: awx/main/conf.py:296 +msgid "" +"If set, system tracking facts will be sent for each package, service, " +"orother item found in a scan, allowing for greater search query granularity. " +"If unset, facts will be sent as a single dictionary, allowing for greater " +"efficiency in fact processing." +msgstr "" +"設定されている場合、スキャンで見つかる各パッケージ、サービスその他の項目についてのサービスシステムトラッキングのファクトが送信され、検索クエリーの詳細度が上がります。設定されていない場合、ファクトは単一辞書として送信され、ファクトの処理の効率が上がります。" + +#: awx/main/conf.py:307 +msgid "Enable External Logging" +msgstr "外部ログの有効化" + +#: awx/main/conf.py:308 +msgid "Enable sending logs to external log aggregator." +msgstr "外部ログアグリゲーターへのログ送信の有効化" + +#: awx/main/models/activity_stream.py:22 +msgid "Entity Created" +msgstr "エンティティーの作成" + +#: awx/main/models/activity_stream.py:23 +msgid "Entity Updated" +msgstr "エンティティーの更新" + +#: awx/main/models/activity_stream.py:24 +msgid "Entity Deleted" +msgstr "エンティティーの削除" + +#: awx/main/models/activity_stream.py:25 +msgid "Entity Associated with another Entity" +msgstr "エンティティーの別のエンティティーへの関連付け" + +#: awx/main/models/activity_stream.py:26 +msgid "Entity was Disassociated with another Entity" +msgstr "エンティティーの別のエンティティーとの関連付けの解除" + +#: awx/main/models/ad_hoc_commands.py:96 +msgid "No valid inventory." +msgstr "有効なインベントリーはありません。" + +#: awx/main/models/ad_hoc_commands.py:103 +#: awx/main/models/jobs.py:161 +msgid "You must provide a machine / SSH credential." +msgstr "マシン/SSH 認証情報を入力してください。" + +#: awx/main/models/ad_hoc_commands.py:114 +#: awx/main/models/ad_hoc_commands.py:122 +msgid "Invalid type for ad hoc command" +msgstr "アドホックコマンドの無効なタイプ" + +#: awx/main/models/ad_hoc_commands.py:117 +msgid "Unsupported module for ad hoc commands." +msgstr "アドホックコマンドのサポートされていないモジュール。" + +#: awx/main/models/ad_hoc_commands.py:125 +#, python-format +msgid "No argument passed to %s module." 
+msgstr "%s モジュールに渡される引数はありません。" + +#: awx/main/models/ad_hoc_commands.py:222 +#: awx/main/models/jobs.py:763 +msgid "Host Failed" +msgstr "ホストの失敗" + +#: awx/main/models/ad_hoc_commands.py:223 +#: awx/main/models/jobs.py:764 +msgid "Host OK" +msgstr "ホスト OK" + +#: awx/main/models/ad_hoc_commands.py:224 +#: awx/main/models/jobs.py:767 +msgid "Host Unreachable" +msgstr "ホストに到達できません" + +#: awx/main/models/ad_hoc_commands.py:229 +#: awx/main/models/jobs.py:766 +msgid "Host Skipped" +msgstr "ホストがスキップされました" + +#: awx/main/models/ad_hoc_commands.py:239 +#: awx/main/models/jobs.py:794 +msgid "Debug" +msgstr "デバッグ" + +#: awx/main/models/ad_hoc_commands.py:240 +#: awx/main/models/jobs.py:795 +msgid "Verbose" +msgstr "詳細" + +#: awx/main/models/ad_hoc_commands.py:241 +#: awx/main/models/jobs.py:796 +msgid "Deprecated" +msgstr "非推奨" + +#: awx/main/models/ad_hoc_commands.py:242 +#: awx/main/models/jobs.py:797 +msgid "Warning" +msgstr "警告" + +#: awx/main/models/ad_hoc_commands.py:243 +#: awx/main/models/jobs.py:798 +msgid "System Warning" +msgstr "システム警告" + +#: awx/main/models/ad_hoc_commands.py:244 +#: awx/main/models/jobs.py:799 +#: awx/main/models/unified_jobs.py:64 +msgid "Error" +msgstr "エラー" + +#: awx/main/models/base.py:45 +#: awx/main/models/base.py:51 +#: awx/main/models/base.py:56 +msgid "Run" +msgstr "実行" + +#: awx/main/models/base.py:46 +#: awx/main/models/base.py:52 +#: awx/main/models/base.py:57 +msgid "Check" +msgstr "チェック" + +#: awx/main/models/base.py:47 +msgid "Scan" +msgstr "スキャン" + +#: awx/main/models/base.py:61 +msgid "Read Inventory" +msgstr "インベントリーの読み取り" + +#: awx/main/models/base.py:62 +msgid "Edit Inventory" +msgstr "インベントリーの編集" + +#: awx/main/models/base.py:63 +msgid "Administrate Inventory" +msgstr "インベントリーの管理" + +#: awx/main/models/base.py:64 +msgid "Deploy To Inventory" +msgstr "インベントリーへのデプロイ" + +#: awx/main/models/base.py:65 +msgid "Deploy To Inventory (Dry Run)" +msgstr "インベントリーへのデプロイ (ドライラン)" + +#: awx/main/models/base.py:66 +msgid "Scan an Inventory" +msgstr "インベントリーのスキャン" + +#: awx/main/models/base.py:67 +msgid "Create a Job Template" +msgstr "ジョブテンプレートの作成" + +#: awx/main/models/credential.py:33 +msgid "Machine" +msgstr "マシン" + +#: awx/main/models/credential.py:34 +msgid "Network" +msgstr "ネットワーク" + +#: awx/main/models/credential.py:35 +msgid "Source Control" +msgstr "ソースコントロール" + +#: awx/main/models/credential.py:36 +msgid "Amazon Web Services" +msgstr "Amazon Web サービス" + +#: awx/main/models/credential.py:37 +msgid "Rackspace" +msgstr "Rackspace" + +#: awx/main/models/credential.py:38 +#: awx/main/models/inventory.py:713 +msgid "VMware vCenter" +msgstr "VMware vCenter" + +#: awx/main/models/credential.py:39 +#: awx/main/models/inventory.py:714 +msgid "Red Hat Satellite 6" +msgstr "Red Hat Satellite 6" + +#: awx/main/models/credential.py:40 +#: awx/main/models/inventory.py:715 +msgid "Red Hat CloudForms" +msgstr "Red Hat CloudForms" + +#: awx/main/models/credential.py:41 +#: awx/main/models/inventory.py:710 +msgid "Google Compute Engine" +msgstr "Google Compute Engine" + +#: awx/main/models/credential.py:42 +#: awx/main/models/inventory.py:711 +msgid "Microsoft Azure Classic (deprecated)" +msgstr "Microsoft Azure Classic (非推奨)" + +#: awx/main/models/credential.py:43 +#: awx/main/models/inventory.py:712 +msgid "Microsoft Azure Resource Manager" +msgstr "Microsoft Azure Resource Manager" + +#: awx/main/models/credential.py:44 +#: awx/main/models/inventory.py:716 +msgid "OpenStack" +msgstr "OpenStack" + +#: awx/main/models/credential.py:48 +msgid "None" +msgstr "なし" + +#: 
awx/main/models/credential.py:49 +msgid "Sudo" +msgstr "Sudo" + +#: awx/main/models/credential.py:50 +msgid "Su" +msgstr "Su" + +#: awx/main/models/credential.py:51 +msgid "Pbrun" +msgstr "Pbrun" + +#: awx/main/models/credential.py:52 +msgid "Pfexec" +msgstr "Pfexec" + +#: awx/main/models/credential.py:53 +msgid "DZDO" +msgstr "DZDO" + +#: awx/main/models/credential.py:54 +msgid "Pmrun" +msgstr "Pmrun" + +#: awx/main/models/credential.py:103 +msgid "Host" +msgstr "ホスト" + +#: awx/main/models/credential.py:104 +msgid "The hostname or IP address to use." +msgstr "使用するホスト名または IP アドレス。" + +#: awx/main/models/credential.py:110 +msgid "Username" +msgstr "ユーザー名" + +#: awx/main/models/credential.py:111 +msgid "Username for this credential." +msgstr "この認証情報のユーザー名。" + +#: awx/main/models/credential.py:117 +msgid "Password" +msgstr "パスワード" + +#: awx/main/models/credential.py:118 +msgid "" +"Password for this credential (or \"ASK\" to prompt the user for machine " +"credentials)." +msgstr "この認証情報のパスワード (またはマシンの認証情報を求めるプロンプトを出すには 「ASK」)。" + +#: awx/main/models/credential.py:125 +msgid "Security Token" +msgstr "セキュリティートークン" + +#: awx/main/models/credential.py:126 +msgid "Security Token for this credential" +msgstr "この認証情報のセキュリティートークン" + +#: awx/main/models/credential.py:132 +msgid "Project" +msgstr "プロジェクト" + +#: awx/main/models/credential.py:133 +msgid "The identifier for the project." +msgstr "プロジェクトの識別子。" + +#: awx/main/models/credential.py:139 +msgid "Domain" +msgstr "ドメイン" + +#: awx/main/models/credential.py:140 +msgid "The identifier for the domain." +msgstr "ドメインの識別子。" + +#: awx/main/models/credential.py:145 +msgid "SSH private key" +msgstr "SSH 秘密鍵" + +#: awx/main/models/credential.py:146 +msgid "RSA or DSA private key to be used instead of password." +msgstr "パスワードの代わりに使用される RSA または DSA 秘密鍵。" + +#: awx/main/models/credential.py:152 +msgid "SSH key unlock" +msgstr "SSH キーのロック解除" + +#: awx/main/models/credential.py:153 +msgid "" +"Passphrase to unlock SSH private key if encrypted (or \"ASK\" to prompt the " +"user for machine credentials)." +msgstr "" +"暗号化されている場合は SSH 秘密鍵のロックを解除するためのパスフレーズ (またはマシンの認証情報を求めるプロンプトを出すには「ASK」)。" + +#: awx/main/models/credential.py:161 +msgid "Privilege escalation method." +msgstr "権限昇格メソッド。" + +#: awx/main/models/credential.py:167 +msgid "Privilege escalation username." +msgstr "権限昇格ユーザー名。" + +#: awx/main/models/credential.py:173 +msgid "Password for privilege escalation method." +msgstr "権限昇格メソッドのパスワード。" + +#: awx/main/models/credential.py:179 +msgid "Vault password (or \"ASK\" to prompt the user)." +msgstr "Vault パスワード (またはユーザーにプロンプトを出すには「ASK」)。" + +#: awx/main/models/credential.py:183 +msgid "Whether to use the authorize mechanism." +msgstr "承認メカニズムを使用するかどうか。" + +#: awx/main/models/credential.py:189 +msgid "Password used by the authorize mechanism." +msgstr "承認メカニズムで使用されるパスワード。" + +#: awx/main/models/credential.py:195 +msgid "Client Id or Application Id for the credential" +msgstr "認証情報のクライアント ID またはアプリケーション ID" + +#: awx/main/models/credential.py:201 +msgid "Secret Token for this credential" +msgstr "この認証情報のシークレットトークン" + +#: awx/main/models/credential.py:207 +msgid "Subscription identifier for this credential" +msgstr "この認証情報のサブスクリプション識別子" + +#: awx/main/models/credential.py:213 +msgid "Tenant identifier for this credential" +msgstr "この認証情報のテナント識別子" + +#: awx/main/models/credential.py:283 +msgid "Host required for VMware credential." +msgstr "VMware 認証情報に必要なホスト。" + +#: awx/main/models/credential.py:285 +msgid "Host required for OpenStack credential." 
+msgstr "OpenStack 認証情報に必要なホスト。" + +#: awx/main/models/credential.py:294 +msgid "Access key required for AWS credential." +msgstr "AWS 認証情報に必要なアクセスキー。" + +#: awx/main/models/credential.py:296 +msgid "Username required for Rackspace credential." +msgstr "Rackspace 認証情報に必要なユーザー名。" + +#: awx/main/models/credential.py:299 +msgid "Username required for VMware credential." +msgstr "VMware 認証情報に必要なユーザー名。" + +#: awx/main/models/credential.py:301 +msgid "Username required for OpenStack credential." +msgstr "OpenStack 認証情報に必要なユーザー名。" + +#: awx/main/models/credential.py:307 +msgid "Secret key required for AWS credential." +msgstr "AWS 認証情報に必要なシークレットキー。" + +#: awx/main/models/credential.py:309 +msgid "API key required for Rackspace credential." +msgstr "Rackspace 認証情報に必要な API キー。" + +#: awx/main/models/credential.py:311 +msgid "Password required for VMware credential." +msgstr "VMware 認証情報に必要なパスワード。" + +#: awx/main/models/credential.py:313 +msgid "Password or API key required for OpenStack credential." +msgstr "OpenStack 認証情報に必要なパスワードまたは API キー。" + +#: awx/main/models/credential.py:319 +msgid "Project name required for OpenStack credential." +msgstr "OpenStack 認証情報に必要なプロジェクト名。" + +#: awx/main/models/credential.py:346 +msgid "SSH key unlock must be set when SSH key is encrypted." +msgstr "SSH キーの暗号化時に SSH キーのロック解除を設定する必要があります。" + +#: awx/main/models/credential.py:352 +msgid "Credential cannot be assigned to both a user and team." +msgstr "認証情報はユーザーとチームの両方に割り当てることができません。" + +#: awx/main/models/fact.py:21 +msgid "Host for the facts that the fact scan captured." +msgstr "ファクトスキャンがキャプチャーしたファクトのホスト。" + +#: awx/main/models/fact.py:26 +msgid "Date and time of the corresponding fact scan gathering time." +msgstr "対応するファクトスキャン収集時間の日時。" + +#: awx/main/models/fact.py:29 +msgid "" +"Arbitrary JSON structure of module facts captured at timestamp for a single " +"host." +msgstr "単一ホストのタイムスタンプでキャプチャーされるモジュールファクトの任意の JSON 構造。" + +#: awx/main/models/inventory.py:45 +msgid "inventories" +msgstr "インベントリー" + +#: awx/main/models/inventory.py:52 +msgid "Organization containing this inventory." +msgstr "このインベントリーを含む組織。" + +#: awx/main/models/inventory.py:58 +msgid "Inventory variables in JSON or YAML format." +msgstr "JSON または YAML 形式のインベントリー変数。" + +#: awx/main/models/inventory.py:63 +msgid "Flag indicating whether any hosts in this inventory have failed." +msgstr "このインベントリーのホストが失敗したかどうかを示すフラグ。" + +#: awx/main/models/inventory.py:68 +msgid "Total number of hosts in this inventory." +msgstr "このインべントリー内のホストの合計数。" + +#: awx/main/models/inventory.py:73 +msgid "Number of hosts in this inventory with active failures." +msgstr "アクティブなエラーのあるこのインベントリー内のホストの数。" + +#: awx/main/models/inventory.py:78 +msgid "Total number of groups in this inventory." +msgstr "このインべントリー内のグループの合計数。" + +#: awx/main/models/inventory.py:83 +msgid "Number of groups in this inventory with active failures." +msgstr "アクティブなエラーのあるこのインベントリー内のグループの数。" + +#: awx/main/models/inventory.py:88 +msgid "" +"Flag indicating whether this inventory has any external inventory sources." +msgstr "このインベントリーに外部のインベントリーソースがあるかどうかを示すフラグ。" + +#: awx/main/models/inventory.py:93 +msgid "" +"Total number of external inventory sources configured within this inventory." +msgstr "このインベントリー内で設定される外部インベントリーソースの合計数。" + +#: awx/main/models/inventory.py:98 +msgid "Number of external inventory sources in this inventory with failures." +msgstr "エラーのあるこのインベントリー内の外部インベントリーソースの数。" + +#: awx/main/models/inventory.py:339 +msgid "Is this host online and available for running jobs?" 
+msgstr "このホストはオンラインで、ジョブを実行するために利用できますか?" + +#: awx/main/models/inventory.py:345 +msgid "" +"The value used by the remote inventory source to uniquely identify the host" +msgstr "ホストを一意に識別するためにリモートインベントリーソースで使用される値" + +#: awx/main/models/inventory.py:350 +msgid "Host variables in JSON or YAML format." +msgstr "JSON または YAML 形式のホスト変数。" + +#: awx/main/models/inventory.py:372 +msgid "Flag indicating whether the last job failed for this host." +msgstr "このホストの最後のジョブが失敗したかどうかを示すフラグ。" + +#: awx/main/models/inventory.py:377 +msgid "" +"Flag indicating whether this host was created/updated from any external " +"inventory sources." +msgstr "このホストが外部インベントリーソースから作成/更新されたかどうかを示すフラグ。" + +#: awx/main/models/inventory.py:383 +msgid "Inventory source(s) that created or modified this host." +msgstr "このホストを作成または変更したインベントリーソース。" + +#: awx/main/models/inventory.py:474 +msgid "Group variables in JSON or YAML format." +msgstr "JSON または YAML 形式のグループ変数。" + +#: awx/main/models/inventory.py:480 +msgid "Hosts associated directly with this group." +msgstr "このグループに直接関連付けられたホスト。" + +#: awx/main/models/inventory.py:485 +msgid "Total number of hosts directly or indirectly in this group." +msgstr "このグループに直接的または間接的に属するホストの合計数。" + +#: awx/main/models/inventory.py:490 +msgid "Flag indicating whether this group has any hosts with active failures." +msgstr "このグループにアクティブなエラーのあるホストがあるかどうかを示すフラグ。" + +#: awx/main/models/inventory.py:495 +msgid "Number of hosts in this group with active failures." +msgstr "アクティブなエラーのあるこのグループ内のホストの数。" + +#: awx/main/models/inventory.py:500 +msgid "Total number of child groups contained within this group." +msgstr "このグループに含まれる子グループの合計数。" + +#: awx/main/models/inventory.py:505 +msgid "Number of child groups within this group that have active failures." +msgstr "アクティブなエラーのあるこのグループ内の子グループの数。" + +#: awx/main/models/inventory.py:510 +msgid "" +"Flag indicating whether this group was created/updated from any external " +"inventory sources." +msgstr "このグループが外部インベントリーソースから作成/更新されたかどうかを示すフラグ。" + +#: awx/main/models/inventory.py:516 +msgid "Inventory source(s) that created or modified this group." +msgstr "このグループを作成または変更したインベントリーソース。" + +#: awx/main/models/inventory.py:706 +#: awx/main/models/projects.py:42 +#: awx/main/models/unified_jobs.py:402 +msgid "Manual" +msgstr "手動" + +#: awx/main/models/inventory.py:707 +msgid "Local File, Directory or Script" +msgstr "ローカルファイル、ディレクトリーまたはスクリプト" + +#: awx/main/models/inventory.py:708 +msgid "Rackspace Cloud Servers" +msgstr "Rackspace クラウドサーバー" + +#: awx/main/models/inventory.py:709 +msgid "Amazon EC2" +msgstr "Amazon EC2" + +#: awx/main/models/inventory.py:717 +msgid "Custom Script" +msgstr "カスタムスクリプト" + +#: awx/main/models/inventory.py:828 +msgid "Inventory source variables in YAML or JSON format." +msgstr "YAML または JSON 形式のインベントリーソース変数。" + +#: awx/main/models/inventory.py:847 +msgid "" +"Comma-separated list of filter expressions (EC2 only). Hosts are imported " +"when ANY of the filters match." +msgstr "カンマ区切りのフィルター式の一覧 (EC2 のみ) です。ホストは、フィルターのいずれかが一致する場合にインポートされます。" + +#: awx/main/models/inventory.py:853 +msgid "Limit groups automatically created from inventory source (EC2 only)." +msgstr "インベントリーソースから自動的に作成されるグループを制限します (EC2 のみ)。" + +#: awx/main/models/inventory.py:857 +msgid "Overwrite local groups and hosts from remote inventory source." +msgstr "リモートインベントリーソースからのローカルグループおよびホストを上書きします。" + +#: awx/main/models/inventory.py:861 +msgid "Overwrite local variables from remote inventory source." 
+msgstr "リモートインベントリーソースからのローカル変数を上書きします。" + +#: awx/main/models/inventory.py:893 +msgid "Availability Zone" +msgstr "アベイラビリティーゾーン" + +#: awx/main/models/inventory.py:894 +msgid "Image ID" +msgstr "イメージ ID" + +#: awx/main/models/inventory.py:895 +msgid "Instance ID" +msgstr "インスタンス ID" + +#: awx/main/models/inventory.py:896 +msgid "Instance Type" +msgstr "インスタンスタイプ" + +#: awx/main/models/inventory.py:897 +msgid "Key Name" +msgstr "キー名" + +#: awx/main/models/inventory.py:898 +msgid "Region" +msgstr "リージョン" + +#: awx/main/models/inventory.py:899 +msgid "Security Group" +msgstr "セキュリティーグループ" + +#: awx/main/models/inventory.py:900 +msgid "Tags" +msgstr "タグ" + +#: awx/main/models/inventory.py:901 +msgid "VPC ID" +msgstr "VPC ID" + +#: awx/main/models/inventory.py:902 +msgid "Tag None" +msgstr "タグ None" + +#: awx/main/models/inventory.py:973 +#, python-format +msgid "" +"Cloud-based inventory sources (such as %s) require credentials for the " +"matching cloud service." +msgstr "クラウドベースのインベントリーソース (%s など) には一致するクラウドサービスの認証情報が必要です。" + +#: awx/main/models/inventory.py:980 +msgid "Credential is required for a cloud source." +msgstr "認証情報がクラウドソースに必要です。" + +#: awx/main/models/inventory.py:1005 +#, python-format +msgid "Invalid %(source)s region: %(region)s" +msgstr "無効な %(source)s リージョン: %(region)s" + +#: awx/main/models/inventory.py:1030 +#, python-format +msgid "Invalid filter expression: %(filter)s" +msgstr "無効なフィルター式: %(filter)s" + +#: awx/main/models/inventory.py:1048 +#, python-format +msgid "Invalid group by choice: %(choice)s" +msgstr "無効なグループ (選択による): %(choice)s" + +#: awx/main/models/inventory.py:1195 +#, python-format +msgid "" +"Unable to configure this item for cloud sync. It is already managed by %s." +msgstr "クラウド同期用にこの項目を設定できません。すでに %s によって管理されています。" + +#: awx/main/models/inventory.py:1290 +msgid "Inventory script contents" +msgstr "インベントリースクリプトの内容" + +#: awx/main/models/inventory.py:1295 +msgid "Organization owning this inventory script" +msgstr "このインベントリースクリプトを所有する組織" + +#: awx/main/models/jobs.py:169 +msgid "You must provide a network credential." +msgstr "ネットワーク認証情報を指定する必要があります。" + +#: awx/main/models/jobs.py:177 +msgid "" +"Must provide a credential for a cloud provider, such as Amazon Web Services " +"or Rackspace." +msgstr "Amazon Web Services または Rackspace などのクラウドプロバイダーの認証情報を指定する必要があります。" + +#: awx/main/models/jobs.py:269 +msgid "Job Template must provide 'inventory' or allow prompting for it." +msgstr "ジョブテンプレートは「inventory」を指定するか、このプロンプトを許可する必要があります。" + +#: awx/main/models/jobs.py:273 +msgid "Job Template must provide 'credential' or allow prompting for it." +msgstr "ジョブテンプレートは「credential」を指定するか、このプロンプトを許可する必要があります。" + +#: awx/main/models/jobs.py:362 +msgid "Cannot override job_type to or from a scan job." +msgstr "スキャンジョブから/への job_type を上書きを実行できません。" + +#: awx/main/models/jobs.py:365 +msgid "Inventory cannot be changed at runtime for scan jobs." 
+msgstr "インベントリーはスキャンジョブの実行時に変更できません。" + +#: awx/main/models/jobs.py:431 +#: awx/main/models/projects.py:243 +msgid "SCM Revision" +msgstr "SCM リビジョン" + +#: awx/main/models/jobs.py:432 +msgid "The SCM Revision from the Project used for this job, if available" +msgstr "このジョブに使用されるプロジェクトからの SCM リビジョン (ある場合)" + +#: awx/main/models/jobs.py:440 +msgid "" +"The SCM Refresh task used to make sure the playbooks were available for the " +"job run" +msgstr "SCM 更新タスクは、Playbook がジョブの実行で利用可能であったことを確認するために使用されます" + +#: awx/main/models/jobs.py:662 +msgid "job host summaries" +msgstr "ジョブホストの概要" + +#: awx/main/models/jobs.py:765 +msgid "Host Failure" +msgstr "ホストの失敗" + +#: awx/main/models/jobs.py:768 +#: awx/main/models/jobs.py:782 +msgid "No Hosts Remaining" +msgstr "残りのホストがありません" + +#: awx/main/models/jobs.py:769 +msgid "Host Polling" +msgstr "ホストのポーリング" + +#: awx/main/models/jobs.py:770 +msgid "Host Async OK" +msgstr "ホストの非同期 OK" + +#: awx/main/models/jobs.py:771 +msgid "Host Async Failure" +msgstr "ホストの非同期失敗" + +#: awx/main/models/jobs.py:772 +msgid "Item OK" +msgstr "項目 OK" + +#: awx/main/models/jobs.py:773 +msgid "Item Failed" +msgstr "項目の失敗" + +#: awx/main/models/jobs.py:774 +msgid "Item Skipped" +msgstr "項目のスキップ" + +#: awx/main/models/jobs.py:775 +msgid "Host Retry" +msgstr "ホストの再試行" + +#: awx/main/models/jobs.py:777 +msgid "File Difference" +msgstr "ファイルの相違点" + +#: awx/main/models/jobs.py:778 +msgid "Playbook Started" +msgstr "Playbook の開始" + +#: awx/main/models/jobs.py:779 +msgid "Running Handlers" +msgstr "実行中のハンドラー" + +#: awx/main/models/jobs.py:780 +msgid "Including File" +msgstr "組み込みファイル" + +#: awx/main/models/jobs.py:781 +msgid "No Hosts Matched" +msgstr "一致するホストがありません" + +#: awx/main/models/jobs.py:783 +msgid "Task Started" +msgstr "タスクの開始" + +#: awx/main/models/jobs.py:785 +msgid "Variables Prompted" +msgstr "変数のプロモート" + +#: awx/main/models/jobs.py:786 +msgid "Gathering Facts" +msgstr "ファクトの収集" + +#: awx/main/models/jobs.py:787 +msgid "internal: on Import for Host" +msgstr "内部: ホストのインポート時" + +#: awx/main/models/jobs.py:788 +msgid "internal: on Not Import for Host" +msgstr "内部: ホストの非インポート時" + +#: awx/main/models/jobs.py:789 +msgid "Play Started" +msgstr "プレイの開始" + +#: awx/main/models/jobs.py:790 +msgid "Playbook Complete" +msgstr "Playbook の完了" + +#: awx/main/models/jobs.py:1200 +msgid "Remove jobs older than a certain number of days" +msgstr "特定の日数より前のジョブを削除" + +#: awx/main/models/jobs.py:1201 +msgid "Remove activity stream entries older than a certain number of days" +msgstr "特定の日数より前のアクティビティーストリームのエントリーを削除" + +#: awx/main/models/jobs.py:1202 +msgid "Purge and/or reduce the granularity of system tracking data" +msgstr "システムトラッキングデータの詳細度の削除/削減" + +#: awx/main/models/label.py:29 +msgid "Organization this label belongs to." 
+msgstr "このラベルが属する組織。" + +#: awx/main/models/notifications.py:31 +msgid "Email" +msgstr "メール" + +#: awx/main/models/notifications.py:32 +msgid "Slack" +msgstr "Slack" + +#: awx/main/models/notifications.py:33 +msgid "Twilio" +msgstr "Twilio" + +#: awx/main/models/notifications.py:34 +msgid "Pagerduty" +msgstr "Pagerduty" + +#: awx/main/models/notifications.py:35 +msgid "HipChat" +msgstr "HipChat" + +#: awx/main/models/notifications.py:36 +msgid "Webhook" +msgstr "Webhook" + +#: awx/main/models/notifications.py:37 +msgid "IRC" +msgstr "IRC" + +#: awx/main/models/notifications.py:127 +#: awx/main/models/unified_jobs.py:59 +msgid "Pending" +msgstr "保留中" + +#: awx/main/models/notifications.py:128 +#: awx/main/models/unified_jobs.py:62 +msgid "Successful" +msgstr "成功" + +#: awx/main/models/notifications.py:129 +#: awx/main/models/unified_jobs.py:63 +msgid "Failed" +msgstr "失敗" + +#: awx/main/models/organization.py:157 +msgid "Execute Commands on the Inventory" +msgstr "インベントリーでのコマンドの実行" + +#: awx/main/models/organization.py:211 +msgid "Token not invalidated" +msgstr "トークンが無効にされませんでした" + +#: awx/main/models/organization.py:212 +msgid "Token is expired" +msgstr "トークンは期限切れです" + +#: awx/main/models/organization.py:213 +msgid "" +"The maximum number of allowed sessions for this user has been exceeded." +msgstr "このユーザーに許可される最大セッション数を超えました。" + +#: awx/main/models/organization.py:216 +msgid "Invalid token" +msgstr "無効なトークン" + +#: awx/main/models/organization.py:233 +msgid "Reason the auth token was invalidated." +msgstr "認証トークンが無効にされた理由。" + +#: awx/main/models/organization.py:272 +msgid "Invalid reason specified" +msgstr "無効な理由が特定されました" + +#: awx/main/models/projects.py:43 +msgid "Git" +msgstr "Git" + +#: awx/main/models/projects.py:44 +msgid "Mercurial" +msgstr "Mercurial" + +#: awx/main/models/projects.py:45 +msgid "Subversion" +msgstr "Subversion" + +#: awx/main/models/projects.py:71 +msgid "" +"Local path (relative to PROJECTS_ROOT) containing playbooks and related " +"files for this project." +msgstr "このプロジェクトの Playbook および関連するファイルを含むローカルパス (PROJECTS_ROOT との相対)。" + +#: awx/main/models/projects.py:80 +msgid "SCM Type" +msgstr "SCM タイプ" + +#: awx/main/models/projects.py:81 +msgid "Specifies the source control system used to store the project." +msgstr "プロジェクトを保存するために使用されるソースコントロールシステムを指定します。" + +#: awx/main/models/projects.py:87 +msgid "SCM URL" +msgstr "SCM URL" + +#: awx/main/models/projects.py:88 +msgid "The location where the project is stored." +msgstr "プロジェクトが保存される場所。" + +#: awx/main/models/projects.py:94 +msgid "SCM Branch" +msgstr "SCM ブランチ" + +#: awx/main/models/projects.py:95 +msgid "Specific branch, tag or commit to checkout." +msgstr "チェックアウトする特定のブランチ、タグまたはコミット。" + +#: awx/main/models/projects.py:99 +msgid "Discard any local changes before syncing the project." +msgstr "ローカル変更を破棄してからプロジェクトを同期します。" + +#: awx/main/models/projects.py:103 +msgid "Delete the project before syncing." +msgstr "プロジェクトを削除してから同期します。" + +#: awx/main/models/projects.py:116 +msgid "The amount of time to run before the task is canceled." +msgstr "タスクが取り消される前の実行時間。" + +#: awx/main/models/projects.py:130 +msgid "Invalid SCM URL." +msgstr "無効な SCM URL。" + +#: awx/main/models/projects.py:133 +msgid "SCM URL is required." +msgstr "SCM URL が必要です。" + +#: awx/main/models/projects.py:142 +msgid "Credential kind must be 'scm'." +msgstr "認証情報の種類は 'scm' にする必要があります。" + +#: awx/main/models/projects.py:157 +msgid "Invalid credential." 
+msgstr "無効な認証情報。" + +#: awx/main/models/projects.py:229 +msgid "Update the project when a job is launched that uses the project." +msgstr "プロジェクトを使用するジョブの起動時にプロジェクトを更新します。" + +#: awx/main/models/projects.py:234 +msgid "" +"The number of seconds after the last project update ran that a newproject " +"update will be launched as a job dependency." +msgstr "新規プロジェクトの更新がジョブの依存関係として起動される最終プロジェクト更新後の秒数。" + +#: awx/main/models/projects.py:244 +msgid "The last revision fetched by a project update" +msgstr "プロジェクト更新で取得される最新リビジョン" + +#: awx/main/models/projects.py:251 +msgid "Playbook Files" +msgstr "Playbook ファイル" + +#: awx/main/models/projects.py:252 +msgid "List of playbooks found in the project" +msgstr "プロジェクトにある Playbook の一覧" + +#: awx/main/models/rbac.py:36 +msgid "System Administrator" +msgstr "システム管理者" + +#: awx/main/models/rbac.py:37 +msgid "System Auditor" +msgstr "システム監査者" + +#: awx/main/models/rbac.py:38 +msgid "Ad Hoc" +msgstr "アドホック" + +#: awx/main/models/rbac.py:39 +msgid "Admin" +msgstr "管理者" + +#: awx/main/models/rbac.py:40 +msgid "Auditor" +msgstr "監査者" + +#: awx/main/models/rbac.py:41 +msgid "Execute" +msgstr "実行" + +#: awx/main/models/rbac.py:42 +msgid "Member" +msgstr "メンバー" + +#: awx/main/models/rbac.py:43 +msgid "Read" +msgstr "読み込み" + +#: awx/main/models/rbac.py:44 +msgid "Update" +msgstr "更新" + +#: awx/main/models/rbac.py:45 +msgid "Use" +msgstr "使用" + +#: awx/main/models/rbac.py:49 +msgid "Can manage all aspects of the system" +msgstr "システムのすべての側面を管理可能" + +#: awx/main/models/rbac.py:50 +msgid "Can view all settings on the system" +msgstr "システムのすべての設定を表示可能" + +#: awx/main/models/rbac.py:51 +msgid "May run ad hoc commands on an inventory" +msgstr "インベントリーでアドホックコマンドを実行可能" + +#: awx/main/models/rbac.py:52 +#, python-format +msgid "Can manage all aspects of the %s" +msgstr "%s のすべての側面を管理可能" + +#: awx/main/models/rbac.py:53 +#, python-format +msgid "Can view all settings for the %s" +msgstr "%s のすべての設定を表示可能" + +#: awx/main/models/rbac.py:54 +#, python-format +msgid "May run the %s" +msgstr "%s を実行可能" + +#: awx/main/models/rbac.py:55 +#, python-format +msgid "User is a member of the %s" +msgstr "ユーザーは %s のメンバーです" + +#: awx/main/models/rbac.py:56 +#, python-format +msgid "May view settings for the %s" +msgstr "%s の設定を表示可能" + +#: awx/main/models/rbac.py:57 +msgid "" +"May update project or inventory or group using the configured source update " +"system" +msgstr "設定済みのソース更新システムを使用してプロジェクト、インベントリーまたはグループを更新可能" + +#: awx/main/models/rbac.py:58 +#, python-format +msgid "Can use the %s in a job template" +msgstr "ジョブテンプレートで %s を使用可能" + +#: awx/main/models/rbac.py:122 +msgid "roles" +msgstr "ロール" + +#: awx/main/models/rbac.py:438 +msgid "role_ancestors" +msgstr "role_ancestors" + +#: awx/main/models/schedules.py:69 +msgid "Enables processing of this schedule by Tower." +msgstr "Tower によるこのスケジュールの処理を有効にします。" + +#: awx/main/models/schedules.py:75 +msgid "The first occurrence of the schedule occurs on or after this time." +msgstr "スケジュールの最初のオカレンスはこの時間またはこの時間の後に生じます。" + +#: awx/main/models/schedules.py:81 +msgid "" +"The last occurrence of the schedule occurs before this time, aftewards the " +"schedule expires." +msgstr "スケジュールの最後のオカレンスはこの時間の前に生じます。その後スケジュールが期限切れになります。" + +#: awx/main/models/schedules.py:85 +msgid "A value representing the schedules iCal recurrence rule." +msgstr "スケジュールの iCal 繰り返しルールを表す値。" + +#: awx/main/models/schedules.py:91 +msgid "The next time that the scheduled action will run." 
+msgstr "スケジュールされたアクションが次に実行される時間。" + +#: awx/main/models/unified_jobs.py:58 +msgid "New" +msgstr "新規" + +#: awx/main/models/unified_jobs.py:60 +msgid "Waiting" +msgstr "待機中" + +#: awx/main/models/unified_jobs.py:61 +msgid "Running" +msgstr "実行中" + +#: awx/main/models/unified_jobs.py:65 +msgid "Canceled" +msgstr "取り消されました" + +#: awx/main/models/unified_jobs.py:69 +msgid "Never Updated" +msgstr "更新されていません" + +#: awx/main/models/unified_jobs.py:73 +#: awx/ui/templates/ui/index.html:85 +#: awx/ui/templates/ui/index.html.py:104 +msgid "OK" +msgstr "OK" + +#: awx/main/models/unified_jobs.py:74 +msgid "Missing" +msgstr "不明" + +#: awx/main/models/unified_jobs.py:78 +msgid "No External Source" +msgstr "外部ソースがありません" + +#: awx/main/models/unified_jobs.py:85 +msgid "Updating" +msgstr "更新中" + +#: awx/main/models/unified_jobs.py:403 +msgid "Relaunch" +msgstr "再起動" + +#: awx/main/models/unified_jobs.py:404 +msgid "Callback" +msgstr "コールバック" + +#: awx/main/models/unified_jobs.py:405 +msgid "Scheduled" +msgstr "スケジュール済み" + +#: awx/main/models/unified_jobs.py:406 +msgid "Dependency" +msgstr "依存関係" + +#: awx/main/models/unified_jobs.py:407 +msgid "Workflow" +msgstr "ワークフロー" + +#: awx/main/models/unified_jobs.py:408 +msgid "Sync" +msgstr "同期" + +#: awx/main/models/unified_jobs.py:454 +msgid "The Tower node the job executed on." +msgstr "ジョブが実行される Tower ノード。" + +#: awx/main/models/unified_jobs.py:480 +msgid "The date and time the job was queued for starting." +msgstr "ジョブが開始のために待機した日時。" + +#: awx/main/models/unified_jobs.py:486 +msgid "The date and time the job finished execution." +msgstr "ジョブが実行を完了した日時。" + +#: awx/main/models/unified_jobs.py:492 +msgid "Elapsed time in seconds that the job ran." +msgstr "ジョブ実行の経過時間 (秒単位)" + +#: awx/main/models/unified_jobs.py:514 +msgid "" +"A status field to indicate the state of the job if it wasn't able to run and " +"capture stdout" +msgstr "stdout の実行およびキャプチャーを実行できない場合のジョブの状態を示すための状態フィールド" + +#: awx/main/notifications/base.py:17 +#: awx/main/notifications/email_backend.py:28 +msgid "{} #{} had status {} on Ansible Tower, view details at {}\n" +"\n" +msgstr "{} #{} には Ansible Tower のステータス {} があります。詳細については {} で確認してください\n" +"\n" + +#: awx/main/notifications/hipchat_backend.py:46 +msgid "Error sending messages: {}" +msgstr "メッセージの送信時のエラー: {}" + +#: awx/main/notifications/hipchat_backend.py:48 +msgid "Error sending message to hipchat: {}" +msgstr "メッセージの hipchat への送信時のエラー: {}" + +#: awx/main/notifications/irc_backend.py:54 +msgid "Exception connecting to irc server: {}" +msgstr "irc サーバーへの接続時の例外: {}" + +#: awx/main/notifications/pagerduty_backend.py:39 +msgid "Exception connecting to PagerDuty: {}" +msgstr "PagerDuty への接続時の例外: {}" + +#: awx/main/notifications/pagerduty_backend.py:48 +#: awx/main/notifications/slack_backend.py:52 +#: awx/main/notifications/twilio_backend.py:46 +msgid "Exception sending messages: {}" +msgstr "メッセージの送信時の例外: {}" + +#: awx/main/notifications/twilio_backend.py:36 +msgid "Exception connecting to Twilio: {}" +msgstr "Twilio への接続時の例外: {}" + +#: awx/main/notifications/webhook_backend.py:38 +#: awx/main/notifications/webhook_backend.py:40 +msgid "Error sending notification webhook: {}" +msgstr "通知 webhook の送信時のエラー: {}" + +#: awx/main/scheduler/__init__.py:130 +msgid "" +"Job spawned from workflow could not start because it was not in the right " +"state or required manual credentials" +msgstr "ワークフローから起動されるジョブは、正常な状態にないか、または手動の認証が必要であるために開始できませんでした" + +#: awx/main/tasks.py:180 +msgid "Ansible Tower host usage over 90%" +msgstr "Ansible Tower ホストの使用率が 
90% を超えました" + +#: awx/main/tasks.py:185 +msgid "Ansible Tower license will expire soon" +msgstr "Ansible Tower ライセンスがまもなく期限切れになります" + +#: awx/main/tasks.py:240 +msgid "status_str must be either succeeded or failed" +msgstr "status_str は成功または失敗のいずれかである必要があります" + +#: awx/main/utils/common.py:89 +#, python-format +msgid "Unable to convert \"%s\" to boolean" +msgstr "\"%s\" をブール値に変換できません" + +#: awx/main/utils/common.py:243 +#, python-format +msgid "Unsupported SCM type \"%s\"" +msgstr "サポートされない SCM タイプ \"%s\"" + +#: awx/main/utils/common.py:250 +#: awx/main/utils/common.py:262 +#: awx/main/utils/common.py:281 +#, python-format +msgid "Invalid %s URL" +msgstr "無効な %s URL" + +#: awx/main/utils/common.py:252 +#: awx/main/utils/common.py:290 +#, python-format +msgid "Unsupported %s URL" +msgstr "サポートされていない %s URL" + +#: awx/main/utils/common.py:292 +#, python-format +msgid "Unsupported host \"%s\" for file:// URL" +msgstr "ファイル:// URL のサポートされていないホスト \"%s\" " + +#: awx/main/utils/common.py:294 +#, python-format +msgid "Host is required for %s URL" +msgstr "%s URL にはホストが必要です" + +#: awx/main/utils/common.py:312 +#, python-format +msgid "Username must be \"git\" for SSH access to %s." +msgstr "%s への SSH アクセスではユーザー名を \"git\" にする必要があります。" + +#: awx/main/utils/common.py:318 +#, python-format +msgid "Username must be \"hg\" for SSH access to %s." +msgstr "%s への SSH アクセスではユーザー名を \"hg\" にする必要があります。" + +#: awx/main/validators.py:60 +#, python-format +msgid "Invalid certificate or key: %r..." +msgstr "無効な証明書またはキー: %r..." + +#: awx/main/validators.py:74 +#, python-format +msgid "Invalid private key: unsupported type \"%s\"" +msgstr "無効な秘密鍵: サポートされていないタイプ \"%s\"" + +#: awx/main/validators.py:78 +#, python-format +msgid "Unsupported PEM object type: \"%s\"" +msgstr "サポートされていない PEM オブジェクトタイプ: \"%s\"" + +#: awx/main/validators.py:103 +msgid "Invalid base64-encoded data" +msgstr "無効な base64 エンコードされたデータ" + +#: awx/main/validators.py:122 +msgid "Exactly one private key is required." +msgstr "秘密鍵が 1 つのみ必要です。" + +#: awx/main/validators.py:124 +msgid "At least one private key is required." +msgstr "1 つ以上の秘密鍵が必要です。" + +#: awx/main/validators.py:126 +#, python-format +msgid "" +"At least %(min_keys)d private keys are required, only %(key_count)d provided." +"" +msgstr "%(min_keys)d 以上の秘密鍵が必要です。提供数: %(key_count)d のみ。" + +#: awx/main/validators.py:129 +#, python-format +msgid "Only one private key is allowed, %(key_count)d provided." +msgstr "秘密鍵が 1 つのみ許可されます。提供数: %(key_count)d" + +#: awx/main/validators.py:131 +#, python-format +msgid "" +"No more than %(max_keys)d private keys are allowed, %(key_count)d provided." +msgstr "%(max_keys)d を超える秘密鍵は許可されません。提供数: %(key_count)d " + +#: awx/main/validators.py:136 +msgid "Exactly one certificate is required." +msgstr "証明書が 1 つのみ必要です。" + +#: awx/main/validators.py:138 +msgid "At least one certificate is required." +msgstr "1 つ以上の証明書が必要です。" + +#: awx/main/validators.py:140 +#, python-format +msgid "" +"At least %(min_certs)d certificates are required, only %(cert_count)d " +"provided." +msgstr "%(min_certs)d 以上の証明書が必要です。提供数: %(cert_count)d のみ。" + +#: awx/main/validators.py:143 +#, python-format +msgid "Only one certificate is allowed, %(cert_count)d provided." +msgstr "証明書が 1 つのみ許可されます。提供数: %(cert_count)d" + +#: awx/main/validators.py:145 +#, python-format +msgid "" +"No more than %(max_certs)d certificates are allowed, %(cert_count)d provided." 
+"" +msgstr "%(max_certs)d を超える証明書は許可されません。提供数: %(cert_count)d" + +#: awx/main/views.py:20 +msgid "API Error" +msgstr "API エラー" + +#: awx/main/views.py:49 +msgid "Bad Request" +msgstr "不正な要求です" + +#: awx/main/views.py:50 +msgid "The request could not be understood by the server." +msgstr "要求がサーバーによって認識されませんでした。" + +#: awx/main/views.py:57 +msgid "Forbidden" +msgstr "許可されていません" + +#: awx/main/views.py:58 +msgid "You don't have permission to access the requested resource." +msgstr "要求されたリソースにアクセスするためのパーミッションがありません。" + +#: awx/main/views.py:65 +msgid "Not Found" +msgstr "見つかりません" + +#: awx/main/views.py:66 +msgid "The requested resource could not be found." +msgstr "要求されたリソースは見つかりませんでした。" + +#: awx/main/views.py:73 +msgid "Server Error" +msgstr "サーバーエラー" + +#: awx/main/views.py:74 +msgid "A server error has occurred." +msgstr "サーバーエラーが発生しました。" + +#: awx/settings/defaults.py:611 +msgid "Chicago" +msgstr "シカゴ" + +#: awx/settings/defaults.py:612 +msgid "Dallas/Ft. Worth" +msgstr "ダラス/フォートワース" + +#: awx/settings/defaults.py:613 +msgid "Northern Virginia" +msgstr "北バージニア" + +#: awx/settings/defaults.py:614 +msgid "London" +msgstr "ロンドン" + +#: awx/settings/defaults.py:615 +msgid "Sydney" +msgstr "シドニー" + +#: awx/settings/defaults.py:616 +msgid "Hong Kong" +msgstr "香港" + +#: awx/settings/defaults.py:643 +msgid "US East (Northern Virginia)" +msgstr "米国東部 (バージニア北部)" + +#: awx/settings/defaults.py:644 +msgid "US East (Ohio)" +msgstr "米国東部 (オハイオ)" + +#: awx/settings/defaults.py:645 +msgid "US West (Oregon)" +msgstr "米国西部 (オレゴン)" + +#: awx/settings/defaults.py:646 +msgid "US West (Northern California)" +msgstr "米国西部 (北カリフォルニア)" + +#: awx/settings/defaults.py:647 +msgid "Canada (Central)" +msgstr "カナダ (中部)" + +#: awx/settings/defaults.py:648 +msgid "EU (Frankfurt)" +msgstr "EU (フランクフルト)" + +#: awx/settings/defaults.py:649 +msgid "EU (Ireland)" +msgstr "EU (アイルランド)" + +#: awx/settings/defaults.py:650 +msgid "EU (London)" +msgstr "EU (ロンドン)" + +#: awx/settings/defaults.py:651 +msgid "Asia Pacific (Singapore)" +msgstr "アジア太平洋 (シンガポール)" + +#: awx/settings/defaults.py:652 +msgid "Asia Pacific (Sydney)" +msgstr "アジア太平洋 (シドニー)" + +#: awx/settings/defaults.py:653 +msgid "Asia Pacific (Tokyo)" +msgstr "アジア太平洋 (東京)" + +#: awx/settings/defaults.py:654 +msgid "Asia Pacific (Seoul)" +msgstr "アジア太平洋 (ソウル)" + +#: awx/settings/defaults.py:655 +msgid "Asia Pacific (Mumbai)" +msgstr "アジア太平洋 (ムンバイ)" + +#: awx/settings/defaults.py:656 +msgid "South America (Sao Paulo)" +msgstr "南アメリカ (サンパウロ)" + +#: awx/settings/defaults.py:657 +msgid "US West (GovCloud)" +msgstr "米国西部 (GovCloud)" + +#: awx/settings/defaults.py:658 +msgid "China (Beijing)" +msgstr "中国 (北京)" + +#: awx/settings/defaults.py:707 +msgid "US East (B)" +msgstr "米国東部 (B)" + +#: awx/settings/defaults.py:708 +msgid "US East (C)" +msgstr "米国東部 (C)" + +#: awx/settings/defaults.py:709 +msgid "US East (D)" +msgstr "米国東部 (D)" + +#: awx/settings/defaults.py:710 +msgid "US Central (A)" +msgstr "米国中部 (A)" + +#: awx/settings/defaults.py:711 +msgid "US Central (B)" +msgstr "米国中部 (B)" + +#: awx/settings/defaults.py:712 +msgid "US Central (C)" +msgstr "米国中部 (C)" + +#: awx/settings/defaults.py:713 +msgid "US Central (F)" +msgstr "米国中部 (F)" + +#: awx/settings/defaults.py:714 +msgid "Europe West (B)" +msgstr "欧州西部 (B)" + +#: awx/settings/defaults.py:715 +msgid "Europe West (C)" +msgstr "欧州西部 (C)" + +#: awx/settings/defaults.py:716 +msgid "Europe West (D)" +msgstr "欧州西部 (D)" + +#: awx/settings/defaults.py:717 +msgid "Asia East (A)" +msgstr "アジア東部 (A)" + +#: 
awx/settings/defaults.py:718 +msgid "Asia East (B)" +msgstr "アジア東部 (B)" + +#: awx/settings/defaults.py:719 +msgid "Asia East (C)" +msgstr "アジア東部 (C)" + +#: awx/settings/defaults.py:743 +msgid "US Central" +msgstr "米国中部" + +#: awx/settings/defaults.py:744 +msgid "US East" +msgstr "米国東部" + +#: awx/settings/defaults.py:745 +msgid "US East 2" +msgstr "米国東部 2" + +#: awx/settings/defaults.py:746 +msgid "US North Central" +msgstr "米国中北部" + +#: awx/settings/defaults.py:747 +msgid "US South Central" +msgstr "米国中南部" + +#: awx/settings/defaults.py:748 +msgid "US West" +msgstr "米国西部" + +#: awx/settings/defaults.py:749 +msgid "Europe North" +msgstr "欧州北部" + +#: awx/settings/defaults.py:750 +msgid "Europe West" +msgstr "欧州西部" + +#: awx/settings/defaults.py:751 +msgid "Asia Pacific East" +msgstr "アジア太平洋東部" + +#: awx/settings/defaults.py:752 +msgid "Asia Pacific Southeast" +msgstr "アジア太平洋南東部" + +#: awx/settings/defaults.py:753 +msgid "Japan East" +msgstr "日本東部" + +#: awx/settings/defaults.py:754 +msgid "Japan West" +msgstr "日本西部" + +#: awx/settings/defaults.py:755 +msgid "Brazil South" +msgstr "ブラジル南部" + +#: awx/sso/apps.py:9 +msgid "Single Sign-On" +msgstr "シングルサインオン" + +#: awx/sso/conf.py:27 +msgid "" +"Mapping to organization admins/users from social auth accounts. This setting\n" +"controls which users are placed into which Tower organizations based on\n" +"their username and email address. Dictionary keys are organization names.\n" +"organizations will be created if not present if the license allows for\n" +"multiple organizations, otherwise the single default organization is used\n" +"regardless of the key. Values are dictionaries defining the options for\n" +"each organization's membership. For each organization it is possible to\n" +"specify which users are automatically users of the organization and also\n" +"which users can administer the organization. \n" +"\n" +"- admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated.\n" +" If True, all users using social auth will automatically be added as admins\n" +" of the organization.\n" +" If False, no social auth users will be automatically added as admins of\n" +" the organization.\n" +" If a string or list of strings, specifies the usernames and emails for\n" +" users who will be added to the organization. Strings in the format\n" +" \"//\" will be interpreted as JavaScript regular " +"expressions and\n" +" may also be used instead of string literals; only \"i\" and \"m\" are " +"supported\n" +" for flags.\n" +"- remove_admins: True/False. Defaults to True.\n" +" If True, a user who does not match will be removed from the organization's\n" +" administrative list.\n" +"- users: None, True/False, string or list of strings. Same rules apply as " +"for\n" +" admins.\n" +"- remove_users: True/False. Defaults to True. Same rules as apply for \n" +" remove_admins." 
+msgstr "" +"ソーシャル認証アカウントから組織管理者/ユーザーへのマッピングです。この設定\n" +"は、ユーザーのユーザー名とメールアドレスに基づいてどのユーザーをどの Tower 組織に配置するかを制御します。\n" +"辞書キーは組織名です。\n" +"組織は、存在しない場合、ライセンスで複数の組織が許可される場合に作成されます。そうでない場合、キーとは無関係に単一のデフォルト組織が使用されます。\n" +"値は、各組織のメンバーシップのオプションを定義する辞書です。\n" +"各組織については、自動的に組織のユーザーにするユーザーと\n" +"組織を管理できるユーザーを指定できます。\n" +"\n" +"- admins: None、True/False、文字列または文字列の一覧。\n" +" None の場合、組織管理者は更新されません。\n" +" True の場合、ソーシャル認証を使用するすべてのユーザーが組織の管理者として\n" +" 自動的に追加されます。\n" +" False の場合、ソーシャル認証ユーザーは組織の管理者として自動的に\n" +" 追加されません。\n" +" 文字列または文字列の一覧の場合、組織に追加されるユーザーの\n" +" ユーザー名およびメールを指定します。\"//\" 形式の文字列\n" +" は JavaScript 正規表現として解釈され、文字列リテラルの代わりに使用できます。\n" +" \"i\" と \"m\" のみがフラグでサポートされます。\n" +" - remove_admins: True/False。デフォルトで True に設定されます。\n" +" True の場合、一致しないユーザーは組織の管理者リストから削除されます。\n" +" - users: None、True/False、文字列または文字列の一覧。管理者の場合と同じルールが\n" +" 適用されます。\n" +"- remove_users: True/False。デフォルトで True に設定されます。remove_admins の\n" +" 場合と同じルールが適用されます。" + +#: awx/sso/conf.py:76 +msgid "" +"Mapping of team members (users) from social auth accounts. Keys are team\n" +"names (will be created if not present). Values are dictionaries of options\n" +"for each team's membership, where each can contain the following parameters:\n" +"\n" +"- organization: string. The name of the organization to which the team\n" +" belongs. The team will be created if the combination of organization and\n" +" team name does not exist. The organization will first be created if it\n" +" does not exist. If the license does not allow for multiple organizations,\n" +" the team will always be assigned to the single default organization.\n" +"- users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all social auth users will be added/removed as team\n" +" members.\n" +" If a string or list of strings, specifies expressions used to match users.\n" +" User will be added as a team member if the username or email matches.\n" +" Strings in the format \"//\" will be interpreted as " +"JavaScript\n" +" regular expressions and may also be used instead of string literals; only " +"\"i\"\n" +" and \"m\" are supported for flags.\n" +"- remove: True/False. Defaults to True. If True, a user who does not match\n" +" the rules above will be removed from the team." +msgstr "" +"ソーシャル認証アカウントからチームメンバー (ユーザー) へのマッピングです。\n" +"キーはチーム名です (存在しない場合に作成されます)。値は各チームの\n" +"メンバーシップのオプションの辞書です。各値には以下のパラメーターが含まれます。\n" +"\n" +"- organization: 文字列。チームが属する組織の名前です。\n" +" チームは組織とチーム名の組み合わせが存在しない場合に作成されます。\n" +" 組織がまず作成されます (存在しない場合)。ライセンスにより複数の組織が許可\n" +" されない場合、チームは常に単一のデフォルト組織に割り当てられます。\n" +"- ユーザー: None、True/False、文字列または文字列の一覧。\n" +" None の場合、チームメンバーは更新されません。\n" +" True/False の場合、すべてのソーシャル認証ユーザーがチームメンバーとして\n" +" 追加/削除されます。\n" +" 文字列または文字列の一覧の場合、ユーザーに一致する表現を指定します。\n" +" ユーザーは、ユーザー名またはメールが一致する場合にチームメンバーとして\n" +" 追加されます。\n" +" \"//\" 形式の文字列が JavaScript 正規表現として解釈され、\n" +" 文字列リテラルの代わりに使用することもできます。\"i\"\n" +" および \"m\" のみがフラグでサポートされます。\n" +"- remove: True/False。デフォルトで True に設定されます。True の場合、上記のルール\n" +" に一致しないユーザーはチームから削除されます。" + +#: awx/sso/conf.py:119 +msgid "Authentication Backends" +msgstr "認証バックエンド" + +#: awx/sso/conf.py:120 +msgid "" +"List of authentication backends that are enabled based on license features " +"and other authentication settings." 
+msgstr "ライセンスの特長およびその他の認証設定に基づいて有効にされる認証バックエンドの一覧。" + +#: awx/sso/conf.py:133 +msgid "Social Auth Organization Map" +msgstr "ソーシャル認証組織マップ" + +#: awx/sso/conf.py:145 +msgid "Social Auth Team Map" +msgstr "ソーシャル認証チームマップ" + +#: awx/sso/conf.py:157 +msgid "Social Auth User Fields" +msgstr "ソーシャル認証ユーザーフィールド" + +#: awx/sso/conf.py:158 +msgid "" +"When set to an empty list `[]`, this setting prevents new user accounts from " +"being created. Only users who have previously logged in using social auth or " +"have a user account with a matching email address will be able to login." +msgstr "" +"空リスト " +"`[]`に設定される場合、この設定により新規ユーザーアカウントは作成できなくなります。ソーシャル認証を使ってログインしたことのあるユーザーまたは一致するメールアドレスのユーザーアカウントを持つユーザーのみがログインできます。" + +#: awx/sso/conf.py:176 +msgid "LDAP Server URI" +msgstr "LDAP サーバー URI" + +#: awx/sso/conf.py:177 +msgid "" +"URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-" +"SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be " +"specified by separating with spaces or commas. LDAP authentication is " +"disabled if this parameter is empty." +msgstr "" +"\"ldap://ldap.example.com:389\" (非 SSL) または \"ldaps://ldap.example.com:636\" " +"(SSL) などの LDAP サーバーに接続する URI です。複数の LDAP サーバーをスペースまたはカンマで区切って指定できます。LDAP " +"認証は、このパラメーターが空の場合は無効になります。" + +#: awx/sso/conf.py:181 +#: awx/sso/conf.py:199 +#: awx/sso/conf.py:211 +#: awx/sso/conf.py:223 +#: awx/sso/conf.py:239 +#: awx/sso/conf.py:258 +#: awx/sso/conf.py:280 +#: awx/sso/conf.py:296 +#: awx/sso/conf.py:315 +#: awx/sso/conf.py:332 +#: awx/sso/conf.py:349 +#: awx/sso/conf.py:365 +#: awx/sso/conf.py:382 +#: awx/sso/conf.py:420 +#: awx/sso/conf.py:461 +msgid "LDAP" +msgstr "LDAP" + +#: awx/sso/conf.py:193 +msgid "LDAP Bind DN" +msgstr "LDAP バインド DN" + +#: awx/sso/conf.py:194 +msgid "" +"DN (Distinguished Name) of user to bind for all search queries. Normally in " +"the format \"CN=Some User,OU=Users,DC=example,DC=com\" but may also be " +"specified as \"DOMAIN\\username\" for Active Directory. This is the system " +"user account we will use to login to query LDAP for other user information." +msgstr "" +"すべての検索クエリーについてバインドするユーザーの DN (識別名) です。通常、形式は \"CN=Some User,OU=Users,DC=" +"example,DC=com\" になりますが、Active Directory の場合 \"DOMAIN\\username\" " +"として指定することもできます。これは、他のユーザー情報についての LDAP クエリー実行時のログインに使用するシステムユーザーアカウントです。" + +#: awx/sso/conf.py:209 +msgid "LDAP Bind Password" +msgstr "LDAP バインドパスワード" + +#: awx/sso/conf.py:210 +msgid "Password used to bind LDAP user account." +msgstr "LDAP ユーザーアカウントをバインドするために使用されるパスワード。" + +#: awx/sso/conf.py:221 +msgid "LDAP Start TLS" +msgstr "LDAP Start TLS" + +#: awx/sso/conf.py:222 +msgid "Whether to enable TLS when the LDAP connection is not using SSL." +msgstr "LDAP 接続が SSL を使用していない場合に TLS を有効にするかどうか。" + +#: awx/sso/conf.py:232 +msgid "LDAP Connection Options" +msgstr "LDAP 接続オプション" + +#: awx/sso/conf.py:233 +msgid "" +"Additional options to set for the LDAP connection. LDAP referrals are " +"disabled by default (to prevent certain LDAP queries from hanging with AD). " +"Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://" +"www.python-ldap.org/doc/html/ldap.html#options for possible options and " +"values that can be set." 
+msgstr "" +"LDAP 設定に設定する追加オプションです。LDAP 照会はデフォルトで無効にされます (特定の LDAP クエリーが AD " +"でハングすることを避けるため)。オプション名は文字列でなければなりません (例: " +"\"OPT_REFERRALS\")。可能なオプションおよび設定できる値については、https://www.python-ldap.org/doc/" +"html/ldap.html#options を参照してください。" + +#: awx/sso/conf.py:251 +msgid "LDAP User Search" +msgstr "LDAP ユーザー検索" + +#: awx/sso/conf.py:252 +msgid "" +"LDAP search query to find users. Any user that matches the given pattern " +"will be able to login to Tower. The user should also be mapped into an " +"Tower organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). " +"If multiple search queries need to be supported use of \"LDAPUnion\" is " +"possible. See python-ldap documentation as linked at the top of this section." +"" +msgstr "" +"ユーザーを検索するための LDAP 検索クエリーです。指定パターンに一致するユーザーは Tower にログインできます。ユーザーは Tower " +"組織にマップされている必要もあります (AUTH_LDAP_ORGANIZATION_MAP " +"設定で定義)。複数の検索クエリーがサポートされる必要がある場合、\"LDAPUnion\" を使用できます。このセクションの先頭にリンクされている " +"python-ldap ドキュメントを参照してください。" + +#: awx/sso/conf.py:274 +msgid "LDAP User DN Template" +msgstr "LDAP ユーザー DN テンプレート" + +#: awx/sso/conf.py:275 +msgid "" +"Alternative to user search, if user DNs are all of the same format. This " +"approach will be more efficient for user lookups than searching if it is " +"usable in your organizational environment. If this setting has a value it " +"will be used instead of AUTH_LDAP_USER_SEARCH." +msgstr "" +"ユーザー DN " +"の形式がすべて同じである場合のユーザー検索の代替法になります。この方法は、組織の環境で使用可能であるかどうかを検索する場合よりも効率的なユーザー検索方法になります。この設定に値がある場合、それが " +"AUTH_LDAP_USER_SEARCH の代わりに使用されます。" + +#: awx/sso/conf.py:290 +msgid "LDAP User Attribute Map" +msgstr "LDAP ユーザー属性マップ" + +#: awx/sso/conf.py:291 +msgid "" +"Mapping of LDAP user schema to Tower API user attributes (key is user " +"attribute name, value is LDAP attribute name). The default setting is valid " +"for ActiveDirectory but users with other LDAP configurations may need to " +"change the values (not the keys) of the dictionary/hash-table." +msgstr "" +"LDAP ユーザースキーマの Tower API ユーザー属性へのマッピングです (キーはユーザー属性名で、値は LDAP " +"属性名です)。デフォルト設定は ActiveDirectory で有効ですが、他の LDAP 設定を持つユーザーは、辞書/ハッシュテーブルの値 " +"(キーではない) を変更する必要ある場合があります。" + +#: awx/sso/conf.py:310 +msgid "LDAP Group Search" +msgstr "LDAP グループ検索" + +#: awx/sso/conf.py:311 +msgid "" +"Users in Tower are mapped to organizations based on their membership in LDAP " +"groups. This setting defines the LDAP search query to find groups. Note that " +"this, unlike the user search above, does not support LDAPSearchUnion." +msgstr "" +"Tower のユーザーは LDAP グループのメンバーシップに基づいて組織にマップされます。この設定は、グループを検索できるように LDAP " +"検索クエリーを定義します。上記のユーザー検索とは異なり、これは LDAPSearchUnion をサポートしないことに注意してください。" + +#: awx/sso/conf.py:328 +msgid "LDAP Group Type" +msgstr "LDAP グループタイプ" + +#: awx/sso/conf.py:329 +msgid "" +"The group type may need to be changed based on the type of the LDAP server. " +"Values are listed at: http://pythonhosted.org/django-auth-ldap/groups." +"html#types-of-groups" +msgstr "" +"グループタイプは LDAP サーバーのタイプに基づいて変更する必要がある場合があります。値は以下に記載されています: http://" +"pythonhosted.org/django-auth-ldap/groups.html#types-of-groups" + +#: awx/sso/conf.py:344 +msgid "LDAP Require Group" +msgstr "LDAP 要求グループ" + +#: awx/sso/conf.py:345 +msgid "" +"Group DN required to login. If specified, user must be a member of this " +"group to login via LDAP. If not set, everyone in LDAP that matches the user " +"search will be able to login via Tower. Only one require group is supported." 
+msgstr "" +"ログインに必要なグループ DN。指定されている場合、LDAP " +"経由でログインするにはユーザーはこのグループのメンバーである必要があります。設定されていない場合は、ユーザー検索に一致する LDAP " +"のすべてのユーザーが Tower 経由でログインできます。1つの要求グループのみがサポートされます。" + +#: awx/sso/conf.py:361 +msgid "LDAP Deny Group" +msgstr "LDAP 拒否グループ" + +#: awx/sso/conf.py:362 +msgid "" +"Group DN denied from login. If specified, user will not be allowed to login " +"if a member of this group. Only one deny group is supported." +msgstr "" +"グループ DN がログインで拒否されます。指定されている場合、ユーザーはこのグループのメンバーの場合にログインできません。1 " +"つの拒否グループのみがサポートされます。" + +#: awx/sso/conf.py:375 +msgid "LDAP User Flags By Group" +msgstr "LDAP ユーザーフラグ (グループ別)" + +#: awx/sso/conf.py:376 +msgid "" +"User profile flags updated from group membership (key is user attribute " +"name, value is group DN). These are boolean fields that are matched based " +"on whether the user is a member of the given group. So far only " +"is_superuser is settable via this method. This flag is set both true and " +"false at login time based on current LDAP settings." +msgstr "" +"グループメンバーシップから更新されるユーザープロファイルフラグです (キーはユーザー属性名、値はグループ " +"DN)。これらは、ユーザーが指定グループのメンバーであるかに基づいて一致するブール値フィールドです。is_superuser " +"のみがこのメソッドで設定可能です。このフラグは、現在の LDAP 設定に基づいてログイン時に true および false に設定されます。" + +#: awx/sso/conf.py:394 +msgid "LDAP Organization Map" +msgstr "LDAP 組織マップ" + +#: awx/sso/conf.py:395 +msgid "" +"Mapping between organization admins/users and LDAP groups. This controls " +"what users are placed into what Tower organizations relative to their LDAP " +"group memberships. Keys are organization names. Organizations will be " +"created if not present. Values are dictionaries defining the options for " +"each organization's membership. For each organization it is possible to " +"specify what groups are automatically users of the organization and also " +"what groups can administer the organization.\n" +"\n" +" - admins: None, True/False, string or list of strings.\n" +" If None, organization admins will not be updated based on LDAP values.\n" +" If True, all users in LDAP will automatically be added as admins of the " +"organization.\n" +" If False, no LDAP users will be automatically added as admins of the " +"organization.\n" +" If a string or list of strings, specifies the group DN(s) that will be " +"added of the organization if they match any of the specified groups.\n" +" - remove_admins: True/False. Defaults to True.\n" +" If True, a user who is not an member of the given groups will be removed " +"from the organization's administrative list.\n" +" - users: None, True/False, string or list of strings. Same rules apply as " +"for admins.\n" +" - remove_users: True/False. Defaults to True. Same rules apply as for " +"remove_admins." 
+msgstr "" +"組織管理者/ユーザーと LDAP グループ間のマッピングです。これは、LDAP グループメンバーシップと相対してどのユーザーをどの Tower " +"組織に配置するかを制御します。キーは組織名です。組織は存在しない場合に作成されます。値は、各組織のメンバーシップのオプションを定義する辞書です。各組織については、自動的に組織のユーザーにするユーザーと組織を管理できるグループを指定できます。\n" +"\n" +" - admins: None、True/False、文字列または文字列の一覧。\n" +" None の場合、組織管理者は LDAP 値に基づいて更新されません。\n" +" True の場合、LDAP のすべてのユーザーが組織の管理者として自動的に追加されます。\n" +" False の場合、LDAP ユーザーは組織の管理者として自動的に追加されません。\n" +" 文字列または文字列の一覧の場合、指定されるグループのいずれかに一致する場合に組織に追加されるグループ DN を指定します。\n" +" - remove_admins: True/False。デフォルトで True に設定されます。\n" +" True の場合、指定グループのメンバーでないユーザーは組織の管理者リストから削除されます。\n" +" - users: None、True/False、文字列または文字列の一覧。管理者の場合と同じルールが適用されます。\n" +" - remove_users: True/False。デフォルトで True に設定されます。remove_admins " +"の場合と同じルールが適用されます。" + +#: awx/sso/conf.py:443 +msgid "LDAP Team Map" +msgstr "LDAP チームマップ" + +#: awx/sso/conf.py:444 +msgid "" +"Mapping between team members (users) and LDAP groups. Keys are team names " +"(will be created if not present). Values are dictionaries of options for " +"each team's membership, where each can contain the following parameters:\n" +"\n" +" - organization: string. The name of the organization to which the team " +"belongs. The team will be created if the combination of organization and " +"team name does not exist. The organization will first be created if it does " +"not exist.\n" +" - users: None, True/False, string or list of strings.\n" +" If None, team members will not be updated.\n" +" If True/False, all LDAP users will be added/removed as team members.\n" +" If a string or list of strings, specifies the group DN(s). User will be " +"added as a team member if the user is a member of ANY of these groups.\n" +"- remove: True/False. Defaults to True. If True, a user who is not a member " +"of the given groups will be removed from the team." +msgstr "" +"チームメンバー (ユーザー) と LDAP グループ間のマッピングです。キーはチーム名です " +"(存在しない場合に作成されます)。値は各チームのメンバーシップのオプションの辞書です。各値には以下のパラメーターが含まれます。\n" +"\n" +" - organization: 文字列。チームが属する組織の名前です。組織とチーム名の組み合わせ\n" +" が存在しない場合にチームが作成されます。組織がまず作成されます (存在しない場合)。\n" +" - users: None、True/False、文字列または文字列の一覧。\n" +" None の場合、チームメンバーは更新されません。\n" +" True/False の場合、すべての LDAP ユーザーがチームメンバーとして追加/削除されます。\n" +" 文字列または文字列の一覧の場合、グループ DN を指定します。\n" +" ユーザーがこれらのグループのいずれかのメンバーである場合、チームメンバーとして追加されます。\n" +"- remove: True/False。デフォルトで True に設定されます。True " +"の場合、指定グループのメンバーでないユーザーはチームから削除されます。" + +#: awx/sso/conf.py:487 +msgid "RADIUS Server" +msgstr "RADIUS サーバー" + +#: awx/sso/conf.py:488 +msgid "" +"Hostname/IP of RADIUS server. RADIUS authentication will be disabled if this " +"setting is empty." +msgstr "RADIUS サーバーのホスト名/IP です。この設定が空の場合は RADIUS 認証は無効にされます。" + +#: awx/sso/conf.py:490 +#: awx/sso/conf.py:504 +#: awx/sso/conf.py:516 +msgid "RADIUS" +msgstr "RADIUS" + +#: awx/sso/conf.py:502 +msgid "RADIUS Port" +msgstr "RADIUS ポート" + +#: awx/sso/conf.py:503 +msgid "Port of RADIUS server." +msgstr "RADIUS サーバーのポート。" + +#: awx/sso/conf.py:514 +msgid "RADIUS Secret" +msgstr "RADIUS シークレット" + +#: awx/sso/conf.py:515 +msgid "Shared secret for authenticating to RADIUS server." +msgstr "RADIUS サーバーに対して認証するための共有シークレット。" + +#: awx/sso/conf.py:531 +msgid "Google OAuth2 Callback URL" +msgstr "Google OAuth2 コールバック URL" + +#: awx/sso/conf.py:532 +msgid "" +"Create a project at https://console.developers.google.com/ to obtain an " +"OAuth2 key and secret for a web application. Ensure that the Google+ API is " +"enabled. Provide this URL as the callback URL for your application." +msgstr "" +"web アプリケーションの OAuth2 キーおよびシークレットを取得するために https://console.developers.google." 
+"com/ にプロジェクトを作成します。Google+ API が有効であることを確認します。この URL をアプリケーションのコールバック URL " +"として指定します。" + +#: awx/sso/conf.py:536 +#: awx/sso/conf.py:547 +#: awx/sso/conf.py:558 +#: awx/sso/conf.py:571 +#: awx/sso/conf.py:585 +#: awx/sso/conf.py:597 +#: awx/sso/conf.py:609 +msgid "Google OAuth2" +msgstr "Google OAuth2" + +#: awx/sso/conf.py:545 +msgid "Google OAuth2 Key" +msgstr "Google OAuth2 キー" + +#: awx/sso/conf.py:546 +msgid "" +"The OAuth2 key from your web application at https://console.developers." +"google.com/." +msgstr "web アプリケーションの OAuth2 キー (https://console.developers.google.com/)。" + +#: awx/sso/conf.py:556 +msgid "Google OAuth2 Secret" +msgstr "Google OAuth2 シークレット" + +#: awx/sso/conf.py:557 +msgid "" +"The OAuth2 secret from your web application at https://console.developers." +"google.com/." +msgstr "web アプリケーションの OAuth2 シークレット (https://console.developers.google.com/)。" + +#: awx/sso/conf.py:568 +msgid "Google OAuth2 Whitelisted Domains" +msgstr "Google OAuth2 ホワイトリストドメイン" + +#: awx/sso/conf.py:569 +msgid "" +"Update this setting to restrict the domains who are allowed to login using " +"Google OAuth2." +msgstr "この設定を更新し、Google OAuth2 を使用してログインできるドメインを制限します。" + +#: awx/sso/conf.py:580 +msgid "Google OAuth2 Extra Arguments" +msgstr "Google OAuth2 追加引数" + +#: awx/sso/conf.py:581 +msgid "" +"Extra arguments for Google OAuth2 login. When only allowing a single domain " +"to authenticate, set to `{\"hd\": \"yourdomain.com\"}` and Google will not " +"display any other accounts even if the user is logged in with multiple " +"Google accounts." +msgstr "" +"Google OAuth2 ログインの追加引数です。単一ドメインの認証のみを許可する場合、`{\"hd\": \"yourdomain.com\"}` " +"に設定すると、Google はユーザーが複数の Google アカウントでログインしている場合でもその他のアカウントを表示しません。" + +#: awx/sso/conf.py:595 +msgid "Google OAuth2 Organization Map" +msgstr "Google OAuth2 組織マップ" + +#: awx/sso/conf.py:607 +msgid "Google OAuth2 Team Map" +msgstr "Google OAuth2 チームマップ" + +#: awx/sso/conf.py:623 +msgid "GitHub OAuth2 Callback URL" +msgstr "GitHub OAuth2 コールバック URL" + +#: awx/sso/conf.py:624 +msgid "" +"Create a developer application at https://github.com/settings/developers to " +"obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this " +"URL as the callback URL for your application." +msgstr "" +"OAuth2 キー (クライアント ID) およびシークレット (クライアントシークレット) を取得するために https://github.com/" +"settings/developers に開発者アプリケーションを作成します。この URL をアプリケーションのコールバック URL として指定します。" + +#: awx/sso/conf.py:628 +#: awx/sso/conf.py:639 +#: awx/sso/conf.py:649 +#: awx/sso/conf.py:661 +#: awx/sso/conf.py:673 +msgid "GitHub OAuth2" +msgstr "GitHub OAuth2" + +#: awx/sso/conf.py:637 +msgid "GitHub OAuth2 Key" +msgstr "GitHub OAuth2 キー" + +#: awx/sso/conf.py:638 +msgid "The OAuth2 key (Client ID) from your GitHub developer application." +msgstr "GitHub 開発者アプリケーションからの OAuth2 キー (クライアント ID)。" + +#: awx/sso/conf.py:647 +msgid "GitHub OAuth2 Secret" +msgstr "GitHub OAuth2 シークレット" + +#: awx/sso/conf.py:648 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub developer application." 
+msgstr "GitHub 開発者アプリケーションからの OAuth2 シークレット (クライアントシークレット)。" + +#: awx/sso/conf.py:659 +msgid "GitHub OAuth2 Organization Map" +msgstr "GitHub OAuth2 組織マップ" + +#: awx/sso/conf.py:671 +msgid "GitHub OAuth2 Team Map" +msgstr "GitHub OAuth2 チームマップ" + +#: awx/sso/conf.py:687 +msgid "GitHub Organization OAuth2 Callback URL" +msgstr "GitHub 組織 OAuth2 コールバック URL" + +#: awx/sso/conf.py:688 +#: awx/sso/conf.py:763 +msgid "" +"Create an organization-owned application at https://github.com/organizations/" +"/settings/applications and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). Provide this URL as the callback URL for your " +"application." +msgstr "" +"組織が所有するアプリケーションを https://github.com/organizations//settings/" +"applications に作成し、OAuth2 キー (クライアント ID) およびシークレット (クライアントシークレット) を取得します。この " +"URL をアプリケーションのコールバック URL として指定します。" + +#: awx/sso/conf.py:692 +#: awx/sso/conf.py:703 +#: awx/sso/conf.py:713 +#: awx/sso/conf.py:725 +#: awx/sso/conf.py:736 +#: awx/sso/conf.py:748 +msgid "GitHub Organization OAuth2" +msgstr "GitHub 組織 OAuth2" + +#: awx/sso/conf.py:701 +msgid "GitHub Organization OAuth2 Key" +msgstr "GitHub 組織 OAuth2 キー" + +#: awx/sso/conf.py:702 +#: awx/sso/conf.py:777 +msgid "The OAuth2 key (Client ID) from your GitHub organization application." +msgstr "GitHub 組織アプリケーションからの OAuth2 キー (クライアント ID)。" + +#: awx/sso/conf.py:711 +msgid "GitHub Organization OAuth2 Secret" +msgstr "GitHub 組織 OAuth2 シークレット" + +#: awx/sso/conf.py:712 +#: awx/sso/conf.py:787 +msgid "" +"The OAuth2 secret (Client Secret) from your GitHub organization application." +msgstr "GitHub 組織アプリケーションからの OAuth2 シークレット (クライアントシークレット)。" + +#: awx/sso/conf.py:722 +msgid "GitHub Organization Name" +msgstr "GitHub 組織名" + +#: awx/sso/conf.py:723 +msgid "" +"The name of your GitHub organization, as used in your organization's URL: " +"https://github.com//." +msgstr "GitHub 組織の名前で、組織の URL (https://github.com//) で使用されます。" + +#: awx/sso/conf.py:734 +msgid "GitHub Organization OAuth2 Organization Map" +msgstr "GitHub 組織 OAuth2 組織マップ" + +#: awx/sso/conf.py:746 +msgid "GitHub Organization OAuth2 Team Map" +msgstr "GitHub 組織 OAuth2 チームマップ" + +#: awx/sso/conf.py:762 +msgid "GitHub Team OAuth2 Callback URL" +msgstr "GitHub チーム OAuth2 コールバック URL" + +#: awx/sso/conf.py:767 +#: awx/sso/conf.py:778 +#: awx/sso/conf.py:788 +#: awx/sso/conf.py:800 +#: awx/sso/conf.py:811 +#: awx/sso/conf.py:823 +msgid "GitHub Team OAuth2" +msgstr "GitHub チーム OAuth2" + +#: awx/sso/conf.py:776 +msgid "GitHub Team OAuth2 Key" +msgstr "GitHub チーム OAuth2 キー" + +#: awx/sso/conf.py:786 +msgid "GitHub Team OAuth2 Secret" +msgstr "GitHub チーム OAuth2 シークレット" + +#: awx/sso/conf.py:797 +msgid "GitHub Team ID" +msgstr "GitHub チーム ID" + +#: awx/sso/conf.py:798 +msgid "" +"Find the numeric team ID using the Github API: http://fabian-kostadinov." +"github.io/2015/01/16/how-to-find-a-github-team-id/." +msgstr "" +"Github API を使用して数値のチーム ID を検索します: http://fabian-kostadinov.github.io/2015/01/" +"16/how-to-find-a-github-team-id/" + +#: awx/sso/conf.py:809 +msgid "GitHub Team OAuth2 Organization Map" +msgstr "GitHub チーム OAuth2 組織マップ" + +#: awx/sso/conf.py:821 +msgid "GitHub Team OAuth2 Team Map" +msgstr "GitHub チーム OAuth2 チームマップ" + +#: awx/sso/conf.py:837 +msgid "Azure AD OAuth2 Callback URL" +msgstr "Azure AD OAuth2 コールバック URL" + +#: awx/sso/conf.py:838 +msgid "" +"Register an Azure AD application as described by https://msdn.microsoft.com/" +"en-us/library/azure/dn132599.aspx and obtain an OAuth2 key (Client ID) and " +"secret (Client Secret). 
Provide this URL as the callback URL for your " +"application." +msgstr "" +"Azure AD アプリケーションを https://msdn.microsoft.com/en-us/library/azure/dn132599." +"aspx の説明に従って登録し、OAuth2 キー (クライアント ID) およびシークレット (クライアントシークレット) を取得します。この URL " +"をアプリケーションのコールバック URL として指定します。" + +#: awx/sso/conf.py:842 +#: awx/sso/conf.py:853 +#: awx/sso/conf.py:863 +#: awx/sso/conf.py:875 +#: awx/sso/conf.py:887 +msgid "Azure AD OAuth2" +msgstr "Azure AD OAuth2" + +#: awx/sso/conf.py:851 +msgid "Azure AD OAuth2 Key" +msgstr "Azure AD OAuth2 キー" + +#: awx/sso/conf.py:852 +msgid "The OAuth2 key (Client ID) from your Azure AD application." +msgstr "Azure AD アプリケーションからの OAuth2 キー (クライアント ID)。" + +#: awx/sso/conf.py:861 +msgid "Azure AD OAuth2 Secret" +msgstr "Azure AD OAuth2 シークレット" + +#: awx/sso/conf.py:862 +msgid "The OAuth2 secret (Client Secret) from your Azure AD application." +msgstr "Azure AD アプリケーションからの OAuth2 シークレット (クライアントシークレット)。" + +#: awx/sso/conf.py:873 +msgid "Azure AD OAuth2 Organization Map" +msgstr "Azure AD OAuth2 組織マップ" + +#: awx/sso/conf.py:885 +msgid "Azure AD OAuth2 Team Map" +msgstr "Azure AD OAuth2 チームマップ" + +#: awx/sso/conf.py:906 +msgid "SAML Service Provider Callback URL" +msgstr "SAML サービスプロバイダーコールバック URL" + +#: awx/sso/conf.py:907 +msgid "" +"Register Tower as a service provider (SP) with each identity provider (IdP) " +"you have configured. Provide your SP Entity ID and this callback URL for " +"your application." +msgstr "" +"設定済みの各アイデンティティープロバイダー (IdP) で Tower をサービスプロバイダー (SP) として登録します。SP エンティティー ID " +"およびアプリケーションのこのコールバック URL を指定します。" + +#: awx/sso/conf.py:910 +#: awx/sso/conf.py:924 +#: awx/sso/conf.py:937 +#: awx/sso/conf.py:951 +#: awx/sso/conf.py:965 +#: awx/sso/conf.py:983 +#: awx/sso/conf.py:1005 +#: awx/sso/conf.py:1024 +#: awx/sso/conf.py:1044 +#: awx/sso/conf.py:1078 +#: awx/sso/conf.py:1091 +msgid "SAML" +msgstr "SAML" + +#: awx/sso/conf.py:921 +msgid "SAML Service Provider Metadata URL" +msgstr "SAML サービスプロバイダーメタデータ URL" + +#: awx/sso/conf.py:922 +msgid "" +"If your identity provider (IdP) allows uploading an XML metadata file, you " +"can download one from this URL." +msgstr "" +"アイデンティティープロバイダー (IdP) が XML メタデータファイルのアップロードを許可する場合、この URL からダウンロードできます。" + +#: awx/sso/conf.py:934 +msgid "SAML Service Provider Entity ID" +msgstr "SAML サービスプロバイダーエンティティー ID" + +#: awx/sso/conf.py:935 +msgid "" +"The application-defined unique identifier used as the audience of the SAML " +"service provider (SP) configuration." +msgstr "SAML サービスプロバイダー (SP) 設定の対象として使用されるアプリケーションで定義される固有識別子。" + +#: awx/sso/conf.py:948 +msgid "SAML Service Provider Public Certificate" +msgstr "SAML サービスプロバイダーの公開証明書" + +#: awx/sso/conf.py:949 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"certificate content here." +msgstr "サービスプロバイダー (SP) として使用するための Tower のキーペアを作成し、ここに証明書の内容を組み込みます。" + +#: awx/sso/conf.py:962 +msgid "SAML Service Provider Private Key" +msgstr "SAML サービスプロバイダーの秘密鍵" + +#: awx/sso/conf.py:963 +msgid "" +"Create a keypair for Tower to use as a service provider (SP) and include the " +"private key content here." +msgstr "サービスプロバイダー (SP) として使用するための Tower のキーペアを作成し、ここに秘密鍵の内容を組み込みます。" + +#: awx/sso/conf.py:981 +msgid "SAML Service Provider Organization Info" +msgstr "SAML サービスプロバイダーの組織情報" + +#: awx/sso/conf.py:982 +msgid "Configure this setting with information about your app."
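The SAML organization-info help text above (awx/sso/conf.py:982) is terse; the language-code validation later in this file suggests a per-locale dictionary. A sketch, assuming the python-social-auth key names and illustrative values:

```python
# Sketch of SAML SP organization info: keys are language codes, and each
# entry carries the name/displayname/url presented to the identity provider.
SOCIAL_AUTH_SAML_ORG_INFO = {
    "en-US": {
        "name": "example",
        "displayname": "Example, Inc.",
        "url": "https://www.example.com",
    },
}
```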
+msgstr "アプリの情報でこの設定を行います。" + +#: awx/sso/conf.py:1003 +msgid "SAML Service Provider Technical Contact" +msgstr "SAML サービスプロバイダーテクニカルサポートの問い合わせ先" + +#: awx/sso/conf.py:1004 +#: awx/sso/conf.py:1023 +msgid "Configure this setting with your contact information." +msgstr "問い合わせ先情報で設定を行います。" + +#: awx/sso/conf.py:1022 +msgid "SAML Service Provider Support Contact" +msgstr "SAML サービスプロバイダーサポートの問い合わせ先" + +#: awx/sso/conf.py:1037 +msgid "SAML Enabled Identity Providers" +msgstr "SAML で有効にされたアイデンティティープロバイダー" + +#: awx/sso/conf.py:1038 +msgid "" +"Configure the Entity ID, SSO URL and certificate for each identity provider " +"(IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user " +"data using attribute names that differ from the default OIDs (https://github." +"com/omab/python-social-auth/blob/master/social/backends/saml.py#L16). " +"Attribute names may be overridden for each IdP." +msgstr "" +"使用中のそれぞれのアイデンティティープロバイダー (IdP) についてのエンティティー ID、SSO URL および証明書を設定します。複数の SAML " +"IdP がサポートされます。一部の IdP はデフォルト OID とは異なる属性名を使用してユーザーデータを提供することがあります (https://" +"github.com/omab/python-social-auth/blob/master/social/backends/saml." +"py#L16)。それぞれの IdP の属性名を上書きできます。" + +#: awx/sso/conf.py:1076 +msgid "SAML Organization Map" +msgstr "SAML 組織マップ" + +#: awx/sso/conf.py:1089 +msgid "SAML Team Map" +msgstr "SAML チームマップ" + +#: awx/sso/fields.py:123 +msgid "Invalid connection option(s): {invalid_options}." +msgstr "無効な接続オプション: {invalid_options}" + +#: awx/sso/fields.py:194 +msgid "Base" +msgstr "ベース" + +#: awx/sso/fields.py:195 +msgid "One Level" +msgstr "1 レベル" + +#: awx/sso/fields.py:196 +msgid "Subtree" +msgstr "サブツリー" + +#: awx/sso/fields.py:214 +msgid "Expected a list of three items but got {length} instead." +msgstr "3 つの項目の一覧が予期されましが、{length} が取得されました。" + +#: awx/sso/fields.py:215 +msgid "Expected an instance of LDAPSearch but got {input_type} instead." +msgstr "LDAPSearch のインスタンスが予期されましたが、{input_type} が取得されました。" + +#: awx/sso/fields.py:251 +msgid "" +"Expected an instance of LDAPSearch or LDAPSearchUnion but got {input_type} " +"instead." +msgstr "" +"LDAPSearch または LDAPSearchUnion のインスタンスが予期されましたが、{input_type} が取得されました。" + +#: awx/sso/fields.py:278 +msgid "Invalid user attribute(s): {invalid_attrs}." +msgstr "無効なユーザー属性: {invalid_attrs}" + +#: awx/sso/fields.py:295 +msgid "Expected an instance of LDAPGroupType but got {input_type} instead." +msgstr "LDAPGroupType のインスタンスが予期されましたが、{input_type} が取得されました。" + +#: awx/sso/fields.py:323 +msgid "Invalid user flag: \"{invalid_flag}\"." +msgstr "無効なユーザーフラグ: \"{invalid_flag}\"" + +#: awx/sso/fields.py:339 +#: awx/sso/fields.py:506 +msgid "" +"Expected None, True, False, a string or list of strings but got {input_type} " +"instead." +msgstr "None、True、False、文字列または文字列の一覧が予期されましたが、{input_type} が取得されました。" + +#: awx/sso/fields.py:375 +msgid "Missing key(s): {missing_keys}." +msgstr "キーがありません: {missing_keys}" + +#: awx/sso/fields.py:376 +msgid "Invalid key(s): {invalid_keys}." +msgstr "無効なキー: {invalid_keys}" + +#: awx/sso/fields.py:425 +#: awx/sso/fields.py:542 +msgid "Invalid key(s) for organization map: {invalid_keys}." +msgstr "組織マップの無効なキー: {invalid_keys}" + +#: awx/sso/fields.py:443 +msgid "Missing required key for team map: {invalid_keys}." +msgstr "チームマップの必要なキーがありません: {invalid_keys}" + +#: awx/sso/fields.py:444 +#: awx/sso/fields.py:561 +msgid "Invalid key(s) for team map: {invalid_keys}." +msgstr "チームマップの無効なキー: {invalid_keys}" + +#: awx/sso/fields.py:560 +msgid "Missing required key for team map: {missing_keys}." 
+msgstr "チームマップで必要なキーがありません: {missing_keys}" + +#: awx/sso/fields.py:578 +msgid "Missing required key(s) for org info record: {missing_keys}." +msgstr "組織情報レコードで必要なキーがありません: {missing_keys}" + +#: awx/sso/fields.py:591 +msgid "Invalid language code(s) for org info: {invalid_lang_codes}." +msgstr "組織情報の無効な言語コード: {invalid_lang_codes}" + +#: awx/sso/fields.py:610 +msgid "Missing required key(s) for contact: {missing_keys}." +msgstr "問い合わせ先の必要なキーがありません: {missing_keys}" + +#: awx/sso/fields.py:622 +msgid "Missing required key(s) for IdP: {missing_keys}." +msgstr "IdP で必要なキーがありません: {missing_keys}" + +#: awx/sso/pipeline.py:24 +msgid "An account cannot be found for {0}" +msgstr "{0} のアカウントが見つかりません" + +#: awx/sso/pipeline.py:30 +msgid "Your account is inactive" +msgstr "アカウントが非アクティブです" + +#: awx/sso/validators.py:19 +#: awx/sso/validators.py:44 +#, python-format +msgid "DN must include \"%%(user)s\" placeholder for username: %s" +msgstr "DN にはユーザー名の \"%%(user)s\" プレースホルダーを含める必要があります: %s" + +#: awx/sso/validators.py:26 +#, python-format +msgid "Invalid DN: %s" +msgstr "無効な DN: %s" + +#: awx/sso/validators.py:56 +#, python-format +msgid "Invalid filter: %s" +msgstr "無効なフィルター: %s" + +#: awx/templates/error.html:4 +#: awx/ui/templates/ui/index.html:8 +msgid "Ansible Tower" +msgstr "Ansible Tower" + +#: awx/templates/rest_framework/api.html:39 +msgid "Ansible Tower API Guide" +msgstr "Ansible Tower API ガイド" + +#: awx/templates/rest_framework/api.html:40 +msgid "Back to Ansible Tower" +msgstr "Ansible Tower に戻る" + +#: awx/templates/rest_framework/api.html:41 +msgid "Resize" +msgstr "サイズの変更" + +#: awx/templates/rest_framework/base.html:78 +#: awx/templates/rest_framework/base.html:92 +#, python-format +msgid "Make a GET request on the %(name)s resource" +msgstr "%(name)s リソースでの GET 要求" + +#: awx/templates/rest_framework/base.html:80 +msgid "Specify a format for the GET request" +msgstr "GET 要求の形式を指定" + +#: awx/templates/rest_framework/base.html:86 +#, python-format +msgid "" +"Make a GET request on the %(name)s resource with the format set to " +"`%(format)s`" +msgstr "形式が `%(format)s` に設定された状態での %(name)s リソースでの GET 要求" + +#: awx/templates/rest_framework/base.html:100 +#, python-format +msgid "Make an OPTIONS request on the %(name)s resource" +msgstr "%(name)s リソースでの OPTIONS 要求" + +#: awx/templates/rest_framework/base.html:106 +#, python-format +msgid "Make a DELETE request on the %(name)s resource" +msgstr "%(name)s リソースでの DELETE 要求" + +#: awx/templates/rest_framework/base.html:113 +msgid "Filters" +msgstr "フィルター" + +#: awx/templates/rest_framework/base.html:172 +#: awx/templates/rest_framework/base.html:186 +#, python-format +msgid "Make a POST request on the %(name)s resource" +msgstr "%(name)s リソースでの POST 要求" + +#: awx/templates/rest_framework/base.html:216 +#: awx/templates/rest_framework/base.html:230 +#, python-format +msgid "Make a PUT request on the %(name)s resource" +msgstr "%(name)s リソースでの PUT 要求" + +#: awx/templates/rest_framework/base.html:233 +#, python-format +msgid "Make a PATCH request on the %(name)s resource" +msgstr "%(name)s リソースでの PATCH 要求" + +#: awx/ui/apps.py:9 +#: awx/ui/conf.py:22 +#: awx/ui/conf.py:38 +#: awx/ui/conf.py:53 +msgid "UI" +msgstr "UI" + +#: awx/ui/conf.py:16 +msgid "Off" +msgstr "オフ" + +#: awx/ui/conf.py:17 +msgid "Anonymous" +msgstr "匿名" + +#: awx/ui/conf.py:18 +msgid "Detailed" +msgstr "詳細" + +#: awx/ui/conf.py:20 +msgid "Analytics Tracking State" +msgstr "アナリティクストラッキングの状態" + +#: awx/ui/conf.py:21 +msgid "Enable or Disable Analytics Tracking." 
+msgstr "アナリティクストラッキングの有効化/無効化。" + +#: awx/ui/conf.py:31 +msgid "Custom Login Info" +msgstr "カスタムログイン情報" + +#: awx/ui/conf.py:32 +msgid "" +"If needed, you can add specific information (such as a legal notice or a " +"disclaimer) to a text box in the login modal using this setting. Any content " +"added must be in plain text, as custom HTML or other markup languages are " +"not supported. If multiple paragraphs of text are needed, new lines " +"(paragraphs) must be escaped as `\\n` within the block of text." +msgstr "" +"必要な場合は、この設定を使ってログインモーダルのテキストボックスに特定の情報 (法律上の通知または免責事項など) " +"を追加できます。追加されるすべてのコンテンツは、カスタム HTML " +"や他のマークアップ言語がサポートされないため、プレーンテキストでなければなりません。テキストの複数のパラグラフが必要な場合、改行 (パラグラフ) " +"をテキストのブロック内の `\\n` としてエスケープする必要があります。" + +#: awx/ui/conf.py:48 +msgid "Custom Logo" +msgstr "カスタムロゴ" + +#: awx/ui/conf.py:49 +msgid "" +"To set up a custom logo, provide a file that you create. For the custom logo " +"to look its best, use a `.png` file with a transparent background. GIF, PNG " +"and JPEG formats are supported." +msgstr "" +"カスタムロゴをセットアップするには、作成するファイルを指定します。カスタムロゴを最適化するには、背景が透明の「." +"png」ファイルを使用します。GIF、PNG および JPEG 形式がサポートされます。" + +#: awx/ui/fields.py:29 +msgid "" +"Invalid format for custom logo. Must be a data URL with a base64-encoded " +"GIF, PNG or JPEG image." +msgstr "" +"カスタムロゴの無効な形式です。base64 エンコードされた GIF、PNG または JPEG イメージと共にデータ URL を指定する必要があります。" + +#: awx/ui/fields.py:30 +msgid "Invalid base64-encoded data in data URL." +msgstr "データ URL の無効な base64 エンコードされたデータ。" + +#: awx/ui/templates/ui/index.html:49 +msgid "" +"Your session will expire in 60 seconds, would you like to continue?" +msgstr "" +"セッションは 60 秒後に期限切れになります。続行しますか?" + +#: awx/ui/templates/ui/index.html:64 +msgid "CANCEL" +msgstr "取り消し" + +#: awx/ui/templates/ui/index.html:116 +msgid "Set how many days of data should be retained." +msgstr "データの保持日数を設定します。" + +#: awx/ui/templates/ui/index.html:122 +msgid "" +"Please enter an integer that is not " +"negative that is lower than 9999." +msgstr "" +"負でない 9999 より値の小さい整数を入力してください。" + +#: awx/ui/templates/ui/index.html:127 +msgid "" +"For facts collected older than the time period specified, save one fact scan " +"(snapshot) per time window (frequency). For example, facts older than 30 " +"days are purged, while one weekly fact scan is kept.\n" +"
\n" +"
CAUTION: Setting both numerical variables to \"0\" " +"will delete all facts.\n" +"
\n" +"
" +msgstr "" +"指定された期間の前に収集されたファクトについては、時間枠 (頻度) ごとに 1 つのファクトスキャン (スナップショット) を保存します。たとえば、30 " +"日間の前のファクトは削除され、1 つの週次ファクトは保持されます。\n" +"
\n" +"
注意: どちらの数値変数も「0」に設定すると、すべてのファクトが削除されます。\n" +"
\n" +"
" + +#: awx/ui/templates/ui/index.html:136 +msgid "Select a time period after which to remove old facts" +msgstr "古いファクトを削除するまでの期間を選択" + +#: awx/ui/templates/ui/index.html:150 +msgid "" +"Please enter an integer " +"that is not negative " +"that is lower than 9999." +msgstr "" +"負でない 9999 より値の小さい整数を入力してください。" + +#: awx/ui/templates/ui/index.html:155 +msgid "Select a frequency for snapshot retention" +msgstr "スナップショットの保持頻度を選択" + +#: awx/ui/templates/ui/index.html:169 +msgid "" +"Please enter an integer that is not negative that is " +"lower than 9999." +msgstr "" +"負でない " +"9999 よりも値の小さい整数を入力してください。" + +#: awx/ui/templates/ui/index.html:175 +msgid "working..." +msgstr "実行中..." diff --git a/awx/main/__init__.py b/awx/main/__init__.py index e484e62be1..f500f439b6 100644 --- a/awx/main/__init__.py +++ b/awx/main/__init__.py @@ -1,2 +1,4 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. + +default_app_config = 'awx.main.apps.MainConfig' diff --git a/awx/main/access.py b/awx/main/access.py index d122d48da8..20becf2cbe 100644 --- a/awx/main/access.py +++ b/awx/main/access.py @@ -7,9 +7,11 @@ import sys import logging # Django -from django.db.models import Q +from django.conf import settings +from django.db.models import Q, Prefetch from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType +from django.utils.translation import ugettext_lazy as _ # Django REST Framework from rest_framework.exceptions import ParseError, PermissionDenied, ValidationError @@ -19,12 +21,11 @@ from awx.main.utils import * # noqa from awx.main.models import * # noqa from awx.main.models.unified_jobs import ACTIVE_STATES from awx.main.models.mixins import ResourceMixin -from awx.api.license import LicenseForbids -from awx.main.task_engine import TaskSerializer -from awx.main.conf import tower_settings +from awx.main.task_engine import TaskEnhancer +from awx.conf.license import LicenseForbids -__all__ = ['get_user_queryset', 'check_user_access', - 'user_accessible_objects', +__all__ = ['get_user_queryset', 'check_user_access', 'check_user_access_with_errors', + 'user_accessible_objects', 'consumer_access', 'user_admin_role', 'StateConflict',] PERMISSION_TYPES = [ @@ -57,13 +58,16 @@ access_registry = { # ... } + class StateConflict(ValidationError): status_code = 409 + def register_access(model_class, access_class): access_classes = access_registry.setdefault(model_class, []) access_classes.append(access_class) + @property def user_admin_role(self): role = Role.objects.get( @@ -75,9 +79,11 @@ def user_admin_role(self): role.parents = [org.admin_role.pk for org in self.organizations] return role + def user_accessible_objects(user, role_name): return ResourceMixin._accessible_objects(User, user, role_name) + def get_user_queryset(user, model_class): ''' Return a queryset for the given model_class containing only the instances @@ -97,6 +103,7 @@ def get_user_queryset(user, model_class): queryset = queryset.filter(pk__in=qs.values_list('pk', flat=True)) return queryset + def check_user_access(user, model_class, action, *args, **kwargs): ''' Return True if user can perform action against model_class with the @@ -116,6 +123,34 @@ def check_user_access(user, model_class, action, *args, **kwargs): return result return False + +def check_user_access_with_errors(user, model_class, action, *args, **kwargs): + ''' + Return T/F permission and summary of problems with the action. 
+ ''' + for access_class in access_registry.get(model_class, []): + access_instance = access_class(user, save_messages=True) + access_method = getattr(access_instance, 'can_%s' % action, None) + result = access_method(*args, **kwargs) + logger.debug('%s.%s %r returned %r', access_instance.__class__.__name__, + access_method.__name__, args, result) + return (result, access_instance.messages) + return (False, {}) + + +def get_user_capabilities(user, instance, **kwargs): + ''' + Returns a dictionary of capabilities the user has on the particular + instance. *NOTE* This is not a direct mapping of can_* methods into this + dictionary, it is intended to munge some queries in a way that is + convenient for the user interface to consume and hide or show various + actions in the interface. + ''' + for access_class in access_registry.get(type(instance), []): + return access_class(user).get_user_capabilities(instance, **kwargs) + return None + + def check_superuser(func): ''' check_superuser is a decorator that provides a simple short circuit @@ -128,6 +163,18 @@ return func(self, *args, **kwargs) return wrapper + +def consumer_access(group_name): + ''' + consumer_access returns the proper Access class based on group_name + for a channels consumer. + ''' + class_map = {'job_events': JobAccess, + 'workflow_events': WorkflowJobAccess, + 'ad_hoc_command_events': AdHocCommandAccess} + return class_map.get(group_name) + + class BaseAccess(object): ''' Base class for checking user access to a given model. Subclasses should @@ -138,8 +185,11 @@ class BaseAccess(object): model = None - def __init__(self, user): + def __init__(self, user, save_messages=False): self.user = user + self.save_messages = save_messages + if save_messages: + self.messages = {} def get_queryset(self): if self.user.is_superuser or self.user.is_system_auditor: @@ -179,33 +229,183 @@ class BaseAccess(object): def can_unattach(self, obj, sub_obj, relationship, data=None): return self.can_change(obj, data) - def check_license(self, add_host=False, feature=None, check_expiration=True): - reader = TaskSerializer() - validation_info = reader.from_database() + def check_related(self, field, Model, data, role_field='admin_role', + obj=None, mandatory=False): + ''' + Check permission for related field, in scenarios: + - creating a new resource, user must have permission if + resource is specified in `data` + - editing an existing resource, user must have permission to resource + in `data`, as well as existing related resource on `obj` + + If `mandatory` is set, new resources require the field and + existing field will always be checked + ''' + new = None + changed = True + if data and 'reference_obj' in data: + # Use reference object's related fields, if given + new = getattr(data['reference_obj'], field) + elif data and field in data: + # Obtain the resource specified in `data` + raw_value = data[field] + if isinstance(raw_value, Model): + new = raw_value + elif raw_value is None: + new = None + else: + try: + new_pk = int(raw_value) + # Avoid a database query by comparing the new pk to the existing related id + if obj and new_pk == getattr(obj, '%s_id' % field, None): + changed = False + else: + # Get the new resource from the database + new = get_object_or_400(Model, pk=new_pk) + except (TypeError, ValueError): + raise ParseError(_("Bad data found in related field %s."
% field)) + elif data is None or field not in data: + changed = False + + # Obtain existing related resource + current = None + if obj and (changed or mandatory): + current = getattr(obj, field) + + if obj and new == current: + # Resource not changed, like a PUT request + changed = False + + if (not new) and (not obj) and mandatory: + # Restrict ability to create resource without required field + return self.user.is_superuser + + def user_has_resource_access(resource): + role = getattr(resource, role_field, None) + if role is None: + # Handle special case where resource does not have direct roles + access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field] + return self.user.can_access(type(resource), access_method_type, resource, None) + return self.user in role + + if new and changed and (not user_has_resource_access(new)): + return False # User lacks access to provided resource + + if current and (changed or mandatory) and (not user_has_resource_access(current)): + return False # User lacks access to existing resource + + return True # User has access to both, permission check passed + + def check_license(self, add_host_name=None, feature=None, check_expiration=True): + validation_info = TaskEnhancer().validate_enhancements() if ('test' in sys.argv or 'py.test' in sys.argv[0] or 'jenkins' in sys.argv) and not os.environ.get('SKIP_LICENSE_FIXUP_FOR_TEST', ''): validation_info['free_instances'] = 99999999 validation_info['time_remaining'] = 99999999 validation_info['grace_period_remaining'] = 99999999 if check_expiration and validation_info.get('time_remaining', None) is None: - raise PermissionDenied("License is missing.") + raise PermissionDenied(_("License is missing.")) if check_expiration and validation_info.get("grace_period_remaining") <= 0: - raise PermissionDenied("License has expired.") + raise PermissionDenied(_("License has expired.")) free_instances = validation_info.get('free_instances', 0) available_instances = validation_info.get('available_instances', 0) - if add_host and free_instances == 0: - raise PermissionDenied("License count of %s instances has been reached." % available_instances) - elif add_host and free_instances < 0: - raise PermissionDenied("License count of %s instances has been exceeded." % available_instances) - elif not add_host and free_instances < 0: - raise PermissionDenied("Host count exceeds available instances.") + + if add_host_name: + host_exists = Host.objects.filter(name=add_host_name).exists() + if not host_exists and free_instances == 0: + raise PermissionDenied(_("License count of %s instances has been reached.") % available_instances) + elif not host_exists and free_instances < 0: + raise PermissionDenied(_("License count of %s instances has been exceeded.") % available_instances) + elif not add_host_name and free_instances < 0: + raise PermissionDenied(_("Host count exceeds available instances.")) if feature is not None: if "features" in validation_info and not validation_info["features"].get(feature, False): - raise LicenseForbids("Feature %s is not enabled in the active license." 
% feature) + raise LicenseForbids(_("Feature %s is not enabled in the active license.") % feature) elif "features" not in validation_info: - raise LicenseForbids("Features not found in active license.") + raise LicenseForbids(_("Features not found in active license.")) + + def get_user_capabilities(self, obj, method_list=[], parent_obj=None): + if obj is None: + return {} + user_capabilities = {} + + # Custom ordering to loop through methods so we can reuse earlier calcs + for display_method in ['edit', 'delete', 'start', 'schedule', 'copy', 'adhoc', 'unattach']: + if display_method not in method_list: + continue + + # Actions not possible for reason unrelated to RBAC + # Cannot copy with validation errors, or update a manual group/project + if display_method == 'copy' and isinstance(obj, JobTemplate): + validation_errors, resources_needed_to_start = obj.resource_validation_data() + if validation_errors: + user_capabilities[display_method] = False + continue + elif display_method == 'copy' and isinstance(obj, WorkflowJobTemplate) and obj.organization_id is None: + user_capabilities[display_method] = self.user.is_superuser + continue + elif display_method in ['start', 'schedule'] and isinstance(obj, Group): + if obj.inventory_source and not obj.inventory_source._can_update(): + user_capabilities[display_method] = False + continue + elif display_method in ['start', 'schedule'] and isinstance(obj, (Project)): + if obj.scm_type == '': + user_capabilities[display_method] = False + continue + + # Grab the answer from the cache, if available + if hasattr(obj, 'capabilities_cache') and display_method in obj.capabilities_cache: + user_capabilities[display_method] = obj.capabilities_cache[display_method] + if self.user.is_superuser and not user_capabilities[display_method]: + # Cache override for models with bad orphaned state + user_capabilities[display_method] = True + continue + + # Aliases for going from UI language to API language + if display_method == 'edit': + method = 'change' + elif display_method == 'adhoc': + method = 'run_ad_hoc_commands' + else: + method = display_method + + # Shortcuts in certain cases by deferring to earlier results + if display_method == 'schedule': + user_capabilities['schedule'] = user_capabilities['start'] + continue + elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob)): + user_capabilities['delete'] = user_capabilities['edit'] + continue + elif display_method == 'copy' and isinstance(obj, (Group, Host)): + user_capabilities['copy'] = user_capabilities['edit'] + continue + + # Compute permission + user_capabilities[display_method] = self.get_method_capability(method, obj, parent_obj) + + return user_capabilities + + def get_method_capability(self, method, obj, parent_obj): + if method in ['change']: # 3 args + return self.can_change(obj, {}) + elif method in ['delete', 'run_ad_hoc_commands', 'copy']: + access_method = getattr(self, "can_%s" % method) + return access_method(obj) + elif method in ['start']: + return self.can_start(obj, validate_license=False) + elif method in ['add']: # 2 args with data + return self.can_add({}) + elif method in ['attach', 'unattach']: # parent/sub-object call + access_method = getattr(self, "can_%s" % method) + if type(parent_obj) == Team: + relationship = 'parents' + parent_obj = parent_obj.member_role + else: + relationship = 'members' + return access_method(obj, parent_obj, relationship, skip_sub_obj_read_check=True, data={}) + return False class UserAccess(BaseAccess): @@ -225,23 +425,24 @@ class 
UserAccess(BaseAccess): def get_queryset(self): if self.user.is_superuser or self.user.is_system_auditor: - return User.objects.all() + qs = User.objects.all() - if tower_settings.ORG_ADMINS_CAN_SEE_ALL_USERS and \ + elif settings.ORG_ADMINS_CAN_SEE_ALL_USERS and \ (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()): - return User.objects.all() - - return ( - User.objects.filter( - pk__in=Organization.accessible_objects(self.user, 'read_role').values('member_role__members') - ) | - User.objects.filter( - pk=self.user.id - ) | - User.objects.filter( - pk__in=Role.objects.filter(singleton_name__in = [ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR]).values('members') - ) - ).distinct() + qs = User.objects.all() + else: + qs = ( + User.objects.filter( + pk__in=Organization.accessible_objects(self.user, 'read_role').values('member_role__members') + ) | + User.objects.filter( + pk=self.user.id + ) | + User.objects.filter( + pk__in=Role.objects.filter(singleton_name__in = [ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR]).values('members') + ) + ).distinct() + return qs.prefetch_related('profile') def can_add(self, data): @@ -265,7 +466,8 @@ class UserAccess(BaseAccess): @check_superuser def can_admin(self, obj, data): - return Organization.objects.filter(member_role__members=obj, admin_role__members=self.user).exists() + return Organization.objects.filter(Q(member_role__members=obj) | Q(admin_role__members=obj), + Q(admin_role__members=self.user)).exists() def can_delete(self, obj): if obj == self.user: @@ -307,7 +509,7 @@ class OrganizationAccess(BaseAccess): def get_queryset(self): qs = self.model.accessible_objects(self.user, 'read_role') - return qs.select_related('created_by', 'modified_by').all() + return qs.prefetch_related('created_by', 'modified_by').all() @check_superuser def can_change(self, obj, data): @@ -326,10 +528,11 @@ class OrganizationAccess(BaseAccess): active_jobs.extend([dict(type="inventory_update", id=o.id) for o in InventoryUpdate.objects.filter(inventory_source__inventory__organization=obj, status__in=ACTIVE_STATES)]) if len(active_jobs) > 0: - raise StateConflict({"conflict": "Resource is being used by running jobs", + raise StateConflict({"conflict": _("Resource is being used by running jobs"), "active_jobs": active_jobs}) return True + class InventoryAccess(BaseAccess): ''' I can see inventory when: @@ -370,9 +573,7 @@ class InventoryAccess(BaseAccess): if not data: return Organization.accessible_objects(self.user, 'admin_role').exists() - org_pk = get_pk_from_dict(data, 'organization') - org = get_object_or_400(Organization, pk=org_pk) - return self.user in org.admin_role + return self.check_related('organization', Organization, data) @check_superuser def can_change(self, obj, data): @@ -381,14 +582,8 @@ class InventoryAccess(BaseAccess): @check_superuser def can_admin(self, obj, data): # Verify that the user has access to the new organization if moving an - # inventory to a new organization. - org_pk = get_pk_from_dict(data, 'organization') - if obj and org_pk and obj.organization.pk != org_pk: - org = get_object_or_400(Organization, pk=org_pk) - if self.user not in org.admin_role: - return False - # Otherwise, just check for admin permission. - return self.user in obj.admin_role + # inventory to a new organization. Otherwise, just check for admin permission. 
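# For illustration only: a minimal, self-contained sketch (plain Python, not
# AWX code; DummyOrg and can_reorganize are hypothetical names) of the rule
# check_related() enforces here -- moving a resource to another organization
# requires admin access to BOTH the current and the target organization.
class DummyOrg:
    def __init__(self, admins):
        self.admin_role = set(admins)  # stand-in for a Role membership test

def can_reorganize(user, current_org, new_org):
    # mirrors check_related(..., obj=obj): when the field changes, the
    # existing AND the new related resource must both pass the role test
    return user in current_org.admin_role and user in new_org.admin_role

org_a = DummyOrg(admins={'alice'})
org_b = DummyOrg(admins={'alice', 'bob'})
assert can_reorganize('alice', org_a, org_b)
assert not can_reorganize('bob', org_a, org_b)  # bob is not an admin of org_a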
+ return self.check_related('organization', Organization, data, obj=obj) and self.user in obj.admin_role def can_delete(self, obj): is_can_admin = self.can_admin(obj, None) @@ -400,13 +595,14 @@ class InventoryAccess(BaseAccess): active_jobs.extend([dict(type="inventory_update", id=o.id) for o in InventoryUpdate.objects.filter(inventory_source__inventory=obj, status__in=ACTIVE_STATES)]) if len(active_jobs) > 0: - raise StateConflict({"conflict": "Resource is being used by running jobs", + raise StateConflict({"conflict": _("Resource is being used by running jobs"), "active_jobs": active_jobs}) return True def can_run_ad_hoc_commands(self, obj): return self.user in obj.adhoc_role + class HostAccess(BaseAccess): ''' I can see hosts whenever I can see their inventory. @@ -432,20 +628,23 @@ class HostAccess(BaseAccess): return Inventory.accessible_objects(self.user, 'admin_role').exists() # Checks for admin or change permission on inventory. - inventory_pk = get_pk_from_dict(data, 'inventory') - inventory = get_object_or_400(Inventory, pk=inventory_pk) - if self.user not in inventory.admin_role: + if not self.check_related('inventory', Inventory, data): return False # Check to see if we have enough licenses - self.check_license(add_host=True) + self.check_license(add_host_name=data.get('name', None)) return True def can_change(self, obj, data): # Prevent moving a host to a different inventory. inventory_pk = get_pk_from_dict(data, 'inventory') if obj and inventory_pk and obj.inventory.pk != inventory_pk: - raise PermissionDenied('Unable to change inventory on a host.') + raise PermissionDenied(_('Unable to change inventory on a host.')) + + # Prevent renaming a host that might exceed license count + if data and 'name' in data: + self.check_license(add_host_name=data['name']) + # Checks for admin or change permission on inventory, controls whether # the user can edit variable data. return obj and self.user in obj.inventory.admin_role @@ -457,12 +656,13 @@ class HostAccess(BaseAccess): return False # Prevent assignments between different inventories. if obj.inventory != sub_obj.inventory: - raise ParseError('Cannot associate two items from different inventories.') + raise ParseError(_('Cannot associate two items from different inventories.')) return True def can_delete(self, obj): return obj and self.user in obj.inventory.admin_role + class GroupAccess(BaseAccess): ''' I can see groups whenever I can see their inventory. @@ -483,15 +683,13 @@ class GroupAccess(BaseAccess): if not data or 'inventory' not in data: return False # Checks for admin or change permission on inventory. - inventory_pk = get_pk_from_dict(data, 'inventory') - inventory = get_object_or_400(Inventory, pk=inventory_pk) - return self.user in inventory.admin_role + return self.check_related('inventory', Inventory, data) def can_change(self, obj, data): # Prevent moving a group to a different inventory. inventory_pk = get_pk_from_dict(data, 'inventory') if obj and inventory_pk and obj.inventory.pk != inventory_pk: - raise PermissionDenied('Unable to change inventory on a group.') + raise PermissionDenied(_('Unable to change inventory on a group.')) # Checks for admin or change permission on inventory, controls whether # the user can attach subgroups or edit variable data. return obj and self.user in obj.inventory.admin_role @@ -503,7 +701,7 @@ class GroupAccess(BaseAccess): return False # Prevent assignments between different inventories. 
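# For illustration, a sketch of the (grand)child check performed just below:
# attaching a group under a parent is refused when the prospective child is
# the parent itself or one of its ancestors, which would create a cycle.
# Plain Python over a simple parent map, not the AWX implementation.
def would_create_cycle(parent, child, parents_of):
    # walk every transitive parent of `parent`; finding `child` there
    # (or child == parent) means the attachment would loop
    seen, stack = set(), [parent]
    while stack:
        node = stack.pop()
        if node == child:
            return True
        for p in parents_of.get(node, ()):
            if p not in seen:
                seen.add(p)
                stack.append(p)
    return False

parents_of = {'web': {'all'}, 'all': set()}
assert would_create_cycle('web', 'all', parents_of)     # 'all' is an ancestor of 'web'
assert not would_create_cycle('all', 'db', parents_of)  # a fresh subtree is fine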
if obj.inventory != sub_obj.inventory: - raise ParseError('Cannot associate two items from different inventories.') + raise ParseError(_('Cannot associate two items from different inventories.')) # Prevent group from being assigned as its own (grand)child. if type(obj) == type(sub_obj): parent_pks = set(obj.all_parents.values_list('pk', flat=True)) @@ -522,10 +720,17 @@ class GroupAccess(BaseAccess): active_jobs.extend([dict(type="inventory_update", id=o.id) for o in InventoryUpdate.objects.filter(inventory_source__in=obj.inventory_sources.all(), status__in=ACTIVE_STATES)]) if len(active_jobs) > 0: - raise StateConflict({"conflict": "Resource is being used by running jobs", + raise StateConflict({"conflict": _("Resource is being used by running jobs"), "active_jobs": active_jobs}) return True + def can_start(self, obj, validate_license=True): + # Used as another alias to inventory_source start access for user_capabilities + if obj and obj.inventory_source: + return self.user.can_access(InventorySource, 'start', obj.inventory_source, validate_license=validate_license) + return False + + class InventorySourceAccess(BaseAccess): ''' I can see inventory sources whenever I can see their group or inventory. @@ -556,13 +761,16 @@ class InventorySourceAccess(BaseAccess): def can_change(self, obj, data): # Checks for admin or change permission on group. if obj and obj.group: - return self.user.can_access(Group, 'change', obj.group, None) + return ( + self.user.can_access(Group, 'change', obj.group, None) and + self.check_related('credential', Credential, data, obj=obj, role_field='use_role') + ) # Can't change inventory sources attached to only the inventory, since # these are created automatically from the management command. else: return False - def can_start(self, obj): + def can_start(self, obj, validate_license=True): if obj and obj.group: return obj.can_update and self.user in obj.group.inventory.update_role elif obj and obj.inventory: @@ -594,10 +802,18 @@ class InventoryUpdateAccess(BaseAccess): # Inventory cascade deletes to inventory update, descends from org admin return self.user in obj.inventory_source.inventory.admin_role + def can_start(self, obj, validate_license=True): + # For relaunching + if obj and obj.inventory_source: + access = InventorySourceAccess(self.user) + return access.can_start(obj.inventory_source, validate_license=validate_license) + return False + @check_superuser def can_delete(self, obj): return self.user in obj.inventory_source.inventory.admin_role + class CredentialAccess(BaseAccess): ''' I can see credentials when: @@ -621,7 +837,11 @@ class CredentialAccess(BaseAccess): permitted to see. 
""" qs = self.model.accessible_objects(self.user, 'read_role') - return qs.select_related('created_by', 'modified_by').all() + qs = qs.select_related('created_by', 'modified_by') + qs = qs.prefetch_related( + 'admin_role', 'use_role', 'read_role', + 'admin_role__parents', 'admin_role__members') + return qs @check_superuser def can_read(self, obj): @@ -653,15 +873,7 @@ class CredentialAccess(BaseAccess): def can_change(self, obj, data): if not obj: return False - - # Cannot change the organization for a credential after it's been created - if data and 'organization' in data: - organization_pk = get_pk_from_dict(data, 'organization') - if (organization_pk and (not obj.organization or organization_pk != obj.organization.id)) \ - or (not organization_pk and obj.organization): - return False - - return self.user in obj.admin_role + return self.user in obj.admin_role and self.check_related('organization', Organization, data, obj=obj) def can_delete(self, obj): # Unassociated credentials may be marked deleted by anyone, though we @@ -670,6 +882,7 @@ class CredentialAccess(BaseAccess): # return True return self.can_change(obj, None) + class TeamAccess(BaseAccess): ''' I can see a team when: @@ -691,17 +904,13 @@ class TeamAccess(BaseAccess): def can_add(self, data): if not data: # So the browseable API will work return Organization.accessible_objects(self.user, 'admin_role').exists() - org_pk = get_pk_from_dict(data, 'organization') - org = get_object_or_400(Organization, pk=org_pk) - if self.user in org.admin_role: - return True - return False + return self.check_related('organization', Organization, data) def can_change(self, obj, data): # Prevent moving a team to a different organization. org_pk = get_pk_from_dict(data, 'organization') if obj and org_pk and obj.organization.pk != org_pk: - raise PermissionDenied('Unable to change organization on a team.') + raise PermissionDenied(_('Unable to change organization on a team.')) if self.user.is_superuser: return True return self.user in obj.admin_role @@ -714,9 +923,9 @@ class TeamAccess(BaseAccess): of a resource role to the team.""" if isinstance(sub_obj, Role): if sub_obj.content_object is None: - raise PermissionDenied("The {} role cannot be assigned to a team".format(sub_obj.name)) + raise PermissionDenied(_("The {} role cannot be assigned to a team").format(sub_obj.name)) elif isinstance(sub_obj.content_object, User): - raise PermissionDenied("The admin_role for a User cannot be assigned to a team") + raise PermissionDenied(_("The admin_role for a User cannot be assigned to a team")) if isinstance(sub_obj.content_object, ResourceMixin): role_access = RoleAccess(self.user) @@ -734,6 +943,7 @@ class TeamAccess(BaseAccess): return super(TeamAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs) + class ProjectAccess(BaseAccess): ''' I can see projects when: @@ -762,12 +972,12 @@ class ProjectAccess(BaseAccess): def can_add(self, data): if not data: # So the browseable API will work return Organization.accessible_objects(self.user, 'admin_role').exists() - organization_pk = get_pk_from_dict(data, 'organization') - org = get_object_or_400(Organization, pk=organization_pk) - return self.user in org.admin_role + return self.check_related('organization', Organization, data, mandatory=True) @check_superuser def can_change(self, obj, data): + if not self.check_related('organization', Organization, data, obj=obj): + return False return self.user in obj.admin_role def can_delete(self, obj): @@ -780,14 +990,15 @@ class 
ProjectAccess(BaseAccess): active_jobs.extend([dict(type="project_update", id=o.id) for o in ProjectUpdate.objects.filter(project=obj, status__in=ACTIVE_STATES)]) if len(active_jobs) > 0: - raise StateConflict({"conflict": "Resource is being used by running jobs", + raise StateConflict({"conflict": _("Resource is being used by running jobs"), "active_jobs": active_jobs}) return True @check_superuser - def can_start(self, obj): + def can_start(self, obj, validate_license=True): return obj and self.user in obj.update_role + class ProjectUpdateAccess(BaseAccess): ''' I can see project updates when I can see the project. @@ -807,17 +1018,22 @@ class ProjectUpdateAccess(BaseAccess): @check_superuser def can_cancel(self, obj): - if not obj.can_cancel: - return False if self.user == obj.created_by: return True # Project updates cascade delete with project, admin role descends from org admin return self.user in obj.project.admin_role + def can_start(self, obj, validate_license=True): + # for relaunching + if obj and obj.project: + return self.user in obj.project.update_role + return False + @check_superuser def can_delete(self, obj): return obj and self.user in obj.project.admin_role + class JobTemplateAccess(BaseAccess): ''' I can see job templates when: @@ -841,10 +1057,6 @@ class JobTemplateAccess(BaseAccess): return qs.select_related('created_by', 'modified_by', 'inventory', 'project', 'credential', 'cloud_credential', 'next_schedule').all() - @check_superuser - def can_read(self, obj): - return self.user in obj.read_role - def can_add(self, data): ''' a user can create a job template if they are a superuser, an org admin @@ -854,10 +1066,12 @@ class JobTemplateAccess(BaseAccess): Users who are able to create deploy jobs can also run normal and check (dry run) jobs. ''' if not data: # So the browseable API will work - return True + return ( + Project.accessible_objects(self.user, 'use_role').exists() or + Inventory.accessible_objects(self.user, 'use_role').exists()) - # if reference_obj is provided, determine if it can be coppied - reference_obj = data.pop('reference_obj', None) + # if reference_obj is provided, determine if it can be copied + reference_obj = data.get('reference_obj', None) if 'job_type' in data and data['job_type'] == PERM_INVENTORY_SCAN: self.check_license(feature='system_tracking') @@ -879,22 +1093,16 @@ class JobTemplateAccess(BaseAccess): return None # If a credential is provided, the user should have use access to it. - credential = get_value(Credential, 'credential') - if credential: - if self.user not in credential.use_role: - return False + if not self.check_related('credential', Credential, data, role_field='use_role'): + return False # If a cloud credential is provided, the user should have use access. - cloud_credential = get_value(Credential, 'cloud_credential') - if cloud_credential: - if self.user not in cloud_credential.use_role: - return False + if not self.check_related('cloud_credential', Credential, data, role_field='use_role'): + return False # If a network credential is provided, the user should have use access. - network_credential = get_value(Credential, 'network_credential') - if network_credential: - if self.user not in network_credential.use_role: - return False + if not self.check_related('network_credential', Credential, data, role_field='use_role'): + return False # If an inventory is provided, the user should have use access. 
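# For illustration: the create rule above boils down to "the user needs 'use'
# on every related resource named in the request". A self-contained toy
# version under that assumption (hypothetical names; plain sets instead of
# AWX Role objects):
def can_add_job_template(user, data, use_role):
    # use_role: resource id -> set of users granted use permission on it
    related = ('credential', 'cloud_credential', 'network_credential',
               'inventory', 'project')
    return all(user in use_role[data[f]] for f in related if data.get(f))

use_role = {'cred1': {'alice'}, 'inv1': {'alice', 'bob'}}
assert can_add_job_template('alice', {'credential': 'cred1', 'inventory': 'inv1'}, use_role)
assert not can_add_job_template('bob', {'credential': 'cred1'}, use_role)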
inventory = get_value(Inventory, 'inventory') @@ -919,6 +1127,9 @@ class JobTemplateAccess(BaseAccess): else: return False + def can_copy(self, obj): + return self.can_add({'reference_obj': obj}) + def can_start(self, obj, validate_license=True): # Check license. if validate_license: @@ -927,6 +1138,8 @@ class JobTemplateAccess(BaseAccess): self.check_license(feature='system_tracking') if obj.survey_enabled: self.check_license(feature='surveys') + if Instance.objects.active_count() > 1: + self.check_license(feature='ha') # Super users can start any job if self.user.is_superuser: @@ -959,7 +1172,7 @@ class JobTemplateAccess(BaseAccess): required_obj = getattr(obj, required_field, None) if required_field not in data_for_change and required_obj is not None: data_for_change[required_field] = required_obj.pk - return self.can_read(obj) and self.can_add(data_for_change) + return self.can_read(obj) and (self.can_add(data_for_change) if data is not None else True) def changes_are_non_sensitive(self, obj, data): ''' @@ -1012,10 +1225,18 @@ class JobTemplateAccess(BaseAccess): active_jobs = [dict(type="job", id=o.id) for o in obj.jobs.filter(status__in=ACTIVE_STATES)] if len(active_jobs) > 0: - raise StateConflict({"conflict": "Resource is being used by running jobs", + raise StateConflict({"conflict": _("Resource is being used by running jobs"), "active_jobs": active_jobs}) return True + @check_superuser + def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): + if isinstance(sub_obj, NotificationTemplate): + return self.check_related('organization', Organization, {}, obj=sub_obj, mandatory=True) + return super(JobTemplateAccess, self).can_attach( + obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) + + class JobAccess(BaseAccess): ''' I can see jobs when: @@ -1031,7 +1252,7 @@ class JobAccess(BaseAccess): model = Job def get_queryset(self): - qs = self.model.objects.distinct() + qs = self.model.objects qs = qs.select_related('created_by', 'modified_by', 'job_template', 'inventory', 'project', 'credential', 'cloud_credential', 'job_template') qs = qs.prefetch_related('unified_job_template') @@ -1052,7 +1273,33 @@ class JobAccess(BaseAccess): Q(inventory__organization__in=org_access_qs) | Q(project__organization__in=org_access_qs)).distinct() - def can_add(self, data): + def related_orgs(self, obj): + orgs = [] + if obj.inventory and obj.inventory.organization: + orgs.append(obj.inventory.organization) + if obj.project and obj.project.organization and obj.project.organization not in orgs: + orgs.append(obj.project.organization) + return orgs + + def org_access(self, obj, role_types=['admin_role']): + orgs = self.related_orgs(obj) + for org in orgs: + for role_type in role_types: + role = getattr(org, role_type) + if self.user in role: + return True + return False + + @check_superuser + def can_read(self, obj): + if obj.job_template and self.user in obj.job_template.read_role: + return True + return self.org_access(obj, role_types=['auditor_role', 'admin_role']) + + def can_add(self, data, validate_license=True): + if validate_license: + self.check_license() + if not data: # So the browseable API will work return True if not self.user.is_superuser: @@ -1076,19 +1323,17 @@ class JobAccess(BaseAccess): return True def can_change(self, obj, data): - return obj.status == 'new' and self.can_read(obj) and self.can_add(data) + return (obj.status == 'new' and + self.can_read(obj) and + self.can_add(data, validate_license=False)) @check_superuser 
def can_delete(self, obj): - if obj.inventory is not None and self.user in obj.inventory.organization.admin_role: - return True - if (obj.project is not None and obj.project.organization is not None and - self.user in obj.project.organization.admin_role): - return True - return False + return self.org_access(obj) - def can_start(self, obj): - self.check_license() + def can_start(self, obj, validate_license=True): + if validate_license: + self.check_license() # A super user can relaunch a job if self.user.is_superuser: @@ -1130,6 +1375,7 @@ return True return obj.job_template is not None and self.user in obj.job_template.admin_role + class SystemJobTemplateAccess(BaseAccess): ''' I can only see/manage System Job Templates if I'm a super user @@ -1138,16 +1384,334 @@ model = SystemJobTemplate @check_superuser - def can_start(self, obj): + def can_start(self, obj, validate_license=True): '''Only a superuser can start a job from a SystemJobTemplate''' return False + class SystemJobAccess(BaseAccess): ''' I can only see manage System Jobs if I'm a super user ''' model = SystemJob + def can_start(self, obj, validate_license=True): + return False # no relaunching of system jobs + + +class WorkflowJobTemplateNodeAccess(BaseAccess): + ''' + I can see/use a WorkflowJobTemplateNode if I have read permission + to the associated Workflow Job Template + + In order to add a node, I need: + - admin access to parent WFJT + - execute access to the unified job template being used + - access to any credential or inventory provided as the prompted fields + + In order to do anything to a node, I need admin access to its WFJT + + In order to edit fields on a node, I need: + - execute access to the unified job template of the node + - access to BOTH credential and inventory post-change, if present + + In order to delete a node, I only need admin access to its WFJT + + In order to manage connections (edges) between nodes I do not need anything + beyond the standard admin access to its WFJT + ''' + model = WorkflowJobTemplateNode + + def get_queryset(self): + if self.user.is_superuser or self.user.is_system_auditor: + qs = self.model.objects.all() + else: + qs = self.model.objects.filter( + workflow_job_template__in=WorkflowJobTemplate.accessible_objects( + self.user, 'read_role')) + qs = qs.prefetch_related('success_nodes', 'failure_nodes', 'always_nodes', + 'unified_job_template') + return qs + + def can_use_prompted_resources(self, data): + return ( + self.check_related('credential', Credential, data, role_field='use_role') and + self.check_related('inventory', Inventory, data, role_field='use_role')) + + @check_superuser + def can_add(self, data): + if not data: # So the browseable API will work + return True + return ( + self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True) and + self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') and + self.can_use_prompted_resources(data)) + + def wfjt_admin(self, obj): + if not obj.workflow_job_template: + return self.user.is_superuser + else: + return self.user in obj.workflow_job_template.admin_role + + def ujt_execute(self, obj): + if not obj.unified_job_template: + return self.wfjt_admin(obj) + else: + return self.user in obj.unified_job_template.execute_role and self.wfjt_admin(obj) + + def can_change(self, obj, data): + if not data: + return True + + if not self.ujt_execute(obj): + # should not be able to edit the prompts if
lacking access to UJT + return False + + if 'credential' in data or 'inventory' in data: + new_data = data + if 'credential' not in data: + new_data['credential'] = obj.credential + if 'inventory' not in data: + new_data['inventory'] = obj.inventory + return self.can_use_prompted_resources(new_data) + return True + + def can_delete(self, obj): + return self.wfjt_admin(obj) + + def check_same_WFJT(self, obj, sub_obj): + if type(obj) != self.model or type(sub_obj) != self.model: + raise Exception('Attaching workflow nodes only allowed for other nodes') + if obj.workflow_job_template != sub_obj.workflow_job_template: + return False + return True + + def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): + return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj) + + def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): + return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj) + + +class WorkflowJobNodeAccess(BaseAccess): + ''' + I can see a WorkflowJobNode if I have permission to... + the workflow job template associated with... + the workflow job associated with the node. + + Any deletion or editing of individual nodes would undermine the integrity + of the graph structure. + Deletion must happen as a cascade delete from the workflow job. + ''' + model = WorkflowJobNode + + def get_queryset(self): + if self.user.is_superuser or self.user.is_system_auditor: + qs = self.model.objects.all() + else: + qs = self.model.objects.filter( + workflow_job__workflow_job_template__in=WorkflowJobTemplate.accessible_objects( + self.user, 'read_role')) + qs = qs.select_related('unified_job_template', 'job') + qs = qs.prefetch_related('success_nodes', 'failure_nodes', 'always_nodes') + return qs + + @check_superuser + def can_add(self, data): + if data is None: # Hide direct creation in API browser + return False + return ( + self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') and + self.check_related('credential', Credential, data, role_field='use_role') and + self.check_related('inventory', Inventory, data, role_field='use_role')) + + def can_change(self, obj, data): + return False + + def can_delete(self, obj): + return False + + +# TODO: notification attachments? +class WorkflowJobTemplateAccess(BaseAccess): + ''' + I can see/manage Workflow Job Templates when I have the matching object + roles (read_role to see, admin_role to manage), or I'm a super user + ''' + + model = WorkflowJobTemplate + + def get_queryset(self): + if self.user.is_superuser or self.user.is_system_auditor: + qs = self.model.objects.all() + else: + qs = self.model.accessible_objects(self.user, 'read_role') + return qs.select_related('created_by', 'modified_by', 'next_schedule', + 'admin_role', 'execute_role', 'read_role').all() + + @check_superuser + def can_read(self, obj): + return self.user in obj.read_role + + @check_superuser + def can_add(self, data): + ''' + a user can create a workflow job template if they are a superuser or an + admin of the organization that the workflow job template will belong to.
+ ''' + if not data: # So the browseable API will work + return Organization.accessible_objects(self.user, 'admin_role').exists() + + # will check this if surveys are added to WFJT + if 'survey_enabled' in data and data['survey_enabled']: + self.check_license(feature='surveys') + + return self.check_related('organization', Organization, data, mandatory=True) + + def can_copy(self, obj): + if self.save_messages: + missing_ujt = [] + missing_credentials = [] + missing_inventories = [] + qs = obj.workflow_job_template_nodes + qs = qs.prefetch_related('unified_job_template', 'inventory__use_role', 'credential__use_role') + for node in qs.all(): + if node.inventory and self.user not in node.inventory.use_role: + missing_inventories.append(node.inventory.name) + if node.credential and self.user not in node.credential.use_role: + missing_credentials.append(node.credential.name) + ujt = node.unified_job_template + if ujt and not self.user.can_access(UnifiedJobTemplate, 'start', ujt, validate_license=False): + missing_ujt.append(ujt.name) + if missing_ujt: + self.messages['templates_unable_to_copy'] = missing_ujt + if missing_credentials: + self.messages['credentials_unable_to_copy'] = missing_credentials + if missing_inventories: + self.messages['inventories_unable_to_copy'] = missing_inventories + + return self.check_related('organization', Organization, {'reference_obj': obj}, mandatory=True) + + def can_start(self, obj, validate_license=True): + if validate_license: + # check basic license, node count + self.check_license() + # if surveys are added to WFJTs, check license here + if obj.survey_enabled: + self.check_license(feature='surveys') + + # Super users can start any job + if self.user.is_superuser: + return True + + return self.user in obj.execute_role + + def can_change(self, obj, data): + # Check survey license if surveys are added to WFJTs + if (data and 'survey_enabled' in data and + obj.survey_enabled != data['survey_enabled'] and data['survey_enabled']): + self.check_license(feature='surveys') + + if self.user.is_superuser: + return True + + return self.check_related('organization', Organization, data, obj=obj) and self.user in obj.admin_role + + def can_delete(self, obj): + is_delete_allowed = self.user.is_superuser or self.user in obj.admin_role + if not is_delete_allowed: + return False + active_jobs = [dict(type="workflow_job", id=o.id) + for o in obj.workflow_jobs.filter(status__in=ACTIVE_STATES)] + if len(active_jobs) > 0: + raise StateConflict({"conflict": _("Resource is being used by running jobs"), + "active_jobs": active_jobs}) + return True + + +class WorkflowJobAccess(BaseAccess): + ''' + I can only see Workflow Jobs if I can see the associated + workflow job template that it was created from.
I can delete them if I am admin of their workflow job template + I can cancel one if I can delete it + I can also cancel it if I started it + ''' + model = WorkflowJob + + def get_queryset(self): + if self.user.is_superuser or self.user.is_system_auditor: + qs = self.model.objects.all() + else: + qs = WorkflowJob.objects.filter( + workflow_job_template__in=WorkflowJobTemplate.accessible_objects( + self.user, 'read_role')) + return qs.select_related('created_by', 'modified_by') + + def can_add(self, data): + # Old add-start system for launching jobs is being deprecated, and + # not supported for new types of resources + return False + + def can_change(self, obj, data): + return False + + @check_superuser + def can_delete(self, obj): + return (obj.workflow_job_template and + obj.workflow_job_template.organization and + self.user in obj.workflow_job_template.organization.admin_role) + + def get_method_capability(self, method, obj, parent_obj): + if method == 'start': + # Return simplistic permission, will perform detailed check on POST + if not obj.workflow_job_template: + return self.user.is_superuser + return self.user in obj.workflow_job_template.execute_role + return super(WorkflowJobAccess, self).get_method_capability(method, obj, parent_obj) + + def can_start(self, obj, validate_license=True): + if validate_license: + self.check_license() + + if self.user.is_superuser: + return True + + wfjt = obj.workflow_job_template + # only superusers can relaunch orphans + if not wfjt: + return False + + # execute permission to WFJT is mandatory for any relaunch + if self.user not in wfjt.execute_role: + return False + + # user's WFJT access doesn't guarantee permission to launch, introspect nodes + return self.can_recreate(obj) + + def can_recreate(self, obj): + node_qs = obj.workflow_job_nodes.all().prefetch_related('inventory', 'credential', 'unified_job_template') + node_access = WorkflowJobNodeAccess(user=self.user) + wj_add_perm = True + for node in node_qs: + if not node_access.can_add({'reference_obj': node}): + wj_add_perm = False + if not wj_add_perm and self.save_messages: + self.messages['workflow_job_template'] = _('You do not have permission to the workflow job ' + 'resources required for relaunch.') + return wj_add_perm + + def can_cancel(self, obj): + if not obj.can_cancel: + return False + return self.can_delete(obj) or self.user == obj.created_by + + class AdHocCommandAccess(BaseAccess): ''' I can only see/run ad hoc commands when: @@ -1166,26 +1730,21 @@ inventory_qs = Inventory.accessible_objects(self.user, 'read_role') return qs.filter(inventory__in=inventory_qs) - def can_add(self, data): + def can_add(self, data, validate_license=True): if not data: # So the browseable API will work return True - self.check_license() + if validate_license: + self.check_license() # If a credential is provided, the user should have use access to it. - credential_pk = get_pk_from_dict(data, 'credential') - if credential_pk: - credential = get_object_or_400(Credential, pk=credential_pk) - if self.user not in credential.use_role: - return False + if not self.check_related('credential', Credential, data, role_field='use_role'): + return False # Check that the user has the run ad hoc command permission on the # given inventory.
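# For illustration, the role_field dispatch that check_related() performs in
# calls like the two above, reduced to a toy: the permission test reads a
# named role attribute off the related object with getattr. FakeInventory
# and has_role are hypothetical names, with roles as plain sets.
class FakeInventory:
    def __init__(self):
        self.use_role = set()    # stand-ins for Role memberships
        self.adhoc_role = set()

def has_role(user, resource, role_field='use_role'):
    return user in getattr(resource, role_field)

inv = FakeInventory()
inv.adhoc_role.add('carol')
assert has_role('carol', inv, role_field='adhoc_role')  # may run ad hoc commands
assert not has_role('carol', inv)                       # but holds no 'use' grant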
- inventory_pk = get_pk_from_dict(data, 'inventory') - if inventory_pk: - inventory = get_object_or_400(Inventory, pk=inventory_pk) - if self.user not in inventory.adhoc_role: - return False + if not self.check_related('inventory', Inventory, data, role_field='adhoc_role'): + return False return True @@ -1196,11 +1755,11 @@ class AdHocCommandAccess(BaseAccess): def can_delete(self, obj): return obj.inventory is not None and self.user in obj.inventory.organization.admin_role - def can_start(self, obj): + def can_start(self, obj, validate_license=True): return self.can_add({ 'credential': obj.credential_id, 'inventory': obj.inventory_id, - }) + }, validate_license=validate_license) def can_cancel(self, obj): if not obj.can_cancel: @@ -1209,6 +1768,7 @@ class AdHocCommandAccess(BaseAccess): return True return obj.inventory is not None and self.user in obj.inventory.admin_role + class AdHocCommandEventAccess(BaseAccess): ''' I can see ad hoc command event records whenever I can read both ad hoc @@ -1237,6 +1797,7 @@ class AdHocCommandEventAccess(BaseAccess): def can_delete(self, obj): return False + class JobHostSummaryAccess(BaseAccess): ''' I can see job/host summary records whenever I can read both job and host. @@ -1262,6 +1823,7 @@ class JobHostSummaryAccess(BaseAccess): def can_delete(self, obj): return False + class JobEventAccess(BaseAccess): ''' I can see job event records whenever I can read both job and host. @@ -1270,21 +1832,15 @@ class JobEventAccess(BaseAccess): model = JobEvent def get_queryset(self): - qs = self.model.objects.all() - qs = qs.select_related('job', 'job__job_template', 'host', 'parent') - qs = qs.prefetch_related('hosts', 'children') - - # Filter certain "internal" events generated by async polling. - qs = qs.exclude(event__in=('runner_on_ok', 'runner_on_failed'), - event_data__icontains='"ansible_job_id": "', - event_data__contains='"module_name": "async_status"') + qs = self.model.objects + qs = qs.prefetch_related('hosts', 'children', 'job__job_template', 'host') if self.user.is_superuser or self.user.is_system_auditor: return qs.all() - job_qs = self.user.get_queryset(Job) - host_qs = self.user.get_queryset(Host) - return qs.filter(Q(host__isnull=True) | Q(host__in=host_qs), job__in=job_qs) + return qs.filter( + Q(host__inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role')) | + Q(job__job_template__in=JobTemplate.accessible_pk_qs(self.user, 'read_role'))) def can_add(self, data): return False @@ -1295,32 +1851,36 @@ class JobEventAccess(BaseAccess): def can_delete(self, obj): return False + class UnifiedJobTemplateAccess(BaseAccess): ''' I can see a unified job template whenever I can see the same project, - inventory source or job template. Unified job templates do not include - projects without SCM configured or inventory sources without a cloud - source. + inventory source, WFJT, or job template. Unified job templates do not include + inventory sources without a cloud source. 
''' model = UnifiedJobTemplate def get_queryset(self): - qs = self.model.objects.all() - project_qs = self.user.get_queryset(Project).filter(scm_type__in=[s[0] for s in Project.SCM_TYPE_CHOICES]) - inventory_source_qs = self.user.get_queryset(InventorySource).filter(source__in=CLOUD_INVENTORY_SOURCES) - job_template_qs = self.user.get_queryset(JobTemplate) - system_job_template_qs = self.user.get_queryset(SystemJobTemplate) - qs = qs.filter(Q(Project___in=project_qs) | - Q(InventorySource___in=inventory_source_qs) | - Q(JobTemplate___in=job_template_qs) | - Q(systemjobtemplate__in=system_job_template_qs)) + if self.user.is_superuser or self.user.is_system_auditor: + qs = self.model.objects.all() + else: + qs = self.model.objects.filter( + Q(pk__in=self.model.accessible_pk_qs(self.user, 'read_role')) | + Q(inventorysource__inventory__id__in=Inventory._accessible_pk_qs( + Inventory, self.user, 'read_role'))) + qs = qs.exclude(inventorysource__source="") + qs = qs.select_related( 'created_by', 'modified_by', 'next_schedule', + ) + # prefetch last/current jobs so we get the real instance + qs = qs.prefetch_related( 'last_job', 'current_job', + Prefetch('labels', queryset=Label.objects.all().order_by('name')) ) # WISH - sure would be nice if the following worked, but it does not. @@ -1335,6 +1895,12 @@ class UnifiedJobTemplateAccess(BaseAccess): return qs.all() + def can_start(self, obj, validate_license=True): + access_class = access_registry.get(obj.__class__, [])[0] + access_instance = access_class(self.user) + return access_instance.can_start(obj, validate_license=validate_license) + + class UnifiedJobAccess(BaseAccess): ''' I can see a unified job whenever I can see the same project update, @@ -1344,23 +1910,25 @@ class UnifiedJobAccess(BaseAccess): model = UnifiedJob def get_queryset(self): - qs = self.model.objects.all() - project_update_qs = self.user.get_queryset(ProjectUpdate) - inventory_update_qs = self.user.get_queryset(InventoryUpdate).filter(source__in=CLOUD_INVENTORY_SOURCES) - job_qs = self.user.get_queryset(Job) - ad_hoc_command_qs = self.user.get_queryset(AdHocCommand) - system_job_qs = self.user.get_queryset(SystemJob) - qs = qs.filter(Q(ProjectUpdate___in=project_update_qs) | - Q(InventoryUpdate___in=inventory_update_qs) | - Q(Job___in=job_qs) | - Q(AdHocCommand___in=ad_hoc_command_qs) | - Q(SystemJob___in=system_job_qs)) - qs = qs.select_related( + if self.user.is_superuser or self.user.is_system_auditor: + qs = self.model.objects.all() + else: + inv_pk_qs = Inventory._accessible_pk_qs(Inventory, self.user, 'read_role') + org_auditor_qs = Organization.objects.filter( + Q(admin_role__members=self.user) | Q(auditor_role__members=self.user)) + qs = self.model.objects.filter( + Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role')) | + Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs) | + Q(adhoccommand__inventory__id__in=inv_pk_qs) | + Q(job__inventory__organization__in=org_auditor_qs) | + Q(job__project__organization__in=org_auditor_qs) + ) + qs = qs.prefetch_related( 'created_by', 'modified_by', - ) - qs = qs.prefetch_related( + 'unified_job_node__workflow_job', 'unified_job_template', + Prefetch('labels', queryset=Label.objects.all().order_by('name')) ) # WISH - sure would be nice if the following worked, but it does not. 
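# NOTE: the queryset rewrites in this file share one RBAC pattern: build
# an unevaluated subquery of readable primary keys with accessible_pk_qs()
# and OR the Q() clauses together, instead of evaluating a separate
# queryset per model as the removed code did. A minimal sketch of the
# consuming side, using calls that appear in this diff (the function name
# is illustrative):
#
#     from django.db.models import Q
#
#     def visible_unified_jobs(user):
#         # accessible_pk_qs() returns a pk subquery, so the whole
#         # visibility check collapses into a single SQL query.
#         ujt_pks = UnifiedJobTemplate.accessible_pk_qs(user, 'read_role')
#         inv_pks = Inventory._accessible_pk_qs(Inventory, user, 'read_role')
#         return UnifiedJob.objects.filter(
#             Q(unified_job_template_id__in=ujt_pks) |
#             Q(adhoccommand__inventory__id__in=inv_pks))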
@@ -1383,6 +1951,7 @@ class UnifiedJobAccess(BaseAccess): #) return qs.all() + class ScheduleAccess(BaseAccess): ''' I can see a schedule if I can see its related unified job, I can create them or update them if I have write access @@ -1396,13 +1965,12 @@ class ScheduleAccess(BaseAccess): qs = qs.prefetch_related('unified_job_template') if self.user.is_superuser or self.user.is_system_auditor: return qs.all() - job_template_qs = self.user.get_queryset(JobTemplate) - inventory_source_qs = self.user.get_queryset(InventorySource) - project_qs = self.user.get_queryset(Project) - unified_qs = UnifiedJobTemplate.objects.filter(jobtemplate__in=job_template_qs) | \ - UnifiedJobTemplate.objects.filter(Q(project__in=project_qs)) | \ - UnifiedJobTemplate.objects.filter(Q(inventorysource__in=inventory_source_qs)) - return qs.filter(unified_job_template__in=unified_qs) + + unified_pk_qs = UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role') + inv_src_qs = InventorySource.objects.filter(inventory_id=Inventory._accessible_pk_qs(Inventory, self.user, 'read_role')) + return qs.filter( + Q(unified_job_template_id__in=unified_pk_qs) | + Q(unified_job_template_id__in=inv_src_qs.values_list('pk', flat=True))) @check_superuser def can_read(self, obj): @@ -1414,28 +1982,21 @@ class ScheduleAccess(BaseAccess): @check_superuser def can_add(self, data): - pk = get_pk_from_dict(data, 'unified_job_template') - obj = get_object_or_400(UnifiedJobTemplate, pk=pk) - if obj: - return self.user.can_access(type(obj), 'change', obj, None) - else: - return False + return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True) @check_superuser def can_change(self, obj, data): - if obj and obj.unified_job_template: - job_class = obj.unified_job_template - return self.user.can_access(type(job_class), 'change', job_class, None) - else: - return False + if self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, mandatory=True): + return True + # Users with execute role can modify the schedules they created + return ( + obj.created_by == self.user and + self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True)) + - @check_superuser def can_delete(self, obj): - if obj and obj.unified_job_template: - job_class = obj.unified_job_template - return self.user.can_access(type(job_class), 'change', job_class, None) - else: - return False + return self.can_change(obj, {}) + class NotificationTemplateAccess(BaseAccess): ''' @@ -1464,21 +2025,14 @@ class NotificationTemplateAccess(BaseAccess): def can_add(self, data): if not data: return Organization.accessible_objects(self.user, 'admin_role').exists() - org_pk = get_pk_from_dict(data, 'organization') - org = get_object_or_400(Organization, pk=org_pk) - return self.user in org.admin_role + return self.check_related('organization', Organization, data, mandatory=True) @check_superuser def can_change(self, obj, data): if obj.organization is None: # only superusers are allowed to edit orphan notification templates return False - org_pk = get_pk_from_dict(data, 'organization') - if obj and org_pk and obj.organization.pk != org_pk: - org = get_object_or_400(Organization, pk=org_pk) - if self.user not in org.admin_role: - return False - return self.user in obj.organization.admin_role + return self.check_related('organization', Organization, data, obj=obj, mandatory=True) def can_admin(self, obj, data): return self.can_change(obj, data) @@ -1486,6
+2040,13 @@ class NotificationTemplateAccess(BaseAccess): def can_delete(self, obj): return self.can_change(obj, None) + @check_superuser + def can_start(self, obj, validate_license=True): + if obj.organization is None: + return False + return self.user in obj.organization.admin_role + + class NotificationAccess(BaseAccess): ''' I can see/use a notification if I have permission to @@ -1493,9 +2054,9 @@ class NotificationAccess(BaseAccess): model = Notification def get_queryset(self): - qs = self.model.objects.all() + qs = self.model.objects.prefetch_related('notification_template') if self.user.is_superuser or self.user.is_system_auditor: - return qs + return qs.all() return self.model.objects.filter( Q(notification_template__organization__in=self.user.admin_of_organizations) | Q(notification_template__organization__in=self.user.auditor_of_organizations) @@ -1507,6 +2068,7 @@ class NotificationAccess(BaseAccess): def can_delete(self, obj): return self.user.can_access(NotificationTemplate, 'delete', obj.notification_template) + class LabelAccess(BaseAccess): ''' I can see/use a Label if I have permission to associated organization @@ -1516,7 +2078,7 @@ class LabelAccess(BaseAccess): def get_queryset(self): if self.user.is_superuser or self.user.is_system_auditor: return self.model.objects.all() - return self.model.objects.filter( + return self.model.objects.all().filter( organization__in=Organization.accessible_objects(self.user, 'read_role') ) @@ -1528,10 +2090,7 @@ class LabelAccess(BaseAccess): def can_add(self, data): if not data: # So the browseable API will work return True - - org_pk = get_pk_from_dict(data, 'organization') - org = get_object_or_400(Organization, pk=org_pk) - return self.user in org.member_role + return self.check_related('organization', Organization, data, role_field='member_role', mandatory=True) @check_superuser def can_change(self, obj, data): @@ -1543,6 +2102,7 @@ class LabelAccess(BaseAccess): def can_delete(self, obj): return self.can_change(obj, None) + class ActivityStreamAccess(BaseAccess): ''' I can see activity stream events only when I have permission on all objects included in the event @@ -1572,11 +2132,12 @@ class ActivityStreamAccess(BaseAccess): - custom inventory scripts ''' qs = self.model.objects.all() - qs = qs.select_related('actor') qs = qs.prefetch_related('organization', 'user', 'inventory', 'host', 'group', 'inventory_source', 'inventory_update', 'credential', 'team', 'project', 'project_update', - 'permission', 'job_template', 'job', 'ad_hoc_command', - 'notification_template', 'notification', 'label', 'role') + 'job_template', 'job', 'ad_hoc_command', + 'notification_template', 'notification', 'label', 'role', 'actor', + 'schedule', 'custom_inventory_script', 'unified_job_template', + 'workflow_job_template', 'workflow_job', 'workflow_job_template_node') if self.user.is_superuser or self.user.is_system_auditor: return qs.all() @@ -1589,6 +2150,7 @@ class ActivityStreamAccess(BaseAccess): project_set = Project.accessible_objects(self.user, 'read_role') jt_set = JobTemplate.accessible_objects(self.user, 'read_role') team_set = Team.accessible_objects(self.user, 'read_role') + wfjt_set = WorkflowJobTemplate.accessible_objects(self.user, 'read_role') return qs.filter( Q(ad_hoc_command__inventory__in=inventory_set) | @@ -1606,6 +2168,9 @@ class ActivityStreamAccess(BaseAccess): Q(project_update__project__in=project_set) | Q(job_template__in=jt_set) | Q(job__job_template__in=jt_set) | + Q(workflow_job_template__in=wfjt_set) | + 
Q(workflow_job_template_node__workflow_job_template__in=wfjt_set) | + Q(workflow_job__workflow_job_template__in=wfjt_set) | Q(notification_template__organization__in=auditing_orgs) | Q(notification__notification_template__organization__in=auditing_orgs) | Q(label__organization__in=auditing_orgs) | @@ -1621,6 +2186,7 @@ class ActivityStreamAccess(BaseAccess): def can_delete(self, obj): return False + class CustomInventoryScriptAccess(BaseAccess): model = CustomInventoryScript @@ -1634,36 +2200,21 @@ class CustomInventoryScriptAccess(BaseAccess): def can_add(self, data): if not data: # So the browseable API will work return Organization.accessible_objects(self.user, 'admin_role').exists() - org_pk = get_pk_from_dict(data, 'organization') - org = get_object_or_400(Organization, pk=org_pk) - return self.user in org.admin_role + return self.check_related('organization', Organization, data, mandatory=True) @check_superuser def can_admin(self, obj, data=None): - return self.user in obj.admin_role + return self.check_related('organization', Organization, data, obj=obj) and self.user in obj.admin_role @check_superuser def can_change(self, obj, data): - return self.can_admin(obj) + return self.can_admin(obj, data=data) @check_superuser def can_delete(self, obj): return self.can_admin(obj) -class TowerSettingsAccess(BaseAccess): - ''' - - I can see settings when - - I am a super user - - I can edit settings when - - I am a super user - - I can clear settings when - - I am a super user - ''' - - model = TowerSettings - - class RoleAccess(BaseAccess): ''' - I can see roles when @@ -1694,8 +2245,13 @@ class RoleAccess(BaseAccess): @check_superuser def can_unattach(self, obj, sub_obj, relationship, data=None, skip_sub_obj_read_check=False): - if not skip_sub_obj_read_check and relationship in ['members', 'member_role.parents']: - if not check_user_access(self.user, sub_obj.__class__, 'read', sub_obj): + if not skip_sub_obj_read_check and relationship in ['members', 'member_role.parents', 'parents']: + # If we are unattaching a team Role, check the Team read access + if relationship == 'parents': + sub_obj_resource = sub_obj.content_object + else: + sub_obj_resource = sub_obj + if not check_user_access(self.user, sub_obj_resource.__class__, 'read', sub_obj_resource): return False if isinstance(obj.content_object, ResourceMixin) and \ @@ -1735,8 +2291,11 @@ register_access(UnifiedJobTemplate, UnifiedJobTemplateAccess) register_access(UnifiedJob, UnifiedJobAccess) register_access(ActivityStream, ActivityStreamAccess) register_access(CustomInventoryScript, CustomInventoryScriptAccess) -register_access(TowerSettings, TowerSettingsAccess) register_access(Role, RoleAccess) register_access(NotificationTemplate, NotificationTemplateAccess) register_access(Notification, NotificationAccess) register_access(Label, LabelAccess) +register_access(WorkflowJobTemplateNode, WorkflowJobTemplateNodeAccess) +register_access(WorkflowJobNode, WorkflowJobNodeAccess) +register_access(WorkflowJobTemplate, WorkflowJobTemplateAccess) +register_access(WorkflowJob, WorkflowJobAccess) diff --git a/awx/main/apps.py b/awx/main/apps.py new file mode 100644 index 0000000000..f1ebe624d2 --- /dev/null +++ b/awx/main/apps.py @@ -0,0 +1,9 @@ +# Django +from django.apps import AppConfig +from django.utils.translation import ugettext_lazy as _ + + +class MainConfig(AppConfig): + + name = 'awx.main' + verbose_name = _('Main') diff --git a/awx/main/conf.py b/awx/main/conf.py index e506432f21..bbdf159ae6 100644 --- a/awx/main/conf.py +++ 
b/awx/main/conf.py @@ -1,50 +1,326 @@ -# Copyright (c) 2015 Ansible, Inc.. -# All Rights Reserved. - +# Python +import json import logging +import os -from django.conf import settings as django_settings -from django.db.utils import ProgrammingError -from django.db import OperationalError -from awx.main.models.configuration import TowerSettings +# Django +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx.conf import fields, register logger = logging.getLogger('awx.main.conf') -class TowerConfiguration(object): +register( + 'ACTIVITY_STREAM_ENABLED', + field_class=fields.BooleanField, + label=_('Enable Activity Stream'), + help_text=_('Enable capturing activity for the Tower activity stream.'), + category=_('System'), + category_slug='system', + feature_required='activity_streams', +) - # TODO: Caching so we don't have to hit the database every time for settings - def __getattr__(self, key): - settings_manifest = django_settings.TOWER_SETTINGS_MANIFEST - if key not in settings_manifest: - raise AttributeError("Tower Setting with key '{0}' is not defined in the manifest".format(key)) - default_value = settings_manifest[key]['default'] - ts = TowerSettings.objects.filter(key=key) - try: - if not ts.exists(): - try: - val_actual = getattr(django_settings, key) - except AttributeError: - val_actual = default_value - return val_actual - return ts[0].value_converted - except (ProgrammingError, OperationalError), e: - # Database is not available yet, usually during migrations so lets use the default - logger.debug("Database settings not available yet, using defaults ({0})".format(e)) - return default_value +register( + 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', + field_class=fields.BooleanField, + label=_('Enable Activity Stream for Inventory Sync'), + help_text=_('Enable capturing activity for the Tower activity stream when running inventory sync.'), + category=_('System'), + category_slug='system', + feature_required='activity_streams', +) - def __setattr__(self, key, value): - settings_manifest = django_settings.TOWER_SETTINGS_MANIFEST - if key not in settings_manifest: - raise AttributeError("Tower Setting with key '{0}' does not exist".format(key)) - settings_entry = settings_manifest[key] - try: - settings_actual = TowerSettings.objects.get(key=key) - except TowerSettings.DoesNotExist: - settings_actual = TowerSettings(key=key, - description=settings_entry['description'], - category=settings_entry['category'], - value_type=settings_entry['type']) - settings_actual.value_converted = value - settings_actual.save() +register( + 'ORG_ADMINS_CAN_SEE_ALL_USERS', + field_class=fields.BooleanField, + label=_('All Users Visible to Organization Admins'), + help_text=_('Controls whether any Organization Admin can view all users, even those not associated with their Organization.'), + category=_('System'), + category_slug='system', +) -tower_settings = TowerConfiguration() +register( + 'TOWER_ADMIN_ALERTS', + field_class=fields.BooleanField, + label=_('Enable Tower Administrator Alerts'), + help_text=_('Allow Tower to email Admin users for system events that may require attention.'), + category=_('System'), + category_slug='system', +) + +register( + 'TOWER_URL_BASE', + field_class=fields.URLField, + schemes=('http', 'https'), + allow_plain_hostname=True, # Allow hostname only without TLD. 
+ label=_('Base URL of the Tower host'), + help_text=_('This setting is used by services like notifications to render ' + 'a valid url to the Tower host.'), + category=_('System'), + category_slug='system', +) + +register( + 'REMOTE_HOST_HEADERS', + field_class=fields.StringListField, + label=_('Remote Host Headers'), + help_text=_('HTTP headers and meta keys to search to determine remote host ' + 'name or IP. Add additional items to this list, such as ' + '"HTTP_X_FORWARDED_FOR", if behind a reverse proxy.\n\n' + 'Note: The headers will be searched in order and the first ' + 'found remote host name or IP will be used.\n\n' + 'In the below example 8.8.8.7 would be the chosen IP address.\n' + 'X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n' + 'Host: 127.0.0.1\n' + 'REMOTE_HOST_HEADERS = [\'HTTP_X_FORWARDED_FOR\', ' + '\'REMOTE_ADDR\', \'REMOTE_HOST\']'), + category=_('System'), + category_slug='system', +) + + +def _load_default_license_from_file(): + try: + license_file = os.environ.get('AWX_LICENSE_FILE', '/etc/tower/license') + if os.path.exists(license_file): + license_data = json.load(open(license_file)) + logger.debug('Read license data from "%s".', license_file) + return license_data + except: + logger.warning('Could not read license from "%s".', license_file, exc_info=True) + return {} + + +register( + 'LICENSE', + field_class=fields.DictField, + default=_load_default_license_from_file, + label=_('Tower License'), + help_text=_('The license controls which features and functionality are ' + 'enabled in Tower. Use /api/v1/config/ to update or change ' + 'the license.'), + category=_('System'), + category_slug='system', +) + +register( + 'AD_HOC_COMMANDS', + field_class=fields.StringListField, + label=_('Ansible Modules Allowed for Ad Hoc Jobs'), + help_text=_('List of modules allowed to be used by ad-hoc jobs.'), + category=_('Jobs'), + category_slug='jobs', + required=False, +) + +register( + 'AWX_PROOT_ENABLED', + field_class=fields.BooleanField, + label=_('Enable job isolation'), + help_text=_('Isolates an Ansible job from protected parts of the Tower system to prevent exposing sensitive information.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_PROOT_BASE_PATH', + field_class=fields.CharField, + label=_('Job isolation execution path'), + help_text=_('Create temporary working directories for isolated jobs in this location.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_PROOT_HIDE_PATHS', + field_class=fields.StringListField, + required=False, + label=_('Paths to hide from isolated jobs'), + help_text=_('Additional paths to hide from isolated processes.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_PROOT_SHOW_PATHS', + field_class=fields.StringListField, + required=False, + label=_('Paths to expose to isolated jobs'), + help_text=_('Whitelist of paths that would otherwise be hidden to expose to isolated jobs.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'STDOUT_MAX_BYTES_DISPLAY', + field_class=fields.IntegerField, + min_value=0, + label=_('Standard Output Maximum Display Size'), + help_text=_('Maximum Size of Standard Output in bytes to display before requiring the output be downloaded.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'EVENT_STDOUT_MAX_BYTES_DISPLAY', + field_class=fields.IntegerField, + min_value=0, + label=_('Job Event Standard Output Maximum Display Size'), + help_text=_(u'Maximum Size of Standard Output in bytes to display for a single job or 
ad hoc command event. `stdout` will end with `\u2026` when truncated.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'SCHEDULE_MAX_JOBS', + field_class=fields.IntegerField, + min_value=1, + label=_('Maximum Scheduled Jobs'), + help_text=_('Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_ANSIBLE_CALLBACK_PLUGINS', + field_class=fields.StringListField, + required=False, + label=_('Ansible Callback Plugins'), + help_text=_('List of paths to search for extra callback plugins to be used when running jobs.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'DEFAULT_JOB_TIMEOUT', + field_class=fields.IntegerField, + min_value=0, + default=0, + label=_('Default Job Timeout'), + help_text=_('Maximum time to allow jobs to run. Use value of 0 to indicate that no ' + 'timeout should be imposed. A timeout set on an individual job template will override this.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'DEFAULT_INVENTORY_UPDATE_TIMEOUT', + field_class=fields.IntegerField, + min_value=0, + default=0, + label=_('Default Inventory Update Timeout'), + help_text=_('Maximum time to allow inventory updates to run. Use value of 0 to indicate that no ' + 'timeout should be imposed. A timeout set on an individual inventory source will override this.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'DEFAULT_PROJECT_UPDATE_TIMEOUT', + field_class=fields.IntegerField, + min_value=0, + default=0, + label=_('Default Project Update Timeout'), + help_text=_('Maximum time to allow project updates to run. Use value of 0 to indicate that no ' + 'timeout should be imposed. 
A timeout set on an individual project will override this.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'LOG_AGGREGATOR_HOST', + field_class=fields.CharField, + allow_null=True, + label=_('Logging Aggregator'), + help_text=_('Hostname/IP where external logs will be sent to.'), + category=_('Logging'), + category_slug='logging', +) +register( + 'LOG_AGGREGATOR_PORT', + field_class=fields.IntegerField, + allow_null=True, + label=_('Logging Aggregator Port'), + help_text=_('Port on Logging Aggregator to send logs to (if required).'), + category=_('Logging'), + category_slug='logging', +) +register( + 'LOG_AGGREGATOR_TYPE', + field_class=fields.ChoiceField, + choices=['logstash', 'splunk', 'loggly', 'sumologic', 'other'], + allow_null=True, + label=_('Logging Aggregator Type'), + help_text=_('Format messages for the chosen log aggregator.'), + category=_('Logging'), + category_slug='logging', +) +register( + 'LOG_AGGREGATOR_USERNAME', + field_class=fields.CharField, + allow_blank=True, + default='', + label=_('Logging Aggregator Username'), + help_text=_('Username for external log aggregator (if required).'), + category=_('Logging'), + category_slug='logging', + required=False, +) +register( + 'LOG_AGGREGATOR_PASSWORD', + field_class=fields.CharField, + allow_blank=True, + default='', + encrypted=True, + label=_('Logging Aggregator Password/Token'), + help_text=_('Password or authentication token for external log aggregator (if required).'), + category=_('Logging'), + category_slug='logging', + required=False, +) +register( + 'LOG_AGGREGATOR_LOGGERS', + field_class=fields.StringListField, + default=['awx', 'activity_stream', 'job_events', 'system_tracking'], + label=_('Loggers to send data to the log aggregator from'), + help_text=_('List of loggers that will send HTTP logs to the collector, these can ' + 'include any or all of: \n' + 'awx - Tower service logs\n' + 'activity_stream - activity stream records\n' + 'job_events - callback data from Ansible job events\n' + 'system_tracking - facts gathered from scan jobs.'), + category=_('Logging'), + category_slug='logging', +) +register( + 'LOG_AGGREGATOR_INDIVIDUAL_FACTS', + field_class=fields.BooleanField, + default=False, + label=_('Log System Tracking Facts Individually'), + help_text=_('If set, system tracking facts will be sent for each package, service, or ' + 'other item found in a scan, allowing for greater search query granularity. 
' + 'If unset, facts will be sent as a single dictionary, allowing for greater ' + 'efficiency in fact processing.'), + category=_('Logging'), + category_slug='logging', +) +register( + 'LOG_AGGREGATOR_ENABLED', + field_class=fields.BooleanField, + default=False, + label=_('Enable External Logging'), + help_text=_('Enable sending logs to external log aggregator.'), + category=_('Logging'), + category_slug='logging', +) +register( + 'LOG_AGGREGATOR_TOWER_UUID', + field_class=fields.CharField, + allow_blank=True, + label=_('Cluster-wide Tower unique identifier.'), + help_text=_('Useful to uniquely identify Tower instances.'), + category=_('Logging'), + category_slug='logging', + default=None, +) diff --git a/awx/main/consumers.py b/awx/main/consumers.py new file mode 100644 index 0000000000..c42f16ef21 --- /dev/null +++ b/awx/main/consumers.py @@ -0,0 +1,89 @@ +import json +import logging +import urllib + +from channels import Group +from channels.sessions import channel_session +from channels.handler import AsgiRequest + +from django.core.serializers.json import DjangoJSONEncoder + +from django.contrib.auth.models import User +from awx.main.models.organization import AuthToken + + +logger = logging.getLogger('awx.main.consumers') + + +def discard_groups(message): + if 'groups' in message.channel_session: + for group in message.channel_session['groups']: + Group(group).discard(message.reply_channel) + + +@channel_session +def ws_connect(message): + connect_text = {'accept':False, 'user':None} + + message.content['method'] = 'FAKE' + request = AsgiRequest(message) + token = request.COOKIES.get('token', None) + if token is not None: + token = urllib.unquote(token).strip('"') + try: + auth_token = AuthToken.objects.get(key=token) + if auth_token.in_valid_tokens: + message.channel_session['user_id'] = auth_token.user_id + connect_text['accept'] = True + connect_text['user'] = auth_token.user_id + except AuthToken.DoesNotExist: + logger.error("auth_token provided was invalid.") + message.reply_channel.send({"text": json.dumps(connect_text)}) + + +@channel_session +def ws_disconnect(message): + discard_groups(message) + + +@channel_session +def ws_receive(message): + from awx.main.access import consumer_access + + user_id = message.channel_session.get('user_id', None) + if user_id is None: + logger.error("No valid user found for websocket.") + message.reply_channel.send({"text": json.dumps({"error": "no valid user"})}) + return None + + user = User.objects.get(pk=user_id) + raw_data = message.content['text'] + data = json.loads(raw_data) + + if 'groups' in data: + discard_groups(message) + groups = data['groups'] + current_groups = set(message.channel_session.pop('groups') if 'groups' in message.channel_session else []) + for group_name,v in groups.items(): + if type(v) is list: + for oid in v: + name = '{}-{}'.format(group_name, oid) + access_cls = consumer_access(group_name) + if access_cls is not None: + user_access = access_cls(user) + if not user_access.get_queryset().filter(pk=oid).exists(): + message.reply_channel.send({"text": json.dumps({"error": "access denied to channel {0} for resource id {1}".format(group_name, oid)})}) + continue + current_groups.add(name) + Group(name).add(message.reply_channel) + else: + current_groups.add(group_name) + Group(group_name).add(message.reply_channel) + message.channel_session['groups'] = list(current_groups) + + +def emit_channel_notification(group, payload): + try: + Group(group).send({"text": json.dumps(payload, cls=DjangoJSONEncoder)}) + except 
ValueError: + logger.error("Invalid payload emitting channel {} on topic: {}".format(group, payload)) diff --git a/awx/main/fields.py b/awx/main/fields.py index e95dbc1ee7..fe7e86b96f 100644 --- a/awx/main/fields.py +++ b/awx/main/fields.py @@ -19,17 +19,31 @@ from django.db.models.fields.related import ( ) from django.utils.encoding import smart_text +# Django-JSONField +from jsonfield import JSONField as upstream_JSONField + # AWX from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role from awx.main.utils import get_current_apps -__all__ = ['AutoOneToOneField', 'ImplicitRoleField'] +__all__ = ['AutoOneToOneField', 'ImplicitRoleField', 'JSONField'] +class JSONField(upstream_JSONField): + + def db_type(self, connection): + return 'text' + + def from_db_value(self, value, expression, connection, context): + if value in {'', None} and not self.null: + return {} + return super(JSONField, self).from_db_value(value, expression, connection, context) + # Based on AutoOneToOneField from django-annoying: # https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py + class AutoSingleRelatedObjectDescriptor(SingleRelatedObjectDescriptor): """Descriptor for access to the object from its related class.""" @@ -46,6 +60,7 @@ class AutoSingleRelatedObjectDescriptor(SingleRelatedObjectDescriptor): obj.save() return obj + class AutoOneToOneField(models.OneToOneField): """OneToOneField that creates related object if it doesn't exist.""" diff --git a/awx/main/ha.py b/awx/main/ha.py index 5341ea32bb..35ed6f64f0 100644 --- a/awx/main/ha.py +++ b/awx/main/ha.py @@ -4,6 +4,7 @@ # AWX from awx.main.models import Instance + def is_ha_environment(): """Return True if this is an HA environment, and False otherwise. diff --git a/awx/main/management/commands/_base_instance.py b/awx/main/management/commands/_base_instance.py deleted file mode 100644 index c92fa3b640..0000000000 --- a/awx/main/management/commands/_base_instance.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. 
- -from optparse import make_option - -from django.core.management.base import BaseCommand, CommandError -from django.conf import settings - -from awx.main.models import Project - - -class OptionEnforceError(Exception): - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) - -class BaseCommandInstance(BaseCommand): - #option_list = BaseCommand.option_list - - def __init__(self): - super(BaseCommandInstance, self).__init__() - self.enforce_primary_role = False - self.enforce_roles = False - self.enforce_hostname_set = False - self.enforce_unique_find = False - - self.option_primary = False - self.option_secondary = False - self.option_hostname = None - self.option_uuid = None - - self.UUID = settings.SYSTEM_UUID - self.unique_fields = {} - - @staticmethod - def generate_option_hostname(): - return make_option('--hostname', - dest='hostname', - default='', - help='Find instance by specified hostname.') - - @staticmethod - def generate_option_hostname_set(): - return make_option('--hostname', - dest='hostname', - default='', - help='Hostname to assign to the new instance.') - - @staticmethod - def generate_option_primary(): - return make_option('--primary', - action='store_true', - default=False, - dest='primary', - help='Register instance as primary.') - - @staticmethod - def generate_option_secondary(): - return make_option('--secondary', - action='store_true', - default=False, - dest='secondary', - help='Register instance as secondary.') - - @staticmethod - def generate_option_uuid(): - return make_option('--uuid', - dest='uuid', - default='', - help='Find instance by specified uuid.') - - def include_option_primary_role(self): - BaseCommand.option_list += ( BaseCommandInstance.generate_option_primary(), ) - self.enforce_primary_role = True - - def include_options_roles(self): - self.include_option_primary_role() - BaseCommand.option_list += ( BaseCommandInstance.generate_option_secondary(), ) - self.enforce_roles = True - - def include_option_hostname_set(self): - BaseCommand.option_list += ( BaseCommandInstance.generate_option_hostname_set(), ) - self.enforce_hostname_set = True - - def include_option_hostname_uuid_find(self): - BaseCommand.option_list += ( BaseCommandInstance.generate_option_hostname(), BaseCommandInstance.generate_option_uuid(), ) - self.enforce_unique_find = True - - def get_option_hostname(self): - return self.option_hostname - - def get_option_uuid(self): - return self.option_uuid - - def is_option_primary(self): - return self.option_primary - - def is_option_secondary(self): - return self.option_secondary - - def get_UUID(self): - return self.UUID - - # for the enforce_unique_find policy - def get_unique_fields(self): - return self.unique_fields - - @property - def usage_error(self): - if self.enforce_roles and self.enforce_hostname_set: - return CommandError('--hostname and one of --primary or --secondary is required.') - elif self.enforce_hostname_set: - return CommandError('--hostname is required.') - elif self.enforce_primary_role: - return CommandError('--primary is required.') - elif self.enforce_roles: - return CommandError('One of --primary or --secondary is required.') - - def handle(self, *args, **options): - if self.enforce_hostname_set and self.enforce_unique_find: - raise OptionEnforceError('Can not enforce --hostname as a setter and --hostname as a getter') - - if self.enforce_roles: - self.option_primary = options['primary'] - self.option_secondary = options['secondary'] - - if self.is_option_primary() and 
self.is_option_secondary() or not (self.is_option_primary() or self.is_option_secondary()): - raise self.usage_error - elif self.enforce_primary_role: - if options['primary']: - self.option_primary = options['primary'] - else: - raise self.usage_error - - if self.enforce_hostname_set: - if options['hostname']: - self.option_hostname = options['hostname'] - else: - raise self.usage_error - - if self.enforce_unique_find: - if options['hostname']: - self.unique_fields['hostname'] = self.option_hostname = options['hostname'] - - if options['uuid']: - self.unique_fields['uuid'] = self.option_uuid = options['uuid'] - - if len(self.unique_fields) == 0: - self.unique_fields['uuid'] = self.get_UUID() - - @staticmethod - def __instance_str(instance, fields): - string = '(' - for field in fields: - string += '%s="%s",' % (field, getattr(instance, field)) - if len(fields) > 0: - string = string[:-1] - string += ')' - return string - - @staticmethod - def instance_str(instance): - return BaseCommandInstance.__instance_str(instance, ('uuid', 'hostname', 'role')) - - def update_projects(self, instance): - """Update all projects, ensuring the job runs against this instance, - which is the primary instance. - """ - for project in Project.objects.all(): - project.update() diff --git a/awx/main/management/commands/cleanup_activitystream.py b/awx/main/management/commands/cleanup_activitystream.py index f4803d2d84..cd3711790a 100644 --- a/awx/main/management/commands/cleanup_activitystream.py +++ b/awx/main/management/commands/cleanup_activitystream.py @@ -13,6 +13,7 @@ from django.utils.timezone import now # AWX from awx.main.models import ActivityStream + class Command(NoArgsCommand): ''' Management command to purge old activity stream events. diff --git a/awx/main/management/commands/cleanup_authtokens.py b/awx/main/management/commands/cleanup_authtokens.py index 65a8d67e6b..113fa52b2f 100644 --- a/awx/main/management/commands/cleanup_authtokens.py +++ b/awx/main/management/commands/cleanup_authtokens.py @@ -12,6 +12,7 @@ from django.utils.timezone import now # AWX from awx.main.models import * # noqa + class Command(BaseCommand): ''' Management command to cleanup expired auth tokens diff --git a/awx/main/management/commands/cleanup_facts.py b/awx/main/management/commands/cleanup_facts.py index 578bee3441..f6b3c76b26 100644 --- a/awx/main/management/commands/cleanup_facts.py +++ b/awx/main/management/commands/cleanup_facts.py @@ -13,11 +13,12 @@ from django.utils.timezone import now # AWX from awx.main.models.fact import Fact -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled OLDER_THAN = 'older_than' GRANULARITY = 'granularity' + class CleanupFacts(object): def __init__(self): self.timestamp = None @@ -27,7 +28,7 @@ class CleanupFacts(object): # Find all factVersion < pivot && > (pivot - granularity) grouped by host sorted by time descending (because it's indexed this way) # foreach group # Delete all except LAST entry (or Delete all except the FIRST entry, it's an arbitrary decision) - # + # # pivot -= granularity # group by host def cleanup(self, older_than_abs, granularity, module=None): @@ -89,17 +90,18 @@ class CleanupFacts(object): deleted_count = self.cleanup(t - older_than, granularity, module=module) print("Deleted %d facts." % deleted_count) + class Command(BaseCommand): help = 'Cleanup facts. For each host older than the value specified, keep one fact scan for each time window (granularity).' 
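# NOTE: --older_than and --granularity take relative-time strings such as
# 5d, 2w, 1y. The parser lives outside this hunk; the sketch below shows
# the expected semantics only (the helper name and the use of
# dateutil.relativedelta are assumptions):
#
#     import re
#     from dateutil.relativedelta import relativedelta
#
#     def parse_relative_time(value):
#         # e.g. '30d' -> relativedelta(days=30), '1y' -> relativedelta(years=1)
#         match = re.match(r'^(\d+)([dwy])$', value)
#         if not match:
#             raise ValueError('invalid relative time %r' % value)
#         count, unit = int(match.group(1)), match.group(2)
#         unit_map = {'d': 'days', 'w': 'weeks', 'y': 'years'}
#         return relativedelta(**{unit_map[unit]: count})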
option_list = BaseCommand.option_list + ( make_option('--older_than', dest='older_than', - default=None, - help='Specify the relative time to consider facts older than (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y).'), + default='30d', + help='Specify the relative time to consider facts older than (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y). Defaults to 30d.'), make_option('--granularity', dest='granularity', - default=None, - help='Window duration to group same hosts by for deletion (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y).'), + default='1w', + help='Window duration to group same hosts by for deletion (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y). Defaults to 1w.'), make_option('--module', dest='module', default=None, @@ -142,4 +144,3 @@ class Command(BaseCommand): raise CommandError('--granularity invalid value "%s"' % options[GRANULARITY]) cleanup_facts.run(older_than, granularity, module=options['module']) - diff --git a/awx/main/management/commands/cleanup_jobs.py b/awx/main/management/commands/cleanup_jobs.py index 777d21204f..cb03e4e9d6 100644 --- a/awx/main/management/commands/cleanup_jobs.py +++ b/awx/main/management/commands/cleanup_jobs.py @@ -12,7 +12,18 @@ from django.db import transaction from django.utils.timezone import now # AWX -from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob +from awx.main.models import ( + Job, AdHocCommand, ProjectUpdate, InventoryUpdate, + SystemJob, WorkflowJob, Notification +) +from awx.main.signals import ( # noqa + emit_update_inventory_on_created_or_deleted, + emit_update_inventory_computed_fields, + disable_activity_stream, + disable_computed_fields +) +from django.db.models.signals import post_save, post_delete, m2m_changed # noqa + class Command(NoArgsCommand): ''' @@ -29,107 +40,140 @@ class Command(NoArgsCommand): 'be removed)'), make_option('--jobs', dest='only_jobs', action='store_true', default=False, - help='Only remove jobs'), + help='Remove jobs'), make_option('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, - help='Only remove ad hoc commands'), + help='Remove ad hoc commands'), make_option('--project-updates', dest='only_project_updates', action='store_true', default=False, - help='Only remove project updates'), + help='Remove project updates'), make_option('--inventory-updates', dest='only_inventory_updates', action='store_true', default=False, - help='Only remove inventory updates'), + help='Remove inventory updates'), make_option('--management-jobs', default=False, action='store_true', dest='only_management_jobs', - help='Only remove management jobs') + help='Remove management jobs'), + make_option('--notifications', dest='only_notifications', + action='store_true', default=False, + help='Remove notifications'), + make_option('--workflow-jobs', default=False, + action='store_true', dest='only_workflow_jobs', + help='Remove workflow jobs') ) def cleanup_jobs(self): #jobs_qs = Job.objects.exclude(status__in=('pending', 'running')) #jobs_qs = jobs_qs.filter(created__lte=self.cutoff) + skipped, deleted = 0, 0 for job in Job.objects.all(): - job_display = '"%s" (started %s, %d host summaries, %d events)' % \ - (unicode(job), unicode(job.created), + job_display = '"%s" (%d host summaries, %d events)' % \ + (unicode(job), job.job_host_summaries.count(), job.job_events.count()) if job.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s job %s', action_text, job.status, job_display) + skipped += 1 
elif job.created >= self.cutoff: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, job_display) + skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, job_display) if not self.dry_run: job.delete() + deleted += 1 + return skipped, deleted def cleanup_ad_hoc_commands(self): + skipped, deleted = 0, 0 for ad_hoc_command in AdHocCommand.objects.all(): - ad_hoc_command_display = '"%s" (started %s, %d events)' % \ - (unicode(ad_hoc_command), unicode(ad_hoc_command.created), + ad_hoc_command_display = '"%s" (%d events)' % \ + (unicode(ad_hoc_command), ad_hoc_command.ad_hoc_command_events.count()) if ad_hoc_command.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s ad hoc command %s', action_text, ad_hoc_command.status, ad_hoc_command_display) + skipped += 1 elif ad_hoc_command.created >= self.cutoff: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, ad_hoc_command_display) + skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, ad_hoc_command_display) if not self.dry_run: ad_hoc_command.delete() + deleted += 1 + return skipped, deleted def cleanup_project_updates(self): + skipped, deleted = 0, 0 for pu in ProjectUpdate.objects.all(): - pu_display = '"%s" (started %s)' % (unicode(pu), unicode(pu.created)) + pu_display = '"%s" (type %s)' % (unicode(pu), unicode(pu.launch_type)) if pu.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s project update %s', action_text, pu.status, pu_display) - if pu in (pu.project.current_update, pu.project.last_update) and pu.project.scm_type: + skipped += 1 + elif pu in (pu.project.current_update, pu.project.last_update) and pu.project.scm_type: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, pu_display) + skipped += 1 elif pu.created >= self.cutoff: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, pu_display) + skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, pu_display) if not self.dry_run: pu.delete() + deleted += 1 + return skipped, deleted def cleanup_inventory_updates(self): + skipped, deleted = 0, 0 for iu in InventoryUpdate.objects.all(): - iu_display = '"%s" (started %s)' % (unicode(iu), unicode(iu.created)) + iu_display = '"%s" (source %s)' % (unicode(iu), unicode(iu.source)) if iu.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s inventory update %s', action_text, iu.status, iu_display) - if iu in (iu.inventory_source.current_update, iu.inventory_source.last_update) and iu.inventory_source.source: + skipped += 1 + elif iu in (iu.inventory_source.current_update, iu.inventory_source.last_update) and iu.inventory_source.source: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, iu_display) + skipped += 1 elif iu.created >= self.cutoff: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, iu_display) + skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, iu_display) if not 
self.dry_run: iu.delete() + deleted += 1 + return skipped, deleted def cleanup_management_jobs(self): + skipped, deleted = 0, 0 for sj in SystemJob.objects.all(): - sj_display = '"%s" (started %s)' % (unicode(sj), unicode(sj.created)) + sj_display = '"%s" (type %s)' % (unicode(sj), unicode(sj.job_type)) if sj.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s system_job %s', action_text, sj.status, sj_display) + skipped += 1 elif sj.created >= self.cutoff: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, sj_display) + skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, sj_display) if not self.dry_run: sj.delete() + deleted += 1 + return skipped, deleted def init_logging(self): log_levels = dict(enumerate([logging.ERROR, logging.INFO, @@ -141,6 +185,50 @@ class Command(NoArgsCommand): self.logger.addHandler(handler) self.logger.propagate = False + def cleanup_workflow_jobs(self): + skipped, deleted = 0, 0 + for workflow_job in WorkflowJob.objects.all(): + workflow_job_display = '"{}" ({} nodes)'.format( + unicode(workflow_job), + workflow_job.workflow_nodes.count()) + if workflow_job.status in ('pending', 'waiting', 'running'): + action_text = 'would skip' if self.dry_run else 'skipping' + self.logger.debug('%s %s job %s', action_text, workflow_job.status, workflow_job_display) + skipped += 1 + elif workflow_job.created >= self.cutoff: + action_text = 'would skip' if self.dry_run else 'skipping' + self.logger.debug('%s %s', action_text, workflow_job_display) + skipped += 1 + else: + action_text = 'would delete' if self.dry_run else 'deleting' + self.logger.info('%s %s', action_text, workflow_job_display) + if not self.dry_run: + workflow_job.delete() + deleted += 1 + return skipped, deleted + + def cleanup_notifications(self): + skipped, deleted = 0, 0 + for notification in Notification.objects.all(): + notification_display = '"{}" (started {}, {} type, {} sent)'.format( + unicode(notification), unicode(notification.created), + notification.notification_type, notification.notifications_sent) + if notification.status in ('pending',): + action_text = 'would skip' if self.dry_run else 'skipping' + self.logger.debug('%s %s notification %s', action_text, notification.status, notification_display) + skipped += 1 + elif notification.created >= self.cutoff: + action_text = 'would skip' if self.dry_run else 'skipping' + self.logger.debug('%s %s', action_text, notification_display) + skipped += 1 + else: + action_text = 'would delete' if self.dry_run else 'deleting' + self.logger.info('%s %s', action_text, notification_display) + if not self.dry_run: + notification.delete() + deleted += 1 + return skipped, deleted + @transaction.atomic def handle_noargs(self, **options): self.verbosity = int(options.get('verbosity', 1)) @@ -151,13 +239,19 @@ class Command(NoArgsCommand): self.cutoff = now() - datetime.timedelta(days=self.days) except OverflowError: raise CommandError('--days specified is too large. 
Try something less than 99999 (about 270 years).') - model_names = ('jobs', 'ad_hoc_commands', 'project_updates', 'inventory_updates', 'management_jobs') + model_names = ('jobs', 'ad_hoc_commands', 'project_updates', 'inventory_updates', + 'management_jobs', 'workflow_jobs', 'notifications') models_to_cleanup = set() for m in model_names: if options.get('only_%s' % m, False): models_to_cleanup.add(m) if not models_to_cleanup: models_to_cleanup.update(model_names) - for m in model_names: - if m in models_to_cleanup: - getattr(self, 'cleanup_%s' % m)() + with disable_activity_stream(), disable_computed_fields(): + for m in model_names: + if m in models_to_cleanup: + skipped, deleted = getattr(self, 'cleanup_%s' % m)() + if self.dry_run: + self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped) + else: + self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped) diff --git a/awx/main/management/commands/create_preload_data.py b/awx/main/management/commands/create_preload_data.py index a6b1e41f0d..caeba2c0a3 100644 --- a/awx/main/management/commands/create_preload_data.py +++ b/awx/main/management/commands/create_preload_data.py @@ -47,3 +47,4 @@ class Command(BaseCommand): inventory=i, credential=c) print('Default organization added.') + print('Demo Credential, Inventory, and Job Template added.') diff --git a/awx/main/management/commands/deprovision_node.py b/awx/main/management/commands/deprovision_node.py new file mode 100644 index 0000000000..8412b5bd86 --- /dev/null +++ b/awx/main/management/commands/deprovision_node.py @@ -0,0 +1,33 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved + +import subprocess +from django.core.management.base import BaseCommand, CommandError +from optparse import make_option +from awx.main.models import Instance + + +class Command(BaseCommand): + """ + Deprovision a Tower cluster node + """ + + option_list = BaseCommand.option_list + ( + make_option('--name', dest='name', type='string', + help='Hostname used during provisioning'), + ) + + def handle(self, *args, **options): + if not options.get('name'): + raise CommandError("--name is a required argument") + instance = Instance.objects.filter(hostname=options.get('name')) + if instance.exists(): + instance.delete() + result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(options.get('name')), shell=True).wait() + if result != 0: + print("Node deprovisioning may have failed when attempting to remove the RabbitMQ instance from the cluster") + else: + print('Successfully deprovisioned {}'.format(options.get('name'))) + else: + print('No instance found matching name {}'.format(options.get('name'))) + diff --git a/awx/main/management/commands/inventory_import.py b/awx/main/management/commands/inventory_import.py index 4ae521cd5c..c1399e1a11 100644 --- a/awx/main/management/commands/inventory_import.py +++ b/awx/main/management/commands/inventory_import.py @@ -26,10 +26,9 @@ from django.utils.encoding import smart_text # AWX from awx.main.models import * # noqa +from awx.main.task_engine import TaskEnhancer from awx.main.utils import ignore_inventory_computed_fields, check_proot_installed, wrap_args_with_proot from awx.main.signals import disable_activity_stream -from awx.main.task_engine import TaskSerializer as LicenseReader -from awx.main.conf import tower_settings logger = logging.getLogger('awx.main.commands.inventory_import') @@ -65,7 +64,7 @@ class MemObject(object): all_vars = {} files_found 
= 0 for suffix in ('', '.yml', '.yaml', '.json'): - path = ''.join([base_path, suffix]) + path = ''.join([base_path, suffix]).encode("utf-8") if not os.path.exists(path): continue if not os.path.isfile(path): @@ -358,7 +357,7 @@ class ExecutableJsonLoader(BaseLoader): data = {} stdout, stderr = '', '' try: - if self.is_custom and getattr(tower_settings, 'AWX_PROOT_ENABLED', False): + if self.is_custom and getattr(settings, 'AWX_PROOT_ENABLED', False): if not check_proot_installed(): raise RuntimeError("proot is not installed but is configured for use") kwargs = {'proot_temp_dir': self.source_dir} # TODO: Remove proot dir @@ -463,7 +462,7 @@ class ExecutableJsonLoader(BaseLoader): # to set their variables for k,v in self.all_group.all_hosts.iteritems(): if 'hostvars' not in _meta: - data = self.command_to_json([self.source, '--host', k]) + data = self.command_to_json([self.source, '--host', k.encode("utf-8")]) else: data = _meta['hostvars'].get(k, {}) if isinstance(data, dict): @@ -483,6 +482,7 @@ def load_inventory_source(source, all_group=None, group_filter_re=None, # good naming conventions source = source.replace('azure.py', 'windows_azure.py') source = source.replace('satellite6.py', 'foreman.py') + source = source.replace('vmware.py', 'vmware_inventory.py') logger.debug('Analyzing type of source: %s', source) original_all_group = all_group if not os.path.exists(source): @@ -1191,9 +1191,8 @@ class Command(NoArgsCommand): self._create_update_group_hosts() def check_license(self): - reader = LicenseReader() - license_info = reader.from_database() - if not license_info or len(license_info) == 0: + license_info = TaskEnhancer().validate_enhancements() + if license_info.get('license_key', 'UNLICENSED') == 'UNLICENSED': self.logger.error(LICENSE_NON_EXISTANT_MESSAGE) raise CommandError('No Tower license found!') available_instances = license_info.get('available_instances', 0) @@ -1255,6 +1254,12 @@ class Command(NoArgsCommand): except re.error: raise CommandError('invalid regular expression for --host-filter') + ''' + TODO: Remove this deprecation when we remove support for rax.py + ''' + if self.source == "rax.py": + self.logger.info("Rackspace inventory sync is deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.") + begin = time.time() self.load_inventory_from_database() diff --git a/awx/main/management/commands/list_instances.py b/awx/main/management/commands/list_instances.py index 08ccc928ca..e193a45dd0 100644 --- a/awx/main/management/commands/list_instances.py +++ b/awx/main/management/commands/list_instances.py @@ -1,12 +1,11 @@ # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved -from awx.main.management.commands._base_instance import BaseCommandInstance from awx.main.models import Instance +from django.core.management.base import NoArgsCommand -instance_str = BaseCommandInstance.instance_str -class Command(BaseCommandInstance): +class Command(NoArgsCommand): """List instances from the Tower database """ @@ -14,5 +13,4 @@ class Command(NoArgsCommand): super(Command, self).__init__() for instance in Instance.objects.all(): - print("uuid: %s; hostname: %s; primary: %s; created: %s; modified: %s" % - (instance.uuid, instance.hostname, instance.primary, instance.created, instance.modified)) + print("hostname: {}; created: {}; heartbeat: {}".format(instance.hostname, instance.created, instance.modified)) diff --git a/awx/main/management/commands/register_instance.py b/awx/main/management/commands/register_instance.py index 942eb9af4d..7ce6be787b 100644 --- a/awx/main/management/commands/register_instance.py +++ b/awx/main/management/commands/register_instance.py @@ -1,63 +1,30 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved -from django.core.management.base import CommandError - -from awx.main.management.commands._base_instance import BaseCommandInstance from awx.main.models import Instance +from django.conf import settings -instance_str = BaseCommandInstance.instance_str +from optparse import make_option +from django.core.management.base import BaseCommand -class Command(BaseCommandInstance): - """Internal tower command. - Regsiter this instance with the database for HA tracking. - This command is idempotent. - - This command will error out in the following conditions: - - * Attempting to register a secondary machine with no primary machines. - * Attempting to register a primary instance when a different primary - instance exists. - * Attempting to re-register an instance with changed values. +class Command(BaseCommand): + """ + Internal tower command. + Register this instance with the database for HA tracking. """ - def __init__(self): - super(Command, self).__init__() - self.include_options_roles() - self.include_option_hostname_set() + option_list = BaseCommand.option_list + ( + make_option('--hostname', dest='hostname', type='string', + help='Hostname used during provisioning'), + ) - def handle(self, *args, **options): - super(Command, self).handle(*args, **options) - - uuid = self.get_UUID() - - # Is there an existing record for this machine? If so, retrieve that record and look for issues. - try: - instance = Instance.objects.get(uuid=uuid) - if instance.hostname != self.get_option_hostname(): - raise CommandError('Instance already registered with a different hostname %s.' % instance_str(instance)) - print("Instance already registered %s" % instance_str(instance)) - except Instance.DoesNotExist: - # Get a status on primary machines (excluding this one, regardless of its status). - other_instances = Instance.objects.exclude(uuid=uuid) - primaries = other_instances.filter(primary=True).count() - - # If this instance is being set to primary and a *different* primary machine alreadyexists, error out. - if self.is_option_primary() and primaries: - raise CommandError('Another instance is already registered as primary.') - - # Lastly, if there are no primary machines at all, then don't allow this to be registered as a secondary machine.
- if self.is_option_secondary() and not primaries: - raise CommandError('Unable to register a secondary machine until another primary machine has been registered.') - - # Okay, we've checked for appropriate errata; perform the registration. - instance = Instance(uuid=uuid, primary=self.is_option_primary(), hostname=self.get_option_hostname()) - instance.save() - - # If this is a primary instance, update projects. - if instance.primary: - self.update_projects(instance) - - # Done! - print('Successfully registered instance %s.' % instance_str(instance)) + def handle(self, **options): + uuid = settings.SYSTEM_UUID + instance = Instance.objects.filter(hostname=options.get('hostname')) + if instance.exists(): + print("Instance already registered {}".format(instance[0])) + return + instance = Instance(uuid=uuid, hostname=options.get('hostname')) + instance.save() + print('Successfully registered instance {}'.format(instance)) diff --git a/awx/main/management/commands/remove_instance.py b/awx/main/management/commands/remove_instance.py deleted file mode 100644 index d8712137be..0000000000 --- a/awx/main/management/commands/remove_instance.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved - -from django.core.management.base import CommandError -from awx.main.management.commands._base_instance import BaseCommandInstance - -from awx.main.models import Instance - -instance_str = BaseCommandInstance.instance_str - -class Command(BaseCommandInstance): - """Internal tower command. - Remove an existing instance from the HA instance table. - - This command is idempotent. - - This command will error out in the following conditions: - - * Attempting to remove a primary instance. - """ - def __init__(self): - super(Command, self).__init__() - - self.include_option_hostname_uuid_find() - - def handle(self, *args, **options): - super(Command, self).handle(*args, **options) - - # Is there an existing record for this machine? If so, retrieve that record and look for issues. - try: - # Get the instance. - instance = Instance.objects.get(**self.get_unique_fields()) - - # Sanity check: Do not remove the primary instance. - if instance.primary: - raise CommandError('Cannot remove primary instance %s. Another instance must be promoted to primary first.' % instance_str(instance)) - - # Remove the instance. - instance.delete() - print('Successfully removed instance %s.' % instance_str(instance)) - except Instance.DoesNotExist: - print('No matching instance found to remove.') - diff --git a/awx/main/management/commands/run_callback_receiver.py b/awx/main/management/commands/run_callback_receiver.py index c31b3dffe2..c262b23024 100644 --- a/awx/main/management/commands/run_callback_receiver.py +++ b/awx/main/management/commands/run_callback_receiver.py @@ -2,34 +2,38 @@ # All Rights Reserved. 
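# The hunks below swap the old Socket-based listener for kombu's ConsumerMixin,
# the same pattern used by the new CallbackBrokerWorker. A minimal standalone
# sketch of that pattern; the broker URL and queue name here are placeholder
# assumptions, not values from the patch.
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin

class EchoWorker(ConsumerMixin):
    def __init__(self, connection):
        # ConsumerMixin.run() drives this connection's event loop.
        self.connection = connection

    def get_consumers(self, Consumer, channel):
        # Bind a direct exchange/queue pair and route messages to on_message.
        queue = Queue('demo_events', Exchange('demo_events', type='direct'),
                      routing_key='demo_events')
        return [Consumer(queues=[queue], accept=['json'],
                         callbacks=[self.on_message])]

    def on_message(self, body, message):
        print(body)
        message.ack()  # acknowledge so the broker can discard the message

if __name__ == '__main__':
    with Connection('amqp://guest:guest@localhost//') as conn:
        EchoWorker(conn).run()  # blocks, consuming until interrupted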
# Python -import os -import sys -import datetime import logging import signal -import time -from multiprocessing import Process, Queue +from uuid import UUID +from multiprocessing import Process +from multiprocessing import Queue as MPQueue from Queue import Empty as QueueEmpty +from Queue import Full as QueueFull + +from kombu import Connection, Exchange, Queue +from kombu.mixins import ConsumerMixin # Django from django.conf import settings from django.core.management.base import NoArgsCommand -from django.db import transaction, DatabaseError -from django.utils.dateparse import parse_datetime -from django.utils.timezone import FixedOffset -from django.db import connection +from django.db import connection as django_connection +from django.db import DatabaseError +from django.core.cache import cache as django_cache # AWX from awx.main.models import * # noqa -from awx.main.socket import Socket logger = logging.getLogger('awx.main.commands.run_callback_receiver') -class CallbackReceiver(object): - def __init__(self): - self.parent_mappings = {} - def run_subscriber(self, use_workers=True): +class CallbackBrokerWorker(ConsumerMixin): + def __init__(self, connection, use_workers=True): + self.connection = connection + self.worker_queues = [] + self.total_messages = 0 + self.init_workers(use_workers) + + def init_workers(self, use_workers=True): def shutdown_handler(active_workers): def _handler(signum, frame): try: @@ -42,253 +46,88 @@ class CallbackReceiver(object): pass return _handler - def check_pre_handle(data): - event = data.get('event', '') - if event == 'playbook_on_play_start': - return True - return False - - worker_queues = [] - if use_workers: - connection.close() + django_connection.close() + django_cache.close() for idx in range(settings.JOB_EVENT_WORKERS): - queue_actual = Queue(settings.JOB_EVENT_MAX_QUEUE_SIZE) + queue_actual = MPQueue(settings.JOB_EVENT_MAX_QUEUE_SIZE) w = Process(target=self.callback_worker, args=(queue_actual, idx,)) w.start() if settings.DEBUG: logger.info('Started worker %s' % str(idx)) - worker_queues.append([0, queue_actual, w]) + self.worker_queues.append([0, queue_actual, w]) elif settings.DEBUG: logger.warn('Started callback receiver (no workers)') - main_process = Process( - target=self.callback_handler, - args=(use_workers, worker_queues,) - ) - main_process.daemon = True - main_process.start() + signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in self.worker_queues])) + signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in self.worker_queues])) - signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in worker_queues] + [main_process])) - signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in worker_queues] + [main_process])) - while True: - workers_changed = False - idx = 0 - for queue_worker in worker_queues: - if not queue_worker[2].is_alive(): - logger.warn("Worker %s was not alive, restarting" % str(queue_worker)) - workers_changed = True - queue_worker[2].join() - w = Process(target=self.callback_worker, args=(queue_worker[1], idx,)) - w.daemon = True - w.start() - signal.signal(signal.SIGINT, shutdown_handler([w])) - signal.signal(signal.SIGTERM, shutdown_handler([w])) - queue_worker[2] = w - idx += 1 - if workers_changed: - signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in worker_queues] + [main_process])) - signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in worker_queues] + [main_process])) - if not main_process.is_alive(): - logger.error("Main process is not alive") - for queue_worker in 
worker_queues: - queue_worker[2].terminate() - break - time.sleep(0.1) + def get_consumers(self, Consumer, channel): + return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE, + Exchange(settings.CALLBACK_QUEUE, type='direct'), + routing_key=settings.CALLBACK_QUEUE)], + accept=['json'], + callbacks=[self.process_task])] - def write_queue_worker(self, preferred_queue, worker_queues, message): + def process_task(self, body, message): + if "uuid" in body and body['uuid']: + try: + queue = UUID(body['uuid']).int % settings.JOB_EVENT_WORKERS + except Exception: + queue = self.total_messages % settings.JOB_EVENT_WORKERS + else: + queue = self.total_messages % settings.JOB_EVENT_WORKERS + self.write_queue_worker(queue, body) + self.total_messages += 1 + message.ack() + + def write_queue_worker(self, preferred_queue, body): queue_order = sorted(range(settings.JOB_EVENT_WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0) + write_attempt_order = [] for queue_actual in queue_order: try: - worker_actual = worker_queues[queue_actual] - worker_actual[1].put(message, block=True, timeout=2) + worker_actual = self.worker_queues[queue_actual] + worker_actual[1].put(body, block=True, timeout=5) worker_actual[0] += 1 return queue_actual + except QueueFull: + pass except Exception: + import traceback + tb = traceback.format_exc() logger.warn("Could not write to queue %s" % preferred_queue) - continue - return None - - def callback_handler(self, use_workers, worker_queues): - total_messages = 0 - last_parent_events = {} - with Socket('callbacks', 'r') as callbacks: - for message in callbacks.listen(): - total_messages += 1 - if 'ad_hoc_command_id' in message: - self.process_ad_hoc_event(message) - elif not use_workers: - self.process_job_event(message) - else: - job_parent_events = last_parent_events.get(message['job_id'], {}) - if message['event'] in ('playbook_on_play_start', 'playbook_on_stats', 'playbook_on_vars_prompt'): - parent = job_parent_events.get('playbook_on_start', None) - elif message['event'] in ('playbook_on_notify', - 'playbook_on_setup', - 'playbook_on_task_start', - 'playbook_on_no_hosts_matched', - 'playbook_on_no_hosts_remaining', - 'playbook_on_include', - 'playbook_on_import_for_host', - 'playbook_on_not_import_for_host'): - parent = job_parent_events.get('playbook_on_play_start', None) - elif message['event'].startswith('runner_on_') or message['event'].startswith('runner_item_on_'): - list_parents = [] - list_parents.append(job_parent_events.get('playbook_on_setup', None)) - list_parents.append(job_parent_events.get('playbook_on_task_start', None)) - list_parents = sorted(filter(lambda x: x is not None, list_parents), cmp=lambda x, y: y.id - x.id) - parent = list_parents[0] if len(list_parents) > 0 else None - else: - parent = None - if parent is not None: - message['parent'] = parent.id - if 'created' in message: - del(message['created']) - if message['event'] in ('playbook_on_start', 'playbook_on_play_start', - 'playbook_on_setup', 'playbook_on_task_start'): - job_parent_events[message['event']] = self.process_job_event(message) - else: - if message['event'] == 'playbook_on_stats': - job_parent_events = {} - - actual_queue = self.write_queue_worker(total_messages % settings.JOB_EVENT_WORKERS, worker_queues, message) - # NOTE: It might be better to recycle the entire callback receiver process if one or more of the queues are too full - # the drawback is that if we under extremely high load we may be legitimately taking a while to process messages - if actual_queue is None: - 
logger.error("All queues full!") - sys.exit(1) - last_parent_events[message['job_id']] = job_parent_events - - @transaction.atomic - def process_job_event(self, data): - # Sanity check: Do we need to do anything at all? - event = data.get('event', '') - parent_id = data.get('parent', None) - if not event or 'job_id' not in data: - return - - # Get the correct "verbose" value from the job. - # If for any reason there's a problem, just use 0. - try: - verbose = Job.objects.get(id=data['job_id']).verbosity - except Exception as e: - verbose = 0 - - # Convert the datetime for the job event's creation appropriately, - # and include a time zone for it. - # - # In the event of any issue, throw it out, and Django will just save - # the current time. - try: - if not isinstance(data['created'], datetime.datetime): - data['created'] = parse_datetime(data['created']) - if not data['created'].tzinfo: - data['created'] = data['created'].replace(tzinfo=FixedOffset(0)) - except (KeyError, ValueError): - data.pop('created', None) - - # Print the data to stdout if we're in DEBUG mode. - if settings.DEBUG: - print(data) - - # Sanity check: Don't honor keys that we don't recognize. - for key in data.keys(): - if key not in ('job_id', 'event', 'event_data', - 'created', 'counter'): - data.pop(key) - - # Save any modifications to the job event to the database. - # If we get a database error of some kind, bail out. - try: - # If we're not in verbose mode, wipe out any module - # arguments. - res = data['event_data'].get('res', {}) - if isinstance(res, dict): - i = res.get('invocation', {}) - if verbose == 0 and 'module_args' in i: - i['module_args'] = '' - - # Create a new JobEvent object. - job_event = JobEvent(**data) - if parent_id is not None: - job_event.parent = JobEvent.objects.get(id=parent_id) - job_event.save(post_process=True) - - # Retrun the job event object. - return job_event - except DatabaseError as e: - # Log the error and bail out. - logger.error('Database error saving job event: %s', e) - return None - - @transaction.atomic - def process_ad_hoc_event(self, data): - # Sanity check: Do we need to do anything at all? - event = data.get('event', '') - if not event or 'ad_hoc_command_id' not in data: - return - - # Get the correct "verbose" value from the job. - # If for any reason there's a problem, just use 0. - try: - verbose = AdHocCommand.objects.get(id=data['ad_hoc_command_id']).verbosity - except Exception as e: - verbose = 0 - - # Convert the datetime for the job event's creation appropriately, - # and include a time zone for it. - # - # In the event of any issue, throw it out, and Django will just save - # the current time. - try: - if not isinstance(data['created'], datetime.datetime): - data['created'] = parse_datetime(data['created']) - if not data['created'].tzinfo: - data['created'] = data['created'].replace(tzinfo=FixedOffset(0)) - except (KeyError, ValueError): - data.pop('created', None) - - # Print the data to stdout if we're in DEBUG mode. - if settings.DEBUG: - print(data) - - # Sanity check: Don't honor keys that we don't recognize. - for key in data.keys(): - if key not in ('ad_hoc_command_id', 'event', 'event_data', - 'created', 'counter'): - data.pop(key) - - # Save any modifications to the ad hoc command event to the database. - # If we get a database error of some kind, bail out. - try: - # If we're not in verbose mode, wipe out any module - # arguments. FIXME: Needed for adhoc? 
- res = data['event_data'].get('res', {}) - if isinstance(res, dict): - i = res.get('invocation', {}) - if verbose == 0 and 'module_args' in i: - i['module_args'] = '' - - # Create a new AdHocCommandEvent object. - ad_hoc_command_event = AdHocCommandEvent.objects.create(**data) - - # Retrun the ad hoc comamnd event object. - return ad_hoc_command_event - except DatabaseError as e: - # Log the error and bail out. - logger.error('Database error saving ad hoc command event: %s', e) + logger.warn("Detail: {}".format(tb)) + write_attempt_order.append(preferred_queue) + logger.warn("Could not write payload to any queue, attempted order: {}".format(write_attempt_order)) return None def callback_worker(self, queue_actual, idx): while True: try: - message = queue_actual.get(block=True, timeout=1) + body = queue_actual.get(block=True, timeout=1) except QueueEmpty: continue except Exception as e: - logger.error("Exception on listen socket, restarting: " + str(e)) - break - self.process_job_event(message) + logger.error("Exception on worker thread, restarting: " + str(e)) + continue + try: + if 'job_id' not in body and 'ad_hoc_command_id' not in body: + raise Exception('Payload does not have a job_id or ad_hoc_command_id') + if settings.DEBUG: + logger.info('Body: {}'.format(body)) + try: + if 'job_id' in body: + JobEvent.create_from_data(**body) + elif 'ad_hoc_command_id' in body: + AdHocCommandEvent.create_from_data(**body) + except DatabaseError as e: + logger.error('Database Error Saving Job Event: {}'.format(e)) + except Exception as exc: + import traceback + tb = traceback.format_exc() + logger.error('Callback Task Processor Raised Exception: %r', exc) + logger.error('Detail: {}'.format(tb)) + class Command(NoArgsCommand): ''' @@ -299,9 +138,9 @@ class Command(NoArgsCommand): help = 'Launch the job callback receiver' def handle_noargs(self, **options): - cr = CallbackReceiver() - try: - cr.run_subscriber() - except KeyboardInterrupt: - pass - + with Connection(settings.BROKER_URL) as conn: + try: + worker = CallbackBrokerWorker(conn) + worker.run() + except KeyboardInterrupt: + print('Terminating Callback Receiver') diff --git a/awx/main/management/commands/run_fact_cache_receiver.py b/awx/main/management/commands/run_fact_cache_receiver.py index 4241a2000c..5a111c6fa2 100644 --- a/awx/main/management/commands/run_fact_cache_receiver.py +++ b/awx/main/management/commands/run_fact_cache_receiver.py @@ -3,9 +3,11 @@ # Python import logging -from threading import Thread from datetime import datetime +from kombu import Connection, Exchange, Queue +from kombu.mixins import ConsumerMixin + # Django from django.core.management.base import NoArgsCommand from django.conf import settings @@ -14,14 +16,24 @@ from django.utils import timezone # AWX from awx.main.models.fact import Fact from awx.main.models.inventory import Host -from awx.main.socket import Socket logger = logging.getLogger('awx.main.commands.run_fact_cache_receiver') +analytics_logger = logging.getLogger('awx.analytics.system_tracking') -class FactCacheReceiver(object): - def __init__(self): + +class FactBrokerWorker(ConsumerMixin): + + def __init__(self, connection): + self.connection = connection self.timestamp = None + def get_consumers(self, Consumer, channel): + return [Consumer(queues=[Queue(settings.FACT_QUEUE, + Exchange(settings.FACT_QUEUE, type='direct'), + routing_key=settings.FACT_QUEUE)], + accept=['json'], + callbacks=[self.process_fact_message])] + def _determine_module(self, facts): # Symantically determine the module type 
if len(facts) == 1: @@ -39,17 +51,11 @@ class FactCacheReceiver(object): facts = self._extract_module_facts(module, facts) return (module, facts) - def process_fact_message(self, message): - hostname = message['host'] - inventory_id = message['inventory_id'] - facts_data = message['facts'] - date_key = message['date_key'] - - # TODO: in ansible < v2 module_setup is emitted for "smart" fact caching. - # ansible v2 will not emit this message. Thus, this can be removed at that time. - if 'module_setup' in facts_data and len(facts_data) == 1: - logger.info('Received module_setup message') - return None + def process_fact_message(self, body, message): + hostname = body['host'] + inventory_id = body['inventory_id'] + facts_data = body['facts'] + date_key = body['date_key'] try: host_obj = Host.objects.get(name=hostname, inventory__id=inventory_id) @@ -76,33 +82,22 @@ class FactCacheReceiver(object): # Create new Fact entry fact_obj = Fact.add_fact(host_obj.id, module_name, self.timestamp, facts) logger.info('Created new fact <%s, %s>' % (fact_obj.id, module_name)) + analytics_logger.info('Received message with fact data', extra=dict( + module_name=module_name, facts_data=facts)) return fact_obj - def run_receiver(self, use_processing_threads=True): - with Socket('fact_cache', 'r') as facts: - for message in facts.listen(): - if 'host' not in message or 'facts' not in message or 'date_key' not in message: - logger.warn('Received invalid message %s' % message) - continue - logger.info('Received message %s' % message) - if use_processing_threads: - wt = Thread(target=self.process_fact_message, args=(message,)) - wt.start() - else: - self.process_fact_message(message) class Command(NoArgsCommand): ''' - blah blah + Save Fact Event packets to the database as emitted from a Tower Scan Job ''' help = 'Launch the Fact Cache Receiver' def handle_noargs(self, **options): - fcr = FactCacheReceiver() - fact_cache_port = settings.FACT_CACHE_PORT - logger.info('Listening on port http://0.0.0.0:' + str(fact_cache_port)) - try: - fcr.run_receiver() - except KeyboardInterrupt: - pass + with Connection(settings.BROKER_URL) as conn: + try: + worker = FactBrokerWorker(conn) + worker.run() + except KeyboardInterrupt: + pass diff --git a/awx/main/management/commands/run_socketio_service.py b/awx/main/management/commands/run_socketio_service.py deleted file mode 100644 index 0e3df4ccaf..0000000000 --- a/awx/main/management/commands/run_socketio_service.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. 
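# FactBrokerWorker above consumes JSON bodies carrying 'host', 'inventory_id',
# 'facts' and 'date_key' keys from settings.FACT_QUEUE. A sketch of a matching
# test publisher using kombu's Producer; the queue name and broker URL are
# placeholder assumptions.
from kombu import Connection, Exchange, Queue

FACT_QUEUE = 'tower_fact_cache'  # stand-in for settings.FACT_QUEUE
exchange = Exchange(FACT_QUEUE, type='direct')
queue = Queue(FACT_QUEUE, exchange, routing_key=FACT_QUEUE)

with Connection('amqp://guest:guest@localhost//') as conn:
    producer = conn.Producer(serializer='json')
    producer.publish(
        {'host': 'web1.example.com', 'inventory_id': 1,
         'facts': {'ansible_os_family': 'Debian'}, 'date_key': 1457000000},
        exchange=exchange, routing_key=FACT_QUEUE,
        declare=[queue])  # declare the queue so the message is not dropped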
- -# Python -import os -import logging -import urllib -import weakref -from optparse import make_option -from threading import Thread - -# Django -from django.conf import settings -from django.core.management.base import NoArgsCommand - -# AWX -import awx -from awx.main.models import * # noqa -from awx.main.socket import Socket - -# socketio -from socketio import socketio_manage -from socketio.server import SocketIOServer -from socketio.namespace import BaseNamespace - -logger = logging.getLogger('awx.main.commands.run_socketio_service') - -class SocketSession(object): - def __init__(self, session_id, token_key, socket): - self.socket = weakref.ref(socket) - self.session_id = session_id - self.token_key = token_key - self._valid = True - - def is_valid(self): - return bool(self._valid) - - def invalidate(self): - self._valid = False - - def is_db_token_valid(self): - auth_token = AuthToken.objects.filter(key=self.token_key, reason='') - if not auth_token.exists(): - return False - auth_token = auth_token[0] - return bool(not auth_token.is_expired()) - -class SocketSessionManager(object): - - def __init__(self): - self.SESSIONS_MAX = 1000 - self.socket_sessions = [] - self.socket_session_token_key_map = {} - - def _prune(self): - if len(self.socket_sessions) > self.SESSIONS_MAX: - session = self.socket_sessions[0] - entries = self.socket_session_token_key_map[session.token_key] - del entries[session.session_id] - if len(entries) == 0: - del self.socket_session_token_key_map[session.token_key] - self.socket_sessions.pop(0) - - ''' - Returns an dict of sessions - ''' - def lookup(self, token_key=None): - if not token_key: - raise ValueError("token_key required") - return self.socket_session_token_key_map.get(token_key, None) - - def add_session(self, session): - self.socket_sessions.append(session) - entries = self.socket_session_token_key_map.get(session.token_key, None) - if not entries: - entries = {} - self.socket_session_token_key_map[session.token_key] = entries - entries[session.session_id] = session - self._prune() - return session - -class SocketController(object): - - def __init__(self, SocketSessionManager): - self.server = None - self.SocketSessionManager = SocketSessionManager - - def add_session(self, session): - return self.SocketSessionManager.add_session(session) - - def broadcast_packet(self, packet): - # Broadcast message to everyone at endpoint - # Loop over the 'raw' list of sockets (don't trust our list) - for session_id, socket in list(self.server.sockets.iteritems()): - socket_session = socket.session.get('socket_session', None) - if socket_session and socket_session.is_valid(): - try: - socket.send_packet(packet) - except Exception as e: - logger.error("Error sending client packet to %s: %s" % (str(session_id), str(packet))) - logger.error("Error was: " + str(e)) - - def send_packet(self, packet, token_key): - if not token_key: - raise ValueError("token_key is required") - socket_sessions = self.SocketSessionManager.lookup(token_key=token_key) - # We may not find the socket_session if the user disconnected - # (it's actually more compliciated than that because of our prune logic) - if not socket_sessions: - return None - for session_id, socket_session in socket_sessions.iteritems(): - logger.warn("Maybe sending packet to %s" % session_id) - if socket_session and socket_session.is_valid(): - logger.warn("Sending packet to %s" % session_id) - socket = socket_session.socket() - if socket: - try: - socket.send_packet(packet) - except Exception as e: - logger.error("Error 
sending client packet to %s: %s" % (str(socket_session.session_id), str(packet))) - logger.error("Error was: " + str(e)) - - def set_server(self, server): - self.server = server - return server - -socketController = SocketController(SocketSessionManager()) - -# -# Socket session is attached to self.session['socket_session'] -# self.session and self.socket.session point to the same dict -# -class TowerBaseNamespace(BaseNamespace): - - def get_allowed_methods(self): - return ['recv_disconnect'] - - def get_initial_acl(self): - request_token = self._get_request_token() - if request_token: - # (1) This is the first time the socket has been seen (first - # namespace joined). - # (2) This socket has already been seen (already joined and maybe - # left a namespace) - # - # Note: Assume that the user token is valid if the session is found - socket_session = self.session.get('socket_session', None) - if not socket_session: - socket_session = SocketSession(self.socket.sessid, request_token, self.socket) - if socket_session.is_db_token_valid(): - self.session['socket_session'] = socket_session - socketController.add_session(socket_session) - else: - socket_session.invalidate() - - return set(['recv_connect'] + self.get_allowed_methods()) - else: - logger.warn("Authentication Failure validating user") - self.emit("connect_failed", "Authentication failed") - return set(['recv_connect']) - - def _get_request_token(self): - if 'QUERY_STRING' not in self.environ: - return False - - try: - k, v = self.environ['QUERY_STRING'].split("=") - if k == "Token": - token_actual = urllib.unquote_plus(v).decode().replace("\"","") - return token_actual - except Exception as e: - logger.error("Exception validating user: " + str(e)) - return False - return False - - def recv_connect(self): - socket_session = self.session.get('socket_session', None) - if socket_session and not socket_session.is_valid(): - self.disconnect(silent=False) - -class TestNamespace(TowerBaseNamespace): - - def recv_connect(self): - logger.info("Received client connect for test namespace from %s" % str(self.environ['REMOTE_ADDR'])) - self.emit('test', "If you see this then you attempted to connect to the test socket endpoint") - super(TestNamespace, self).recv_connect() - -class JobNamespace(TowerBaseNamespace): - - def recv_connect(self): - logger.info("Received client connect for job namespace from %s" % str(self.environ['REMOTE_ADDR'])) - super(JobNamespace, self).recv_connect() - -class JobEventNamespace(TowerBaseNamespace): - - def recv_connect(self): - logger.info("Received client connect for job event namespace from %s" % str(self.environ['REMOTE_ADDR'])) - super(JobEventNamespace, self).recv_connect() - -class AdHocCommandEventNamespace(TowerBaseNamespace): - - def recv_connect(self): - logger.info("Received client connect for ad hoc command event namespace from %s" % str(self.environ['REMOTE_ADDR'])) - super(AdHocCommandEventNamespace, self).recv_connect() - -class ScheduleNamespace(TowerBaseNamespace): - - def get_allowed_methods(self): - parent_allowed = super(ScheduleNamespace, self).get_allowed_methods() - return parent_allowed + ["schedule_changed"] - - def recv_connect(self): - logger.info("Received client connect for schedule namespace from %s" % str(self.environ['REMOTE_ADDR'])) - super(ScheduleNamespace, self).recv_connect() - -# Catch-all namespace. 
-# Deliver 'global' events over this namespace -class ControlNamespace(TowerBaseNamespace): - - def recv_connect(self): - logger.warn("Received client connect for control namespace from %s" % str(self.environ['REMOTE_ADDR'])) - super(ControlNamespace, self).recv_connect() - -class TowerSocket(object): - - def __call__(self, environ, start_response): - path = environ['PATH_INFO'].strip('/') or 'index.html' - if path.startswith('socket.io'): - socketio_manage(environ, {'/socket.io/test': TestNamespace, - '/socket.io/jobs': JobNamespace, - '/socket.io/job_events': JobEventNamespace, - '/socket.io/ad_hoc_command_events': AdHocCommandEventNamespace, - '/socket.io/schedules': ScheduleNamespace, - '/socket.io/control': ControlNamespace}) - else: - logger.warn("Invalid connect path received: " + path) - start_response('404 Not Found', []) - return ['Tower version %s' % awx.__version__] - -def notification_handler(server): - with Socket('websocket', 'r') as websocket: - for message in websocket.listen(): - packet = { - 'args': message, - 'endpoint': message['endpoint'], - 'name': message['event'], - 'type': 'event', - } - - if 'token_key' in message: - # Best practice not to send the token over the socket - socketController.send_packet(packet, message.pop('token_key')) - else: - socketController.broadcast_packet(packet) - -class Command(NoArgsCommand): - ''' - SocketIO event emitter Tower service - Receives notifications from other services destined for UI notification - ''' - - help = 'Launch the SocketIO event emitter service' - - option_list = NoArgsCommand.option_list + ( - make_option('--receive_port', dest='receive_port', type='int', default=5559, - help='Port to listen for new events that will be destined for a client'), - make_option('--socketio_port', dest='socketio_port', type='int', default=8080, - help='Port to accept socketio requests from clients'),) - - def handle_noargs(self, **options): - socketio_listen_port = settings.SOCKETIO_LISTEN_PORT - - try: - if os.path.exists('/etc/tower/tower.cert') and os.path.exists('/etc/tower/tower.key'): - logger.info('Listening on port https://0.0.0.0:' + str(socketio_listen_port)) - server = SocketIOServer(('0.0.0.0', socketio_listen_port), TowerSocket(), resource='socket.io', - keyfile='/etc/tower/tower.key', certfile='/etc/tower/tower.cert') - else: - logger.info('Listening on port http://0.0.0.0:' + str(socketio_listen_port)) - server = SocketIOServer(('0.0.0.0', socketio_listen_port), TowerSocket(), resource='socket.io') - - socketController.set_server(server) - handler_thread = Thread(target=notification_handler, args=(server,)) - handler_thread.daemon = True - handler_thread.start() - - server.serve_forever() - except KeyboardInterrupt: - pass diff --git a/awx/main/management/commands/run_task_system.py b/awx/main/management/commands/run_task_system.py deleted file mode 100644 index f91309030c..0000000000 --- a/awx/main/management/commands/run_task_system.py +++ /dev/null @@ -1,357 +0,0 @@ -#Copyright (c) 2015 Ansible, Inc. 
-# All Rights Reserved - -# Python -import os -import datetime -import logging -import signal -import time - -# Django -from django.conf import settings -from django.core.management.base import NoArgsCommand - -# AWX -from awx.main.models import * # noqa -from awx.main.queue import FifoQueue -from awx.main.tasks import handle_work_error, handle_work_success -from awx.main.utils import get_system_task_capacity - -# Celery -from celery.task.control import inspect - -logger = logging.getLogger('awx.main.commands.run_task_system') - -queue = FifoQueue('tower_task_manager') - -class SimpleDAG(object): - ''' A simple implementation of a directed acyclic graph ''' - - def __init__(self): - self.nodes = [] - self.edges = [] - - def __contains__(self, obj): - for node in self.nodes: - if node['node_object'] == obj: - return True - return False - - def __len__(self): - return len(self.nodes) - - def __iter__(self): - return self.nodes.__iter__() - - def generate_graphviz_plot(self): - def short_string_obj(obj): - if type(obj) == Job: - type_str = "Job" - if type(obj) == AdHocCommand: - type_str = "AdHocCommand" - elif type(obj) == InventoryUpdate: - type_str = "Inventory" - elif type(obj) == ProjectUpdate: - type_str = "Project" - else: - type_str = "Unknown" - type_str += "%s" % str(obj.id) - return type_str - - doc = """ - digraph g { - rankdir = LR - """ - for n in self.nodes: - doc += "%s [color = %s]\n" % ( - short_string_obj(n['node_object']), - "red" if n['node_object'].status == 'running' else "black", - ) - for from_node, to_node in self.edges: - doc += "%s -> %s;\n" % ( - short_string_obj(self.nodes[from_node]['node_object']), - short_string_obj(self.nodes[to_node]['node_object']), - ) - doc += "}\n" - gv_file = open('/tmp/graph.gv', 'w') - gv_file.write(doc) - gv_file.close() - - def add_node(self, obj, metadata=None): - if self.find_ord(obj) is None: - self.nodes.append(dict(node_object=obj, metadata=metadata)) - - def add_edge(self, from_obj, to_obj): - from_obj_ord = self.find_ord(from_obj) - to_obj_ord = self.find_ord(to_obj) - if from_obj_ord is None or to_obj_ord is None: - raise LookupError("Object not found") - self.edges.append((from_obj_ord, to_obj_ord)) - - def add_edges(self, edgelist): - for edge_pair in edgelist: - self.add_edge(edge_pair[0], edge_pair[1]) - - def find_ord(self, obj): - for idx in range(len(self.nodes)): - if obj == self.nodes[idx]['node_object']: - return idx - return None - - def get_node_type(self, obj): - if type(obj) == Job: - return "job" - elif type(obj) == AdHocCommand: - return "ad_hoc_command" - elif type(obj) == InventoryUpdate: - return "inventory_update" - elif type(obj) == ProjectUpdate: - return "project_update" - elif type(obj) == SystemJob: - return "system_job" - return "unknown" - - def get_dependencies(self, obj): - antecedents = [] - this_ord = self.find_ord(obj) - for node, dep in self.edges: - if node == this_ord: - antecedents.append(self.nodes[dep]) - return antecedents - - def get_dependents(self, obj): - decendents = [] - this_ord = self.find_ord(obj) - for node, dep in self.edges: - if dep == this_ord: - decendents.append(self.nodes[node]) - return decendents - - def get_leaf_nodes(self): - leafs = [] - for n in self.nodes: - if len(self.get_dependencies(n['node_object'])) < 1: - leafs.append(n) - return leafs - -def get_tasks(): - """Fetch all Tower tasks that are relevant to the task management - system. - """ - RELEVANT_JOBS = ('pending', 'waiting', 'running') - # TODO: Replace this when we can grab all objects in a sane way. 
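# In the task manager deleted here, SimpleDAG (above) links each waiting task
# to the tasks blocking it, and get_leaf_nodes() returns the nodes with no
# outstanding dependencies, i.e. the tasks eligible to start. A reduced sketch
# of the same idea using plain strings in place of job objects:
class MiniDAG(object):
    def __init__(self):
        self.nodes, self.edges = [], []

    def add_node(self, obj):
        if obj not in self.nodes:
            self.nodes.append(obj)

    def add_edge(self, frm, to):
        # Edge direction mirrors SimpleDAG: dependent -> dependency.
        self.edges.append((self.nodes.index(frm), self.nodes.index(to)))

    def leaf_nodes(self):
        # A node with no outgoing edges depends on nothing and may run.
        blocked = set(frm for frm, _ in self.edges)
        return [n for i, n in enumerate(self.nodes) if i not in blocked]

dag = MiniDAG()
for name in ('project_update', 'inventory_update', 'job'):
    dag.add_node(name)
dag.add_edge('job', 'project_update')    # the job is blocked by both updates
dag.add_edge('job', 'inventory_update')
assert dag.leaf_nodes() == ['project_update', 'inventory_update']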
- graph_jobs = [j for j in Job.objects.filter(status__in=RELEVANT_JOBS)] - graph_ad_hoc_commands = [ahc for ahc in AdHocCommand.objects.filter(status__in=RELEVANT_JOBS)] - graph_inventory_updates = [iu for iu in - InventoryUpdate.objects.filter(status__in=RELEVANT_JOBS)] - graph_project_updates = [pu for pu in - ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS)] - graph_system_jobs = [sj for sj in - SystemJob.objects.filter(status__in=RELEVANT_JOBS)] - all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates + - graph_project_updates + graph_system_jobs, - key=lambda task: task.created) - return all_actions - -def rebuild_graph(message): - """Regenerate the task graph by refreshing known tasks from Tower, purging - orphaned running tasks, and creating dependencies for new tasks before - generating directed edge relationships between those tasks. - """ - # Sanity check: Only do this on the primary node. - if Instance.objects.my_role() == 'secondary': - return None - - inspector = inspect() - if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'): - active_task_queues = inspector.active() - else: - logger.warn("Ignoring celery task inspector") - active_task_queues = None - - all_sorted_tasks = get_tasks() - if not len(all_sorted_tasks): - return None - - active_tasks = [] - if active_task_queues is not None: - for queue in active_task_queues: - active_tasks += [at['id'] for at in active_task_queues[queue]] - else: - logger.error("Could not communicate with celery!") - # TODO: Something needs to be done here to signal to the system - # as a whole that celery appears to be down. - if not hasattr(settings, 'CELERY_UNIT_TEST'): - return None - running_tasks = filter(lambda t: t.status == 'running', all_sorted_tasks) - waiting_tasks = filter(lambda t: t.status != 'running', all_sorted_tasks) - new_tasks = filter(lambda t: t.status == 'pending', all_sorted_tasks) - - # Check running tasks and make sure they are active in celery - logger.debug("Active celery tasks: " + str(active_tasks)) - for task in list(running_tasks): - if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')): - # NOTE: Pull status again and make sure it didn't finish in - # the meantime? - task.status = 'failed' - task.job_explanation += ' '.join(( - 'Task was marked as running in Tower but was not present in', - 'Celery, so it has been marked as failed.', - )) - task.save() - task.socketio_emit_status("failed") - running_tasks.pop(running_tasks.index(task)) - logger.error("Task %s appears orphaned... marking as failed" % task) - - # Create and process dependencies for new tasks - for task in new_tasks: - logger.debug("Checking dependencies for: %s" % str(task)) - try: - task_dependencies = task.generate_dependencies(running_tasks + waiting_tasks) - except Exception, e: - logger.error("Failed processing dependencies for {}: {}".format(task, e)) - task.status = 'failed' - task.job_explanation += 'Task failed to generate dependencies: {}'.format(e) - task.save() - task.socketio_emit_status("failed") - continue - logger.debug("New dependencies: %s" % str(task_dependencies)) - for dep in task_dependencies: - # We recalculate the created time for the moment to ensure the - # dependencies are always sorted in the right order relative to - # the dependent task. 
- time_delt = len(task_dependencies) - task_dependencies.index(dep) - dep.created = task.created - datetime.timedelta(seconds=1 + time_delt) - dep.status = 'waiting' - dep.save() - waiting_tasks.insert(waiting_tasks.index(task), dep) - if not hasattr(settings, 'UNIT_TEST_IGNORE_TASK_WAIT'): - task.status = 'waiting' - task.save() - - # Rebuild graph - graph = SimpleDAG() - for task in running_tasks: - graph.add_node(task) - for wait_task in waiting_tasks[:50]: - node_dependencies = [] - for node in graph: - if wait_task.is_blocked_by(node['node_object']): - node_dependencies.append(node['node_object']) - graph.add_node(wait_task) - for dependency in node_dependencies: - graph.add_edge(wait_task, dependency) - if settings.DEBUG: - graph.generate_graphviz_plot() - return graph - -def process_graph(graph, task_capacity): - """Given a task dependency graph, start and manage tasks given their - priority and weight. - """ - leaf_nodes = graph.get_leaf_nodes() - running_nodes = filter(lambda x: x['node_object'].status == 'running', leaf_nodes) - running_impact = sum([t['node_object'].task_impact for t in running_nodes]) - ready_nodes = filter(lambda x: x['node_object'].status != 'running', leaf_nodes) - remaining_volume = task_capacity - running_impact - logger.info('Running Nodes: %s; Capacity: %s; Running Impact: %s; ' - 'Remaining Capacity: %s' % - (str(running_nodes), str(task_capacity), - str(running_impact), str(remaining_volume))) - logger.info("Ready Nodes: %s" % str(ready_nodes)) - for task_node in ready_nodes: - node_obj = task_node['node_object'] - # NOTE: This could be used to pass metadata through the task system - # node_args = task_node['metadata'] - impact = node_obj.task_impact - if impact <= remaining_volume or running_impact == 0: - node_dependencies = graph.get_dependents(node_obj) - # Allow other tasks to continue if a job fails, even if they are - # other jobs. - if graph.get_node_type(node_obj) == 'job': - node_dependencies = [] - dependent_nodes = [{'type': graph.get_node_type(node_obj), 'id': node_obj.id}] + \ - [{'type': graph.get_node_type(n['node_object']), - 'id': n['node_object'].id} for n in node_dependencies] - error_handler = handle_work_error.s(subtasks=dependent_nodes) - success_handler = handle_work_success.s(task_actual={'type': graph.get_node_type(node_obj), - 'id': node_obj.id}) - start_status = node_obj.start(error_callback=error_handler, success_callback=success_handler) - if not start_status: - node_obj.status = 'failed' - if node_obj.job_explanation: - node_obj.job_explanation += ' ' - node_obj.job_explanation += 'Task failed pre-start check.' - node_obj.save() - continue - remaining_volume -= impact - running_impact += impact - logger.info('Started Node: %s (capacity hit: %s) ' - 'Remaining Capacity: %s' % - (str(node_obj), str(impact), str(remaining_volume))) - -def run_taskmanager(): - """Receive task start and finish signals to rebuild a dependency graph - and manage the actual running of tasks. - """ - def shutdown_handler(): - def _handler(signum, frame): - signal.signal(signum, signal.SIG_DFL) - os.kill(os.getpid(), signum) - return _handler - signal.signal(signal.SIGINT, shutdown_handler()) - signal.signal(signal.SIGTERM, shutdown_handler()) - paused = False - task_capacity = get_system_task_capacity() - last_rebuild = datetime.datetime.fromtimestamp(0) - - # Attempt to pull messages off of the task system queue into perpetuity. 
- # - # A quick explanation of what is happening here: - # The popping messages off the queue bit is something of a sham. We remove - # the messages from the queue and then immediately throw them away. The - # `rebuild_graph` function, while it takes the message as an argument, - # ignores it. - # - # What actually happens is that we just check the database every 10 seconds - # to see what the task dependency graph looks like, and go do that. This - # is the job of the `rebuild_graph` function. - # - # There is some placeholder here: we may choose to actually use the message - # in the future. - while True: - # Pop a message off the queue. - # (If the queue is empty, None will be returned.) - message = queue.pop() - - # Parse out the message appropriately, rebuilding our graph if - # appropriate. - if (datetime.datetime.now() - last_rebuild).seconds > 10: - if message is not None and 'pause' in message: - logger.info("Pause command received: %s" % str(message)) - paused = message['pause'] - graph = rebuild_graph(message) - if not paused and graph is not None: - process_graph(graph, task_capacity) - last_rebuild = datetime.datetime.now() - time.sleep(0.1) - - -class Command(NoArgsCommand): - """Tower Task Management System - This daemon is designed to reside between our tasks and celery and - provide a mechanism for understanding the relationship between those tasks - and their dependencies. - - It also actively prevents situations in which Tower can get blocked - because it doesn't have an understanding of what is progressing through - celery. - """ - help = 'Launch the Tower task management system' - - def handle_noargs(self, **options): - try: - run_taskmanager() - except KeyboardInterrupt: - pass diff --git a/awx/main/management/commands/stats.py b/awx/main/management/commands/stats.py index 68b5ceef00..f55068d076 100644 --- a/awx/main/management/commands/stats.py +++ b/awx/main/management/commands/stats.py @@ -9,6 +9,7 @@ from django.core.management.base import BaseCommand # AWX from awx.main.models import * # noqa + class Command(BaseCommand): ''' Emits some simple statistics suitable for external monitoring diff --git a/awx/main/management/commands/update_instance.py b/awx/main/management/commands/update_instance.py deleted file mode 100644 index 9cfecfb22d..0000000000 --- a/awx/main/management/commands/update_instance.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved - -from django.core.management.base import CommandError -from django.db import transaction - -from awx.main.management.commands._base_instance import BaseCommandInstance -from awx.api.license import feature_enabled -from awx.main.models import Instance - -instance_str = BaseCommandInstance.instance_str - -class Command(BaseCommandInstance): - """Set an already registered instance to primary or secondary for HA - tracking. - - This command is idempotent. Settings a new primary instance when a - primary instance already exists will result in the existing primary - instance set to secondary and the new primary set to primary. - - This command will error out under the following circumstances: - - * Attempting to update a secondary instance with no primary instances. - * When a matching instance is not found. 
- """ - def __init__(self): - super(Command, self).__init__() - - self.include_option_primary_role() - self.include_option_hostname_uuid_find() - - @transaction.atomic - def handle(self, *args, **options): - super(Command, self).handle(*args, **options) - - # You can only promote/demote if your license allows HA - if not feature_enabled('ha'): - raise CommandError('Your Tower license does not permit promoting a secondary instance') - - # Is there an existing record for this machine? If so, retrieve that record and look for issues. - try: - instance = Instance.objects.get(**self.get_unique_fields()) - except Instance.DoesNotExist: - raise CommandError('No matching instance found to update.') - - # Get a status on primary machines (excluding this one, regardless of its status). - other_instances = Instance.objects.exclude(**self.get_unique_fields()) - primaries = other_instances.filter(primary=True).count() - - # If this is a primary machine and there is another primary machine, it must be de-primary-ified. - if self.is_option_primary() and primaries: - for old_primary in other_instances.filter(primary=True): - old_primary.primary = False - old_primary.save() - - # Okay, we've checked for appropriate errata; perform the registration. - instance.primary = self.is_option_primary() - instance.save() - - # If this is a primary instance, update projects. - if self.is_option_primary(): - self.update_projects(instance) - - # Done! - print('Successfully updated instance role %s' % instance_str(instance)) diff --git a/awx/main/management/commands/update_password.py b/awx/main/management/commands/update_password.py index fe45799776..18a9fb053d 100644 --- a/awx/main/management/commands/update_password.py +++ b/awx/main/management/commands/update_password.py @@ -9,10 +9,11 @@ from django.core.management.base import BaseCommand from django.core.management.base import CommandError from django.contrib.auth.models import User + class UpdatePassword(object): def update_password(self, username, password): changed = False - u = User.objects.get(username=username) + u = User.objects.get(username=username) if not u: raise RuntimeError("User not found") check = u.check_password(password) @@ -22,6 +23,7 @@ class UpdatePassword(object): changed = True return changed + class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option('--username', dest='username', action='store', type='string', default=None, diff --git a/awx/main/management/commands/workload_generator.py b/awx/main/management/commands/workload_generator.py deleted file mode 100644 index 658d5c7228..0000000000 --- a/awx/main/management/commands/workload_generator.py +++ /dev/null @@ -1,337 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. 
-# All Rights Reserved - -# Python -import pymongo -import sys -from optparse import make_option -import datetime -import json - -# Django -from django.core.management.base import BaseCommand -from django.utils.timezone import now - -# Mongoengine -import mongoengine - -# awx -from awx.fact.models.fact import * # noqa -from awx.main.models import * # noqa -from awx.main.utils import timedelta_total_seconds - -TEST_FACT_ANSIBLE = { - "ansible_swapfree_mb" : 4092, - "ansible_default_ipv6" : { - - }, - "ansible_distribution_release" : "trusty", - "ansible_system_vendor" : "innotek GmbH", - "ansible_os_family" : "Debian", - "ansible_all_ipv4_addresses" : [ - "192.168.1.145" - ], - "ansible_lsb" : { - "release" : "14.04", - "major_release" : "14", - "codename" : "trusty", - "id" : "Ubuntu", - "description" : "Ubuntu 14.04.2 LTS" - }, -} - -TEST_FACT_PACKAGES = [ - { - "name": "accountsservice", - "architecture": "amd64", - "source": "apt", - "version": "0.6.35-0ubuntu7.1" - }, - { - "name": "acpid", - "architecture": "amd64", - "source": "apt", - "version": "1:2.0.21-1ubuntu2" - }, - { - "name": "adduser", - "architecture": "all", - "source": "apt", - "version": "3.113+nmu3ubuntu3" - }, -] - -TEST_FACT_SERVICES = [ - { - "source" : "upstart", - "state" : "waiting", - "name" : "ureadahead-other", - "goal" : "stop" - }, - { - "source" : "upstart", - "state" : "running", - "name" : "apport", - "goal" : "start" - }, - { - "source" : "upstart", - "state" : "waiting", - "name" : "console-setup", - "goal" : "stop" - }, -] - -TEST_FACT_FILES = [ - { - "uid": 0, - "woth": False, - "mtime": 1436810539.5895822, - "inode": 525214, - "isgid": False, - "size": 0, - "isuid": False, - "isreg": True, - "gid": 0, - "ischr": False, - "wusr": True, - "xoth": False, - "islnk": False, - "nlink": 1, - "issock": False, - "rgrp": True, - "path": "/test/1948", - "xusr": False, - "atime": 1436810539.5895822, - "isdir": False, - "ctime": 1436810539.5895822, - "isblk": False, - "wgrp": False, - "xgrp": False, - "dev": 64768, - "roth": True, - "isfifo": False, - "mode": "0644", - "rusr": True - }, - { - "uid": 0, - "woth": False, - "mtime": 1436810540.4955823, - "inode": 526295, - "isgid": False, - "size": 0, - "isuid": False, - "isreg": True, - "gid": 0, - "ischr": False, - "wusr": True, - "xoth": False, - "islnk": False, - "nlink": 1, - "issock": False, - "rgrp": True, - "path": "/test/3029", - "xusr": False, - "atime": 1436810540.4955823, - "isdir": False, - "ctime": 1436810540.4955823, - "isblk": False, - "wgrp": False, - "xgrp": False, - "dev": 64768, - "roth": True, - "isfifo": False, - "mode": "0644", - "rusr": True - }, - { - "uid": 0, - "woth": False, - "mtime": 1436810540.5825822, - "inode": 526401, - "isgid": False, - "size": 0, - "isuid": False, - "isreg": True, - "gid": 0, - "ischr": False, - "wusr": True, - "xoth": False, - "islnk": False, - "nlink": 1, - "issock": False, - "rgrp": True, - "path": "/test/3135", - "xusr": False, - "atime": 1436810540.5825822, - "isdir": False, - "ctime": 1436810540.5825822, - "isblk": False, - "wgrp": False, - "xgrp": False, - "dev": 64768, - "roth": True, - "isfifo": False, - "mode": "0644", - "rusr": True - }, -] - -FACT_FIXTURES = { - 'ansible': TEST_FACT_ANSIBLE, - 'packages': TEST_FACT_PACKAGES, - 'services': TEST_FACT_SERVICES, - 'files': TEST_FACT_FILES, -} - -EXPERIMENT_DEFAULT = { - "hosts": 10, - "scan": { - "duration" : int(525949), # 1 year - "period": 1440 # 1 day - }, - "modules": [ - "ansible", - "packages", - "services", - "files" - ] -} - -class 
Experiment(object): - def __init__(self, exp, fact_fixtures, raw_db, mongoengine_db): - self.db = raw_db - self.enginedb = mongoengine_db - - for module in exp['modules']: - if module not in fact_fixtures: - raise RuntimeError("Module %s fixture not found in %s" % (module, fact_fixtures)) - - # Setup experiment from experiment params - self.fact_fixtures = fact_fixtures - self.host_count = exp['hosts'] - self.scans_total = int(exp['scan']['duration'] / exp['scan']['period']) # round down - self.scan_end = int(timedelta_total_seconds((datetime.datetime(2015,1,1) - datetime.datetime(1970,1,1))) / 60) - self.scan_start = self.scan_end - exp['scan']['duration'] - self.scan_period = exp['scan']['period'] - self.modules = exp['modules'] - - # Changing vars - self.scan_time_last = self.scan_start - self.scan_time = self.scan_start - - # - self.user = None - self.org = None - self.inv = None - self.hosts = [] - - @property - def scan_datetime(self): - return datetime.datetime.fromtimestamp(self.scan_time * 60) - - def _delete_tower_metadata(self): - invs = Inventory.objects.filter(name='sys_tracking_inventory') - org = Organization.objects.filter(name='sys_tracking_organization') - if invs: - for inv in invs: - for host in inv.hosts.all(): - host.delete() - invs.delete() - org.delete() - User.objects.filter(username='boberson').delete() - - ''' - Create an org and an inventory - ''' - def _create_tower_metadata(self): - self.user = User.objects.create_user('boberson', "%s@example.com", 'scoobasteve') - self.org = Organization.objects.create( - name='sys_tracking_organization', - description='The system tracking organization is serious about seriousness.', - created_by=self.user, - ) - self.org.admins.add(self.user) - self.inv = self.org.inventories.create( - name='sys_tracking_inventory', - created_by=self.user, - ) - for x in range(0, self.host_count): - host = self.inv.hosts.create(name='hostname_%s.doesnotexist.ansible.com' % x, - inventory=self.inv) - self.hosts.append(host) - - def generate_workload(self): - time_start = now() - print("Started at: %s" % time_start) - - # TODO only call delete if --dropdb ?? - self._delete_tower_metadata() - - print("Creating needed tower models (i.e. 
org, inventory, etc.)") - self._create_tower_metadata() - print("Generating workload ") - - scan_time_backup = self.scan_time - for host_i in range(0, self.host_count): - # Reset scan time - self.scan_time = scan_time_backup - sys.stdout.write('.') - sys.stdout.flush() - host = FactHost(hostname='hostname_%s.doesnotexist.ansible.com' % host_i, inventory_id=self.inv.pk).save() - for scan_i in range(0, self.scans_total): - for module in self.modules: - Fact.add_fact(self.scan_datetime, host=host, module=module, fact=self.fact_fixtures[module]) - - self.scan_time_last = self.scan_time - self.scan_time += self.scan_period - time_end = now() - print("") - print("Finished at: %s" % time_end) - print("Total runtime: %s seconds" % timedelta_total_seconds(time_end - time_start)) - -class Command(BaseCommand): - option_list = BaseCommand.option_list + ( - make_option('--drop', dest='drop', action='store_true', default=False, - help='Drop collections before generating workload.'), - make_option('--experiment', dest='experiment', action='store', default=None, - help='experiment config file defining the params'), - make_option('--host', dest='host', action='store', type='string', default='localhost', - help='mongodb host'), - make_option('--username', dest='username', action='store', type='string', default=None, - help='mongodb username'), - make_option('--password', dest='password', action='store', type='string', default=None, - help='mongodb password'), - make_option('--port', dest='port', action='store', type='int', default=27017, - help='mongodb port'), - make_option('--db', dest='db', action='store', type='string', default='system_tracking_workload', - help='mongodb database name'), - make_option('--quite', dest='quite', action='store_true', default=False, - help='Surpress the printing of large results.'), - make_option('--silent', dest='silent', action='store_true', default=False, - help='Surpress the printing of ALL results.'), - ) - - def handle(self, *args, **options): - # TODO: load experiment from file, json - if options['experiment']: - f = open(options['experiment']) - exp = json.loads(f.read()) - else: - exp = EXPERIMENT_DEFAULT - print("Experiment settings\n%s\n" % exp) - - self.client = pymongo.MongoClient(options['host'], options['port']) - db = self.client[options['db']] - - connection_params = dict((k, options[k]) for k in ['host', 'port', 'username', 'password']) - - mongoengine.connection.disconnect() - enginedb = mongoengine.connection.connect(options['db'], **connection_params) - if options['drop']: - enginedb.drop_database(options['db']) - - self.experiment = Experiment(exp, FACT_FIXTURES, db, enginedb) - self.experiment.generate_workload() - diff --git a/awx/main/managers.py b/awx/main/managers.py index 4825a74cf8..522157a70f 100644 --- a/awx/main/managers.py +++ b/awx/main/managers.py @@ -2,9 +2,12 @@ # All Rights Reserved. import sys +from datetime import timedelta -from django.conf import settings from django.db import models +from django.utils.timezone import now +from django.db.models import Sum +from django.conf import settings class HostManager(models.Manager): @@ -17,6 +20,7 @@ class HostManager(models.Manager): except NotImplementedError: # For unit tests only, SQLite doesn't support distinct('name') return len(set(self.values_list('name', flat=True))) + class InstanceManager(models.Manager): """A custom manager class for the Instance model. 
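# The hunk below drops the primary/secondary bookkeeping in favor of
# heartbeat-based accounting: total_capacity() sums `capacity` over instances
# whose `modified` timestamp falls within settings.AWX_ACTIVE_NODE_TIME. A
# standalone sketch of that aggregation; the 120-second window is an assumed
# value, and the `or 0` guards the Sum() of an empty queryset.
from datetime import timedelta
from django.db.models import Sum
from django.utils.timezone import now

def total_capacity(instances, active_window_seconds=120):
    cutoff = now() - timedelta(seconds=active_window_seconds)
    total = (instances.filter(modified__gte=cutoff)
                      .aggregate(total=Sum('capacity'))['total'])
    # Mirror the 50-unit floor applied in the manager below.
    return max(50, total or 0)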
@@ -26,33 +30,25 @@ class InstanceManager(models.Manager): def me(self): """Return the currently active instance.""" # If we are running unit tests, return a stub record. - if len(sys.argv) >= 2 and sys.argv[1] == 'test': - return self.model(id=1, primary=True, + if settings.IS_TESTING(sys.argv): + return self.model(id=1, + hostname='localhost', uuid='00000000-0000-0000-0000-000000000000') - # Return the appropriate record from the database. - return self.get(uuid=settings.SYSTEM_UUID) + node = self.filter(hostname=settings.CLUSTER_HOST_ID) + if node.exists(): + return node[0] + raise RuntimeError("No instance found with the current cluster host id") + + def active_count(self): + """Return count of active Tower nodes for licensing.""" + return self.all().count() + + def total_capacity(self): + sumval = self.filter(modified__gte=now() - timedelta(seconds=settings.AWX_ACTIVE_NODE_TIME)) \ + .aggregate(total_capacity=Sum('capacity'))['total_capacity'] + return max(50, sumval) def my_role(self): - """Return the role of the currently active instance, as a string - ('primary' or 'secondary'). - """ - # If we are running unit tests, we are primary, because reasons. - if len(sys.argv) >= 2 and sys.argv[1] == 'test': - return 'primary' - - # Check if this instance is primary; if so, return "primary", otherwise - # "secondary". - if self.me().primary: - return 'primary' - return 'secondary' - - def primary(self): - """Return the primary instance.""" - # If we are running unit tests, return a stub record. - if len(sys.argv) >= 2 and sys.argv[1] == 'test': - return self.model(id=1, primary=True, - uuid='00000000-0000-0000-0000-000000000000') - - # Return the appropriate record from the database. - return self.get(primary=True) + # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing + return "tower" diff --git a/awx/main/middleware.py b/awx/main/middleware.py index 37903886ac..0e0e4c748c 100644 --- a/awx/main/middleware.py +++ b/awx/main/middleware.py @@ -5,20 +5,18 @@ import logging import threading import uuid +from django.conf import settings from django.contrib.auth.models import User from django.db.models.signals import post_save from django.db import IntegrityError -from django.http import HttpResponseRedirect -from django.template.response import TemplateResponse from django.utils.functional import curry -from awx import __version__ as version -from awx.main.models import ActivityStream, Instance -from awx.main.conf import tower_settings +from awx.main.models import ActivityStream from awx.api.authentication import TokenAuthentication logger = logging.getLogger('awx.main.middleware') +analytics_logger = logging.getLogger('awx.analytics.activity_stream') class ActivityStreamMiddleware(threading.local): @@ -49,6 +47,10 @@ class ActivityStreamMiddleware(threading.local): instance.actor = drf_user try: instance.save(update_fields=['actor']) + analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1), + extra=dict(changes=instance.changes, relationship=instance.object_relationship_type, + actor=drf_user.username, operation=instance.operation, + object1=instance.object1, object2=instance.object2)) except IntegrityError: logger.debug("Integrity Error saving Activity Stream instance for id : " + str(instance.id)) # else: @@ -66,46 +68,11 @@ class ActivityStreamMiddleware(threading.local): if user.exists(): user = user[0] instance.actor = user - instance.save(update_fields=['actor']) else: if instance.id not in self.instance_ids: 
self.instance_ids.append(instance.id) -class HAMiddleware(object): - """A middleware class that checks to see whether the request is being - served on a secondary instance, and redirects the request back to the - primary instance if so. - """ - def process_request(self, request): - """Process the request, and redirect if this is a request on a - secondary node. - """ - # Is this the primary node? If so, we can just return None and be done; - # we just want normal behavior in this case. - if Instance.objects.my_role() == 'primary': - return None - - # Always allow the /ping/ endpoint. - if request.path.startswith('/api/v1/ping'): - return None - - # Get the primary instance. - primary = Instance.objects.primary() - - # If this is a request to /, then we return a special landing page that - # informs the user that they are on the secondary instance and will - # be redirected. - if request.path == '/': - return TemplateResponse(request, 'ha/redirect.html', { - 'primary': primary, - 'redirect_seconds': 30, - 'version': version, - }) - - # Redirect to the base page of the primary instance. - return HttpResponseRedirect('http://%s%s' % (primary.hostname, request.path)) - class AuthTokenTimeoutMiddleware(object): """Presume that when the user includes the auth header, they go through the authentication mechanism. Further, that mechanism is presumed to extend @@ -117,6 +84,6 @@ class AuthTokenTimeoutMiddleware(object): if not TokenAuthentication._get_x_auth_token_header(request): return response - response['Auth-Token-Timeout'] = int(tower_settings.AUTH_TOKEN_EXPIRATION) + response['Auth-Token-Timeout'] = int(settings.AUTH_TOKEN_EXPIRATION) return response diff --git a/awx/main/migrations/0002_squashed_v300_release.py b/awx/main/migrations/0002_squashed_v300_release.py new file mode 100644 index 0000000000..c398d18468 --- /dev/null +++ b/awx/main/migrations/0002_squashed_v300_release.py @@ -0,0 +1,742 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +from __future__ import unicode_literals + +import awx.main.fields + +from django.db import migrations, models +import django.db.models.deletion +from django.conf import settings +from django.utils.timezone import now + +import jsonfield.fields +import jsonbfield.fields +import taggit.managers + + +def create_system_job_templates(apps, schema_editor): + ''' + Create default system job templates if not present. Create default schedules + only if new system job templates were created (i.e. new database). 
+ ''' + + SystemJobTemplate = apps.get_model('main', 'SystemJobTemplate') + Schedule = apps.get_model('main', 'Schedule') + ContentType = apps.get_model('contenttypes', 'ContentType') + sjt_ct = ContentType.objects.get_for_model(SystemJobTemplate) + now_dt = now() + now_str = now_dt.strftime('%Y%m%dT%H%M%SZ') + + sjt, created = SystemJobTemplate.objects.get_or_create( + job_type='cleanup_jobs', + defaults=dict( + name='Cleanup Job Details', + description='Remove job history', + created=now_dt, + modified=now_dt, + polymorphic_ctype=sjt_ct, + ), + ) + if created: + sched = Schedule( + name='Cleanup Job Schedule', + rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU' % now_str, + description='Automatically Generated Schedule', + enabled=True, + extra_data={'days': '120'}, + created=now_dt, + modified=now_dt, + ) + sched.unified_job_template = sjt + sched.save() + + existing_cd_jobs = SystemJobTemplate.objects.filter(job_type='cleanup_deleted') + Schedule.objects.filter(unified_job_template__in=existing_cd_jobs).delete() + existing_cd_jobs.delete() + + sjt, created = SystemJobTemplate.objects.get_or_create( + job_type='cleanup_activitystream', + defaults=dict( + name='Cleanup Activity Stream', + description='Remove activity stream history', + created=now_dt, + modified=now_dt, + polymorphic_ctype=sjt_ct, + ), + ) + if created: + sched = Schedule( + name='Cleanup Activity Schedule', + rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=TU' % now_str, + description='Automatically Generated Schedule', + enabled=True, + extra_data={'days': '355'}, + created=now_dt, + modified=now_dt, + ) + sched.unified_job_template = sjt + sched.save() + + sjt, created = SystemJobTemplate.objects.get_or_create( + job_type='cleanup_facts', + defaults=dict( + name='Cleanup Fact Details', + description='Remove system tracking history', + created=now_dt, + modified=now_dt, + polymorphic_ctype=sjt_ct, + ), + ) + if created: + sched = Schedule( + name='Cleanup Fact Schedule', + rrule='DTSTART:%s RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=1' % now_str, + description='Automatically Generated Schedule', + enabled=True, + extra_data={'older_than': '120d', 'granularity': '1w'}, + created=now_dt, + modified=now_dt, + ) + sched.unified_job_template = sjt + sched.save() + + +class Migration(migrations.Migration): + replaces = [(b'main', '0002_v300_tower_settings_changes'), + (b'main', '0003_v300_notification_changes'), + (b'main', '0004_v300_fact_changes'), + (b'main', '0005_v300_migrate_facts'), + (b'main', '0006_v300_active_flag_cleanup'), + (b'main', '0007_v300_active_flag_removal'), + (b'main', '0008_v300_rbac_changes'), + (b'main', '0009_v300_rbac_migrations'), + (b'main', '0010_v300_create_system_job_templates'), + (b'main', '0011_v300_credential_domain_field'), + (b'main', '0012_v300_create_labels'), + (b'main', '0013_v300_label_changes'), + (b'main', '0014_v300_invsource_cred'), + (b'main', '0015_v300_label_changes'), + (b'main', '0016_v300_prompting_changes'), + (b'main', '0017_v300_prompting_migrations'), + (b'main', '0018_v300_host_ordering'), + (b'main', '0019_v300_new_azure_credential'),] + + dependencies = [ + ('taggit', '0002_auto_20150616_2121'), + ('contenttypes', '0002_remove_content_type_name'), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('main', '0001_initial'), + ] + + operations = [ + # Tower settings changes + migrations.CreateModel( + name='TowerSettings', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + 
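+ # The remaining columns form a typed key/value settings store: 'value'
+ # holds the serialized setting and 'value_type' records how to
+ # deserialize it (string, int, float, json, bool, password, or list).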
('created', models.DateTimeField(default=None, editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('key', models.CharField(unique=True, max_length=255)), + ('description', models.TextField()), + ('category', models.CharField(max_length=128)), + ('value', models.TextField(blank=True)), + ('value_type', models.CharField(max_length=12, choices=[(b'string', 'String'), (b'int', 'Integer'), (b'float', 'Decimal'), (b'json', 'JSON'), (b'bool', 'Boolean'), (b'password', 'Password'), (b'list', 'List')])), + ('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), + ], + ), + # Notification changes + migrations.CreateModel( + name='Notification', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('created', models.DateTimeField(default=None, editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('status', models.CharField(default=b'pending', max_length=20, editable=False, choices=[(b'pending', 'Pending'), (b'successful', 'Successful'), (b'failed', 'Failed')])), + ('error', models.TextField(default=b'', editable=False, blank=True)), + ('notifications_sent', models.IntegerField(default=0, editable=False)), + ('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'irc', 'IRC')])), + ('recipients', models.TextField(default=b'', editable=False, blank=True)), + ('subject', models.TextField(default=b'', editable=False, blank=True)), + ('body', jsonfield.fields.JSONField(default=dict, blank=True)), + ], + options={ + 'ordering': ('pk',), + }, + ), + migrations.CreateModel( + name='NotificationTemplate', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('created', models.DateTimeField(default=None, editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('description', models.TextField(default=b'', blank=True)), + ('name', models.CharField(unique=True, max_length=512)), + ('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'irc', 'IRC')])), + ('notification_configuration', jsonfield.fields.JSONField(default=dict)), + ('created_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), + ('modified_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), + ('organization', models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)), + ('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')), + ], + ), + migrations.AddField( + model_name='notification', + name='notification_template', + field=models.ForeignKey(related_name='notifications', editable=False, to='main.NotificationTemplate'), 
+ ), + migrations.AddField( + model_name='activitystream', + name='notification', + field=models.ManyToManyField(to='main.Notification', blank=True), + ), + migrations.AddField( + model_name='activitystream', + name='notification_template', + field=models.ManyToManyField(to='main.NotificationTemplate', blank=True), + ), + migrations.AddField( + model_name='organization', + name='notification_templates_any', + field=models.ManyToManyField(related_name='organization_notification_templates_for_any', to='main.NotificationTemplate', blank=True), + ), + migrations.AddField( + model_name='organization', + name='notification_templates_error', + field=models.ManyToManyField(related_name='organization_notification_templates_for_errors', to='main.NotificationTemplate', blank=True), + ), + migrations.AddField( + model_name='organization', + name='notification_templates_success', + field=models.ManyToManyField(related_name='organization_notification_templates_for_success', to='main.NotificationTemplate', blank=True), + ), + migrations.AddField( + model_name='unifiedjob', + name='notifications', + field=models.ManyToManyField(related_name='unifiedjob_notifications', editable=False, to='main.Notification'), + ), + migrations.AddField( + model_name='unifiedjobtemplate', + name='notification_templates_any', + field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_any', to='main.NotificationTemplate', blank=True), + ), + migrations.AddField( + model_name='unifiedjobtemplate', + name='notification_templates_error', + field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_errors', to='main.NotificationTemplate', blank=True), + ), + migrations.AddField( + model_name='unifiedjobtemplate', + name='notification_templates_success', + field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_success', to='main.NotificationTemplate', blank=True), + ), + # Fact changes + migrations.CreateModel( + name='Fact', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('timestamp', models.DateTimeField(default=None, help_text='Date and time of the corresponding fact scan gathering time.', editable=False)), + ('module', models.CharField(max_length=128)), + ('facts', jsonbfield.fields.JSONField(default={}, help_text='Arbitrary JSON structure of module facts captured at timestamp for a single host.', blank=True)), + ('host', models.ForeignKey(related_name='facts', to='main.Host', help_text='Host for the facts that the fact scan captured.')), + ], + ), + migrations.AlterIndexTogether( + name='fact', + index_together=set([('timestamp', 'module', 'host')]), + ), + # Active flag removal + migrations.RemoveField( + model_name='credential', + name='active', + ), + migrations.RemoveField( + model_name='custominventoryscript', + name='active', + ), + migrations.RemoveField( + model_name='group', + name='active', + ), + migrations.RemoveField( + model_name='host', + name='active', + ), + migrations.RemoveField( + model_name='inventory', + name='active', + ), + migrations.RemoveField( + model_name='organization', + name='active', + ), + migrations.RemoveField( + model_name='permission', + name='active', + ), + migrations.RemoveField( + model_name='schedule', + name='active', + ), + migrations.RemoveField( + model_name='team', + name='active', + ), + migrations.RemoveField( + model_name='unifiedjob', + name='active', + ), + migrations.RemoveField( + 
model_name='unifiedjobtemplate', + name='active', + ), + + # RBAC Changes + # ############ + migrations.RenameField( + 'Organization', + 'admins', + 'deprecated_admins', + ), + migrations.RenameField( + 'Organization', + 'users', + 'deprecated_users', + ), + migrations.RenameField( + 'Team', + 'users', + 'deprecated_users', + ), + migrations.RenameField( + 'Team', + 'projects', + 'deprecated_projects', + ), + migrations.AddField( + model_name='project', + name='organization', + field=models.ForeignKey(related_name='projects', to='main.Organization', blank=True, null=True), + ), + migrations.AlterField( + model_name='team', + name='deprecated_projects', + field=models.ManyToManyField(related_name='deprecated_teams', to='main.Project', blank=True), + ), + migrations.RenameField( + model_name='organization', + old_name='projects', + new_name='deprecated_projects', + ), + migrations.AlterField( + model_name='organization', + name='deprecated_projects', + field=models.ManyToManyField(related_name='deprecated_organizations', to='main.Project', blank=True), + ), + migrations.RenameField( + 'Credential', + 'team', + 'deprecated_team', + ), + migrations.RenameField( + 'Credential', + 'user', + 'deprecated_user', + ), + migrations.AlterField( + model_name='organization', + name='deprecated_admins', + field=models.ManyToManyField(related_name='deprecated_admin_of_organizations', to=settings.AUTH_USER_MODEL, blank=True), + ), + migrations.AlterField( + model_name='organization', + name='deprecated_users', + field=models.ManyToManyField(related_name='deprecated_organizations', to=settings.AUTH_USER_MODEL, blank=True), + ), + migrations.AlterField( + model_name='team', + name='deprecated_users', + field=models.ManyToManyField(related_name='deprecated_teams', to=settings.AUTH_USER_MODEL, blank=True), + ), + migrations.AlterUniqueTogether( + name='credential', + unique_together=set([]), + ), + migrations.AddField( + model_name='credential', + name='organization', + field=models.ForeignKey(related_name='credentials', default=None, blank=True, to='main.Organization', null=True), + ), + + # + # New RBAC models and fields + # + migrations.CreateModel( + name='Role', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('role_field', models.TextField()), + ('singleton_name', models.TextField(default=None, unique=True, null=True, db_index=True)), + ('members', models.ManyToManyField(related_name='roles', to=settings.AUTH_USER_MODEL)), + ('parents', models.ManyToManyField(related_name='children', to='main.Role')), + ('implicit_parents', models.TextField(default=b'[]')), + ('content_type', models.ForeignKey(default=None, to='contenttypes.ContentType', null=True)), + ('object_id', models.PositiveIntegerField(default=None, null=True)), + + ], + options={ + 'db_table': 'main_rbac_roles', + 'verbose_name_plural': 'roles', + }, + ), + migrations.CreateModel( + name='RoleAncestorEntry', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('role_field', models.TextField()), + ('content_type_id', models.PositiveIntegerField()), + ('object_id', models.PositiveIntegerField()), + ('ancestor', models.ForeignKey(related_name='+', to='main.Role')), + ('descendent', models.ForeignKey(related_name='+', to='main.Role')), + ], + options={ + 'db_table': 'main_rbac_role_ancestors', + 'verbose_name_plural': 'role_ancestors', + }, + ), + migrations.AddField( + model_name='role', + name='ancestors', + 
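+ # RoleAncestorEntry caches the transitive closure of the role graph so
+ # that ancestry checks resolve with a single join rather than walking
+ # 'parents' recursively; hence the ancestor/descendent indexes below.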
field=models.ManyToManyField(related_name='descendents', through='main.RoleAncestorEntry', to='main.Role'), + ), + migrations.AlterIndexTogether( + name='role', + index_together=set([('content_type', 'object_id')]), + ), + migrations.AlterIndexTogether( + name='roleancestorentry', + index_together=set([('ancestor', 'content_type_id', 'object_id'), ('ancestor', 'content_type_id', 'role_field'), ('ancestor', 'descendent')]), + ), + migrations.AddField( + model_name='credential', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_administrator'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='credential', + name='use_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='credential', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_auditor', b'organization.auditor_role', b'use_role', b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='custominventoryscript', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'organization.admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='custominventoryscript', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'organization.auditor_role', b'organization.member_role', b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='inventory', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'organization.admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='inventory', + name='adhoc_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='inventory', + name='update_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='inventory', + name='use_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'adhoc_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='inventory', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'organization.auditor_role', b'update_role', b'use_role', b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='jobtemplate', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'project.organization.admin_role', b'inventory.organization.admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='jobtemplate', + name='execute_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='jobtemplate', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'project.organization.auditor_role', b'inventory.organization.auditor_role', b'execute_role', b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='organization', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'singleton:system_administrator', 
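+ # ImplicitRoleField auto-creates a Role row per object; 'parent_role'
+ # names the field paths or singletons (e.g. 'organization.admin_role',
+ # 'singleton:system_administrator') whose roles become its parents.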
to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='organization', + name='auditor_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'singleton:system_auditor', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='organization', + name='member_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='organization', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'member_role', b'auditor_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='project', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'organization.admin_role', b'singleton:system_administrator'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='project', + name='use_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='project', + name='update_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='project', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'organization.auditor_role', b'singleton:system_auditor', b'use_role', b'update_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='team', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'organization.admin_role', to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='team', + name='member_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=None, to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='team', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role', b'organization.auditor_role', b'member_role'], to='main.Role', null=b'True'), + ), + + # System Job Templates + migrations.RunPython(create_system_job_templates, migrations.RunPython.noop), + migrations.AlterField( + model_name='systemjob', + name='job_type', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'cleanup_jobs', 'Remove jobs older than a certain number of days'), (b'cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), (b'cleanup_facts', 'Purge and/or reduce the granularity of system tracking data')]), + ), + migrations.AlterField( + model_name='systemjobtemplate', + name='job_type', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'cleanup_jobs', 'Remove jobs older than a certain number of days'), (b'cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), (b'cleanup_facts', 'Purge and/or reduce the granularity of system tracking data')]), + ), + # Credential domain field + migrations.AddField( + model_name='credential', + name='domain', + field=models.CharField(default=b'', help_text='The identifier for the domain.', max_length=100, verbose_name='Domain', blank=True), + ), + # Create Labels + migrations.CreateModel( + name='Label', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('created', models.DateTimeField(default=None, 
editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('description', models.TextField(default=b'', blank=True)), + ('name', models.CharField(max_length=512)), + ('created_by', models.ForeignKey(related_name="{u'class': 'label', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), + ('modified_by', models.ForeignKey(related_name="{u'class': 'label', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), + ('organization', models.ForeignKey(related_name='labels', to='main.Organization', help_text='Organization this label belongs to.')), + ('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')), + ], + options={ + 'ordering': ('organization', 'name'), + }, + ), + migrations.AddField( + model_name='activitystream', + name='label', + field=models.ManyToManyField(to='main.Label', blank=True), + ), + migrations.AddField( + model_name='job', + name='labels', + field=models.ManyToManyField(related_name='job_labels', to='main.Label', blank=True), + ), + migrations.AddField( + model_name='jobtemplate', + name='labels', + field=models.ManyToManyField(related_name='jobtemplate_labels', to='main.Label', blank=True), + ), + migrations.AlterUniqueTogether( + name='label', + unique_together=set([('name', 'organization')]), + ), + # Label changes + migrations.AlterField( + model_name='label', + name='organization', + field=models.ForeignKey(related_name='labels', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Organization', help_text='Organization this label belongs to.', null=True), + ), + migrations.AlterField( + model_name='label', + name='organization', + field=models.ForeignKey(related_name='labels', to='main.Organization', help_text='Organization this label belongs to.'), + ), + # InventorySource Credential + migrations.AddField( + model_name='job', + name='network_credential', + field=models.ForeignKey(related_name='jobs_as_network_credential+', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True), + ), + migrations.AddField( + model_name='jobtemplate', + name='network_credential', + field=models.ForeignKey(related_name='jobtemplates_as_network_credential+', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True), + ), + migrations.AddField( + model_name='credential', + name='authorize', + field=models.BooleanField(default=False, help_text='Whether to use the authorize mechanism.'), + ), + migrations.AddField( + model_name='credential', + name='authorize_password', + field=models.CharField(default=b'', help_text='Password used by the authorize mechanism.', max_length=1024, blank=True), + ), + migrations.AlterField( + model_name='credential', + name='deprecated_team', + field=models.ForeignKey(related_name='deprecated_credentials', default=None, blank=True, to='main.Team', null=True), + ), + migrations.AlterField( + model_name='credential', + name='deprecated_user', + field=models.ForeignKey(related_name='deprecated_credentials', default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True), + ), + migrations.AlterField( + model_name='credential', + name='kind', + field=models.CharField(default=b'ssh', 
max_length=32, choices=[(b'ssh', 'Machine'), (b'net', 'Network'), (b'scm', 'Source Control'), (b'aws', 'Amazon Web Services'), (b'rax', 'Rackspace'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure'), (b'openstack', 'OpenStack')]), + ), + migrations.AlterField( + model_name='inventorysource', + name='source', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'Local File, Directory or Script'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]), + ), + migrations.AlterField( + model_name='inventoryupdate', + name='source', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'Local File, Directory or Script'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]), + ), + migrations.AlterField( + model_name='team', + name='deprecated_projects', + field=models.ManyToManyField(related_name='deprecated_teams', to='main.Project', blank=True), + ), + # Prompting changes + migrations.AddField( + model_name='jobtemplate', + name='ask_limit_on_launch', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_inventory_on_launch', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_credential_on_launch', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_job_type_on_launch', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_tags_on_launch', + field=models.BooleanField(default=False), + ), + migrations.AlterField( + model_name='job', + name='inventory', + field=models.ForeignKey(related_name='jobs', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True), + ), + migrations.AlterField( + model_name='jobtemplate', + name='inventory', + field=models.ForeignKey(related_name='jobtemplates', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True), + ), + # Host ordering + migrations.AlterModelOptions( + name='host', + options={'ordering': ('name',)}, + ), + # New Azure credential + migrations.AddField( + model_name='credential', + name='client', + field=models.CharField(default=b'', help_text='Client Id or Application Id for the credential', max_length=128, blank=True), + ), + migrations.AddField( + model_name='credential', + name='secret', + field=models.CharField(default=b'', help_text='Secret Token for this credential', max_length=1024, blank=True), + ), + migrations.AddField( + model_name='credential', + name='subscription', + field=models.CharField(default=b'', help_text='Subscription identifier for this credential', max_length=1024, blank=True), + ), + migrations.AddField( + model_name='credential', + name='tenant', + field=models.CharField(default=b'', help_text='Tenant 
identifier for this credential', max_length=1024, blank=True), + ), + migrations.AlterField( + model_name='credential', + name='kind', + field=models.CharField(default=b'ssh', max_length=32, choices=[(b'ssh', 'Machine'), (b'net', 'Network'), (b'scm', 'Source Control'), (b'aws', 'Amazon Web Services'), (b'rax', 'Rackspace'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Satellite 6'), (b'cloudforms', 'CloudForms'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'openstack', 'OpenStack')]), + ), + migrations.AlterField( + model_name='host', + name='instance_id', + field=models.CharField(default=b'', max_length=1024, blank=True), + ), + migrations.AlterField( + model_name='inventorysource', + name='source', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'Local File, Directory or Script'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Satellite 6'), (b'cloudforms', 'CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]), + ), + migrations.AlterField( + model_name='inventoryupdate', + name='source', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'Local File, Directory or Script'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Satellite 6'), (b'cloudforms', 'CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]), + ), + ] diff --git a/awx/main/migrations/0003_squashed_v300_v303_updates.py b/awx/main/migrations/0003_squashed_v300_v303_updates.py new file mode 100644 index 0000000000..82d781ec85 --- /dev/null +++ b/awx/main/migrations/0003_squashed_v300_v303_updates.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. 
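+# A squashed migration collapses a run of historical migrations into one
+# file: databases that already applied anything named in 'replaces' are
+# recorded as having applied the squash, while fresh installs run only
+# the squashed operations. A minimal sketch of the pattern, with
+# hypothetical migration names:
+#
+#     class Migration(migrations.Migration):
+#         replaces = [(b'main', '0020_example'), (b'main', '0021_example')]
+#         dependencies = [('main', '0002_squashed_v300_release')]
+#         operations = []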
+ +from __future__ import unicode_literals + +from django.db import migrations, models +from django.conf import settings +import awx.main.fields +import jsonfield.fields + + +def update_dashed_host_variables(apps, schema_editor): + Host = apps.get_model('main', 'Host') + for host in Host.objects.filter(variables='---'): + host.variables = '' + host.save() + + +class Migration(migrations.Migration): + replaces = [(b'main', '0020_v300_labels_changes'), + (b'main', '0021_v300_activity_stream'), + (b'main', '0022_v300_adhoc_extravars'), + (b'main', '0023_v300_activity_stream_ordering'), + (b'main', '0024_v300_jobtemplate_allow_simul'), + (b'main', '0025_v300_update_rbac_parents'), + (b'main', '0026_v300_credential_unique'), + (b'main', '0027_v300_team_migrations'), + (b'main', '0028_v300_org_team_cascade'), + (b'main', '0029_v302_add_ask_skip_tags'), + (b'main', '0030_v302_job_survey_passwords'), + (b'main', '0031_v302_migrate_survey_passwords'), + (b'main', '0032_v302_credential_permissions_update'), + (b'main', '0033_v303_v245_host_variable_fix'),] + + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('main', '0002_squashed_v300_release'), + ] + + operations = [ + # Labels Changes + migrations.RemoveField( + model_name='job', + name='labels', + ), + migrations.RemoveField( + model_name='jobtemplate', + name='labels', + ), + migrations.AddField( + model_name='unifiedjob', + name='labels', + field=models.ManyToManyField(related_name='unifiedjob_labels', to='main.Label', blank=True), + ), + migrations.AddField( + model_name='unifiedjobtemplate', + name='labels', + field=models.ManyToManyField(related_name='unifiedjobtemplate_labels', to='main.Label', blank=True), + ), + # Activity Stream + migrations.AddField( + model_name='activitystream', + name='role', + field=models.ManyToManyField(to='main.Role', blank=True), + ), + migrations.AlterModelOptions( + name='activitystream', + options={'ordering': ('pk',)}, + ), + # Adhoc extra vars + migrations.AddField( + model_name='adhoccommand', + name='extra_vars', + field=models.TextField(default=b'', blank=True), + ), + migrations.AlterField( + model_name='credential', + name='kind', + field=models.CharField(default=b'ssh', max_length=32, choices=[(b'ssh', 'Machine'), (b'net', 'Network'), (b'scm', 'Source Control'), (b'aws', 'Amazon Web Services'), (b'rax', 'Rackspace'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'openstack', 'OpenStack')]), + ), + migrations.AlterField( + model_name='inventorysource', + name='source', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'Local File, Directory or Script'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]), + ), + migrations.AlterField( + model_name='inventoryupdate', + name='source', + field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'Local File, Directory or Script'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute 
Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]), + ), + # jobtemplate allow simul + migrations.AddField( + model_name='jobtemplate', + name='allow_simultaneous', + field=models.BooleanField(default=False), + ), + # RBAC update parents + migrations.AlterField( + model_name='credential', + name='use_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'organization.admin_role', b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AlterField( + model_name='team', + name='member_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=b'admin_role', to='main.Role', null=b'True'), + ), + migrations.AlterField( + model_name='team', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'organization.auditor_role', b'member_role'], to='main.Role', null=b'True'), + ), + # Unique credential + migrations.AlterUniqueTogether( + name='credential', + unique_together=set([('organization', 'name', 'kind')]), + ), + migrations.AlterField( + model_name='credential', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_auditor', b'organization.auditor_role', b'use_role', b'admin_role'], to='main.Role', null=b'True'), + ), + # Team cascade + migrations.AlterField( + model_name='team', + name='organization', + field=models.ForeignKey(related_name='teams', to='main.Organization'), + preserve_default=False, + ), + # add ask skip tags + migrations.AddField( + model_name='jobtemplate', + name='ask_skip_tags_on_launch', + field=models.BooleanField(default=False), + ), + # job survey passwords + migrations.AddField( + model_name='job', + name='survey_passwords', + field=jsonfield.fields.JSONField(default={}, editable=False, blank=True), + ), + # RBAC credential permission updates + migrations.AlterField( + model_name='credential', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_administrator', b'organization.admin_role'], to='main.Role', null=b'True'), + ), + migrations.AlterField( + model_name='credential', + name='use_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'), + ), + ] diff --git a/awx/main/migrations/0005_v300_migrate_facts.py b/awx/main/migrations/0005_v300_migrate_facts.py index 8362227c2f..058c5970df 100644 --- a/awx/main/migrations/0005_v300_migrate_facts.py +++ b/awx/main/migrations/0005_v300_migrate_facts.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals -from awx.main.migrations import _system_tracking as system_tracking from django.db import migrations class Migration(migrations.Migration): @@ -11,5 +10,4 @@ class Migration(migrations.Migration): ] operations = [ - migrations.RunPython(system_tracking.migrate_facts), ] diff --git a/awx/main/migrations/0034_v310_release.py b/awx/main/migrations/0034_v310_release.py new file mode 100644 index 0000000000..d23843a5fe --- /dev/null +++ b/awx/main/migrations/0034_v310_release.py @@ -0,0 +1,614 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models +import awx.main.models.notifications +import jsonfield.fields +import
django.db.models.deletion +import awx.main.models.workflow +import awx.main.fields + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0033_v303_v245_host_variable_fix'), + ] + + operations = [ + # Create ChannelGroup table + migrations.CreateModel( + name='ChannelGroup', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('group', models.CharField(unique=True, max_length=200)), + ('channels', models.TextField()), + ], + ), + # Allow simultaneous Job + migrations.AddField( + model_name='job', + name='allow_simultaneous', + field=models.BooleanField(default=False), + ), + # Job Event UUID + migrations.AddField( + model_name='jobevent', + name='uuid', + field=models.CharField(default=b'', max_length=1024, editable=False), + ), + # Job Parent Event UUID + migrations.AddField( + model_name='jobevent', + name='parent_uuid', + field=models.CharField(default=b'', max_length=1024, editable=False), + ), + # Modify the HA Instance + migrations.RemoveField( + model_name='instance', + name='primary', + ), + migrations.AlterField( + model_name='instance', + name='uuid', + field=models.CharField(max_length=40), + ), + migrations.AlterField( + model_name='credential', + name='become_method', + field=models.CharField(default=b'', help_text='Privilege escalation method.', max_length=32, blank=True, choices=[(b'', 'None'), (b'sudo', 'Sudo'), (b'su', 'Su'), (b'pbrun', 'Pbrun'), (b'pfexec', 'Pfexec'), (b'dzdo', 'DZDO'), (b'pmrun', 'Pmrun')]), + ), + # Add Workflows + migrations.AlterField( + model_name='unifiedjob', + name='launch_type', + field=models.CharField(default=b'manual', max_length=20, editable=False, choices=[(b'manual', 'Manual'), (b'relaunch', 'Relaunch'), (b'callback', 'Callback'), (b'scheduled', 'Scheduled'), (b'dependency', 'Dependency'), (b'workflow', 'Workflow'), (b'sync', 'Sync')]), + ), + migrations.CreateModel( + name='WorkflowJob', + fields=[ + ('unifiedjob_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='main.UnifiedJob')), + ('extra_vars', models.TextField(default=b'', blank=True)), + ], + options={ + 'ordering': ('id',), + }, + bases=('main.unifiedjob', models.Model, awx.main.models.notifications.JobNotificationMixin), + ), + migrations.CreateModel( + name='WorkflowJobNode', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('created', models.DateTimeField(default=None, editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('always_nodes', models.ManyToManyField(related_name='workflowjobnodes_always', to='main.WorkflowJobNode', blank=True)), + ('failure_nodes', models.ManyToManyField(related_name='workflowjobnodes_failure', to='main.WorkflowJobNode', blank=True)), + ('job', models.OneToOneField(related_name='unified_job_node', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.UnifiedJob', null=True)), + ('success_nodes', models.ManyToManyField(related_name='workflowjobnodes_success', to='main.WorkflowJobNode', blank=True)), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='WorkflowJobTemplate', + fields=[ + ('unifiedjobtemplate_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='main.UnifiedJobTemplate')), + ('extra_vars', models.TextField(default=b'', blank=True)), + ('admin_role', awx.main.fields.ImplicitRoleField(related_name='+', 
parent_role=b'singleton:system_administrator', to='main.Role', null=b'True')), + ], + bases=('main.unifiedjobtemplate', models.Model), + ), + migrations.CreateModel( + name='WorkflowJobTemplateNode', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('created', models.DateTimeField(default=None, editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('always_nodes', models.ManyToManyField(related_name='workflowjobtemplatenodes_always', to='main.WorkflowJobTemplateNode', blank=True)), + ('failure_nodes', models.ManyToManyField(related_name='workflowjobtemplatenodes_failure', to='main.WorkflowJobTemplateNode', blank=True)), + ('success_nodes', models.ManyToManyField(related_name='workflowjobtemplatenodes_success', to='main.WorkflowJobTemplateNode', blank=True)), + ('unified_job_template', models.ForeignKey(related_name='workflowjobtemplatenodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.UnifiedJobTemplate', null=True)), + ('workflow_job_template', models.ForeignKey(related_name='workflow_job_template_nodes', default=None, blank=True, to='main.WorkflowJobTemplate', null=True)), + ], + options={ + 'abstract': False, + }, + ), + migrations.AddField( + model_name='workflowjobnode', + name='unified_job_template', + field=models.ForeignKey(related_name='workflowjobnodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.UnifiedJobTemplate', null=True), + ), + migrations.AddField( + model_name='workflowjobnode', + name='workflow_job', + field=models.ForeignKey(related_name='workflow_job_nodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.WorkflowJob', null=True), + ), + migrations.AddField( + model_name='workflowjob', + name='workflow_job_template', + field=models.ForeignKey(related_name='workflow_jobs', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.WorkflowJobTemplate', null=True), + ), + migrations.AddField( + model_name='activitystream', + name='workflow_job', + field=models.ManyToManyField(to='main.WorkflowJob', blank=True), + ), + migrations.AddField( + model_name='activitystream', + name='workflow_job_node', + field=models.ManyToManyField(to='main.WorkflowJobNode', blank=True), + ), + migrations.AddField( + model_name='activitystream', + name='workflow_job_template', + field=models.ManyToManyField(to='main.WorkflowJobTemplate', blank=True), + ), + migrations.AddField( + model_name='activitystream', + name='workflow_job_template_node', + field=models.ManyToManyField(to='main.WorkflowJobTemplateNode', blank=True), + ), + # Workflow RBAC prompts + migrations.AddField( + model_name='workflowjobnode', + name='char_prompts', + field=jsonfield.fields.JSONField(default={}, blank=True), + ), + migrations.AddField( + model_name='workflowjobnode', + name='credential', + field=models.ForeignKey(related_name='workflowjobnodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True), + ), + migrations.AddField( + model_name='workflowjobnode', + name='inventory', + field=models.ForeignKey(related_name='workflowjobnodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='execute_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', 
null=b'True'), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='organization', + field=models.ForeignKey(related_name='workflows', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='main.Organization', null=True), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='read_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_auditor', b'organization.auditor_role', b'execute_role', b'admin_role'], to='main.Role', null=b'True'), + ), + migrations.AddField( + model_name='workflowjobtemplatenode', + name='char_prompts', + field=jsonfield.fields.JSONField(default={}, blank=True), + ), + migrations.AddField( + model_name='workflowjobtemplatenode', + name='credential', + field=models.ForeignKey(related_name='workflowjobtemplatenodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True), + ), + migrations.AddField( + model_name='workflowjobtemplatenode', + name='inventory', + field=models.ForeignKey(related_name='workflowjobtemplatenodes', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True), + ), + migrations.AlterField( + model_name='workflowjobnode', + name='unified_job_template', + field=models.ForeignKey(related_name='workflowjobnodes', on_delete=django.db.models.deletion.SET_NULL, default=None, to='main.UnifiedJobTemplate', null=True), + ), + migrations.AlterField( + model_name='workflowjobnode', + name='workflow_job', + field=models.ForeignKey(related_name='workflow_job_nodes', default=None, blank=True, to='main.WorkflowJob', null=True), + ), + migrations.AlterField( + model_name='workflowjobtemplate', + name='admin_role', + field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'singleton:system_administrator', b'organization.admin_role'], to='main.Role', null=b'True'), + ), + migrations.AlterField( + model_name='workflowjobtemplatenode', + name='unified_job_template', + field=models.ForeignKey(related_name='workflowjobtemplatenodes', on_delete=django.db.models.deletion.SET_NULL, default=None, to='main.UnifiedJobTemplate', null=True), + ), + # Job artifacts + migrations.AddField( + model_name='job', + name='artifacts', + field=jsonfield.fields.JSONField(default={}, editable=False, blank=True), + ), + migrations.AddField( + model_name='workflowjobnode', + name='ancestor_artifacts', + field=jsonfield.fields.JSONField(default={}, editable=False, blank=True), + ), + # Job timeout settings + migrations.AddField( + model_name='inventorysource', + name='timeout', + field=models.IntegerField(default=0, blank=True), + ), + migrations.AddField( + model_name='inventoryupdate', + name='timeout', + field=models.IntegerField(default=0, blank=True), + ), + migrations.AddField( + model_name='job', + name='timeout', + field=models.IntegerField(default=0, blank=True), + ), + migrations.AddField( + model_name='jobtemplate', + name='timeout', + field=models.IntegerField(default=0, blank=True), + ), + migrations.AddField( + model_name='project', + name='timeout', + field=models.IntegerField(default=0, blank=True), + ), + migrations.AddField( + model_name='projectupdate', + name='timeout', + field=models.IntegerField(default=0, blank=True), + ), + # Execution Node + migrations.AddField( + model_name='unifiedjob', + name='execution_node', + field=models.TextField(default=b'', editable=False, blank=True), + ), + # SCM Revision + migrations.AddField( + model_name='project', + name='scm_revision', + 
field=models.CharField(default=b'', editable=False, max_length=1024, blank=True, help_text='The last revision fetched by a project update', verbose_name='SCM Revision'), + ), + migrations.AddField( + model_name='projectupdate', + name='job_type', + field=models.CharField(default=b'check', max_length=64, choices=[(b'run', 'Run'), (b'check', 'Check')]), + ), + migrations.AddField( + model_name='job', + name='scm_revision', + field=models.CharField(default=b'', editable=False, max_length=1024, blank=True, help_text='The SCM Revision from the Project used for this job, if available', verbose_name='SCM Revision'), + ), + # Project Playbook Files + migrations.AddField( + model_name='project', + name='playbook_files', + field=jsonfield.fields.JSONField(default=[], help_text='List of playbooks found in the project', verbose_name='Playbook Files', editable=False, blank=True), + ), + # Job events to stdout + migrations.AddField( + model_name='adhoccommandevent', + name='end_line', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='adhoccommandevent', + name='start_line', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='adhoccommandevent', + name='stdout', + field=models.TextField(default=b'', editable=False), + ), + migrations.AddField( + model_name='adhoccommandevent', + name='uuid', + field=models.CharField(default=b'', max_length=1024, editable=False), + ), + migrations.AddField( + model_name='adhoccommandevent', + name='verbosity', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='jobevent', + name='end_line', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='jobevent', + name='playbook', + field=models.CharField(default=b'', max_length=1024, editable=False), + ), + migrations.AddField( + model_name='jobevent', + name='start_line', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='jobevent', + name='stdout', + field=models.TextField(default=b'', editable=False), + ), + migrations.AddField( + model_name='jobevent', + name='verbosity', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AlterField( + model_name='adhoccommandevent', + name='counter', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AlterField( + model_name='adhoccommandevent', + name='event', + field=models.CharField(max_length=100, choices=[(b'runner_on_failed', 'Host Failed'), (b'runner_on_ok', 'Host OK'), (b'runner_on_unreachable', 'Host Unreachable'), (b'runner_on_skipped', 'Host Skipped'), (b'debug', 'Debug'), (b'verbose', 'Verbose'), (b'deprecated', 'Deprecated'), (b'warning', 'Warning'), (b'system_warning', 'System Warning'), (b'error', 'Error')]), + ), + migrations.AlterField( + model_name='jobevent', + name='counter', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AlterField( + model_name='jobevent', + name='event', + field=models.CharField(max_length=100, choices=[(b'runner_on_failed', 'Host Failed'), (b'runner_on_ok', 'Host OK'), (b'runner_on_error', 'Host Failure'), (b'runner_on_skipped', 'Host Skipped'), (b'runner_on_unreachable', 'Host Unreachable'), (b'runner_on_no_hosts', 'No Hosts Remaining'), (b'runner_on_async_poll', 'Host Polling'), (b'runner_on_async_ok', 'Host Async OK'), (b'runner_on_async_failed', 'Host Async Failure'), 
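+ # runner_* events carry per-host task results (the runner_item_* events
+ # below are the per-item results emitted by loops), while playbook_*
+ # events mark play, task, and stats boundaries in the event stream.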
(b'runner_item_on_ok', 'Item OK'), (b'runner_item_on_failed', 'Item Failed'), (b'runner_item_on_skipped', 'Item Skipped'), (b'runner_retry', 'Host Retry'), (b'runner_on_file_diff', 'File Difference'), (b'playbook_on_start', 'Playbook Started'), (b'playbook_on_notify', 'Running Handlers'), (b'playbook_on_include', 'Including File'), (b'playbook_on_no_hosts_matched', 'No Hosts Matched'), (b'playbook_on_no_hosts_remaining', 'No Hosts Remaining'), (b'playbook_on_task_start', 'Task Started'), (b'playbook_on_vars_prompt', 'Variables Prompted'), (b'playbook_on_setup', 'Gathering Facts'), (b'playbook_on_import_for_host', 'internal: on Import for Host'), (b'playbook_on_not_import_for_host', 'internal: on Not Import for Host'), (b'playbook_on_play_start', 'Play Started'), (b'playbook_on_stats', 'Playbook Complete'), (b'debug', 'Debug'), (b'verbose', 'Verbose'), (b'deprecated', 'Deprecated'), (b'warning', 'Warning'), (b'system_warning', 'System Warning'), (b'error', 'Error')]), + ), + migrations.AlterUniqueTogether( + name='adhoccommandevent', + unique_together=set([]), + ), + migrations.AlterIndexTogether( + name='adhoccommandevent', + index_together=set([('ad_hoc_command', 'event'), ('ad_hoc_command', 'uuid'), ('ad_hoc_command', 'end_line'), ('ad_hoc_command', 'start_line')]), + ), + migrations.AlterIndexTogether( + name='jobevent', + index_together=set([('job', 'event'), ('job', 'parent_uuid'), ('job', 'start_line'), ('job', 'uuid'), ('job', 'end_line')]), + ), + # Tower state + migrations.CreateModel( + name='TowerScheduleState', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('schedule_last_run', models.DateTimeField(auto_now_add=True)), + ], + options={ + 'abstract': False, + }, + ), + # Tower instance capacity + migrations.AddField( + model_name='instance', + name='capacity', + field=models.PositiveIntegerField(default=100, editable=False), + ), + # Workflow surveys + migrations.AddField( + model_name='workflowjob', + name='survey_passwords', + field=jsonfield.fields.JSONField(default={}, editable=False, blank=True), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='survey_enabled', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='survey_spec', + field=jsonfield.fields.JSONField(default={}, blank=True), + ), + # JSON field changes + migrations.AlterField( + model_name='adhoccommandevent', + name='event_data', + field=awx.main.fields.JSONField(default={}, blank=True), + ), + migrations.AlterField( + model_name='job', + name='artifacts', + field=awx.main.fields.JSONField(default={}, editable=False, blank=True), + ), + migrations.AlterField( + model_name='job', + name='survey_passwords', + field=awx.main.fields.JSONField(default={}, editable=False, blank=True), + ), + migrations.AlterField( + model_name='jobevent', + name='event_data', + field=awx.main.fields.JSONField(default={}, blank=True), + ), + migrations.AlterField( + model_name='jobtemplate', + name='survey_spec', + field=awx.main.fields.JSONField(default={}, blank=True), + ), + migrations.AlterField( + model_name='notification', + name='body', + field=awx.main.fields.JSONField(default=dict, blank=True), + ), + migrations.AlterField( + model_name='notificationtemplate', + name='notification_configuration', + field=awx.main.fields.JSONField(default=dict), + ), + migrations.AlterField( + model_name='project', + name='playbook_files', + field=awx.main.fields.JSONField(default=[], 
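+ # These AlterFields swap the third-party jsonfield.fields.JSONField for
+ # AWX's own awx.main.fields.JSONField wrapper; the stored column values
+ # themselves should be unchanged.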
help_text='List of playbooks found in the project', verbose_name='Playbook Files', editable=False, blank=True), + ), + migrations.AlterField( + model_name='schedule', + name='extra_data', + field=awx.main.fields.JSONField(default={}, blank=True), + ), + migrations.AlterField( + model_name='unifiedjob', + name='job_env', + field=awx.main.fields.JSONField(default={}, editable=False, blank=True), + ), + migrations.AlterField( + model_name='workflowjob', + name='survey_passwords', + field=awx.main.fields.JSONField(default={}, editable=False, blank=True), + ), + migrations.AlterField( + model_name='workflowjobnode', + name='ancestor_artifacts', + field=awx.main.fields.JSONField(default={}, editable=False, blank=True), + ), + migrations.AlterField( + model_name='workflowjobnode', + name='char_prompts', + field=awx.main.fields.JSONField(default={}, blank=True), + ), + migrations.AlterField( + model_name='workflowjobtemplate', + name='survey_spec', + field=awx.main.fields.JSONField(default={}, blank=True), + ), + migrations.AlterField( + model_name='workflowjobtemplatenode', + name='char_prompts', + field=awx.main.fields.JSONField(default={}, blank=True), + ), + # Job Project Update + migrations.AddField( + model_name='job', + name='project_update', + field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.ProjectUpdate', help_text='The SCM Refresh task used to make sure the playbooks were available for the job run', null=True), + ), + # Inventory, non-unique name + migrations.AlterField( + model_name='inventory', + name='name', + field=models.CharField(max_length=512), + ), + # Text and has schedules + migrations.RemoveField( + model_name='unifiedjobtemplate', + name='has_schedules', + ), + migrations.AlterField( + model_name='host', + name='instance_id', + field=models.CharField(default=b'', help_text='The value used by the remote inventory source to uniquely identify the host', max_length=1024, blank=True), + ), + migrations.AlterField( + model_name='project', + name='scm_clean', + field=models.BooleanField(default=False, help_text='Discard any local changes before syncing the project.'), + ), + migrations.AlterField( + model_name='project', + name='scm_delete_on_update', + field=models.BooleanField(default=False, help_text='Delete the project before syncing.'), + ), + migrations.AlterField( + model_name='project', + name='scm_type', + field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'), + ), + migrations.AlterField( + model_name='project', + name='scm_update_cache_timeout', + field=models.PositiveIntegerField(default=0, help_text='The number of seconds after the last project update ran that a new project update will be launched as a job dependency.', blank=True), + ), + migrations.AlterField( + model_name='project', + name='scm_update_on_launch', + field=models.BooleanField(default=False, help_text='Update the project when a job is launched that uses the project.'), + ), + migrations.AlterField( + model_name='project', + name='scm_url', + field=models.CharField(default=b'', help_text='The location where the project is stored.', max_length=1024, verbose_name='SCM URL', blank=True), + ), + migrations.AlterField( + model_name='project', + name='timeout', + field=models.IntegerField(default=0, help_text='The amount of time to run before the task is
canceled.', blank=True), + ), + migrations.AlterField( + model_name='projectupdate', + name='scm_clean', + field=models.BooleanField(default=False, help_text='Discard any local changes before syncing the project.'), + ), + migrations.AlterField( + model_name='projectupdate', + name='scm_delete_on_update', + field=models.BooleanField(default=False, help_text='Delete the project before syncing.'), + ), + migrations.AlterField( + model_name='projectupdate', + name='scm_type', + field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'), + ), + migrations.AlterField( + model_name='projectupdate', + name='scm_url', + field=models.CharField(default=b'', help_text='The location where the project is stored.', max_length=1024, verbose_name='SCM URL', blank=True), + ), + migrations.AlterField( + model_name='projectupdate', + name='timeout', + field=models.IntegerField(default=0, help_text='The amount of time to run before the task is canceled.', blank=True), + ), + migrations.AlterField( + model_name='schedule', + name='dtend', + field=models.DateTimeField(default=None, help_text='The last occurrence of the schedule occurs before this time, afterwards the schedule expires.', null=True, editable=False), + ), + migrations.AlterField( + model_name='schedule', + name='dtstart', + field=models.DateTimeField(default=None, help_text='The first occurrence of the schedule occurs on or after this time.', null=True, editable=False), + ), + migrations.AlterField( + model_name='schedule', + name='enabled', + field=models.BooleanField(default=True, help_text='Enables processing of this schedule by Tower.'), + ), + migrations.AlterField( + model_name='schedule', + name='next_run', + field=models.DateTimeField(default=None, help_text='The next time that the scheduled action will run.', null=True, editable=False), + ), + migrations.AlterField( + model_name='schedule', + name='rrule', + field=models.CharField(help_text="A value representing the schedule's iCal recurrence rule.", max_length=255), + ), + migrations.AlterField( + model_name='unifiedjob', + name='elapsed', + field=models.DecimalField(help_text='Elapsed time in seconds that the job ran.', editable=False, max_digits=12, decimal_places=3), + ), + migrations.AlterField( + model_name='unifiedjob', + name='execution_node', + field=models.TextField(default=b'', help_text='The Tower node the job executed on.', editable=False, blank=True), + ), + migrations.AlterField( + model_name='unifiedjob', + name='finished', + field=models.DateTimeField(default=None, help_text='The date and time the job finished execution.', null=True, editable=False), + ), + migrations.AlterField( + model_name='unifiedjob', + name='job_explanation', + field=models.TextField(default=b'', help_text="A status field to indicate the state of the job if it wasn't able to run and capture stdout", editable=False, blank=True), + ), + migrations.AlterField( + model_name='unifiedjob', + name='started', + field=models.DateTimeField(default=None, help_text='The date and time the job was queued for starting.', null=True, editable=False), + ), + + ] diff --git a/awx/main/migrations/0035_v310_remove_tower_settings.py b/awx/main/migrations/0035_v310_remove_tower_settings.py new file mode 100644 index 0000000000..e92dfe605c --- /dev/null +++ b/awx/main/migrations/0035_v310_remove_tower_settings.py @@ -0,0 +1,22 @@ +#
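# The JSONField alterations in migration 0034 mix literal defaults ({} and [])
# with the callable form (default=dict) used for the notification fields. A
# minimal sketch, on a hypothetical model, of the callable-default pattern,
# which gives every new row its own dict/list rather than one shared literal:

from django.db import models
from awx.main.fields import JSONField


class ExampleState(models.Model):  # hypothetical model, for illustration only
    # default=dict / default=list are called per row at save time.
    data = JSONField(default=dict, blank=True)
    items = JSONField(default=list, blank=True)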
-*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0034_v310_release'), + ] + + operations = [ + # Remove Tower settings, these settings are now in separate awx.conf app. + migrations.RemoveField( + model_name='towersettings', + name='user', + ), + migrations.DeleteModel( + name='TowerSettings', + ), + ] diff --git a/awx/main/migrations/_old_access.py b/awx/main/migrations/_old_access.py index da49723a9e..2996816abe 100644 --- a/awx/main/migrations/_old_access.py +++ b/awx/main/migrations/_old_access.py @@ -13,6 +13,7 @@ import sys import logging # Django +from django.conf import settings from django.db.models import F, Q from django.contrib.auth.models import User @@ -22,9 +23,7 @@ from rest_framework.exceptions import ParseError, PermissionDenied # AWX from awx.main.utils import * # noqa from awx.main.models import * # noqa -from awx.api.license import LicenseForbids -from awx.main.task_engine import TaskSerializer -from awx.main.conf import tower_settings +from awx.conf.license import LicenseForbids __all__ = ['get_user_queryset', 'check_user_access'] @@ -153,8 +152,8 @@ class BaseAccess(object): return self.can_change(obj, None) def check_license(self, add_host=False, feature=None, check_expiration=True): - reader = TaskSerializer() - validation_info = reader.from_database() + from awx.main.task_engine import TaskEnhancer + validation_info = TaskEnhancer().validate_enhancements() if ('test' in sys.argv or 'py.test' in sys.argv[0] or 'jenkins' in sys.argv) and not os.environ.get('SKIP_LICENSE_FIXUP_FOR_TEST', ''): validation_info['free_instances'] = 99999999 validation_info['time_remaining'] = 99999999 @@ -202,7 +201,7 @@ class UserAccess(BaseAccess): qs = self.model.objects.distinct() if self.user.is_superuser: return qs - if tower_settings.ORG_ADMINS_CAN_SEE_ALL_USERS and self.user.deprecated_admin_of_organizations.all().exists(): + if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and self.user.deprecated_admin_of_organizations.all().exists(): return qs return qs.filter( Q(pk=self.user.pk) | @@ -1624,29 +1623,6 @@ class CustomInventoryScriptAccess(BaseAccess): return False -class TowerSettingsAccess(BaseAccess): - ''' - - I can see settings when - - I am a super user - - I can edit settings when - - I am a super user - - I can clear settings when - - I am a super user - ''' - - model = TowerSettings - - def get_queryset(self): - if self.user.is_superuser: - return self.model.objects.all() - return self.model.objects.none() - - def can_change(self, obj, data): - return self.user.is_superuser - - def can_delete(self, obj): - return self.user.is_superuser - register_access(User, UserAccess) register_access(Organization, OrganizationAccess) register_access(Inventory, InventoryAccess) @@ -1672,4 +1648,3 @@ register_access(UnifiedJobTemplate, UnifiedJobTemplateAccess) register_access(UnifiedJob, UnifiedJobAccess) register_access(ActivityStream, ActivityStreamAccess) register_access(CustomInventoryScript, CustomInventoryScriptAccess) -register_access(TowerSettings, TowerSettingsAccess) diff --git a/awx/main/migrations/_system_tracking.py b/awx/main/migrations/_system_tracking.py deleted file mode 100644 index 931c5c467c..0000000000 --- a/awx/main/migrations/_system_tracking.py +++ /dev/null @@ -1,52 +0,0 @@ - -import logging - -from django.utils.encoding import smart_text -from django.conf import settings - -from awx.fact.models import FactVersion -from 
awx.fact.utils.dbtransform import KeyTransform -from mongoengine.connection import ConnectionError -from pymongo.errors import OperationFailure - -logger = logging.getLogger('system_tracking_migrations') - -def migrate_facts(apps, schema_editor): - Fact = apps.get_model('main', "Fact") - Host = apps.get_model('main', "Host") - - if (not hasattr(settings, 'MONGO_HOST')) or settings.MONGO_HOST == NotImplemented: - logger.info("failed to find MONGO_HOST in settings. Will NOT attempt to migrate system_tracking data from Mongo to Postgres.") - # If settings do not specify a mongo database, do not raise error or drop db - return (0, 0) - - try: - n = FactVersion.objects.all().count() - except ConnectionError: - # Let the user know about the error. Likely this is - # a new install and we just don't need to do this - logger.info(smart_text(u"failed to connect to mongo database host {}. Will NOT attempt to migrate system_tracking data from Mongo to Postgres.".format(settings.MONGO_HOST))) - return (0, 0) - except OperationFailure: - # The database was up but something happened when we tried to query it - logger.info(smart_text(u"failed to connect to issue Mongo query on host {}. Will NOT attempt to migrate system_tracking data from Mongo to Postgres.".format(settings.MONGO_HOST))) - return (0, 0) - - migrated_count = 0 - not_migrated_count = 0 - transform = KeyTransform([('.', '\uff0E'), ('$', '\uff04')]) - for factver in FactVersion.objects.all().no_cache(): - try: - host = Host.objects.only('id').get(inventory__id=factver.host.inventory_id, name=factver.host.hostname) - fact_obj = transform.replace_outgoing(factver.fact) - Fact.objects.create(host_id=host.id, timestamp=fact_obj.timestamp, module=fact_obj.module, facts=fact_obj.fact).save() - migrated_count += 1 - except Host.DoesNotExist: - # No host was found to migrate the facts to. - # This isn't a hard error. Just something the user would want to know. - logger.info(smart_text(u"unable to migrate fact {} not found in Postgres <{}, {}>".format(factver.id, factver.host.inventory_id, factver.host.hostname))) - not_migrated_count += 1 - - logger.info(smart_text(u"successfully migrated {} records of system_tracking data from Mongo to Postgres. {} records not migrated due to corresponding pairs not found in Postgres.".format(migrated_count, not_migrated_count))) - return (migrated_count, not_migrated_count) - diff --git a/awx/main/models/__init__.py b/awx/main/models/__init__.py index 1e320e6238..2f4d02a7b9 100644 --- a/awx/main/models/__init__.py +++ b/awx/main/models/__init__.py @@ -16,62 +16,85 @@ from awx.main.models.ad_hoc_commands import * # noqa from awx.main.models.schedules import * # noqa from awx.main.models.activity_stream import * # noqa from awx.main.models.ha import * # noqa -from awx.main.models.configuration import * # noqa from awx.main.models.rbac import * # noqa from awx.main.models.mixins import * # noqa from awx.main.models.notifications import * # noqa from awx.main.models.fact import * # noqa from awx.main.models.label import * # noqa +from awx.main.models.workflow import * # noqa +from awx.main.models.channels import * # noqa # Monkeypatch Django serializer to ignore django-taggit fields (which break # the dumpdata command; see https://github.com/alex/django-taggit/issues/155). 
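# MongoDB forbids '.' and '$' in document keys, so the old fact store kept
# them as fullwidth lookalike characters; the deleted _system_tracking
# migration ran each fact through KeyTransform with the same two pairs while
# copying rows into Postgres. A minimal sketch, assuming plain dicts, of
# applying such a key mapping recursively (this helper is illustrative, not
# the awx.fact API):

KEY_REPLACEMENTS = [(u'.', u'\uff0e'), (u'$', u'\uff04')]


def transform_keys(value, pairs=KEY_REPLACEMENTS):
    # Rewrite forbidden characters in every dict key, at any nesting depth.
    if isinstance(value, dict):
        transformed = {}
        for key, item in value.items():
            for old, new in pairs:
                key = key.replace(old, new)
            transformed[key] = transform_keys(item, pairs)
        return transformed
    if isinstance(value, list):
        return [transform_keys(item, pairs) for item in value]
    return value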
from django.core.serializers.python import Serializer as _PythonSerializer _original_handle_m2m_field = _PythonSerializer.handle_m2m_field + + def _new_handle_m2m_field(self, obj, field): try: field.rel.through._meta except AttributeError: return return _original_handle_m2m_field(self, obj, field) + + _PythonSerializer.handle_m2m_field = _new_handle_m2m_field + # Add custom methods to User model for permissions checks. -from django.contrib.auth.models import User # noqa +from django.contrib.auth.models import User # noqa from awx.main.access import * # noqa User.add_to_class('get_queryset', get_user_queryset) User.add_to_class('can_access', check_user_access) +User.add_to_class('can_access_with_errors', check_user_access_with_errors) User.add_to_class('accessible_objects', user_accessible_objects) User.add_to_class('admin_role', user_admin_role) + @property def user_get_organizations(user): return Organization.objects.filter(member_role__members=user) + @property def user_get_admin_of_organizations(user): return Organization.objects.filter(admin_role__members=user) + @property def user_get_auditor_of_organizations(user): return Organization.objects.filter(auditor_role__members=user) + User.add_to_class('organizations', user_get_organizations) User.add_to_class('admin_of_organizations', user_get_admin_of_organizations) User.add_to_class('auditor_of_organizations', user_get_auditor_of_organizations) + @property def user_is_system_auditor(user): - return Role.singleton('system_auditor').members.filter(id=user.id).exists() + if not hasattr(user, '_is_system_auditor'): + if user.pk: + user._is_system_auditor = user.roles.filter( + singleton_name='system_auditor', role_field='system_auditor').exists() + else: + # Odd case where user is unsaved, this should never be relied on + return False + return user._is_system_auditor + @user_is_system_auditor.setter def user_is_system_auditor(user, tf): if user.id: if tf: Role.singleton('system_auditor').members.add(user) + user._is_system_auditor = True else: Role.singleton('system_auditor').members.remove(user) + user._is_system_auditor = False + User.add_to_class('is_system_auditor', user_is_system_auditor) @@ -98,8 +121,13 @@ activity_stream_registrar.connect(AdHocCommand) # activity_stream_registrar.connect(Profile) activity_stream_registrar.connect(Schedule) activity_stream_registrar.connect(CustomInventoryScript) -activity_stream_registrar.connect(TowerSettings) activity_stream_registrar.connect(NotificationTemplate) activity_stream_registrar.connect(Notification) activity_stream_registrar.connect(Label) activity_stream_registrar.connect(User) +activity_stream_registrar.connect(WorkflowJobTemplate) +activity_stream_registrar.connect(WorkflowJobTemplateNode) +activity_stream_registrar.connect(WorkflowJob) + +# prevent API filtering on certain Django-supplied sensitive fields +prevent_search(User._meta.get_field('password')) diff --git a/awx/main/models/activity_stream.py b/awx/main/models/activity_stream.py index 8ff285cb45..b0d58fc031 100644 --- a/awx/main/models/activity_stream.py +++ b/awx/main/models/activity_stream.py @@ -49,6 +49,10 @@ class ActivityStream(models.Model): permission = models.ManyToManyField("Permission", blank=True) job_template = models.ManyToManyField("JobTemplate", blank=True) job = models.ManyToManyField("Job", blank=True) + workflow_job_template_node = models.ManyToManyField("WorkflowJobTemplateNode", blank=True) + workflow_job_node = models.ManyToManyField("WorkflowJobNode", blank=True) + workflow_job_template = 
models.ManyToManyField("WorkflowJobTemplate", blank=True) + workflow_job = models.ManyToManyField("WorkflowJob", blank=True) unified_job_template = models.ManyToManyField("UnifiedJobTemplate", blank=True, related_name='activity_stream_as_unified_job_template+') unified_job = models.ManyToManyField("UnifiedJob", blank=True, related_name='activity_stream_as_unified_job+') ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True) diff --git a/awx/main/models/ad_hoc_commands.py b/awx/main/models/ad_hoc_commands.py index da9aaf740b..3636aa8e0a 100644 --- a/awx/main/models/ad_hoc_commands.py +++ b/awx/main/models/ad_hoc_commands.py @@ -2,34 +2,33 @@ # All Rights Reserved. # Python +import datetime import hmac -import json import logging from urlparse import urljoin # Django from django.conf import settings from django.db import models +from django.utils.dateparse import parse_datetime from django.utils.text import Truncator +from django.utils.timezone import utc from django.utils.translation import ugettext_lazy as _ from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse -# Django-JSONField -from jsonfield import JSONField - # AWX from awx.main.models.base import * # noqa from awx.main.models.unified_jobs import * # noqa -from awx.main.utils import decrypt_field -from awx.main.conf import tower_settings +from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate +from awx.main.fields import JSONField logger = logging.getLogger('awx.main.models.ad_hoc_commands') __all__ = ['AdHocCommand', 'AdHocCommandEvent'] -class AdHocCommand(UnifiedJob): +class AdHocCommand(UnifiedJob, JobNotificationMixin): class Meta(object): app_label = 'main' @@ -84,24 +83,24 @@ class AdHocCommand(UnifiedJob): editable=False, through='AdHocCommandEvent', ) - extra_vars = models.TextField( + extra_vars = prevent_search(models.TextField( blank=True, default='', - ) + )) extra_vars_dict = VarsDictProperty('extra_vars', True) def clean_inventory(self): inv = self.inventory if not inv: - raise ValidationError('No valid inventory.') + raise ValidationError(_('No valid inventory.')) return inv def clean_credential(self): cred = self.credential if cred and cred.kind != 'ssh': raise ValidationError( - 'You must provide a machine / SSH credential.', + _('You must provide a machine / SSH credential.'), ) return cred @@ -112,18 +111,18 @@ class AdHocCommand(UnifiedJob): def clean_module_name(self): if type(self.module_name) not in (str, unicode): - raise ValidationError("Invalid type for ad hoc command") + raise ValidationError(_("Invalid type for ad hoc command")) module_name = self.module_name.strip() or 'command' - if module_name not in tower_settings.AD_HOC_COMMANDS: - raise ValidationError('Unsupported module for ad hoc commands.') + if module_name not in settings.AD_HOC_COMMANDS: + raise ValidationError(_('Unsupported module for ad hoc commands.')) return module_name def clean_module_args(self): if type(self.module_args) not in (str, unicode): - raise ValidationError("Invalid type for ad hoc command") + raise ValidationError(_("Invalid type for ad hoc command")) module_args = self.module_args if self.module_name in ('command', 'shell') and not module_args: - raise ValidationError('No argument passed to %s module.' 
% self.module_name) + raise ValidationError(_('No argument passed to %s module.') % self.module_name) return module_args @property @@ -147,7 +146,7 @@ class AdHocCommand(UnifiedJob): return reverse('api:ad_hoc_command_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/ad_hoc_commands/{}".format(self.pk)) + return urljoin(settings.TOWER_URL_BASE, "/#/ad_hoc_commands/{}".format(self.pk)) @property def task_auth_token(self): @@ -158,18 +157,20 @@ class AdHocCommand(UnifiedJob): @property def notification_templates(self): - all_inventory_sources = set() + all_orgs = set() for h in self.hosts.all(): - for invsrc in h.inventory_sources.all(): - all_inventory_sources.add(invsrc) + all_orgs.add(h.inventory.organization) active_templates = dict(error=set(), success=set(), any=set()) - for invsrc in all_inventory_sources: - notifications_dict = invsrc.notification_templates - for notification_type in active_templates.keys(): - for templ in notifications_dict[notification_type]: - active_templates[notification_type].add(templ) + base_notification_templates = NotificationTemplate.objects + for org in all_orgs: + for templ in base_notification_templates.filter(organization_notification_templates_for_errors=org): + active_templates['error'].add(templ) + for templ in base_notification_templates.filter(organization_notification_templates_for_success=org): + active_templates['success'].add(templ) + for templ in base_notification_templates.filter(organization_notification_templates_for_any=org): + active_templates['any'].add(templ) active_templates['error'] = list(active_templates['error']) active_templates['any'] = list(active_templates['any']) active_templates['success'] = list(active_templates['success']) @@ -178,13 +179,6 @@ class AdHocCommand(UnifiedJob): def get_passwords_needed_to_start(self): return self.passwords_needed_to_start - def is_blocked_by(self, obj): - from awx.main.models import InventoryUpdate - if type(obj) == InventoryUpdate: - if self.inventory == obj.inventory_source.inventory: - return True - return False - @property def task_impact(self): # NOTE: We sorta have to assume the host count matches and that forks default to 5 @@ -192,35 +186,6 @@ class AdHocCommand(UnifiedJob): count_hosts = Host.objects.filter( enabled=True, inventory__ad_hoc_commands__pk=self.pk).count() return min(count_hosts, 5 if self.forks == 0 else self.forks) * 10 - def generate_dependencies(self, active_tasks): - from awx.main.models import InventoryUpdate - if not self.inventory: - return [] - inventory_sources = self.inventory.inventory_sources.filter( update_on_launch=True) - inventory_sources_found = [] - dependencies = [] - for obj in active_tasks: - if type(obj) == InventoryUpdate: - if obj.inventory_source in inventory_sources: - inventory_sources_found.append(obj.inventory_source) - # Skip updating any inventory sources that were already updated before - # running this job (via callback inventory refresh). - try: - start_args = json.loads(decrypt_field(self, 'start_args')) - except Exception: - start_args = None - start_args = start_args or {} - inventory_sources_already_updated = start_args.get('inventory_sources_already_updated', []) - if inventory_sources_already_updated: - for source in inventory_sources.filter(pk__in=inventory_sources_already_updated): - if source not in inventory_sources_found: - inventory_sources_found.append(source) - if inventory_sources.count(): # and not has_setup_failures? 
Probably handled as an error scenario in the task runner - for source in inventory_sources: - if source not in inventory_sources_found and source.needs_update_on_launch: - dependencies.append(source.create_inventory_update(launch_type='dependency')) - return dependencies - def copy(self): data = {} for field in ('job_type', 'inventory_id', 'limit', 'credential_id', @@ -237,6 +202,15 @@ class AdHocCommand(UnifiedJob): update_fields.append('name') super(AdHocCommand, self).save(*args, **kwargs) + ''' + JobNotificationMixin + ''' + def get_notification_templates(self): + return self.notification_templates + + def get_notification_friendly_name(self): + return "AdHoc Command" + class AdHocCommandEvent(CreatedModifiedModel): ''' @@ -249,24 +223,38 @@ class AdHocCommandEvent(CreatedModifiedModel): ('runner_on_ok', _('Host OK'), False), ('runner_on_unreachable', _('Host Unreachable'), True), # Tower won't see no_hosts (check is done earlier without callback). - #('runner_on_no_hosts', _('No Hosts Matched'), False), + # ('runner_on_no_hosts', _('No Hosts Matched'), False), # Tower will see skipped (when running in check mode for a module that # does not support check mode). ('runner_on_skipped', _('Host Skipped'), False), - # Tower does not support async for ad hoc commands. - #('runner_on_async_poll', _('Host Polling'), False), - #('runner_on_async_ok', _('Host Async OK'), False), - #('runner_on_async_failed', _('Host Async Failure'), True), - # Tower does not yet support --diff mode - #('runner_on_file_diff', _('File Difference'), False), + # Tower does not support async for ad hoc commands (not used in v2). + # ('runner_on_async_poll', _('Host Polling'), False), + # ('runner_on_async_ok', _('Host Async OK'), False), + # ('runner_on_async_failed', _('Host Async Failure'), True), + # Tower does not yet support --diff mode. + # ('runner_on_file_diff', _('File Difference'), False), + + # Additional event types for captured stdout not directly related to + # runner events. 
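# AdHocCommand now fills in the two JobNotificationMixin hooks shown above. A
# minimal sketch, with a hypothetical consumer function, of how those hooks
# let notification code stay generic across job types (the 'error'/'success'/
# 'any' keys come from the notification_templates property earlier in this
# file):

def describe_notifications(unified_job):
    templates = unified_job.get_notification_templates()
    name = unified_job.get_notification_friendly_name()
    return '%s: %d error, %d success, %d any' % (
        name, len(templates['error']), len(templates['success']),
        len(templates['any']))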
+ ('debug', _('Debug'), False), + ('verbose', _('Verbose'), False), + ('deprecated', _('Deprecated'), False), + ('warning', _('Warning'), False), + ('system_warning', _('System Warning'), False), + ('error', _('Error'), False), ] FAILED_EVENTS = [x[0] for x in EVENT_TYPES if x[2]] EVENT_CHOICES = [(x[0], x[1]) for x in EVENT_TYPES] class Meta: app_label = 'main' - unique_together = [('ad_hoc_command', 'host_name')] ordering = ('-pk',) + index_together = [ + ('ad_hoc_command', 'event'), + ('ad_hoc_command', 'uuid'), + ('ad_hoc_command', 'start_line'), + ('ad_hoc_command', 'end_line'), + ] ad_hoc_command = models.ForeignKey( 'AdHocCommand', @@ -303,8 +291,30 @@ class AdHocCommandEvent(CreatedModifiedModel): default=False, editable=False, ) + uuid = models.CharField( + max_length=1024, + default='', + editable=False, + ) counter = models.PositiveIntegerField( default=0, + editable=False, + ) + stdout = models.TextField( + default='', + editable=False, + ) + verbosity = models.PositiveIntegerField( + default=0, + editable=False, + ) + start_line = models.PositiveIntegerField( + default=0, + editable=False, + ) + end_line = models.PositiveIntegerField( + default=0, + editable=False, ) def get_absolute_url(self): @@ -342,3 +352,28 @@ class AdHocCommandEvent(CreatedModifiedModel): except (IndexError, AttributeError): pass super(AdHocCommandEvent, self).save(*args, **kwargs) + + @classmethod + def create_from_data(self, **kwargs): + # Convert the datetime for the ad hoc command event's creation + # appropriately, and include a time zone for it. + # + # In the event of any issue, throw it out, and Django will just save + # the current time. + try: + if not isinstance(kwargs['created'], datetime.datetime): + kwargs['created'] = parse_datetime(kwargs['created']) + if not kwargs['created'].tzinfo: + kwargs['created'] = kwargs['created'].replace(tzinfo=utc) + except (KeyError, ValueError): + kwargs.pop('created', None) + + # Sanity check: Don't honor keys that we don't recognize. 
+ valid_keys = {'ad_hoc_command_id', 'event', 'event_data', 'created', + 'counter', 'uuid', 'stdout', 'start_line', 'end_line', + 'verbosity'} + for key in kwargs.keys(): + if key not in valid_keys: + kwargs.pop(key) + + return AdHocCommandEvent.objects.create(**kwargs) diff --git a/awx/main/models/base.py b/awx/main/models/base.py index c4914cdd20..81e00f92c6 100644 --- a/awx/main/models/base.py +++ b/awx/main/models/base.py @@ -23,13 +23,14 @@ from crum import get_current_user # Ansible Tower from awx.main.utils import encrypt_field -__all__ = ['VarsDictProperty', 'BaseModel', 'CreatedModifiedModel', +__all__ = ['prevent_search', 'VarsDictProperty', 'BaseModel', 'CreatedModifiedModel', 'PasswordFieldsModel', 'PrimordialModel', 'CommonModel', 'CommonModelNameNotUnique', 'NotificationFieldsModel', 'PERM_INVENTORY_ADMIN', 'PERM_INVENTORY_READ', 'PERM_INVENTORY_WRITE', 'PERM_INVENTORY_DEPLOY', 'PERM_INVENTORY_SCAN', 'PERM_INVENTORY_CHECK', 'PERM_JOBTEMPLATE_CREATE', 'JOB_TYPE_CHOICES', - 'AD_HOC_JOB_TYPE_CHOICES', 'PERMISSION_TYPE_CHOICES', 'CLOUD_INVENTORY_SOURCES', + 'AD_HOC_JOB_TYPE_CHOICES', 'PROJECT_UPDATE_JOB_TYPE_CHOICES', + 'PERMISSION_TYPE_CHOICES', 'CLOUD_INVENTORY_SOURCES', 'VERBOSITY_CHOICES'] PERM_INVENTORY_ADMIN = 'admin' @@ -51,6 +52,11 @@ AD_HOC_JOB_TYPE_CHOICES = [ (PERM_INVENTORY_CHECK, _('Check')), ] +PROJECT_UPDATE_JOB_TYPE_CHOICES = [ + (PERM_INVENTORY_DEPLOY, _('Run')), + (PERM_INVENTORY_CHECK, _('Check')), +] + PERMISSION_TYPE_CHOICES = [ (PERM_INVENTORY_READ, _('Read Inventory')), (PERM_INVENTORY_WRITE, _('Edit Inventory')), @@ -314,6 +320,7 @@ class CommonModelNameNotUnique(PrimordialModel): unique=False, ) + class NotificationFieldsModel(BaseModel): class Meta: @@ -336,3 +343,21 @@ class NotificationFieldsModel(BaseModel): blank=True, related_name='%(class)s_notification_templates_for_any' ) + + + +def prevent_search(relation): + """ + Used to mark a model field or relation as "restricted from filtering" + e.g., + + class AuthToken(BaseModel): + user = prevent_search(models.ForeignKey(...)) + sensitive_data = prevent_search(models.CharField(...)) + + The flag set by this function is used by + `awx.api.filters.FieldLookupBackend` to blacklist fields and relations that + should not be searchable/filterable via search query params + """ + setattr(relation, '__prevent_search__', True) + return relation diff --git a/awx/main/models/channels.py b/awx/main/models/channels.py new file mode 100644 index 0000000000..bd4f9514ba --- /dev/null +++ b/awx/main/models/channels.py @@ -0,0 +1,6 @@ +from django.db import models + + +class ChannelGroup(models.Model): + group = models.CharField(max_length=200, unique=True) + channels = models.TextField() diff --git a/awx/main/models/configuration.py b/awx/main/models/configuration.py deleted file mode 100644 index 208ccbd487..0000000000 --- a/awx/main/models/configuration.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. 
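# AdHocCommandEvent.create_from_data, defined above, tolerates string
# timestamps and silently drops keys outside valid_keys. A minimal usage
# sketch (the field values here are illustrative, not real data):

event = AdHocCommandEvent.create_from_data(
    ad_hoc_command_id=42,
    event='runner_on_ok',
    created='2016-01-01 00:00:00',  # parsed, and given UTC if naive
    stdout='ok: [host1]',
    start_line=0,
    end_line=1,
    unknown_key='discarded',  # not in valid_keys, so it is dropped
)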
- -# Python -import json - -# Django -from django.db import models -from django.utils.encoding import force_text -from django.utils.translation import ugettext_lazy as _ - -# Tower -from awx.main.models.base import CreatedModifiedModel - - -class TowerSettings(CreatedModifiedModel): - - class Meta: - app_label = 'main' - - SETTINGS_TYPE_CHOICES = [ - ('string', _("String")), - ('int', _('Integer')), - ('float', _('Decimal')), - ('json', _('JSON')), - ('bool', _('Boolean')), - ('password', _('Password')), - ('list', _('List')) - ] - - key = models.CharField( - max_length=255, - unique=True - ) - description = models.TextField() - category = models.CharField(max_length=128) - value = models.TextField( - blank=True, - ) - value_type = models.CharField( - max_length=12, - choices=SETTINGS_TYPE_CHOICES - ) - user = models.ForeignKey( - 'auth.User', - related_name='settings', - default=None, - null=True, - editable=False, - ) - - @property - def value_converted(self): - if self.value_type == 'json': - converted_type = json.loads(self.value) - elif self.value_type == 'password': - converted_type = self.value - elif self.value_type == 'list': - if self.value: - converted_type = [x.strip() for x in self.value.split(',')] - else: - converted_type = [] - elif self.value_type == 'bool': - converted_type = force_text(self.value).lower() in ('true', 'yes', '1') - elif self.value_type == 'string': - converted_type = self.value - else: - t = __builtins__[self.value_type] - converted_type = t(self.value) - return converted_type - - @value_converted.setter - def value_converted(self, value): - if self.value_type == 'json': - self.value = json.dumps(value) - elif self.value_type == 'list': - try: - self.value = ','.join(map(force_text, value)) - except TypeError: - self.value = force_text(value) - elif self.value_type == 'bool': - self.value = force_text(bool(value)) - else: - self.value = force_text(value) diff --git a/awx/main/models/credential.py b/awx/main/models/credential.py index 3188e10083..3342c8b750 100644 --- a/awx/main/models/credential.py +++ b/awx/main/models/credential.py @@ -1,9 +1,6 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. 
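# The deleted TowerSettings model above stored every setting as text and
# round-tripped typed values through value_converted. A minimal sketch of
# that conversion for the 'list' type (illustrative only; the replacement
# lives in the separate awx.conf app):

from django.utils.encoding import force_text


def list_to_text(value):
    # Setter path: ['a', 'b'] -> 'a,b'
    return ','.join(map(force_text, value))


def text_to_list(value):
    # Getter path: 'a,b' -> ['a', 'b']; empty text becomes an empty list.
    return [x.strip() for x in value.split(',')] if value else []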
-import base64 -import re - # Django from django.db import models from django.utils.translation import ugettext_lazy as _ @@ -14,6 +11,7 @@ from django.core.urlresolvers import reverse from awx.main.fields import ImplicitRoleField from awx.main.constants import CLOUD_PROVIDERS from awx.main.utils import decrypt_field +from awx.main.validators import validate_ssh_private_key from awx.main.models.base import * # noqa from awx.main.models.mixins import ResourceMixin from awx.main.models.rbac import ( @@ -52,6 +50,8 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')), + ('dzdo', _('DZDO')), + ('pmrun', _('Pmrun')), #('runas', _('Runas')), ] @@ -241,11 +241,13 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): else: ssh_key_data = self.ssh_key_data try: - key_data = validate_ssh_private_key(ssh_key_data) + pem_objects = validate_ssh_private_key(ssh_key_data) + for pem_object in pem_objects: + if pem_object.get('key_enc', False): + return True except ValidationError: - return False - else: - return bool(key_data['key_enc']) + pass + return False @property def needs_ssh_key_unlock(self): @@ -278,9 +280,9 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): """ host = self.host or '' if not host and self.kind == 'vmware': - raise ValidationError('Host required for VMware credential.') + raise ValidationError(_('Host required for VMware credential.')) if not host and self.kind == 'openstack': - raise ValidationError('Host required for OpenStack credential.') + raise ValidationError(_('Host required for OpenStack credential.')) return host def clean_domain(self): @@ -289,32 +291,32 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): def clean_username(self): username = self.username or '' if not username and self.kind == 'aws': - raise ValidationError('Access key required for AWS credential.') + raise ValidationError(_('Access key required for AWS credential.')) if not username and self.kind == 'rax': - raise ValidationError('Username required for Rackspace ' - 'credential.') + raise ValidationError(_('Username required for Rackspace ' + 'credential.')) if not username and self.kind == 'vmware': - raise ValidationError('Username required for VMware credential.') + raise ValidationError(_('Username required for VMware credential.')) if not username and self.kind == 'openstack': - raise ValidationError('Username required for OpenStack credential.') + raise ValidationError(_('Username required for OpenStack credential.')) return username def clean_password(self): password = self.password or '' if not password and self.kind == 'aws': - raise ValidationError('Secret key required for AWS credential.') + raise ValidationError(_('Secret key required for AWS credential.')) if not password and self.kind == 'rax': - raise ValidationError('API key required for Rackspace credential.') + raise ValidationError(_('API key required for Rackspace credential.')) if not password and self.kind == 'vmware': - raise ValidationError('Password required for VMware credential.') + raise ValidationError(_('Password required for VMware credential.')) if not password and self.kind == 'openstack': - raise ValidationError('Password or API key required for OpenStack credential.') + raise ValidationError(_('Password or API key required for OpenStack credential.')) return password def clean_project(self): project = self.project or '' if self.kind == 
'openstack' and not project: - raise ValidationError('Project name required for OpenStack credential.') + raise ValidationError(_('Project name required for OpenStack credential.')) return project def clean_ssh_key_data(self): @@ -341,13 +343,16 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): def clean_ssh_key_unlock(self): if self.has_encrypted_ssh_key_data and not self.ssh_key_unlock: - raise ValidationError('SSH key unlock must be set when SSH key ' - 'is encrypted.') + raise ValidationError(_('SSH key unlock must be set when SSH key ' + 'is encrypted.')) + if not self.has_encrypted_ssh_key_data and self.ssh_key_unlock: + raise ValidationError(_('SSH key unlock should not be set when ' + 'SSH key is not encrypted.')) return self.ssh_key_unlock def clean(self): if self.deprecated_user and self.deprecated_team: - raise ValidationError('Credential cannot be assigned to both a user and team.') + raise ValidationError(_('Credential cannot be assigned to both a user and team.')) def _password_field_allows_ask(self, field): return bool(self.kind == 'ssh' and field != 'ssh_key_data') @@ -379,126 +384,3 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): if 'cloud' not in update_fields: update_fields.append('cloud') super(Credential, self).save(*args, **kwargs) - - -def validate_ssh_private_key(data): - """Validate that the given SSH private key or certificate is, - in fact, valid. - """ - # Map the X in BEGIN X PRIVATE KEY to the key type (ssh-keygen -t). - # Tower jobs using OPENSSH format private keys may still fail if the - # system SSH implementation lacks support for this format. - key_types = { - 'RSA': 'rsa', - 'DSA': 'dsa', - 'EC': 'ecdsa', - 'OPENSSH': 'ed25519', - '': 'rsa1', - } - # Key properties to return if valid. - key_data = { - 'key_type': None, # Key type (from above mapping). - 'key_seg': '', # Key segment (all text including begin/end). - 'key_b64': '', # Key data as base64. - 'key_bin': '', # Key data as binary. - 'key_enc': None, # Boolean, whether key is encrypted. - 'cert_seg': '', # Cert segment (all text including begin/end). - 'cert_b64': '', # Cert data as base64. - 'cert_bin': '', # Cert data as binary. - } - data = data.strip() - validation_error = ValidationError('Invalid private key.') - - # Sanity check: We may potentially receive a full PEM certificate, - # and we want to accept these. - cert_begin_re = r'(-{4,})\s*BEGIN\s+CERTIFICATE\s*(-{4,})' - cert_end_re = r'(-{4,})\s*END\s+CERTIFICATE\s*(-{4,})' - cert_begin_match = re.search(cert_begin_re, data) - cert_end_match = re.search(cert_end_re, data) - if cert_begin_match and not cert_end_match: - raise validation_error - elif not cert_begin_match and cert_end_match: - raise validation_error - elif cert_begin_match and cert_end_match: - cert_dashes = set([cert_begin_match.groups()[0], cert_begin_match.groups()[1], - cert_end_match.groups()[0], cert_end_match.groups()[1]]) - if len(cert_dashes) != 1: - raise validation_error - key_data['cert_seg'] = data[cert_begin_match.start():cert_end_match.end()] - - # Find the private key, and also ensure that it internally matches - # itself. - # Set up the valid private key header and footer. 
- begin_re = r'(-{4,})\s*BEGIN\s+([A-Z0-9]+)?\s*PRIVATE\sKEY\s*(-{4,})' - end_re = r'(-{4,})\s*END\s+([A-Z0-9]+)?\s*PRIVATE\sKEY\s*(-{4,})' - begin_match = re.search(begin_re, data) - end_match = re.search(end_re, data) - if not begin_match or not end_match: - raise validation_error - - # Ensure that everything, such as dash counts and key type, lines up, - # and raise an error if it does not. - dashes = set([begin_match.groups()[0], begin_match.groups()[2], - end_match.groups()[0], end_match.groups()[2]]) - if len(dashes) != 1: - raise validation_error - if begin_match.groups()[1] != end_match.groups()[1]: - raise validation_error - key_type = begin_match.groups()[1] or '' - try: - key_data['key_type'] = key_types[key_type] - except KeyError: - raise ValidationError('Invalid private key: unsupported type %s' % key_type) - - # The private key data begins and ends with the private key. - key_data['key_seg'] = data[begin_match.start():end_match.end()] - - # Establish that we are able to base64 decode the private key; - # if we can't, then it's not a valid key. - # - # If we got a certificate, validate that also, in the same way. - header_re = re.compile(r'^(.+?):\s*?(.+?)(\\??)$') - for segment_name in ('cert', 'key'): - segment_to_validate = key_data['%s_seg' % segment_name] - # If we have nothing; skip this one. - # We've already validated that we have a private key above, - # so we don't need to do it again. - if not segment_to_validate: - continue - - # Ensure that this segment is valid base64 data. - base64_data = '' - line_continues = False - lines = segment_to_validate.splitlines() - for line in lines[1:-1]: - line = line.strip() - if not line: - continue - if line_continues: - line_continues = line.endswith('\\') - continue - line_match = header_re.match(line) - if line_match: - line_continues = line.endswith('\\') - continue - base64_data += line - try: - decoded_data = base64.b64decode(base64_data) - if not decoded_data: - raise validation_error - key_data['%s_b64' % segment_name] = base64_data - key_data['%s_bin' % segment_name] = decoded_data - except TypeError: - raise validation_error - - # Determine if key is encrypted. - if key_data['key_type'] == 'ed25519': - # See https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L3218 - # Decoded key data starts with magic string (null-terminated), four byte - # length field, followed by the ciphername -- if ciphername is anything - # other than 'none' the key is encrypted. - key_data['key_enc'] = not bool(key_data['key_bin'].startswith('openssh-key-v1\x00\x00\x00\x00\x04none')) - else: - key_data['key_enc'] = bool('ENCRYPTED' in key_data['key_seg']) - - return key_data diff --git a/awx/main/models/fact.py b/awx/main/models/fact.py index 16a67eb45e..480834c2c1 100644 --- a/awx/main/models/fact.py +++ b/awx/main/models/fact.py @@ -8,6 +8,7 @@ from jsonbfield.fields import JSONField __all__ = ('Fact', ) + class Fact(models.Model): """A model representing a fact returned from Ansible. Facts are stored as JSON dictionaries. 
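# The deleted validate_ssh_private_key helper above classified a key as
# encrypted either by an 'ENCRYPTED' marker in its PEM segment or, for
# OPENSSH-format (ed25519) keys, by the ciphername that follows the
# 'openssh-key-v1' magic. A minimal sketch of that check, assuming the
# key_type/key_seg/key_bin values the deleted code produced:

def key_is_encrypted(key_type, key_seg, key_bin):
    if key_type == 'ed25519':
        # An unencrypted OPENSSH key embeds the ciphername 'none' right
        # after the null-terminated magic string and a four-byte length.
        return not key_bin.startswith('openssh-key-v1\x00\x00\x00\x00\x04none')
    return 'ENCRYPTED' in key_seg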
@@ -20,8 +21,8 @@ class Fact(models.Model): help_text=_('Host for the facts that the fact scan captured.'), ) timestamp = models.DateTimeField( - default=None, - editable=False, + default=None, + editable=False, help_text=_('Date and time of the corresponding fact scan gathering time.') ) module = models.CharField(max_length=128) diff --git a/awx/main/models/ha.py b/awx/main/models/ha.py index 3725e6afe5..cb01d03722 100644 --- a/awx/main/models/ha.py +++ b/awx/main/models/ha.py @@ -1,19 +1,19 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. -import functools - from django.db import models from django.db.models.signals import post_save from django.dispatch import receiver +from solo.models import SingletonModel + from awx.main.managers import InstanceManager from awx.main.models.inventory import InventoryUpdate from awx.main.models.jobs import Job from awx.main.models.projects import ProjectUpdate -from awx.main.models.unified_jobs import UnifiedJob, CAN_CANCEL +from awx.main.models.unified_jobs import UnifiedJob -__all__ = ('Instance', 'JobOrigin') +__all__ = ('Instance', 'JobOrigin', 'TowerScheduleState',) class Instance(models.Model): @@ -22,40 +22,26 @@ class Instance(models.Model): """ objects = InstanceManager() - uuid = models.CharField(max_length=40, unique=True) + uuid = models.CharField(max_length=40) hostname = models.CharField(max_length=250, unique=True) - primary = models.BooleanField(default=False) created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) + capacity = models.PositiveIntegerField( + default=100, + editable=False, + ) class Meta: app_label = 'main' @property def role(self): - """Return the role of this instance, as a string.""" - if self.primary: - return 'primary' - return 'secondary' + # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing + return "tower" - @functools.wraps(models.Model.save) - def save(self, *args, **kwargs): - """Save the instance. If this is a secondary instance, then ensure - that any currently-running jobs that this instance started are - canceled. - """ - # Perform the normal save. - result = super(Instance, self).save(*args, **kwargs) - # If this is not a primary instance, then kill any jobs that this - # instance was responsible for starting. - if not self.primary: - for job in UnifiedJob.objects.filter(job_origin__instance=self, - status__in=CAN_CANCEL): - job.cancel() - - # Return back the original result. 
- return result +class TowerScheduleState(SingletonModel): + schedule_last_run = models.DateTimeField(auto_now_add=True) class JobOrigin(models.Model): diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index 8dde9f3b3b..387277c5e9 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -22,19 +22,21 @@ from awx.main.constants import CLOUD_PROVIDERS from awx.main.fields import AutoOneToOneField, ImplicitRoleField from awx.main.managers import HostManager from awx.main.models.base import * # noqa -from awx.main.models.jobs import Job from awx.main.models.unified_jobs import * # noqa +from awx.main.models.jobs import Job from awx.main.models.mixins import ResourceMixin -from awx.main.models.notifications import NotificationTemplate +from awx.main.models.notifications import ( + NotificationTemplate, + JobNotificationMixin, +) from awx.main.utils import _inventory_updates -from awx.main.conf import tower_settings __all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'CustomInventoryScript'] logger = logging.getLogger('awx.main.models.inventory') -class Inventory(CommonModel, ResourceMixin): +class Inventory(CommonModelNameNotUnique, ResourceMixin): ''' an inventory source contains lists and hosts. ''' @@ -341,6 +343,7 @@ class Host(CommonModelNameNotUnique): max_length=1024, blank=True, default='', + help_text=_('The value used by the remote inventory source to uniquely identify the host'), ) variables = models.TextField( blank=True, @@ -858,6 +861,10 @@ class InventorySourceOptions(BaseModel): default=False, help_text=_('Overwrite local variables from remote inventory source.'), ) + timeout = models.IntegerField( + blank=True, + default=0, + ) @classmethod def get_ec2_region_choices(cls): @@ -884,16 +891,16 @@ class InventorySourceOptions(BaseModel): @classmethod def get_ec2_group_by_choices(cls): return [ - ('availability_zone', 'Availability Zone'), - ('ami_id', 'Image ID'), - ('instance_id', 'Instance ID'), - ('instance_type', 'Instance Type'), - ('key_pair', 'Key Name'), - ('region', 'Region'), - ('security_group', 'Security Group'), - ('tag_keys', 'Tags'), - ('vpc_id', 'VPC ID'), - ('tag_none', 'Tag None'), + ('availability_zone', _('Availability Zone')), + ('ami_id', _('Image ID')), + ('instance_id', _('Instance ID')), + ('instance_type', _('Instance Type')), + ('key_pair', _('Key Name')), + ('region', _('Region')), + ('security_group', _('Security Group')), + ('tag_keys', _('Tags')), + ('vpc_id', _('VPC ID')), + ('tag_none', _('Tag None')), ] @classmethod @@ -964,14 +971,14 @@ class InventorySourceOptions(BaseModel): # credentials; Rackspace requires Rackspace credentials; etc...) if self.source.replace('ec2', 'aws') != cred.kind: raise ValidationError( - 'Cloud-based inventory sources (such as %s) require ' - 'credentials for the matching cloud service.' % self.source + _('Cloud-based inventory sources (such as %s) require ' + 'credentials for the matching cloud service.') % self.source ) # Allow an EC2 source to omit the credential. If Tower is running on # an EC2 instance with an IAM Role assigned, boto will use credentials # from the instance metadata instead of those explicitly provided. 
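# TowerScheduleState above subclasses django-solo's SingletonModel, so the
# scheduler can track its last run in a single well-known row. A minimal
# usage sketch (get_solo() is django-solo's accessor; the function itself is
# hypothetical, not the Tower scheduler):

from django.utils import timezone


def record_schedule_run():
    state = TowerScheduleState.get_solo()  # fetches or creates the one row
    state.schedule_last_run = timezone.now()
    state.save()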
elif self.source in CLOUD_PROVIDERS and self.source != 'ec2': - raise ValidationError('Credential is required for a cloud source.') + raise ValidationError(_('Credential is required for a cloud source.')) return cred def clean_source_regions(self): @@ -996,9 +1003,8 @@ class InventorySourceOptions(BaseModel): if r not in valid_regions and r not in invalid_regions: invalid_regions.append(r) if invalid_regions: - raise ValidationError('Invalid %s region%s: %s' % (self.source, - '' if len(invalid_regions) == 1 else 's', - ', '.join(invalid_regions))) + raise ValidationError(_('Invalid %(source)s region: %(region)s') % { + 'source': self.source, 'region': ', '.join(invalid_regions)}) return ','.join(regions) source_vars_dict = VarsDictProperty('source_vars') @@ -1022,9 +1028,8 @@ class InventorySourceOptions(BaseModel): if instance_filter_name not in self.INSTANCE_FILTER_NAMES: invalid_filters.append(instance_filter) if invalid_filters: - raise ValidationError('Invalid filter expression%s: %s' % - ('' if len(invalid_filters) == 1 else 's', - ', '.join(invalid_filters))) + raise ValidationError(_('Invalid filter expression: %(filter)s') % + {'filter': ', '.join(invalid_filters)}) return instance_filters def clean_group_by(self): @@ -1041,9 +1046,8 @@ class InventorySourceOptions(BaseModel): if c not in valid_choices and c not in invalid_choices: invalid_choices.append(c) if invalid_choices: - raise ValidationError('Invalid group by choice%s: %s' % - ('' if len(invalid_choices) == 1 else 's', - ', '.join(invalid_choices))) + raise ValidationError(_('Invalid group by choice: %(choice)s') % + {'choice': ', '.join(invalid_choices)}) return ','.join(choices) @@ -1082,7 +1086,8 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions): @classmethod def _get_unified_job_field_names(cls): return ['name', 'description', 'source', 'source_path', 'source_script', 'source_vars', 'schedule', - 'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars'] + 'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars', + 'timeout', 'launch_type',] def save(self, *args, **kwargs): # If update_fields has been specified, add our field names to it, @@ -1188,11 +1193,11 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions): existing_sources = qs.exclude(pk=self.pk) if existing_sources.count(): s = u', '.join([x.group.name for x in existing_sources]) - raise ValidationError('Unable to configure this item for cloud sync. It is already managed by %s.' % s) + raise ValidationError(_('Unable to configure this item for cloud sync. It is already managed by %s.') % s) return source -class InventoryUpdate(UnifiedJob, InventorySourceOptions): +class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin): ''' Internal job for tracking inventory updates from external sources. 
''' @@ -1220,10 +1225,14 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions): from awx.main.tasks import RunInventoryUpdate return RunInventoryUpdate - def socketio_emit_data(self): + def _global_timeout_setting(self): + return 'DEFAULT_INVENTORY_UPDATE_TIMEOUT' + + def websocket_emit_data(self): + websocket_data = super(InventoryUpdate, self).websocket_emit_data() if self.inventory_source.group is not None: - return dict(group_id=self.inventory_source.group.id) - return {} + websocket_data.update(dict(group_id=self.inventory_source.group.id)) + return websocket_data def save(self, *args, **kwargs): update_fields = kwargs.get('update_fields', []) @@ -1241,16 +1250,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions): return reverse('api:inventory_update_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/inventory_sync/{}".format(self.pk)) - - def is_blocked_by(self, obj): - if type(obj) == InventoryUpdate: - if self.inventory_source.inventory == obj.inventory_source.inventory: - return True - if type(obj) == Job: - if self.inventory_source.inventory == obj.inventory: - return True - return False + return urljoin(settings.TOWER_URL_BASE, "/#/inventory_sync/{}".format(self.pk)) @property def task_impact(self): @@ -1268,6 +1268,21 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions): return False return True + ''' + JobNotificationMixin + ''' + def get_notification_templates(self): + return self.inventory_source.notification_templates + + def get_notification_friendly_name(self): + return "Inventory Update" + + def cancel(self): + res = super(InventoryUpdate, self).cancel() + if res: + map(lambda x: x.cancel(), Job.objects.filter(dependent_jobs__in=[self.id])) + return res + class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin): @@ -1276,11 +1291,11 @@ class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin): unique_together = [('name', 'organization')] ordering = ('name',) - script = models.TextField( + script = prevent_search(models.TextField( blank=True, default='', help_text=_('Inventory script contents'), - ) + )) organization = models.ForeignKey( 'Organization', related_name='custom_inventory_scripts', @@ -1299,4 +1314,3 @@ class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin): def get_absolute_url(self): return reverse('api:inventory_script_detail', args=(self.pk,)) - diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index bbf53b86ce..00a68c69ca 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -2,9 +2,8 @@ # All Rights Reserved. 
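# InventoryUpdate above names DEFAULT_INVENTORY_UPDATE_TIMEOUT as its global
# timeout setting, alongside the new per-source timeout field. A minimal
# sketch, assuming the fallback semantics (a timeout of 0 defers to the
# global value), of resolving the effective timeout for a job:

from django.conf import settings


def effective_timeout(unified_job):
    if unified_job.timeout:
        return unified_job.timeout
    return getattr(settings, unified_job._global_timeout_setting(), 0)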
# Python +import datetime import hmac -import json -import yaml import logging import time from urlparse import urljoin @@ -12,28 +11,36 @@ from urlparse import urljoin # Django from django.conf import settings from django.db import models -from django.db.models import Q +from django.db.models import Q, Count +from django.utils.dateparse import parse_datetime +from django.utils.encoding import force_text +from django.utils.timezone import utc from django.utils.translation import ugettext_lazy as _ from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse -# Django-JSONField -from jsonfield import JSONField - # AWX from awx.main.constants import CLOUD_PROVIDERS from awx.main.models.base import * # noqa from awx.main.models.unified_jobs import * # noqa -from awx.main.models.notifications import NotificationTemplate -from awx.main.utils import decrypt_field, ignore_inventory_computed_fields -from awx.main.utils import emit_websocket_notification -from awx.main.redact import PlainTextCleaner -from awx.main.conf import tower_settings +from awx.main.models.notifications import ( + NotificationTemplate, + JobNotificationMixin, +) +from awx.main.utils import ( + ignore_inventory_computed_fields, + parse_yaml_or_json, +) from awx.main.fields import ImplicitRoleField -from awx.main.models.mixins import ResourceMixin +from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin +from awx.main.models.base import PERM_INVENTORY_SCAN +from awx.main.fields import JSONField + +from awx.main.consumers import emit_channel_notification logger = logging.getLogger('awx.main.models.jobs') +analytics_logger = logging.getLogger('awx.analytics.job_events') __all__ = ['JobTemplate', 'Job', 'JobHostSummary', 'JobEvent', 'SystemJobOptions', 'SystemJobTemplate', 'SystemJob'] @@ -110,10 +117,10 @@ class JobOptions(BaseModel): blank=True, default=0, ) - extra_vars = models.TextField( + extra_vars = prevent_search(models.TextField( blank=True, default='', - ) + )) job_tags = models.CharField( max_length=1024, blank=True, @@ -136,6 +143,13 @@ class JobOptions(BaseModel): become_enabled = models.BooleanField( default=False, ) + allow_simultaneous = models.BooleanField( + default=False, + ) + timeout = models.IntegerField( + blank=True, + default=0, + ) extra_vars_dict = VarsDictProperty('extra_vars', True) @@ -143,7 +157,7 @@ class JobOptions(BaseModel): cred = self.credential if cred and cred.kind != 'ssh': raise ValidationError( - 'You must provide a machine / SSH credential.', + _('You must provide a machine / SSH credential.'), ) return cred @@ -151,7 +165,7 @@ class JobOptions(BaseModel): cred = self.network_credential if cred and cred.kind != 'net': raise ValidationError( - 'You must provide a network credential.', + _('You must provide a network credential.'), ) return cred @@ -159,8 +173,8 @@ class JobOptions(BaseModel): cred = self.cloud_credential if cred and cred.kind not in CLOUD_PROVIDERS + ('aws',): raise ValidationError( - 'Must provide a credential for a cloud provider, such as ' - 'Amazon Web Services or Rackspace.', + _('Must provide a credential for a cloud provider, such as ' + 'Amazon Web Services or Rackspace.'), ) return cred @@ -172,7 +186,8 @@ class JobOptions(BaseModel): else: return [] -class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): + +class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin): ''' A job template is a reusable job definition for applying a project (with playbook) to 
an inventory source with a given credential. @@ -216,15 +231,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): blank=True, default=False, ) - - survey_enabled = models.BooleanField( - default=False, - ) - - survey_spec = JSONField( - blank=True, - default={}, - ) admin_role = ImplicitRoleField( parent_role=['project.organization.admin_role', 'inventory.organization.admin_role'] ) @@ -234,9 +240,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): read_role = ImplicitRoleField( parent_role=['project.organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], ) - allow_simultaneous = models.BooleanField( - default=False, - ) @classmethod @@ -249,7 +252,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): 'playbook', 'credential', 'cloud_credential', 'network_credential', 'forks', 'schedule', 'limit', 'verbosity', 'job_tags', 'extra_vars', 'launch_type', 'force_handlers', 'skip_tags', 'start_at_task', 'become_enabled', - 'labels', 'survey_passwords'] + 'labels', 'survey_passwords', 'allow_simultaneous', 'timeout'] def resource_validation_data(self): ''' @@ -262,19 +265,19 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): if self.inventory is None: resources_needed_to_start.append('inventory') if not self.ask_inventory_on_launch: - validation_errors['inventory'] = ["Job Template must provide 'inventory' or allow prompting for it.",] + validation_errors['inventory'] = [_("Job Template must provide 'inventory' or allow prompting for it."),] if self.credential is None: resources_needed_to_start.append('credential') if not self.ask_credential_on_launch: - validation_errors['credential'] = ["Job Template must provide 'credential' or allow prompting for it.",] + validation_errors['credential'] = [_("Job Template must provide 'credential' or allow prompting for it."),] # Job type dependent checks - if self.job_type == 'scan': + if self.job_type == PERM_INVENTORY_SCAN: if self.inventory is None or self.ask_inventory_on_launch: - validation_errors['inventory'] = [_("Scan jobs must be assigned a fixed inventory."),] + validation_errors['inventory'] = [_("Scan jobs must be assigned a fixed inventory."),] elif self.project is None: resources_needed_to_start.append('project') - validation_errors['project'] = ["Job types 'run' and 'check' must have assigned a project.",] + validation_errors['project'] = [_("Job types 'run' and 'check' must have a project assigned."),] return (validation_errors, resources_needed_to_start) @@ -292,131 +295,27 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): def get_absolute_url(self): return reverse('api:job_template_detail', args=(self.pk,)) - def can_start_without_user_input(self): + def can_start_without_user_input(self, callback_extra_vars=None): ''' Return whether job template can be used to start a new job without requiring any user input.
''' + variables_needed = False + if callback_extra_vars: + extra_vars_dict = parse_yaml_or_json(callback_extra_vars) + for var in self.variables_needed_to_start: + if var not in extra_vars_dict: + variables_needed = True + break + elif self.variables_needed_to_start: + variables_needed = True prompting_needed = False for value in self._ask_for_vars_dict().values(): if value: prompting_needed = True return (not prompting_needed and not self.passwords_needed_to_start and - not self.variables_needed_to_start) - - @property - def variables_needed_to_start(self): - vars = [] - if self.survey_enabled and 'spec' in self.survey_spec: - for survey_element in self.survey_spec['spec']: - if survey_element['required']: - vars.append(survey_element['variable']) - return vars - - def survey_password_variables(self): - vars = [] - if self.survey_enabled and 'spec' in self.survey_spec: - # Get variables that are type password - for survey_element in self.survey_spec['spec']: - if survey_element['type'] == 'password': - vars.append(survey_element['variable']) - return vars - - def survey_variable_validation(self, data): - errors = [] - if not self.survey_enabled: - return errors - if 'name' not in self.survey_spec: - errors.append("'name' missing from survey spec.") - if 'description' not in self.survey_spec: - errors.append("'description' missing from survey spec.") - for survey_element in self.survey_spec.get("spec", []): - if survey_element['variable'] not in data and \ - survey_element['required']: - errors.append("'%s' value missing" % survey_element['variable']) - elif survey_element['type'] in ["textarea", "text", "password"]: - if survey_element['variable'] in data: - if 'min' in survey_element and survey_element['min'] not in ["", None] and len(data[survey_element['variable']]) < int(survey_element['min']): - errors.append("'%s' value %s is too small (length is %s must be at least %s)." % - (survey_element['variable'], data[survey_element['variable']], len(data[survey_element['variable']]), survey_element['min'])) - if 'max' in survey_element and survey_element['max'] not in ["", None] and len(data[survey_element['variable']]) > int(survey_element['max']): - errors.append("'%s' value %s is too large (must be no more than %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) - elif survey_element['type'] == 'integer': - if survey_element['variable'] in data: - if 'min' in survey_element and survey_element['min'] not in ["", None] and survey_element['variable'] in data and \ - data[survey_element['variable']] < int(survey_element['min']): - errors.append("'%s' value %s is too small (must be at least %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) - if 'max' in survey_element and survey_element['max'] not in ["", None] and survey_element['variable'] in data and \ - data[survey_element['variable']] > int(survey_element['max']): - errors.append("'%s' value %s is too large (must be no more than %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) - if type(data[survey_element['variable']]) != int: - errors.append("Value %s for '%s' expected to be an integer." 
% (data[survey_element['variable']], - survey_element['variable'])) - elif survey_element['type'] == 'float': - if survey_element['variable'] in data: - if 'min' in survey_element and survey_element['min'] not in ["", None] and data[survey_element['variable']] < float(survey_element['min']): - errors.append("'%s' value %s is too small (must be at least %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) - if 'max' in survey_element and survey_element['max'] not in ["", None] and data[survey_element['variable']] > float(survey_element['max']): - errors.append("'%s' value %s is too large (must be no more than %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) - if type(data[survey_element['variable']]) not in (float, int): - errors.append("Value %s for '%s' expected to be a numeric type." % (data[survey_element['variable']], - survey_element['variable'])) - elif survey_element['type'] == 'multiselect': - if survey_element['variable'] in data: - if type(data[survey_element['variable']]) != list: - errors.append("'%s' value is expected to be a list." % survey_element['variable']) - else: - for val in data[survey_element['variable']]: - if val not in survey_element['choices']: - errors.append("Value %s for '%s' expected to be one of %s." % (val, survey_element['variable'], - survey_element['choices'])) - elif survey_element['type'] == 'multiplechoice': - if survey_element['variable'] in data: - if data[survey_element['variable']] not in survey_element['choices']: - errors.append("Value %s for '%s' expected to be one of %s." % (data[survey_element['variable']], - survey_element['variable'], - survey_element['choices'])) - return errors - - def _update_unified_job_kwargs(self, **kwargs): - if 'launch_type' in kwargs and kwargs['launch_type'] == 'relaunch': - return kwargs - - # Job Template extra_vars - extra_vars = self.extra_vars_dict - - # Overwrite with job template extra vars with survey default vars - if self.survey_enabled and 'spec' in self.survey_spec: - for survey_element in self.survey_spec.get("spec", []): - if 'default' in survey_element and survey_element['default']: - extra_vars[survey_element['variable']] = survey_element['default'] - - # transform to dict - if 'extra_vars' in kwargs: - kwargs_extra_vars = kwargs['extra_vars'] - if not isinstance(kwargs_extra_vars, dict): - try: - kwargs_extra_vars = json.loads(kwargs_extra_vars) - except Exception: - try: - kwargs_extra_vars = yaml.safe_load(kwargs_extra_vars) - assert isinstance(kwargs_extra_vars, dict) - except: - kwargs_extra_vars = {} - else: - kwargs_extra_vars = {} - - # Overwrite job template extra vars with explicit job extra vars - # and add on job extra vars - extra_vars.update(kwargs_extra_vars) - kwargs['extra_vars'] = json.dumps(extra_vars) - return kwargs + not variables_needed) def _ask_for_vars_dict(self): return dict( @@ -447,16 +346,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): if field == 'extra_vars' and self.survey_enabled and self.survey_spec: # Accept vars defined in the survey and no others survey_vars = [question['variable'] for question in self.survey_spec.get('spec', [])] - extra_vars = kwargs[field] - if isinstance(extra_vars, basestring): - try: - extra_vars = json.loads(extra_vars) - except (ValueError, TypeError): - try: - extra_vars = yaml.safe_load(extra_vars) - assert isinstance(extra_vars, dict) - except (yaml.YAMLError, TypeError, AttributeError, AssertionError): - extra_vars = {} 
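# The inline JSON-then-YAML fallback removed above is consolidated into the
# parse_yaml_or_json helper imported from awx.main.utils at the top of this
# diff. A minimal sketch of the behavior the new call sites rely on, inferred
# from the inline logic it replaces (an assumption, not the helper's actual
# source):
import json
import yaml

def parse_yaml_or_json(vars_str):
    # Pass dicts through untouched; try JSON first, then YAML; fall back to
    # an empty dict when the input is not a mapping or fails to parse.
    if isinstance(vars_str, dict):
        return vars_str
    try:
        vars_dict = json.loads(vars_str)
    except (ValueError, TypeError):
        try:
            vars_dict = yaml.safe_load(vars_str)
            assert isinstance(vars_dict, dict)
        except (yaml.YAMLError, TypeError, AttributeError, AssertionError):
            vars_dict = {}
    return vars_dict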
+ extra_vars = parse_yaml_or_json(kwargs[field]) for key in extra_vars: if key in survey_vars: prompted_fields[field][key] = extra_vars[key] @@ -465,18 +355,29 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): else: ignored_fields[field] = kwargs[field] - # Special case to ignore inventory if it is a scan job - if prompted_fields.get('job_type', None) == 'scan' or self.job_type == 'scan': - if 'inventory' in prompted_fields: - ignored_fields['inventory'] = prompted_fields.pop('inventory') - return prompted_fields, ignored_fields + def _extra_job_type_errors(self, data): + """ + Used to enforce 2 special cases around scan jobs and prompting + - the inventory cannot be changed on a scan job template + - scan jobs cannot be switched to run/check type and vice versa + """ + errors = {} + if 'job_type' in data and self.ask_job_type_on_launch: + if ((self.job_type == PERM_INVENTORY_SCAN and not data['job_type'] == PERM_INVENTORY_SCAN) or + (data['job_type'] == PERM_INVENTORY_SCAN and not self.job_type == PERM_INVENTORY_SCAN)): + errors['job_type'] = _('Cannot override job_type to or from a scan job.') + if (self.job_type == PERM_INVENTORY_SCAN and ('inventory' in data) and self.ask_inventory_on_launch and + self.inventory != data['inventory']): + errors['inventory'] = _('Inventory cannot be changed at runtime for scan jobs.') + return errors + @property def cache_timeout_blocked(self): - if Job.objects.filter(job_template=self, status__in=['pending', 'waiting', 'running']).count() > getattr(tower_settings, 'SCHEDULE_MAX_JOBS', 10): + if Job.objects.filter(job_template=self, status__in=['pending', 'waiting', 'running']).count() > getattr(settings, 'SCHEDULE_MAX_JOBS', 10): logger.error("Job template %s could not be started because there are more than %s other jobs from that template waiting to run" % - (self.name, getattr(tower_settings, 'SCHEDULE_MAX_JOBS', 10))) + (self.name, getattr(settings, 'SCHEDULE_MAX_JOBS', 10))) return True return False @@ -499,7 +400,8 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): any_notification_templates = set(any_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_any=self.project.organization))) return dict(error=list(error_notification_templates), success=list(success_notification_templates), any=list(any_notification_templates)) -class Job(UnifiedJob, JobOptions): + +class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin): ''' A job applies a project (with playbook) to an inventory source with a given credential. 
It represents a single invocation of ansible-playbook with the @@ -524,11 +426,29 @@ class Job(UnifiedJob, JobOptions): editable=False, through='JobHostSummary', ) - survey_passwords = JSONField( + artifacts = JSONField( blank=True, default={}, editable=False, ) + scm_revision = models.CharField( + max_length=1024, + blank=True, + default='', + editable=False, + verbose_name=_('SCM Revision'), + help_text=_('The SCM Revision from the Project used for this job, if available'), + ) + project_update = models.ForeignKey( + 'ProjectUpdate', + blank=True, + null=True, + default=None, + on_delete=models.SET_NULL, + help_text=_('The SCM Refresh task used to make sure the playbooks were available for the job run'), + ) + + @classmethod def _get_parent_field_name(cls): @@ -539,11 +459,18 @@ class Job(UnifiedJob, JobOptions): from awx.main.tasks import RunJob return RunJob + def _global_timeout_setting(self): + return 'DEFAULT_JOB_TIMEOUT' + + @classmethod + def _get_unified_job_template_class(cls): + return JobTemplate + def get_absolute_url(self): return reverse('api:job_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/jobs/{}".format(self.pk)) + return urljoin(settings.TOWER_URL_BASE, "/#/jobs/{}".format(self.pk)) @property def task_auth_token(self): @@ -602,29 +529,6 @@ class Job(UnifiedJob, JobOptions): kwargs['job_host_summaries__job__pk'] = self.pk return Host.objects.filter(**kwargs) - def is_blocked_by(self, obj): - from awx.main.models import InventoryUpdate, ProjectUpdate - if type(obj) == Job: - if obj.job_template is not None and obj.inventory is not None: - if obj.job_template == self.job_template and \ - obj.inventory == self.inventory: - if self.job_template.allow_simultaneous: - return False - if obj.launch_type == 'callback' and self.launch_type == 'callback' and \ - obj.limit != self.limit: - return False - return True - return False - if type(obj) == InventoryUpdate: - if self.inventory == obj.inventory_source.inventory: - return True - return False - if type(obj) == ProjectUpdate: - if obj.project == self.project: - return True - return False - return False - @property def task_impact(self): # NOTE: We sorta have to assume the host count matches and that forks default to 5 @@ -663,39 +567,6 @@ class Job(UnifiedJob, JobOptions): def processed_hosts(self): return self._get_hosts(job_host_summaries__processed__gt=0) - def generate_dependencies(self, active_tasks): - from awx.main.models import InventoryUpdate, ProjectUpdate - inventory_sources = self.inventory.inventory_sources.filter(update_on_launch=True) - project_found = False - inventory_sources_found = [] - dependencies = [] - for obj in active_tasks: - if type(obj) == ProjectUpdate and self.project is not None: - if obj.project == self.project: - project_found = True - if type(obj) == InventoryUpdate: - if obj.inventory_source in inventory_sources: - inventory_sources_found.append(obj.inventory_source) - # Skip updating any inventory sources that were already updated before - # running this job (via callback inventory refresh). 
- try: - start_args = json.loads(decrypt_field(self, 'start_args')) - except Exception: - start_args = None - start_args = start_args or {} - inventory_sources_already_updated = start_args.get('inventory_sources_already_updated', []) - if inventory_sources_already_updated: - for source in inventory_sources.filter(pk__in=inventory_sources_already_updated): - if source not in inventory_sources_found: - inventory_sources_found.append(source) - if not project_found and self.project is not None and self.project.needs_update_on_launch: - dependencies.append(self.project.create_project_update(launch_type='dependency')) - if inventory_sources.count(): # and not has_setup_failures? Probably handled as an error scenario in the task runner - for source in inventory_sources: - if source not in inventory_sources_found and source.needs_update_on_launch: - dependencies.append(source.create_inventory_update(launch_type='dependency')) - return dependencies - def notification_data(self, block=5): data = super(Job, self).notification_data() all_hosts = {} @@ -724,60 +595,19 @@ class Job(UnifiedJob, JobOptions): hosts=all_hosts)) return data - def handle_extra_data(self, extra_data): - extra_vars = {} - if isinstance(extra_data, dict): - extra_vars = extra_data - elif extra_data is None: - return - else: - if extra_data == "": - return - try: - extra_vars = json.loads(extra_data) - except Exception as e: - logger.warn("Exception deserializing extra vars: " + str(e)) - evars = self.extra_vars_dict - evars.update(extra_vars) - self.update_fields(extra_vars=json.dumps(evars)) + def _resources_sufficient_for_launch(self): + if self.job_type == PERM_INVENTORY_SCAN: + return self.inventory_id is not None + return not (self.inventory_id is None or self.project_id is None) - def display_extra_vars(self): + def display_artifacts(self): ''' - Hides fields marked as passwords in survey. + Hides artifacts if they are marked as no_log type artifacts. ''' - if self.survey_passwords: - extra_vars = json.loads(self.extra_vars) - extra_vars.update(self.survey_passwords) - return json.dumps(extra_vars) - else: - return self.extra_vars - - def _survey_search_and_replace(self, content): - # Use job template survey spec to identify password fields. 
- # Then lookup password fields in extra_vars and save the values - jt = self.job_template - if jt and jt.survey_enabled and 'spec' in jt.survey_spec: - # Use password vars to find in extra_vars - for key in jt.survey_password_variables(): - if key in self.extra_vars_dict: - content = PlainTextCleaner.remove_sensitive(content, self.extra_vars_dict[key]) - return content - - def _result_stdout_raw_limited(self, *args, **kwargs): - buff, start, end, abs_end = super(Job, self)._result_stdout_raw_limited(*args, **kwargs) - return self._survey_search_and_replace(buff), start, end, abs_end - - def _result_stdout_raw(self, *args, **kwargs): - content = super(Job, self)._result_stdout_raw(*args, **kwargs) - return self._survey_search_and_replace(content) - - def copy(self): - presets = {} - for kw in JobTemplate._get_unified_job_field_names(): - presets[kw] = getattr(self, kw) - if not self.job_template: - self.job_template = JobTemplate(name='temporary') - return self.job_template.create_unified_job(**presets) + artifacts = self.artifacts + if artifacts.get('_ansible_no_log', False): + return "$hidden due to Ansible no_log flag$" + return artifacts # Job Credential required @property @@ -790,6 +620,26 @@ class Job(UnifiedJob, JobOptions): return True + ''' + JobNotificationMixin + ''' + def get_notification_templates(self): + return self.job_template.notification_templates + + def get_notification_friendly_name(self): + return "Job" + + ''' + Canceling a job also cancels the implicit project update with launch_type + run. + ''' + def cancel(self): + res = super(Job, self).cancel() + if self.project_update: + self.project_update.cancel() + return res + + class JobHostSummary(CreatedModifiedModel): ''' Per-host statistics for each job. @@ -872,24 +722,29 @@ class JobEvent(CreatedModifiedModel): # - playbook_on_vars_prompt (for each play, but before play starts, we # currently don't handle responding to these prompts) # - playbook_on_play_start (once for each play) - # - playbook_on_import_for_host - # - playbook_on_not_import_for_host + # - playbook_on_import_for_host (not logged, not used for v2) + # - playbook_on_not_import_for_host (not logged, not used for v2) # - playbook_on_no_hosts_matched # - playbook_on_no_hosts_remaining - # - playbook_on_setup + # - playbook_on_include (only v2 - only used for handlers?) 
+ # - playbook_on_setup (not used for v2) # - runner_on* # - playbook_on_task_start (once for each task within a play) # - runner_on_failed # - runner_on_ok - # - runner_on_error + # - runner_on_error (not used for v2) # - runner_on_skipped # - runner_on_unreachable - # - runner_on_no_hosts - # - runner_on_async_poll - # - runner_on_async_ok - # - runner_on_async_failed - # - runner_on_file_diff - # - playbook_on_notify (once for each notification from the play) + # - runner_on_no_hosts (not used for v2) + # - runner_on_async_poll (not used for v2) + # - runner_on_async_ok (not used for v2) + # - runner_on_async_failed (not used for v2) + # - runner_on_file_diff (v2 event is v2_on_file_diff) + # - runner_item_on_ok (v2 only) + # - runner_item_on_failed (v2 only) + # - runner_item_on_skipped (v2 only) + # - runner_retry (v2 only) + # - playbook_on_notify (once for each notification from the play, not used for v2) # - playbook_on_stats EVENT_TYPES = [ @@ -903,22 +758,34 @@ class JobEvent(CreatedModifiedModel): (3, 'runner_on_async_poll', _('Host Polling'), False), (3, 'runner_on_async_ok', _('Host Async OK'), False), (3, 'runner_on_async_failed', _('Host Async Failure'), True), - # AWX does not yet support --diff mode + (3, 'runner_item_on_ok', _('Item OK'), False), + (3, 'runner_item_on_failed', _('Item Failed'), True), + (3, 'runner_item_on_skipped', _('Item Skipped'), False), + (3, 'runner_retry', _('Host Retry'), False), + # Tower does not yet support --diff mode. (3, 'runner_on_file_diff', _('File Difference'), False), (0, 'playbook_on_start', _('Playbook Started'), False), (2, 'playbook_on_notify', _('Running Handlers'), False), + (2, 'playbook_on_include', _('Including File'), False), (2, 'playbook_on_no_hosts_matched', _('No Hosts Matched'), False), (2, 'playbook_on_no_hosts_remaining', _('No Hosts Remaining'), False), (2, 'playbook_on_task_start', _('Task Started'), False), - # AWX does not yet support vars_prompt (and will probably hang :) + # Tower does not yet support vars_prompt (and will probably hang :) (1, 'playbook_on_vars_prompt', _('Variables Prompted'), False), (2, 'playbook_on_setup', _('Gathering Facts'), False), - # callback will not record this (2, 'playbook_on_import_for_host', _('internal: on Import for Host'), False), - # callback will not record this (2, 'playbook_on_not_import_for_host', _('internal: on Not Import for Host'), False), (1, 'playbook_on_play_start', _('Play Started'), False), (1, 'playbook_on_stats', _('Playbook Complete'), False), + + # Additional event types for captured stdout not directly related to + # playbook or runner events. 
+ (0, 'debug', _('Debug'), False), + (0, 'verbose', _('Verbose'), False), + (0, 'deprecated', _('Deprecated'), False), + (0, 'warning', _('Warning'), False), + (0, 'system_warning', _('System Warning'), False), + (0, 'error', _('Error'), True), ] FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]] EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES] @@ -927,6 +794,13 @@ class JobEvent(CreatedModifiedModel): class Meta: app_label = 'main' ordering = ('pk',) + index_together = [ + ('job', 'event'), + ('job', 'uuid'), + ('job', 'start_line'), + ('job', 'end_line'), + ('job', 'parent_uuid'), + ] job = models.ForeignKey( 'Job', @@ -950,6 +824,11 @@ class JobEvent(CreatedModifiedModel): default=False, editable=False, ) + uuid = models.CharField( + max_length=1024, + default='', + editable=False, + ) host = models.ForeignKey( 'Host', related_name='job_events_as_primary_host', @@ -968,12 +847,17 @@ class JobEvent(CreatedModifiedModel): related_name='job_events', editable=False, ) + playbook = models.CharField( + max_length=1024, + default='', + editable=False, + ) play = models.CharField( max_length=1024, default='', editable=False, ) - role = models.CharField( # FIXME: Determine from callback or task name. + role = models.CharField( max_length=1024, default='', editable=False, @@ -991,10 +875,31 @@ class JobEvent(CreatedModifiedModel): on_delete=models.SET_NULL, editable=False, ) + parent_uuid = models.CharField( + max_length=1024, + default='', + editable=False, + ) counter = models.PositiveIntegerField( default=0, + editable=False, + ) + stdout = models.TextField( + default='', + editable=False, + ) + verbosity = models.PositiveIntegerField( + default=0, + editable=False, + ) + start_line = models.PositiveIntegerField( + default=0, + editable=False, + ) + end_line = models.PositiveIntegerField( + default=0, + editable=False, ) - def get_absolute_url(self): return reverse('api:job_event_detail', args=(self.pk,)) @@ -1055,130 +960,58 @@ class JobEvent(CreatedModifiedModel): pass return msg - def _find_parent(self): - parent_events = set() - if self.event in ('playbook_on_play_start', 'playbook_on_stats', - 'playbook_on_vars_prompt'): - parent_events.add('playbook_on_start') - elif self.event in ('playbook_on_notify', 'playbook_on_setup', - 'playbook_on_task_start', - 'playbook_on_no_hosts_matched', - 'playbook_on_no_hosts_remaining', - 'playbook_on_import_for_host', - 'playbook_on_not_import_for_host'): - parent_events.add('playbook_on_play_start') - elif self.event.startswith('runner_on_'): - parent_events.add('playbook_on_setup') - parent_events.add('playbook_on_task_start') - if parent_events: - try: - qs = JobEvent.objects.filter(job_id=self.job_id) - if self.pk: - qs = qs.filter(pk__lt=self.pk, event__in=parent_events) - else: - qs = qs.filter(event__in=parent_events) - return qs.order_by('-pk')[0] - except IndexError: - pass - return None - - def save(self, *args, **kwargs): - from awx.main.models.inventory import Host - # If update_fields has been specified, add our field names to it, - # if it hasn't been specified, then we're just doing a normal save. - update_fields = kwargs.get('update_fields', []) - # Skip normal checks on save if we're only updating failed/changed - # flags triggered from a child event. - from_parent_update = kwargs.pop('from_parent_update', False) - if not from_parent_update: - res = self.event_data.get('res', None) - # Workaround for Ansible 1.2, where the runner_on_async_ok event is - # created even when the async task failed. Change the event to be - # correct. 
- if self.event == 'runner_on_async_ok': - try: - if res.get('failed', False) or res.get('rc', 0) != 0: - self.event = 'runner_on_async_failed' - except (AttributeError, TypeError): - pass - if self.event in self.FAILED_EVENTS: - if not self.event_data.get('ignore_errors', False): - self.failed = True - if 'failed' not in update_fields: - update_fields.append('failed') - if isinstance(res, dict) and res.get('changed', False): + def _update_from_event_data(self): + # Update job event model fields from event data. + updated_fields = set() + job = self.job + verbosity = job.verbosity + event_data = self.event_data + res = event_data.get('res', None) + if self.event in self.FAILED_EVENTS and not event_data.get('ignore_errors', False): + self.failed = True + updated_fields.add('failed') + if isinstance(res, dict): + if res.get('changed', False): self.changed = True - if 'changed' not in update_fields: - update_fields.append('changed') - if self.event == 'playbook_on_stats': - try: - failures_dict = self.event_data.get('failures', {}) - dark_dict = self.event_data.get('dark', {}) - self.failed = bool(sum(failures_dict.values()) + - sum(dark_dict.values())) - if 'failed' not in update_fields: - update_fields.append('failed') - changed_dict = self.event_data.get('changed', {}) - self.changed = bool(sum(changed_dict.values())) - if 'changed' not in update_fields: - update_fields.append('changed') - except (AttributeError, TypeError): - pass - self.play = self.event_data.get('play', '').strip() - if 'play' not in update_fields: - update_fields.append('play') - self.task = self.event_data.get('task', '').strip() - if 'task' not in update_fields: - update_fields.append('task') - self.role = self.event_data.get('role', '').strip() - if 'role' not in update_fields: - update_fields.append('role') - self.host_name = self.event_data.get('host', '').strip() - if 'host_name' not in update_fields: - update_fields.append('host_name') - # Only update job event hierarchy and related models during post - # processing (after running job). - post_process = kwargs.pop('post_process', False) - if post_process: + updated_fields.add('changed') + # If we're not in verbose mode, wipe out any module arguments. 
+ invocation = res.get('invocation', None) + if isinstance(invocation, dict) and verbosity == 0 and 'module_args' in invocation: + event_data['res']['invocation']['module_args'] = '' + self.event_data = event_data + updated_fields.add('event_data') + if self.event == 'playbook_on_stats': try: - if not self.host_id and self.host_name: - host_qs = Host.objects.filter(inventory__jobs__id=self.job_id, name=self.host_name) - host_id = host_qs.only('id').values_list('id', flat=True) - if host_id.exists(): - self.host_id = host_id[0] - if 'host_id' not in update_fields: - update_fields.append('host_id') - except (IndexError, AttributeError): + failures_dict = event_data.get('failures', {}) + dark_dict = event_data.get('dark', {}) + self.failed = bool(sum(failures_dict.values()) + + sum(dark_dict.values())) + updated_fields.add('failed') + changed_dict = event_data.get('changed', {}) + self.changed = bool(sum(changed_dict.values())) + updated_fields.add('changed') + except (AttributeError, TypeError): pass - if self.parent is None: - self.parent = self._find_parent() - if 'parent' not in update_fields: - update_fields.append('parent') - super(JobEvent, self).save(*args, **kwargs) - if post_process and not from_parent_update: - self.update_parent_failed_and_changed() - # FIXME: The update_hosts() call (and its queries) are the current - # performance bottleneck.... - if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False): - self.update_hosts() - self.update_host_summary_from_stats() + for field in ('playbook', 'play', 'task', 'role', 'host'): + value = force_text(event_data.get(field, '')).strip() + if field == 'host': + field = 'host_name' + if value != getattr(self, field): + setattr(self, field, value) + updated_fields.add(field) + return updated_fields - def update_parent_failed_and_changed(self): - # Propagage failed and changed flags to parent events. - if self.parent: - parent = self.parent - update_fields = [] - if self.failed and not parent.failed: - parent.failed = True - update_fields.append('failed') - if self.changed and not parent.changed: - parent.changed = True - update_fields.append('changed') - if update_fields: - parent.save(update_fields=update_fields, from_parent_update=True) - parent.update_parent_failed_and_changed() + def _update_parents_failed_and_changed(self): + # Update parent events to reflect failed, changed + runner_events = JobEvent.objects.filter(job=self.job, + event__startswith='runner_on') + changed_events = runner_events.filter(changed=True) + failed_events = runner_events.filter(failed=True) + JobEvent.objects.filter(uuid__in=changed_events.values_list('parent_uuid', flat=True)).update(changed=True) + JobEvent.objects.filter(uuid__in=failed_events.values_list('parent_uuid', flat=True)).update(failed=True) - def update_hosts(self, extra_host_pks=None): + def _update_hosts(self, extra_host_pks=None): + # Update job event hosts m2m from host_name, propagate to parent events. 
from awx.main.models.inventory import Host extra_host_pks = set(extra_host_pks or []) hostnames = set() @@ -1192,27 +1025,27 @@ class JobEvent(CreatedModifiedModel): pass qs = Host.objects.filter(inventory__jobs__id=self.job_id) qs = qs.filter(Q(name__in=hostnames) | Q(pk__in=extra_host_pks)) - qs = qs.exclude(job_events__pk=self.id) - for host in qs.only('id'): + qs = qs.exclude(job_events__pk=self.id).only('id') + for host in qs: self.hosts.add(host) - if self.parent: - self.parent.update_hosts(self.hosts.only('id').values_list('id', flat=True)) + if self.parent_uuid: + parent = JobEvent.objects.filter(uuid=self.parent_uuid) + if parent.exists(): + parent = parent[0] + parent._update_hosts(qs.values_list('id', flat=True)) - def update_host_summary_from_stats(self): + def _update_host_summary_from_stats(self): from awx.main.models.inventory import Host - if self.event != 'playbook_on_stats': - return hostnames = set() try: - for v in self.event_data.values(): - hostnames.update(v.keys()) + for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'): + hostnames.update(self.event_data.get(stat, {}).keys()) except AttributeError: # In case event_data or v isn't a dict. pass with ignore_inventory_computed_fields(): qs = Host.objects.filter(inventory__jobs__id=self.job_id, name__in=hostnames) job = self.job - #for host in qs.only('id', 'name'): for host in hostnames: host_stats = {} for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'): @@ -1234,7 +1067,118 @@ class JobEvent(CreatedModifiedModel): if update_fields: host_summary.save(update_fields=update_fields) job.inventory.update_computed_fields() - emit_websocket_notification('/socket.io/jobs', 'summary_complete', dict(unified_job_id=job.id)) + emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job.id)) + + def save(self, *args, **kwargs): + from awx.main.models.inventory import Host + # If update_fields has been specified, add our field names to it, + # if it hasn't been specified, then we're just doing a normal save. + update_fields = kwargs.get('update_fields', []) + # Update model fields and related objects unless we're only updating + # failed/changed flags triggered from a child event. + from_parent_update = kwargs.pop('from_parent_update', False) + if not from_parent_update: + # Update model fields from event data. + updated_fields = self._update_from_event_data() + for field in updated_fields: + if field not in update_fields: + update_fields.append(field) + # Update host related field from host_name. + if not self.host_id and self.host_name: + host_qs = Host.objects.filter(inventory__jobs__id=self.job_id, name=self.host_name) + host_id = host_qs.only('id').values_list('id', flat=True).first() + if host_id != self.host_id: + self.host_id = host_id + if 'host_id' not in update_fields: + update_fields.append('host_id') + super(JobEvent, self).save(*args, **kwargs) + # Update related objects after this event is saved. + if not from_parent_update: + if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False): + self._update_hosts() + if self.event == 'playbook_on_stats': + self._update_parents_failed_and_changed() + self._update_host_summary_from_stats() + + @classmethod + def create_from_data(self, **kwargs): + # Must have a job_id specified. + if not kwargs.get('job_id', None): + return + + # Convert the datetime for the job event's creation appropriately, + # and include a time zone for it. 
# + # In the event of any issue, throw it out, and Django will just save + # the current time. + try: + if not isinstance(kwargs['created'], datetime.datetime): + kwargs['created'] = parse_datetime(kwargs['created']) + if not kwargs['created'].tzinfo: + kwargs['created'] = kwargs['created'].replace(tzinfo=utc) + except (KeyError, ValueError): + kwargs.pop('created', None) + + # Sanity check: Don't honor keys that we don't recognize. + valid_keys = {'job_id', 'event', 'event_data', 'playbook', 'play', + 'role', 'task', 'created', 'counter', 'uuid', 'stdout', + 'parent_uuid', 'start_line', 'end_line', 'verbosity'} + for key in kwargs.keys(): + if key not in valid_keys: + kwargs.pop(key) + + event_data = kwargs.get('event_data', None) + artifact_dict = None + if event_data: + artifact_dict = event_data.pop('artifact_data', None) + + analytics_logger.info('Job event data saved.', extra=dict(event_model_data=kwargs)) + + job_event = JobEvent.objects.create(**kwargs) + + # Save artifact data to parent job (if provided). + if artifact_dict: + if event_data and isinstance(event_data, dict): + # Note: Core has not added support for marking artifacts as + # sensitive yet. Going forward, core will not use + # _ansible_no_log to denote sensitive set_stats calls. + # Instead, they plan to add a flag outside of the traditional + # no_log mechanism. no_log will not work for this feature, + # in core, because sensitive data is scrubbed before sending + # data to the callback. The playbook_on_stats is the callback + # in which the set_stats data is used. + + # Again, the sensitive artifact feature has not yet landed in + # core. The below is how we mark artifacts payload as + # sensitive + # artifact_dict['_ansible_no_log'] = True + # + parent_job = Job.objects.filter(pk=kwargs['job_id']).first() + if parent_job and parent_job.artifacts != artifact_dict: + parent_job.artifacts = artifact_dict + parent_job.save(update_fields=['artifacts']) + + return job_event + + @classmethod + def get_startevent_queryset(cls, parent_task, starting_events, ordering=None): + ''' + We need to pull information about each start event. + + This is super tricky, because this table has a one-to-many + relationship with itself (parent-child), and we're getting + information for an arbitrary number of children. This means we + need stats on grandchildren, sorted by child.
+ ''' + qs = (JobEvent.objects.filter(parent__parent=parent_task, + parent__event__in=starting_events) + .values('parent__id', 'event', 'changed') + .annotate(num=Count('event')) + .order_by('parent__id')) + if ordering is not None: + qs = qs.order_by(ordering) + return qs + class SystemJobOptions(BaseModel): ''' @@ -1257,6 +1201,7 @@ class SystemJobOptions(BaseModel): default='', ) + class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions): class Meta: @@ -1292,7 +1237,7 @@ class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions): any=list(any_notification_templates)) -class SystemJob(UnifiedJob, SystemJobOptions): +class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin): class Meta: app_label = 'main' @@ -1307,10 +1252,10 @@ class SystemJob(UnifiedJob, SystemJobOptions): on_delete=models.SET_NULL, ) - extra_vars = models.TextField( + extra_vars = prevent_search(models.TextField( blank=True, default='', - ) + )) extra_vars_dict = VarsDictProperty('extra_vars', True) @@ -1323,35 +1268,24 @@ class SystemJob(UnifiedJob, SystemJobOptions): from awx.main.tasks import RunSystemJob return RunSystemJob - def socketio_emit_data(self): + def websocket_emit_data(self): return {} def get_absolute_url(self): return reverse('api:system_job_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/management_jobs/{}".format(self.pk)) - - def is_blocked_by(self, obj): - return True - - def handle_extra_data(self, extra_data): - extra_vars = {} - if isinstance(extra_data, dict): - extra_vars = extra_data - elif extra_data is None: - return - else: - if extra_data == "": - return - try: - extra_vars = json.loads(extra_data) - except Exception as e: - logger.warn("Exception deserializing extra vars: " + str(e)) - evars = self.extra_vars_dict - evars.update(extra_vars) - self.update_fields(extra_vars=json.dumps(evars)) + return urljoin(settings.TOWER_URL_BASE, "/#/management_jobs/{}".format(self.pk)) @property def task_impact(self): return 150 + + ''' + JobNotificationMixin + ''' + def get_notification_templates(self): + return self.system_job_template.notification_templates + + def get_notification_friendly_name(self): + return "System Job" diff --git a/awx/main/models/label.py b/awx/main/models/label.py index af9a2241b7..665d0dd98e 100644 --- a/awx/main/models/label.py +++ b/awx/main/models/label.py @@ -12,6 +12,7 @@ from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob __all__ = ('Label', ) + class Label(CommonModelNameNotUnique): ''' Generic Tag. Designed for tagging Job Templates, but expandable to other models. 
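# A sketch of how JobEvent.get_startevent_queryset, added in the jobs.py hunk
# above, might be consumed: per-start-event counts of grandchild runner
# events. 'play_event' and the starting event names are illustrative
# assumptions, not taken from this changeset:
from awx.main.models.jobs import JobEvent

def task_rollup(play_event):
    # Each row carries ('parent__id', 'event', 'changed') plus the 'num'
    # count produced by .annotate(num=Count('event')) in the method above.
    rows = JobEvent.get_startevent_queryset(
        play_event,
        starting_events=('playbook_on_task_start', 'playbook_on_setup'),
    )
    return [(row['parent__id'], row['event'], row['changed'], row['num'])
            for row in rows]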
@@ -37,7 +38,7 @@ class Label(CommonModelNameNotUnique): return \ Label.objects.filter( organization=None, - jobtemplate_labels__isnull=True + unifiedjobtemplate_labels__isnull=True ) def is_detached(self): @@ -55,4 +56,3 @@ class Label(CommonModelNameNotUnique): return True else: return False - diff --git a/awx/main/models/mixins.py b/awx/main/models/mixins.py index 3f9f2043d5..3ae26eaf71 100644 --- a/awx/main/models/mixins.py +++ b/awx/main/models/mixins.py @@ -1,15 +1,22 @@ +# Python +import json + # Django from django.db import models from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User # noqa # AWX +from awx.main.models.base import prevent_search from awx.main.models.rbac import ( Role, RoleAncestorEntry, get_roles_on_resource ) +from awx.main.utils import parse_yaml_or_json +from awx.main.fields import JSONField -__all__ = ['ResourceMixin'] +__all__ = ['ResourceMixin', 'SurveyJobTemplateMixin', 'SurveyJobMixin'] + class ResourceMixin(models.Model): @@ -31,8 +38,12 @@ class ResourceMixin(models.Model): ''' return ResourceMixin._accessible_objects(cls, accessor, role_field) + @classmethod + def accessible_pk_qs(cls, accessor, role_field): + return ResourceMixin._accessible_pk_qs(cls, accessor, role_field) + @staticmethod - def _accessible_objects(cls, accessor, role_field): + def _accessible_pk_qs(cls, accessor, role_field, content_types=None): if type(accessor) == User: ancestor_roles = accessor.roles.all() elif type(accessor) == Role: @@ -41,14 +52,22 @@ class ResourceMixin(models.Model): accessor_type = ContentType.objects.get_for_model(accessor) ancestor_roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id) - qs = cls.objects.filter(pk__in = - RoleAncestorEntry.objects.filter( - ancestor__in=ancestor_roles, - content_type_id = ContentType.objects.get_for_model(cls).id, - role_field = role_field - ).values_list('object_id').distinct() - ) - return qs + + if content_types is None: + ct_kwarg = dict(content_type_id = ContentType.objects.get_for_model(cls).id) + else: + ct_kwarg = dict(content_type_id__in = content_types) + + return RoleAncestorEntry.objects.filter( + ancestor__in = ancestor_roles, + role_field = role_field, + **ct_kwarg + ).values_list('object_id').distinct() + + + @staticmethod + def _accessible_objects(cls, accessor, role_field): + return cls.objects.filter(pk__in = ResourceMixin._accessible_pk_qs(cls, accessor, role_field)) def get_permissions(self, accessor): @@ -60,3 +79,158 @@ class ResourceMixin(models.Model): return get_roles_on_resource(self, accessor) + +class SurveyJobTemplateMixin(models.Model): + class Meta: + abstract = True + + survey_enabled = models.BooleanField( + default=False, + ) + survey_spec = prevent_search(JSONField( + blank=True, + default={}, + )) + + def survey_password_variables(self): + vars = [] + if self.survey_enabled and 'spec' in self.survey_spec: + # Get variables that are type password + for survey_element in self.survey_spec['spec']: + if survey_element['type'] == 'password': + vars.append(survey_element['variable']) + return vars + + @property + def variables_needed_to_start(self): + vars = [] + if self.survey_enabled and 'spec' in self.survey_spec: + for survey_element in self.survey_spec['spec']: + if survey_element['required']: + vars.append(survey_element['variable']) + return vars + + def _update_unified_job_kwargs(self, **kwargs): + ''' + Combine extra_vars with variable precedence order: + JT extra_vars -> JT survey defaults -> runtime 
extra_vars + ''' + # Job Template extra_vars + extra_vars = self.extra_vars_dict + + # transform to dict + if 'extra_vars' in kwargs: + kwargs_extra_vars = kwargs['extra_vars'] + kwargs_extra_vars = parse_yaml_or_json(kwargs_extra_vars) + else: + kwargs_extra_vars = {} + + # Overwrite with job template extra vars with survey default vars + if self.survey_enabled and 'spec' in self.survey_spec: + for survey_element in self.survey_spec.get("spec", []): + default = survey_element.get('default') + variable_key = survey_element.get('variable') + if survey_element.get('type') == 'password': + if variable_key in kwargs_extra_vars and default: + kw_value = kwargs_extra_vars[variable_key] + if kw_value.startswith('$encrypted$') and kw_value != default: + kwargs_extra_vars[variable_key] = default + if default is not None: + extra_vars[variable_key] = default + + # Overwrite job template extra vars with explicit job extra vars + # and add on job extra vars + extra_vars.update(kwargs_extra_vars) + kwargs['extra_vars'] = json.dumps(extra_vars) + return kwargs + + def survey_variable_validation(self, data): + errors = [] + if not self.survey_enabled: + return errors + if 'name' not in self.survey_spec: + errors.append("'name' missing from survey spec.") + if 'description' not in self.survey_spec: + errors.append("'description' missing from survey spec.") + for survey_element in self.survey_spec.get("spec", []): + if survey_element['variable'] not in data and \ + survey_element['required']: + errors.append("'%s' value missing" % survey_element['variable']) + elif survey_element['type'] in ["textarea", "text", "password"]: + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) not in (str, unicode): + errors.append("Value %s for '%s' expected to be a string." % (data[survey_element['variable']], + survey_element['variable'])) + continue + if 'min' in survey_element and survey_element['min'] not in ["", None] and len(data[survey_element['variable']]) < int(survey_element['min']): + errors.append("'%s' value %s is too small (length is %s must be at least %s)." % + (survey_element['variable'], data[survey_element['variable']], len(data[survey_element['variable']]), survey_element['min'])) + if 'max' in survey_element and survey_element['max'] not in ["", None] and len(data[survey_element['variable']]) > int(survey_element['max']): + errors.append("'%s' value %s is too large (must be no more than %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) + elif survey_element['type'] == 'integer': + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) != int: + errors.append("Value %s for '%s' expected to be an integer." % (data[survey_element['variable']], + survey_element['variable'])) + continue + if 'min' in survey_element and survey_element['min'] not in ["", None] and survey_element['variable'] in data and \ + data[survey_element['variable']] < int(survey_element['min']): + errors.append("'%s' value %s is too small (must be at least %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) + if 'max' in survey_element and survey_element['max'] not in ["", None] and survey_element['variable'] in data and \ + data[survey_element['variable']] > int(survey_element['max']): + errors.append("'%s' value %s is too large (must be no more than %s)." 
% + (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) + elif survey_element['type'] == 'float': + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) not in (float, int): + errors.append("Value %s for '%s' expected to be a numeric type." % (data[survey_element['variable']], + survey_element['variable'])) + continue + if 'min' in survey_element and survey_element['min'] not in ["", None] and data[survey_element['variable']] < float(survey_element['min']): + errors.append("'%s' value %s is too small (must be at least %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) + if 'max' in survey_element and survey_element['max'] not in ["", None] and data[survey_element['variable']] > float(survey_element['max']): + errors.append("'%s' value %s is too large (must be no more than %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) + elif survey_element['type'] == 'multiselect': + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) != list: + errors.append("'%s' value is expected to be a list." % survey_element['variable']) + else: + for val in data[survey_element['variable']]: + if val not in survey_element['choices']: + errors.append("Value %s for '%s' expected to be one of %s." % (val, survey_element['variable'], + survey_element['choices'])) + elif survey_element['type'] == 'multiplechoice': + if survey_element['variable'] in data: + if data[survey_element['variable']] not in survey_element['choices']: + errors.append("Value %s for '%s' expected to be one of %s." % (data[survey_element['variable']], + survey_element['variable'], + survey_element['choices'])) + return errors + + +class SurveyJobMixin(models.Model): + class Meta: + abstract = True + + survey_passwords = prevent_search(JSONField( + blank=True, + default={}, + editable=False, + )) + + def display_extra_vars(self): + ''' + Hides fields marked as passwords in survey. 
+ ''' + if self.survey_passwords: + extra_vars = json.loads(self.extra_vars) + for key, value in self.survey_passwords.items(): + if key in extra_vars: + extra_vars[key] = value + return json.dumps(extra_vars) + else: + return self.extra_vars diff --git a/awx/main/models/notifications.py b/awx/main/models/notifications.py index 75fffea642..31b96aa8dd 100644 --- a/awx/main/models/notifications.py +++ b/awx/main/models/notifications.py @@ -18,14 +18,14 @@ from awx.main.notifications.pagerduty_backend import PagerDutyBackend from awx.main.notifications.hipchat_backend import HipChatBackend from awx.main.notifications.webhook_backend import WebhookBackend from awx.main.notifications.irc_backend import IrcBackend +from awx.main.fields import JSONField -# Django-JSONField -from jsonfield import JSONField logger = logging.getLogger('awx.main.models.notifications') __all__ = ['NotificationTemplate', 'Notification'] + class NotificationTemplate(CommonModel): NOTIFICATION_TYPES = [('email', _('Email'), CustomEmailBackend), @@ -75,7 +75,7 @@ class NotificationTemplate(CommonModel): setattr(self, '_saved_{}_{}'.format("config", field), value) self.notification_configuration[field] = '' else: - encrypted = encrypt_field(self, 'notification_configuration', subfield=field) + encrypted = encrypt_field(self, 'notification_configuration', subfield=field, skip_utf8=True) self.notification_configuration[field] = encrypted if 'notification_configuration' not in update_fields: update_fields.append('notification_configuration') @@ -117,6 +117,7 @@ class NotificationTemplate(CommonModel): notification_obj = EmailMessage(subject, backend_obj.format_body(body), sender, recipients) return backend_obj.send_messages([notification_obj]) + class Notification(CreatedModifiedModel): ''' A notification event emitted when a NotificationTemplate is run @@ -171,3 +172,27 @@ class Notification(CreatedModifiedModel): def get_absolute_url(self): return reverse('api:notification_detail', args=(self.pk,)) + + +class JobNotificationMixin(object): + def get_notification_templates(self): + raise RuntimeError("Define me") + + def get_notification_friendly_name(self): + raise RuntimeError("Define me") + + def _build_notification_message(self, status_str): + notification_body = self.notification_data() + notification_subject = u"{} #{} '{}' {} on Ansible Tower: {}".format(self.get_notification_friendly_name(), + self.id, + self.name, + status_str, + notification_body['url']) + notification_body['friendly_name'] = self.get_notification_friendly_name() + return (notification_subject, notification_body) + + def build_notification_succeeded_message(self): + return self._build_notification_message('succeeded') + + def build_notification_failed_message(self): + return self._build_notification_message('failed') diff --git a/awx/main/models/organization.py b/awx/main/models/organization.py index 5f3dc9d7c9..99023c86e1 100644 --- a/awx/main/models/organization.py +++ b/awx/main/models/organization.py @@ -23,7 +23,6 @@ from awx.main.models.rbac import ( ROLE_SINGLETON_SYSTEM_AUDITOR, ) from awx.main.models.mixins import ResourceMixin -from awx.main.conf import tower_settings __all__ = ['Organization', 'Team', 'Permission', 'Profile', 'AuthToken'] @@ -73,7 +72,6 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin): return self.name - class Team(CommonModelNameNotUnique, ResourceMixin): ''' A team is a group of users that work on common projects. 
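# The JobNotificationMixin added in notifications.py above defines a small
# contract: a subclass supplies its notification templates and a friendly
# display name, and the mixin assembles the (subject, body) pair. A minimal
# hypothetical implementer (class name and values are illustrative only, not
# part of this changeset):
from awx.main.models.notifications import JobNotificationMixin

class DemoUnifiedJob(JobNotificationMixin):
    id = 1
    name = 'demo'

    def notification_data(self):
        # Real unified jobs inherit notification_data(); stubbed for the sketch.
        return {'url': 'https://tower.example.com/#/jobs/1'}

    def get_notification_templates(self):
        return {'error': [], 'success': [], 'any': []}

    def get_notification_friendly_name(self):
        return 'Demo Job'

# DemoUnifiedJob().build_notification_succeeded_message() then returns
# (u"Demo Job #1 'demo' succeeded on Ansible Tower: https://...", {...}).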
@@ -192,6 +190,7 @@ class Profile(CreatedModifiedModel): default='', ) + """ Since expiration and session expiration is event driven a token could be invalidated for both reasons. Further, we only support a single reason for a @@ -200,6 +199,8 @@ session token being invalid. For this case, mark the token as expired. Note: Again, because the value of reason is event based. The reason may not be set (i.e. may equal '') even though a session is expired or a limit is reached. """ + + class AuthToken(BaseModel): ''' Custom authentication tokens per user with expiration and request-specific @@ -209,7 +210,7 @@ class AuthToken(BaseModel): REASON_CHOICES = [ ('', _('Token not invalidated')), ('timeout_reached', _('Token is expired')), - ('limit_reached', _('Maximum per-user sessions reached')), + ('limit_reached', _('The maximum number of allowed sessions for this user has been exceeded.')), # invalid_token is not a used data-base value, but is returned by the # api when a token is not found ('invalid_token', _('Invalid token')), @@ -219,12 +220,13 @@ class AuthToken(BaseModel): app_label = 'main' key = models.CharField(max_length=40, primary_key=True) - user = models.ForeignKey('auth.User', related_name='auth_tokens', - on_delete=models.CASCADE) + user = prevent_search(models.ForeignKey('auth.User', + related_name='auth_tokens', on_delete=models.CASCADE)) created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) expires = models.DateTimeField(default=tz_now) - request_hash = models.CharField(max_length=40, blank=True, default='') + request_hash = prevent_search(models.CharField(max_length=40, blank=True, + default='')) reason = models.CharField( max_length=1024, blank=True, @@ -262,13 +264,13 @@ class AuthToken(BaseModel): if not now: now = tz_now() if not self.pk or not self.is_expired(now=now): - self.expires = now + datetime.timedelta(seconds=tower_settings.AUTH_TOKEN_EXPIRATION) + self.expires = now + datetime.timedelta(seconds=settings.AUTH_TOKEN_EXPIRATION) if save: self.save() def invalidate(self, reason='timeout_reached', save=True): if not AuthToken.reason_long(reason): - raise ValueError('Invalid reason specified') + raise ValueError(_('Invalid reason specified')) self.reason = reason if save: self.save() @@ -279,12 +281,12 @@ class AuthToken(BaseModel): if now is None: now = tz_now() invalid_tokens = AuthToken.objects.none() - if tower_settings.AUTH_TOKEN_PER_USER != -1: + if settings.AUTH_TOKEN_PER_USER != -1: invalid_tokens = AuthToken.objects.filter( user=user, expires__gt=now, reason='', - ).order_by('-created')[tower_settings.AUTH_TOKEN_PER_USER:] + ).order_by('-created')[settings.AUTH_TOKEN_PER_USER:] return invalid_tokens def generate_key(self): @@ -313,7 +315,7 @@ class AuthToken(BaseModel): valid_n_tokens_qs = self.user.auth_tokens.filter( expires__gt=now, reason='', - ).order_by('-created')[0:tower_settings.AUTH_TOKEN_PER_USER] + ).order_by('-created')[0:settings.AUTH_TOKEN_PER_USER] valid_n_tokens = valid_n_tokens_qs.values_list('key', flat=True) return bool(self.key in valid_n_tokens) diff --git a/awx/main/models/projects.py b/awx/main/models/projects.py index 93c4a42e36..9897067843 100644 --- a/awx/main/models/projects.py +++ b/awx/main/models/projects.py @@ -12,24 +12,26 @@ from django.conf import settings from django.db import models from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_str, smart_text +from django.utils.text import slugify from django.core.exceptions import ValidationError 
from django.core.urlresolvers import reverse from django.utils.timezone import now, make_aware, get_default_timezone # AWX -from awx.lib.compat import slugify from awx.main.models.base import * # noqa -from awx.main.models.jobs import Job -from awx.main.models.notifications import NotificationTemplate +from awx.main.models.notifications import ( + NotificationTemplate, + JobNotificationMixin, +) from awx.main.models.unified_jobs import * # noqa from awx.main.models.mixins import ResourceMixin from awx.main.utils import update_scm_url from awx.main.fields import ImplicitRoleField -from awx.main.conf import tower_settings from awx.main.models.rbac import ( ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR, ) +from awx.main.fields import JSONField __all__ = ['Project', 'ProjectUpdate'] @@ -76,12 +78,14 @@ class ProjectOptions(models.Model): blank=True, default='', verbose_name=_('SCM Type'), + help_text=_("Specifies the source control system used to store the project."), ) scm_url = models.CharField( max_length=1024, blank=True, default='', verbose_name=_('SCM URL'), + help_text=_("The location where the project is stored."), ) scm_branch = models.CharField( max_length=256, @@ -92,9 +96,11 @@ ) scm_clean = models.BooleanField( default=False, + help_text=_('Discard any local changes before syncing the project.'), ) scm_delete_on_update = models.BooleanField( default=False, + help_text=_('Delete the project before syncing.'), ) credential = models.ForeignKey( 'Credential', @@ -104,6 +110,11 @@ default=None, on_delete=models.SET_NULL, ) + timeout = models.IntegerField( + blank=True, + default=0, + help_text=_("The amount of time (in seconds) to run before the task is canceled."), + ) def clean_scm_type(self): return self.scm_type or '' @@ -116,10 +127,10 @@ scm_url = update_scm_url(self.scm_type, scm_url, check_special_cases=False) except ValueError as e: - raise ValidationError((e.args or ('Invalid SCM URL.',))[0]) + raise ValidationError((e.args or (_('Invalid SCM URL.'),))[0]) scm_url_parts = urlparse.urlsplit(scm_url) if self.scm_type and not any(scm_url_parts): - raise ValidationError('SCM URL is required.') + raise ValidationError(_('SCM URL is required.')) return unicode(self.scm_url or '') def clean_credential(self): @@ -128,7 +139,7 @@ cred = self.credential if cred: if cred.kind != 'scm': - raise ValidationError("Credential kind must be 'scm'.") + raise ValidationError(_("Credential kind must be 'scm'.")) try: scm_url = update_scm_url(self.scm_type, self.scm_url, check_special_cases=False) @@ -143,7 +154,7 @@ update_scm_url(self.scm_type, self.scm_url, scm_username, scm_password) except ValueError as e: - raise ValidationError((e.args or ('Invalid credential.',))[0]) + raise ValidationError((e.args or (_('Invalid credential.'),))[0]) except ValueError: pass return cred @@ -215,10 +226,30 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin): ) scm_update_on_launch = models.BooleanField( default=False, + help_text=_('Update the project when a job is launched that uses the project.'), ) scm_update_cache_timeout = models.PositiveIntegerField( default=0, blank=True, + help_text=_('The number of seconds after the last project update ran that a new ' + 'project update will be launched as a job dependency.'), + ) + + scm_revision = models.CharField( + max_length=1024, + blank=True, + default='', +
editable=False, + verbose_name=_('SCM Revision'), + help_text=_('The last revision fetched by a project update.'), + ) + + playbook_files = JSONField( + blank=True, + default=[], + editable=False, + verbose_name=_('Playbook Files'), + help_text=_('List of playbooks found in the project.'), ) admin_role = ImplicitRoleField(parent_role=[ @@ -249,7 +280,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin): def _get_unified_job_field_names(cls): return ['name', 'description', 'local_path', 'scm_type', 'scm_url', 'scm_branch', 'scm_clean', 'scm_delete_on_update', - 'credential', 'schedule'] + 'credential', 'schedule', 'timeout', 'launch_type',] def save(self, *args, **kwargs): new_instance = not bool(self.pk) @@ -292,10 +323,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin): # inherit the child job status on failure elif self.last_job_failed: return self.last_job.status - # Even on a successful child run, a missing project path overides - # the successful status - elif not self.get_project_path(): - return 'missing' # Return the successful status else: return self.last_job.status @@ -373,7 +400,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin): return reverse('api:project_detail', args=(self.pk,)) -class ProjectUpdate(UnifiedJob, ProjectOptions): +class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin): ''' Internal job for tracking project updates from SCM. ''' @@ -388,6 +415,12 @@ class ProjectUpdate(UnifiedJob, ProjectOptions): editable=False, ) + job_type = models.CharField( + max_length=64, + choices=PROJECT_UPDATE_JOB_TYPE_CHOICES, + default='check', + ) + @classmethod def _get_parent_field_name(cls): return 'project' @@ -397,6 +430,9 @@ class ProjectUpdate(UnifiedJob, ProjectOptions): from awx.main.tasks import RunProjectUpdate return RunProjectUpdate + def _global_timeout_setting(self): + return 'DEFAULT_PROJECT_UPDATE_TIMEOUT' + def is_blocked_by(self, obj): if type(obj) == ProjectUpdate: if self.project == obj.project: @@ -406,8 +442,10 @@ class ProjectUpdate(UnifiedJob, ProjectOptions): return True return False - def socketio_emit_data(self): - return dict(project_id=self.project.id) + def websocket_emit_data(self): + websocket_data = super(ProjectUpdate, self).websocket_emit_data() + websocket_data.update(dict(project_id=self.project.id)) + return websocket_data @property def task_impact(self): @@ -431,11 +469,11 @@ class ProjectUpdate(UnifiedJob, ProjectOptions): return reverse('api:project_update_detail', args=(self.pk,)) def get_ui_url(self): - return urlparse.urljoin(tower_settings.TOWER_URL_BASE, "/#/scm_update/{}".format(self.pk)) + return urlparse.urljoin(settings.TOWER_URL_BASE, "/#/scm_update/{}".format(self.pk)) def _update_parent_instance(self): parent_instance = self._get_parent_instance() - if parent_instance: + if parent_instance and self.job_type == 'check': update_fields = self._update_parent_instance_no_save(parent_instance) if self.status in ('successful', 'failed', 'error', 'canceled'): if not self.failed and parent_instance.scm_delete_on_next_update: @@ -443,3 +481,12 @@ class ProjectUpdate(UnifiedJob, ProjectOptions): if 'scm_delete_on_next_update' not in update_fields: update_fields.append('scm_delete_on_next_update') parent_instance.save(update_fields=update_fields) + + ''' + JobNotificationMixin + ''' + def get_notification_templates(self): + return self.project.notification_templates + + def get_notification_friendly_name(self): + return "Project Update" diff --git a/awx/main/models/rbac.py
b/awx/main/models/rbac.py index 5e040b85a1..39b57b96c5 100644 --- a/awx/main/models/rbac.py +++ b/awx/main/models/rbac.py @@ -10,9 +10,9 @@ import re # Django from django.db import models, transaction, connection from django.core.urlresolvers import reverse -from django.utils.translation import ugettext_lazy as _ from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.fields import GenericForeignKey +from django.utils.translation import ugettext_lazy as _ # AWX @@ -25,6 +25,7 @@ __all__ = [ 'get_roles_on_resource', 'ROLE_SINGLETON_SYSTEM_ADMINISTRATOR', 'ROLE_SINGLETON_SYSTEM_AUDITOR', + 'role_summary_fields_generator' ] logger = logging.getLogger('awx.main.models.rbac') @@ -33,29 +34,29 @@ ROLE_SINGLETON_SYSTEM_ADMINISTRATOR='system_administrator' ROLE_SINGLETON_SYSTEM_AUDITOR='system_auditor' role_names = { - 'system_administrator' : 'System Administrator', - 'system_auditor' : 'System Auditor', - 'adhoc_role' : 'Ad Hoc', - 'admin_role' : 'Admin', - 'auditor_role' : 'Auditor', - 'execute_role' : 'Execute', - 'member_role' : 'Member', - 'read_role' : 'Read', - 'update_role' : 'Update', - 'use_role' : 'Use', + 'system_administrator' : _('System Administrator'), + 'system_auditor' : _('System Auditor'), + 'adhoc_role' : _('Ad Hoc'), + 'admin_role' : _('Admin'), + 'auditor_role' : _('Auditor'), + 'execute_role' : _('Execute'), + 'member_role' : _('Member'), + 'read_role' : _('Read'), + 'update_role' : _('Update'), + 'use_role' : _('Use'), } role_descriptions = { - 'system_administrator' : 'Can manage all aspects of the system', - 'system_auditor' : 'Can view all settings on the system', - 'adhoc_role' : 'May run ad hoc commands on an inventory', - 'admin_role' : 'Can manage all aspects of the %s', - 'auditor_role' : 'Can view all settings for the %s', - 'execute_role' : 'May run the job template', - 'member_role' : 'User is a member of the %s', - 'read_role' : 'May view settings for the %s', - 'update_role' : 'May update project or inventory or group using the configured source update system', - 'use_role' : 'Can use the %s in a job template', + 'system_administrator' : _('Can manage all aspects of the system'), + 'system_auditor' : _('Can view all settings on the system'), + 'adhoc_role' : _('May run ad hoc commands on an inventory'), + 'admin_role' : _('Can manage all aspects of the %s'), + 'auditor_role' : _('Can view all settings for the %s'), + 'execute_role' : _('May run the %s'), + 'member_role' : _('User is a member of the %s'), + 'read_role' : _('May view settings for the %s'), + 'update_role' : _('May update project or inventory or group using the configured source update system'), + 'use_role' : _('Can use the %s in a job template'), } @@ -79,6 +80,7 @@ def check_singleton(func): return func(*args, **kwargs) return wrapper + @contextlib.contextmanager def batch_role_ancestor_rebuilding(allow_nesting=False): ''' @@ -168,8 +170,9 @@ class Role(models.Model): def description(self): global role_descriptions description = role_descriptions[self.role_field] - if '%s' in description and self.content_type: - model = self.content_type.model_class() + content_type = self.content_type + if '%s' in description and content_type: + model = content_type.model_class() model_name = re.sub(r'([a-z])([A-Z])', r'\1 \2', model.__name__).lower() description = description % model_name @@ -376,7 +379,7 @@ class Role(models.Model): 'ancestors_table': Role.ancestors.through._meta.db_table, 'parents_table': Role.parents.through._meta.db_table, 'roles_table': 
Role._meta.db_table, - 'ids': ','.join(str(x) for x in user.roles.values_list('id', flat=True)) + 'ids': ','.join(str(x) for x in user.roles.values_list('id', flat=True)), } qs = Role.objects.extra( @@ -421,6 +424,10 @@ class Role(models.Model): def is_ancestor_of(self, role): return role.ancestors.filter(id=self.id).exists() + def is_singleton(self): + return self.singleton_name in [ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR] + + class RoleAncestorEntry(models.Model): class Meta: @@ -464,3 +471,20 @@ def get_roles_on_resource(resource, accessor): object_id=resource.id ).values_list('role_field', flat=True).distinct() ] + + +def role_summary_fields_generator(content_object, role_field): + global role_descriptions + global role_names + summary = {} + description = role_descriptions[role_field] + content_type = ContentType.objects.get_for_model(content_object) + if '%s' in description and content_type: + model = content_object.__class__ + model_name = re.sub(r'([a-z])([A-Z])', r'\1 \2', model.__name__).lower() + description = description % model_name + + summary['description'] = description + summary['name'] = role_names[role_field] + summary['id'] = getattr(content_object, '{}_id'.format(role_field)) + return summary diff --git a/awx/main/models/schedules.py b/awx/main/models/schedules.py index d9de8b394e..21ecf49916 100644 --- a/awx/main/models/schedules.py +++ b/awx/main/models/schedules.py @@ -10,14 +10,14 @@ import dateutil.rrule from django.db import models from django.db.models.query import QuerySet from django.utils.timezone import now, make_aware, get_default_timezone - -# Django-JSONField -from jsonfield import JSONField +from django.utils.translation import ugettext_lazy as _ # AWX from awx.main.models.base import * # noqa -from awx.main.utils import ignore_inventory_computed_fields, emit_websocket_notification +from awx.main.utils import ignore_inventory_computed_fields +from awx.main.consumers import emit_channel_notification from django.core.urlresolvers import reverse +from awx.main.fields import JSONField logger = logging.getLogger('awx.main.models.schedule') @@ -66,24 +66,29 @@ class Schedule(CommonModel): ) enabled = models.BooleanField( default=True, + help_text=_("Enables processing of this schedule by Tower.") ) dtstart = models.DateTimeField( null=True, default=None, editable=False, + help_text=_("The first occurrence of the schedule occurs on or after this time.") ) dtend = models.DateTimeField( null=True, default=None, editable=False, + help_text=_("The last occurrence of the schedule occurs before this time, afterwards the schedule expires.") ) rrule = models.CharField( max_length=255, + help_text=_("A value representing the schedule's iCal recurrence rule.") ) next_run = models.DateTimeField( null=True, default=None, editable=False, + help_text=_("The next time that the scheduled action will run.") ) extra_data = JSONField( blank=True, @@ -112,7 +117,7 @@ class Schedule(CommonModel): self.dtend = make_aware(datetime.datetime.strptime(until_date, "%Y%m%dT%H%M%SZ"), get_default_timezone()) if 'count' in self.rrule.lower(): self.dtend = future_rs[-1] - emit_websocket_notification('/socket.io/schedules', 'schedule_changed', dict(id=self.id)) + emit_channel_notification('schedules-changed', dict(id=self.id, group_name='schedules')) with ignore_inventory_computed_fields(): self.unified_job_template.update_computed_fields() diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index f39ee35c4c..f17b2b4c55 100644 ---
a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -10,17 +10,17 @@ import os import os.path from collections import OrderedDict from StringIO import StringIO +from datetime import datetime # Django from django.conf import settings -from django.db import models +from django.db import models, connection from django.core.exceptions import NON_FIELD_ERRORS from django.utils.translation import ugettext_lazy as _ from django.utils.timezone import now from django.utils.encoding import smart_text - -# Django-JSONField -from jsonfield import JSONField +from django.apps import apps +from django.contrib.contenttypes.models import ContentType # Django-Polymorphic from polymorphic import PolymorphicModel @@ -31,8 +31,14 @@ from djcelery.models import TaskMeta # AWX from awx.main.models.base import * # noqa from awx.main.models.schedules import Schedule -from awx.main.utils import decrypt_field, emit_websocket_notification, _inventory_updates +from awx.main.models.mixins import ResourceMixin +from awx.main.utils import ( + decrypt_field, _inventory_updates, + copy_model_by_class, copy_m2m_relationships +) from awx.main.redact import UriCleaner, REPLACE_STR +from awx.main.consumers import emit_channel_notification +from awx.main.fields import JSONField __all__ = ['UnifiedJobTemplate', 'UnifiedJob'] @@ -60,12 +66,12 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio ] COMMON_STATUS_CHOICES = JOB_STATUS_CHOICES + [ - ('never updated', 'Never Updated'), # A job has never been run using this template. + ('never updated', _('Never Updated')), # A job has never been run using this template. ] PROJECT_STATUS_CHOICES = COMMON_STATUS_CHOICES + [ - ('ok', 'OK'), # Project is not configured for SCM and path exists. - ('missing', 'Missing'), # Project path does not exist. + ('ok', _('OK')), # Project is not configured for SCM and path exists. + ('missing', _('Missing')), # Project path does not exist. ] INVENTORY_SOURCE_STATUS_CHOICES = COMMON_STATUS_CHOICES + [ @@ -118,10 +124,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio default=None, editable=False, ) - has_schedules = models.BooleanField( - default=False, - editable=False, - ) #on_missed_schedule = models.CharField( # max_length=32, # choices=[], @@ -166,6 +168,32 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio else: return super(UnifiedJobTemplate, self).unique_error_message(model_class, unique_check) + @classmethod + def invalid_user_capabilities_prefetch_models(cls): + if cls != UnifiedJobTemplate: + return [] + return ['project', 'inventorysource', 'systemjobtemplate'] + + @classmethod + def _submodels_with_roles(cls): + ujt_classes = [c for c in cls.__subclasses__() + if c._meta.model_name not in ['inventorysource', 'systemjobtemplate']] + ct_dict = ContentType.objects.get_for_models(*ujt_classes) + return [ct.id for ct in ct_dict.values()] + + @classmethod + def accessible_pk_qs(cls, accessor, role_field): + ''' + A re-implementation of accessible pk queryset for the "normal" unified JTs. + Does not return inventory sources or system JTs, these should + be handled inside of get_queryset where it is utilized. 
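+ Subclasses with their own role fields fall back to the standard ResourceMixin implementation via the early return below.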
+ ''' + # do not use this if in a subclass + if cls != UnifiedJobTemplate: + return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field) + return ResourceMixin._accessible_pk_qs( + cls, accessor, role_field, content_types=cls._submodels_with_roles()) + def _perform_unique_checks(self, unique_checks): # Handle the list of unique fields returned above. Replace with an # appropriate error message for the remaining field(s) in the unique @@ -290,12 +318,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio ''' raise NotImplementedError # Implement in subclass. - def _update_unified_job_kwargs(self, **kwargs): - ''' - Hook for subclasses to update kwargs. - ''' - return kwargs # Override if needed in subclass. - @property def notification_templates(self): ''' @@ -309,59 +331,79 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio Create a new unified job based on this unified job template. ''' unified_job_class = self._get_unified_job_class() + fields = self._get_unified_job_field_names() + unified_job = copy_model_by_class(self, unified_job_class, fields, kwargs) + + # Set the unified job template back-link on the job parent_field_name = unified_job_class._get_parent_field_name() - kwargs.pop('%s_id' % parent_field_name, None) - create_kwargs = {} - m2m_fields = {} - if self.pk: - create_kwargs[parent_field_name] = self - for field_name in self._get_unified_job_field_names(): - # Foreign keys can be specified as field_name or field_name_id. - id_field_name = '%s_id' % field_name - if hasattr(self, id_field_name): - if field_name in kwargs: - value = kwargs[field_name] - elif id_field_name in kwargs: - value = kwargs[id_field_name] - else: - value = getattr(self, id_field_name) - if hasattr(value, 'id'): - value = value.id - create_kwargs[id_field_name] = value - elif field_name in kwargs: - if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict): - create_kwargs[field_name] = json.dumps(kwargs['extra_vars']) - # We can't get a hold of django.db.models.fields.related.ManyRelatedManager to compare - # so this is the next best thing. 
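# The hand-rolled field copying in the removed lines here gives way to the
# copy_model_by_class() helper imported above from awx.main.utils. A rough
# sketch of what such a helper does (simplified; the real one also resolves
# foreign keys and defers many-to-many fields to copy_m2m_relationships):
def copy_model_by_class_sketch(src, DstClass, fields, kwargs):
    create_kwargs = {}
    for field_name in fields:
        if field_name in kwargs:
            create_kwargs[field_name] = kwargs[field_name]  # caller override wins
        elif hasattr(src, field_name):
            create_kwargs[field_name] = getattr(src, field_name)
    return DstClass(**create_kwargs)  # unsaved; the caller is expected to .save()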
- elif kwargs[field_name].__class__.__name__ is 'ManyRelatedManager': - m2m_fields[field_name] = kwargs[field_name] - else: - create_kwargs[field_name] = kwargs[field_name] - elif hasattr(self, field_name): - field_obj = self._meta.get_field_by_name(field_name)[0] - # Many to Many can be specified as field_name - if isinstance(field_obj, models.ManyToManyField): - m2m_fields[field_name] = getattr(self, field_name) - else: - create_kwargs[field_name] = getattr(self, field_name) - new_kwargs = self._update_unified_job_kwargs(**create_kwargs) - unified_job = unified_job_class(**new_kwargs) - # For JobTemplate-based jobs with surveys, save list for perma-redaction - if (hasattr(self, 'survey_spec') and getattr(self, 'survey_enabled', False) and - not getattr(unified_job, 'survey_passwords', False)): + setattr(unified_job, parent_field_name, self) + + # For JobTemplate-based jobs with surveys, add passwords to list for perma-redaction + if hasattr(self, 'survey_spec') and getattr(self, 'survey_enabled', False): password_list = self.survey_password_variables() - hide_password_dict = {} + hide_password_dict = getattr(unified_job, 'survey_passwords', {}) for password in password_list: hide_password_dict[password] = REPLACE_STR unified_job.survey_passwords = hide_password_dict + unified_job.save() - for field_name, src_field_value in m2m_fields.iteritems(): - dest_field = getattr(unified_job, field_name) - dest_field.add(*list(src_field_value.all().values_list('id', flat=True))) + # Labels copied here + copy_m2m_relationships(self, unified_job, fields, kwargs=kwargs) return unified_job + @classmethod + def _get_unified_jt_copy_names(cls): + return cls._get_unified_job_field_names() -class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique): + def copy_unified_jt(self): + ''' + Returns saved object, including related fields. + Create a copy of this unified job template. + ''' + unified_jt_class = self.__class__ + fields = self._get_unified_jt_copy_names() + unified_jt = copy_model_by_class(self, unified_jt_class, fields, {}) + + time_now = datetime.now() + unified_jt.name = unified_jt.name.split('@', 1)[0] + ' @ ' + time_now.strftime('%I:%M:%S %p') + + unified_jt.save() + copy_m2m_relationships(self, unified_jt, fields) + return unified_jt + + +class UnifiedJobTypeStringMixin(object): + @classmethod + def _underscore_to_camel(cls, word): + return ''.join(x.capitalize() or '_' for x in word.split('_')) + + @classmethod + def _camel_to_underscore(cls, word): + return re.sub('(?!^)([A-Z]+)', r'_\1', word).lower() + + @classmethod + def _model_type(cls, job_type): + # Django >= 1.9 + #app = apps.get_app_config('main') + model_str = cls._underscore_to_camel(job_type) + try: + return apps.get_model('main', model_str) + except LookupError: + print("Lookup model error") + return None + + @classmethod + def get_instance_by_type(cls, job_type, job_id): + model = cls._model_type(job_type) + if not model: + return None + return model.objects.get(id=job_id) + + def model_to_str(self): + return UnifiedJobTypeStringMixin._camel_to_underscore(self.__class__.__name__) + + +class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique, UnifiedJobTypeStringMixin): ''' Concrete base class for unified job run by the task engine. ''' @@ -374,6 +416,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique ('callback', _('Callback')), # Job was started via host callback. ('scheduled', _('Scheduled')), # Job was started from a schedule.
('dependency', _('Dependency')), # Job was started as a dependency of another job. + ('workflow', _('Workflow')), # Job was started from a workflow job. + ('sync', _('Sync')), # Job was started from a project sync. ] PASSWORD_FIELDS = ('start_args',) @@ -415,6 +459,12 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique editable=False, related_name='%(class)s_blocked_jobs+', ) + execution_node = models.TextField( + blank=True, + default='', + editable=False, + help_text=_("The Tower node the job executed on."), + ) notifications = models.ManyToManyField( 'Notification', editable=False, @@ -439,43 +489,47 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique null=True, default=None, editable=False, + help_text=_("The date and time the job was queued for starting."), ) finished = models.DateTimeField( null=True, default=None, editable=False, + help_text=_("The date and time the job finished execution."), ) elapsed = models.DecimalField( max_digits=12, decimal_places=3, editable=False, + help_text=_("Elapsed time in seconds that the job ran."), ) - job_args = models.TextField( + job_args = prevent_search(models.TextField( blank=True, default='', editable=False, - ) + )) job_cwd = models.CharField( max_length=1024, blank=True, default='', editable=False, ) - job_env = JSONField( + job_env = prevent_search(JSONField( blank=True, default={}, editable=False, - ) + )) job_explanation = models.TextField( blank=True, default='', editable=False, + help_text=_("A status field to indicate the state of the job if it wasn't able to run and capture stdout"), ) - start_args = models.TextField( + start_args = prevent_search(models.TextField( blank=True, default='', editable=False, - ) + )) result_stdout_text = models.TextField( blank=True, default='', @@ -503,7 +557,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique related_name='%(class)s_labels' ) - def get_absolute_url(self): real_instance = self.get_real_instance() if real_instance != self: @@ -526,6 +579,20 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique def _get_parent_field_name(cls): return 'unified_job_template' # Override in subclasses. + @classmethod + def _get_unified_job_template_class(cls): + ''' + Return subclass of UnifiedJobTemplate that applies to this unified job. + ''' + raise NotImplementedError # Implement in subclass. + + def _global_timeout_setting(self): + "Override in child classes, None value indicates this is not configurable" + return None + + def _resources_sufficient_for_launch(self): + return True + def __unicode__(self): return u'%s-%s-%s' % (self.created, self.id, self.status) @@ -636,6 +703,24 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique pass super(UnifiedJob, self).delete() + def copy_unified_job(self): + ''' + Returns saved object, including related fields. 
+ Create a copy of this unified job for the purpose of relaunch. + ''' + unified_job_class = self.__class__ + unified_jt_class = self._get_unified_job_template_class() + parent_field_name = unified_job_class._get_parent_field_name() + + fields = unified_jt_class._get_unified_job_field_names() + [parent_field_name] + unified_job = copy_model_by_class(self, unified_job_class, fields, {}) + unified_job.launch_type = 'relaunch' + unified_job.save() + + # Labels copied here + copy_m2m_relationships(self, unified_job, fields) + return unified_job + def result_stdout_raw_handle(self, attempt=0): """Return a file-like object containing the standard out of the job's result. @@ -668,8 +753,11 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique return StringIO(msg['missing' if self.finished else 'pending']) def _escape_ascii(self, content): - ansi_escape = re.compile(r'\x1b[^m]*m') - return ansi_escape.sub('', content) + # Remove ANSI escape sequences used to embed event data. + content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content) + # Remove ANSI color escape sequences. + content = re.sub(r'\x1b[^m]*m', '', content) + return content def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False): content = self.result_stdout_raw_handle().read() @@ -725,6 +813,28 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=False): return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True) + @property + def spawned_by_workflow(self): + return self.launch_type == 'workflow' + + @property + def workflow_job_id(self): + if self.spawned_by_workflow: + try: + return self.unified_job_node.workflow_job.pk + except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist: + pass + return None + + @property + def workflow_node_id(self): + if self.spawned_by_workflow: + try: + return self.unified_job_node.pk + except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist: + pass + return None + @property def celery_task(self): try: @@ -737,7 +847,22 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique return [] def handle_extra_data(self, extra_data): - return + if hasattr(self, 'extra_vars'): + extra_vars = {} + if isinstance(extra_data, dict): + extra_vars = extra_data + elif extra_data is None: + return + else: + if extra_data == "": + return + try: + extra_vars = json.loads(extra_data) + except Exception as e: + logger.warn("Exception deserializing extra vars: " + str(e)) + evars = self.extra_vars_dict + evars.update(extra_vars) + self.update_fields(extra_vars=json.dumps(evars)) @property def can_start(self): @@ -747,23 +872,23 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique def task_impact(self): raise NotImplementedError # Implement in subclass. - def is_blocked_by(self, task_object): - ''' Given another task object determine if this task would be blocked by it ''' - raise NotImplementedError # Implement in subclass.
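# The reworked _escape_ascii() above strips two classes of ANSI sequences.
# A small self-contained illustration (the sample strings are made up):
import re

def escape_ascii(content):
    # Remove ANSI escape sequences used to embed event data.
    content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
    # Remove ANSI color escape sequences.
    content = re.sub(r'\x1b[^m]*m', '', content)
    return content

assert escape_ascii('\x1b[0;32mok: [localhost]\x1b[0m') == 'ok: [localhost]'
assert escape_ascii('\x1b[KZGF0YQ==\x1b[8D\x1b[Kchanged') == 'changed'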
- - def socketio_emit_data(self): + def websocket_emit_data(self): ''' Return extra data that should be included when submitting data to the browser over the websocket connection ''' - return {} + websocket_data = dict() + if self.spawned_by_workflow: + websocket_data.update(dict(workflow_job_id=self.workflow_job_id, + workflow_node_id=self.workflow_node_id)) + return websocket_data - def socketio_emit_status(self, status): + def websocket_emit_status(self, status): status_data = dict(unified_job_id=self.id, status=status) - status_data.update(self.socketio_emit_data()) - emit_websocket_notification('/socket.io/jobs', 'status_changed', status_data) + status_data.update(self.websocket_emit_data()) + status_data['group_name'] = 'jobs' + emit_channel_notification('jobs-status_changed', status_data) - def generate_dependencies(self, active_tasks): - ''' Generate any tasks that the current task might be dependent on given a list of active - tasks that might preclude creating one''' - return [] + if self.spawned_by_workflow: + status_data['group_name'] = "workflow_events" + emit_channel_notification('workflow_events-' + str(self.workflow_job_id), status_data) def notification_data(self): return dict(id=self.id, @@ -775,34 +900,46 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique status=self.status, traceback=self.result_traceback) - def start(self, error_callback, success_callback, **kwargs): - ''' - Start the task running via Celery. - ''' - task_class = self._get_task_class() + def pre_start(self, **kwargs): if not self.can_start: - self.job_explanation = u'%s is not in a startable status: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting'))) + self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting'))) self.save(update_fields=['job_explanation']) - return False + return (False, None) + needed = self.get_passwords_needed_to_start() try: start_args = json.loads(decrypt_field(self, 'start_args')) except Exception: start_args = None + if start_args in (None, ''): start_args = kwargs + opts = dict([(field, start_args.get(field, '')) for field in needed]) + if not all(opts.values()): missing_fields = ', '.join([k for k,v in opts.items() if not v]) self.job_explanation = u'Missing needed fields: %s.' % missing_fields self.save(update_fields=['job_explanation']) - return False - #extra_data = dict([(field, kwargs[field]) for field in kwargs - # if field not in needed]) + return (False, None) + if 'extra_vars' in kwargs: self.handle_extra_data(kwargs['extra_vars']) + + return (True, opts) + + def start_celery_task(self, opts, error_callback, success_callback): + task_class = self._get_task_class() task_class().apply_async((self.pk,), opts, link_error=error_callback, link=success_callback) - return True + + def start(self, error_callback, success_callback, **kwargs): + ''' + Start the task running via Celery. + ''' + (res, opts) = self.pre_start(**kwargs) + if res: + self.start_celery_task(opts, error_callback, success_callback) + return res def signal_start(self, **kwargs): """Notify the task runner system to begin work on this task.""" @@ -827,7 +964,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique # Save the pending status, and inform the SocketIO listener. 
self.update_fields(start_args=json.dumps(kwargs), status='pending') - self.socketio_emit_status("pending") + self.websocket_emit_status("pending") + + from awx.main.scheduler.tasks import run_job_launch + connection.on_commit(lambda: run_job_launch.delay(self.id)) # Each type of unified job has a different Task class; get the # appropriate one. @@ -877,7 +1017,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique instance.job_explanation = 'Forced cancel' update_fields.append('job_explanation') instance.save(update_fields=update_fields) - self.socketio_emit_status("canceled") + self.websocket_emit_status("canceled") except: # FIXME: Log this exception! if settings.DEBUG: raise @@ -891,8 +1031,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique self.status = 'canceled' cancel_fields.append('status') self.save(update_fields=cancel_fields) - self.socketio_emit_status("canceled") + self.websocket_emit_status("canceled") if settings.BROKER_URL.startswith('amqp://'): self._force_cancel() return self.cancel_flag - diff --git a/awx/main/models/workflow.py b/awx/main/models/workflow.py new file mode 100644 index 0000000000..874822e013 --- /dev/null +++ b/awx/main/models/workflow.py @@ -0,0 +1,503 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Python +#import urlparse + +# Django +from django.db import models +from django.conf import settings +from django.core.urlresolvers import reverse +#from django import settings as tower_settings + +# AWX +from awx.main.models import prevent_search, UnifiedJobTemplate, UnifiedJob +from awx.main.models.notifications import ( + NotificationTemplate, + JobNotificationMixin +) +from awx.main.models.base import BaseModel, CreatedModifiedModel, VarsDictProperty +from awx.main.models.rbac import ( + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, + ROLE_SINGLETON_SYSTEM_AUDITOR +) +from awx.main.fields import ImplicitRoleField +from awx.main.models.mixins import ResourceMixin, SurveyJobTemplateMixin, SurveyJobMixin +from awx.main.redact import REPLACE_STR +from awx.main.utils import parse_yaml_or_json +from awx.main.fields import JSONField + +from copy import copy +from urlparse import urljoin + +__all__ = ['WorkflowJobTemplate', 'WorkflowJob', 'WorkflowJobOptions', 'WorkflowJobNode', 'WorkflowJobTemplateNode',] + +CHAR_PROMPTS_LIST = ['job_type', 'job_tags', 'skip_tags', 'limit'] + + +class WorkflowNodeBase(CreatedModifiedModel): + class Meta: + abstract = True + app_label = 'main' + + success_nodes = models.ManyToManyField( + 'self', + blank=True, + symmetrical=False, + related_name='%(class)ss_success', + ) + failure_nodes = models.ManyToManyField( + 'self', + blank=True, + symmetrical=False, + related_name='%(class)ss_failure', + ) + always_nodes = models.ManyToManyField( + 'self', + blank=True, + symmetrical=False, + related_name='%(class)ss_always', + ) + unified_job_template = models.ForeignKey( + 'UnifiedJobTemplate', + related_name='%(class)ss', + blank=False, + null=True, + default=None, + on_delete=models.SET_NULL, + ) + # Prompting-related fields + inventory = models.ForeignKey( + 'Inventory', + related_name='%(class)ss', + blank=True, + null=True, + default=None, + on_delete=models.SET_NULL, + ) + credential = models.ForeignKey( + 'Credential', + related_name='%(class)ss', + blank=True, + null=True, + default=None, + on_delete=models.SET_NULL, + ) + char_prompts = JSONField( + blank=True, + default={} + ) + + def prompts_dict(self): + data = {} + if self.inventory:
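+ # FK prompts (inventory, credential) are passed by primary key; free-form prompts come from the char_prompts dict below.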
data['inventory'] = self.inventory.pk + if self.credential: + data['credential'] = self.credential.pk + for fd in CHAR_PROMPTS_LIST: + if fd in self.char_prompts: + data[fd] = self.char_prompts[fd] + return data + + @property + def job_type(self): + return self.char_prompts.get('job_type', None) + + @property + def job_tags(self): + return self.char_prompts.get('job_tags', None) + + @property + def skip_tags(self): + return self.char_prompts.get('skip_tags', None) + + @property + def limit(self): + return self.char_prompts.get('limit', None) + + def get_prompts_warnings(self): + ujt_obj = self.unified_job_template + if ujt_obj is None: + return {} + prompts_dict = self.prompts_dict() + if not hasattr(ujt_obj, '_ask_for_vars_dict'): + if prompts_dict: + return {'ignored': {'all': 'Cannot use prompts on a unified_job_template that is not a job template'}} + else: + return {} + + accepted_fields, ignored_fields = ujt_obj._accept_or_ignore_job_kwargs(**prompts_dict) + + ignored_dict = {} + for fd in ignored_fields: + ignored_dict[fd] = 'Workflow node provided a value for this field, but the job template is not set to ask for it on launch' + scan_errors = ujt_obj._extra_job_type_errors(accepted_fields) + ignored_dict.update(scan_errors) + + data = {} + if ignored_dict: + data['ignored'] = ignored_dict + return data + + def get_parent_nodes(self): + '''Returns queryset containing all parents of this node''' + success_parents = getattr(self, '%ss_success' % self.__class__.__name__.lower()).all() + failure_parents = getattr(self, '%ss_failure' % self.__class__.__name__.lower()).all() + always_parents = getattr(self, '%ss_always' % self.__class__.__name__.lower()).all() + return success_parents | failure_parents | always_parents + + @classmethod + def _get_workflow_job_field_names(cls): + ''' + Return field names that should be copied from template node to job node. + ''' + return ['workflow_job', 'unified_job_template', + 'inventory', 'credential', 'char_prompts'] + + def create_workflow_job_node(self, **kwargs): + ''' + Create a new workflow job node based on this workflow node.
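+ Values passed in kwargs take precedence over the values stored on this node.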
+ ''' + create_kwargs = {} + for field_name in self._get_workflow_job_field_names(): + if field_name in kwargs: + create_kwargs[field_name] = kwargs[field_name] + elif hasattr(self, field_name): + create_kwargs[field_name] = getattr(self, field_name) + return WorkflowJobNode.objects.create(**create_kwargs) + + +class WorkflowJobTemplateNode(WorkflowNodeBase): + workflow_job_template = models.ForeignKey( + 'WorkflowJobTemplate', + related_name='workflow_job_template_nodes', + blank=True, + null=True, + default=None, + on_delete=models.CASCADE, + ) + + def get_absolute_url(self): + return reverse('api:workflow_job_template_node_detail', args=(self.pk,)) + + def create_wfjt_node_copy(self, user, workflow_job_template=None): + ''' + Copy this node to a new WFJT, leaving out related fields the user + is not allowed to access + ''' + create_kwargs = {} + for field_name in self._get_workflow_job_field_names(): + if hasattr(self, field_name): + item = getattr(self, field_name) + if field_name in ['inventory', 'credential']: + if not user.can_access(item.__class__, 'use', item): + continue + if field_name in ['unified_job_template']: + if not user.can_access(item.__class__, 'start', item, validate_license=False): + continue + create_kwargs[field_name] = item + create_kwargs['workflow_job_template'] = workflow_job_template + return self.__class__.objects.create(**create_kwargs) + + +class WorkflowJobNode(WorkflowNodeBase): + job = models.OneToOneField( + 'UnifiedJob', + related_name='unified_job_node', + blank=True, + null=True, + default=None, + on_delete=models.SET_NULL, + ) + workflow_job = models.ForeignKey( + 'WorkflowJob', + related_name='workflow_job_nodes', + blank=True, + null=True, + default=None, + on_delete=models.CASCADE, + ) + ancestor_artifacts = JSONField( + blank=True, + default={}, + editable=False, + ) + + def get_absolute_url(self): + return reverse('api:workflow_job_node_detail', args=(self.pk,)) + + def get_job_kwargs(self): + ''' + In advance of creating a new unified job as part of a workflow, + this method builds the attributes to use. + It alters the node by saving its updated version of + ancestor_artifacts, making it available to subsequent nodes.
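+ Ancestor artifacts seed the job's extra_vars; the workflow job's own extra_vars override them.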
+ ''' + # reject/accept prompted fields + data = {} + ujt_obj = self.unified_job_template + if ujt_obj and hasattr(ujt_obj, '_ask_for_vars_dict'): + accepted_fields, ignored_fields = ujt_obj._accept_or_ignore_job_kwargs(**self.prompts_dict()) + for fd in ujt_obj._extra_job_type_errors(accepted_fields): + accepted_fields.pop(fd) + data.update(accepted_fields) # missing fields are handled in the scheduler + # build ancestor artifacts, save them to node model for later + aa_dict = {} + for parent_node in self.get_parent_nodes(): + aa_dict.update(parent_node.ancestor_artifacts) + if parent_node.job and hasattr(parent_node.job, 'artifacts'): + aa_dict.update(parent_node.job.artifacts) + if aa_dict: + self.ancestor_artifacts = aa_dict + self.save(update_fields=['ancestor_artifacts']) + password_dict = {} + if '_ansible_no_log' in aa_dict: + for key in aa_dict: + if key != '_ansible_no_log': + password_dict[key] = REPLACE_STR + workflow_job_survey_passwords = self.workflow_job.survey_passwords + if workflow_job_survey_passwords: + password_dict.update(workflow_job_survey_passwords) + if password_dict: + data['survey_passwords'] = password_dict + # process extra_vars + extra_vars = {} + if aa_dict: + functional_aa_dict = copy(aa_dict) + functional_aa_dict.pop('_ansible_no_log', None) + extra_vars.update(functional_aa_dict) + # Workflow Job extra_vars take precedence over ancestor artifacts + if self.workflow_job and self.workflow_job.extra_vars: + extra_vars.update(self.workflow_job.extra_vars_dict) + if extra_vars: + data['extra_vars'] = extra_vars + # ensure that unified jobs created by WorkflowJobs are marked with the workflow launch type + data['launch_type'] = 'workflow' + return data + + +class WorkflowJobOptions(BaseModel): + class Meta: + abstract = True + + extra_vars = prevent_search(models.TextField( + blank=True, + default='', + )) + + extra_vars_dict = VarsDictProperty('extra_vars', True) + + @property + def workflow_nodes(self): + raise NotImplementedError() + + def _create_workflow_nodes(self, old_node_list, user=None): + node_links = {} + for old_node in old_node_list: + if user: + new_node = old_node.create_wfjt_node_copy(user, workflow_job_template=self) + else: + new_node = old_node.create_workflow_job_node(workflow_job=self) + node_links[old_node.pk] = new_node + return node_links + + def _inherit_node_relationships(self, old_node_list, node_links): + for old_node in old_node_list: + new_node = node_links[old_node.pk] + for relationship in ['always_nodes', 'success_nodes', 'failure_nodes']: + old_manager = getattr(old_node, relationship) + for old_child_node in old_manager.all(): + new_child_node = node_links[old_child_node.pk] + new_manager = getattr(new_node, relationship) + new_manager.add(new_child_node) + + def copy_nodes_from_original(self, original=None, user=None): + old_node_list = original.workflow_nodes.prefetch_related('always_nodes', 'success_nodes', 'failure_nodes').all() + node_links = self._create_workflow_nodes(old_node_list, user=user) + self._inherit_node_relationships(old_node_list, node_links) + + def create_relaunch_workflow_job(self): + new_workflow_job = self.copy_unified_job() + new_workflow_job.copy_nodes_from_original(original=self) + return new_workflow_job + + +class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin): + class Meta: + app_label = 'main' + + organization = models.ForeignKey( + 'Organization', + blank=True, + null=True, + on_delete=models.SET_NULL, + related_name='workflows', + ) + admin_role =
ImplicitRoleField(parent_role=[ + 'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, + 'organization.admin_role' + ]) + execute_role = ImplicitRoleField(parent_role=[ + 'admin_role' + ]) + read_role = ImplicitRoleField(parent_role=[ + 'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR, + 'organization.auditor_role', 'execute_role', 'admin_role' + ]) + + @property + def workflow_nodes(self): + return self.workflow_job_template_nodes + + @classmethod + def _get_unified_job_class(cls): + return WorkflowJob + + @classmethod + def _get_unified_job_field_names(cls): + return ['name', 'description', 'extra_vars', 'labels', 'survey_passwords', + 'schedule', 'launch_type'] + + @classmethod + def _get_unified_jt_copy_names(cls): + base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names() + base_list.remove('labels') + return (base_list + + ['survey_spec', 'survey_enabled', 'organization']) + + def get_absolute_url(self): + return reverse('api:workflow_job_template_detail', args=(self.pk,)) + + @property + def cache_timeout_blocked(self): + # TODO: don't allow running of job template if same workflow template running + return False + + @property + def notification_templates(self): + base_notification_templates = NotificationTemplate.objects.all() + error_notification_templates = list(base_notification_templates + .filter(unifiedjobtemplate_notification_templates_for_errors__in=[self])) + success_notification_templates = list(base_notification_templates + .filter(unifiedjobtemplate_notification_templates_for_success__in=[self])) + any_notification_templates = list(base_notification_templates + .filter(unifiedjobtemplate_notification_templates_for_any__in=[self])) + return dict(error=list(error_notification_templates), + success=list(success_notification_templates), + any=list(any_notification_templates)) + + def create_unified_job(self, **kwargs): + workflow_job = super(WorkflowJobTemplate, self).create_unified_job(**kwargs) + workflow_job.copy_nodes_from_original(original=self) + return workflow_job + + def _accept_or_ignore_job_kwargs(self, extra_vars=None, **kwargs): + # Only accept allowed survey variables + ignored_fields = {} + prompted_fields = {} + prompted_fields['extra_vars'] = {} + ignored_fields['extra_vars'] = {} + extra_vars = parse_yaml_or_json(extra_vars) + if self.survey_enabled and self.survey_spec: + survey_vars = [question['variable'] for question in self.survey_spec.get('spec', [])] + for key in extra_vars: + if key in survey_vars: + prompted_fields['extra_vars'][key] = extra_vars[key] + else: + ignored_fields['extra_vars'][key] = extra_vars[key] + else: + prompted_fields['extra_vars'] = extra_vars + + return prompted_fields, ignored_fields + + def can_start_without_user_input(self): + '''Return whether WFJT can be launched without survey passwords.''' + return not bool( + self.variables_needed_to_start or + self.node_templates_missing() or + self.node_prompts_rejected()) + + def node_templates_missing(self): + return [node.pk for node in self.workflow_job_template_nodes.filter( + unified_job_template__isnull=True).all()] + + def node_prompts_rejected(self): + node_list = [] + for node in self.workflow_job_template_nodes.prefetch_related('unified_job_template').all(): + node_prompts_warnings = node.get_prompts_warnings() + if node_prompts_warnings: + node_list.append(node.pk) + return node_list + + def user_copy(self, user): + new_wfjt = self.copy_unified_jt() + new_wfjt.copy_nodes_from_original(original=self, user=user) + return new_wfjt + + +class 
WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificationMixin): + class Meta: + app_label = 'main' + ordering = ('id',) + + workflow_job_template = models.ForeignKey( + 'WorkflowJobTemplate', + related_name='workflow_jobs', + blank=True, + null=True, + default=None, + on_delete=models.SET_NULL, + ) + + @property + def workflow_nodes(self): + return self.workflow_job_nodes + + @classmethod + def _get_parent_field_name(cls): + return 'workflow_job_template' + + @classmethod + def _get_unified_job_template_class(cls): + return WorkflowJobTemplate + + def _has_failed(self): + return False + + def socketio_emit_data(self): + return {} + + def get_absolute_url(self): + return reverse('api:workflow_job_detail', args=(self.pk,)) + + def get_ui_url(self): + return urljoin(settings.TOWER_URL_BASE, '/#/workflows/{}'.format(self.pk)) + + def notification_data(self): + result = super(WorkflowJob, self).notification_data() + str_arr = ['Workflow job summary:', ''] + for node in self.workflow_job_nodes.all().select_related('job'): + if node.job is None: + node_job_description = 'no job.' + else: + node_job_description = ('job #{0}, "{1}", which finished with status {2}.' + .format(node.job.id, node.job.name, node.job.status)) + str_arr.append("- node #{0} spawns {1}".format(node.id, node_job_description)) + result['body'] = '\n'.join(str_arr) + return result + + @property + def task_impact(self): + return 0 + + def get_notification_templates(self): + return self.workflow_job_template.notification_templates + + def get_notification_friendly_name(self): + return "Workflow Job" + + ''' + A WorkflowJob is a virtual job. It doesn't result in a celery task. + ''' + def start_celery_task(self, opts, error_callback, success_callback): + return None diff --git a/awx/main/notifications/base.py b/awx/main/notifications/base.py index a68c88ed46..58202f6612 100644 --- a/awx/main/notifications/base.py +++ b/awx/main/notifications/base.py @@ -5,6 +5,8 @@ import json from django.utils.encoding import smart_text from django.core.mail.backends.base import BaseEmailBackend +from django.utils.translation import ugettext_lazy as _ + class TowerBaseEmailBackend(BaseEmailBackend): @@ -12,9 +14,8 @@ class TowerBaseEmailBackend(BaseEmailBackend): if "body" in body: body_actual = body['body'] else: - body_actual = smart_text("{} #{} had status {} on Ansible Tower, view details at {}\n\n".format(body['friendly_name'], - body['id'], - body['status'], - body['url'])) + body_actual = smart_text(_("{} #{} had status {} on Ansible Tower, view details at {}\n\n").format( + body['friendly_name'], body['id'], body['status'], body['url']) + ) body_actual += json.dumps(body, indent=4) return body_actual diff --git a/awx/main/notifications/email_backend.py b/awx/main/notifications/email_backend.py index 7ca5690b28..b3c1db67f7 100644 --- a/awx/main/notifications/email_backend.py +++ b/awx/main/notifications/email_backend.py @@ -5,6 +5,8 @@ import json from django.utils.encoding import smart_text from django.core.mail.backends.smtp import EmailBackend +from django.utils.translation import ugettext_lazy as _ + class CustomEmailBackend(EmailBackend): @@ -23,9 +25,8 @@ class CustomEmailBackend(EmailBackend): if "body" in body: body_actual = body['body'] else: - body_actual = smart_text("{} #{} had status {} on Ansible Tower, view details at {}\n\n".format(body['friendly_name'], - body['id'], - body['status'], - body['url'])) + body_actual = smart_text(_("{} #{} had status {} on Ansible Tower, view details at {}\n\n").format( + 
body['friendly_name'], body['id'], body['status'], body['url']) + ) body_actual += json.dumps(body, indent=4) return body_actual diff --git a/awx/main/notifications/hipchat_backend.py b/awx/main/notifications/hipchat_backend.py index b34c5e5fd1..b286439954 100644 --- a/awx/main/notifications/hipchat_backend.py +++ b/awx/main/notifications/hipchat_backend.py @@ -6,11 +6,12 @@ import logging import requests from django.utils.encoding import smart_text - +from django.utils.translation import ugettext_lazy as _ from awx.main.notifications.base import TowerBaseEmailBackend logger = logging.getLogger('awx.main.notifications.hipchat_backend') + class HipChatBackend(TowerBaseEmailBackend): init_parameters = {"token": {"label": "Token", "type": "password"}, @@ -36,14 +37,15 @@ class HipChatBackend(TowerBaseEmailBackend): for rcp in m.recipients(): r = requests.post("{}/v2/room/{}/notification".format(self.api_url, rcp), params={"auth_token": self.token}, + verify=False, json={"color": self.color, "message": m.subject, "notify": self.notify, "from": m.from_email, "message_format": "text"}) if r.status_code != 204: - logger.error(smart_text("Error sending messages: {}".format(r.text))) + logger.error(smart_text(_("Error sending messages: {}").format(r.text))) if not self.fail_silently: - raise Exception(smart_text("Error sending message to hipchat: {}".format(r.text))) + raise Exception(smart_text(_("Error sending message to hipchat: {}").format(r.text))) sent_messages += 1 return sent_messages diff --git a/awx/main/notifications/irc_backend.py b/awx/main/notifications/irc_backend.py index 61158bbe5d..277364cf07 100644 --- a/awx/main/notifications/irc_backend.py +++ b/awx/main/notifications/irc_backend.py @@ -8,11 +8,12 @@ import logging import irc.client from django.utils.encoding import smart_text - +from django.utils.translation import ugettext_lazy as _ from awx.main.notifications.base import TowerBaseEmailBackend logger = logging.getLogger('awx.main.notifications.irc_backend') + class IrcBackend(TowerBaseEmailBackend): init_parameters = {"server": {"label": "IRC Server Address", "type": "string"}, @@ -50,7 +51,7 @@ class IrcBackend(TowerBaseEmailBackend): connect_factory=connection_factory, ) except irc.client.ServerConnectionError as e: - logger.error(smart_text("Exception connecting to irc server: {}".format(e))) + logger.error(smart_text(_("Exception connecting to irc server: {}").format(e))) if not self.fail_silently: raise return True diff --git a/awx/main/notifications/pagerduty_backend.py b/awx/main/notifications/pagerduty_backend.py index 390fac3d20..76322c18cf 100644 --- a/awx/main/notifications/pagerduty_backend.py +++ b/awx/main/notifications/pagerduty_backend.py @@ -5,11 +5,12 @@ import logging import pygerduty from django.utils.encoding import smart_text - +from django.utils.translation import ugettext_lazy as _ from awx.main.notifications.base import TowerBaseEmailBackend logger = logging.getLogger('awx.main.notifications.pagerduty_backend') + class PagerDutyBackend(TowerBaseEmailBackend): init_parameters = {"subdomain": {"label": "Pagerduty subdomain", "type": "string"}, @@ -35,7 +36,7 @@ class PagerDutyBackend(TowerBaseEmailBackend): except Exception as e: if not self.fail_silently: raise - logger.error(smart_text("Exception connecting to PagerDuty: {}".format(e))) + logger.error(smart_text(_("Exception connecting to PagerDuty: {}").format(e))) for m in messages: try: pager.trigger_incident(m.recipients()[0], @@ -44,7 +45,7 @@ class PagerDutyBackend(TowerBaseEmailBackend): 
client=m.from_email) sent_messages += 1 except Exception as e: - logger.error(smart_text("Exception sending messages: {}".format(e))) + logger.error(smart_text(_("Exception sending messages: {}").format(e))) if not self.fail_silently: raise return sent_messages diff --git a/awx/main/notifications/slack_backend.py b/awx/main/notifications/slack_backend.py index ffc52bc44b..2da5c5d8a3 100644 --- a/awx/main/notifications/slack_backend.py +++ b/awx/main/notifications/slack_backend.py @@ -5,11 +5,12 @@ import logging from slackclient import SlackClient from django.utils.encoding import smart_text - +from django.utils.translation import ugettext_lazy as _ from awx.main.notifications.base import TowerBaseEmailBackend logger = logging.getLogger('awx.main.notifications.slack_backend') + class SlackBackend(TowerBaseEmailBackend): init_parameters = {"token": {"label": "Token", "type": "password"}, @@ -48,7 +49,7 @@ class SlackBackend(TowerBaseEmailBackend): self.connection.rtm_send_message(r, m.subject) sent_messages += 1 except Exception as e: - logger.error(smart_text("Exception sending messages: {}".format(e))) + logger.error(smart_text(_("Exception sending messages: {}").format(e))) if not self.fail_silently: raise return sent_messages diff --git a/awx/main/notifications/twilio_backend.py b/awx/main/notifications/twilio_backend.py index df411c68c5..e1f75837c2 100644 --- a/awx/main/notifications/twilio_backend.py +++ b/awx/main/notifications/twilio_backend.py @@ -6,11 +6,12 @@ import logging from twilio.rest import TwilioRestClient from django.utils.encoding import smart_text - +from django.utils.translation import ugettext_lazy as _ from awx.main.notifications.base import TowerBaseEmailBackend logger = logging.getLogger('awx.main.notifications.twilio_backend') + class TwilioBackend(TowerBaseEmailBackend): init_parameters = {"account_sid": {"label": "Account SID", "type": "string"}, @@ -32,7 +33,7 @@ class TwilioBackend(TowerBaseEmailBackend): except Exception as e: if not self.fail_silently: raise - logger.error(smart_text("Exception connecting to Twilio: {}".format(e))) + logger.error(smart_text(_("Exception connecting to Twilio: {}").format(e))) for m in messages: try: @@ -42,7 +43,7 @@ class TwilioBackend(TowerBaseEmailBackend): body=m.subject) sent_messages += 1 except Exception as e: - logger.error(smart_text("Exception sending messages: {}".format(e))) + logger.error(smart_text(_("Exception sending messages: {}").format(e))) if not self.fail_silently: raise return sent_messages diff --git a/awx/main/notifications/webhook_backend.py b/awx/main/notifications/webhook_backend.py index e74f39f654..8489a90f7b 100644 --- a/awx/main/notifications/webhook_backend.py +++ b/awx/main/notifications/webhook_backend.py @@ -5,12 +5,13 @@ import logging import requests from django.utils.encoding import smart_text - +from django.utils.translation import ugettext_lazy as _ from awx.main.notifications.base import TowerBaseEmailBackend from awx.main.utils import get_awx_version logger = logging.getLogger('awx.main.notifications.webhook_backend') + class WebhookBackend(TowerBaseEmailBackend): init_parameters = {"url": {"label": "Target URL", "type": "string"}, @@ -34,8 +35,8 @@ class WebhookBackend(TowerBaseEmailBackend): json=m.body, headers=self.headers) if r.status_code >= 400: - logger.error(smart_text("Error sending notification webhook: {}".format(r.text))) + logger.error(smart_text(_("Error sending notification webhook: {}").format(r.text))) if not self.fail_silently: - raise Exception(smart_text("Error 
sending notification webhook: {}".format(r.text))) + raise Exception(smart_text(_("Error sending notification webhook: {}").format(r.text))) sent_messages += 1 return sent_messages diff --git a/awx/main/queue.py b/awx/main/queue.py index 9b0b1f60b3..03a7b2a2cf 100644 --- a/awx/main/queue.py +++ b/awx/main/queue.py @@ -1,67 +1,51 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. -import json - -from redis import StrictRedis +# Python +import logging +import os +# Django from django.conf import settings -__all__ = ['FifoQueue'] +# Kombu +from kombu import Connection, Exchange, Producer + +__all__ = ['CallbackQueueDispatcher'] -# Determine, based on settings.BROKER_URL (for celery), what the correct Redis -# connection settings are. -redis_kwargs = {} -broker_url = settings.BROKER_URL -if not broker_url.lower().startswith('redis://'): - raise RuntimeError('Error importing awx.main.queue: Cannot use queue with ' - 'a non-Redis broker configured for celery.\n' - 'Broker is set to: %s' % broker_url) -broker_url = broker_url[8:] +class CallbackQueueDispatcher(object): -# There may or may not be a password; address both situations by checking -# for an "@" in the broker URL. -if '@' in broker_url: - broker_auth, broker_host = broker_url.split('@') - redis_kwargs['password'] = broker_auth.split(':')[1] -else: - broker_host = broker_url + def __init__(self): + self.callback_connection = getattr(settings, 'BROKER_URL', None) + self.connection_queue = getattr(settings, 'CALLBACK_QUEUE', '') + self.connection = None + self.exchange = None + self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher') -# Ignore anything after a / in the broker host. -broker_host = broker_host.split('/')[0] + def dispatch(self, obj): + if not self.callback_connection or not self.connection_queue: + return + active_pid = os.getpid() + for retry_count in xrange(4): + try: + if not hasattr(self, 'connection_pid'): + self.connection_pid = active_pid + if self.connection_pid != active_pid: + self.connection = None + if self.connection is None: + self.connection = Connection(self.callback_connection) + self.exchange = Exchange(self.connection_queue, type='direct') -# If a custom port is present, parse it out. -if ':' in broker_host: - broker_host, broker_port = broker_host.split(':') - redis_kwargs['port'] = int(broker_port) - -# Now create a StrictRedis object that knows how to connect appropriately. -redis = StrictRedis(broker_host, **redis_kwargs) - - -class FifoQueue(object): - """An abstraction class implemented for a simple push/pull queue. - - Intended to allow alteration of backend details in a single, consistent - way throughout the Tower application. - """ - def __init__(self, queue_name): - """Instantiate a queue object, which is able to interact with a - particular queue. 
-        """
-        self._queue_name = queue_name
-
-    def __len__(self):
-        """Return the length of the Redis list."""
-        return redis.llen(self._queue_name)
-
-    def push(self, value):
-        """Push a value onto the right side of the queue."""
-        redis.rpush(self._queue_name, json.dumps(value))
-
-    def pop(self):
-        """Retrieve a value from the left side of the queue."""
-        answer = redis.lpop(self._queue_name)
-        if answer:
-            return json.loads(answer)
+                producer = Producer(self.connection)
+                producer.publish(obj,
+                                 serializer='json',
+                                 compression='bzip2',
+                                 exchange=self.exchange,
+                                 declare=[self.exchange],
+                                 delivery_mode="persistent" if settings.PERSISTENT_CALLBACK_MESSAGES else "transient",
+                                 routing_key=self.connection_queue)
+                return
+            except Exception as e:
+                self.logger.info('Publish Job Event Exception: %r, retry=%d', e,
+                                 retry_count, exc_info=True)
diff --git a/awx/main/redact.py b/awx/main/redact.py
index 0e9ad0b5d1..5f945ae9b7 100644
--- a/awx/main/redact.py
+++ b/awx/main/redact.py
@@ -3,6 +3,7 @@ import urlparse
 
 REPLACE_STR = '$encrypted$'
 
+
 class UriCleaner(object):
     REPLACE_STR = REPLACE_STR
     # https://regex101.com/r/sV2dO2/2
@@ -51,9 +52,12 @@ class UriCleaner(object):
 
         return redactedtext
 
+
 class PlainTextCleaner(object):
     REPLACE_STR = REPLACE_STR
 
     @staticmethod
     def remove_sensitive(cleartext, sensitive):
+        if sensitive == '':
+            return cleartext
         return re.sub(r'%s' % re.escape(sensitive), '$encrypted$', cleartext)
diff --git a/awx/main/registrar.py b/awx/main/registrar.py
index de6673fc5a..6d0ccfe495 100644
--- a/awx/main/registrar.py
+++ b/awx/main/registrar.py
@@ -1,11 +1,8 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.
 
-import logging
-
 from django.db.models.signals import pre_save, post_save, pre_delete, m2m_changed
 
-logger = logging.getLogger('awx.main.registrar')
 
 class ActivityStreamRegistrar(object):
 
@@ -13,9 +10,7 @@ class ActivityStreamRegistrar(object):
         self.models = []
 
     def connect(self, model):
-        from awx.main.conf import tower_settings
-        if not getattr(tower_settings, 'ACTIVITY_STREAM_ENABLED', True):
-            return
+        # Always register the model; the signal handlers check whether the activity stream is enabled.
        from awx.main.signals import activity_stream_create, activity_stream_update, activity_stream_delete, activity_stream_associate
 
         if model not in self.models:
@@ -44,4 +39,5 @@ class ActivityStreamRegistrar(object):
                 m2m_attr = getattr(model, m2mfield.name)
                 m2m_changed.disconnect(dispatch_uid=str(self.__class__) + str(m2m_attr.through) + "_associate")
 
+
 activity_stream_registrar = ActivityStreamRegistrar()
diff --git a/awx/main/routing.py b/awx/main/routing.py
new file mode 100644
index 0000000000..0a49f25c6c
--- /dev/null
+++ b/awx/main/routing.py
@@ -0,0 +1,8 @@
+from channels.routing import route
+
+
+channel_routing = [
+    route("websocket.connect", "awx.main.consumers.ws_connect", path=r'^/websocket/$'),
+    route("websocket.disconnect", "awx.main.consumers.ws_disconnect", path=r'^/websocket/$'),
+    route("websocket.receive", "awx.main.consumers.ws_receive", path=r'^/websocket/$'),
+]
diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py
new file mode 100644
index 0000000000..a48ca3ad23
--- /dev/null
+++ b/awx/main/scheduler/__init__.py
@@ -0,0 +1,450 @@
+# Copyright (c) 2015 Ansible, Inc.
+# All Rights Reserved + +# Python +from datetime import timedelta +import logging +from sets import Set + +# Django +from django.conf import settings +from django.db import transaction, connection +from django.db.utils import DatabaseError +from django.utils.translation import ugettext_lazy as _ + +# AWX +from awx.main.models import * # noqa +#from awx.main.scheduler.dag_simple import SimpleDAG +from awx.main.scheduler.dag_workflow import WorkflowDAG + +from awx.main.scheduler.dependency_graph import DependencyGraph +from awx.main.scheduler.partial import ( + JobDict, + ProjectUpdateDict, + ProjectUpdateLatestDict, + InventoryUpdateDict, + InventoryUpdateLatestDict, + InventorySourceDict, + SystemJobDict, + AdHocCommandDict, + WorkflowJobDict, +) +from awx.main.tasks import _send_notification_templates + +# Celery +from celery.task.control import inspect + +logger = logging.getLogger('awx.main.scheduler') + + +class TaskManager(): + def __init__(self): + self.graph = DependencyGraph() + self.capacity_total = Instance.objects.total_capacity() + self.capacity_used = 0 + + def get_tasks(self): + status_list = ('pending', 'waiting', 'running') + + jobs = JobDict.filter_partial(status=status_list) + inventory_updates = InventoryUpdateDict.filter_partial(status=status_list) + project_updates = ProjectUpdateDict.filter_partial(status=status_list) + system_jobs = SystemJobDict.filter_partial(status=status_list) + ad_hoc_commands = AdHocCommandDict.filter_partial(status=status_list) + workflow_jobs = WorkflowJobDict.filter_partial(status=status_list) + + all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands + workflow_jobs, + key=lambda task: task['created']) + return all_actions + + ''' + Tasks that are running and SHOULD have a celery task. 
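+    Used by run_fail_inconsistent_running_jobs() to reconcile the database's
+    view of running jobs against the tasks Celery actually reports as active.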
+ ''' + def get_running_tasks(self): + status_list = ('running',) + + jobs = JobDict.filter_partial(status=status_list) + inventory_updates = InventoryUpdateDict.filter_partial(status=status_list) + project_updates = ProjectUpdateDict.filter_partial(status=status_list) + system_jobs = SystemJobDict.filter_partial(status=status_list) + ad_hoc_commands = AdHocCommandDict.filter_partial(status=status_list) + + all_actions = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands, + key=lambda task: task['created']) + return all_actions + + # TODO: Consider a database query for this logic + def get_latest_project_update_tasks(self, all_sorted_tasks): + project_ids = Set() + for task in all_sorted_tasks: + if type(task) == JobDict: + project_ids.add(task['project_id']) + + return ProjectUpdateLatestDict.filter_partial(list(project_ids)) + + # TODO: Consider a database query for this logic + def get_latest_inventory_update_tasks(self, all_sorted_tasks): + inventory_ids = Set() + for task in all_sorted_tasks: + if type(task) == JobDict: + inventory_ids.add(task['inventory_id']) + + return InventoryUpdateLatestDict.filter_partial(list(inventory_ids)) + + + def get_running_workflow_jobs(self): + graph_workflow_jobs = [wf for wf in + WorkflowJob.objects.filter(status='running')] + return graph_workflow_jobs + + # TODO: Consider a database query for this logic + def get_inventory_source_tasks(self, all_sorted_tasks): + inventory_ids = Set() + results = [] + for task in all_sorted_tasks: + if type(task) is JobDict: + inventory_ids.add(task['inventory_id']) + + for inventory_id in inventory_ids: + results.append((inventory_id, InventorySourceDict.filter_partial(inventory_id))) + + return results + + def spawn_workflow_graph_jobs(self, workflow_jobs): + for workflow_job in workflow_jobs: + dag = WorkflowDAG(workflow_job) + spawn_nodes = dag.bfs_nodes_to_run() + for spawn_node in spawn_nodes: + if spawn_node.unified_job_template is None: + continue + kv = spawn_node.get_job_kwargs() + job = spawn_node.unified_job_template.create_unified_job(**kv) + spawn_node.job = job + spawn_node.save() + if job._resources_sufficient_for_launch(): + can_start = job.signal_start(**kv) + if not can_start: + job.job_explanation = _("Job spawned from workflow could not start because it " + "was not in the right state or required manual credentials") + else: + can_start = False + job.job_explanation = _("Job spawned from workflow could not start because it " + "was missing a related resource such as project or inventory") + if not can_start: + job.status = 'failed' + job.save(update_fields=['status', 'job_explanation']) + connection.on_commit(lambda: job.websocket_emit_status('failed')) + + # TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ? 
+            #emit_websocket_notification('/socket.io/jobs', '', dict(id=))
+
+    # See comment in tasks.py::RunWorkflowJob::run()
+    def process_finished_workflow_jobs(self, workflow_jobs):
+        result = []
+        for workflow_job in workflow_jobs:
+            dag = WorkflowDAG(workflow_job)
+            if workflow_job.cancel_flag:
+                workflow_job.status = 'canceled'
+                workflow_job.save()
+                dag.cancel_node_jobs()
+                connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
+            elif dag.is_workflow_done():
+                result.append(workflow_job.id)
+                if workflow_job._has_failed():
+                    workflow_job.status = 'failed'
+                else:
+                    workflow_job.status = 'successful'
+                workflow_job.save()
+                connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
+        return result
+
+    def get_active_tasks(self):
+        inspector = inspect()
+        if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):
+            active_task_queues = inspector.active()
+        else:
+            logger.warn("Ignoring celery task inspector")
+            active_task_queues = None
+
+        active_tasks = set()
+        if active_task_queues is not None:
+            for queue in active_task_queues:
+                map(lambda at: active_tasks.add(at['id']), active_task_queues[queue])
+        else:
+            if not hasattr(settings, 'CELERY_UNIT_TEST'):
+                return (None, None)
+
+        return (active_task_queues, active_tasks)
+
+    def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
+        return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]
+
+    def start_task(self, task, dependent_tasks=[]):
+        from awx.main.tasks import handle_work_error, handle_work_success
+
+        task_actual = {
+            'type': task.get_job_type_str(),
+            'id': task['id'],
+        }
+        dependencies = [{'type': t.get_job_type_str(), 'id': t['id']} for t in dependent_tasks]
+
+        error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
+        success_handler = handle_work_success.s(task_actual=task_actual)
+
+        job_obj = task.get_full()
+        '''
+        This is to account for when there isn't enough capacity to execute all
+        dependent jobs (i.e. proj or inv update) within the same schedule()
+        call.
+
+        Subsequent calls to schedule() need to reconstruct the proj or inv
+        update -> job fail logic dependency. The call below reconstructs that
+        failure dependency.
+        '''
+        if len(dependencies) == 0:
+            dependencies = self.get_dependent_jobs_for_inv_and_proj_update(job_obj)
+            job_obj.status = 'waiting'
+
+        (start_status, opts) = job_obj.pre_start()
+        if not start_status:
+            job_obj.status = 'failed'
+            if job_obj.job_explanation:
+                job_obj.job_explanation += ' '
+            job_obj.job_explanation += 'Task failed pre-start check.'
+            job_obj.save()
+            # TODO: run error handler to fail sub-tasks and send notifications
+        else:
+            if type(job_obj) is WorkflowJob:
+                job_obj.status = 'running'
+
+            job_obj.save()
+
+            self.consume_capacity(task)
+
+        def post_commit():
+            job_obj.websocket_emit_status(job_obj.status)
+            if job_obj.status != 'failed':
+                job_obj.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler)
+
+        connection.on_commit(post_commit)
+
+    def process_runnable_tasks(self, runnable_tasks):
+        map(lambda task: self.graph.add_job(task), runnable_tasks)
+
+    def create_project_update(self, task):
+        dep = Project.objects.get(id=task['project_id']).create_project_update(launch_type='dependency')
+
+        # Project update created 1 second behind the job
+        dep.created = task['created'] - timedelta(seconds=1)
+        dep.status = 'pending'
+        dep.save()
+
+        project_task = ProjectUpdateDict.get_partial(dep.id)
+
+        return project_task
+
+    def create_inventory_update(self, task, inventory_source_task):
+        dep = InventorySource.objects.get(id=inventory_source_task['id']).create_inventory_update(launch_type='dependency')
+
+        dep.created = task['created'] - timedelta(seconds=2)
+        dep.status = 'pending'
+        dep.save()
+
+        inventory_task = InventoryUpdateDict.get_partial(dep.id)
+
+        '''
+        Update internal data structures with the newly created inventory update
+        '''
+        # Should be only 1 inventory update: the one for the job (task)
+        latest_inventory_updates = self.get_latest_inventory_update_tasks([task])
+        self.process_latest_inventory_updates(latest_inventory_updates)
+
+        inventory_sources = self.get_inventory_source_tasks([task])
+        self.process_inventory_sources(inventory_sources)
+
+        self.graph.add_job(inventory_task)
+
+        return inventory_task
+
+    '''
+    Since we are dealing with partial objects, we don't get to take advantage
+    of Django resolving the type of the related many-to-many field dependent_jobs.
+
+    Hence the potential double query in this method.
+    '''
+    def get_related_dependent_jobs_as_patials(self, job_ids):
+        dependent_partial_jobs = []
+        for id in job_ids:
+            if ProjectUpdate.objects.filter(id=id).exists():
+                dependent_partial_jobs.append(ProjectUpdateDict({"id": id}).refresh_partial())
+            elif InventoryUpdate.objects.filter(id=id).exists():
+                dependent_partial_jobs.append(InventoryUpdateDict({"id": id}).refresh_partial())
+        return dependent_partial_jobs
+
+    def capture_chain_failure_dependencies(self, task, dependencies):
+        for dep in dependencies:
+            dep_obj = dep.get_full()
+            dep_obj.dependent_jobs.add(task['id'])
+            dep_obj.save()
+        '''
+        if not 'dependent_jobs__id' in task.data:
+            task.data['dependent_jobs__id'] = [dep_obj.data['id']]
+        else:
+            task.data['dependent_jobs__id'].append(dep_obj.data['id'])
+        '''
+
+    def generate_dependencies(self, task):
+        dependencies = []
+        # TODO: What if the project is null?
+        if type(task) is JobDict:
+
+            if task['project__scm_update_on_launch'] is True and \
+                    self.graph.should_update_related_project(task):
+                project_task = self.create_project_update(task)
+                dependencies.append(project_task)
+                # Inventory update is created 2 seconds behind the job
+
+            '''
+            Inventory may have already been synced from a provision callback.
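+            (Sources already updated are recorded in the job's start_args;
+            see JobDict.get_inventory_sources_already_updated().)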
+            '''
+            inventory_sources_already_updated = task.get_inventory_sources_already_updated()
+
+            '''
+            get_inventory_sources() returns only update-on-launch sources
+            '''
+            for inventory_source_task in self.graph.get_inventory_sources(task['inventory_id']):
+                if inventory_source_task['id'] in inventory_sources_already_updated:
+                    continue
+                if self.graph.should_update_related_inventory_source(task, inventory_source_task['id']):
+                    inventory_task = self.create_inventory_update(task, inventory_source_task)
+                    dependencies.append(inventory_task)
+
+        self.capture_chain_failure_dependencies(task, dependencies)
+        return dependencies
+
+    def process_latest_project_updates(self, latest_project_updates):
+        map(lambda task: self.graph.add_latest_project_update(task), latest_project_updates)
+
+    def process_latest_inventory_updates(self, latest_inventory_updates):
+        map(lambda task: self.graph.add_latest_inventory_update(task), latest_inventory_updates)
+
+    def process_inventory_sources(self, inventory_id_sources):
+        map(lambda (inventory_id, inventory_sources): self.graph.add_inventory_sources(inventory_id, inventory_sources), inventory_id_sources)
+
+    def process_dependencies(self, dependent_task, dependency_tasks):
+        for task in dependency_tasks:
+            # A ProjectUpdate or InventoryUpdate may be blocked by another of
+            # the same type.
+            if not self.graph.is_job_blocked(task):
+                self.graph.add_job(task)
+                if not self.would_exceed_capacity(task):
+                    self.start_task(task, [dependent_task])
+            else:
+                self.graph.add_job(task)
+
+    def process_pending_tasks(self, pending_tasks):
+        for task in pending_tasks:
+            # Stop processing tasks if we know we are out of capacity
+            if self.get_remaining_capacity() <= 0:
+                return
+
+            if not self.graph.is_job_blocked(task):
+                dependencies = self.generate_dependencies(task)
+                self.process_dependencies(task, dependencies)
+
+                # Spawning deps might have blocked us
+                if not self.graph.is_job_blocked(task):
+                    self.graph.add_job(task)
+                    if not self.would_exceed_capacity(task):
+                        self.start_task(task)
+                else:
+                    self.graph.add_job(task)
+
+    def process_celery_tasks(self, active_tasks, all_running_sorted_tasks):
+        '''
+        Reconcile the database's and Celery's views of job state.
+        '''
+        for task in all_running_sorted_tasks:
+
+            if (task['celery_task_id'] not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
+                # TODO: try/except the job lookup; the job could have been deleted
+                task_obj = task.get_full()
+                # Ensure the job did not finish running between the time we got the
+                # list of task ids from celery and now.
+                # Note: this re-check is an actual fix, not merely a narrowing of
+                # the window in which this can happen.
+                if task_obj.status != 'running':
+                    continue
+                task_obj.status = 'failed'
+                task_obj.job_explanation += ' '.join((
+                    'Task was marked as running in Tower but was not present in',
+                    'Celery, so it has been marked as failed.',
+                ))
+                task_obj.save()
+                _send_notification_templates(task_obj, 'failed')
+                connection.on_commit(lambda: task_obj.websocket_emit_status('failed'))
+
+                logger.error("Task %s appears orphaned... marking as failed" % task)
+
+
+    def calculate_capacity_used(self, tasks):
+        self.capacity_used = 0
+        for t in tasks:
+            self.capacity_used += t.task_impact()
+
+    def would_exceed_capacity(self, task):
+        if self.capacity_used == 0:
+            return False
+        return (task.task_impact() + self.capacity_used > self.capacity_total)
+
+    def consume_capacity(self, task):
+        self.capacity_used += task.task_impact()
+
+    def get_remaining_capacity(self):
+        return (self.capacity_total - self.capacity_used)
+
+    def process_tasks(self, all_sorted_tasks):
+        running_tasks = filter(lambda t: t['status'] == 'running', all_sorted_tasks)
+        runnable_tasks = filter(lambda t: t['status'] in ['waiting', 'running'], all_sorted_tasks)
+
+        self.calculate_capacity_used(running_tasks)
+
+        self.process_runnable_tasks(runnable_tasks)
+
+        pending_tasks = filter(lambda t: t['status'] == 'pending', all_sorted_tasks)
+        self.process_pending_tasks(pending_tasks)
+
+    def _schedule(self):
+        finished_wfjs = []
+        all_sorted_tasks = self.get_tasks()
+        if len(all_sorted_tasks) > 0:
+            latest_project_updates = self.get_latest_project_update_tasks(all_sorted_tasks)
+            self.process_latest_project_updates(latest_project_updates)
+
+            latest_inventory_updates = self.get_latest_inventory_update_tasks(all_sorted_tasks)
+            self.process_latest_inventory_updates(latest_inventory_updates)
+
+            inventory_id_sources = self.get_inventory_source_tasks(all_sorted_tasks)
+            self.process_inventory_sources(inventory_id_sources)
+
+            running_workflow_tasks = self.get_running_workflow_jobs()
+            finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)
+
+            self.spawn_workflow_graph_jobs(running_workflow_tasks)
+
+            self.process_tasks(all_sorted_tasks)
+        return finished_wfjs
+
+    def schedule(self):
+        with transaction.atomic():
+            # Lock
+            try:
+                Instance.objects.select_for_update(nowait=True).all()[0]
+            except DatabaseError:
+                return
+
+            finished_wfjs = self._schedule()
+
+            # Operations whose queries rely on modifications made during the atomic scheduling session
+            for wfj in WorkflowJob.objects.filter(id__in=finished_wfjs):
+                _send_notification_templates(wfj, 'succeeded' if wfj.status == 'successful' else 'failed')
diff --git a/awx/main/scheduler/dag_simple.py b/awx/main/scheduler/dag_simple.py
new file mode 100644
index 0000000000..1bfb387569
--- /dev/null
+++ b/awx/main/scheduler/dag_simple.py
@@ -0,0 +1,140 @@
+
+from awx.main.models import (
+    Job,
+    AdHocCommand,
+    InventoryUpdate,
+    ProjectUpdate,
+    WorkflowJob,
+    SystemJob,
+)
+
+
+class SimpleDAG(object):
+    ''' A simple implementation of a directed acyclic graph '''
+
+    def __init__(self):
+        self.nodes = []
+        self.edges = []
+
+    def __contains__(self, obj):
+        for node in self.nodes:
+            if node['node_object'] == obj:
+                return True
+        return False
+
+    def __len__(self):
+        return len(self.nodes)
+
+    def __iter__(self):
+        return self.nodes.__iter__()
+
+    def generate_graphviz_plot(self):
+        def short_string_obj(obj):
+            if type(obj) == Job:
+                type_str = "Job"
+            elif type(obj) == AdHocCommand:
+                type_str = "AdHocCommand"
+            elif type(obj) == InventoryUpdate:
+                type_str = "Inventory"
+            elif type(obj) == ProjectUpdate:
+                type_str = "Project"
+            elif type(obj) == WorkflowJob:
+                type_str = "Workflow"
+            else:
+                type_str = "Unknown"
+            type_str += "%s" % str(obj.id)
+            return type_str
+
+        doc = """
+        digraph g {
+        rankdir = LR
+        """
+        for n in self.nodes:
+            doc += "%s [color = %s]\n" % (
+                short_string_obj(n['node_object']),
+                "red" if n['node_object'].status == 'running' else "black",
+            )
+        for from_node, to_node, label in self.edges:
+            doc += "%s -> %s [ label=\"%s\" ];\n" % (
+                short_string_obj(self.nodes[from_node]['node_object']),
+                short_string_obj(self.nodes[to_node]['node_object']),
+                label,
+            )
+        doc += "}\n"
+        gv_file = open('/tmp/graph.gv', 'w')
+        gv_file.write(doc)
+        gv_file.close()
+
+    def add_node(self, obj, metadata=None):
+        if self.find_ord(obj) is None:
+            self.nodes.append(dict(node_object=obj, metadata=metadata))
+
+    def add_edge(self, from_obj, to_obj, label=None):
+        from_obj_ord = self.find_ord(from_obj)
+        to_obj_ord = self.find_ord(to_obj)
+        if from_obj_ord is None or to_obj_ord is None:
+            raise LookupError("Object not found")
+        self.edges.append((from_obj_ord, to_obj_ord, label))
+
+    def add_edges(self, edgelist):
+        for edge_pair in edgelist:
+            self.add_edge(edge_pair[0], edge_pair[1], edge_pair[2])
+
+    def find_ord(self, obj):
+        for idx in range(len(self.nodes)):
+            if obj == self.nodes[idx]['node_object']:
+                return idx
+        return None
+
+    def get_node_type(self, obj):
+        if type(obj) == Job:
+            return "job"
+        elif type(obj) == AdHocCommand:
+            return "ad_hoc_command"
+        elif type(obj) == InventoryUpdate:
+            return "inventory_update"
+        elif type(obj) == ProjectUpdate:
+            return "project_update"
+        elif type(obj) == SystemJob:
+            return "system_job"
+        elif type(obj) == WorkflowJob:
+            return "workflow_job"
+        return "unknown"
+
+    def get_dependencies(self, obj, label=None):
+        antecedents = []
+        this_ord = self.find_ord(obj)
+        for node, dep, lbl in self.edges:
+            if label:
+                if node == this_ord and lbl == label:
+                    antecedents.append(self.nodes[dep])
+            else:
+                if node == this_ord:
+                    antecedents.append(self.nodes[dep])
+        return antecedents
+
+    def get_dependents(self, obj, label=None):
+        descendants = []
+        this_ord = self.find_ord(obj)
+        for node, dep, lbl in self.edges:
+            if label:
+                if dep == this_ord and lbl == label:
+                    descendants.append(self.nodes[node])
+            else:
+                if dep == this_ord:
+                    descendants.append(self.nodes[node])
+        return descendants
+
+    def get_leaf_nodes(self):
+        leaves = []
+        for n in self.nodes:
+            if len(self.get_dependencies(n['node_object'])) < 1:
+                leaves.append(n)
+        return leaves
+
+    def get_root_nodes(self):
+        roots = []
+        for n in self.nodes:
+            if len(self.get_dependents(n['node_object'])) < 1:
+                roots.append(n)
+        return roots
diff --git a/awx/main/scheduler/dag_workflow.py b/awx/main/scheduler/dag_workflow.py
new file mode 100644
index 0000000000..5fc716584a
--- /dev/null
+++ b/awx/main/scheduler/dag_workflow.py
@@ -0,0 +1,92 @@
+
+# AWX
+from awx.main.scheduler.dag_simple import SimpleDAG
+
+
+class WorkflowDAG(SimpleDAG):
+    def __init__(self, workflow_job=None):
+        super(WorkflowDAG, self).__init__()
+        if workflow_job:
+            self._init_graph(workflow_job)
+
+    def _init_graph(self, workflow_job):
+        node_qs = workflow_job.workflow_job_nodes
+        workflow_nodes = node_qs.prefetch_related('success_nodes', 'failure_nodes', 'always_nodes').all()
+        for workflow_node in workflow_nodes:
+            self.add_node(workflow_node)
+
+        for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']:
+            for workflow_node in workflow_nodes:
+                related_nodes = getattr(workflow_node, node_type).all()
+                for related_node in related_nodes:
+                    self.add_edge(workflow_node, related_node, node_type)
+
+    def bfs_nodes_to_run(self):
+        root_nodes = self.get_root_nodes()
+        nodes = root_nodes
+        nodes_found = []
+
+        for index, n in enumerate(nodes):
+            obj = n['node_object']
+            job = obj.job
+
+            if not job:
+                nodes_found.append(n)
+            # Job is about to run or is running. Hold our horses and wait for
+            # the job to finish.
We can't proceed down the graph path until we + # have the job result. + elif job.status not in ['failed', 'successful']: + continue + elif job.status == 'failed': + children_failed = self.get_dependencies(obj, 'failure_nodes') + children_always = self.get_dependencies(obj, 'always_nodes') + children_all = children_failed + children_always + nodes.extend(children_all) + elif job.status == 'successful': + children_success = self.get_dependencies(obj, 'success_nodes') + children_always = self.get_dependencies(obj, 'always_nodes') + children_all = children_success + children_always + nodes.extend(children_all) + return [n['node_object'] for n in nodes_found] + + def cancel_node_jobs(self): + for n in self.nodes: + obj = n['node_object'] + job = obj.job + + if not job: + continue + elif job.can_cancel: + job.cancel() + + def is_workflow_done(self): + root_nodes = self.get_root_nodes() + nodes = root_nodes + + for index, n in enumerate(nodes): + obj = n['node_object'] + job = obj.job + + if obj.unified_job_template is None: + continue + if not job: + return False + # Job is about to run or is running. Hold our horses and wait for + # the job to finish. We can't proceed down the graph path until we + # have the job result. + elif job.status in ['canceled', 'error']: + continue + elif job.status not in ['failed', 'successful']: + return False + elif job.status == 'failed': + children_failed = self.get_dependencies(obj, 'failure_nodes') + children_always = self.get_dependencies(obj, 'always_nodes') + children_all = children_failed + children_always + nodes.extend(children_all) + elif job.status == 'successful': + children_success = self.get_dependencies(obj, 'success_nodes') + children_always = self.get_dependencies(obj, 'always_nodes') + children_all = children_success + children_always + nodes.extend(children_all) + return True + diff --git a/awx/main/scheduler/dependency_graph.py b/awx/main/scheduler/dependency_graph.py new file mode 100644 index 0000000000..a94a158335 --- /dev/null +++ b/awx/main/scheduler/dependency_graph.py @@ -0,0 +1,220 @@ +from datetime import timedelta +from django.utils.timezone import now as tz_now + +from awx.main.scheduler.partial import ( + JobDict, + ProjectUpdateDict, + InventoryUpdateDict, + SystemJobDict, + AdHocCommandDict, + WorkflowJobDict, +) + + +class DependencyGraph(object): + PROJECT_UPDATES = 'project_updates' + INVENTORY_UPDATES = 'inventory_updates' + + JOB_TEMPLATE_JOBS = 'job_template_jobs' + JOB_PROJECT_IDS = 'job_project_ids' + JOB_INVENTORY_IDS = 'job_inventory_ids' + + SYSTEM_JOB = 'system_job' + INVENTORY_SOURCE_UPDATES = 'inventory_source_updates' + WORKFLOW_JOB_TEMPLATES_JOBS = 'workflow_job_template_jobs' + + LATEST_PROJECT_UPDATES = 'latest_project_updates' + LATEST_INVENTORY_UPDATES = 'latest_inventory_updates' + + INVENTORY_SOURCES = 'inventory_source_ids' + + def __init__(self, *args, **kwargs): + self.data = {} + # project_id -> True / False + self.data[self.PROJECT_UPDATES] = {} + # inventory_id -> True / False + self.data[self.INVENTORY_UPDATES] = {} + # job_template_id -> True / False + self.data[self.JOB_TEMPLATE_JOBS] = {} + + ''' + Track runnable job related project and inventory to ensure updates + don't run while a job needing those resources is running. 
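+        (A value of False in these maps marks the resource as busy; keys that
+        were never marked default to True, i.e. free, in the can_*_run checks.)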
+        '''
+        # project_id -> True / False
+        self.data[self.JOB_PROJECT_IDS] = {}
+        # inventory_id -> True / False
+        self.data[self.JOB_INVENTORY_IDS] = {}
+
+        # inventory_source_id -> True / False
+        self.data[self.INVENTORY_SOURCE_UPDATES] = {}
+        # True / False
+        self.data[self.SYSTEM_JOB] = True
+        # workflow_job_template_id -> True / False
+        self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS] = {}
+
+        # project_id -> latest ProjectUpdateLatestDict
+        self.data[self.LATEST_PROJECT_UPDATES] = {}
+        # inventory_source_id -> latest InventoryUpdateLatestDict
+        self.data[self.LATEST_INVENTORY_UPDATES] = {}
+
+        # inventory_id -> [inventory_source_ids]
+        self.data[self.INVENTORY_SOURCES] = {}
+
+    def add_latest_project_update(self, job):
+        self.data[self.LATEST_PROJECT_UPDATES][job['project_id']] = job
+
+    def add_latest_inventory_update(self, job):
+        self.data[self.LATEST_INVENTORY_UPDATES][job['inventory_source_id']] = job
+
+    def add_inventory_sources(self, inventory_id, inventory_sources):
+        self.data[self.INVENTORY_SOURCES][inventory_id] = inventory_sources
+
+    def get_inventory_sources(self, inventory_id):
+        return self.data[self.INVENTORY_SOURCES].get(inventory_id, [])
+
+    def get_now(self):
+        return tz_now()
+
+    '''
+    JobDict
+
+    Presume that the job is related to a project that updates on launch.
+    '''
+    def should_update_related_project(self, job):
+        now = self.get_now()
+
+        # Already processed dependencies for this job
+        if job.data['dependent_jobs__id'] is not None:
+            return False
+
+        latest_project_update = self.data[self.LATEST_PROJECT_UPDATES].get(job['project_id'], None)
+        if not latest_project_update:
+            return True
+
+        # TODO: other finished/failed cases, e.g. 'error'?
+        if latest_project_update['status'] in ['failed', 'canceled']:
+            return True
+
+        '''
+        This is a bit of fuzzy logic.
+        If the latest project update has a created time of job_created_time - 1 second,
+        consider the project update found. This prevents an infinite loop of
+        project updates when the cache timeout is 0.
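+        (The task manager's create_project_update() stamps dependency updates
+        with created = job's created time - 1 second, so an exact match means
+        the update was spawned for this very job.)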
+ ''' + if latest_project_update['project__scm_update_cache_timeout'] == 0 and \ + latest_project_update['launch_type'] == 'dependency' and \ + latest_project_update['created'] == job['created'] - timedelta(seconds=1): + return False + + ''' + Normal, expected, cache timeout logic + ''' + timeout_seconds = timedelta(seconds=latest_project_update['project__scm_update_cache_timeout']) + if (latest_project_update['finished'] + timeout_seconds) < now: + return True + + return False + + def should_update_related_inventory_source(self, job, inventory_source_id): + now = self.get_now() + + # Already processed dependencies for this job + if job.data['dependent_jobs__id'] is not None: + return False + + latest_inventory_update = self.data[self.LATEST_INVENTORY_UPDATES].get(inventory_source_id, None) + if not latest_inventory_update: + return True + + ''' + Normal, expected, cache timeout logic + ''' + timeout_seconds = timedelta(seconds=latest_inventory_update['inventory_source__update_cache_timeout']) + if (latest_inventory_update['finished'] + timeout_seconds) < now: + return True + + if latest_inventory_update['inventory_source__update_on_launch'] is True and \ + latest_inventory_update['status'] in ['failed', 'canceled', 'error']: + return True + + return False + + def mark_system_job(self): + self.data[self.SYSTEM_JOB] = False + + def mark_project_update(self, job): + self.data[self.PROJECT_UPDATES][job['project_id']] = False + + def mark_inventory_update(self, inventory_id): + self.data[self.INVENTORY_UPDATES][inventory_id] = False + + def mark_inventory_source_update(self, inventory_source_id): + self.data[self.INVENTORY_SOURCE_UPDATES][inventory_source_id] = False + + def mark_job_template_job(self, job): + self.data[self.JOB_INVENTORY_IDS][job['inventory_id']] = False + self.data[self.JOB_PROJECT_IDS][job['project_id']] = False + self.data[self.JOB_TEMPLATE_JOBS][job['job_template_id']] = False + + def mark_workflow_job(self, job): + self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS][job['workflow_job_template_id']] = False + + def can_project_update_run(self, job): + return self.data[self.JOB_PROJECT_IDS].get(job['project_id'], True) and \ + self.data[self.PROJECT_UPDATES].get(job['project_id'], True) + + def can_inventory_update_run(self, job): + return self.data[self.JOB_INVENTORY_IDS].get(job['inventory_source__inventory_id'], True) and \ + self.data[self.INVENTORY_SOURCE_UPDATES].get(job['inventory_source_id'], True) + + def can_job_run(self, job): + if self.data[self.PROJECT_UPDATES].get(job['project_id'], True) is True and \ + self.data[self.INVENTORY_UPDATES].get(job['inventory_id'], True) is True: + if job['allow_simultaneous'] is False: + return self.data[self.JOB_TEMPLATE_JOBS].get(job['job_template_id'], True) + else: + return True + return False + + def can_workflow_job_run(self, job): + return self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS].get(job['workflow_job_template_id'], True) + + def can_system_job_run(self): + return self.data[self.SYSTEM_JOB] + + def can_ad_hoc_command_run(self, job): + return self.data[self.INVENTORY_UPDATES].get(job['inventory_id'], True) + + def is_job_blocked(self, job): + if type(job) is ProjectUpdateDict: + return not self.can_project_update_run(job) + elif type(job) is InventoryUpdateDict: + return not self.can_inventory_update_run(job) + elif type(job) is JobDict: + return not self.can_job_run(job) + elif type(job) is SystemJobDict: + return not self.can_system_job_run() + elif type(job) is AdHocCommandDict: + return not self.can_ad_hoc_command_run(job) 
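+        # A workflow job is blocked only by another job from the same
+        # workflow job template.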
+ elif type(job) is WorkflowJobDict: + return not self.can_workflow_job_run(job) + + def add_job(self, job): + if type(job) is ProjectUpdateDict: + self.mark_project_update(job) + elif type(job) is InventoryUpdateDict: + self.mark_inventory_update(job['inventory_source__inventory_id']) + self.mark_inventory_source_update(job['inventory_source_id']) + elif type(job) is JobDict: + self.mark_job_template_job(job) + elif type(job) is WorkflowJobDict: + self.mark_workflow_job(job) + elif type(job) is SystemJobDict: + self.mark_system_job() + elif type(job) is AdHocCommandDict: + self.mark_inventory_update(job['inventory_id']) + + def add_jobs(self, jobs): + map(lambda j: self.add_job(j), jobs) + diff --git a/awx/main/scheduler/partial.py b/awx/main/scheduler/partial.py new file mode 100644 index 0000000000..6cb87add15 --- /dev/null +++ b/awx/main/scheduler/partial.py @@ -0,0 +1,275 @@ + +# Python +import json +import itertools + +# AWX +from awx.main.utils import decrypt_field_value +from awx.main.models import ( + Job, + ProjectUpdate, + InventoryUpdate, + InventorySource, + SystemJob, + AdHocCommand, + WorkflowJob, +) + + +class PartialModelDict(object): + FIELDS = () + model = None + data = None + + def __init__(self, data): + if type(data) is not dict: + raise RuntimeError("Expected data to be of type dict not %s" % type(data)) + self.data = data + + def __getitem__(self, index): + return self.data[index] + + def __setitem__(self, key, value): + self.data[key] = value + + def get(self, key, **kwargs): + return self.data.get(key, **kwargs) + + def get_full(self): + return self.model.objects.get(id=self.data['id']) + + def refresh_partial(self): + return self.__class__(self.model.objects.filter(id=self.data['id']).values(*self.__class__.get_db_values())[0]) + + @classmethod + def get_partial(cls, id): + return cls(cls.model.objects.filter(id=id).values(*cls.get_db_values())[0]) + + @classmethod + def get_db_values(cls): + return cls.FIELDS + + @classmethod + def filter_partial(cls, status=[]): + kv = { + 'status__in': status + } + return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] + + def get_job_type_str(self): + raise RuntimeError("Inherit and implement me") + + def task_impact(self): + raise RuntimeError("Inherit and implement me") + + @classmethod + def merge_values(cls, values): + grouped_results = itertools.groupby(values, key=lambda value: value['id']) + + merged_values = [] + for k, g in grouped_results: + groups = list(g) + merged_value = {} + for group in groups: + for key, val in group.iteritems(): + if not merged_value.get(key): + merged_value[key] = val + elif val != merged_value[key]: + if isinstance(merged_value[key], list): + if val not in merged_value[key]: + merged_value[key].append(val) + else: + old_val = merged_value[key] + merged_value[key] = [old_val, val] + merged_values.append(merged_value) + return merged_values + + +class JobDict(PartialModelDict): + FIELDS = ( + 'id', 'status', 'job_template_id', 'inventory_id', 'project_id', + 'launch_type', 'limit', 'allow_simultaneous', 'created', + 'job_type', 'celery_task_id', 'project__scm_update_on_launch', + 'forks', 'start_args', 'dependent_jobs__id', + ) + model = Job + + def get_job_type_str(self): + return 'job' + + def task_impact(self): + return (5 if self.data['forks'] == 0 else self.data['forks']) * 10 + + def get_inventory_sources_already_updated(self): + try: + start_args = json.loads(decrypt_field_value(self.data['id'], 'start_args', self.data['start_args'])) + except Exception: 
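+            # start_args may be absent or fail to decrypt; assume no inventory
+            # sources were already updated.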
+            return []
+        start_args = start_args or {}
+        return start_args.get('inventory_sources_already_updated', [])
+
+    @classmethod
+    def filter_partial(cls, status=[]):
+        kv = {
+            'status__in': status
+        }
+        merged = PartialModelDict.merge_values(cls.model.objects.filter(**kv).values(*cls.get_db_values()))
+        return [cls(o) for o in merged]
+
+
+class ProjectUpdateDict(PartialModelDict):
+    FIELDS = (
+        'id', 'status', 'project_id', 'created', 'celery_task_id',
+        'launch_type', 'project__scm_update_cache_timeout',
+        'project__scm_update_on_launch',
+    )
+    model = ProjectUpdate
+
+    def get_job_type_str(self):
+        return 'project_update'
+
+    def task_impact(self):
+        return 10
+
+    @classmethod
+    def filter_partial(cls, status=[]):
+        kv = {
+            'status__in': status,
+            'job_type': 'check',
+        }
+        return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())]
+
+
+class ProjectUpdateLatestDict(ProjectUpdateDict):
+    FIELDS = (
+        'id', 'status', 'project_id', 'created', 'finished',
+        'project__scm_update_cache_timeout',
+        'launch_type', 'project__scm_update_on_launch',
+    )
+    model = ProjectUpdate
+
+    @classmethod
+    def filter_partial(cls, project_ids):
+        # TODO: This can surely be made more efficient
+        # * shouldn't have to do a query per project_id
+        # * shouldn't have to call .values() on all the results, only to get the first result
+        results = []
+        for project_id in project_ids:
+            qs = cls.model.objects.filter(project_id=project_id, status__in=['waiting', 'successful', 'failed']).order_by('-finished', '-started', '-created',)
+            if qs.count() > 0:
+                results.append(cls(cls.model.objects.filter(id=qs[0].id).values(*cls.get_db_values())[0]))
+        return results
+
+
+class InventoryUpdateDict(PartialModelDict):
+    #'inventory_source__update_on_launch',
+    #'inventory_source__update_cache_timeout',
+    FIELDS = (
+        'id', 'status', 'created', 'celery_task_id', 'inventory_source_id',
+        'inventory_source__inventory_id',
+    )
+    model = InventoryUpdate
+
+    def get_job_type_str(self):
+        return 'inventory_update'
+
+    def task_impact(self):
+        return 20
+
+
+class InventoryUpdateLatestDict(InventoryUpdateDict):
+    #'inventory_source__update_on_launch',
+    #'inventory_source__update_cache_timeout',
+    FIELDS = (
+        'id', 'status', 'created', 'celery_task_id', 'inventory_source_id',
+        'finished', 'inventory_source__update_cache_timeout', 'launch_type',
+        'inventory_source__update_on_launch',
+    )
+    model = InventoryUpdate
+
+    @classmethod
+    def filter_partial(cls, inventory_ids):
+        # TODO: This can surely be made more efficient
+        # * shouldn't have to do a query per inventory_id nor per inventory_source_id
+        # * shouldn't have to call .values() on all the results, only to get the first result
+        results = []
+        for inventory_id in inventory_ids:
+            inventory_source_ids = InventorySource.objects.filter(inventory_id=inventory_id,
+                                                                  update_on_launch=True).values_list('id', flat=True)
+            # Find the most recent inventory update for each inventory source
+            for inventory_source_id in inventory_source_ids:
+                qs = cls.model.objects.filter(inventory_source_id=inventory_source_id,
+                                              status__in=['waiting', 'successful', 'failed'],
+                                              inventory_source__update_on_launch=True).order_by('-finished', '-started', '-created')
+                if qs.count() > 0:
+                    results.append(cls(cls.model.objects.filter(id=qs[0].id).values(*cls.get_db_values())[0]))
+        return results
+
+
+class InventorySourceDict(PartialModelDict):
+    FIELDS = (
+        'id',
+    )
+    model = InventorySource
+
+    def get_job_type_str(self):
+        return 'inventory_source'
+
+    def
task_impact(self): + return 20 + + @classmethod + # TODO: Optimize this to run the query once + def filter_partial(cls, inventory_id): + kv = { + 'inventory_id': inventory_id, + 'update_on_launch': True, + } + return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] + + +class SystemJobDict(PartialModelDict): + FIELDS = ( + 'id', 'created', 'status', 'celery_task_id', + ) + model = SystemJob + + def get_job_type_str(self): + return 'system_job' + + def task_impact(self): + return 20 + + @classmethod + def filter_partial(cls, status=[]): + kv = { + 'status__in': status + } + return [cls(o) for o in cls.model.objects.filter(**kv).values(*cls.get_db_values())] + + +class AdHocCommandDict(PartialModelDict): + FIELDS = ( + 'id', 'created', 'status', 'inventory_id', 'dependent_jobs__id', 'celery_task_id', + ) + model = AdHocCommand + + def get_job_type_str(self): + return 'ad_hoc_command' + + def task_impact(self): + return 20 + + +class WorkflowJobDict(PartialModelDict): + FIELDS = ( + 'id', 'created', 'status', 'workflow_job_template_id', + ) + model = WorkflowJob + + def get_job_type_str(self): + return 'workflow_job' + + def task_impact(self): + return 0 + diff --git a/awx/main/scheduler/tasks.py b/awx/main/scheduler/tasks.py new file mode 100644 index 0000000000..6e169224b7 --- /dev/null +++ b/awx/main/scheduler/tasks.py @@ -0,0 +1,59 @@ + +# Python +import logging +import json + +# Django +from django.db import transaction +from django.db.utils import DatabaseError + +# Celery +from celery import task + +# AWX +from awx.main.models import Instance +from awx.main.scheduler import TaskManager +from django.core.cache import cache + +logger = logging.getLogger('awx.main.scheduler') + +# TODO: move logic to UnifiedJob model and use bind=True feature of celery. +# Would we need the request loop then? I think so. Even if we get the in-memory +# updated model, the call to schedule() may get stale data. + + +@task +def run_job_launch(job_id): + TaskManager().schedule() + + +@task +def run_job_complete(job_id): + TaskManager().schedule() + + +@task +def run_task_manager(): + logger.debug("Running Tower task manager.") + TaskManager().schedule() + + +@task +def run_fail_inconsistent_running_jobs(): + logger.debug("Running task to fail inconsistent running jobs.") + with transaction.atomic(): + # Lock + try: + Instance.objects.select_for_update(nowait=True).all()[0] + scheduler = TaskManager() + active_task_queues, active_tasks = scheduler.get_active_tasks() + cache.set("active_celery_tasks", json.dumps(active_task_queues)) + if active_tasks is None: + # TODO: Failed to contact celery. We should surface this. 
+ return None + + all_running_sorted_tasks = scheduler.get_running_tasks() + scheduler.process_celery_tasks(active_tasks, all_running_sorted_tasks) + except DatabaseError: + return + diff --git a/awx/main/signals.py b/awx/main/signals.py index 7389f01763..20dcd2dcd6 100644 --- a/awx/main/signals.py +++ b/awx/main/signals.py @@ -8,6 +8,7 @@ import threading import json # Django +from django.conf import settings from django.db.models.signals import post_save, pre_delete, post_delete, m2m_changed from django.dispatch import receiver @@ -18,10 +19,11 @@ from crum.signals import current_user_getter # AWX from awx.main.models import * # noqa from awx.api.serializers import * # noqa -from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, emit_websocket_notification +from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates from awx.main.tasks import update_inventory_computed_fields -from awx.main.conf import tower_settings + +from awx.main.consumers import emit_channel_notification __all__ = [] @@ -30,6 +32,7 @@ logger = logging.getLogger('awx.main.signals') # Update has_active_failures for inventory/groups when a Host/Group is deleted, # when a Host-Group or Group-Group relationship is updated, or when a Job is deleted + def emit_job_event_detail(sender, **kwargs): instance = kwargs['instance'] created = kwargs['created'] @@ -39,7 +42,9 @@ def emit_job_event_detail(sender, **kwargs): event_serialized["created"] = event_serialized["created"].isoformat() event_serialized["modified"] = event_serialized["modified"].isoformat() event_serialized["event_name"] = instance.event - emit_websocket_notification('/socket.io/job_events', 'job_events-' + str(instance.job.id), event_serialized) + event_serialized["group_name"] = "job_events" + emit_channel_notification('job_events-' + str(instance.job.id), event_serialized) + def emit_ad_hoc_command_event_detail(sender, **kwargs): instance = kwargs['instance'] @@ -50,7 +55,9 @@ def emit_ad_hoc_command_event_detail(sender, **kwargs): event_serialized["created"] = event_serialized["created"].isoformat() event_serialized["modified"] = event_serialized["modified"].isoformat() event_serialized["event_name"] = instance.event - emit_websocket_notification('/socket.io/ad_hoc_command_events', 'ad_hoc_command_events-' + str(instance.ad_hoc_command_id), event_serialized) + event_serialized["group_name"] = "ad_hoc_command_events" + emit_channel_notification('ad_hoc_command_events-' + str(instance.ad_hoc_command_id), event_serialized) + def emit_update_inventory_computed_fields(sender, **kwargs): logger.debug("In update inventory computed fields") @@ -86,6 +93,7 @@ def emit_update_inventory_computed_fields(sender, **kwargs): else: update_inventory_computed_fields.delay(inventory.id, True) + def emit_update_inventory_on_created_or_deleted(sender, **kwargs): if getattr(_inventory_updates, 'is_updating', False): return @@ -106,6 +114,7 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs): if inventory is not None: update_inventory_computed_fields.delay(inventory.id, True) + def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs): 'When a role parent is added or removed, update our role hierarchy list' if action == 'post_add': @@ -120,6 +129,7 @@ def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwarg else: 
model.rebuild_role_ancestor_list([], [instance.id]) + def sync_superuser_status_to_rbac(instance, **kwargs): 'When the is_superuser flag is changed on a user, reflect that in the membership of the System Admnistrator role' if instance.is_superuser: @@ -127,6 +137,7 @@ def sync_superuser_status_to_rbac(instance, **kwargs): else: Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).members.remove(instance) + def create_user_role(instance, **kwargs): try: Role.objects.get( @@ -141,6 +152,7 @@ def create_user_role(instance, **kwargs): ) role.members.add(instance) + def org_admin_edit_members(instance, action, model, reverse, pk_set, **kwargs): content_type = ContentType.objects.get_for_model(Organization) @@ -156,6 +168,7 @@ def org_admin_edit_members(instance, action, model, reverse, pk_set, **kwargs): if action == 'pre_remove': instance.content_object.admin_role.children.remove(user.admin_role) + def rbac_activity_stream(instance, sender, **kwargs): user_type = ContentType.objects.get_for_model(User) # Only if we are associating/disassociating @@ -167,8 +180,16 @@ def rbac_activity_stream(instance, sender, **kwargs): elif sender.__name__ == 'Role_parents': role = kwargs['model'].objects.filter(pk__in=kwargs['pk_set']).first() # don't record implicit creation / parents - if role is not None and role.content_type is not None: - parent = role.content_type.name + "." + role.role_field + if role is not None: + if role.content_type is None: + if role.is_singleton(): + parent = 'singleton:' + role.singleton_name + else: + # Ill-defined role, may need additional logic in the + # case of future expansions of the RBAC system + parent = str(role.role_field) + else: + parent = role.content_type.name + "." + role.role_field # Get the list of implicit parents that were defined at the class level. 
# We have to take this list from the class property to avoid including parents # that may have been added since the creation of the ImplicitRoleField @@ -190,23 +211,31 @@ def rbac_activity_stream(instance, sender, **kwargs): activity_stream_associate(sender, instance, role=role, **kwargs) + def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs): for l in instance.labels.all(): if l.is_candidate_for_detach(): l.delete() -post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host) -post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host) -post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group) -post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group) -m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through) -m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through) -m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through) -m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through) -post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource) -post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource) -post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job) -post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Job) + +def connect_computed_field_signals(): + post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host) + post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host) + post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group) + post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group) + m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through) + m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through) + m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through) + m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through) + post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource) + post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource) + post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job) + post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Job) + + +connect_computed_field_signals() + + post_save.connect(emit_job_event_detail, sender=JobEvent) post_save.connect(emit_ad_hoc_command_event_detail, sender=AdHocCommandEvent) m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through) @@ -220,6 +249,7 @@ pre_delete.connect(cleanup_detached_labels_on_deleted_parent, sender=UnifiedJobT # Migrate hosts, groups to parent group(s) whenever a group is deleted + @receiver(pre_delete, sender=Group) def save_related_pks_before_group_delete(sender, **kwargs): if getattr(_inventory_updates, 'is_removing', False): @@ -230,6 +260,7 @@ def save_related_pks_before_group_delete(sender, **kwargs): instance._saved_hosts_pks = set(instance.hosts.values_list('pk', flat=True)) instance._saved_children_pks = set(instance.children.values_list('pk', flat=True)) + @receiver(post_delete, sender=Group) def migrate_children_from_deleted_group_to_parent_groups(sender, **kwargs): if getattr(_inventory_updates, 'is_removing', False): @@ -263,6 +294,7 @@ def 
migrate_children_from_deleted_group_to_parent_groups(sender, **kwargs): # Update host pointers to last_job and last_job_host_summary when a job is deleted + def _update_host_last_jhs(host): jhs_qs = JobHostSummary.objects.filter(host__pk=host.pk) try: @@ -280,12 +312,14 @@ def _update_host_last_jhs(host): if update_fields: host.save(update_fields=update_fields) + @receiver(pre_delete, sender=Job) def save_host_pks_before_job_delete(sender, **kwargs): instance = kwargs['instance'] hosts_qs = Host.objects.filter( last_job__pk=instance.pk) instance._saved_hosts_pks = set(hosts_qs.values_list('pk', flat=True)) + @receiver(post_delete, sender=Job) def update_host_last_job_after_job_deleted(sender, **kwargs): instance = kwargs['instance'] @@ -295,15 +329,18 @@ def update_host_last_job_after_job_deleted(sender, **kwargs): # Set via ActivityStreamRegistrar to record activity stream events + class ActivityStreamEnabled(threading.local): def __init__(self): - self.enabled = getattr(tower_settings, 'ACTIVITY_STREAM_ENABLED', True) + self.enabled = True def __nonzero__(self): - return bool(self.enabled) + return bool(self.enabled and getattr(settings, 'ACTIVITY_STREAM_ENABLED', True)) + activity_stream_enabled = ActivityStreamEnabled() + @contextlib.contextmanager def disable_activity_stream(): ''' @@ -317,6 +354,24 @@ def disable_activity_stream(): activity_stream_enabled.enabled = previous_value +@contextlib.contextmanager +def disable_computed_fields(): + post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host) + post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host) + post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group) + post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group) + m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.hosts.through) + m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.parents.through) + m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through) + m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through) + post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource) + post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource) + post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job) + post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job) + yield + connect_computed_field_signals() + + model_serializer_mapping = { Organization: OrganizationSerializer, Inventory: InventorySerializer, @@ -330,11 +385,11 @@ model_serializer_mapping = { JobTemplate: JobTemplateSerializer, Job: JobSerializer, AdHocCommand: AdHocCommandSerializer, - TowerSettings: TowerSettingsSerializer, NotificationTemplate: NotificationTemplateSerializer, Notification: NotificationSerializer, } + def activity_stream_create(sender, instance, created, **kwargs): if created and activity_stream_enabled: # Skip recording any inventory source directly associated with a group. @@ -354,9 +409,10 @@ def activity_stream_create(sender, instance, created, **kwargs): #TODO: Weird situation where cascade SETNULL doesn't work # it might actually be a good idea to remove all of these FK references since # we don't really use them anyway. 
- if type(instance) is not TowerSettings: + if instance._meta.model_name != 'setting': # Is not conf.Setting instance getattr(activity_entry, object1).add(instance) + def activity_stream_update(sender, instance, **kwargs): if instance.id is None: return @@ -377,9 +433,10 @@ def activity_stream_update(sender, instance, **kwargs): object1=object1, changes=json.dumps(changes)) activity_entry.save() - if type(instance) is not TowerSettings: + if instance._meta.model_name != 'setting': # Is not conf.Setting instance getattr(activity_entry, object1).add(instance) + def activity_stream_delete(sender, instance, **kwargs): if not activity_stream_enabled: return @@ -394,6 +451,7 @@ def activity_stream_delete(sender, instance, **kwargs): object1=object1) activity_entry.save() + def activity_stream_associate(sender, instance, **kwargs): if not activity_stream_enabled: return diff --git a/awx/main/socket.py b/awx/main/socket.py deleted file mode 100644 index b2b78396fa..0000000000 --- a/awx/main/socket.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -import os - -import zmq - -from django.conf import settings - - -class Socket(object): - """An abstraction class implemented for a dumb OS socket. - - Intended to allow alteration of backend details in a single, consistent - way throughout the Tower application. - """ - def __init__(self, bucket, rw, debug=0, logger=None, nowait=False): - """Instantiate a Socket object, which uses ZeroMQ to actually perform - passing a message back and forth. - - Designed to be used as a context manager: - - with Socket('callbacks', 'w') as socket: - socket.publish({'message': 'foo bar baz'}) - - If listening for messages through a socket, the `listen` method - is a simple generator: - - with Socket('callbacks', 'r') as socket: - for message in socket.listen(): - [...] - """ - self._bucket = bucket - self._rw = { - 'r': zmq.REP, - 'w': zmq.REQ, - }[rw.lower()] - - self._connection_pid = None - self._context = None - self._socket = None - - self._debug = debug - self._logger = logger - self._nowait = nowait - - def __enter__(self): - self.connect() - return self - - def __exit__(self, *args, **kwargs): - self.close() - - @property - def is_connected(self): - if self._socket: - return True - return False - - @property - def port(self): - return { - 'callbacks': os.environ.get('CALLBACK_CONSUMER_PORT', - settings.CALLBACK_CONSUMER_PORT), - 'task_commands': settings.TASK_COMMAND_PORT, - 'websocket': settings.SOCKETIO_NOTIFICATION_PORT, - 'fact_cache': settings.FACT_CACHE_PORT, - }[self._bucket] - - def connect(self): - """Connect to ZeroMQ.""" - - # Make sure that we are clearing everything out if there is - # a problem; PID crossover can cause bad news. - active_pid = os.getpid() - if self._connection_pid is None: - self._connection_pid = active_pid - if self._connection_pid != active_pid: - self._context = None - self._socket = None - self._connection_pid = active_pid - - # If the port is an integer, convert it into tcp:// - port = self.port - if isinstance(port, int): - port = 'tcp://127.0.0.1:%d' % port - - # If the port is None, then this is an intentional dummy; - # honor this. (For testing.) - if not port: - return - - # Okay, create the connection. 
- if self._context is None: - self._context = zmq.Context() - self._socket = self._context.socket(self._rw) - if self._nowait: - self._socket.setsockopt(zmq.RCVTIMEO, 2000) - self._socket.setsockopt(zmq.LINGER, 1000) - if self._rw == zmq.REQ: - self._socket.connect(port) - else: - self._socket.bind(port) - - def close(self): - """Disconnect and tear down.""" - if self._socket: - self._socket.close() - self._socket = None - self._context = None - - def publish(self, message): - """Publish a message over the socket.""" - - # If the port is None, no-op. - if self.port is None: - return - - # If we are not connected, whine. - if not self.is_connected: - raise RuntimeError('Cannot publish a message when not connected ' - 'to the socket.') - - # If we are in the wrong mode, whine. - if self._rw != zmq.REQ: - raise RuntimeError('This socket is not opened for writing.') - - # If we are in debug mode; provide the PID. - if self._debug: - message.update({'pid': os.getpid(), - 'connection_pid': self._connection_pid}) - - # Send the message. - for retry in xrange(4): - try: - self._socket.send_json(message) - self._socket.recv() - break - except Exception as ex: - if self._logger: - self._logger.error('Publish Exception: %r; retry=%d', - ex, retry, exc_info=True) - if retry >= 3: - raise - - def listen(self): - """Retrieve a single message from the subcription channel - and return it. - """ - # If the port is None, no-op. - if self.port is None: - raise StopIteration - - # If we are not connected, whine. - if not self.is_connected: - raise RuntimeError('Cannot publish a message when not connected ' - 'to the socket.') - - # If we are in the wrong mode, whine. - if self._rw != zmq.REP: - raise RuntimeError('This socket is not opened for reading.') - - # Actually listen to the socket. 
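The removed publish() pairs every send with a recv because REQ/REP sockets are lock-step: a second send before the peer's reply is a protocol violation. A self-contained sketch of one round trip (endpoint made up):

    import zmq

    context = zmq.Context()
    sock = context.socket(zmq.REQ)
    sock.setsockopt(zmq.RCVTIMEO, 2000)  # fail fast instead of blocking forever
    sock.setsockopt(zmq.LINGER, 1000)    # bounded wait for unsent data on close
    sock.connect('tcp://127.0.0.1:5555')

    sock.send_json({'event': 'playbook_on_start'})
    sock.recv()   # consume the REP side's acknowledgement
    sock.close()
    context.term()
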
- while True: - try: - message = self._socket.recv_json() - yield message - finally: - self._socket.send('1') diff --git a/awx/main/south_migrations/0071_v240_changes.py b/awx/main/south_migrations/0071_v240_changes.py index cae03ef27a..98c44d4ad7 100644 --- a/awx/main/south_migrations/0071_v240_changes.py +++ b/awx/main/south_migrations/0071_v240_changes.py @@ -4,7 +4,7 @@ from south.db import db from south.v2 import DataMigration from django.db import models from django.utils.timezone import now -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled class Migration(DataMigration): diff --git a/awx/main/tasks.py b/awx/main/tasks.py index c76107d601..584e6ce0bb 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -21,7 +21,6 @@ import traceback import urlparse import uuid from distutils.version import LooseVersion as Version -import dateutil.parser import yaml try: import psutil @@ -33,7 +32,7 @@ import pexpect # Celery from celery import Task, task -from celery.signals import celeryd_init +from celery.signals import celeryd_init, worker_process_init # Django from django.conf import settings @@ -42,21 +41,26 @@ from django.utils.timezone import now from django.utils.encoding import smart_str from django.core.mail import send_mail from django.contrib.auth.models import User +from django.utils.translation import ugettext_lazy as _ +from django.core.cache import cache # AWX from awx.main.constants import CLOUD_PROVIDERS from awx.main.models import * # noqa from awx.main.models import UnifiedJob -from awx.main.queue import FifoQueue -from awx.main.conf import tower_settings -from awx.main.task_engine import TaskSerializer, TASK_TIMEOUT_INTERVAL +from awx.main.queue import CallbackQueueDispatcher +from awx.main.task_engine import TaskEnhancer from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url, - emit_websocket_notification, - check_proot_installed, build_proot_temp_dir, wrap_args_with_proot) + check_proot_installed, build_proot_temp_dir, wrap_args_with_proot, + get_system_task_capacity, OutputEventFilter, parse_yaml_or_json) +from awx.main.utils.reload import restart_local_services +from awx.main.utils.handlers import configure_external_logger +from awx.main.consumers import emit_channel_notification __all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate', - 'RunAdHocCommand', 'handle_work_error', 'handle_work_success', - 'update_inventory_computed_fields', 'send_notifications', 'run_administrative_checks'] + 'RunAdHocCommand', 'handle_work_error', + 'handle_work_success', 'update_inventory_computed_fields', + 'send_notifications', 'run_administrative_checks', 'purge_old_stdout_files'] HIDDEN_PASSWORD = '**********' @@ -68,11 +72,13 @@ Try upgrading OpenSSH or providing your private key in an different format. 
\ logger = logging.getLogger('awx.main.tasks') + @celeryd_init.connect def celery_startup(conf=None, **kwargs): # Re-init all schedules # NOTE: Rework this during the Rampart work - logger.info("Syncing Tower Schedules") + startup_logger = logging.getLogger('awx.main.tasks') + startup_logger.info("Syncing Tower Schedules") for sch in Schedule.objects.all(): try: sch.update_computed_fields() @@ -80,14 +86,42 @@ def celery_startup(conf=None, **kwargs): except Exception as e: logger.error("Failed to rebuild schedule {}: {}".format(sch, e)) -@task() + +@worker_process_init.connect +def task_set_logger_pre_run(*args, **kwargs): + cache.close() + configure_external_logger(settings, async_flag=False, is_startup=False) + + +def _clear_cache_keys(set_of_keys): + logger.debug('cache delete_many(%r)', set_of_keys) + cache.delete_many(set_of_keys) + + +@task(queue='broadcast_all') +def process_cache_changes(cache_keys): + logger.warn('Processing cache changes, task args: {0.args!r} kwargs: {0.kwargs!r}'.format( + process_cache_changes.request)) + set_of_keys = set([key for key in cache_keys]) + _clear_cache_keys(set_of_keys) + for setting_key in set_of_keys: + if setting_key.startswith('LOG_AGGREGATOR_'): + restart_local_services(['uwsgi', 'celery', 'beat', 'callback', 'fact']) + break + + +@task(queue='default') def send_notifications(notification_list, job_id=None): if not isinstance(notification_list, list): raise TypeError("notification_list should be of type list") if job_id is not None: job_actual = UnifiedJob.objects.get(id=job_id) - for notification_id in notification_list: - notification = Notification.objects.get(id=notification_id) + + notifications = Notification.objects.filter(id__in=notification_list) + if job_id is not None: + job_actual.notifications.add(*notifications) + + for notification in notifications: try: sent = notification.notification_template.send(notification.subject, notification.body) notification.status = "successful" @@ -98,65 +132,65 @@ def send_notifications(notification_list, job_id=None): notification.error = smart_str(e) finally: notification.save() - if job_id is not None: - job_actual.notifications.add(notification) -@task(bind=True) + +@task(bind=True, queue='default') def run_administrative_checks(self): - if not tower_settings.TOWER_ADMIN_ALERTS: + logger.warn("Running administrative checks.") + if not settings.TOWER_ADMIN_ALERTS: return - reader = TaskSerializer() - validation_info = reader.from_database() + validation_info = TaskEnhancer().validate_enhancements() if validation_info.get('instance_count', 0) < 1: return used_percentage = float(validation_info.get('current_instances', 0)) / float(validation_info.get('instance_count', 100)) tower_admin_emails = User.objects.filter(is_superuser=True).values_list('email', flat=True) if (used_percentage * 100) > 90: send_mail("Ansible Tower host usage over 90%", - "Ansible Tower host usage over 90%", + _("Ansible Tower host usage over 90%"), tower_admin_emails, fail_silently=True) - if validation_info.get('time_remaining', 0) < TASK_TIMEOUT_INTERVAL: + if validation_info.get('date_warning', False): send_mail("Ansible Tower license will expire soon", - "Ansible Tower license will expire soon", + _("Ansible Tower license will expire soon"), tower_admin_emails, fail_silently=True) -@task(bind=True) + +@task(bind=True, queue='default') def cleanup_authtokens(self): + logger.warn("Cleaning up expired authtokens.") AuthToken.objects.filter(expires__lt=now()).delete() + @task(bind=True) +def purge_old_stdout_files(self): 
+ nowtime = time.time() + for f in os.listdir(settings.JOBOUTPUT_ROOT): + if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT,f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME: + os.unlink(os.path.join(settings.JOBOUTPUT_ROOT,f)) + logger.info("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT,f))) + + +@task(bind=True) +def cluster_node_heartbeat(self): + logger.debug("Cluster node heartbeat task.") + inst = Instance.objects.filter(hostname=settings.CLUSTER_HOST_ID) + if inst.exists(): + inst = inst[0] + inst.capacity = get_system_task_capacity() + inst.save() + return + raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID)) + + +@task(bind=True, queue='default') def tower_periodic_scheduler(self): - def get_last_run(): - if not os.path.exists(settings.SCHEDULE_METADATA_LOCATION): - return None - fd = open(settings.SCHEDULE_METADATA_LOCATION) - try: - last_run = dateutil.parser.parse(fd.read()) - return last_run - except Exception as exc: - logger.error("get_last_run failed: {}".format(exc)) - return None - - def write_last_run(last_run): - fd = open(settings.SCHEDULE_METADATA_LOCATION, 'w') - fd.write(last_run.isoformat()) - fd.close() - run_now = now() - last_run = get_last_run() - if not last_run: - logger.debug("First run time") - write_last_run(run_now) - return + state = TowerScheduleState.get_solo() + last_run = state.schedule_last_run logger.debug("Last run was: %s", last_run) - write_last_run(run_now) - - # Sanity check: If this is a secondary machine, there is nothing - # on the schedule. - if Instance.objects.my_role() == 'secondary': - return + state.schedule_last_run = run_now + state.save() old_schedules = Schedule.objects.enabled().before(last_run) for schedule in old_schedules: @@ -169,132 +203,85 @@ def tower_periodic_scheduler(self): logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id)) continue new_unified_job = template.create_unified_job(launch_type='scheduled', schedule=schedule) - can_start = new_unified_job.signal_start(extra_vars=schedule.extra_data) + can_start = new_unified_job.signal_start(extra_vars=parse_yaml_or_json(schedule.extra_data)) if not can_start: new_unified_job.status = 'failed' new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials" new_unified_job.save(update_fields=['status', 'job_explanation']) - new_unified_job.socketio_emit_status("failed") - emit_websocket_notification('/socket.io/schedules', 'schedule_changed', dict(id=schedule.id)) + new_unified_job.websocket_emit_status("failed") + emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules")) + state.save() -@task() -def notify_task_runner(metadata_dict): - """Add the given task into the Tower task manager's queue, to be consumed - by the task system. 
- """ - queue = FifoQueue('tower_task_manager') - queue.push(metadata_dict) -@task(bind=True) +def _send_notification_templates(instance, status_str): + if status_str not in ['succeeded', 'failed']: + raise ValueError(_("status_str must be either succeeded or failed")) + notification_templates = instance.get_notification_templates() + if notification_templates: + all_notification_templates = set(notification_templates.get('success', []) + notification_templates.get('any', [])) + if len(all_notification_templates): + try: + (notification_subject, notification_body) = getattr(instance, 'build_notification_%s_message' % status_str)() + except AttributeError: + raise NotImplementedError("build_notification_%s_message() does not exist" % status_str) + send_notifications.delay([n.generate_notification(notification_subject, notification_body).id + for n in all_notification_templates], + job_id=instance.id) + + +@task(bind=True, queue='default') def handle_work_success(self, result, task_actual): - if task_actual['type'] == 'project_update': - instance = ProjectUpdate.objects.get(id=task_actual['id']) - instance_name = instance.name - notification_templates = instance.project.notification_templates - friendly_name = "Project Update" - elif task_actual['type'] == 'inventory_update': - instance = InventoryUpdate.objects.get(id=task_actual['id']) - instance_name = instance.name - notification_templates = instance.inventory_source.notification_templates - friendly_name = "Inventory Update" - elif task_actual['type'] == 'job': - instance = Job.objects.get(id=task_actual['id']) - instance_name = instance.job_template.name - notification_templates = instance.job_template.notification_templates - friendly_name = "Job" - elif task_actual['type'] == 'ad_hoc_command': - instance = AdHocCommand.objects.get(id=task_actual['id']) - instance_name = instance.module_name - notification_templates = instance.notification_templates - friendly_name = "AdHoc Command" - elif task_actual['type'] == 'system_job': - instance = SystemJob.objects.get(id=task_actual['id']) - instance_name = instance.system_job_template.name - notification_templates = instance.system_job_template.notification_templates - friendly_name = "System Job" - else: + instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id']) + if not instance: return - all_notification_templates = set(notification_templates.get('success', []) + notification_templates.get('any', [])) - if len(all_notification_templates): - notification_body = instance.notification_data() - notification_subject = "{} #{} '{}' succeeded on Ansible Tower: {}".format(friendly_name, - task_actual['id'], - smart_str(instance_name), - notification_body['url']) - notification_body['friendly_name'] = friendly_name - send_notifications.delay([n.generate_notification(notification_subject, notification_body).id - for n in all_notification_templates], - job_id=task_actual['id']) + _send_notification_templates(instance, 'succeeded') -@task(bind=True) + from awx.main.scheduler.tasks import run_job_complete + run_job_complete.delay(instance.id) + + +@task(bind=True, queue='default') def handle_work_error(self, task_id, subtasks=None): print('Executing error task id %s, subtasks: %s' % (str(self.request.id), str(subtasks))) - first_task = None - first_task_id = None - first_task_type = '' - first_task_name = '' + first_instance = None + first_instance_type = '' if subtasks is not None: for each_task in subtasks: - instance_name = '' - if each_task['type'] == 'project_update': - 
instance = ProjectUpdate.objects.get(id=each_task['id']) - instance_name = instance.name - notification_templates = instance.project.notification_templates - friendly_name = "Project Update" - elif each_task['type'] == 'inventory_update': - instance = InventoryUpdate.objects.get(id=each_task['id']) - instance_name = instance.name - notification_templates = instance.inventory_source.notification_templates - friendly_name = "Inventory Update" - elif each_task['type'] == 'job': - instance = Job.objects.get(id=each_task['id']) - instance_name = instance.job_template.name - notification_templates = instance.job_template.notification_templates - friendly_name = "Job" - elif each_task['type'] == 'ad_hoc_command': - instance = AdHocCommand.objects.get(id=each_task['id']) - instance_name = instance.module_name - notification_templates = instance.notification_templates - friendly_name = "AdHoc Command" - elif each_task['type'] == 'system_job': - instance = SystemJob.objects.get(id=each_task['id']) - instance_name = instance.system_job_template.name - notification_templates = instance.system_job_template.notification_templates - friendly_name = "System Job" - else: + instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id']) + if not instance: # Unknown task type logger.warn("Unknown task type: {}".format(each_task['type'])) continue - if first_task is None: - first_task = instance - first_task_id = instance.id - first_task_type = each_task['type'] - first_task_name = instance_name - first_task_friendly_name = friendly_name + + if first_instance is None: + first_instance = instance + first_instance_type = each_task['type'] + if instance.celery_task_id != task_id: instance.status = 'failed' instance.failed = True - instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \ - (first_task_type, first_task_name, first_task_id) + if not instance.job_explanation: + instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \ + (first_instance_type, first_instance.name, first_instance.id) instance.save() - instance.socketio_emit_status("failed") + instance.websocket_emit_status("failed") - all_notification_templates = set(notification_templates.get('error', []) + notification_templates.get('any', [])) - if len(all_notification_templates): - notification_body = first_task.notification_data() - notification_subject = "{} #{} '{}' failed on Ansible Tower: {}".format(first_task_friendly_name, - first_task_id, - smart_str(first_task_name), - notification_body['url']) - notification_body['friendly_name'] = first_task_friendly_name - send_notifications.delay([n.generate_notification(notification_subject, notification_body).id - for n in all_notification_templates], - job_id=first_task_id) + if first_instance: + _send_notification_templates(first_instance, 'failed') + + # We only send 1 job complete message since all the job completion message + # handling does is trigger the scheduler. If we extend the functionality of + # what the job complete message handler does then we may want to send a + # completion event for each job here. 
+ if first_instance: + from awx.main.scheduler.tasks import run_job_complete + run_job_complete.delay(first_instance.id) + pass -@task() +@task(queue='default') def update_inventory_computed_fields(inventory_id, should_update_hosts=True): ''' Signal handler and wrapper around inventory.update_computed_fields to @@ -406,9 +393,12 @@ class BaseTask(Task): data += '\n' # For credentials used with ssh-add, write to a named pipe which # will be read then closed, instead of leaving the SSH key on disk. - if name in ('credential', 'network_credential', 'scm_credential', 'ad_hoc_credential') and not ssh_too_old: + if name in ('credential', 'scm_credential', 'ad_hoc_credential') and not ssh_too_old: path = os.path.join(kwargs.get('private_data_dir', tempfile.gettempdir()), name) self.open_fifo_write(path, data) + # Ansible network modules do not yet support ssh-agent. + # Instead, ssh private key file is explicitly passed via an + # env variable. else: handle, path = tempfile.mkstemp(dir=kwargs.get('private_data_dir', None)) f = os.fdopen(handle, 'w') @@ -446,6 +436,8 @@ class BaseTask(Task): if os.path.isdir(os.path.join(venv_libdir, python_ver)): env['PYTHONPATH'] = os.path.join(venv_libdir, python_ver, "site-packages") + ":" break + # Add awx/lib to PYTHONPATH. + env['PYTHONPATH'] = ':'.join(filter(None, [self.get_path_to('..', 'lib'), env.get('PYTHONPATH', '')])) return env def add_tower_venv(self, env): @@ -472,7 +464,7 @@ class BaseTask(Task): # NOTE: # Derived class should call add_ansible_venv() or add_tower_venv() if self.should_use_proot(instance, **kwargs): - env['PROOT_TMP_DIR'] = tower_settings.AWX_PROOT_BASE_PATH + env['PROOT_TMP_DIR'] = settings.AWX_PROOT_BASE_PATH return env def build_safe_env(self, instance, **kwargs): @@ -486,7 +478,7 @@ class BaseTask(Task): for k,v in env.items(): if k in ('REST_API_URL', 'AWS_ACCESS_KEY', 'AWS_ACCESS_KEY_ID'): continue - elif k.startswith('ANSIBLE_'): + elif k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'): continue elif hidden_re.search(k): env[k] = HIDDEN_PASSWORD @@ -543,8 +535,19 @@ class BaseTask(Task): ''' return OrderedDict() + def get_stdout_handle(self, instance): + ''' + Return an open file object for capturing stdout. + ''' + if not os.path.exists(settings.JOBOUTPUT_ROOT): + os.makedirs(settings.JOBOUTPUT_ROOT) + stdout_filename = os.path.join(settings.JOBOUTPUT_ROOT, "%d-%s.out" % (instance.pk, str(uuid.uuid1()))) + stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8') + assert stdout_handle.name == stdout_filename + return stdout_handle + def run_pexpect(self, instance, args, cwd, env, passwords, stdout_handle, - output_replacements=None): + output_replacements=None, extra_update_fields=None): ''' Run the given command using pexpect to capture output and provide passwords when requested. 
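The build_safe_env tweak above keeps ANSIBLE_* values visible except the new ANSIBLE_NET_* family, which can carry credentials (ANSIBLE_NET_PASSWORD, ANSIBLE_NET_AUTH_PASS). A standalone sketch of the masking rule (the hidden-key pattern below is assumed, not the project's actual regex):

    import re

    HIDDEN = '**********'
    hidden_re = re.compile(r'API|TOKEN|KEY|SECRET|PASS', re.I)  # assumed pattern

    def build_safe_env(env):
        safe = dict(env)
        for k in safe:
            if k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'):
                continue            # ordinary Ansible config, safe to show
            if hidden_re.search(k):
                safe[k] = HIDDEN    # e.g. ANSIBLE_NET_PASSWORD
        return safe

    assert build_safe_env({'ANSIBLE_NET_PASSWORD': 's3cret'})['ANSIBLE_NET_PASSWORD'] == HIDDEN
    assert build_safe_env({'ANSIBLE_RETRY_FILES_ENABLED': 'False'})['ANSIBLE_RETRY_FILES_ENABLED'] == 'False'
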
@@ -560,9 +563,18 @@ class BaseTask(Task): if pexpect_sleep is not None: logger.info("Suspending Job Execution for QA Work") time.sleep(pexpect_sleep) + global_timeout_setting_name = instance._global_timeout_setting() + if global_timeout_setting_name: + global_timeout = getattr(settings, global_timeout_setting_name, 0) + local_timeout = getattr(instance, 'timeout', 0) + job_timeout = global_timeout if local_timeout == 0 else local_timeout + job_timeout = 0 if local_timeout < 0 else job_timeout + else: + job_timeout = 0 child = pexpect.spawnu(args[0], args[1:], cwd=cwd, env=env) child.logfile_read = logfile canceled = False + timed_out = False last_stdout_update = time.time() idle_timeout = self.get_idle_timeout() expect_list = [] @@ -573,7 +585,9 @@ class BaseTask(Task): expect_passwords[n] = passwords.get(item[1], '') or '' expect_list.extend([pexpect.TIMEOUT, pexpect.EOF]) instance = self.update_model(instance.pk, status='running', + execution_node=settings.CLUSTER_HOST_ID, output_replacements=output_replacements) + job_start = time.time() while child.isalive(): result_id = child.expect(expect_list, timeout=pexpect_timeout, searchwindowsize=100) if result_id in expect_passwords: @@ -584,45 +598,65 @@ class BaseTask(Task): # Refresh model instance from the database (to check cancel flag). instance = self.update_model(instance.pk) if instance.cancel_flag: - try: - if tower_settings.AWX_PROOT_ENABLED and self.should_use_proot(instance): - # NOTE: Refactor this once we get a newer psutil across the board - if not psutil: - os.kill(child.pid, signal.SIGKILL) - else: - try: - main_proc = psutil.Process(pid=child.pid) - if hasattr(main_proc, "children"): - child_procs = main_proc.children(recursive=True) - else: - child_procs = main_proc.get_children(recursive=True) - for child_proc in child_procs: - os.kill(child_proc.pid, signal.SIGKILL) - os.kill(main_proc.pid, signal.SIGKILL) - except TypeError: - os.kill(child.pid, signal.SIGKILL) - else: - os.kill(child.pid, signal.SIGTERM) - time.sleep(3) - canceled = True - except OSError: - logger.warn("Attempted to cancel already finished job, ignoring") + canceled = True + elif job_timeout != 0 and (time.time() - job_start) > job_timeout: + timed_out = True + if isinstance(extra_update_fields, dict): + extra_update_fields['job_explanation'] = "Job terminated due to timeout" + if canceled or timed_out: + self._handle_termination(instance, child, is_cancel=canceled) if idle_timeout and (time.time() - last_stdout_update) > idle_timeout: child.close(True) canceled = True if canceled: return 'canceled', child.exitstatus - elif child.exitstatus == 0: + elif child.exitstatus == 0 and not timed_out: return 'successful', child.exitstatus else: return 'failed', child.exitstatus + def _handle_termination(self, instance, job, is_cancel=True): + '''Helper function to properly terminate specified job. + + Args: + instance: The corresponding model instance of this task. + job: The pexpect subprocess running the job. + is_cancel: Flag showing whether this termination is caused by instance's + cancel_flag. + + Return: + None. 
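The timeout resolution added above composes two settings: a per-job timeout wins over the global one, a local value of 0 defers to the global, and a negative local value disables the limit entirely (the resulting 0 is treated as "no limit" by the job_timeout != 0 check). As a worked example:

    def resolve_job_timeout(global_timeout, local_timeout):
        job_timeout = global_timeout if local_timeout == 0 else local_timeout
        return 0 if local_timeout < 0 else job_timeout

    assert resolve_job_timeout(3600, 0) == 3600   # defer to the global setting
    assert resolve_job_timeout(3600, 120) == 120  # per-job override wins
    assert resolve_job_timeout(3600, -1) == 0     # negative disables the limit
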
+ ''' + try: + if settings.AWX_PROOT_ENABLED and self.should_use_proot(instance): + # NOTE: Refactor this once we get a newer psutil across the board + if not psutil: + os.kill(job.pid, signal.SIGKILL) + else: + try: + main_proc = psutil.Process(pid=job.pid) + if hasattr(main_proc, "children"): + child_procs = main_proc.children(recursive=True) + else: + child_procs = main_proc.get_children(recursive=True) + for child_proc in child_procs: + os.kill(child_proc.pid, signal.SIGKILL) + os.kill(main_proc.pid, signal.SIGKILL) + except (TypeError, psutil.Error): + os.kill(job.pid, signal.SIGKILL) + else: + os.kill(job.pid, signal.SIGTERM) + time.sleep(3) + except OSError: + keyword = 'cancel' if is_cancel else 'timeout' + logger.warn("Attempted to %s already finished job, ignoring" % keyword) + def pre_run_hook(self, instance, **kwargs): ''' Hook for any steps to run before the job/task starts ''' - def post_run_hook(self, instance, **kwargs): + def post_run_hook(self, instance, status, **kwargs): ''' Hook for any steps to run after job/task is complete. ''' @@ -631,11 +665,12 @@ class BaseTask(Task): ''' Run the job/task and capture its output. ''' - instance = self.update_model(pk, status='running', celery_task_id=self.request.id) + instance = self.update_model(pk, status='running', celery_task_id='' if self.request.id is None else self.request.id) - instance.socketio_emit_status("running") + instance.websocket_emit_status("running") status, rc, tb = 'error', None, '' output_replacements = [] + extra_update_fields = {} try: self.pre_run_hook(instance, **kwargs) if instance.cancel_flag: @@ -661,13 +696,10 @@ class BaseTask(Task): cwd = self.build_cwd(instance, **kwargs) env = self.build_env(instance, **kwargs) safe_env = self.build_safe_env(instance, **kwargs) - if not os.path.exists(settings.JOBOUTPUT_ROOT): - os.makedirs(settings.JOBOUTPUT_ROOT) - stdout_filename = os.path.join(settings.JOBOUTPUT_ROOT, "%d-%s.out" % (pk, str(uuid.uuid1()))) - stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8') + stdout_handle = self.get_stdout_handle(instance) if self.should_use_proot(instance, **kwargs): if not check_proot_installed(): - raise RuntimeError('proot is not installed') + raise RuntimeError('bubblewrap is not installed') kwargs['proot_temp_dir'] = build_proot_temp_dir() args = wrap_args_with_proot(args, cwd, **kwargs) safe_args = wrap_args_with_proot(safe_args, cwd, **kwargs) @@ -678,8 +710,9 @@ class BaseTask(Task): args = self.wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock) safe_args = self.wrap_args_with_ssh_agent(safe_args, ssh_key_path, ssh_auth_sock) instance = self.update_model(pk, job_args=json.dumps(safe_args), - job_cwd=cwd, job_env=safe_env, result_stdout_file=stdout_filename) - status, rc = self.run_pexpect(instance, args, cwd, env, kwargs['passwords'], stdout_handle) + job_cwd=cwd, job_env=safe_env, result_stdout_file=stdout_handle.name) + status, rc = self.run_pexpect(instance, args, cwd, env, kwargs['passwords'], stdout_handle, + extra_update_fields=extra_update_fields) except Exception: if status != 'canceled': tb = traceback.format_exc() @@ -699,17 +732,24 @@ class BaseTask(Task): stdout_handle.close() except Exception: pass + + instance = self.update_model(pk) + if instance.cancel_flag: + status = 'canceled' + instance = self.update_model(pk, status=status, result_traceback=tb, - output_replacements=output_replacements) - self.post_run_hook(instance, **kwargs) - instance.socketio_emit_status(status) + output_replacements=output_replacements, + 
**extra_update_fields) + self.post_run_hook(instance, status, **kwargs) + instance.websocket_emit_status(status) if status != 'successful' and not hasattr(settings, 'CELERY_UNIT_TEST'): # Raising an exception will mark the job as 'failed' in celery # and will stop a task chain from continuing to execute if status == 'canceled': raise Exception("Task %s(pk:%s) was canceled (rc=%s)" % (str(self.model.__class__), str(pk), str(rc))) else: - raise Exception("Task %s(pk:%s) encountered an error (rc=%s)" % (str(self.model.__class__), str(pk), str(rc))) + raise Exception("Task %s(pk:%s) encountered an error (rc=%s), please see task stdout for details." % + (str(self.model.__class__), str(pk), str(rc))) if not hasattr(settings, 'CELERY_UNIT_TEST'): self.signal_finished(pk) @@ -781,9 +821,9 @@ class RunJob(BaseTask): ''' plugin_dir = self.get_path_to('..', 'plugins', 'callback') plugin_dirs = [plugin_dir] - if hasattr(tower_settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and \ - tower_settings.AWX_ANSIBLE_CALLBACK_PLUGINS: - plugin_dirs.append(tower_settings.AWX_ANSIBLE_CALLBACK_PLUGINS) + if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and \ + settings.AWX_ANSIBLE_CALLBACK_PLUGINS: + plugin_dirs.extend(settings.AWX_ANSIBLE_CALLBACK_PLUGINS) plugin_path = ':'.join(plugin_dirs) env = super(RunJob, self).build_env(job, **kwargs) env = self.add_ansible_venv(env) @@ -791,10 +831,18 @@ class RunJob(BaseTask): # callbacks to work. env['JOB_ID'] = str(job.pk) env['INVENTORY_ID'] = str(job.inventory.pk) + if job.project: + env['PROJECT_REVISION'] = job.project.scm_revision + env['ANSIBLE_RETRY_FILES_ENABLED'] = "False" env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_path + env['ANSIBLE_STDOUT_CALLBACK'] = 'tower_display' env['REST_API_URL'] = settings.INTERNAL_API_URL env['REST_API_TOKEN'] = job.task_auth_token or '' - env['CALLBACK_CONSUMER_PORT'] = str(settings.CALLBACK_CONSUMER_PORT) + env['TOWER_HOST'] = settings.TOWER_URL_BASE + env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA) + env['CALLBACK_QUEUE'] = settings.CALLBACK_QUEUE + env['CALLBACK_CONNECTION'] = settings.BROKER_URL + env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else '' if getattr(settings, 'JOB_CALLBACK_DEBUG', False): env['JOB_CALLBACK_DEBUG'] = '2' elif settings.DEBUG: @@ -805,7 +853,7 @@ class RunJob(BaseTask): cp_dir = os.path.join(kwargs['private_data_dir'], 'cp') if not os.path.exists(cp_dir): os.mkdir(cp_dir, 0700) - env['ANSIBLE_SSH_CONTROL_PATH'] = os.path.join(cp_dir, 'ansible-ssh-%%h-%%p-%%r') + env['ANSIBLE_SSH_CONTROL_PATH'] = os.path.join(cp_dir, '%%h%%p%%r') # Allow the inventory script to include host variables inline via ['_meta']['hostvars']. 
env['INVENTORY_HOSTVARS'] = str(True) @@ -851,13 +899,18 @@ class RunJob(BaseTask): env['ANSIBLE_NET_USERNAME'] = network_cred.username env['ANSIBLE_NET_PASSWORD'] = decrypt_field(network_cred, 'password') + ssh_keyfile = kwargs.get('private_data_files', {}).get('network_credential', '') + if ssh_keyfile: + env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile + authorize = network_cred.authorize env['ANSIBLE_NET_AUTHORIZE'] = unicode(int(authorize)) if authorize: - env['ANSIBLE_NET_AUTHORIZE_PASSWORD'] = decrypt_field(network_cred, 'authorize_password') + env['ANSIBLE_NET_AUTH_PASS'] = decrypt_field(network_cred, 'authorize_password') # Set environment variables related to scan jobs if job.job_type == PERM_INVENTORY_SCAN: + env['FACT_QUEUE'] = settings.FACT_QUEUE env['ANSIBLE_LIBRARY'] = self.get_path_to('..', 'plugins', 'library') env['ANSIBLE_CACHE_PLUGINS'] = self.get_path_to('..', 'plugins', 'fact_caching') env['ANSIBLE_CACHE_PLUGIN'] = "tower" @@ -891,27 +944,14 @@ class RunJob(BaseTask): args.extend(['-u', ssh_username]) if 'ssh_password' in kwargs.get('passwords', {}): args.append('--ask-pass') - try: - if Version(kwargs['ansible_version']) < Version('1.9'): - if become_method and become_method == "sudo" and become_username != "": - args.extend(['-U', become_username]) - if become_method and become_method == "sudo" and "become_password" in kwargs.get("passwords", {}): - args.append("--ask-sudo-pass") - if become_method and become_method == "su" and become_username != "": - args.extend(['-R', become_username]) - if become_method and become_method == "su" and "become_password" in kwargs.get("passwords", {}): - args.append("--ask-su-pass") - else: - if job.become_enabled: - args.append('--become') - if become_method: - args.extend(['--become-method', become_method]) - if become_username: - args.extend(['--become-user', become_username]) - if 'become_password' in kwargs.get('passwords', {}): - args.append('--ask-become-pass') - except ValueError: - pass + if job.become_enabled: + args.append('--become') + if become_method: + args.extend(['--become-method', become_method]) + if become_username: + args.extend(['--become-user', become_username]) + if 'become_password' in kwargs.get('passwords', {}): + args.append('--ask-become-pass') # Support prompting for a vault password. 
if 'vault_password' in kwargs.get('passwords', {}): args.append('--ask-vault-pass') @@ -936,6 +976,10 @@ class RunJob(BaseTask): 'tower_job_id': job.pk, 'tower_job_launch_type': job.launch_type, } + if job.project: + extra_vars.update({ + 'tower_project_revision': job.project.scm_revision, + }) if job.job_template: extra_vars.update({ 'tower_job_template_id': job.job_template.pk, @@ -947,7 +991,7 @@ class RunJob(BaseTask): 'tower_user_name': job.created_by.username, }) if job.extra_vars_dict: - if kwargs.get('display', False) and job.job_template and job.job_template.survey_enabled: + if kwargs.get('display', False) and job.job_template: extra_vars.update(json.loads(job.display_extra_vars())) else: extra_vars.update(job.extra_vars_dict) @@ -978,23 +1022,48 @@ class RunJob(BaseTask): def get_password_prompts(self): d = super(RunJob, self).get_password_prompts() - d[re.compile(r'^Enter passphrase for .*:\s*?$', re.M)] = 'ssh_key_unlock' - d[re.compile(r'^Bad passphrase, try again for .*:\s*?$', re.M)] = '' - d[re.compile(r'^sudo password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^SUDO password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^su password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^SU password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^PBRUN password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^pbrun password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^PFEXEC password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^pfexec password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^RUNAS password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^runas password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^SSH password:\s*?$', re.M)] = 'ssh_password' - d[re.compile(r'^Password:\s*?$', re.M)] = 'ssh_password' - d[re.compile(r'^Vault password:\s*?$', re.M)] = 'vault_password' + d[re.compile(r'Enter passphrase for .*:\s*?$', re.M)] = 'ssh_key_unlock' + d[re.compile(r'Bad passphrase, try again for .*:\s*?$', re.M)] = '' + d[re.compile(r'sudo password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'SUDO password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'su password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'SU password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'PBRUN password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'pbrun password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'PFEXEC password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'pfexec password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'RUNAS password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'runas password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'DZDO password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'dzdo password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'SSH password:\s*?$', re.M)] = 'ssh_password' + d[re.compile(r'Password:\s*?$', re.M)] = 'ssh_password' + d[re.compile(r'Vault password:\s*?$', re.M)] = 'vault_password' return d + def get_stdout_handle(self, instance): + ''' + Wrap stdout file object to capture events. 
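The leading '^' anchors were dropped from the prompt patterns above because pexpect searches a sliding window of output: a prompt preceded by other text on the same line never matches a ^-anchored pattern. For example:

    import re

    anchored = re.compile(r'^SSH password:\s*?$', re.M)
    relaxed = re.compile(r'SSH password:\s*?$', re.M)

    buf = u'debug1: Next authentication method: password\r\nhost1 SSH password: '
    assert relaxed.search(buf)        # matches mid-line, as pexpect needs
    assert not anchored.search(buf)   # '^' never lines up with the prompt
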
+ ''' + stdout_handle = super(RunJob, self).get_stdout_handle(instance) + + if getattr(settings, 'USE_CALLBACK_QUEUE', False): + dispatcher = CallbackQueueDispatcher() + + def job_event_callback(event_data): + event_data.setdefault('job_id', instance.id) + if 'uuid' in event_data: + cache_event = cache.get('ev-{}'.format(event_data['uuid']), None) + if cache_event is not None: + event_data.update(cache_event) + dispatcher.dispatch(event_data) + else: + def job_event_callback(event_data): + event_data.setdefault('job_id', instance.id) + JobEvent.create_from_data(**event_data) + + return OutputEventFilter(stdout_handle, job_event_callback) + def get_ssh_key_path(self, instance, **kwargs): ''' If using an SSH key, return the path for use by ssh-agent. @@ -1002,32 +1071,53 @@ class RunJob(BaseTask): private_data_files = kwargs.get('private_data_files', {}) if 'credential' in private_data_files: return private_data_files.get('credential') - elif 'network_credential' in private_data_files: - return private_data_files.get('network_credential') + ''' + Note: Don't inject network ssh key data into ssh-agent for network + credentials because the ansible modules do not yet support it. + We will want to add back in support when/if Ansible network modules + support this. + ''' + #elif 'network_credential' in private_data_files: + # return private_data_files.get('network_credential') + return '' def should_use_proot(self, instance, **kwargs): ''' Return whether this task should use proot. ''' - return getattr(tower_settings, 'AWX_PROOT_ENABLED', False) + return getattr(settings, 'AWX_PROOT_ENABLED', False) - def post_run_hook(self, job, **kwargs): + def pre_run_hook(self, job, **kwargs): + if job.project and job.project.scm_type: + local_project_sync = job.project.create_project_update(launch_type="sync") + local_project_sync.job_type = 'run' + local_project_sync.save() + # save the associated project update before calling run() so that a + # cancel() call on the job can cancel the project update + job = self.update_model(job.pk, project_update=local_project_sync) + + project_update_task = local_project_sync._get_task_class() + try: + project_update_task().run(local_project_sync.id) + job = self.update_model(job.pk, scm_revision=job.project.scm_revision) + except Exception: + job = self.update_model(job.pk, status='failed', + job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % + ('project_update', local_project_sync.name, local_project_sync.id))) + raise + + def post_run_hook(self, job, status, **kwargs): ''' Hook for actions to run after job/task has completed. ''' - super(RunJob, self).post_run_hook(job, **kwargs) + super(RunJob, self).post_run_hook(job, status, **kwargs) try: inventory = job.inventory except Inventory.DoesNotExist: pass else: update_inventory_computed_fields.delay(inventory.id, True) - # Update job event fields after job has completed (only when using REST - # API callback). - if not settings.CALLBACK_CONSUMER_PORT: - for job_event in job.job_events.order_by('pk'): - job_event.save(post_process=True) class RunProjectUpdate(BaseTask): @@ -1039,6 +1129,7 @@ class RunProjectUpdate(BaseTask): ''' Return SSH private key data needed for this project update. 
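get_stdout_handle now returns the raw file wrapped in an OutputEventFilter, so event extraction rides along with plain stdout capture. A simplified stand-in for that wrapper (the real OutputEventFilter also parses event markers and, as the hunk shows, the callback merges cached partial event data; the names below are hypothetical):

    class EventTee(object):
        # File-like wrapper: forwards writes to the real stdout file and
        # hands every chunk to a callback for event processing.
        def __init__(self, fileobj, event_callback):
            self._fileobj = fileobj
            self._event_callback = event_callback

        def write(self, data):
            self._fileobj.write(data)
            self._event_callback({'stdout': data})

        def close(self):
            self._fileobj.close()

        @property
        def name(self):
            # BaseTask.run() records result_stdout_file from this attribute.
            return self._fileobj.name
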
''' + handle, self.revision_path = tempfile.mkstemp() private_data = {} if project_update.credential: credential = project_update.credential @@ -1117,14 +1208,19 @@ class RunProjectUpdate(BaseTask): args.append('-v') scm_url, extra_vars = self._build_scm_url_extra_vars(project_update, **kwargs) - scm_branch = project_update.scm_branch or {'hg': 'tip'}.get(project_update.scm_type, 'HEAD') + if project_update.project.scm_revision and project_update.job_type == 'run': + scm_branch = project_update.project.scm_revision + else: + scm_branch = project_update.scm_branch or {'hg': 'tip'}.get(project_update.scm_type, 'HEAD') extra_vars.update({ 'project_path': project_update.get_project_path(check_if_exists=False), 'scm_type': project_update.scm_type, 'scm_url': scm_url, 'scm_branch': scm_branch, 'scm_clean': project_update.scm_clean, - 'scm_delete_on_update': project_update.scm_delete_on_update, + 'scm_delete_on_update': project_update.scm_delete_on_update if project_update.job_type == 'check' else False, + 'scm_full_checkout': True if project_update.job_type == 'run' else False, + 'scm_revision_output': self.revision_path }) args.extend(['-e', json.dumps(extra_vars)]) args.append('project_update.yml') @@ -1179,12 +1275,12 @@ class RunProjectUpdate(BaseTask): def get_password_prompts(self): d = super(RunProjectUpdate, self).get_password_prompts() - d[re.compile(r'^Username for.*:\s*?$', re.M)] = 'scm_username' - d[re.compile(r'^Password for.*:\s*?$', re.M)] = 'scm_password' - d[re.compile(r'^Password:\s*?$', re.M)] = 'scm_password' - d[re.compile(r'^\S+?@\S+?\'s\s+?password:\s*?$', re.M)] = 'scm_password' - d[re.compile(r'^Enter passphrase for .*:\s*?$', re.M)] = 'scm_key_unlock' - d[re.compile(r'^Bad passphrase, try again for .*:\s*?$', re.M)] = '' + d[re.compile(r'Username for.*:\s*?$', re.M)] = 'scm_username' + d[re.compile(r'Password for.*:\s*?$', re.M)] = 'scm_password' + d[re.compile(r'Password:\s*?$', re.M)] = 'scm_password' + d[re.compile(r'\S+?@\S+?\'s\s+?password:\s*?$', re.M)] = 'scm_password' + d[re.compile(r'Enter passphrase for .*:\s*?$', re.M)] = 'scm_key_unlock' + d[re.compile(r'Bad passphrase, try again for .*:\s*?$', re.M)] = '' # FIXME: Configure whether we should auto accept host keys? 
d[re.compile(r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$', re.M)] = 'yes' return d @@ -1198,6 +1294,32 @@ class RunProjectUpdate(BaseTask): ''' return kwargs.get('private_data_files', {}).get('scm_credential', '') + def get_stdout_handle(self, instance): + stdout_handle = super(RunProjectUpdate, self).get_stdout_handle(instance) + + def raw_callback(data): + instance_actual = ProjectUpdate.objects.get(pk=instance.pk) + instance_actual.result_stdout_text += data + instance_actual.save() + return OutputEventFilter(stdout_handle, raw_callback=raw_callback) + + def post_run_hook(self, instance, status, **kwargs): + if instance.job_type == 'check' and status not in ('failed', 'canceled',): + p = instance.project + fd = open(self.revision_path, 'r') + lines = fd.readlines() + if lines: + p.scm_revision = lines[0].strip() + p.playbook_files = p.playbooks + p.save() + else: + logger.error("Could not find scm revision in check") + try: + os.remove(self.revision_path) + except Exception, e: + logger.error("Failed removing revision tmp file: {}".format(e)) + + class RunInventoryUpdate(BaseTask): name = 'awx.main.tasks.run_inventory_update' @@ -1220,7 +1342,7 @@ class RunInventoryUpdate(BaseTask): project_name=credential.project) if credential.domain not in (None, ''): openstack_auth['domain_name'] = credential.domain - private_state = str(inventory_update.source_vars_dict.get('private', 'true')) + private_state = inventory_update.source_vars_dict.get('private', True) # Retrieve cache path from inventory update vars if available, # otherwise create a temporary cache path only for this update. cache = inventory_update.source_vars_dict.get('cache', {}) @@ -1282,10 +1404,22 @@ class RunInventoryUpdate(BaseTask): 'password')) # Allow custom options to vmware inventory script. elif inventory_update.source == 'vmware': - section = 'defaults' + credential = inventory_update.credential + + section = 'vmware' cp.add_section(section) + cp.set('vmware', 'cache_max_age', 0) + + cp.set('vmware', 'username', credential.username) + cp.set('vmware', 'password', decrypt_field(credential, 'password')) + cp.set('vmware', 'server', credential.host) + vmware_opts = dict(inventory_update.source_vars_dict.items()) - vmware_opts.setdefault('guests_only', 'True') + if inventory_update.instance_filters: + vmware_opts.setdefault('host_filters', inventory_update.instance_filters) + if inventory_update.group_by: + vmware_opts.setdefault('groupby_patterns', inventory_update.groupby_patterns) + for k,v in vmware_opts.items(): cp.set(section, k, unicode(v)) @@ -1306,7 +1440,9 @@ class RunInventoryUpdate(BaseTask): section = 'ansible' cp.add_section(section) - cp.set(section, 'group_patterns', '["{app}-{tier}-{color}", "{app}-{color}", "{app}", "{tier}"]') + cp.set(section, 'group_patterns', os.environ.get('SATELLITE6_GROUP_PATTERNS', [])) + cp.set(section, 'want_facts', True) + cp.set(section, 'group_prefix', os.environ.get('SATELLITE6_GROUP_PREFIX', 'foreman_')) section = 'cache' cp.add_section(section) @@ -1403,10 +1539,7 @@ class RunInventoryUpdate(BaseTask): # complain about not being able to determine its version number. 
env['PBR_VERSION'] = '0.5.21' elif inventory_update.source == 'vmware': - env['VMWARE_INI'] = cloud_credential - env['VMWARE_HOST'] = passwords.get('source_host', '') - env['VMWARE_USER'] = passwords.get('source_username', '') - env['VMWARE_PASSWORD'] = passwords.get('source_password', '') + env['VMWARE_INI_PATH'] = cloud_credential elif inventory_update.source == 'azure': env['AZURE_SUBSCRIPTION_ID'] = passwords.get('source_username', '') env['AZURE_CERT_PATH'] = cloud_credential @@ -1460,7 +1593,6 @@ class RunInventoryUpdate(BaseTask): if inventory_update.overwrite_vars: args.append('--overwrite-vars') args.append('--source') - # If this is a cloud-based inventory (e.g. from AWS, Rackspace, etc.) # then we need to set some extra flags based on settings in # Tower. @@ -1516,22 +1648,42 @@ class RunInventoryUpdate(BaseTask): os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) args.append(runpath) args.append("--custom") - # try: - # shutil.rmtree(runpath, True) - # except OSError: - # pass + self.custom_dir_path.append(runpath) verbosity = getattr(settings, 'INVENTORY_UPDATE_VERBOSITY', 1) args.append('-v%d' % verbosity) if settings.DEBUG: args.append('--traceback') return args + def get_stdout_handle(self, instance): + stdout_handle = super(RunInventoryUpdate, self).get_stdout_handle(instance) + + def raw_callback(data): + instance_actual = InventoryUpdate.objects.get(pk=instance.pk) + instance_actual.result_stdout_text += data + instance_actual.save() + return OutputEventFilter(stdout_handle, raw_callback=raw_callback) + def build_cwd(self, inventory_update, **kwargs): return self.get_path_to('..', 'plugins', 'inventory') def get_idle_timeout(self): return getattr(settings, 'INVENTORY_UPDATE_IDLE_TIMEOUT', None) + def pre_run_hook(self, instance, **kwargs): + self.custom_dir_path = [] + + def post_run_hook(self, instance, status, **kwargs): + print("In post run hook") + if self.custom_dir_path: + for p in self.custom_dir_path: + try: + shutil.rmtree(p, True) + except OSError: + pass + + + class RunAdHocCommand(BaseTask): ''' Celery task to run an ad hoc command using ansible. @@ -1584,10 +1736,13 @@ class RunAdHocCommand(BaseTask): env['INVENTORY_HOSTVARS'] = str(True) env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_dir env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1' + env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal' # Hardcoded by Ansible for ad-hoc commands (either minimal or oneline). env['REST_API_URL'] = settings.INTERNAL_API_URL env['REST_API_TOKEN'] = ad_hoc_command.task_auth_token or '' - env['CALLBACK_CONSUMER_PORT'] = str(settings.CALLBACK_CONSUMER_PORT) + env['CALLBACK_QUEUE'] = settings.CALLBACK_QUEUE + env['CALLBACK_CONNECTION'] = settings.BROKER_URL env['ANSIBLE_SFTP_BATCH_MODE'] = 'False' + env['CACHE'] = settings.CACHES['default']['LOCATION'] if 'LOCATION' in settings.CACHES['default'] else '' if getattr(settings, 'JOB_CALLBACK_DEBUG', False): env['JOB_CALLBACK_DEBUG'] = '2' elif settings.DEBUG: @@ -1628,27 +1783,14 @@ class RunAdHocCommand(BaseTask): args.append('--ask-pass') # We only specify sudo/su user and password if explicitly given by the # credential. Credential should never specify both sudo and su. 
- try: - if Version(kwargs['ansible_version']) < Version('1.9'): - if become_method and become_method == "sudo" and become_username != "": - args.extend(['-U', become_username]) - if become_method and become_method == "sudo" and "become_password" in kwargs.get("passwords", {}): - args.append("--ask-sudo-pass") - if become_method and become_method == "su" and become_username != "": - args.extend(['-R', become_username]) - if become_method and become_method == "su" and "become_password" in kwargs.get("passwords", {}): - args.append("--ask-su-pass") - else: - if ad_hoc_command.become_enabled: - args.append('--become') - if become_method: - args.extend(['--become-method', become_method]) - if become_username: - args.extend(['--become-user', become_username]) - if 'become_password' in kwargs.get('passwords', {}): - args.append('--ask-become-pass') - except ValueError: - pass + if ad_hoc_command.become_enabled: + args.append('--become') + if become_method: + args.extend(['--become-method', become_method]) + if become_username: + args.extend(['--become-user', become_username]) + if 'become_password' in kwargs.get('passwords', {}): + args.append('--ask-become-pass') if ad_hoc_command.forks: # FIXME: Max limit? args.append('--forks=%d' % ad_hoc_command.forks) @@ -1676,22 +1818,47 @@ class RunAdHocCommand(BaseTask): def get_password_prompts(self): d = super(RunAdHocCommand, self).get_password_prompts() - d[re.compile(r'^Enter passphrase for .*:\s*?$', re.M)] = 'ssh_key_unlock' - d[re.compile(r'^Bad passphrase, try again for .*:\s*?$', re.M)] = '' - d[re.compile(r'^sudo password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^SUDO password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^su password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^SU password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^PBRUN password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^pbrun password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^PFEXEC password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^pfexec password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^RUNAS password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^runas password.*:\s*?$', re.M)] = 'become_password' - d[re.compile(r'^SSH password:\s*?$', re.M)] = 'ssh_password' - d[re.compile(r'^Password:\s*?$', re.M)] = 'ssh_password' + d[re.compile(r'Enter passphrase for .*:\s*?$', re.M)] = 'ssh_key_unlock' + d[re.compile(r'Bad passphrase, try again for .*:\s*?$', re.M)] = '' + d[re.compile(r'sudo password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'SUDO password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'su password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'SU password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'PBRUN password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'pbrun password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'PFEXEC password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'pfexec password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'RUNAS password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'runas password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'DZDO password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'dzdo password.*:\s*?$', re.M)] = 'become_password' + d[re.compile(r'SSH password:\s*?$', re.M)] = 'ssh_password' + d[re.compile(r'Password:\s*?$', re.M)] = 'ssh_password' return d + def get_stdout_handle(self, instance): + ''' + Wrap stdout file object to 
capture events. + ''' + stdout_handle = super(RunAdHocCommand, self).get_stdout_handle(instance) + + if getattr(settings, 'USE_CALLBACK_QUEUE', False): + dispatcher = CallbackQueueDispatcher() + + def ad_hoc_command_event_callback(event_data): + event_data.setdefault('ad_hoc_command_id', instance.id) + if 'uuid' in event_data: + cache_event = cache.get('ev-{}'.format(event_data['uuid']), None) + if cache_event is not None: + event_data.update(cache_event) + dispatcher.dispatch(event_data) + else: + def ad_hoc_command_event_callback(event_data): + event_data.setdefault('ad_hoc_command_id', instance.id) + AdHocCommandEvent.create_from_data(**event_data) + + return OutputEventFilter(stdout_handle, ad_hoc_command_event_callback) + def get_ssh_key_path(self, instance, **kwargs): ''' If using an SSH key, return the path for use by ssh-agent. @@ -1702,13 +1869,13 @@ class RunAdHocCommand(BaseTask): ''' Return whether this task should use proot. ''' - return getattr(tower_settings, 'AWX_PROOT_ENABLED', False) + return getattr(settings, 'AWX_PROOT_ENABLED', False) - def post_run_hook(self, ad_hoc_command, **kwargs): + def post_run_hook(self, ad_hoc_command, status, **kwargs): ''' Hook for actions to run after ad hoc command has completed. ''' - super(RunAdHocCommand, self).post_run_hook(ad_hoc_command, **kwargs) + super(RunAdHocCommand, self).post_run_hook(ad_hoc_command, status, **kwargs) class RunSystemJob(BaseTask): @@ -1722,8 +1889,12 @@ class RunSystemJob(BaseTask): json_vars = json.loads(system_job.extra_vars) if 'days' in json_vars and system_job.job_type != 'cleanup_facts': args.extend(['--days', str(json_vars.get('days', 60))]) + if 'dry_run' in json_vars and json_vars['dry_run'] and system_job.job_type != 'cleanup_facts': + args.extend(['--dry-run']) if system_job.job_type == 'cleanup_jobs': - args.extend(['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands']) + args.extend(['--jobs', '--project-updates', '--inventory-updates', + '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', + '--notifications']) if system_job.job_type == 'cleanup_facts': if 'older_than' in json_vars: args.extend(['--older_than', str(json_vars['older_than'])]) @@ -1733,6 +1904,15 @@ class RunSystemJob(BaseTask): logger.error("Failed to parse system job: " + str(e)) return args + def get_stdout_handle(self, instance): + stdout_handle = super(RunSystemJob, self).get_stdout_handle(instance) + + def raw_callback(data): + instance_actual = SystemJob.objects.get(pk=instance.pk) + instance_actual.result_stdout_text += data + instance_actual.save() + return OutputEventFilter(stdout_handle, raw_callback=raw_callback) + def build_env(self, instance, **kwargs): env = super(RunSystemJob, self).build_env(instance, **kwargs) diff --git a/awx/main/tests/URI.py b/awx/main/tests/URI.py index d04da03436..54fd1d8b06 100644 --- a/awx/main/tests/URI.py +++ b/awx/main/tests/URI.py @@ -1,6 +1,8 @@ # Helps with test cases. # Save all components of a uri (i.e. scheme, username, password, etc.) 
so that # when we construct a uri string and decompose it, we can verify the decomposition + + class URI(object): DEFAULTS = { 'scheme' : 'http', diff --git a/awx/main/tests/base.py b/awx/main/tests/base.py index cd3754b23f..ca5f8237d2 100644 --- a/awx/main/tests/base.py +++ b/awx/main/tests/base.py @@ -12,8 +12,6 @@ import sys import tempfile import time import urllib -from multiprocessing import Process -from subprocess import Popen import re import mock @@ -24,16 +22,15 @@ import yaml import django.test from django.conf import settings, UserSettingsHolder from django.contrib.auth.models import User +from django.core.cache import cache from django.test.client import Client from django.test.utils import override_settings from django.utils.encoding import force_text # AWX from awx.main.models import * # noqa -from awx.main.management.commands.run_callback_receiver import CallbackReceiver -from awx.main.management.commands.run_task_system import run_taskmanager +from awx.main.task_engine import TaskEnhancer from awx.main.utils import get_ansible_version -from awx.main.task_engine import TaskEngager as LicenseWriter from awx.sso.backends import LDAPSettings from awx.main.tests.URI import URI # noqa @@ -44,9 +41,10 @@ TEST_PLAYBOOK = '''- hosts: mygroup command: test 1 = 1 ''' + class QueueTestMixin(object): def start_queue(self): - self.start_redis() + self.start_rabbit() receiver = CallbackReceiver() self.queue_process = Process(target=receiver.run_subscriber, args=(False,)) @@ -55,18 +53,20 @@ class QueueTestMixin(object): def terminate_queue(self): if hasattr(self, 'queue_process'): self.queue_process.terminate() - self.stop_redis() + self.stop_rabbit() - def start_redis(self): + def start_rabbit(self): if not getattr(self, 'redis_process', None): # Centos 6.5 redis is runnable by non-root user but is not in a normal users path by default env = dict(os.environ) env['PATH'] = '%s:/usr/sbin/' % env['PATH'] - self.redis_process = Popen('echo "port 16379" | redis-server - > /dev/null', + env['RABBITMQ_NODENAME'] = 'towerunittest' + env['RABBITMQ_NODE_PORT'] = '55672' + self.redis_process = Popen('rabbitmq-server > /dev/null', shell=True, executable='/bin/bash', env=env) - def stop_redis(self): + def stop_rabbit(self): if getattr(self, 'redis_process', None): self.redis_process.kill() self.redis_process = None @@ -84,14 +84,18 @@ class QueueStartStopTestMixin(QueueTestMixin): super(QueueStartStopTestMixin, self).tearDown() self.terminate_queue() + class MockCommonlySlowTestMixin(object): def __init__(self, *args, **kwargs): from awx.api import generics mock.patch.object(generics, 'get_view_description', return_value=None).start() super(MockCommonlySlowTestMixin, self).__init__(*args, **kwargs) + ansible_version = get_ansible_version() -class BaseTestMixin(QueueTestMixin, MockCommonlySlowTestMixin): + + +class BaseTestMixin(MockCommonlySlowTestMixin): ''' Mixin with shared code for use by all test cases. ''' @@ -129,17 +133,9 @@ class BaseTestMixin(QueueTestMixin, MockCommonlySlowTestMixin): # Set flag so that task chain works with unit tests. settings.CELERY_UNIT_TEST = True settings.SYSTEM_UUID='00000000-0000-0000-0000-000000000000' - settings.BROKER_URL='redis://localhost:16379/' + settings.BROKER_URL='redis://localhost:55672/' + settings.CALLBACK_QUEUE = 'callback_tasks_unit' - # Create unique random consumer and queue ports for zeromq callback. 
- if settings.CALLBACK_CONSUMER_PORT: - callback_port = random.randint(55700, 55799) - settings.CALLBACK_CONSUMER_PORT = 'tcp://127.0.0.1:%d' % callback_port - os.environ['CALLBACK_CONSUMER_PORT'] = settings.CALLBACK_CONSUMER_PORT - callback_queue_path = '/tmp/callback_receiver_test_%d.ipc' % callback_port - self._temp_paths.append(callback_queue_path) - settings.CALLBACK_QUEUE_PORT = 'ipc://%s' % callback_queue_path - settings.TASK_COMMAND_PORT = 'ipc:///tmp/task_command_receiver_%d.ipc' % callback_port # Disable socket notifications for unit tests. settings.SOCKETIO_NOTIFICATION_PORT = None # Make temp job status directory for unit tests. @@ -152,6 +148,7 @@ class BaseTestMixin(QueueTestMixin, MockCommonlySlowTestMixin): 'LOCATION': 'unittests' } } + cache.clear() self._start_time = time.time() def tearDown(self): @@ -181,34 +178,26 @@ class BaseTestMixin(QueueTestMixin, MockCommonlySlowTestMixin): rnd_str = '____' + str(random.randint(1, 9999999)) return __name__ + '-generated-' + string + rnd_str - def create_test_license_file(self, instance_count=10000, license_date=int(time.time() + 3600), features=None): - writer = LicenseWriter( + def create_test_license_file(self, instance_count=10000, license_date=int(time.time() + 3600), features={}): + settings.LICENSE = TaskEnhancer( company_name='AWX', contact_name='AWX Admin', contact_email='awx@example.com', license_date=license_date, instance_count=instance_count, license_type='enterprise', - features=features) - handle, license_path = tempfile.mkstemp(suffix='.json') - os.close(handle) - writer.write_file(license_path) - self._temp_paths.append(license_path) - os.environ['AWX_LICENSE_FILE'] = license_path + features=features, + ).enhance() def create_basic_license_file(self, instance_count=100, license_date=int(time.time() + 3600)): - writer = LicenseWriter( + settings.LICENSE = TaskEnhancer( company_name='AWX', contact_name='AWX Admin', contact_email='awx@example.com', license_date=license_date, instance_count=instance_count, - license_type='basic') - handle, license_path = tempfile.mkstemp(suffix='.json') - os.close(handle) - writer.write_file(license_path) - self._temp_paths.append(license_path) - os.environ['AWX_LICENSE_FILE'] = license_path + license_type='basic', + ).enhance() def create_expired_license_file(self, instance_count=1000, grace_period=False): license_date = time.time() - 1 @@ -392,7 +381,7 @@ class BaseTestMixin(QueueTestMixin, MockCommonlySlowTestMixin): return cred def setup_instances(self): - instance = Instance(uuid=settings.SYSTEM_UUID, primary=True, hostname='127.0.0.1') + instance = Instance(uuid=settings.SYSTEM_UUID, hostname='127.0.0.1') instance.save() def setup_users(self, just_super_user=False): @@ -692,28 +681,19 @@ class BaseTestMixin(QueueTestMixin, MockCommonlySlowTestMixin): job.result_traceback) - def start_taskmanager(self, command_port): - self.start_redis() - self.taskmanager_process = Process(target=run_taskmanager, - args=(command_port,)) - self.taskmanager_process.start() - - def terminate_taskmanager(self): - if hasattr(self, 'taskmanager_process'): - self.taskmanager_process.terminate() - self.stop_redis() - class BaseTest(BaseTestMixin, django.test.TestCase): ''' Base class for unit tests. ''' + class BaseTransactionTest(BaseTestMixin, django.test.TransactionTestCase): ''' Base class for tests requiring transactions (or where the test database needs to be accessed by subprocesses). 
''' + @override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, ANSIBLE_TRANSPORT='local') @@ -725,11 +705,12 @@ class BaseLiveServerTest(BaseTestMixin, django.test.LiveServerTestCase): super(BaseLiveServerTest, self).setUp() settings.INTERNAL_API_URL = self.live_server_url + @override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, ANSIBLE_TRANSPORT='local', DEBUG=True) -class BaseJobExecutionTest(QueueStartStopTestMixin, BaseLiveServerTest): +class BaseJobExecutionTest(BaseLiveServerTest): ''' Base class for celery task tests. ''' diff --git a/awx/main/tests/conftest.py b/awx/main/tests/conftest.py index 1f21905fb9..1fdb3d1ca2 100644 --- a/awx/main/tests/conftest.py +++ b/awx/main/tests/conftest.py @@ -1,5 +1,6 @@ # Python +import time import pytest from awx.main.tests.factories import ( @@ -7,24 +8,30 @@ from awx.main.tests.factories import ( create_job_template, create_notification_template, create_survey_spec, + create_workflow_job_template, ) + @pytest.fixture def job_template_factory(): return create_job_template + @pytest.fixture def organization_factory(): return create_organization + @pytest.fixture def notification_template_factory(): return create_notification_template + @pytest.fixture def survey_spec_factory(): return create_survey_spec + @pytest.fixture def job_template_with_survey_passwords_factory(job_template_factory): def rf(persisted): @@ -36,6 +43,35 @@ def job_template_with_survey_passwords_factory(job_template_factory): return objects.job_template return rf + +@pytest.fixture +def job_with_secret_key_unit(job_with_secret_key_factory): + return job_with_secret_key_factory(persisted=False) + + +@pytest.fixture +def workflow_job_template_factory(): + return create_workflow_job_template + + +@pytest.fixture +def get_ssh_version(mocker): + return mocker.patch('awx.main.tasks.get_ssh_version', return_value='OpenSSH_6.9p1, LibreSSL 2.1.8') + + @pytest.fixture def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory): return job_template_with_survey_passwords_factory(persisted=False) + + +@pytest.fixture +def enterprise_license(): + from awx.main.task_engine import TaskEnhancer + return TaskEnhancer( + company_name='AWX', + contact_name='AWX Admin', + contact_email='awx@example.com', + license_date=int(time.time() + 3600), + instance_count=10000, + license_type='enterprise', + ).enhance() diff --git a/awx/main/tests/data/ssh.py b/awx/main/tests/data/ssh.py index c2a9a29223..b3f5e8b675 100644 --- a/awx/main/tests/data/ssh.py +++ b/awx/main/tests/data/ssh.py @@ -1,3 +1,31 @@ +TEST_SSH_RSA1_KEY_DATA = '''-----BEGIN PRIVATE KEY----- +uFZFyag7VVqI+q/oGnQu+wj/pMi5ox+Qz5L3W0D745DzwgDXOeObAfNlr9NtIKbn +sZ5E0+rYB4Q/U0CYr5juNJQV1dbxq2Em1160axboe2QbvX6wE6Sm6wW9b9cr+PoF +MoYQebUnCY0ObrLbrRugSfZc17lyxK0ZGRgPXKhpMg6Ecv8XpvhjUYU9Esyqfuco +/p26Q140/HsHeHYNma0dQHCEjMr/qEzOY1qguHj+hRf3SARtM9Q+YNgpxchcDDVS +O+n+8Ljd/p82bpEJwxmpXealeWbI6gB9/R6wcCL+ZyCZpnHJd/NJ809Vtu47ZdDi +E6jvqS/3AQhuQKhJlLSDIzezB2VKKrHwOvHkg/+uLoCqHN34Gk6Qio7x69SvXy88 +a7q9D1l/Zx60o08FyZyqlo7l0l/r8EY+36cuI/lvAvfxc5VHVEOvKseUjFRBiCv9 +MkKNxaScoYsPwY7SIS6gD93tg3eM5pA0nfMfya9u1+uq/QCM1gNG3mm6Zd8YG4c/ +Dx4bmsj8cp5ni/Ffl/sKzKYq1THunJEFGXOZRibdxk/Fal3SQrRAwy7CgLQL8SMh +IWqcFm25OtSOP1r1LE25t5pQsMdmp0IP2fEF0t/pXPm1ZfrTurPMqpo4FGm2hkki +U3sH/o6nrkSOjklOLWlwtTkkL4dWPlNwc8OYj8zFizXJkAfv1spzhv3lRouNkw4N +Mm22W7us2f3Ob0H5C07k26h6VuXX+0AybD4tIIcUXCLoNTqA0HvqhKpEuHu3Ck10 +RaB8xHTxgwdhGVaNHMfy9B9l4tNs3Tb5k0LyeRRGVDhWCFo6axYULYebkj+hFLLY 
++JE5RzPDFpTf1xbuT+e56H/lLFCUdDu0bn+D0W4ifXaVFegak4r6O4B53CbMqr+R +t6qDPKLUIuVJXK0J6Ay6XgmheXJGbgKh4OtDsc06gsTCE1nY4f/Z82AQahPBfTtF +J2z+NHdsLPn//HlxspGQtmLpuS7Wx0HYXZ+kPRSiE/vmITw85R2u8JSHQicVNN4C +2rlUo15TIU3tTx+WUIrHKHPidUNNotRb2p9n9FoSidU6upKnQHAT/JNv/zcvaia3 +Bhl/wagheWTDnFKSmJ4HlKxplM/32h6MfHqsMVOl4F6eZWKaKgSgN8doXyFJo+sc +yAC6S0gJlD2gQI24iTI4Du1+UGh2MGb69eChvi5mbbdesaZrlR1dRqZpHG+6ob4H +nYLndRvobXS5l6pgGTDRYoUgSbQe21a7Uf3soGl5jHqLWc1zEPwrxV7Wr31mApr6 +8VtGZcLSr0691Q1NLO3eIfuhbMN2mssX/Sl4t+4BibaucNIMfmhKQi8uHtwAXb47 ++TMFlG2EQhZULFM4fLdF1vaizInU3cBk8lsz8i71tDc+5VQTEwoEB7Gksy/XZWEt +6SGHxXUDtNYa+G2O+sQhgqBjLIkVTV6KJOpvNZM+s8Vzv8qoFnD7isKBBrRvF1bP +GOXEG1jd7nSR0WSwcMCHGOrFEELDQPw3k5jqEdPFgVODoZPr+drZVnVz5SAGBk5Y +wsCNaDW+1dABYFlqRTepP5rrSu9wHnRAZ3ZGv+DHoGqenIC5IBR0sQ== +-----END PRIVATE KEY-----''' + TEST_SSH_KEY_DATA = '''-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAyQ8F5bbgjHvk4SZJsKI9OmJKMFxZqRhvx4LaqjLTKbBwRBsY 1/C00NPiZn70dKbeyV7RNVZxuzM6yd3D3lwTdbDu/eJ0x72t3ch+TdLt/aenyy10 diff --git a/awx/main/tests/factories/__init__.py b/awx/main/tests/factories/__init__.py index 81a1144a52..4c039c63b9 100644 --- a/awx/main/tests/factories/__init__.py +++ b/awx/main/tests/factories/__init__.py @@ -3,6 +3,7 @@ from .tower import ( create_job_template, create_notification_template, create_survey_spec, + create_workflow_job_template, ) from .exc import ( @@ -14,5 +15,6 @@ __all__ = [ 'create_job_template', 'create_notification_template', 'create_survey_spec', + 'create_workflow_job_template', 'NotUnique', ] diff --git a/awx/main/tests/factories/fixtures.py b/awx/main/tests/factories/fixtures.py index feca114410..f7c7054f32 100644 --- a/awx/main/tests/factories/fixtures.py +++ b/awx/main/tests/factories/fixtures.py @@ -13,6 +13,10 @@ from awx.main.models import ( Credential, Inventory, Label, + WorkflowJobTemplate, + WorkflowJob, + WorkflowJobNode, + WorkflowJobTemplateNode, ) # mk methods should create only a single object of a single type. 
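
The fixtures.py hunks that follow keep to the convention stated in the comment above: every mk_* helper builds exactly one object and touches the database only when persisted=True, so pure unit tests can exercise unsaved model instances. A minimal sketch of that shape, modeled on mk_project below (the helper name is illustrative):

```python
from awx.main.models import Project


def mk_minimal_project(name, organization=None, persisted=True):
    # Build exactly one object; never create related objects here.
    project = Project(name=name, description='{}-description'.format(name))
    if organization is not None:
        project.organization = organization
    if persisted:
        # Only now is a database needed; persisted=False callers get a
        # fully-populated but unsaved instance.
        project.save()
    return project
```
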
@@ -21,11 +25,12 @@ from awx.main.models import ( # persisted=False # + def mk_instance(persisted=True): if not persisted: raise RuntimeError('creating an Instance requires persisted=True') from django.conf import settings - return Instance.objects.get_or_create(uuid=settings.SYSTEM_UUID, primary=True, hostname="instance.example.org") + return Instance.objects.get_or_create(uuid=settings.SYSTEM_UUID, hostname="instance.example.org") def mk_organization(name, description=None, persisted=True): @@ -70,7 +75,8 @@ def mk_user(name, is_superuser=False, organization=None, team=None, persisted=Tr def mk_project(name, organization=None, description=None, persisted=True): description = description or '{}-description'.format(name) - project = Project(name=name, description=description) + project = Project(name=name, description=description, + playbook_files=['helloworld.yml', 'alt-helloworld.yml']) if organization is not None: project.organization = organization if persisted: @@ -130,7 +136,7 @@ def mk_job_template(name, job_type='run', extra_vars = json.dumps(extra_vars) jt = JobTemplate(name=name, job_type=job_type, extra_vars=extra_vars, - playbook='mocked') + playbook='helloworld.yml') jt.inventory = inventory if jt.inventory is None: @@ -152,3 +158,64 @@ def mk_job_template(name, job_type='run', if persisted: jt.save() return jt + + +def mk_workflow_job(status='new', workflow_job_template=None, extra_vars={}, + persisted=True): + job = WorkflowJob(status=status, extra_vars=json.dumps(extra_vars)) + + job.workflow_job_template = workflow_job_template + + if persisted: + job.save() + return job + + +def mk_workflow_job_template(name, extra_vars='', spec=None, organization=None, persisted=True): + if extra_vars: + extra_vars = json.dumps(extra_vars) + + wfjt = WorkflowJobTemplate(name=name, extra_vars=extra_vars, organization=organization) + + wfjt.survey_spec = spec + if wfjt.survey_spec: + wfjt.survey_enabled = True + + if persisted: + wfjt.save() + return wfjt + + +def mk_workflow_job_template_node(workflow_job_template=None, + unified_job_template=None, + success_nodes=None, + failure_nodes=None, + always_nodes=None, + persisted=True): + workflow_node = WorkflowJobTemplateNode(workflow_job_template=workflow_job_template, + unified_job_template=unified_job_template, + success_nodes=success_nodes, + failure_nodes=failure_nodes, + always_nodes=always_nodes) + if persisted: + workflow_node.save() + return workflow_node + + +def mk_workflow_job_node(unified_job_template=None, + success_nodes=None, + failure_nodes=None, + always_nodes=None, + workflow_job=None, + job=None, + persisted=True): + workflow_node = WorkflowJobNode(unified_job_template=unified_job_template, + success_nodes=success_nodes, + failure_nodes=failure_nodes, + always_nodes=always_nodes, + workflow_job=workflow_job, + job=job) + if persisted: + workflow_node.save() + return workflow_node + diff --git a/awx/main/tests/factories/objects.py b/awx/main/tests/factories/objects.py index 9f739cc9cf..7de49d998c 100644 --- a/awx/main/tests/factories/objects.py +++ b/awx/main/tests/factories/objects.py @@ -2,6 +2,7 @@ from collections import namedtuple from .exc import NotUnique + def generate_objects(artifacts, kwargs): '''generate_objects takes a list of artifacts that are supported by a create function and compares it to the kwargs passed in to the create diff --git a/awx/main/tests/factories/tower.py b/awx/main/tests/factories/tower.py index 8116ec83bf..975adde43b 100644 --- a/awx/main/tests/factories/tower.py +++ 
b/awx/main/tests/factories/tower.py @@ -9,6 +9,7 @@ from awx.main.models import ( Inventory, Job, Label, + WorkflowJobTemplateNode, ) from .objects import ( @@ -28,6 +29,7 @@ from .fixtures import ( mk_project, mk_label, mk_notification_template, + mk_workflow_job_template, ) @@ -59,7 +61,7 @@ def apply_roles(roles, objects, persisted): return None if not persisted: - raise RuntimeError('roles can not be used when persisted=False') + raise RuntimeError('roles cannot be used when persisted=False') for role in roles: obj_role, sep, member_role = role.partition(':') @@ -85,6 +87,7 @@ else: raise RuntimeError('unable to add non-user {} for members list of {}'.format(member_str, obj_str)) + def generate_users(organization, teams, superuser, persisted, **kwargs): '''generate_users evaluates a mixed list of User objects and strings. If a string is encountered a user with that username is created and added to the lookup dict. @@ -110,6 +113,7 @@ users[p1] = mk_user(p1, organization=organization, team=None, is_superuser=superuser, persisted=persisted) return users + def generate_teams(organization, persisted, **kwargs): '''generate_teams evaluates a mixed list of Team objects and strings. If a string is encountered a team with that string name is created and added to the lookup dict. @@ -124,6 +128,7 @@ teams[t] = mk_team(t, organization=organization, persisted=persisted) return teams + def create_survey_spec(variables=None, default_type='integer', required=True): ''' Returns a valid survey spec for a job template, based on the input @@ -174,6 +179,7 @@ # or encapsulated by specific factory fixtures in a conftest # + def create_job_template(name, roles=None, persisted=True, **kwargs): Objects = generate_objects(["job_template", "jobs", "organization", @@ -258,6 +264,7 @@ organization=org, survey=spec,) + def create_organization(name, roles=None, persisted=True, **kwargs): Objects = generate_objects(["organization", "teams", "users", @@ -317,6 +324,7 @@ notification_templates=_Mapped(notification_templates), inventories=_Mapped(inventories)) + def create_notification_template(name, roles=None, persisted=True, **kwargs): Objects = generate_objects(["notification_template", "organization", @@ -343,3 +351,73 @@ users=_Mapped(users), superusers=_Mapped(superusers), teams=teams) + + +def generate_workflow_job_template_nodes(workflow_job_template, + persisted, + **kwargs): + + workflow_job_template_nodes = kwargs.get('workflow_job_template_nodes', []) + if len(workflow_job_template_nodes) > 0 and not persisted: + raise RuntimeError('workflow job template nodes cannot be used when persisted=False') + + new_nodes = [] + + for i, node in enumerate(workflow_job_template_nodes): + new_node = WorkflowJobTemplateNode(workflow_job_template=workflow_job_template, + unified_job_template=node['unified_job_template'], + id=i) + if persisted: + new_node.save() + new_nodes.append(new_node) + + node_types = ['success_nodes', 'failure_nodes', 'always_nodes'] + for node_type in node_types: + for i, new_node in enumerate(new_nodes): + if node_type not in
workflow_job_template_nodes[i]: + continue + for related_index in workflow_job_template_nodes[i][node_type]: + getattr(new_node, node_type).add(new_nodes[related_index]) + + +# TODO: Implement survey and jobs +def create_workflow_job_template(name, organization=None, persisted=True, **kwargs): + Objects = generate_objects(["workflow_job_template", + "workflow_job_template_nodes", + "survey",], kwargs) + + spec = None + #jobs = None + + extra_vars = kwargs.get('extra_vars', '') + + if 'survey' in kwargs: + spec = create_survey_spec(kwargs['survey']) + + wfjt = mk_workflow_job_template(name, + organization=organization, + spec=spec, + extra_vars=extra_vars, + persisted=persisted) + + + + workflow_jt_nodes = generate_workflow_job_template_nodes(wfjt, + persisted, + workflow_job_template_nodes=kwargs.get('workflow_job_template_nodes', [])) + + ''' + if 'jobs' in kwargs: + for i in kwargs['jobs']: + if type(i) is Job: + jobs[i.pk] = i + else: + # TODO: Create the job + raise RuntimeError("Currently, only already created jobs are supported") + ''' + return Objects(workflow_job_template=wfjt, + #jobs=jobs, + workflow_job_template_nodes=workflow_jt_nodes, + survey=spec,) + + diff --git a/awx/ui/client/tests/multi-select-list/multi-select-list.controller-test.js b/awx/main/tests/functional/__init__.py similarity index 100% rename from awx/ui/client/tests/multi-select-list/multi-select-list.controller-test.js rename to awx/main/tests/functional/__init__.py diff --git a/awx/main/tests/functional/api/test_activity_streams.py b/awx/main/tests/functional/api/test_activity_streams.py index f1c42cdd9d..c368b5f631 100644 --- a/awx/main/tests/functional/api/test_activity_streams.py +++ b/awx/main/tests/functional/api/test_activity_streams.py @@ -6,28 +6,31 @@ from awx.main.models.activity_stream import ActivityStream from awx.main.access import ActivityStreamAccess from django.core.urlresolvers import reverse -from django.conf import settings -def mock_feature_enabled(feature, bypass_database=None): + +def mock_feature_enabled(feature): return True + @pytest.fixture def activity_stream_entry(organization, org_admin): return ActivityStream.objects.filter(organization__pk=organization.pk, user=org_admin, operation='associate').first() -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_get_activity_stream_list(monkeypatch, organization, get, user): +def test_get_activity_stream_list(monkeypatch, organization, get, user, settings): + settings.ACTIVITY_STREAM_ENABLED = True url = reverse('api:activity_stream_list') response = get(url, user('admin', True)) assert response.status_code == 200 -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_basic_fields(monkeypatch, organization, get, user): +def test_basic_fields(monkeypatch, organization, get, user, settings): + settings.ACTIVITY_STREAM_ENABLED = True u = user('admin', True) activity_stream = ActivityStream.objects.filter(organization=organization).latest('pk') activity_stream.actor = u @@ -44,10 +47,11 @@ def test_basic_fields(monkeypatch, organization, get, user): assert 'organization' in response.data['summary_fields'] assert response.data['summary_fields']['organization'][0]['name'] == 'test-org' -@pytest.mark.skipif(not 
getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_middleware_actor_added(monkeypatch, post, get, user): +def test_middleware_actor_added(monkeypatch, post, get, user, settings): + settings.ACTIVITY_STREAM_ENABLED = True u = user('admin-poster', True) url = reverse('api:organization_list') @@ -66,45 +70,47 @@ def test_middleware_actor_added(monkeypatch, post, get, user): assert response.status_code == 200 assert response.data['summary_fields']['actor']['username'] == 'admin-poster' -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_rbac_stream_resource_roles(activity_stream_entry, organization, org_admin): - +def test_rbac_stream_resource_roles(activity_stream_entry, organization, org_admin, settings): + settings.ACTIVITY_STREAM_ENABLED = True assert activity_stream_entry.user.first() == org_admin assert activity_stream_entry.organization.first() == organization assert activity_stream_entry.role.first() == organization.admin_role assert activity_stream_entry.object_relationship_type == 'awx.main.models.organization.Organization.admin_role' -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_rbac_stream_user_roles(activity_stream_entry, organization, org_admin): - +def test_rbac_stream_user_roles(activity_stream_entry, organization, org_admin, settings): + settings.ACTIVITY_STREAM_ENABLED = True assert activity_stream_entry.user.first() == org_admin assert activity_stream_entry.organization.first() == organization assert activity_stream_entry.role.first() == organization.admin_role assert activity_stream_entry.object_relationship_type == 'awx.main.models.organization.Organization.admin_role' + @pytest.mark.django_db @pytest.mark.activity_stream_access -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) -def test_stream_access_cant_change(activity_stream_entry, organization, org_admin): +def test_stream_access_cant_change(activity_stream_entry, organization, org_admin, settings): + settings.ACTIVITY_STREAM_ENABLED = True access = ActivityStreamAccess(org_admin) - # These should always return false because the activity stream can not be edited + # These should always return false because the activity stream cannot be edited assert not access.can_add(activity_stream_entry) assert not access.can_change(activity_stream_entry, {'organization': None}) assert not access.can_delete(activity_stream_entry) + @pytest.mark.django_db @pytest.mark.activity_stream_access -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) def test_stream_queryset_hides_shows_items( activity_stream_entry, organization, user, org_admin, project, org_credential, inventory, label, deploy_jobtemplate, - notification_template, group, host, team): + notification_template, group, host, team, settings): + settings.ACTIVITY_STREAM_ENABLED = True # this user is not in any organizations and should 
not see any resource activity no_access_user = user('no-access-user', False) queryset = ActivityStreamAccess(no_access_user).get_queryset() @@ -132,6 +138,7 @@ def test_stream_queryset_hides_shows_items( assert queryset.filter(team__pk=team.pk, operation='create').count() == 1 assert queryset.filter(notification_template__pk=notification_template.pk, operation='create').count() == 1 + @pytest.mark.django_db @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) def test_stream_user_direct_role_updates(get, post, organization_factory): diff --git a/awx/main/tests/functional/api/test_adhoc.py b/awx/main/tests/functional/api/test_adhoc.py index e7029b0c79..02e30afb12 100644 --- a/awx/main/tests/functional/api/test_adhoc.py +++ b/awx/main/tests/functional/api/test_adhoc.py @@ -4,7 +4,6 @@ import pytest from django.core.urlresolvers import reverse - """ def run_test_ad_hoc_command(self, **kwargs): # Post to list to start a new ad hoc command. @@ -23,6 +22,7 @@ from django.core.urlresolvers import reverse return self.post(url, data, expect=expect) """ + @pytest.fixture def post_adhoc(post, inventory, machine_credential): def f(url, data, user, expect=201): @@ -46,7 +46,6 @@ def post_adhoc(post, inventory, machine_credential): return f - @pytest.mark.django_db def test_admin_post_ad_hoc_command_list(admin, post_adhoc, inventory, machine_credential): res = post_adhoc(reverse('api:ad_hoc_command_list'), {}, admin, expect=201) @@ -65,35 +64,42 @@ def test_admin_post_ad_hoc_command_list(admin, post_adhoc, inventory, machine_cr def test_empty_post_403(admin, post): post(reverse('api:ad_hoc_command_list'), {}, admin, expect=400) + @pytest.mark.django_db def test_empty_put_405(admin, put): put(reverse('api:ad_hoc_command_list'), {}, admin, expect=405) + @pytest.mark.django_db def test_empty_patch_405(admin, patch): patch(reverse('api:ad_hoc_command_list'), {}, admin, expect=405) + @pytest.mark.django_db def test_empty_delete_405(admin, delete): delete(reverse('api:ad_hoc_command_list'), admin, expect=405) + @pytest.mark.django_db def test_user_post_ad_hoc_command_list(alice, post_adhoc, inventory, machine_credential): inventory.adhoc_role.members.add(alice) machine_credential.use_role.members.add(alice) post_adhoc(reverse('api:ad_hoc_command_list'), {}, alice, expect=201) + @pytest.mark.django_db def test_user_post_ad_hoc_command_list_xfail(alice, post_adhoc, inventory, machine_credential): inventory.read_role.members.add(alice) # just read access? no dice. 
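
The test_activity_streams.py changes above all make the same move: the old @pytest.mark.skipif read ACTIVITY_STREAM_ENABLED from django.conf.settings at import time, freezing the decision before any per-test override could apply, while the rewritten tests accept pytest-django's settings fixture and flip the flag inside the test, scoped to that test alone. Condensed to one hypothetical case (get and user are the request fixtures these modules already use):

```python
import mock
import pytest
from django.core.urlresolvers import reverse


def mock_feature_enabled(feature):
    return True


@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled)
@pytest.mark.django_db
def test_activity_stream_list_with_flag_on(get, user, settings):
    settings.ACTIVITY_STREAM_ENABLED = True  # reverted automatically at teardown
    response = get(reverse('api:activity_stream_list'), user('admin', True))
    assert response.status_code == 200
```
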
machine_credential.use_role.members.add(alice) post_adhoc(reverse('api:ad_hoc_command_list'), {}, alice, expect=403) + @pytest.mark.django_db def test_user_post_ad_hoc_command_list_without_creds(alice, post_adhoc, inventory, machine_credential): inventory.adhoc_role.members.add(alice) post_adhoc(reverse('api:ad_hoc_command_list'), {}, alice, expect=403) + @pytest.mark.django_db def test_user_post_ad_hoc_command_list_without_inventory(alice, post_adhoc, inventory, machine_credential): machine_credential.use_role.members.add(alice) @@ -134,15 +140,17 @@ def test_get_inventory_ad_hoc_command_list(admin, alice, post_adhoc, get, invent def test_bad_data1(admin, post_adhoc): post_adhoc(reverse('api:ad_hoc_command_list'), {'module_name': 'command', 'module_args': None}, admin, expect=400) + @pytest.mark.django_db def test_bad_data2(admin, post_adhoc): post_adhoc(reverse('api:ad_hoc_command_list'), {'job_type': 'baddata'}, admin, expect=400) + @pytest.mark.django_db def test_bad_data3(admin, post_adhoc): post_adhoc(reverse('api:ad_hoc_command_list'), {'verbosity': -1}, admin, expect=400) + @pytest.mark.django_db def test_bad_data4(admin, post_adhoc): post_adhoc(reverse('api:ad_hoc_command_list'), {'forks': -1}, admin, expect=400) - diff --git a/awx/main/tests/functional/api/test_create_attach_views.py b/awx/main/tests/functional/api/test_create_attach_views.py index 5399356a21..b80cb4fa2c 100644 --- a/awx/main/tests/functional/api/test_create_attach_views.py +++ b/awx/main/tests/functional/api/test_create_attach_views.py @@ -16,6 +16,7 @@ def test_user_role_view_access(rando, inventory, mocker, post): inventory.admin_role, rando, 'members', data, skip_sub_obj_read_check=False) + @pytest.mark.django_db def test_team_role_view_access(rando, team, inventory, mocker, post): "Assure correct access method is called when assigning teams new roles" @@ -30,6 +31,7 @@ def test_team_role_view_access(rando, team, inventory, mocker, post): inventory.admin_role, team, 'member_role.parents', data, skip_sub_obj_read_check=False) + @pytest.mark.django_db def test_role_team_view_access(rando, team, inventory, mocker, post): """Assure that /role/N/teams/ enforces the same permission restrictions @@ -43,3 +45,18 @@ def test_role_team_view_access(rando, team, inventory, mocker, post): mock_access.assert_called_once_with( inventory.admin_role, team, 'member_role.parents', data, skip_sub_obj_read_check=False) + + +@pytest.mark.django_db +def test_org_associate_with_junk_data(rando, admin_user, organization, post): + """ + Assure that post-hoc enforcement of auditor role + will turn off if the action is an association + """ + user_data = {'is_system_auditor': True, 'id': rando.pk} + post(url=reverse('api:organization_users_list', args=(organization.pk,)), + data=user_data, expect=204, user=admin_user) + # assure user is now an org member + assert rando in organization.member_role + # assure that this did not also make them a system auditor + assert not rando.is_system_auditor diff --git a/awx/main/tests/functional/api/test_credential.py b/awx/main/tests/functional/api/test_credential.py index f1e7a2b1dd..8f596cdac9 100644 --- a/awx/main/tests/functional/api/test_credential.py +++ b/awx/main/tests/functional/api/test_credential.py @@ -8,6 +8,7 @@ from django.core.urlresolvers import reverse # user credential creation # + @pytest.mark.django_db def test_create_user_credential_via_credentials_list(post, get, alice): response = post(reverse('api:credential_list'), { @@ -21,6 +22,7 @@ def 
test_create_user_credential_via_credentials_list(post, get, alice): assert response.status_code == 200 assert response.data['count'] == 1 + @pytest.mark.django_db def test_credential_validation_error_with_bad_user(post, admin): response = post(reverse('api:credential_list'), { @@ -31,6 +33,7 @@ assert response.status_code == 400 assert response.data['user'][0] == 'Incorrect type. Expected pk value, received unicode.' + @pytest.mark.django_db def test_create_user_credential_via_user_credentials_list(post, get, alice): response = post(reverse('api:user_credentials_list', args=(alice.pk,)), { @@ -44,6 +47,7 @@ assert response.status_code == 200 assert response.data['count'] == 1 + @pytest.mark.django_db def test_create_user_credential_via_credentials_list_xfail(post, alice, bob): response = post(reverse('api:credential_list'), { @@ -53,6 +57,7 @@ }, alice) assert response.status_code == 403 + @pytest.mark.django_db def test_create_user_credential_via_user_credentials_list_xfail(post, alice, bob): response = post(reverse('api:user_credentials_list', args=(bob.pk,)), { @@ -67,6 +72,7 @@ # team credential creation # + @pytest.mark.django_db def test_create_team_credential(post, get, team, organization, org_admin, team_member): response = post(reverse('api:credential_list'), { @@ -83,6 +89,7 @@ # Assure that credential's organization is implicitly set to team's org assert response.data['results'][0]['summary_fields']['organization']['id'] == team.organization.id + @pytest.mark.django_db def test_create_team_credential_via_team_credentials_list(post, get, team, org_admin, team_member): response = post(reverse('api:team_credentials_list', args=(team.pk,)), { @@ -96,6 +103,7 @@ assert response.status_code == 200 assert response.data['count'] == 1 + @pytest.mark.django_db def test_create_team_credential_by_urelated_user_xfail(post, team, organization, alice, team_member): response = post(reverse('api:credential_list'), { @@ -106,6 +114,7 @@ }, alice) assert response.status_code == 403 + @pytest.mark.django_db def test_create_team_credential_by_team_member_xfail(post, team, organization, alice, team_member): # Members can't add credentials, only org admins.. for now?
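
Like the post_adhoc fixture earlier in this diff, a fixture that returns a closure keeps each RBAC case to a single call: the defaults live in the fixture and the tests pass only the interesting overrides plus the expected status. The post_credential name below is hypothetical (no such fixture exists in this diff); the post fixture and payload keys follow the credential tests above.

```python
import pytest
from django.core.urlresolvers import reverse


@pytest.fixture
def post_credential(post, organization):
    def f(data, user, expect=201):
        # Defaults mirror the hand-built payloads in the tests above.
        payload = {'name': 'some-cred', 'kind': 'ssh',
                   'organization': organization.id}
        payload.update(data)
        response = post(reverse('api:credential_list'), payload, user)
        assert response.status_code == expect
        return response
    return f
```

A 403 case then collapses to post_credential({}, org_member, expect=403).
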
@@ -122,6 +131,7 @@ def test_create_team_credential_by_team_member_xfail(post, team, organization, a # Permission granting # + @pytest.mark.django_db def test_grant_org_credential_to_org_user_through_role_users(post, credential, organization, org_admin, org_member): credential.organization = organization @@ -131,6 +141,7 @@ def test_grant_org_credential_to_org_user_through_role_users(post, credential, o }, org_admin) assert response.status_code == 204 + @pytest.mark.django_db def test_grant_org_credential_to_org_user_through_user_roles(post, credential, organization, org_admin, org_member): credential.organization = organization @@ -140,6 +151,7 @@ def test_grant_org_credential_to_org_user_through_user_roles(post, credential, o }, org_admin) assert response.status_code == 204 + @pytest.mark.django_db def test_grant_org_credential_to_non_org_user_through_role_users(post, credential, organization, org_admin, alice): credential.organization = organization @@ -149,6 +161,7 @@ def test_grant_org_credential_to_non_org_user_through_role_users(post, credentia }, org_admin) assert response.status_code == 400 + @pytest.mark.django_db def test_grant_org_credential_to_non_org_user_through_user_roles(post, credential, organization, org_admin, alice): credential.organization = organization @@ -158,6 +171,7 @@ def test_grant_org_credential_to_non_org_user_through_user_roles(post, credentia }, org_admin) assert response.status_code == 400 + @pytest.mark.django_db def test_grant_private_credential_to_user_through_role_users(post, credential, alice, bob): # normal users can't do this @@ -167,6 +181,7 @@ def test_grant_private_credential_to_user_through_role_users(post, credential, a }, alice) assert response.status_code == 400 + @pytest.mark.django_db def test_grant_private_credential_to_org_user_through_role_users(post, credential, org_admin, org_member): # org admins can't either @@ -176,6 +191,7 @@ def test_grant_private_credential_to_org_user_through_role_users(post, credentia }, org_admin) assert response.status_code == 400 + @pytest.mark.django_db def test_sa_grant_private_credential_to_user_through_role_users(post, credential, admin, bob): # but system admins can @@ -184,6 +200,7 @@ def test_sa_grant_private_credential_to_user_through_role_users(post, credential }, admin) assert response.status_code == 204 + @pytest.mark.django_db def test_grant_private_credential_to_user_through_user_roles(post, credential, alice, bob): # normal users can't do this @@ -193,6 +210,7 @@ def test_grant_private_credential_to_user_through_user_roles(post, credential, a }, alice) assert response.status_code == 400 + @pytest.mark.django_db def test_grant_private_credential_to_org_user_through_user_roles(post, credential, org_admin, org_member): # org admins can't either @@ -202,6 +220,7 @@ def test_grant_private_credential_to_org_user_through_user_roles(post, credentia }, org_admin) assert response.status_code == 400 + @pytest.mark.django_db def test_sa_grant_private_credential_to_user_through_user_roles(post, credential, admin, bob): # but system admins can @@ -210,6 +229,7 @@ def test_sa_grant_private_credential_to_user_through_user_roles(post, credential }, admin) assert response.status_code == 204 + @pytest.mark.django_db def test_grant_org_credential_to_team_through_role_teams(post, credential, organization, org_admin, org_auditor, team): assert org_auditor not in credential.read_role @@ -221,6 +241,7 @@ def test_grant_org_credential_to_team_through_role_teams(post, credential, organ assert response.status_code == 204 
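
Every grant test in this stretch reduces to one idiom: POST {'id': <pk>} to a role's users or teams sublist, expect 204 on success (or 400 where the ownership rules forbid the grant), then confirm membership with the in operator the RBAC role objects support. One hypothetical case, condensed:

```python
import pytest
from django.core.urlresolvers import reverse


@pytest.mark.django_db
def test_grant_read_role_to_org_member(post, credential, organization,
                                       org_admin, org_member):
    credential.organization = organization
    credential.save()
    url = reverse('api:role_users_list', args=(credential.read_role.id,))
    # Association is just a POST of the member's id to the role sublist.
    response = post(url, {'id': org_member.id}, org_admin)
    assert response.status_code == 204
    assert org_member in credential.read_role
```
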
assert org_auditor in credential.read_role + @pytest.mark.django_db def test_grant_org_credential_to_team_through_team_roles(post, credential, organization, org_admin, org_auditor, team): assert org_auditor not in credential.read_role @@ -232,6 +253,7 @@ def test_grant_org_credential_to_team_through_team_roles(post, credential, organ assert response.status_code == 204 assert org_auditor in credential.read_role + @pytest.mark.django_db def test_sa_grant_private_credential_to_team_through_role_teams(post, credential, admin, team): # not even a system admin can grant a private cred to a team though @@ -240,6 +262,7 @@ def test_sa_grant_private_credential_to_team_through_role_teams(post, credential }, admin) assert response.status_code == 400 + @pytest.mark.django_db def test_sa_grant_private_credential_to_team_through_team_roles(post, credential, admin, team): # not even a system admin can grant a private cred to a team though @@ -249,12 +272,11 @@ def test_sa_grant_private_credential_to_team_through_team_roles(post, credential assert response.status_code == 400 - - # # organization credentials # + @pytest.mark.django_db def test_create_org_credential_as_not_admin(post, organization, org_member): response = post(reverse('api:credential_list'), { @@ -264,6 +286,7 @@ def test_create_org_credential_as_not_admin(post, organization, org_member): }, org_member) assert response.status_code == 403 + @pytest.mark.django_db def test_create_org_credential_as_admin(post, organization, org_admin): response = post(reverse('api:credential_list'), { @@ -273,6 +296,7 @@ def test_create_org_credential_as_admin(post, organization, org_admin): }, org_admin) assert response.status_code == 201 + @pytest.mark.django_db def test_credential_detail(post, get, organization, org_admin): response = post(reverse('api:credential_list'), { @@ -288,6 +312,7 @@ def test_credential_detail(post, get, organization, org_admin): related_fields = response.data['related'] assert 'organization' in related_fields + @pytest.mark.django_db def test_list_created_org_credentials(post, get, organization, org_admin, org_member): response = post(reverse('api:credential_list'), { @@ -314,42 +339,11 @@ def test_list_created_org_credentials(post, get, organization, org_admin, org_me assert response.data['count'] == 0 -@pytest.mark.django_db -def test_cant_change_organization(patch, credential, organization, org_admin): - credential.organization = organization - credential.save() - - response = patch(reverse('api:credential_detail', args=(organization.id,)), { - 'name': 'Some new name', - }, org_admin) - assert response.status_code == 200 - - response = patch(reverse('api:credential_detail', args=(organization.id,)), { - 'name': 'Some new name2', - 'organization': organization.id, # fine for it to be the same - }, org_admin) - assert response.status_code == 200 - - response = patch(reverse('api:credential_detail', args=(organization.id,)), { - 'name': 'Some new name3', - 'organization': None - }, org_admin) - assert response.status_code == 403 - -@pytest.mark.django_db -def test_cant_add_organization(patch, credential, organization, org_admin): - assert credential.organization is None - response = patch(reverse('api:credential_detail', args=(organization.id,)), { - 'name': 'Some new name', - 'organization': organization.id - }, org_admin) - assert response.status_code == 403 - - # # Openstack Credentials # + @pytest.mark.django_db def test_openstack_create_ok(post, organization, admin): data = { @@ -364,6 +358,7 @@ def 
test_openstack_create_ok(post, organization, admin): response = post(reverse('api:credential_list'), data, admin) assert response.status_code == 201 + @pytest.mark.django_db def test_openstack_create_fail_required_fields(post, organization, admin): data = { @@ -383,6 +378,7 @@ def test_openstack_create_fail_required_fields(post, organization, admin): # misc xfail conditions # + @pytest.mark.django_db def test_create_credential_missing_user_team_org_xfail(post, admin): # Must specify one of user, team, or organization @@ -391,4 +387,3 @@ def test_create_credential_missing_user_team_org_xfail(post, admin): 'username': 'someusername', }, admin) assert response.status_code == 400 - diff --git a/awx/main/tests/functional/api/test_fact_versions.py b/awx/main/tests/functional/api/test_fact_versions.py index fa42802077..b0b861525d 100644 --- a/awx/main/tests/functional/api/test_fact_versions.py +++ b/awx/main/tests/functional/api/test_fact_versions.py @@ -13,12 +13,15 @@ from awx.main.utils import timestamp_apiformat from django.core.urlresolvers import reverse from django.utils import timezone -def mock_feature_enabled(feature, bypass_database=None): + +def mock_feature_enabled(feature): return True -def mock_feature_disabled(feature, bypass_database=None): + +def mock_feature_disabled(feature): return False + def setup_common(hosts, fact_scans, get, user, epoch=timezone.now(), get_params={}, host_count=1): hosts = hosts(host_count=host_count) fact_scans(fact_scans=3, timestamp_epoch=epoch) @@ -28,6 +31,7 @@ def setup_common(hosts, fact_scans, get, user, epoch=timezone.now(), get_params= return (hosts[0], response) + def check_url(url1_full, fact_known, module): url1_split = urlparse.urlsplit(url1_full) url1 = url1_split.path @@ -37,7 +41,11 @@ def check_url(url1_full, fact_known, module): url2_params = [('module', module), ('datetime', timestamp_apiformat(fact_known.timestamp))] assert url1 == url2 - assert urllib.urlencode(url1_params) == urllib.urlencode(url2_params) + # Sort before comparing because urlencode can't be trusted + url1_params_sorted = sorted(url1_params, key=lambda val: val[0]) + url2_params_sorted = sorted(url2_params, key=lambda val: val[0]) + assert urllib.urlencode(url1_params_sorted) == urllib.urlencode(url2_params_sorted) + def check_response_facts(facts_known, response): for i, fact_known in enumerate(facts_known): @@ -45,10 +53,12 @@ def check_response_facts(facts_known, response): assert timestamp_apiformat(fact_known.timestamp) == response.data['results'][i]['timestamp'] check_url(response.data['results'][i]['related']['fact_view'], fact_known, fact_known.module) + def check_system_tracking_feature_forbidden(response): assert 402 == response.status_code assert 'Your license does not permit use of system tracking.' 
== response.data['detail'] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled) @pytest.mark.django_db @pytest.mark.license_feature @@ -59,6 +69,7 @@ def test_system_tracking_license_get(hosts, get, user): check_system_tracking_feature_forbidden(response) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled) @pytest.mark.django_db @pytest.mark.license_feature @@ -69,6 +80,7 @@ def test_system_tracking_license_options(hosts, options, user): check_system_tracking_feature_forbidden(response) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db @pytest.mark.license_feature @@ -82,9 +94,10 @@ def test_no_facts_db(hosts, get, user): } assert response_expected == response.data + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_basic_fields(hosts, fact_scans, get, user): +def test_basic_fields(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() search = { 'from': epoch, @@ -98,10 +111,11 @@ def test_basic_fields(hosts, fact_scans, get, user): assert 'timestamp' in results[0] assert 'module' in results[0] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db @pytest.mark.license_feature -def test_basic_options_fields(hosts, fact_scans, options, user): +def test_basic_options_fields(hosts, fact_scans, options, user, monkeypatch_jsonbfield_get_db_prep_save): hosts = hosts(host_count=1) fact_scans(fact_scans=1) @@ -114,9 +128,10 @@ def test_basic_options_fields(hosts, fact_scans, options, user): assert ("services", "Services") in response.data['actions']['GET']['module']['choices'] assert ("packages", "Packages") in response.data['actions']['GET']['module']['choices'] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_related_fact_view(hosts, fact_scans, get, user): +def test_related_fact_view(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() (host, response) = setup_common(hosts, fact_scans, get, user, epoch=epoch) @@ -127,9 +142,10 @@ def test_related_fact_view(hosts, fact_scans, get, user): for i, fact_known in enumerate(facts_known): check_url(response.data['results'][i]['related']['fact_view'], fact_known, fact_known.module) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_multiple_hosts(hosts, fact_scans, get, user): +def test_multiple_hosts(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() (host, response) = setup_common(hosts, fact_scans, get, user, epoch=epoch, host_count=3) @@ -140,9 +156,10 @@ def test_multiple_hosts(hosts, fact_scans, get, user): for i, fact_known in enumerate(facts_known): check_url(response.data['results'][i]['related']['fact_view'], fact_known, fact_known.module) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_param_to_from(hosts, fact_scans, get, user): +def test_param_to_from(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() search = { 'from': epoch - timedelta(days=10), @@ -156,9 +173,10 @@ def test_param_to_from(hosts, fact_scans, get, user): check_response_facts(facts_known, response) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_param_module(hosts, fact_scans, get, user): +def 
test_param_module(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() search = { 'module': 'packages', @@ -171,9 +189,10 @@ def test_param_module(hosts, fact_scans, get, user): check_response_facts(facts_known, response) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_param_from(hosts, fact_scans, get, user): +def test_param_from(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() search = { 'from': epoch + timedelta(days=1), @@ -186,9 +205,10 @@ def test_param_from(hosts, fact_scans, get, user): check_response_facts(facts_known, response) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_param_to(hosts, fact_scans, get, user): +def test_param_to(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() search = { 'to': epoch + timedelta(days=1), @@ -201,6 +221,7 @@ def test_param_to(hosts, fact_scans, get, user): check_response_facts(facts_known, response) + def _test_user_access_control(hosts, fact_scans, get, user_obj, team_obj): hosts = hosts(host_count=1) fact_scans(fact_scans=1) @@ -211,25 +232,28 @@ def _test_user_access_control(hosts, fact_scans, get, user_obj, team_obj): response = get(url, user_obj) return response + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db -def test_normal_user_403(hosts, fact_scans, get, user, team): +def test_normal_user_403(hosts, fact_scans, get, user, team, monkeypatch_jsonbfield_get_db_prep_save): user_bob = user('bob', False) response = _test_user_access_control(hosts, fact_scans, get, user_bob, team) assert 403 == response.status_code assert "You do not have permission to perform this action." 
== response.data['detail'] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db -def test_super_user_ok(hosts, fact_scans, get, user, team): +def test_super_user_ok(hosts, fact_scans, get, user, team, monkeypatch_jsonbfield_get_db_prep_save): user_super = user('bob', True) response = _test_user_access_control(hosts, fact_scans, get, user_super, team) assert 200 == response.status_code + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db @@ -241,10 +265,11 @@ def test_user_admin_ok(organization, hosts, fact_scans, get, user, team): assert 200 == response.status_code + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db -def test_user_admin_403(organization, organizations, hosts, fact_scans, get, user, team): +def test_user_admin_403(organization, organizations, hosts, fact_scans, get, user, team, monkeypatch_jsonbfield_get_db_prep_save): user_admin = user('johnson', False) org2 = organizations(1) org2[0].admin_role.members.add(user_admin) @@ -252,4 +277,3 @@ def test_user_admin_403(organization, organizations, hosts, fact_scans, get, use response = _test_user_access_control(hosts, fact_scans, get, user_admin, team) assert 403 == response.status_code - diff --git a/awx/main/tests/functional/api/test_fact_view.py b/awx/main/tests/functional/api/test_fact_view.py index be5367ba52..de0bfbb804 100644 --- a/awx/main/tests/functional/api/test_fact_view.py +++ b/awx/main/tests/functional/api/test_fact_view.py @@ -6,12 +6,15 @@ from awx.main.utils import timestamp_apiformat from django.core.urlresolvers import reverse from django.utils import timezone -def mock_feature_enabled(feature, bypass_database=None): + +def mock_feature_enabled(feature): return True -def mock_feature_disabled(feature, bypass_database=None): + +def mock_feature_disabled(feature): return False + # TODO: Consider making the fact_scan() fixture a Class, instead of a function, and move this method into it def find_fact(facts, host_id, module_name, timestamp): for f in facts: @@ -19,6 +22,7 @@ def find_fact(facts, host_id, module_name, timestamp): return f raise RuntimeError('fact <%s, %s, %s> not found in %s', (host_id, module_name, timestamp, facts)) + def setup_common(hosts, fact_scans, get, user, epoch=timezone.now(), module_name='ansible', get_params={}): hosts = hosts(host_count=1) facts = fact_scans(fact_scans=1, timestamp_epoch=epoch) @@ -29,10 +33,12 @@ def setup_common(hosts, fact_scans, get, user, epoch=timezone.now(), module_name fact_known = find_fact(facts, hosts[0].id, module_name, epoch) return (fact_known, response) + def check_system_tracking_feature_forbidden(response): assert 402 == response.status_code assert 'Your license does not permit use of system tracking.' 
== response.data['detail'] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled) @pytest.mark.django_db @pytest.mark.license_feature @@ -43,6 +49,7 @@ def test_system_tracking_license_get(hosts, get, user): check_system_tracking_feature_forbidden(response) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_disabled) @pytest.mark.django_db @pytest.mark.license_feature @@ -53,6 +60,7 @@ def test_system_tracking_license_options(hosts, options, user): check_system_tracking_feature_forbidden(response) + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db def test_no_fact_found(hosts, get, user): @@ -66,9 +74,10 @@ def test_no_fact_found(hosts, get, user): assert 404 == response.status_code assert expected_response == response.data + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_basic_fields(hosts, fact_scans, get, user): +def test_basic_fields(hosts, fact_scans, get, user, monkeypatch_jsonbfield_get_db_prep_save): hosts = hosts(host_count=1) fact_scans(fact_scans=1) @@ -88,9 +97,10 @@ def test_basic_fields(hosts, fact_scans, get, user): assert 'host' in response.data['related'] assert reverse('api:host_detail', args=(hosts[0].pk,)) == response.data['related']['host'] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_content(hosts, fact_scans, get, user, fact_ansible_json): +def test_content(hosts, fact_scans, get, user, fact_ansible_json, monkeypatch_jsonbfield_get_db_prep_save): (fact_known, response) = setup_common(hosts, fact_scans, get, user) assert fact_known.host_id == response.data['host'] @@ -98,6 +108,7 @@ def test_content(hosts, fact_scans, get, user, fact_ansible_json): assert timestamp_apiformat(fact_known.timestamp) == response.data['timestamp'] assert fact_known.module == response.data['module'] + def _test_search_by_module(hosts, fact_scans, get, user, fact_json, module_name): params = { 'module': module_name @@ -108,19 +119,22 @@ def _test_search_by_module(hosts, fact_scans, get, user, fact_json, module_name) assert timestamp_apiformat(fact_known.timestamp) == response.data['timestamp'] assert module_name == response.data['module'] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_search_by_module_packages(hosts, fact_scans, get, user, fact_packages_json): +def test_search_by_module_packages(hosts, fact_scans, get, user, fact_packages_json, monkeypatch_jsonbfield_get_db_prep_save): _test_search_by_module(hosts, fact_scans, get, user, fact_packages_json, 'packages') -@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) -@pytest.mark.django_db -def test_search_by_module_services(hosts, fact_scans, get, user, fact_services_json): - _test_search_by_module(hosts, fact_scans, get, user, fact_services_json, 'services') @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_search_by_timestamp_and_module(hosts, fact_scans, get, user, fact_packages_json): +def test_search_by_module_services(hosts, fact_scans, get, user, fact_services_json, monkeypatch_jsonbfield_get_db_prep_save): + _test_search_by_module(hosts, fact_scans, get, user, fact_services_json, 'services') + + +@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) +@pytest.mark.django_db +def test_search_by_timestamp_and_module(hosts, fact_scans, get, user, fact_packages_json, 
monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() module_name = 'packages' @@ -128,6 +142,7 @@ def test_search_by_timestamp_and_module(hosts, fact_scans, get, user, fact_packa assert fact_known.id == response.data['id'] + def _test_user_access_control(hosts, fact_scans, get, user_obj, team_obj): hosts = hosts(host_count=1) fact_scans(fact_scans=1) @@ -138,29 +153,32 @@ def _test_user_access_control(hosts, fact_scans, get, user_obj, team_obj): response = get(url, user_obj) return response + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db -def test_normal_user_403(hosts, fact_scans, get, user, team): +def test_normal_user_403(hosts, fact_scans, get, user, team, monkeypatch_jsonbfield_get_db_prep_save): user_bob = user('bob', False) response = _test_user_access_control(hosts, fact_scans, get, user_bob, team) assert 403 == response.status_code assert "You do not have permission to perform this action." == response.data['detail'] + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db -def test_super_user_ok(hosts, fact_scans, get, user, team): +def test_super_user_ok(hosts, fact_scans, get, user, team, monkeypatch_jsonbfield_get_db_prep_save): user_super = user('bob', True) response = _test_user_access_control(hosts, fact_scans, get, user_super, team) assert 200 == response.status_code + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db -def test_user_admin_ok(organization, hosts, fact_scans, get, user, team): +def test_user_admin_ok(organization, hosts, fact_scans, get, user, team, monkeypatch_jsonbfield_get_db_prep_save): user_admin = user('johnson', False) organization.admin_role.members.add(user_admin) @@ -168,10 +186,11 @@ def test_user_admin_ok(organization, hosts, fact_scans, get, user, team): assert 200 == response.status_code + @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.ac @pytest.mark.django_db -def test_user_admin_403(organization, organizations, hosts, fact_scans, get, user, team): +def test_user_admin_403(organization, organizations, hosts, fact_scans, get, user, team, monkeypatch_jsonbfield_get_db_prep_save): user_admin = user('johnson', False) org2 = organizations(1) org2[0].admin_role.members.add(user_admin) @@ -179,4 +198,3 @@ def test_user_admin_403(organization, organizations, hosts, fact_scans, get, use response = _test_user_access_control(hosts, fact_scans, get, user_admin, team) assert 403 == response.status_code - diff --git a/awx/main/tests/functional/api/test_host_detail.py b/awx/main/tests/functional/api/test_host_detail.py index 79213490b0..acfe73f4b5 100644 --- a/awx/main/tests/functional/api/test_host_detail.py +++ b/awx/main/tests/functional/api/test_host_detail.py @@ -4,6 +4,7 @@ import pytest from django.core.urlresolvers import reverse + @pytest.mark.django_db def test_basic_fields(hosts, fact_scans, get, user): hosts = hosts(host_count=1) diff --git a/awx/main/tests/functional/api/test_inventory.py b/awx/main/tests/functional/api/test_inventory.py index 38e06dad07..925d7352fd 100644 --- a/awx/main/tests/functional/api/test_inventory.py +++ b/awx/main/tests/functional/api/test_inventory.py @@ -2,6 +2,7 @@ import pytest from django.core.urlresolvers import reverse + @pytest.mark.django_db def test_inventory_source_notification_on_cloud_only(get, post, group_factory, user, notification_template): u = user('admin', True) @@ -48,6 
+49,7 @@ def test_create_inventory_group(post, inventory, alice, role_field, expected_sta getattr(inventory, role_field).members.add(alice) post(reverse('api:inventory_groups_list', args=(inventory.id,)), data, alice, expect=expected_status_code) + @pytest.mark.parametrize("role_field,expected_status_code", [ (None, 403), ('admin_role', 201), @@ -106,6 +108,7 @@ def test_create_inventory_host(post, inventory, alice, role_field, expected_stat getattr(inventory, role_field).members.add(alice) post(reverse('api:inventory_hosts_list', args=(inventory.id,)), data, alice, expect=expected_status_code) + @pytest.mark.parametrize("role_field,expected_status_code", [ (None, 403), ('admin_role', 201), @@ -149,6 +152,7 @@ def test_delete_inventory_host(delete, host, alice, role_field, expected_status_ getattr(host.inventory, role_field).members.add(alice) delete(reverse('api:host_detail', args=(host.id,)), alice, expect=expected_status_code) + @pytest.mark.parametrize("role_field,expected_status_code", [ (None, 403), ('admin_role', 202), diff --git a/awx/main/tests/functional/api/test_job_runtime_params.py b/awx/main/tests/functional/api/test_job_runtime_params.py index af7d133c4c..af8bd659fd 100644 --- a/awx/main/tests/functional/api/test_job_runtime_params.py +++ b/awx/main/tests/functional/api/test_job_runtime_params.py @@ -8,6 +8,7 @@ from awx.main.models.jobs import Job, JobTemplate from django.core.urlresolvers import reverse + @pytest.fixture def runtime_data(organization): cred_obj = Credential.objects.create(name='runtime-cred', kind='ssh', username='test_user2', password='pas4word2') @@ -22,10 +23,12 @@ def runtime_data(organization): credential=cred_obj.pk, ) + @pytest.fixture def job_with_links(machine_credential, inventory): return Job.objects.create(name='existing-job', credential=machine_credential, inventory=inventory) + @pytest.fixture def job_template_prompts(project, inventory, machine_credential): def rf(on_off): @@ -45,6 +48,7 @@ def job_template_prompts(project, inventory, machine_credential): ) return rf + @pytest.fixture def job_template_prompts_null(project): return JobTemplate.objects.create( @@ -62,6 +66,7 @@ def job_template_prompts_null(project): ask_credential_on_launch=True, ) + @pytest.fixture def bad_scan_JT(job_template_prompts): job_template = job_template_prompts(True) @@ -69,6 +74,7 @@ def bad_scan_JT(job_template_prompts): job_template.save() return job_template + # End of setup, tests start here @pytest.mark.django_db @pytest.mark.job_runtime_vars @@ -77,17 +83,19 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data) - with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job', return_value=mock_job): + with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): with mocker.patch('awx.api.serializers.JobSerializer.to_representation'): response = post(reverse('api:job_template_launch', args=[job_template.pk]), runtime_data, admin_user, expect=201) + assert JobTemplate.create_unified_job.called + assert JobTemplate.create_unified_job.call_args == ({'extra_vars':{}},) # Check that job is serialized correctly job_id = response.data['job'] assert job_id == 968 # If job is created with no arguments, it will inherit JT attributes - mock_job.signal_start.assert_called_once_with(extra_vars={}) + mock_job.signal_start.assert_called_once() # Check that response tells us what things were ignored assert 'job_launch_var' in 
response.data['ignored_fields']['extra_vars'] @@ -98,6 +106,7 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad assert 'job_tags' in response.data['ignored_fields'] assert 'skip_tags' in response.data['ignored_fields'] + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_accept_prompted_vars(runtime_data, job_template_prompts, post, admin_user, mocker): @@ -105,15 +114,18 @@ def test_job_accept_prompted_vars(runtime_data, job_template_prompts, post, admi mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data) - with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job', return_value=mock_job): + with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): with mocker.patch('awx.api.serializers.JobSerializer.to_representation'): response = post(reverse('api:job_template_launch', args=[job_template.pk]), runtime_data, admin_user, expect=201) + assert JobTemplate.create_unified_job.called + assert JobTemplate.create_unified_job.call_args == (runtime_data,) job_id = response.data['job'] assert job_id == 968 - mock_job.signal_start.assert_called_once_with(**runtime_data) + mock_job.signal_start.assert_called_once() + @pytest.mark.django_db @pytest.mark.job_runtime_vars @@ -122,12 +134,15 @@ def test_job_accept_null_tags(job_template_prompts, post, admin_user, mocker): mock_job = mocker.MagicMock(spec=Job, id=968) - with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job', return_value=mock_job): + with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): with mocker.patch('awx.api.serializers.JobSerializer.to_representation'): post(reverse('api:job_template_launch', args=[job_template.pk]), {'job_tags': '', 'skip_tags': ''}, admin_user, expect=201) + assert JobTemplate.create_unified_job.called + assert JobTemplate.create_unified_job.call_args == ({'job_tags':'', 'skip_tags':''},) + + mock_job.signal_start.assert_called_once() - mock_job.signal_start.assert_called_once_with(job_tags='', skip_tags='') @pytest.mark.django_db @pytest.mark.job_runtime_vars @@ -145,14 +160,17 @@ def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data) - with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job', return_value=mock_job): + with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): with mocker.patch('awx.api.serializers.JobSerializer.to_representation'): response = post(reverse('api:job_template_launch', args=[job_template.pk]), runtime_data, rando, expect=201) + assert JobTemplate.create_unified_job.called + assert JobTemplate.create_unified_job.call_args == (runtime_data,) job_id = response.data['job'] assert job_id == 968 - mock_job.signal_start.assert_called_once_with(**runtime_data) + mock_job.signal_start.assert_called_once() + @pytest.mark.django_db @pytest.mark.job_runtime_vars @@ -168,6 +186,7 @@ def test_job_reject_invalid_prompted_vars(runtime_data, job_template_prompts, po assert response.data['inventory'] == [u'Invalid pk "87865" - object does not exist.'] assert response.data['credential'] == [u'Invalid pk "48474" - object does not exist.'] + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_reject_invalid_prompted_extra_vars(runtime_data, job_template_prompts, post, admin_user): @@ -179,6 +198,7 @@ def test_job_reject_invalid_prompted_extra_vars(runtime_data, job_template_promp 
assert response.data['extra_vars'] == ['Must be a valid JSON or YAML dictionary.'] + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_launch_fails_without_inventory(deploy_jobtemplate, post, admin_user): @@ -190,6 +210,7 @@ def test_job_launch_fails_without_inventory(deploy_jobtemplate, post, admin_user assert response.data['inventory'] == ["Job Template 'inventory' is missing or undefined."] + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime_data, post, rando): @@ -202,6 +223,7 @@ def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime assert response.data['detail'] == u'You do not have permission to perform this action.' + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_launch_fails_without_credential_access(job_template_prompts, runtime_data, post, rando): @@ -214,6 +236,7 @@ def test_job_launch_fails_without_credential_access(job_template_prompts, runtim assert response.data['detail'] == u'You do not have permission to perform this action.' + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user): @@ -225,6 +248,7 @@ def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user): assert 'job_type' in response.data + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_block_scan_job_inv_change(mocker, bad_scan_JT, runtime_data, post, admin_user): @@ -236,6 +260,7 @@ def test_job_block_scan_job_inv_change(mocker, bad_scan_JT, runtime_data, post, assert 'inventory' in response.data + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_relaunch_copy_vars(job_with_links, machine_credential, inventory, @@ -244,13 +269,14 @@ def test_job_relaunch_copy_vars(job_with_links, machine_credential, inventory, job_with_links.limit = "my_server" with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate._get_unified_job_field_names', return_value=['inventory', 'credential', 'limit']): - second_job = job_with_links.copy() + second_job = job_with_links.copy_unified_job() # Check that job data matches the original variables assert second_job.credential == job_with_links.credential assert second_job.inventory == job_with_links.inventory assert second_job.limit == 'my_server' + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_relaunch_resource_access(job_with_links, user): @@ -263,14 +289,15 @@ def test_job_relaunch_resource_access(job_with_links, user): job_with_links.inventory.use_role.members.add(both_user) assert both_user.can_access(Job, 'start', job_with_links) - # Confirm that a user with credential access alone can not launch + # Confirm that a user with credential access alone cannot launch job_with_links.credential.use_role.members.add(credential_user) assert not credential_user.can_access(Job, 'start', job_with_links) - # Confirm that a user with inventory access alone can not launch + # Confirm that a user with inventory access alone cannot launch job_with_links.inventory.use_role.members.add(inventory_user) assert not inventory_user.can_access(Job, 'start', job_with_links) + @pytest.mark.django_db def test_job_launch_JT_with_validation(machine_credential, deploy_jobtemplate): deploy_jobtemplate.extra_vars = '{"job_template_var": 3}' @@ -291,6 +318,7 @@ def test_job_launch_JT_with_validation(machine_credential, deploy_jobtemplate): assert 'job_launch_var' in final_job_extra_vars assert job_obj.credential.id 
== machine_credential.id + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_launch_unprompted_vars_with_survey(mocker, survey_spec_factory, job_template_prompts, post, admin_user): @@ -301,15 +329,18 @@ def test_job_launch_unprompted_vars_with_survey(mocker, survey_spec_factory, job with mocker.patch('awx.main.access.BaseAccess.check_license'): mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4}) - with mocker.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job', return_value=mock_job): + with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}): response = post( reverse('api:job_template_launch', args=[job_template.pk]), dict(extra_vars={"job_launch_var": 3, "survey_var": 4}), admin_user, expect=201) + assert JobTemplate.create_unified_job.called + assert JobTemplate.create_unified_job.call_args == ({'extra_vars':{'survey_var': 4}},) + job_id = response.data['job'] assert job_id == 968 # Check that the survey variable is accepted and the job variable isn't - mock_job.signal_start.assert_called_once_with(extra_vars={"survey_var": 4}) + mock_job.signal_start.assert_called_once() diff --git a/awx/main/tests/functional/api/test_job_template.py b/awx/main/tests/functional/api/test_job_template.py index 88437a0037..ec4286176e 100644 --- a/awx/main/tests/functional/api/test_job_template.py +++ b/awx/main/tests/functional/api/test_job_template.py @@ -1,23 +1,16 @@ import pytest -import mock # AWX from awx.api.serializers import JobTemplateSerializer, JobLaunchSerializer -from awx.main.models.jobs import JobTemplate, Job -from awx.main.models.projects import ProjectOptions +from awx.main.models.jobs import Job from awx.main.migrations import _save_password_keys as save_password_keys # Django -from django.test.client import RequestFactory from django.core.urlresolvers import reverse from django.apps import apps -@property -def project_playbooks(self): - return ['mocked', 'mocked.yml', 'alt-mocked.yml'] @pytest.mark.django_db -@mock.patch.object(ProjectOptions, "playbooks", project_playbooks) @pytest.mark.parametrize( "grant_project, grant_credential, grant_inventory, expect", [ (True, True, True, 201), @@ -39,11 +32,11 @@ def test_create(post, project, machine_credential, inventory, alice, grant_proje 'project': project.id, 'credential': machine_credential.id, 'inventory': inventory.id, - 'playbook': 'mocked.yml', + 'playbook': 'helloworld.yml', }, alice, expect=expect) + @pytest.mark.django_db -@mock.patch.object(ProjectOptions, "playbooks", project_playbooks) @pytest.mark.parametrize( "grant_project, grant_credential, grant_inventory, expect", [ (True, True, True, 200), @@ -68,11 +61,22 @@ def test_edit_sensitive_fields(patch, job_template_factory, alice, grant_project 'project': objs.project.id, 'credential': objs.credential.id, 'inventory': objs.inventory.id, - 'playbook': 'alt-mocked.yml', + 'playbook': 'alt-helloworld.yml', }, alice, expect=expect) + +@pytest.mark.django_db +def test_reject_dict_extra_vars_patch(patch, job_template_factory, admin_user): + # extra_vars must be given as a string; a dict should get a 400 response + # rather than being saved incorrectly + jt = job_template_factory( + 'jt', organization='org1', project='prj', inventory='inv', credential='cred' + ).job_template + patch(reverse('api:job_template_detail', args=(jt.id,)), + {'extra_vars': {'foo': 5}}, admin_user, expect=400)
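The hunks above repeatedly replace mocker.patch on a dotted import path with mocker.patch.object on the class, then assert against the mock's called/call_args attributes instead of assert_called_once_with. A minimal self-contained sketch of that pattern, assuming pytest with the pytest-mock plugin installed; the Launcher class and create_job method are hypothetical stand-ins, not AWX code:

# Hypothetical stand-in for the patched JobTemplate.create_unified_job.
class Launcher(object):
    def create_job(self, **kwargs):
        raise NotImplementedError('replaced by the mock during the test')


def test_patch_object_records_call_args(mocker):
    # `mocker` comes from pytest-mock and reverts the patch automatically.
    mock_job = mocker.MagicMock(id=968)
    mocker.patch.object(Launcher, 'create_job', return_value=mock_job)

    job = Launcher().create_job(extra_vars={})

    assert job.id == 968
    # The mock replaces the attribute on the class, so assertions are
    # made through the class, mirroring the tests above.
    assert Launcher.create_job.called
    # Comparing call_args to a one-element tuple holding a dict checks
    # the keyword arguments of the recorded call.
    assert Launcher.create_job.call_args == ({'extra_vars': {}},)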
+ + @pytest.mark.django_db -@mock.patch.object(ProjectOptions, "playbooks", project_playbooks) def test_edit_playbook(patch, job_template_factory, alice): objs = job_template_factory('jt', organization='org1', project='prj', inventory='inv', credential='cred') objs.job_template.admin_role.members.add(alice) @@ -81,16 +85,16 @@ def test_edit_playbook(patch, job_template_factory, alice): objs.inventory.use_role.members.add(alice) patch(reverse('api:job_template_detail', args=(objs.job_template.id,)), { - 'playbook': 'alt-mocked.yml', + 'playbook': 'alt-helloworld.yml', }, alice, expect=200) objs.inventory.use_role.members.remove(alice) patch(reverse('api:job_template_detail', args=(objs.job_template.id,)), { - 'playbook': 'mocked.yml', + 'playbook': 'helloworld.yml', }, alice, expect=403) + @pytest.mark.django_db -@mock.patch.object(ProjectOptions, "playbooks", project_playbooks) def test_edit_nonsenstive(patch, job_template_factory, alice): objs = job_template_factory('jt', organization='org1', project='prj', inventory='inv', credential='cred') jt = objs.job_template @@ -115,6 +119,8 @@ def test_edit_nonsenstive(patch, job_template_factory, alice): }, alice, expect=200) print(res.data) assert res.data['name'] == 'updated' + + @pytest.fixture def jt_copy_edit(job_template_factory, project): objects = job_template_factory( @@ -122,9 +128,6 @@ def jt_copy_edit(job_template_factory, project): project=project) return objects.job_template -@property -def project_playbooks(self): - return ['mocked', 'mocked.yml', 'alt-mocked.yml'] @pytest.mark.django_db def test_job_template_role_user(post, organization_factory, job_template_factory): @@ -141,149 +144,22 @@ def test_job_template_role_user(post, organization_factory, job_template_factory response = post(url, dict(id=jt_objects.job_template.execute_role.pk), objects.superusers.admin) assert response.status_code == 204 -# Test protection against limited set of validation problems @pytest.mark.django_db -def test_bad_data_copy_edit(admin_user, project): - """ - If a required resource (inventory here) was deleted, copying not allowed - because doing so would caues a validation error - """ - - jt_res = JobTemplate.objects.create( - job_type='run', - project=project, - inventory=None, ask_inventory_on_launch=False, # not allowed - credential=None, ask_credential_on_launch=True, - name='deploy-job-template' - ) - serializer = JobTemplateSerializer(jt_res) - request = RequestFactory().get('/api/v1/job_templates/12/') - request.user = admin_user - serializer.context['request'] = request - response = serializer.to_representation(jt_res) - assert not response['summary_fields']['can_copy'] - assert response['summary_fields']['can_edit'] - -# Tests for correspondence between view info and actual access - -@pytest.mark.django_db -def test_admin_copy_edit(jt_copy_edit, admin_user): - "Absent a validation error, system admins can do everything" - - # Serializer can_copy/can_edit fields - serializer = JobTemplateSerializer(jt_copy_edit) - request = RequestFactory().get('/api/v1/job_templates/12/') - request.user = admin_user - serializer.context['request'] = request - response = serializer.to_representation(jt_copy_edit) - assert response['summary_fields']['can_copy'] - assert response['summary_fields']['can_edit'] - -@pytest.mark.django_db -def test_org_admin_copy_edit(jt_copy_edit, org_admin): - "Organization admins SHOULD be able to copy a JT firmly in their org" - - # Serializer can_copy/can_edit fields - serializer = JobTemplateSerializer(jt_copy_edit) - 
request = RequestFactory().get('/api/v1/job_templates/12/') - request.user = org_admin - serializer.context['request'] = request - response = serializer.to_representation(jt_copy_edit) - assert response['summary_fields']['can_copy'] - assert response['summary_fields']['can_edit'] - -@pytest.mark.django_db -def test_org_admin_foreign_cred_no_copy_edit(jt_copy_edit, org_admin, machine_credential): - """ - Organization admins without access to the 3 related resources: - SHOULD NOT be able to copy JT - SHOULD be able to edit that job template, for nonsensitive changes - """ - - # Attach credential to JT that org admin can not use - jt_copy_edit.credential = machine_credential - jt_copy_edit.save() - - # Serializer can_copy/can_edit fields - serializer = JobTemplateSerializer(jt_copy_edit) - request = RequestFactory().get('/api/v1/job_templates/12/') - request.user = org_admin - serializer.context['request'] = request - response = serializer.to_representation(jt_copy_edit) - assert not response['summary_fields']['can_copy'] - assert response['summary_fields']['can_edit'] - -@pytest.mark.django_db -def test_jt_admin_copy_edit(jt_copy_edit, rando): - """ - JT admins wihout access to associated resources SHOULD NOT be able to copy - SHOULD be able to make nonsensitive changes""" - - # random user given JT admin access only - jt_copy_edit.admin_role.members.add(rando) - jt_copy_edit.save() - - # Serializer can_copy/can_edit fields - serializer = JobTemplateSerializer(jt_copy_edit) - request = RequestFactory().get('/api/v1/job_templates/12/') - request.user = rando - serializer.context['request'] = request - response = serializer.to_representation(jt_copy_edit) - assert not response['summary_fields']['can_copy'] - assert response['summary_fields']['can_edit'] - -@pytest.mark.django_db -def test_proj_jt_admin_copy_edit(jt_copy_edit, rando): - "JT admins with access to associated resources SHOULD be able to copy" - - # random user given JT and project admin abilities - jt_copy_edit.admin_role.members.add(rando) - jt_copy_edit.save() - jt_copy_edit.project.admin_role.members.add(rando) - jt_copy_edit.project.save() - - # Serializer can_copy/can_edit fields - serializer = JobTemplateSerializer(jt_copy_edit) - request = RequestFactory().get('/api/v1/job_templates/12/') - request.user = rando - serializer.context['request'] = request - response = serializer.to_representation(jt_copy_edit) - assert response['summary_fields']['can_copy'] - assert response['summary_fields']['can_edit'] - -# Functional tests - create new JT with all returned fields, as the UI does - -@pytest.mark.django_db -@mock.patch.object(ProjectOptions, "playbooks", project_playbooks) -def test_org_admin_copy_edit_functional(jt_copy_edit, org_admin, get, post): - get_response = get(reverse('api:job_template_detail', args=[jt_copy_edit.pk]), user=org_admin) - assert get_response.status_code == 200 - assert get_response.data['summary_fields']['can_copy'] - - post_data = get_response.data - post_data['name'] = '%s @ 12:19:47 pm' % post_data['name'] - post_response = post(reverse('api:job_template_list', args=[]), user=org_admin, data=post_data) - assert post_response.status_code == 201 - assert post_response.data['name'] == 'copy-edit-job-template @ 12:19:47 pm' - -@pytest.mark.django_db -@mock.patch.object(ProjectOptions, "playbooks", project_playbooks) def test_jt_admin_copy_edit_functional(jt_copy_edit, rando, get, post): - # Grant random user JT admin access only jt_copy_edit.admin_role.members.add(rando) jt_copy_edit.save() 
get_response = get(reverse('api:job_template_detail', args=[jt_copy_edit.pk]), user=rando) assert get_response.status_code == 200 - assert not get_response.data['summary_fields']['can_copy'] post_data = get_response.data post_data['name'] = '%s @ 12:19:47 pm' % post_data['name'] post_response = post(reverse('api:job_template_list', args=[]), user=rando, data=post_data) assert post_response.status_code == 403 + @pytest.mark.django_db def test_scan_jt_no_inventory(job_template_factory): # A user should be able to create a scan job without a project, but an inventory is required @@ -316,6 +192,7 @@ def test_scan_jt_no_inventory(job_template_factory): assert not serializer.is_valid() assert 'inventory' in serializer.errors + @pytest.mark.django_db def test_scan_jt_surveys(inventory): serializer = JobTemplateSerializer(data={"name": "Test", "job_type": "scan", @@ -324,6 +201,7 @@ def test_scan_jt_surveys(inventory): assert not serializer.is_valid() assert "survey_enabled" in serializer.errors + @pytest.mark.django_db def test_jt_without_project(inventory): data = dict(name="Test", job_type="run", @@ -339,6 +217,7 @@ def test_jt_without_project(inventory): serializer = JobTemplateSerializer(data=data) assert serializer.is_valid() + @pytest.mark.django_db def test_disallow_template_delete_on_running_job(job_template_factory, delete, admin_user): objects = job_template_factory('jt', @@ -351,6 +230,7 @@ def test_disallow_template_delete_on_running_job(job_template_factory, delete, a delete_response = delete(reverse('api:job_template_detail', args=[objects.job_template.pk]), user=admin_user) assert delete_response.status_code == 409 + @pytest.mark.django_db def test_save_survey_passwords_to_job(job_template_with_survey_passwords): """Test that when a new job is created, the survey_passwords field is @@ -358,6 +238,7 @@ def test_save_survey_passwords_to_job(job_template_with_survey_passwords): job = job_template_with_survey_passwords.create_unified_job() assert job.survey_passwords == {'SSN': '$encrypted$', 'secret_key': '$encrypted$'} + @pytest.mark.django_db def test_save_survey_passwords_on_migration(job_template_with_survey_passwords): """Test that when upgrading to 3.0.2, the jobs connected to a JT that has diff --git a/awx/main/tests/functional/api/test_organization_counts.py b/awx/main/tests/functional/api/test_organization_counts.py index 0e57488fab..f08fd75d01 100644 --- a/awx/main/tests/functional/api/test_organization_counts.py +++ b/awx/main/tests/functional/api/test_organization_counts.py @@ -2,6 +2,7 @@ import pytest from django.core.urlresolvers import reverse + @pytest.fixture def organization_resource_creator(organization, user): def rf(users, admins, job_templates, projects, inventories, teams): @@ -40,6 +41,7 @@ def organization_resource_creator(organization, user): return organization return rf + COUNTS_PRIMES = { 'users': 11, 'admins': 5, @@ -57,10 +59,12 @@ COUNTS_ZEROS = { 'teams': 0 } + @pytest.fixture def resourced_organization(organization_resource_creator): return organization_resource_creator(**COUNTS_PRIMES) + @pytest.mark.django_db def test_org_counts_detail_admin(resourced_organization, user, get): # Check that all types of resources are counted by a superuser @@ -72,6 +76,7 @@ def test_org_counts_detail_admin(resourced_organization, user, get): counts = response.data['summary_fields']['related_field_counts'] assert counts == COUNTS_PRIMES + @pytest.mark.django_db def test_org_counts_detail_member(resourced_organization, user, get): # Check that a non-admin org member 
can only see users / admin in detail view @@ -90,6 +95,7 @@ def test_org_counts_detail_member(resourced_organization, user, get): 'teams': 0 } + @pytest.mark.django_db def test_org_counts_list_admin(resourced_organization, user, get): # Check that all types of resources are counted by a superuser @@ -100,6 +106,7 @@ def test_org_counts_list_admin(resourced_organization, user, get): counts = response.data['results'][0]['summary_fields']['related_field_counts'] assert counts == COUNTS_PRIMES + @pytest.mark.django_db def test_org_counts_list_member(resourced_organization, user, get): # Check that a non-admin user can only see the full project and @@ -119,6 +126,7 @@ def test_org_counts_list_member(resourced_organization, user, get): 'teams': 0 } + @pytest.mark.django_db def test_new_org_zero_counts(user, post): # Check that a POST to the organization list endpoint returns @@ -132,6 +140,7 @@ def test_new_org_zero_counts(user, post): counts_dict = new_org_list['summary_fields']['related_field_counts'] assert counts_dict == COUNTS_ZEROS + @pytest.mark.django_db def test_two_organizations(resourced_organization, organizations, user, get): # Check correct results for two organizations are returned @@ -150,6 +159,7 @@ def test_two_organizations(resourced_organization, organizations, user, get): assert counts[org_id_full] == COUNTS_PRIMES assert counts[org_id_zero] == COUNTS_ZEROS + @pytest.mark.django_db def test_scan_JT_counted(resourced_organization, user, get): admin_user = user('admin', True) @@ -170,6 +180,30 @@ def test_scan_JT_counted(resourced_organization, user, get): assert detail_response.status_code == 200 assert detail_response.data['summary_fields']['related_field_counts'] == counts_dict + +@pytest.mark.django_db +def test_JT_not_double_counted(resourced_organization, user, get): + admin_user = user('admin', True) + # Add a job template linked to both the org's project and inventory + resourced_organization.projects.all()[0].jobtemplates.create( + job_type='run', + inventory=resourced_organization.inventories.all()[0], + project=resourced_organization.projects.all()[0], + name='double-linked-job-template') + counts_dict = dict(COUNTS_PRIMES) + counts_dict['job_templates'] += 1 + + # Test list view + list_response = get(reverse('api:organization_list', args=[]), admin_user) + assert list_response.status_code == 200 + assert list_response.data['results'][0]['summary_fields']['related_field_counts'] == counts_dict + + # Test detail view + detail_response = get(reverse('api:organization_detail', args=[resourced_organization.pk]), admin_user) + assert detail_response.status_code == 200 + assert detail_response.data['summary_fields']['related_field_counts'] == counts_dict + + @pytest.mark.django_db def test_JT_associated_with_project(organizations, project, user, get): # Check that adding a project to an organization gets the project's JT diff --git a/awx/main/tests/functional/api/test_organizations.py b/awx/main/tests/functional/api/test_organizations.py index d141ddd6b5..2e153c56d5 100644 --- a/awx/main/tests/functional/api/test_organizations.py +++ b/awx/main/tests/functional/api/test_organizations.py @@ -99,7 +99,7 @@ def test_organization_inventory_list(organization, inventory_factory, get, alice @pytest.mark.django_db -@mock.patch('awx.api.views.feature_enabled', lambda feature,bypass_db=None: True) +@mock.patch('awx.api.views.feature_enabled', lambda feature: True) def test_create_organization(post, admin, alice): new_org = { 'name': 'new org',
@pytest.mark.django_db -@mock.patch('awx.api.views.feature_enabled', lambda feature,bypass_db=None: True) +@mock.patch('awx.api.views.feature_enabled', lambda feature: True) def test_create_organization_xfail(post, alice): new_org = { 'name': 'new org', diff --git a/awx/main/tests/functional/api/test_rbac_displays.py b/awx/main/tests/functional/api/test_rbac_displays.py new file mode 100644 index 0000000000..c0a3e463cf --- /dev/null +++ b/awx/main/tests/functional/api/test_rbac_displays.py @@ -0,0 +1,373 @@ +import pytest + +from django.core.urlresolvers import reverse +from django.test.client import RequestFactory + +from awx.main.models import Role, Group, UnifiedJobTemplate, JobTemplate +from awx.main.access import ( + access_registry, + get_user_capabilities +) +from awx.main.utils import cache_list_capabilities +from awx.api.serializers import JobTemplateSerializer + +# This file covers special cases of displaying user_capabilities; +# general functionality should be covered fully by unit tests, see: +# awx/main/tests/unit/api/serializers/test_job_template_serializers.py :: +# TestJobTemplateSerializerGetSummaryFields.test_copy_edit_standard +# awx/main/tests/unit/test_access.py :: +# test_user_capabilities_method + + +@pytest.mark.django_db +class TestOptionsRBAC: + """ + Several endpoints are relied upon by the UI to list POST as an + allowed action or not, depending on whether the user has permission + to create a resource. + """ + + def test_inventory_group_host_can_add(self, inventory, alice, options): + inventory.admin_role.members.add(alice) + + response = options(reverse('api:inventory_hosts_list', args=[inventory.pk]), alice) + assert 'POST' in response.data['actions'] + response = options(reverse('api:inventory_groups_list', args=[inventory.pk]), alice) + assert 'POST' in response.data['actions'] + + def test_inventory_group_host_can_not_add(self, inventory, bob, options): + inventory.read_role.members.add(bob) + + response = options(reverse('api:inventory_hosts_list', args=[inventory.pk]), bob) + assert 'POST' not in response.data['actions'] + response = options(reverse('api:inventory_groups_list', args=[inventory.pk]), bob) + assert 'POST' not in response.data['actions'] + + def test_user_list_can_add(self, org_member, org_admin, options): + response = options(reverse('api:user_list'), org_admin) + assert 'POST' in response.data['actions'] + + def test_user_list_can_not_add(self, org_member, org_admin, options): + response = options(reverse('api:user_list'), org_member) + assert 'POST' not in response.data['actions'] + + +@pytest.mark.django_db +class TestJobTemplateCopyEdit: + """ + These tests cover scenarios that were raised as issues in the past, + where copy/edit actions failed even though the buttons + to perform those actions were displayed.
+ """ + + @pytest.fixture + def jt_copy_edit(self, job_template_factory, project): + objects = job_template_factory( + 'copy-edit-job-template', + project=project) + return objects.job_template + + def fake_context(self, user): + request = RequestFactory().get('/api/v1/resource/42/') + request.user = user + + class FakeView(object): + pass + + fake_view = FakeView() + fake_view.request = request + context = {} + context['view'] = fake_view + context['request'] = request + return context + + def test_validation_bad_data_copy_edit(self, admin_user, project): + """ + If a required resource (inventory here) was deleted, copying not allowed + because doing so would caues a validation error + """ + + jt_res = JobTemplate.objects.create( + job_type='run', + project=project, + inventory=None, ask_inventory_on_launch=False, # not allowed + credential=None, ask_credential_on_launch=True, + name='deploy-job-template' + ) + serializer = JobTemplateSerializer(jt_res) + serializer.context = self.fake_context(admin_user) + response = serializer.to_representation(jt_res) + assert not response['summary_fields']['user_capabilities']['copy'] + assert response['summary_fields']['user_capabilities']['edit'] + + def test_sys_admin_copy_edit(self, jt_copy_edit, admin_user): + "Absent a validation error, system admins can do everything" + serializer = JobTemplateSerializer(jt_copy_edit) + serializer.context = self.fake_context(admin_user) + response = serializer.to_representation(jt_copy_edit) + assert response['summary_fields']['user_capabilities']['copy'] + assert response['summary_fields']['user_capabilities']['edit'] + + def test_org_admin_copy_edit(self, jt_copy_edit, org_admin): + "Organization admins SHOULD be able to copy a JT firmly in their org" + serializer = JobTemplateSerializer(jt_copy_edit) + serializer.context = self.fake_context(org_admin) + response = serializer.to_representation(jt_copy_edit) + assert response['summary_fields']['user_capabilities']['copy'] + assert response['summary_fields']['user_capabilities']['edit'] + + def test_org_admin_foreign_cred_no_copy_edit(self, jt_copy_edit, org_admin, machine_credential): + """ + Organization admins without access to the 3 related resources: + SHOULD NOT be able to copy JT + SHOULD be able to edit that job template, for nonsensitive changes + """ + + # Attach credential to JT that org admin cannot use + jt_copy_edit.credential = machine_credential + jt_copy_edit.save() + + serializer = JobTemplateSerializer(jt_copy_edit) + serializer.context = self.fake_context(org_admin) + response = serializer.to_representation(jt_copy_edit) + assert not response['summary_fields']['user_capabilities']['copy'] + assert response['summary_fields']['user_capabilities']['edit'] + + def test_jt_admin_copy_edit(self, jt_copy_edit, rando): + """ + JT admins wihout access to associated resources SHOULD NOT be able to copy + SHOULD be able to make nonsensitive changes""" + + # random user given JT admin access only + jt_copy_edit.admin_role.members.add(rando) + jt_copy_edit.save() + + serializer = JobTemplateSerializer(jt_copy_edit) + serializer.context = self.fake_context(rando) + response = serializer.to_representation(jt_copy_edit) + assert not response['summary_fields']['user_capabilities']['copy'] + assert response['summary_fields']['user_capabilities']['edit'] + + def test_proj_jt_admin_copy_edit(self, jt_copy_edit, rando): + "JT admins with access to associated resources SHOULD be able to copy" + + # random user given JT and project admin abilities + 
jt_copy_edit.admin_role.members.add(rando) + jt_copy_edit.save() + jt_copy_edit.project.admin_role.members.add(rando) + jt_copy_edit.project.save() + + serializer = JobTemplateSerializer(jt_copy_edit) + serializer.context = self.fake_context(rando) + response = serializer.to_representation(jt_copy_edit) + assert response['summary_fields']['user_capabilities']['copy'] + assert response['summary_fields']['user_capabilities']['edit'] + + +@pytest.fixture +def mock_access_method(mocker): + mock_method = mocker.MagicMock() + mock_method.return_value = 'foobar' + mock_method.__name__ = 'bars' # Required for a logging statement + return mock_method + + +@pytest.mark.django_db +class TestAccessListCapabilities: + """ + Test that the access_list serializer shows the exact output of the RoleAccess.can_attach + - looks at /api/v1/inventories/N/access_list/ + - test for types: direct, indirect, and team access + """ + + extra_kwargs = dict(skip_sub_obj_read_check=False, data={}) + + def _assert_one_in_list(self, data, sublist='direct_access'): + "Establish that exactly 1 type of access exists so we know the entry is the right one" + assert len(data['results']) == 1 + assert len(data['results'][0]['summary_fields'][sublist]) == 1 + + def test_access_list_direct_access_capability( + self, inventory, rando, get, mocker, mock_access_method): + inventory.admin_role.members.add(rando) + + with mocker.patch.object(access_registry[Role][0], 'can_unattach', mock_access_method): + response = get(reverse('api:inventory_access_list', args=(inventory.id,)), rando) + + mock_access_method.assert_called_once_with(inventory.admin_role, rando, 'members', **self.extra_kwargs) + self._assert_one_in_list(response.data) + direct_access_list = response.data['results'][0]['summary_fields']['direct_access'] + assert direct_access_list[0]['role']['user_capabilities']['unattach'] == 'foobar' + + def test_access_list_indirect_access_capability( + self, inventory, organization, org_admin, get, mocker, mock_access_method): + with mocker.patch.object(access_registry[Role][0], 'can_unattach', mock_access_method): + response = get(reverse('api:inventory_access_list', args=(inventory.id,)), org_admin) + + mock_access_method.assert_called_once_with(organization.admin_role, org_admin, 'members', **self.extra_kwargs) + self._assert_one_in_list(response.data, sublist='indirect_access') + indirect_access_list = response.data['results'][0]['summary_fields']['indirect_access'] + assert indirect_access_list[0]['role']['user_capabilities']['unattach'] == 'foobar' + + def test_access_list_team_direct_access_capability( + self, inventory, team, team_member, get, mocker, mock_access_method): + team.member_role.children.add(inventory.admin_role) + + with mocker.patch.object(access_registry[Role][0], 'can_unattach', mock_access_method): + response = get(reverse('api:inventory_access_list', args=(inventory.id,)), team_member) + + mock_access_method.assert_called_once_with(inventory.admin_role, team.member_role, 'parents', **self.extra_kwargs) + self._assert_one_in_list(response.data) + direct_access_list = response.data['results'][0]['summary_fields']['direct_access'] + assert direct_access_list[0]['role']['user_capabilities']['unattach'] == 'foobar' + + def test_user_access_list_direct_access_capability(self, rando, get): + "When a user views their own access list, they cannot unattach their admin role" + response = get(reverse('api:user_access_list', args=(rando.id,)), rando) + direct_access_list = 
response.data['results'][0]['summary_fields']['direct_access'] + assert not direct_access_list[0]['role']['user_capabilities']['unattach'] + + +@pytest.mark.django_db +def test_team_roles_unattach(mocker, team, team_member, inventory, mock_access_method, get): + team.member_role.children.add(inventory.admin_role) + + with mocker.patch.object(access_registry[Role][0], 'can_unattach', mock_access_method): + response = get(reverse('api:team_roles_list', args=(team.id,)), team_member) + + # Did we assess whether team_member can remove team's permission to the inventory? + mock_access_method.assert_called_once_with( + inventory.admin_role, team.member_role, 'parents', skip_sub_obj_read_check=True, data={}) + assert response.data['results'][0]['summary_fields']['user_capabilities']['unattach'] == 'foobar' + + +@pytest.mark.django_db +def test_user_roles_unattach(mocker, organization, alice, bob, mock_access_method, get): + # Add to same organization so that alice and bob can see each other + organization.member_role.members.add(alice) + organization.member_role.members.add(bob) + + with mocker.patch.object(access_registry[Role][0], 'can_unattach', mock_access_method): + response = get(reverse('api:user_roles_list', args=(alice.id,)), bob) + + # Did we assess whether bob can remove alice's membership in the organization? + mock_access_method.assert_called_once_with( + organization.member_role, alice, 'members', skip_sub_obj_read_check=True, data={}) + assert response.data['results'][0]['summary_fields']['user_capabilities']['unattach'] == 'foobar' + + +@pytest.mark.django_db +def test_team_roles_unattach_functional(team, team_member, inventory, get): + team.member_role.children.add(inventory.admin_role) + response = get(reverse('api:team_roles_list', args=(team.id,)), team_member) + # Team member should be able to remove access to inventory, because + # the inventory admin_role grants that ability + assert response.data['results'][0]['summary_fields']['user_capabilities']['unattach'] + + +@pytest.mark.django_db +def test_user_roles_unattach_functional(organization, alice, bob, get): + organization.member_role.members.add(alice) + organization.member_role.members.add(bob) + response = get(reverse('api:user_roles_list', args=(alice.id,)), bob) + # Org members cannot revoke the membership of other members + assert not response.data['results'][0]['summary_fields']['user_capabilities']['unattach'] + + +@pytest.mark.django_db +def test_prefetch_jt_capabilities(job_template, rando): + job_template.execute_role.members.add(rando) + qs = JobTemplate.objects.all() + cache_list_capabilities(qs, ['admin', 'execute'], JobTemplate, rando) + assert qs[0].capabilities_cache == {'edit': False, 'start': True} + + +@pytest.mark.django_db +def test_prefetch_ujt_job_template_capabilities(alice, bob, job_template): + job_template.execute_role.members.add(alice) + qs = UnifiedJobTemplate.objects.all() + cache_list_capabilities(qs, ['admin', 'execute'], UnifiedJobTemplate, alice) + assert qs[0].capabilities_cache == {'edit': False, 'start': True} + qs = UnifiedJobTemplate.objects.all() + cache_list_capabilities(qs, ['admin', 'execute'], UnifiedJobTemplate, bob) + assert qs[0].capabilities_cache == {'edit': False, 'start': False} + + +@pytest.mark.django_db +def test_prefetch_ujt_project_capabilities(alice, project): + project.update_role.members.add(alice) + qs = UnifiedJobTemplate.objects.all() + cache_list_capabilities(qs, ['admin', 'execute'], UnifiedJobTemplate, alice) + assert qs[0].capabilities_cache == {} + +
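The prefetch tests above and below rely on cache_list_capabilities stamping a capabilities_cache dict onto each row of the queryset, so serializers can render user_capabilities without running an access check per object. A rough sketch of that caching idea under simplified assumptions (plain objects stand in for querysets, and allowed_ids is a precomputed mapping from capability name to the set of object ids the user may act on; none of these names are AWX's actual implementation):

class FakeJobTemplate(object):
    def __init__(self, pk):
        self.pk = pk


def cache_capabilities(objects, allowed_ids):
    # Attach one dict per object up front, instead of checking access
    # again for every row at serialization time.
    for obj in objects:
        obj.capabilities_cache = {
            capability: obj.pk in id_set
            for capability, id_set in allowed_ids.items()
        }


jts = [FakeJobTemplate(1), FakeJobTemplate(2)]
cache_capabilities(jts, {'edit': {2}, 'start': {1, 2}})
assert jts[0].capabilities_cache == {'edit': False, 'start': True}
assert jts[1].capabilities_cache == {'edit': True, 'start': True}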
+@pytest.mark.django_db +def test_prefetch_group_capabilities(group, rando): + group.inventory.adhoc_role.members.add(rando) + qs = Group.objects.all() + cache_list_capabilities(qs, ['inventory.admin', 'inventory.adhoc'], Group, rando) + assert qs[0].capabilities_cache == {'edit': False, 'adhoc': True} + + +@pytest.mark.django_db +def test_prefetch_jt_copy_capability(job_template, project, inventory, machine_credential, rando): + job_template.project = project + job_template.inventory = inventory + job_template.credential = machine_credential + job_template.save() + + qs = JobTemplate.objects.all() + cache_list_capabilities(qs, [{'copy': [ + 'project.use', 'inventory.use', 'credential.use', + 'cloud_credential.use', 'network_credential.use' + ]}], JobTemplate, rando) + assert qs[0].capabilities_cache == {'copy': False} + + project.use_role.members.add(rando) + inventory.use_role.members.add(rando) + machine_credential.use_role.members.add(rando) + + cache_list_capabilities(qs, [{'copy': [ + 'project.use', 'inventory.use', 'credential.use', + 'cloud_credential.use', 'network_credential.use' + ]}], JobTemplate, rando) + assert qs[0].capabilities_cache == {'copy': True} + + +@pytest.mark.django_db +def test_manual_projects_no_update(project, get, admin_user): + response = get(reverse('api:project_detail', args=[project.pk]), admin_user, expect=200) + assert not response.data['summary_fields']['user_capabilities']['start'] + assert not response.data['summary_fields']['user_capabilities']['schedule'] + + +@pytest.mark.django_db +def test_group_update_capabilities_possible(group, inventory_source, admin_user): + group.inventory_source = inventory_source + group.save() + + capabilities = get_user_capabilities(admin_user, group, method_list=['start']) + assert capabilities['start'] + + +@pytest.mark.django_db +def test_group_update_capabilities_impossible(group, inventory_source, admin_user): + "Manual groups cannot be updated or scheduled" + inventory_source.source = "" + inventory_source.save() + group.inventory_source = inventory_source + group.save() + + capabilities = get_user_capabilities(admin_user, group, method_list=['edit', 'start', 'schedule']) + assert not capabilities['start'] + assert not capabilities['schedule'] + + +@pytest.mark.django_db +def test_license_check_not_called(mocker, job_template, project, org_admin, get): + job_template.project = project + job_template.save() # need this to make the JT visible + mock_license_check = mocker.MagicMock() + with mocker.patch('awx.main.access.BaseAccess.check_license', mock_license_check): + get(reverse('api:job_template_detail', args=[job_template.pk]), org_admin, expect=200) + assert not mock_license_check.called diff --git a/awx/main/tests/functional/api/test_resource_access_lists.py b/awx/main/tests/functional/api/test_resource_access_lists.py index 9d8d95c98a..96806d4d72 100644 --- a/awx/main/tests/functional/api/test_resource_access_lists.py +++ b/awx/main/tests/functional/api/test_resource_access_lists.py @@ -3,56 +3,56 @@ import pytest from django.core.urlresolvers import reverse from awx.main.models import Role + @pytest.mark.django_db def test_indirect_access_list(get, organization, project, team_factory, user, admin): project_admin = user('project_admin') - org_admin_team_member = user('org_admin_team_member') project_admin_team_member = user('project_admin_team_member') - org_admin_team = team_factory('org-admin-team') + team_admin = user('team_admin') + project_admin_team = team_factory('project-admin-team')
project.admin_role.members.add(project_admin) - org_admin_team.member_role.members.add(org_admin_team_member) - org_admin_team.member_role.children.add(organization.admin_role) project_admin_team.member_role.members.add(project_admin_team_member) project_admin_team.member_role.children.add(project.admin_role) + project_admin_team.admin_role.members.add(team_admin) + result = get(reverse('api:project_access_list', args=(project.id,)), admin) assert result.status_code == 200 # Result should be: # project_admin should have direct access, # project_team_admin should have "direct" access through being a team member -> project admin, - # org_admin_team_member should have indirect access through being a team member -> org admin -> project admin, + # team_admin should have the same direct access as project_admin_team_member, # admin should have access through system admin -> org admin -> project admin assert result.data['count'] == 4 project_admin_res = [r for r in result.data['results'] if r['id'] == project_admin.id][0] - org_admin_team_member_res = [r for r in result.data['results'] if r['id'] == org_admin_team_member.id][0] + team_admin_res = [r for r in result.data['results'] if r['id'] == team_admin.id][0] project_admin_team_member_res = [r for r in result.data['results'] if r['id'] == project_admin_team_member.id][0] admin_res = [r for r in result.data['results'] if r['id'] == admin.id][0] assert len(project_admin_res['summary_fields']['direct_access']) == 1 assert len(project_admin_res['summary_fields']['indirect_access']) == 0 - assert len(org_admin_team_member_res['summary_fields']['direct_access']) == 0 - assert len(org_admin_team_member_res['summary_fields']['indirect_access']) == 1 + assert len(team_admin_res['summary_fields']['direct_access']) == 1 + assert len(team_admin_res['summary_fields']['indirect_access']) == 0 assert len(admin_res['summary_fields']['direct_access']) == 0 assert len(admin_res['summary_fields']['indirect_access']) == 1 project_admin_entry = project_admin_res['summary_fields']['direct_access'][0]['role'] assert project_admin_entry['id'] == project.admin_role.id + # assert that the results for the team admin match those for the team member + team_admin_entry = team_admin_res['summary_fields']['direct_access'][0]['role'] + assert team_admin_entry['id'] == project.admin_role.id + assert team_admin_entry['name'] == 'Admin' project_admin_team_member_entry = project_admin_team_member_res['summary_fields']['direct_access'][0]['role'] assert project_admin_team_member_entry['id'] == project.admin_role.id assert project_admin_team_member_entry['team_id'] == project_admin_team.id assert project_admin_team_member_entry['team_name'] == project_admin_team.name - org_admin_team_member_entry = org_admin_team_member_res['summary_fields']['indirect_access'][0]['role'] - assert org_admin_team_member_entry['id'] == organization.admin_role.id - assert org_admin_team_member_entry['team_id'] == org_admin_team.id - assert org_admin_team_member_entry['team_name'] == org_admin_team.name - admin_entry = admin_res['summary_fields']['indirect_access'][0]['role'] assert admin_entry['name'] == Role.singleton('system_administrator').name diff --git a/awx/main/tests/functional/api/test_role.py b/awx/main/tests/functional/api/test_role.py new file mode 100644 index 0000000000..98610bf8e4 --- /dev/null +++ b/awx/main/tests/functional/api/test_role.py @@ -0,0 +1,14 @@ +import pytest + +from django.core.urlresolvers import reverse + + +@pytest.mark.django_db +def test_admin_visible_to_orphaned_users(get,
alice): + names = set() + + response = get(reverse('api:role_list'), user=alice) + for item in response.data['results']: + names.add(item['name']) + assert 'System Auditor' in names + assert 'System Administrator' in names diff --git a/awx/main/tests/functional/api/test_settings.py b/awx/main/tests/functional/api/test_settings.py new file mode 100644 index 0000000000..6322f354e7 --- /dev/null +++ b/awx/main/tests/functional/api/test_settings.py @@ -0,0 +1,185 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Python +import pytest +import os + +# Django +from django.core.urlresolvers import reverse + +# AWX +from awx.conf.models import Setting + +TEST_GIF_LOGO = 'data:image/gif;base64,R0lGODlhIQAjAPIAAP//////AP8AAMzMAJmZADNmAAAAAAAAACH/C05FVFNDQVBFMi4wAwEAAAAh+QQJCgAHACwAAAAAIQAjAAADo3i63P4wykmrvTjrzZsxXfR94WMQBFh6RECuixHMLyzPQ13ewZCvow9OpzEAjIBj79cJJmU+FceIVEZ3QRozxBttmyOBwPBtisdX4Bha3oxmS+llFIPHQXQKkiSEXz9PeklHBzx3hYNyEHt4fmmAhHp8Nz45KgV5FgWFOFEGmwWbGqEfniChohmoQZ+oqRiZDZhEgk81I4mwg4EKVbxzrDHBEAkAIfkECQoABwAsAAAAACEAIwAAA6V4utz+MMpJq724GpP15p1kEAQYQmOwnWjgrmxjuMEAx8rsDjZ+fJvdLWQAFAHGWo8FRM54JqIRmYTigDrDMqZTbbbMj0CgjTLHZKvPQH6CTx+a2vKR0XbbOsoZ7SphG057gjl+c0dGgzeGNiaBiSgbBQUHBV08NpOVlkMSk0FKjZuURHiiOJxQnSGfQJuoEKREejK0dFRGjoiQt7iOuLx0rgxYEQkAIfkECQoABwAsAAAAACEAIwAAA7h4utxnxslJDSGR6nrz/owxYB64QUEwlGaVqlB7vrAJscsd3Lhy+wBArGEICo3DUFH4QDqK0GMy51xOgcGlEAfJ+iAFie62chR+jYKaSAuQGOqwJp7jGQRDuol+F/jxZWsyCmoQfwYwgoM5Oyg1i2w0A2WQIW2TPYOIkleQmy+UlYygoaIPnJmapKmqKiusMmSdpjxypnALtrcHioq3ury7hGm3dnVosVpMWFmwREZbddDOSsjVswcJACH5BAkKAAcALAAAAAAhACMAAAOxeLrc/jDKSZUxNS9DCNYV54HURQwfGRlDEFwqdLVuGjOsW9/Odb0wnsUAKBKNwsMFQGwyNUHckVl8bqI4o43lA26PNkv1S9DtNuOeVirw+aTI3qWAQwnud1vhLSnQLS0GeFF+GoVKNF0fh4Z+LDQ6Bn5/MTNmL0mAl2E3j2aclTmRmYCQoKEDiaRDKFhJez6UmbKyQowHtzy1uEl8DLCnEktrQ2PBD1NxSlXKIW5hz6cJACH5BAkKAAcALAAAAAAhACMAAAOkeLrc/jDKSau9OOvNlTFd9H3hYxAEWDJfkK5LGwTq+g0zDR/GgM+10A04Cm56OANgqTRmkDTmSOiLMgFOTM9AnFJHuexzYBAIijZf2SweJ8ttbbXLmd5+wBiJosSCoGF/fXEeS1g8gHl9hxODKkh4gkwVIwUekESIhA4FlgV3PyCWG52WI2oGnR2lnUWpqhqVEF4Xi7QjhpsshpOFvLosrnpoEAkAIfkECQoABwAsAAAAACEAIwAAA6l4utz+MMpJq71YGpPr3t1kEAQXQltQnk8aBCa7bMMLy4wx1G8s072PL6SrGQDI4zBThCU/v50zCVhidIYgNPqxWZkDg0AgxB2K4vEXbBSvr1JtZ3uOext0x7FqovF6OXtfe1UzdjAxhINPM013ChtJER8FBQeVRX8GlpggFZWWfjwblTiigGZnfqRmpUKbljKxDrNMeY2eF4R8jUiSur6/Z8GFV2WBtwwJACH5BAkKAAcALAAAAAAhACMAAAO6eLrcZi3KyQwhkGpq8f6ONWQgaAxB8JTfg6YkO50pzD5xhaurhCsGAKCnEw6NucNDCAkyI8ugdAhFKpnJJdMaeiofBejowUseCr9GYa0j1GyMdVgjBxoEuPSZXWKf7gKBeHtzMms0gHgGfDIVLztmjScvNZEyk28qjT40b5aXlHCbDgOhnzedoqOOlKeopaqrCy56sgtotbYKhYW6e7e9tsHBssO6eSTIm1peV0iuFUZDyU7NJnmcuQsJACH5BAkKAAcALAAAAAAhACMAAAOteLrc/jDKSZsxNS9DCNYV54Hh4H0kdAXBgKaOwbYX/Miza1vrVe8KA2AoJL5gwiQgeZz4GMXlcHl8xozQ3kW3KTajL9zsBJ1+sV2fQfALem+XAlRApxu4ioI1UpC76zJ4fRqDBzI+LFyFhH1iiS59fkgziW07jjRAG5QDeECOLk2Tj6KjnZafW6hAej6Smgevr6yysza2tiCuMasUF2Yov2gZUUQbU8YaaqjLpQkAOw==' +TEST_PNG_LOGO = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACEAAAAjCAYAAAAaLGNkAAAAAXNSR0IB2cksfwAAAdVpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpDb21wcmVzc2lvbj4xPC90aWZmOkNvbXByZXNzaW9uPgogICAgICAgICA8dGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPjI8L3RpZmY6UGhvdG9tZXRyaWNJbnRlcnByZXRhdGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+Cjl0tmoAAAHVSURBVFgJ7VZRsoMgDNTOu5E9U+/Ud6Z6JssGNg2oNKD90xkHCNnNkgTbYbieKwNXBn6bgSXQ4+16xi5UDiqDN3Pecr6+1fM5DHh7n1NEIPjjoRLKzOjG3qQ5dRtEy2LCjh/Gz2wDZE2nZYKkrxdn/kY9XQQkGCGqqDY5IgJFkEKgBCzDNGXhTKEye7boFRH6IPJj5EshiNCSjV4R4eSx7zhmR2tcdIuwmWiMeao7e0JHViZEWUI5aP8a9O+rx74D6sGEiJftiX3YeueIiFXg2KrhpqzjVC3dPZFYJZ7NOwwtNwM8R0UkLfH0sT5qck+OlkMq0BucKr0iWG7gpAQksD9esM1z3Lnf6SHjLh67nnKEGxC/iomWhByTeXOQJGHHcKxwHhHKnt1HIdYtmexkIb/HOURWTSJqn2gKMDG0bDUc/D0iAseovxUBoylmQCug6IVhSv+4DIeKI94jAr4AjiSEgQ25JYB+YWT9BZ94AM8erwgFkRifaArA6U0G5KT0m//z26REZuK9okgrT6VwE1jTHjbVzyNAyRwTEPOtuiex9FVBNZCkruaA4PZqFp1u8Rpww9/6rcK5y0EkAxRiZJt79PWOVYWGRE9pbJhavMengMflGyumk0akMsQnAAAAAElFTkSuQmCC' +TEST_JPEG_LOGO = 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAASABIAAD/4QBkRXhpZgAATU0AKgAAAAgAAwEGAAMAAAABAAIAAAESAAMAAAABAAEAAIdpAAQAAAABAAAAMgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAIaADAAQAAAABAAAAIwAAAAD/4QkhaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/PiA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA1LjQuMCI+IDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+IDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiLz4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA8P3hwYWNrZXQgZW5kPSJ3Ij8+AP/tADhQaG90b3Nob3AgMy4wADhCSU0EBAAAAAAAADhCSU0EJQAAAAAAENQdjNmPALIE6YAJmOz4Qn7/wAARCAAjACEDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9sAQwAGBgYGBgYKBgYKDgoKCg4SDg4ODhIXEhISEhIXHBcXFxcXFxwcHBwcHBwcIiIiIiIiJycnJycsLCwsLCwsLCws/9sAQwEHBwcLCgsTCgoTLh8aHy4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4u/90ABAAD/9oADAMBAAIRAxEAPwD6poormvFfivSvB2lHVtWLGMtsRE2hnYKzlVLsi52oxALDdjauWKqQCXQfFXh7xP8Aaf7AvYrz7HL5U3lk/K3YjIGVODtcZVsHBODXQV806bcT+E9L03XbCOS2udMsLQanbB4po72xYMfOQpKYyV2zPEwcNwVK7WAr6WriwWMWIUvdcZRdmnuu33rVFSjYKKKK7ST/0PqmuF8Vv4X8S+HNZ0+e/gIsYJvtEsL+bJZsI3UuyxNvBA3gpxvXchyCRXdV8ta3bW667DoloW1y10tLLTJxZWP2hoLSGYzNHclGZpJC0ESk8IAZcRB8is61T2cHK1/1DrY526h8YXHh691vxCz6dafY5Q0U7yGSeQxSxohNzJLcbUeQ4VnVNxBRCWL19b2eraVqE9xa2F3BcS2jbJ0ikV2ibJG1wpJU5UjBx0PpXzrrniy4k17TrrWrGex022ufMijvd9m11PGH8naXKqsUcgR3MhB5U7MA16x4L8F3vhq2sY9Ru4rg6day2tusEAhCrcOkknmEMRI2Y1AcLGT8xYMzZHjZFGu6cquKjaUnt2XS76vv/SN8RVjOdoKyXY9Cooor3TA//9H6pr4gfxRrMvxJ0/whLJE+maVrcVnZRtBCzwQQ3SIipMU80fKignflgPmJr7fr4A/5rf8A9zJ/7eUAdX8SfGviPwl8TtaPh6eK1eTyN0n2eCSUg28OV8ySNn2/KDtztzzjNfZVhY2umWMGm2KeXb2sSQxJknakYCqMkknAHUnNfBXxt/5Kdq//AG7/APpPFX3/AEAFFFFAH//Z' + + +@pytest.fixture +def mock_no_license_file(mocker): + ''' + Ensures that tests don't pick up dev container license file + ''' + os.environ['AWX_LICENSE_FILE'] = '/does_not_exist' + return None + + +@pytest.mark.django_db +def test_license_cannot_be_removed_via_system_settings(mock_no_license_file, get, put, patch, delete, admin, enterprise_license): + url = reverse('api:setting_singleton_detail', args=('system',)) + response = get(url, user=admin, expect=200) + assert not response.data['LICENSE'] + Setting.objects.create(key='TOWER_URL_BASE', value='https://towerhost') + Setting.objects.create(key='LICENSE', value=enterprise_license) + response = get(url, user=admin, expect=200) + assert 
response.data['LICENSE'] + put(url, user=admin, data=response.data, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['LICENSE'] + patch(url, user=admin, data={}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['LICENSE'] + delete(url, user=admin, expect=204) + response = get(url, user=admin, expect=200) + assert response.data['LICENSE'] + + +@pytest.mark.django_db +def test_url_base_defaults_to_request(options, admin): + # If TOWER_URL_BASE is not set, default to the Tower request hostname + resp = options(reverse('api:setting_singleton_detail', args=('system',)), user=admin, expect=200) + assert resp.data['actions']['PUT']['TOWER_URL_BASE']['default'] == 'http://testserver' + + +@pytest.mark.django_db +def test_jobs_settings(get, put, patch, delete, admin): + url = reverse('api:setting_singleton_detail', args=('jobs',)) + get(url, user=admin, expect=200) + delete(url, user=admin, expect=204) + response = get(url, user=admin, expect=200) + data = dict(response.data.items()) + put(url, user=admin, data=data, expect=200) + patch(url, user=admin, data={'AWX_PROOT_HIDE_PATHS': ['/home']}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['AWX_PROOT_HIDE_PATHS'] == ['/home'] + data.pop('AWX_PROOT_HIDE_PATHS') + data.pop('AWX_PROOT_SHOW_PATHS') + data.pop('AWX_ANSIBLE_CALLBACK_PLUGINS') + put(url, user=admin, data=data, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['AWX_PROOT_HIDE_PATHS'] == [] + assert response.data['AWX_PROOT_SHOW_PATHS'] == [] + assert response.data['AWX_ANSIBLE_CALLBACK_PLUGINS'] == [] + + +@pytest.mark.django_db +def test_ldap_settings(get, put, patch, delete, admin, enterprise_license): + url = reverse('api:setting_singleton_detail', args=('ldap',)) + get(url, user=admin, expect=404) + Setting.objects.create(key='LICENSE', value=enterprise_license) + get(url, user=admin, expect=200) + # The PUT below will fail at the moment because AUTH_LDAP_GROUP_TYPE + # defaults to None but cannot be set to None. 
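# Illustrative sketch (not part of this patch): the AUTH_LDAP_SERVER_URI
# patches below expect a validator that accepts an empty value or one or
# more ldap:// / ldaps:// URIs separated by commas and/or spaces, and
# rejects bare hostnames. The helper name here is hypothetical; the real
# validation lives elsewhere in the codebase.
import re

def validate_ldap_server_uri(value):
    if not value:
        # An empty string means "LDAP auth disabled" and is allowed.
        return value
    for uri in [u for u in re.split(r'[, ]+', value) if u]:
        if not re.match(r'^ldaps?://\S+$', uri):
            raise ValueError('%r is not a valid LDAP URI' % uri)
    return value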
+ # put(url, user=admin, data=response.data, expect=200) + delete(url, user=admin, expect=204) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': ''}, expect=200) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldap.example.com'}, expect=400) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldap://ldap.example.com'}, expect=200) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldaps://ldap.example.com'}, expect=200) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldap://ldap.example.com:389'}, expect=200) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldaps://ldap.example.com:636'}, expect=200) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldap://ldap.example.com ldap://ldap2.example.com'}, expect=200) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldap://ldap.example.com,ldap://ldap2.example.com'}, expect=200) + patch(url, user=admin, data={'AUTH_LDAP_SERVER_URI': 'ldap://ldap.example.com, ldap://ldap2.example.com'}, expect=200) + + +@pytest.mark.parametrize('setting', [ + 'AUTH_LDAP_USER_DN_TEMPLATE', + 'AUTH_LDAP_REQUIRE_GROUP', + 'AUTH_LDAP_DENY_GROUP', +]) +@pytest.mark.django_db +def test_empty_ldap_dn(get, put, patch, delete, admin, enterprise_license, + setting): + url = reverse('api:setting_singleton_detail', args=('ldap',)) + Setting.objects.create(key='LICENSE', value=enterprise_license) + + patch(url, user=admin, data={setting: ''}, expect=200) + resp = get(url, user=admin, expect=200) + assert resp.data[setting] is None + + patch(url, user=admin, data={setting: None}, expect=200) + resp = get(url, user=admin, expect=200) + assert resp.data[setting] is None + + +@pytest.mark.django_db +def test_radius_settings(get, put, patch, delete, admin, enterprise_license, settings): + url = reverse('api:setting_singleton_detail', args=('radius',)) + get(url, user=admin, expect=404) + Setting.objects.create(key='LICENSE', value=enterprise_license) + response = get(url, user=admin, expect=200) + put(url, user=admin, data=response.data, expect=200) + # Set secret via the API. + patch(url, user=admin, data={'RADIUS_SECRET': 'mysecret'}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['RADIUS_SECRET'] == '$encrypted$' + assert Setting.objects.filter(key='RADIUS_SECRET').first().value.startswith('$encrypted$') + assert settings.RADIUS_SECRET == 'mysecret' + # Set secret via settings wrapper. + settings_wrapper = settings._awx_conf_settings + settings_wrapper.RADIUS_SECRET = 'mysecret2' + response = get(url, user=admin, expect=200) + assert response.data['RADIUS_SECRET'] == '$encrypted$' + assert Setting.objects.filter(key='RADIUS_SECRET').first().value.startswith('$encrypted$') + assert settings.RADIUS_SECRET == 'mysecret2' + # If we send back $encrypted$, the setting is not updated. + patch(url, user=admin, data={'RADIUS_SECRET': '$encrypted$'}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['RADIUS_SECRET'] == '$encrypted$' + assert Setting.objects.filter(key='RADIUS_SECRET').first().value.startswith('$encrypted$') + assert settings.RADIUS_SECRET == 'mysecret2' + # If we send an empty string, the setting is also set to an empty string. 
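# Illustrative sketch (not part of this patch): the RADIUS_SECRET assertions
# above, and the empty-string case right below, hinge on write-only handling
# of encrypted settings. Roughly, with hypothetical helper names (the real
# encryption code lives in awx.conf):
import base64

PLACEHOLDER = '$encrypted$'

def encrypt_field(value):
    # Stand-in for the real encryption; only the '$encrypted$' prefix that
    # the tests assert on matters here.
    return PLACEHOLDER + base64.b64encode(value.encode('utf-8')).decode('ascii')

def merge_secret_on_save(submitted, stored):
    if submitted == PLACEHOLDER:
        # Round-tripping the placeholder keeps the previously stored secret.
        return stored
    if submitted == '':
        # An explicit empty string really does clear the secret.
        return ''
    return encrypt_field(submitted)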
+ patch(url, user=admin, data={'RADIUS_SECRET': ''}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['RADIUS_SECRET'] == '' + assert Setting.objects.filter(key='RADIUS_SECRET').first().value == '' + assert settings.RADIUS_SECRET == '' + + +@pytest.mark.django_db +def test_ui_settings(get, put, patch, delete, admin, enterprise_license): + url = reverse('api:setting_singleton_detail', args=('ui',)) + response = get(url, user=admin, expect=200) + assert 'CUSTOM_LOGO' not in response.data + assert 'CUSTOM_LOGIN_INFO' not in response.data + Setting.objects.create(key='LICENSE', value=enterprise_license) + response = get(url, user=admin, expect=200) + assert not response.data['CUSTOM_LOGO'] + assert not response.data['CUSTOM_LOGIN_INFO'] + put(url, user=admin, data=response.data, expect=200) + patch(url, user=admin, data={'CUSTOM_LOGO': 'data:text/plain;base64,'}, expect=400) + patch(url, user=admin, data={'CUSTOM_LOGO': 'data:image/png;base64,00'}, expect=400) + patch(url, user=admin, data={'CUSTOM_LOGO': TEST_GIF_LOGO}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['CUSTOM_LOGO'] == TEST_GIF_LOGO + patch(url, user=admin, data={'CUSTOM_LOGO': TEST_PNG_LOGO}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['CUSTOM_LOGO'] == TEST_PNG_LOGO + patch(url, user=admin, data={'CUSTOM_LOGO': TEST_JPEG_LOGO}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['CUSTOM_LOGO'] == TEST_JPEG_LOGO + patch(url, user=admin, data={'CUSTOM_LOGO': ''}, expect=200) + response = get(url, user=admin, expect=200) + assert not response.data['CUSTOM_LOGO'] + patch(url, user=admin, data={'CUSTOM_LOGIN_INFO': 'Customize Me!'}, expect=200) + response = get(url, user=admin, expect=200) + assert response.data['CUSTOM_LOGIN_INFO'] + patch(url, user=admin, data={'CUSTOM_LOGIN_INFO': ''}, expect=200) + response = get(url, user=admin, expect=200) + assert not response.data['CUSTOM_LOGIN_INFO'] + delete(url, user=admin, expect=204) + response = get(url, user=admin, expect=200) + assert not response.data['CUSTOM_LOGO'] + assert not response.data['CUSTOM_LOGIN_INFO'] diff --git a/awx/main/tests/functional/api/test_survey_spec.py b/awx/main/tests/functional/api/test_survey_spec.py index d6cc512847..f954538973 100644 --- a/awx/main/tests/functional/api/test_survey_spec.py +++ b/awx/main/tests/functional/api/test_survey_spec.py @@ -6,7 +6,7 @@ from django.core.urlresolvers import reverse from awx.main.models.jobs import JobTemplate, Job from awx.main.models.activity_stream import ActivityStream -from awx.api.license import LicenseForbids +from awx.conf.license import LicenseForbids from awx.main.access import JobTemplateAccess @@ -16,11 +16,13 @@ def mock_no_surveys(self, add_host=False, feature=None, check_expiration=True): else: pass + @pytest.fixture def job_template_with_survey(job_template_factory): objects = job_template_factory('jt', project='prj', survey='submitted_email') return objects.job_template + # Survey license-based denial tests @mock.patch('awx.api.views.feature_enabled', lambda feature: False) @pytest.mark.django_db @@ -31,6 +33,7 @@ def test_survey_spec_view_denied(job_template_with_survey, get, admin_user): args=(job_template_with_survey.id,)), admin_user, expect=402) assert response.data['detail'] == 'Your license does not allow adding surveys.' 
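# Illustrative sketch (not part of this patch): the 402 responses asserted in
# these license-denial tests come down to a guard of roughly this shape, with
# feature_enabled() mocked wholesale in each test. This is a simplified
# stand-in, not the actual view code.
from rest_framework import status
from rest_framework.response import Response

def survey_license_guard(feature_enabled):
    # Short-circuit with HTTP 402 when the 'surveys' feature is unlicensed.
    if not feature_enabled('surveys'):
        return Response({'detail': 'Your license does not allow adding surveys.'},
                        status=status.HTTP_402_PAYMENT_REQUIRED)
    return None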
+ @mock.patch('awx.main.access.BaseAccess.check_license', mock_no_surveys) @pytest.mark.django_db @pytest.mark.survey @@ -39,6 +42,7 @@ def test_deny_enabling_survey(deploy_jobtemplate, patch, admin_user): data=dict(survey_enabled=True), user=admin_user, expect=402) assert response.data['detail'] == 'Feature surveys is not enabled in the active license.' + @mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys) @pytest.mark.django_db @pytest.mark.survey @@ -48,6 +52,7 @@ def test_job_start_blocked_without_survey_license(job_template_with_survey, admi with pytest.raises(LicenseForbids): access.can_start(job_template_with_survey) + @mock.patch('awx.main.access.BaseAccess.check_license', mock_no_surveys) @pytest.mark.django_db @pytest.mark.survey @@ -65,6 +70,7 @@ def test_deny_creating_with_survey(project, post, admin_user): user=admin_user, expect=402) assert response.data['detail'] == 'Feature surveys is not enabled in the active license.' + # Test normal operations with survey license work @mock.patch('awx.api.views.feature_enabled', lambda feature: True) @pytest.mark.django_db @@ -73,6 +79,7 @@ def test_survey_spec_view_allowed(deploy_jobtemplate, get, admin_user): get(reverse('api:job_template_survey_spec', args=(deploy_jobtemplate.id,)), admin_user, expect=200) + @mock.patch('awx.api.views.feature_enabled', lambda feature: True) @pytest.mark.django_db @pytest.mark.survey @@ -83,6 +90,7 @@ def test_survey_spec_sucessful_creation(survey_spec_factory, job_template, post, updated_jt = JobTemplate.objects.get(pk=job_template.pk) assert updated_jt.survey_spec == survey_input_data + # Tests related to survey content validation @mock.patch('awx.api.views.feature_enabled', lambda feature: True) @pytest.mark.django_db @@ -96,6 +104,7 @@ def test_survey_spec_non_dict_error(deploy_jobtemplate, post, admin_user): user=admin_user, expect=400) assert response.data['error'] == "Survey question 0 is not a json object." + @mock.patch('awx.api.views.feature_enabled', lambda feature: True) @pytest.mark.django_db @pytest.mark.survey @@ -106,6 +115,7 @@ def test_survey_spec_dual_names_error(survey_spec_factory, deploy_jobtemplate, p user=user('admin', True), expect=400) assert response.data['error'] == "'variable' 'submitter_email' duplicated in survey question 1." 
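# Illustrative sketch (not part of this patch): the two validation errors
# asserted above reduce to checks of roughly this shape over the submitted
# list of survey questions; a simplified stand-in for the view's validation.
def validate_survey_questions(spec):
    seen = set()
    for idx, question in enumerate(spec):
        if not isinstance(question, dict):
            return "Survey question %s is not a json object." % idx
        variable = question.get('variable')
        if variable in seen:
            return "'variable' '%s' duplicated in survey question %s." % (variable, idx)
        seen.add(variable)
    return None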
+ # Test actions that should be allowed with non-survey license @mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys) @pytest.mark.django_db @@ -115,6 +125,7 @@ def test_disable_survey_access_without_license(job_template_with_survey, admin_u access = JobTemplateAccess(admin_user) assert access.can_change(job_template_with_survey, dict(survey_enabled=False)) + @mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys) @pytest.mark.django_db @pytest.mark.survey @@ -124,6 +135,7 @@ def test_delete_survey_access_without_license(job_template_with_survey, admin_us assert access.can_change(job_template_with_survey, dict(survey_spec=None)) assert access.can_change(job_template_with_survey, dict(survey_spec={})) + @mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys) @pytest.mark.django_db @pytest.mark.survey @@ -137,6 +149,7 @@ def test_job_start_allowed_with_survey_spec(job_template_factory, admin_user): access = JobTemplateAccess(admin_user) assert access.can_start(job_template_with_survey, {}) + @mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys) @pytest.mark.django_db @pytest.mark.survey @@ -146,6 +159,7 @@ def test_job_template_delete_access_with_survey(job_template_with_survey, admin_ access = JobTemplateAccess(admin_user) assert access.can_delete(job_template_with_survey) + @mock.patch('awx.api.views.feature_enabled', lambda feature: False) @mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys) @pytest.mark.django_db @@ -157,6 +171,7 @@ def test_delete_survey_spec_without_license(job_template_with_survey, delete, ad new_jt = JobTemplate.objects.get(pk=job_template_with_survey.pk) assert new_jt.survey_spec == {} + @mock.patch('awx.main.access.BaseAccess.check_license', lambda self, **kwargs: True) @mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job', lambda self, extra_vars: mock.MagicMock(spec=Job, id=968)) @@ -174,6 +189,7 @@ def test_launch_survey_enabled_but_no_survey_spec(job_template_factory, post, ad dict(extra_vars=dict(survey_var=7)), admin_user, expect=201) assert 'survey_var' in response.data['ignored_fields']['extra_vars'] + @mock.patch('awx.main.access.BaseAccess.check_license', new=mock_no_surveys) @mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.create_unified_job', lambda self: mock.MagicMock(spec=Job, id=968)) @@ -191,6 +207,7 @@ def test_launch_with_non_empty_survey_spec_no_license(job_template_factory, post obj.save() post(reverse('api:job_template_launch', args=[obj.pk]), {}, admin_user, expect=201) + @pytest.mark.django_db @pytest.mark.survey def test_redact_survey_passwords_in_activity_stream(job_template_with_survey_passwords): diff --git a/awx/main/tests/functional/api/test_unified_job_template.py b/awx/main/tests/functional/api/test_unified_job_template.py new file mode 100644 index 0000000000..695bd51d23 --- /dev/null +++ b/awx/main/tests/functional/api/test_unified_job_template.py @@ -0,0 +1,11 @@ +import pytest + +from django.core.urlresolvers import reverse + + +@pytest.mark.django_db +def test_aliased_forward_reverse_field_searches(instance, options, get, admin): + url = reverse('api:unified_job_template_list') + response = options(url, None, admin) + assert 'job_template__search' in response.data['related_search_fields'] + get(reverse("api:unified_job_template_list") + "?job_template__search=anything", user=admin, expect=200) diff --git a/awx/main/tests/functional/api/test_unified_jobs_view.py 
b/awx/main/tests/functional/api/test_unified_jobs_view.py index ed7034a28e..0f0a2ca956 100644 --- a/awx/main/tests/functional/api/test_unified_jobs_view.py +++ b/awx/main/tests/functional/api/test_unified_jobs_view.py @@ -35,6 +35,7 @@ def test_cases(project): ret.append(e) return ret + @pytest.fixture def negative_test_cases(job_factory): ret = [] @@ -53,6 +54,7 @@ formats = [ ('html', 'text/html'), ] + @pytest.mark.parametrize("format,content_type", formats) @pytest.mark.django_db def test_project_update_redaction_enabled(get, format, content_type, test_cases, admin): @@ -66,6 +68,7 @@ def test_project_update_redaction_enabled(get, format, content_type, test_cases, assert test_data['uri'].password not in content assert content.count(test_data['uri'].host) == test_data['occurrences'] + @pytest.mark.parametrize("format,content_type", formats) @pytest.mark.django_db def test_job_redaction_disabled(get, format, content_type, negative_test_cases, admin): @@ -80,7 +83,6 @@ def test_job_redaction_disabled(get, format, content_type, negative_test_cases, @pytest.mark.django_db def test_options_fields_choices(instance, options, user): - url = reverse('api:unified_job_list') response = options(url, None, user('admin', True)) @@ -89,5 +91,3 @@ def test_options_fields_choices(instance, options, user): assert UnifiedJob.LAUNCH_TYPE_CHOICES == response.data['actions']['GET']['launch_type']['choices'] assert 'choice' == response.data['actions']['GET']['status']['type'] assert UnifiedJob.STATUS_CHOICES == response.data['actions']['GET']['status']['choices'] - - diff --git a/awx/main/tests/functional/api/test_user.py b/awx/main/tests/functional/api/test_user.py index d739d417c0..e3b7b4145c 100644 --- a/awx/main/tests/functional/api/test_user.py +++ b/awx/main/tests/functional/api/test_user.py @@ -7,62 +7,41 @@ from django.core.urlresolvers import reverse # user creation # +EXAMPLE_USER_DATA = { + "username": "affable", + "first_name": "a", + "last_name": "a", + "email": "a@a.com", + "is_superuser": False, + "password": "r$TyKiOCb#ED" +} + + @pytest.mark.django_db def test_user_create(post, admin): - response = post(reverse('api:user_list'), { - "username": "affable", - "first_name": "a", - "last_name": "a", - "email": "a@a.com", - "is_superuser": False, - "password": "fo0m4nchU" - }, admin) + response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin) assert response.status_code == 201 + assert not response.data['is_superuser'] + assert not response.data['is_system_auditor'] + @pytest.mark.django_db def test_fail_double_create_user(post, admin): - response = post(reverse('api:user_list'), { - "username": "affable", - "first_name": "a", - "last_name": "a", - "email": "a@a.com", - "is_superuser": False, - "password": "fo0m4nchU" - }, admin) + response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin) assert response.status_code == 201 - response = post(reverse('api:user_list'), { - "username": "affable", - "first_name": "a", - "last_name": "a", - "email": "a@a.com", - "is_superuser": False, - "password": "fo0m4nchU" - }, admin) + response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin) assert response.status_code == 400 + @pytest.mark.django_db def test_create_delete_create_user(post, delete, admin): - response = post(reverse('api:user_list'), { - "username": "affable", - "first_name": "a", - "last_name": "a", - "email": "a@a.com", - "is_superuser": False, - "password": "fo0m4nchU" - }, admin) + response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin) assert 
response.status_code == 201 response = delete(reverse('api:user_detail', args=(response.data['id'],)), admin) assert response.status_code == 204 - response = post(reverse('api:user_list'), { - "username": "affable", - "first_name": "a", - "last_name": "a", - "email": "a@a.com", - "is_superuser": False, - "password": "fo0m4nchU" - }, admin) + response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin) print(response.data) assert response.status_code == 201 diff --git a/awx/main/tests/functional/commands/conftest.py b/awx/main/tests/functional/commands/conftest.py index 2de8846b0a..2917c10fcc 100644 --- a/awx/main/tests/functional/commands/conftest.py +++ b/awx/main/tests/functional/commands/conftest.py @@ -3,6 +3,7 @@ import time from datetime import datetime + @pytest.fixture def fact_msg_base(inventory, hosts): host_objs = hosts(1) @@ -13,6 +14,7 @@ def fact_msg_base(inventory, hosts): 'inventory_id': inventory.id } + @pytest.fixture def fact_msg_small(fact_msg_base): fact_msg_base['facts'] = { @@ -77,7 +79,7 @@ def fact_msg_small(fact_msg_base): } } return fact_msg_base - + ''' Facts sent from ansible to our fact cache reciever. @@ -92,18 +94,20 @@ key of 'ansible' } ''' + @pytest.fixture def fact_msg_ansible(fact_msg_base, fact_ansible_json): fact_msg_base['facts'] = fact_ansible_json return fact_msg_base + @pytest.fixture def fact_msg_packages(fact_msg_base, fact_packages_json): fact_msg_base['facts']['packages'] = fact_packages_json return fact_msg_base + @pytest.fixture def fact_msg_services(fact_msg_base, fact_services_json): fact_msg_base['facts']['services'] = fact_services_json return fact_msg_base - diff --git a/awx/main/tests/functional/commands/test_cleanup_facts.py b/awx/main/tests/functional/commands/test_cleanup_facts.py index 93ddb72d14..26a2a5fada 100644 --- a/awx/main/tests/functional/commands/test_cleanup_facts.py +++ b/awx/main/tests/functional/commands/test_cleanup_facts.py @@ -16,14 +16,17 @@ from awx.main.management.commands.cleanup_facts import CleanupFacts, Command from awx.main.models.fact import Fact from awx.main.models.inventory import Host -def mock_feature_enabled(feature, bypass_database=None): + +def mock_feature_enabled(feature): return True -def mock_feature_disabled(feature, bypass_database=None): + +def mock_feature_disabled(feature): return False + @pytest.mark.django_db -def test_cleanup_granularity(fact_scans, hosts): +def test_cleanup_granularity(fact_scans, hosts, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() hosts(5) fact_scans(10, timestamp_epoch=epoch) @@ -35,11 +38,12 @@ def test_cleanup_granularity(fact_scans, hosts): deleted_count = cleanup_facts.cleanup(timestamp_future, granularity) assert 60 == deleted_count -''' -Delete half of the scans -''' + @pytest.mark.django_db -def test_cleanup_older_than(fact_scans, hosts): +def test_cleanup_older_than(fact_scans, hosts, monkeypatch_jsonbfield_get_db_prep_save): + ''' + Delete half of the scans + ''' epoch = timezone.now() hosts(5) fact_scans(28, timestamp_epoch=epoch) @@ -51,8 +55,9 @@ def test_cleanup_older_than(fact_scans, hosts): deleted_count = cleanup_facts.cleanup(fact_middle.timestamp, granularity) assert 210 == deleted_count + @pytest.mark.django_db -def test_cleanup_older_than_granularity_module(fact_scans, hosts): +def test_cleanup_older_than_granularity_module(fact_scans, hosts, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() hosts(5) fact_scans(10, timestamp_epoch=epoch) @@ -65,11 +70,11 @@ def 
test_cleanup_older_than_granularity_module(fact_scans, hosts): assert 20 == deleted_count -''' -Reduce the granularity of half of the facts scans, by half. -''' @pytest.mark.django_db -def test_cleanup_logic(fact_scans, hosts): +def test_cleanup_logic(fact_scans, hosts, monkeypatch_jsonbfield_get_db_prep_save): + ''' + Reduce the granularity of half of the facts scans, by half. + ''' epoch = timezone.now() hosts = hosts(5) fact_scans(60, timestamp_epoch=epoch) @@ -95,6 +100,7 @@ def test_cleanup_logic(fact_scans, hosts): timestamp_pivot -= granularity assert fact.timestamp == timestamp_pivot + @mock.patch('awx.main.management.commands.cleanup_facts.feature_enabled', new=mock_feature_disabled) @pytest.mark.django_db @pytest.mark.license_feature @@ -104,6 +110,7 @@ def test_system_tracking_feature_disabled(mocker): cmd.handle(None) assert 'The System Tracking feature is not enabled for your Tower instance' in err.value + @mock.patch('awx.main.management.commands.cleanup_facts.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db def test_parameters_ok(mocker): @@ -117,6 +124,7 @@ def test_parameters_ok(mocker): cmd.handle(None, **kv) run.assert_called_once_with(relativedelta(days=1), relativedelta(days=1), module=None) + @pytest.mark.django_db def test_string_time_to_timestamp_ok(): kvs = [ @@ -146,6 +154,7 @@ def test_string_time_to_timestamp_ok(): res = cmd.string_time_to_timestamp(kv['time']) assert kv['timestamp'] == res + @pytest.mark.django_db def test_string_time_to_timestamp_invalid(): kvs = [ @@ -175,6 +184,7 @@ def test_string_time_to_timestamp_invalid(): res = cmd.string_time_to_timestamp(kv['time']) assert res is None + @mock.patch('awx.main.management.commands.cleanup_facts.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db def test_parameters_fail(mocker): @@ -197,4 +207,3 @@ def test_parameters_fail(mocker): with pytest.raises(CommandError) as err: cmd.handle(None, older_than=kv['older_than'], granularity=kv['granularity']) assert kv['msg'] in err.value - diff --git a/awx/main/tests/functional/commands/test_commands.py b/awx/main/tests/functional/commands/test_commands.py index 631de3ee04..95cd291cee 100644 --- a/awx/main/tests/functional/commands/test_commands.py +++ b/awx/main/tests/functional/commands/test_commands.py @@ -8,10 +8,8 @@ except ImportError: from django.core.management import call_command -from awx.main.models import Instance - from awx.main.management.commands.update_password import UpdatePassword -from awx.main.management.commands.remove_instance import Command as RemoveInstance + def run_command(name, *args, **options): command_runner = options.pop('command_runner', call_command) @@ -54,22 +52,3 @@ def test_update_password_command(mocker, username, password, expected, changed): assert stdout == expected else: assert str(result) == expected - - -@pytest.mark.parametrize( - "primary,hostname,startswith,exception", [ - (True, "127.0.0.1", "Cannot remove primary", None), - (False, "127.0.0.2", "Successfully removed", None), - (False, "127.0.0.3", "No matching instance", Instance.DoesNotExist), - ] -) -def test_remove_instance_command(mocker, primary, hostname, startswith, exception): - mock_instance = mocker.MagicMock(primary=primary, enforce_unique_find=True) - with mocker.patch.object(Instance.objects, 'get', return_value=mock_instance, side_effect=exception): - with mocker.patch.object(RemoveInstance, 'include_option_hostname_uuid_find'): - with mocker.patch.object(RemoveInstance, 'get_unique_fields', 
return_value={'hostname':hostname, 'uuid':1}): - result, stdout, stderr = run_command("remove_instance", hostname=hostname) - if result is None: - assert stdout.startswith(startswith) - else: - assert str(result).startswith(startswith) diff --git a/awx/main/tests/functional/commands/test_run_fact_cache_receiver.py b/awx/main/tests/functional/commands/test_run_fact_cache_receiver.py index 1a8965e81c..461024ee9c 100644 --- a/awx/main/tests/functional/commands/test_run_fact_cache_receiver.py +++ b/awx/main/tests/functional/commands/test_run_fact_cache_receiver.py @@ -10,10 +10,11 @@ import json from django.utils import timezone # AWX -from awx.main.management.commands.run_fact_cache_receiver import FactCacheReceiver +from awx.main.management.commands.run_fact_cache_receiver import FactBrokerWorker from awx.main.models.fact import Fact from awx.main.models.inventory import Host + # TODO: Check that timestamp and other attributes are as expected def check_process_fact_message_module(fact_returned, data, module_name): date_key = data['date_key'] @@ -36,60 +37,49 @@ def check_process_fact_message_module(fact_returned, data, module_name): assert timestamp == fact_returned.timestamp assert module_name == fact_returned.module -@pytest.mark.django_db -def test_process_fact_message_ansible(fact_msg_ansible): - receiver = FactCacheReceiver() - fact_returned = receiver.process_fact_message(fact_msg_ansible) +@pytest.mark.django_db +def test_process_fact_message_ansible(fact_msg_ansible, monkeypatch_jsonbfield_get_db_prep_save): + receiver = FactBrokerWorker(None) + fact_returned = receiver.process_fact_message(fact_msg_ansible, None) check_process_fact_message_module(fact_returned, fact_msg_ansible, 'ansible') -@pytest.mark.django_db -def test_process_fact_message_packages(fact_msg_packages): - receiver = FactCacheReceiver() - fact_returned = receiver.process_fact_message(fact_msg_packages) +@pytest.mark.django_db +def test_process_fact_message_packages(fact_msg_packages, monkeypatch_jsonbfield_get_db_prep_save): + receiver = FactBrokerWorker(None) + fact_returned = receiver.process_fact_message(fact_msg_packages, None) check_process_fact_message_module(fact_returned, fact_msg_packages, 'packages') -@pytest.mark.django_db -def test_process_fact_message_services(fact_msg_services): - receiver = FactCacheReceiver() - fact_returned = receiver.process_fact_message(fact_msg_services) +@pytest.mark.django_db +def test_process_fact_message_services(fact_msg_services, monkeypatch_jsonbfield_get_db_prep_save): + receiver = FactBrokerWorker(None) + fact_returned = receiver.process_fact_message(fact_msg_services, None) check_process_fact_message_module(fact_returned, fact_msg_services, 'services') -''' -We pickypack our fact sending onto the Ansible fact interface. -The interface is . Where facts is a json blob of all the facts. -This makes it hard to decipher what facts are new/changed. -Because of this, we handle the same fact module data being sent multiple times -and just keep the newest version. -''' + @pytest.mark.django_db -def test_process_facts_message_ansible_overwrite(fact_scans, fact_msg_ansible): +def test_process_facts_message_ansible_overwrite(fact_scans, fact_msg_ansible, monkeypatch_jsonbfield_get_db_prep_save): + ''' + We piggyback our fact sending onto the Ansible fact interface. + The interface is . Where facts is a json blob of all the facts. + This makes it hard to decipher what facts are new/changed.
+ Because of this, we handle the same fact module data being sent multiple times + and just keep the newest version. + ''' #epoch = timezone.now() epoch = datetime.fromtimestamp(fact_msg_ansible['date_key']) fact_scans(fact_scans=1, timestamp_epoch=epoch) key = 'ansible.overwrite' value = 'hello world' - receiver = FactCacheReceiver() - receiver.process_fact_message(fact_msg_ansible) + receiver = FactBrokerWorker(None) + receiver.process_fact_message(fact_msg_ansible, None) fact_msg_ansible['facts'][key] = value - fact_returned = receiver.process_fact_message(fact_msg_ansible) + fact_returned = receiver.process_fact_message(fact_msg_ansible, None) fact_obj = Fact.objects.get(id=fact_returned.id) assert key in fact_obj.facts assert fact_msg_ansible['facts'] == (json.loads(fact_obj.facts) if isinstance(fact_obj.facts, unicode) else fact_obj.facts) # TODO: Just make response.data['facts'] when we're only dealing with postgres, or if jsonfields ever fixes this bug - -# Ensure that the message flows from the socket through to process_fact_message() -@pytest.mark.django_db -def test_run_receiver(mocker, fact_msg_ansible): - mocker.patch("awx.main.socket.Socket.listen", return_value=[fact_msg_ansible]) - - receiver = FactCacheReceiver() - mocker.patch.object(receiver, 'process_fact_message', return_value=None) - - receiver.run_receiver(use_processing_threads=False) - - receiver.process_fact_message.assert_called_once_with(fact_msg_ansible) diff --git a/awx/main/tests/functional/conftest.py b/awx/main/tests/functional/conftest.py index e5e1222a39..3d79ca4c4c 100644 --- a/awx/main/tests/functional/conftest.py +++ b/awx/main/tests/functional/conftest.py @@ -4,14 +4,18 @@ import pytest import mock import json import os +import six from datetime import timedelta # Django from django.core.urlresolvers import resolve +from django.core.cache import cache from django.utils.six.moves.urllib.parse import urlparse from django.utils import timezone from django.contrib.auth.models import User from django.conf import settings +from django.core.serializers.json import DjangoJSONEncoder +from jsonbfield.fields import JSONField # AWX from awx.main.models.projects import Project @@ -37,30 +41,32 @@ from awx.main.models.organization import ( Permission, Team, ) - +from awx.main.models.rbac import Role from awx.main.models.notifications import ( NotificationTemplate, Notification ) -''' -Disable all django model signals. -''' -@pytest.fixture(scope="session", autouse=False) -def disable_signals(): - mocked = mock.patch('django.dispatch.Signal.send', autospec=True) - mocked.start() -''' -FIXME: Not sure how "far" just setting the BROKER_URL will get us. -We may need to incluence CELERY's configuration like we do in the old unit tests (see base.py) +@pytest.fixture(autouse=True) +def clear_cache(): + ''' + Clear cache (local memory) for each test to prevent using cached settings. + ''' + cache.clear() + -Allows django signal code to execute without the need for redis -''' @pytest.fixture(scope="session", autouse=True) def celery_memory_broker(): + ''' + FIXME: Not sure how "far" just setting the BROKER_URL will get us. 
+ We may need to influence CELERY's configuration like we do in the old unit tests (see base.py) + + Allows django signal code to execute without the need for redis + ''' settings.BROKER_URL='memory://localhost/' + @pytest.fixture def user(): def u(name, is_superuser=False): @@ -72,6 +78,7 @@ def user(): return user return u + @pytest.fixture def check_jobtemplate(project, inventory, credential): return \ @@ -83,6 +90,7 @@ def check_jobtemplate(project, inventory, credential): name='check-job-template' ) + @pytest.fixture def deploy_jobtemplate(project, inventory, credential): return \ @@ -94,10 +102,12 @@ def deploy_jobtemplate(project, inventory, credential): name='deploy-job-template' ) + @pytest.fixture def team(organization): return organization.teams.create(name='test-team') + @pytest.fixture def team_member(user, team): ret = user('team-member', False) @@ -105,15 +115,29 @@ def team_member(user, team): return ret +@pytest.fixture(scope="session", autouse=True) +def project_playbooks(): + ''' + Return playbook_files as playbooks for manual projects when testing. + ''' + class PlaybooksMock(mock.PropertyMock): + def __get__(self, obj, obj_type): + return obj.playbook_files + mocked = mock.patch.object(Project, 'playbooks', new_callable=PlaybooksMock) + mocked.start() + + @pytest.fixture @mock.patch.object(Project, "update", lambda self, **kwargs: None) def project(instance, organization): prj = Project.objects.create(name="test-proj", description="test-proj-desc", - organization=organization + organization=organization, + playbook_files=['helloworld.yml', 'alt-helloworld.yml'] ) return prj + @pytest.fixture def project_factory(organization): def factory(name): @@ -127,12 +151,14 @@ def project_factory(organization): return prj return factory + @pytest.fixture def job_factory(job_template, admin): def factory(job_template=job_template, initial_state='new', created_by=admin): return job_template.create_job(created_by=created_by, status=initial_state) return factory + @pytest.fixture def team_factory(organization): def factory(name): @@ -145,35 +171,43 @@ def team_factory(organization): return t return factory + @pytest.fixture def user_project(user): owner = user('owner') return Project.objects.create(name="test-user-project", created_by=owner, description="test-user-project-desc") + @pytest.fixture def instance(settings): - return Instance.objects.create(uuid=settings.SYSTEM_UUID, primary=True, hostname="instance.example.org") + return Instance.objects.create(uuid=settings.SYSTEM_UUID, hostname="instance.example.org", capacity=100) + @pytest.fixture def organization(instance): return Organization.objects.create(name="test-org", description="test-org-desc") + @pytest.fixture def credential(): return Credential.objects.create(kind='aws', name='test-cred', username='something', password='secret') + @pytest.fixture def machine_credential(): return Credential.objects.create(name='machine-cred', kind='ssh', username='test_user', password='pas4word') + @pytest.fixture def org_credential(organization): return Credential.objects.create(kind='aws', name='test-cred', username='something', password='secret', organization=organization) + @pytest.fixture def inventory(organization): return organization.inventories.create(name="test-inv") + @pytest.fixture def inventory_factory(organization): def factory(name, org=organization): @@ -184,10 +218,12 @@ def inventory_factory(organization): return inv return factory + @pytest.fixture def label(organization): return
organization.labels.create(name="test-label", description="test-label-desc") + @pytest.fixture def notification_template(organization): return NotificationTemplate.objects.create(name='test-notification_template', @@ -196,6 +232,7 @@ def notification_template(organization): notification_configuration=dict(url="http://localhost", headers={"Test": "Header"})) + @pytest.fixture def notification(notification_template): return Notification.objects.create(notification_template=notification_template, @@ -205,27 +242,40 @@ def notification(notification_template): recipients='admin@redhat.com', subject='email subject') + @pytest.fixture def job_template_with_survey_passwords(job_template_with_survey_passwords_factory): return job_template_with_survey_passwords_factory(persisted=True) + @pytest.fixture def admin(user): return user('admin', True) + +@pytest.fixture +def system_auditor(user): + u = user(False) + Role.singleton('system_auditor').members.add(u) + return u + + @pytest.fixture def alice(user): return user('alice', False) + @pytest.fixture def bob(user): return user('bob', False) + @pytest.fixture def rando(user): "Rando, the random user that doesn't have access to anything" return user('rando', False) + @pytest.fixture def org_admin(user, organization): ret = user('org-admin', False) @@ -233,6 +283,7 @@ def org_admin(user, organization): organization.member_role.members.add(ret) return ret + @pytest.fixture def org_auditor(user, organization): ret = user('org-auditor', False) @@ -240,12 +291,14 @@ def org_auditor(user, organization): organization.member_role.members.add(ret) return ret + @pytest.fixture def org_member(user, organization): ret = user('org-member', False) organization.member_role.members.add(ret) return ret + @pytest.fixture def organizations(instance): def rf(organization_count=1): @@ -256,6 +309,7 @@ def organizations(instance): return orgs return rf + @pytest.fixture def group_factory(inventory): def g(name): @@ -265,6 +319,7 @@ def group_factory(inventory): return Group.objects.create(inventory=inventory, name=name) return g + @pytest.fixture def hosts(group_factory): group1 = group_factory('group-1') @@ -280,23 +335,28 @@ def hosts(group_factory): return hosts return rf + @pytest.fixture def group(inventory): return inventory.groups.create(name='single-group') + @pytest.fixture def inventory_source(group, inventory): return InventorySource.objects.create(name=group.name, group=group, inventory=inventory, source='gce') + @pytest.fixture def inventory_update(inventory_source): return InventoryUpdate.objects.create(inventory_source=inventory_source) + @pytest.fixture def host(group, inventory): return group.hosts.create(name='single-host', inventory=inventory) + @pytest.fixture def permissions(): return { @@ -338,36 +398,42 @@ def _request(verb): return response return rf + @pytest.fixture def post(): return _request('post') + @pytest.fixture def get(): return _request('get') + @pytest.fixture def put(): return _request('put') + @pytest.fixture def patch(): return _request('patch') + @pytest.fixture def delete(): return _request('delete') + @pytest.fixture def head(): return _request('head') + @pytest.fixture def options(): return _request('options') - @pytest.fixture def fact_scans(group_factory, fact_ansible_json, fact_packages_json, fact_services_json): group1 = group_factory('group-1') @@ -390,27 +456,33 @@ def fact_scans(group_factory, fact_ansible_json, fact_packages_json, fact_servic return facts return rf + def _fact_json(module_name): current_dir = 
os.path.dirname(os.path.realpath(__file__)) with open('%s/%s.json' % (current_dir, module_name)) as f: return json.load(f) + @pytest.fixture def fact_ansible_json(): return _fact_json('ansible') + @pytest.fixture def fact_packages_json(): return _fact_json('packages') + @pytest.fixture def fact_services_json(): return _fact_json('services') + @pytest.fixture def permission_inv_read(organization, inventory, team): return Permission.objects.create(inventory=inventory, team=team, permission_type=PERM_INVENTORY_READ) + @pytest.fixture def job_template(organization): jt = JobTemplate(name='test-job_template') @@ -418,6 +490,7 @@ def job_template(organization): return jt + @pytest.fixture def job_template_labels(organization, job_template): job_template.labels.create(name="label-1", organization=organization) @@ -425,3 +498,24 @@ def job_template_labels(organization, job_template): return job_template + +def dumps(value): + return DjangoJSONEncoder().encode(value) + + +# Taken from https://github.com/django-extensions/django-extensions/blob/54fe88df801d289882a79824be92d823ab7be33e/django_extensions/db/fields/json.py +def get_db_prep_save(self, value, connection, **kwargs): + """Convert our JSON object to a string before we save""" + if value is None and self.null: + return None + # default values come in as strings; only non-strings should be + # run through `dumps` + if not isinstance(value, six.string_types): + value = dumps(value) + + return value + + +@pytest.fixture +def monkeypatch_jsonbfield_get_db_prep_save(mocker): + JSONField.get_db_prep_save = get_db_prep_save diff --git a/awx/main/tests/functional/core/test_licenses.py b/awx/main/tests/functional/core/test_licenses.py index 37f3c63fa9..f2c3d9348e 100644 --- a/awx/main/tests/functional/core/test_licenses.py +++ b/awx/main/tests/functional/core/test_licenses.py @@ -1,28 +1,24 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. 
-import json -import mock -import os -import tempfile import time import pytest from datetime import datetime from awx.main.models import Host -from awx.main.task_engine import TaskSerializer, TaskEngager +from awx.main.task_engine import TaskEnhancer @pytest.mark.django_db def test_license_writer(inventory, admin): - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='acmecorp', contact_name='Michael DeHaan', contact_email='michael@ansibleworks.com', license_date=25000, # seconds since epoch instance_count=500) - data = writer.get_data() + data = task_enhancer.enhance() Host.objects.bulk_create( [ @@ -42,13 +38,7 @@ def test_license_writer(inventory, admin): assert data['license_date'] == 25000 assert data['license_key'] == "11bae31f31c6a6cdcb483a278cdbe98bd8ac5761acd7163a50090b0f098b3a13" - strdata = writer.get_string() - strdata_loaded = json.loads(strdata) - assert strdata_loaded == data - - reader = TaskSerializer() - - vdata = reader.from_string(strdata) + vdata = task_enhancer.validate_enhancements() assert vdata['available_instances'] == 500 assert vdata['current_instances'] == 12 @@ -61,72 +51,44 @@ def test_license_writer(inventory, admin): assert vdata['compliant'] is False assert vdata['subscription_name'] + @pytest.mark.django_db def test_expired_licenses(): - reader = TaskSerializer() - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 3600), instance_count=100, trial=True) - strdata = writer.get_string() - vdata = reader.from_string(strdata) + task_enhancer.enhance() + vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] < 0 - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 2592001), instance_count=100, trial=False) - strdata = writer.get_string() - vdata = reader.from_string(strdata) + task_enhancer.enhance() + vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] < 0 - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 3600), instance_count=100, trial=False) - strdata = writer.get_string() - vdata = reader.from_string(strdata) + task_enhancer.enhance() + vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] > 0 - -@pytest.mark.django_db -def test_aws_license(): - os.environ['AWX_LICENSE_FILE'] = 'non-existent-license-file.json' - - h, path = tempfile.mkstemp() - with os.fdopen(h, 'w') as f: - json.dump({'instance_count': 100}, f) - - def fetch_ami(_self): - _self.attributes['ami-id'] = 'ami-00000000' - return True - - def fetch_instance(_self): - _self.attributes['instance-id'] = 'i-00000000' - return True - - with mock.patch('awx.main.task_engine.TEMPORARY_TASK_FILE', path): - with mock.patch('awx.main.task_engine.TemporaryTaskEngine.fetch_ami', fetch_ami): - with mock.patch('awx.main.task_engine.TemporaryTaskEngine.fetch_instance', fetch_instance): - reader = TaskSerializer() - license = reader.from_file() - assert license['is_aws'] - assert license['time_remaining'] - assert license['free_instances'] > 0 - assert license['grace_period_remaining'] > 0 - - os.unlink(path) diff --git 
a/awx/main/tests/functional/migrations/conftest.py b/awx/main/tests/functional/migrations/conftest.py deleted file mode 100644 index 0901f548d3..0000000000 --- a/awx/main/tests/functional/migrations/conftest.py +++ /dev/null @@ -1,84 +0,0 @@ -# Python -import pytest -from datetime import timedelta - -# Django -from django.utils import timezone -from django.conf import settings - -# AWX -from awx.fact.models.fact import Fact, FactHost - -# MongoEngine -from mongoengine.connection import ConnectionError - -@pytest.fixture(autouse=True) -def mongo_db(request): - marker = request.keywords.get('mongo_db', None) - if marker: - # Drop mongo database - try: - db = Fact._get_db() - db.connection.drop_database(settings.MONGO_DB) - except ConnectionError: - raise - -@pytest.fixture -def inventories(organization): - def rf(inventory_count=1): - invs = [] - for i in xrange(0, inventory_count): - inv = organization.inventories.create(name="test-inv-%d" % i, description="test-inv-desc") - invs.append(inv) - return invs - return rf - -''' -hosts naming convension should align with hosts_mongo -''' -@pytest.fixture -def hosts(organization): - def rf(host_count=1, inventories=[]): - hosts = [] - for inv in inventories: - for i in xrange(0, host_count): - name = '%s-host-%s' % (inv.name, i) - host = inv.hosts.create(name=name) - hosts.append(host) - return hosts - return rf - -@pytest.fixture -def hosts_mongo(organization): - def rf(host_count=1, inventories=[]): - hosts = [] - for inv in inventories: - for i in xrange(0, host_count): - name = '%s-host-%s' % (inv.name, i) - (host, created) = FactHost.objects.get_or_create(hostname=name, inventory_id=inv.id) - hosts.append(host) - return hosts - return rf - -@pytest.fixture -def fact_scans(organization, fact_ansible_json, fact_packages_json, fact_services_json): - def rf(fact_scans=1, inventories=[], timestamp_epoch=timezone.now()): - facts_json = {} - facts = [] - module_names = ['ansible', 'services', 'packages'] - - facts_json['ansible'] = fact_ansible_json - facts_json['packages'] = fact_packages_json - facts_json['services'] = fact_services_json - - for inv in inventories: - for host_obj in FactHost.objects.filter(inventory_id=inv.id): - timestamp_current = timestamp_epoch - for i in xrange(0, fact_scans): - for module_name in module_names: - facts.append(Fact.add_fact(timestamp_current, facts_json[module_name], host_obj, module_name)) - timestamp_current += timedelta(days=1) - return facts - return rf - - diff --git a/awx/main/tests/functional/migrations/test_fact.py b/awx/main/tests/functional/migrations/test_fact.py deleted file mode 100644 index 76dfcc4a40..0000000000 --- a/awx/main/tests/functional/migrations/test_fact.py +++ /dev/null @@ -1,63 +0,0 @@ -import pytest -import datetime - -from django.apps import apps -from django.conf import settings - -from awx.main.models.inventory import Host -from awx.main.models.fact import Fact - -from awx.main.migrations import _system_tracking as system_tracking - -def micro_to_milli(micro): - return micro - (((int)(micro / 1000)) * 1000) - -@pytest.mark.skipif(not getattr(settings, 'MONGO_DB', None), reason="MongoDB not configured") -@pytest.mark.django_db -@pytest.mark.mongo_db -def test_migrate_facts(inventories, hosts, hosts_mongo, fact_scans): - inventory_objs = inventories(2) - hosts(2, inventory_objs) - hosts_mongo(2, inventory_objs) - facts_known = fact_scans(2, inventory_objs) - - (migrated_count, not_migrated_count) = system_tracking.migrate_facts(apps, None) - # 4 hosts w/ 2 fact scans each, 3 
modules each scan - assert migrated_count == 24 - assert not_migrated_count == 0 - - - for fact_mongo, fact_version in facts_known: - host = Host.objects.get(inventory_id=fact_mongo.host.inventory_id, name=fact_mongo.host.hostname) - t = fact_mongo.timestamp - datetime.timedelta(microseconds=micro_to_milli(fact_mongo.timestamp.microsecond)) - fact = Fact.objects.filter(host_id=host.id, timestamp=t, module=fact_mongo.module) - - assert len(fact) == 1 - assert fact[0] is not None - -@pytest.mark.skipif(not getattr(settings, 'MONGO_DB', None), reason="MongoDB not configured") -@pytest.mark.django_db -@pytest.mark.mongo_db -def test_migrate_facts_hostname_does_not_exist(inventories, hosts, hosts_mongo, fact_scans): - inventory_objs = inventories(2) - host_objs = hosts(1, inventory_objs) - hosts_mongo(2, inventory_objs) - facts_known = fact_scans(2, inventory_objs) - - (migrated_count, not_migrated_count) = system_tracking.migrate_facts(apps, None) - assert migrated_count == 12 - assert not_migrated_count == 12 - - - for fact_mongo, fact_version in facts_known: - # Facts that don't match the only host will not be migrated - if fact_mongo.host.hostname != host_objs[0].name: - continue - - host = Host.objects.get(inventory_id=fact_mongo.host.inventory_id, name=fact_mongo.host.hostname) - t = fact_mongo.timestamp - datetime.timedelta(microseconds=micro_to_milli(fact_mongo.timestamp.microsecond)) - fact = Fact.objects.filter(host_id=host.id, timestamp=t, module=fact_mongo.module) - - assert len(fact) == 1 - assert fact[0] is not None - diff --git a/awx/main/tests/functional/models/fact/test_get_host_fact.py b/awx/main/tests/functional/models/fact/test_get_host_fact.py index 2569417496..0388e12f08 100644 --- a/awx/main/tests/functional/models/fact/test_get_host_fact.py +++ b/awx/main/tests/functional/models/fact/test_get_host_fact.py @@ -5,8 +5,9 @@ from django.utils import timezone from awx.main.models import Fact + @pytest.mark.django_db -def test_newest_scan_exact(hosts, fact_scans): +def test_newest_scan_exact(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() hosts = hosts(host_count=2) facts = fact_scans(fact_scans=3, timestamp_epoch=epoch) @@ -20,18 +21,19 @@ def test_newest_scan_exact(hosts, fact_scans): assert fact_found == fact_known -''' -Show me the most recent state of the sytem at any point of time. -or, said differently -For any timestamp, get the first scan that is <= the timestamp. -''' -''' -Ensure most recent scan run is the scan returned. -Query by future date. -''' @pytest.mark.django_db -def test_newest_scan_less_than(hosts, fact_scans): +def test_newest_scan_less_than(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): + ''' + Show me the most recent state of the system at any point of time. + or, said differently + For any timestamp, get the first scan that is <= the timestamp. + ''' + + ''' + Ensure most recent scan run is the scan returned. + Query by future date. + ''' epoch = timezone.now() timestamp_future = epoch + timedelta(days=10) hosts = hosts(host_count=2) @@ -48,11 +50,12 @@ def test_newest_scan_less_than(hosts, fact_scans): assert fact_found == fact_known -''' -Tests query Fact that is in the middle of the fact scan timeline, but not an exact timestamp.
-''' + @pytest.mark.django_db -def test_query_middle_of_timeline(hosts, fact_scans): +def test_query_middle_of_timeline(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): + ''' + Tests query Fact that is in the middle of the fact scan timeline, but not an exact timestamp. + ''' epoch = timezone.now() timestamp_middle = epoch + timedelta(days=1, hours=3) hosts = hosts(host_count=2) @@ -69,11 +72,12 @@ def test_query_middle_of_timeline(hosts, fact_scans): assert fact_found == fact_known -''' -Query time less than any fact scan. Should return None -''' + @pytest.mark.django_db -def test_query_result_empty(hosts, fact_scans): +def test_query_result_empty(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): + ''' + Query time less than any fact scan. Should return None + ''' epoch = timezone.now() timestamp_less = epoch - timedelta(days=1) hosts = hosts(host_count=2) @@ -83,11 +87,12 @@ def test_query_result_empty(hosts, fact_scans): assert fact_found is None -''' -Query by fact module other than 'ansible' -''' + @pytest.mark.django_db -def test_by_module(hosts, fact_scans): +def test_by_module(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): + ''' + Query by fact module other than 'ansible' + ''' epoch = timezone.now() hosts = hosts(host_count=2) facts = fact_scans(fact_scans=3, timestamp_epoch=epoch) @@ -108,4 +113,3 @@ def test_by_module(hosts, fact_scans): assert fact_found_services == fact_known_services assert fact_found_packages == fact_known_packages - diff --git a/awx/main/tests/functional/models/fact/test_get_timeline.py b/awx/main/tests/functional/models/fact/test_get_timeline.py index da3360340a..940498f913 100644 --- a/awx/main/tests/functional/models/fact/test_get_timeline.py +++ b/awx/main/tests/functional/models/fact/test_get_timeline.py @@ -5,6 +5,7 @@ from django.utils import timezone from awx.main.models import Fact + def setup_common(hosts, fact_scans, ts_from=None, ts_to=None, epoch=timezone.now(), module_name='ansible', ts_known=None): hosts = hosts(host_count=2) facts = fact_scans(fact_scans=3, timestamp_epoch=epoch) @@ -20,8 +21,9 @@ def setup_common(hosts, fact_scans, ts_from=None, ts_to=None, epoch=timezone.now fact_objs = Fact.get_timeline(hosts[0].id, module=module_name, ts_from=ts_from, ts_to=ts_to) return (facts_known, fact_objs) + @pytest.mark.django_db -def test_all(hosts, fact_scans): +def test_all(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() ts_from = epoch - timedelta(days=1) ts_to = epoch + timedelta(days=10) @@ -30,8 +32,9 @@ def test_all(hosts, fact_scans): assert 9 == len(facts_known) assert 9 == len(fact_objs) + @pytest.mark.django_db -def test_all_ansible(hosts, fact_scans): +def test_all_ansible(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() ts_from = epoch - timedelta(days=1) ts_to = epoch + timedelta(days=10) @@ -43,8 +46,9 @@ def test_all_ansible(hosts, fact_scans): for i in xrange(len(facts_known) - 1, 0): assert facts_known[i].id == fact_objs[i].id + @pytest.mark.django_db -def test_empty_db(hosts, fact_scans): +def test_empty_db(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): hosts = hosts(host_count=2) epoch = timezone.now() ts_from = epoch - timedelta(days=1) @@ -54,8 +58,9 @@ def test_empty_db(hosts, fact_scans): assert 0 == len(fact_objs) + @pytest.mark.django_db -def test_no_results(hosts, fact_scans): +def test_no_results(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() ts_from = epoch - 
timedelta(days=100) ts_to = epoch - timedelta(days=50) @@ -63,8 +68,9 @@ def test_no_results(hosts, fact_scans): (facts_known, fact_objs) = setup_common(hosts, fact_scans, ts_from, ts_to, epoch=epoch) assert 0 == len(fact_objs) + @pytest.mark.django_db -def test_exact_same_equal(hosts, fact_scans): +def test_exact_same_equal(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() ts_to = ts_from = epoch + timedelta(days=1) @@ -74,8 +80,9 @@ def test_exact_same_equal(hosts, fact_scans): assert facts_known[0].id == fact_objs[0].id + @pytest.mark.django_db -def test_exact_from_exclusive_to_inclusive(hosts, fact_scans): +def test_exact_from_exclusive_to_inclusive(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() ts_from = epoch + timedelta(days=1) ts_to = epoch + timedelta(days=2) @@ -87,8 +94,9 @@ def test_exact_from_exclusive_to_inclusive(hosts, fact_scans): assert facts_known[0].id == fact_objs[0].id + @pytest.mark.django_db -def test_to_lte(hosts, fact_scans): +def test_to_lte(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() ts_to = epoch + timedelta(days=1) @@ -101,8 +109,9 @@ def test_to_lte(hosts, fact_scans): for i in xrange(0, len(fact_objs)): assert facts_known_subset[len(facts_known_subset) - i - 1].id == fact_objs[i].id + @pytest.mark.django_db -def test_from_gt(hosts, fact_scans): +def test_from_gt(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() ts_from = epoch @@ -115,8 +124,9 @@ def test_from_gt(hosts, fact_scans): for i in xrange(0, len(fact_objs)): assert facts_known_subset[len(facts_known_subset) - i - 1].id == fact_objs[i].id + @pytest.mark.django_db -def test_no_ts(hosts, fact_scans): +def test_no_ts(hosts, fact_scans, monkeypatch_jsonbfield_get_db_prep_save): epoch = timezone.now() (facts_known, fact_objs) = setup_common(hosts, fact_scans, ts_from=None, ts_to=None, epoch=epoch) @@ -125,5 +135,3 @@ def test_no_ts(hosts, fact_scans): for i in xrange(len(facts_known) - 1, 0): assert facts_known[i].id == fact_objs[i].id - - diff --git a/awx/main/tests/functional/models/test_activity_stream.py b/awx/main/tests/functional/models/test_activity_stream.py new file mode 100644 index 0000000000..b6e63a4377 --- /dev/null +++ b/awx/main/tests/functional/models/test_activity_stream.py @@ -0,0 +1,13 @@ +import pytest + +# AWX models +from awx.main.models.organization import Organization +from awx.main.models import ActivityStream + + + +@pytest.mark.django_db +def test_activity_stream_create_entries(): + Organization.objects.create(name='test-organization2') + assert ActivityStream.objects.filter(organization__isnull=False).count() == 1 + diff --git a/awx/main/tests/functional/models/test_context_managers.py b/awx/main/tests/functional/models/test_context_managers.py new file mode 100644 index 0000000000..61aad54ad4 --- /dev/null +++ b/awx/main/tests/functional/models/test_context_managers.py @@ -0,0 +1,47 @@ +import pytest + +# AWX context managers for testing +from awx.main.models.rbac import batch_role_ancestor_rebuilding +from awx.main.signals import ( + disable_activity_stream, + disable_computed_fields, + update_inventory_computed_fields +) + +# AWX models +from awx.main.models.organization import Organization +from awx.main.models import ActivityStream, Job + + +@pytest.mark.django_db +def test_rbac_batch_rebuilding(rando, organization): + with batch_role_ancestor_rebuilding(): + organization.admin_role.members.add(rando) + inventory = 
organization.inventories.create(name='test-inventory') + assert rando not in inventory.admin_role + assert rando in inventory.admin_role + + +@pytest.mark.django_db +def test_disable_activity_stream(): + with disable_activity_stream(): + Organization.objects.create(name='test-organization') + assert ActivityStream.objects.filter(organization__isnull=False).count() == 0 + + +@pytest.mark.django_db +class TestComputedFields: + + def test_computed_fields_normal_use(self, mocker, inventory): + job = Job.objects.create(name='fake-job', inventory=inventory) + with mocker.patch.object(update_inventory_computed_fields, 'delay'): + job.delete() + update_inventory_computed_fields.delay.assert_called_once_with(inventory.id, True) + + def test_disable_computed_fields(self, mocker, inventory): + job = Job.objects.create(name='fake-job', inventory=inventory) + with disable_computed_fields(): + with mocker.patch.object(update_inventory_computed_fields, 'delay'): + job.delete() + update_inventory_computed_fields.delay.assert_not_called() + diff --git a/awx/main/tests/functional/models/test_unified_job.py b/awx/main/tests/functional/models/test_unified_job.py index 870f9f034a..4d19e4191e 100644 --- a/awx/main/tests/functional/models/test_unified_job.py +++ b/awx/main/tests/functional/models/test_unified_job.py @@ -1,5 +1,20 @@ import pytest +# Django +from django.contrib.contenttypes.models import ContentType + +# AWX +from awx.main.models import UnifiedJobTemplate, JobTemplate, WorkflowJobTemplate, Project + + +@pytest.mark.django_db +def test_subclass_types(rando): + assert set(UnifiedJobTemplate._submodels_with_roles()) == set([ + ContentType.objects.get_for_model(JobTemplate).id, + ContentType.objects.get_for_model(Project).id, + ContentType.objects.get_for_model(WorkflowJobTemplate).id + ]) + class TestCreateUnifiedJob: ''' diff --git a/awx/main/tests/functional/models/test_workflow.py b/awx/main/tests/functional/models/test_workflow.py new file mode 100644 index 0000000000..0cb278826f --- /dev/null +++ b/awx/main/tests/functional/models/test_workflow.py @@ -0,0 +1,190 @@ + +# Python +import pytest + +# AWX +from awx.main.models.workflow import WorkflowJob, WorkflowJobNode, WorkflowJobTemplateNode +from awx.main.models.jobs import Job +from awx.main.models.projects import ProjectUpdate +from awx.main.scheduler.dag_workflow import WorkflowDAG + +# Django +from django.test import TransactionTestCase + + +@pytest.mark.django_db +class TestWorkflowDAGFunctional(TransactionTestCase): + def workflow_job(self): + wfj = WorkflowJob.objects.create() + nodes = [WorkflowJobNode.objects.create(workflow_job=wfj) for i in range(0, 5)] + nodes[0].success_nodes.add(nodes[1]) + nodes[1].success_nodes.add(nodes[2]) + nodes[0].failure_nodes.add(nodes[3]) + nodes[3].failure_nodes.add(nodes[4]) + return wfj + + def test_build_WFJT_dag(self): + ''' + Test that building the graph uses 4 queries + 1 to get the nodes + 3 to get the related success, failure, and always connections + ''' + dag = WorkflowDAG() + wfj = self.workflow_job() + with self.assertNumQueries(4): + dag._init_graph(wfj) + + +@pytest.mark.django_db +class TestWorkflowJob: + @pytest.fixture + def workflow_job(self, workflow_job_template_factory): + wfjt = workflow_job_template_factory('blah').workflow_job_template + wfj = WorkflowJob.objects.create(workflow_job_template=wfjt) + + nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt) for i in range(0, 5)] + + nodes[0].success_nodes.add(nodes[1]) + nodes[1].success_nodes.add(nodes[2]) + + 
nodes[0].failure_nodes.add(nodes[3]) + nodes[3].failure_nodes.add(nodes[4]) + + return wfj + + def test_inherit_job_template_workflow_nodes(self, mocker, workflow_job): + workflow_job.copy_nodes_from_original(original=workflow_job.workflow_job_template) + + nodes = WorkflowJob.objects.get(id=workflow_job.id).workflow_job_nodes.all().order_by('created') + assert nodes[0].success_nodes.filter(id=nodes[1].id).exists() + assert nodes[1].success_nodes.filter(id=nodes[2].id).exists() + assert nodes[0].failure_nodes.filter(id=nodes[3].id).exists() + assert nodes[3].failure_nodes.filter(id=nodes[4].id).exists() + + def test_inherit_ancestor_artifacts_from_job(self, project, mocker): + """ + Ensure that nodes along the line of execution inherit artifacts + both from the jobs that ran and from the accumulation of old jobs + """ + # Related resources + wfj = WorkflowJob.objects.create(name='test-wf-job') + job = Job.objects.create(name='test-job', artifacts={'b': 43}) + # Workflow job nodes + job_node = WorkflowJobNode.objects.create(workflow_job=wfj, job=job, + ancestor_artifacts={'a': 42}) + queued_node = WorkflowJobNode.objects.create(workflow_job=wfj) + # Connect old job -> new job + mocker.patch.object(queued_node, 'get_parent_nodes', lambda: [job_node]) + assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43} + assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43} + + def test_inherit_ancestor_artifacts_from_project_update(self, project, mocker): + """ + Test that the existence of a project update (no artifacts) does + not break the flow of ancestor_artifacts + """ + # Related resources + wfj = WorkflowJob.objects.create(name='test-wf-job') + update = ProjectUpdate.objects.create(name='test-update', project=project) + # Workflow job nodes + project_node = WorkflowJobNode.objects.create(workflow_job=wfj, job=update, + ancestor_artifacts={'a': 42, 'b': 43}) + queued_node = WorkflowJobNode.objects.create(workflow_job=wfj) + # Connect project update -> new job + mocker.patch.object(queued_node, 'get_parent_nodes', lambda: [project_node]) + assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43} + assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43} + + +@pytest.mark.django_db +class TestWorkflowJobTemplate: + @pytest.fixture + def wfjt(self, workflow_job_template_factory, organization): + wfjt = workflow_job_template_factory( + 'test', organization=organization).workflow_job_template + wfjt.organization = organization + nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt) for i in range(0, 3)] + nodes[0].success_nodes.add(nodes[1]) + nodes[1].failure_nodes.add(nodes[2]) + return wfjt + + def test_node_parentage(self, wfjt): + # test success parent + wfjt_node = wfjt.workflow_job_template_nodes.all()[1] + parent_qs = wfjt_node.get_parent_nodes() + assert len(parent_qs) == 1 + assert parent_qs[0] == wfjt.workflow_job_template_nodes.all()[0] + # test failure parent + wfjt_node = wfjt.workflow_job_template_nodes.all()[2] + parent_qs = wfjt_node.get_parent_nodes() + assert len(parent_qs) == 1 + assert parent_qs[0] == wfjt.workflow_job_template_nodes.all()[1] + + def test_topology_validator(self, wfjt): + from awx.api.views import WorkflowJobTemplateNodeChildrenBaseList + test_view = WorkflowJobTemplateNodeChildrenBaseList() + nodes = wfjt.workflow_job_template_nodes.all() + node_assoc = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt) + nodes[2].always_nodes.add(node_assoc) + # test cycle validation + assert 
test_view.is_valid_relation(node_assoc, nodes[0]) == {'Error': 'Cycle detected.'} + # test multi-ancestor validation + assert test_view.is_valid_relation(node_assoc, nodes[1]) == {'Error': 'Multiple parent relationship not allowed.'} + # test mutex validation + test_view.relationship = 'failure_nodes' + node_assoc_1 = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt) + assert (test_view.is_valid_relation(nodes[2], node_assoc_1) == + {'Error': 'Cannot associate failure_nodes when always_nodes have been associated.'}) + + def test_wfjt_copy(self, wfjt, job_template, inventory, admin_user): + old_nodes = wfjt.workflow_job_template_nodes.all() + node1 = old_nodes[1] + node1.unified_job_template = job_template + node1.save() + node2 = old_nodes[2] + node2.inventory = inventory + node2.save() + new_wfjt = wfjt.user_copy(admin_user) + for fd in ['description', 'survey_spec', 'survey_enabled', 'extra_vars']: + assert getattr(wfjt, fd) == getattr(new_wfjt, fd) + assert new_wfjt.organization == wfjt.organization + assert len(new_wfjt.workflow_job_template_nodes.all()) == 3 + nodes = new_wfjt.workflow_job_template_nodes.all() + assert nodes[0].success_nodes.all()[0] == nodes[1] + assert nodes[1].failure_nodes.all()[0] == nodes[2] + assert nodes[1].unified_job_template == job_template + assert nodes[2].inventory == inventory + + +@pytest.mark.django_db +class TestWorkflowJobFailure: + """ + Tests to re-implement if workflow failure status is introduced in + a future Tower version. + """ + @pytest.fixture + def wfj(self): + return WorkflowJob.objects.create(name='test-wf-job') + + def test_workflow_not_failed_unran_job(self, wfj): + """ + Test that an unrun node will not mark workflow job as failed + """ + WorkflowJobNode.objects.create(workflow_job=wfj) + assert not wfj._has_failed() + + def test_workflow_not_failed_successful_job(self, wfj): + """ + Test that a successful node will not mark workflow job as failed + """ + job = Job.objects.create(name='test-job', status='successful') + WorkflowJobNode.objects.create(workflow_job=wfj, job=job) + assert not wfj._has_failed() + + def test_workflow_not_failed_failed_job_but_okay(self, wfj): + """ + Test that a failed node will not mark workflow job as failed + """ + job = Job.objects.create(name='test-job', status='failed') + WorkflowJobNode.objects.create(workflow_job=wfj, job=job) + assert not wfj._has_failed() diff --git a/awx/main/tests/functional/test_db_credential.py b/awx/main/tests/functional/test_db_credential.py index 7ed76fdd62..90ec21d4cd 100644 --- a/awx/main/tests/functional/test_db_credential.py +++ b/awx/main/tests/functional/test_db_credential.py @@ -3,6 +3,7 @@ import pytest from django.db import IntegrityError from awx.main.models import Credential + @pytest.mark.django_db def test_cred_unique_org_name_kind(organization_factory): objects = organization_factory("test") diff --git a/awx/main/tests/functional/test_fixture_factories.py b/awx/main/tests/functional/test_fixture_factories.py index 1c25f9d38d..83d96fdbd3 100644 --- a/awx/main/tests/functional/test_fixture_factories.py +++ b/awx/main/tests/functional/test_fixture_factories.py @@ -2,6 +2,7 @@ import pytest from awx.main.tests.factories import NotUnique + def test_roles_exc_not_persisted(organization_factory): with pytest.raises(RuntimeError) as exc: organization_factory('test-org', roles=['test-org.admin_role:user1'], persisted=False) @@ -92,6 +93,7 @@ def test_job_template_factory(job_template_factory): assert jt_objects.job_template.survey_spec is not None 
assert 'test-survey' in jt_objects.jobs[1].extra_vars + def test_survey_spec_generator_simple(survey_spec_factory): survey_spec = survey_spec_factory('survey_variable') assert 'name' in survey_spec @@ -100,6 +102,7 @@ def test_survey_spec_generator_simple(survey_spec_factory): assert type(survey_spec['spec'][0]) is dict assert survey_spec['spec'][0]['type'] == 'integer' + def test_survey_spec_generator_mixed(survey_spec_factory): survey_spec = survey_spec_factory( [{'variable': 'question1', 'type': 'integer', 'max': 87}, diff --git a/awx/main/tests/functional/test_jobs.py b/awx/main/tests/functional/test_jobs.py index 83302e7400..5169d98fb1 100644 --- a/awx/main/tests/functional/test_jobs.py +++ b/awx/main/tests/functional/test_jobs.py @@ -1,45 +1,24 @@ -from awx.main.models import Job - +from awx.main.models import Job, Instance +from django.test.utils import override_settings import pytest -@pytest.mark.django_db -def test_job_blocking(get, post, job_template, inventory, inventory_factory): - j1 = Job.objects.create(job_template=job_template, - inventory=inventory) - j2 = Job.objects.create(job_template=job_template, - inventory=inventory) - assert j1.is_blocked_by(j2) - j2.inventory = inventory_factory(name='test-different-inventory') - assert not j1.is_blocked_by(j2) - j_callback_1 = Job.objects.create(job_template=job_template, - inventory=inventory, - launch_type='callback', - limit='a') - j_callback_2 = Job.objects.create(job_template=job_template, - inventory=inventory, - launch_type='callback', - limit='a') - assert j_callback_1.is_blocked_by(j_callback_2) - j_callback_2.limit = 'b' - assert not j_callback_1.is_blocked_by(j_callback_2) - -@pytest.mark.django_db -def test_job_blocking_allow_simul(get, post, job_template, inventory): - job_template.allow_simultaneous = True - j1 = Job.objects.create(job_template=job_template, - inventory=inventory) - j2 = Job.objects.create(job_template=job_template, - inventory=inventory) - assert not j1.is_blocked_by(j2) - assert not j2.is_blocked_by(j1) - job_template.allow_simultaneous = False - assert j1.is_blocked_by(j2) - assert j2.is_blocked_by(j1) @pytest.mark.django_db def test_orphan_unified_job_creation(instance, inventory): job = Job.objects.create(job_template=None, inventory=inventory, name='hi world') - job2 = job.copy() + job2 = job.copy_unified_job() assert job2.job_template is None assert job2.inventory == inventory assert job2.name == 'hi world' + assert job.job_type == job2.job_type + assert job2.launch_type == 'relaunch' + + +@pytest.mark.django_db +def test_job_capacity_and_with_inactive_node(): + Instance.objects.create(hostname='test-1', capacity=50) + assert Instance.objects.total_capacity() == 50 + Instance.objects.create(hostname='test-2', capacity=50) + assert Instance.objects.total_capacity() == 100 + with override_settings(AWX_ACTIVE_NODE_TIME=0): + assert Instance.objects.total_capacity() < 100 diff --git a/awx/main/tests/functional/test_notifications.py b/awx/main/tests/functional/test_notifications.py index e5494edbea..cfa8ce76ee 100644 --- a/awx/main/tests/functional/test_notifications.py +++ b/awx/main/tests/functional/test_notifications.py @@ -7,6 +7,7 @@ from awx.main.models.jobs import JobTemplate from django.core.urlresolvers import reverse + @pytest.mark.django_db def test_get_notification_template_list(get, user, notification_template): url = reverse('api:notification_template_list') @@ -14,6 +15,7 @@ def test_get_notification_template_list(get, user, notification_template): assert response.status_code == 
200 assert len(response.data['results']) == 1 + @pytest.mark.django_db def test_basic_parameterization(get, post, user, organization): u = user('admin-poster', True) @@ -38,6 +40,7 @@ def test_basic_parameterization(get, post, user, organization): assert 'url' in response.data['notification_configuration'] assert 'headers' in response.data['notification_configuration'] + @pytest.mark.django_db def test_encrypted_subfields(get, post, user, organization): def assert_send(self, messages): @@ -63,6 +66,7 @@ def test_encrypted_subfields(get, post, user, organization): with mock.patch.object(notification_template_actual.notification_class, "send_messages", assert_send): notification_template_actual.send("Test", {'body': "Test"}) + @pytest.mark.django_db def test_inherited_notification_templates(get, post, user, organization, project): u = user('admin-poster', True) @@ -98,6 +102,7 @@ def test_inherited_notification_templates(get, post, user, organization, project assert len(project.notification_templates['any']) == 2 assert len(g.inventory_source.notification_templates['any']) == 1 + @pytest.mark.django_db def test_notification_template_merging(get, post, user, organization, project, notification_template): user('admin-poster', True) @@ -105,14 +110,17 @@ def test_notification_template_merging(get, post, user, organization, project, n project.notification_templates_any.add(notification_template) assert len(project.notification_templates['any']) == 1 + @pytest.mark.django_db def test_notification_template_simple_patch(patch, notification_template, admin): patch(reverse('api:notification_template_detail', args=(notification_template.id,)), { 'name': 'foo'}, admin, expect=200) + @pytest.mark.django_db def test_notification_template_invalid_notification_type(patch, notification_template, admin): patch(reverse('api:notification_template_detail', args=(notification_template.id,)), { 'notification_type': 'invalid'}, admin, expect=400) + @pytest.mark.django_db def test_disallow_delete_when_notifications_pending(delete, user, notification_template): u = user('superuser', True) diff --git a/awx/main/tests/functional/test_partial.py b/awx/main/tests/functional/test_partial.py new file mode 100644 index 0000000000..fd6d294cf2 --- /dev/null +++ b/awx/main/tests/functional/test_partial.py @@ -0,0 +1,126 @@ + +# Python +import pytest +from django.utils.timezone import now as tz_now +from datetime import timedelta + +# AWX +from awx.main.models import ( + Organization, + Inventory, + Group, + Project, + ProjectUpdate, + InventoryUpdate, + InventorySource, +) +from awx.main.scheduler.partial import ( + ProjectUpdateLatestDict, + InventoryUpdateDict, + InventoryUpdateLatestDict, +) + + +@pytest.fixture +def org(): + return Organization.objects.create(name="org1") + + +class TestProjectUpdateLatestDictDict(): + @pytest.fixture + def successful_project_update(self): + p = Project.objects.create(name="proj1") + pu = ProjectUpdate.objects.create(project=p, status='successful', finished=tz_now() - timedelta(seconds=20)) + + return (p, pu) + + # Failed project updates newer than successful ones + @pytest.fixture + def multiple_project_updates(self): + p = Project.objects.create(name="proj1") + + epoch = tz_now() + + successful_pus = [ProjectUpdate.objects.create(project=p, + status='successful', + finished=epoch - timedelta(seconds=100 + i)) for i in xrange(0, 5)] + failed_pus = [ProjectUpdate.objects.create(project=p, + status='failed', + finished=epoch - timedelta(seconds=100 - len(successful_pus) + i)) for i in 
xrange(0, 5)] + return (p, failed_pus, successful_pus) + + + @pytest.mark.django_db + class TestFilterPartial(): + def test_project_update_successful(self, successful_project_update): + (project, project_update) = successful_project_update + + tasks = ProjectUpdateLatestDict.filter_partial(project_ids=[project.id]) + + assert 1 == len(tasks) + assert project_update.id == tasks[0]['id'] + + def test_correct_project_update(self, multiple_project_updates): + (project, failed_pus, successful_pus) = multiple_project_updates + + tasks = ProjectUpdateLatestDict.filter_partial(project_ids=[project.id]) + + assert 1 == len(tasks) + assert failed_pus[0].id == tasks[0]['id'] + + +class TestInventoryUpdateDict(): + @pytest.fixture + def waiting_inventory_update(self, org): + i = Inventory.objects.create(name='inv1', organization=org) + g = Group.objects.create(name='group1', inventory=i) + #Inventory.groups.add(g) + inv_src = InventorySource.objects.create(group=g) + iu = InventoryUpdate.objects.create(inventory_source=inv_src, status='waiting') + return iu + + @pytest.mark.django_db + class TestFilterPartial(): + def test_simple(self, waiting_inventory_update): + tasks = InventoryUpdateDict.filter_partial(status=['waiting']) + + assert 1 == len(tasks) + assert waiting_inventory_update.id == tasks[0]['id'] + + +class TestInventoryUpdateLatestDict(): + @pytest.fixture + def inventory(self, org): + i = Inventory.objects.create(name='inv1', organization=org) + return i + + @pytest.fixture + def inventory_updates(self, inventory): + g1 = Group.objects.create(name='group1', inventory=inventory) + g2 = Group.objects.create(name='group2', inventory=inventory) + g3 = Group.objects.create(name='group3', inventory=inventory) + + inv_src1 = InventorySource.objects.create(group=g1, update_on_launch=True, inventory=inventory) + inv_src2 = InventorySource.objects.create(group=g2, update_on_launch=False, inventory=inventory) + inv_src3 = InventorySource.objects.create(group=g3, update_on_launch=True, inventory=inventory) + + import time + iu1 = InventoryUpdate.objects.create(inventory_source=inv_src1, status='successful') + time.sleep(0.1) + iu2 = InventoryUpdate.objects.create(inventory_source=inv_src2, status='waiting') + time.sleep(0.1) + iu3 = InventoryUpdate.objects.create(inventory_source=inv_src3, status='waiting') + return [iu1, iu2, iu3] + + @pytest.mark.django_db + def test_filter_partial(self, inventory, inventory_updates): + + tasks = InventoryUpdateLatestDict.filter_partial([inventory.id]) + + inventory_updates_expected = [inventory_updates[0], inventory_updates[2]] + + assert 2 == len(tasks) + task_ids = [task['id'] for task in tasks] + for inventory_update in inventory_updates_expected: + assert inventory_update.id in task_ids + diff --git a/awx/main/tests/functional/test_projects.py b/awx/main/tests/functional/test_projects.py index 40ea659432..8b66c396bd 100644 --- a/awx/main/tests/functional/test_projects.py +++ b/awx/main/tests/functional/test_projects.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + import mock # noqa import pytest @@ -22,6 +24,84 @@ def team_project_list(organization_factory): return objects +@pytest.mark.django_db +def test_user_project_paged_list(get, organization_factory): + 'Test project listing that spans multiple pages' + + # 3 total projects, 1 per page, 3 pages + objects = organization_factory( + 'org1', + projects=['project-%s' % i for i in range(3)], + users=['alice'], + roles=['project-%s.admin_role:alice' % i for i in range(3)], + ) + + # first page has first project and no 
previous page + pk = objects.users.alice.pk + url = reverse('api:user_projects_list', args=(pk,)) + results = get(url, objects.users.alice, QUERY_STRING='page_size=1').data + assert results['count'] == 3 + assert len(results['results']) == 1 + assert results['previous'] is None + assert results['next'] == ( + '/api/v1/users/%s/projects/?page=2&page_size=1' % pk + ) + + # second page has one more, a previous and next page + results = get(url, objects.users.alice, + QUERY_STRING='page=2&page_size=1').data + assert len(results['results']) == 1 + assert results['previous'] == ( + '/api/v1/users/%s/projects/?page=1&page_size=1' % pk + ) + assert results['next'] == ( + '/api/v1/users/%s/projects/?page=3&page_size=1' % pk + ) + + # third page has last project and a previous page + results = get(url, objects.users.alice, + QUERY_STRING='page=3&page_size=1').data + assert len(results['results']) == 1 + assert results['previous'] == ( + '/api/v1/users/%s/projects/?page=2&page_size=1' % pk + ) + assert results['next'] is None + + +@pytest.mark.django_db +def test_user_project_paged_list_with_unicode(get, organization_factory): + 'Test project listing that contains unicode chars in the next/prev links' + + # Create 2 projects that contain a "cloud" unicode character, make sure we + # can search it and properly generate next/previous page links + objects = organization_factory( + 'org1', + projects=['project-☁-1','project-☁-2'], + users=['alice'], + roles=['project-☁-1.admin_role:alice','project-☁-2.admin_role:alice'], + ) + pk = objects.users.alice.pk + url = reverse('api:user_projects_list', args=(pk,)) + + # first on first page, next page link contains unicode char + results = get(url, objects.users.alice, + QUERY_STRING='page_size=1&search=%E2%98%81').data + assert results['count'] == 2 + assert len(results['results']) == 1 + assert results['next'] == ( + '/api/v1/users/%s/projects/?page=2&page_size=1&search=%%E2%%98%%81' % pk # noqa + ) + + # second project on second page, previous page link contains unicode char + results = get(url, objects.users.alice, + QUERY_STRING='page=2&page_size=1&search=%E2%98%81').data + assert results['count'] == 2 + assert len(results['results']) == 1 + assert results['previous'] == ( + '/api/v1/users/%s/projects/?page=1&page_size=1&search=%%E2%%98%%81' % pk # noqa + ) + + @pytest.mark.django_db def test_user_project_list(get, organization_factory): 'List of projects a user has access to, filtered by projects you can also see' @@ -83,12 +163,14 @@ def test_team_project_list(get, team_project_list): # alice should see all projects they can see when viewing an admin assert get(reverse('api:user_projects_list', args=(admin.pk,)), alice).data['count'] == 2 + @pytest.mark.django_db def test_team_project_list_fail1(get, team_project_list): objects = team_project_list res = get(reverse('api:team_projects_list', args=(objects.teams.team2.pk,)), objects.users.alice) assert res.status_code == 403 + @pytest.mark.parametrize("u,expected_status_code", [ ('rando', 403), ('org_member', 403), @@ -115,18 +197,32 @@ def test_create_project(post, organization, org_admin, org_member, admin, rando, if expected_status_code == 201: assert Project.objects.filter(name='Project', organization=organization).exists() + @pytest.mark.django_db() def test_create_project_null_organization(post, organization, admin): post(reverse('api:project_list'), { 'name': 't', 'organization': None}, admin, expect=201) + @pytest.mark.django_db() def test_create_project_null_organization_xfail(post, organization, 
org_admin): - post(reverse('api:project_list'), { 'name': 't', 'organization': None}, org_admin, expect=400) + post(reverse('api:project_list'), { 'name': 't', 'organization': None}, org_admin, expect=403) + @pytest.mark.django_db() def test_patch_project_null_organization(patch, organization, project, admin): patch(reverse('api:project_detail', args=(project.id,)), { 'name': 't', 'organization': organization.id}, admin, expect=200) + @pytest.mark.django_db() def test_patch_project_null_organization_xfail(patch, project, org_admin): patch(reverse('api:project_detail', args=(project.id,)), { 'name': 't', 'organization': None}, org_admin, expect=400) + + +@pytest.mark.django_db +def test_cannot_schedule_manual_project(project, admin_user, post): + response = post( + reverse('api:project_schedules_list', args=(project.pk,)), + {"name": "foo", "description": "", "enabled": True, + "rrule": "DTSTART:20160926T040000Z RRULE:FREQ=HOURLY;INTERVAL=1", + "extra_data": {}}, admin_user, expect=400) + assert 'Manual' in response.data['unified_job_template'][0] diff --git a/awx/main/tests/functional/test_python_requirements.py b/awx/main/tests/functional/test_python_requirements.py new file mode 100644 index 0000000000..0dc48f66b8 --- /dev/null +++ b/awx/main/tests/functional/test_python_requirements.py @@ -0,0 +1,58 @@ + +import os +import re +from pip.operations import freeze + +from django.conf import settings + + +def test_env_matches_requirements_txt(): + def check_is_in(src, dests): + if src not in dests: + print("%s not in" % src) + return False + return True + + base_dir = settings.BASE_DIR + requirements_path = os.path.join(base_dir, '../', 'requirements/requirements.txt') + + reqs_actual = [] + xs = freeze.freeze(local_only=True) + for x in xs: + if '## The following requirements were added by pip freeze' in x: + break + x = x.lower() + (pkg_name, pkg_version) = x.split('==') + reqs_actual.append([pkg_name, pkg_version]) + + reqs_expected = [] + with open(requirements_path) as f: + for line in f: + line = line.partition('#')[0] + line = line.rstrip().lower() + # TODO: process git requirements and use egg + if line == '': + continue + if line.strip().startswith('#') or line.strip().startswith('git'): + continue + if line.startswith('-e'): + continue + + ''' + Special case pkg_name[pkg_subname]==version + For this case, we strip out [pkg_subname] + ''' + (pkg_name, pkg_version) = line.split('==') + pkg_name = re.sub(r'\[.*\]', '', pkg_name) + reqs_expected.append([pkg_name, pkg_version]) + + not_found = [] + for r in reqs_expected: + res = check_is_in(r, reqs_actual) + if res is False: + not_found.append(r) + + if len(not_found) > 0: + raise RuntimeError("%s not found in \n\n%s" % (not_found, reqs_actual)) + + diff --git a/awx/main/tests/functional/test_rbac_api.py b/awx/main/tests/functional/test_rbac_api.py index 54dcc8deb5..ef303b8b1d 100644 --- a/awx/main/tests/functional/test_rbac_api.py +++ b/awx/main/tests/functional/test_rbac_api.py @@ -5,11 +5,14 @@ from django.db import transaction from django.core.urlresolvers import reverse from awx.main.models.rbac import Role, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR -def mock_feature_enabled(feature, bypass_database=None): + +def mock_feature_enabled(feature): return True + #@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) + @pytest.fixture def role(): return Role.objects.create(role_field='admin_role') @@ -19,6 +22,7 @@ def role(): # /roles # + @pytest.mark.django_db def test_get_roles_list_admin(organization, get, admin): 'Admin 
can see list of all roles' @@ -28,6 +32,7 @@ def test_get_roles_list_admin(organization, get, admin): roles = response.data assert roles['count'] > 0 + @pytest.mark.django_db def test_get_roles_list_user(organization, inventory, team, get, user): 'Users can see all roles they have access to, but not all roles' @@ -57,6 +62,7 @@ def test_get_roles_list_user(organization, inventory, team, get, user): assert inventory.admin_role.id not in role_hash assert team.member_role.id not in role_hash + @pytest.mark.django_db def test_roles_visibility(get, organization, project, admin, alice, bob): Role.singleton('system_auditor').members.add(alice) @@ -66,6 +72,7 @@ def test_roles_visibility(get, organization, project, admin, alice, bob): organization.auditor_role.members.add(bob) assert get(reverse('api:role_list') + '?id=%d' % project.update_role.id, user=bob).data['count'] == 1 + @pytest.mark.django_db def test_roles_filter_visibility(get, organization, project, admin, alice, bob): Role.singleton('system_auditor').members.add(alice) @@ -80,6 +87,7 @@ def test_roles_filter_visibility(get, organization, project, admin, alice, bob): project.use_role.members.add(bob) # sibling role should still grant visibility assert get(reverse('api:user_roles_list', args=(admin.id,)) + '?id=%d' % project.update_role.id, user=bob).data['count'] == 1 + @pytest.mark.django_db def test_cant_create_role(post, admin): "Ensure we can't create new roles through the api" @@ -100,11 +108,11 @@ def test_cant_delete_role(delete, admin): assert response.status_code == 405 - # # /user//roles # + @pytest.mark.django_db def test_get_user_roles_list(get, admin): url = reverse('api:user_roles_list', args=(admin.id,)) @@ -113,6 +121,7 @@ def test_get_user_roles_list(get, admin): roles = response.data assert roles['count'] > 0 # 'system_administrator' role if nothing else + @pytest.mark.django_db def test_user_view_other_user_roles(organization, inventory, team, get, alice, bob): 'Users can see roles for other users, but only the roles that that user has access to see as well' @@ -159,8 +168,6 @@ def test_user_view_other_user_roles(organization, inventory, team, get, alice, b assert team.member_role.id in role_hash # Alice can now see this - - @pytest.mark.django_db def test_add_role_to_user(role, post, admin): assert admin.roles.filter(id=role.id).count() == 0 @@ -178,6 +185,7 @@ def test_add_role_to_user(role, post, admin): assert response.status_code == 400 assert admin.roles.filter(id=role.id).count() == 1 + @pytest.mark.django_db def test_remove_role_from_user(role, post, admin): assert admin.roles.filter(id=role.id).count() == 0 @@ -191,12 +199,11 @@ def test_remove_role_from_user(role, post, admin): assert admin.roles.filter(id=role.id).count() == 0 - - # # /team//roles # + @pytest.mark.django_db def test_get_teams_roles_list(get, team, organization, admin): team.member_role.children.add(organization.admin_role) @@ -226,6 +233,7 @@ def test_add_role_to_teams(team, post, admin): assert response.status_code == 400 assert team.member_role.children.filter(id=team.member_role.id).count() == 1 + @pytest.mark.django_db def test_remove_role_from_teams(team, post, admin): assert team.member_role.children.filter(id=team.member_role.id).count() == 0 @@ -239,11 +247,11 @@ def test_remove_role_from_teams(team, post, admin): assert team.member_role.children.filter(id=team.member_role.id).count() == 0 - # # /roles// # + @pytest.mark.django_db def test_get_role(get, admin, role): url = reverse('api:role_detail', args=(role.id,)) @@ -251,6 
+259,7 @@ def test_get_role(get, admin, role): assert response.status_code == 200 assert response.data['id'] == role.id + @pytest.mark.django_db def test_put_role_405(put, admin, role): url = reverse('api:role_detail', args=(role.id,)) @@ -259,6 +268,7 @@ def test_put_role_405(put, admin, role): #r = Role.objects.get(id=role.id) #assert r.name == 'Some new name' + @pytest.mark.django_db def test_put_role_access_denied(put, alice, role): url = reverse('api:role_detail', args=(role.id,)) @@ -270,6 +280,7 @@ def test_put_role_access_denied(put, alice, role): # /roles//users/ # + @pytest.mark.django_db def test_get_role_users(get, admin, role): role.members.add(admin) @@ -279,6 +290,7 @@ def test_get_role_users(get, admin, role): assert response.data['count'] == 1 assert response.data['results'][0]['id'] == admin.id + @pytest.mark.django_db def test_add_user_to_role(post, admin, role): url = reverse('api:role_users_list', args=(role.id,)) @@ -286,6 +298,7 @@ def test_add_user_to_role(post, admin, role): post(url, {'id': admin.id}, admin) assert role.members.filter(id=admin.id).count() == 1 + @pytest.mark.django_db def test_remove_user_to_role(post, admin, role): role.members.add(admin) @@ -294,6 +307,7 @@ def test_remove_user_to_role(post, admin, role): post(url, {'disassociate': True, 'id': admin.id}, admin) assert role.members.filter(id=admin.id).count() == 0 + @pytest.mark.django_db def test_org_admin_add_user_to_job_template(post, organization, check_jobtemplate, user): 'Tests that a user with permissions to assign/revoke membership to a particular role can do so' @@ -355,10 +369,12 @@ def test_user_fail_to_remove_user_to_job_template(post, organization, check_jobt assert joe in check_jobtemplate.execute_role + # # /roles//teams/ # + @pytest.mark.django_db def test_get_role_teams(get, team, admin, role): role.parents.add(team.member_role) @@ -377,6 +393,7 @@ def test_add_team_to_role(post, team, admin, role): assert res.status_code == 204 assert role.parents.filter(id=team.member_role.id).count() == 1 + @pytest.mark.django_db def test_remove_team_from_role(post, team, admin, role): role.members.add(admin) @@ -391,6 +408,7 @@ def test_remove_team_from_role(post, team, admin, role): # /roles//parents/ # + @pytest.mark.django_db def test_role_parents(get, team, admin, role): role.parents.add(team.member_role) @@ -405,6 +423,7 @@ def test_role_parents(get, team, admin, role): # /roles//children/ # + @pytest.mark.django_db def test_role_children(get, team, admin, role): role.parents.add(team.member_role) @@ -415,11 +434,11 @@ def test_role_children(get, team, admin, role): assert response.data['results'][0]['id'] == role.id or response.data['results'][1]['id'] == role.id - # # Generics # + @pytest.mark.django_db def test_ensure_rbac_fields_are_present(organization, get, admin): url = reverse('api:organization_detail', args=(organization.id,)) @@ -438,6 +457,7 @@ def test_ensure_rbac_fields_are_present(organization, get, admin): role = org_role_response.data assert role['related']['organization'] == url + @pytest.mark.django_db def test_ensure_role_summary_is_present(organization, get, user): url = reverse('api:organization_detail', args=(organization.id,)) diff --git a/awx/main/tests/functional/test_rbac_core.py b/awx/main/tests/functional/test_rbac_core.py index 75c89643ad..bae3b61dc0 100644 --- a/awx/main/tests/functional/test_rbac_core.py +++ b/awx/main/tests/functional/test_rbac_core.py @@ -53,7 +53,6 @@ def test_auto_inheritance_by_parents(organization, alice): assert alice not in 
organization.admin_role - @pytest.mark.django_db def test_accessible_objects(organization, alice, bob): A = Role.objects.create() @@ -68,6 +67,7 @@ def test_accessible_objects(organization, alice, bob): assert Organization.accessible_objects(alice, 'admin_role').count() == 1 assert Organization.accessible_objects(bob, 'admin_role').count() == 0 + @pytest.mark.django_db def test_team_symantics(organization, team, alice): assert alice not in organization.auditor_role @@ -93,6 +93,7 @@ def test_auto_field_adjustments(organization, inventory, team, alice): assert alice not in inventory.admin_role #assert False + @pytest.mark.django_db def test_implicit_deletes(alice): 'Ensures implicit resources and roles delete themselves' @@ -127,6 +128,7 @@ def test_content_object(user): org = Organization.objects.create(name='test-org') assert org.admin_role.content_object.id == org.id + @pytest.mark.django_db def test_hierarchy_rebuilding_multi_path(): 'Tests a subtle case around role hierarchy rebuilding when you have multiple paths of different lengths to the same role' diff --git a/awx/main/tests/functional/test_rbac_credential.py b/awx/main/tests/functional/test_rbac_credential.py index ae68f036d8..6c87a53d27 100644 --- a/awx/main/tests/functional/test_rbac_credential.py +++ b/awx/main/tests/functional/test_rbac_credential.py @@ -8,6 +8,7 @@ from awx.main.migrations import _rbac as rbac from django.apps import apps from django.contrib.auth.models import User + @pytest.mark.django_db def test_credential_migration_user(credential, user, permissions): u = user('user', False) @@ -18,6 +19,7 @@ def test_credential_migration_user(credential, user, permissions): assert u in credential.admin_role + @pytest.mark.django_db def test_two_teams_same_cred_name(organization_factory): objects = organization_factory("test", @@ -33,12 +35,14 @@ def test_two_teams_same_cred_name(organization_factory): assert objects.teams.team1.member_role in cred1.use_role.parents.all() assert objects.teams.team2.member_role in cred2.use_role.parents.all() + @pytest.mark.django_db def test_credential_use_role(credential, user, permissions): u = user('user', False) credential.use_role.members.add(u) assert u in credential.use_role + @pytest.mark.django_db def test_credential_migration_team_member(credential, team, user, permissions): u = user('user', False) @@ -58,6 +62,7 @@ def test_credential_migration_team_member(credential, team, user, permissions): assert u in credential.use_role assert u not in credential.admin_role + @pytest.mark.django_db def test_credential_migration_team_admin(credential, team, user, permissions): u = user('user', False) @@ -71,6 +76,7 @@ def test_credential_migration_team_admin(credential, team, user, permissions): rbac.migrate_credential(apps, None) assert u in credential.admin_role + @pytest.mark.django_db def test_credential_migration_org_auditor(credential, team, org_auditor): # Team's organization is the org_auditor's org @@ -89,6 +95,7 @@ def test_credential_migration_org_auditor(credential, team, org_auditor): assert org_auditor not in credential.use_role assert org_auditor in credential.read_role + def test_credential_access_superuser(): u = User(username='admin', is_superuser=True) access = CredentialAccess(u) @@ -98,6 +105,7 @@ def test_credential_access_superuser(): assert access.can_change(credential, None) assert access.can_delete(credential) + @pytest.mark.django_db def test_credential_access_auditor(credential, organization_factory): objects = organization_factory("org_cred_auditor", @@ -109,6 
+117,7 @@ def test_credential_access_auditor(credential, organization_factory): access = CredentialAccess(objects.users.user1) assert access.can_read(credential) + @pytest.mark.django_db def test_credential_access_admin(user, team, credential): u = user('org-admin', False) @@ -135,6 +144,7 @@ def test_credential_access_admin(user, team, credential): # should have can_change access as org-admin assert access.can_change(credential, {'description': 'New description.'}) + @pytest.mark.django_db def test_org_credential_access_member(alice, org_credential, credential): org_credential.admin_role.members.add(alice) @@ -152,6 +162,7 @@ def test_org_credential_access_member(alice, org_credential, credential): 'description': 'New description.', 'organization': None}) + @pytest.mark.django_db def test_cred_job_template_xfail(user, deploy_jobtemplate): ' Personal credential migration ' @@ -167,6 +178,7 @@ def test_cred_job_template_xfail(user, deploy_jobtemplate): rbac.migrate_credential(apps, None) assert not access.can_change(cred, {'organization': org.pk}) + @pytest.mark.django_db def test_cred_job_template(user, team, deploy_jobtemplate): ' Team credential migration => org credential ' @@ -188,6 +200,7 @@ def test_cred_job_template(user, team, deploy_jobtemplate): org.admin_role.members.remove(a) assert not access.can_change(cred, {'organization': org.pk}) + @pytest.mark.django_db def test_cred_multi_job_template_single_org_xfail(user, deploy_jobtemplate): a = user('admin', False) @@ -204,6 +217,7 @@ def test_cred_multi_job_template_single_org_xfail(user, deploy_jobtemplate): assert not access.can_change(cred, {'organization': org.pk}) + @pytest.mark.django_db def test_cred_multi_job_template_single_org(user, team, deploy_jobtemplate): a = user('admin', False) @@ -223,6 +237,7 @@ def test_cred_multi_job_template_single_org(user, team, deploy_jobtemplate): org.admin_role.members.remove(a) assert not access.can_change(cred, {'organization': org.pk}) + @pytest.mark.django_db def test_single_cred_multi_job_template_multi_org(user, organizations, credential, team): orgs = organizations(2) @@ -252,6 +267,7 @@ def test_single_cred_multi_job_template_multi_org(user, organizations, credentia assert jts[0].credential != jts[1].credential + @pytest.mark.django_db def test_cred_inventory_source(user, inventory, credential): u = user('member', False) @@ -268,6 +284,7 @@ def test_cred_inventory_source(user, inventory, credential): rbac.migrate_credential(apps, None) assert u not in credential.use_role + @pytest.mark.django_db def test_cred_project(user, credential, project): u = user('member', False) @@ -280,12 +297,14 @@ def test_cred_project(user, credential, project): rbac.migrate_credential(apps, None) assert u not in credential.use_role + @pytest.mark.django_db def test_cred_no_org(user, credential): su = user('su', True) access = CredentialAccess(su) assert access.can_change(credential, {'user': su.pk}) + @pytest.mark.django_db def test_cred_team(user, team, credential): u = user('a', False) diff --git a/awx/main/tests/functional/test_rbac_inventory.py b/awx/main/tests/functional/test_rbac_inventory.py index 287919ad31..6f26cacc54 100644 --- a/awx/main/tests/functional/test_rbac_inventory.py +++ b/awx/main/tests/functional/test_rbac_inventory.py @@ -5,14 +5,19 @@ from awx.main.models import ( Permission, Host, CustomInventoryScript, + Schedule ) from awx.main.access import ( InventoryAccess, + InventorySourceAccess, HostAccess, - InventoryUpdateAccess + InventoryUpdateAccess, + CustomInventoryScriptAccess, 
+ ScheduleAccess ) from django.apps import apps + @pytest.mark.django_db def test_custom_inv_script_access(organization, user): u = user('user', False) @@ -29,6 +34,26 @@ def test_custom_inv_script_access(organization, user): organization.admin_role.members.add(ou) assert ou in custom_inv.admin_role + +@pytest.mark.django_db +def test_modify_inv_script_foreign_org_admin(org_admin, organization, organization_factory, project): + custom_inv = CustomInventoryScript.objects.create(name='test', script='test', description='test', + organization=organization) + + other_org = organization_factory('not-my-org').organization + access = CustomInventoryScriptAccess(org_admin) + assert not access.can_change(custom_inv, {'organization': other_org.pk, 'name': 'new-project'}) + + +@pytest.mark.django_db +def test_org_member_inventory_script_permissions(org_member, organization): + custom_inv = CustomInventoryScript.objects.create(name='test', script='test', organization=organization) + access = CustomInventoryScriptAccess(org_member) + assert access.can_read(custom_inv) + assert not access.can_delete(custom_inv) + assert not access.can_change(custom_inv, {'name': 'ed-test'}) + + @pytest.mark.django_db def test_inventory_admin_user(inventory, permissions, user): u = user('admin', False) @@ -43,6 +68,7 @@ def test_inventory_admin_user(inventory, permissions, user): assert inventory.use_role.members.filter(id=u.id).exists() is False assert inventory.update_role.members.filter(id=u.id).exists() is False + @pytest.mark.django_db def test_inventory_auditor_user(inventory, permissions, user): u = user('auditor', False) @@ -59,6 +85,7 @@ def test_inventory_auditor_user(inventory, permissions, user): assert inventory.use_role.members.filter(id=u.id).exists() is False assert inventory.update_role.members.filter(id=u.id).exists() is False + @pytest.mark.django_db def test_inventory_updater_user(inventory, permissions, user): u = user('updater', False) @@ -74,6 +101,7 @@ def test_inventory_updater_user(inventory, permissions, user): assert inventory.use_role.members.filter(id=u.id).exists() is False assert inventory.update_role.members.filter(id=u.id).exists() + @pytest.mark.django_db def test_inventory_executor_user(inventory, permissions, user): u = user('executor', False) @@ -91,7 +119,6 @@ def test_inventory_executor_user(inventory, permissions, user): assert inventory.update_role.members.filter(id=u.id).exists() is False - @pytest.mark.django_db def test_inventory_admin_team(inventory, permissions, user, team): u = user('admin', False) @@ -214,6 +241,7 @@ def test_access_auditor(organization, inventory, user): assert not access.can_delete(inventory) assert not access.can_run_ad_hoc_commands(inventory) + @pytest.mark.django_db def test_inventory_update_org_admin(inventory_update, org_admin): access = InventoryUpdateAccess(org_admin) @@ -246,4 +274,19 @@ def test_host_access(organization, inventory, group, user, group_factory): assert inventory_admin_access.can_read(host) is False +@pytest.mark.django_db +def test_inventory_source_credential_check(rando, inventory_source, credential): + inventory_source.group.inventory.admin_role.members.add(rando) + access = InventorySourceAccess(rando) + assert not access.can_change(inventory_source, {'credential': credential}) + +@pytest.mark.django_db +def test_inventory_source_org_admin_schedule_access(org_admin, inventory_source): + schedule = Schedule.objects.create( + unified_job_template=inventory_source, + rrule='DTSTART:20151117T050000Z 
RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1') + access = ScheduleAccess(org_admin) + assert access.get_queryset() + assert access.can_read(schedule) + assert access.can_change(schedule, {'rrule': 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2'}) diff --git a/awx/main/tests/functional/test_rbac_job.py b/awx/main/tests/functional/test_rbac_job.py index febade67eb..de1ccf6817 100644 --- a/awx/main/tests/functional/test_rbac_job.py +++ b/awx/main/tests/functional/test_rbac_job.py @@ -23,22 +23,26 @@ def normal_job(deploy_jobtemplate): inventory=deploy_jobtemplate.inventory ) + @pytest.fixture def jt_user(deploy_jobtemplate, rando): deploy_jobtemplate.execute_role.members.add(rando) return rando + @pytest.fixture def inv_updater(inventory, rando): inventory.update_role.members.add(rando) return rando + @pytest.fixture def host_adhoc(host, machine_credential, rando): host.inventory.adhoc_role.members.add(rando) machine_credential.use_role.members.add(rando) return rando + @pytest.fixture def proj_updater(project, rando): project.update_role.members.add(rando) @@ -52,6 +56,7 @@ def test_superuser_sees_orphans(normal_job, admin_user): access = JobAccess(admin_user) assert access.can_read(normal_job) + @pytest.mark.django_db def test_org_member_does_not_see_orphans(normal_job, org_member, project): normal_job.job_template = None @@ -60,18 +65,21 @@ def test_org_member_does_not_see_orphans(normal_job, org_member, project): access = JobAccess(org_member) assert not access.can_read(normal_job) + @pytest.mark.django_db def test_org_admin_sees_orphans(normal_job, org_admin): normal_job.job_template = None access = JobAccess(org_admin) assert access.can_read(normal_job) + @pytest.mark.django_db def test_org_auditor_sees_orphans(normal_job, org_auditor): normal_job.job_template = None access = JobAccess(org_auditor) assert access.can_read(normal_job) + # Delete permissions testing @pytest.mark.django_db def test_JT_admin_delete_denied(normal_job, rando): @@ -79,12 +87,14 @@ def test_JT_admin_delete_denied(normal_job, rando): access = JobAccess(rando) assert not access.can_delete(normal_job) + @pytest.mark.django_db def test_inventory_admin_delete_denied(normal_job, rando): normal_job.job_template.inventory.admin_role.members.add(rando) access = JobAccess(rando) assert not access.can_delete(normal_job) + @pytest.mark.django_db def test_null_related_delete_denied(normal_job, rando): normal_job.project = None @@ -92,24 +102,28 @@ def test_null_related_delete_denied(normal_job, rando): access = JobAccess(rando) assert not access.can_delete(normal_job) + @pytest.mark.django_db def test_delete_job_with_orphan_proj(normal_job, rando): normal_job.project.organization = None access = JobAccess(rando) assert not access.can_delete(normal_job) + @pytest.mark.django_db def test_inventory_org_admin_delete_allowed(normal_job, org_admin): normal_job.project = None # do this so we test job->inventory->org->admin connection access = JobAccess(org_admin) assert access.can_delete(normal_job) + @pytest.mark.django_db def test_project_org_admin_delete_allowed(normal_job, org_admin): normal_job.inventory = None # do this so we test job->project->org->admin connection access = JobAccess(org_admin) assert access.can_delete(normal_job) + @pytest.mark.django_db class TestJobAndUpdateCancels: diff --git a/awx/main/tests/functional/test_rbac_job_start.py b/awx/main/tests/functional/test_rbac_job_start.py index c934973cf4..4b6c092680 100644 --- a/awx/main/tests/functional/test_rbac_job_start.py +++ 
b/awx/main/tests/functional/test_rbac_job_start.py @@ -4,10 +4,10 @@ from awx.main.models.inventory import Inventory from awx.main.models.credential import Credential from awx.main.models.jobs import JobTemplate, Job + @pytest.mark.django_db @pytest.mark.job_permissions def test_admin_executing_permissions(deploy_jobtemplate, inventory, machine_credential, user): - admin_user = user('admin-user', True) assert admin_user.can_access(Inventory, 'use', inventory) @@ -15,33 +15,34 @@ def test_admin_executing_permissions(deploy_jobtemplate, inventory, machine_cred assert admin_user.can_access(JobTemplate, 'start', deploy_jobtemplate) assert admin_user.can_access(Credential, 'use', machine_credential) + @pytest.mark.django_db @pytest.mark.job_permissions def test_job_template_start_access(deploy_jobtemplate, user): - common_user = user('test-user', False) deploy_jobtemplate.execute_role.members.add(common_user) assert common_user.can_access(JobTemplate, 'start', deploy_jobtemplate) + @pytest.mark.django_db @pytest.mark.job_permissions def test_credential_use_access(machine_credential, user): - common_user = user('test-user', False) machine_credential.use_role.members.add(common_user) assert common_user.can_access(Credential, 'use', machine_credential) + @pytest.mark.django_db @pytest.mark.job_permissions def test_inventory_use_access(inventory, user): - common_user = user('test-user', False) inventory.use_role.members.add(common_user) assert common_user.can_access(Inventory, 'use', inventory) + @pytest.mark.django_db class TestJobRelaunchAccess: @pytest.fixture diff --git a/awx/main/tests/functional/test_rbac_job_templates.py b/awx/main/tests/functional/test_rbac_job_templates.py index c8cc2b8502..ef615a09d6 100644 --- a/awx/main/tests/functional/test_rbac_job_templates.py +++ b/awx/main/tests/functional/test_rbac_job_templates.py @@ -4,10 +4,12 @@ import pytest from awx.main.access import ( BaseAccess, JobTemplateAccess, + ScheduleAccess ) from awx.main.migrations import _rbac as rbac from awx.main.models import Permission from awx.main.models.jobs import JobTemplate +from awx.main.models.schedules import Schedule from django.apps import apps from django.core.urlresolvers import reverse @@ -20,6 +22,7 @@ def jt_objects(job_template_factory): credential='cred1', cloud_credential='aws1', network_credential='juniper1') return objects + @pytest.mark.django_db def test_job_template_migration_check(credential, deploy_jobtemplate, check_jobtemplate, user): admin = user('admin', is_superuser=True) @@ -51,6 +54,7 @@ def test_job_template_migration_check(credential, deploy_jobtemplate, check_jobt assert admin in deploy_jobtemplate.execute_role assert joe not in deploy_jobtemplate.execute_role + @pytest.mark.django_db def test_job_template_migration_deploy(credential, deploy_jobtemplate, check_jobtemplate, user): admin = user('admin', is_superuser=True) @@ -166,6 +170,7 @@ def test_job_template_access_superuser(check_license, user, deploy_jobtemplate): assert access.can_read(deploy_jobtemplate) assert access.can_add({}) + @pytest.mark.django_db def test_job_template_access_read_level(jt_objects, rando): @@ -182,6 +187,7 @@ def test_job_template_access_read_level(jt_objects, rando): assert not access.can_add(dict(cloud_credential=jt_objects.cloud_credential.pk, project=proj_pk)) assert not access.can_add(dict(network_credential=jt_objects.network_credential.pk, project=proj_pk)) + @pytest.mark.django_db def test_job_template_access_use_level(jt_objects, rando): @@ -198,6 +204,7 @@ def 
test_job_template_access_use_level(jt_objects, rando): assert access.can_add(dict(cloud_credential=jt_objects.cloud_credential.pk, project=proj_pk)) assert access.can_add(dict(network_credential=jt_objects.network_credential.pk, project=proj_pk)) + @pytest.mark.django_db def test_job_template_access_org_admin(jt_objects, rando): access = JobTemplateAccess(rando) @@ -218,6 +225,23 @@ def test_job_template_access_org_admin(jt_objects, rando): assert access.can_read(jt_objects.job_template) assert access.can_delete(jt_objects.job_template) + +@pytest.mark.django_db +class TestOrphanJobTemplate: + + def test_orphan_JT_readable_by_system_auditor(self, job_template, system_auditor): + assert system_auditor.is_system_auditor + assert job_template.project is None + access = JobTemplateAccess(system_auditor) + assert access.can_read(job_template) + + def test_system_admin_orphan_capabilities(self, job_template, admin_user): + job_template.capabilities_cache = {'edit': False} + access = JobTemplateAccess(admin_user) + capabilities = access.get_user_capabilities(job_template, method_list=['edit']) + assert capabilities['edit'] + + @pytest.mark.django_db @pytest.mark.job_permissions def test_job_template_creator_access(project, rando, post): @@ -240,3 +264,48 @@ def test_job_template_creator_access(project, rando, post): jt_obj = JobTemplate.objects.get(pk=jt_pk) # Creating a JT should place the creator in the admin role assert rando in jt_obj.admin_role + + +@pytest.mark.django_db +def test_associate_label(label, user, job_template): + access = JobTemplateAccess(user('joe', False)) + job_template.admin_role.members.add(user('joe', False)) + label.organization.read_role.members.add(user('joe', False)) + assert access.can_attach(job_template, label, 'labels', None) + + +@pytest.mark.django_db +class TestJobTemplateSchedules: + + rrule = 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1' + rrule2 = 'DTSTART:20151117T050000Z RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1' + + @pytest.fixture + def jt2(self): + return JobTemplate.objects.create(name="other-jt") + + def test_move_schedule_to_JT_no_access(self, job_template, rando, jt2): + schedule = Schedule.objects.create(unified_job_template=job_template, rrule=self.rrule) + job_template.admin_role.members.add(rando) + access = ScheduleAccess(rando) + assert not access.can_change(schedule, data=dict(unified_job_template=jt2.pk)) + + + def test_move_schedule_from_JT_no_access(self, job_template, rando, jt2): + schedule = Schedule.objects.create(unified_job_template=job_template, rrule=self.rrule) + jt2.admin_role.members.add(rando) + access = ScheduleAccess(rando) + assert not access.can_change(schedule, data=dict(unified_job_template=jt2.pk)) + + + def test_can_create_schedule_with_execute(self, job_template, rando): + job_template.execute_role.members.add(rando) + access = ScheduleAccess(rando) + assert access.can_add({'unified_job_template': job_template}) + + + def test_can_modify_ones_own_schedule(self, job_template, rando): + job_template.execute_role.members.add(rando) + schedule = Schedule.objects.create(unified_job_template=job_template, rrule=self.rrule, created_by=rando) + access = ScheduleAccess(rando) + assert access.can_change(schedule, {'rrule': self.rrule2}) diff --git a/awx/main/tests/functional/test_rbac_label.py b/awx/main/tests/functional/test_rbac_label.py index e425d50908..a34a4bf27f 100644 --- a/awx/main/tests/functional/test_rbac_label.py +++ b/awx/main/tests/functional/test_rbac_label.py @@ -4,24 +4,27 @@ from awx.main.access 
import ( LabelAccess, ) -from rest_framework.exceptions import ParseError @pytest.mark.django_db def test_label_get_queryset_user(label, user): - access = LabelAccess(user('user', False)) - label.organization.member_role.members.add(user('user', False)) + u = user('user', False) + access = LabelAccess(u) + label.organization.member_role.members.add(u) assert access.get_queryset().count() == 1 + @pytest.mark.django_db def test_label_get_queryset_su(label, user): access = LabelAccess(user('user', True)) assert access.get_queryset().count() == 1 + @pytest.mark.django_db def test_label_access(label, user): access = LabelAccess(user('user', False)) assert not access.can_read(label) + @pytest.mark.django_db def test_label_access_superuser(label, user): access = LabelAccess(user('admin', True)) @@ -30,6 +33,7 @@ def test_label_access_superuser(label, user): assert access.can_change(label, None) assert access.can_delete(label) + @pytest.mark.django_db def test_label_access_admin(organization_factory): '''can_change because I am an admin of that org''' @@ -49,16 +53,15 @@ def test_label_access_admin(organization_factory): assert access.can_change(label, {'organization': members.organization.id}) assert access.can_delete(label) + @pytest.mark.django_db def test_label_access_user(label, user): access = LabelAccess(user('user', False)) label.organization.member_role.members.add(user('user', False)) - with pytest.raises(ParseError): - access.can_add({'organization': None}) + assert not access.can_add({'organization': None}) assert not access.can_change(label, None) assert not access.can_delete(label) assert access.can_read(label) assert access.can_add({'organization': label.organization.id}) - diff --git a/awx/main/tests/functional/test_rbac_notifications.py b/awx/main/tests/functional/test_rbac_notifications.py index a9a5e7c5f9..80255da0d1 100644 --- a/awx/main/tests/functional/test_rbac_notifications.py +++ b/awx/main/tests/functional/test_rbac_notifications.py @@ -2,36 +2,43 @@ import pytest from awx.main.access import ( NotificationTemplateAccess, - NotificationAccess + NotificationAccess, + JobTemplateAccess ) + @pytest.mark.django_db def test_notification_template_get_queryset_orgmember(notification_template, user): access = NotificationTemplateAccess(user('user', False)) notification_template.organization.member_role.members.add(user('user', False)) assert access.get_queryset().count() == 0 + @pytest.mark.django_db def test_notification_template_get_queryset_nonorgmember(notification_template, user): access = NotificationTemplateAccess(user('user', False)) assert access.get_queryset().count() == 0 + @pytest.mark.django_db def test_notification_template_get_queryset_su(notification_template, user): access = NotificationTemplateAccess(user('user', True)) assert access.get_queryset().count() == 1 + @pytest.mark.django_db def test_notification_template_get_queryset_orgadmin(notification_template, user): access = NotificationTemplateAccess(user('admin', False)) notification_template.organization.admin_role.members.add(user('admin', False)) assert access.get_queryset().count() == 1 + @pytest.mark.django_db def test_notification_template_get_queryset_org_auditor(notification_template, org_auditor): access = NotificationTemplateAccess(org_auditor) assert access.get_queryset().count() == 1 + @pytest.mark.django_db def test_notification_template_access_superuser(notification_template_factory): nf_objects = notification_template_factory('test-orphaned', organization='test', superusers=['admin']) @@ -50,6 
+57,7 @@ def test_notification_template_access_superuser(notification_template_factory): assert access.can_change(nf, None) assert access.can_delete(nf) + @pytest.mark.django_db def test_notification_template_access_admin(organization_factory, notification_template_factory): other_objects = organization_factory('other') @@ -75,6 +83,7 @@ def test_notification_template_access_admin(organization_factory, notification_t assert not access.can_change(nf, None) assert not access.can_delete(nf) + @pytest.mark.django_db def test_notification_template_access_org_user(notification_template, user): u = user('normal', False) @@ -84,34 +93,49 @@ def test_notification_template_access_org_user(notification_template, user): assert not access.can_change(notification_template, None) assert not access.can_delete(notification_template) + @pytest.mark.django_db def test_notification_template_orphan_access_org_admin(notification_template, organization, org_admin): notification_template.organization = None access = NotificationTemplateAccess(org_admin) assert not access.can_change(notification_template, {'organization': organization.id}) + @pytest.mark.django_db def test_notification_access_get_queryset_org_admin(notification, org_admin): access = NotificationAccess(org_admin) assert access.get_queryset().count() == 1 + @pytest.mark.django_db def test_notification_access_get_queryset_org_auditor(notification, org_auditor): access = NotificationAccess(org_auditor) assert access.get_queryset().count() == 1 + @pytest.mark.django_db def test_notification_access_system_admin(notification, admin): access = NotificationAccess(admin) assert access.can_read(notification) assert access.can_delete(notification) + +@pytest.mark.django_db +def test_system_auditor_JT_attach(system_auditor, job_template, notification_template): + job_template.admin_role.members.add(system_auditor) + access = JobTemplateAccess(system_auditor) + assert not access.can_attach( + job_template, notification_template, 'notification_templates_success', + {'id': notification_template.id}) + + @pytest.mark.django_db def test_notification_access_org_admin(notification, org_admin): access = NotificationAccess(org_admin) assert access.can_read(notification) assert access.can_delete(notification) + @pytest.mark.django_db def test_notification_access_org_auditor(notification, org_auditor): access = NotificationAccess(org_auditor) diff --git a/awx/main/tests/functional/test_rbac_organization.py b/awx/main/tests/functional/test_rbac_organization.py index 77558c0e7c..1ecf6c7f85 100644 --- a/awx/main/tests/functional/test_rbac_organization.py +++ b/awx/main/tests/functional/test_rbac_organization.py @@ -22,6 +22,7 @@ def test_organization_migration_admin(organization, permissions, user): assert u in organization.admin_role + @pytest.mark.django_db def test_organization_migration_user(organization, permissions, user): u = user('user', False) diff --git a/awx/main/tests/functional/test_rbac_project.py b/awx/main/tests/functional/test_rbac_project.py index ba88226b2e..9f7cfbe705 100644 --- a/awx/main/tests/functional/test_rbac_project.py +++ b/awx/main/tests/functional/test_rbac_project.py @@ -92,6 +92,7 @@ def test_project_migration(): assert o2.projects.all()[0].jobtemplates.count() == 1 assert o3.projects.all()[0].jobtemplates.count() == 0 + @pytest.mark.django_db def test_single_org_project_migration(organization): project = Project.objects.create(name='my project', @@ -103,6 +104,7 @@ def test_no_org_project_migration(organization): project =
Project.objects.get(id=project.id) assert project.organization.id == organization.id + @pytest.mark.django_db def test_no_org_project_migration(organization): project = Project.objects.create(name='my project', @@ -112,6 +114,7 @@ def test_no_org_project_migration(organization): rbac.migrate_projects(apps, None) assert project.organization is None + @pytest.mark.django_db def test_multi_org_project_migration(): org1 = Organization.objects.create(name="org1", description="org1 desc") @@ -145,6 +148,7 @@ def test_project_user_project(user_project, project, user): assert u in user_project.read_role assert u not in project.read_role + @pytest.mark.django_db def test_project_accessible_by_sa(user, project): u = user('systemadmin', is_superuser=True) @@ -159,6 +163,7 @@ def test_project_accessible_by_sa(user, project): print(project.admin_role.ancestors.all()) assert u in project.admin_role + @pytest.mark.django_db def test_project_org_members(user, organization, project): admin = user('orgadmin') @@ -176,6 +181,7 @@ def test_project_org_members(user, organization, project): assert admin in project.admin_role assert member in project.read_role + @pytest.mark.django_db def test_project_team(user, team, project): nonmember = user('nonmember') @@ -194,6 +200,7 @@ def test_project_team(user, team, project): assert member in project.read_role assert nonmember not in project.read_role + @pytest.mark.django_db def test_project_explicit_permission(user, team, project, organization): u = user('prjuser') @@ -211,9 +218,18 @@ def test_project_explicit_permission(user, team, project, organization): assert u in project.read_role + @pytest.mark.django_db def test_create_project_foreign_org_admin(org_admin, organization, organization_factory): """Org admins can only create projects in their own org.""" other_org = organization_factory('not-my-org').organization access = ProjectAccess(org_admin) assert not access.can_add({'organization': other_org.pk, 'name': 'new-project'}) + + +@pytest.mark.django_db +def test_modify_project_foreign_org_admin(org_admin, organization, organization_factory, project): + """Org admins can only modify projects in their own org.""" + other_org = organization_factory('not-my-org').organization + access = ProjectAccess(org_admin) + assert not access.can_change(project, {'organization': other_org.pk, 'name': 'new-project'}) diff --git a/awx/main/tests/functional/test_rbac_role.py b/awx/main/tests/functional/test_rbac_role.py index 613051e395..16ad46f8db 100644 --- a/awx/main/tests/functional/test_rbac_role.py +++ b/awx/main/tests/functional/test_rbac_role.py @@ -18,6 +18,7 @@ def test_team_access_attach(rando, team, inventory): data = {'id': inventory.admin_role.pk} assert not access.can_attach(team, inventory.admin_role, 'member_role.children', data, False) + @pytest.mark.django_db def test_user_access_attach(rando, inventory): inventory.read_role.members.add(rando) @@ -25,6 +26,7 @@ def test_user_access_attach(rando, inventory): data = {'id': inventory.admin_role.pk} assert not access.can_attach(rando, inventory.admin_role, 'roles', data, False) + @pytest.mark.django_db def test_role_access_attach(rando, inventory): inventory.read_role.members.add(rando) diff --git a/awx/main/tests/functional/test_rbac_team.py b/awx/main/tests/functional/test_rbac_team.py index 6907589462..5e7cf4ad85 100644 --- a/awx/main/tests/functional/test_rbac_team.py +++ b/awx/main/tests/functional/test_rbac_team.py @@ -22,6 +22,7 @@ def test_team_attach_unattach(team, user): assert not access.can_attach(team, 
team.member_role, 'member_role.children', None) assert not access.can_unattach(team, team.member_role, 'member_role.children') + @pytest.mark.django_db def test_team_access_superuser(team, user): team.member_role.members.add(user('member', False)) @@ -36,6 +37,7 @@ def test_team_access_superuser(team, user): assert len(t.member_role.members.all()) == 1 assert len(t.organization.admin_role.members.all()) == 0 + @pytest.mark.django_db def test_team_access_org_admin(organization, team, user): a = user('admin', False) @@ -52,6 +54,7 @@ def test_team_access_org_admin(organization, team, user): assert len(t.member_role.members.all()) == 0 assert len(t.organization.admin_role.members.all()) == 1 + @pytest.mark.django_db def test_team_access_member(organization, team, user): u = user('member', False) @@ -68,6 +71,7 @@ def test_team_access_member(organization, team, user): assert len(t.member_role.members.all()) == 1 assert len(t.organization.admin_role.members.all()) == 0 + @pytest.mark.django_db def test_team_accessible_by(team, user, project): u = user('team_member', False) @@ -79,6 +83,7 @@ def test_team_accessible_by(team, user, project): team.member_role.members.add(u) assert u in project.read_role + @pytest.mark.django_db def test_team_accessible_objects(team, user, project): u = user('team_member', False) @@ -90,6 +95,7 @@ def test_team_accessible_objects(team, user, project): team.member_role.members.add(u) assert len(Project.accessible_objects(u, 'read_role')) == 1 + @pytest.mark.django_db def test_team_admin_member_access(team, user, project): u = user('team_admin', False) diff --git a/awx/main/tests/functional/test_rbac_user.py b/awx/main/tests/functional/test_rbac_user.py index de2b9aa8b1..c7eaa8c0e9 100644 --- a/awx/main/tests/functional/test_rbac_user.py +++ b/awx/main/tests/functional/test_rbac_user.py @@ -1,11 +1,50 @@ import pytest from django.apps import apps -from django.contrib.auth.models import User +from django.test import TransactionTestCase from awx.main.migrations import _rbac as rbac from awx.main.access import UserAccess -from awx.main.models import Role +from awx.main.models import Role, User, Organization, Inventory + + +@pytest.mark.django_db +class TestSysAuditorTransactional(TransactionTestCase): + def rando(self): + return User.objects.create(username='rando', password='rando', email='rando@com.com') + + def inventory(self): + org = Organization.objects.create(name='org') + inv = Inventory.objects.create(name='inv', organization=org) + return inv + + def test_auditor_caching(self): + rando = self.rando() + with self.assertNumQueries(1): + v = rando.is_system_auditor + assert not v + with self.assertNumQueries(0): + v = rando.is_system_auditor + assert not v + + def test_auditor_setter(self): + rando = self.rando() + inventory = self.inventory() + rando.is_system_auditor = True + assert rando in inventory.read_role + + def test_refresh_with_set(self): + rando = self.rando() + rando.is_system_auditor = True + assert rando.is_system_auditor + rando.is_system_auditor = False + assert not rando.is_system_auditor + + +@pytest.mark.django_db +def test_system_auditor_is_system_auditor(system_auditor): + assert system_auditor.is_system_auditor + @pytest.mark.django_db def test_user_admin(user_project, project, user): @@ -28,6 +67,7 @@ def test_user_admin(user_project, project, user): assert sa.members.filter(id=joe.id).exists() is False assert sa.members.filter(id=admin.id).exists() is True + @pytest.mark.django_db def test_user_queryset(user): u = user('pete', False) @@
-36,6 +76,7 @@ def test_user_queryset(user): qs = access.get_queryset() assert qs.count() == 1 + @pytest.mark.django_db def test_user_accessible_objects(user, organization): admin = user('admin', False) @@ -49,6 +90,7 @@ def test_user_accessible_objects(user, organization): organization.member_role.members.remove(u) assert User.accessible_objects(admin, 'admin_role').count() == 1 + @pytest.mark.django_db def test_org_user_admin(user, organization): admin = user('orgadmin') @@ -63,6 +105,7 @@ def test_org_user_admin(user, organization): organization.admin_role.members.remove(admin) assert admin not in member.admin_role + @pytest.mark.django_db def test_org_user_removed(user, organization): admin = user('orgadmin') @@ -76,6 +119,7 @@ def test_org_user_removed(user, organization): organization.member_role.members.remove(member) assert admin not in member.admin_role + @pytest.mark.django_db def test_org_admin_create_sys_auditor(org_admin): access = UserAccess(org_admin) @@ -83,6 +127,7 @@ def test_org_admin_create_sys_auditor(org_admin): username='new_user', password="pa$$word", email="asdf@redhat.com", is_system_auditor='true')) + @pytest.mark.django_db def test_org_admin_edit_sys_auditor(org_admin, alice, organization): organization.member_role.members.add(alice) diff --git a/awx/main/tests/functional/test_rbac_workflow.py b/awx/main/tests/functional/test_rbac_workflow.py new file mode 100644 index 0000000000..8d363305d5 --- /dev/null +++ b/awx/main/tests/functional/test_rbac_workflow.py @@ -0,0 +1,134 @@ +import pytest + +from awx.main.access import ( + WorkflowJobTemplateAccess, + WorkflowJobTemplateNodeAccess, + WorkflowJobAccess, + # WorkflowJobNodeAccess +) + + +@pytest.fixture +def wfjt(workflow_job_template_factory, organization): + objects = workflow_job_template_factory('test_workflow', organization=organization, persisted=True) + return objects.workflow_job_template + + +@pytest.fixture +def wfjt_with_nodes(workflow_job_template_factory, organization, job_template): + objects = workflow_job_template_factory( + 'test_workflow', organization=organization, workflow_job_template_nodes=[{'unified_job_template': job_template}], persisted=True) + return objects.workflow_job_template + + +@pytest.fixture +def wfjt_node(wfjt_with_nodes): + return wfjt_with_nodes.workflow_job_template_nodes.all()[0] + + +@pytest.fixture +def workflow_job(wfjt): + return wfjt.workflow_jobs.create(name='test_workflow') + + +@pytest.mark.django_db +class TestWorkflowJobTemplateAccess: + + def test_random_user_no_edit(self, wfjt, rando): + access = WorkflowJobTemplateAccess(rando) + assert not access.can_change(wfjt, {'name': 'new name'}) + + def test_org_admin_edit(self, wfjt, org_admin): + access = WorkflowJobTemplateAccess(org_admin) + assert access.can_change(wfjt, {'name': 'new name'}) + + def test_org_admin_role_inheritance(self, wfjt, org_admin): + assert org_admin in wfjt.admin_role + assert org_admin in wfjt.execute_role + assert org_admin in wfjt.read_role + + +@pytest.mark.django_db +class TestWorkflowJobTemplateNodeAccess: + + def test_no_jt_access_to_edit(self, wfjt_node, org_admin): + # without access to the related job template, a WFJT admin cannot + # change the prompted parameters + access = WorkflowJobTemplateNodeAccess(org_admin) + assert not access.can_change(wfjt_node, {'job_type': 'scan'}) + + def test_add_JT_no_start_perm(self, wfjt, job_template, rando): + wfjt.admin_role.members.add(rando) + access = WorkflowJobTemplateNodeAccess(rando) + job_template.read_role.members.add(rando)
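+        # rando holds read on the JT but not execute, so adding it as a workflow node should be denied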
+ assert not access.can_add({ + 'workflow_job_template': wfjt, + 'unified_job_template': job_template}) + + def test_add_node_with_minimum_permissions(self, wfjt, job_template, inventory, rando): + wfjt.admin_role.members.add(rando) + access = WorkflowJobTemplateNodeAccess(rando) + job_template.execute_role.members.add(rando) + inventory.use_role.members.add(rando) + assert access.can_add({ + 'workflow_job_template': wfjt, + 'inventory': inventory, + 'unified_job_template': job_template}) + + def test_remove_unwanted_foreign_node(self, wfjt_node, job_template, rando): + wfjt = wfjt_node.workflow_job_template + wfjt.admin_role.members.add(rando) + wfjt_node.unified_job_template = job_template + access = WorkflowJobTemplateNodeAccess(rando) + assert access.can_delete(wfjt_node) + + +@pytest.mark.django_db +class TestWorkflowJobAccess: + + def test_org_admin_can_delete_workflow_job(self, workflow_job, org_admin): + access = WorkflowJobAccess(org_admin) + assert access.can_delete(workflow_job) + + def test_wfjt_admin_cannot_delete_workflow_job(self, workflow_job, rando): + workflow_job.workflow_job_template.admin_role.members.add(rando) + access = WorkflowJobAccess(rando) + assert not access.can_delete(workflow_job) + + def test_cancel_your_own_job(self, wfjt, workflow_job, rando): + wfjt.execute_role.members.add(rando) + workflow_job.created_by = rando + workflow_job.save() + access = WorkflowJobAccess(rando) + assert access.can_cancel(workflow_job) + + def test_copy_permissions_org_admin(self, wfjt, org_admin, org_member): + admin_access = WorkflowJobTemplateAccess(org_admin) + assert admin_access.can_copy(wfjt) + + def test_copy_permissions_user(self, wfjt, org_admin, org_member): + ''' + Only org admins are able to add WFJTs, so only org admins + are able to copy them + ''' + wfjt.admin_role.members.add(org_member) + member_access = WorkflowJobTemplateAccess(org_member) + assert not member_access.can_copy(wfjt) + + def test_workflow_copy_warnings_inv(self, wfjt, rando, inventory): + ''' + The user `rando` does not have access to the prompted inventory in a + node inside the workflow; test that this information is surfaced + ''' + wfjt.workflow_job_template_nodes.create(inventory=inventory) + access = WorkflowJobTemplateAccess(rando, save_messages=True) + assert not access.can_copy(wfjt) + warnings = access.messages + assert 'inventories_unable_to_copy' in warnings + + def test_workflow_copy_warnings_jt(self, wfjt, rando, job_template): + wfjt.workflow_job_template_nodes.create(unified_job_template=job_template) + access = WorkflowJobTemplateAccess(rando, save_messages=True) + assert not access.can_copy(wfjt) + warnings = access.messages + assert 'templates_unable_to_copy' in warnings diff --git a/awx/main/tests/functional/utils/__init__.py b/awx/main/tests/functional/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/tests/job_base.py b/awx/main/tests/job_base.py index d215c0fb25..d24d19611a 100644 --- a/awx/main/tests/job_base.py +++ b/awx/main/tests/job_base.py @@ -12,6 +12,7 @@ TEST_PLAYBOOK = '''- hosts: all command: test 1 = 1 ''' + class BaseJobTestMixin(BaseTestMixin): @@ -616,12 +617,12 @@ class BaseJobTestMixin(BaseTestMixin): def setUp(self): super(BaseJobTestMixin, self).setUp() - self.start_redis() + self.start_rabbit() self.setup_instances() self.populate() self.start_queue() def tearDown(self): super(BaseJobTestMixin, self).tearDown() - self.stop_redis() + self.stop_rabbit() self.terminate_queue() diff --git
a/awx/main/tests/manual/workflows/linear.py b/awx/main/tests/manual/workflows/linear.py new file mode 100755 index 0000000000..7375d8e9ab --- /dev/null +++ b/awx/main/tests/manual/workflows/linear.py @@ -0,0 +1,43 @@ +# AWX +from awx.main.models import ( + WorkflowJobTemplateNode, + WorkflowJobTemplate, +) +from awx.main.models.jobs import JobTemplate + + +def do_init_workflow(job_template_success, job_template_fail, job_template_never): + wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="linear workflow") + wfjt.delete() + wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="linear workflow") + print(wfjt.id) + WorkflowJobTemplateNode.objects.all().delete() + if created: + nodes_success = [] + nodes_fail = [] + nodes_never = [] + for i in range(0, 2): + nodes_success.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_success)) + nodes_fail.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_fail)) + nodes_never.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_never)) + nodes_never.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_never)) + nodes_fail[1].delete() + + nodes_success[0].success_nodes.add(nodes_fail[0]) + nodes_success[0].failure_nodes.add(nodes_never[0]) + + nodes_fail[0].failure_nodes.add(nodes_success[1]) + nodes_fail[0].success_nodes.add(nodes_never[1]) + + nodes_success[1].failure_nodes.add(nodes_never[2]) + + +def do_init(): + jt_success = JobTemplate.objects.get(id=5) + jt_fail = JobTemplate.objects.get(id=6) + jt_never = JobTemplate.objects.get(id=7) + do_init_workflow(jt_success, jt_fail, jt_never) + + +if __name__ == "__main__": + do_init() diff --git a/awx/main/tests/manual/workflows/linear.svg b/awx/main/tests/manual/workflows/linear.svg new file mode 100644 index 0000000000..521cc0a9f2 --- /dev/null +++ b/awx/main/tests/manual/workflows/linear.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/awx/main/tests/manual/workflows/parallel.py b/awx/main/tests/manual/workflows/parallel.py new file mode 100755 index 0000000000..ff4973f8b4 --- /dev/null +++ b/awx/main/tests/manual/workflows/parallel.py @@ -0,0 +1,48 @@ +# AWX +from awx.main.models import ( + WorkflowJobTemplateNode, + WorkflowJobTemplate, +) +from awx.main.models.jobs import JobTemplate + + +def do_init_workflow(job_template_success, job_template_fail, job_template_never, jts_parallel): + wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="parallel workflow") + wfjt.delete() + wfjt, created = WorkflowJobTemplate.objects.get_or_create(name="parallel workflow") + print(wfjt.id) + WorkflowJobTemplateNode.objects.all().delete() + if created: + node_success = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_success) + + nodes_never = [] + for x in range(0, 3): + nodes_never.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=job_template_never)) + + nodes_parallel = [] + for jt in jts_parallel: + nodes_parallel.append(WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=jt)) + + node_success.success_nodes.add(nodes_parallel[0]) + node_success.success_nodes.add(nodes_parallel[1]) + node_success.success_nodes.add(nodes_parallel[2]) + + # Add a failure node for each parallel node + for i, n in
enumerate(nodes_parallel): + n.failure_nodes.add(nodes_never[i]) + + +def do_init(): + jt_success = JobTemplate.objects.get(id=5) + jt_fail = JobTemplate.objects.get(id=6) + jt_never = JobTemplate.objects.get(id=7) + + jt_parallel = [] + jt_parallel.append(JobTemplate.objects.get(id=16)) + jt_parallel.append(JobTemplate.objects.get(id=17)) + jt_parallel.append(JobTemplate.objects.get(id=18)) + do_init_workflow(jt_success, jt_fail, jt_never, jt_parallel) + + +if __name__ == "__main__": + do_init() diff --git a/awx/main/tests/manual/workflows/parallel.svg b/awx/main/tests/manual/workflows/parallel.svg new file mode 100644 index 0000000000..7d480f7308 --- /dev/null +++ b/awx/main/tests/manual/workflows/parallel.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/awx/main/tests/old/ad_hoc.py b/awx/main/tests/old/ad_hoc.py index 2c81ec71a0..8da0e33d24 100644 --- a/awx/main/tests/old/ad_hoc.py +++ b/awx/main/tests/old/ad_hoc.py @@ -20,7 +20,6 @@ from crum import impersonate # AWX from awx.main.utils import * # noqa from awx.main.models import * # noqa -from awx.main.conf import tower_settings from awx.main.tests.base import BaseJobExecutionTest from awx.main.tests.data.ssh import ( TEST_SSH_KEY_DATA, @@ -30,6 +29,7 @@ from awx.main.tests.data.ssh import ( __all__ = ['RunAdHocCommandTest', 'AdHocCommandApiTest'] + class BaseAdHocCommandTest(BaseJobExecutionTest): ''' Common initialization for testing ad hoc commands. @@ -320,19 +320,19 @@ class RunAdHocCommandTest(BaseAdHocCommandTest): self.assertIn('ssh-agent', ad_hoc_command.job_args) self.assertNotIn('Bad passphrase', ad_hoc_command.result_stdout) - def test_run_with_proot(self): - # Only run test if proot is installed - cmd = [getattr(settings, 'AWX_PROOT_CMD', 'proot'), '--version'] + def test_run_with_bubblewrap(self): + # Only run test if bubblewrap is installed + cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version'] try: proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc.communicate() - has_proot = bool(proc.returncode == 0) + has_bubblewrap = bool(proc.returncode == 0) except (OSError, ValueError): - has_proot = False - if not has_proot: - self.skipTest('proot is not installed') - # Enable proot for this test. + has_bubblewrap = False + if not has_bubblewrap: + self.skipTest('bubblewrap is not installed') + # Enable bubblewrap for this test. settings.AWX_PROOT_ENABLED = True # Hide local settings path. settings.AWX_PROOT_HIDE_PATHS = [os.path.join(settings.BASE_DIR, 'settings')] @@ -363,8 +363,8 @@ class RunAdHocCommandTest(BaseAdHocCommandTest): self.check_ad_hoc_command_events(ad_hoc_command, 'ok') @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('failed', 0)) - def test_run_with_proot_not_installed(self, ignore): - # Enable proot for this test, specify invalid proot cmd. + def test_run_with_bubblewrap_not_installed(self, ignore): - # Enable bubblewrap for this test, specify invalid bubblewrap cmd. settings.AWX_PROOT_ENABLED = True settings.AWX_PROOT_CMD = 'PR00T' ad_hoc_command = self.create_test_ad_hoc_command() @@ -378,6 +378,7 @@ class RunAdHocCommandTest(BaseAdHocCommandTest): def run_pexpect_mock(self, *args, **kwargs): return 'successful', 0 + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class AdHocCommandApiTest(BaseAdHocCommandTest): ''' @@ -572,14 +573,14 @@ class AdHocCommandApiTest(BaseAdHocCommandTest): # Try to relaunch ad hoc command when module has been removed from # allowed list of modules.
try: - ad_hoc_commands = tower_settings.AD_HOC_COMMANDS - tower_settings.AD_HOC_COMMANDS = [] + ad_hoc_commands = settings.AD_HOC_COMMANDS + settings.AD_HOC_COMMANDS = [] with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['passwords_needed_to_start'], []) response = self.post(url, {}, expect=400) finally: - tower_settings.AD_HOC_COMMANDS = ad_hoc_commands + settings.AD_HOC_COMMANDS = ad_hoc_commands # Try to relaunch after the inventory has been marked inactive. self.inventory.delete() diff --git a/awx/main/tests/old/api/decorator_paginated.py b/awx/main/tests/old/api/decorator_paginated.py deleted file mode 100644 index 39444590e7..0000000000 --- a/awx/main/tests/old/api/decorator_paginated.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -import json - -from django.test import TestCase - -from rest_framework.permissions import AllowAny -from rest_framework.test import APIRequestFactory -from rest_framework.views import APIView - -from awx.api.utils.decorators import paginated - - -class PaginatedDecoratorTests(TestCase): - """A set of tests for ensuring that the "paginated" decorator works - in the way we expect. - """ - def setUp(self): - self.rf = APIRequestFactory() - - # Define an uninteresting view that we can use to test - # that the paginator wraps in the way we expect. - class View(APIView): - permission_classes = (AllowAny,) - - @paginated - def get(self, request, limit, ordering, offset): - return ['a', 'b', 'c', 'd', 'e'], 26, None - self.view = View.as_view() - - def test_implicit_first_page(self): - """Establish that if we get an implicit request for the first page - (e.g. no page provided), that it is returned appropriately. - """ - # Create a request, and run the paginated function. - request = self.rf.get('/dummy/', {'page_size': 5}) - response = self.view(request) - - # Ensure the response looks like what it should. - r = json.loads(response.rendered_content) - self.assertEqual(r['count'], 26) - self.assertEqual(r['next'], '/dummy/?page=2&page_size=5') - self.assertEqual(r['previous'], None) - self.assertEqual(r['results'], ['a', 'b', 'c', 'd', 'e']) - - def test_mid_page(self): - """Establish that if we get a request for a page in the middle, that - the paginator causes next and prev to be set appropriately. - """ - # Create a request, and run the paginated function. - request = self.rf.get('/dummy/', {'page': 3, 'page_size': 5}) - response = self.view(request) - - # Ensure the response looks like what it should. - r = json.loads(response.rendered_content) - self.assertEqual(r['count'], 26) - self.assertEqual(r['next'], '/dummy/?page=4&page_size=5') - self.assertEqual(r['previous'], '/dummy/?page=2&page_size=5') - self.assertEqual(r['results'], ['a', 'b', 'c', 'd', 'e']) - - def test_last_page(self): - """Establish that if we get a request for the last page, that the - paginator picks up on it and sets `next` to None. - """ - # Create a request, and run the paginated function. - request = self.rf.get('/dummy/', {'page': 6, 'page_size': 5}) - response = self.view(request) - - # Ensure the response looks like what it should. 
- r = json.loads(response.rendered_content) - self.assertEqual(r['count'], 26) - self.assertEqual(r['next'], None) - self.assertEqual(r['previous'], '/dummy/?page=5&page_size=5') - self.assertEqual(r['results'], ['a', 'b', 'c', 'd', 'e']) diff --git a/awx/main/tests/old/commands/command_base.py b/awx/main/tests/old/commands/command_base.py index 2f1d795d27..f5e762cbee 100644 --- a/awx/main/tests/old/commands/command_base.py +++ b/awx/main/tests/old/commands/command_base.py @@ -13,6 +13,7 @@ from django.core.management import call_command from awx.main.models import * # noqa from awx.main.tests.base import BaseTestMixin + class BaseCommandMixin(BaseTestMixin): def create_test_inventories(self): self.setup_users() diff --git a/awx/main/tests/old/commands/commands_monolithic.py b/awx/main/tests/old/commands/commands_monolithic.py index 39632673c1..5a176e76bb 100644 --- a/awx/main/tests/old/commands/commands_monolithic.py +++ b/awx/main/tests/old/commands/commands_monolithic.py @@ -185,6 +185,7 @@ class BaseCommandMixin(object): sys.stderr = original_stderr return result, captured_stdout, captured_stderr + class CreateDefaultOrgTest(BaseCommandMixin, BaseTest): ''' Test cases for create_default_org management command. @@ -209,6 +210,7 @@ class CreateDefaultOrgTest(BaseCommandMixin, BaseTest): self.assertFalse('Default organization added' in stdout) self.assertEqual(Organization.objects.count(), 1) + class DumpDataTest(BaseCommandMixin, BaseTest): ''' Test cases for dumpdata management command. @@ -223,6 +225,7 @@ class DumpDataTest(BaseCommandMixin, BaseTest): self.assertEqual(result, None) json.loads(stdout) + @override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, ANSIBLE_TRANSPORT='local') @@ -389,13 +392,13 @@ class CleanupActivityStreamTest(BaseCommandMixin, BaseTest): ''' def setUp(self): - self.start_redis() super(CleanupActivityStreamTest, self).setUp() + self.start_rabbit() self.create_test_inventories() def tearDown(self): + self.stop_rabbit() super(CleanupActivityStreamTest, self).tearDown() - self.stop_redis() def test_cleanup(self): # Should already have entries due to test case setup. With no @@ -457,7 +460,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest): def setUp(self): super(InventoryImportTest, self).setUp() - self.start_redis() + self.start_rabbit() self.setup_instances() self.create_test_inventories() self.create_test_ini() @@ -465,7 +468,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest): def tearDown(self): super(InventoryImportTest, self).tearDown() - self.stop_redis() + self.stop_rabbit() def create_test_ini(self, inv_dir=None, ini_content=None): ini_content = ini_content or TEST_INVENTORY_INI diff --git a/awx/main/tests/old/commands/run_socketio_service.py b/awx/main/tests/old/commands/run_socketio_service.py deleted file mode 100644 index be882d6b20..0000000000 --- a/awx/main/tests/old/commands/run_socketio_service.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. 
-# All Rights Reserved - -# Python -from mock import MagicMock, Mock - -# Django -from django.test import SimpleTestCase - -# AWX -from awx.fact.models.fact import * # noqa -from awx.main.management.commands.run_socketio_service import SocketSessionManager, SocketSession, SocketController - -__all__ = ['SocketSessionManagerUnitTest', 'SocketControllerUnitTest',] - -class WeakRefable(): - pass - -class SocketSessionManagerUnitTest(SimpleTestCase): - - def setUp(self): - self.session_manager = SocketSessionManager() - super(SocketSessionManagerUnitTest, self).setUp() - - def create_sessions(self, count, token_key=None): - self.sessions = [] - self.count = count - for i in range(0, count): - self.sessions.append(SocketSession(i, token_key or i, WeakRefable())) - self.session_manager.add_session(self.sessions[i]) - - def test_multiple_session_diff_token(self): - self.create_sessions(10) - - for s in self.sessions: - self.assertIn(s.token_key, self.session_manager.socket_session_token_key_map) - self.assertEqual(s, self.session_manager.socket_session_token_key_map[s.token_key][s.session_id]) - - - def test_multiple_session_same_token(self): - self.create_sessions(10, token_key='foo') - - sessions_dict = self.session_manager.lookup("foo") - self.assertEqual(len(sessions_dict), 10) - for s in self.sessions: - self.assertIn(s.session_id, sessions_dict) - self.assertEqual(s, sessions_dict[s.session_id]) - - def test_prune_sessions_max(self): - self.create_sessions(self.session_manager.SESSIONS_MAX + 10) - - self.assertEqual(len(self.session_manager.socket_sessions), self.session_manager.SESSIONS_MAX) - - -class SocketControllerUnitTest(SimpleTestCase): - - def setUp(self): - self.socket_controller = SocketController(SocketSessionManager()) - server = Mock() - self.socket_controller.set_server(server) - super(SocketControllerUnitTest, self).setUp() - - def create_clients(self, count, token_key=None): - self.sessions = [] - self.sockets =[] - self.count = count - self.sockets_dict = {} - for i in range(0, count): - if isinstance(token_key, list): - token_key_actual = token_key[i] - else: - token_key_actual = token_key or i - socket = MagicMock(session=dict()) - socket_session = SocketSession(i, token_key_actual, socket) - self.sockets.append(socket) - self.sessions.append(socket_session) - self.sockets_dict[i] = socket - self.socket_controller.add_session(socket_session) - - socket.session['socket_session'] = socket_session - socket.send_packet = Mock() - self.socket_controller.server.sockets = self.sockets_dict - - def test_broadcast_packet(self): - self.create_clients(10) - packet = { - "hello": "world" - } - self.socket_controller.broadcast_packet(packet) - for s in self.sockets: - s.send_packet.assert_called_with(packet) - - def test_send_packet(self): - self.create_clients(5, token_key=[0, 1, 2, 3, 4]) - packet = { - "hello": "world" - } - self.socket_controller.send_packet(packet, 2) - self.assertEqual(0, len(self.sockets[0].send_packet.mock_calls)) - self.assertEqual(0, len(self.sockets[1].send_packet.mock_calls)) - self.sockets[2].send_packet.assert_called_once_with(packet) - self.assertEqual(0, len(self.sockets[3].send_packet.mock_calls)) - self.assertEqual(0, len(self.sockets[4].send_packet.mock_calls)) - - def test_send_packet_multiple_sessions_one_token(self): - self.create_clients(5, token_key=[0, 1, 1, 1, 2]) - packet = { - "hello": "world" - } - self.socket_controller.send_packet(packet, 1) - self.assertEqual(0, len(self.sockets[0].send_packet.mock_calls)) - 
self.sockets[1].send_packet.assert_called_once_with(packet) - self.sockets[2].send_packet.assert_called_once_with(packet) - self.sockets[3].send_packet.assert_called_once_with(packet) - self.assertEqual(0, len(self.sockets[4].send_packet.mock_calls)) - diff --git a/awx/main/tests/old/inventory.py b/awx/main/tests/old/inventory.py index efa952e64a..a2baf49fe6 100644 --- a/awx/main/tests/old/inventory.py +++ b/awx/main/tests/old/inventory.py @@ -39,11 +39,12 @@ inventory['group-\u037c\u03b4\u0138\u0137\u03cd\u03a1\u0121\u0137\u0138\u01a1']. print json.dumps(inventory) """ + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class InventoryTest(BaseTest): def setUp(self): - self.start_redis() + self.start_rabbit() super(InventoryTest, self).setUp() self.setup_instances() self.setup_users() @@ -63,7 +64,7 @@ class InventoryTest(BaseTest): def tearDown(self): super(InventoryTest, self).tearDown() - self.stop_redis() + self.stop_rabbit() def test_get_inventory_list(self): url = reverse('api:inventory_list') diff --git a/awx/main/tests/old/jobs/job_launch.py b/awx/main/tests/old/jobs/job_launch.py index dabf3568bc..5ffd8bdaf6 100644 --- a/awx/main/tests/old/jobs/job_launch.py +++ b/awx/main/tests/old/jobs/job_launch.py @@ -17,6 +17,7 @@ import yaml __all__ = ['JobTemplateLaunchTest', 'JobTemplateLaunchPasswordsTest'] + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase): def setUp(self): @@ -157,7 +158,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase): self.post(launch_url, {'credential_id': self.cred_sue.pk}, expect=403) def test_no_project_fail(self): - # Job Templates without projects can not be launched + # Job Templates without projects cannot be launched with self.current_user(self.user_sue): self.data['name'] = "missing proj" response = self.post(self.url, self.data, expect=201) @@ -169,7 +170,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase): self.post(launch_url2, {}, expect=400) def test_no_inventory_fail(self): - # Job Templates without inventory can not be launched + # Job Templates without inventory cannot be launched with self.current_user(self.user_sue): self.data['name'] = "missing inv" response = self.post(self.url, self.data, expect=201) @@ -186,6 +187,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase): with self.current_user(self.user_sue): self.post(self.launch_url, {}, expect=400) + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class JobTemplateLaunchPasswordsTest(BaseJobTestMixin, django.test.TransactionTestCase): def setUp(self): diff --git a/awx/main/tests/old/jobs/job_relaunch.py b/awx/main/tests/old/jobs/job_relaunch.py index 3a9e050288..5cb8f30cac 100644 --- a/awx/main/tests/old/jobs/job_relaunch.py +++ b/awx/main/tests/old/jobs/job_relaunch.py @@ -17,6 +17,7 @@ from awx.main.tests.job_base import BaseJobTestMixin __all__ = ['JobRelaunchTest',] + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class JobRelaunchTest(BaseJobTestMixin, BaseLiveServerTest): diff --git a/awx/main/tests/old/jobs/jobs_monolithic.py b/awx/main/tests/old/jobs/jobs_monolithic.py index b7321bb611..e17c7a5604 100644 --- a/awx/main/tests/old/jobs/jobs_monolithic.py +++ b/awx/main/tests/old/jobs/jobs_monolithic.py @@ -186,6 +186,7 @@ TEST_SURVEY_REQUIREMENTS = ''' } ''' + 
@unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class JobTemplateTest(BaseJobTestMixin, django.test.TransactionTestCase): @@ -485,7 +486,7 @@ class JobTemplateTest(BaseJobTestMixin, django.test.TransactionTestCase): data['credential'] = self.cred_sue.pk response = self.post(url, data, expect=402) self.create_test_license_file(features=dict(system_tracking=True)) - # Scan Jobs can not be created with survey enabled + # Scan Jobs cannot be created with survey enabled with self.current_user(self.user_sue): data['credential'] = self.cred_sue.pk data['survey_enabled'] = True @@ -505,6 +506,7 @@ class JobTemplateTest(BaseJobTestMixin, django.test.TransactionTestCase): with self.current_user(self.user_doug): self.get(detail_url, expect=403) + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class JobTest(BaseJobTestMixin, django.test.TransactionTestCase): @@ -658,6 +660,7 @@ class JobTest(BaseJobTestMixin, django.test.TransactionTestCase): # and that jobs come back nicely serialized with related resources and so on ... # that we can drill all the way down and can get at host failure lists, etc ... + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') @override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, @@ -1099,6 +1102,7 @@ class JobTransactionTest(BaseJobTestMixin, django.test.LiveServerTestCase): self.assertEqual(job.status, 'successful', job.result_stdout) self.assertFalse(errors) + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class JobTemplateSurveyTest(BaseJobTestMixin, django.test.TransactionTestCase): def setUp(self): diff --git a/awx/main/tests/old/jobs/start_cancel.py b/awx/main/tests/old/jobs/start_cancel.py index 3a6957af69..1067a618f3 100644 --- a/awx/main/tests/old/jobs/start_cancel.py +++ b/awx/main/tests/old/jobs/start_cancel.py @@ -17,6 +17,7 @@ from awx.main.tests.job_base import BaseJobTestMixin __all__ = ['JobStartCancelTest',] + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class JobStartCancelTest(BaseJobTestMixin, BaseLiveServerTest): diff --git a/awx/main/tests/old/jobs/survey_password.py b/awx/main/tests/old/jobs/survey_password.py index f7588301d8..64b3bfee06 100644 --- a/awx/main/tests/old/jobs/survey_password.py +++ b/awx/main/tests/old/jobs/survey_password.py @@ -141,6 +141,7 @@ TESTS = { } } + class SurveyPasswordBaseTest(BaseTest, QueueStartStopTestMixin): def setUp(self): super(SurveyPasswordBaseTest, self).setUp() @@ -176,6 +177,7 @@ class SurveyPasswordBaseTest(BaseTest, QueueStartStopTestMixin): url = reverse('api:job_detail', args=(job.pk,)) return self.get(url, expect=200, auth=self.get_super_credentials(), accept='application/json') + class SurveyPasswordRedactedTest(SurveyPasswordBaseTest): ''' Transpose TEST[]['tests'] to the below format. A more flat format." 
diff --git a/awx/main/tests/old/projects.py b/awx/main/tests/old/projects.py index 01c459b794..10510c5e24 100644 --- a/awx/main/tests/old/projects.py +++ b/awx/main/tests/old/projects.py @@ -37,6 +37,7 @@ TEST_PLAYBOOK = '''- hosts: mygroup command: test 1 = 1 ''' + class ProjectsTest(BaseTransactionTest): # tests for users, projects, and teams @@ -232,7 +233,8 @@ class ProjectsTest(BaseTransactionTest): 'name': 'My Test Project', 'description': 'Does amazing things', 'local_path': os.path.basename(project_dir), - 'scm_type': None, + 'scm_type': 'git', # must not be manual in order to schedule + 'scm_url': 'http://192.168.100.128.git', 'scm_update_on_launch': '', 'scm_delete_on_update': None, 'scm_clean': False, @@ -243,7 +245,7 @@ class ProjectsTest(BaseTransactionTest): # or an empty string for False, but save the value as a boolean. response = self.post(projects, project_data, expect=201, auth=self.get_super_credentials()) - self.assertEqual(response['scm_type'], u'') + self.assertEqual(response['scm_type'], u'git') self.assertEqual(response['scm_update_on_launch'], False) self.assertEqual(response['scm_delete_on_update'], False) self.assertEqual(response['scm_clean'], False) diff --git a/awx/main/tests/old/schedules.py b/awx/main/tests/old/schedules.py index 6433ee1351..8e114d7950 100644 --- a/awx/main/tests/old/schedules.py +++ b/awx/main/tests/old/schedules.py @@ -46,11 +46,13 @@ BAD_SCHEDULES = ["", "DTSTART:20140331T055000 RRULE:FREQ=MINUTELY;INTERVAL=10;CO "DTSTART:20140331T055000Z RRULE:FREQ=YEARLY;BYWEEKNO=10;INTERVAL=1", "DTSTART:20140331T055000Z RRULE:FREQ=HOURLY;INTERVAL=1 DTSTART:20140331T055000Z RRULE:FREQ=HOURLY;INTERVAL=1", "DTSTART:20140331T055000Z RRULE:FREQ=HOURLY;INTERVAL=1 RRULE:FREQ=HOURLY;INTERVAL=1"] + + class ScheduleTest(BaseTest): def setUp(self): super(ScheduleTest, self).setUp() - self.start_redis() + self.start_rabbit() self.setup_instances() self.setup_users() self.organizations = self.make_organizations(self.super_django_user, 2) @@ -92,7 +94,7 @@ class ScheduleTest(BaseTest): def tearDown(self): super(ScheduleTest, self).tearDown() - self.stop_redis() + self.stop_rabbit() def test_schedules_list(self): url = reverse('api:schedule_list') diff --git a/awx/main/tests/old/scripts.py b/awx/main/tests/old/scripts.py index e4cb90042d..7af019fd7c 100644 --- a/awx/main/tests/old/scripts.py +++ b/awx/main/tests/old/scripts.py @@ -14,6 +14,7 @@ from awx.main.tests.base import BaseLiveServerTest __all__ = ['InventoryScriptTest'] + class BaseScriptTest(BaseLiveServerTest): ''' Base class for tests that run external scripts to access the API. @@ -55,6 +56,7 @@ class BaseScriptTest(BaseLiveServerTest): stdout, stderr = proc.communicate() return proc.returncode, stdout, stderr + class InventoryScriptTest(BaseScriptTest): ''' Test helper to run management command as standalone script. 
@@ -62,7 +64,7 @@ class InventoryScriptTest(BaseScriptTest): def setUp(self): super(InventoryScriptTest, self).setUp() - self.start_redis() + self.start_rabbit() self.setup_instances() self.setup_users() self.organizations = self.make_organizations(self.super_django_user, 2) @@ -128,7 +130,7 @@ class InventoryScriptTest(BaseScriptTest): def tearDown(self): super(InventoryScriptTest, self).tearDown() - self.stop_redis() + self.stop_rabbit() def run_inventory_script(self, *args, **options): rest_api_url = self.live_server_url diff --git a/awx/main/tests/old/settings.py b/awx/main/tests/old/settings.py index 3c507ac992..a1614dac38 100644 --- a/awx/main/tests/old/settings.py +++ b/awx/main/tests/old/settings.py @@ -47,6 +47,7 @@ TEST_TOWER_SETTINGS_MANIFEST = { } } + @override_settings(TOWER_SETTINGS_MANIFEST=TEST_TOWER_SETTINGS_MANIFEST) @pytest.mark.skip(reason="Settings deferred to 3.1") class SettingsPlaceholder(BaseTest): diff --git a/awx/main/tests/old/tasks.py b/awx/main/tests/old/tasks.py index fdd30bf854..eba9bf552b 100644 --- a/awx/main/tests/old/tasks.py +++ b/awx/main/tests/old/tasks.py @@ -150,7 +150,7 @@ TEST_ASYNC_NOWAIT_PLAYBOOK = ''' ''' TEST_PROOT_PLAYBOOK = ''' -- name: test proot environment +- name: test bubblewrap environment hosts: test-group gather_facts: false connection: local @@ -237,6 +237,7 @@ TEST_VAULT_PLAYBOOK = '''$ANSIBLE_VAULT;1.1;AES256 TEST_VAULT_PASSWORD = '1234' + @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class RunJobTest(BaseJobExecutionTest): ''' @@ -1177,19 +1178,19 @@ class RunJobTest(BaseJobExecutionTest): @unittest.skipUnless(settings.BROKER_URL == 'redis://localhost/', 'Non-default Redis setup.') - def test_run_job_with_proot(self): - # Only run test if proot is installed - cmd = [getattr(settings, 'AWX_PROOT_CMD', 'proot'), '--version'] + def test_run_job_with_bubblewrap(self): + # Only run test if bubblewrap is installed + cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version'] try: proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc.communicate() - has_proot = bool(proc.returncode == 0) + has_bubblewrap = bool(proc.returncode == 0) except (OSError, ValueError): - has_proot = False - if not has_proot: - self.skipTest('proot is not installed') - # Enable proot for this test. + has_bubblewrap = False + if not has_bubblewrap: + self.skipTest('bubblewrap is not installed') + # Enable bubblewrap for this test. settings.AWX_PROOT_ENABLED = True # Hide local settings path. settings.AWX_PROOT_HIDE_PATHS = [os.path.join(settings.BASE_DIR, 'settings')] @@ -1227,8 +1228,8 @@ class RunJobTest(BaseJobExecutionTest): job = Job.objects.get(pk=job.pk) self.check_job_result(job, 'successful') - def test_run_job_with_proot_not_installed(self): - # Enable proot for this test, specify invalid proot cmd. + def test_run_job_with_bubblewrap_not_installed(self): + # Enable bubblewrap for this test, specify invalid bubblewrap cmd. 
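+        # 'PR00T' is an intentionally bogus command name, simulating a host without bubblewrap installed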
settings.AWX_PROOT_ENABLED = True settings.AWX_PROOT_CMD = 'PR00T' self.create_test_credential() diff --git a/awx/main/tests/old/users.py b/awx/main/tests/old/users.py index df2d5e19bc..154441d6fa 100644 --- a/awx/main/tests/old/users.py +++ b/awx/main/tests/old/users.py @@ -15,7 +15,6 @@ from django.test.utils import override_settings # AWX from awx.main.models import * # noqa from awx.main.tests.base import BaseTest -from awx.main.conf import tower_settings __all__ = ['AuthTokenTimeoutTest', 'AuthTokenLimitTest', 'AuthTokenProxyTest', 'UsersTest', 'LdapTest'] @@ -38,7 +37,8 @@ class AuthTokenTimeoutTest(BaseTest): response = self._generic_rest(dashboard_url, expect=200, method='get', return_response_object=True, client_kwargs=kwargs) self.assertIn('Auth-Token-Timeout', response) - self.assertEqual(response['Auth-Token-Timeout'], str(tower_settings.AUTH_TOKEN_EXPIRATION)) + self.assertEqual(response['Auth-Token-Timeout'], str(settings.AUTH_TOKEN_EXPIRATION)) + class AuthTokenLimitTest(BaseTest): def setUp(self): @@ -75,10 +75,11 @@ class AuthTokenLimitTest(BaseTest): response = self.get(user_me_url, expect=401, auth=auth_token1) self.assertEqual(AuthToken.reason_long('limit_reached'), response['detail']) -''' -Ensure ips from the X-Forwarded-For get honored and used in auth tokens -''' + class AuthTokenProxyTest(BaseTest): + ''' + Ensure IPs from the X-Forwarded-For header get honored and used in auth tokens + ''' def check_token_and_expires_exist(self, response): self.assertTrue('token' in response) self.assertTrue('expires' in response) @@ -163,6 +164,7 @@ class AuthTokenProxyTest(BaseTest): response = self._get_me(expect=200, auth=auth_token, remote_addr=remote_addr, client_kwargs=client_kwargs) self.check_me_is_admin(response) + class UsersTest(BaseTest): def collection(self): diff --git a/awx/main/tests/unit/__init__.py b/awx/main/tests/unit/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/tests/unit/api/serializers/__init__.py b/awx/main/tests/unit/api/serializers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/tests/unit/api/serializers/conftest.py b/awx/main/tests/unit/api/serializers/conftest.py new file mode 100644 index 0000000000..af35a4a002 --- /dev/null +++ b/awx/main/tests/unit/api/serializers/conftest.py @@ -0,0 +1,52 @@ + +import pytest + + +@pytest.fixture +def get_related_assert(): + def fn(model_obj, related, resource_name, related_resource_name): + assert related_resource_name in related + assert related[related_resource_name] == '/api/v1/%s/%d/%s/' % (resource_name, model_obj.pk, related_resource_name) + return fn + + +@pytest.fixture +def get_related_mock_and_run(): + def fn(serializer_class, model_obj): + serializer = serializer_class() + related = serializer.get_related(model_obj) + return related + return fn + + +@pytest.fixture +def test_get_related(get_related_assert, get_related_mock_and_run): + def fn(serializer_class, model_obj, resource_name, related_resource_name): + related = get_related_mock_and_run(serializer_class, model_obj) + get_related_assert(model_obj, related, resource_name, related_resource_name) + return related + return fn + + +@pytest.fixture +def get_summary_fields_assert(): + def fn(summary, summary_field_name): + assert summary_field_name in summary + return fn + + +@pytest.fixture +def get_summary_fields_mock_and_run(): + def fn(serializer_class, model_obj): + serializer = serializer_class() + return serializer.get_summary_fields(model_obj) + return fn + + +@pytest.fixture
+def test_get_summary_fields(get_summary_fields_mock_and_run, get_summary_fields_assert): + def fn(serializer_class, model_obj, summary_field_name): + summary = get_summary_fields_mock_and_run(serializer_class, model_obj) + get_summary_fields_assert(summary, summary_field_name) + return summary + return fn diff --git a/awx/main/tests/unit/api/serializers/test_inventory_serializers.py b/awx/main/tests/unit/api/serializers/test_inventory_serializers.py new file mode 100644 index 0000000000..e79ea7c10b --- /dev/null +++ b/awx/main/tests/unit/api/serializers/test_inventory_serializers.py @@ -0,0 +1,47 @@ +# Python +import pytest +import mock +from mock import PropertyMock + +# AWX +from awx.api.serializers import ( + CustomInventoryScriptSerializer, +) +from awx.main.models import ( + CustomInventoryScript, + User, +) + +#DRF +from rest_framework.request import Request +from rest_framework.test import ( + APIRequestFactory, + force_authenticate, +) + + +class TestCustomInventoryScriptSerializer(object): + @pytest.mark.parametrize("superuser,sysaudit,admin_role,value", + ((True, False, False, '#!/python'), + (False, True, False, '#!/python'), + (False, False, True, '#!/python'), + (False, False, False, None))) + def test_to_representation_orphan(self, superuser, sysaudit, admin_role, value): + with mock.patch.object(CustomInventoryScriptSerializer, 'get_summary_fields', return_value={}): + with mock.patch.object(User, 'is_system_auditor', return_value=sysaudit): + user = User(username="root", is_superuser=superuser) + roles = [user] if admin_role else [] + + with mock.patch('awx.main.models.CustomInventoryScript.admin_role', new_callable=PropertyMock, return_value=roles): + cis = CustomInventoryScript(pk=1, script=value) + serializer = CustomInventoryScriptSerializer() + + factory = APIRequestFactory() + wsgi_request = factory.post("/inventory_script/1", {'id':1}, format="json") + force_authenticate(wsgi_request, user) + + request = Request(wsgi_request) + serializer.context['request'] = request + + representation = serializer.to_representation(cis) + assert representation['script'] == value diff --git a/awx/main/tests/unit/api/serializers/test_job_serializers.py b/awx/main/tests/unit/api/serializers/test_job_serializers.py new file mode 100644 index 0000000000..fc1ae86a8d --- /dev/null +++ b/awx/main/tests/unit/api/serializers/test_job_serializers.py @@ -0,0 +1,103 @@ +# Python +import pytest +import mock +import json + +# AWX +from awx.api.serializers import ( + JobSerializer, + JobOptionsSerializer, +) + +from awx.main.models import ( + Label, + Job, +) + + +def mock_JT_resource_data(): + return ({}, []) + + +@pytest.fixture +def job_template(mocker): + mock_jt = mocker.MagicMock(pk=5) + mock_jt.resource_validation_data = mock_JT_resource_data + return mock_jt + + +@pytest.fixture +def project_update(mocker): + mock_pu = mocker.MagicMock(pk=1) + return mock_pu + + +@pytest.fixture +def job(mocker, job_template, project_update): + return mocker.MagicMock(pk=5, job_template=job_template, project_update=project_update, + workflow_job_id=None) + + +@pytest.fixture +def labels(mocker): + return [Label(id=x, name='label-%d' % x) for x in xrange(0, 25)] + + +@pytest.fixture +def jobs(mocker): + return [Job(id=x, name='job-%d' % x) for x in xrange(0, 25)] + + +@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {}) +@mock.patch('awx.api.serializers.JobOptionsSerializer.get_related', lambda x,y: {}) +class TestJobSerializerGetRelated(): + 
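+    # get_related on the parent serializers is patched to return {} above, so only links contributed by JobSerializer itself are checked here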
@pytest.mark.parametrize("related_resource_name", [ + 'job_events', + 'relaunch', + 'labels', + ]) + def test_get_related(self, test_get_related, job, related_resource_name): + test_get_related(JobSerializer, job, 'jobs', related_resource_name) + + def test_job_template_absent(self, job): + job.job_template = None + serializer = JobSerializer() + related = serializer.get_related(job) + assert 'job_template' not in related + + def test_job_template_present(self, get_related_mock_and_run, job): + related = get_related_mock_and_run(JobSerializer, job) + assert 'job_template' in related + assert related['job_template'] == '/api/v1/%s/%d/' % ('job_templates', job.job_template.pk) + + +@mock.patch('awx.api.serializers.BaseSerializer.to_representation', lambda self,obj: { + 'extra_vars': obj.extra_vars}) +class TestJobSerializerSubstitution(): + def test_survey_password_hide(self, mocker): + job = mocker.MagicMock(**{ + 'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}', + 'extra_vars.return_value': '{\"secret_key\": \"my_password\"}'}) + serializer = JobSerializer(job) + rep = serializer.to_representation(job) + extra_vars = json.loads(rep['extra_vars']) + assert extra_vars['secret_key'] == '$encrypted$' + job.display_extra_vars.assert_called_once_with() + assert 'my_password' not in extra_vars + + +@mock.patch('awx.api.serializers.BaseSerializer.get_summary_fields', lambda x,y: {}) +class TestJobOptionsSerializerGetSummaryFields(): + def test__summary_field_labels_10_max(self, mocker, job_template, labels): + job_template.labels.all = mocker.MagicMock(**{'order_by.return_value': labels}) + job_template.labels.all.return_value = job_template.labels.all + + serializer = JobOptionsSerializer() + summary_labels = serializer._summary_field_labels(job_template) + + job_template.labels.all.order_by.assert_called_with('name') + assert len(summary_labels['results']) == 10 + assert summary_labels['results'] == [{'id': x.id, 'name': x.name} for x in labels[:10]] + + def test_labels_exists(self, test_get_summary_fields, job_template): + test_get_summary_fields(JobOptionsSerializer, job_template, 'labels') diff --git a/awx/main/tests/unit/api/serializers/test_job_template_serializers.py b/awx/main/tests/unit/api/serializers/test_job_template_serializers.py new file mode 100644 index 0000000000..50d190693a --- /dev/null +++ b/awx/main/tests/unit/api/serializers/test_job_template_serializers.py @@ -0,0 +1,132 @@ +# Python +import pytest +import mock + +# AWX +from awx.api.serializers import ( + JobTemplateSerializer, +) +from awx.api.views import JobTemplateDetail +from awx.main.models import ( + Role, + User, + Job, + JobTemplate, +) +from rest_framework.test import APIRequestFactory + +#DRF +from rest_framework import serializers + + +def mock_JT_resource_data(): + return ({}, []) + + +@pytest.fixture +def job_template(mocker): + mock_jt = mocker.MagicMock(spec=JobTemplate) + mock_jt.pk = 5 + mock_jt.host_config_key = '9283920492' + mock_jt.resource_validation_data = mock_JT_resource_data + return mock_jt + + +@pytest.fixture +def job(mocker, job_template): + return mocker.MagicMock(pk=5, job_template=job_template) + + +@pytest.fixture +def jobs(mocker): + return [Job(id=x, name='job-%d' % x) for x in xrange(0, 25)] + + +@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {}) +@mock.patch('awx.api.serializers.JobOptionsSerializer.get_related', lambda x,y: {}) +class TestJobTemplateSerializerGetRelated(): + 
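+    # each related name below is checked against the /api/v1/job_templates/<pk>/<name>/ URL convention asserted by the conftest.py fixtures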
@pytest.mark.parametrize("related_resource_name", [ + 'jobs', + 'schedules', + 'activity_stream', + 'launch', + 'notification_templates_any', + 'notification_templates_success', + 'notification_templates_error', + 'survey_spec', + 'labels', + 'callback', + ]) + def test_get_related(self, test_get_related, job_template, related_resource_name): + test_get_related(JobTemplateSerializer, job_template, 'job_templates', related_resource_name) + + def test_callback_absent(self, get_related_mock_and_run, job_template): + job_template.host_config_key = None + related = get_related_mock_and_run(JobTemplateSerializer, job_template) + assert 'callback' not in related + + +class TestJobTemplateSerializerGetSummaryFields(): + def test__recent_jobs(self, mocker, job_template, jobs): + + job_template.jobs.all = mocker.MagicMock(**{'order_by.return_value': jobs}) + job_template.jobs.all.return_value = job_template.jobs.all + + serializer = JobTemplateSerializer() + recent_jobs = serializer._recent_jobs(job_template) + + job_template.jobs.all.assert_called_once_with() + job_template.jobs.all.order_by.assert_called_once_with('-created') + assert len(recent_jobs) == 10 + for x in jobs[:10]: + assert recent_jobs == [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in jobs[:10]] + + def test_survey_spec_exists(self, test_get_summary_fields, mocker, job_template): + job_template.survey_spec = {'name': 'blah', 'description': 'blah blah'} + test_get_summary_fields(JobTemplateSerializer, job_template, 'survey') + + def test_survey_spec_absent(self, get_summary_fields_mock_and_run, job_template): + job_template.survey_spec = None + summary = get_summary_fields_mock_and_run(JobTemplateSerializer, job_template) + assert 'survey' not in summary + + def test_copy_edit_standard(self, mocker, job_template_factory): + """Verify that the exact output of the access.py methods + are put into the serializer user_capabilities""" + + jt_obj = job_template_factory('testJT', project='proj1', persisted=False).job_template + jt_obj.id = 5 + jt_obj.admin_role = Role(id=9, role_field='admin_role') + jt_obj.execute_role = Role(id=8, role_field='execute_role') + jt_obj.read_role = Role(id=7, role_field='execute_role') + user = User(username="auser") + serializer = JobTemplateSerializer(job_template) + serializer.show_capabilities = ['copy', 'edit'] + serializer._summary_field_labels = lambda self: [] + serializer._recent_jobs = lambda self: [] + request = APIRequestFactory().get('/api/v1/job_templates/42/') + request.user = user + view = JobTemplateDetail() + view.request = request + serializer.context['view'] = view + + with mocker.patch("awx.api.serializers.role_summary_fields_generator", return_value='Can eat pie'): + with mocker.patch("awx.main.access.JobTemplateAccess.can_change", return_value='foobar'): + with mocker.patch("awx.main.access.JobTemplateAccess.can_add", return_value='foo'): + response = serializer.get_summary_fields(jt_obj) + + assert response['user_capabilities']['copy'] == 'foo' + assert response['user_capabilities']['edit'] == 'foobar' + + +class TestJobTemplateSerializerValidation(object): + good_extra_vars = ["{\"test\": \"keys\"}", "---\ntest: key"] + bad_extra_vars = ["{\"test\": \"keys\"", "---\ntest: [2"] + + def test_validate_extra_vars(self): + serializer = JobTemplateSerializer() + for ev in self.good_extra_vars: + serializer.validate_extra_vars(ev) + for ev in self.bad_extra_vars: + with pytest.raises(serializers.ValidationError): + serializer.validate_extra_vars(ev) diff --git 
a/awx/main/tests/unit/api/serializers/test_workflow_serializers.py b/awx/main/tests/unit/api/serializers/test_workflow_serializers.py new file mode 100644 index 0000000000..b8697db71f --- /dev/null +++ b/awx/main/tests/unit/api/serializers/test_workflow_serializers.py @@ -0,0 +1,195 @@ +# Python +import pytest +import mock + +# AWX +from awx.api.serializers import ( + WorkflowJobTemplateSerializer, + WorkflowNodeBaseSerializer, + WorkflowJobTemplateNodeSerializer, + WorkflowJobNodeSerializer, +) +from awx.main.models import ( + Job, + WorkflowJobTemplateNode, + WorkflowJob, + WorkflowJobNode, +) + + +@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {}) +class TestWorkflowJobTemplateSerializerGetRelated(): + @pytest.fixture + def workflow_job_template(self, workflow_job_template_factory): + wfjt = workflow_job_template_factory('hello world', persisted=False).workflow_job_template + wfjt.pk = 3 + return wfjt + + @pytest.mark.parametrize("related_resource_name", [ + 'workflow_jobs', + 'launch', + 'workflow_nodes', + ]) + def test_get_related(self, mocker, test_get_related, workflow_job_template, related_resource_name): + test_get_related(WorkflowJobTemplateSerializer, + workflow_job_template, + 'workflow_job_templates', + related_resource_name) + + +@mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x,y: {}) +class TestWorkflowNodeBaseSerializerGetRelated(): + @pytest.fixture + def job_template(self, job_template_factory): + jt = job_template_factory(name="blah", persisted=False).job_template + jt.pk = 1 + return jt + + @pytest.fixture + def workflow_job_template_node_related(self, job_template): + return WorkflowJobTemplateNode(pk=1, unified_job_template=job_template) + + @pytest.fixture + def workflow_job_template_node(self): + return WorkflowJobTemplateNode(pk=1) + + def test_workflow_unified_job_template_present(self, get_related_mock_and_run, workflow_job_template_node_related): + related = get_related_mock_and_run(WorkflowNodeBaseSerializer, workflow_job_template_node_related) + assert 'unified_job_template' in related + assert related['unified_job_template'] == '/api/v1/%s/%d/' % ('job_templates', workflow_job_template_node_related.unified_job_template.pk) + + def test_workflow_unified_job_template_absent(self, workflow_job_template_node): + related = WorkflowJobTemplateNodeSerializer().get_related(workflow_job_template_node) + assert 'unified_job_template' not in related + + +@mock.patch('awx.api.serializers.WorkflowNodeBaseSerializer.get_related', lambda x,y: {}) +class TestWorkflowJobTemplateNodeSerializerGetRelated(): + @pytest.fixture + def workflow_job_template_node(self): + return WorkflowJobTemplateNode(pk=1) + + @pytest.fixture + def workflow_job_template(self, workflow_job_template_factory): + wfjt = workflow_job_template_factory("bliggity", persisted=False).workflow_job_template + wfjt.pk = 1 + return wfjt + + @pytest.fixture + def job_template(self, job_template_factory): + jt = job_template_factory(name="blah", persisted=False).job_template + jt.pk = 1 + return jt + + @pytest.fixture + def workflow_job_template_node_related(self, workflow_job_template_node, workflow_job_template): + workflow_job_template_node.workflow_job_template = workflow_job_template + return workflow_job_template_node + + @pytest.mark.parametrize("related_resource_name", [ + 'success_nodes', + 'failure_nodes', + 'always_nodes', + ]) + def test_get_related(self, test_get_related, workflow_job_template_node, related_resource_name): + 
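# As in the other serializer suites, the injected test_get_related helper
# asserts that get_related() exposes each reverse URL in the canonical
# '/api/v1/<resource_name>/<pk>/<related_resource_name>/' form.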
test_get_related(WorkflowJobTemplateNodeSerializer, + workflow_job_template_node, + 'workflow_job_template_nodes', + related_resource_name) + + def test_workflow_job_template_present(self, get_related_mock_and_run, workflow_job_template_node_related): + related = get_related_mock_and_run(WorkflowJobTemplateNodeSerializer, workflow_job_template_node_related) + assert 'workflow_job_template' in related + assert related['workflow_job_template'] == '/api/v1/%s/%d/' % ('workflow_job_templates', workflow_job_template_node_related.workflow_job_template.pk) + + def test_workflow_job_template_absent(self, workflow_job_template_node): + related = WorkflowJobTemplateNodeSerializer().get_related(workflow_job_template_node) + assert 'workflow_job_template' not in related + + +class FakeView: + def __init__(self, obj): + self.obj = obj + + def get_object(self): + return self.obj + + +class FakeRequest: + pass + + +class TestWorkflowJobTemplateNodeSerializerCharPrompts(): + @pytest.fixture + def WFJT_serializer(self): + serializer = WorkflowJobTemplateNodeSerializer() + node = WorkflowJobTemplateNode(pk=1) + node.char_prompts = {'limit': 'webservers'} + serializer.instance = node + view = FakeView(node) + view.request = FakeRequest() + view.request.method = "PATCH" + serializer.context = {'view': view} + return serializer + + def test_change_single_field(self, WFJT_serializer): + "Test that a single prompt field can be changed without affecting other fields" + internal_value = WFJT_serializer.to_internal_value({'job_type': 'check'}) + assert internal_value['char_prompts']['job_type'] == 'check' + assert internal_value['char_prompts']['limit'] == 'webservers' + + def test_null_single_field(self, WFJT_serializer): + "Test that a single prompt field can be removed without affecting other fields" + internal_value = WFJT_serializer.to_internal_value({'job_type': None}) + assert 'job_type' not in internal_value['char_prompts'] + assert internal_value['char_prompts']['limit'] == 'webservers' + + +@mock.patch('awx.api.serializers.WorkflowNodeBaseSerializer.get_related', lambda x,y: {}) +class TestWorkflowJobNodeSerializerGetRelated(): + @pytest.fixture + def workflow_job_node(self): + return WorkflowJobNode(pk=1) + + @pytest.fixture + def workflow_job(self): + return WorkflowJob(pk=1) + + @pytest.fixture + def job(self): + return Job(name="blah", pk=1) + + @pytest.fixture + def workflow_job_node_related(self, workflow_job_node, workflow_job, job): + workflow_job_node.workflow_job = workflow_job + workflow_job_node.job = job + return workflow_job_node + + @pytest.mark.parametrize("related_resource_name", [ + 'success_nodes', + 'failure_nodes', + 'always_nodes', + ]) + def test_get_related(self, test_get_related, workflow_job_node, related_resource_name): + test_get_related(WorkflowJobNodeSerializer, + workflow_job_node, + 'workflow_job_nodes', + related_resource_name) + + def test_workflow_job_present(self, get_related_mock_and_run, workflow_job_node_related): + related = get_related_mock_and_run(WorkflowJobNodeSerializer, workflow_job_node_related) + assert 'workflow_job' in related + assert related['workflow_job'] == '/api/v1/%s/%d/' % ('workflow_jobs', workflow_job_node_related.workflow_job.pk) + + def test_workflow_job_absent(self, workflow_job_node): + related = WorkflowJobNodeSerializer().get_related(workflow_job_node) + assert 'workflow_job' not in related + + def test_job_present(self, get_related_mock_and_run, workflow_job_node_related): + related = get_related_mock_and_run(WorkflowJobNodeSerializer, 
workflow_job_node_related) + assert 'job' in related + assert related['job'] == '/api/v1/%s/%d/' % ('jobs', workflow_job_node_related.job.pk) + + def test_job_absent(self, workflow_job_node): + related = WorkflowJobNodeSerializer().get_related(workflow_job_node) + assert 'job' not in related diff --git a/awx/main/tests/unit/api/test_filters.py b/awx/main/tests/unit/api/test_filters.py index 55ef257567..6570ada6f7 100644 --- a/awx/main/tests/unit/api/test_filters.py +++ b/awx/main/tests/unit/api/test_filters.py @@ -2,21 +2,28 @@ import pytest from rest_framework.exceptions import PermissionDenied from awx.api.filters import FieldLookupBackend -from awx.main.models import Credential, JobTemplate +from awx.main.models import (AdHocCommand, AuthToken, CustomInventoryScript, + Credential, Job, JobTemplate, SystemJob, + UnifiedJob, User, WorkflowJob, + WorkflowJobTemplate, WorkflowJobOptions) +from awx.main.models.jobs import JobOptions + @pytest.mark.parametrize(u"empty_value", [u'', '']) def test_empty_in(empty_value): field_lookup = FieldLookupBackend() with pytest.raises(ValueError) as excinfo: - field_lookup.value_to_python(JobTemplate, 'project__in', empty_value) + field_lookup.value_to_python(JobTemplate, 'project__name__in', empty_value) assert 'empty value for __in' in str(excinfo.value) + @pytest.mark.parametrize(u"valid_value", [u'foo', u'foo,']) def test_valid_in(valid_value): field_lookup = FieldLookupBackend() - value, new_lookup = field_lookup.value_to_python(JobTemplate, 'project__in', valid_value) + value, new_lookup = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value) assert 'foo' in value + @pytest.mark.parametrize('lookup_suffix', ['', 'contains', 'startswith', 'in']) @pytest.mark.parametrize('password_field', Credential.PASSWORD_FIELDS) def test_filter_on_password_field(password_field, lookup_suffix): @@ -26,6 +33,7 @@ def test_filter_on_password_field(password_field, lookup_suffix): field, new_lookup = field_lookup.get_field_from_lookup(Credential, lookup) assert 'not allowed' in str(excinfo.value) + @pytest.mark.parametrize('lookup_suffix', ['', 'contains', 'startswith', 'in']) @pytest.mark.parametrize('password_field', Credential.PASSWORD_FIELDS) def test_filter_on_related_password_field(password_field, lookup_suffix): @@ -34,3 +42,28 @@ def test_filter_on_related_password_field(password_field, lookup_suffix): with pytest.raises(PermissionDenied) as excinfo: field, new_lookup = field_lookup.get_field_from_lookup(JobTemplate, lookup) assert 'not allowed' in str(excinfo.value) + + +@pytest.mark.parametrize('model, query', [ + (AuthToken, 'request_hash__icontains'), + (User, 'password__icontains'), + (User, 'auth_tokens__key__icontains'), + (User, 'settings__value__icontains'), + (UnifiedJob, 'job_args__icontains'), + (UnifiedJob, 'job_env__icontains'), + (UnifiedJob, 'start_args__icontains'), + (AdHocCommand, 'extra_vars__icontains'), + (JobOptions, 'extra_vars__icontains'), + (SystemJob, 'extra_vars__icontains'), + (WorkflowJobOptions, 'extra_vars__icontains'), + (Job, 'survey_passwords__icontains'), + (WorkflowJob, 'survey_passwords__icontains'), + (JobTemplate, 'survey_spec__icontains'), + (WorkflowJobTemplate, 'survey_spec__icontains'), + (CustomInventoryScript, 'script__icontains') +]) +def test_filter_sensitive_fields_and_relations(model, query): + field_lookup = FieldLookupBackend() + with pytest.raises(PermissionDenied) as excinfo: + field, new_lookup = field_lookup.get_field_from_lookup(model, query) + assert 'not allowed' in 
str(excinfo.value) diff --git a/awx/main/tests/unit/api/test_generics.py b/awx/main/tests/unit/api/test_generics.py index 289b4547a8..579440b201 100644 --- a/awx/main/tests/unit/api/test_generics.py +++ b/awx/main/tests/unit/api/test_generics.py @@ -6,25 +6,41 @@ import mock # DRF from rest_framework import status from rest_framework.response import Response +from rest_framework.exceptions import PermissionDenied # AWX -from awx.api.generics import ParentMixin, SubListCreateAttachDetachAPIView, DeleteLastUnattachLabelMixin +from awx.api.generics import ( + ParentMixin, + SubListCreateAttachDetachAPIView, + DeleteLastUnattachLabelMixin, + ResourceAccessList +) +from awx.main.models import Organization + @pytest.fixture def get_object_or_404(mocker): # pytest patch without return_value generates a random value, we are counting on this return mocker.patch('awx.api.generics.get_object_or_404') + @pytest.fixture def get_object_or_400(mocker): return mocker.patch('awx.api.generics.get_object_or_400') + @pytest.fixture def mock_response_new(mocker): m = mocker.patch('awx.api.generics.Response.__new__') m.return_value = m return m + +@pytest.fixture +def mock_organization(): + return Organization(pk=4, name="Unsaved Org") + + @pytest.fixture def parent_relationship_factory(mocker): def rf(serializer_class, relationship_name, relationship_value=mocker.Mock()): @@ -38,6 +54,7 @@ def parent_relationship_factory(mocker): return (serializer, mock_parent_relationship) return rf + # TODO: Test create and associate failure (i.e. id doesn't exist, record already exists, permission denied) # TODO: Mock and check return (Response) class TestSubListCreateAttachDetachAPIView: @@ -122,6 +139,7 @@ class TestSubListCreateAttachDetachAPIView: view.unattach_validate.assert_called_with(mock_request) view.unattach_by_id.assert_not_called() + class TestDeleteLastUnattachLabelMixin: @mock.patch('__builtin__.super') def test_unattach_ok(self, super, mocker): @@ -159,6 +177,7 @@ class TestDeleteLastUnattachLabelMixin: super.unattach_validate.assert_called_with(mock_request) assert mock_response == res + class TestParentMixin: def test_get_parent_object(self, mocker, get_object_or_404): parent_mixin = ParentMixin() @@ -168,7 +187,40 @@ class TestParentMixin: mock_parent_mixin = mocker.MagicMock(wraps=parent_mixin) return_value = mock_parent_mixin.get_parent_object() - + get_object_or_404.assert_called_with(parent_mixin.parent_model, **parent_mixin.kwargs) assert get_object_or_404.return_value == return_value - + + +class TestResourceAccessList: + + def mock_request(self): + return mock.MagicMock( + user=mock.MagicMock( + is_anonymous=mock.MagicMock(return_value=False), + is_superuser=False + ), method='GET') + + + def mock_view(self): + view = ResourceAccessList() + view.parent_model = Organization + view.kwargs = {'pk': 4} + return view + + + def test_parent_access_check_failed(self, mocker, mock_organization): + with mocker.patch('awx.api.permissions.get_object_or_400', return_value=mock_organization): + mock_access = mocker.MagicMock(__name__='for logger', return_value=False) + with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access): + with pytest.raises(PermissionDenied): + self.mock_view().check_permissions(self.mock_request()) + mock_access.assert_called_once_with(mock_organization) + + + def test_parent_access_check_worked(self, mocker, mock_organization): + with mocker.patch('awx.api.permissions.get_object_or_400', return_value=mock_organization): + mock_access = mocker.MagicMock(__name__='for 
logger', return_value=True) + with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access): + self.mock_view().check_permissions(self.mock_request()) + mock_access.assert_called_once_with(mock_organization) diff --git a/awx/main/tests/unit/api/test_roles.py b/awx/main/tests/unit/api/test_roles.py index 2dd6b57675..5cb49a92b0 100644 --- a/awx/main/tests/unit/api/test_roles.py +++ b/awx/main/tests/unit/api/test_roles.py @@ -19,7 +19,7 @@ from awx.main.models import ( Role, ) -@pytest.mark.skip(reason="Seeing pk error, suspect weirdness in mocking requests") + @pytest.mark.parametrize("pk, err", [ (111, "not change the membership"), (1, "may not perform"), @@ -38,18 +38,18 @@ def test_user_roles_list_user_admin_role(pk, err): factory = APIRequestFactory() view = UserRolesList.as_view() - user = User(username="root", is_superuser=True) + user = User(username="root", is_superuser=True, pk=1, id=1) request = factory.post("/user/1/roles", {'id':pk}, format="json") force_authenticate(request, user) - response = view(request) + response = view(request, pk=user.pk) response.render() assert response.status_code == 403 assert err in response.content -@pytest.mark.skip(reason="db access or mocking needed for new tests in role assignment code") + @pytest.mark.parametrize("admin_role, err", [ (True, "may not perform"), (False, "not change the membership"), @@ -70,15 +70,19 @@ def test_role_users_list_other_user_admin_role(admin_role, err): view = RoleUsersList.as_view() user = User(username="root", is_superuser=True, pk=1, id=1) + queried_user = User(username="maynard") + request = factory.post("/role/1/users", {'id':1}, format="json") force_authenticate(request, user) - response = view(request) + with mock.patch('awx.api.views.get_object_or_400', return_value=queried_user): + response = view(request) response.render() assert response.status_code == 403 assert err in response.content + def test_team_roles_list_post_org_roles(): with mock.patch('awx.api.views.get_object_or_400') as role_get, \ mock.patch('awx.api.views.ContentType.objects.get_for_model') as ct_get: diff --git a/awx/main/tests/unit/api/test_serializers.py b/awx/main/tests/unit/api/test_serializers.py deleted file mode 100644 index 2496ba9a2d..0000000000 --- a/awx/main/tests/unit/api/test_serializers.py +++ /dev/null @@ -1,235 +0,0 @@ -# Python -import pytest -import mock -from mock import PropertyMock -import json - -# AWX -from awx.api.serializers import ( - JobTemplateSerializer, - JobSerializer, - JobOptionsSerializer, - CustomInventoryScriptSerializer, -) -from awx.main.models import ( - Label, - Job, - CustomInventoryScript, - User, -) - -#DRF -from rest_framework.request import Request -from rest_framework import serializers -from rest_framework.test import ( - APIRequestFactory, - force_authenticate, -) - - -def mock_JT_resource_data(): - return ({}, []) - -@pytest.fixture -def job_template(mocker): - mock_jt = mocker.MagicMock(pk=5) - mock_jt.resource_validation_data = mock_JT_resource_data - return mock_jt - -@pytest.fixture -def job(mocker, job_template): - return mocker.MagicMock(pk=5, job_template=job_template) - -@pytest.fixture -def labels(mocker): - return [Label(id=x, name='label-%d' % x) for x in xrange(0, 25)] - -@pytest.fixture -def jobs(mocker): - return [Job(id=x, name='job-%d' % x) for x in xrange(0, 25)] - -class GetRelatedMixin: - def _assert(self, model_obj, related, resource_name, related_resource_name): - assert related_resource_name in related - assert related[related_resource_name] == 
'/api/v1/%s/%d/%s/' % (resource_name, model_obj.pk, related_resource_name) - - def _mock_and_run(self, serializer_class, model_obj): - serializer = serializer_class() - related = serializer.get_related(model_obj) - return related - - def _test_get_related(self, serializer_class, model_obj, resource_name, related_resource_name): - related = self._mock_and_run(serializer_class, model_obj) - self._assert(model_obj, related, resource_name, related_resource_name) - return related - -class GetSummaryFieldsMixin: - def _assert(self, summary, summary_field_name): - assert summary_field_name in summary - - def _mock_and_run(self, serializer_class, model_obj): - serializer = serializer_class() - return serializer.get_summary_fields(model_obj) - - def _test_get_summary_fields(self, serializer_class, model_obj, summary_field_name): - summary = self._mock_and_run(serializer_class, model_obj) - self._assert(summary, summary_field_name) - return summary - -@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {}) -@mock.patch('awx.api.serializers.JobOptionsSerializer.get_related', lambda x,y: {}) -class TestJobTemplateSerializerGetRelated(GetRelatedMixin): - @pytest.mark.parametrize("related_resource_name", [ - 'jobs', - 'schedules', - 'activity_stream', - 'launch', - 'notification_templates_any', - 'notification_templates_success', - 'notification_templates_error', - 'survey_spec', - 'labels', - 'callback', - ]) - def test_get_related(self, job_template, related_resource_name): - self._test_get_related(JobTemplateSerializer, job_template, 'job_templates', related_resource_name) - - def test_callback_absent(self, job_template): - job_template.host_config_key = None - related = self._mock_and_run(JobTemplateSerializer, job_template) - assert 'callback' not in related - -class TestJobTemplateSerializerGetSummaryFields(GetSummaryFieldsMixin): - def test__recent_jobs(self, mocker, job_template, jobs): - - job_template.jobs.all = mocker.MagicMock(**{'order_by.return_value': jobs}) - job_template.jobs.all.return_value = job_template.jobs.all - - serializer = JobTemplateSerializer() - recent_jobs = serializer._recent_jobs(job_template) - - job_template.jobs.all.assert_called_once_with() - job_template.jobs.all.order_by.assert_called_once_with('-created') - assert len(recent_jobs) == 10 - for x in jobs[:10]: - assert recent_jobs == [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in jobs[:10]] - - def test_survey_spec_exists(self, mocker, job_template): - job_template.survey_spec = {'name': 'blah', 'description': 'blah blah'} - self._test_get_summary_fields(JobTemplateSerializer, job_template, 'survey') - - def test_survey_spec_absent(self, mocker, job_template): - job_template.survey_spec = None - summary = self._mock_and_run(JobTemplateSerializer, job_template) - assert 'survey' not in summary - - @pytest.mark.skip(reason="RBAC needs to land") - def test_can_copy_true(self, mocker, job_template): - pass - - @pytest.mark.skip(reason="RBAC needs to land") - def test_can_copy_false(self, mocker, job_template): - pass - - @pytest.mark.skip(reason="RBAC needs to land") - def test_can_edit_true(self, mocker, job_template): - pass - - @pytest.mark.skip(reason="RBAC needs to land") - def test_can_edit_false(self, mocker, job_template): - pass - -@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x,y: {}) -@mock.patch('awx.api.serializers.JobOptionsSerializer.get_related', lambda x,y: {}) -class 
TestJobSerializerGetRelated(GetRelatedMixin): - @pytest.mark.parametrize("related_resource_name", [ - 'job_events', - 'job_plays', - 'job_tasks', - 'relaunch', - 'labels', - ]) - def test_get_related(self, mocker, job, related_resource_name): - self._test_get_related(JobSerializer, job, 'jobs', related_resource_name) - - def test_job_template_absent(self, mocker, job): - job.job_template = None - serializer = JobSerializer() - related = serializer.get_related(job) - assert 'job_template' not in related - - def test_job_template_present(self, job): - related = self._mock_and_run(JobSerializer, job) - assert 'job_template' in related - assert related['job_template'] == '/api/v1/%s/%d/' % ('job_templates', job.job_template.pk) - -@mock.patch('awx.api.serializers.BaseSerializer.to_representation', lambda self,obj: { - 'extra_vars': obj.extra_vars}) -class TestJobSerializerSubstitution(): - - def test_survey_password_hide(self, mocker): - job = mocker.MagicMock(**{ - 'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}', - 'extra_vars.return_value': '{\"secret_key\": \"my_password\"}'}) - serializer = JobSerializer(job) - rep = serializer.to_representation(job) - extra_vars = json.loads(rep['extra_vars']) - assert extra_vars['secret_key'] == '$encrypted$' - job.display_extra_vars.assert_called_once_with() - assert 'my_password' not in extra_vars - -@mock.patch('awx.api.serializers.BaseSerializer.get_summary_fields', lambda x,y: {}) -class TestJobOptionsSerializerGetSummaryFields(GetSummaryFieldsMixin): - def test__summary_field_labels_10_max(self, mocker, job_template, labels): - job_template.labels.all = mocker.MagicMock(**{'order_by.return_value': labels}) - job_template.labels.all.return_value = job_template.labels.all - - serializer = JobOptionsSerializer() - summary_labels = serializer._summary_field_labels(job_template) - - job_template.labels.all.order_by.assert_called_with('name') - assert len(summary_labels['results']) == 10 - assert summary_labels['results'] == [{'id': x.id, 'name': x.name} for x in labels[:10]] - - def test_labels_exists(self, mocker, job_template): - self._test_get_summary_fields(JobOptionsSerializer, job_template, 'labels') - -class TestJobTemplateSerializerValidation(object): - - good_extra_vars = ["{\"test\": \"keys\"}", "---\ntest: key"] - bad_extra_vars = ["{\"test\": \"keys\"", "---\ntest: [2"] - - def test_validate_extra_vars(self): - serializer = JobTemplateSerializer() - for ev in self.good_extra_vars: - serializer.validate_extra_vars(ev) - for ev in self.bad_extra_vars: - with pytest.raises(serializers.ValidationError): - serializer.validate_extra_vars(ev) - -class TestCustomInventoryScriptSerializer(object): - - @pytest.mark.parametrize("superuser,sysaudit,admin_role,value", - ((True, False, False, '#!/python'), - (False, True, False, '#!/python'), - (False, False, True, '#!/python'), - (False, False, False, None))) - def test_to_representation_orphan(self, superuser, sysaudit, admin_role, value): - with mock.patch.object(CustomInventoryScriptSerializer, 'get_summary_fields', return_value={}): - User.add_to_class('is_system_auditor', sysaudit) - user = User(username="root", is_superuser=superuser) - roles = [user] if admin_role else [] - - with mock.patch('awx.main.models.CustomInventoryScript.admin_role', new_callable=PropertyMock, return_value=roles): - cis = CustomInventoryScript(pk=1, script='#!/python') - serializer = CustomInventoryScriptSerializer() - - factory = APIRequestFactory() - wsgi_request = 
factory.post("/inventory_script/1", {'id':1}, format="json") - force_authenticate(wsgi_request, user) - - request = Request(wsgi_request) - serializer.context['request'] = request - - representation = serializer.to_representation(cis) - assert representation['script'] == value diff --git a/awx/main/tests/unit/api/test_views.py b/awx/main/tests/unit/api/test_views.py index 6a97831f02..3bea19cedd 100644 --- a/awx/main/tests/unit/api/test_views.py +++ b/awx/main/tests/unit/api/test_views.py @@ -1,17 +1,22 @@ import mock import pytest +from collections import namedtuple + from awx.api.views import ( ApiV1RootView, JobTemplateLabelList, + JobTemplateSurveySpec, ) + @pytest.fixture def mock_response_new(mocker): m = mocker.patch('awx.api.views.Response.__new__') m.return_value = m return m + class TestApiV1RootView: def test_get_endpoints(self, mocker, mock_response_new): endpoints = [ @@ -43,6 +48,8 @@ class TestApiV1RootView: 'unified_job_templates', 'unified_jobs', 'activity_stream', + 'workflow_job_templates', + 'workflow_jobs', ] view = ApiV1RootView() ret = view.get(mocker.MagicMock()) @@ -52,6 +59,7 @@ class TestApiV1RootView: for endpoint in endpoints: assert endpoint in data_arg + class TestJobTemplateLabelList: def test_inherited_mixin_unattach(self): with mock.patch('awx.api.generics.DeleteLastUnattachLabelMixin.unattach') as mixin_unattach: @@ -60,3 +68,16 @@ class TestJobTemplateLabelList: super(JobTemplateLabelList, view).unattach(mock_request, None, None) assert mixin_unattach.called_with(mock_request, None, None) + + +class TestJobTemplateSurveySpec(object): + @mock.patch('awx.api.views.feature_enabled', lambda feature: True) + def test_get_password_type(self, mocker, mock_response_new): + JobTemplate = namedtuple('JobTemplate', 'survey_spec') + obj = JobTemplate(survey_spec={'spec':[{'type': 'password', 'default': 'my_default'}]}) + with mocker.patch.object(JobTemplateSurveySpec, 'get_object', return_value=obj): + view = JobTemplateSurveySpec() + response = view.get(mocker.MagicMock()) + assert response == mock_response_new + # wish there was a better way to do this!
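# (mock_response_new patches Response.__new__, so call_args[0] below holds the
# positional arguments of that call -- index 0 the class, index 1 the data
# payload in which the view should have masked the plaintext survey default.)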
+ assert response.call_args[0][1]['spec'][0]['default'] == '$encrypted$' diff --git a/awx/main/tests/unit/conftest.py b/awx/main/tests/unit/conftest.py new file mode 100644 index 0000000000..fab8214ed3 --- /dev/null +++ b/awx/main/tests/unit/conftest.py @@ -0,0 +1,6 @@ +import pytest + + +@pytest.fixture(autouse=True) +def _disable_database_settings(mocker): + mocker.patch('awx.conf.settings.SettingsWrapper._get_supported_settings', return_value=[]) diff --git a/awx/main/tests/unit/models/test_job_template_unit.py b/awx/main/tests/unit/models/test_job_template_unit.py index a156d1e920..194ce68cef 100644 --- a/awx/main/tests/unit/models/test_job_template_unit.py +++ b/awx/main/tests/unit/models/test_job_template_unit.py @@ -14,6 +14,7 @@ def test_missing_project_error(job_template_factory): validation_errors, resources_needed_to_start = obj.resource_validation_data() assert 'project' in validation_errors + def test_inventory_credential_need_to_start(job_template_factory): objects = job_template_factory( 'job-template-few-resources', @@ -23,6 +24,7 @@ def test_inventory_credential_need_to_start(job_template_factory): assert 'inventory' in obj.resources_needed_to_start assert 'credential' in obj.resources_needed_to_start + def test_inventory_credential_contradictions(job_template_factory): objects = job_template_factory( 'job-template-paradox', @@ -35,6 +37,7 @@ def test_inventory_credential_contradictions(job_template_factory): assert 'inventory' in validation_errors assert 'credential' in validation_errors + def test_survey_answers_as_string(job_template_factory): objects = job_template_factory( 'job-template-with-survey', @@ -45,8 +48,70 @@ def test_survey_answers_as_string(job_template_factory): accepted, ignored = jt._accept_or_ignore_job_kwargs(extra_vars=user_extra_vars) assert 'var1' in accepted['extra_vars'] + @pytest.mark.survey def test_job_template_survey_password_redaction(job_template_with_survey_passwords_unit): """Tests the JobTemplate model's function to redact passwords from extra_vars - used when creating a new job""" assert job_template_with_survey_passwords_unit.survey_password_variables() == ['secret_key', 'SSN'] + + +def test_job_template_survey_variable_validation(job_template_factory): + objects = job_template_factory( + 'survey_variable_validation', + organization='org1', + inventory='inventory1', + credential='cred1', + persisted=False, + ) + obj = objects.job_template + obj.survey_spec = { + "description": "", + "spec": [ + { + "required": True, + "min": 0, + "default": "5", + "max": 1024, + "question_description": "", + "choices": "", + "variable": "a", + "question_name": "Whosyourdaddy", + "type": "text" + } + ], + "name": "" + } + obj.survey_enabled = True + assert obj.survey_variable_validation({"a": 5}) == ["Value 5 for 'a' expected to be a string."] + + +def test_job_template_survey_mixin(job_template_factory): + objects = job_template_factory( + 'survey_mixin_test', + organization='org1', + inventory='inventory1', + credential='cred1', + persisted=False, + ) + obj = objects.job_template + obj.survey_enabled = True + obj.survey_spec = {'spec': [{'default':'my_default', 'type':'password', 'variable':'my_variable'}]} + kwargs = obj._update_unified_job_kwargs(extra_vars={'my_variable':'$encrypted$'}) + assert kwargs['extra_vars'] == '{"my_variable": "my_default"}' + + +def test_job_template_survey_mixin_length(job_template_factory): + objects = job_template_factory( + 'survey_mixin_test', + organization='org1', + inventory='inventory1', + credential='cred1',
persisted=False, + ) + obj = objects.job_template + obj.survey_enabled = True + obj.survey_spec = {'spec': [{'default':'my_default', 'type':'password', 'variable':'my_variable'}, + {'type':'password', 'variable':'my_other_variable'}]} + kwargs = obj._update_unified_job_kwargs(extra_vars={'my_variable':'$encrypted$'}) + assert kwargs['extra_vars'] == '{"my_variable": "my_default"}' diff --git a/awx/main/tests/unit/models/test_job_unit.py b/awx/main/tests/unit/models/test_job_unit.py deleted file mode 100644 index 1b66681dcf..0000000000 --- a/awx/main/tests/unit/models/test_job_unit.py +++ /dev/null @@ -1,50 +0,0 @@ -import pytest -import json - -from awx.main.tasks import RunJob -from awx.main.models import Job - - -@pytest.fixture -def job(mocker): - return mocker.MagicMock(**{ - 'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}', - 'extra_vars_dict': {"secret_key": "my_password"}, - 'pk': 1, 'job_template.pk': 1, 'job_template.name': '', - 'created_by.pk': 1, 'created_by.username': 'admin', - 'launch_type': 'manual'}) - -@pytest.mark.survey -def test_job_survey_password_redaction(): - """Tests the Job model's funciton to redact passwords from - extra_vars - used when displaying job information""" - job = Job( - name="test-job-with-passwords", - extra_vars=json.dumps({ - 'submitter_email': 'foobar@redhat.com', - 'secret_key': '6kQngg3h8lgiSTvIEb21', - 'SSN': '123-45-6789'}), - survey_passwords={ - 'secret_key': '$encrypted$', - 'SSN': '$encrypted$'}) - assert json.loads(job.display_extra_vars()) == { - 'submitter_email': 'foobar@redhat.com', - 'secret_key': '$encrypted$', - 'SSN': '$encrypted$'} - -def test_job_safe_args_redacted_passwords(job): - """Verify that safe_args hides passwords in the job extra_vars""" - kwargs = {'ansible_version': '2.1'} - run_job = RunJob() - safe_args = run_job.build_safe_args(job, **kwargs) - ev_index = safe_args.index('-e') + 1 - extra_vars = json.loads(safe_args[ev_index]) - assert extra_vars['secret_key'] == '$encrypted$' - -def test_job_args_unredacted_passwords(job): - kwargs = {'ansible_version': '2.1'} - run_job = RunJob() - args = run_job.build_args(job, **kwargs) - ev_index = args.index('-e') + 1 - extra_vars = json.loads(args[ev_index]) - assert extra_vars['secret_key'] == 'my_password' diff --git a/awx/main/tests/unit/models/test_label.py b/awx/main/tests/unit/models/test_label.py index 20da73e9ad..ecbdcb94fb 100644 --- a/awx/main/tests/unit/models/test_label.py +++ b/awx/main/tests/unit/models/test_label.py @@ -1,65 +1,65 @@ import pytest +import mock from awx.main.models.label import Label from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob -def test_get_orphaned_labels(mocker): - mock_query_set = mocker.MagicMock() - Label.objects.filter = mocker.MagicMock(return_value=mock_query_set) +mock_query_set = mock.MagicMock() - ret = Label.get_orphaned_labels() +mock_objects = mock.MagicMock(filter=mock.MagicMock(return_value=mock_query_set)) - assert mock_query_set == ret - Label.objects.filter.assert_called_with(organization=None, jobtemplate_labels__isnull=True) -def test_is_detached(mocker): - mock_query_set = mocker.MagicMock() - Label.objects.filter = mocker.MagicMock(return_value=mock_query_set) - mock_query_set.count.return_value = 1 +@mock.patch('awx.main.models.label.Label.objects', mock_objects) +class TestLabelFilterMocked: + def test_get_orphaned_labels(self, mocker): + ret = Label.get_orphaned_labels() - label = Label(id=37) - ret = label.is_detached() + assert mock_query_set == ret + 
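# The expected filter kwargs below track the reverse-relation rename made in
# this hunk: the deleted assertion filtered on jobtemplate_labels__isnull,
# while the rewritten test expects unifiedjobtemplate_labels__isnull.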
Label.objects.filter.assert_called_with(organization=None, unifiedjobtemplate_labels__isnull=True) - assert ret is True - Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True) - mock_query_set.count.assert_called_with() + def test_is_detached(self, mocker): + mock_query_set.count.return_value = 1 -def test_is_detached_not(mocker): - mock_query_set = mocker.MagicMock() - Label.objects.filter = mocker.MagicMock(return_value=mock_query_set) - mock_query_set.count.return_value = 0 + label = Label(id=37) + ret = label.is_detached() - label = Label(id=37) - ret = label.is_detached() + assert ret is True + Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True) + mock_query_set.count.assert_called_with() - assert ret is False - Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True) - mock_query_set.count.assert_called_with() + def test_is_detached_not(self, mocker): + mock_query_set.count.return_value = 0 -@pytest.mark.parametrize("jt_count,j_count,expected", [ - (1, 0, True), - (0, 1, True), - (1, 1, False), -]) -def test_is_candidate_for_detach(mocker, jt_count, j_count, expected): - mock_job_qs = mocker.MagicMock() - mock_job_qs.count = mocker.MagicMock(return_value=j_count) - UnifiedJob.objects = mocker.MagicMock() - UnifiedJob.objects.filter = mocker.MagicMock(return_value=mock_job_qs) + label = Label(id=37) + ret = label.is_detached() - mock_jt_qs = mocker.MagicMock() - mock_jt_qs.count = mocker.MagicMock(return_value=jt_count) - UnifiedJobTemplate.objects = mocker.MagicMock() - UnifiedJobTemplate.objects.filter = mocker.MagicMock(return_value=mock_jt_qs) + assert ret is False + Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True) + mock_query_set.count.assert_called_with() - label = Label(id=37) - ret = label.is_candidate_for_detach() + @pytest.mark.parametrize("jt_count,j_count,expected", [ + (1, 0, True), + (0, 1, True), + (1, 1, False), + ]) + def test_is_candidate_for_detach(self, mocker, jt_count, j_count, expected): + mock_job_qs = mocker.MagicMock() + mock_job_qs.count = mocker.MagicMock(return_value=j_count) + mocker.patch.object(UnifiedJob, 'objects', mocker.MagicMock( + filter=mocker.MagicMock(return_value=mock_job_qs))) - UnifiedJob.objects.filter.assert_called_with(labels__in=[label.id]) - UnifiedJobTemplate.objects.filter.assert_called_with(labels__in=[label.id]) - mock_job_qs.count.assert_called_with() - mock_jt_qs.count.assert_called_with() + mock_jt_qs = mocker.MagicMock() + mock_jt_qs.count = mocker.MagicMock(return_value=jt_count) + mocker.patch.object(UnifiedJobTemplate, 'objects', mocker.MagicMock( + filter=mocker.MagicMock(return_value=mock_jt_qs))) - assert ret is expected + label = Label(id=37) + ret = label.is_candidate_for_detach() + UnifiedJob.objects.filter.assert_called_with(labels__in=[label.id]) + UnifiedJobTemplate.objects.filter.assert_called_with(labels__in=[label.id]) + mock_job_qs.count.assert_called_with() + mock_jt_qs.count.assert_called_with() + + assert ret is expected diff --git a/awx/main/tests/unit/models/test_survey_models.py b/awx/main/tests/unit/models/test_survey_models.py new file mode 100644 index 0000000000..584a4cc7f0 --- /dev/null +++ b/awx/main/tests/unit/models/test_survey_models.py @@ -0,0 +1,111 @@ +import pytest +import json + +from awx.main.tasks import RunJob +from 
awx.main.models import ( + Job, + WorkflowJobTemplate +) + + +@pytest.fixture +def job(mocker): + ret = mocker.MagicMock(**{ + 'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}', + 'extra_vars_dict': {"secret_key": "my_password"}, + 'pk': 1, 'job_template.pk': 1, 'job_template.name': '', + 'created_by.pk': 1, 'created_by.username': 'admin', + 'launch_type': 'manual'}) + ret.project = mocker.MagicMock(scm_revision='asdf1234') + return ret + + +@pytest.fixture +def job_with_survey(): + return Job( + name="test-job-with-passwords", + extra_vars=json.dumps({ + 'submitter_email': 'foobar@redhat.com', + 'secret_key': '6kQngg3h8lgiSTvIEb21', + 'SSN': '123-45-6789'}), + survey_passwords={ + 'secret_key': '$encrypted$', + 'SSN': '$encrypted$'}) + + +@pytest.mark.survey +def test_job_survey_password_redaction(job_with_survey): + """Tests the Job model's function to redact passwords from + extra_vars - used when displaying job information""" + assert json.loads(job_with_survey.display_extra_vars()) == { + 'submitter_email': 'foobar@redhat.com', + 'secret_key': '$encrypted$', + 'SSN': '$encrypted$'} + + +@pytest.mark.survey +def test_survey_passwords_not_in_extra_vars(): + """Tests that survey passwords not included in extra_vars are + not included when displaying job information""" + job = Job( + name="test-survey-not-in", + extra_vars=json.dumps({ + 'submitter_email': 'foobar@redhat.com'}), + survey_passwords={ + 'secret_key': '$encrypted$', + 'SSN': '$encrypted$'}) + assert json.loads(job.display_extra_vars()) == { + 'submitter_email': 'foobar@redhat.com', + } + + +def test_job_safe_args_redacted_passwords(job): + """Verify that safe_args hides passwords in the job extra_vars""" + kwargs = {'ansible_version': '2.1'} + run_job = RunJob() + safe_args = run_job.build_safe_args(job, **kwargs) + ev_index = safe_args.index('-e') + 1 + extra_vars = json.loads(safe_args[ev_index]) + assert extra_vars['secret_key'] == '$encrypted$' + + +def test_job_args_unredacted_passwords(job): + kwargs = {'ansible_version': '2.1'} + run_job = RunJob() + args = run_job.build_args(job, **kwargs) + ev_index = args.index('-e') + 1 + extra_vars = json.loads(args[ev_index]) + assert extra_vars['secret_key'] == 'my_password' + + +class TestWorkflowSurveys: + def test_update_kwargs_survey_defaults(self, survey_spec_factory): + "Assure that the survey default overrides a JT variable" + spec = survey_spec_factory('var1') + spec['spec'][0]['default'] = 3 + spec['spec'][0]['required'] = False + wfjt = WorkflowJobTemplate( + name="test-wfjt", + survey_spec=spec, + survey_enabled=True, + extra_vars="var1: 5" + ) + updated_extra_vars = wfjt._update_unified_job_kwargs() + assert 'extra_vars' in updated_extra_vars + assert json.loads(updated_extra_vars['extra_vars'])['var1'] == 3 + assert wfjt.can_start_without_user_input() + + def test_variables_needed_to_start(self, survey_spec_factory): + "Assure that variables_needed_to_start output contains mandatory vars" + spec = survey_spec_factory(['question1', 'question2', 'question3']) + spec['spec'][0]['required'] = False + spec['spec'][1]['required'] = True + spec['spec'][2]['required'] = False + wfjt = WorkflowJobTemplate( + name="test-wfjt", + survey_spec=spec, + survey_enabled=True, + extra_vars="question2: hiworld" + ) + assert wfjt.variables_needed_to_start == ['question2'] + assert not wfjt.can_start_without_user_input() diff --git a/awx/main/tests/unit/models/test_unified_job_unit.py b/awx/main/tests/unit/models/test_unified_job_unit.py new file mode 100644 index
0000000000..af8833482a --- /dev/null +++ b/awx/main/tests/unit/models/test_unified_job_unit.py @@ -0,0 +1,16 @@ +import mock + +from awx.main.models import ( + UnifiedJob, + WorkflowJob, + WorkflowJobNode, +) + + +def test_unified_job_workflow_attributes(): + with mock.patch('django.db.ConnectionRouter.db_for_write'): + job = UnifiedJob(id=1, name="job-1", launch_type="workflow") + job.unified_job_node = WorkflowJobNode(workflow_job=WorkflowJob(pk=1)) + + assert job.spawned_by_workflow is True + assert job.workflow_job_id == 1 diff --git a/awx/main/tests/unit/models/test_workflow_unit.py b/awx/main/tests/unit/models/test_workflow_unit.py new file mode 100644 index 0000000000..ce288dced1 --- /dev/null +++ b/awx/main/tests/unit/models/test_workflow_unit.py @@ -0,0 +1,241 @@ +import pytest + +from awx.main.models.jobs import JobTemplate +from awx.main.models import Inventory, Credential, Project +from awx.main.models.workflow import ( + WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowJobOptions, + WorkflowJob, WorkflowJobNode +) +import mock + + +class TestWorkflowJobInheritNodesMixin(): + class TestCreateWorkflowJobNodes(): + @pytest.fixture + def job_templates(self): + return [JobTemplate() for i in range(0, 10)] + + @pytest.fixture + def job_template_nodes(self, job_templates): + return [WorkflowJobTemplateNode(unified_job_template=job_templates[i]) for i in range(0, 10)] + + def test__create_workflow_job_nodes(self, mocker, job_template_nodes): + workflow_job_node_create = mocker.patch('awx.main.models.WorkflowJobTemplateNode.create_workflow_job_node') + + mixin = WorkflowJobOptions() + mixin._create_workflow_nodes(job_template_nodes) + + for job_template_node in job_template_nodes: + workflow_job_node_create.assert_any_call(workflow_job=mixin) + + class TestMapWorkflowJobNodes(): + @pytest.fixture + def job_template_nodes(self): + return [WorkflowJobTemplateNode(id=i) for i in range(0, 20)] + + @pytest.fixture + def job_nodes(self): + return [WorkflowJobNode(id=i) for i in range(100, 120)] + + def test__map_workflow_job_nodes(self, job_template_nodes, job_nodes, mocker): + mixin = WorkflowJob() + wj_node = WorkflowJobNode() + mocker.patch('awx.main.models.workflow.WorkflowJobTemplateNode.create_workflow_job_node', + return_value=wj_node) + + node_ids_map = mixin._create_workflow_nodes(job_template_nodes, user=None) + assert len(node_ids_map) == len(job_template_nodes) + + for i, job_template_node in enumerate(job_template_nodes): + assert node_ids_map[job_template_node.id] == wj_node + + class TestInheritRelationship(): + @pytest.fixture + def job_template_nodes(self, mocker): + nodes = [mocker.MagicMock(id=i, pk=i) for i in range(0, 10)] + + for i in range(0, 9): + nodes[i].success_nodes = mocker.MagicMock( + all=mocker.MagicMock(return_value=[mocker.MagicMock(id=i + 1, pk=i + 1)])) + nodes[i].always_nodes = mocker.MagicMock(all=mocker.MagicMock(return_value=[])) + nodes[i].failure_nodes = mocker.MagicMock(all=mocker.MagicMock(return_value=[])) + new_wj_node = mocker.MagicMock(success_nodes=mocker.MagicMock()) + nodes[i].create_workflow_job_node = mocker.MagicMock(return_value=new_wj_node) + + return nodes + + @pytest.fixture + def job_nodes(self, mocker): + nodes = [mocker.MagicMock(id=i) for i in range(100, 110)] + return nodes + + @pytest.fixture + def job_nodes_dict(self, job_nodes): + _map = {} + for n in job_nodes: + _map[n.id] = n + return _map + + + def test__inherit_relationship(self, mocker, job_template_nodes, job_nodes, job_nodes_dict): + wj = WorkflowJob() + + 
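# The two calls below mirror how a workflow job is spawned from its template:
# _create_workflow_nodes copies each WorkflowJobTemplateNode into a
# WorkflowJobNode, returning a map keyed by template-node id, and
# _inherit_node_relationships then rebuilds the success/failure/always edges
# between the copies using that map.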
node_ids_map = wj._create_workflow_nodes(job_template_nodes) + wj._inherit_node_relationships(job_template_nodes, node_ids_map) + + for i in range(0, 8): + node_ids_map[i].success_nodes.add.assert_any_call(node_ids_map[i + 1]) + + +@pytest.fixture +def workflow_job_unit(): + return WorkflowJob(name='workflow', status='new') + + +@pytest.fixture +def workflow_job_template_unit(): + return WorkflowJobTemplate(name='workflow') + + +@pytest.fixture +def jt_ask(job_template_factory): + # note: factory sets ask_xxxx_on_launch to true for inventory & credential + jt = job_template_factory(name='example-jt', persisted=False).job_template + jt.ask_job_type_on_launch = True + jt.ask_skip_tags_on_launch = True + jt.ask_limit_on_launch = True + jt.ask_tags_on_launch = True + return jt + + +@pytest.fixture +def project_unit(): + return Project(name='example-proj') + + +example_prompts = dict(job_type='check', job_tags='quack', limit='duck', skip_tags='oink') + + +@pytest.fixture +def job_node_no_prompts(workflow_job_unit, jt_ask): + return WorkflowJobNode(workflow_job=workflow_job_unit, unified_job_template=jt_ask) + + +@pytest.fixture +def job_node_with_prompts(job_node_no_prompts): + job_node_no_prompts.char_prompts = example_prompts + job_node_no_prompts.inventory = Inventory(name='example-inv') + job_node_no_prompts.credential = Credential(name='example-inv', kind='ssh', username='asdf', password='asdf') + return job_node_no_prompts + + +@pytest.fixture +def wfjt_node_no_prompts(workflow_job_template_unit, jt_ask): + return WorkflowJobTemplateNode(workflow_job_template=workflow_job_template_unit, unified_job_template=jt_ask) + + +@pytest.fixture +def wfjt_node_with_prompts(wfjt_node_no_prompts): + wfjt_node_no_prompts.char_prompts = example_prompts + wfjt_node_no_prompts.inventory = Inventory(name='example-inv') + wfjt_node_no_prompts.credential = Credential(name='example-inv', kind='ssh', username='asdf', password='asdf') + return wfjt_node_no_prompts + + +class TestWorkflowJobCreate: + def test_create_no_prompts(self, wfjt_node_no_prompts, workflow_job_unit, mocker): + mock_create = mocker.MagicMock() + with mocker.patch('awx.main.models.WorkflowJobNode.objects.create', mock_create): + wfjt_node_no_prompts.create_workflow_job_node(workflow_job=workflow_job_unit) + mock_create.assert_called_once_with( + char_prompts=wfjt_node_no_prompts.char_prompts, + inventory=None, credential=None, + unified_job_template=wfjt_node_no_prompts.unified_job_template, + workflow_job=workflow_job_unit) + + def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, mocker): + mock_create = mocker.MagicMock() + with mocker.patch('awx.main.models.WorkflowJobNode.objects.create', mock_create): + wfjt_node_with_prompts.create_workflow_job_node(workflow_job=workflow_job_unit) + mock_create.assert_called_once_with( + char_prompts=wfjt_node_with_prompts.char_prompts, + inventory=wfjt_node_with_prompts.inventory, + credential=wfjt_node_with_prompts.credential, + unified_job_template=wfjt_node_with_prompts.unified_job_template, + workflow_job=workflow_job_unit) + + +@mock.patch('awx.main.models.workflow.WorkflowNodeBase.get_parent_nodes', lambda self: []) +class TestWorkflowJobNodeJobKWARGS: + """ + Tests for building the keyword arguments that go into creating and + launching a new job that corresponds to a workflow node. 
+ """ + kwargs_base = {'launch_type': 'workflow'} + + def test_null_kwargs(self, job_node_no_prompts): + assert job_node_no_prompts.get_job_kwargs() == self.kwargs_base + + def test_inherit_workflow_job_extra_vars(self, job_node_no_prompts): + workflow_job = job_node_no_prompts.workflow_job + workflow_job.extra_vars = '{"a": 84}' + assert job_node_no_prompts.get_job_kwargs() == dict( + extra_vars={'a': 84}, **self.kwargs_base) + + def test_char_prompts_and_res_node_prompts(self, job_node_with_prompts): + expect_kwargs = dict( + inventory=job_node_with_prompts.inventory.pk, + credential=job_node_with_prompts.credential.pk, + **example_prompts) + expect_kwargs.update(self.kwargs_base) + assert job_node_with_prompts.get_job_kwargs() == expect_kwargs + + def test_reject_some_node_prompts(self, job_node_with_prompts): + job_node_with_prompts.unified_job_template.ask_inventory_on_launch = False + job_node_with_prompts.unified_job_template.ask_job_type_on_launch = False + expect_kwargs = dict(inventory=job_node_with_prompts.inventory.pk, + credential=job_node_with_prompts.credential.pk, + **example_prompts) + expect_kwargs.update(self.kwargs_base) + expect_kwargs.pop('inventory') + expect_kwargs.pop('job_type') + assert job_node_with_prompts.get_job_kwargs() == expect_kwargs + + def test_no_accepted_project_node_prompts(self, job_node_no_prompts, project_unit): + job_node_no_prompts.unified_job_template = project_unit + assert job_node_no_prompts.get_job_kwargs() == self.kwargs_base + + +class TestWorkflowWarnings: + """ + Tests of warnings that show user errors in the construction of a workflow + """ + + def test_no_warn_project_node_no_prompts(self, job_node_no_prompts, project_unit): + job_node_no_prompts.unified_job_template = project_unit + assert job_node_no_prompts.get_prompts_warnings() == {} + + def test_warn_project_node_reject_all_prompts(self, job_node_with_prompts, project_unit): + job_node_with_prompts.unified_job_template = project_unit + assert 'ignored' in job_node_with_prompts.get_prompts_warnings() + assert 'all' in job_node_with_prompts.get_prompts_warnings()['ignored'] + + def test_no_warn_accept_all_prompts(self, job_node_with_prompts): + assert job_node_with_prompts.get_prompts_warnings() == {} + + def test_warn_reject_some_prompts(self, job_node_with_prompts): + job_node_with_prompts.unified_job_template.ask_credential_on_launch = False + job_node_with_prompts.unified_job_template.ask_job_type_on_launch = False + assert 'ignored' in job_node_with_prompts.get_prompts_warnings() + assert 'job_type' in job_node_with_prompts.get_prompts_warnings()['ignored'] + assert 'credential' in job_node_with_prompts.get_prompts_warnings()['ignored'] + assert len(job_node_with_prompts.get_prompts_warnings()['ignored']) == 2 + + def test_warn_scan_errors_node_prompts(self, job_node_with_prompts): + job_node_with_prompts.unified_job_template.job_type = 'scan' + job_node_with_prompts.char_prompts['job_type'] = 'run' + job_node_with_prompts.inventory = Inventory(name='different-inventory', pk=23) + assert 'ignored' in job_node_with_prompts.get_prompts_warnings() + assert 'job_type' in job_node_with_prompts.get_prompts_warnings()['ignored'] + assert 'inventory' in job_node_with_prompts.get_prompts_warnings()['ignored'] + assert len(job_node_with_prompts.get_prompts_warnings()['ignored']) == 2 diff --git a/awx/main/tests/unit/scheduler/__init__.py b/awx/main/tests/unit/scheduler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/tests/unit/scheduler/conftest.py 
b/awx/main/tests/unit/scheduler/conftest.py new file mode 100644 index 0000000000..8f3c5f913e --- /dev/null +++ b/awx/main/tests/unit/scheduler/conftest.py @@ -0,0 +1,265 @@ + +# Python +import pytest +from datetime import timedelta + +# Django +from django.utils.timezone import now as tz_now + +# awx +from awx.main.scheduler.partial import ( + JobDict, + ProjectUpdateDict, + InventoryUpdateDict, + InventorySourceDict, +) +from awx.main.scheduler import TaskManager + + +@pytest.fixture +def epoch(): + return tz_now() + + +@pytest.fixture +def scheduler_factory(mocker, epoch): + mocker.patch('awx.main.models.Instance.objects.total_capacity', return_value=10000) + + def fn(tasks=[], inventory_sources=[], latest_project_updates=[], latest_inventory_updates=[], create_project_update=None, create_inventory_update=None): + sched = TaskManager() + + sched.graph.get_now = lambda: epoch + + def no_create_inventory_update(task, ignore): + raise RuntimeError("create_inventory_update should not be called") + + def no_create_project_update(task): + raise RuntimeError("create_project_update should not be called") + + mocker.patch.object(sched, 'capture_chain_failure_dependencies') + mocker.patch.object(sched, 'get_tasks', return_value=tasks) + mocker.patch.object(sched, 'get_running_workflow_jobs', return_value=[]) + mocker.patch.object(sched, 'get_inventory_source_tasks', return_value=inventory_sources) + mocker.patch.object(sched, 'get_latest_project_update_tasks', return_value=latest_project_updates) + mocker.patch.object(sched, 'get_latest_inventory_update_tasks', return_value=latest_inventory_updates) + create_project_update_mock = mocker.patch.object(sched, 'create_project_update', return_value=create_project_update) + create_inventory_update_mock = mocker.patch.object(sched, 'create_inventory_update', return_value=create_inventory_update) + mocker.patch.object(sched, 'start_task') + + if not create_project_update: + create_project_update_mock.side_effect = no_create_project_update + if not create_inventory_update: + create_inventory_update_mock.side_effect = no_create_inventory_update + return sched + return fn + + +@pytest.fixture +def project_update_factory(epoch): + def fn(): + return ProjectUpdateDict({ + 'id': 1, + 'created': epoch - timedelta(seconds=100), + 'project_id': 1, + 'project__scm_update_cache_timeout': 0, + 'celery_task_id': '', + 'launch_type': 'dependency', + 'project__scm_update_on_launch': True, + }) + return fn + + +@pytest.fixture +def pending_project_update(project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'pending' + return project_update + + +@pytest.fixture +def waiting_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'waiting' + return project_update + + +@pytest.fixture +def running_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'running' + return project_update + + +@pytest.fixture +def successful_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['finished'] = epoch - timedelta(seconds=90) + project_update['status'] = 'successful' + return project_update + + +@pytest.fixture +def successful_project_update_cache_expired(epoch, project_update_factory): + project_update = project_update_factory() + + project_update['status'] = 'successful' + project_update['created'] = epoch - timedelta(seconds=120) + 
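# This fixture models an update whose cache window has lapsed: it was created
# 120s and finished 110s before `epoch`, while project__scm_update_cache_timeout
# is raised to 1 just below, letting scheduler tests assert that a fresh
# project update is required.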
project_update['finished'] = epoch - timedelta(seconds=110) + project_update['project__scm_update_cache_timeout'] = 1 + return project_update + + +@pytest.fixture +def failed_project_update(epoch, project_update_factory): + project_update = project_update_factory() + project_update['finished'] = epoch - timedelta(seconds=90) + project_update['status'] = 'failed' + return project_update + + +@pytest.fixture +def inventory_update_factory(epoch): + def fn(): + return InventoryUpdateDict({ + 'id': 1, + 'created': epoch - timedelta(seconds=101), + 'inventory_id': 1, + 'celery_task_id': '', + 'status': 'pending', + 'launch_type': 'dependency', + 'inventory_source_id': 1, + 'inventory_source__inventory_id': 1, + }) + return fn + + +@pytest.fixture +def inventory_update_latest_factory(epoch): + def fn(): + return InventoryUpdateDict({ + 'id': 1, + 'created': epoch - timedelta(seconds=101), + 'inventory_id': 1, + 'celery_task_id': '', + 'status': 'pending', + 'launch_type': 'dependency', + 'inventory_source_id': 1, + 'finished': None, + }) + return fn + + +@pytest.fixture +def inventory_update_latest(inventory_update_latest_factory): + return inventory_update_latest_factory() + + +@pytest.fixture +def successful_inventory_update_latest(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['status'] = 'successful' + iu['finished'] = iu['created'] + timedelta(seconds=10) + return iu + + +@pytest.fixture +def failed_inventory_update_latest(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['status'] = 'failed' + return iu + + +@pytest.fixture +def pending_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'pending' + return inventory_update + + +@pytest.fixture +def waiting_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'waiting' + return inventory_update + + +@pytest.fixture +def failed_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'failed' + return inventory_update + + +@pytest.fixture +def running_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['status'] = 'running' + return inventory_update + + +@pytest.fixture +def successful_inventory_update(epoch, inventory_update_factory): + inventory_update = inventory_update_factory() + inventory_update['finished'] = epoch - timedelta(seconds=90) + inventory_update['status'] = 'successful' + return inventory_update + + +@pytest.fixture +def job_factory(epoch): + ''' + Job + ''' + def fn(id=1, project__scm_update_on_launch=True, inventory__inventory_sources=[], allow_simultaneous=False): + return JobDict({ + 'id': id, + 'status': 'pending', + 'job_template_id': 1, + 'project_id': 1, + 'inventory_id': 1, + 'launch_type': 'manual', + 'allow_simultaneous': allow_simultaneous, + 'created': epoch - timedelta(seconds=99), + 'celery_task_id': '', + 'project__scm_update_on_launch': project__scm_update_on_launch, + 'inventory__inventory_sources': inventory__inventory_sources, + 'forks': 5, + 'dependent_jobs__id': None, + }) + return fn + + +@pytest.fixture +def pending_job(job_factory): + job = job_factory() + job['status'] = 'pending' + return job + + +@pytest.fixture +def running_job(job_factory): + job = job_factory() + job['status'] = 'running' + return job + + +@pytest.fixture +def 
inventory_source_factory(): + ''' + Inventory id -> [InventorySourceDict, ...] + ''' + def fn(id=1): + return InventorySourceDict({ + 'id': id, + }) + return fn + + +@pytest.fixture +def inventory_id_sources(inventory_source_factory): + return [ + (1, [ + inventory_source_factory(id=1), + inventory_source_factory(id=2), + ]), + ] diff --git a/awx/main/tests/unit/scheduler/test_dag.py b/awx/main/tests/unit/scheduler/test_dag.py new file mode 100644 index 0000000000..932f4436ec --- /dev/null +++ b/awx/main/tests/unit/scheduler/test_dag.py @@ -0,0 +1,198 @@ + +# Python +import pytest + +# AWX +from awx.main.scheduler.dag_simple import SimpleDAG +from awx.main.scheduler.dag_workflow import WorkflowDAG +from awx.main.models import Job, JobTemplate +from awx.main.models.workflow import WorkflowJobNode + + +@pytest.fixture +def dag_root(): + dag = SimpleDAG() + data = [ + {1: 1}, + {2: 2}, + {3: 3}, + {4: 4}, + {5: 5}, + {6: 6}, + ] + # Add all the nodes to the DAG + [dag.add_node(d) for d in data] + + dag.add_edge(data[0], data[1]) + dag.add_edge(data[2], data[3]) + dag.add_edge(data[4], data[5]) + + return dag + + +@pytest.fixture +def dag_simple_edge_labels(): + dag = SimpleDAG() + data = [ + {1: 1}, + {2: 2}, + {3: 3}, + {4: 4}, + {5: 5}, + {6: 6}, + ] + # Add all the nodes to the DAG + [dag.add_node(d) for d in data] + + dag.add_edge(data[0], data[1], 'one') + dag.add_edge(data[2], data[3], 'two') + dag.add_edge(data[4], data[5], 'three') + + return dag + + +''' +class TestSimpleDAG(object): + def test_get_root_nodes(self, dag_root): + leafs = dag_root.get_leaf_nodes() + + roots = dag_root.get_root_nodes() + + def test_get_labeled_edges(self, dag_simple_edge_labels): + dag = dag_simple_edge_labels + nodes = dag.get_dependencies(dag.nodes[0]['node_object'], 'one') + nodes = dag.get_dependencies(dag.nodes[0]['node_object'], 'two') +''' + + +@pytest.fixture +def factory_node(): + def fn(id, status): + wfn = WorkflowJobNode(id=id) + if status: + j = Job(status=status) + wfn.job = j + wfn.unified_job_template = JobTemplate(name='JT{}'.format(id)) + return wfn + return fn + + +@pytest.fixture +def workflow_dag_level_2(factory_node): + dag = WorkflowDAG() + data = [ + factory_node(0, 'successful'), + factory_node(1, 'successful'), + factory_node(2, 'successful'), + factory_node(3, None), + factory_node(4, None), + factory_node(5, None), + ] + [dag.add_node(d) for d in data] + + dag.add_edge(data[0], data[3], 'success_nodes') + dag.add_edge(data[1], data[4], 'success_nodes') + dag.add_edge(data[2], data[5], 'success_nodes') + + return (dag, data[3:6], False) + + +@pytest.fixture +def workflow_dag_multiple_roots(factory_node): + dag = WorkflowDAG() + data = [ + factory_node(1, None), + factory_node(2, None), + factory_node(3, None), + factory_node(4, None), + factory_node(5, None), + factory_node(6, None), + ] + [dag.add_node(d) for d in data] + + dag.add_edge(data[0], data[3], 'success_nodes') + dag.add_edge(data[1], data[4], 'success_nodes') + dag.add_edge(data[2], data[5], 'success_nodes') + + expected = data[0:3] + return (dag, expected, False) + + +@pytest.fixture +def workflow_dag_multiple_edges_labeled(factory_node): + dag = WorkflowDAG() + data = [ + factory_node(0, 'failed'), + factory_node(1, None), + factory_node(2, 'failed'), + factory_node(3, None), + factory_node(4, 'failed'), + factory_node(5, None), + ] + [dag.add_node(d) for d in data] + + dag.add_edge(data[0], data[1], 'success_nodes') + dag.add_edge(data[0], data[2], 'failure_nodes') + dag.add_edge(data[2], data[3], 
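+ # Resulting topology: 0 -success-> 1, 0 -failure-> 2, 2 -success-> 3, 2 -failure-> 4,
+ # 4 -failure-> 5. Nodes 0, 2 and 4 all failed, so only the failure branches are followed,
+ # leaving node 5 as the sole node expected to run.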
'success_nodes') + dag.add_edge(data[2], data[4], 'failure_nodes') + dag.add_edge(data[4], data[5], 'failure_nodes') + + expected = data[5:6] + return (dag, expected, False) + + +@pytest.fixture +def workflow_dag_finished(factory_node): + dag = WorkflowDAG() + data = [ + factory_node(0, 'failed'), + factory_node(1, None), + factory_node(2, 'failed'), + factory_node(3, None), + factory_node(4, 'failed'), + factory_node(5, 'successful'), + ] + [dag.add_node(d) for d in data] + + dag.add_edge(data[0], data[1], 'success_nodes') + dag.add_edge(data[0], data[2], 'failure_nodes') + dag.add_edge(data[2], data[3], 'success_nodes') + dag.add_edge(data[2], data[4], 'failure_nodes') + dag.add_edge(data[4], data[5], 'failure_nodes') + + expected = [] + return (dag, expected, True) + + +@pytest.fixture +def workflow_dag_always(factory_node): + dag = WorkflowDAG() + data = [ + factory_node(0, 'failed'), + factory_node(1, 'successful'), + factory_node(2, None), + ] + [dag.add_node(d) for d in data] + + dag.add_edge(data[0], data[1], 'always_nodes') + dag.add_edge(data[1], data[2], 'always_nodes') + + expected = data[2:3] + return (dag, expected, False) + + +@pytest.fixture(params=['workflow_dag_multiple_roots', 'workflow_dag_level_2', + 'workflow_dag_multiple_edges_labeled', 'workflow_dag_finished', + 'workflow_dag_always']) +def workflow_dag(request): + return request.getfuncargvalue(request.param) + + +class TestWorkflowDAG(): + def test_bfs_nodes_to_run(self, workflow_dag): + dag, expected, is_done = workflow_dag + assert dag.bfs_nodes_to_run() == expected + + def test_is_workflow_done(self, workflow_dag): + dag, expected, is_done = workflow_dag + assert dag.is_workflow_done() == is_done diff --git a/awx/main/tests/unit/scheduler/test_dependency_graph.py b/awx/main/tests/unit/scheduler/test_dependency_graph.py new file mode 100644 index 0000000000..ca74ec9bf7 --- /dev/null +++ b/awx/main/tests/unit/scheduler/test_dependency_graph.py @@ -0,0 +1,121 @@ + +# Python +import pytest +from datetime import timedelta + +# Django +from django.utils.timezone import now as tz_now + +# AWX +from awx.main.scheduler.dependency_graph import DependencyGraph +from awx.main.scheduler.partial import ProjectUpdateDict + + +@pytest.fixture +def graph(): + return DependencyGraph() + + +@pytest.fixture +def job(job_factory): + j = job_factory() + j.project_id = 1 + return j + + +@pytest.fixture +def unsuccessful_last_project(graph, job): + pu = ProjectUpdateDict(dict(id=1, + project__scm_update_cache_timeout=999999, + project_id=1, + status='failed', + created='3', + finished='3',)) + + graph.add_latest_project_update(pu) + + return graph + + +@pytest.fixture +def last_dependent_project(graph, job): + now = tz_now() + + job['project_id'] = 1 + job['created'] = now + pu = ProjectUpdateDict(dict(id=1, project_id=1, status='waiting', + project__scm_update_cache_timeout=0, + launch_type='dependency', + created=now - timedelta(seconds=1),)) + + graph.add_latest_project_update(pu) + + return (graph, job) + + +@pytest.fixture +def timedout_project_update(graph, job): + now = tz_now() + + job['project_id'] = 1 + job['created'] = now + pu = ProjectUpdateDict(dict(id=1, project_id=1, status='successful', + project__scm_update_cache_timeout=10, + launch_type='dependency', + created=now - timedelta(seconds=100), + finished=now - timedelta(seconds=11),)) + + graph.add_latest_project_update(pu) + + return (graph, job) + + +@pytest.fixture +def not_timedout_project_update(graph, job): + now = tz_now() + + job['project_id'] = 1 + 
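+ # The update below finished only 11 seconds ago against a 3600-second cache timeout,
+ # so the cache is still warm and should_update_related_project() is expected to return False.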
job['created'] = now + pu = ProjectUpdateDict(dict(id=1, project_id=1, status='successful', + project__scm_update_cache_timeout=3600, + launch_type='dependency', + created=now - timedelta(seconds=100), + finished=now - timedelta(seconds=11),)) + + graph.add_latest_project_update(pu) + + return (graph, job) + + +class TestShouldUpdateRelatedProject(): + def test_no_project_updates(self, graph, job): + actual = graph.should_update_related_project(job) + + assert True is actual + + def test_timedout_project_update(self, timedout_project_update): + (graph, job) = timedout_project_update + + actual = graph.should_update_related_project(job) + + assert True is actual + + def test_not_timedout_project_update(self, not_timedout_project_update): + (graph, job) = not_timedout_project_update + + actual = graph.should_update_related_project(job) + + assert False is actual + + def test_unsuccessful_last_project(self, unsuccessful_last_project, job): + graph = unsuccessful_last_project + + actual = graph.should_update_related_project(job) + + assert True is actual + + def test_last_dependent_project(self, last_dependent_project): + (graph, job) = last_dependent_project + + actual = graph.should_update_related_project(job) + assert False is actual diff --git a/awx/main/tests/unit/scheduler/test_scheduler_inventory_update.py b/awx/main/tests/unit/scheduler/test_scheduler_inventory_update.py new file mode 100644 index 0000000000..acffff3f8d --- /dev/null +++ b/awx/main/tests/unit/scheduler/test_scheduler_inventory_update.py @@ -0,0 +1,132 @@ + +# Python +import pytest +from datetime import timedelta + + +@pytest.fixture +def pending_job(job_factory): + return job_factory(project__scm_update_on_launch=False, inventory__inventory_sources=['1']) + + +@pytest.fixture +def successful_inventory_update_latest(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['inventory_source__update_cache_timeout'] = 100 + iu['status'] = 'successful' + iu['finished'] = iu['created'] + timedelta(seconds=10) + return iu + + +@pytest.fixture +def successful_inventory_update_latest_cache_expired(inventory_update_latest_factory): + iu = inventory_update_latest_factory() + iu['inventory_source__update_cache_timeout'] = 1 + iu['finished'] = iu['created'] + timedelta(seconds=2) + return iu + + +@pytest.fixture +def failed_inventory_update_latest_cache_zero(failed_inventory_update_latest): + iu = failed_inventory_update_latest + iu['inventory_source__update_cache_timeout'] = 0 + iu['inventory_source__update_on_launch'] = True + iu['finished'] = iu['created'] + timedelta(seconds=2) + iu['status'] = 'failed' + return iu + + +@pytest.fixture +def failed_inventory_update_latest_cache_non_zero(failed_inventory_update_latest_cache_zero): + failed_inventory_update_latest_cache_zero['inventory_source__update_cache_timeout'] = 10000000 + return failed_inventory_update_latest_cache_zero + + +class TestStartInventoryUpdate(): + def test_pending(self, scheduler_factory, pending_inventory_update): + scheduler = scheduler_factory(tasks=[pending_inventory_update]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_inventory_update) + + +class TestInventoryUpdateBlocked(): + def test_running_inventory_update(self, epoch, scheduler_factory, running_inventory_update, pending_inventory_update): + running_inventory_update['created'] = epoch - timedelta(seconds=100) + pending_inventory_update['created'] = epoch - timedelta(seconds=90) + + scheduler = scheduler_factory(tasks=[running_inventory_update, 
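+ # A running update for the same inventory source should block the younger pending
+ # update from being started.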
pending_inventory_update]) + + scheduler._schedule() + + def test_waiting_inventory_update(self, epoch, scheduler_factory, waiting_inventory_update, pending_inventory_update): + waiting_inventory_update['created'] = epoch - timedelta(seconds=100) + pending_inventory_update['created'] = epoch - timedelta(seconds=90) + + scheduler = scheduler_factory(tasks=[waiting_inventory_update, pending_inventory_update]) + + scheduler._schedule() + + +class TestCreateDependentInventoryUpdate(): + def test(self, scheduler_factory, pending_job, waiting_inventory_update, inventory_id_sources): + scheduler = scheduler_factory(tasks=[pending_job], + create_inventory_update=waiting_inventory_update, + inventory_sources=inventory_id_sources) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_inventory_update, [pending_job]) + + def test_cache_hit(self, scheduler_factory, pending_job, successful_inventory_update, successful_inventory_update_latest): + scheduler = scheduler_factory(tasks=[successful_inventory_update, pending_job], + latest_inventory_updates=[successful_inventory_update_latest]) + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_job) + + def test_cache_miss(self, scheduler_factory, pending_job, successful_inventory_update, successful_inventory_update_latest_cache_expired, waiting_inventory_update, inventory_id_sources): + scheduler = scheduler_factory(tasks=[successful_inventory_update, pending_job], + latest_inventory_updates=[successful_inventory_update_latest_cache_expired], + create_inventory_update=waiting_inventory_update, + inventory_sources=inventory_id_sources) + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_inventory_update, [pending_job]) + + def test_last_update_timeout_zero_failed(self, scheduler_factory, pending_job, failed_inventory_update, failed_inventory_update_latest_cache_zero, waiting_inventory_update, inventory_id_sources): + scheduler = scheduler_factory(tasks=[failed_inventory_update, pending_job], + latest_inventory_updates=[failed_inventory_update_latest_cache_zero], + create_inventory_update=waiting_inventory_update, + inventory_sources=inventory_id_sources) + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_inventory_update, [pending_job]) + + def test_last_update_timeout_non_zero_failed(self, scheduler_factory, pending_job, failed_inventory_update, failed_inventory_update_latest_cache_non_zero, waiting_inventory_update, inventory_id_sources): + scheduler = scheduler_factory(tasks=[failed_inventory_update, pending_job], + latest_inventory_updates=[failed_inventory_update_latest_cache_non_zero], + create_inventory_update=waiting_inventory_update, + inventory_sources=inventory_id_sources) + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_inventory_update, [pending_job]) + + +class TestCaptureChainFailureDependencies(): + @pytest.fixture + def inventory_id_sources(self, inventory_source_factory): + return [ + (1, [inventory_source_factory(id=1)]), + ] + + def test(self, scheduler_factory, pending_job, waiting_inventory_update, inventory_id_sources): + scheduler = scheduler_factory(tasks=[pending_job], + create_inventory_update=waiting_inventory_update, + inventory_sources=inventory_id_sources) + + scheduler._schedule() + + scheduler.capture_chain_failure_dependencies.assert_called_with(pending_job, [waiting_inventory_update]) + diff --git a/awx/main/tests/unit/scheduler/test_scheduler_job.py 
b/awx/main/tests/unit/scheduler/test_scheduler_job.py new file mode 100644 index 0000000000..cac315af4b --- /dev/null +++ b/awx/main/tests/unit/scheduler/test_scheduler_job.py @@ -0,0 +1,86 @@ + +# Python +import pytest +from datetime import timedelta + + +class TestJobBlocked(): + def test_inventory_update_waiting(self, scheduler_factory, waiting_inventory_update, pending_job): + scheduler = scheduler_factory(tasks=[waiting_inventory_update, pending_job]) + + scheduler._schedule() + + scheduler.start_task.assert_not_called() + + def test_inventory_update_running(self, scheduler_factory, running_inventory_update, pending_job, inventory_source_factory, inventory_id_sources): + scheduler = scheduler_factory(tasks=[running_inventory_update, pending_job], + inventory_sources=inventory_id_sources) + + scheduler._schedule() + + scheduler.start_task.assert_not_called() + + def test_project_update_running(self, scheduler_factory, pending_job, running_project_update): + scheduler = scheduler_factory(tasks=[running_project_update, pending_job]) + + scheduler._schedule() + + scheduler.start_task.assert_not_called() + assert scheduler.create_project_update.call_count == 0 + + def test_project_update_waiting(self, scheduler_factory, pending_job, waiting_project_update): + scheduler = scheduler_factory(tasks=[waiting_project_update, pending_job], + latest_project_updates=[waiting_project_update]) + + scheduler._schedule() + + scheduler.start_task.assert_not_called() + assert scheduler.create_project_update.call_count == 0 + + +class TestJob(): + @pytest.fixture + def successful_project_update(self, project_update_factory): + project_update = project_update_factory() + project_update['status'] = 'successful' + project_update['finished'] = project_update['created'] + timedelta(seconds=10) + project_update['project__scm_update_cache_timeout'] = 3600 + return project_update + + def test_existing_dependencies_finished(self, scheduler_factory, successful_project_update, successful_inventory_update_latest, pending_job): + scheduler = scheduler_factory(tasks=[successful_project_update, pending_job], + latest_project_updates=[successful_project_update], + latest_inventory_updates=[successful_inventory_update_latest]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_job) + + +class TestCapacity(): + @pytest.fixture + def pending_job_high_impact(self, mocker, job_factory): + pending_job = job_factory(project__scm_update_on_launch=False) + mocker.patch.object(pending_job, 'task_impact', return_value=10001) + return pending_job + + def test_no_capacity(self, scheduler_factory, pending_job_high_impact): + scheduler = scheduler_factory(tasks=[pending_job_high_impact]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_job_high_impact) + + @pytest.fixture + def pending_jobs_impactful(self, mocker, job_factory): + pending_jobs = [job_factory(id=i + 1, project__scm_update_on_launch=False, allow_simultaneous=True) for i in xrange(0, 3)] + map(lambda pending_job: mocker.patch.object(pending_job, 'task_impact', return_value=10), pending_jobs) + return pending_jobs + + def test_capacity_exhausted(self, mocker, scheduler_factory, pending_jobs_impactful): + scheduler = scheduler_factory(tasks=pending_jobs_impactful) + + scheduler._schedule() + + calls = [mocker.call(job) for job in pending_jobs_impactful] + scheduler.start_task.assert_has_calls(calls) diff --git a/awx/main/tests/unit/scheduler/test_scheduler_project_update.py 
b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py new file mode 100644 index 0000000000..e8a5af17c8 --- /dev/null +++ b/awx/main/tests/unit/scheduler/test_scheduler_project_update.py @@ -0,0 +1,75 @@ + +# TODO: wherever get_latest_project_update_task() is stubbed and returns a +# ProjectUpdateDict, we should instead return a ProjectUpdateLatestDict(). +# For now, this is ok since the fields don't deviate that much. + + +class TestStartProjectUpdate(): + def test(self, scheduler_factory, pending_project_update): + scheduler = scheduler_factory(tasks=[pending_project_update]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_project_update) + assert scheduler.create_project_update.call_count == 0 + + ''' + Explicit project updates should always run; they should not use cache logic. + ''' + def test_cache_oblivious(self, scheduler_factory, successful_project_update, pending_project_update): + scheduler = scheduler_factory(tasks=[pending_project_update], + latest_project_updates=[successful_project_update]) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_project_update) + assert scheduler.create_project_update.call_count == 0 + + +class TestCreateDependentProjectUpdate(): + def test(self, scheduler_factory, pending_job, waiting_project_update): + scheduler = scheduler_factory(tasks=[pending_job], + create_project_update=waiting_project_update) + + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_project_update, [pending_job]) + + def test_cache_hit(self, scheduler_factory, pending_job, successful_project_update): + scheduler = scheduler_factory(tasks=[successful_project_update, pending_job], + latest_project_updates=[successful_project_update]) + scheduler._schedule() + + scheduler.start_task.assert_called_with(pending_job) + + def test_cache_miss(self, scheduler_factory, pending_job, successful_project_update_cache_expired, waiting_project_update): + scheduler = scheduler_factory(tasks=[successful_project_update_cache_expired, pending_job], + latest_project_updates=[successful_project_update_cache_expired], + create_project_update=waiting_project_update) + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_project_update, [pending_job]) + + def test_last_update_failed(self, scheduler_factory, pending_job, failed_project_update, waiting_project_update): + scheduler = scheduler_factory(tasks=[failed_project_update, pending_job], + latest_project_updates=[failed_project_update], + create_project_update=waiting_project_update) + scheduler._schedule() + + scheduler.start_task.assert_called_with(waiting_project_update, [pending_job]) + + +class TestProjectUpdateBlocked(): + def test_project_update_running(self, scheduler_factory, running_project_update, pending_project_update): + scheduler = scheduler_factory(tasks=[running_project_update, pending_project_update]) + scheduler._schedule() + + scheduler.start_task.assert_not_called() + assert scheduler.create_project_update.call_count == 0 + + def test_job_running(self, scheduler_factory, running_job, pending_project_update): + scheduler = scheduler_factory(tasks=[running_job, pending_project_update]) + + scheduler._schedule() + + scheduler.start_task.assert_not_called() diff --git a/awx/main/tests/unit/settings/test_defaults.py b/awx/main/tests/unit/settings/test_defaults.py index d1c80fce3e..894289002d 100644 --- a/awx/main/tests/unit/settings/test_defaults.py +++ b/awx/main/tests/unit/settings/test_defaults.py @@ -3,6 +3,7 @@ import
pytest from django.conf import settings from datetime import timedelta + @pytest.mark.parametrize("job_name,function_path", [ ('admin_checks', 'awx.main.tasks.run_administrative_checks'), ('tower_scheduler', 'awx.main.tasks.tower_periodic_scheduler'), diff --git a/awx/main/tests/unit/test_access.py b/awx/main/tests/unit/test_access.py index 0c2e6bb5be..05199fd5e3 100644 --- a/awx/main/tests/unit/test_access.py +++ b/awx/main/tests/unit/test_access.py @@ -1,25 +1,122 @@ import pytest import mock +import os from django.contrib.auth.models import User from django.forms.models import model_to_dict +from rest_framework.exceptions import ParseError +from rest_framework.exceptions import PermissionDenied from awx.main.access import ( BaseAccess, check_superuser, JobTemplateAccess, + WorkflowJobTemplateAccess, SystemJobTemplateAccess, ) +from awx.conf.license import LicenseForbids from awx.main.models import ( Credential, Inventory, Project, Role, Organization, + Instance, ) +@pytest.fixture +def user_unit(): + return User(username='rando', password='raginrando', email='rando@redhat.com') + + +class TestRelatedFieldAccess: + @pytest.fixture + def resource_good(self, mocker): + good_role = mocker.MagicMock(__contains__=lambda self, user: True) + return mocker.MagicMock(related=mocker.MagicMock(admin_role=good_role), + admin_role=good_role) + + @pytest.fixture + def resource_bad(self, mocker): + bad_role = mocker.MagicMock(__contains__=lambda self, user: False) + return mocker.MagicMock(related=mocker.MagicMock(admin_role=bad_role), + admin_role=bad_role) + + @pytest.fixture + def access(self, user_unit): + return BaseAccess(user_unit) + + def test_new_optional_fail(self, access, resource_bad, mocker): + """ + User tries to create a new resource, but lacks permission + to the related resource they provided + """ + data = {'related': resource_bad} + assert not access.check_related('related', mocker.MagicMock, data) + + def test_new_with_bad_data(self, access, mocker): + data = {'related': 3.1415} + with pytest.raises(ParseError): + access.check_related('related', mocker.MagicMock, data) + + def test_new_mandatory_fail(self, access, mocker): + access.user.is_superuser = False + assert not access.check_related( + 'related', mocker.MagicMock, {}, mandatory=True) + assert not access.check_related( + 'related', mocker.MagicMock, {'resource': None}, mandatory=True) + + def test_existing_no_op(self, access, resource_bad, mocker): + """ + User edits a resource, but does not change related field + lack of access to related field does not block action + """ + data = {'related': resource_bad.related} + assert access.check_related( + 'related', mocker.MagicMock, data, obj=resource_bad) + assert access.check_related( + 'related', mocker.MagicMock, {}, obj=resource_bad) + + def test_existing_required_access(self, access, resource_bad, mocker): + # no-op actions, but mandatory kwarg requires check to pass + assert not access.check_related( + 'related', mocker.MagicMock, {}, obj=resource_bad, mandatory=True) + assert not access.check_related( + 'related', mocker.MagicMock, {'related': resource_bad.related}, + obj=resource_bad, mandatory=True) + + def test_existing_no_access_to_current( + self, access, resource_good, resource_bad, mocker): + """ + User gives a valid related resource (like organization), but does + not have access to _existing_ related resource, so deny action + """ + data = {'related': resource_good} + assert not access.check_related( + 'related', mocker.MagicMock, data, obj=resource_bad) + + def 
test_existing_no_access_to_new( + self, access, resource_good, resource_bad, mocker): + data = {'related': resource_bad} + assert not access.check_related( + 'related', mocker.MagicMock, data, obj=resource_good) + + def test_existing_not_allowed_to_remove(self, access, resource_bad, mocker): + data = {'related': None} + assert not access.check_related( + 'related', mocker.MagicMock, data, obj=resource_bad) + + def test_existing_not_null_null(self, access, mocker): + resource = mocker.MagicMock(related=None) + data = {'related': None} + # Not changing anything by giving null when it is already-null + # important for PUT requests + assert access.check_related( + 'related', mocker.MagicMock, data, obj=resource, mandatory=True) + + @pytest.fixture def job_template_with_ids(job_template_factory): # Create non-persisted objects with IDs to send to job_template_factory @@ -35,9 +132,6 @@ def job_template_with_ids(job_template_factory): persisted=False) return jt_objects.job_template -@pytest.fixture -def user_unit(): - return User(username='rando', password='raginrando', email='rando@redhat.com') def test_superuser(mocker): user = mocker.MagicMock(spec=User, id=1, is_superuser=True) @@ -46,6 +140,7 @@ def test_superuser(mocker): can_add = check_superuser(BaseAccess.can_add) assert can_add(access, None) is True + def test_not_superuser(mocker): user = mocker.MagicMock(spec=User, id=1, is_superuser=False) access = BaseAccess(user) @@ -53,6 +148,7 @@ def test_not_superuser(mocker): can_add = check_superuser(BaseAccess.can_add) assert can_add(access, None) is False + def test_jt_existing_values_are_nonsensitive(job_template_with_ids, user_unit): """Assure that permission checks are not required if submitted data is identical to what the job template already has.""" @@ -62,6 +158,7 @@ def test_jt_existing_values_are_nonsensitive(job_template_with_ids, user_unit): assert access.changes_are_non_sensitive(job_template_with_ids, data) + def test_change_jt_sensitive_data(job_template_with_ids, mocker, user_unit): """Assure that can_add is called with all ForeignKeys.""" @@ -84,6 +181,7 @@ def test_change_jt_sensitive_data(job_template_with_ids, mocker, user_unit): 'network_credential': job_template_with_ids.network_credential.id }) + def test_jt_add_scan_job_check(job_template_with_ids, user_unit): "Assure that permissions to add scan jobs work correctly" @@ -113,11 +211,110 @@ def test_jt_add_scan_job_check(job_template_with_ids, user_unit): 'job_type': 'scan' }) + +def mock_raise_license_forbids(self, add_host=False, feature=None, check_expiration=True): + raise LicenseForbids("Feature not enabled") + + +def mock_raise_none(self, add_host=False, feature=None, check_expiration=True): + return None + + +def test_jt_can_start_ha(job_template_with_ids): + with mock.patch.object(Instance.objects, 'active_count', return_value=2): + with mock.patch('awx.main.access.BaseAccess.check_license', new=mock_raise_license_forbids): + with pytest.raises(LicenseForbids): + JobTemplateAccess(user_unit).can_start(job_template_with_ids) + + def test_jt_can_add_bad_data(user_unit): "Assure that no server errors are returned if we call JT can_add with bad data" access = JobTemplateAccess(user_unit) assert not access.can_add({'asdf': 'asdf'}) + +class TestWorkflowAccessMethods: + @pytest.fixture + def workflow(self, workflow_job_template_factory): + objects = workflow_job_template_factory('test_workflow', persisted=False) + return objects.workflow_job_template + + def test_workflow_can_add(self, workflow, user_unit): + 
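+ # Mock strategy: Role.__contains__ is patched below so the admin-role membership check
+ # passes, and get_object_or_400 returns this in-memory organization instead of hitting
+ # the database.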
organization = Organization(name='test-org') + workflow.organization = organization + organization.admin_role = Role() + + def mock_get_object(Class, **kwargs): + if Class == Organization: + return organization + else: + raise Exception('Item requested has not been mocked') + + access = WorkflowJobTemplateAccess(user_unit) + with mock.patch('awx.main.models.rbac.Role.__contains__', return_value=True): + with mock.patch('awx.main.access.get_object_or_400', mock_get_object): + assert access.can_add({'organization': 1}) + + +class TestCheckLicense: + @pytest.fixture + def validate_enhancements_mocker(self, mocker): + os.environ['SKIP_LICENSE_FIXUP_FOR_TEST'] = '1' + + def fn(available_instances=1, free_instances=0, host_exists=False): + + class MockFilter: + def exists(self): + return host_exists + + mocker.patch('awx.main.tasks.TaskEnhancer.validate_enhancements', return_value={'free_instances': free_instances, 'available_instances': available_instances, 'date_warning': True}) + + mock_filter = MockFilter() + mocker.patch('awx.main.models.Host.objects.filter', return_value=mock_filter) + + return fn + + def test_check_license_add_host_duplicate(self, validate_enhancements_mocker, user_unit): + validate_enhancements_mocker(available_instances=1, free_instances=0, host_exists=True) + + BaseAccess(None).check_license(add_host_name='blah', check_expiration=False) + + def test_check_license_add_host_new_exceed_license(self, validate_enhancements_mocker, user_unit, mocker): + validate_enhancements_mocker(available_instances=1, free_instances=0, host_exists=False) + exception = None + + try: + BaseAccess(None).check_license(add_host_name='blah', check_expiration=False) + except PermissionDenied as e: + exception = e + + assert "License count of 1 instances has been reached." == str(exception) + + +def test_user_capabilities_method(): + """Unit test to verify that the user_capabilities method will defer + to the appropriate sub-class methods of the access classes. + Note that normal output is True/False, but a string is returned + in these tests to establish uniqueness.
+ """ + + class FooAccess(BaseAccess): + def can_change(self, obj, data): + return 'bar' + + def can_copy(self, obj): + return 'foo' + + user = User(username='auser') + foo_access = FooAccess(user) + foo = object() + foo_capabilities = foo_access.get_user_capabilities(foo, ['edit', 'copy']) + assert foo_capabilities == { + 'edit': 'bar', + 'copy': 'foo' + } + + def test_system_job_template_can_start(mocker): user = mocker.MagicMock(spec=User, id=1, is_system_auditor=True, is_superuser=False) assert user.is_system_auditor diff --git a/awx/main/tests/unit/test_credentials.py b/awx/main/tests/unit/test_credentials.py deleted file mode 100644 index 7445d28fda..0000000000 --- a/awx/main/tests/unit/test_credentials.py +++ /dev/null @@ -1,56 +0,0 @@ -from django.core.exceptions import ValidationError -from awx.main.models.credential import validate_ssh_private_key - -import pytest - -def test_valid_rsa_key(): - begin = """-----BEGIN RSA PRIVATE KEY-----""" - end = """-----END RSA PRIVATE KEY-----""" - unvalidated_key = build_key(begin, body, end) - key_data = validate_ssh_private_key(unvalidated_key) - assert key_data['key_type'] == 'rsa' - -def test_invalid_key(): - unvalidated_key = build_key(key_begin, body, "END KEY") - with pytest.raises(ValidationError): - validate_ssh_private_key(unvalidated_key) - -def test_key_type_empty(): - unvalidated_key = build_key(key_begin, body, key_end) - key_data = validate_ssh_private_key(unvalidated_key) - assert key_data['key_type'] == 'rsa1' - - -def build_key(begin, body, end): - return """%s%s%s""" % (begin, body, end) - -key_begin = """-----BEGIN PRIVATE KEY-----""" -key_end = """-----END PRIVATE KEY-----""" - -body = """ -uFZFyag7VVqI+q/oGnQu+wj/pMi5ox+Qz5L3W0D745DzwgDXOeObAfNlr9NtIKbn -sZ5E0+rYB4Q/U0CYr5juNJQV1dbxq2Em1160axboe2QbvX6wE6Sm6wW9b9cr+PoF -MoYQebUnCY0ObrLbrRugSfZc17lyxK0ZGRgPXKhpMg6Ecv8XpvhjUYU9Esyqfuco -/p26Q140/HsHeHYNma0dQHCEjMr/qEzOY1qguHj+hRf3SARtM9Q+YNgpxchcDDVS -O+n+8Ljd/p82bpEJwxmpXealeWbI6gB9/R6wcCL+ZyCZpnHJd/NJ809Vtu47ZdDi -E6jvqS/3AQhuQKhJlLSDIzezB2VKKrHwOvHkg/+uLoCqHN34Gk6Qio7x69SvXy88 -a7q9D1l/Zx60o08FyZyqlo7l0l/r8EY+36cuI/lvAvfxc5VHVEOvKseUjFRBiCv9 -MkKNxaScoYsPwY7SIS6gD93tg3eM5pA0nfMfya9u1+uq/QCM1gNG3mm6Zd8YG4c/ -Dx4bmsj8cp5ni/Ffl/sKzKYq1THunJEFGXOZRibdxk/Fal3SQrRAwy7CgLQL8SMh -IWqcFm25OtSOP1r1LE25t5pQsMdmp0IP2fEF0t/pXPm1ZfrTurPMqpo4FGm2hkki -U3sH/o6nrkSOjklOLWlwtTkkL4dWPlNwc8OYj8zFizXJkAfv1spzhv3lRouNkw4N -Mm22W7us2f3Ob0H5C07k26h6VuXX+0AybD4tIIcUXCLoNTqA0HvqhKpEuHu3Ck10 -RaB8xHTxgwdhGVaNHMfy9B9l4tNs3Tb5k0LyeRRGVDhWCFo6axYULYebkj+hFLLY -+JE5RzPDFpTf1xbuT+e56H/lLFCUdDu0bn+D0W4ifXaVFegak4r6O4B53CbMqr+R -t6qDPKLUIuVJXK0J6Ay6XgmheXJGbgKh4OtDsc06gsTCE1nY4f/Z82AQahPBfTtF -J2z+NHdsLPn//HlxspGQtmLpuS7Wx0HYXZ+kPRSiE/vmITw85R2u8JSHQicVNN4C -2rlUo15TIU3tTx+WUIrHKHPidUNNotRb2p9n9FoSidU6upKnQHAT/JNv/zcvaia3 -Bhl/wagheWTDnFKSmJ4HlKxplM/32h6MfHqsMVOl4F6eZWKaKgSgN8doXyFJo+sc -yAC6S0gJlD2gQI24iTI4Du1+UGh2MGb69eChvi5mbbdesaZrlR1dRqZpHG+6ob4H -nYLndRvobXS5l6pgGTDRYoUgSbQe21a7Uf3soGl5jHqLWc1zEPwrxV7Wr31mApr6 -8VtGZcLSr0691Q1NLO3eIfuhbMN2mssX/Sl4t+4BibaucNIMfmhKQi8uHtwAXb47 -+TMFlG2EQhZULFM4fLdF1vaizInU3cBk8lsz8i71tDc+5VQTEwoEB7Gksy/XZWEt -6SGHxXUDtNYa+G2O+sQhgqBjLIkVTV6KJOpvNZM+s8Vzv8qoFnD7isKBBrRvF1bP -GOXEG1jd7nSR0WSwcMCHGOrFEELDQPw3k5jqEdPFgVODoZPr+drZVnVz5SAGBk5Y -wsCNaDW+1dABYFlqRTepP5rrSu9wHnRAZ3ZGv+DHoGqenIC5IBR0sQ== -""" diff --git a/awx/main/tests/unit/test_ha.py b/awx/main/tests/unit/test_ha.py index 07249a67fb..ebd86cd3a2 100644 --- a/awx/main/tests/unit/test_ha.py +++ b/awx/main/tests/unit/test_ha.py @@ -6,10 +6,12 @@ import mock # 
AWX from awx.main.ha import is_ha_environment + @mock.patch('awx.main.models.Instance.objects.count', lambda: 2) def test_multiple_instances(): assert is_ha_environment() + @mock.patch('awx.main.models.Instance.objects.count', lambda: 1) def test_db_localhost(): assert is_ha_environment() is False diff --git a/awx/main/tests/unit/test_network_credential.py b/awx/main/tests/unit/test_network_credential.py index 676a0c7f1f..8cdf720af3 100644 --- a/awx/main/tests/unit/test_network_credential.py +++ b/awx/main/tests/unit/test_network_credential.py @@ -6,9 +6,59 @@ from awx.main.models.inventory import Inventory from awx.main.tasks import RunJob +def test_aws_cred_parse(mocker): + with mocker.patch('django.db.ConnectionRouter.db_for_write'): + job = Job(id=1) + job.inventory = mocker.MagicMock(spec=Inventory, id=2) + + options = { + 'kind': 'aws', + 'username': 'aws_user', + 'password': 'aws_passwd', + 'security_token': 'token', + } + job.cloud_credential = Credential(**options) + + run_job = RunJob() + mocker.patch.object(run_job, 'should_use_proot', return_value=False) + + env = run_job.build_env(job, private_data_dir='/tmp') + assert env['AWS_ACCESS_KEY'] == options['username'] + assert env['AWS_SECRET_KEY'] == options['password'] + assert env['AWS_SECURITY_TOKEN'] == options['security_token'] + + +def test_net_cred_parse(mocker): + with mocker.patch('django.db.ConnectionRouter.db_for_write'): + job = Job(id=1) + job.inventory = mocker.MagicMock(spec=Inventory, id=2) + + options = { + 'username':'test', + 'password':'test', + 'authorize': True, + 'authorize_password': 'passwd', + 'ssh_key_data': """-----BEGIN PRIVATE KEY-----\nstuff==\n-----END PRIVATE KEY-----""", + } + private_data_files = { + 'network_credential': '/tmp/this_file_does_not_exist_during_test_but_the_path_is_real', + } + job.network_credential = Credential(**options) + + run_job = RunJob() + mocker.patch.object(run_job, 'should_use_proot', return_value=False) + + env = run_job.build_env(job, private_data_dir='/tmp', private_data_files=private_data_files) + assert env['ANSIBLE_NET_USERNAME'] == options['username'] + assert env['ANSIBLE_NET_PASSWORD'] == options['password'] + assert env['ANSIBLE_NET_AUTHORIZE'] == '1' + assert env['ANSIBLE_NET_AUTH_PASS'] == options['authorize_password'] + assert env['ANSIBLE_NET_SSH_KEYFILE'] == private_data_files['network_credential'] + + @pytest.fixture -def options(): - return { +def mock_job(mocker): + options = { 'username':'test', 'password':'test', 'ssh_key_data': """-----BEGIN PRIVATE KEY-----\nstuff==\n-----END PRIVATE KEY-----""", @@ -16,46 +66,56 @@ def options(): 'authorize_password': 'passwd', } - -def test_net_cred_parse(mocker, options): - with mocker.patch('django.db.ConnectionRouter.db_for_write'): - job = Job(id=1) - job.inventory = mocker.MagicMock(spec=Inventory, id=2) - job.network_credential = Credential(**options) - - run_job = RunJob() - mocker.patch.object(run_job, 'should_use_proot', return_value=False) - - env = run_job.build_env(job, private_data_dir='/tmp') - assert env['ANSIBLE_NET_USERNAME'] == options['username'] - assert env['ANSIBLE_NET_PASSWORD'] == options['password'] - assert env['ANSIBLE_NET_AUTHORIZE'] == '1' - assert env['ANSIBLE_NET_AUTHORIZE_PASSWORD'] == options['authorize_password'] + mock_job_attrs = {'forks': False, 'id': 1, 'cancel_flag': False, 'status': 'running', 'job_type': 'normal', + 'credential': None, 'cloud_credential': None, 'network_credential': Credential(**options), + 'become_enabled': False, 'become_method': None, 'become_username': 
None, + 'inventory': mocker.MagicMock(spec=Inventory, id=2), 'force_handlers': False, + 'limit': None, 'verbosity': None, 'job_tags': None, 'skip_tags': None, + 'start_at_task': None, 'pk': 1, 'launch_type': 'normal', 'job_template':None, + 'created_by': None, 'extra_vars_dict': None, 'project':None, 'playbook': 'test.yml'} + mock_job = mocker.MagicMock(spec=Job, **mock_job_attrs) + return mock_job -def test_net_cred_ssh_agent(mocker, options): - with mocker.patch('django.db.ConnectionRouter.db_for_write'): - run_job = RunJob() +@pytest.fixture +def run_job_net_cred(mocker, get_ssh_version, mock_job): + mocker.patch('django.db.ConnectionRouter.db_for_write') + run_job = RunJob() - mock_job_attrs = {'forks': False, 'id': 1, 'cancel_flag': False, 'status': 'running', 'job_type': 'normal', - 'credential': None, 'cloud_credential': None, 'network_credential': Credential(**options), - 'become_enabled': False, 'become_method': None, 'become_username': None, - 'inventory': mocker.MagicMock(spec=Inventory, id=2), 'force_handlers': False, - 'limit': None, 'verbosity': None, 'job_tags': None, 'skip_tags': None, - 'start_at_task': None, 'pk': 1, 'launch_type': 'normal', 'job_template':None, - 'created_by': None, 'extra_vars_dict': None, 'project':None, 'playbook': 'test.yml'} - mock_job = mocker.MagicMock(spec=Job, **mock_job_attrs) + mocker.patch.object(run_job, 'update_model', return_value=mock_job) + mocker.patch.object(run_job, 'build_cwd', return_value='/tmp') + mocker.patch.object(run_job, 'should_use_proot', return_value=False) + mocker.patch.object(run_job, 'run_pexpect', return_value=('successful', 0)) + mocker.patch.object(run_job, 'open_fifo_write', return_value=None) + mocker.patch.object(run_job, 'post_run_hook', return_value=None) - mocker.patch.object(run_job, 'update_model', return_value=mock_job) - mocker.patch.object(run_job, 'build_cwd', return_value='/tmp') - mocker.patch.object(run_job, 'should_use_proot', return_value=False) - mocker.patch.object(run_job, 'run_pexpect', return_value=('successful', 0)) - mocker.patch.object(run_job, 'open_fifo_write', return_value=None) + return run_job + + +@pytest.mark.skip(reason="Note: Ansible network modules don't yet support ssh-agent added keys.") +def test_net_cred_ssh_agent(run_job_net_cred, mock_job): + run_job = run_job_net_cred + run_job.run(mock_job.id) + + assert run_job.update_model.call_count == 4 + + job_args = run_job.update_model.call_args_list[1][1].get('job_args') + assert 'ssh-add' in job_args + assert 'ssh-agent' in job_args + assert 'network_credential' in job_args + + +def test_net_cred_job_model_env(run_job_net_cred, mock_job): + run_job = run_job_net_cred + run_job.run(mock_job.id) + + assert run_job.update_model.call_count == 4 + + job_args = run_job.update_model.call_args_list[1][1].get('job_env') + assert 'ANSIBLE_NET_USERNAME' in job_args + assert 'ANSIBLE_NET_PASSWORD' in job_args + assert 'ANSIBLE_NET_AUTHORIZE' in job_args + assert 'ANSIBLE_NET_AUTH_PASS' in job_args + assert 'ANSIBLE_NET_SSH_KEYFILE' in job_args - run_job.run(mock_job.id) - assert run_job.update_model.call_count == 3 - job_args = run_job.update_model.call_args_list[1][1].get('job_args') - assert 'ssh-add' in job_args - assert 'ssh-agent' in job_args - assert 'network_credential' in job_args diff --git a/awx/main/tests/unit/test_redact.py b/awx/main/tests/unit/test_redact.py index 3535869ee1..931ef72ebc 100644 --- a/awx/main/tests/unit/test_redact.py +++ b/awx/main/tests/unit/test_redact.py @@ -78,7 +78,6 @@ TEST_CLEARTEXT.append({ }) - # should 
redact sensitive usernames and passwords def test_uri_scm_simple_redacted(): for uri in TEST_URIS: @@ -88,12 +87,14 @@ def test_uri_scm_simple_redacted(): if uri.password: assert uri.username not in redacted_str + # should replace secret data with safe string, UriCleaner.REPLACE_STR def test_uri_scm_simple_replaced(): for uri in TEST_URIS: redacted_str = UriCleaner.remove_sensitive(str(uri)) assert redacted_str.count(UriCleaner.REPLACE_STR) == uri.get_secret_count() + # should redact multiple uris in text def test_uri_scm_multiple(): cleartext = '' @@ -108,6 +109,7 @@ def test_uri_scm_multiple(): if uri.password: assert uri.username not in redacted_str + # should replace multiple secret data with safe string def test_uri_scm_multiple_replaced(): cleartext = '' @@ -123,6 +125,7 @@ def test_uri_scm_multiple_replaced(): redacted_str = UriCleaner.remove_sensitive(cleartext) assert redacted_str.count(UriCleaner.REPLACE_STR) == find_count + # should redact and replace multiple secret data within a complex cleartext blob def test_uri_scm_cleartext_redact_and_replace(): for test_data in TEST_CLEARTEXT: diff --git a/awx/main/tests/unit/test_settings.py b/awx/main/tests/unit/test_settings.py index 2018771c63..d339262808 100644 --- a/awx/main/tests/unit/test_settings.py +++ b/awx/main/tests/unit/test_settings.py @@ -1,11 +1,9 @@ from split_settings.tools import include + def test_postprocess_auth_basic_enabled(): locals().update({'__file__': __file__}) include('../../../settings/defaults.py', scope=locals()) assert 'awx.api.authentication.LoggedBasicAuthentication' in locals()['REST_FRAMEWORK']['DEFAULT_AUTHENTICATION_CLASSES'] - locals().update({'AUTH_BASIC_ENABLED': False}) - include('../../../settings/postprocess.py', scope=locals()) - assert 'awx.api.authentication.LoggedBasicAuthentication' not in locals()['REST_FRAMEWORK']['DEFAULT_AUTHENTICATION_CLASSES'] diff --git a/awx/main/tests/unit/test_signals.py b/awx/main/tests/unit/test_signals.py index c3830ee525..cc14824a67 100644 --- a/awx/main/tests/unit/test_signals.py +++ b/awx/main/tests/unit/test_signals.py @@ -1,5 +1,6 @@ from awx.main import signals + class TestCleanupDetachedLabels: def test_cleanup_detached_labels_on_deleted_parent(self, mocker): mock_labels = [mocker.MagicMock(), mocker.MagicMock()] @@ -10,7 +11,7 @@ class TestCleanupDetachedLabels: mock_labels[1].is_candidate_for_detach.return_value = False signals.cleanup_detached_labels_on_deleted_parent(None, mock_instance) - + mock_labels[0].is_candidate_for_detach.assert_called_with() mock_labels[1].is_candidate_for_detach.assert_called_with() mock_labels[0].delete.assert_called_with() diff --git a/awx/main/tests/unit/test_tasks.py b/awx/main/tests/unit/test_tasks.py index fb491a015b..d8b6469f93 100644 --- a/awx/main/tests/unit/test_tasks.py +++ b/awx/main/tests/unit/test_tasks.py @@ -1,17 +1,15 @@ -import pytest from contextlib import contextmanager +import pytest +import yaml + from awx.main.models import ( UnifiedJob, Notification, ) -from awx.main.tasks import ( - send_notifications, - run_administrative_checks, -) - -from awx.main.task_engine import TaskSerializer +from awx.main import tasks +from awx.main.task_engine import TaskEnhancer @contextmanager @@ -20,51 +18,109 @@ def apply_patches(_patches): yield [p.stop() for p in _patches] + def test_send_notifications_not_list(): with pytest.raises(TypeError): - send_notifications(None) + tasks.send_notifications(None) + def test_send_notifications_job_id(mocker): with mocker.patch('awx.main.models.UnifiedJob.objects.get'): - 
send_notifications([], job_id=1) + tasks.send_notifications([], job_id=1) assert UnifiedJob.objects.get.called assert UnifiedJob.objects.get.called_with(id=1) + def test_send_notifications_list(mocker): patches = list() mock_job = mocker.MagicMock(spec=UnifiedJob) patches.append(mocker.patch('awx.main.models.UnifiedJob.objects.get', return_value=mock_job)) - mock_notification = mocker.MagicMock(spec=Notification, subject="test") - patches.append(mocker.patch('awx.main.models.Notification.objects.get', return_value=mock_notification)) + mock_notifications = [mocker.MagicMock(spec=Notification, subject="test", body={'hello': 'world'})] + patches.append(mocker.patch('awx.main.models.Notification.objects.filter', return_value=mock_notifications)) with apply_patches(patches): - send_notifications([1,2], job_id=1) - assert Notification.objects.get.call_count == 2 - assert mock_notification.status == "successful" - assert mock_notification.save.called + tasks.send_notifications([1,2], job_id=1) + assert Notification.objects.filter.call_count == 1 + assert mock_notifications[0].status == "successful" + assert mock_notifications[0].save.called assert mock_job.notifications.add.called - assert mock_job.notifications.add.called_with(mock_notification) + assert mock_job.notifications.add.called_with(*mock_notifications) + @pytest.mark.parametrize("current_instances,call_count", [(91, 2), (89,1)]) def test_run_admin_checks_usage(mocker, current_instances, call_count): patches = list() - patches.append(mocker.patch('awx.main.tasks.tower_settings')) patches.append(mocker.patch('awx.main.tasks.User')) - mock_ts = mocker.Mock(spec=TaskSerializer) - mock_ts.from_database.return_value = {'instance_count': 100, 'current_instances': current_instances} - patches.append(mocker.patch('awx.main.tasks.TaskSerializer', return_value=mock_ts)) + mock_te = mocker.Mock(spec=TaskEnhancer) + mock_te.validate_enhancements.return_value = {'instance_count': 100, 'current_instances': current_instances, 'date_warning': True} + patches.append(mocker.patch('awx.main.tasks.TaskEnhancer', return_value=mock_te)) mock_sm = mocker.Mock() patches.append(mocker.patch('awx.main.tasks.send_mail', wraps=mock_sm)) with apply_patches(patches): - run_administrative_checks() + tasks.run_administrative_checks() assert mock_sm.called if call_count == 2: assert '90%' in mock_sm.call_args_list[0][0][0] else: assert 'expire' in mock_sm.call_args_list[0][0][0] + + +def test_openstack_client_config_generation(mocker): + update = tasks.RunInventoryUpdate() + inventory_update = mocker.Mock(**{ + 'source': 'openstack', + 'credential.host': 'https://keystone.openstack.example.org', + 'credential.username': 'demo', + 'credential.password': 'secrete', + 'credential.project': 'demo-project', + 'credential.domain': None, + 'source_vars_dict': {} + }) + cloud_config = update.build_private_data(inventory_update) + cloud_credential = yaml.load(cloud_config['cloud_credential']) + assert cloud_credential['clouds'] == { + 'devstack': { + 'auth': { + 'auth_url': 'https://keystone.openstack.example.org', + 'password': 'secrete', + 'project_name': 'demo-project', + 'username': 'demo' + }, + 'private': True + } + } + + +@pytest.mark.parametrize("source,expected", [ + (False, False), (True, True) +]) +def test_openstack_client_config_generation_with_private_source_vars(mocker, source, expected): + update = tasks.RunInventoryUpdate() + inventory_update = mocker.Mock(**{ + 'source': 'openstack', + 'credential.host': 'https://keystone.openstack.example.org', + 
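+ # Only these credential.* fields plus source_vars_dict feed build_private_data(),
+ # which assembles the OpenStack cloud credential structure asserted below.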
'credential.username': 'demo', + 'credential.password': 'secrete', + 'credential.project': 'demo-project', + 'credential.domain': None, + 'source_vars_dict': {'private': source} + }) + cloud_config = update.build_private_data(inventory_update) + cloud_credential = yaml.load(cloud_config['cloud_credential']) + assert cloud_credential['clouds'] == { + 'devstack': { + 'auth': { + 'auth_url': 'https://keystone.openstack.example.org', + 'password': 'secrete', + 'project_name': 'demo-project', + 'username': 'demo' + }, + 'private': expected + } + } diff --git a/awx/main/tests/unit/test_unified_jobs.py b/awx/main/tests/unit/test_unified_jobs.py index edd6978b47..592c4783b1 100644 --- a/awx/main/tests/unit/test_unified_jobs.py +++ b/awx/main/tests/unit/test_unified_jobs.py @@ -23,6 +23,7 @@ def test_result_stdout_raw_handle_file__found(exists, open): assert result == 'my_file_handler' + # stdout file missing, job finished @mock.patch('os.path.exists', return_value=False) def test_result_stdout_raw_handle__missing(exists): @@ -35,6 +36,7 @@ def test_result_stdout_raw_handle__missing(exists): assert isinstance(result, StringIO) assert result.read() == 'stdout capture is missing' + # stdout file missing, job not finished @mock.patch('os.path.exists', return_value=False) def test_result_stdout_raw_handle__pending(exists): diff --git a/awx/main/tests/unit/test_validators.py b/awx/main/tests/unit/test_validators.py new file mode 100644 index 0000000000..ea47785ed0 --- /dev/null +++ b/awx/main/tests/unit/test_validators.py @@ -0,0 +1,98 @@ +from django.core.exceptions import ValidationError +from awx.main.validators import ( + validate_private_key, + validate_certificate, + validate_ssh_private_key, +) +from awx.main.tests.data.ssh import ( + TEST_SSH_RSA1_KEY_DATA, + TEST_SSH_KEY_DATA, + TEST_SSH_KEY_DATA_LOCKED, + TEST_OPENSSH_KEY_DATA, + TEST_OPENSSH_KEY_DATA_LOCKED, + TEST_SSH_CERT_KEY, +) + +import pytest + + +def test_valid_rsa_key(): + valid_key = TEST_SSH_KEY_DATA + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert not pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert not pem_objects[0]['key_enc'] + + +def test_valid_locked_rsa_key(): + valid_key = TEST_SSH_KEY_DATA_LOCKED + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert pem_objects[0]['key_enc'] + + +def test_invalid_rsa_key(): + invalid_key = TEST_SSH_KEY_DATA.replace('-----END', '----END') + with pytest.raises(ValidationError): + validate_private_key(invalid_key) + with pytest.raises(ValidationError): + validate_certificate(invalid_key) + with pytest.raises(ValidationError): + validate_ssh_private_key(invalid_key) + + +def test_valid_openssh_key(): + valid_key = TEST_OPENSSH_KEY_DATA + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert not pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert not pem_objects[0]['key_enc'] + + +def test_valid_locked_openssh_key(): + valid_key = 
TEST_OPENSSH_KEY_DATA_LOCKED + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert pem_objects[0]['key_enc'] + + +def test_valid_rsa1_key(): + valid_key = TEST_SSH_RSA1_KEY_DATA + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa1' + assert not pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa1' + assert not pem_objects[0]['key_enc'] + + +def test_cert_with_key(): + cert_with_key = TEST_SSH_CERT_KEY + with pytest.raises(ValidationError): + validate_private_key(cert_with_key) + with pytest.raises(ValidationError): + validate_certificate(cert_with_key) + pem_objects = validate_ssh_private_key(cert_with_key) + assert pem_objects[0]['type'] == 'CERTIFICATE' + assert pem_objects[1]['key_type'] == 'rsa' + assert not pem_objects[1]['key_enc'] diff --git a/awx/main/tests/unit/utils/__init__.py b/awx/main/tests/unit/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/tests/unit/utils/common/test_common.py b/awx/main/tests/unit/utils/common/test_common.py new file mode 100644 index 0000000000..6542d64cf0 --- /dev/null +++ b/awx/main/tests/unit/utils/common/test_common.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible, Inc. +# All Rights Reserved. + +from awx.conf.models import Setting +from awx.main.utils import common + + +def test_encrypt_field(): + field = Setting(pk=123, value='ANSIBLE') + encrypted = field.value = common.encrypt_field(field, 'value') + assert encrypted == '$encrypted$AES$Ey83gcmMuBBT1OEq2lepnw==' + assert common.decrypt_field(field, 'value') == 'ANSIBLE' + + +def test_encrypt_field_without_pk(): + field = Setting(value='ANSIBLE') + encrypted = field.value = common.encrypt_field(field, 'value') + assert encrypted == '$encrypted$AES$8uIzEoGyY6QJwoTWbMFGhw==' + assert common.decrypt_field(field, 'value') == 'ANSIBLE' + + +def test_encrypt_field_with_unicode_string(): + value = u'Iñtërnâtiônàlizætiøn' + field = Setting(value=value) + encrypted = field.value = common.encrypt_field(field, 'value') + assert encrypted == '$encrypted$UTF8$AES$AESQbqOefpYcLC7x8yZ2aWG4FlXlS66JgavLbDp/DSM=' + assert common.decrypt_field(field, 'value') == value + + +def test_encrypt_field_force_disable_unicode(): + value = u"NothingSpecial" + field = Setting(value=value) + encrypted = field.value = common.encrypt_field(field, 'value', skip_utf8=True) + assert "UTF8" not in encrypted + assert common.decrypt_field(field, 'value') == value + + +def test_encrypt_subfield(): + field = Setting(value={'name': 'ANSIBLE'}) + encrypted = field.value = common.encrypt_field(field, 'value', subfield='name') + assert encrypted == '$encrypted$AES$8uIzEoGyY6QJwoTWbMFGhw==' + assert common.decrypt_field(field, 'value', subfield='name') == 'ANSIBLE' + + +def test_encrypt_field_with_ask(): + encrypted = common.encrypt_field(Setting(value='ASK'), 'value', ask=True) + assert encrypted == 'ASK' + + +def test_encrypt_field_with_empty_value(): + encrypted = common.encrypt_field(Setting(value=None), 'value') + assert encrypted is None diff --git a/awx/main/tests/unit/utils/test_handlers.py 
b/awx/main/tests/unit/utils/test_handlers.py new file mode 100644 index 0000000000..3de3b2e7b7 --- /dev/null +++ b/awx/main/tests/unit/utils/test_handlers.py @@ -0,0 +1,234 @@ +import base64 +import json +import logging + +from django.conf import LazySettings +import pytest +import requests +from requests_futures.sessions import FuturesSession + +from awx.main.utils.handlers import BaseHTTPSHandler as HTTPSHandler, PARAM_NAMES +from awx.main.utils.formatters import LogstashFormatter + + +@pytest.fixture() +def dummy_log_record(): + return logging.LogRecord( + 'awx', # logger name + 20, # loglevel INFO + './awx/some/module.py', # pathname + 100, # lineno + 'User joe logged in', # msg + tuple(), # args, + None # exc_info + ) + + +@pytest.fixture() +def ok200_adapter(): + class OK200Adapter(requests.adapters.HTTPAdapter): + requests = [] + + def send(self, request, **kwargs): + self.requests.append(request) + resp = requests.models.Response() + resp.status_code = 200 + resp.raw = '200 OK' + resp.request = request + return resp + + return OK200Adapter() + + +def test_https_logging_handler_requests_sync_implementation(): + handler = HTTPSHandler(async=False) + assert not isinstance(handler.session, FuturesSession) + assert isinstance(handler.session, requests.Session) + + +def test_https_logging_handler_requests_async_implementation(): + handler = HTTPSHandler(async=True) + assert isinstance(handler.session, FuturesSession) + + +@pytest.mark.parametrize('param', PARAM_NAMES.keys()) +def test_https_logging_handler_defaults(param): + handler = HTTPSHandler() + assert hasattr(handler, param) and getattr(handler, param) is None + + +@pytest.mark.parametrize('param', PARAM_NAMES.keys()) +def test_https_logging_handler_kwargs(param): + handler = HTTPSHandler(**{param: 'EXAMPLE'}) + assert hasattr(handler, param) and getattr(handler, param) == 'EXAMPLE' + + +@pytest.mark.parametrize('param, django_settings_name', PARAM_NAMES.items()) +def test_https_logging_handler_from_django_settings(param, django_settings_name): + settings = LazySettings() + settings.configure(**{ + django_settings_name: 'EXAMPLE' + }) + handler = HTTPSHandler.from_django_settings(settings) + assert hasattr(handler, param) and getattr(handler, param) == 'EXAMPLE' + + +def test_https_logging_handler_logstash_auth_info(): + handler = HTTPSHandler(message_type='logstash', username='bob', password='ansible') + handler.add_auth_information() + assert isinstance(handler.session.auth, requests.auth.HTTPBasicAuth) + assert handler.session.auth.username == 'bob' + assert handler.session.auth.password == 'ansible' + + +def test_https_logging_handler_splunk_auth_info(): + handler = HTTPSHandler(message_type='splunk', password='ansible') + handler.add_auth_information() + assert handler.session.headers['Authorization'] == 'Splunk ansible' + assert handler.session.headers['Content-Type'] == 'application/json' + + +@pytest.mark.parametrize('host, port, normalized', [ + ('localhost', None, 'http://localhost'), + ('localhost', 80, 'http://localhost'), + ('localhost', 8080, 'http://localhost:8080'), + ('http://localhost', None, 'http://localhost'), + ('http://localhost', 80, 'http://localhost'), + ('http://localhost', 8080, 'http://localhost:8080'), + ('https://localhost', 443, 'https://localhost:443') +]) +def test_https_logging_handler_http_host_format(host, port, normalized): + handler = HTTPSHandler(host=host, port=port) + assert handler.get_http_host() == normalized + + +@pytest.mark.parametrize('params, logger_name, expected', [ + 
({'enabled_flag': False}, 'awx.main', True), # skip all records if enabled_flag = False + ({'host': '', 'enabled_flag': True}, 'awx.main', True), # skip all records if the host is undefined + ({'host': '127.0.0.1', 'enabled_flag': True}, 'awx.main', False), + ({'host': '127.0.0.1', 'enabled_flag': True, 'enabled_loggers': ['abc']}, 'awx.analytics.xyz', True), + ({'host': '127.0.0.1', 'enabled_flag': True, 'enabled_loggers': ['xyz']}, 'awx.analytics.xyz', False), +]) +def test_https_logging_handler_skip_log(params, logger_name, expected): + handler = HTTPSHandler(**params) + assert handler.skip_log(logger_name) is expected + + +@pytest.mark.parametrize('message_type, async', [ + ('logstash', False), + ('logstash', True), + ('splunk', False), + ('splunk', True), +]) +def test_https_logging_handler_emit(ok200_adapter, dummy_log_record, + message_type, async): + handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, + message_type=message_type, + enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking'], + async=async) + handler.setFormatter(LogstashFormatter()) + handler.session.mount('http://', ok200_adapter) + async_futures = handler.emit(dummy_log_record) + [future.result() for future in async_futures] + + assert len(ok200_adapter.requests) == 1 + request = ok200_adapter.requests[0] + assert request.url == 'http://127.0.0.1/' + assert request.method == 'POST' + body = json.loads(request.body) + + if message_type == 'logstash': + # A username + password weren't used, so this header should be missing + assert 'Authorization' not in request.headers + + if message_type == 'splunk': + # splunk messages are nested under the 'event' key + body = body['event'] + assert request.headers['Authorization'] == 'Splunk None' + + assert body['level'] == 'INFO' + assert body['logger_name'] == 'awx' + assert body['message'] == 'User joe logged in' + + +@pytest.mark.parametrize('async', (True, False)) +def test_https_logging_handler_emit_logstash_with_creds(ok200_adapter, + dummy_log_record, async): + handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, + username='user', password='pass', + message_type='logstash', + enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking'], + async=async) + handler.setFormatter(LogstashFormatter()) + handler.session.mount('http://', ok200_adapter) + async_futures = handler.emit(dummy_log_record) + [future.result() for future in async_futures] + + assert len(ok200_adapter.requests) == 1 + request = ok200_adapter.requests[0] + assert request.headers['Authorization'] == 'Basic %s' % base64.b64encode("user:pass") + + +@pytest.mark.parametrize('async', (True, False)) +def test_https_logging_handler_emit_splunk_with_creds(ok200_adapter, + dummy_log_record, async): + handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, + password='pass', message_type='splunk', + enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking'], + async=async) + handler.setFormatter(LogstashFormatter()) + handler.session.mount('http://', ok200_adapter) + async_futures = handler.emit(dummy_log_record) + [future.result() for future in async_futures] + + assert len(ok200_adapter.requests) == 1 + request = ok200_adapter.requests[0] + assert request.headers['Authorization'] == 'Splunk pass' + + +def test_https_logging_handler_emit_one_record_per_fact(ok200_adapter): + handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, + message_type='logstash', indv_facts=True, + enabled_loggers=['awx', 'activity_stream', 'job_events', 
'system_tracking']) + handler.setFormatter(LogstashFormatter()) + handler.session.mount('http://', ok200_adapter) + record = logging.LogRecord( + 'awx.analytics.system_tracking', # logger name + 20, # loglevel INFO + './awx/some/module.py', # pathname + 100, # lineno + None, # msg + tuple(), # args, + None # exc_info + ) + record.module_name = 'packages' + record.facts_data = [{ + "name": "ansible", + "version": "2.2.1.0" + }, { + "name": "ansible-tower", + "version": "3.1.0" + }] + async_futures = handler.emit(record) + [future.result() for future in async_futures] + + assert len(ok200_adapter.requests) == 2 + requests = sorted(ok200_adapter.requests, key=lambda request: json.loads(request.body)['version']) + + request = requests[0] + assert request.url == 'http://127.0.0.1/' + assert request.method == 'POST' + body = json.loads(request.body) + assert body['level'] == 'INFO' + assert body['logger_name'] == 'awx.analytics.system_tracking' + assert body['name'] == 'ansible' + assert body['version'] == '2.2.1.0' + + request = requests[1] + assert request.url == 'http://127.0.0.1/' + assert request.method == 'POST' + body = json.loads(request.body) + assert body['level'] == 'INFO' + assert body['logger_name'] == 'awx.analytics.system_tracking' + assert body['name'] == 'ansible-tower' + assert body['version'] == '3.1.0' diff --git a/awx/main/tests/unit/utils/test_reload.py b/awx/main/tests/unit/utils/test_reload.py new file mode 100644 index 0000000000..d1f3291753 --- /dev/null +++ b/awx/main/tests/unit/utils/test_reload.py @@ -0,0 +1,37 @@ +# awx.main.utils.reload +from awx.main.utils import reload + + +def test_produce_supervisor_command(mocker): + with mocker.patch.object(reload.subprocess, 'Popen'): + reload._supervisor_service_restart(['beat', 'callback', 'fact']) + reload.subprocess.Popen.assert_called_once_with( + ['supervisorctl', 'restart', 'tower-processes:receiver', 'tower-processes:factcacher']) + + +def test_routing_of_service_restarts_works(mocker): + ''' + Test that the parent restart method calls the appropriate + service restart methods, depending on which services are given in the args + ''' + with mocker.patch.object(reload, '_uwsgi_reload'),\ + mocker.patch.object(reload, '_reset_celery_thread_pool'),\ + mocker.patch.object(reload, '_supervisor_service_restart'): + reload.restart_local_services(['uwsgi', 'celery', 'flower', 'daphne']) + reload._uwsgi_reload.assert_called_once_with() + reload._reset_celery_thread_pool.assert_called_once_with() + reload._supervisor_service_restart.assert_called_once_with(['flower', 'daphne']) + + +def test_routing_of_service_restarts_disables(mocker): + ''' + Test that restart methods are not called for services absent from the args + ''' + with mocker.patch.object(reload, '_uwsgi_reload'),\ + mocker.patch.object(reload, '_reset_celery_thread_pool'),\ + mocker.patch.object(reload, '_supervisor_service_restart'): + reload.restart_local_services(['flower']) + reload._uwsgi_reload.assert_not_called() + reload._reset_celery_thread_pool.assert_not_called() + reload._supervisor_service_restart.assert_called_once_with(['flower']) + diff --git a/awx/main/utils.py b/awx/main/utils.py deleted file mode 100644 index f1a71fbcfc..0000000000 --- a/awx/main/utils.py +++ /dev/null @@ -1,568 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved.
- -# Python -import base64 -import hashlib -import logging -import os -import re -import subprocess -import stat -import sys -import urllib -import urlparse -import threading -import contextlib -import tempfile - -# Django REST Framework -from rest_framework.exceptions import ParseError, PermissionDenied -from django.utils.encoding import smart_str -from django.core.urlresolvers import reverse -from django.apps import apps - -# PyCrypto -from Crypto.Cipher import AES - -logger = logging.getLogger('awx.main.utils') - -__all__ = ['get_object_or_400', 'get_object_or_403', 'camelcase_to_underscore', - 'get_ansible_version', 'get_ssh_version', 'get_awx_version', 'update_scm_url', - 'get_type_for_model', 'get_model_for_type', 'to_python_boolean', - 'ignore_inventory_computed_fields', 'ignore_inventory_group_removal', - '_inventory_updates', 'get_pk_from_dict', 'getattrd', 'NoDefaultProvided', - 'get_current_apps', 'set_current_apps'] - - -def get_object_or_400(klass, *args, **kwargs): - ''' - Return a single object from the given model or queryset based on the query - params, otherwise raise an exception that will return in a 400 response. - ''' - from django.shortcuts import _get_queryset - queryset = _get_queryset(klass) - try: - return queryset.get(*args, **kwargs) - except queryset.model.DoesNotExist as e: - raise ParseError(*e.args) - except queryset.model.MultipleObjectsReturned as e: - raise ParseError(*e.args) - - -def get_object_or_403(klass, *args, **kwargs): - ''' - Return a single object from the given model or queryset based on the query - params, otherwise raise an exception that will return in a 403 response. - ''' - from django.shortcuts import _get_queryset - queryset = _get_queryset(klass) - try: - return queryset.get(*args, **kwargs) - except queryset.model.DoesNotExist as e: - raise PermissionDenied(*e.args) - except queryset.model.MultipleObjectsReturned as e: - raise PermissionDenied(*e.args) - -def to_python_boolean(value, allow_none=False): - value = unicode(value) - if value.lower() in ('true', '1', 't'): - return True - elif value.lower() in ('false', '0', 'f'): - return False - elif allow_none and value.lower() in ('none', 'null'): - return None - else: - raise ValueError(u'Unable to convert "%s" to boolean' % unicode(value)) - -def camelcase_to_underscore(s): - ''' - Convert CamelCase names to lowercase_with_underscore. - ''' - s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s) - return s.lower().strip('_') - - -class RequireDebugTrueOrTest(logging.Filter): - ''' - Logging filter to output when in DEBUG mode or running tests. - ''' - - def filter(self, record): - from django.conf import settings - return settings.DEBUG or 'test' in sys.argv - - -def get_ansible_version(): - ''' - Return Ansible version installed. - ''' - try: - proc = subprocess.Popen(['ansible', '--version'], - stdout=subprocess.PIPE) - result = proc.communicate()[0] - stripped_result = result.split('\n')[0].replace('ansible', '').strip() - return stripped_result - except: - return 'unknown' - -def get_ssh_version(): - ''' - Return SSH version installed. - ''' - try: - proc = subprocess.Popen(['ssh', '-V'], - stderr=subprocess.PIPE) - result = proc.communicate()[1] - return result.split(" ")[0].split("_")[1] - except: - return 'unknown' - -def get_awx_version(): - ''' - Return Ansible Tower version as reported by setuptools. 
- ''' - from awx import __version__ - try: - import pkg_resources - return pkg_resources.require('ansible_tower')[0].version - except: - return __version__ - - -def get_encryption_key(instance, field_name): - ''' - Generate key for encrypted password based on instance pk and field name. - ''' - from django.conf import settings - h = hashlib.sha1() - h.update(settings.SECRET_KEY) - h.update(str(instance.pk)) - h.update(field_name) - return h.digest()[:16] - -def encrypt_field(instance, field_name, ask=False, subfield=None): - ''' - Return content of the given instance and field name encrypted. - ''' - value = getattr(instance, field_name) - if isinstance(value, dict) and subfield is not None: - value = value[subfield] - if not value or value.startswith('$encrypted$') or (ask and value == 'ASK'): - return value - value = smart_str(value) - key = get_encryption_key(instance, field_name) - cipher = AES.new(key, AES.MODE_ECB) - while len(value) % cipher.block_size != 0: - value += '\x00' - encrypted = cipher.encrypt(value) - b64data = base64.b64encode(encrypted) - return '$encrypted$%s$%s' % ('AES', b64data) - - -def decrypt_field(instance, field_name, subfield=None): - ''' - Return content of the given instance and field name decrypted. - ''' - value = getattr(instance, field_name) - if isinstance(value, dict) and subfield is not None: - value = value[subfield] - if not value or not value.startswith('$encrypted$'): - return value - algo, b64data = value[len('$encrypted$'):].split('$', 1) - if algo != 'AES': - raise ValueError('unsupported algorithm: %s' % algo) - encrypted = base64.b64decode(b64data) - key = get_encryption_key(instance, field_name) - cipher = AES.new(key, AES.MODE_ECB) - value = cipher.decrypt(encrypted) - return value.rstrip('\x00') - - -def update_scm_url(scm_type, url, username=True, password=True, - check_special_cases=True, scp_format=False): - ''' - Update the given SCM URL to add/replace/remove the username/password. When - username/password is True, preserve existing username/password, when - False (None, '', etc.), remove any existing username/password, otherwise - replace username/password. Also validates the given URL. - ''' - # Handle all of the URL formats supported by the SCM systems: - # git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS - # hg: http://www.selenic.com/mercurial/hg.1.html#url-paths - # svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls - if scm_type not in ('git', 'hg', 'svn'): - raise ValueError('Unsupported SCM type "%s"' % str(scm_type)) - if not url.strip(): - return '' - parts = urlparse.urlsplit(url) - try: - parts.port - except ValueError: - raise ValueError('Invalid %s URL' % scm_type) - if parts.scheme == 'git+ssh' and not scp_format: - raise ValueError('Unsupported %s URL' % scm_type) - - if '://' not in url: - # Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/). - if scm_type == 'git' and ':' in url: - if '@' in url: - userpass, hostpath = url.split('@', 1) - else: - userpass, hostpath = '', url - if hostpath.count(':') > 1: - raise ValueError('Invalid %s URL' % scm_type) - host, path = hostpath.split(':', 1) - #if not path.startswith('/') and not path.startswith('~/'): - # path = '~/%s' % path - #if path.startswith('/'): - # path = path.lstrip('/') - hostpath = '/'.join([host, path]) - modified_url = '@'.join(filter(None, [userpass, hostpath])) - # git+ssh scheme identifies URLs that should be converted back to - # SCP style before passed to git module. 
- parts = urlparse.urlsplit('git+ssh://%s' % modified_url) - # Handle local paths specified without file scheme (e.g. /path/to/foo). - # Only supported by git and hg. (not currently allowed) - elif scm_type in ('git', 'hg'): - if not url.startswith('/'): - parts = urlparse.urlsplit('file:///%s' % url) - else: - parts = urlparse.urlsplit('file://%s' % url) - else: - raise ValueError('Invalid %s URL' % scm_type) - - # Validate that scheme is valid for given scm_type. - scm_type_schemes = { - 'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps'), - 'hg': ('http', 'https', 'ssh'), - 'svn': ('http', 'https', 'svn', 'svn+ssh'), - } - if parts.scheme not in scm_type_schemes.get(scm_type, ()): - raise ValueError('Unsupported %s URL' % scm_type) - if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'): - raise ValueError('Unsupported host "%s" for file:// URL' % (parts.netloc)) - elif parts.scheme != 'file' and not parts.netloc: - raise ValueError('Host is required for %s URL' % parts.scheme) - if username is True: - netloc_username = parts.username or '' - elif username: - netloc_username = username - else: - netloc_username = '' - if password is True: - netloc_password = parts.password or '' - elif password: - netloc_password = password - else: - netloc_password = '' - - # Special handling for github/bitbucket SSH URLs. - if check_special_cases: - special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org') - if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git': - raise ValueError('Username must be "git" for SSH access to %s.' % parts.hostname) - if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password: - #raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname) - netloc_password = '' - special_hg_hosts = ('bitbucket.org', 'altssh.bitbucket.org') - if scm_type == 'hg' and parts.scheme == 'ssh' and parts.hostname in special_hg_hosts and netloc_username != 'hg': - raise ValueError('Username must be "hg" for SSH access to %s.' % parts.hostname) - if scm_type == 'hg' and parts.scheme == 'ssh' and netloc_password: - #raise ValueError('Password not supported for SSH with Mercurial.') - netloc_password = '' - - if netloc_username and parts.scheme != 'file': - netloc = u':'.join([urllib.quote(x) for x in (netloc_username, netloc_password) if x]) - else: - netloc = u'' - netloc = u'@'.join(filter(None, [netloc, parts.hostname])) - if parts.port: - netloc = u':'.join([netloc, unicode(parts.port)]) - new_url = urlparse.urlunsplit([parts.scheme, netloc, parts.path, - parts.query, parts.fragment]) - if scp_format and parts.scheme == 'git+ssh': - new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1) - return new_url - - - -def get_allowed_fields(obj, serializer_mapping): - from django.contrib.auth.models import User - - if serializer_mapping is not None and obj.__class__ in serializer_mapping: - serializer_actual = serializer_mapping[obj.__class__]() - allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id'] - else: - allowed_fields = [x.name for x in obj._meta.fields] - - if isinstance(obj, User): - field_blacklist = ['last_login'] - allowed_fields = [f for f in allowed_fields if f not in field_blacklist] - - return allowed_fields - -def model_instance_diff(old, new, serializer_mapping=None): - """ - Calculate the differences between two model instances. 
One of the instances may be None (i.e., a newly - created model or deleted model). This will cause all fields with a value to have changed (from None). - serializer_mapping are used to determine read-only fields. - When provided, read-only fields will not be included in the resulting dictionary - """ - from django.db.models import Model - from awx.main.models.credential import Credential - - if not(old is None or isinstance(old, Model)): - raise TypeError('The supplied old instance is not a valid model instance.') - if not(new is None or isinstance(new, Model)): - raise TypeError('The supplied new instance is not a valid model instance.') - - diff = {} - - allowed_fields = get_allowed_fields(new, serializer_mapping) - - for field in allowed_fields: - old_value = getattr(old, field, None) - new_value = getattr(new, field, None) - - if old_value != new_value and field not in Credential.PASSWORD_FIELDS: - if type(old_value) not in (bool, int, type(None)): - old_value = smart_str(old_value) - if type(new_value) not in (bool, int, type(None)): - new_value = smart_str(new_value) - diff[field] = (old_value, new_value) - elif old_value != new_value and field in Credential.PASSWORD_FIELDS: - diff[field] = (u"hidden", u"hidden") - - if len(diff) == 0: - diff = None - - return diff - - -def model_to_dict(obj, serializer_mapping=None): - """ - Serialize a model instance to a dictionary as best as possible - serializer_mapping are used to determine read-only fields. - When provided, read-only fields will not be included in the resulting dictionary - """ - from awx.main.models.credential import Credential - attr_d = {} - - allowed_fields = get_allowed_fields(obj, serializer_mapping) - - for field in obj._meta.fields: - if field.name not in allowed_fields: - continue - if field.name not in Credential.PASSWORD_FIELDS: - field_val = getattr(obj, field.name, None) - if type(field_val) not in (bool, int, type(None)): - attr_d[field.name] = smart_str(field_val) - else: - attr_d[field.name] = field_val - else: - attr_d[field.name] = "hidden" - return attr_d - - -def get_type_for_model(model): - ''' - Return type name for a given model class. - ''' - opts = model._meta.concrete_model._meta - return camelcase_to_underscore(opts.object_name) - - -def get_model_for_type(type): - ''' - Return model class for a given type name. 
- ''' - from django.db.models import Q - from django.contrib.contenttypes.models import ContentType - for ct in ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user')): - ct_model = ct.model_class() - if not ct_model: - continue - ct_type = get_type_for_model(ct_model) - if type == ct_type: - return ct_model - - -def get_system_task_capacity(): - ''' - Measure system memory and use it as a baseline for determining the system's capacity - ''' - from django.conf import settings - if hasattr(settings, 'SYSTEM_TASK_CAPACITY'): - return settings.SYSTEM_TASK_CAPACITY - proc = subprocess.Popen(['free', '-m'], stdout=subprocess.PIPE) - out,err = proc.communicate() - total_mem_value = out.split()[7] - if int(total_mem_value) <= 2048: - return 50 - return 50 + ((int(total_mem_value) / 1024) - 2) * 75 - - -def emit_websocket_notification(endpoint, event, payload, token_key=None): - from awx.main.socket import Socket - - try: - with Socket('websocket', 'w', nowait=True, logger=logger) as websocket: - if token_key: - payload['token_key'] = token_key - payload['event'] = event - payload['endpoint'] = endpoint - websocket.publish(payload) - except Exception: - pass - -_inventory_updates = threading.local() - - -@contextlib.contextmanager -def ignore_inventory_computed_fields(): - ''' - Context manager to ignore updating inventory computed fields. - ''' - try: - previous_value = getattr(_inventory_updates, 'is_updating', False) - _inventory_updates.is_updating = True - yield - finally: - _inventory_updates.is_updating = previous_value - - -@contextlib.contextmanager -def ignore_inventory_group_removal(): - ''' - Context manager to ignore moving groups/hosts when group is deleted. - ''' - try: - previous_value = getattr(_inventory_updates, 'is_removing', False) - _inventory_updates.is_removing = True - yield - finally: - _inventory_updates.is_removing = previous_value - -def check_proot_installed(): - ''' - Check that proot is installed. - ''' - from django.conf import settings - cmd = [getattr(settings, 'AWX_PROOT_CMD', 'proot'), '--version'] - try: - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - proc.communicate() - return bool(proc.returncode == 0) - except (OSError, ValueError): - return False - -def build_proot_temp_dir(): - ''' - Create a temporary directory for proot to use. 
- ''' - from awx.main.conf import tower_settings - path = tempfile.mkdtemp(prefix='ansible_tower_proot_', dir=tower_settings.AWX_PROOT_BASE_PATH) - os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) - return path - -def wrap_args_with_proot(args, cwd, **kwargs): - ''' - Wrap existing command line with proot to restrict access to: - - /etc/tower (to prevent obtaining db info or secret key) - - /var/lib/awx (except for current project) - - /var/log/tower - - /var/log/supervisor - - /tmp (except for own tmp files) - ''' - from awx.main.conf import tower_settings - from django.conf import settings - new_args = [getattr(settings, 'AWX_PROOT_CMD', 'proot'), '-v', - str(getattr(settings, 'AWX_PROOT_VERBOSITY', '0')), '-r', '/'] - hide_paths = ['/etc/tower', '/var/lib/awx', '/var/log', - tempfile.gettempdir(), settings.PROJECTS_ROOT, - settings.JOBOUTPUT_ROOT] - hide_paths.extend(getattr(tower_settings, 'AWX_PROOT_HIDE_PATHS', None) or []) - for path in sorted(set(hide_paths)): - if not os.path.exists(path): - continue - if os.path.isdir(path): - new_path = tempfile.mkdtemp(dir=kwargs['proot_temp_dir']) - os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) - else: - handle, new_path = tempfile.mkstemp(dir=kwargs['proot_temp_dir']) - os.close(handle) - os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR) - new_args.extend(['-b', '%s:%s' % (new_path, path)]) - if 'private_data_dir' in kwargs: - show_paths = [cwd, kwargs['private_data_dir']] - else: - show_paths = [cwd] - if settings.ANSIBLE_USE_VENV: - show_paths.append(settings.ANSIBLE_VENV_PATH) - if settings.TOWER_USE_VENV: - show_paths.append(settings.TOWER_VENV_PATH) - show_paths.extend(getattr(tower_settings, 'AWX_PROOT_SHOW_PATHS', None) or []) - for path in sorted(set(show_paths)): - if not os.path.exists(path): - continue - new_args.extend(['-b', '%s:%s' % (path, path)]) - new_args.extend(['-w', cwd]) - new_args.extend(args) - return new_args - -def get_pk_from_dict(_dict, key): - ''' - Helper for obtaining a pk from user data dict or None if not present. - ''' - try: - return int(_dict[key]) - except (TypeError, KeyError, ValueError): - return None - -def build_url(*args, **kwargs): - get = kwargs.pop('get', {}) - url = reverse(*args, **kwargs) - if get: - url += '?' + urllib.urlencode(get) - return url - -def timestamp_apiformat(timestamp): - timestamp = timestamp.isoformat() - if timestamp.endswith('+00:00'): - timestamp = timestamp[:-6] + 'Z' - return timestamp - -# damn you python 2.6 -def timedelta_total_seconds(timedelta): - return ( - timedelta.microseconds + 0.0 + - (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 - - -class NoDefaultProvided(object): - pass - -def getattrd(obj, name, default=NoDefaultProvided): - """ - Same as getattr(), but allows dot notation lookup - Discussed in: - http://stackoverflow.com/questions/11975781 - """ - - try: - return reduce(getattr, name.split("."), obj) - except AttributeError: - if default != NoDefaultProvided: - return default - raise - -current_apps = apps -def set_current_apps(apps): - global current_apps - current_apps = apps - -def get_current_apps(): - global current_apps - return current_apps diff --git a/awx/main/utils/__init__.py b/awx/main/utils/__init__.py new file mode 100644 index 0000000000..45b0a0131a --- /dev/null +++ b/awx/main/utils/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2017 Ansible Tower by Red Hat +# All Rights Reserved. 
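+
+# Illustrative round trip through the crypto helpers re-exported below (values
+# mirror awx/main/tests/unit/utils/common/test_common.py above; this is an
+# editor's sketch, not part of the original hunk):
+#
+#     from awx.conf.models import Setting
+#     from awx.main.utils import common
+#
+#     s = Setting(pk=123, value='ANSIBLE')
+#     s.value = common.encrypt_field(s, 'value')   # '$encrypted$AES$...'
+#     assert common.decrypt_field(s, 'value') == 'ANSIBLE'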
+ +# AWX +from awx.main.utils.common import * # noqa + +# Fields that didn't get included in __all__ +# TODO: after initial commit of file move to devel, these can be added +# to common.py __all__ and removed here +from awx.main.utils.common import ( # noqa + RequireDebugTrueOrTest, + encrypt_field, + parse_yaml_or_json, + decrypt_field, + build_url, + timestamp_apiformat, + model_instance_diff, + model_to_dict, + check_proot_installed, + build_proot_temp_dir, + wrap_args_with_proot, + get_system_task_capacity, + decrypt_field_value +) + diff --git a/awx/main/utils/common.py b/awx/main/utils/common.py new file mode 100644 index 0000000000..49d92b5f9c --- /dev/null +++ b/awx/main/utils/common.py @@ -0,0 +1,869 @@ +# Copyright (c) 2015 Ansible, Inc. +# All Rights Reserved. + +# Python +import base64 +import hashlib +import json +import yaml +import logging +import os +import re +import subprocess +import stat +import sys +import urllib +import urlparse +import threading +import contextlib +import tempfile + +# Decorator +from decorator import decorator + +import six + +# Django +from django.utils.translation import ugettext_lazy as _ +from django.db.models import ManyToManyField + +# Django REST Framework +from rest_framework.exceptions import ParseError, PermissionDenied +from django.utils.encoding import smart_str +from django.utils.text import slugify +from django.core.urlresolvers import reverse +from django.apps import apps + +# PyCrypto +from Crypto.Cipher import AES + +logger = logging.getLogger('awx.main.utils') + +__all__ = ['get_object_or_400', 'get_object_or_403', 'camelcase_to_underscore', 'memoize', + 'get_ansible_version', 'get_ssh_version', 'get_awx_version', 'update_scm_url', + 'get_type_for_model', 'get_model_for_type', 'copy_model_by_class', + 'copy_m2m_relationships' ,'cache_list_capabilities', 'to_python_boolean', + 'ignore_inventory_computed_fields', 'ignore_inventory_group_removal', + '_inventory_updates', 'get_pk_from_dict', 'getattrd', 'NoDefaultProvided', + 'get_current_apps', 'set_current_apps', 'OutputEventFilter', + 'callback_filter_out_ansible_extra_vars',] + + +def get_object_or_400(klass, *args, **kwargs): + ''' + Return a single object from the given model or queryset based on the query + params, otherwise raise an exception that will return in a 400 response. + ''' + from django.shortcuts import _get_queryset + queryset = _get_queryset(klass) + try: + return queryset.get(*args, **kwargs) + except queryset.model.DoesNotExist as e: + raise ParseError(*e.args) + except queryset.model.MultipleObjectsReturned as e: + raise ParseError(*e.args) + + +def get_object_or_403(klass, *args, **kwargs): + ''' + Return a single object from the given model or queryset based on the query + params, otherwise raise an exception that will return in a 403 response. 
+ ''' + from django.shortcuts import _get_queryset + queryset = _get_queryset(klass) + try: + return queryset.get(*args, **kwargs) + except queryset.model.DoesNotExist as e: + raise PermissionDenied(*e.args) + except queryset.model.MultipleObjectsReturned as e: + raise PermissionDenied(*e.args) + + +def to_python_boolean(value, allow_none=False): + value = unicode(value) + if value.lower() in ('true', '1', 't'): + return True + elif value.lower() in ('false', '0', 'f'): + return False + elif allow_none and value.lower() in ('none', 'null'): + return None + else: + raise ValueError(_(u'Unable to convert "%s" to boolean') % unicode(value)) + + +def camelcase_to_underscore(s): + ''' + Convert CamelCase names to lowercase_with_underscore. + ''' + s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s) + return s.lower().strip('_') + + +class RequireDebugTrueOrTest(logging.Filter): + ''' + Logging filter to output when in DEBUG mode or running tests. + ''' + + def filter(self, record): + from django.conf import settings + return settings.DEBUG or 'test' in sys.argv + + +def memoize(ttl=60, cache_key=None): + ''' + Decorator to wrap a function and cache its result. + ''' + from django.core.cache import cache + + def _memoizer(f, *args, **kwargs): + key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs)) + value = cache.get(key) + if value is None: + value = f(*args, **kwargs) + cache.set(key, value, ttl) + return value + return decorator(_memoizer) + + +@memoize() +def get_ansible_version(): + ''' + Return Ansible version installed. + ''' + try: + proc = subprocess.Popen(['ansible', '--version'], + stdout=subprocess.PIPE) + result = proc.communicate()[0] + return result.split('\n')[0].replace('ansible', '').strip() + except: + return 'unknown' + + +@memoize() +def get_ssh_version(): + ''' + Return SSH version installed. + ''' + try: + proc = subprocess.Popen(['ssh', '-V'], + stderr=subprocess.PIPE) + result = proc.communicate()[1] + return result.split(" ")[0].split("_")[1] + except: + return 'unknown' + + +def get_awx_version(): + ''' + Return Ansible Tower version as reported by setuptools. + ''' + from awx import __version__ + try: + import pkg_resources + return pkg_resources.require('ansible_tower')[0].version + except: + return __version__ + + +def get_encryption_key(field_name, pk=None): + ''' + Generate key for encrypted password based on field name, + ``settings.SECRET_KEY``, and instance pk (if available). + + :param pk: (optional) the primary key of the ``awx.conf.model.Setting``; + can be omitted in situations where you're encrypting a setting + that is not database-persistent (like a read-only setting) + ''' + from django.conf import settings + h = hashlib.sha1() + h.update(settings.SECRET_KEY) + if pk is not None: + h.update(str(pk)) + h.update(field_name) + return h.digest()[:16] + + +def encrypt_field(instance, field_name, ask=False, subfield=None, skip_utf8=False): + ''' + Return content of the given instance and field name encrypted. 
+ ''' + value = getattr(instance, field_name) + if isinstance(value, dict) and subfield is not None: + value = value[subfield] + if not value or value.startswith('$encrypted$') or (ask and value == 'ASK'): + return value + if skip_utf8: + utf8 = False + else: + utf8 = type(value) == six.text_type + value = smart_str(value) + key = get_encryption_key(field_name, getattr(instance, 'pk', None)) + cipher = AES.new(key, AES.MODE_ECB) + while len(value) % cipher.block_size != 0: + value += '\x00' + encrypted = cipher.encrypt(value) + b64data = base64.b64encode(encrypted) + tokens = ['$encrypted', 'AES', b64data] + if utf8: + # If the value to encrypt is utf-8, we need to add a marker so we + # know to decode the data when it's decrypted later + tokens.insert(1, 'UTF8') + return '$'.join(tokens) + + +def decrypt_value(encryption_key, value): + raw_data = value[len('$encrypted$'):] + # If the encrypted string contains a UTF8 marker, discard it + utf8 = raw_data.startswith('UTF8$') + if utf8: + raw_data = raw_data[len('UTF8$'):] + algo, b64data = raw_data.split('$', 1) + if algo != 'AES': + raise ValueError('unsupported algorithm: %s' % algo) + encrypted = base64.b64decode(b64data) + cipher = AES.new(encryption_key, AES.MODE_ECB) + value = cipher.decrypt(encrypted) + value = value.rstrip('\x00') + # If the encrypted string contained a UTF8 marker, decode the data + if utf8: + value = value.decode('utf-8') + return value + + +def decrypt_field(instance, field_name, subfield=None): + ''' + Return content of the given instance and field name decrypted. + ''' + value = getattr(instance, field_name) + if isinstance(value, dict) and subfield is not None: + value = value[subfield] + if not value or not value.startswith('$encrypted$'): + return value + key = get_encryption_key(field_name, getattr(instance, 'pk', None)) + + return decrypt_value(key, value) + + +def decrypt_field_value(pk, field_name, value): + key = get_encryption_key(field_name, pk) + return decrypt_value(key, value) + + +def update_scm_url(scm_type, url, username=True, password=True, + check_special_cases=True, scp_format=False): + ''' + Update the given SCM URL to add/replace/remove the username/password. When + username/password is True, preserve existing username/password, when + False (None, '', etc.), remove any existing username/password, otherwise + replace username/password. Also validates the given URL. + ''' + # Handle all of the URL formats supported by the SCM systems: + # git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS + # hg: http://www.selenic.com/mercurial/hg.1.html#url-paths + # svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls + if scm_type not in ('git', 'hg', 'svn'): + raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type)) + if not url.strip(): + return '' + parts = urlparse.urlsplit(url) + try: + parts.port + except ValueError: + raise ValueError(_('Invalid %s URL') % scm_type) + if parts.scheme == 'git+ssh' and not scp_format: + raise ValueError(_('Unsupported %s URL') % scm_type) + + if '://' not in url: + # Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/). 
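+        # Illustrative rewrite (example URL, not from the original hunk):
+        #   'git@github.com:ansible/awx.git'
+        #       -> 'git+ssh://git@github.com/ansible/awx.git' for parsing below
+        #       -> back to SCP form on output when scp_format=True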
+ if scm_type == 'git' and ':' in url: + if '@' in url: + userpass, hostpath = url.split('@', 1) + else: + userpass, hostpath = '', url + if hostpath.count(':') > 1: + raise ValueError(_('Invalid %s URL') % scm_type) + host, path = hostpath.split(':', 1) + #if not path.startswith('/') and not path.startswith('~/'): + # path = '~/%s' % path + #if path.startswith('/'): + # path = path.lstrip('/') + hostpath = '/'.join([host, path]) + modified_url = '@'.join(filter(None, [userpass, hostpath])) + # git+ssh scheme identifies URLs that should be converted back to + # SCP style before passed to git module. + parts = urlparse.urlsplit('git+ssh://%s' % modified_url) + # Handle local paths specified without file scheme (e.g. /path/to/foo). + # Only supported by git and hg. + elif scm_type in ('git', 'hg'): + if not url.startswith('/'): + parts = urlparse.urlsplit('file:///%s' % url) + else: + parts = urlparse.urlsplit('file://%s' % url) + else: + raise ValueError(_('Invalid %s URL') % scm_type) + + # Validate that scheme is valid for given scm_type. + scm_type_schemes = { + 'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'), + 'hg': ('http', 'https', 'ssh', 'file'), + 'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'), + } + if parts.scheme not in scm_type_schemes.get(scm_type, ()): + raise ValueError(_('Unsupported %s URL') % scm_type) + if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'): + raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc)) + elif parts.scheme != 'file' and not parts.netloc: + raise ValueError(_('Host is required for %s URL') % parts.scheme) + if username is True: + netloc_username = parts.username or '' + elif username: + netloc_username = username + else: + netloc_username = '' + if password is True: + netloc_password = parts.password or '' + elif password: + netloc_password = password + else: + netloc_password = '' + + # Special handling for github/bitbucket SSH URLs. + if check_special_cases: + special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org') + if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git': + raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname) + if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password: + #raise ValueError('Password not allowed for SSH access to %s.' 
% parts.hostname) + netloc_password = '' + special_hg_hosts = ('bitbucket.org', 'altssh.bitbucket.org') + if scm_type == 'hg' and parts.scheme == 'ssh' and parts.hostname in special_hg_hosts and netloc_username != 'hg': + raise ValueError(_('Username must be "hg" for SSH access to %s.') % parts.hostname) + if scm_type == 'hg' and parts.scheme == 'ssh' and netloc_password: + #raise ValueError('Password not supported for SSH with Mercurial.') + netloc_password = '' + + if netloc_username and parts.scheme != 'file': + netloc = u':'.join([urllib.quote(x) for x in (netloc_username, netloc_password) if x]) + else: + netloc = u'' + netloc = u'@'.join(filter(None, [netloc, parts.hostname])) + if parts.port: + netloc = u':'.join([netloc, unicode(parts.port)]) + new_url = urlparse.urlunsplit([parts.scheme, netloc, parts.path, + parts.query, parts.fragment]) + if scp_format and parts.scheme == 'git+ssh': + new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1) + return new_url + + +def get_allowed_fields(obj, serializer_mapping): + from django.contrib.auth.models import User + + if serializer_mapping is not None and obj.__class__ in serializer_mapping: + serializer_actual = serializer_mapping[obj.__class__]() + allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id'] + else: + allowed_fields = [x.name for x in obj._meta.fields] + + if isinstance(obj, User): + field_blacklist = ['last_login'] + allowed_fields = [f for f in allowed_fields if f not in field_blacklist] + + return allowed_fields + + +def model_instance_diff(old, new, serializer_mapping=None): + """ + Calculate the differences between two model instances. One of the instances may be None (i.e., a newly + created model or deleted model). This will cause all fields with a value to have changed (from None). + serializer_mapping are used to determine read-only fields. + When provided, read-only fields will not be included in the resulting dictionary + """ + from django.db.models import Model + from awx.main.models.credential import Credential + + if not(old is None or isinstance(old, Model)): + raise TypeError('The supplied old instance is not a valid model instance.') + if not(new is None or isinstance(new, Model)): + raise TypeError('The supplied new instance is not a valid model instance.') + + diff = {} + + allowed_fields = get_allowed_fields(new, serializer_mapping) + + for field in allowed_fields: + old_value = getattr(old, field, None) + new_value = getattr(new, field, None) + + if old_value != new_value and field not in Credential.PASSWORD_FIELDS: + if type(old_value) not in (bool, int, type(None)): + old_value = smart_str(old_value) + if type(new_value) not in (bool, int, type(None)): + new_value = smart_str(new_value) + diff[field] = (old_value, new_value) + elif old_value != new_value and field in Credential.PASSWORD_FIELDS: + diff[field] = (u"hidden", u"hidden") + + if len(diff) == 0: + diff = None + + return diff + + +def model_to_dict(obj, serializer_mapping=None): + """ + Serialize a model instance to a dictionary as best as possible + serializer_mapping are used to determine read-only fields. 
+ When provided, read-only fields will not be included in the resulting dictionary + """ + from awx.main.models.credential import Credential + attr_d = {} + + allowed_fields = get_allowed_fields(obj, serializer_mapping) + + for field in obj._meta.fields: + if field.name not in allowed_fields: + continue + if field.name not in Credential.PASSWORD_FIELDS: + field_val = getattr(obj, field.name, None) + if type(field_val) not in (bool, int, type(None)): + attr_d[field.name] = smart_str(field_val) + else: + attr_d[field.name] = field_val + else: + attr_d[field.name] = "hidden" + return attr_d + + +def copy_model_by_class(obj1, Class2, fields, kwargs): + ''' + Creates a new unsaved object of type Class2 using the fields from obj1 + values in kwargs can override obj1 + ''' + create_kwargs = {} + for field_name in fields: + # Foreign keys can be specified as field_name or field_name_id. + id_field_name = '%s_id' % field_name + if hasattr(obj1, id_field_name): + if field_name in kwargs: + value = kwargs[field_name] + elif id_field_name in kwargs: + value = kwargs[id_field_name] + else: + value = getattr(obj1, id_field_name) + if hasattr(value, 'id'): + value = value.id + create_kwargs[id_field_name] = value + elif field_name in kwargs: + if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict): + create_kwargs[field_name] = json.dumps(kwargs['extra_vars']) + # We can't get a hold of django.db.models.fields.related.ManyRelatedManager to compare + # so this is the next best thing. + elif kwargs[field_name].__class__.__name__ is not 'ManyRelatedManager': + create_kwargs[field_name] = kwargs[field_name] + elif hasattr(obj1, field_name): + field_obj = obj1._meta.get_field_by_name(field_name)[0] + if not isinstance(field_obj, ManyToManyField): + create_kwargs[field_name] = getattr(obj1, field_name) + + # Apply class-specific extra processing for origination of unified jobs + if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2: + new_kwargs = obj1._update_unified_job_kwargs(**create_kwargs) + else: + new_kwargs = create_kwargs + + return Class2(**new_kwargs) + + +def copy_m2m_relationships(obj1, obj2, fields, kwargs=None): + ''' + In-place operation. + Given two saved objects, copies related objects from obj1 + to obj2 to field of same name, if field occurs in `fields` + ''' + for field_name in fields: + if hasattr(obj1, field_name): + field_obj = obj1._meta.get_field_by_name(field_name)[0] + if isinstance(field_obj, ManyToManyField): + # Many to Many can be specified as field_name + src_field_value = getattr(obj1, field_name) + if kwargs and field_name in kwargs: + override_field_val = kwargs[field_name] + if override_field_val.__class__.__name__ is 'ManyRelatedManager': + src_field_value = override_field_val + dest_field = getattr(obj2, field_name) + dest_field.add(*list(src_field_value.all().values_list('id', flat=True))) + + +def get_type_for_model(model): + ''' + Return type name for a given model class. + ''' + opts = model._meta.concrete_model._meta + return camelcase_to_underscore(opts.object_name) + + +def get_model_for_type(type): + ''' + Return model class for a given type name. 
+ ''' + from django.db.models import Q + from django.contrib.contenttypes.models import ContentType + for ct in ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user')): + ct_model = ct.model_class() + if not ct_model: + continue + ct_type = get_type_for_model(ct_model) + if type == ct_type: + return ct_model + + +def cache_list_capabilities(page, prefetch_list, model, user): + ''' + Given a `page` list of objects, the specified roles for the specified user + are saved on each object in the list, using one query for each role type + + Examples: + capabilities_prefetch = ['admin', 'execute'] + --> prefetch the admin (edit) and execute (start) permissions for + items in list for current user + capabilities_prefetch = ['inventory.admin'] + --> prefetch the related inventory FK permissions for current user, + and put it into the object's cache + capabilities_prefetch = [{'copy': ['inventory.admin', 'project.admin']}] + --> prefetch logical combination of admin permission to inventory AND + project, put into cache dictionary as "copy" + ''' + from django.db.models import Q + page_ids = [obj.id for obj in page] + for obj in page: + obj.capabilities_cache = {} + + skip_models = [] + if hasattr(model, 'invalid_user_capabilities_prefetch_models'): + skip_models = model.invalid_user_capabilities_prefetch_models() + + for prefetch_entry in prefetch_list: + + display_method = None + if type(prefetch_entry) is dict: + display_method = prefetch_entry.keys()[0] + paths = prefetch_entry[display_method] + else: + paths = prefetch_entry + + if type(paths) is not list: + paths = [paths] + + # Build the query for accessible_objects according to the user & role(s) + filter_args = [] + for role_path in paths: + if '.' in role_path: + res_path = '__'.join(role_path.split('.')[:-1]) + role_type = role_path.split('.')[-1] + parent_model = model + for subpath in role_path.split('.')[:-1]: + parent_model = parent_model._meta.get_field(subpath).related_model + filter_args.append(Q( + Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) | + Q(**{'%s__isnull' % res_path: True}))) + else: + role_type = role_path + filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)})) + + if display_method is None: + # Role name translation to UI names for methods + display_method = role_type + if role_type == 'admin': + display_method = 'edit' + elif role_type in ['execute', 'update']: + display_method = 'start' + + # Union that query with the list of items on page + filter_args.append(Q(pk__in=page_ids)) + ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True)) + + # Save data item-by-item + for obj in page: + if skip_models and obj.__class__.__name__.lower() in skip_models: + continue + obj.capabilities_cache[display_method] = False + if obj.pk in ids_with_role: + obj.capabilities_cache[display_method] = True + + +def parse_yaml_or_json(vars_str): + ''' + Attempt to parse a string of variables; if the attempt fails, + return an empty dictionary.
+ ''' + if isinstance(vars_str, dict): + return vars_str + try: + vars_dict = json.loads(vars_str) + except (ValueError, TypeError): + try: + vars_dict = yaml.safe_load(vars_str) + assert isinstance(vars_dict, dict) + except (yaml.YAMLError, TypeError, AttributeError, AssertionError): + vars_dict = {} + return vars_dict + + +@memoize() +def get_system_task_capacity(): + ''' + Measure system memory and use it as a baseline for determining the system's capacity + ''' + from django.conf import settings + if hasattr(settings, 'SYSTEM_TASK_CAPACITY'): + return settings.SYSTEM_TASK_CAPACITY + proc = subprocess.Popen(['free', '-m'], stdout=subprocess.PIPE) + out,err = proc.communicate() + total_mem_value = out.split()[7] + if int(total_mem_value) <= 2048: + return 50 + return 50 + ((int(total_mem_value) / 1024) - 2) * 75 + + +_inventory_updates = threading.local() + + +@contextlib.contextmanager +def ignore_inventory_computed_fields(): + ''' + Context manager to ignore updating inventory computed fields. + ''' + try: + previous_value = getattr(_inventory_updates, 'is_updating', False) + _inventory_updates.is_updating = True + yield + finally: + _inventory_updates.is_updating = previous_value + + +@contextlib.contextmanager +def ignore_inventory_group_removal(): + ''' + Context manager to ignore moving groups/hosts when group is deleted. + ''' + try: + previous_value = getattr(_inventory_updates, 'is_removing', False) + _inventory_updates.is_removing = True + yield + finally: + _inventory_updates.is_removing = previous_value + + +@memoize() +def check_proot_installed(): + ''' + Check that proot is installed. + ''' + from django.conf import settings + cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version'] + try: + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + proc.communicate() + return bool(proc.returncode == 0) + except (OSError, ValueError): + return False + + +def build_proot_temp_dir(): + ''' + Create a temporary directory for proot to use. 
+ ''' + from django.conf import settings + path = tempfile.mkdtemp(prefix='ansible_tower_proot_', dir=settings.AWX_PROOT_BASE_PATH) + os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + return path + + +def wrap_args_with_proot(args, cwd, **kwargs): + ''' + Wrap existing command line with proot to restrict access to: + - /etc/tower (to prevent obtaining db info or secret key) + - /var/lib/awx (except for current project) + - /var/log/tower + - /var/log/supervisor + - /tmp (except for own tmp files) + ''' + from django.conf import settings + new_args = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--unshare-pid', '--dev-bind', '/', '/'] + hide_paths = ['/etc/tower', '/var/lib/awx', '/var/log', + tempfile.gettempdir(), settings.PROJECTS_ROOT, + settings.JOBOUTPUT_ROOT] + hide_paths.extend(getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or []) + for path in sorted(set(hide_paths)): + if not os.path.exists(path): + continue + if os.path.isdir(path): + new_path = tempfile.mkdtemp(dir=kwargs['proot_temp_dir']) + os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + else: + handle, new_path = tempfile.mkstemp(dir=kwargs['proot_temp_dir']) + os.close(handle) + os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR) + new_args.extend(['--bind', '%s' %(new_path,), '%s' % (path,)]) + if 'private_data_dir' in kwargs: + show_paths = [cwd, kwargs['private_data_dir']] + else: + show_paths = [cwd] + if settings.ANSIBLE_USE_VENV: + show_paths.append(settings.ANSIBLE_VENV_PATH) + if settings.TOWER_USE_VENV: + show_paths.append(settings.TOWER_VENV_PATH) + show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or []) + for path in sorted(set(show_paths)): + if not os.path.exists(path): + continue + new_args.extend(['--bind', '%s' % (path,), '%s' % (path,)]) + new_args.extend(['--chdir', cwd]) + new_args.extend(args) + return new_args + + +def get_pk_from_dict(_dict, key): + ''' + Helper for obtaining a pk from user data dict or None if not present. + ''' + try: + return int(_dict[key]) + except (TypeError, KeyError, ValueError): + return None + + +def build_url(*args, **kwargs): + get = kwargs.pop('get', {}) + url = reverse(*args, **kwargs) + if get: + url += '?' + urllib.urlencode(get) + return url + + +def timestamp_apiformat(timestamp): + timestamp = timestamp.isoformat() + if timestamp.endswith('+00:00'): + timestamp = timestamp[:-6] + 'Z' + return timestamp + + +# damn you python 2.6 +def timedelta_total_seconds(timedelta): + return ( + timedelta.microseconds + 0.0 + + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 + + +class NoDefaultProvided(object): + pass + + +def getattrd(obj, name, default=NoDefaultProvided): + """ + Same as getattr(), but allows dot notation lookup + Discussed in: + http://stackoverflow.com/questions/11975781 + """ + + try: + return reduce(getattr, name.split("."), obj) + except AttributeError: + if default != NoDefaultProvided: + return default + raise + + +current_apps = apps + + +def set_current_apps(apps): + global current_apps + current_apps = apps + + +def get_current_apps(): + global current_apps + return current_apps + + +class OutputEventFilter(object): + ''' + File-like object that looks for encoded job events in stdout data. 
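+
+    Illustration of the framing (an editor's note derived from EVENT_DATA_RE
+    below, not part of the original docstring):
+
+        \x1b[K<base64-encoded JSON event data, in chunks,
+              each chunk terminated by \x1b[<n>D>\x1b[K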
+ ''' + + EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K') + + def __init__(self, fileobj=None, event_callback=None, raw_callback=None): + self._fileobj = fileobj + self._event_callback = event_callback + self._raw_callback = raw_callback + self._counter = 1 + self._start_line = 0 + self._buffer = '' + self._current_event_data = None + + def __getattr__(self, attr): + return getattr(self._fileobj, attr) + + def write(self, data): + if self._fileobj: + self._fileobj.write(data) + self._buffer += data + if self._raw_callback: + self._raw_callback(data) + while True: + match = self.EVENT_DATA_RE.search(self._buffer) + if not match: + break + try: + base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1)) + event_data = json.loads(base64.b64decode(base64_data)) + except ValueError: + event_data = {} + self._emit_event(self._buffer[:match.start()], event_data) + self._buffer = self._buffer[match.end():] + + def close(self): + if self._fileobj: + self._fileobj.close() + if self._buffer: + self._emit_event(self._buffer) + self._buffer = '' + + def _emit_event(self, buffered_stdout, next_event_data=None): + if self._current_event_data: + event_data = self._current_event_data + stdout_chunks = [buffered_stdout] + elif buffered_stdout: + event_data = dict(event='verbose') + stdout_chunks = buffered_stdout.splitlines(True) + else: + stdout_chunks = [] + + for stdout_chunk in stdout_chunks: + event_data['counter'] = self._counter + self._counter += 1 + event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else "" + n_lines = stdout_chunk.count('\n') + event_data['start_line'] = self._start_line + event_data['end_line'] = self._start_line + n_lines + self._start_line += n_lines + if self._event_callback: + self._event_callback(event_data) + + # next_event_data is None when close() flushes remaining output + if next_event_data and next_event_data.get('uuid', None): + self._current_event_data = next_event_data + else: + self._current_event_data = None + + +def callback_filter_out_ansible_extra_vars(extra_vars): + extra_vars_redacted = {} + for key, value in extra_vars.iteritems(): + if not key.startswith('ansible_'): + extra_vars_redacted[key] = value + return extra_vars_redacted + diff --git a/awx/main/utils/db.py b/awx/main/utils/db.py new file mode 100644 index 0000000000..f9c625a7a1 --- /dev/null +++ b/awx/main/utils/db.py @@ -0,0 +1,22 @@ +# Copyright (c) 2017 Ansible by Red Hat +# All Rights Reserved. + +# Django database +from django.db.migrations.loader import MigrationLoader +from django.db import connection + +# Python +import re + + +def get_tower_migration_version(): + loader = MigrationLoader(connection, ignore_no_migrations=True) + v = '000' + for app_name, migration_name in loader.applied_migrations: + if app_name == 'main': + version_captures = re.findall('^[0-9]{4}_v([0-9]{3})_', migration_name) + if len(version_captures) == 1: + migration_version = version_captures[0] + if migration_version > v: + v = migration_version + return v diff --git a/awx/main/utils/formatters.py b/awx/main/utils/formatters.py new file mode 100644 index 0000000000..68d0917985 --- /dev/null +++ b/awx/main/utils/formatters.py @@ -0,0 +1,161 @@ +# Copyright (c) 2017 Ansible Tower by Red Hat +# All Rights Reserved.
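For reference, a minimal sketch of the producer side of the framing that OutputEventFilter (in the common.py hunk above) consumes. The encode_event helper and its payload are an editor's assumption for illustration; only the escape-sequence shape is taken from EVENT_DATA_RE.

import base64
import json

from awx.main.utils.common import OutputEventFilter


def encode_event(event_data, width=80):
    # base64-encode the JSON payload, split it into chunks each terminated by
    # a cursor-left escape (\x1b[<n>D), and wrap the run in \x1b[K markers,
    # which is exactly the shape OutputEventFilter.EVENT_DATA_RE scans for
    b64 = base64.b64encode(json.dumps(event_data))
    chunks = [b64[i:i + width] for i in range(0, len(b64), width)]
    framed = ''.join('%s\x1b[%dD' % (c, len(c)) for c in chunks)
    return '\x1b[K%s\x1b[K' % framed


events = []
f = OutputEventFilter(event_callback=events.append)
f.write('PLAY [all] *****\r\n' + encode_event({'uuid': 'abc123', 'event': 'playbook_on_play_start'}))
f.close()
# events now holds one 'verbose' event for the buffered stdout; the decoded
# event data is retained and attached to whatever stdout follows it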
diff --git a/awx/main/utils/formatters.py b/awx/main/utils/formatters.py
new file mode 100644
index 0000000000..68d0917985
--- /dev/null
+++ b/awx/main/utils/formatters.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2017 Ansible Tower by Red Hat
+# All Rights Reserved.
+
+from logstash.formatter import LogstashFormatterVersion1
+from copy import copy
+import json
+import time
+
+
+class LogstashFormatter(LogstashFormatterVersion1):
+    def __init__(self, **kwargs):
+        settings_module = kwargs.pop('settings_module', None)
+        ret = super(LogstashFormatter, self).__init__(**kwargs)
+        if settings_module:
+            self.host_id = settings_module.CLUSTER_HOST_ID
+            self.tower_uuid = settings_module.LOG_AGGREGATOR_TOWER_UUID
+        return ret
+
+    def reformat_data_for_log(self, raw_data, kind=None):
+        '''
+        Process dictionaries from various contexts (job events, activity stream
+        changes, etc.) into meaningful information, and output a dictionary
+        which will be passed in logstash or syslog format to the logging
+        receiver.
+        '''
+        if kind == 'activity_stream':
+            return raw_data
+        rename_fields = set((
+            'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
+            'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
+            'msecs', 'message', 'msg', 'name', 'pathname', 'process',
+            'processName', 'relativeCreated', 'thread', 'threadName', 'extra',
+            'auth_token', 'tags', 'host', 'host_id', 'level', 'port', 'uuid'))
+        if kind == 'system_tracking':
+            data = copy(raw_data['facts_data'])
+        elif kind == 'job_events':
+            data = copy(raw_data['event_model_data'])
+        else:
+            data = copy(raw_data)
+        if isinstance(data, basestring):
+            data = json.loads(data)
+        skip_fields = ('res', 'password', 'event_data', 'stdout')
+        data_for_log = {}
+
+        def index_by_name(alist):
+            """Takes a list of dictionaries with `name` (or `path`) as a key
+            in each dict and returns a dictionary indexed by those names"""
+            adict = {}
+            for item in alist:
+                subdict = copy(item)
+                name = None
+                if 'name' in subdict:
+                    name = subdict.get('name', None)
+                elif 'path' in subdict:
+                    name = subdict.get('path', None)
+                if name:
+                    # Logstash v2 can not accept '.' in a name
+                    name = name.replace('.', '_')
+                    adict[name] = subdict
+            return adict
+
+        def convert_to_type(t, val):
+            if t is float:
+                val = val[:-1] if val.endswith('s') else val
+                try:
+                    return float(val)
+                except ValueError:
+                    return val
+            elif t is int:
+                try:
+                    return int(val)
+                except ValueError:
+                    return val
+            elif t is str:
+                return val
+
+        if kind == 'job_events':
+            data.update(data.get('event_data', {}))
+            for fd in data:
+                if fd in skip_fields:
+                    continue
+                key = fd
+                if fd in rename_fields:
+                    key = 'event_%s' % fd
+                val = data[fd]
+                if key.endswith('created'):
+                    time_float = time.mktime(data[fd].timetuple())
+                    val = self.format_timestamp(time_float)
+                data_for_log[key] = val
+        elif kind == 'system_tracking':
+            module_name = raw_data['module_name']
+            if module_name in ['services', 'packages', 'files']:
+                data_for_log[module_name] = index_by_name(data)
+            elif module_name == 'ansible':
+                data_for_log['ansible'] = data
+                # Remove sub-keys with data type conflicts in elastic search
+                data_for_log['ansible'].pop('ansible_python_version', None)
+                data_for_log['ansible']['ansible_python'].pop('version_info', None)
+            else:
+                data_for_log['facts'] = data
+                data_for_log['module_name'] = module_name
+        elif kind == 'performance':
+            request = raw_data['python_objects']['request']
+            response = raw_data['python_objects']['response']
+
+            # Note: not all of the below keys will necessarily be in the
+            # response "dict". For example, X-API-Query-Time and
+            # X-API-Query-Count will only exist if SQL_DEBUG is turned on
+            # in settings.
+            headers = [
+                (float, 'X-API-Time'),  # may end with an 's' "0.33s"
+                (int, 'X-API-Query-Count'),
+                (float, 'X-API-Query-Time'),  # may also end with an 's'
+                (str, 'X-API-Node'),
+            ]
+            data_for_log['x_api'] = {k: convert_to_type(t, response[k]) for (t, k) in headers if k in response}
+
+            data_for_log['request'] = {
+                'method': request.method,
+                'path': request.path,
+                'path_info': request.path_info,
+                'query_string': request.META['QUERY_STRING'],
+                'data': request.data,
+            }
+
+        return data_for_log
+
+    def get_extra_fields(self, record):
+        fields = super(LogstashFormatter, self).get_extra_fields(record)
+        if record.name.startswith('awx.analytics'):
+            log_kind = record.name[len('awx.analytics.'):]
+            fields = self.reformat_data_for_log(fields, kind=log_kind)
+        return fields
+
+    def format(self, record):
+        message = {
+            # Fields not included, but exist in related logs
+            # 'path': record.pathname
+            # '@version': '1',  # from python-logstash
+            # 'tags': self.tags,
+            '@timestamp': self.format_timestamp(record.created),
+            'message': record.getMessage(),
+            'host': self.host,
+            'type': self.message_type,
+
+            # Extra Fields
+            'level': record.levelname,
+            'logger_name': record.name,
+        }
+
+        if getattr(self, 'tower_uuid', None):
+            message['tower_uuid'] = self.tower_uuid
+        if getattr(self, 'host_id', None):
+            message['cluster_host_id'] = self.host_id
+
+        # Add extra fields
+        message.update(self.get_extra_fields(record))
+
+        # If exception, add debug info
+        if record.exc_info:
+            message.update(self.get_debug_fields(record))
+
+        return self.serialize(message)
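For reference, the performance-header coercion above can be exercised on its own. This sketch inlines a simplified copy of the convert_to_type closure so it runs standalone; the response values are made up:

    def convert_to_type(t, val):
        # Simplified copy of the closure above: floats may carry a trailing
        # 's' unit (e.g. "0.33s"); unparseable values pass through unchanged.
        if t is float:
            val = val[:-1] if val.endswith('s') else val
        try:
            return t(val)
        except ValueError:
            return val

    headers = [
        (float, 'X-API-Time'),
        (int, 'X-API-Query-Count'),
        (float, 'X-API-Query-Time'),
        (str, 'X-API-Node'),
    ]
    response = {'X-API-Time': '0.33s', 'X-API-Query-Count': '7', 'X-API-Node': 'tower-1'}
    x_api = {k: convert_to_type(t, response[k]) for (t, k) in headers if k in response}
    assert x_api == {'X-API-Time': 0.33, 'X-API-Query-Count': 7, 'X-API-Node': 'tower-1'}

Missing headers (here X-API-Query-Time) are simply dropped, which is why the dict comprehension filters on membership in the response.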
diff --git a/awx/main/utils/handlers.py b/awx/main/utils/handlers.py
new file mode 100644
index 0000000000..fe2fb87228
--- /dev/null
+++ b/awx/main/utils/handlers.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2017 Ansible Tower by Red Hat
+# All Rights Reserved.
+
+# Python
+import logging
+import json
+import requests
+import traceback
+from copy import copy
+
+from requests_futures.sessions import FuturesSession
+
+# AWX
+from awx.main.utils.formatters import LogstashFormatter
+
+
+__all__ = ['HTTPSNullHandler', 'BaseHTTPSHandler', 'configure_external_logger']
+
+# AWX external logging handler, generally designed to be used with the
+# accompanying LogstashFormatter; derives from the python-logstash library.
+# Non-blocking requests are accomplished by FuturesSession, similar to the
+# loggly-python-handler library (which is not used).
+
+# Translation of parameter names to names in Django settings
+PARAM_NAMES = {
+    'host': 'LOG_AGGREGATOR_HOST',
+    'port': 'LOG_AGGREGATOR_PORT',
+    'message_type': 'LOG_AGGREGATOR_TYPE',
+    'username': 'LOG_AGGREGATOR_USERNAME',
+    'password': 'LOG_AGGREGATOR_PASSWORD',
+    'enabled_loggers': 'LOG_AGGREGATOR_LOGGERS',
+    'indv_facts': 'LOG_AGGREGATOR_INDIVIDUAL_FACTS',
+    'enabled_flag': 'LOG_AGGREGATOR_ENABLED',
+}
+
+
+def unused_callback(sess, resp):
+    pass
+
+
+class HTTPSNullHandler(logging.NullHandler):
+    "Placeholder null handler to allow loading without database access"
+
+    def __init__(self, *args, **kwargs):
+        return super(HTTPSNullHandler, self).__init__()
+
+
+class BaseHTTPSHandler(logging.Handler):
+    def __init__(self, fqdn=False, **kwargs):
+        super(BaseHTTPSHandler, self).__init__()
+        self.fqdn = fqdn
+        self.async = kwargs.get('async', True)
+        for fd in PARAM_NAMES:
+            setattr(self, fd, kwargs.get(fd, None))
+        if self.async:
+            self.session = FuturesSession()
+        else:
+            self.session = requests.Session()
+        self.add_auth_information()
+
+    @classmethod
+    def from_django_settings(cls, settings, *args, **kwargs):
+        for param, django_setting_name in PARAM_NAMES.items():
+            kwargs[param] = getattr(settings, django_setting_name, None)
+        return cls(*args, **kwargs)
+
+    def get_full_message(self, record):
+        if record.exc_info:
+            return '\n'.join(traceback.format_exception(*record.exc_info))
+        else:
+            return record.getMessage()
+
+    def add_auth_information(self):
+        if self.message_type == 'logstash':
+            if not self.username:
+                # Logstash authentication not enabled
+                return
+            logstash_auth = requests.auth.HTTPBasicAuth(self.username, self.password)
+            self.session.auth = logstash_auth
+        elif self.message_type == 'splunk':
+            auth_header = "Splunk %s" % self.password
+            headers = {
+                "Authorization": auth_header,
+                "Content-Type": "application/json"
+            }
+            self.session.headers.update(headers)
+
+    def get_http_host(self):
+        host = self.host or ''
+        if not host.startswith('http'):
+            host = 'http://%s' % host
+        if self.port != 80 and self.port is not None:
+            host = '%s:%s' % (host, str(self.port))
+        return host
+
+    def get_post_kwargs(self, payload_input):
+        if self.message_type == 'splunk':
+            # Splunk needs data nested under key "event"
+            if not isinstance(payload_input, dict):
+                payload_input = json.loads(payload_input)
+            payload_input = {'event': payload_input}
+        if isinstance(payload_input, dict):
+            payload_str = json.dumps(payload_input)
+        else:
+            payload_str = payload_input
+        if self.async:
+            return dict(data=payload_str, background_callback=unused_callback)
+        else:
+            return dict(data=payload_str)
+
+    def skip_log(self, logger_name):
+        if self.host == '' or (not self.enabled_flag):
+            return True
+        if not logger_name.startswith('awx.analytics'):
+            # Tower log emission is only turned off by enablement setting
+            return False
+        return self.enabled_loggers is None or logger_name[len('awx.analytics.'):] not in self.enabled_loggers
+
+    def emit(self, record):
+        """
+        Emit a log record. Returns a list of zero or more
+        ``concurrent.futures.Future`` objects.
+
+        When ``self.async`` is True, the list will contain one
+        Future object for each HTTP request made. When ``self.async`` is
+        False, the list will be empty.
+
+        See:
+        https://docs.python.org/3/library/concurrent.futures.html#future-objects
+        http://pythonhosted.org/futures/
+        """
+        if self.skip_log(record.name):
+            return []
+        try:
+            payload = self.format(record)
+
+            # Special action for System Tracking, queue up multiple log messages
+            if self.indv_facts:
+                payload_data = json.loads(payload)
+                if record.name.startswith('awx.analytics.system_tracking'):
+                    module_name = payload_data['module_name']
+                    if module_name in ['services', 'packages', 'files']:
+                        facts_dict = payload_data.pop(module_name)
+                        async_futures = []
+                        for key in facts_dict:
+                            fact_payload = copy(payload_data)
+                            fact_payload.update(facts_dict[key])
+                            if self.async:
+                                async_futures.append(self._send(fact_payload))
+                            else:
+                                self._send(fact_payload)
+                        return async_futures
+
+            if self.async:
+                return [self._send(payload)]
+
+            self._send(payload)
+            return []
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            self.handleError(record)
+            # Keep the documented return type even on error.
+            return []
+
+    def _send(self, payload):
+        return self.session.post(self.get_http_host(),
+                                 **self.get_post_kwargs(payload))
+
+
+def add_or_remove_logger(address, instance):
+    specific_logger = logging.getLogger(address)
+    for i, handler in enumerate(specific_logger.handlers):
+        if isinstance(handler, (HTTPSNullHandler, BaseHTTPSHandler)):
+            specific_logger.handlers[i] = instance or HTTPSNullHandler()
+            break
+    else:
+        if instance is not None:
+            specific_logger.handlers.append(instance)
+
+
+def configure_external_logger(settings_module, async_flag=True, is_startup=True):
+
+    is_enabled = settings_module.LOG_AGGREGATOR_ENABLED
+    if is_startup and (not is_enabled):
+        # Pass-through if external logging not being used
+        return
+
+    instance = None
+    if is_enabled:
+        instance = BaseHTTPSHandler.from_django_settings(settings_module, async=async_flag)
+        instance.setFormatter(LogstashFormatter(settings_module=settings_module))
+    awx_logger_instance = instance
+    if is_enabled and 'awx' not in settings_module.LOG_AGGREGATOR_LOGGERS:
+        awx_logger_instance = None
+
+    add_or_remove_logger('awx.analytics', instance)
+    add_or_remove_logger('awx', awx_logger_instance)
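A standalone sketch of the Splunk branch of get_post_kwargs above: Splunk's HTTP Event Collector expects each JSON document nested under an "event" key, so flat payloads are wrapped before being POSTed (wrap_for_splunk is an illustrative name, not part of the patch):

    import json

    def wrap_for_splunk(payload_input):
        # Mirrors the splunk branch of get_post_kwargs: parse a JSON string
        # if needed, then nest the document under "event".
        if not isinstance(payload_input, dict):
            payload_input = json.loads(payload_input)
        return json.dumps({'event': payload_input})

    print(wrap_for_splunk('{"level": "INFO", "message": "job 42 finished"}'))
    # {"event": {"level": "INFO", "message": "job 42 finished"}}

The logstash path, by contrast, leaves the payload flat and relies on HTTP basic auth set on the session in add_auth_information.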
diff --git a/awx/main/utils/reload.py b/awx/main/utils/reload.py
new file mode 100644
index 0000000000..729a33a703
--- /dev/null
+++ b/awx/main/utils/reload.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2017 Ansible Tower by Red Hat
+# All Rights Reserved.
+
+# Python
+import subprocess
+import logging
+
+# Django
+from django.conf import settings
+
+# Celery
+from celery import current_app
+
+logger = logging.getLogger('awx.main.utils.reload')
+
+
+def _uwsgi_reload():
+    # http://uwsgi-docs.readthedocs.io/en/latest/MasterFIFO.html#available-commands
+    logger.warn('Initiating uWSGI chain reload of server')
+    TRIGGER_CHAIN_RELOAD = 'c'
+    with open(settings.UWSGI_FIFO_LOCATION, 'w') as awxfifo:
+        awxfifo.write(TRIGGER_CHAIN_RELOAD)
+
+
+def _reset_celery_thread_pool():
+    # Send signal to restart thread pool
+    app = current_app._get_current_object()
+    app.control.broadcast('pool_restart', arguments={'reload': True},
+                          destination=['celery@{}'.format(settings.CLUSTER_HOST_ID)], reply=False)
+
+
+def _supervisor_service_restart(service_internal_names):
+    '''
+    Restart the named services via supervisorctl.
+    Service internal name options:
+     - beat
+     - celery
+     - callback
+     - channels
+     - uwsgi
+     - daphne
+     - fact
+     - nginx
+    Example supervisorctl usage pattern:
+    # supervisorctl restart tower-processes:receiver tower-processes:factcacher
+    '''
+    group_name = 'tower-processes'
+    args = ['supervisorctl']
+    if settings.DEBUG:
+        args.extend(['-c', '/supervisor.conf'])
+    programs = []
+    name_translation_dict = settings.SERVICE_NAME_DICT
+    for n in service_internal_names:
+        if n in name_translation_dict:
+            programs.append('{}:{}'.format(group_name, name_translation_dict[n]))
+    args.extend(['restart'])
+    args.extend(programs)
+    logger.debug('Issuing command to restart services, args={}'.format(args))
+    subprocess.Popen(args)
+
+
+def restart_local_services(service_internal_names):
+    logger.warn('Restarting services {} on this node in response to user action'.format(service_internal_names))
+    if 'uwsgi' in service_internal_names:
+        _uwsgi_reload()
+        service_internal_names.remove('uwsgi')
+    restart_celery = False
+    if 'celery' in service_internal_names:
+        restart_celery = True
+        service_internal_names.remove('celery')
+    _supervisor_service_restart(service_internal_names)
+    if restart_celery:
+        # Restart celery last, because this probably includes the current process
+        _reset_celery_thread_pool()
+
diff --git a/awx/main/validators.py b/awx/main/validators.py
new file mode 100644
index 0000000000..c045e936cb
--- /dev/null
+++ b/awx/main/validators.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2015 Ansible, Inc.
+# All Rights Reserved.
+
+# Python
+import base64
+import re
+import yaml
+import json
+
+# Django
+from django.utils.translation import ugettext_lazy as _
+from django.core.exceptions import ValidationError
+
+# REST framework
+from rest_framework.serializers import ValidationError as RestValidationError
+
+
+def validate_pem(data, min_keys=0, max_keys=None, min_certs=0, max_certs=None):
+    """
+    Validate that the given PEM data is well-formed and contains the required
+    numbers of keys and certificates.
+
+    Return a list of PEM objects, where each object is a dict with the following
+    keys:
+    - 'all': The entire string for the PEM object including BEGIN/END lines.
+    - 'type': The type of PEM object ('PRIVATE KEY' or 'CERTIFICATE').
+    - 'data': The string inside the BEGIN/END lines.
+    - 'b64': Key/certificate as a base64-encoded string.
+    - 'bin': Key/certificate as bytes.
+    - 'key_type': Only when type == 'PRIVATE KEY', one of 'rsa', 'dsa',
+      'ecdsa', 'ed25519' or 'rsa1'.
+    - 'key_enc': Only when type == 'PRIVATE KEY', boolean indicating if key is
+      encrypted.
+    """
+
+    # Map the X in BEGIN X PRIVATE KEY to the key type (ssh-keygen -t).
+ # Tower jobs using OPENSSH format private keys may still fail if the + # system SSH implementation lacks support for this format. + private_key_types = { + 'RSA': 'rsa', + 'DSA': 'dsa', + 'EC': 'ecdsa', + 'OPENSSH': 'ed25519', + '': 'rsa1', + } + + # Build regular expressions for matching each object in the PEM file. + pem_obj_re = re.compile( + r'^(-{4,}) *BEGIN ([A-Z ]+?) *\1[\r\n]+' + + r'(.+?)[\r\n]+\1 *END \2 *\1[\r\n]?(.*?)$', re.DOTALL, + ) + pem_obj_header_re = re.compile(r'^(.+?):\s*?(.+?)(\\??)$') + + pem_objects = [] + key_count, cert_count = 0, 0 + data = data.lstrip() + while data: + match = pem_obj_re.match(data) + if not match: + raise ValidationError(_('Invalid certificate or key: %r...') % data[:100]) + data = match.group(4).lstrip() + + # Check PEM object type, check key type if private key. + pem_obj_info = {} + pem_obj_info['all'] = match.group(0) + pem_obj_info['type'] = pem_obj_type = match.group(2) + if pem_obj_type.endswith('PRIVATE KEY'): + key_count += 1 + pem_obj_info['type'] = 'PRIVATE KEY' + key_type = pem_obj_type.replace('PRIVATE KEY', '').strip() + try: + pem_obj_info['key_type'] = private_key_types[key_type] + except KeyError: + raise ValidationError(_('Invalid private key: unsupported type "%s"') % key_type) + elif pem_obj_type == 'CERTIFICATE': + cert_count += 1 + else: + raise ValidationError(_('Unsupported PEM object type: "%s"') % pem_obj_type) + + # Ensure that this PEM object is valid base64 data. + pem_obj_info['data'] = match.group(3) + base64_data = '' + line_continues = False + for line in pem_obj_info['data'].splitlines(): + line = line.strip() + if not line: + continue + if line_continues: + line_continues = line.endswith('\\') + continue + line_match = pem_obj_header_re.match(line) + if line_match: + line_continues = line.endswith('\\') + continue + base64_data += line + try: + decoded_data = base64.b64decode(base64_data) + if not decoded_data: + raise TypeError + pem_obj_info['b64'] = base64_data + pem_obj_info['bin'] = decoded_data + except TypeError: + raise ValidationError(_('Invalid base64-encoded data')) + + # If private key, check whether it is encrypted. + if pem_obj_info.get('key_type', '') == 'ed25519': + # See https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L3218 + # Decoded key data starts with magic string (null-terminated), four byte + # length field, followed by the ciphername -- if ciphername is anything + # other than 'none' the key is encrypted. + pem_obj_info['key_enc'] = not bool(pem_obj_info['bin'].startswith('openssh-key-v1\x00\x00\x00\x00\x04none')) + elif pem_obj_info.get('key_type', ''): + pem_obj_info['key_enc'] = bool('ENCRYPTED' in pem_obj_info['data']) + + pem_objects.append(pem_obj_info) + + # Validate that the number of keys and certs provided are within the limits. 
+ key_count_dict = dict(min_keys=min_keys, max_keys=max_keys, key_count=key_count) + if key_count < min_keys: + if min_keys == 1: + if max_keys == min_keys: + raise ValidationError(_('Exactly one private key is required.')) + else: + raise ValidationError(_('At least one private key is required.')) + else: + raise ValidationError(_('At least %(min_keys)d private keys are required, only %(key_count)d provided.') % key_count_dict) + elif max_keys is not None and key_count > max_keys: + if max_keys == 1: + raise ValidationError(_('Only one private key is allowed, %(key_count)d provided.') % key_count_dict) + else: + raise ValidationError(_('No more than %(max_keys)d private keys are allowed, %(key_count)d provided.') % key_count_dict) + cert_count_dict = dict(min_certs=min_certs, max_certs=max_certs, cert_count=cert_count) + if cert_count < min_certs: + if min_certs == 1: + if max_certs == min_certs: + raise ValidationError(_('Exactly one certificate is required.')) + else: + raise ValidationError(_('At least one certificate is required.')) + else: + raise ValidationError(_('At least %(min_certs)d certificates are required, only %(cert_count)d provided.') % cert_count_dict) + elif max_certs is not None and cert_count > max_certs: + if max_certs == 1: + raise ValidationError(_('Only one certificate is allowed, %(cert_count)d provided.') % cert_count_dict) + else: + raise ValidationError(_('No more than %(max_certs)d certificates are allowed, %(cert_count)d provided.') % cert_count_dict) + + return pem_objects + + +def validate_private_key(data): + """ + Validate that data contains exactly one private key. + """ + return validate_pem(data, min_keys=1, max_keys=1, max_certs=0) + + +def validate_certificate(data): + """ + Validate that data contains one or more certificates. Adds BEGIN/END lines + if necessary. + """ + if 'BEGIN CERTIFICATE' not in data: + data = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----\n'.format(data) + return validate_pem(data, max_keys=0, min_certs=1) + + +def validate_ssh_private_key(data): + """ + Validate that data contains at least one private key and optionally + certificates; should handle any valid options for ssh_private_key on a + credential. 
+ """ + return validate_pem(data, min_keys=1) + + +def vars_validate_or_raise(vars_str): + """ + Validate that fields like extra_vars or variables on resources like + job templates, inventories, or hosts are either an acceptable + blank string, or are valid JSON or YAML dict + """ + try: + json.loads((vars_str or '').strip() or '{}') + return vars_str + except ValueError: + pass + try: + r = yaml.safe_load(vars_str) + if not (isinstance(r, basestring) and r.startswith('OrderedDict(')): + return vars_str + except yaml.YAMLError: + pass + raise RestValidationError(_('Must be valid JSON or YAML.')) diff --git a/awx/main/views.py b/awx/main/views.py index a1036a96e6..f476f81cfd 100644 --- a/awx/main/views.py +++ b/awx/main/views.py @@ -4,6 +4,7 @@ # Django from django.shortcuts import render from django.utils.html import format_html +from django.utils.translation import ugettext_lazy as _ # Django REST Framework from rest_framework import exceptions, permissions, views @@ -16,7 +17,7 @@ class ApiErrorView(views.APIView): metadata_class = None allowed_methods = ('GET', 'HEAD') exception_class = exceptions.APIException - view_name = 'API Error' + view_name = _('API Error') def get_view_name(self): return self.view_name @@ -45,31 +46,31 @@ def handle_error(request, status=404, **kwargs): def handle_400(request): kwargs = { - 'name': 'Bad Request', - 'content': 'The request could not be understood by the server.', + 'name': _('Bad Request'), + 'content': _('The request could not be understood by the server.'), } return handle_error(request, 400, **kwargs) def handle_403(request): kwargs = { - 'name': 'Forbidden', - 'content': 'You don\'t have permission to access the requested resource.', + 'name': _('Forbidden'), + 'content': _('You don\'t have permission to access the requested resource.'), } return handle_error(request, 403, **kwargs) def handle_404(request): kwargs = { - 'name': 'Not Found', - 'content': 'The requested resource could not be found.', + 'name': _('Not Found'), + 'content': _('The requested resource could not be found.'), } return handle_error(request, 404, **kwargs) def handle_500(request): kwargs = { - 'name': 'Server Error', - 'content': 'A server error has occurred.', + 'name': _('Server Error'), + 'content': _('A server error has occurred.'), } return handle_error(request, 500, **kwargs) diff --git a/awx/playbooks/project_update.yml b/awx/playbooks/project_update.yml index 1b2f4520f3..0f4d354ff7 100644 --- a/awx/playbooks/project_update.yml +++ b/awx/playbooks/project_update.yml @@ -17,28 +17,93 @@ tasks: - name: delete project directory before update - file: path={{project_path|quote}} state=absent + file: + path: "{{project_path|quote}}" + state: absent when: scm_delete_on_update|default('') - name: update project using git and accept hostkey - git: dest={{project_path|quote}} repo={{scm_url|quote}} version={{scm_branch|quote}} force={{scm_clean}} accept_hostkey={{scm_accept_hostkey}} + git: + dest: "{{project_path|quote}}" + repo: "{{scm_url|quote}}" + version: "{{scm_branch|quote}}" + force: "{{scm_clean}}" + accept_hostkey: "{{scm_accept_hostkey}}" + #clone: "{{ scm_full_checkout }}" + #update: "{{ scm_full_checkout }}" when: scm_type == 'git' and scm_accept_hostkey is defined + register: scm_result + + - name: Set the git repository version + set_fact: + scm_version: "{{ scm_result['after'] }}" + when: "'after' in scm_result" - name: update project using git - git: dest={{project_path|quote}} repo={{scm_url|quote}} version={{scm_branch|quote}} force={{scm_clean}} + 
git: + dest: "{{project_path|quote}}" + repo: "{{scm_url|quote}}" + version: "{{scm_branch|quote}}" + force: "{{scm_clean}}" + #clone: "{{ scm_full_checkout }}" + #update: "{{ scm_full_checkout }}" when: scm_type == 'git' and scm_accept_hostkey is not defined + register: scm_result + + - name: Set the git repository version + set_fact: + scm_version: "{{ scm_result['after'] }}" + when: "'after' in scm_result" - name: update project using hg - hg: dest={{project_path|quote}} repo={{scm_url|quote}} revision={{scm_branch|quote}} force={{scm_clean}} + hg: + dest: "{{project_path|quote}}" + repo: "{{scm_url|quote}}" + revision: "{{scm_branch|quote}}" + force: "{{scm_clean}}" + #clone: "{{ scm_full_checkout }}" + #update: "{{ scm_full_checkout }}" when: scm_type == 'hg' + register: scm_result + + - name: Set the hg repository version + set_fact: + scm_version: "{{ scm_result['after'] }}" + when: "'after' in scm_result" - name: update project using svn - subversion: dest={{project_path|quote}} repo={{scm_url|quote}} revision={{scm_branch|quote}} force={{scm_clean}} + subversion: + dest: "{{project_path|quote}}" + repo: "{{scm_url|quote}}" + revision: "{{scm_branch|quote}}" + force: "{{scm_clean}}" + #checkout: "{{ scm_full_checkout }}" + #update: "{{ scm_full_checkout }}" when: scm_type == 'svn' and not scm_username|default('') + register: scm_result + + - name: Set the svn repository version + set_fact: + scm_version: "{{ scm_result['after'] }}" + when: "'after' in scm_result" - name: update project using svn with auth - subversion: dest={{project_path|quote}} repo={{scm_url|quote}} revision={{scm_branch|quote}} force={{scm_clean}} username={{scm_username|quote}} password={{scm_password|quote}} + subversion: + dest: "{{project_path|quote}}" + repo: "{{scm_url|quote}}" + revision: "{{scm_branch|quote}}" + force: "{{scm_clean}}" + username: "{{scm_username|quote}}" + password: "{{scm_password|quote}}" + #checkout: "{{ scm_full_checkout }}" + #update: "{{ scm_full_checkout }}" when: scm_type == 'svn' and scm_username|default('') + register: scm_result + + - name: Set the svn repository version + set_fact: + scm_version: "{{ scm_result['after'] }}" + when: "'after' in scm_result" - name: detect requirements.yml stat: path={{project_path|quote}}/roles/requirements.yml @@ -48,4 +113,20 @@ command: ansible-galaxy install -r requirements.yml -p {{project_path|quote}}/roles/ --force args: chdir: "{{project_path|quote}}/roles" - when: doesRequirementsExist.stat.exists + when: doesRequirementsExist.stat.exists and scm_full_checkout|bool + + # format provided by ansible is ["Revision: 12345", "URL: ..."] + - name: parse subversion version string properly + set_fact: + scm_version: "{{scm_version|regex_replace('^.*Revision: ([0-9]+).*$', '\\1')}}" + when: scm_type == 'svn' + + - name: Repository Version + debug: msg="Repository Version {{ scm_version }}" + when: scm_version is defined + + - name: Write Repository Version + copy: + dest: "{{ scm_revision_output }}" + content: "{{ scm_version }}" + when: scm_version is defined and scm_revision_output is defined diff --git a/awx/playbooks/scan_facts.yml b/awx/playbooks/scan_facts.yml index 1b90380c62..d24d07d6fa 100644 --- a/awx/playbooks/scan_facts.yml +++ b/awx/playbooks/scan_facts.yml @@ -3,11 +3,31 @@ scan_use_checksum: false scan_use_recursive: false tasks: - - scan_packages: + + - name: "Scan packages (Unix/Linux)" + scan_packages: os_family: '{{ ansible_os_family }}' - - scan_services: - - scan_files: + when: ansible_os_family != "Windows" + - name: 
"Scan services (Unix/Linux)" + scan_services: + when: ansible_os_family != "Windows" + - name: "Scan files (Unix/Linux)" + scan_files: paths: '{{ scan_file_paths }}' get_checksum: '{{ scan_use_checksum }}' recursive: '{{ scan_use_recursive }}' - when: scan_file_paths is defined \ No newline at end of file + when: scan_file_paths is defined and ansible_os_family != "Windows" + + - name: "Scan packages (Windows)" + win_scan_packages: + when: ansible_os_family == "Windows" + - name: "Scan services (Windows)" + win_scan_services: + when: ansible_os_family == "Windows" + - name: "Scan files (Windows)" + win_scan_files: + paths: '{{ scan_file_paths }}' + get_checksum: '{{ scan_use_checksum }}' + recursive: '{{ scan_use_recursive }}' + when: scan_file_paths is defined and ansible_os_family == "Windows" + diff --git a/awx/plugins/callback/job_event_callback.py b/awx/plugins/callback/job_event_callback.py deleted file mode 100644 index 2049edc4b8..0000000000 --- a/awx/plugins/callback/job_event_callback.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# This file is a utility Ansible plugin that is not part of the AWX or Ansible -# packages. It does not import any code from either package, nor does its -# license apply to Ansible or AWX. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# Neither the name of the nor the names of its contributors -# may be used to endorse or promote products derived from this software -# without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# Python -import datetime -import glob -import json -import logging -import os -import pwd -import urlparse -import re -from copy import copy - -# Requests -import requests - -# ZeroMQ -import zmq - -import psutil - -# Only use statsd if there's a statsd host in the environment -# otherwise just do a noop. 
-# NOTE: I've disabled this for the time being until we sort through the venv dependency around this -# if os.environ.get('GRAPHITE_PORT_8125_UDP_ADDR'): -# from statsd import StatsClient -# statsd = StatsClient(host=os.environ['GRAPHITE_PORT_8125_UDP_ADDR'], -# port=8125, -# prefix='tower.job.event_callback', -# maxudpsize=512) -# else: -# from statsd import StatsClient -# class NoStatsClient(StatsClient): -# def __init__(self, *args, **kwargs): -# pass -# def _prepare(self, stat, value, rate): -# pass -# def _send_stat(self, stat, value, rate): -# pass -# def _send(self, *args, **kwargs): -# pass -# statsd = NoStatsClient() - -CENSOR_FIELD_WHITELIST = [ - 'msg', - 'failed', - 'changed', - 'results', - 'start', - 'end', - 'delta', - 'cmd', - '_ansible_no_log', - 'rc', - 'failed_when_result', - 'skipped', - 'skip_reason', -] - -def censor(obj, no_log=False): - if not isinstance(obj, dict): - if no_log: - return "the output has been hidden due to the fact that 'no_log: true' was specified for this result" - return obj - if obj.get('_ansible_no_log', no_log): - new_obj = {} - for k in CENSOR_FIELD_WHITELIST: - if k in obj: - new_obj[k] = obj[k] - if k == 'cmd' and k in obj: - if isinstance(obj['cmd'], list): - obj['cmd'] = ' '.join(obj['cmd']) - if re.search(r'\s', obj['cmd']): - new_obj['cmd'] = re.sub(r'^(([^\s\\]|\\\s)+).*$', - r'\1 ', - obj['cmd']) - new_obj['censored'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" - obj = new_obj - if 'results' in obj: - if isinstance(obj['results'], list): - for i in xrange(len(obj['results'])): - obj['results'][i] = censor(obj['results'][i], obj.get('_ansible_no_log', no_log)) - elif obj.get('_ansible_no_log', False): - obj['results'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" - return obj - - -class TokenAuth(requests.auth.AuthBase): - - def __init__(self, token): - self.token = token - - def __call__(self, request): - request.headers['Authorization'] = 'Token %s' % self.token - return request - - -class BaseCallbackModule(object): - ''' - Callback module for logging ansible-playbook job events via the REST API. 
- ''' - - def __init__(self): - self.base_url = os.getenv('REST_API_URL', '') - self.auth_token = os.getenv('REST_API_TOKEN', '') - self.callback_consumer_port = os.getenv('CALLBACK_CONSUMER_PORT', '') - self.context = None - self.socket = None - self._init_logging() - self._init_connection() - self.counter = 0 - - def _init_logging(self): - try: - self.job_callback_debug = int(os.getenv('JOB_CALLBACK_DEBUG', '0')) - except ValueError: - self.job_callback_debug = 0 - self.logger = logging.getLogger('awx.plugins.callback.job_event_callback') - if self.job_callback_debug >= 2: - self.logger.setLevel(logging.DEBUG) - elif self.job_callback_debug >= 1: - self.logger.setLevel(logging.INFO) - else: - self.logger.setLevel(logging.WARNING) - handler = logging.StreamHandler() - formatter = logging.Formatter('%(levelname)-8s %(process)-8d %(message)s') - handler.setFormatter(formatter) - self.logger.addHandler(handler) - self.logger.propagate = False - - def _init_connection(self): - self.context = None - self.socket = None - - def _start_connection(self): - self.context = zmq.Context() - self.socket = self.context.socket(zmq.REQ) - self.socket.setsockopt(zmq.RCVTIMEO, 4000) - self.socket.setsockopt(zmq.LINGER, 2000) - self.socket.connect(self.callback_consumer_port) - - def _post_job_event_queue_msg(self, event, event_data): - self.counter += 1 - msg = { - 'event': event, - 'event_data': event_data, - 'counter': self.counter, - 'created': datetime.datetime.utcnow().isoformat(), - } - if getattr(self, 'job_id', None): - msg['job_id'] = self.job_id - if getattr(self, 'ad_hoc_command_id', None): - msg['ad_hoc_command_id'] = self.ad_hoc_command_id - - active_pid = os.getpid() - if self.job_callback_debug: - msg.update({ - 'pid': active_pid, - }) - for retry_count in xrange(4): - try: - if not hasattr(self, 'connection_pid'): - self.connection_pid = active_pid - if self.connection_pid != active_pid: - self._init_connection() - if self.context is None: - self._start_connection() - - self.socket.send_json(msg) - self.socket.recv() - return - except Exception, e: - self.logger.info('Publish Job Event Exception: %r, retry=%d', e, - retry_count, exc_info=True) - retry_count += 1 - if retry_count >= 3: - break - - def _post_rest_api_event(self, event, event_data): - data = json.dumps({ - 'event': event, - 'event_data': event_data, - }) - parts = urlparse.urlsplit(self.base_url) - if parts.username and parts.password: - auth = (parts.username, parts.password) - elif self.auth_token: - auth = TokenAuth(self.auth_token) - else: - auth = None - port = parts.port or (443 if parts.scheme == 'https' else 80) - url = urlparse.urlunsplit([parts.scheme, - '%s:%d' % (parts.hostname, port), - parts.path, parts.query, parts.fragment]) - url = urlparse.urljoin(url, self.rest_api_path) - headers = {'content-type': 'application/json'} - response = requests.post(url, data=data, headers=headers, auth=auth) - response.raise_for_status() - - def _log_event(self, event, **event_data): - if 'res' in event_data: - event_data['res'] = censor(copy(event_data['res'])) - - if self.callback_consumer_port: - self._post_job_event_queue_msg(event, event_data) - else: - self._post_rest_api_event(event, event_data) - - def on_any(self, *args, **kwargs): - pass - - def runner_on_failed(self, host, res, ignore_errors=False): - self._log_event('runner_on_failed', host=host, res=res, - ignore_errors=ignore_errors) - - def v2_runner_on_failed(self, result, ignore_errors=False): - event_is_loop = result._task.loop if hasattr(result._task, 'loop') 
else None - self._log_event('runner_on_failed', host=result._host.name, - res=result._result, task=result._task, - ignore_errors=ignore_errors, event_loop=event_is_loop) - - def runner_on_ok(self, host, res): - self._log_event('runner_on_ok', host=host, res=res) - - def v2_runner_on_ok(self, result): - event_is_loop = result._task.loop if hasattr(result._task, 'loop') else None - self._log_event('runner_on_ok', host=result._host.name, - task=result._task, res=result._result, - event_loop=event_is_loop) - - def runner_on_error(self, host, msg): - self._log_event('runner_on_error', host=host, msg=msg) - - def v2_runner_on_error(self, result): - pass # Currently not implemented in v2 - - def runner_on_skipped(self, host, item=None): - self._log_event('runner_on_skipped', host=host, item=item) - - def v2_runner_on_skipped(self, result): - event_is_loop = result._task.loop if hasattr(result._task, 'loop') else None - self._log_event('runner_on_skipped', host=result._host.name, - task=result._task, event_loop=event_is_loop) - - def runner_on_unreachable(self, host, res): - self._log_event('runner_on_unreachable', host=host, res=res) - - def v2_runner_on_unreachable(self, result): - self._log_event('runner_on_unreachable', host=result._host.name, - task=result._task, res=result._result) - - def runner_on_no_hosts(self): - self._log_event('runner_on_no_hosts') - - def v2_runner_on_no_hosts(self, task): - self._log_event('runner_on_no_hosts', task=task) - - # V2 does not use the _on_async callbacks (yet). - - def runner_on_async_poll(self, host, res, jid, clock): - self._log_event('runner_on_async_poll', host=host, res=res, jid=jid, - clock=clock) - - def runner_on_async_ok(self, host, res, jid): - self._log_event('runner_on_async_ok', host=host, res=res, jid=jid) - - def runner_on_async_failed(self, host, res, jid): - self._log_event('runner_on_async_failed', host=host, res=res, jid=jid) - - def runner_on_file_diff(self, host, diff): - self._log_event('runner_on_file_diff', host=host, diff=diff) - - def v2_runner_on_file_diff(self, result, diff): - self._log_event('runner_on_file_diff', host=result._host.name, - task=result._task, diff=diff) - - def v2_runner_item_on_ok(self, result): - self._log_event('runner_item_on_ok', res=result._result, host=result._host.name, - task=result._task) - - def v2_runner_item_on_failed(self, result): - self._log_event('runner_item_on_failed', res=result._result, host=result._host.name, - task=result._task) - - def v2_runner_item_on_skipped(self, result): - self._log_event('runner_item_on_skipped', res=result._result, host=result._host.name, - task=result._task) - - @staticmethod - def terminate_ssh_control_masters(): - # Determine if control persist is being used and if any open sockets - # exist after running the playbook. - cp_path = os.environ.get('ANSIBLE_SSH_CONTROL_PATH', '') - if not cp_path: - return - cp_dir = os.path.dirname(cp_path) - if not os.path.exists(cp_dir): - return - cp_pattern = os.path.join(cp_dir, 'ansible-ssh-*') - cp_files = glob.glob(cp_pattern) - if not cp_files: - return - - # Attempt to find any running control master processes. 
- username = pwd.getpwuid(os.getuid())[0] - ssh_cm_procs = [] - for proc in psutil.process_iter(): - try: - pname = proc.name() - pcmdline = proc.cmdline() - pusername = proc.username() - except psutil.NoSuchProcess: - continue - if pusername != username: - continue - if pname != 'ssh': - continue - for cp_file in cp_files: - if pcmdline and cp_file in pcmdline[0]: - ssh_cm_procs.append(proc) - break - - # Terminate then kill control master processes. Workaround older - # version of psutil that may not have wait_procs implemented. - for proc in ssh_cm_procs: - proc.terminate() - procs_gone, procs_alive = psutil.wait_procs(ssh_cm_procs, timeout=5) - for proc in procs_alive: - proc.kill() - - -class JobCallbackModule(BaseCallbackModule): - ''' - Callback module for logging ansible-playbook job events via the REST API. - ''' - - # These events should never have an associated play. - EVENTS_WITHOUT_PLAY = [ - 'playbook_on_start', - 'playbook_on_stats', - ] - # These events should never have an associated task. - EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [ - 'playbook_on_setup', - 'playbook_on_notify', - 'playbook_on_import_for_host', - 'playbook_on_not_import_for_host', - 'playbook_on_no_hosts_matched', - 'playbook_on_no_hosts_remaining', - ] - - def __init__(self): - self.job_id = int(os.getenv('JOB_ID', '0')) - self.rest_api_path = '/api/v1/jobs/%d/job_events/' % self.job_id - super(JobCallbackModule, self).__init__() - - def _log_event(self, event, **event_data): - play = getattr(self, 'play', None) - play_name = getattr(play, 'name', '') - if play_name and event not in self.EVENTS_WITHOUT_PLAY: - event_data['play'] = play_name - task = event_data.pop('task', None) or getattr(self, 'task', None) - task_name = None - role_name = None - if task: - if hasattr(task, 'get_name'): - # in v2, the get_name() method creates the name - task_name = task.get_name() - else: - # v1 datastructure - task_name = getattr(task, 'name', '') - if hasattr(task, '_role') and task._role: - # v2 datastructure - role_name = task._role._role_name - else: - # v1 datastructure - role_name = getattr(task, 'role_name', '') - if task_name and event not in self.EVENTS_WITHOUT_TASK: - event_data['task'] = task_name - if role_name and event not in self.EVENTS_WITHOUT_TASK: - event_data['role'] = role_name - super(JobCallbackModule, self)._log_event(event, **event_data) - - def playbook_on_start(self): - self._log_event('playbook_on_start') - - def v2_playbook_on_start(self, playbook): - # NOTE: the playbook parameter was added late in Ansible 2.0 development - # so we don't currently utilize but could later. 
- self.playbook_on_start() - - def playbook_on_notify(self, host, handler): - self._log_event('playbook_on_notify', host=host, handler=handler) - - def v2_playbook_on_notify(self, result, handler): - self._log_event('playbook_on_notify', host=result._host.name, - task=result._task, handler=handler) - - def playbook_on_no_hosts_matched(self): - self._log_event('playbook_on_no_hosts_matched') - - def v2_playbook_on_no_hosts_matched(self): - # since there is no task/play info, this is currently identical - # to the v1 callback which does the same thing - self.playbook_on_no_hosts_matched() - - def playbook_on_no_hosts_remaining(self): - self._log_event('playbook_on_no_hosts_remaining') - - def v2_playbook_on_no_hosts_remaining(self): - # since there is no task/play info, this is currently identical - # to the v1 callback which does the same thing - self.playbook_on_no_hosts_remaining() - - def playbook_on_task_start(self, name, is_conditional): - self._log_event('playbook_on_task_start', name=name, - is_conditional=is_conditional) - - def v2_playbook_on_task_start(self, task, is_conditional): - self._log_event('playbook_on_task_start', task=task, - name=task.get_name(), is_conditional=is_conditional) - - def v2_playbook_on_cleanup_task_start(self, task): - # re-using playbook_on_task_start event here for this v2-specific - # event, though we may consider any changes necessary to distinguish - # this from a normal task - self._log_event('playbook_on_task_start', task=task, - name=task.get_name()) - - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, - encrypt=None, confirm=False, salt_size=None, - salt=None, default=None): - self._log_event('playbook_on_vars_prompt', varname=varname, - private=private, prompt=prompt, encrypt=encrypt, - confirm=confirm, salt_size=salt_size, salt=salt, - default=default) - - def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, - encrypt=None, confirm=False, salt_size=None, - salt=None, default=None): - pass # not currently used in v2 (yet) - - def playbook_on_setup(self): - self._log_event('playbook_on_setup') - - def v2_playbook_on_setup(self): - pass # not currently used in v2 (yet) - - def playbook_on_import_for_host(self, host, imported_file): - # don't care about recording this one - # self._log_event('playbook_on_import_for_host', host=host, - # imported_file=imported_file) - pass - - def v2_playbook_on_import_for_host(self, result, imported_file): - pass # not currently used in v2 (yet) - - def playbook_on_not_import_for_host(self, host, missing_file): - # don't care about recording this one - #self._log_event('playbook_on_not_import_for_host', host=host, - # missing_file=missing_file) - pass - - def v2_playbook_on_not_import_for_host(self, result, missing_file): - pass # not currently used in v2 (yet) - - def playbook_on_play_start(self, name): - # Only play name is passed via callback, get host pattern from the play. - pattern = getattr(getattr(self, 'play', None), 'hosts', name) - self._log_event('playbook_on_play_start', name=name, pattern=pattern) - - def v2_playbook_on_play_start(self, play): - setattr(self, 'play', play) - # Ansible 2.0.0.2 doesn't default .name to hosts like it did in 1.9.4, - # though that default will likely return in a future version of Ansible. 
- if (not hasattr(play, 'name') or not play.name) and hasattr(play, 'hosts'): - if isinstance(play.hosts, list): - play.name = ','.join(play.hosts) - else: - play.name = play.hosts - self._log_event('playbook_on_play_start', name=play.name, - pattern=play.hosts) - - def playbook_on_stats(self, stats): - d = {} - for attr in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'): - d[attr] = getattr(stats, attr) - self._log_event('playbook_on_stats', **d) - self.terminate_ssh_control_masters() - - def v2_playbook_on_stats(self, stats): - self.playbook_on_stats(stats) - - def v2_playbook_on_include(self, included_file): - self._log_event('playbook_on_include', included_file=included_file) - -class AdHocCommandCallbackModule(BaseCallbackModule): - ''' - Callback module for logging ansible ad hoc events via ZMQ or the REST API. - ''' - - def __init__(self): - self.ad_hoc_command_id = int(os.getenv('AD_HOC_COMMAND_ID', '0')) - self.rest_api_path = '/api/v1/ad_hoc_commands/%d/events/' % self.ad_hoc_command_id - self.skipped_hosts = set() - super(AdHocCommandCallbackModule, self).__init__() - - def _log_event(self, event, **event_data): - # Ignore task for ad hoc commands (with v2). - event_data.pop('task', None) - super(AdHocCommandCallbackModule, self)._log_event(event, **event_data) - - def runner_on_file_diff(self, host, diff): - pass # Ignore file diff for ad hoc commands. - - def runner_on_ok(self, host, res): - # When running in check mode using a module that does not support check - # mode, Ansible v1.9 will call runner_on_skipped followed by - # runner_on_ok for the same host; only capture the skipped event and - # ignore the ok event. - if host not in self.skipped_hosts: - super(AdHocCommandCallbackModule, self).runner_on_ok(host, res) - - def runner_on_skipped(self, host, item=None): - super(AdHocCommandCallbackModule, self).runner_on_skipped(host, item) - self.skipped_hosts.add(host) - -if os.getenv('JOB_ID', ''): - CallbackModule = JobCallbackModule -elif os.getenv('AD_HOC_COMMAND_ID', ''): - CallbackModule = AdHocCommandCallbackModule diff --git a/awx/plugins/callback/minimal.py b/awx/plugins/callback/minimal.py new file mode 100644 index 0000000000..fcbaa76d55 --- /dev/null +++ b/awx/plugins/callback/minimal.py @@ -0,0 +1,30 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Python +import os +import sys + +# Add awx/lib to sys.path. 
+awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'lib')) +if awx_lib_path not in sys.path: + sys.path.insert(0, awx_lib_path) + +# Tower Display Callback +from tower_display_callback import TowerMinimalCallbackModule as CallbackModule # noqa diff --git a/awx/plugins/callback/tower_display.py b/awx/plugins/callback/tower_display.py new file mode 100644 index 0000000000..725232dfe4 --- /dev/null +++ b/awx/plugins/callback/tower_display.py @@ -0,0 +1,30 @@ +# Copyright (c) 2016 Ansible by Red Hat, Inc. +# +# This file is part of Ansible Tower, but depends on code imported from Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +# Python +import os +import sys + +# Add awx/lib to sys.path. +awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'lib')) +if awx_lib_path not in sys.path: + sys.path.insert(0, awx_lib_path) + +# Tower Display Callback +from tower_display_callback import TowerDefaultCallbackModule as CallbackModule # noqa diff --git a/awx/plugins/fact_caching/tower.py b/awx/plugins/fact_caching/tower.py index 3e89ccef36..57d413f018 100755 --- a/awx/plugins/fact_caching/tower.py +++ b/awx/plugins/fact_caching/tower.py @@ -29,20 +29,16 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-import sys import os import time -from ansible import constants as C + try: from ansible.cache.base import BaseCacheModule except: from ansible.plugins.cache.base import BaseCacheModule -try: - import zmq -except ImportError: - print("pyzmq is required") - sys.exit(1) +from kombu import Connection, Exchange, Producer + class CacheModule(BaseCacheModule): @@ -51,19 +47,12 @@ class CacheModule(BaseCacheModule): self._cache = {} self._all_keys = {} - # This is the local tower zmq connection - self._tower_connection = C.CACHE_PLUGIN_CONNECTION self.date_key = time.time() - try: - self.context = zmq.Context() - self.socket = self.context.socket(zmq.REQ) - self.socket.setsockopt(zmq.RCVTIMEO, 4000) - self.socket.setsockopt(zmq.LINGER, 2000) - self.socket.connect(self._tower_connection) - except Exception, e: - print("Connection to zeromq failed at %s with error: %s" % (str(self._tower_connection), - str(e))) - sys.exit(1) + self.callback_connection = os.environ['CALLBACK_CONNECTION'] + self.callback_queue = os.environ['FACT_QUEUE'] + self.connection = Connection(self.callback_connection) + self.exchange = Exchange(self.callback_queue, type='direct') + self.producer = Producer(self.connection) def filter_ansible_facts(self, facts): return dict((k, facts[k]) for k in facts.keys() if k.startswith('ansible_')) @@ -116,8 +105,12 @@ class CacheModule(BaseCacheModule): } # Emit fact data to tower for processing - self.socket.send_json(packet) - self.socket.recv() + self.producer.publish(packet, + serializer='json', + compression='bzip2', + exchange=self.exchange, + declare=[self.exchange], + routing_key=self.callback_queue) def keys(self): return self._cache.keys() diff --git a/awx/plugins/inventory/azure_rm.py b/awx/plugins/inventory/azure_rm.py index f3c9e7c28d..8545967c37 100755 --- a/awx/plugins/inventory/azure_rm.py +++ b/awx/plugins/inventory/azure_rm.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright (c) 2016 Matt Davis, # Chris Houseknecht, @@ -786,11 +786,11 @@ class AzureInventory(object): def main(): if not HAS_AZURE: - sys.exit("The Azure python sdk is not installed (try 'pip install azure==2.0.0rc5') - {0}".format(HAS_AZURE_EXC)) + sys.exit("The Azure python sdk is not installed (try 'pip install azure>=2.0.0rc5') - {0}".format(HAS_AZURE_EXC)) - if LooseVersion(azure_compute_version) != LooseVersion(AZURE_MIN_VERSION): + if LooseVersion(azure_compute_version) < LooseVersion(AZURE_MIN_VERSION): sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} " - "Do you have Azure == 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version)) + "Do you have Azure >= 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version)) AzureInventory() diff --git a/awx/plugins/inventory/cloudforms.py b/awx/plugins/inventory/cloudforms.py index 65d95853d5..69c149bfc5 100755 --- a/awx/plugins/inventory/cloudforms.py +++ b/awx/plugins/inventory/cloudforms.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # vim: set fileencoding=utf-8 : # # Copyright (C) 2016 Guido Günther @@ -459,4 +459,3 @@ class CloudFormsInventory(object): return json.dumps(data) CloudFormsInventory() - diff --git a/awx/plugins/inventory/ec2.ini.example b/awx/plugins/inventory/ec2.ini.example index 1d7428b2ed..2b9f089135 100644 --- a/awx/plugins/inventory/ec2.ini.example +++ b/awx/plugins/inventory/ec2.ini.example @@ -29,23 +29,41 @@ regions_exclude = us-gov-west-1,cn-north-1 # in the event of a collision. 
destination_variable = public_dns_name +# This allows you to override the inventory_name with an ec2 variable, instead +# of using the destination_variable above. Addressing (aka ansible_ssh_host) +# will still use destination_variable. Tags should be written as 'tag_TAGNAME'. +#hostname_variable = tag_Name + # For server inside a VPC, using DNS names may not make sense. When an instance # has 'subnet_id' set, this variable is used. If the subnet is public, setting # this to 'ip_address' will return the public IP address. For instances in a # private subnet, this should be set to 'private_ip_address', and Ansible must # be run from within EC2. The key of an EC2 tag may optionally be used; however # the boto instance variables hold precedence in the event of a collision. -# WARNING: - instances that are in the private vpc, _without_ public ip address -# will not be listed in the inventory untill You set: -# vpc_destination_variable = 'private_ip_address' +# WARNING: - instances that are in the private vpc, _without_ public ip address +# will not be listed in the inventory until You set: +# vpc_destination_variable = private_ip_address vpc_destination_variable = ip_address +# The following two settings allow flexible ansible host naming based on a +# python format string and a comma-separated list of ec2 tags. Note that: +# +# 1) If the tags referenced are not present for some instances, empty strings +# will be substituted in the format string. +# 2) This overrides both destination_variable and vpc_destination_variable. +# +#destination_format = {0}.{1}.example.com +#destination_format_tags = Name,environment + # To tag instances on EC2 with the resource records that point to them from # Route53, uncomment and set 'route53' to True. route53 = False # To exclude RDS instances from the inventory, uncomment and set to False. -#rds = False +rds = False + +# To exclude ElastiCache instances from the inventory, uncomment and set to False. +elasticache = False # Additionally, you can specify the list of zones to exclude looking up in # 'route53_excluded_zones' as a comma-separated list. @@ -55,10 +73,30 @@ route53 = False # 'all_instances' to True to return all instances regardless of state. all_instances = False +# By default, only EC2 instances in the 'running' state are returned. Specify +# EC2 instance states to return as a comma-separated list. This +# option is overriden when 'all_instances' is True. +# instance_states = pending, running, shutting-down, terminated, stopping, stopped + # By default, only RDS instances in the 'available' state are returned. Set # 'all_rds_instances' to True return all RDS instances regardless of state. all_rds_instances = False +# Include RDS cluster information (Aurora etc.) +include_rds_clusters = False + +# By default, only ElastiCache clusters and nodes in the 'available' state +# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' +# to True return all ElastiCache clusters and nodes, regardless of state. +# +# Note that all_elasticache_nodes only applies to listed clusters. That means +# if you set all_elastic_clusters to false, no node will be return from +# unavailable clusters, regardless of the state and to what you set for +# all_elasticache_nodes. +all_elasticache_replication_groups = False +all_elasticache_clusters = False +all_elasticache_nodes = False + # API calls to EC2 are slow. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. 
 # will be written to this directory:
@@ -69,11 +107,18 @@
 cache_path = ~/.ansible/tmp
 
 # The number of seconds a cache file is considered valid. After this many
 # seconds, a new API call will be made, and the cache file will be updated.
 # To disable the cache, set this value to 0
-cache_max_age = 300
+cache_max_age = 0
 
 # Organize groups into a nested/hierarchy instead of a flat namespace.
 nested_groups = False
 
+# Replace - tags when creating groups to avoid issues with ansible
+replace_dash_in_groups = True
+
+# If set to true, any tag of the form "a,b,c" is expanded into a list
+# and the results are used to create additional tag_* inventory groups.
+expand_csv_tags = True
+
 # The EC2 inventory output can become very large. To manage its size,
 # configure which groups should be created.
 group_by_instance_id = True
@@ -89,6 +134,10 @@ group_by_tag_none = True
 group_by_route53_names = True
 group_by_rds_engine = True
 group_by_rds_parameter_group = True
+group_by_elasticache_engine = True
+group_by_elasticache_cluster = True
+group_by_elasticache_parameter_group = True
+group_by_elasticache_replication_group = True
 
 # If you only want to include hosts that match a certain regular expression
 # pattern_include = staging-*
@@ -113,5 +162,28 @@ group_by_rds_parameter_group = True
 # You can use wildcards in filter values also. Below will list instances which
 # tag Name value matches webservers1*
-# (ex. webservers15, webservers1a, webservers123 etc) 
+# (ex. webservers15, webservers1a, webservers123 etc)
 # instance_filters = tag:Name=webservers1*
+
+# A boto configuration profile may be used to separate out credentials
+# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
+# boto_profile = some-boto-profile-name
+
+
+[credentials]
+
+# The AWS credentials can optionally be specified here. Credentials specified
+# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
+# AWS_PROFILE is set, or if the boto_profile property above is set.
+#
+# Supplying AWS credentials here is not recommended, as it introduces
+# non-trivial security concerns. When going down this route, please make sure
+# to set access permissions for this file correctly, e.g. handle it the same
+# way as you would a private SSH key.
+#
+# Unlike the boto and AWS configure files, this section does not support
+# profiles.
+#
+# aws_access_key_id = AXXXXXXXXXXXXXX
+# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
+# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/awx/plugins/inventory/ec2.py b/awx/plugins/inventory/ec2.py
index 6068df901f..dcc369e124 100755
--- a/awx/plugins/inventory/ec2.py
+++ b/awx/plugins/inventory/ec2.py
@@ -37,6 +37,7 @@ When run against a specific host, this script returns the following variables:
  - ec2_attachTime
  - ec2_attachment
  - ec2_attachmentId
+ - ec2_block_devices
  - ec2_client_token
  - ec2_deleteOnTermination
  - ec2_description
@@ -131,6 +132,15 @@ from boto import elasticache
 from boto import route53
 import six
 
+from ansible.module_utils import ec2 as ec2_utils
+
+HAS_BOTO3 = False
+try:
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    pass
+
 from six.moves import configparser
 from collections import defaultdict
 
@@ -265,6 +275,12 @@ class Ec2Inventory(object):
         if config.has_option('ec2', 'rds'):
             self.rds_enabled = config.getboolean('ec2', 'rds')
 
+        # Include RDS cluster instances?
+        if config.has_option('ec2', 'include_rds_clusters'):
+            self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
+        else:
+            self.include_rds_clusters = False
+
         # Include ElastiCache instances?
         self.elasticache_enabled = True
         if config.has_option('ec2', 'elasticache'):
@@ -474,6 +490,8 @@ class Ec2Inventory(object):
             if self.elasticache_enabled:
                 self.get_elasticache_clusters_by_region(region)
                 self.get_elasticache_replication_groups_by_region(region)
+            if self.include_rds_clusters:
+                self.include_rds_clusters_by_region(region)
 
         self.write_to_cache(self.inventory, self.cache_path_cache)
         self.write_to_cache(self.index, self.cache_path_index)
@@ -527,6 +545,7 @@ class Ec2Inventory(object):
         instance_ids = []
         for reservation in reservations:
             instance_ids.extend([instance.id for instance in reservation.instances])
+        max_filter_value = 199
         tags = []
         for i in range(0, len(instance_ids), max_filter_value):
@@ -573,6 +592,65 @@ class Ec2Inventory(object):
                 error = "Looks like AWS RDS is down:\n%s" % e.message
             self.fail_with_error(error, 'getting RDS instances')
 
+    def include_rds_clusters_by_region(self, region):
+        if not HAS_BOTO3:
+            self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
+                                 "getting RDS clusters")
+
+        client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
+
+        marker, clusters = '', []
+        while marker is not None:
+            resp = client.describe_db_clusters(Marker=marker)
+            clusters.extend(resp["DBClusters"])
+            marker = resp.get('Marker', None)
+
+        account_id = boto.connect_iam().get_user().arn.split(':')[4]
+        c_dict = {}
+        for c in clusters:
+            # remove these datetime objects as there is no serialisation to json
+            # currently in place and we don't need the data yet
+            if 'EarliestRestorableTime' in c:
+                del c['EarliestRestorableTime']
+            if 'LatestRestorableTime' in c:
+                del c['LatestRestorableTime']
+
+            if self.ec2_instance_filters == {}:
+                matches_filter = True
+            else:
+                matches_filter = False
+
+            try:
+                # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
+                tags = client.list_tags_for_resource(
+                    ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
+                c['Tags'] = tags['TagList']
+
+                if self.ec2_instance_filters:
+                    for filter_key, filter_values in self.ec2_instance_filters.items():
+                        # get AWS tag key e.g. tag:env will be 'env'
+                        tag_name = filter_key.split(":", 1)[1]
+                        # Filter values is a list (if you put multiple values for the same tag name)
+                        matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
+
+                        if matches_filter:
+                            # it matches a filter, so stop looking for further matches
+                            break
+
+            except Exception as e:
+                if e.message.find('DBInstanceNotFound') >= 0:
+                    # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
+ # Ignore errors when trying to find tags for these + pass + + # ignore empty clusters caused by AWS bug + if len(c['DBClusterMembers']) == 0: + continue + elif matches_filter: + c_dict[c['DBClusterIdentifier']] = c + + self.inventory['db_clusters'] = c_dict + def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with nodes' info) in a particular region.''' @@ -1235,7 +1313,7 @@ class Ec2Inventory(object): elif key == 'ec2_tags': for k, v in value.items(): if self.expand_csv_tags and ',' in v: - v = map(lambda x: x.strip(), v.split(',')) + v = list(map(lambda x: x.strip(), v.split(','))) key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': @@ -1246,6 +1324,10 @@ class Ec2Inventory(object): group_names.append(group.name) instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + elif key == 'ec2_block_device_mapping': + instance_vars["ec2_block_devices"] = {} + for k, v in value.items(): + instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id else: pass # TODO Product codes if someone finds them useful diff --git a/awx/plugins/inventory/foreman.ini.example b/awx/plugins/inventory/foreman.ini.example index d5cd56e441..42312dac6c 100644 --- a/awx/plugins/inventory/foreman.ini.example +++ b/awx/plugins/inventory/foreman.ini.example @@ -9,6 +9,9 @@ group_patterns = ["{app}-{tier}-{color}", "{app}-{color}", "{app}", "{tier}"] +group_prefix = foreman_ +# Whether to fetch facts from Foreman and store them on the host +want_facts = True [cache] path = . diff --git a/awx/plugins/inventory/foreman.py b/awx/plugins/inventory/foreman.py index ddcb912fd5..8bff5a8ece 100755 --- a/awx/plugins/inventory/foreman.py +++ b/awx/plugins/inventory/foreman.py @@ -18,14 +18,22 @@ # # This is somewhat based on cobbler inventory +from __future__ import print_function + import argparse -import ConfigParser import copy import os import re -from time import time import requests from requests.auth import HTTPBasicAuth +import sys +from time import time + +try: + import ConfigParser +except ImportError: + import configparser as ConfigParser + try: import json @@ -34,19 +42,34 @@ except ImportError: class ForemanInventory(object): + config_paths = [ + "/etc/ansible/foreman.ini", + os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini', + ] + def __init__(self): - """ Main execution path """ self.inventory = dict() # A list of groups and the hosts in that group - self.cache = dict() # Details about hosts in the inventory - self.params = dict() # Params of each host - self.facts = dict() # Facts of each host + self.cache = dict() # Details about hosts in the inventory + self.params = dict() # Params of each host + self.facts = dict() # Facts of each host self.hostgroups = dict() # host groups + self.session = None # Requests session + def run(self): + if not self._read_settings(): + return False + self._get_inventory() + self._print_data() + return True + + def _read_settings(self): # Read settings and parse CLI arguments - self.read_settings() + if not self.read_settings(): + return False self.parse_cli_args() + return True - # Cache + def _get_inventory(self): if self.args.refresh_cache: self.update_cache() elif not self.is_cache_valid(): @@ -57,9 +80,8 @@ class ForemanInventory(object): self.load_facts_from_cache() self.load_cache_from_cache() + def _print_data(self): data_to_print = "" - - # Data 
to print if self.args.host: data_to_print += self.get_host_info() else: @@ -77,38 +99,36 @@ class ForemanInventory(object): print(data_to_print) def is_cache_valid(self): - """ Determines if the cache files have expired, or if it is still valid """ - + """Determines if the cache is still valid""" if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if (os.path.isfile(self.cache_path_inventory) and os.path.isfile(self.cache_path_params) and - os.path.isfile(self.cache_path_facts)): + os.path.isfile(self.cache_path_facts)): return True return False def read_settings(self): - """ Reads the settings from the foreman.ini file """ + """Reads the settings from the foreman.ini file""" config = ConfigParser.SafeConfigParser() - config_paths = [ - "/etc/ansible/foreman.ini", - os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini', - ] - env_value = os.environ.get('FOREMAN_INI_PATH') if env_value is not None: - config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) + self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) - config.read(config_paths) + config.read(self.config_paths) # Foreman API related - self.foreman_url = config.get('foreman', 'url') - self.foreman_user = config.get('foreman', 'user') - self.foreman_pw = config.get('foreman', 'password') - self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify') + try: + self.foreman_url = config.get('foreman', 'url') + self.foreman_user = config.get('foreman', 'user') + self.foreman_pw = config.get('foreman', 'password') + self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e: + print("Error parsing configuration: %s" % e, file=sys.stderr) + return False # Ansible related try: @@ -138,10 +158,14 @@ class ForemanInventory(object): self.cache_path_inventory = cache_path + "/%s.index" % script self.cache_path_params = cache_path + "/%s.params" % script self.cache_path_facts = cache_path + "/%s.facts" % script - self.cache_max_age = config.getint('cache', 'max_age') + try: + self.cache_max_age = config.getint('cache', 'max_age') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.cache_max_age = 60 + return True def parse_cli_args(self): - """ Command line argument processing """ + """Command line argument processing""" parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') @@ -150,26 +174,39 @@ class ForemanInventory(object): help='Force refresh of cache by making API requests to foreman (default: False - use cache files)') self.args = parser.parse_args() + def _get_session(self): + if not self.session: + self.session = requests.session() + self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw) + self.session.verify = self.foreman_ssl_verify + return self.session + def _get_json(self, url, ignore_errors=None): page = 1 results = [] + s = self._get_session() while True: - ret = requests.get(url, - auth=HTTPBasicAuth(self.foreman_user, self.foreman_pw), - verify=self.foreman_ssl_verify, - params={'page': page, 'per_page': 250}) + ret = s.get(url, params={'page': page, 'per_page': 250}) if ignore_errors and ret.status_code in ignore_errors: break ret.raise_for_status() json = ret.json() - if not json.has_key('results'): 
+            # /hosts/:id has no results key
+            if 'results' not in json:
                 return json
-            if type(json['results']) == type({}):
+            # Facts are returned as dict in results not list
+            if isinstance(json['results'], dict):
                 return json['results']
+            # The list of all hosts is returned paginated
             results = results + json['results']
             if len(results) >= json['total']:
                 break
             page += 1
+            if len(json['results']) == 0:
+                print("Did not make any progress during loop. "
+                      "Expected %d got %d" % (json['total'], len(results)),
+                      file=sys.stderr)
+                break
         return results
 
     def _get_hosts(self):
@@ -184,7 +221,8 @@ class ForemanInventory(object):
     def _get_all_params_by_id(self, hid):
         url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
         ret = self._get_json(url, [404])
-        if ret == []: ret = {}
+        if ret == []:
+            ret = {}
         return ret.get('all_parameters', {})
 
     def _get_facts_by_id(self, hid):
@@ -192,9 +230,7 @@ class ForemanInventory(object):
         return self._get_json(url)
 
     def _resolve_params(self, host):
-        """
-        Fetch host params and convert to dict
-        """
+        """Fetch host params and convert to dict"""
         params = {}
 
         for param in self._get_all_params_by_id(host['id']):
@@ -204,9 +240,7 @@ class ForemanInventory(object):
         return params
 
     def _get_facts(self, host):
-        """
-        Fetch all host facts of the host
-        """
+        """Fetch all host facts of the host"""
         if not self.want_facts:
             return {}
 
@@ -214,7 +248,7 @@ class ForemanInventory(object):
         if len(ret.values()) == 0:
             facts = {}
         elif len(ret.values()) == 1:
-            facts = ret.values()[0]
+            facts = list(ret.values())[0]
         else:
             raise ValueError("More than one set of facts returned for '%s'" % host)
         return facts
@@ -228,8 +262,15 @@ class ForemanInventory(object):
         for host in self._get_hosts():
             dns_name = host['name']
 
-            # Create ansible groups for hostgroup, environment, location and organization
-            for group in ['hostgroup', 'environment', 'location', 'organization']:
+            # Create ansible groups for hostgroup
+            group = 'hostgroup'
+            val = host.get('%s_title' % group) or host.get('%s_name' % group)
+            if val:
+                safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower()))
+                self.push(self.inventory, safe_key, dns_name)
+
+            # Create ansible groups for environment, location and organization
+            for group in ['environment', 'location', 'organization']:
                 val = host.get('%s_name' % group)
                 if val:
                     safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower()))
                     self.push(self.inventory, safe_key, dns_name)
@@ -247,7 +288,7 @@
             # attributes.
groupby = copy.copy(params) for k, v in host.items(): - if isinstance(v, basestring): + if isinstance(v, str): groupby[k] = self.to_safe(v) elif isinstance(v, int): groupby[k] = v @@ -264,14 +305,16 @@ class ForemanInventory(object): self.params[dns_name] = params self.facts[dns_name] = self._get_facts(host) self.push(self.inventory, 'all', dns_name) + self._write_cache() + def _write_cache(self): self.write_to_cache(self.cache, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_inventory) self.write_to_cache(self.params, self.cache_path_params) self.write_to_cache(self.facts, self.cache_path_facts) def get_host_info(self): - """ Get variables about a specific host """ + """Get variables about a specific host""" if not self.cache or len(self.cache) == 0: # Need to load index from cache @@ -294,21 +337,21 @@ class ForemanInventory(object): d[k] = [v] def load_inventory_from_cache(self): - """ Reads the index from the cache file sets self.index """ + """Read the index from the cache file sets self.index""" cache = open(self.cache_path_inventory, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory) def load_params_from_cache(self): - """ Reads the index from the cache file sets self.index """ + """Read the index from the cache file sets self.index""" cache = open(self.cache_path_params, 'r') json_params = cache.read() self.params = json.loads(json_params) def load_facts_from_cache(self): - """ Reads the index from the cache file sets self.index """ + """Read the index from the cache file sets self.facts""" if not self.want_facts: return cache = open(self.cache_path_facts, 'r') @@ -316,26 +359,33 @@ class ForemanInventory(object): self.facts = json.loads(json_facts) def load_cache_from_cache(self): - """ Reads the cache from the cache file sets self.cache """ + """Read the cache from the cache file sets self.cache""" cache = open(self.cache_path_cache, 'r') json_cache = cache.read() self.cache = json.loads(json_cache) def write_to_cache(self, data, filename): - """ Writes data in JSON format to a file """ + """Write data in JSON format to a file""" json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + @staticmethod + def to_safe(word): + '''Converts 'bad' characters in a string to underscores + + so they can be used as Ansible groups + + >>> ForemanInventory.to_safe("foo-bar baz") + 'foo_barbaz' + ''' regex = "[^A-Za-z0-9\_]" return re.sub(regex, "_", word.replace(" ", "")) def json_format_dict(self, data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted string """ + """Converts a dict to a JSON object and dumps it as a formatted string""" if pretty: return json.dumps(data, sort_keys=True, indent=2) @@ -343,6 +393,5 @@ class ForemanInventory(object): return json.dumps(data) if __name__ == '__main__': - ForemanInventory() - - + inv = ForemanInventory() + sys.exit(not inv.run()) diff --git a/awx/plugins/inventory/gce.py b/awx/plugins/inventory/gce.py index 498511d635..87f1e8e811 100755 --- a/awx/plugins/inventory/gce.py +++ b/awx/plugins/inventory/gce.py @@ -69,7 +69,8 @@ Examples: $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson -Version: 0.0.1 +Contributors: Matt Hite , Tom Melendez +Version: 0.0.3 ''' __requires__ = ['pycrypto>=2.6'] @@ -83,13 +84,19 @@ except ImportError: pass 
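The reworked _get_json in the Foreman script above swaps per-request authentication for a single shared requests session and pages through the API 250 records at a time until the reported total is reached. Stripped of the Foreman-specific branches, the pattern is roughly the following sketch; the URL and credentials in the usage line are placeholders:

import requests
from requests.auth import HTTPBasicAuth

def fetch_all(url, user, password, verify=True):
    # One session reuses the connection and auth headers for every page.
    session = requests.session()
    session.auth = HTTPBasicAuth(user, password)
    session.verify = verify

    page, results = 1, []
    while True:
        ret = session.get(url, params={'page': page, 'per_page': 250})
        ret.raise_for_status()
        data = ret.json()
        results += data['results']
        # The server reports the grand total; stop once we have it all.
        if len(results) >= data['total']:
            break
        page += 1
    return results

# hosts = fetch_all('https://foreman.example.com/api/v2/hosts', 'admin', 'secret')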
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" -USER_AGENT_VERSION="v1" +USER_AGENT_VERSION="v2" import sys import os import argparse + +from time import time + import ConfigParser +import logging +logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) + try: import json except ImportError: @@ -100,33 +107,103 @@ try: from libcloud.compute.providers import get_driver _ = Provider.GCE except: - print("GCE inventory script requires libcloud >= 0.13") - sys.exit(1) + sys.exit("GCE inventory script requires libcloud >= 0.13") + + +class CloudInventoryCache(object): + def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp', + cache_max_age=300): + cache_dir = os.path.expanduser(cache_path) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + self.cache_path_cache = os.path.join(cache_dir, cache_name) + + self.cache_max_age = cache_max_age + + def is_valid(self, max_age=None): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if max_age is None: + max_age = self.cache_max_age + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + max_age) > current_time: + return True + + return False + + def get_all_data_from_cache(self, filename=''): + ''' Reads the JSON inventory from the cache file. Returns Python dictionary. ''' + + data = '' + if not filename: + filename = self.cache_path_cache + with open(filename, 'r') as cache: + data = cache.read() + return json.loads(data) + + def write_to_cache(self, data, filename=''): + ''' Writes data to file as JSON. Returns True. ''' + if not filename: + filename = self.cache_path_cache + json_data = json.dumps(data) + with open(filename, 'w') as cache: + cache.write(json_data) + return True class GceInventory(object): def __init__(self): + # Cache object + self.cache = None + # dictionary containing inventory read from disk + self.inventory = {} + # Read settings and parse CLI arguments self.parse_cli_args() + self.config = self.get_config() self.driver = self.get_gce_driver() + self.ip_type = self.get_inventory_options() + if self.ip_type: + self.ip_type = self.ip_type.lower() + + # Cache management + start_inventory_time = time() + cache_used = False + if self.args.refresh_cache or not self.cache.is_valid(): + self.do_api_calls_update_cache() + else: + self.load_inventory_from_cache() + cache_used = True + self.inventory['_meta']['stats'] = {'use_cache': True} + self.inventory['_meta']['stats'] = { + 'inventory_load_time': time() - start_inventory_time, + 'cache_used': cache_used + } # Just display data for specific host if self.args.host: - print(self.json_format_dict(self.node_to_dict( - self.get_instance(self.args.host)), - pretty=self.args.pretty)) - sys.exit(0) - - zones = self.parse_env_zones() - - # Otherwise, assume user wants all instances grouped - print(self.json_format_dict(self.group_instances(zones), - pretty=self.args.pretty)) + print(self.json_format_dict( + self.inventory['_meta']['hostvars'][self.args.host], + pretty=self.args.pretty)) + else: + # Otherwise, assume user wants all instances grouped + zones = self.parse_env_zones() + print(self.json_format_dict(self.inventory, + pretty=self.args.pretty)) sys.exit(0) - def get_gce_driver(self): - """Determine the GCE authorization settings and return a - libcloud driver. + def get_config(self): + """ + Reads the settings from the gce.ini file. 
+ + Populates a SafeConfigParser object with defaults and + attempts to read an .ini-style configuration from the filename + specified in GCE_INI_PATH. If the environment variable is + not present, the filename defaults to gce.ini in the current + working directory. """ gce_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "gce.ini") @@ -141,14 +218,57 @@ class GceInventory(object): 'gce_service_account_pem_file_path': '', 'gce_project_id': '', 'libcloud_secrets': '', + 'inventory_ip_type': '', + 'cache_path': '~/.ansible/tmp', + 'cache_max_age': '300' }) if 'gce' not in config.sections(): config.add_section('gce') + if 'inventory' not in config.sections(): + config.add_section('inventory') + if 'cache' not in config.sections(): + config.add_section('cache') + config.read(gce_ini_path) + ######### + # Section added for processing ini settings + ######### + + # Set the instance_states filter based on config file options + self.instance_states = [] + if config.has_option('gce', 'instance_states'): + states = config.get('gce', 'instance_states') + # Ignore if instance_states is an empty string. + if states: + self.instance_states = states.split(',') + + # Caching + cache_path = config.get('cache', 'cache_path') + cache_max_age = config.getint('cache', 'cache_max_age') + # TOOD(supertom): support project-specific caches + cache_name = 'ansible-gce.cache' + self.cache = CloudInventoryCache(cache_path=cache_path, + cache_max_age=cache_max_age, + cache_name=cache_name) + return config + + def get_inventory_options(self): + """Determine inventory options. Environment variables always + take precedence over configuration files.""" + ip_type = self.config.get('inventory', 'inventory_ip_type') + # If the appropriate environment variables are set, they override + # other configuration + ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) + return ip_type + + def get_gce_driver(self): + """Determine the GCE authorization settings and return a + libcloud driver. + """ # Attempt to get GCE params from a configuration file, if one # exists. - secrets_path = config.get('gce', 'libcloud_secrets') + secrets_path = self.config.get('gce', 'libcloud_secrets') secrets_found = False try: import secrets @@ -162,8 +282,7 @@ class GceInventory(object): if not secrets_path.endswith('secrets.py'): err = "Must specify libcloud secrets file as " err += "/absolute/path/to/secrets.py" - print(err) - sys.exit(1) + sys.exit(err) sys.path.append(os.path.dirname(secrets_path)) try: import secrets @@ -174,10 +293,10 @@ class GceInventory(object): pass if not secrets_found: args = [ - config.get('gce','gce_service_account_email_address'), - config.get('gce','gce_service_account_pem_file_path') + self.config.get('gce','gce_service_account_email_address'), + self.config.get('gce','gce_service_account_pem_file_path') ] - kwargs = {'project': config.get('gce', 'gce_project_id')} + kwargs = {'project': self.config.get('gce', 'gce_project_id')} # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. 
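The CloudInventoryCache class introduced above gates API calls on the age of a JSON file on disk; its validity test reduces to a file-mtime comparison. A minimal sketch with the same semantics, using the default path and max age from the ini handling above:

import os
from time import time

def cache_is_valid(path, max_age=300):
    # Valid while the file exists and was modified less than max_age
    # seconds ago; anything else triggers a fresh round of API calls.
    return os.path.isfile(path) and (os.path.getmtime(path) + max_age) > time()

# cache_is_valid(os.path.expanduser('~/.ansible/tmp/ansible-gce.cache'))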
@@ -211,6 +330,9 @@ class GceInventory(object):
             help='Get all information about an instance')
         parser.add_argument('--pretty', action='store_true', default=False,
             help='Pretty format (default: False)')
+        parser.add_argument(
+            '--refresh-cache', action='store_true', default=False,
+            help='Force refresh of cache by making API requests (default: False - use cache files)')
 
         self.args = parser.parse_args()
 
@@ -220,11 +342,17 @@
         if inst is None:
             return {}
 
-        if inst.extra['metadata'].has_key('items'):
+        if 'items' in inst.extra['metadata']:
             for entry in inst.extra['metadata']['items']:
                 md[entry['key']] = entry['value']
 
         net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+        # default to external IP unless user has specified they prefer internal
+        if self.ip_type == 'internal':
+            ssh_host = inst.private_ips[0]
+        else:
+            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
         return {
             'gce_uuid': inst.uuid,
             'gce_id': inst.id,
@@ -240,15 +368,36 @@
             'gce_metadata': md,
             'gce_network': net,
             # Hosts don't have a public name, so we add an IP
-            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+            'ansible_ssh_host': ssh_host
         }
 
-    def get_instance(self, instance_name):
-        '''Gets details about a specific instance '''
+    def load_inventory_from_cache(self):
+        ''' Loads inventory from JSON on disk. '''
+
         try:
-            return self.driver.ex_get_node(instance_name)
+            self.inventory = self.cache.get_all_data_from_cache()
+            hosts = self.inventory['_meta']['hostvars']
         except Exception as e:
-            return None
+            print(
+                "Invalid inventory file %s. Please rebuild with --refresh-cache option."
+                % (self.cache.cache_path_cache))
+            raise
+
+    def do_api_calls_update_cache(self):
+        ''' Do API calls and save data in cache. '''
+        zones = self.parse_env_zones()
+        data = self.group_instances(zones)
+        self.cache.write_to_cache(data)
+        self.inventory = data
+
+    def list_nodes(self):
+        all_nodes = []
+        params, more_results = {'maxResults': 500}, True
+        while more_results:
+            self.driver.connection.gce_params = params
+            all_nodes.extend(self.driver.list_nodes())
+            more_results = 'pageToken' in params
+        return all_nodes
 
     def group_instances(self, zones=None):
         '''Group all instances'''
@@ -256,7 +405,18 @@
         meta = {}
         meta["hostvars"] = {}
 
-        for node in self.driver.list_nodes():
+        for node in self.list_nodes():
+
+            # This check filters on the desired instance states defined in the
+            # config file with the instance_states config option.
+            #
+            # If the instance_states list is _empty_ then _ALL_ states are returned.
+            #
+            # If the instance_states list is _populated_ then check the current
+            # state against the instance_states list
+            if self.instance_states and not node.extra['status'] in self.instance_states:
+                continue
+
             name = node.name
             meta["hostvars"][name] = self.node_to_dict(node)
@@ -268,7 +428,7 @@
             if zones and zone not in zones:
                 continue
 
-            if groups.has_key(zone): groups[zone].append(name)
+            if zone in groups: groups[zone].append(name)
             else: groups[zone] = [name]
 
             tags = node.extra['tags']
@@ -277,25 +437,25 @@
                     tag = t[6:]
                 else:
                     tag = 'tag_%s' % t
-                if groups.has_key(tag): groups[tag].append(name)
+                if tag in groups: groups[tag].append(name)
                 else: groups[tag] = [name]
 
             net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
             net = 'network_%s' % net
-            if groups.has_key(net): groups[net].append(name)
+            if net in groups: groups[net].append(name)
             else: groups[net] = [name]
 
             machine_type = node.size
-            if groups.has_key(machine_type): groups[machine_type].append(name)
+            if machine_type in groups: groups[machine_type].append(name)
             else: groups[machine_type] = [name]
 
             image = node.image and node.image or 'persistent_disk'
-            if groups.has_key(image): groups[image].append(name)
+            if image in groups: groups[image].append(name)
             else: groups[image] = [name]
 
             status = node.extra['status']
             stat = 'status_%s' % status.lower()
-            if groups.has_key(stat): groups[stat].append(name)
+            if stat in groups: groups[stat].append(name)
             else: groups[stat] = [name]
 
         groups["_meta"] = meta
@@ -311,6 +471,6 @@
         else:
             return json.dumps(data)
 
-
 # Run the script
-GceInventory()
+if __name__ == '__main__':
+    GceInventory()
diff --git a/awx/plugins/inventory/openstack.py b/awx/plugins/inventory/openstack.py
index 103be1bee0..6679a2cc3b 100755
--- a/awx/plugins/inventory/openstack.py
+++ b/awx/plugins/inventory/openstack.py
@@ -2,7 +2,8 @@
 # Copyright (c) 2012, Marco Vito Moscaritolo
 # Copyright (c) 2013, Jesse Keating
-# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2016, Rackspace Australia
 #
 # This module is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -18,7 +19,7 @@
 # along with this software. If not, see <http://www.gnu.org/licenses/>.
 
 # The OpenStack Inventory module uses os-client-config for configuration.
-# https://github.com/stackforge/os-client-config
+# https://github.com/openstack/os-client-config
 # This means it will either:
 #  - Respect normal OS_* environment variables like other OpenStack tools
 #  - Read values from a clouds.yaml file.
@@ -32,12 +33,24 @@
 # all of them and present them as one contiguous inventory.
 #
 # See the adjacent openstack.yml file for an example config file
+# There are three ansible inventory specific options that can be set in
+# the inventory section.
+# expand_hostvars controls whether or not the inventory will make extra API
+#                 calls to fill out additional information about each server
+# use_hostnames changes the behavior from registering every host with its UUID
+#               and making a group of its hostname to only doing this if the
+#               hostname in question has more than one server
+# fail_on_errors causes the inventory to fail and return no hosts if one cloud
+#                has failed (for example, bad credentials or being offline).
+#                When set to False, the inventory will return hosts from
+#                whichever other clouds it can contact.
(Default: True) import argparse import collections import os import sys import time +from distutils.version import StrictVersion try: import json @@ -46,89 +59,137 @@ except: import os_client_config import shade +import shade.inventory + +CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] -class OpenStackInventory(object): +def get_groups_from_server(server_vars, namegroup=True): + groups = [] - def __init__(self, private=False, refresh=False): - config_files = os_client_config.config.CONFIG_FILES - config_files.append('/etc/ansible/openstack.yml') - self.openstack_config = os_client_config.config.OpenStackConfig( - config_files) - self.clouds = shade.openstack_clouds(self.openstack_config) - self.private = private - self.refresh = refresh + region = server_vars['region'] + cloud = server_vars['cloud'] + metadata = server_vars.get('metadata', {}) - self.cache_max_age = self.openstack_config.get_cache_max_age() - cache_path = self.openstack_config.get_cache_path() + # Create a group for the cloud + groups.append(cloud) - # Cache related - if not os.path.exists(cache_path): - os.makedirs(cache_path) - self.cache_file = os.path.join(cache_path, "ansible-inventory.cache") + # Create a group on region + groups.append(region) - def is_cache_stale(self): - ''' Determines if cache file has expired, or if it is still valid ''' - if os.path.isfile(self.cache_file): - mod_time = os.path.getmtime(self.cache_file) - current_time = time.time() - if (mod_time + self.cache_max_age) > current_time: - return False - return True + # And one by cloud_region + groups.append("%s_%s" % (cloud, region)) - def get_host_groups(self): - if self.refresh or self.is_cache_stale(): - groups = self.get_host_groups_from_cloud() - self.write_cache(groups) + # Check if group metadata key in servers' metadata + if 'group' in metadata: + groups.append(metadata['group']) + + for extra_group in metadata.get('groups', '').split(','): + if extra_group: + groups.append(extra_group.strip()) + + groups.append('instance-%s' % server_vars['id']) + if namegroup: + groups.append(server_vars['name']) + + for key in ('flavor', 'image'): + if 'name' in server_vars[key]: + groups.append('%s-%s' % (key, server_vars[key]['name'])) + + for key, value in iter(metadata.items()): + groups.append('meta-%s_%s' % (key, value)) + + az = server_vars.get('az', None) + if az: + # Make groups for az, region_az and cloud_region_az + groups.append(az) + groups.append('%s_%s' % (region, az)) + groups.append('%s_%s_%s' % (cloud, region, az)) + return groups + + +def get_host_groups(inventory, refresh=False): + (cache_file, cache_expiration_time) = get_cache_settings() + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): + groups = to_json(get_host_groups_from_cloud(inventory)) + open(cache_file, 'w').write(groups) + else: + groups = open(cache_file, 'r').read() + return groups + + +def append_hostvars(hostvars, groups, key, server, namegroup=False): + hostvars[key] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=namegroup): + groups[group].append(key) + + +def get_host_groups_from_cloud(inventory): + groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) + hostvars = {} + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + if StrictVersion(shade.__version__) >= 
StrictVersion("1.6.0"): + list_args['fail_on_cloud_config'] = \ + inventory.extra_config['fail_on_errors'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): + + if 'interface_ip' not in server: + continue + firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) else: - return json.load(open(self.cache_file, 'r')) - return groups + server_ids = set() + # Trap for duplicate results + for server in servers: + server_ids.add(server['id']) + if len(server_ids) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + for server in servers: + append_hostvars( + hostvars, groups, server['id'], server, + namegroup=True) + groups['_meta'] = {'hostvars': hostvars} + return groups - def write_cache(self, groups): - with open(self.cache_file, 'w') as cache_file: - cache_file.write(self.json_format_dict(groups)) - def get_host_groups_from_cloud(self): - groups = collections.defaultdict(list) - hostvars = collections.defaultdict(dict) +def is_cache_stale(cache_file, cache_expiration_time, refresh=False): + ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True + if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: + mod_time = os.path.getmtime(cache_file) + current_time = time.time() + if (mod_time + cache_expiration_time) > current_time: + return False + return True - for cloud in self.clouds: - cloud.private = cloud.private or self.private - # Cycle on servers - for server in cloud.list_servers(): +def get_cache_settings(): + config = os_client_config.config.OpenStackConfig( + config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) + # For inventory-wide caching + cache_expiration_time = config.get_cache_expiration_time() + cache_path = config.get_cache_path() + if not os.path.exists(cache_path): + os.makedirs(cache_path) + cache_file = os.path.join(cache_path, 'ansible-inventory.cache') + return (cache_file, cache_expiration_time) - meta = cloud.get_server_meta(server) - if 'interface_ip' not in meta['server_vars']: - # skip this host if it doesn't have a network address - continue - - server_vars = meta['server_vars'] - hostvars[server.name][ - 'ansible_ssh_host'] = server_vars['interface_ip'] - hostvars[server.name]['openstack'] = server_vars - - for group in meta['groups']: - groups[group].append(server.name) - - if hostvars: - groups['_meta'] = {'hostvars': hostvars} - return groups - - def json_format_dict(self, data): - return json.dumps(data, sort_keys=True, indent=2) - - def list_instances(self): - groups = self.get_host_groups() - # Return server list - print(self.json_format_dict(groups)) - - def get_host(self, hostname): - groups = self.get_host_groups() - hostvars = groups['_meta']['hostvars'] - if hostname in hostvars: - print(self.json_format_dict(hostvars[hostname])) +def to_json(in_dict): + return json.dumps(in_dict, sort_keys=True, indent=2) def parse_args(): @@ -138,21 +199,43 @@ def parse_args(): help='Use private address for ansible host') parser.add_argument('--refresh', action='store_true', help='Refresh cached information') + parser.add_argument('--debug', action='store_true', default=False, + help='Enable debug output') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specific 
host') + return parser.parse_args() def main(): args = parse_args() try: - inventory = OpenStackInventory(args.private, args.refresh) + config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES + shade.simple_logging(debug=args.debug) + inventory_args = dict( + refresh=args.refresh, + config_files=config_files, + private=args.private, + ) + if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + 'fail_on_errors': True, + } + )) + + inventory = shade.inventory.OpenStackInventory(**inventory_args) + if args.list: - inventory.list_instances() + output = get_host_groups(inventory, refresh=args.refresh) elif args.host: - inventory.get_host(args.host) + output = to_json(inventory.get_host(args.host)) + print(output) except shade.OpenStackCloudException as e: sys.stderr.write('%s\n' % e.message) sys.exit(1) diff --git a/awx/plugins/inventory/openstack.yml b/awx/plugins/inventory/openstack.yml index a99bb02058..3687b1f399 100644 --- a/awx/plugins/inventory/openstack.yml +++ b/awx/plugins/inventory/openstack.yml @@ -26,3 +26,7 @@ clouds: username: stack password: stack project_name: stack +ansible: + use_hostnames: False + expand_hostvars: True + fail_on_errors: True diff --git a/awx/plugins/inventory/vmware.py b/awx/plugins/inventory/vmware.py deleted file mode 100755 index 8f723a638d..0000000000 --- a/awx/plugins/inventory/vmware.py +++ /dev/null @@ -1,436 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -''' -VMware Inventory Script -======================= - -Retrieve information about virtual machines from a vCenter server or -standalone ESX host. When `group_by=false` (in the INI file), host systems -are also returned in addition to VMs. - -This script will attempt to read configuration from an INI file with the same -base filename if present, or `vmware.ini` if not. It is possible to create -symlinks to the inventory script to support multiple configurations, e.g.: - -* `vmware.py` (this script) -* `vmware.ini` (default configuration, will be read by `vmware.py`) -* `vmware_test.py` (symlink to `vmware.py`) -* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`) -* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no - `vmware_other.ini` exists) - -The path to an INI file may also be specified via the `VMWARE_INI` environment -variable, in which case the filename matching rules above will not apply. - -Host and authentication parameters may be specified via the `VMWARE_HOST`, -`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will -take precedence over options present in the INI file. An INI file is not -required if these options are specified using environment variables. -''' - -from __future__ import print_function - -import collections -import json -import logging -import optparse -import os -import sys -import time -import ConfigParser - -from six import text_type - -# Disable logging message trigged by pSphere/suds. 
-try: - from logging import NullHandler -except ImportError: - from logging import Handler - class NullHandler(Handler): - def emit(self, record): - pass -logging.getLogger('psphere').addHandler(NullHandler()) -logging.getLogger('suds').addHandler(NullHandler()) - -from psphere.client import Client -from psphere.errors import ObjectNotFoundError -from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network -from suds.sudsobject import Object as SudsObject - - -class VMwareInventory(object): - - def __init__(self, guests_only=None): - self.config = ConfigParser.SafeConfigParser() - if os.environ.get('VMWARE_INI', ''): - config_files = [os.environ['VMWARE_INI']] - else: - config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini'] - for config_file in config_files: - if os.path.exists(config_file): - self.config.read(config_file) - break - - # Retrieve only guest VMs, or include host systems? - if guests_only is not None: - self.guests_only = guests_only - elif self.config.has_option('defaults', 'guests_only'): - self.guests_only = self.config.getboolean('defaults', 'guests_only') - else: - self.guests_only = True - - # Read authentication information from VMware environment variables - # (if set), otherwise from INI file. - auth_host = os.environ.get('VMWARE_HOST') - if not auth_host and self.config.has_option('auth', 'host'): - auth_host = self.config.get('auth', 'host') - auth_user = os.environ.get('VMWARE_USER') - if not auth_user and self.config.has_option('auth', 'user'): - auth_user = self.config.get('auth', 'user') - auth_password = os.environ.get('VMWARE_PASSWORD') - if not auth_password and self.config.has_option('auth', 'password'): - auth_password = self.config.get('auth', 'password') - - # Create the VMware client connection. - self.client = Client(auth_host, auth_user, auth_password) - - def _put_cache(self, name, value): - ''' - Saves the value to cache with the name given. - ''' - if self.config.has_option('defaults', 'cache_dir'): - cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir')) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - cache_file = os.path.join(cache_dir, name) - with open(cache_file, 'w') as cache: - json.dump(value, cache) - - def _get_cache(self, name, default=None): - ''' - Retrieves the value from cache for the given name. - ''' - if self.config.has_option('defaults', 'cache_dir'): - cache_dir = self.config.get('defaults', 'cache_dir') - cache_file = os.path.join(cache_dir, name) - if os.path.exists(cache_file): - if self.config.has_option('defaults', 'cache_max_age'): - cache_max_age = self.config.getint('defaults', 'cache_max_age') - else: - cache_max_age = 0 - cache_stat = os.stat(cache_file) - if (cache_stat.st_mtime + cache_max_age) >= time.time(): - with open(cache_file) as cache: - return json.load(cache) - return default - - def _flatten_dict(self, d, parent_key='', sep='_'): - ''' - Flatten nested dicts by combining keys with a separator. Lists with - only string items are included as is; any other lists are discarded. 
- ''' - items = [] - for k, v in d.items(): - if k.startswith('_'): - continue - new_key = parent_key + sep + k if parent_key else k - if isinstance(v, collections.MutableMapping): - items.extend(self._flatten_dict(v, new_key, sep).items()) - elif isinstance(v, (list, tuple)): - if all([isinstance(x, basestring) for x in v]): - items.append((new_key, v)) - else: - items.append((new_key, v)) - return dict(items) - - def _get_obj_info(self, obj, depth=99, seen=None): - ''' - Recursively build a data structure for the given pSphere object (depth - only applies to ManagedObject instances). - ''' - seen = seen or set() - if isinstance(obj, ManagedObject): - try: - obj_unicode = text_type(getattr(obj, 'name')) - except AttributeError: - obj_unicode = () - if obj in seen: - return obj_unicode - seen.add(obj) - if depth <= 0: - return obj_unicode - d = {} - for attr in dir(obj): - if attr.startswith('_'): - continue - try: - val = getattr(obj, attr) - obj_info = self._get_obj_info(val, depth - 1, seen) - if obj_info != (): - d[attr] = obj_info - except Exception as e: - pass - return d - elif isinstance(obj, SudsObject): - d = {} - for key, val in iter(obj): - obj_info = self._get_obj_info(val, depth, seen) - if obj_info != (): - d[key] = obj_info - return d - elif isinstance(obj, (list, tuple)): - l = [] - for val in iter(obj): - obj_info = self._get_obj_info(val, depth, seen) - if obj_info != (): - l.append(obj_info) - return l - elif isinstance(obj, (type(None), bool, int, long, float, basestring)): - return obj - else: - return () - - def _get_host_info(self, host, prefix='vmware'): - ''' - Return a flattened dict with info about the given host system. - ''' - host_info = { - 'name': host.name, - } - for attr in ('datastore', 'network', 'vm'): - try: - value = getattr(host, attr) - host_info['%ss' % attr] = self._get_obj_info(value, depth=0) - except AttributeError: - host_info['%ss' % attr] = [] - for k, v in self._get_obj_info(host.summary, depth=0).items(): - if isinstance(v, collections.MutableMapping): - for k2, v2 in v.items(): - host_info[k2] = v2 - elif k != 'host': - host_info[k] = v - try: - host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress - except Exception as e: - print(e, file=sys.stderr) - host_info = self._flatten_dict(host_info, prefix) - if ('%s_ipAddress' % prefix) in host_info: - host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix] - return host_info - - def _get_vm_info(self, vm, prefix='vmware'): - ''' - Return a flattened dict with info about the given virtual machine. 
- ''' - vm_info = { - 'name': vm.name, - } - for attr in ('datastore', 'network'): - try: - value = getattr(vm, attr) - vm_info['%ss' % attr] = self._get_obj_info(value, depth=0) - except AttributeError: - vm_info['%ss' % attr] = [] - try: - vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0) - except AttributeError: - vm_info['resourcePool'] = '' - try: - vm_info['guestState'] = vm.guest.guestState - except AttributeError: - vm_info['guestState'] = '' - for k, v in self._get_obj_info(vm.summary, depth=0).items(): - if isinstance(v, collections.MutableMapping): - for k2, v2 in v.items(): - if k2 == 'host': - k2 = 'hostSystem' - vm_info[k2] = v2 - elif k != 'vm': - vm_info[k] = v - vm_info = self._flatten_dict(vm_info, prefix) - if ('%s_ipAddress' % prefix) in vm_info: - vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix] - return vm_info - - def _add_host(self, inv, parent_group, host_name): - ''' - Add the host to the parent group in the given inventory. - ''' - p_group = inv.setdefault(parent_group, []) - if isinstance(p_group, dict): - group_hosts = p_group.setdefault('hosts', []) - else: - group_hosts = p_group - if host_name not in group_hosts: - group_hosts.append(host_name) - - def _add_child(self, inv, parent_group, child_group): - ''' - Add a child group to a parent group in the given inventory. - ''' - if parent_group != 'all': - p_group = inv.setdefault(parent_group, {}) - if not isinstance(p_group, dict): - inv[parent_group] = {'hosts': p_group} - p_group = inv[parent_group] - group_children = p_group.setdefault('children', []) - if child_group not in group_children: - group_children.append(child_group) - inv.setdefault(child_group, []) - - def get_inventory(self, meta_hostvars=True): - ''' - Reads the inventory from cache or VMware API via pSphere. - ''' - # Use different cache names for guests only vs. all hosts. - if self.guests_only: - cache_name = '__inventory_guests__' - else: - cache_name = '__inventory_all__' - - inv = self._get_cache(cache_name, None) - if inv is not None: - return inv - - inv = {'all': {'hosts': []}} - if meta_hostvars: - inv['_meta'] = {'hostvars': {}} - - default_group = os.path.basename(sys.argv[0]).rstrip('.py') - - if not self.guests_only: - if self.config.has_option('defaults', 'hw_group'): - hw_group = self.config.get('defaults', 'hw_group') - else: - hw_group = default_group + '_hw' - - if self.config.has_option('defaults', 'vm_group'): - vm_group = self.config.get('defaults', 'vm_group') - else: - vm_group = default_group + '_vm' - - if self.config.has_option('defaults', 'prefix_filter'): - prefix_filter = self.config.get('defaults', 'prefix_filter') - else: - prefix_filter = None - - # Loop through physical hosts: - for host in HostSystem.all(self.client): - - if not self.guests_only: - self._add_host(inv, 'all', host.name) - self._add_host(inv, hw_group, host.name) - host_info = self._get_host_info(host) - if meta_hostvars: - inv['_meta']['hostvars'][host.name] = host_info - self._put_cache(host.name, host_info) - - # Loop through all VMs on physical host. - for vm in host.vm: - if prefix_filter: - if vm.name.startswith( prefix_filter ): - continue - self._add_host(inv, 'all', vm.name) - self._add_host(inv, vm_group, vm.name) - vm_info = self._get_vm_info(vm) - if meta_hostvars: - inv['_meta']['hostvars'][vm.name] = vm_info - self._put_cache(vm.name, vm_info) - - # Group by resource pool. 
- vm_resourcePool = vm_info.get('vmware_resourcePool', None) - if vm_resourcePool: - self._add_child(inv, vm_group, 'resource_pools') - self._add_child(inv, 'resource_pools', vm_resourcePool) - self._add_host(inv, vm_resourcePool, vm.name) - - # Group by datastore. - for vm_datastore in vm_info.get('vmware_datastores', []): - self._add_child(inv, vm_group, 'datastores') - self._add_child(inv, 'datastores', vm_datastore) - self._add_host(inv, vm_datastore, vm.name) - - # Group by network. - for vm_network in vm_info.get('vmware_networks', []): - self._add_child(inv, vm_group, 'networks') - self._add_child(inv, 'networks', vm_network) - self._add_host(inv, vm_network, vm.name) - - # Group by guest OS. - vm_guestId = vm_info.get('vmware_guestId', None) - if vm_guestId: - self._add_child(inv, vm_group, 'guests') - self._add_child(inv, 'guests', vm_guestId) - self._add_host(inv, vm_guestId, vm.name) - - # Group all VM templates. - vm_template = vm_info.get('vmware_template', False) - if vm_template: - self._add_child(inv, vm_group, 'templates') - self._add_host(inv, 'templates', vm.name) - - self._put_cache(cache_name, inv) - return inv - - def get_host(self, hostname): - ''' - Read info about a specific host or VM from cache or VMware API. - ''' - inv = self._get_cache(hostname, None) - if inv is not None: - return inv - - if not self.guests_only: - try: - host = HostSystem.get(self.client, name=hostname) - inv = self._get_host_info(host) - except ObjectNotFoundError: - pass - - if inv is None: - try: - vm = VirtualMachine.get(self.client, name=hostname) - inv = self._get_vm_info(vm) - except ObjectNotFoundError: - pass - - if inv is not None: - self._put_cache(hostname, inv) - return inv or {} - - -def main(): - parser = optparse.OptionParser() - parser.add_option('--list', action='store_true', dest='list', - default=False, help='Output inventory groups and hosts') - parser.add_option('--host', dest='host', default=None, metavar='HOST', - help='Output variables only for the given hostname') - # Additional options for use when running the script standalone, but never - # used by Ansible. 
-    parser.add_option('--pretty', action='store_true', dest='pretty',
-                      default=False, help='Output nicely-formatted JSON')
-    parser.add_option('--include-host-systems', action='store_true',
-                      dest='include_host_systems', default=False,
-                      help='Include host systems in addition to VMs')
-    parser.add_option('--no-meta-hostvars', action='store_false',
-                      dest='meta_hostvars', default=True,
-                      help='Exclude [\'_meta\'][\'hostvars\'] with --list')
-    options, args = parser.parse_args()
-
-    if options.include_host_systems:
-        vmware_inventory = VMwareInventory(guests_only=False)
-    else:
-        vmware_inventory = VMwareInventory()
-    if options.host is not None:
-        inventory = vmware_inventory.get_host(options.host)
-    else:
-        inventory = vmware_inventory.get_inventory(options.meta_hostvars)
-
-    json_kwargs = {}
-    if options.pretty:
-        json_kwargs.update({'indent': 4, 'sort_keys': True})
-    json.dump(inventory, sys.stdout, **json_kwargs)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/awx/plugins/inventory/vmware_inventory.py b/awx/plugins/inventory/vmware_inventory.py
new file mode 100755
index 0000000000..84979dc270
--- /dev/null
+++ b/awx/plugins/inventory/vmware_inventory.py
@@ -0,0 +1,723 @@
+#!/usr/bin/env python
+
+# Requirements
+#   - pyvmomi >= 6.0.0.2016.4
+
+# TODO:
+#   * more jq examples
+#   * optional folder hierarchy
+
+"""
+$ jq '._meta.hostvars[].config' data.json | head
+{
+  "alternateguestname": "",
+  "instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
+  "memoryhotaddenabled": false,
+  "guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
+  "changeversion": "2016-05-16T18:43:14.977925Z",
+  "uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
+  "cpuhotremoveenabled": false,
+  "vpmcenabled": false,
+  "firmware": "bios",
+"""
+
+from __future__ import print_function
+
+import argparse
+import atexit
+import datetime
+import getpass
+import jinja2
+import os
+import six
+import ssl
+import sys
+import uuid
+
+from collections import defaultdict
+from six.moves import configparser
+from time import time
+
+HAS_PYVMOMI = False
+try:
+    from pyVmomi import vim
+    from pyVim.connect import SmartConnect, Disconnect
+
+    HAS_PYVMOMI = True
+except ImportError:
+    pass
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+hasvcr = False
+try:
+    import vcr
+
+    hasvcr = True
+except ImportError:
+    pass
+
+
+class VMwareMissingHostException(Exception):
+    pass
+
+
+class VMWareInventory(object):
+    __name__ = 'VMWareInventory'
+
+    guest_props = False
+    instances = []
+    debug = False
+    load_dumpfile = None
+    write_dumpfile = None
+    maxlevel = 1
+    lowerkeys = True
+    config = None
+    cache_max_age = None
+    cache_path_cache = None
+    cache_path_index = None
+    cache_dir = None
+    server = None
+    port = None
+    username = None
+    password = None
+    validate_certs = True
+    host_filters = []
+    skip_keys = []
+    groupby_patterns = []
+
+    if sys.version_info > (3, 0):
+        safe_types = [int, bool, str, float, None]
+    else:
+        safe_types = [int, long, bool, str, float, None]
+    iter_types = [dict, list]
+
+    bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
+
+    vimTableMaxDepth = {
+        "vim.HostSystem": 2,
+        "vim.VirtualMachine": 2,
+    }
+
+    custom_fields = {}
+
+    # translation table for attributes to fetch for known vim types
+    if not HAS_PYVMOMI:
+        vimTable = {}
+    else:
+        vimTable = {
+            vim.Datastore: ['_moId', 'name'],
+            vim.ResourcePool: ['_moId', 'name'],
+            vim.HostSystem: ['_moId', 'name'],
+        }
+
+    @staticmethod
+    def _empty_inventory():
+        return {"_meta": {"hostvars": {}}}
+
+    def __init__(self,
load=True): + self.inventory = VMWareInventory._empty_inventory() + + if load: + # Read settings and parse CLI arguments + self.parse_cli_args() + self.read_settings() + + # Check the cache + cache_valid = self.is_cache_valid() + + # Handle Cache + if self.args.refresh_cache or not cache_valid: + self.do_api_calls_update_cache() + else: + self.debugl('loading inventory from cache') + self.inventory = self.get_inventory_from_cache() + + def debugl(self, text): + if self.args.debug: + try: + text = str(text) + except UnicodeEncodeError: + text = text.encode('ascii', 'ignore') + print('%s %s' % (datetime.datetime.now(), text)) + + def show(self): + # Data to print + self.debugl('dumping results') + data_to_print = None + if self.args.host: + data_to_print = self.get_host_info(self.args.host) + elif self.args.list: + # Display list of instances for inventory + data_to_print = self.inventory + return json.dumps(data_to_print, indent=2) + + def is_cache_valid(self): + + ''' Determines if the cache files have expired, or if it is still valid ''' + + valid = False + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + valid = True + + return valid + + def do_api_calls_update_cache(self): + + ''' Get instances and cache the data ''' + + self.inventory = self.instances_to_inventory(self.get_instances()) + self.write_to_cache(self.inventory) + + def write_to_cache(self, data): + + ''' Dump inventory to json file ''' + + with open(self.cache_path_cache, 'wb') as f: + f.write(json.dumps(data)) + + def get_inventory_from_cache(self): + + ''' Read in jsonified inventory ''' + + jdata = None + with open(self.cache_path_cache, 'rb') as f: + jdata = f.read() + return json.loads(jdata) + + def read_settings(self): + + ''' Reads the settings from the vmware_inventory.ini file ''' + + scriptbasename = __file__ + scriptbasename = os.path.basename(scriptbasename) + scriptbasename = scriptbasename.replace('.py', '') + + defaults = {'vmware': { + 'server': '', + 'port': 443, + 'username': '', + 'password': '', + 'validate_certs': True, + 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename), + 'cache_name': 'ansible-vmware', + 'cache_path': '~/.ansible/tmp', + 'cache_max_age': 3600, + 'max_object_level': 1, + 'skip_keys': 'declaredalarmstate,' + 'disabledmethod,' + 'dynamicproperty,' + 'dynamictype,' + 'environmentbrowser,' + 'managedby,' + 'parent,' + 'childtype,' + 'resourceconfig', + 'alias_pattern': '{{ config.name + "_" + config.uuid }}', + 'host_pattern': '{{ guest.ipaddress }}', + 'host_filters': '{{ guest.gueststate == "running" }}', + 'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}', + 'lower_var_keys': True, + 'custom_field_group_prefix': 'vmware_tag_', + 'groupby_custom_field': False} + } + + if six.PY3: + config = configparser.ConfigParser() + else: + config = configparser.SafeConfigParser() + + # where is the config? + vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path']) + vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path)) + config.read(vmware_ini_path) + + # apply defaults + for k, v in defaults['vmware'].items(): + if not config.has_option('vmware', k): + config.set('vmware', k, str(v)) + + # where is the cache? 
+ self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path')) + if self.cache_dir and not os.path.exists(self.cache_dir): + os.makedirs(self.cache_dir) + + # set the cache filename and max age + cache_name = config.get('vmware', 'cache_name') + self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name + self.debugl('cache path is %s' % self.cache_path_cache) + self.cache_max_age = config.getint('vmware', 'cache_max_age') + + # mark the connection info + self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server')) + self.debugl('server is %s' % self.server) + self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port'))) + self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username')) + self.debugl('username is %s' % self.username) + self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password')) + self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs')) + if self.validate_certs in ['no', 'false', 'False', False]: + self.validate_certs = False + + self.debugl('cert validation is %s' % self.validate_certs) + + # behavior control + self.maxlevel = int(config.get('vmware', 'max_object_level')) + self.debugl('max object level is %s' % self.maxlevel) + self.lowerkeys = config.get('vmware', 'lower_var_keys') + if not isinstance(self.lowerkeys, bool): + if str(self.lowerkeys).lower() in ['yes', 'true', '1']: + self.lowerkeys = True + else: + self.lowerkeys = False + self.debugl('lower keys is %s' % self.lowerkeys) + self.skip_keys = list(config.get('vmware', 'skip_keys').split(',')) + self.debugl('skip keys is %s' % self.skip_keys) + self.host_filters = list(config.get('vmware', 'host_filters').split(',')) + self.debugl('host filters are %s' % self.host_filters) + self.groupby_patterns = list(config.get('vmware', 'groupby_patterns').split(',')) + self.debugl('groupby patterns are %s' % self.groupby_patterns) + + # Special feature to disable the brute-force serialization of the + # virtual machine objects. The key names for these properties do not + # matter because the values are just items for a larger list.
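+ # (Editorial note, illustrative only -- the [properties] ini section is real, but the key + # and property names below are invented examples:) + # [properties] + # prop01 = name + # prop02 = guest.ipAddress + # Listing properties like this switches collection to facts_from_proplist(), which fetches + # only the named attribute paths instead of serializing every object attribute.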
+ if config.has_section('properties'): + self.guest_props = [] + for prop in config.items('properties'): + self.guest_props.append(prop[1]) + + # save the config + self.config = config + + def parse_cli_args(self): + + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi') + parser.add_argument('--debug', action='store_true', default=False, + help='show debug info') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to vSphere (default: False - use cache files)') + parser.add_argument('--max-instances', default=None, type=int, + help='maximum number of instances to retrieve') + self.args = parser.parse_args() + + def get_instances(self): + + ''' Get a list of vm instances with pyvmomi ''' + kwargs = {'host': self.server, + 'user': self.username, + 'pwd': self.password, + 'port': int(self.port)} + + if hasattr(ssl, 'SSLContext') and not self.validate_certs: + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.verify_mode = ssl.CERT_NONE + kwargs['sslContext'] = context + + return self._get_instances(kwargs) + + def _get_instances(self, inkwargs): + + ''' Make API calls ''' + + instances = [] + si = SmartConnect(**inkwargs) + + self.debugl('retrieving all instances') + if not si: + print("Could not connect to the specified host using specified " + "username and password") + return -1 + atexit.register(Disconnect, si) + content = si.RetrieveContent() + + # Create a search container for virtualmachines + self.debugl('creating containerview for virtualmachines') + container = content.rootFolder + viewType = [vim.VirtualMachine] + recursive = True + containerView = content.viewManager.CreateContainerView(container, viewType, recursive) + children = containerView.view + for child in children: + # If requested, limit the total number of instances + if self.args.max_instances: + if len(instances) >= self.args.max_instances: + break + instances.append(child) + self.debugl("%s total instances in container view" % len(instances)) + + if self.args.host: + instances = [x for x in instances if x.name == self.args.host] + + instance_tuples = [] + for instance in sorted(instances): + if self.guest_props: + ifacts = self.facts_from_proplist(instance) + else: + ifacts = self.facts_from_vobj(instance) + instance_tuples.append((instance, ifacts)) + self.debugl('facts collected for all instances') + + cfm = content.customFieldsManager + if cfm is not None and cfm.field: + for f in cfm.field: + if f.managedObjectType == vim.VirtualMachine: + self.custom_fields[f.key] = f.name + self.debugl('%d custom fields collected' % len(self.custom_fields)) + return instance_tuples + + def instances_to_inventory(self, instances): + + ''' Convert a list of vm objects into a json compliant inventory ''' + + self.debugl('re-indexing instances based on ini settings') + inventory = VMWareInventory._empty_inventory() + inventory['all'] = {} + inventory['all']['hosts'] = [] + for idx, instance in enumerate(instances): + # make a unique id for this object to avoid VMware's + # numerous UUIDs, which aren't all unique.
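+ # (Editorial note:) this random key is internal only -- the alias_pattern + # name_mapping below replaces it with the user-facing host name.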
+ thisid = str(uuid.uuid4()) + idata = instance[1] + + # Put it in the inventory + inventory['all']['hosts'].append(thisid) + inventory['_meta']['hostvars'][thisid] = idata.copy() + inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid + + # Make a map of the uuid to the alias the user wants + name_mapping = self.create_template_mapping( + inventory, + self.config.get('vmware', 'alias_pattern') + ) + + # Make a map of the uuid to the ssh hostname the user wants + host_mapping = self.create_template_mapping( + inventory, + self.config.get('vmware', 'host_pattern') + ) + + # Reset the inventory keys + for k, v in name_mapping.items(): + + if not host_mapping or not k in host_mapping: + continue + + # set ansible_host (2.x) + try: + inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k] + # 1.9.x backwards compliance + inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k] + except Exception: + continue + + if k == v: + continue + + # add new key + inventory['all']['hosts'].append(v) + inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k] + + # cleanup old key + inventory['all']['hosts'].remove(k) + inventory['_meta']['hostvars'].pop(k, None) + + self.debugl('pre-filtered hosts:') + for i in inventory['all']['hosts']: + self.debugl(' * %s' % i) + # Apply host filters + for hf in self.host_filters: + if not hf: + continue + self.debugl('filter: %s' % hf) + filter_map = self.create_template_mapping(inventory, hf, dtype='boolean') + for k, v in filter_map.items(): + if not v: + # delete this host + inventory['all']['hosts'].remove(k) + inventory['_meta']['hostvars'].pop(k, None) + + self.debugl('post-filter hosts:') + for i in inventory['all']['hosts']: + self.debugl(' * %s' % i) + + # Create groups + for gbp in self.groupby_patterns: + groupby_map = self.create_template_mapping(inventory, gbp) + for k, v in groupby_map.items(): + if v not in inventory: + inventory[v] = {} + inventory[v]['hosts'] = [] + if k not in inventory[v]['hosts']: + inventory[v]['hosts'].append(k) + + if self.config.get('vmware', 'groupby_custom_field'): + for k, v in inventory['_meta']['hostvars'].items(): + if 'customvalue' in v: + for tv in v['customvalue']: + if not isinstance(tv['value'], str) and not isinstance(tv['value'], unicode): + continue + + newkey = None + field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key'] + values = [] + keylist = map(lambda x: x.strip(), tv['value'].split(',')) + for kl in keylist: + try: + newkey = self.config.get('vmware', 'custom_field_group_prefix') + field_name + '_' + kl + newkey = newkey.strip() + except Exception as e: + self.debugl(e) + values.append(newkey) + for tag in values: + if not tag: + continue + if tag not in inventory: + inventory[tag] = {} + inventory[tag]['hosts'] = [] + if k not in inventory[tag]['hosts']: + inventory[tag]['hosts'].append(k) + + return inventory + + def create_template_mapping(self, inventory, pattern, dtype='string'): + + ''' Return a hash of uuid to templated string from pattern ''' + + mapping = {} + for k, v in inventory['_meta']['hostvars'].items(): + t = jinja2.Template(pattern) + newkey = None + try: + newkey = t.render(v) + newkey = newkey.strip() + except Exception as e: + self.debugl(e) + if not newkey: + continue + elif dtype == 'integer': + newkey = int(newkey) + elif dtype == 'boolean': + if newkey.lower() == 'false': + newkey = False + elif newkey.lower() == 'true': + newkey = True + elif dtype == 'string': + pass + mapping[k] = newkey + 
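+ # (Editorial example:) with the default host_filters pattern + # {{ guest.gueststate == "running" }} and dtype='boolean', a powered-off VM + # renders the string 'False', which the branch above coerces to False so the + # caller can drop that host from the inventory.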
return mapping + + def facts_from_proplist(self, vm): + '''Get specific properties instead of serializing everything''' + + rdata = {} + for prop in self.guest_props: + self.debugl('getting %s property for %s' % (prop, vm.name)) + key = prop + if self.lowerkeys: + key = key.lower() + + if '.' not in prop: + # props without periods are direct attributes of the parent + rdata[key] = getattr(vm, prop) + else: + # props with periods are subkeys of parent attributes + parts = prop.split('.') + total = len(parts) - 1 + + # pointer to the current object + val = None + # pointer to the current result key + lastref = rdata + + for idx, x in enumerate(parts): + + # if the val wasn't set yet, get it from the parent + if not val: + val = getattr(vm, x) + else: + # in a subkey, get the subprop from the previous attrib + try: + val = getattr(val, x) + except AttributeError as e: + self.debugl(e) + + # lowercase keys if requested + if self.lowerkeys: + x = x.lower() + + # change the pointer or set the final value + if idx != total: + if x not in lastref: + lastref[x] = {} + lastref = lastref[x] + else: + lastref[x] = val + + return rdata + + def facts_from_vobj(self, vobj, level=0): + + ''' Traverse a VM object and return a json compliant data structure ''' + + # pyvmomi objects are not yet serializable, but may be one day ... + # https://github.com/vmware/pyvmomi/issues/21 + + # WARNING: + # Accessing an object attribute will trigger a SOAP call to the remote. + # Increasing the attributes collected or the depth of recursion greatly + # increases runtime duration and potentially memory+network utilization. + + if level == 0: + try: + self.debugl("get facts for %s" % vobj.name) + except Exception as e: + self.debugl(e) + + rdata = {} + + methods = dir(vobj) + methods = [str(x) for x in methods if not x.startswith('_')] + methods = [x for x in methods if x not in self.bad_types] + methods = [x for x in methods if not x.lower() in self.skip_keys] + methods = sorted(methods) + + for method in methods: + # Attempt to get the method, skip on fail + try: + methodToCall = getattr(vobj, method) + except Exception as e: + continue + + # Skip callable methods + if callable(methodToCall): + continue + + if self.lowerkeys: + method = method.lower() + + rdata[method] = self._process_object_types( + methodToCall, + thisvm=vobj, + inkey=method, + ) + + return rdata + + def _process_object_types(self, vobj, thisvm=None, inkey=None, level=0): + ''' Serialize an object ''' + rdata = {} + + if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]: + return rdata + + if vobj is None: + rdata = None + elif type(vobj) in self.vimTable: + rdata = {} + for key in self.vimTable[type(vobj)]: + rdata[key] = getattr(vobj, key) + + elif issubclass(type(vobj), str) or isinstance(vobj, str): + if vobj.isalnum(): + rdata = vobj + else: + rdata = vobj.decode('ascii', 'ignore') + elif issubclass(type(vobj), bool) or isinstance(vobj, bool): + rdata = vobj + elif issubclass(type(vobj), int) or isinstance(vobj, int): + rdata = vobj + elif issubclass(type(vobj), float) or isinstance(vobj, float): + rdata = vobj + elif issubclass(type(vobj), long) or isinstance(vobj, long): + rdata = vobj + elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple): + rdata = [] + try: + vobj = sorted(vobj) + except Exception: + pass + + for idv, vii in enumerate(vobj): + if level + 1 <= self.maxlevel: + vid = self._process_object_types( + vii, + thisvm=thisvm, + inkey=inkey + '[' + str(idv) + ']', + 
level=(level + 1) + ) + + if vid: + rdata.append(vid) + + elif issubclass(type(vobj), dict): + pass + + elif issubclass(type(vobj), object): + methods = dir(vobj) + methods = [str(x) for x in methods if not x.startswith('_')] + methods = [x for x in methods if x not in self.bad_types] + methods = [x for x in methods if not inkey + '.' + x.lower() in self.skip_keys] + methods = sorted(methods) + + for method in methods: + # Attempt to get the method, skip on fail + try: + methodToCall = getattr(vobj, method) + except Exception as e: + continue + + if callable(methodToCall): + continue + + if self.lowerkeys: + method = method.lower() + if level + 1 <= self.maxlevel: + rdata[method] = self._process_object_types( + methodToCall, + thisvm=thisvm, + inkey=inkey + '.' + method, + level=(level + 1) + ) + else: + pass + + return rdata + + def get_host_info(self, host): + + ''' Return hostvars for a single host ''' + + if host in self.inventory['_meta']['hostvars']: + return self.inventory['_meta']['hostvars'][host] + elif self.args.host and self.inventory['_meta']['hostvars']: + match = None + for k, v in self.inventory['_meta']['hostvars'].items(): + if v['name'] == self.args.host: + match = k + break + if match: + return self.inventory['_meta']['hostvars'][match] + else: + raise VMwareMissingHostException('%s not found' % host) + else: + raise VMwareMissingHostException('%s not found' % host) + + +if __name__ == "__main__": + # Run the script + print(VMWareInventory().show()) + + diff --git a/awx/plugins/library/scan_files.py b/awx/plugins/library/scan_files.py index cd07954b48..3ab092947d 100644 --- a/awx/plugins/library/scan_files.py +++ b/awx/plugins/library/scan_files.py @@ -99,6 +99,7 @@ EXAMPLES = ''' # }, ''' + def main(): module = AnsibleModule( argument_spec = dict(paths=dict(required=True, type='list'), @@ -161,4 +162,5 @@ def main(): results = dict(ansible_facts=dict(files=files)) module.exit_json(**results) + main() diff --git a/awx/plugins/library/scan_packages.py b/awx/plugins/library/scan_packages.py index ee091db39f..d5aafc66e6 100755 --- a/awx/plugins/library/scan_packages.py +++ b/awx/plugins/library/scan_packages.py @@ -22,23 +22,24 @@ EXAMPLES = ''' # { # "source": "apt", # "version": "1.0.6-5", -# "architecture": "amd64", +# "arch": "amd64", # "name": "libbz2-1.0" # }, # { # "source": "apt", # "version": "2.7.1-4ubuntu1", -# "architecture": "amd64", +# "arch": "amd64", # "name": "patch" # }, # { # "source": "apt", # "version": "4.8.2-19ubuntu1", -# "architecture": "amd64", +# "arch": "amd64", # "name": "gcc-4.8-base" # }, ...
] } } ''' + def rpm_package_list(): import rpm trans_set = rpm.TransactionSet() @@ -53,6 +54,7 @@ def rpm_package_list(): installed_packages.append(package_details) return installed_packages + def deb_package_list(): import apt apt_cache = apt.Cache() @@ -62,11 +64,12 @@ def deb_package_list(): ac_pkg = apt_cache[package].installed package_details = dict(name=package, version=ac_pkg.version, - architecture=ac_pkg.architecture, + arch=ac_pkg.architecture, source='apt') installed_packages.append(package_details) return installed_packages + def main(): module = AnsibleModule( argument_spec = dict(os_family=dict(required=True)) @@ -85,4 +88,5 @@ def main(): results = dict(skipped=True, msg="Unsupported Distribution") module.exit_json(**results) + main() diff --git a/awx/plugins/library/scan_services.py b/awx/plugins/library/scan_services.py index 1632ffab39..11a8edc745 100644 --- a/awx/plugins/library/scan_services.py +++ b/awx/plugins/library/scan_services.py @@ -43,12 +43,14 @@ EXAMPLES = ''' # }, .... ] } } ''' + class BaseService(object): def __init__(self, module): self.module = module self.incomplete_warning = False + class ServiceScanService(BaseService): def gather_services(self): @@ -135,6 +137,7 @@ class ServiceScanService(BaseService): services.append(service_data) return services + class SystemctlScanService(BaseService): def systemd_enabled(self): @@ -170,6 +173,7 @@ class SystemctlScanService(BaseService): "source": "systemd"}) return services + def main(): module = AnsibleModule(argument_spec = dict()) service_modules = (ServiceScanService, SystemctlScanService) @@ -190,4 +194,5 @@ def main(): results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges." module.exit_json(**results) + main() diff --git a/awx/plugins/library/win_scan_files.ps1 b/awx/plugins/library/win_scan_files.ps1 new file mode 100644 index 0000000000..6d114dfcc8 --- /dev/null +++ b/awx/plugins/library/win_scan_files.ps1 @@ -0,0 +1,102 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
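+# (Editorial note:) this module returns ansible_facts.files -- one object per +# scanned file with path, isdir/size, extension, attributes, owner, epoch +# timestamps, and an optional checksum when get_checksum is set.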
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args $true; + +$paths = Get-Attr $params "paths" $FALSE; +If ($paths -eq $FALSE) +{ + Fail-Json (New-Object psobject) "missing required argument: paths"; +} + +$get_checksum = Get-Attr $params "get_checksum" $false | ConvertTo-Bool; +$recursive = Get-Attr $params "recursive" $false | ConvertTo-Bool; + +function Date_To_Timestamp($start_date, $end_date) +{ + If($start_date -and $end_date) + { + Write-Output (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds + } +} + +$files = @() + +ForEach ($path In $paths) +{ + "Path: " + $path + ForEach ($file in Get-ChildItem $path -Recurse: $recursive) + { + "File: " + $file.FullName + $fileinfo = New-Object psobject + Set-Attr $fileinfo "path" $file.FullName + $info = Get-Item $file.FullName; + $iscontainer = Get-Attr $info "PSIsContainer" $null; + $length = Get-Attr $info "Length" $null; + $extension = Get-Attr $info "Extension" $null; + $attributes = Get-Attr $info "Attributes" ""; + If ($info) + { + $accesscontrol = $info.GetAccessControl(); + } + Else + { + $accesscontrol = $null; + } + $owner = Get-Attr $accesscontrol "Owner" $null; + $creationtime = Get-Attr $info "CreationTime" $null; + $lastaccesstime = Get-Attr $info "LastAccessTime" $null; + $lastwritetime = Get-Attr $info "LastWriteTime" $null; + + $epoch_date = Get-Date -Date "01/01/1970" + If ($iscontainer) + { + Set-Attr $fileinfo "isdir" $TRUE; + } + Else + { + Set-Attr $fileinfo "isdir" $FALSE; + Set-Attr $fileinfo "size" $length; + } + Set-Attr $fileinfo "extension" $extension; + Set-Attr $fileinfo "attributes" $attributes.ToString(); + # Set-Attr $fileinfo "owner" $getaccesscontrol.Owner; + # Set-Attr $fileinfo "owner" $info.GetAccessControl().Owner; + Set-Attr $fileinfo "owner" $owner; + Set-Attr $fileinfo "creationtime" (Date_To_Timestamp $epoch_date $creationtime); + Set-Attr $fileinfo "lastaccesstime" (Date_To_Timestamp $epoch_date $lastaccesstime); + Set-Attr $fileinfo "lastwritetime" (Date_To_Timestamp $epoch_date $lastwritetime); + + If (($get_checksum) -and -not $fileinfo.isdir) + { + $hash = Get-FileChecksum($file.FullName); + Set-Attr $fileinfo "checksum" $hash; + } + + $files += $fileinfo + } +} + +$result = New-Object psobject @{ + ansible_facts = New-Object psobject @{ + files = $files + } +} + +Exit-Json $result; diff --git a/awx/plugins/library/win_scan_packages.ps1 b/awx/plugins/library/win_scan_packages.ps1 new file mode 100644 index 0000000000..2c9455d154 --- /dev/null +++ b/awx/plugins/library/win_scan_packages.ps1 @@ -0,0 +1,66 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
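+# (Editorial note:) packages are enumerated from the Uninstall registry keys; +# 64-bit hosts scan both the native hive and Wow6432Node, tagging entries +# Win64 and Win32 respectively.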
+ +# WANT_JSON +# POWERSHELL_COMMON + +$uninstall_native_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall" +$uninstall_wow6432_path = "HKLM:\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall" + +if ([System.IntPtr]::Size -eq 4) { + + # This is a 32-bit Windows system, so we only check for 32-bit programs, which will be + # at the native registry location. + + $packages = Get-ChildItem -Path $uninstall_native_path | + Get-ItemProperty | + Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}}, + @{Name="version"; Expression={$_."DisplayVersion"}}, + @{Name="publisher"; Expression={$_."Publisher"}}, + @{Name="arch"; Expression={ "Win32" }} | + Where-Object { $_.name } + +} else { + + # This is a 64-bit Windows system, so we check for 64-bit programs in the native + # registry location, and also for 32-bit programs under Wow6432Node. + + $packages = Get-ChildItem -Path $uninstall_native_path | + Get-ItemProperty | + Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}}, + @{Name="version"; Expression={$_."DisplayVersion"}}, + @{Name="publisher"; Expression={$_."Publisher"}}, + @{Name="arch"; Expression={ "Win64" }} | + Where-Object { $_.name } + + $packages += Get-ChildItem -Path $uninstall_wow6432_path | + Get-ItemProperty | + Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}}, + @{Name="version"; Expression={$_."DisplayVersion"}}, + @{Name="publisher"; Expression={$_."Publisher"}}, + @{Name="arch"; Expression={ "Win32" }} | + Where-Object { $_.name } + +} + +$result = New-Object psobject @{ + ansible_facts = New-Object psobject @{ + packages = $packages + } + changed = $false +} + +Exit-Json $result; diff --git a/awx/plugins/library/win_scan_services.ps1 b/awx/plugins/library/win_scan_services.ps1 new file mode 100644 index 0000000000..3de8ac4c9b --- /dev/null +++ b/awx/plugins/library/win_scan_services.ps1 @@ -0,0 +1,30 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# WANT_JSON +# POWERSHELL_COMMON + +$result = New-Object psobject @{ + ansible_facts = New-Object psobject @{ + services = Get-Service | + Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}}, + @{Name="win_svc_name"; Expression={$_."Name"}}, + @{Name="state"; Expression={$_."Status".ToString().ToLower()}} + } + changed = $false +} + +Exit-Json $result; diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 6711a5872b..090e939914 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -8,8 +8,15 @@ import ldap import djcelery from datetime import timedelta -# Update this module's local settings from the global settings module. +from kombu import Queue, Exchange +from kombu.common import Broadcast + +# global settings from django.conf import global_settings +# ugettext lazy +from django.utils.translation import ugettext_lazy as _ + +# Update this module's local settings from the global settings module.
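+# (Editorial note:) the loop below copies every UPPER_CASE name from +# django.conf.global_settings into this module's namespace, so the assignments +# that follow override Django's stock defaults in place.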
this_module = sys.modules[__name__] for setting in dir(global_settings): if setting == setting.upper(): @@ -18,7 +25,9 @@ for setting in dir(global_settings): # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(__file__)) + def is_testing(argv=None): + import sys '''Return True if running django or py.test unit tests.''' argv = sys.argv if argv is None else argv if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]): @@ -27,6 +36,11 @@ def is_testing(argv=None): return True return False + +def IS_TESTING(argv=None): + return is_testing(argv) + + DEBUG = True TEMPLATE_DEBUG = DEBUG SQL_DEBUG = DEBUG @@ -59,7 +73,7 @@ DATABASES = { # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. -TIME_ZONE = 'America/New_York' +TIME_ZONE = None # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html @@ -112,7 +126,13 @@ LOG_ROOT = os.path.join(BASE_DIR) # The heartbeat file for the tower scheduler SCHEDULE_METADATA_LOCATION = os.path.join(BASE_DIR, '.tower_cycle') +# Django gettext files path: locale/<lang-code>/LC_MESSAGES/django.po, django.mo +LOCALE_PATHS = ( + os.path.join(BASE_DIR, 'locale'), +) + # Maximum number of the same job that can be waiting to run when launching from scheduler +# Note: This setting may be overridden by database settings. SCHEDULE_MAX_JOBS = 10 SITE_ID = 1 @@ -129,8 +149,40 @@ ALLOWED_HOSTS = [] # reverse proxy. REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST'] +# Note: This setting may be overridden by database settings. STDOUT_MAX_BYTES_DISPLAY = 1048576 +# Returned in the header on event api lists as a recommendation to the UI +# on how many events to display before truncating/hiding +RECOMMENDED_MAX_EVENTS_DISPLAY_HEADER = 4000 + +# The maximum size of the ansible callback event's res data structure; +# values beyond this limit will be removed +MAX_EVENT_RES_DATA = 700000 + +# Note: This setting may be overridden by database settings.
+EVENT_STDOUT_MAX_BYTES_DISPLAY = 1024 + +# The amount of time before a stdout file is expired and removed locally +# Note that this can be recreated if the stdout is downloaded +LOCAL_STDOUT_EXPIRE_TIME = 2592000 + +# The number of processes spawned by the callback receiver to process job +# events into the database +JOB_EVENT_WORKERS = 4 + +# The maximum size of the job event worker queue before requests are blocked +JOB_EVENT_MAX_QUEUE_SIZE = 10000 + +# Disallow sending session cookies over insecure connections +SESSION_COOKIE_SECURE = True + +# Disallow sending csrf cookies over insecure connections +CSRF_COOKIE_SECURE = True + +# Limit CSRF cookies to browser sessions +CSRF_COOKIE_AGE = None + TEMPLATE_CONTEXT_PROCESSORS = ( # NOQA 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', @@ -147,12 +199,12 @@ TEMPLATE_CONTEXT_PROCESSORS = ( # NOQA ) MIDDLEWARE_CLASSES = ( # NOQA - 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.locale.LocaleMiddleware', + 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', - 'awx.main.middleware.HAMiddleware', 'awx.main.middleware.ActivityStreamMiddleware', 'awx.sso.middleware.SocialAuthMiddleware', 'crum.CurrentRequestUserMiddleware', @@ -185,18 +237,21 @@ INSTALLED_APPS = ( 'django_extensions', 'djcelery', 'kombu.transport.django', + 'channels', 'polymorphic', 'taggit', 'social.apps.django_app.default', + 'awx.conf', 'awx.main', 'awx.api', 'awx.ui', - 'awx.fact', 'awx.sso', + 'solo', ) INTERNAL_IPS = ('127.0.0.1',) +MAX_PAGE_SIZE = 200 REST_FRAMEWORK = { 'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination', 'PAGE_SIZE': 25, @@ -235,33 +290,41 @@ AUTHENTICATION_BACKENDS = ( 'social.backends.github.GithubOAuth2', 'social.backends.github.GithubOrganizationOAuth2', 'social.backends.github.GithubTeamOAuth2', + 'social.backends.azuread.AzureADOAuth2', 'awx.sso.backends.SAMLAuth', 'django.contrib.auth.backends.ModelBackend', ) # LDAP server (default to None to skip using LDAP authentication). +# Note: This setting may be overridden by database settings. AUTH_LDAP_SERVER_URI = None # Disable LDAP referrals by default (to prevent certain LDAP queries from # hanging with AD). +# Note: This setting may be overridden by database settings. AUTH_LDAP_CONNECTION_OPTIONS = { ldap.OPT_REFERRALS: 0, + ldap.OPT_NETWORK_TIMEOUT: 30 } # Radius server settings (default to empty string to skip using Radius auth). +# Note: These settings may be overridden by database settings. RADIUS_SERVER = '' RADIUS_PORT = 1812 RADIUS_SECRET = '' # Seconds before auth tokens expire. +# Note: This setting may be overridden by database settings. AUTH_TOKEN_EXPIRATION = 1800 # Maximum number of per-user valid, concurrent tokens. # -1 is unlimited +# Note: This setting may be overridden by database settings. AUTH_TOKEN_PER_USER = -1 # Enable / Disable HTTP Basic Authentication used in the API browser # Note: Session limits are not enforced when using HTTP Basic Authentication. +# Note: This setting may be overridden by database settings. AUTH_BASIC_ENABLED = True # If set, serve only minified JS for UI. @@ -326,20 +389,50 @@ os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199') # Initialize Django-Celery. 
djcelery.setup_loader() -BROKER_URL = 'redis://localhost/' +BROKER_URL = 'amqp://guest:guest@localhost:5672//' +CELERY_DEFAULT_QUEUE = 'default' CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_ACCEPT_CONTENT = ['json'] CELERY_TRACK_STARTED = True CELERYD_TASK_TIME_LIMIT = None CELERYD_TASK_SOFT_TIME_LIMIT = None +CELERYD_POOL_RESTARTS = True CELERYBEAT_SCHEDULER = 'celery.beat.PersistentScheduler' CELERYBEAT_MAX_LOOP_INTERVAL = 60 CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' +CELERY_IMPORTS = ('awx.main.scheduler.tasks',) +CELERY_QUEUES = ( + Queue('default', Exchange('default'), routing_key='default'), + Queue('jobs', Exchange('jobs'), routing_key='jobs'), + Queue('scheduler', Exchange('scheduler', type='topic'), routing_key='scheduler.job.#', durable=False), + Broadcast('broadcast_all') + # Projects use a fanout queue, this isn't super well supported +) +CELERY_ROUTES = {'awx.main.tasks.run_job': {'queue': 'jobs', + 'routing_key': 'jobs'}, + 'awx.main.tasks.run_project_update': {'queue': 'jobs', + 'routing_key': 'jobs'}, + 'awx.main.tasks.run_inventory_update': {'queue': 'jobs', + 'routing_key': 'jobs'}, + 'awx.main.tasks.run_ad_hoc_command': {'queue': 'jobs', + 'routing_key': 'jobs'}, + 'awx.main.tasks.run_system_job': {'queue': 'jobs', + 'routing_key': 'jobs'}, + 'awx.main.scheduler.tasks.run_job_launch': {'queue': 'scheduler', + 'routing_key': 'scheduler.job.launch'}, + 'awx.main.scheduler.tasks.run_job_complete': {'queue': 'scheduler', + 'routing_key': 'scheduler.job.complete'}, + 'awx.main.tasks.cluster_node_heartbeat': {'queue': 'default', + 'routing_key': 'cluster.heartbeat'}, + 'awx.main.tasks.purge_old_stdout_files': {'queue': 'default', + 'routing_key': 'cluster.heartbeat'}} + CELERYBEAT_SCHEDULE = { 'tower_scheduler': { 'task': 'awx.main.tasks.tower_periodic_scheduler', - 'schedule': timedelta(seconds=30) + 'schedule': timedelta(seconds=30), + 'options': {'expires': 20,} }, 'admin_checks': { 'task': 'awx.main.tasks.run_administrative_checks', @@ -349,8 +442,42 @@ CELERYBEAT_SCHEDULE = { 'task': 'awx.main.tasks.cleanup_authtokens', 'schedule': timedelta(days=30) }, + 'cluster_heartbeat': { + 'task': 'awx.main.tasks.cluster_node_heartbeat', + 'schedule': timedelta(seconds=60), + 'options': {'expires': 50,} + }, + 'purge_stdout_files': { + 'task': 'awx.main.tasks.purge_old_stdout_files', + 'schedule': timedelta(days=7) + }, + 'task_manager': { + 'task': 'awx.main.scheduler.tasks.run_task_manager', + 'schedule': timedelta(seconds=20), + 'options': {'expires': 20,} + }, + 'task_fail_inconsistent_running_jobs': { + 'task': 'awx.main.scheduler.tasks.run_fail_inconsistent_running_jobs', + 'schedule': timedelta(seconds=30), + 'options': {'expires': 20,} + }, } +# Django Caching Configuration +if is_testing(): + CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + }, + } +else: + CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': 'memcached:11211', + }, + } + # Social Auth configuration. 
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy' SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage' @@ -373,6 +500,20 @@ SOCIAL_AUTH_PIPELINE = ( 'awx.sso.pipeline.update_user_teams', ) +SOCIAL_AUTH_LOGIN_URL = '/' +SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/' +SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/' +SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/' + +SOCIAL_AUTH_RAISE_EXCEPTIONS = False +SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False +SOCIAL_AUTH_SLUGIFY_USERNAMES = True +SOCIAL_AUTH_CLEAN_USERNAMES = True + +SOCIAL_AUTH_SANITIZE_REDIRECTS = True +SOCIAL_AUTH_REDIRECT_IS_HTTPS = False + +# Note: These settings may be overridden by database settings. SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] @@ -391,6 +532,9 @@ SOCIAL_AUTH_GITHUB_TEAM_SECRET = '' SOCIAL_AUTH_GITHUB_TEAM_ID = '' SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['user:email', 'read:org'] +SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = '' +SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = '' + SOCIAL_AUTH_SAML_SP_ENTITY_ID = '' SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = '' SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = '' @@ -399,22 +543,6 @@ SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {} SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {} SOCIAL_AUTH_SAML_ENABLED_IDPS = {} -SOCIAL_AUTH_LOGIN_URL = '/' -SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/' -SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/' -SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/' - -SOCIAL_AUTH_RAISE_EXCEPTIONS = False -SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False -SOCIAL_AUTH_SLUGIFY_USERNAMES = True -SOCIAL_AUTH_CLEAN_USERNAMES = True - -SOCIAL_AUTH_SANITIZE_REDIRECTS = True -SOCIAL_AUTH_REDIRECT_IS_HTTPS = False - -SOCIAL_AUTH_ORGANIZATION_MAP = {} -SOCIAL_AUTH_TEAM_MAP = {} - # Any ANSIBLE_* settings will be passed to the subprocess environment by the # celery task. @@ -434,45 +562,44 @@ ANSIBLE_FORCE_COLOR = True # the celery task. AWX_TASK_ENV = {} -# Maximum number of job events processed by the callback receiver worker process -# before it recycles -JOB_EVENT_RECYCLE_THRESHOLD = 3000 - -# Number of workers used to proecess job events in parallel -JOB_EVENT_WORKERS = 4 - -# Maximum number of job events that can be waiting on a single worker queue before -# it can be skipped as too busy -JOB_EVENT_MAX_QUEUE_SIZE = 100 - # Flag to enable/disable updating hosts M2M when saving job events. CAPTURE_JOB_EVENT_HOSTS = False -# Enable proot support for running jobs (playbook runs only). -AWX_PROOT_ENABLED = False +# Enable bubblewrap support for running jobs (playbook runs only). +# Note: This setting may be overridden by database settings. +AWX_PROOT_ENABLED = True -# Command/path to proot. -AWX_PROOT_CMD = 'proot' +# Command/path to bubblewrap. +AWX_PROOT_CMD = 'bwrap' -# Additional paths to hide from jobs using proot. +# Additional paths to hide from jobs using bubblewrap. +# Note: This setting may be overridden by database settings. AWX_PROOT_HIDE_PATHS = [] -# Additional paths to show for jobs using proot. +# Additional paths to show for jobs using bubblewrap. +# Note: This setting may be overridden by database settings. AWX_PROOT_SHOW_PATHS = [] # Number of jobs to show as part of the job template history AWX_JOB_TEMPLATE_HISTORY = 10 -# The directory in which proot will create new temporary directories for its root +# The directory in which bubblewrap will create new temporary directories for its root +# Note: This setting may be overridden by database settings.
AWX_PROOT_BASE_PATH = "/tmp" # User definable ansible callback plugins +# Note: This setting may be overridden by database settings. AWX_ANSIBLE_CALLBACK_PLUGINS = "" +# Time at which an HA node is considered active +AWX_ACTIVE_NODE_TIME = 7200 + # Enable Pendo on the UI, possible values are 'off', 'anonymous', and 'detailed' +# Note: This setting may be overridden by database settings. PENDO_TRACKING_STATE = "off" # Default list of modules allowed for ad hoc commands. +# Note: This setting may be overridden by database settings. AD_HOC_COMMANDS = [ 'command', 'shell', @@ -499,12 +626,12 @@ AD_HOC_COMMANDS = [ # instead (based on docs from: # http://docs.rackspace.com/loadbalancers/api/v1.0/clb-devguide/content/Service_Access_Endpoints-d1e517.html) RAX_REGION_CHOICES = [ - ('ORD', 'Chicago'), - ('DFW', 'Dallas/Ft. Worth'), - ('IAD', 'Northern Virginia'), - ('LON', 'London'), - ('SYD', 'Sydney'), - ('HKG', 'Hong Kong'), + ('ORD', _('Chicago')), + ('DFW', _('Dallas/Ft. Worth')), + ('IAD', _('Northern Virginia')), + ('LON', _('London')), + ('SYD', _('Sydney')), + ('HKG', _('Hong Kong')), ] # Inventory variable name/values for determining if host is active/enabled. @@ -531,20 +658,22 @@ INV_ENV_VARIABLE_BLACKLIST = ("HOME", "USER", "_", "TERM") # list of names here. The available region IDs will be pulled from boto. # http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region EC2_REGION_NAMES = { - 'us-east-1': 'US East (Northern Virginia)', - 'us-east-2': 'US East (Ohio)', - 'us-west-2': 'US West (Oregon)', - 'us-west-1': 'US West (Northern California)', - 'eu-central-1': 'EU (Frankfurt)', - 'eu-west-1': 'EU (Ireland)', - 'ap-southeast-1': 'Asia Pacific (Singapore)', - 'ap-southeast-2': 'Asia Pacific (Sydney)', - 'ap-northeast-1': 'Asia Pacific (Tokyo)', - 'ap-northeast-2': 'Asia Pacific (Seoul)', - 'ap-south-1': 'Asia Pacific (Mumbai)', - 'sa-east-1': 'South America (Sao Paulo)', - 'us-gov-west-1': 'US West (GovCloud)', - 'cn-north-1': 'China (Beijing)', + 'us-east-1': _('US East (Northern Virginia)'), + 'us-east-2': _('US East (Ohio)'), + 'us-west-2': _('US West (Oregon)'), + 'us-west-1': _('US West (Northern California)'), + 'ca-central-1': _('Canada (Central)'), + 'eu-central-1': _('EU (Frankfurt)'), + 'eu-west-1': _('EU (Ireland)'), + 'eu-west-2': _('EU (London)'), + 'ap-southeast-1': _('Asia Pacific (Singapore)'), + 'ap-southeast-2': _('Asia Pacific (Sydney)'), + 'ap-northeast-1': _('Asia Pacific (Tokyo)'), + 'ap-northeast-2': _('Asia Pacific (Seoul)'), + 'ap-south-1': _('Asia Pacific (Mumbai)'), + 'sa-east-1': _('South America (Sao Paulo)'), + 'us-gov-west-1': _('US West (GovCloud)'), + 'cn-north-1': _('China (Beijing)'), } EC2_REGIONS_BLACKLIST = [ @@ -572,11 +701,11 @@ VMWARE_REGIONS_BLACKLIST = [] # Inventory variable name/values for determining whether a host is # active in vSphere. -VMWARE_ENABLED_VAR = 'vmware_powerState' -VMWARE_ENABLED_VALUE = 'poweredOn' +VMWARE_ENABLED_VAR = 'guest.gueststate' +VMWARE_ENABLED_VALUE = 'running' # Inventory variable name containing the unique instance ID. -VMWARE_INSTANCE_ID_VAR = 'vmware_uuid' +VMWARE_INSTANCE_ID_VAR = 'config.instanceuuid' # Filter for allowed group and host names when importing inventory # from VMware. @@ -593,19 +722,19 @@ VMWARE_EXCLUDE_EMPTY_GROUPS = True # provide a list here. 
# Source: https://developers.google.com/compute/docs/zones GCE_REGION_CHOICES = [ - ('us-east1-b', 'US East (B)'), - ('us-east1-c', 'US East (C)'), - ('us-east1-d', 'US East (D)'), - ('us-central1-a', 'US Central (A)'), - ('us-central1-b', 'US Central (B)'), - ('us-central1-c', 'US Central (C)'), - ('us-central1-f', 'US Central (F)'), - ('europe-west1-b', 'Europe West (B)'), - ('europe-west1-c', 'Europe West (C)'), - ('europe-west1-d', 'Europe West (D)'), - ('asia-east1-a', 'Asia East (A)'), - ('asia-east1-b', 'Asia East (B)'), - ('asia-east1-c', 'Asia East (C)'), + ('us-east1-b', _('US East (B)')), + ('us-east1-c', _('US East (C)')), + ('us-east1-d', _('US East (D)')), + ('us-central1-a', _('US Central (A)')), + ('us-central1-b', _('US Central (B)')), + ('us-central1-c', _('US Central (C)')), + ('us-central1-f', _('US Central (F)')), + ('europe-west1-b', _('Europe West (B)')), + ('europe-west1-c', _('Europe West (C)')), + ('europe-west1-d', _('Europe West (D)')), + ('asia-east1-a', _('Asia East (A)')), + ('asia-east1-b', _('Asia East (B)')), + ('asia-east1-c', _('Asia East (C)')), ] GCE_REGIONS_BLACKLIST = [] @@ -629,19 +758,19 @@ GCE_INSTANCE_ID_VAR = None # It's not possible to get zones in Azure without authenticating, so we # provide a list here. AZURE_REGION_CHOICES = [ - ('Central_US', 'US Central'), - ('East_US_1', 'US East'), - ('East_US_2', 'US East 2'), - ('North_Central_US', 'US North Central'), - ('South_Central_US', 'US South Central'), - ('West_US', 'US West'), - ('North_Europe', 'Europe North'), - ('West_Europe', 'Europe West'), - ('East_Asia_Pacific', 'Asia Pacific East'), - ('Southest_Asia_Pacific', 'Asia Pacific Southeast'), - ('East_Japan', 'Japan East'), - ('West_Japan', 'Japan West'), - ('South_Brazil', 'Brazil South'), + ('Central_US', _('US Central')), + ('East_US_1', _('US East')), + ('East_US_2', _('US East 2')), + ('North_Central_US', _('US North Central')), + ('South_Central_US', _('US South Central')), + ('West_US', _('US West')), + ('North_Europe', _('Europe North')), + ('West_Europe', _('Europe West')), + ('East_Asia_Pacific', _('Asia Pacific East')), + ('Southest_Asia_Pacific', _('Asia Pacific Southeast')), + ('East_Japan', _('Japan East')), + ('West_Japan', _('Japan West')), + ('South_Brazil', _('Brazil South')), ] AZURE_REGIONS_BLACKLIST = [] @@ -686,6 +815,8 @@ SATELLITE6_GROUP_FILTER = r'^.+$' SATELLITE6_HOST_FILTER = r'^.+$' SATELLITE6_EXCLUDE_EMPTY_GROUPS = True SATELLITE6_INSTANCE_ID_VAR = 'foreman.id' +SATELLITE6_GROUP_PREFIX = 'foreman_' +SATELLITE6_GROUP_PATTERNS = ["{app}-{tier}-{color}", "{app}-{color}", "{app}", "{tier}"] # --------------------- # ----- CloudForms ----- @@ -701,15 +832,19 @@ CLOUDFORMS_INSTANCE_ID_VAR = 'id' # -- Activity Stream -- # --------------------- # Defaults for enabling/disabling activity stream. +# Note: These settings may be overridden by database settings. ACTIVITY_STREAM_ENABLED = True ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False # Internal API URL for use by inventory scripts and callback plugin. INTERNAL_API_URL = 'http://127.0.0.1:%s' % DEVSERVER_DEFAULT_PORT -# ZeroMQ callback settings. -CALLBACK_CONSUMER_PORT = "tcp://127.0.0.1:5556" -CALLBACK_QUEUE_PORT = "ipc:///tmp/callback_receiver.ipc" +PERSISTENT_CALLBACK_MESSAGES = True +USE_CALLBACK_QUEUE = True +CALLBACK_QUEUE = "callback_tasks" +FACT_QUEUE = "facts" + +SCHEDULER_QUEUE = "scheduler" TASK_COMMAND_PORT = 6559 @@ -718,161 +853,19 @@ SOCKETIO_LISTEN_PORT = 8080 FACT_CACHE_PORT = 6564 +# Note: This setting may be overridden by database settings. 
ORG_ADMINS_CAN_SEE_ALL_USERS = True +# Note: This setting may be overridden by database settings. TOWER_ADMIN_ALERTS = True +# Note: This setting may be overridden by database settings. TOWER_URL_BASE = "https://towerhost" -TOWER_SETTINGS_MANIFEST = { - "SCHEDULE_MAX_JOBS": { - "name": "Maximum Scheduled Jobs", - "description": "Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created", - "default": SCHEDULE_MAX_JOBS, - "type": "int", - "category": "jobs", - }, - "STDOUT_MAX_BYTES_DISPLAY": { - "name": "Standard Output Maximum Display Size", - "description": "Maximum Size of Standard Output in bytes to display before requiring the output be downloaded", - "default": STDOUT_MAX_BYTES_DISPLAY, - "type": "int", - "category": "jobs", - }, - "AUTH_TOKEN_EXPIRATION": { - "name": "Idle Time Force Log Out", - "description": "Number of seconds that a user is inactive before they will need to login again", - "type": "int", - "default": AUTH_TOKEN_EXPIRATION, - "category": "authentication", - }, - "AUTH_TOKEN_PER_USER": { - "name": "Maximum number of simultaneous logins", - "description": "Maximum number of simultaneous logins a user may have. To disable enter -1", - "type": "int", - "default": AUTH_TOKEN_PER_USER, - "category": "authentication", - }, - # "AUTH_BASIC_ENABLED": { - # "name": "Enable HTTP Basic Auth", - # "description": "Enable HTTP Basic Auth for the API Browser", - # "default": AUTH_BASIC_ENABLED, - # "type": "bool", - # "category": "authentication", - # }, - # "AUTH_LDAP_SERVER_URI": { - # "name": "LDAP Server URI", - # "description": "URI Location of the LDAP Server", - # "default": AUTH_LDAP_SERVER_URI, - # "type": "string", - # "category": "authentication", - # }, - # "RADIUS_SERVER": { - # "name": "Radius Server Host", - # "description": "Host to communicate with for Radius Authentication", - # "default": RADIUS_SERVER, - # "type": "string", - # "category": "authentication", - # }, - # "RADIUS_PORT": { - # "name": "Radius Server Port", - # "description": "Port on the Radius host for Radius Authentication", - # "default": RADIUS_PORT, - # "type": "string", - # "category": "authentication", - # }, - # "RADIUS_SECRET": { - # "name": "Radius Server Secret", - # "description": "Secret used when negotiating with the Radius server", - # "default": RADIUS_SECRET, - # "type": "string", - # "category": "authentication", - # }, - "AWX_PROOT_ENABLED": { - "name": "Enable PRoot for Job Execution", - "description": "Isolates an Ansible job from protected parts of the Tower system to prevent exposing sensitive information", - "default": AWX_PROOT_ENABLED, - "type": "bool", - "category": "jobs", - }, - "AWX_PROOT_HIDE_PATHS": { - "name": "Paths to hide from PRoot jobs", - "description": "Extra paths to hide from PRoot isolated processes", - "default": AWX_PROOT_HIDE_PATHS, - "type": "list", - "category": "jobs", - }, - "AWX_PROOT_SHOW_PATHS": { - "name": "Paths to expose to PRoot jobs", - "description": "Explicit whitelist of paths to expose to PRoot jobs", - "default": AWX_PROOT_SHOW_PATHS, - "type": "list", - "category": "jobs", - }, - "AWX_PROOT_BASE_PATH": { - "name": "Base PRoot execution path", - "description": "The location that PRoot will create its temporary working directory", - "default": AWX_PROOT_BASE_PATH, - "type": "string", - "category": "jobs", - }, - "AWX_ANSIBLE_CALLBACK_PLUGINS": { - "name": "Ansible Callback Plugins", - "description": "Colon Seperated Paths for extra callback plugins to be used when running 
jobs", - "default": AWX_ANSIBLE_CALLBACK_PLUGINS, - "type": "string", - "category": "jobs", - }, - "PENDO_TRACKING_STATE": { - "name": "Analytics Tracking State", - "description": "Enable or Disable Analytics Tracking", - "default": PENDO_TRACKING_STATE, - "type": "string", - "category": "ui", - }, - "AD_HOC_COMMANDS": { - "name": "Ansible Modules Allowed for Ad Hoc Jobs", - "description": "A colon-seperated whitelist of modules allowed to be used by ad-hoc jobs", - "default": AD_HOC_COMMANDS, - "type": "list", - "category": "jobs", - }, - "ACTIVITY_STREAM_ENABLED": { - "name": "Enable Activity Stream", - "description": "Enable capturing activity for the Tower activity stream", - "default": ACTIVITY_STREAM_ENABLED, - "type": "bool", - "category": "system", - }, - "ORG_ADMINS_CAN_SEE_ALL_USERS": { - "name": "All Users Visible to Organization Admins", - "description": "Controls whether any Organization Admin can view all users, even those not associated with their Organization", - "default": ORG_ADMINS_CAN_SEE_ALL_USERS, - "type": "bool", - "category": "system", - }, - "TOWER_ADMIN_ALERTS": { - "name": "Enable Tower Administrator Alerts", - "description": "Allow Tower to email Admin users for system events that may require attention", - "default": TOWER_ADMIN_ALERTS, - "type": "bool", - "category": "system", - }, - "TOWER_URL_BASE": { - "name": "Base URL of the Tower host", - "description": "This is used by services like Notifications to render a valid url to the Tower host", - "default": TOWER_URL_BASE, - "type": "string", - "category": "system", - }, - "LICENSE": { - "name": "Tower License", - "description": "Controls what features and functionality is enabled in Tower.", - "default": "{}", - "type": "string", - "category": "system", - }, -} +TOWER_SETTINGS_MANIFEST = {} + +LOG_AGGREGATOR_ENABLED = False + # Logging configuration. 
LOGGING = { 'version': 1, @@ -882,7 +875,7 @@ LOGGING = { '()': 'django.utils.log.RequireDebugFalse', }, 'require_debug_true': { - '()': 'awx.lib.compat.RequireDebugTrue', + '()': 'django.utils.log.RequireDebugTrue', }, 'require_debug_true_or_test': { '()': 'awx.main.utils.RequireDebugTrueOrTest', @@ -892,6 +885,9 @@ LOGGING = { 'simple': { 'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s', }, + 'json': { + '()': 'awx.main.utils.formatters.LogstashFormatter' + } }, 'handlers': { 'console': { @@ -901,18 +897,24 @@ LOGGING = { 'formatter': 'simple', }, 'null': { - 'class': 'django.utils.log.NullHandler', + 'class': 'logging.NullHandler', }, 'file': { - 'class': 'django.utils.log.NullHandler', + 'class': 'logging.NullHandler', 'formatter': 'simple', }, 'syslog': { 'level': 'WARNING', 'filters': ['require_debug_false'], - 'class': 'django.utils.log.NullHandler', + 'class': 'logging.NullHandler', 'formatter': 'simple', }, + 'http_receiver': { + 'class': 'awx.main.utils.handlers.HTTPSNullHandler', + 'level': 'DEBUG', + 'formatter': 'json', + 'host': '', + }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], @@ -936,15 +938,6 @@ LOGGING = { 'backupCount': 5, 'formatter':'simple', }, - 'socketio_service': { - 'level': 'WARNING', - 'class':'logging.handlers.RotatingFileHandler', - 'filters': ['require_debug_false'], - 'filename': os.path.join(LOG_ROOT, 'socketio_service.log'), - 'maxBytes': 1024 * 1024 * 5, # 5 MB - 'backupCount': 5, - 'formatter':'simple', - }, 'task_system': { 'level': 'INFO', 'class':'logging.handlers.RotatingFileHandler', @@ -963,15 +956,6 @@ LOGGING = { 'backupCount': 5, 'formatter':'simple', }, - 'fact_receiver': { - 'level': 'WARNING', - 'class':'logging.handlers.RotatingFileHandler', - 'filters': ['require_debug_false'], - 'filename': os.path.join(LOG_ROOT, 'fact_receiver.log'), - 'maxBytes': 1024 * 1024 * 5, # 5 MB - 'backupCount': 5, - 'formatter':'simple', - }, 'system_tracking_migrations': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', @@ -998,7 +982,6 @@ LOGGING = { 'django.request': { 'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'], 'level': 'WARNING', - 'propagate': False, }, 'rest_framework.request': { 'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'], @@ -1012,21 +995,31 @@ LOGGING = { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, + 'awx.conf': { + 'handlers': ['null'], + 'level': 'WARNING', + }, + 'awx.conf.settings': { + 'handlers': ['null'], + 'level': 'WARNING', + }, + 'awx.main': { + 'handlers': ['null'] + }, 'awx.main.commands.run_callback_receiver': { - 'handlers': ['console', 'file', 'callback_receiver'], - 'propagate': False + 'handlers': ['callback_receiver'], }, - 'awx.main.commands.run_socketio_service': { - 'handlers': ['console', 'file', 'socketio_service'], - 'propagate': False + 'awx.main.tasks': { + 'handlers': ['task_system'], }, - 'awx.main.commands.run_task_system': { - 'handlers': ['console', 'file', 'task_system'], - 'propagate': False + 'awx.main.scheduler': { + 'handlers': ['task_system'], + }, + 'awx.main.consumers': { + 'handlers': ['null'] }, 'awx.main.commands.run_fact_cache_receiver': { - 'handlers': ['console', 'file', 'fact_receiver'], - 'propagate': False + 'handlers': ['fact_receiver'], }, 'awx.main.access': { 'handlers': ['null'], @@ -1040,6 +1033,11 @@ LOGGING = { 'handlers': ['null'], 'propagate': False, }, + 'awx.analytics': { + 'handlers': ['http_receiver'], + 'level': 'INFO', + 'propagate': False + }, 
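+ # (Editorial note:) 'awx.analytics' is the only logger above routed to the + # json-formatted 'http_receiver' handler; with LOG_AGGREGATOR_ENABLED = False + # and an empty 'host', that handler presumably stays a no-op until an + # external log aggregator is configured.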
'django_auth_ldap': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', @@ -1058,4 +1056,3 @@ LOGGING = { }, } } - diff --git a/awx/settings/development.py b/awx/settings/development.py index 438c152a0a..0a0bc748f2 100644 --- a/awx/settings/development.py +++ b/awx/settings/development.py @@ -4,40 +4,57 @@ # Development settings for AWX project. # Python +import socket +import copy import sys import traceback +# Centos-7 doesn't include the svg mime type +# /usr/lib64/python/mimetypes.py +import mimetypes + # Django Split Settings from split_settings.tools import optional, include # Load default settings. from defaults import * # NOQA -MONGO_HOST = '127.0.0.1' -MONGO_PORT = 27017 -MONGO_USERNAME = None -MONGO_PASSWORD = None -MONGO_DB = 'system_tracking_dev' +ALLOWED_HOSTS = ['*'] + +mimetypes.add_type("image/svg+xml", ".svg", True) +mimetypes.add_type("image/svg+xml", ".svgz", True) + +# Disallow sending session cookies over insecure connections +SESSION_COOKIE_SECURE = False + +# Disallow sending csrf cookies over insecure connections +CSRF_COOKIE_SECURE = False + +# Override django.template.loaders.cached.Loader in defaults.py +TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.Loader', + 'django.template.loaders.app_directories.Loader', +) # Disable capturing all SQL queries when running celeryd in development. if 'celeryd' in sys.argv: SQL_DEBUG = False -# Use a different callback consumer/queue for development, to avoid a conflict -# if there is also a nightly install running on the development machine. -CALLBACK_CONSUMER_PORT = "tcp://127.0.0.1:5557" -CALLBACK_QUEUE_PORT = "ipc:///tmp/callback_receiver_dev.ipc" +CALLBACK_QUEUE = "callback_tasks" -# Enable PROOT for tower-qa integration tests +# Enable PROOT for tower-qa integration tests. +# Note: This setting may be overridden by database settings. AWX_PROOT_ENABLED = True +# Disable Pendo on the UI for development/test. +# Note: This setting may be overridden by database settings. PENDO_TRACKING_STATE = "off" # Use Django-Jenkins if installed. Only run tests for awx.main app. try: import django_jenkins INSTALLED_APPS += (django_jenkins.__name__,) - PROJECT_APPS = ('awx.main.tests', 'awx.api.tests', 'awx.fact.tests',) + PROJECT_APPS = ('awx.main.tests', 'awx.api.tests',) except ImportError: pass @@ -65,23 +82,45 @@ PASSWORD_HASHERS = ( # Configure a default UUID for development only. SYSTEM_UUID = '00000000-0000-0000-0000-000000000000' +# Store a snapshot of default settings at this point before loading any +# customizable config files. +DEFAULTS_SNAPSHOT = {} +this_module = sys.modules[__name__] +for setting in dir(this_module): + if setting == setting.upper(): + DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting)) + # If there is an `/etc/tower/settings.py`, include it. # If there is a `/etc/tower/conf.d/*.py`, include them. include(optional('/etc/tower/settings.py'), scope=locals()) include(optional('/etc/tower/conf.d/*.py'), scope=locals()) ANSIBLE_USE_VENV = True -ANSIBLE_VENV_PATH = "/tower_devel/venv/ansible" +ANSIBLE_VENV_PATH = "/venv/ansible" TOWER_USE_VENV = True -TOWER_VENV_PATH = "/tower_devel/venv/tower" +TOWER_VENV_PATH = "/venv/tower" # If any local_*.py files are present in awx/settings/, use them to override # default settings for development. If not present, we can still run using # only the defaults. 
try: include(optional('local_*.py'), scope=locals()) - if not is_testing(sys.argv): - include('postprocess.py', scope=locals()) except ImportError: traceback.print_exc() sys.exit(1) + +CLUSTER_HOST_ID = socket.gethostname() +CELERY_ROUTES['awx.main.tasks.cluster_node_heartbeat'] = {'queue': CLUSTER_HOST_ID, 'routing_key': CLUSTER_HOST_ID} + +# Supervisor service name dictionary used for programmatic restart +SERVICE_NAME_DICT = { + "celery": "celeryd", + "callback": "receiver", + "runworker": "channels", + "uwsgi": "uwsgi", + "daphne": "daphne", + "fact": "factcacher", + "nginx": "nginx"} +# Used for sending commands in automatic restart +UWSGI_FIFO_LOCATION = '/awxfifo' + diff --git a/awx/settings/local_settings.py.docker_compose b/awx/settings/local_settings.py.docker_compose index 2e27a664ea..1202b1cbe1 100644 --- a/awx/settings/local_settings.py.docker_compose +++ b/awx/settings/local_settings.py.docker_compose @@ -11,6 +11,36 @@ ############################################################################### # MISC PROJECT SETTINGS ############################################################################### +import os + +def patch_broken_pipe_error(): + """Monkey Patch BaseServer.handle_error to not write + a stacktrace to stderr on broken pipe. + http://stackoverflow.com/a/22618740/362702""" + import sys + from SocketServer import BaseServer + from wsgiref import handlers + + handle_error = BaseServer.handle_error + log_exception = handlers.BaseHandler.log_exception + + def is_broken_pipe_error(): + type, err, tb = sys.exc_info() + return "Connection reset by peer" in repr(err) + + def my_handle_error(self, request, client_address): + if not is_broken_pipe_error(): + handle_error(self, request, client_address) + + def my_log_exception(self, exc_info): + if not is_broken_pipe_error(): + log_exception(self, exc_info) + + BaseServer.handle_error = my_handle_error + handlers.BaseHandler.log_exception = my_log_exception + +patch_broken_pipe_error() + ADMINS = ( # ('Your Name', 'your_email@domain.com'), @@ -21,7 +51,7 @@ MANAGERS = ADMINS # Database settings to use PostgreSQL for development. DATABASES = { 'default': { - 'ENGINE': 'django.db.backends.postgresql_psycopg2', + 'ENGINE': 'transaction_hooks.backends.postgresql_psycopg2', 'NAME': 'awx-dev', 'USER': 'awx-dev', 'PASSWORD': 'AWXsome1', @@ -37,7 +67,7 @@ DATABASES = { if is_testing(sys.argv): DATABASES = { 'default': { - 'ENGINE': 'django.db.backends.sqlite3', + 'ENGINE': 'transaction_hooks.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'), 'TEST': { # Test database cannot be :memory: for celery/inventory tests. @@ -45,11 +75,20 @@ if is_testing(sys.argv): }, } } - + MONGO_DB = 'system_tracking_test' # Celery AMQP configuration. -BROKER_URL = 'redis://redis/' +BROKER_URL = "amqp://{}:{}@{}/{}".format(os.environ.get("RABBITMQ_USER"), + os.environ.get("RABBITMQ_PASS"), + os.environ.get("RABBITMQ_HOST"), + os.environ.get("RABBITMQ_VHOST")) + +CHANNEL_LAYERS = { + 'default': {'BACKEND': 'asgi_amqp.AMQPChannelLayer', + 'ROUTING': 'awx.main.routing.channel_routing', + 'CONFIG': {'url': BROKER_URL}} +} # Mongo host configuration MONGO_HOST = NotImplemented @@ -75,7 +114,7 @@ SYSTEM_UUID = '00000000-0000-0000-0000-000000000000' # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. -TIME_ZONE = 'America/New_York' +TIME_ZONE = None # Language code for this installation.
All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html @@ -159,141 +198,13 @@ LOGGING['handlers']['syslog'] = { #LOGGING['loggers']['awx.main.signals']['propagate'] = True #LOGGING['loggers']['awx.main.permissions']['propagate'] = True +# Enable the following line to turn on database settings logging. +#LOGGING['loggers']['awx.conf']['level'] = 'DEBUG' + # Enable the following lines to turn on LDAP auth logging. #LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console'] #LOGGING['loggers']['django_auth_ldap']['level'] = 'DEBUG' -############################################################################### -# LDAP AUTHENTICATION SETTINGS -############################################################################### - -# Refer to django-auth-ldap docs for more details: -# http://pythonhosted.org/django-auth-ldap/authentication.html - -# Imports needed for LDAP configuration. -import ldap -from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion -from django_auth_ldap.config import ActiveDirectoryGroupType - -# LDAP server URI, such as "ldap://ldap.example.com:389" (non-SSL) or -# "ldaps://ldap.example.com:636" (SSL). LDAP authentication is disable if this -# parameter is empty. -AUTH_LDAP_SERVER_URI = '' - -# DN of user to bind for all search queries. Normally in the format -# "CN=Some User,OU=Users,DC=example,DC=com" but may also be specified as -# "DOMAIN\username" for Active Directory. -AUTH_LDAP_BIND_DN = '' - -# Password using to bind above user account. -AUTH_LDAP_BIND_PASSWORD = '' - -# Enable TLS when the connection is not using SSL. -AUTH_LDAP_START_TLS = False - -# Additional options to set for the LDAP connection. LDAP referrals are -# disabled by default (to prevent certain LDAP queries from hanging with AD). -AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP search query to find users. -AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'OU=Users,DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(sAMAccountName=%(user)s)', # Query -) - -# Alternative to user search, if user DNs are all of the same format. -#AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP to user atrributes (key is user attribute name, value is LDAP -# attribute name). -AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query to find groups. Does not support LDAPSearchUnion. -AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(objectClass=group)', # Query -) -# Type of group returned by the search above. Should be one of the types -# listed at: http://pythonhosted.org/django-auth-ldap/groups.html#types-of-groups -AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Group DN required to login. If specified, user must be a member of this -# group to login via LDAP. -#AUTH_LDAP_REQUIRE_GROUP = '' - -# Group DN denied from login. If specified, user will not be allowed to login -# if a member of this group. -#AUTH_LDAP_DENY_GROUP = '' - -# User profile flags updated from group membership (key is user attribute name, -# value is group DN). -AUTH_LDAP_USER_FLAGS_BY_GROUP = { - #'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Mapping between organization admins/users and LDAP groups. Keys are -# organization names (will be created if not present). 
Values are dictionaries -# of options for each organization's membership, where each can contain the -# following parameters: -# - remove: True/False. Defaults to False. Specifies the default for -# remove_admins or remove_users if those parameters aren't explicitly set. -# - admins: None, True/False, string or list/tuple of strings. -# If None, organization admins will not be updated. -# If True/False, all LDAP users will be added/removed as admins. -# If a string or list of strings, specifies the group DN(s). User will be -# added as an org admin if the user is a member of ANY of these groups. -# - remove_admins: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's admins. -# - users: None, True/False, string or list/tuple of strings. Same rules apply -# as for admins. -# - remove_users: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's users. -AUTH_LDAP_ORGANIZATION_MAP = { - #'Test Org': { - # 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - # 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - #}, - #'Test Org 2': { - # 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - # 'users': True, - #}, -} - -# Mapping between team members (users) and LDAP groups. Keys are team names -# (will be created if not present). Values are dictionaries of options for -# each team's membership, where each can contain the following parameters: -# - organization: string. The name of the organization to which the team -# belongs. The team will be created if the combination of organization and -# team name does not exist. The organization will first be created if it -# does not exist. -# - users: None, True/False, string or list/tuple of strings. -# If None, team members will not be updated. -# If True/False, all LDAP users will be added/removed as team members. -# If a string or list of strings, specifies the group DN(s). User will be -# added as a team member if the user is a member of ANY of these groups. -# - remove: True/False. Defaults to False. If True, a user who is not a member -# of the given groups will be removed from the team. -AUTH_LDAP_TEAM_MAP = { - 'My Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': True, - }, - 'Other Team': { - 'organization': 'Test Org 2', - 'users': 'CN=Other Users,CN=Users,DC=example,DC=com', - 'remove': False, - }, -} - ############################################################################### # SCM TEST SETTINGS ############################################################################### @@ -332,280 +243,6 @@ import getpass TEST_SSH_LOOPBACK_USERNAME = getpass.getuser() TEST_SSH_LOOPBACK_PASSWORD = '' -############################################################################### -# LDAP TEST SETTINGS -############################################################################### - -# LDAP connection and authentication settings for unit tests only. LDAP tests -# will be skipped if TEST_AUTH_LDAP_SERVER_URI is not configured. - -TEST_AUTH_LDAP_SERVER_URI = '' -TEST_AUTH_LDAP_BIND_DN = '' -TEST_AUTH_LDAP_BIND_PASSWORD = '' -TEST_AUTH_LDAP_START_TLS = False -TEST_AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP username/password for testing authentication. -TEST_AUTH_LDAP_USERNAME = '' -TEST_AUTH_LDAP_PASSWORD = '' - -# LDAP search query to find users. 
-TEST_AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'CN=Users,DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(sAMAccountName=%(user)s)', -) - -# Alternative to user search. -#TEST_AUTH_LDAP_USER_DN_TEMPLATE = 'sAMAccountName=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP attributes to user attributes. -TEST_AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query for finding groups. -TEST_AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(objectClass=group)', -) -# Type of group returned by the search above. -TEST_AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Test DNs for a group required to login. User should be a member of the first -# group, but not a member of the second. -TEST_AUTH_LDAP_REQUIRE_GROUP = 'CN=Domain Admins,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_REQUIRE_GROUP_FAIL = 'CN=Guest,CN=Users,DC=example,DC=com' - -# Test DNs for a group denied from login. User should not be a member of the -# first group, but should be a member of the second. -TEST_AUTH_LDAP_DENY_GROUP = 'CN=Guest,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_DENY_GROUP_FAIL = 'CN=Domain Admins,CN=Users,DC=example,DC=com' - -# User profile flags updated from group membership. Test user should be a -# member of the group. -TEST_AUTH_LDAP_USER_FLAGS_BY_GROUP = { - 'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Test mapping between organization admins/users and LDAP groups. -TEST_AUTH_LDAP_ORGANIZATION_MAP = { - 'Test Org': { - 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - }, - 'Test Org 2': { - 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': True, - }, -} -# Expected results from organization mapping. After login, should user be an -# admin/user in the given organization? -TEST_AUTH_LDAP_ORGANIZATION_MAP_RESULT = { - 'Test Org': {'admins': True, 'users': False}, - 'Test Org 2': {'admins': False, 'users': True}, -} - -# Second test mapping to test remove parameters. -TEST_AUTH_LDAP_ORGANIZATION_MAP_2 = { - 'Test Org': { - 'admins': 'CN=Domain Users,CN=Users,DC=example,DC=com', - 'users': True, - 'remove_admins': True, - 'remove_users': False, - }, - 'Test Org 2': { - 'admins': ['CN=Domain Admins,CN=Users,DC=example,DC=com', - 'CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': False, - 'remove': True, - }, -} - -# Expected results from second organization mapping. -TEST_AUTH_LDAP_ORGANIZATION_MAP_2_RESULT = { - 'Test Org': {'admins': False, 'users': True}, - 'Test Org 2': {'admins': True, 'users': False}, -} - -# Test mapping between team users and LDAP groups. -TEST_AUTH_LDAP_TEAM_MAP = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': True, - }, -} -# Expected results from team mapping. After login, should user be a member of -# the given team? -TEST_AUTH_LDAP_TEAM_MAP_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': True}, - 'Everyone Team': {'users': True}, -} - -# Second test mapping for teams to remove user. 
-TEST_AUTH_LDAP_TEAM_MAP_2 = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Administrators,CN=Builtin,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': False, - 'remove': False, - }, -} -# Expected results from second team mapping. After login, should user be a -# member of the given team? -TEST_AUTH_LDAP_TEAM_MAP_2_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': False}, - 'Everyone Team': {'users': True}, -} - -############################################################################### -# RADIUS AUTH SETTINGS -############################################################################### - -RADIUS_SERVER = '' -RADIUS_PORT = 1812 -RADIUS_SECRET = '' - -############################################################################### -# SOCIAL AUTH SETTINGS -############################################################################### - -SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' -SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' -#SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ['example.com'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS = {'hd': 'example.com'} - -SOCIAL_AUTH_GITHUB_KEY = '' -SOCIAL_AUTH_GITHUB_SECRET = '' - -SOCIAL_AUTH_GITHUB_ORG_KEY = '' -SOCIAL_AUTH_GITHUB_ORG_SECRET = '' -SOCIAL_AUTH_GITHUB_ORG_NAME = '' - -SOCIAL_AUTH_GITHUB_TEAM_KEY = '' -SOCIAL_AUTH_GITHUB_TEAM_SECRET = '' -SOCIAL_AUTH_GITHUB_TEAM_ID = '' - -SOCIAL_AUTH_SAML_SP_ENTITY_ID = '' -SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = '' -SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = '' -SOCIAL_AUTH_SAML_ORG_INFO = { - 'en-US': { - 'name': 'example', - 'displayname': 'Example', - 'url': 'http://www.example.com', - }, -} -SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_SUPPORT_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_ENABLED_IDPS = { - #'myidp': { - # 'entity_id': 'https://idp.example.com', - # 'url': 'https://myidp.example.com/sso', - # 'x509cert': '', - #}, - #'onelogin': { - # 'entity_id': 'https://app.onelogin.com/saml/metadata/123456', - # 'url': 'https://example.onelogin.com/trust/saml2/http-post/sso/123456', - # 'x509cert': '', - # 'attr_user_permanent_id': 'name_id', - # 'attr_first_name': 'User.FirstName', - # 'attr_last_name': 'User.LastName', - # 'attr_username': 'User.email', - # 'attr_email': 'User.email', - #}, -} - -SOCIAL_AUTH_ORGANIZATION_MAP = { - # Add all users to the default organization. 
- 'Default': { - 'users': True, - }, - #'Test Org': { - # 'admins': ['admin@example.com'], - # 'users': True, - #}, - #'Test Org 2': { - # 'admins': ['admin@example.com', re.compile(r'^tower-[^@]+*?@.*$], - # 'users': re.compile(r'^[^@].*?@example\.com$'), - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_SAML_ORGANIZATION_MAP = {} - -SOCIAL_AUTH_TEAM_MAP = { - #'My Team': { - # 'organization': 'Test Org', - # 'users': ['re.compile(r'^[^@]+?@test\.example\.com$')'], - # 'remove': True, - #}, - #'Other Team': { - # 'organization': 'Test Org 2', - # 'users': re.compile(r'^[^@]+?@test2\.example\.com$'), - # 'remove': False, - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP = {} -#SOCIAL_AUTH_SAML_TEAM_MAP = {} - -# Uncomment the line below (i.e. set SOCIAL_AUTH_USER_FIELDS to an empty list) -# to prevent new user accounts from being created. Only users who have -# previously logged in using social auth or have a user account with a matching -# email address will be able to login. - -#SOCIAL_AUTH_USER_FIELDS = [] - -# It is also possible to add custom functions to the social auth pipeline for -# more advanced organization and team mapping. Use at your own risk. - -#def custom_social_auth_pipeline_function(backend, details, user=None, *args, **kwargs): -# print 'custom:', backend, details, user, args, kwargs - -#SOCIAL_AUTH_PIPELINE += ( -# 'awx.settings.development.custom_social_auth_pipeline_function', -#) - ############################################################################### # INVENTORY IMPORT TEST SETTINGS ############################################################################### diff --git a/awx/settings/local_settings.py.example b/awx/settings/local_settings.py.example index cb85724366..2996a8a28e 100644 --- a/awx/settings/local_settings.py.example +++ b/awx/settings/local_settings.py.example @@ -48,7 +48,7 @@ if is_testing(sys.argv): MONGO_DB = 'system_tracking_test' # Celery AMQP configuration. -BROKER_URL = 'redis://localhost/' +BROKER_URL = 'amqp://guest:guest@localhost:5672' # Set True to enable additional logging from the job_event_callback plugin JOB_CALLBACK_DEBUG = False @@ -71,7 +71,7 @@ SYSTEM_UUID = '00000000-0000-0000-0000-000000000000' # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. -TIME_ZONE = 'America/New_York' +TIME_ZONE = None # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html @@ -156,141 +156,13 @@ LOGGING['handlers']['syslog'] = { #LOGGING['loggers']['awx.main.signals']['propagate'] = True #LOGGING['loggers']['awx.main.permissions']['propagate'] = True +# Enable the following line to turn on database settings logging. +#LOGGING['loggers']['awx.conf']['level'] = 'DEBUG' + # Enable the following lines to turn on LDAP auth logging. 
#LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console'] #LOGGING['loggers']['django_auth_ldap']['level'] = 'DEBUG' -############################################################################### -# LDAP AUTHENTICATION SETTINGS -############################################################################### - -# Refer to django-auth-ldap docs for more details: -# http://pythonhosted.org/django-auth-ldap/authentication.html - -# Imports needed for LDAP configuration. -import ldap -from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion -from django_auth_ldap.config import ActiveDirectoryGroupType - -# LDAP server URI, such as "ldap://ldap.example.com:389" (non-SSL) or -# "ldaps://ldap.example.com:636" (SSL). LDAP authentication is disable if this -# parameter is empty. -AUTH_LDAP_SERVER_URI = '' - -# DN of user to bind for all search queries. Normally in the format -# "CN=Some User,OU=Users,DC=example,DC=com" but may also be specified as -# "DOMAIN\username" for Active Directory. -AUTH_LDAP_BIND_DN = '' - -# Password using to bind above user account. -AUTH_LDAP_BIND_PASSWORD = '' - -# Enable TLS when the connection is not using SSL. -AUTH_LDAP_START_TLS = False - -# Additional options to set for the LDAP connection. LDAP referrals are -# disabled by default (to prevent certain LDAP queries from hanging with AD). -AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP search query to find users. -AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'OU=Users,DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(sAMAccountName=%(user)s)', # Query -) - -# Alternative to user search, if user DNs are all of the same format. -#AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP to user atrributes (key is user attribute name, value is LDAP -# attribute name). -AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query to find groups. Does not support LDAPSearchUnion. -AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(objectClass=group)', # Query -) -# Type of group returned by the search above. Should be one of the types -# listed at: http://pythonhosted.org/django-auth-ldap/groups.html#types-of-groups -AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Group DN required to login. If specified, user must be a member of this -# group to login via LDAP. -#AUTH_LDAP_REQUIRE_GROUP = '' - -# Group DN denied from login. If specified, user will not be allowed to login -# if a member of this group. -#AUTH_LDAP_DENY_GROUP = '' - -# User profile flags updated from group membership (key is user attribute name, -# value is group DN). -AUTH_LDAP_USER_FLAGS_BY_GROUP = { - #'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Mapping between organization admins/users and LDAP groups. Keys are -# organization names (will be created if not present). Values are dictionaries -# of options for each organization's membership, where each can contain the -# following parameters: -# - remove: True/False. Defaults to False. Specifies the default for -# remove_admins or remove_users if those parameters aren't explicitly set. -# - admins: None, True/False, string or list/tuple of strings. -# If None, organization admins will not be updated. -# If True/False, all LDAP users will be added/removed as admins. 
-# If a string or list of strings, specifies the group DN(s). User will be -# added as an org admin if the user is a member of ANY of these groups. -# - remove_admins: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's admins. -# - users: None, True/False, string or list/tuple of strings. Same rules apply -# as for admins. -# - remove_users: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's users. -AUTH_LDAP_ORGANIZATION_MAP = { - #'Test Org': { - # 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - # 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - #}, - #'Test Org 2': { - # 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - # 'users': True, - #}, -} - -# Mapping between team members (users) and LDAP groups. Keys are team names -# (will be created if not present). Values are dictionaries of options for -# each team's membership, where each can contain the following parameters: -# - organization: string. The name of the organization to which the team -# belongs. The team will be created if the combination of organization and -# team name does not exist. The organization will first be created if it -# does not exist. -# - users: None, True/False, string or list/tuple of strings. -# If None, team members will not be updated. -# If True/False, all LDAP users will be added/removed as team members. -# If a string or list of strings, specifies the group DN(s). User will be -# added as a team member if the user is a member of ANY of these groups. -# - remove: True/False. Defaults to False. If True, a user who is not a member -# of the given groups will be removed from the team. -AUTH_LDAP_TEAM_MAP = { - 'My Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': True, - }, - 'Other Team': { - 'organization': 'Test Org 2', - 'users': 'CN=Other Users,CN=Users,DC=example,DC=com', - 'remove': False, - }, -} - ############################################################################### # SCM TEST SETTINGS ############################################################################### @@ -329,280 +201,6 @@ import getpass TEST_SSH_LOOPBACK_USERNAME = getpass.getuser() TEST_SSH_LOOPBACK_PASSWORD = '' -############################################################################### -# LDAP TEST SETTINGS -############################################################################### - -# LDAP connection and authentication settings for unit tests only. LDAP tests -# will be skipped if TEST_AUTH_LDAP_SERVER_URI is not configured. - -TEST_AUTH_LDAP_SERVER_URI = '' -TEST_AUTH_LDAP_BIND_DN = '' -TEST_AUTH_LDAP_BIND_PASSWORD = '' -TEST_AUTH_LDAP_START_TLS = False -TEST_AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP username/password for testing authentication. -TEST_AUTH_LDAP_USERNAME = '' -TEST_AUTH_LDAP_PASSWORD = '' - -# LDAP search query to find users. -TEST_AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'CN=Users,DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(sAMAccountName=%(user)s)', -) - -# Alternative to user search. -#TEST_AUTH_LDAP_USER_DN_TEMPLATE = 'sAMAccountName=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP attributes to user attributes. -TEST_AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query for finding groups. 
-TEST_AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(objectClass=group)', -) -# Type of group returned by the search above. -TEST_AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Test DNs for a group required to login. User should be a member of the first -# group, but not a member of the second. -TEST_AUTH_LDAP_REQUIRE_GROUP = 'CN=Domain Admins,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_REQUIRE_GROUP_FAIL = 'CN=Guest,CN=Users,DC=example,DC=com' - -# Test DNs for a group denied from login. User should not be a member of the -# first group, but should be a member of the second. -TEST_AUTH_LDAP_DENY_GROUP = 'CN=Guest,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_DENY_GROUP_FAIL = 'CN=Domain Admins,CN=Users,DC=example,DC=com' - -# User profile flags updated from group membership. Test user should be a -# member of the group. -TEST_AUTH_LDAP_USER_FLAGS_BY_GROUP = { - 'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Test mapping between organization admins/users and LDAP groups. -TEST_AUTH_LDAP_ORGANIZATION_MAP = { - 'Test Org': { - 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - }, - 'Test Org 2': { - 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': True, - }, -} -# Expected results from organization mapping. After login, should user be an -# admin/user in the given organization? -TEST_AUTH_LDAP_ORGANIZATION_MAP_RESULT = { - 'Test Org': {'admins': True, 'users': False}, - 'Test Org 2': {'admins': False, 'users': True}, -} - -# Second test mapping to test remove parameters. -TEST_AUTH_LDAP_ORGANIZATION_MAP_2 = { - 'Test Org': { - 'admins': 'CN=Domain Users,CN=Users,DC=example,DC=com', - 'users': True, - 'remove_admins': True, - 'remove_users': False, - }, - 'Test Org 2': { - 'admins': ['CN=Domain Admins,CN=Users,DC=example,DC=com', - 'CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': False, - 'remove': True, - }, -} - -# Expected results from second organization mapping. -TEST_AUTH_LDAP_ORGANIZATION_MAP_2_RESULT = { - 'Test Org': {'admins': False, 'users': True}, - 'Test Org 2': {'admins': True, 'users': False}, -} - -# Test mapping between team users and LDAP groups. -TEST_AUTH_LDAP_TEAM_MAP = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': True, - }, -} -# Expected results from team mapping. After login, should user be a member of -# the given team? -TEST_AUTH_LDAP_TEAM_MAP_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': True}, - 'Everyone Team': {'users': True}, -} - -# Second test mapping for teams to remove user. -TEST_AUTH_LDAP_TEAM_MAP_2 = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Administrators,CN=Builtin,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': False, - 'remove': False, - }, -} -# Expected results from second team mapping. After login, should user be a -# member of the given team? 
-TEST_AUTH_LDAP_TEAM_MAP_2_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': False}, - 'Everyone Team': {'users': True}, -} - -############################################################################### -# RADIUS AUTH SETTINGS -############################################################################### - -RADIUS_SERVER = '' -RADIUS_PORT = 1812 -RADIUS_SECRET = '' - -############################################################################### -# SOCIAL AUTH SETTINGS -############################################################################### - -SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' -SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' -#SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ['example.com'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS = {'hd': 'example.com'} - -SOCIAL_AUTH_GITHUB_KEY = '' -SOCIAL_AUTH_GITHUB_SECRET = '' - -SOCIAL_AUTH_GITHUB_ORG_KEY = '' -SOCIAL_AUTH_GITHUB_ORG_SECRET = '' -SOCIAL_AUTH_GITHUB_ORG_NAME = '' - -SOCIAL_AUTH_GITHUB_TEAM_KEY = '' -SOCIAL_AUTH_GITHUB_TEAM_SECRET = '' -SOCIAL_AUTH_GITHUB_TEAM_ID = '' - -SOCIAL_AUTH_SAML_SP_ENTITY_ID = '' -SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = '' -SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = '' -SOCIAL_AUTH_SAML_ORG_INFO = { - 'en-US': { - 'name': 'example', - 'displayname': 'Example', - 'url': 'http://www.example.com', - }, -} -SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_SUPPORT_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_ENABLED_IDPS = { - #'myidp': { - # 'entity_id': 'https://idp.example.com', - # 'url': 'https://myidp.example.com/sso', - # 'x509cert': '', - #}, - #'onelogin': { - # 'entity_id': 'https://app.onelogin.com/saml/metadata/123456', - # 'url': 'https://example.onelogin.com/trust/saml2/http-post/sso/123456', - # 'x509cert': '', - # 'attr_user_permanent_id': 'name_id', - # 'attr_first_name': 'User.FirstName', - # 'attr_last_name': 'User.LastName', - # 'attr_username': 'User.email', - # 'attr_email': 'User.email', - #}, -} - -SOCIAL_AUTH_ORGANIZATION_MAP = { - # Add all users to the default organization. - 'Default': { - 'users': True, - }, - #'Test Org': { - # 'admins': ['admin@example.com'], - # 'users': True, - #}, - #'Test Org 2': { - # 'admins': ['admin@example.com', re.compile(r'^tower-[^@]+*?@.*$], - # 'users': re.compile(r'^[^@].*?@example\.com$'), - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_SAML_ORGANIZATION_MAP = {} - -SOCIAL_AUTH_TEAM_MAP = { - #'My Team': { - # 'organization': 'Test Org', - # 'users': ['re.compile(r'^[^@]+?@test\.example\.com$')'], - # 'remove': True, - #}, - #'Other Team': { - # 'organization': 'Test Org 2', - # 'users': re.compile(r'^[^@]+?@test2\.example\.com$'), - # 'remove': False, - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP = {} -#SOCIAL_AUTH_SAML_TEAM_MAP = {} - -# Uncomment the line below (i.e. set SOCIAL_AUTH_USER_FIELDS to an empty list) -# to prevent new user accounts from being created. Only users who have -# previously logged in using social auth or have a user account with a matching -# email address will be able to login. 
- -#SOCIAL_AUTH_USER_FIELDS = [] - -# It is also possible to add custom functions to the social auth pipeline for -# more advanced organization and team mapping. Use at your own risk. - -#def custom_social_auth_pipeline_function(backend, details, user=None, *args, **kwargs): -# print 'custom:', backend, details, user, args, kwargs - -#SOCIAL_AUTH_PIPELINE += ( -# 'awx.settings.development.custom_social_auth_pipeline_function', -#) - ############################################################################### # INVENTORY IMPORT TEST SETTINGS ############################################################################### diff --git a/awx/settings/postprocess.py b/awx/settings/postprocess.py deleted file mode 100644 index fd54fd5050..0000000000 --- a/awx/settings/postprocess.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -# flake8: noqa - -# Runs after all configuration files have been loaded to fix/check/update -# settings as needed. - -if not AUTH_LDAP_SERVER_URI: - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'awx.sso.backends.LDAPBackend'] - -if not RADIUS_SERVER: - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'awx.sso.backends.RADIUSBackend'] - -if not all([SOCIAL_AUTH_GOOGLE_OAUTH2_KEY, SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.google.GoogleOAuth2'] - -if not all([SOCIAL_AUTH_GITHUB_KEY, SOCIAL_AUTH_GITHUB_SECRET]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.github.GithubOAuth2'] - -if not all([SOCIAL_AUTH_GITHUB_ORG_KEY, SOCIAL_AUTH_GITHUB_ORG_SECRET, SOCIAL_AUTH_GITHUB_ORG_NAME]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.github.GithubOrganizationOAuth2'] - -if not all([SOCIAL_AUTH_GITHUB_TEAM_KEY, SOCIAL_AUTH_GITHUB_TEAM_SECRET, SOCIAL_AUTH_GITHUB_TEAM_ID]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.github.GithubTeamOAuth2'] - -if not all([SOCIAL_AUTH_SAML_SP_ENTITY_ID, SOCIAL_AUTH_SAML_SP_PUBLIC_CERT, - SOCIAL_AUTH_SAML_SP_PRIVATE_KEY, SOCIAL_AUTH_SAML_ORG_INFO, - SOCIAL_AUTH_SAML_TECHNICAL_CONTACT, SOCIAL_AUTH_SAML_SUPPORT_CONTACT, - SOCIAL_AUTH_SAML_ENABLED_IDPS]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'awx.sso.backends.SAMLAuth'] - -if not AUTH_BASIC_ENABLED: - REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = [x for x in REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] if x != 'awx.api.authentication.LoggedBasicAuthentication'] diff --git a/awx/settings/production.py b/awx/settings/production.py index 6efe6c397d..19afcab9c9 100644 --- a/awx/settings/production.py +++ b/awx/settings/production.py @@ -4,6 +4,7 @@ # Production settings for AWX project. 
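The postprocess.py module deleted above used to prune AUTHENTICATION_BACKENDS after every settings file had loaded; with this change that responsibility moves to the DB-aware AuthenticationBackendsField registered in awx/sso/conf.py further down. A rough sketch of the pruning idiom postprocess.py implemented, with an illustrative (not exhaustive) requirements table drawn from the deleted file:

BACKEND_REQUIREMENTS = {
    'awx.sso.backends.LDAPBackend': ['AUTH_LDAP_SERVER_URI'],
    'awx.sso.backends.RADIUSBackend': ['RADIUS_SERVER'],
    'social.backends.google.GoogleOAuth2': [
        'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
        'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET',
    ],
}

def prune_backends(backends, configured):
    # Keep a backend only when every setting it requires is non-empty;
    # backends with no listed requirements are always kept.
    return [
        backend for backend in backends
        if all(configured.get(name) for name in
               BACKEND_REQUIREMENTS.get(backend, []))
    ]

print(prune_backends(
    ['awx.sso.backends.LDAPBackend',
     'django.contrib.auth.backends.ModelBackend'],
    {'AUTH_LDAP_SERVER_URI': ''}))
# -> ['django.contrib.auth.backends.ModelBackend']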
# Python +import copy import errno import sys import traceback @@ -51,12 +52,31 @@ TOWER_VENV_PATH = "/var/lib/awx/venv/tower" LOGGING['handlers']['tower_warnings']['filename'] = '/var/log/tower/tower.log' LOGGING['handlers']['callback_receiver']['filename'] = '/var/log/tower/callback_receiver.log' -LOGGING['handlers']['socketio_service']['filename'] = '/var/log/tower/socketio_service.log' LOGGING['handlers']['task_system']['filename'] = '/var/log/tower/task_system.log' LOGGING['handlers']['fact_receiver']['filename'] = '/var/log/tower/fact_receiver.log' LOGGING['handlers']['system_tracking_migrations']['filename'] = '/var/log/tower/tower_system_tracking_migrations.log' LOGGING['handlers']['rbac_migrations']['filename'] = '/var/log/tower/tower_rbac_migrations.log' +# Supervisor service name dictionary used for programmatic restart +SERVICE_NAME_DICT = { + "beat": "awx-celeryd-beat", + "celery": "awx-celeryd", + "callback": "awx-callback-receiver", + "channels": "awx-channels-worker", + "uwsgi": "awx-uwsgi", + "daphne": "awx-daphne", + "fact": "awx-fact-cache-receiver"} +# Used for sending commands in automatic restart +UWSGI_FIFO_LOCATION = '/var/lib/awx/awxfifo' + +# Store a snapshot of default settings at this point before loading any +# customizable config files. +DEFAULTS_SNAPSHOT = {} +this_module = sys.modules[__name__] +for setting in dir(this_module): + if setting == setting.upper(): + DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting)) + # Load settings from any .py files in the global conf.d directory specified in # the environment, defaulting to /etc/tower/conf.d/. settings_dir = os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/') @@ -71,7 +91,6 @@ settings_file = os.environ.get('AWX_SETTINGS_FILE', # /etc/tower/conf.d/*.py.
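The DEFAULTS_SNAPSHOT loop above (and its twin in development.py) deep-copies every UPPERCASE module attribute before the /etc/tower/ configuration files can mutate them, so database-backed settings can later be compared against the shipped defaults. The idiom in isolation, as a runnable sketch with made-up setting names:

import copy
import sys

DEBUG = False                    # example settings; any UPPERCASE name qualifies
ALLOWED_HOSTS = ['localhost']
helper_value = object()          # lowercase names are skipped

DEFAULTS_SNAPSHOT = {}
this_module = sys.modules[__name__]
for setting in dir(this_module):
    if setting == setting.upper():
        DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting))

ALLOWED_HOSTS.append('example.com')        # later mutation by a config file...
print(DEFAULTS_SNAPSHOT['ALLOWED_HOSTS'])  # ['localhost'] -- snapshot unaffected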
try: include(settings_file, optional(settings_files), scope=locals()) - include('postprocess.py', scope=locals()) except ImportError: traceback.print_exc() sys.exit(1) diff --git a/awx/sso/__init__.py b/awx/sso/__init__.py index 6596e4bf78..aa65d65a11 100644 --- a/awx/sso/__init__.py +++ b/awx/sso/__init__.py @@ -11,6 +11,7 @@ xmlsec_initialized = False import dm.xmlsec.binding # noqa original_xmlsec_initialize = dm.xmlsec.binding.initialize + def xmlsec_initialize(*args, **kwargs): global xmlsec_init_lock, xmlsec_initialized, original_xmlsec_initialize with xmlsec_init_lock: @@ -18,4 +19,8 @@ def xmlsec_initialize(*args, **kwargs): original_xmlsec_initialize(*args, **kwargs) xmlsec_initialized = True + dm.xmlsec.binding.initialize = xmlsec_initialize + + +default_app_config = 'awx.sso.apps.SSOConfig' diff --git a/awx/sso/apps.py b/awx/sso/apps.py new file mode 100644 index 0000000000..45c00e871b --- /dev/null +++ b/awx/sso/apps.py @@ -0,0 +1,9 @@ +# Django +from django.apps import AppConfig +from django.utils.translation import ugettext_lazy as _ + + +class SSOConfig(AppConfig): + + name = 'awx.sso' + verbose_name = _('Single Sign-On') diff --git a/awx/sso/backends.py b/awx/sso/backends.py index 91999034d5..9a37ea8bf1 100644 --- a/awx/sso/backends.py +++ b/awx/sso/backends.py @@ -3,11 +3,16 @@ # Python import logging +import uuid + +import ldap # Django from django.dispatch import receiver from django.contrib.auth.models import User from django.conf import settings as django_settings +from django.core.signals import setting_changed +from django.core.exceptions import ImproperlyConfigured # django-auth-ldap from django_auth_ldap.backend import LDAPSettings as BaseLDAPSettings @@ -23,7 +28,7 @@ from social.backends.saml import SAMLAuth as BaseSAMLAuth from social.backends.saml import SAMLIdentityProvider as BaseSAMLIdentityProvider # Ansible Tower -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled logger = logging.getLogger('awx.sso.backends') @@ -35,6 +40,16 @@ class LDAPSettings(BaseLDAPSettings): 'TEAM_MAP': {}, }.items()) + def __init__(self, prefix='AUTH_LDAP_', defaults={}): + super(LDAPSettings, self).__init__(prefix, defaults) + + # If a DB-backed setting is specified that wipes out the + # OPT_NETWORK_TIMEOUT, fall back to a sane default + if ldap.OPT_NETWORK_TIMEOUT not in getattr(self, 'CONNECTION_OPTIONS', {}): + options = getattr(self, 'CONNECTION_OPTIONS', {}) + options[ldap.OPT_NETWORK_TIMEOUT] = 30 + self.CONNECTION_OPTIONS = options + class LDAPBackend(BaseLDAPBackend): ''' @@ -43,6 +58,20 @@ class LDAPBackend(BaseLDAPBackend): settings_prefix = 'AUTH_LDAP_' + def __init__(self, *args, **kwargs): + self._dispatch_uid = uuid.uuid4() + super(LDAPBackend, self).__init__(*args, **kwargs) + setting_changed.connect(self._on_setting_changed, dispatch_uid=self._dispatch_uid) + + def __del__(self): + setting_changed.disconnect(dispatch_uid=self._dispatch_uid) + + def _on_setting_changed(self, sender, **kwargs): + # If any AUTH_LDAP_* setting changes, force settings to be reloaded for + # this backend instance. 
+ if kwargs.get('setting', '').startswith(self.settings_prefix): + self._settings = None + def _get_settings(self): if self._settings is None: self._settings = LDAPSettings(self.settings_prefix) @@ -59,7 +88,11 @@ class LDAPBackend(BaseLDAPBackend): if not feature_enabled('ldap'): logger.error("Unable to authenticate, license does not support LDAP authentication") return None - return super(LDAPBackend, self).authenticate(username, password) + try: + return super(LDAPBackend, self).authenticate(username, password) + except ImproperlyConfigured: + logger.error("Unable to authenticate, LDAP is improperly configured") + return None def get_user(self, user_id): if not self.settings.SERVER_URI: diff --git a/awx/sso/conf.py b/awx/sso/conf.py new file mode 100644 index 0000000000..fa4d70f3f8 --- /dev/null +++ b/awx/sso/conf.py @@ -0,0 +1,1096 @@ +# Python +import collections +import urlparse + +# Django +from django.conf import settings +from django.core.urlresolvers import reverse +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx.conf import register +from awx.sso import fields +from awx.main.validators import validate_private_key, validate_certificate +from awx.sso.validators import * # noqa + + +class SocialAuthCallbackURL(object): + + def __init__(self, provider): + self.provider = provider + + def __call__(self): + path = reverse('social:complete', args=(self.provider,)) + return urlparse.urljoin(settings.TOWER_URL_BASE, path) + + +SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT = _('''\ +Mapping to organization admins/users from social auth accounts. This setting +controls which users are placed into which Tower organizations based on +their username and email address. Dictionary keys are organization names. +Organizations will be created if not present, if the license allows for +multiple organizations; otherwise the single default organization is used +regardless of the key. Values are dictionaries defining the options for +each organization's membership. For each organization it is possible to +specify which users are automatically users of the organization and also +which users can administer the organization. + +- admins: None, True/False, string or list of strings. + If None, organization admins will not be updated. + If True, all users using social auth will automatically be added as admins + of the organization. + If False, no social auth users will be automatically added as admins of + the organization. + If a string or list of strings, specifies the usernames and emails for + users who will be added to the organization. Strings in the format + "/regex/flags" will be interpreted as JavaScript regular expressions and + may also be used instead of string literals; only "i" and "m" are supported + for flags. +- remove_admins: True/False. Defaults to True. + If True, a user who does not match will be removed from the organization's + administrative list. +- users: None, True/False, string or list of strings. Same rules apply as for + admins. +- remove_users: True/False. Defaults to True.
Same rules apply as for + remove_admins.\ +''') + +# FIXME: /regex/gim (flags) + +SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER = collections.OrderedDict([ + ('Default', collections.OrderedDict([ + ('users', True), + ])), + ('Test Org', collections.OrderedDict([ + ('admins', ['admin@example.com']), + ('users', True), + ])), + ('Test Org 2', collections.OrderedDict([ + ('admins', ['admin@example.com', r'/^tower-[^@]+?@.*$/']), + ('remove_admins', True), + ('users', r'/^[^@].*?@example\.com$/i'), + ('remove_users', True), + ])), +]) + +SOCIAL_AUTH_TEAM_MAP_HELP_TEXT = _('''\ +Mapping of team members (users) from social auth accounts. Keys are team +names (will be created if not present). Values are dictionaries of options +for each team's membership, where each can contain the following parameters: + +- organization: string. The name of the organization to which the team + belongs. The team will be created if the combination of organization and + team name does not exist. The organization will first be created if it + does not exist. If the license does not allow for multiple organizations, + the team will always be assigned to the single default organization. +- users: None, True/False, string or list of strings. + If None, team members will not be updated. + If True/False, all social auth users will be added/removed as team + members. + If a string or list of strings, specifies expressions used to match users. + User will be added as a team member if the username or email matches. + Strings in the format "/regex/flags" will be interpreted as JavaScript + regular expressions and may also be used instead of string literals; only "i" + and "m" are supported for flags. +- remove: True/False. Defaults to True. If True, a user who does not match + the rules above will be removed from the team.\ +''') + +SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER = collections.OrderedDict([ + ('My Team', collections.OrderedDict([ + ('organization', 'Test Org'), + ('users', [r'/^[^@]+?@test\.example\.com$/']), + ('remove', True), + ])), + ('Other Team', collections.OrderedDict([ + ('organization', 'Test Org 2'), + ('users', r'/^[^@]+?@test2\.example\.com$/i'), + ('remove', False), + ])), +]) + +############################################################################### +# AUTHENTICATION BACKENDS DYNAMIC SETTING +############################################################################### + +register( + 'AUTHENTICATION_BACKENDS', + field_class=fields.AuthenticationBackendsField, + label=_('Authentication Backends'), + help_text=_('List of authentication backends that are enabled based on ' + 'license features and other authentication settings.'), + read_only=True, + depends_on=fields.AuthenticationBackendsField.get_all_required_settings(), + category=_('Authentication'), + category_slug='authentication', +) + +register( + 'SOCIAL_AUTH_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('Social Auth Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('Authentication'), + category_slug='authentication', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('Social Auth Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('Authentication'), + category_slug='authentication', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_USER_FIELDS',
field_class=fields.StringListField, + allow_null=True, + default=None, + label=_('Social Auth User Fields'), + help_text=_('When set to an empty list `[]`, this setting prevents new user ' + 'accounts from being created. Only users who have previously ' + 'logged in using social auth or have a user account with a ' + 'matching email address will be able to login.'), + category=_('Authentication'), + category_slug='authentication', + placeholder=['username', 'email'], +) + +############################################################################### +# LDAP AUTHENTICATION SETTINGS +############################################################################### + +register( + 'AUTH_LDAP_SERVER_URI', + field_class=fields.LDAPServerURIField, + allow_blank=True, + default='', + label=_('LDAP Server URI'), + help_text=_('URI to connect to LDAP server, such as "ldap://ldap.example.com:389" ' + '(non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP ' + 'servers may be specified by separating with spaces or commas. LDAP ' + 'authentication is disabled if this parameter is empty.'), + category=_('LDAP'), + category_slug='ldap', + placeholder='ldaps://ldap.example.com:636', + feature_required='ldap', +) + +register( + 'AUTH_LDAP_BIND_DN', + field_class=fields.CharField, + allow_blank=True, + default='', + validators=[validate_ldap_bind_dn], + label=_('LDAP Bind DN'), + help_text=_('DN (Distinguished Name) of user to bind for all search queries. ' + 'Normally in the format "CN=Some User,OU=Users,DC=example,DC=com" ' + 'but may also be specified as "DOMAIN\username" for Active Directory. ' + 'This is the system user account we will use to query LDAP ' + 'for other user information.'), + category=_('LDAP'), + category_slug='ldap', + feature_required='ldap', +) + +register( + 'AUTH_LDAP_BIND_PASSWORD', + field_class=fields.CharField, + allow_blank=True, + default='', + label=_('LDAP Bind Password'), + help_text=_('Password used to bind LDAP user account.'), + category=_('LDAP'), + category_slug='ldap', + feature_required='ldap', + encrypted=True, +) + +register( + 'AUTH_LDAP_START_TLS', + field_class=fields.BooleanField, + default=False, + label=_('LDAP Start TLS'), + help_text=_('Whether to enable TLS when the LDAP connection is not using SSL.'), + category=_('LDAP'), + category_slug='ldap', + feature_required='ldap', +) + +register( + 'AUTH_LDAP_CONNECTION_OPTIONS', + field_class=fields.LDAPConnectionOptionsField, + default={'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}, + label=_('LDAP Connection Options'), + help_text=_('Additional options to set for the LDAP connection. LDAP ' + 'referrals are disabled by default (to prevent certain LDAP ' + 'queries from hanging with AD). Option names should be strings ' + '(e.g. "OPT_REFERRALS"). Refer to ' + 'https://www.python-ldap.org/doc/html/ldap.html#options for ' + 'possible options and values that can be set.'), + category=_('LDAP'), + category_slug='ldap', + placeholder=collections.OrderedDict([ + ('OPT_REFERRALS', 0), + ('OPT_NETWORK_TIMEOUT', 30) + ]), + feature_required='ldap', +) + +register( + 'AUTH_LDAP_USER_SEARCH', + field_class=fields.LDAPSearchUnionField, + default=[], + label=_('LDAP User Search'), + help_text=_('LDAP search query to find users. Any user that matches the given ' + 'pattern will be able to login to Tower. The user should also be ' + 'mapped into a Tower organization (as defined in the ' + 'AUTH_LDAP_ORGANIZATION_MAP setting).
If multiple search queries ' + 'need to be supported, use of "LDAPSearchUnion" is possible. See ' + 'python-ldap documentation as linked at the top of this section.'), + category=_('LDAP'), + category_slug='ldap', + placeholder=( + 'OU=Users,DC=example,DC=com', + 'SCOPE_SUBTREE', + '(sAMAccountName=%(user)s)', + ), + feature_required='ldap', +) + +register( + 'AUTH_LDAP_USER_DN_TEMPLATE', + field_class=fields.LDAPDNWithUserField, + allow_blank=True, + allow_null=True, + default=None, + label=_('LDAP User DN Template'), + help_text=_('Alternative to user search, if user DNs are all of the same ' + 'format. This approach will be more efficient for user lookups than ' + 'searching if it is usable in your organizational environment. If ' + 'this setting has a value it will be used instead of ' + 'AUTH_LDAP_USER_SEARCH.'), + category=_('LDAP'), + category_slug='ldap', + placeholder='uid=%(user)s,OU=Users,DC=example,DC=com', + feature_required='ldap', +) + +register( + 'AUTH_LDAP_USER_ATTR_MAP', + field_class=fields.LDAPUserAttrMapField, + default={}, + label=_('LDAP User Attribute Map'), + help_text=_('Mapping of LDAP user schema to Tower API user attributes (key is ' + 'user attribute name, value is LDAP attribute name). The default ' + 'setting is valid for Active Directory, but users with other LDAP ' + 'configurations may need to change the values (not the keys) of ' + 'the dictionary/hash-table.'), + category=_('LDAP'), + category_slug='ldap', + placeholder=collections.OrderedDict([ + ('first_name', 'givenName'), + ('last_name', 'sn'), + ('email', 'mail'), + ]), + feature_required='ldap', +) + +register( + 'AUTH_LDAP_GROUP_SEARCH', + field_class=fields.LDAPSearchField, + default=[], + label=_('LDAP Group Search'), + help_text=_('Users in Tower are mapped to organizations based on their ' + 'membership in LDAP groups. This setting defines the LDAP search ' + 'query to find groups. Note that this, unlike the user search ' + 'above, does not support LDAPSearchUnion.'), + category=_('LDAP'), + category_slug='ldap', + placeholder=( + 'DC=example,DC=com', + 'SCOPE_SUBTREE', + '(objectClass=group)', + ), + feature_required='ldap', +) + +register( + 'AUTH_LDAP_GROUP_TYPE', + field_class=fields.LDAPGroupTypeField, + label=_('LDAP Group Type'), + help_text=_('The group type may need to be changed based on the type of the ' + 'LDAP server. Values are listed at: ' + 'http://pythonhosted.org/django-auth-ldap/groups.html#types-of-groups'), + category=_('LDAP'), + category_slug='ldap', + feature_required='ldap', + default='MemberDNGroupType', +) + +register( + 'AUTH_LDAP_REQUIRE_GROUP', + field_class=fields.LDAPDNField, + allow_blank=True, + allow_null=True, + default=None, + label=_('LDAP Require Group'), + help_text=_('Group DN required to login. If specified, user must be a member ' + 'of this group to login via LDAP. If not set, everyone in LDAP ' + 'that matches the user search will be able to login via Tower. ' + 'Only one require group is supported.'), + category=_('LDAP'), + category_slug='ldap', + placeholder='CN=Tower Users,OU=Users,DC=example,DC=com', + feature_required='ldap', +) + +register( + 'AUTH_LDAP_DENY_GROUP', + field_class=fields.LDAPDNField, + allow_blank=True, + allow_null=True, + default=None, + label=_('LDAP Deny Group'), + help_text=_('Group DN denied from login. If specified, user will not be ' + 'allowed to login if a member of this group.
Only one deny group ' + 'is supported.'), + category=_('LDAP'), + category_slug='ldap', + placeholder='CN=Disabled Users,OU=Users,DC=example,DC=com', + feature_required='ldap', +) + +register( + 'AUTH_LDAP_USER_FLAGS_BY_GROUP', + field_class=fields.LDAPUserFlagsField, + default={}, + label=_('LDAP User Flags By Group'), + help_text=_('User profile flags updated from group membership (key is user ' + 'attribute name, value is group DN). These are boolean fields ' + 'that are matched based on whether the user is a member of the ' + 'given group. So far only is_superuser is settable via this ' + 'method. This flag is set both true and false at login time ' + 'based on current LDAP settings.'), + category=_('LDAP'), + category_slug='ldap', + placeholder=collections.OrderedDict([ + ('is_superuser', 'CN=Domain Admins,CN=Users,DC=example,DC=com'), + ]), + feature_required='ldap', +) + +register( + 'AUTH_LDAP_ORGANIZATION_MAP', + field_class=fields.LDAPOrganizationMapField, + default={}, + label=_('LDAP Organization Map'), + help_text=_('Mapping between organization admins/users and LDAP groups. This ' + 'controls what users are placed into what Tower organizations ' + 'relative to their LDAP group memberships. Keys are organization ' + 'names. Organizations will be created if not present. Values are ' + 'dictionaries defining the options for each organization\'s ' + 'membership. For each organization it is possible to specify ' + 'what groups are automatically users of the organization and also ' + 'what groups can administer the organization.\n\n' + ' - admins: None, True/False, string or list of strings.\n' + ' If None, organization admins will not be updated based on ' + 'LDAP values.\n' + ' If True, all users in LDAP will automatically be added as ' + 'admins of the organization.\n' + ' If False, no LDAP users will be automatically added as admins ' + 'of the organization.\n' + ' If a string or list of strings, specifies the group DN(s). ' + 'Users will be added as admins of the organization if they are ' + 'members of ANY of these groups.\n' + ' - remove_admins: True/False. Defaults to True.\n' + ' If True, a user who is not a member of the given groups will ' + 'be removed from the organization\'s administrative list.\n' + ' - users: None, True/False, string or list of strings. ' + 'Same rules apply as for admins.\n' + ' - remove_users: True/False. Defaults to True. Same rules apply ' + 'as for remove_admins.'), + category=_('LDAP'), + category_slug='ldap', + placeholder=collections.OrderedDict([ + ('Test Org', collections.OrderedDict([ + ('admins', 'CN=Domain Admins,CN=Users,DC=example,DC=com'), + ('users', ['CN=Domain Users,CN=Users,DC=example,DC=com']), + ('remove_users', True), + ('remove_admins', True), + ])), + ('Test Org 2', collections.OrderedDict([ + ('admins', 'CN=Administrators,CN=Builtin,DC=example,DC=com'), + ('users', True), + ('remove_users', True), + ('remove_admins', True), + ])), + ]), + feature_required='ldap', +) + +register( + 'AUTH_LDAP_TEAM_MAP', + field_class=fields.LDAPTeamMapField, + default={}, + label=_('LDAP Team Map'), + help_text=_('Mapping between team members (users) and LDAP groups. Keys are ' + 'team names (will be created if not present). Values are ' + 'dictionaries of options for each team\'s membership, where each ' + 'can contain the following parameters:\n\n' + ' - organization: string. The name of the organization to which ' + 'the team belongs. The team will be created if the combination of ' + 'organization and team name does not exist.
The organization will ' + 'first be created if it does not exist.\n' + ' - users: None, True/False, string or list of strings.\n' + ' If None, team members will not be updated.\n' + ' If True/False, all LDAP users will be added/removed as team ' + 'members.\n' + ' If a string or list of strings, specifies the group DN(s). ' + 'User will be added as a team member if the user is a member of ' + 'ANY of these groups.\n' + '- remove: True/False. Defaults to True. If True, a user who is ' + 'not a member of the given groups will be removed from the team.'), + category=_('LDAP'), + category_slug='ldap', + placeholder=collections.OrderedDict([ + ('My Team', collections.OrderedDict([ + ('organization', 'Test Org'), + ('users', ['CN=Domain Users,CN=Users,DC=example,DC=com']), + ('remove', True), + ])), + ('Other Team', collections.OrderedDict([ + ('organization', 'Test Org 2'), + ('users', 'CN=Other Users,CN=Users,DC=example,DC=com'), + ('remove', False), + ])), + ]), + feature_required='ldap', +) + +############################################################################### +# RADIUS AUTHENTICATION SETTINGS +############################################################################### + +register( + 'RADIUS_SERVER', + field_class=fields.CharField, + allow_blank=True, + default='', + label=_('RADIUS Server'), + help_text=_('Hostname/IP of RADIUS server. RADIUS authentication will be ' + 'disabled if this setting is empty.'), + category=_('RADIUS'), + category_slug='radius', + placeholder='radius.example.com', + feature_required='enterprise_auth', +) + +register( + 'RADIUS_PORT', + field_class=fields.IntegerField, + min_value=1, + max_value=65535, + default=1812, + label=_('RADIUS Port'), + help_text=_('Port of RADIUS server.'), + category=_('RADIUS'), + category_slug='radius', + feature_required='enterprise_auth', +) + +register( + 'RADIUS_SECRET', + field_class=fields.RADIUSSecretField, + allow_blank=True, + default='', + label=_('RADIUS Secret'), + help_text=_('Shared secret for authenticating to RADIUS server.'), + category=_('RADIUS'), + category_slug='radius', + feature_required='enterprise_auth', + encrypted=True, +) + +############################################################################### +# GOOGLE OAUTH2 AUTHENTICATION SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('google-oauth2'), + label=_('Google OAuth2 Callback URL'), + help_text=_('Create a project at https://console.developers.google.com/ to ' + 'obtain an OAuth2 key and secret for a web application. Ensure ' + 'that the Google+ API is enabled. 
Provide this URL as the ' + 'callback URL for your application.'), + category=_('Google OAuth2'), + category_slug='google-oauth2', + depends_on=['TOWER_URL_BASE'], +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', + field_class=fields.CharField, + allow_blank=True, + label=_('Google OAuth2 Key'), + help_text=_('The OAuth2 key from your web application at https://console.developers.google.com/.'), + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder='528620852399-gm2dt4hrl2tsj67fqamk09k1e0ad6gd8.apps.googleusercontent.com', +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET', + field_class=fields.CharField, + allow_blank=True, + label=_('Google OAuth2 Secret'), + help_text=_('The OAuth2 secret from your web application at https://console.developers.google.com/.'), + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder='q2fMVCmEregbg-drvebPp8OW', + encrypted=True, +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS', + field_class=fields.StringListField, + default=[], + label=_('Google OAuth2 Whitelisted Domains'), + help_text=_('Update this setting to restrict the domains who are allowed to ' + 'login using Google OAuth2.'), + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder=['example.com'], +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS', + field_class=fields.DictField, + default={}, + label=_('Google OAuth2 Extra Arguments'), + help_text=_('Extra arguments for Google OAuth2 login. When only allowing a ' + 'single domain to authenticate, set to `{"hd": "yourdomain.com"}` ' + 'and Google will not display any other accounts even if the user ' + 'is logged in with multiple Google accounts.'), + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder={'hd': 'example.com'}, +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('Google OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('Google OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# GITHUB OAUTH2 AUTHENTICATION SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_GITHUB_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('github'), + label=_('GitHub OAuth2 Callback URL'), + help_text=_('Create a developer application at ' + 'https://github.com/settings/developers to obtain an OAuth2 ' + 'key (Client ID) and secret (Client Secret). 
Provide this URL ' + 'as the callback URL for your application.'), + category=_('GitHub OAuth2'), + category_slug='github', + depends_on=['TOWER_URL_BASE'], +) + +register( + 'SOCIAL_AUTH_GITHUB_KEY', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub OAuth2 Key'), + help_text=_('The OAuth2 key (Client ID) from your GitHub developer application.'), + category=_('GitHub OAuth2'), + category_slug='github', +) + +register( + 'SOCIAL_AUTH_GITHUB_SECRET', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub OAuth2 Secret'), + help_text=_('The OAuth2 secret (Client Secret) from your GitHub developer application.'), + category=_('GitHub OAuth2'), + category_slug='github', + encrypted=True, +) + +register( + 'SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('GitHub OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('GitHub OAuth2'), + category_slug='github', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('GitHub OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('GitHub OAuth2'), + category_slug='github', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# GITHUB ORG OAUTH2 AUTHENTICATION SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('github-org'), + label=_('GitHub Organization OAuth2 Callback URL'), + help_text=_('Create an organization-owned application at ' + 'https://github.com/organizations//settings/applications ' + 'and obtain an OAuth2 key (Client ID) and secret (Client Secret). 
' + 'Provide this URL as the callback URL for your application.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', + depends_on=['TOWER_URL_BASE'], +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_KEY', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Organization OAuth2 Key'), + help_text=_('The OAuth2 key (Client ID) from your GitHub organization application.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_SECRET', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Organization OAuth2 Secret'), + help_text=_('The OAuth2 secret (Client Secret) from your GitHub organization application.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', + encrypted=True, +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_NAME', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Organization Name'), + help_text=_('The name of your GitHub organization, as used in your ' + 'organization\'s URL: https://github.com//.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('GitHub Organization OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('GitHub Organization OAuth2'), + category_slug='github-org', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('GitHub Organization OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('GitHub Organization OAuth2'), + category_slug='github-org', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# GITHUB TEAM OAUTH2 AUTHENTICATION SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('github-team'), + label=_('GitHub Team OAuth2 Callback URL'), + help_text=_('Create an organization-owned application at ' + 'https://github.com/organizations//settings/applications ' + 'and obtain an OAuth2 key (Client ID) and secret (Client Secret). 
' + 'Provide this URL as the callback URL for your application.'), + category=_('GitHub Team OAuth2'), + category_slug='github-team', + depends_on=['TOWER_URL_BASE'], +) + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_KEY', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Team OAuth2 Key'), + help_text=_('The OAuth2 key (Client ID) from your GitHub organization application.'), + category=_('GitHub Team OAuth2'), + category_slug='github-team', +) + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_SECRET', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Team OAuth2 Secret'), + help_text=_('The OAuth2 secret (Client Secret) from your GitHub organization application.'), + category=_('GitHub Team OAuth2'), + category_slug='github-team', + encrypted=True, +) + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_ID', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Team ID'), + help_text=_('Find the numeric team ID using the Github API: ' + 'http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/.'), + category=_('GitHub Team OAuth2'), + category_slug='github-team', +) + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('GitHub Team OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('GitHub Team OAuth2'), + category_slug='github-team', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('GitHub Team OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('GitHub Team OAuth2'), + category_slug='github-team', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# MICROSOFT AZURE ACTIVE DIRECTORY SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('azuread-oauth2'), + label=_('Azure AD OAuth2 Callback URL'), + help_text=_('Register an Azure AD application as described by ' + 'https://msdn.microsoft.com/en-us/library/azure/dn132599.aspx ' + 'and obtain an OAuth2 key (Client ID) and secret (Client Secret). 
' + 'Provide this URL as the callback URL for your application.'), + category=_('Azure AD OAuth2'), + category_slug='azuread-oauth2', + depends_on=['TOWER_URL_BASE'], +) + +register( + 'SOCIAL_AUTH_AZUREAD_OAUTH2_KEY', + field_class=fields.CharField, + allow_blank=True, + label=_('Azure AD OAuth2 Key'), + help_text=_('The OAuth2 key (Client ID) from your Azure AD application.'), + category=_('Azure AD OAuth2'), + category_slug='azuread-oauth2', +) + +register( + 'SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET', + field_class=fields.CharField, + allow_blank=True, + label=_('Azure AD OAuth2 Secret'), + help_text=_('The OAuth2 secret (Client Secret) from your Azure AD application.'), + category=_('Azure AD OAuth2'), + category_slug='azuread-oauth2', + encrypted=True, +) + +register( + 'SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('Azure AD OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('Azure AD OAuth2'), + category_slug='azuread-oauth2', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('Azure AD OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('Azure AD OAuth2'), + category_slug='azuread-oauth2', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# SAML AUTHENTICATION SETTINGS +############################################################################### + + +def get_saml_metadata_url(): + return urlparse.urljoin(settings.TOWER_URL_BASE, reverse('sso:saml_metadata')) + + +register( + 'SOCIAL_AUTH_SAML_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('saml'), + label=_('SAML Service Provider Callback URL'), + help_text=_('Register Tower as a service provider (SP) with each identity ' + 'provider (IdP) you have configured. 
Provide your SP Entity ID ' + 'and this callback URL for your application.'), + category=_('SAML'), + category_slug='saml', + depends_on=['TOWER_URL_BASE'], + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_METADATA_URL', + field_class=fields.CharField, + read_only=True, + default=get_saml_metadata_url, + label=_('SAML Service Provider Metadata URL'), + help_text=_('If your identity provider (IdP) allows uploading an XML ' + 'metadata file, you can download one from this URL.'), + category=_('SAML'), + category_slug='saml', + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_SP_ENTITY_ID', + field_class=fields.CharField, + allow_blank=True, + default='', + label=_('SAML Service Provider Entity ID'), + help_text=_('The application-defined unique identifier used as the ' + 'audience of the SAML service provider (SP) configuration.'), + category=_('SAML'), + category_slug='saml', + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', + field_class=fields.CharField, + allow_blank=True, + default='', + validators=[validate_certificate], + label=_('SAML Service Provider Public Certificate'), + help_text=_('Create a keypair for Tower to use as a service provider (SP) ' + 'and include the certificate content here.'), + category=_('SAML'), + category_slug='saml', + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', + field_class=fields.CharField, + allow_blank=True, + default='', + validators=[validate_private_key], + label=_('SAML Service Provider Private Key'), + help_text=_('Create a keypair for Tower to use as a service provider (SP) ' + 'and include the private key content here.'), + category=_('SAML'), + category_slug='saml', + feature_required='enterprise_auth', + encrypted=True, +) + +register( + 'SOCIAL_AUTH_SAML_ORG_INFO', + field_class=fields.SAMLOrgInfoField, + default=collections.OrderedDict([ + ('en-US', collections.OrderedDict([ + ('name', 'example'), + ('displayname', 'Example'), + ('url', 'http://www.example.com'), + ])), + ]), + label=_('SAML Service Provider Organization Info'), + help_text=_('Configure this setting with information about your app.'), + category=_('SAML'), + category_slug='saml', + placeholder=collections.OrderedDict([ + ('en-US', collections.OrderedDict([ + ('name', 'example'), + ('displayname', 'Example'), + ('url', 'http://www.example.com'), + ])), + ]), + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_TECHNICAL_CONTACT', + field_class=fields.SAMLContactField, + allow_blank=True, + default=collections.OrderedDict([ + ('givenName', 'Technical Contact'), + ('emailAddress', 'techsup@example.com'), + ]), + label=_('SAML Service Provider Technical Contact'), + help_text=_('Configure this setting with your contact information.'), + category=_('SAML'), + category_slug='saml', + placeholder=collections.OrderedDict([ + ('givenName', 'Technical Contact'), + ('emailAddress', 'techsup@example.com'), + ]), + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_SUPPORT_CONTACT', + field_class=fields.SAMLContactField, + allow_blank=True, + default=collections.OrderedDict([ + ('givenName', 'Support Contact'), + ('emailAddress', 'support@example.com'), + ]), + label=_('SAML Service Provider Support Contact'), + help_text=_('Configure this setting with your contact information.'), + category=_('SAML'), + category_slug='saml', + placeholder=collections.OrderedDict([ + ('givenName', 'Support Contact'), + 
('emailAddress', 'support@example.com'), + ]), + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_ENABLED_IDPS', + field_class=fields.SAMLEnabledIdPsField, + default={}, + label=_('SAML Enabled Identity Providers'), + help_text=_('Configure the Entity ID, SSO URL and certificate for each ' + 'identity provider (IdP) in use. Multiple SAML IdPs are supported. ' + 'Some IdPs may provide user data using attribute names that differ ' + 'from the default OIDs ' + '(https://github.com/omab/python-social-auth/blob/master/social/backends/saml.py#L16). ' + 'Attribute names may be overridden for each IdP.'), + category=_('SAML'), + category_slug='saml', + placeholder=collections.OrderedDict([ + ('Okta', collections.OrderedDict([ + ('entity_id', 'http://www.okta.com/HHniyLkaxk9e76wD0Thh'), + ('url', 'https://dev-123456.oktapreview.com/app/ansibletower/HHniyLkaxk9e76wD0Thh/sso/saml'), + ('x509cert', 'MIIDpDCCAoygAwIBAgIGAVVZ4rPzMA0GCSqGSIb3...'), + ('attr_user_permanent_id', 'username'), + ('attr_first_name', 'first_name'), + ('attr_last_name', 'last_name'), + ('attr_username', 'username'), + ('attr_email', 'email'), + ])), + ('OneLogin', collections.OrderedDict([ + ('entity_id', 'https://app.onelogin.com/saml/metadata/123456'), + ('url', 'https://example.onelogin.com/trust/saml2/http-post/sso/123456'), + ('x509cert', 'MIIEJjCCAw6gAwIBAgIUfuSD54OPSBhndDHh3gZo...'), + ('attr_user_permanent_id', 'name_id'), + ('attr_first_name', 'User.FirstName'), + ('attr_last_name', 'User.LastName'), + ('attr_username', 'User.email'), + ('attr_email', 'User.email'), + ])), + ]), + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('SAML Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('SAML'), + category_slug='saml', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, + feature_required='enterprise_auth', +) + +register( + 'SOCIAL_AUTH_SAML_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('SAML Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('SAML'), + category_slug='saml', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, + feature_required='enterprise_auth', +) diff --git a/awx/sso/fields.py new file mode 100644 index 0000000000..5d95296e8e --- /dev/null +++ b/awx/sso/fields.py @@ -0,0 +1,639 @@ +# Python +import collections +import json +import re + +# Python LDAP +import ldap + +# Django +from django.utils.translation import ugettext_lazy as _ + +# Django Auth LDAP +import django_auth_ldap.config +from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion + +# Tower +from awx.conf import fields +from awx.conf.fields import * # noqa +from awx.conf.license import feature_enabled +from awx.main.validators import validate_certificate +from awx.sso.validators import * # noqa + + +def get_subclasses(cls): + for subclass in cls.__subclasses__(): + for subsubclass in get_subclasses(subclass): + yield subsubclass + yield subclass + + +class AuthenticationBackendsField(fields.StringListField): + + # Mapping of settings that must be set in order to enable each + # authentication backend. 
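+ # A backend is kept in the default AUTHENTICATION_BACKENDS list only + # while every setting listed for it below is non-empty. LDAP, RADIUS + # and SAML additionally require the license feature named in + # REQUIRED_BACKEND_FEATURE.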
+ REQUIRED_BACKEND_SETTINGS = collections.OrderedDict([ + ('awx.sso.backends.LDAPBackend', [ + 'AUTH_LDAP_SERVER_URI', + ]), + ('awx.sso.backends.RADIUSBackend', [ + 'RADIUS_SERVER', + ]), + ('social.backends.google.GoogleOAuth2', [ + 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', + 'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET', + ]), + ('social.backends.github.GithubOAuth2', [ + 'SOCIAL_AUTH_GITHUB_KEY', + 'SOCIAL_AUTH_GITHUB_SECRET', + ]), + ('social.backends.github.GithubOrganizationOAuth2', [ + 'SOCIAL_AUTH_GITHUB_ORG_KEY', + 'SOCIAL_AUTH_GITHUB_ORG_SECRET', + 'SOCIAL_AUTH_GITHUB_ORG_NAME', + ]), + ('social.backends.github.GithubTeamOAuth2', [ + 'SOCIAL_AUTH_GITHUB_TEAM_KEY', + 'SOCIAL_AUTH_GITHUB_TEAM_SECRET', + 'SOCIAL_AUTH_GITHUB_TEAM_ID', + ]), + ('social.backends.azuread.AzureADOAuth2', [ + 'SOCIAL_AUTH_AZUREAD_OAUTH2_KEY', + 'SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET', + ]), + ('awx.sso.backends.SAMLAuth', [ + 'SOCIAL_AUTH_SAML_SP_ENTITY_ID', + 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', + 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', + 'SOCIAL_AUTH_SAML_ORG_INFO', + 'SOCIAL_AUTH_SAML_TECHNICAL_CONTACT', + 'SOCIAL_AUTH_SAML_SUPPORT_CONTACT', + 'SOCIAL_AUTH_SAML_ENABLED_IDPS', + ]), + ('django.contrib.auth.backends.ModelBackend', []), + ]) + + REQUIRED_BACKEND_FEATURE = { + 'awx.sso.backends.LDAPBackend': 'ldap', + 'awx.sso.backends.RADIUSBackend': 'enterprise_auth', + 'awx.sso.backends.SAMLAuth': 'enterprise_auth', + } + + @classmethod + def get_all_required_settings(cls): + all_required_settings = set(['LICENSE']) + for required_settings in cls.REQUIRED_BACKEND_SETTINGS.values(): + all_required_settings.update(required_settings) + return all_required_settings + + def __init__(self, *args, **kwargs): + kwargs.setdefault('default', self._default_from_required_settings) + super(AuthenticationBackendsField, self).__init__(*args, **kwargs) + + def _default_from_required_settings(self): + from django.conf import settings + try: + backends = settings._awx_conf_settings._get_default('AUTHENTICATION_BACKENDS') + except AttributeError: + backends = self.REQUIRED_BACKEND_SETTINGS.keys() + # Filter which authentication backends are enabled based on their + # required settings being defined and non-empty. Also filter available + # backends based on license features. + for backend, required_settings in self.REQUIRED_BACKEND_SETTINGS.items(): + if backend not in backends: + continue + required_feature = self.REQUIRED_BACKEND_FEATURE.get(backend, '') + if not required_feature or feature_enabled(required_feature): + if all([getattr(settings, rs, None) for rs in required_settings]): + continue + backends = filter(lambda x: x != backend, backends) + return backends + + +class LDAPServerURIField(fields.URLField): + + def __init__(self, **kwargs): + kwargs.setdefault('schemes', ('ldap', 'ldaps')) + super(LDAPServerURIField, self).__init__(**kwargs) + + def run_validators(self, value): + for url in filter(None, re.split(r'[, ]', (value or ''))): + super(LDAPServerURIField, self).run_validators(url) + return value + + +class LDAPConnectionOptionsField(fields.DictField): + + default_error_messages = { + 'invalid_options': _('Invalid connection option(s): {invalid_options}.'), + } + + def to_representation(self, value): + value = value or {} + opt_names = ldap.OPT_NAMES_DICT + # Convert integer options to their named constants. 
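+ # For example, {ldap.OPT_NETWORK_TIMEOUT: 30} is represented to the + # settings API as {'OPT_NETWORK_TIMEOUT': 30}.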
+ repr_value = {} + for opt, opt_value in value.items(): + if opt in opt_names: + repr_value[opt_names[opt]] = opt_value + return repr_value + + def to_internal_value(self, data): + data = super(LDAPConnectionOptionsField, self).to_internal_value(data) + valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()]) + invalid_options = set(data.keys()) - set(valid_options.keys()) + if invalid_options: + options_display = json.dumps(list(invalid_options)).lstrip('[').rstrip(']') + self.fail('invalid_options', invalid_options=options_display) + # Convert named options to their integer constants. + internal_data = {} + for opt_name, opt_value in data.items(): + internal_data[valid_options[opt_name]] = opt_value + return internal_data + + +class LDAPDNField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPDNField, self).__init__(**kwargs) + self.validators.append(validate_ldap_dn) + + def run_validation(self, data=empty): + value = super(LDAPDNField, self).run_validation(data) + # django-auth-ldap expects DN fields (like AUTH_LDAP_REQUIRE_GROUP) + # to be either a valid string or ``None`` (not an empty string) + return None if value == '' else value + + +class LDAPDNWithUserField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPDNWithUserField, self).__init__(**kwargs) + self.validators.append(validate_ldap_dn_with_user) + + def run_validation(self, data=empty): + value = super(LDAPDNWithUserField, self).run_validation(data) + # django-auth-ldap expects DN fields (like AUTH_LDAP_USER_DN_TEMPLATE) + # to be either a valid string or ``None`` (not an empty string) + return None if value == '' else value + + +class LDAPFilterField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPFilterField, self).__init__(**kwargs) + self.validators.append(validate_ldap_filter) + + +class LDAPFilterWithUserField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPFilterWithUserField, self).__init__(**kwargs) + self.validators.append(validate_ldap_filter_with_user) + + +class LDAPScopeField(fields.ChoiceField): + + def __init__(self, choices=None, **kwargs): + choices = choices or [ + ('SCOPE_BASE', _('Base')), + ('SCOPE_ONELEVEL', _('One Level')), + ('SCOPE_SUBTREE', _('Subtree')), + ] + super(LDAPScopeField, self).__init__(choices, **kwargs) + + def to_representation(self, value): + for choice in self.choices.keys(): + if value == getattr(ldap, choice): + return choice + return super(LDAPScopeField, self).to_representation(value) + + def to_internal_value(self, data): + value = super(LDAPScopeField, self).to_internal_value(data) + return getattr(ldap, value) + + +class LDAPSearchField(fields.ListField): + + default_error_messages = { + 'invalid_length': _('Expected a list of three items but got {length} instead.'), + 'type_error': _('Expected an instance of LDAPSearch but got {input_type} instead.'), + } + ldap_filter_field_class = LDAPFilterField + + def to_representation(self, value): + if not value: + return [] + if not isinstance(value, LDAPSearch): + self.fail('type_error', input_type=type(value)) + return [ + LDAPDNField().to_representation(value.base_dn), + LDAPScopeField().to_representation(value.scope), + self.ldap_filter_field_class().to_representation(value.filterstr), + ] + + def to_internal_value(self, data): + data = super(LDAPSearchField, self).to_internal_value(data) + if len(data) == 0: + return None + if len(data) != 3: + self.fail('invalid_length', length=len(data)) + return LDAPSearch( + 
LDAPDNField().run_validation(data[0]), + LDAPScopeField().run_validation(data[1]), + self.ldap_filter_field_class().run_validation(data[2]), + ) + + +class LDAPSearchWithUserField(LDAPSearchField): + + ldap_filter_field_class = LDAPFilterWithUserField + + +class LDAPSearchUnionField(fields.ListField): + + default_error_messages = { + 'type_error': _('Expected an instance of LDAPSearch or LDAPSearchUnion but got {input_type} instead.'), + } + ldap_search_field_class = LDAPSearchWithUserField + + def to_representation(self, value): + if not value: + return [] + elif isinstance(value, LDAPSearchUnion): + return [self.ldap_search_field_class().to_representation(s) for s in value.searches] + elif isinstance(value, LDAPSearch): + return self.ldap_search_field_class().to_representation(value) + else: + self.fail('type_error', input_type=type(value)) + + def to_internal_value(self, data): + data = super(LDAPSearchUnionField, self).to_internal_value(data) + if len(data) == 0: + return None + if len(data) == 3 and isinstance(data[0], basestring): + return self.ldap_search_field_class().run_validation(data) + else: + return LDAPSearchUnion(*[self.ldap_search_field_class().run_validation(x) for x in data]) + + +class LDAPUserAttrMapField(fields.DictField): + + default_error_messages = { + 'invalid_attrs': _('Invalid user attribute(s): {invalid_attrs}.'), + } + valid_user_attrs = {'first_name', 'last_name', 'email'} + child = fields.CharField() + + def to_internal_value(self, data): + data = super(LDAPUserAttrMapField, self).to_internal_value(data) + invalid_attrs = (set(data.keys()) - self.valid_user_attrs) + if invalid_attrs: + attrs_display = json.dumps(list(invalid_attrs)).lstrip('[').rstrip(']') + self.fail('invalid_attrs', invalid_attrs=attrs_display) + return data + + +class LDAPGroupTypeField(fields.ChoiceField): + + default_error_messages = { + 'type_error': _('Expected an instance of LDAPGroupType but got {input_type} instead.'), + } + + def __init__(self, choices=None, **kwargs): + group_types = get_subclasses(django_auth_ldap.config.LDAPGroupType) + choices = choices or [(x.__name__, x.__name__) for x in group_types] + super(LDAPGroupTypeField, self).__init__(choices, **kwargs) + + def to_representation(self, value): + if not value: + return '' + if not isinstance(value, django_auth_ldap.config.LDAPGroupType): + self.fail('type_error', input_type=type(value)) + return value.__class__.__name__ + + def to_internal_value(self, data): + data = super(LDAPGroupTypeField, self).to_internal_value(data) + if not data: + return None + if data.endswith('MemberDNGroupType'): + return getattr(django_auth_ldap.config, data)(member_attr='member') + else: + return getattr(django_auth_ldap.config, data)() + + +class LDAPUserFlagsField(fields.DictField): + + default_error_messages = { + 'invalid_flag': _('Invalid user flag: "{invalid_flag}".'), + } + valid_user_flags = {'is_superuser'} + child = LDAPDNField() + + def to_internal_value(self, data): + data = super(LDAPUserFlagsField, self).to_internal_value(data) + invalid_flags = (set(data.keys()) - self.valid_user_flags) + if invalid_flags: + self.fail('invalid_flag', invalid_flag=list(invalid_flags)[0]) + return data + + +class LDAPDNMapField(fields.ListField): + + default_error_messages = { + 'type_error': _('Expected None, True, False, a string or list of strings but got {input_type} instead.'), + } + child = LDAPDNField() + + def to_representation(self, value): + if isinstance(value, (list, tuple)): + return super(LDAPDNMapField, 
self).to_representation(value) + elif value in fields.NullBooleanField.TRUE_VALUES: + return True + elif value in fields.NullBooleanField.FALSE_VALUES: + return False + elif value in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(value, basestring): + return self.child.to_representation(value) + else: + self.fail('type_error', input_type=type(value)) + + def to_internal_value(self, data): + if isinstance(data, (list, tuple)): + return super(LDAPDNMapField, self).to_internal_value(data) + elif data in fields.NullBooleanField.TRUE_VALUES: + return True + elif data in fields.NullBooleanField.FALSE_VALUES: + return False + elif data in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(data, basestring): + return self.child.run_validation(data) + else: + self.fail('type_error', input_type=type(data)) + + +class BaseDictWithChildField(fields.DictField): + + default_error_messages = { + 'missing_keys': _('Missing key(s): {missing_keys}.'), + 'invalid_keys': _('Invalid key(s): {invalid_keys}.'), + } + child_fields = { + # 'key': fields.ChildField(), + } + allow_unknown_keys = False + + def __init__(self, *args, **kwargs): + self.allow_blank = kwargs.pop('allow_blank', False) + super(BaseDictWithChildField, self).__init__(*args, **kwargs) + + def to_representation(self, value): + value = super(BaseDictWithChildField, self).to_representation(value) + for k, v in value.items(): + child_field = self.child_fields.get(k, None) + if child_field: + value[k] = child_field.to_representation(v) + elif self.allow_unknown_keys: + value[k] = v + return value + + def to_internal_value(self, data): + data = super(BaseDictWithChildField, self).to_internal_value(data) + missing_keys = set() + for key, child_field in self.child_fields.items(): + if not child_field.required: + continue + elif key not in data: + missing_keys.add(key) + if missing_keys and (data or not self.allow_blank): + keys_display = json.dumps(list(missing_keys)).lstrip('[').rstrip(']') + self.fail('missing_keys', missing_keys=keys_display) + if not self.allow_unknown_keys: + invalid_keys = set(data.keys()) - set(self.child_fields.keys()) + if invalid_keys: + keys_display = json.dumps(list(invalid_keys)).lstrip('[').rstrip(']') + self.fail('invalid_keys', invalid_keys=keys_display) + for k, v in data.items(): + child_field = self.child_fields.get(k, None) + if child_field: + data[k] = child_field.run_validation(v) + elif self.allow_unknown_keys: + data[k] = v + return data + + +class LDAPSingleOrganizationMapField(BaseDictWithChildField): + + default_error_messages = { + 'invalid_keys': _('Invalid key(s) for organization map: {invalid_keys}.'), + } + child_fields = { + 'admins': LDAPDNMapField(allow_null=True, required=False), + 'users': LDAPDNMapField(allow_null=True, required=False), + 'remove_admins': fields.BooleanField(required=False), + 'remove_users': fields.BooleanField(required=False), + } + + +class LDAPOrganizationMapField(fields.DictField): + + child = LDAPSingleOrganizationMapField() + + +class LDAPSingleTeamMapField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key for team map: {missing_keys}.'), + 'invalid_keys': _('Invalid key(s) for team map: {invalid_keys}.'), + } + child_fields = { + 'organization': fields.CharField(), + 'users': LDAPDNMapField(allow_null=True, required=False), + 'remove': fields.BooleanField(required=False), + } + + +class LDAPTeamMapField(fields.DictField): + + child = LDAPSingleTeamMapField()
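To make the behavior of these map fields concrete, a sketch follows. It assumes a bootstrapped AWX/Django environment (the fields are not importable standalone), and all DNs are illustrative:

```python
# Assumes a configured AWX/Django environment; DNs are illustrative.
from awx.sso.fields import LDAPDNMapField, LDAPSearchWithUserField

# A stored [base_dn, scope, filter] triple becomes the LDAPSearch object
# django-auth-ldap consumes; the scope name is resolved by LDAPScopeField.
search = LDAPSearchWithUserField().run_validation(
    ['OU=Users,DC=example,DC=com', 'SCOPE_SUBTREE',
     '(sAMAccountName=%(user)s)'])

# The "admins"/"users" values in organization and team maps may be
# None, True/False, a single DN, or a list of DNs; LDAPDNMapField
# validates all of these shapes.
dn_map = LDAPDNMapField(allow_null=True)
dn_map.run_validation(True)                                    # everyone
dn_map.run_validation('CN=Admins,OU=Users,DC=example,DC=com')  # one group
dn_map.run_validation(['CN=A,DC=example,DC=com',
                       'CN=B,DC=example,DC=com'])              # several
```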
+ + +class RADIUSSecretField(fields.CharField): + + def run_validation(self, data=empty): + value = super(RADIUSSecretField, self).run_validation(data) + if isinstance(value, unicode): + value = value.encode('utf-8') + return value + + def to_internal_value(self, value): + value = super(RADIUSSecretField, self).to_internal_value(value) + if isinstance(value, unicode): + value = value.encode('utf-8') + return value + + +class SocialMapStringRegexField(fields.CharField): + + def to_representation(self, value): + if isinstance(value, type(re.compile(''))): + flags = [] + if value.flags & re.I: + flags.append('i') + if value.flags & re.M: + flags.append('m') + return '/{}/{}'.format(value.pattern, ''.join(flags)) + else: + return super(SocialMapStringRegexField, self).to_representation(value) + + def to_internal_value(self, data): + data = super(SocialMapStringRegexField, self).to_internal_value(data) + match = re.match(r'^/(?P<pattern>.*)/(?P<flags>[im]+)?$', data) + if match: + flags = 0 + if match.group('flags'): + if 'i' in match.group('flags'): + flags |= re.I + if 'm' in match.group('flags'): + flags |= re.M + try: + return re.compile(match.group('pattern'), flags) + except re.error as e: + raise ValidationError('{}: {}'.format(e, data)) + return data + + +class SocialMapField(fields.ListField): + + default_error_messages = { + 'type_error': _('Expected None, True, False, a string or list of strings but got {input_type} instead.'), + } + child = SocialMapStringRegexField() + + def to_representation(self, value): + if isinstance(value, (list, tuple)): + return super(SocialMapField, self).to_representation(value) + elif value in fields.NullBooleanField.TRUE_VALUES: + return True + elif value in fields.NullBooleanField.FALSE_VALUES: + return False + elif value in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(value, (basestring, type(re.compile('')))): + return self.child.to_representation(value) + else: + self.fail('type_error', input_type=type(value)) + + def to_internal_value(self, data): + if isinstance(data, (list, tuple)): + return super(SocialMapField, self).to_internal_value(data) + elif data in fields.NullBooleanField.TRUE_VALUES: + return True + elif data in fields.NullBooleanField.FALSE_VALUES: + return False + elif data in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(data, basestring): + return self.child.run_validation(data) + else: + self.fail('type_error', input_type=type(data)) + + +class SocialSingleOrganizationMapField(BaseDictWithChildField): + + default_error_messages = { + 'invalid_keys': _('Invalid key(s) for organization map: {invalid_keys}.'), + } + child_fields = { + 'admins': SocialMapField(allow_null=True, required=False), + 'users': SocialMapField(allow_null=True, required=False), + 'remove_admins': fields.BooleanField(required=False), + 'remove_users': fields.BooleanField(required=False), + } + + +class SocialOrganizationMapField(fields.DictField): + + child = SocialSingleOrganizationMapField() + + +class SocialSingleTeamMapField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key for team map: {missing_keys}.'), + 'invalid_keys': _('Invalid key(s) for team map: {invalid_keys}.'), + } + child_fields = { + 'organization': fields.CharField(), + 'users': SocialMapField(allow_null=True, required=False), + 'remove': fields.BooleanField(required=False), + } + + +class SocialTeamMapField(fields.DictField): + + child = SocialSingleTeamMapField() + + +class SAMLOrgInfoValueField(BaseDictWithChildField): + + 
default_error_messages = { + 'missing_keys': _('Missing required key(s) for org info record: {missing_keys}.'), + } + child_fields = { + 'name': fields.CharField(), + 'displayname': fields.CharField(), + 'url': fields.URLField(), + } + allow_unknown_keys = True + + +class SAMLOrgInfoField(fields.DictField): + + default_error_messages = { + 'invalid_lang_code': _('Invalid language code(s) for org info: {invalid_lang_codes}.'), + } + child = SAMLOrgInfoValueField() + + def to_internal_value(self, data): + data = super(SAMLOrgInfoField, self).to_internal_value(data) + invalid_keys = set() + for key in data.keys(): + if not re.match(r'^[a-z]{2}(?:-[a-z]{2})??$', key, re.I): + invalid_keys.add(key) + if invalid_keys: + keys_display = json.dumps(list(invalid_keys)).lstrip('[').rstrip(']') + self.fail('invalid_lang_code', invalid_lang_codes=keys_display) + return data + + +class SAMLContactField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key(s) for contact: {missing_keys}.'), + } + child_fields = { + 'givenName': fields.CharField(), + 'emailAddress': fields.EmailField(), + } + allow_unknown_keys = True + + +class SAMLIdPField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key(s) for IdP: {missing_keys}.'), + } + child_fields = { + 'entity_id': fields.CharField(), + 'url': fields.URLField(), + 'x509cert': fields.CharField(validators=[validate_certificate]), + 'attr_user_permanent_id': fields.CharField(required=False), + 'attr_first_name': fields.CharField(required=False), + 'attr_last_name': fields.CharField(required=False), + 'attr_username': fields.CharField(required=False), + 'attr_email': fields.CharField(required=False), + } + allow_unknown_keys = True + + +class SAMLEnabledIdPsField(fields.DictField): + + child = SAMLIdPField() diff --git a/awx/sso/middleware.py b/awx/sso/middleware.py index 012bcefd55..c678ff08f3 100644 --- a/awx/sso/middleware.py +++ b/awx/sso/middleware.py @@ -23,6 +23,10 @@ from awx.main.models import AuthToken class SocialAuthMiddleware(SocialAuthExceptionMiddleware): + def process_view(self, request, callback, callback_args, callback_kwargs): + if request.path.startswith('/sso/login/'): + request.session['social_auth_last_backend'] = callback_kwargs['backend'] + def process_request(self, request): token_key = request.COOKIES.get('token', '') token_key = urllib.quote(urllib.unquote(token_key).strip('"')) @@ -57,6 +61,7 @@ class SocialAuthMiddleware(SocialAuthExceptionMiddleware): if auth_token and request.user and request.user.is_authenticated(): request.session.pop('social_auth_error', None) + request.session.pop('social_auth_last_backend', None) def process_exception(self, request, exception): strategy = getattr(request, 'social_strategy', None) @@ -66,6 +71,12 @@ class SocialAuthMiddleware(SocialAuthExceptionMiddleware): if isinstance(exception, SocialAuthBaseException) or request.path.startswith('/sso/'): backend = getattr(request, 'backend', None) backend_name = getattr(backend, 'name', 'unknown-backend') + + message = self.get_message(request, exception) + if request.session.get('social_auth_last_backend') != backend_name: + backend_name = request.session.get('social_auth_last_backend') + message = request.GET.get('error_description', message) + full_backend_name = backend_name try: idp_name = strategy.request_data()['RelayState'] @@ -73,7 +84,6 @@ class SocialAuthMiddleware(SocialAuthExceptionMiddleware): except KeyError: pass - message = self.get_message(request, 
exception) social_logger.error(message) url = self.get_redirect_uri(request, exception) diff --git a/awx/sso/pipeline.py b/awx/sso/pipeline.py index 756e64279d..2a16eb25b0 100644 --- a/awx/sso/pipeline.py +++ b/awx/sso/pipeline.py @@ -7,8 +7,11 @@ import re # Python Social Auth from social.exceptions import AuthException +# Django +from django.utils.translation import ugettext_lazy as _ + # Tower -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled class AuthNotFound(AuthException): @@ -18,13 +21,13 @@ class AuthNotFound(AuthException): super(AuthNotFound, self).__init__(backend, *args, **kwargs) def __str__(self): - return 'An account cannot be found for {0}'.format(self.email_or_uid) + return _('An account cannot be found for {0}').format(self.email_or_uid) class AuthInactive(AuthException): def __str__(self): - return 'Your account is inactive' + return _('Your account is inactive') def check_user_found_or_created(backend, details, user=None, *args, **kwargs): diff --git a/awx/sso/tests/__init__.py b/awx/sso/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/sso/tests/functional/__init__.py b/awx/sso/tests/functional/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/sso/tests/functional/test_ldap.py b/awx/sso/tests/functional/test_ldap.py new file mode 100644 index 0000000000..fcb2a8bc4b --- /dev/null +++ b/awx/sso/tests/functional/test_ldap.py @@ -0,0 +1,24 @@ +from django.test.utils import override_settings +import ldap +import pytest + +from awx.sso.backends import LDAPSettings + + +@override_settings(AUTH_LDAP_CONNECTION_OPTIONS = {ldap.OPT_NETWORK_TIMEOUT: 60}) +@pytest.mark.django_db +def test_ldap_with_custom_timeout(): + settings = LDAPSettings() + assert settings.CONNECTION_OPTIONS == { + ldap.OPT_NETWORK_TIMEOUT: 60 + } + + +@override_settings(AUTH_LDAP_CONNECTION_OPTIONS = {ldap.OPT_REFERRALS: 0}) +@pytest.mark.django_db +def test_ldap_with_missing_timeout(): + settings = LDAPSettings() + assert settings.CONNECTION_OPTIONS == { + ldap.OPT_REFERRALS: 0, + ldap.OPT_NETWORK_TIMEOUT: 30 + } diff --git a/awx/sso/tests/unit/test_ldap.py b/awx/sso/tests/unit/test_ldap.py new file mode 100644 index 0000000000..48dd3e30e2 --- /dev/null +++ b/awx/sso/tests/unit/test_ldap.py @@ -0,0 +1,21 @@ +import ldap + +from awx.sso.backends import LDAPSettings + + +def test_ldap_default_settings(mocker): + from_db = mocker.Mock(**{'order_by.return_value': []}) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=from_db): + settings = LDAPSettings() + assert settings.ORGANIZATION_MAP == {} + assert settings.TEAM_MAP == {} + + +def test_ldap_default_network_timeout(mocker): + from_db = mocker.Mock(**{'order_by.return_value': []}) + with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=from_db): + settings = LDAPSettings() + assert settings.CONNECTION_OPTIONS == { + ldap.OPT_REFERRALS: 0, + ldap.OPT_NETWORK_TIMEOUT: 30 + } diff --git a/awx/sso/validators.py b/awx/sso/validators.py new file mode 100644 index 0000000000..dd201f3e67 --- /dev/null +++ b/awx/sso/validators.py @@ -0,0 +1,60 @@ +# Python +import re + +# Python-LDAP +import ldap + +# Django +from django.core.exceptions import ValidationError +from django.utils.translation import ugettext_lazy as _ + +__all__ = ['validate_ldap_dn', 'validate_ldap_dn_with_user', + 'validate_ldap_bind_dn', 'validate_ldap_filter', + 'validate_ldap_filter_with_user'] + + +def validate_ldap_dn(value, with_user=False): + if 
with_user: + if '%(user)s' not in value: + raise ValidationError(_('DN must include "%%(user)s" placeholder for username: %s') % value) + dn_value = value.replace('%(user)s', 'USER') + else: + dn_value = value + try: + ldap.dn.str2dn(dn_value) + except ldap.DECODING_ERROR: + raise ValidationError(_('Invalid DN: %s') % value) + + +def validate_ldap_dn_with_user(value): + validate_ldap_dn(value, with_user=True) + + +def validate_ldap_bind_dn(value): + if not re.match(r'^[A-Za-z][A-Za-z0-9._-]*?\\[A-Za-z0-9 ._-]+?$', value.strip()): + validate_ldap_dn(value) + + +def validate_ldap_filter(value, with_user=False): + value = value.strip() + if not value: + return + if with_user: + if '%(user)s' not in value: + raise ValidationError(_('Filter must include "%%(user)s" placeholder for username: %s') % value) + dn_value = value.replace('%(user)s', 'USER') + else: + dn_value = value + if re.match(r'^\([A-Za-z0-9]+?=[^()]+?\)$', dn_value): + return + elif re.match(r'^\([&|!]\(.*?\)\)$', dn_value): + try: + map(validate_ldap_filter, ['(%s)' % x for x in dn_value[3:-2].split(')(')]) + return + except ValidationError: + pass + raise ValidationError(_('Invalid filter: %s') % value) + + +def validate_ldap_filter_with_user(value): + validate_ldap_filter(value, with_user=True) diff --git a/awx/sso/views.py index 962b89943e..2a68deec1a 100644 --- a/awx/sso/views.py +++ b/awx/sso/views.py @@ -22,8 +22,11 @@ from awx.api.serializers import UserSerializer logger = logging.getLogger('awx.sso.views') + class BaseRedirectView(RedirectView): + + permanent = True + def get_redirect_url(self, *args, **kwargs): last_path = self.request.COOKIES.get('lastPath', '') last_path = urllib.quote(urllib.unquote(last_path).strip('"')) @@ -33,6 +36,7 @@ class BaseRedirectView(RedirectView): else: return url + + sso_error = BaseRedirectView.as_view() sso_inactive = BaseRedirectView.as_view() @@ -67,6 +71,7 @@ class CompleteView(BaseRedirectView): response.set_cookie('current_user', current_user) return response + + sso_complete = CompleteView.as_view() @@ -80,10 +85,15 @@ class MetadataView(View): 'saml', redirect_uri=complete_url, ) - metadata, errors = saml_backend.generate_metadata_xml() + try: + metadata, errors = saml_backend.generate_metadata_xml() + except Exception as e: + logger.exception('unable to generate SAML metadata') + errors = e + if not errors: return HttpResponse(content=metadata, content_type='text/xml') else: return HttpResponse(content=str(errors), content_type='text/plain') + + saml_metadata = MetadataView.as_view() diff --git a/awx/static/api/api.css index 61d51fae12..3b18c4273d 100644 --- a/awx/static/api/api.css +++ b/awx/static/api/api.css @@ -151,6 +151,9 @@ body .prettyprint .lit { body .prettyprint .str { color: #D9534F; } +body div.ansi_back { + display: inline-block; +} body .well.tab-content { padding: 20px;
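Returning to awx/sso/validators.py above: the filter grammar accepted by validate_ldap_filter is deliberately narrow, allowing a single (attr=value) comparison or a parenthesized &, | or ! expression over such comparisons. A quick sketch, assuming python-ldap and Django are importable:

```python
# Exercises the validators above; inputs are illustrative.
from django.core.exceptions import ValidationError
from awx.sso.validators import validate_ldap_dn, validate_ldap_filter

validate_ldap_dn('CN=Tower Users,OU=Users,DC=example,DC=com')  # passes
validate_ldap_filter('(objectClass=group)')                    # passes
validate_ldap_filter('(|(cn=admins)(cn=ops))')                 # passes

try:
    validate_ldap_filter('cn=admins')  # missing enclosing parentheses
except ValidationError as e:
    print(e)
```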
diff --git a/awx/templates/rest_framework/api.html index c40d81ff63..3b75c4a35c 100644 --- a/awx/templates/rest_framework/api.html +++ b/awx/templates/rest_framework/api.html @@ -36,9 +36,9 @@ {% if user.is_authenticated %} Logged in as {{ user }}{% if user.get_full_name %} ({{ user.get_full_name }}){% endif %} {% endif %} -Ansible Tower API Guide -Back to Ansible Tower +{% trans 'Ansible Tower API Guide' %} +{% trans 'Back to Ansible Tower' %} [the anchor markup around these menu links was lost in extraction] @@ -52,7 +52,7 @@ [hunk body lost in extraction]
diff --git a/awx/templates/rest_framework/base.html index 6ed3cd456f..a6c4169ebd 100644 --- a/awx/templates/rest_framework/base.html +++ b/awx/templates/rest_framework/base.html @@ -1,8 +1,8 @@ + {# Copy of base.html from rest_framework with minor Ansible Tower change. #} {% load staticfiles %} {% load rest_framework %} {% load i18n %} - {% block head %} @@ -75,21 +75,21 @@ [GET format links under {% if api_settings.URL_FORMAT_OVERRIDE %}: label text wrapped in {% trans %}] @@ -97,13 +97,13 @@ [OPTIONS and DELETE buttons for options_form / delete_form: labels wrapped in {% trans %}] @@ -169,7 +169,7 @@ [POST button for post_form] @@ -183,7 +183,7 @@ [POST button for the raw data form] @@ -213,7 +213,7 @@ [PUT button for put_form] @@ -227,10 +227,10 @@ [PUT / PATCH buttons for raw_data_put_form and raw_data_patch_form; the surrounding HTML markup for all of these hunks was lost in extraction] diff --git a/awx/ui/.babelrc new file mode 100644 index 0000000000..c13c5f627f --- /dev/null +++ b/awx/ui/.babelrc @@ -0,0 +1,3 @@ +{ + "presets": ["es2015"] +} diff --git a/awx/ui/.jshintrc new file mode 100644 index 0000000000..c8075a8ba8 --- /dev/null +++ b/awx/ui/.jshintrc @@ -0,0 +1,47 @@ +{ + "browser": true, + "node": true, + "jquery": true, + "esnext": true, + "globalstrict": true, + "curly": true, + "immed": true, + "latedef": "nofunc", + "noarg": true, + "nonew": true, + "maxerr": 10000, + "notypeof": true, + "globals": { + "$ENV": true, + "require": true, + "global": true, + "beforeEach": false, + "inject": false, + "module": false, + "angular":false, + "alert":false, + "$AnsibleConfig":true, + "$basePath":true, + "jsyaml":false, + "_":false, + "d3":false, + "Donut3D":false, + "nv":false, + "it": false, + "xit": false, + "expect": false, + "context": false, + "describe": false, + "moment": false, + "spyOn": false, + "jasmine": false + }, + "strict": false, + "quotmark": false, + "trailing": true, + "undef": true, + "unused": true, + "eqeqeq": true, + "indent": 4, + "newcap": false +} diff --git a/awx/ui/.npmrc new file mode 100644 index 0000000000..d883e4fa13 --- /dev/null +++ b/awx/ui/.npmrc @@ -0,0 +1 @@ +progress=false diff --git a/awx/ui/Gruntfile.js new file mode 100644 index 0000000000..8dc07e896a --- /dev/null +++ b/awx/ui/Gruntfile.js @@ -0,0 +1,49 @@ +module.exports = function(grunt) { + // Load grunt tasks & configurations automatically from ./grunt-tasks/ + require('load-grunt-tasks')(grunt); + // display task timings + require('time-grunt')(grunt); + + var options = { + config: { + src: './grunt-tasks/*.js' + }, + pkg: grunt.file.readJSON('package.json') + }; + + var configs = require('load-grunt-configs')(grunt, options); + + // Project configuration. + grunt.initConfig(configs); + grunt.loadNpmTasks('grunt-newer'); + grunt.loadNpmTasks('grunt-angular-gettext'); + + // Writes environment variables for development. Currently manages: + // browser-sync + websocket proxy + + grunt.registerTask('sync', [ + 'browserSync:http', + 'concurrent:watch' + ]); + + grunt.registerTask('dev', [ + 'clean:tmp', + 'clean:static', + 'concurrent:dev', + 'sync', + ]); + + grunt.registerTask('devNoSync', [ + 'clean:tmp', + 'clean:static', + 'concurrent:devNoSync', + ]); + + grunt.registerTask('release', [ + 'clean:tmp', + 'clean:static', + 'webpack:prod', + 'concurrent:prod', + ]); + +}; diff --git a/awx/ui/README.md index 6a30815408..adc8475283 100644 --- a/awx/ui/README.md +++ b/awx/ui/README.md @@ -1,30 +1,159 @@ -Ansible UI -========== -The user interface to Ansible Commander +# Ansible Tower UI + +## Requirements + +### Node / NPM + +Tower currently requires the 6.x LTS version of Node and NPM. + +macOS installer: [https://nodejs.org/dist/latest-v6.x/node-v6.9.4.pkg](https://nodejs.org/dist/latest-v6.x/node-v6.9.4.pkg) + +RHEL / CentOS / Fedora: + +``` +$ curl --silent --location https://rpm.nodesource.com/setup_6.x | bash - +$ yum install nodejs +``` + +### Other Dependencies + +On macOS, install the Command Line Tools: + +``` +$ xcode-select --install +``` + +RHEL / CentOS / Fedora: + +``` +$ yum install bzip2 gcc-c++ git make +``` + +## Usage + +### Starting the UI + +First, the Tower API will need to be running. See [CONTRIBUTING.md](../../CONTRIBUTING.md). 
+ + When using Docker for Mac or native Docker on Linux: + + ``` + $ make ui-docker + ``` + + When using Docker Machine: + + ``` + $ DOCKER_MACHINE_NAME=default make ui-docker-machine + ``` + + ### Running Tests + + Run unit tests locally, poll for changes to both source and test files, launch tests in supported browser engines: + + ``` + $ make ui-test + ``` + + Run unit tests in a CI environment (Jenkins): + + ``` + $ make ui-test-ci + ``` + + ### Adding new dependencies -Installation ------------- +#### Add / update a bundled vendor dependency -FIXME: Update the instructions below. +1. `npm install --prefix awx/ui --save some-frontend-package@1.2.3` +2. Add `'some-frontend-package'` to `var vendorFiles` in `./grunt-tasks/webpack.js` +3. `npm --prefix awx/ui shrinkwrap` to freeze current dependency resolution -To use the UI you will first need to complete the installation of Ansible Commander. Within -Ansbile Commander you should be able to start the server (make runserver) and log into the -admin console. If that all works, then you are ready to install Ansible UI. +#### Add / update a dependency in the build/test pipeline -For now the UI runs under the django server installed with Commander. If you are planning to -do development, do NOT pull a copy of UI into the same directory structure as Commander. In -other words, for development the UI should not be insalled as a subdirectory of Commander. +1. `npm install --prefix awx/ui --save-dev some-toolchain-package@1.2.3` +2. `npm --prefix awx/ui shrinkwrap` to freeze current dependency resolution -Once you have obtained a copy of UI, create a symbolic link within the Commander lib/static -directory that points to the app subdirectory under ansible-ui. Call the link web: +### Polyfills, shims, patches - cd ~/ansible-commander/lib/static - ln -s ../../../ansible-ui/app web +The Webpack pipeline will prefer module patterns in this order: CommonJS, AMD, UMD. For a comparison of supported patterns, refer to [Webpack's docs](https://webpack.github.io/docs/comparison.html). -With the Ansible Commander server running, you should now be able to access the UI: +Some JavaScript libraries do not export their contents as a module, or depend on other third-party components. If the library maintainer does not wrap their lib in a factory that provides a CommonJS or AMD module, you will need to provide dependencies with a shim. - http://127.0.0.1:8013/static/web/index.html +1. Shim implicit dependencies using Webpack's [ProvidePlugin](https://github.com/webpack/webpack/blob/006d59500de0493c4096d5d4cecd64eb12db2b95/lib/ProvidePlugin.js). Example: -You will be immediately prompted for to log in. User your Commander superuser credientials. +```js +// Tower source code depends on the lodash library being available as _ +_.uniq([1,2,3,1]) // will throw a ReferenceError: _ is not defined +``` + +```js +// webpack.config.js +plugins: [ + new webpack.ProvidePlugin({ + '_': 'lodash', + }) +] +``` + +```js +// the following requirement is inserted by webpack at build time +var _ = require('lodash'); +_.uniq([1,2,3,1]) +``` + +2. Use [`imports-loader`](https://webpack.github.io/docs/shimming-modules.html#importing) to inject requirements into the namespace of vendor code at import time. Use [`exports-loader`](https://webpack.github.io/docs/shimming-modules.html#exporting) to conventionally export vendor code lacking a conventional export pattern. +3. [Apply a functional patch](https://gist.github.com/leigh-johnson/070159d3fd780d6d8da6e13625234bb3). 
+
+Example usage in `npm run build-docker-machine`:
+
+```bash
+$ docker-machine ssh $DOCKER_MACHINE_NAME -f -N -L ${npm_package_config_websocket_port}:localhost:${npm_package_config_websocket_port}
+$ ip=$(docker-machine ip $DOCKER_MACHINE_NAME)
+$ npm set ansible-tower:django_host ${ip}
+$ grunt dev
+```
+
+Example usage in an `npm test` script target:
+
+```
+npm_package_config_websocket_port=mock_websocket_port npm_package_config_django_port=mock_api_port npm_package_config_django_host=mock_api_host npm run test:someMockIntegration
+```
+
+You'll usually want to set vars prior to running a script target:
+```
+$ npm set ansible-tower:websocket_host ${mock_host}; npm run script-name
+```
+
+### NPM Scripts
+
+Examples:
+```json
+  {
+    "scripts": {
+      "pretest": "echo I run immediately before 'npm test' executes",
+      "posttest": "echo I run immediately after 'npm test' exits",
+      "test": "karma start karma.conf.js"
+    }
+  }
+```
+
+`npm test` is an alias for `npm run test`. Refer to [script field docs](https://docs.npmjs.com/misc/scripts) for a list of other runtime events.
diff --git a/awx/ui/TODO.md b/awx/ui/TODO.md
deleted file mode 100644
index 290e92fc13..0000000000
--- a/awx/ui/TODO.md
+++ /dev/null
@@ -1,6 +0,0 @@
-TODO
-====
-
-* mpd - figure out way to mark each object generically with whether it can be edited/administrated/etc
-* mpd - enable generic filtering, look into sorting
-
diff --git a/awx/ui/__init__.py b/awx/ui/__init__.py
index e484e62be1..ac6a554356 100644
--- a/awx/ui/__init__.py
+++ b/awx/ui/__init__.py
@@ -1,2 +1,4 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.
+
+default_app_config = 'awx.ui.apps.UIConfig'
diff --git a/awx/ui/apps.py b/awx/ui/apps.py
new file mode 100644
index 0000000000..40943c6f53
--- /dev/null
+++ b/awx/ui/apps.py
@@ -0,0 +1,9 @@
+# Django
+from django.apps import AppConfig
+from django.utils.translation import ugettext_lazy as _
+
+
+class UIConfig(AppConfig):
+
+    name = 'awx.ui'
+    verbose_name = _('UI')
diff --git a/awx/ui/ascii_mockups.md b/awx/ui/ascii_mockups.md
deleted file mode 100644
index 54813c12f7..0000000000
--- a/awx/ui/ascii_mockups.md
+++ /dev/null
@@ -1,97 +0,0 @@
-Mockups
-=======
-
-Goals/priority:
-
-    Organizations
-    Inventory
-        Groups
-        Hosts
-            Subgroup
-            Host variable
-        Group variable
-    Jobs and Job Results
-    Credentials
-    Projects
-    Teams
-    Permissions
-
-Later/Roadmap:
-
-    Rich Editing
-
-Just conceptual stuff prior to using Balsamiq.
-
-Login
-
-    username:
-    password:
-
-When log in, menu options:
-
-    organizations
-    projects
-    teams
-    credentials
-
-Organization tab:
-
-    [if admin, plus sign for add ? ]
-    org1   [ if admin, delete ]
-    org2
-    org3
-
-Organizations detail:
-
-    [ see all properties ]
-    [ click to edit some of them if admin ]
-
-    [ if org admin, + project to organization ]
-    [ + add a user ]
-    [ + add more admins ]
-
-Projects details:
-
-    (projects are created from the organization detail page)
-
-Teams:
-
-    add users / remove users
-
-Users:
-
-    add remove/users
-
-Credentials:
-
-Permissions:
-
-Jobs:
-
-Inventory View:
-
-    | Groups
-    +--+ Subgroup   <----------------- click on this
-       +----- subgroup
-
-    see hosts in group that was selected above, (directly?)
-
-    MPD: may need to add a way to see all child hosts of a given group in API
-        /api/v1/groups/N/all_hosts
-        if no group is selected, use /api/v1/hosts/
-
-    [ Group selector ]  [ edit link ]  [ delete link (if no kids?) ]
-
-    [ Host list ]  [ edit link ]  [ delete link ]
-
-
-    tree of list of
-    GROUPS     |  HOSTS
-    x          |  asdf
-    x          |  jkl
-    x          |  foo
-    x          |
-    x          |
-
-
[16 deleted Sketch-exported SVG icon assets follow: CloudSync.svg, Credentials.svg, InventoryScripts.svg, JobTemplates.svg, Jobs.svg, ManagementJobs.svg, Organizations.svg, PortalMode--exit.svg, PortalMode.svg, Projects.svg, Setup.svg, Signout.svg, Teams.svg, TowerLogo.svg, User.svg, Users.svg (all under awx/ui/client/assets/). Their XML markup was stripped during extraction; only each file's Sketch title text survived.]
diff --git a/awx/ui/client/assets/fontcustom/.fontcustom-manifest.json b/awx/ui/client/assets/fontcustom/.fontcustom-manifest.json
index 06fddffd64..c50d13abe3 100644
--- a/awx/ui/client/assets/fontcustom/.fontcustom-manifest.json
+++ b/awx/ui/client/assets/fontcustom/.fontcustom-manifest.json
@@ -1,18 +1,38 @@
 {
   "checksum": {
-    "previous": "e4442d278bfc4c1a673bdd29512213a18cfaf74decd9bb21ecd698b4860e7453",
-    "current": "e4442d278bfc4c1a673bdd29512213a18cfaf74decd9bb21ecd698b4860e7453"
+    "previous": "3dfbafd778b214fc5df2a64fe14fbfb30ba40e33282eedf0d98b5a613786db88",
+    "current": "3dfbafd778b214fc5df2a64fe14fbfb30ba40e33282eedf0d98b5a613786db88"
   },
   "fonts": [
-    "..//fontcustom_e4442d278bfc4c1a673bdd29512213a1.ttf",
-    "..//fontcustom_e4442d278bfc4c1a673bdd29512213a1.svg",
-    "..//fontcustom_e4442d278bfc4c1a673bdd29512213a1.woff",
-    "..//fontcustom_e4442d278bfc4c1a673bdd29512213a1.eot"
+    "./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.ttf",
+    "./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg",
+    "./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.woff",
+    "./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot"
   ],
   "glyphs": {
-    "fontcustom_fbf6cfce71d2c35351b84596defbfd01": {
-      "codepoint": 61696,
-      "source": "./fontcustom_fbf6cfce71d2c35351b84596defbfd01.svg"
+    "activity-stream": {
+      "codepoint": 61697,
+      "source": "new_icons/activity-stream.svg"
+    },
+    "google": {
+      "codepoint": 61698,
+      "source": "new_icons/google.svg"
+    },
+    "launch": {
+      "codepoint": 61699,
+      "source": "new_icons/launch.svg"
+    },
+    "microsoft": {
+      "codepoint": 61700,
+      "source": "new_icons/microsoft.svg"
+    },
+    "saml-02": {
+      "codepoint": 61701,
+      "source": "new_icons/saml-02.svg"
+    },
+    "user": {
+      "codepoint": 61702,
+      "source": "new_icons/user.svg"
     }
   },
   "options": {
@@ -25,16 +45,16 @@
     "font_design_size": 16,
     "font_em": 512,
     "font_name": "fontcustom",
-    "force": true,
+    "force": false,
     "input": {
-      "templates": ".",
-      "vectors": "."
+      "templates": "new_icons/",
+      "vectors": "new_icons/"
     },
     "no_hash": false,
     "output": {
-      "css": "../",
-      "fonts": "../",
-      "preview": "../"
+      "css": ".",
+      "fonts": ".",
+      "preview": "."
     },
     "preprocessor_path": null,
     "quiet": false,
@@ -44,7 +64,7 @@
     ]
   },
   "templates": [
-    "../fontcustom.css",
-    "../fontcustom-preview.html"
+    "./fontcustom.css",
+    "./fontcustom-preview.html"
   ]
 }
\ No newline at end of file
diff --git a/awx/ui/client/assets/fontcustom/fontcustom-preview.html b/awx/ui/client/assets/fontcustom/fontcustom-preview.html
index becd88eca8..bda47533ba 100644
--- a/awx/ui/client/assets/fontcustom/fontcustom-preview.html
+++ b/awx/ui/client/assets/fontcustom/fontcustom-preview.html
@@ -141,11 +141,11 @@
     @font-face {
       font-family: "fontcustom";
-      src: url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.eot");
-      src: url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.eot?#iefix") format("embedded-opentype"),
-           url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.woff") format("woff"),
-           url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.ttf") format("truetype"),
-           url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.svg#fontcustom") format("svg");
+      src: url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot");
+      src: url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot?#iefix") format("embedded-opentype"),
+           url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.woff") format("woff"),
+           url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.ttf") format("truetype"),
+           url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg#fontcustom") format("svg");
       font-weight: normal;
       font-style: normal;
     }
@@ -153,7 +153,7 @@
     @media screen and (-webkit-min-device-pixel-ratio:0) {
       @font-face {
         font-family: "fontcustom";
-        src: url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.svg#fontcustom") format("svg");
+        src: url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg#fontcustom") format("svg");
       }
     }
@@ -163,9 +163,7 @@
 .icon-activity-stream:before,
 .icon-google:before,
 .icon-launch:before,
-.icon-launch-circle:before,
-.icon-launch-new:before,
-.icon-launch2:before,
+.icon-microsoft:before,
 .icon-saml-02:before,
 .icon-user:before {
   display: inline-block;
@@ -182,14 +180,12 @@
   font-smoothing: antialiased;
 }

-.icon-activity-stream:before { content: "\f102"; }
-.icon-google:before { content: "\f100"; }
+.icon-activity-stream:before { content: "\f101"; }
+.icon-google:before { content: "\f102"; }
 .icon-launch:before { content: "\f103"; }
-.icon-launch-circle:before { content: "\f105"; }
-.icon-launch-new:before { content: "\f107"; }
-.icon-launch2:before { content: "\f106"; }
-.icon-saml-02:before { content: "\f101"; }
-.icon-user:before { content: "\f104"; }
+.icon-microsoft:before { content: "\f104"; }
+.icon-saml-02:before { content: "\f105"; }
+.icon-user:before { content: "\f106"; }
@@ -205,7 +201,7 @@
[fontcustom-preview.html, this and the remaining hunks (@@ -205 through @@ -310): the HTML preview markup was stripped during extraction. Recoverable content: the heading changes from "fontcustom contains 8 glyphs:" to "fontcustom contains 6 glyphs:"; the "Toggle Preview Characters" control is unchanged; per-glyph preview rows ("PpPpPpPpPpPpPpPpPpPp" samples at sizes 12 through 72) are updated for the re-assigned codepoints; and the rows for the three removed launch-circle / launch-new / launch2 glyphs are replaced by a single row for the new microsoft glyph.]
    diff --git a/awx/ui/client/assets/fontcustom/fontcustom.css b/awx/ui/client/assets/fontcustom/fontcustom.css index 39ab124bda..6dfb140f36 100644 --- a/awx/ui/client/assets/fontcustom/fontcustom.css +++ b/awx/ui/client/assets/fontcustom/fontcustom.css @@ -4,11 +4,11 @@ @font-face { font-family: "fontcustom"; - src: url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.eot"); - src: url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.eot?#iefix") format("embedded-opentype"), - url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.woff") format("woff"), - url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.ttf") format("truetype"), - url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.svg#fontcustom") format("svg"); + src: url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot"); + src: url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot?#iefix") format("embedded-opentype"), + url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.woff") format("woff"), + url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.ttf") format("truetype"), + url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg#fontcustom") format("svg"); font-weight: normal; font-style: normal; } @@ -16,7 +16,7 @@ @media screen and (-webkit-min-device-pixel-ratio:0) { @font-face { font-family: "fontcustom"; - src: url("./fontcustom_d77a9996ed04d45b02f5c06874cd36db.svg#fontcustom") format("svg"); + src: url("./fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg#fontcustom") format("svg"); } } @@ -26,9 +26,7 @@ .icon-activity-stream:before, .icon-google:before, .icon-launch:before, -.icon-launch-circle:before, -.icon-launch-new:before, -.icon-launch2:before, +.icon-microsoft:before, .icon-saml-02:before, .icon-user:before { display: inline-block; @@ -45,11 +43,9 @@ font-smoothing: antialiased; } -.icon-activity-stream:before { content: "\f102"; } -.icon-google:before { content: "\f100"; } +.icon-activity-stream:before { content: "\f101"; } +.icon-google:before { content: "\f102"; } .icon-launch:before { content: "\f103"; } -.icon-launch-circle:before { content: "\f105"; } -.icon-launch-new:before { content: "\f107"; } -.icon-launch2:before { content: "\f106"; } -.icon-saml-02:before { content: "\f101"; } -.icon-user:before { content: "\f104"; } +.icon-microsoft:before { content: "\f104"; } +.icon-saml-02:before { content: "\f105"; } +.icon-user:before { content: "\f106"; } diff --git a/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot new file mode 100644 index 0000000000..7d890d22c2 Binary files /dev/null and b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.eot differ diff --git a/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg new file mode 100644 index 0000000000..20e687b626 --- /dev/null +++ b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.svg @@ -0,0 +1,76 @@ + + + + + +Created by FontForge 20120731 at Mon Nov 28 21:58:48 2016 + By Chris Church +Created by Chris Church with FontForge 2.0 (http://fontforge.sf.net) + + + + + + + + + + + + + + diff --git a/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.ttf b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.ttf new file mode 100644 index 0000000000..b5bb62ffce Binary files /dev/null and 
b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.ttf differ diff --git a/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.woff b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.woff new file mode 100644 index 0000000000..1ec0dcb5c6 Binary files /dev/null and b/awx/ui/client/assets/fontcustom/fontcustom_3dfbafd778b214fc5df2a64fe14fbfb3.woff differ diff --git a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.eot b/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.eot deleted file mode 100644 index 739e0b64f5..0000000000 Binary files a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.eot and /dev/null differ diff --git a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.svg b/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.svg deleted file mode 100644 index f1fe2ac03d..0000000000 --- a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.svg +++ /dev/null @@ -1,108 +0,0 @@ - - - - - -Created by FontForge 20150401 at Thu Nov 12 14:46:50 2015 - By John Mitchell -Copyright (c) 2015, John Mitchell - - - - - - - - - - - - - - - - diff --git a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.ttf b/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.ttf deleted file mode 100644 index 5d151b75b0..0000000000 Binary files a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.ttf and /dev/null differ diff --git a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.woff b/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.woff deleted file mode 100644 index 4c79bee685..0000000000 Binary files a/awx/ui/client/assets/fontcustom/fontcustom_d77a9996ed04d45b02f5c06874cd36db.woff and /dev/null differ diff --git a/awx/ui/client/assets/fontcustom/new_icons/microsoft.svg b/awx/ui/client/assets/fontcustom/new_icons/microsoft.svg new file mode 100644 index 0000000000..934eb2b4b9 --- /dev/null +++ b/awx/ui/client/assets/fontcustom/new_icons/microsoft.svg @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/awx/ui/client/legacy-styles/angular-scheduler.less b/awx/ui/client/legacy-styles/angular-scheduler.less index b502d307b0..4f6817e08d 100644 --- a/awx/ui/client/legacy-styles/angular-scheduler.less +++ b/awx/ui/client/legacy-styles/angular-scheduler.less @@ -5,12 +5,14 @@ * */ - /* + /* #schedules-form-container -inventory group add/edit dialog */ + @import './client/src/shared/branding/colors.less'; + #schedules-tab { - position: relative; + position: relative; top: 0; left: 0; } @@ -20,22 +22,22 @@ position: absolute; top: 0; left: 0; - z-index: 100; + z-index: 100; background-color: @black; - opacity: 0; + opacity: 0; } #schedules-list { overflow-x: hidden; - overflow-y: auto; + overflow-y: auto; } #schedules-form-container { - position: absolute; + position: absolute; top: 0; left: 0; display: none; - border: 1px solid #e5e5e5; + border: 1px solid #e5e5e5; border-radius: 4px; box-shadow: 3px 3px 6px 0 #666; padding: 0 10px 15px 8px; @@ -83,15 +85,15 @@ } #scheduler-modal-dialog, #schedules-form-container { - display: none; - overflow-x: hidden; + display: none; + overflow-x: hidden; overflow-y: auto; padding-top: 25px; - + form { width: 100%; } - + .sublabel { font-weight: normal; } @@ -101,7 +103,7 @@ } .occurrence-list { - border: 1px solid 
@well-border; + border: 1px solid @well-border; padding: 8px 10px; border-radius: 4px; background-color: @well; @@ -113,7 +115,7 @@ display: inline-block; margin-left: 15px; font-size: 12px; - + .label-inline { display: inline-block; vertical-align: middle; @@ -131,7 +133,7 @@ margin-right: 10px; } } - + .ui-widget input { font-size: 12px; font-weight: normal; @@ -162,7 +164,7 @@ color: #999; padding-left: 10px; } - .error { + .error { color: #dd1b16; font-size: 12px; margin-bottom: 0; @@ -214,14 +216,14 @@ width: 100%; } .occurrence-list { - border: 1px solid @well-border; + border: 1px solid @well-border; padding: 8px 10px; border-radius: 4px; background-color: @well; list-style: none; margin-bottom: 5px; } - + #weekdaySelect .btn-default:hover, #weekdaySelect .btn-default:focus { background-color: #fff; @@ -232,4 +234,4 @@ #weekdaySelect .btn-default.active:hover { background-color: #e0e0e0; } -} \ No newline at end of file +} diff --git a/awx/ui/client/legacy-styles/ansible-ui.less b/awx/ui/client/legacy-styles/ansible-ui.less index f201b7352a..4cc306cb36 100644 --- a/awx/ui/client/legacy-styles/ansible-ui.less +++ b/awx/ui/client/legacy-styles/ansible-ui.less @@ -21,7 +21,7 @@ src: url(/static/assets/OpenSans-Bold.ttf); } -@import "src/shared/branding/colors.less"; +@import "./client/src/shared/branding/colors.less"; @import "fonts.less"; @import "main-layout.less"; @import "animations.less"; @@ -42,9 +42,9 @@ @import "survey-maker.less"; @import "text-label.less"; @import "./bootstrap-datepicker.less"; -@import "awx/ui/client/src/shared/branding/colors.default.less"; +@import "./client/src/shared/branding/colors.default.less"; // Bootstrap default overrides -@import "awx/ui/client/src/shared/bootstrap-settings.less"; +@import "./client/src/shared/bootstrap-settings.less"; /* Bootstrap fix that's causing a right margin to appear whenver a modal is opened */ body.modal-open { @@ -730,75 +730,6 @@ legend { margin: 15px 0 15px 0; } -.page-number { - display: inline-block; - padding: 0; - margin: 0; -} - -.page-number-small { - display: inline-block; - margin-left: 10px; - font-size: 11px; -} - -/* Pagination */ - .page-label { - font-size: 12px; - margin-top: 0; - text-align: right; - } - - .pagination { - margin-top: 0; - margin-bottom: 7px; - } - - .pagination>li>a, - .pagination>li>span { - border: 1px solid @grey-border; - padding: 3px 6px; - font-size: 10px; - } - - .pagination li { - a#next-page { - border-radius: 0 4px 4px 0; - } - - a#previous-page { - border-radius: 4px 0 0 4px; - } - } - - .modal-body { - .pagination { - margin-top: 15px; - margin-bottom: 0; - } - .pagination > li > a { - border: none; - padding-top: 0; - padding-bottom: 0; - } - .pagination > .active > a { - background-color: @default-bg; - color: #428bca; - border-color: none; - border: 1px solid @default-link; - } - .alert { - padding: 0; - border: none; - margin: 0; - } - .alert-danger { - background-color: @default-bg; - border: none; - color: @default-interface-txt; - } - } - .footer-navigation { margin: 10px 0 10px 0; } @@ -962,6 +893,12 @@ input[type="checkbox"].checkbox-no-label { margin-top: 10px; } +.radio-group { + .radio-inline + .radio-inline { + margin-left: 0; + } +} + .checkbox-group { .radio-inline + .radio-inline, .checkbox-inline + .checkbox-inline { @@ -984,7 +921,7 @@ input[type="checkbox"].checkbox-no-label { /* Display list actions next to search widget */ .list-actions { text-align: right; - margin-bottom: 20px; + margin-bottom: -34px; .fa-lg { vertical-align: -8%; @@ -1155,6 +1092,7 @@ 
input[type="checkbox"].checkbox-no-label { .icon-job-stopped:before, .icon-job-error:before, .icon-job-canceled:before, + .icon-job-stdout-download-tooltip:before, .icon-job-unreachable:before { content: "\f06a"; } @@ -1192,6 +1130,7 @@ input[type="checkbox"].checkbox-no-label { .icon-job-stopped, .icon-job-error, .icon-job-failed, + .icon-job-stdout-download-tooltip, .icon-job-canceled { color: @red; } @@ -1623,6 +1562,10 @@ a.btn-disabled:hover { /* Sort link styles */ +.list-header-noSort:hover.list-header:hover{ + cursor: default; +} + .list-header:hover { cursor: pointer; } @@ -1685,17 +1628,19 @@ tr td button i { } /* overrides to TB modal */ +.modal-content { + padding: 20px; +} .modal-header { color: @default-interface-txt; - margin: .1em 0; white-space: nowrap; width: 90%; overflow: hidden; text-overflow: ellipsis; width: 100%; border: none; - padding: 12px 14px 0 12px; + padding: 0; } .modal { @@ -1724,8 +1669,19 @@ tr td button i { } .modal-body { - padding: 20px 14px 7px 14px; min-height: 120px; + padding: 20px 0; + + .alert { + padding: 10px; + margin: 0; + word-wrap: break-word; + } + .alert-danger { + background-color: @default-bg; + border: none; + color: @default-interface-txt; + } } #prompt-modal .modal-body { @@ -1737,15 +1693,15 @@ tr td button i { } .modal-footer { - padding: .3em 1em .5em .4em; + padding: 0; border: none; + margin-top: 0; .btn.btn-primary { text-transform: uppercase; background-color: @default-succ; border-color: @default-succ; padding: 5px 15px; - margin: .5em .4em .5em 0; cursor: pointer; &:hover { @@ -1767,8 +1723,7 @@ tr td button i { /* PW progress bar */ -.pw-progress { - margin-top: 10px; +.pw-progress { margin-top: 10px; li { line-height: normal; @@ -1984,10 +1939,16 @@ tr td button i { padding-right: 15px; } + + +} + +// lists.less uses 600px as the breakpoint, doing same for consistency +@media (max-width: 600px) { .list-actions { text-align: left; + margin-bottom: 20px; } - } .nvtooltip { @@ -2266,6 +2227,45 @@ a:hover { font-family: 'Open Sans'; } -.modal-body .alert { - padding: 10px; +.WorkflowBadge{ + background-color: @b7grey; + border-radius: 10px; + color: @default-bg; + display: inline-block; + font-family: 'Open Sans'; + font-weight: bold; + font-style: normal; + font-size: x-small; + height: 14px; + margin-left: 5px; + padding-left: 2px; + width: 14px; +} + +button[disabled], +html input[disabled] { + cursor: not-allowed; +} + +.CodeMirror { + font-family: Monaco, Menlo, Consolas, "Courier New", monospace; +} + +.CodeMirror--disabled .CodeMirror.cm-s-default, +.CodeMirror--disabled .CodeMirror-line { + background-color: #f6f6f6; +} + +.CodeMirror--disabled .CodeMirror-gutter.CodeMirror-lint-markers, +.CodeMirror--disabled .CodeMirror-gutter.CodeMirror-linenumbers { + background-color: #ebebeb; + color: @b7grey; +} + +.CodeMirror--disabled .CodeMirror-lines { + cursor: default; +} + +.CodeMirror--disabled .CodeMirror-cursors { + display: none; } diff --git a/awx/ui/client/legacy-styles/breadcrumbs.less b/awx/ui/client/legacy-styles/breadcrumbs.less index f3bb028e6f..6863c2c211 100644 --- a/awx/ui/client/legacy-styles/breadcrumbs.less +++ b/awx/ui/client/legacy-styles/breadcrumbs.less @@ -9,6 +9,8 @@ * */ +@import "./client/src/shared/branding/colors.less"; + .ansible-breadcrumb { list-style: none; overflow: hidden; @@ -81,4 +83,4 @@ } .ansible-breadcrumb li.active a:before { border-left: 11px solid @grey; -} \ No newline at end of file +} diff --git a/awx/ui/client/legacy-styles/dashboard.less 
b/awx/ui/client/legacy-styles/dashboard.less index 0520417992..87a8126a80 100644 --- a/awx/ui/client/legacy-styles/dashboard.less +++ b/awx/ui/client/legacy-styles/dashboard.less @@ -7,6 +7,8 @@ * */ +@import "./client/src/shared/branding/colors.less"; + .graph-wrapper { width: 100%; diff --git a/awx/ui/client/legacy-styles/forms.less b/awx/ui/client/legacy-styles/forms.less index 9fa16dc0fb..5da836f921 100644 --- a/awx/ui/client/legacy-styles/forms.less +++ b/awx/ui/client/legacy-styles/forms.less @@ -7,7 +7,7 @@ * */ -@import "awx/ui/client/src/shared/branding/colors.default.less"; +@import "./client/src/shared/branding/colors.default.less"; .noselect { -webkit-touch-callout: none; /* iOS Safari */ @@ -44,11 +44,10 @@ color: @list-header-txt; font-size: 14px; font-weight: bold; - padding-bottom: 25px; - min-height: 45px; word-break: break-all; max-width: 90%; word-wrap: break-word; + margin-bottom: 20px; } .Form-secondaryTitle{ @@ -184,6 +183,7 @@ .Form-formGroup--fullWidth { max-width: none !important; width: 100% !important; + padding-right: 0px !important; } .Form-formGroup--checkbox{ @@ -245,13 +245,13 @@ .Form-textArea{ border-radius: 5px; color: @field-input-text; - background-color: @field-secondary-bg!important; + background-color: @field-secondary-bg; width:100%!important; } .Form-textInput{ height: 30px; - background-color: @field-secondary-bg!important; + background-color: @field-secondary-bg; border-radius: 5px; border:1px solid @field-border; color: @field-input-text; @@ -553,19 +553,24 @@ input[type='radio']:checked:before { color: @btn-txt; } -.Form-surveyButton { +.Form-primaryButton { background-color: @default-link; color: @default-bg; text-transform: uppercase; padding-left:15px; padding-right: 15px; + margin-right: 20px; } -.Form-surveyButton:hover{ +.Form-primaryButton:hover { background-color: @default-link-hov; color: @default-bg; } +.Form-primaryButton.Form-tab--disabled:hover { + background-color: @default-link; +} + .Form-formGroup--singleColumn { width: 100% !important; padding-right: 0px; diff --git a/awx/ui/client/legacy-styles/jPushMenu.less b/awx/ui/client/legacy-styles/jPushMenu.less index d90f768442..6ca4108fe8 100644 --- a/awx/ui/client/legacy-styles/jPushMenu.less +++ b/awx/ui/client/legacy-styles/jPushMenu.less @@ -6,6 +6,9 @@ * Custom styles for slideout menu * */ + +@import "./client/src/shared/branding/colors.less"; + .cbp-spmenu { background: #E8E8E8; position: fixed; @@ -179,4 +182,4 @@ .cbp-spmenu-push-toright { left: 190px; } -} \ No newline at end of file +} diff --git a/awx/ui/client/legacy-styles/job-details.less b/awx/ui/client/legacy-styles/job-details.less index 6f19e70fe8..ed9b96fb57 100644 --- a/awx/ui/client/legacy-styles/job-details.less +++ b/awx/ui/client/legacy-styles/job-details.less @@ -7,6 +7,9 @@ * */ +@import "./client/src/shared/branding/colors.less"; + + @failed-hosts-color: @red; @successful-hosts-color: @green; @changed-hosts-color: @changed; diff --git a/awx/ui/client/legacy-styles/jobs.less b/awx/ui/client/legacy-styles/jobs.less index a3b403c44c..86e14926e1 100644 --- a/awx/ui/client/legacy-styles/jobs.less +++ b/awx/ui/client/legacy-styles/jobs.less @@ -7,6 +7,8 @@ * */ +@import "./client/src/shared/branding/colors.less"; + #jobs-page { @@ -17,13 +19,7 @@ } .job-list { - .pagination li { - } - .pagination li a { - font-size: 12px; - padding: 3px 6px; - } i[class*="icon-job-"] { font-size: 13px; } diff --git a/awx/ui/client/legacy-styles/jquery-ui-overrides.less b/awx/ui/client/legacy-styles/jquery-ui-overrides.less 
index 53d29ddc16..4f79854c2f 100644 --- a/awx/ui/client/legacy-styles/jquery-ui-overrides.less +++ b/awx/ui/client/legacy-styles/jquery-ui-overrides.less @@ -8,7 +8,8 @@ * */ -@import "awx/ui/client/src/shared/branding/colors.default.less"; +@import "./client/src/shared/branding/colors.less"; +@import "./client/src/shared/branding/colors.default.less"; table.ui-datepicker-calendar { background-color: @well; diff --git a/awx/ui/client/legacy-styles/lists.less b/awx/ui/client/legacy-styles/lists.less index cd486376f9..8807fc5f93 100644 --- a/awx/ui/client/legacy-styles/lists.less +++ b/awx/ui/client/legacy-styles/lists.less @@ -7,7 +7,7 @@ * */ - @import "awx/ui/client/src/shared/branding/colors.default.less"; + @import "./client/src/shared/branding/colors.default.less"; table, tbody { @@ -43,7 +43,7 @@ table, tbody { border-top-right-radius: 5px; } -.List-tableHeader--actions { +.List-tableHeader--info, .List-tableHeader--actions { text-align: right; } @@ -116,44 +116,6 @@ table, tbody { margin-left: 15px; } -/* -- Pagination -- */ -.List-pagination { - margin-top: 20px; - font-size: 12px; - color: @list-pagin-text; - text-transform: uppercase; - height: 22px; - display: flex; -} - -.List-paginationPagerHolder { - display: flex; - flex: 1 0 auto; -} - -.List-paginationPager { - display: flex; -} - -.List-paginationPager--pageof { - line-height: 22px; - margin-left: 10px; -} - -.List-paginationPager--item { - border-color: @list-pagin-bord; -} - -.List-paginationPager--active { - border-color: @list-pagin-bord-act!important; - background-color: @list-pagin-bg-act!important; -} - -.List-paginationItemsOf { - display: flex; - justify-content: flex-end; -} - .List-header { display: flex; min-height: 34px; @@ -191,10 +153,13 @@ table, tbody { .List-actionHolder { justify-content: flex-end; display: flex; + // margin-bottom: 20px; + // float: right; } .List-actions { display: flex; + margin-bottom: -32px; } .List-auxAction { @@ -205,7 +170,7 @@ table, tbody { } .List-auxActionStream { - width: 175px; + width: 200px; } .List-action:not(.ng-hide) ~ .List-action:not(.ng-hide) { @@ -313,6 +278,7 @@ table, tbody { } .List-noItems { + margin-top: 52px; display: flex; align-items: center; justify-content: center; @@ -325,6 +291,9 @@ table, tbody { text-transform: uppercase; } +.modal-body > .List-noItems { + margin-top: 0px; +} .List-editButton--selected { background-color: @list-actn-bg-hov !important; color: @list-actn-icn-hov; @@ -366,6 +335,10 @@ table, tbody { padding-left: 10px!important; } +.List-staticColumnAdjacent--monospace { + font-family: monospace; +} + .List-titleLockup { margin-left: 4px; margin-right: 6px; @@ -395,6 +368,47 @@ table, tbody { cursor: not-allowed; } +.List-dropdownButton { + border: none; +} + +.List-dropdownSuccess { + background-color: @submit-button-bg; + color: @submit-button-text; + border-color: @submit-button-bg-hov; +} + +.List-dropdownSuccess:hover, +.List-dropdownSuccess:focus { + color: @submit-button-text; + background-color: @submit-button-bg-hov; +} + +.List-dropdownCarat { + display: inline-block; + width: 0; + height: 0; + vertical-align: middle; + border-top: 4px dashed; + border-right: 4px solid transparent; + border-left: 4px solid transparent; +} + +.List-infoCell { + display: flex; + justify-content: flex-end; + font-size: 0.8em; + cursor: pointer; +} + +.List-infoCell a { + color: @default-icon; +} + +.List-infoCell a:hover, .List-infoCell a:focus { + color: @default-interface-txt; +} + @media (max-width: 991px) { .List-searchWidget + 
.List-searchWidget { margin-top: 20px; @@ -412,7 +426,51 @@ table, tbody { flex: 1 0 auto; margin-top: 12px; } + .List-actions { + margin-bottom: 20px; + } .List-well { margin-top: 20px; } + .List-action:not(.ng-hide) ~ .List-action:not(.ng-hide) { + margin-left: 0; + } } + +.InventoryManage-container, .modal-body { + .List-header { + flex-direction: column; + align-items: stretch; + } + .List-actionHolder { + justify-content: flex-start; + align-items: center; + flex: 1 0 auto; + margin-top: 12px; + } + .List-actions { + margin-bottom: 20px; + } + .List-well { + margin-top: 20px; + } + .List-action:not(.ng-hide) ~ .List-action:not(.ng-hide) { + margin-left: 0; + } +} + +// Inventory Manage exceptions +.InventoryManage-container { + .List-actionHolder { + justify-content: flex-end; + margin-top: -52px; + } + .List-action button { + margin-left: 12px; + } + .SmartSearch-searchTermContainer { + width: 100%; + } +} + + diff --git a/awx/ui/client/legacy-styles/main-layout.less b/awx/ui/client/legacy-styles/main-layout.less index 2aa3491b05..99c865d737 100644 --- a/awx/ui/client/legacy-styles/main-layout.less +++ b/awx/ui/client/legacy-styles/main-layout.less @@ -7,8 +7,8 @@ * */ -@import "src/shared/branding/colors.less"; -@import "src/shared/branding/colors.default.less"; +@import "./client/src/shared/branding/colors.less"; +@import "./client/src/shared/branding/colors.default.less"; html { height: 100%; } @@ -21,6 +21,7 @@ body { padding-bottom: 50px; position: relative; background-color: @default-secondary-bg; + padding-top: 96px; } .container-fluid { @@ -61,7 +62,7 @@ body { } #content-container { - padding-bottom: 40px; + padding-bottom: 0px; } .group-breadcrumbs { diff --git a/awx/ui/client/legacy-styles/stdout.less b/awx/ui/client/legacy-styles/stdout.less index 7d15a11c53..822c10e759 100644 --- a/awx/ui/client/legacy-styles/stdout.less +++ b/awx/ui/client/legacy-styles/stdout.less @@ -5,7 +5,8 @@ * */ - @import "awx/ui/client/src/shared/branding/colors.default.less"; + @import "./client/src/shared/branding/colors.default.less"; + @import "./client/src/shared/branding/colors.less"; #jobs-stdout { margin-bottom: 20px; @@ -45,7 +46,7 @@ .ansi3 { font-weight: italic; } .ansi4 { text-decoration: underline; } .ansi9 { text-decoration: line-through; } -.ansi30 { color: @default-stdout-txt; } +.ansi30 { color: @default-data-txt; } .ansi31 { color: @default-err; } .ansi1.ansi31 { color: @default-unreachable; diff --git a/awx/ui/client/legacy-styles/text-label.less b/awx/ui/client/legacy-styles/text-label.less index 57d50f3f8b..d4805c7459 100644 --- a/awx/ui/client/legacy-styles/text-label.less +++ b/awx/ui/client/legacy-styles/text-label.less @@ -1,4 +1,4 @@ -@import "src/shared/branding/colors.default.less"; +@import "./client/src/shared/branding/colors.default.less"; .host-disabled-label { &:after { diff --git a/awx/ui/client/lib/angular-animate/.bower.json b/awx/ui/client/lib/angular-animate/.bower.json deleted file mode 100644 index fd1a3db81b..0000000000 --- a/awx/ui/client/lib/angular-animate/.bower.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "angular-animate", - "version": "1.4.8", - "main": "./angular-animate.js", - "ignore": [], - "dependencies": { - "angular": "1.4.8" - }, - "homepage": "https://github.com/angular/bower-angular-animate", - "_release": "1.4.8", - "_resolution": { - "type": "version", - "tag": "v1.4.8", - "commit": "cc1d9740059f5e8fd43abf0e2e80695d43b3b6b1" - }, - "_source": "git://github.com/angular/bower-angular-animate.git", - "_target": "~1.4.8", - 
"_originalSource": "angular-animate" -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-animate/README.md b/awx/ui/client/lib/angular-animate/README.md deleted file mode 100644 index 8313da67c3..0000000000 --- a/awx/ui/client/lib/angular-animate/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# packaged angular-animate - -This repo is for distribution on `npm` and `bower`. The source for this module is in the -[main AngularJS repo](https://github.com/angular/angular.js/tree/master/src/ngAnimate). -Please file issues and pull requests against that repo. - -## Install - -You can install this package either with `npm` or with `bower`. - -### npm - -```shell -npm install angular-animate -``` - -Then add `ngAnimate` as a dependency for your app: - -```javascript -angular.module('myApp', [require('angular-animate')]); -``` - -### bower - -```shell -bower install angular-animate -``` - -Then add a ` -``` - -Then add `ngAnimate` as a dependency for your app: - -```javascript -angular.module('myApp', ['ngAnimate']); -``` - -## Documentation - -Documentation is available on the -[AngularJS docs site](http://docs.angularjs.org/api/ngAnimate). - -## License - -The MIT License - -Copyright (c) 2010-2015 Google, Inc. http://angularjs.org - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/awx/ui/client/lib/angular-animate/angular-animate.js b/awx/ui/client/lib/angular-animate/angular-animate.js deleted file mode 100644 index 30610044a9..0000000000 --- a/awx/ui/client/lib/angular-animate/angular-animate.js +++ /dev/null @@ -1,3930 +0,0 @@ -/** - * @license AngularJS v1.4.8 - * (c) 2010-2015 Google, Inc. http://angularjs.org - * License: MIT - */ -(function(window, angular, undefined) {'use strict'; - -/* jshint ignore:start */ -var noop = angular.noop; -var extend = angular.extend; -var jqLite = angular.element; -var forEach = angular.forEach; -var isArray = angular.isArray; -var isString = angular.isString; -var isObject = angular.isObject; -var isUndefined = angular.isUndefined; -var isDefined = angular.isDefined; -var isFunction = angular.isFunction; -var isElement = angular.isElement; - -var ELEMENT_NODE = 1; -var COMMENT_NODE = 8; - -var ADD_CLASS_SUFFIX = '-add'; -var REMOVE_CLASS_SUFFIX = '-remove'; -var EVENT_CLASS_PREFIX = 'ng-'; -var ACTIVE_CLASS_SUFFIX = '-active'; - -var NG_ANIMATE_CLASSNAME = 'ng-animate'; -var NG_ANIMATE_CHILDREN_DATA = '$$ngAnimateChildren'; - -// Detect proper transitionend/animationend event names. 
-var CSS_PREFIX = '', TRANSITION_PROP, TRANSITIONEND_EVENT, ANIMATION_PROP, ANIMATIONEND_EVENT; - -// If unprefixed events are not supported but webkit-prefixed are, use the latter. -// Otherwise, just use W3C names, browsers not supporting them at all will just ignore them. -// Note: Chrome implements `window.onwebkitanimationend` and doesn't implement `window.onanimationend` -// but at the same time dispatches the `animationend` event and not `webkitAnimationEnd`. -// Register both events in case `window.onanimationend` is not supported because of that, -// do the same for `transitionend` as Safari is likely to exhibit similar behavior. -// Also, the only modern browser that uses vendor prefixes for transitions/keyframes is webkit -// therefore there is no reason to test anymore for other vendor prefixes: -// http://caniuse.com/#search=transition -if (isUndefined(window.ontransitionend) && isDefined(window.onwebkittransitionend)) { - CSS_PREFIX = '-webkit-'; - TRANSITION_PROP = 'WebkitTransition'; - TRANSITIONEND_EVENT = 'webkitTransitionEnd transitionend'; -} else { - TRANSITION_PROP = 'transition'; - TRANSITIONEND_EVENT = 'transitionend'; -} - -if (isUndefined(window.onanimationend) && isDefined(window.onwebkitanimationend)) { - CSS_PREFIX = '-webkit-'; - ANIMATION_PROP = 'WebkitAnimation'; - ANIMATIONEND_EVENT = 'webkitAnimationEnd animationend'; -} else { - ANIMATION_PROP = 'animation'; - ANIMATIONEND_EVENT = 'animationend'; -} - -var DURATION_KEY = 'Duration'; -var PROPERTY_KEY = 'Property'; -var DELAY_KEY = 'Delay'; -var TIMING_KEY = 'TimingFunction'; -var ANIMATION_ITERATION_COUNT_KEY = 'IterationCount'; -var ANIMATION_PLAYSTATE_KEY = 'PlayState'; -var SAFE_FAST_FORWARD_DURATION_VALUE = 9999; - -var ANIMATION_DELAY_PROP = ANIMATION_PROP + DELAY_KEY; -var ANIMATION_DURATION_PROP = ANIMATION_PROP + DURATION_KEY; -var TRANSITION_DELAY_PROP = TRANSITION_PROP + DELAY_KEY; -var TRANSITION_DURATION_PROP = TRANSITION_PROP + DURATION_KEY; - -var isPromiseLike = function(p) { - return p && p.then ? true : false; -}; - -function assertArg(arg, name, reason) { - if (!arg) { - throw ngMinErr('areq', "Argument '{0}' is {1}", (name || '?'), (reason || "required")); - } - return arg; -} - -function mergeClasses(a,b) { - if (!a && !b) return ''; - if (!a) return b; - if (!b) return a; - if (isArray(a)) a = a.join(' '); - if (isArray(b)) b = b.join(' '); - return a + ' ' + b; -} - -function packageStyles(options) { - var styles = {}; - if (options && (options.to || options.from)) { - styles.to = options.to; - styles.from = options.from; - } - return styles; -} - -function pendClasses(classes, fix, isPrefix) { - var className = ''; - classes = isArray(classes) - ? classes - : classes && isString(classes) && classes.length - ? classes.split(/\s+/) - : []; - forEach(classes, function(klass, i) { - if (klass && klass.length > 0) { - className += (i > 0) ? ' ' : ''; - className += isPrefix ? fix + klass - : klass + fix; - } - }); - return className; -} - -function removeFromArray(arr, val) { - var index = arr.indexOf(val); - if (val >= 0) { - arr.splice(index, 1); - } -} - -function stripCommentsFromElement(element) { - if (element instanceof jqLite) { - switch (element.length) { - case 0: - return []; - break; - - case 1: - // there is no point of stripping anything if the element - // is the only element within the jqLite wrapper. - // (it's important that we retain the element instance.) 
- if (element[0].nodeType === ELEMENT_NODE) { - return element; - } - break; - - default: - return jqLite(extractElementNode(element)); - break; - } - } - - if (element.nodeType === ELEMENT_NODE) { - return jqLite(element); - } -} - -function extractElementNode(element) { - if (!element[0]) return element; - for (var i = 0; i < element.length; i++) { - var elm = element[i]; - if (elm.nodeType == ELEMENT_NODE) { - return elm; - } - } -} - -function $$addClass($$jqLite, element, className) { - forEach(element, function(elm) { - $$jqLite.addClass(elm, className); - }); -} - -function $$removeClass($$jqLite, element, className) { - forEach(element, function(elm) { - $$jqLite.removeClass(elm, className); - }); -} - -function applyAnimationClassesFactory($$jqLite) { - return function(element, options) { - if (options.addClass) { - $$addClass($$jqLite, element, options.addClass); - options.addClass = null; - } - if (options.removeClass) { - $$removeClass($$jqLite, element, options.removeClass); - options.removeClass = null; - } - } -} - -function prepareAnimationOptions(options) { - options = options || {}; - if (!options.$$prepared) { - var domOperation = options.domOperation || noop; - options.domOperation = function() { - options.$$domOperationFired = true; - domOperation(); - domOperation = noop; - }; - options.$$prepared = true; - } - return options; -} - -function applyAnimationStyles(element, options) { - applyAnimationFromStyles(element, options); - applyAnimationToStyles(element, options); -} - -function applyAnimationFromStyles(element, options) { - if (options.from) { - element.css(options.from); - options.from = null; - } -} - -function applyAnimationToStyles(element, options) { - if (options.to) { - element.css(options.to); - options.to = null; - } -} - -function mergeAnimationOptions(element, target, newOptions) { - var toAdd = (target.addClass || '') + ' ' + (newOptions.addClass || ''); - var toRemove = (target.removeClass || '') + ' ' + (newOptions.removeClass || ''); - var classes = resolveElementClasses(element.attr('class'), toAdd, toRemove); - - if (newOptions.preparationClasses) { - target.preparationClasses = concatWithSpace(newOptions.preparationClasses, target.preparationClasses); - delete newOptions.preparationClasses; - } - - // noop is basically when there is no callback; otherwise something has been set - var realDomOperation = target.domOperation !== noop ? target.domOperation : null; - - extend(target, newOptions); - - // TODO(matsko or sreeramu): proper fix is to maintain all animation callback in array and call at last,but now only leave has the callback so no issue with this. - if (realDomOperation) { - target.domOperation = realDomOperation; - } - - if (classes.addClass) { - target.addClass = classes.addClass; - } else { - target.addClass = null; - } - - if (classes.removeClass) { - target.removeClass = classes.removeClass; - } else { - target.removeClass = null; - } - - return target; -} - -function resolveElementClasses(existing, toAdd, toRemove) { - var ADD_CLASS = 1; - var REMOVE_CLASS = -1; - - var flags = {}; - existing = splitClassesToLookup(existing); - - toAdd = splitClassesToLookup(toAdd); - forEach(toAdd, function(value, key) { - flags[key] = ADD_CLASS; - }); - - toRemove = splitClassesToLookup(toRemove); - forEach(toRemove, function(value, key) { - flags[key] = flags[key] === ADD_CLASS ? 
null : REMOVE_CLASS; - }); - - var classes = { - addClass: '', - removeClass: '' - }; - - forEach(flags, function(val, klass) { - var prop, allow; - if (val === ADD_CLASS) { - prop = 'addClass'; - allow = !existing[klass]; - } else if (val === REMOVE_CLASS) { - prop = 'removeClass'; - allow = existing[klass]; - } - if (allow) { - if (classes[prop].length) { - classes[prop] += ' '; - } - classes[prop] += klass; - } - }); - - function splitClassesToLookup(classes) { - if (isString(classes)) { - classes = classes.split(' '); - } - - var obj = {}; - forEach(classes, function(klass) { - // sometimes the split leaves empty string values - // incase extra spaces were applied to the options - if (klass.length) { - obj[klass] = true; - } - }); - return obj; - } - - return classes; -} - -function getDomNode(element) { - return (element instanceof angular.element) ? element[0] : element; -} - -function applyGeneratedPreparationClasses(element, event, options) { - var classes = ''; - if (event) { - classes = pendClasses(event, EVENT_CLASS_PREFIX, true); - } - if (options.addClass) { - classes = concatWithSpace(classes, pendClasses(options.addClass, ADD_CLASS_SUFFIX)); - } - if (options.removeClass) { - classes = concatWithSpace(classes, pendClasses(options.removeClass, REMOVE_CLASS_SUFFIX)); - } - if (classes.length) { - options.preparationClasses = classes; - element.addClass(classes); - } -} - -function clearGeneratedClasses(element, options) { - if (options.preparationClasses) { - element.removeClass(options.preparationClasses); - options.preparationClasses = null; - } - if (options.activeClasses) { - element.removeClass(options.activeClasses); - options.activeClasses = null; - } -} - -function blockTransitions(node, duration) { - // we use a negative delay value since it performs blocking - // yet it doesn't kill any existing transitions running on the - // same element which makes this safe for class-based animations - var value = duration ? '-' + duration + 's' : ''; - applyInlineStyle(node, [TRANSITION_DELAY_PROP, value]); - return [TRANSITION_DELAY_PROP, value]; -} - -function blockKeyframeAnimations(node, applyBlock) { - var value = applyBlock ? 'paused' : ''; - var key = ANIMATION_PROP + ANIMATION_PLAYSTATE_KEY; - applyInlineStyle(node, [key, value]); - return [key, value]; -} - -function applyInlineStyle(node, styleTuple) { - var prop = styleTuple[0]; - var value = styleTuple[1]; - node.style[prop] = value; -} - -function concatWithSpace(a,b) { - if (!a) return b; - if (!b) return a; - return a + ' ' + b; -} - -var $$rAFSchedulerFactory = ['$$rAF', function($$rAF) { - var queue, cancelFn; - - function scheduler(tasks) { - // we make a copy since RAFScheduler mutates the state - // of the passed in array variable and this would be difficult - // to track down on the outside code - queue = queue.concat(tasks); - nextTick(); - } - - queue = scheduler.queue = []; - - /* waitUntilQuiet does two things: - * 1. It will run the FINAL `fn` value only when an uncancelled RAF has passed through - * 2. It will delay the next wave of tasks from running until the quiet `fn` has run. - * - * The motivation here is that animation code can request more time from the scheduler - * before the next wave runs. This allows for certain DOM properties such as classes to - * be resolved in time for the next animation to run. 
- */ - scheduler.waitUntilQuiet = function(fn) { - if (cancelFn) cancelFn(); - - cancelFn = $$rAF(function() { - cancelFn = null; - fn(); - nextTick(); - }); - }; - - return scheduler; - - function nextTick() { - if (!queue.length) return; - - var items = queue.shift(); - for (var i = 0; i < items.length; i++) { - items[i](); - } - - if (!cancelFn) { - $$rAF(function() { - if (!cancelFn) nextTick(); - }); - } - } -}]; - -var $$AnimateChildrenDirective = [function() { - return function(scope, element, attrs) { - var val = attrs.ngAnimateChildren; - if (angular.isString(val) && val.length === 0) { //empty attribute - element.data(NG_ANIMATE_CHILDREN_DATA, true); - } else { - attrs.$observe('ngAnimateChildren', function(value) { - value = value === 'on' || value === 'true'; - element.data(NG_ANIMATE_CHILDREN_DATA, value); - }); - } - }; -}]; - -var ANIMATE_TIMER_KEY = '$$animateCss'; - -/** - * @ngdoc service - * @name $animateCss - * @kind object - * - * @description - * The `$animateCss` service is a useful utility to trigger customized CSS-based transitions/keyframes - * from a JavaScript-based animation or directly from a directive. The purpose of `$animateCss` is NOT - * to side-step how `$animate` and ngAnimate work, but the goal is to allow pre-existing animations or - * directives to create more complex animations that can be purely driven using CSS code. - * - * Note that only browsers that support CSS transitions and/or keyframe animations are capable of - * rendering animations triggered via `$animateCss` (bad news for IE9 and lower). - * - * ## Usage - * Once again, `$animateCss` is designed to be used inside of a registered JavaScript animation that - * is powered by ngAnimate. It is possible to use `$animateCss` directly inside of a directive, however, - * any automatic control over cancelling animations and/or preventing animations from being run on - * child elements will not be handled by Angular. For this to work as expected, please use `$animate` to - * trigger the animation and then setup a JavaScript animation that injects `$animateCss` to trigger - * the CSS animation. - * - * The example below shows how we can create a folding animation on an element using `ng-if`: - * - * ```html - * - *
    - * This element will go BOOM - *
    - * - * ``` - * - * Now we create the **JavaScript animation** that will trigger the CSS transition: - * - * ```js - * ngModule.animation('.fold-animation', ['$animateCss', function($animateCss) { - * return { - * enter: function(element, doneFn) { - * var height = element[0].offsetHeight; - * return $animateCss(element, { - * from: { height:'0px' }, - * to: { height:height + 'px' }, - * duration: 1 // one second - * }); - * } - * } - * }]); - * ``` - * - * ## More Advanced Uses - * - * `$animateCss` is the underlying code that ngAnimate uses to power **CSS-based animations** behind the scenes. Therefore CSS hooks - * like `.ng-EVENT`, `.ng-EVENT-active`, `.ng-EVENT-stagger` are all features that can be triggered using `$animateCss` via JavaScript code. - * - * This also means that just about any combination of adding classes, removing classes, setting styles, dynamically setting a keyframe animation, - * applying a hardcoded duration or delay value, changing the animation easing or applying a stagger animation are all options that work with - * `$animateCss`. The service itself is smart enough to figure out the combination of options and examine the element styling properties in order - * to provide a working animation that will run in CSS. - * - * The example below showcases a more advanced version of the `.fold-animation` from the example above: - * - * ```js - * ngModule.animation('.fold-animation', ['$animateCss', function($animateCss) { - * return { - * enter: function(element, doneFn) { - * var height = element[0].offsetHeight; - * return $animateCss(element, { - * addClass: 'red large-text pulse-twice', - * easing: 'ease-out', - * from: { height:'0px' }, - * to: { height:height + 'px' }, - * duration: 1 // one second - * }); - * } - * } - * }]); - * ``` - * - * Since we're adding/removing CSS classes then the CSS transition will also pick those up: - * - * ```css - * /* since a hardcoded duration value of 1 was provided in the JavaScript animation code, - * the CSS classes below will be transitioned despite them being defined as regular CSS classes */ - * .red { background:red; } - * .large-text { font-size:20px; } - * - * /* we can also use a keyframe animation and $animateCss will make it work alongside the transition */ - * .pulse-twice { - * animation: 0.5s pulse linear 2; - * -webkit-animation: 0.5s pulse linear 2; - * } - * - * @keyframes pulse { - * from { transform: scale(0.5); } - * to { transform: scale(1.5); } - * } - * - * @-webkit-keyframes pulse { - * from { -webkit-transform: scale(0.5); } - * to { -webkit-transform: scale(1.5); } - * } - * ``` - * - * Given this complex combination of CSS classes, styles and options, `$animateCss` will figure everything out and make the animation happen. - * - * ## How the Options are handled - * - * `$animateCss` is very versatile and intelligent when it comes to figuring out what configurations to apply to the element to ensure the animation - * works with the options provided. Say for example we were adding a class that contained a keyframe value and we wanted to also animate some inline - * styles using the `from` and `to` properties. 
- * - * ```js - * var animator = $animateCss(element, { - * addClass: 'rotating-animation', - * from: { background:'red' }, - * to: { background:'blue' } - * }); - * animator.start(); - * ``` - * - * ```css - * .rotating-animation { - * animation:0.5s rotate linear; - * -webkit-animation:0.5s rotate linear; - * } - * - * @keyframes rotate { - * from { transform: rotate(0deg); } - * to { transform: rotate(360deg); } - * } - * - * @-webkit-keyframes rotate { - * from { -webkit-transform: rotate(0deg); } - * to { -webkit-transform: rotate(360deg); } - * } - * ``` - * - * The missing pieces here are that we do not have a transition set (within the CSS code nor within the `$animateCss` options) and the duration of the animation is - * going to be detected from what the keyframe styles on the CSS class are. In this event, `$animateCss` will automatically create an inline transition - * style matching the duration detected from the keyframe style (which is present in the CSS class that is being added) and then prepare both the transition - * and keyframe animations to run in parallel on the element. Then when the animation is underway the provided `from` and `to` CSS styles will be applied - * and spread across the transition and keyframe animation. - * - * ## What is returned - * - * `$animateCss` works in two stages: a preparation phase and an animation phase. Therefore when `$animateCss` is first called it will NOT actually - * start the animation. All that is going on here is that the element is being prepared for the animation (which means that the generated CSS classes are - * added and removed on the element). Once `$animateCss` is called it will return an object with the following properties: - * - * ```js - * var animator = $animateCss(element, { ... }); - * ``` - * - * Now what do the contents of our `animator` variable look like: - * - * ```js - * { - * // starts the animation - * start: Function, - * - * // ends (aborts) the animation - * end: Function - * } - * ``` - * - * To actually start the animation we need to run `animation.start()` which will then return a promise that we can hook into to detect when the animation ends. - * If we choose not to run the animation then we MUST run `animation.end()` to perform a cleanup on the element (since some CSS classes and styles may have been - * applied to the element during the preparation phase). Note that all other properties such as duration, delay, transitions and keyframes are just properties - * and that changing them will not reconfigure the parameters of the animation. - * - * ### runner.done() vs runner.then() - * It is documented that `animation.start()` will return a promise object and this is true, however, there is also an additional method available on the - * runner called `.done(callbackFn)`. The done method works the same as `.finally(callbackFn)`, however, it does **not trigger a digest to occur**. - * Therefore, for performance reasons, it's always best to use `runner.done(callback)` instead of `runner.then()`, `runner.catch()` or `runner.finally()` - * unless you really need a digest to kick off afterwards. - * - * Keep in mind that, to make this easier, ngAnimate has tweaked the JS animations API to recognize when a runner instance is returned from $animateCss - * (so there is no need to call `runner.done(doneFn)` inside of your JavaScript animation code). - * Check the {@link ngAnimate.$animateCss#usage animation code above} to see how this works.
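(Editor's aside: a minimal usage sketch, not part of the original ngdoc, tying the preparation/start split and the `runner.done()` guidance above together. The `ngModule` reference, the `.fade-animation` class and the option values are assumed for illustration only.)

```js
// Hypothetical usage of the two-stage $animateCss API from inside a
// registered JS animation. All names here are illustrative.
ngModule.animation('.fade-animation', ['$animateCss', function($animateCss) {
  return {
    enter: function(element, doneFn) {
      // preparation phase: classes/styles are staged, nothing runs yet
      var animator = $animateCss(element, {
        from: { opacity: 0 },
        to: { opacity: 1 },
        duration: 0.5 // half a second
      });

      // if no transition/keyframe was detected we must still clean up
      // the preparation classes and styles that were applied
      if (!animator.$$willAnimate) {
        animator.end();
        doneFn();
        return;
      }

      // animation phase: start() returns the runner (a promise-like object)
      var runner = animator.start();

      // prefer done() over then()/finally() so no digest is triggered
      runner.done(doneFn);
    }
  };
}]);
```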
- * - * @param {DOMElement} element the element that will be animated - * @param {object} options the animation-related options that will be applied during the animation - * - * * `event` - The DOM event (e.g. enter, leave, move). When used, a generated CSS class of `ng-EVENT` and `ng-EVENT-active` will be applied - * to the element during the animation. Multiple events can be provided when spaces are used as a separator. (Note that this will not perform any DOM operation.) - * * `structural` - Indicates that the `ng-` prefix will be added to the event class. Setting to `false` or omitting will turn `ng-EVENT` and - * `ng-EVENT-active` into `EVENT` and `EVENT-active`. Unused if `event` is omitted. - * * `easing` - The CSS easing value that will be applied to the transition or keyframe animation (or both). - * * `transitionStyle` - The raw CSS transition style that will be used (e.g. `1s linear all`). - * * `keyframeStyle` - The raw CSS keyframe animation style that will be used (e.g. `1s my_animation linear`). - * * `from` - The starting CSS styles (a key/value object) that will be applied at the start of the animation. - * * `to` - The ending CSS styles (a key/value object) that will be applied across the animation via a CSS transition. - * * `addClass` - A space separated list of CSS classes that will be added to the element and spread across the animation. - * * `removeClass` - A space separated list of CSS classes that will be removed from the element and spread across the animation. - * * `duration` - A number value representing the total duration of the transition and/or keyframe (note that a value of 1 is 1000ms). If a value of `0` - * is provided then the animation will be skipped entirely. - * * `delay` - A number value representing the total delay of the transition and/or keyframe (note that a value of 1 is 1000ms). If a value of `true` is - * used then whatever delay value is detected from the CSS classes will be mirrored on the element's styles (e.g. by setting delay true then the style value - * of the element will be `transition-delay: DETECTED_VALUE`). Using `true` is useful when you want the CSS classes and inline styles to all share the same - * CSS delay value. - * * `stagger` - A numeric time value representing the delay between successively animated elements - * ({@link ngAnimate#css-staggering-animations Click here to learn how CSS-based staggering works in ngAnimate.}) - * * `staggerIndex` - The numeric index representing the stagger item (e.g. a value of 5 is equal to the sixth item in the stagger; therefore when a - * `stagger` option value of `0.1` is used then there will be a stagger delay of `600ms`) - * * `applyClassesEarly` - Whether or not the classes being added or removed will be used when detecting the animation. This is set by `$animate` when enter/leave/move animations are fired to ensure that the CSS classes are resolved in time. (Note that this will prevent any transitions from occurring on the classes being added and removed.) - * * `cleanupStyles` - Whether or not the provided `from` and `to` styles will be removed once - * the animation is closed. This is useful for when the styles are used purely for the sake of - * the animation and do not have a lasting visual effect on the element (e.g. a collapse and open animation). - * By default this value is set to `false`. - * - * @return {object} an object with start and end methods and details about the animation. - * - * * `start` - The method to start the animation.
This will return a `Promise` when called. - * * `end` - This method will cancel the animation and remove all applied CSS classes and styles. - */ -var ONE_SECOND = 1000; -var BASE_TEN = 10; - -var ELAPSED_TIME_MAX_DECIMAL_PLACES = 3; -var CLOSING_TIME_BUFFER = 1.5; - -var DETECT_CSS_PROPERTIES = { - transitionDuration: TRANSITION_DURATION_PROP, - transitionDelay: TRANSITION_DELAY_PROP, - transitionProperty: TRANSITION_PROP + PROPERTY_KEY, - animationDuration: ANIMATION_DURATION_PROP, - animationDelay: ANIMATION_DELAY_PROP, - animationIterationCount: ANIMATION_PROP + ANIMATION_ITERATION_COUNT_KEY -}; - -var DETECT_STAGGER_CSS_PROPERTIES = { - transitionDuration: TRANSITION_DURATION_PROP, - transitionDelay: TRANSITION_DELAY_PROP, - animationDuration: ANIMATION_DURATION_PROP, - animationDelay: ANIMATION_DELAY_PROP -}; - -function getCssKeyframeDurationStyle(duration) { - return [ANIMATION_DURATION_PROP, duration + 's']; -} - -function getCssDelayStyle(delay, isKeyframeAnimation) { - var prop = isKeyframeAnimation ? ANIMATION_DELAY_PROP : TRANSITION_DELAY_PROP; - return [prop, delay + 's']; -} - -function computeCssStyles($window, element, properties) { - var styles = Object.create(null); - var detectedStyles = $window.getComputedStyle(element) || {}; - forEach(properties, function(formalStyleName, actualStyleName) { - var val = detectedStyles[formalStyleName]; - if (val) { - var c = val.charAt(0); - - // only numerical-based values have a negative sign or digit as the first value - if (c === '-' || c === '+' || c >= 0) { - val = parseMaxTime(val); - } - - // by setting this to null in the event that the delay is not set or is set directly as 0 - // then we can still allow for negative values to be used later on and not mistake this - // value for being greater than any other negative value. - if (val === 0) { - val = null; - } - styles[actualStyleName] = val; - } - }); - - return styles; -} - -function parseMaxTime(str) { - var maxValue = 0; - var values = str.split(/\s*,\s*/); - forEach(values, function(value) { - // it's always safe to consider only second values and omit `ms` values since - // getComputedStyle will always handle the conversion for us - if (value.charAt(value.length - 1) == 's') { - value = value.substring(0, value.length - 1); - } - value = parseFloat(value) || 0; - maxValue = maxValue ? Math.max(value, maxValue) : value; - }); - return maxValue; -} - -function truthyTimingValue(val) { - return val === 0 || val != null; -} - -function getCssTransitionDurationStyle(duration, applyOnlyDuration) { - var style = TRANSITION_PROP; - var value = duration + 's'; - if (applyOnlyDuration) { - style += DURATION_KEY; - } else { - value += ' linear all'; - } - return [style, value]; -} - -function createLocalCacheLookup() { - var cache = Object.create(null); - return { - flush: function() { - cache = Object.create(null); - }, - - count: function(key) { - var entry = cache[key]; - return entry ? entry.total : 0; - }, - - get: function(key) { - var entry = cache[key]; - return entry && entry.value; - }, - - put: function(key, value) { - if (!cache[key]) { - cache[key] = { total: 1, value: value }; - } else { - cache[key].total++; - } - } - }; -} - -// we do not reassign an already present style value since -// if we detect the style property value again we may be -// detecting styles that were added via the `from` styles.
-// We make use of `isDefined` here since an empty string -// or null value (which is what getPropertyValue will return -// for a non-existing style) will still be marked as a valid -// value for the style (a falsy value implies that the style -// is to be removed at the end of the animation). If we had a simple -// "OR" statement then it would not be enough to catch that. -function registerRestorableStyles(backup, node, properties) { - forEach(properties, function(prop) { - backup[prop] = isDefined(backup[prop]) - ? backup[prop] - : node.style.getPropertyValue(prop); - }); -} - -var $AnimateCssProvider = ['$animateProvider', function($animateProvider) { - var gcsLookup = createLocalCacheLookup(); - var gcsStaggerLookup = createLocalCacheLookup(); - - this.$get = ['$window', '$$jqLite', '$$AnimateRunner', '$timeout', - '$$forceReflow', '$sniffer', '$$rAFScheduler', '$animate', - function($window, $$jqLite, $$AnimateRunner, $timeout, - $$forceReflow, $sniffer, $$rAFScheduler, $animate) { - - var applyAnimationClasses = applyAnimationClassesFactory($$jqLite); - - var parentCounter = 0; - function gcsHashFn(node, extraClasses) { - var KEY = "$$ngAnimateParentKey"; - var parentNode = node.parentNode; - var parentID = parentNode[KEY] || (parentNode[KEY] = ++parentCounter); - return parentID + '-' + node.getAttribute('class') + '-' + extraClasses; - } - - function computeCachedCssStyles(node, className, cacheKey, properties) { - var timings = gcsLookup.get(cacheKey); - - if (!timings) { - timings = computeCssStyles($window, node, properties); - if (timings.animationIterationCount === 'infinite') { - timings.animationIterationCount = 1; - } - } - - // we keep putting this in multiple times even though the value and the cacheKey are the same - // because we're keeping an internal tally of how many duplicate animations are detected. - gcsLookup.put(cacheKey, timings); - return timings; - } - - function computeCachedCssStaggerStyles(node, className, cacheKey, properties) { - var stagger; - - // if we have one or more existing matches of matching elements - // containing the same parent + CSS styles (which is how cacheKey works) - // then staggering is possible - if (gcsLookup.count(cacheKey) > 0) { - stagger = gcsStaggerLookup.get(cacheKey); - - if (!stagger) { - var staggerClassName = pendClasses(className, '-stagger'); - - $$jqLite.addClass(node, staggerClassName); - - stagger = computeCssStyles($window, node, properties); - - // force the conversion of a null value to zero in case it is not set - stagger.animationDuration = Math.max(stagger.animationDuration, 0); - stagger.transitionDuration = Math.max(stagger.transitionDuration, 0); - - $$jqLite.removeClass(node, staggerClassName); - - gcsStaggerLookup.put(cacheKey, stagger); - } - } - - return stagger || {}; - } - - var cancelLastRAFRequest; - var rafWaitQueue = []; - function waitUntilQuiet(callback) { - rafWaitQueue.push(callback); - $$rAFScheduler.waitUntilQuiet(function() { - gcsLookup.flush(); - gcsStaggerLookup.flush(); - - // DO NOT REMOVE THIS LINE OR REFACTOR OUT THE `pageWidth` variable. - // PLEASE EXAMINE THE `$$forceReflow` service to understand why.
- var pageWidth = $$forceReflow(); - - // we use a for loop to ensure that if the queue is changed - // during this looping then it will consider new requests - for (var i = 0; i < rafWaitQueue.length; i++) { - rafWaitQueue[i](pageWidth); - } - rafWaitQueue.length = 0; - }); - } - - function computeTimings(node, className, cacheKey) { - var timings = computeCachedCssStyles(node, className, cacheKey, DETECT_CSS_PROPERTIES); - var aD = timings.animationDelay; - var tD = timings.transitionDelay; - timings.maxDelay = aD && tD - ? Math.max(aD, tD) - : (aD || tD); - timings.maxDuration = Math.max( - timings.animationDuration * timings.animationIterationCount, - timings.transitionDuration); - - return timings; - } - - return function init(element, options) { - var restoreStyles = {}; - var node = getDomNode(element); - if (!node - || !node.parentNode - || !$animate.enabled()) { - return closeAndReturnNoopAnimator(); - } - - options = prepareAnimationOptions(options); - - var temporaryStyles = []; - var classes = element.attr('class'); - var styles = packageStyles(options); - var animationClosed; - var animationPaused; - var animationCompleted; - var runner; - var runnerHost; - var maxDelay; - var maxDelayTime; - var maxDuration; - var maxDurationTime; - - if (options.duration === 0 || (!$sniffer.animations && !$sniffer.transitions)) { - return closeAndReturnNoopAnimator(); - } - - var method = options.event && isArray(options.event) - ? options.event.join(' ') - : options.event; - - var isStructural = method && options.structural; - var structuralClassName = ''; - var addRemoveClassName = ''; - - if (isStructural) { - structuralClassName = pendClasses(method, EVENT_CLASS_PREFIX, true); - } else if (method) { - structuralClassName = method; - } - - if (options.addClass) { - addRemoveClassName += pendClasses(options.addClass, ADD_CLASS_SUFFIX); - } - - if (options.removeClass) { - if (addRemoveClassName.length) { - addRemoveClassName += ' '; - } - addRemoveClassName += pendClasses(options.removeClass, REMOVE_CLASS_SUFFIX); - } - - // there may be a situation where a structural animation is combined together - // with CSS classes that need to resolve before the animation is computed. - // However this means that there is no explicit CSS code to block the animation - // from happening (by setting 0s none in the class name). If this is the case - // we need to apply the classes before the first rAF so we know to continue if - // there actually is a detected transition or keyframe animation - if (options.applyClassesEarly && addRemoveClassName.length) { - applyAnimationClasses(element, options); - } - - var preparationClasses = [structuralClassName, addRemoveClassName].join(' ').trim(); - var fullClassName = classes + ' ' + preparationClasses; - var activeClasses = pendClasses(preparationClasses, ACTIVE_CLASS_SUFFIX); - var hasToStyles = styles.to && Object.keys(styles.to).length > 0; - var containsKeyframeAnimation = (options.keyframeStyle || '').length > 0; - - // there is no way we can trigger an animation if no styles and - // no classes are being applied which would then trigger a transition, - // unless there is a raw keyframe value that is applied to the element.
- if (!containsKeyframeAnimation - && !hasToStyles - && !preparationClasses) { - return closeAndReturnNoopAnimator(); - } - - var cacheKey, stagger; - if (options.stagger > 0) { - var staggerVal = parseFloat(options.stagger); - stagger = { - transitionDelay: staggerVal, - animationDelay: staggerVal, - transitionDuration: 0, - animationDuration: 0 - }; - } else { - cacheKey = gcsHashFn(node, fullClassName); - stagger = computeCachedCssStaggerStyles(node, preparationClasses, cacheKey, DETECT_STAGGER_CSS_PROPERTIES); - } - - if (!options.$$skipPreparationClasses) { - $$jqLite.addClass(element, preparationClasses); - } - - var applyOnlyDuration; - - if (options.transitionStyle) { - var transitionStyle = [TRANSITION_PROP, options.transitionStyle]; - applyInlineStyle(node, transitionStyle); - temporaryStyles.push(transitionStyle); - } - - if (options.duration >= 0) { - applyOnlyDuration = node.style[TRANSITION_PROP].length > 0; - var durationStyle = getCssTransitionDurationStyle(options.duration, applyOnlyDuration); - - // we set the duration so that it will be picked up by getComputedStyle later - applyInlineStyle(node, durationStyle); - temporaryStyles.push(durationStyle); - } - - if (options.keyframeStyle) { - var keyframeStyle = [ANIMATION_PROP, options.keyframeStyle]; - applyInlineStyle(node, keyframeStyle); - temporaryStyles.push(keyframeStyle); - } - - var itemIndex = stagger - ? options.staggerIndex >= 0 - ? options.staggerIndex - : gcsLookup.count(cacheKey) - : 0; - - var isFirst = itemIndex === 0; - - // this is a pre-emptive way of forcing the setup classes to be added and applied INSTANTLY - // without causing any combination of transitions to kick in. By adding a negative delay value - // it forces the setup class' transition to end immediately. We later then remove the negative - // transition delay to allow for the transition to naturally do its thing. The beauty here is - // that if there is no transition defined then nothing will happen and this will also allow - // other transitions to be stacked on top of each other without any of them being chopped out. - if (isFirst && !options.skipBlocking) { - blockTransitions(node, SAFE_FAST_FORWARD_DURATION_VALUE); - } - - var timings = computeTimings(node, fullClassName, cacheKey); - var relativeDelay = timings.maxDelay; - maxDelay = Math.max(relativeDelay, 0); - maxDuration = timings.maxDuration; - - var flags = {}; - flags.hasTransitions = timings.transitionDuration > 0; - flags.hasAnimations = timings.animationDuration > 0; - flags.hasTransitionAll = flags.hasTransitions && timings.transitionProperty == 'all'; - flags.applyTransitionDuration = hasToStyles && ( - (flags.hasTransitions && !flags.hasTransitionAll) - || (flags.hasAnimations && !flags.hasTransitions)); - flags.applyAnimationDuration = options.duration && flags.hasAnimations; - flags.applyTransitionDelay = truthyTimingValue(options.delay) && (flags.applyTransitionDuration || flags.hasTransitions); - flags.applyAnimationDelay = truthyTimingValue(options.delay) && flags.hasAnimations; - flags.recalculateTimingStyles = addRemoveClassName.length > 0; - - if (flags.applyTransitionDuration || flags.applyAnimationDuration) { - maxDuration = options.duration ?
parseFloat(options.duration) : maxDuration; - - if (flags.applyTransitionDuration) { - flags.hasTransitions = true; - timings.transitionDuration = maxDuration; - applyOnlyDuration = node.style[TRANSITION_PROP + PROPERTY_KEY].length > 0; - temporaryStyles.push(getCssTransitionDurationStyle(maxDuration, applyOnlyDuration)); - } - - if (flags.applyAnimationDuration) { - flags.hasAnimations = true; - timings.animationDuration = maxDuration; - temporaryStyles.push(getCssKeyframeDurationStyle(maxDuration)); - } - } - - if (maxDuration === 0 && !flags.recalculateTimingStyles) { - return closeAndReturnNoopAnimator(); - } - - if (options.delay != null) { - var delayStyle = parseFloat(options.delay); - - if (flags.applyTransitionDelay) { - temporaryStyles.push(getCssDelayStyle(delayStyle)); - } - - if (flags.applyAnimationDelay) { - temporaryStyles.push(getCssDelayStyle(delayStyle, true)); - } - } - - // we need to recalculate the delay value since we used a pre-emptive negative - // delay value and the delay value is required for the final event checking. This - // property will ensure that this will happen after the RAF phase has passed. - if (options.duration == null && timings.transitionDuration > 0) { - flags.recalculateTimingStyles = flags.recalculateTimingStyles || isFirst; - } - - maxDelayTime = maxDelay * ONE_SECOND; - maxDurationTime = maxDuration * ONE_SECOND; - if (!options.skipBlocking) { - flags.blockTransition = timings.transitionDuration > 0; - flags.blockKeyframeAnimation = timings.animationDuration > 0 && - stagger.animationDelay > 0 && - stagger.animationDuration === 0; - } - - if (options.from) { - if (options.cleanupStyles) { - registerRestorableStyles(restoreStyles, node, Object.keys(options.from)); - } - applyAnimationFromStyles(element, options); - } - - if (flags.blockTransition || flags.blockKeyframeAnimation) { - applyBlocking(maxDuration); - } else if (!options.skipBlocking) { - blockTransitions(node, false); - } - - // TODO(matsko): for 1.5 change this code to have an animator object for better debugging - return { - $$willAnimate: true, - end: endFn, - start: function() { - if (animationClosed) return; - - runnerHost = { - end: endFn, - cancel: cancelFn, - resume: null, //this will be set during the start() phase - pause: null - }; - - runner = new $$AnimateRunner(runnerHost); - - waitUntilQuiet(start); - - // we don't have access to pause/resume the animation - // since it hasn't run yet. AnimateRunner will therefore - // set noop functions for resume and pause and they will - // later be overridden once the animation is triggered - return runner; - } - }; - - function endFn() { - close(); - } - - function cancelFn() { - close(true); - } - - function close(rejected) { // jshint ignore:line - // if the promise has been called already then we shouldn't close - // the animation again - if (animationClosed || (animationCompleted && animationPaused)) return; - animationClosed = true; - animationPaused = false; - - if (!options.$$skipPreparationClasses) { - $$jqLite.removeClass(element, preparationClasses); - } - $$jqLite.removeClass(element, activeClasses); - - blockKeyframeAnimations(node, false); - blockTransitions(node, false); - - forEach(temporaryStyles, function(entry) { - // There is only one way to remove inline style properties entirely from elements. - // By using `removeProperty` this works, but we need to convert camel-cased CSS - // styles down to hyphenated values. 
- node.style[entry[0]] = ''; - }); - - applyAnimationClasses(element, options); - applyAnimationStyles(element, options); - - if (Object.keys(restoreStyles).length) { - forEach(restoreStyles, function(value, prop) { - value ? node.style.setProperty(prop, value) - : node.style.removeProperty(prop); - }); - } - - // the reason why we have this option is to allow a synchronous closing callback - // that is fired as SOON as the animation ends (when the CSS is removed) or if - // the animation never takes off at all. A good example is a leave animation since - // the element must be removed just after the animation is over or else the element - // will appear on screen for one animation frame causing an overbearing flicker. - if (options.onDone) { - options.onDone(); - } - - // if the preparation function fails then the promise is not set up - if (runner) { - runner.complete(!rejected); - } - } - - function applyBlocking(duration) { - if (flags.blockTransition) { - blockTransitions(node, duration); - } - - if (flags.blockKeyframeAnimation) { - blockKeyframeAnimations(node, !!duration); - } - } - - function closeAndReturnNoopAnimator() { - runner = new $$AnimateRunner({ - end: endFn, - cancel: cancelFn - }); - - // should flush the cache animation - waitUntilQuiet(noop); - close(); - - return { - $$willAnimate: false, - start: function() { - return runner; - }, - end: endFn - }; - } - - function start() { - if (animationClosed) return; - if (!node.parentNode) { - close(); - return; - } - - var startTime, events = []; - - // even though we only pause keyframe animations here the pause flag - // will still happen when transitions are used. Only the transition will - // not be paused since that is not possible. If the animation ends when - // paused then it will not complete until unpaused or cancelled. - var playPause = function(playAnimation) { - if (!animationCompleted) { - animationPaused = !playAnimation; - if (timings.animationDuration) { - var value = blockKeyframeAnimations(node, animationPaused); - animationPaused - ? temporaryStyles.push(value) - : removeFromArray(temporaryStyles, value); - } - } else if (animationPaused && playAnimation) { - animationPaused = false; - close(); - } - }; - - // checking the stagger duration prevents an accidental cascade of the CSS delay style - // being inherited from the parent. If the transition duration is zero then we can safely - // assume that the delay value is an intentional stagger delay style.
- var maxStagger = itemIndex > 0 - && ((timings.transitionDuration && stagger.transitionDuration === 0) || - (timings.animationDuration && stagger.animationDuration === 0)) - && Math.max(stagger.animationDelay, stagger.transitionDelay); - if (maxStagger) { - $timeout(triggerAnimationStart, - Math.floor(maxStagger * itemIndex * ONE_SECOND), - false); - } else { - triggerAnimationStart(); - } - - // this will decorate the existing promise runner with pause/resume methods - runnerHost.resume = function() { - playPause(true); - }; - - runnerHost.pause = function() { - playPause(false); - }; - - function triggerAnimationStart() { - // just in case a stagger animation kicks in when the animation - // itself was cancelled entirely - if (animationClosed) return; - - applyBlocking(false); - - forEach(temporaryStyles, function(entry) { - var key = entry[0]; - var value = entry[1]; - node.style[key] = value; - }); - - applyAnimationClasses(element, options); - $$jqLite.addClass(element, activeClasses); - - if (flags.recalculateTimingStyles) { - fullClassName = node.className + ' ' + preparationClasses; - cacheKey = gcsHashFn(node, fullClassName); - - timings = computeTimings(node, fullClassName, cacheKey); - relativeDelay = timings.maxDelay; - maxDelay = Math.max(relativeDelay, 0); - maxDuration = timings.maxDuration; - - if (maxDuration === 0) { - close(); - return; - } - - flags.hasTransitions = timings.transitionDuration > 0; - flags.hasAnimations = timings.animationDuration > 0; - } - - if (flags.applyAnimationDelay) { - relativeDelay = typeof options.delay !== "boolean" && truthyTimingValue(options.delay) - ? parseFloat(options.delay) - : relativeDelay; - - maxDelay = Math.max(relativeDelay, 0); - timings.animationDelay = relativeDelay; - delayStyle = getCssDelayStyle(relativeDelay, true); - temporaryStyles.push(delayStyle); - node.style[delayStyle[0]] = delayStyle[1]; - } - - maxDelayTime = maxDelay * ONE_SECOND; - maxDurationTime = maxDuration * ONE_SECOND; - - if (options.easing) { - var easeProp, easeVal = options.easing; - if (flags.hasTransitions) { - easeProp = TRANSITION_PROP + TIMING_KEY; - temporaryStyles.push([easeProp, easeVal]); - node.style[easeProp] = easeVal; - } - if (flags.hasAnimations) { - easeProp = ANIMATION_PROP + TIMING_KEY; - temporaryStyles.push([easeProp, easeVal]); - node.style[easeProp] = easeVal; - } - } - - if (timings.transitionDuration) { - events.push(TRANSITIONEND_EVENT); - } - - if (timings.animationDuration) { - events.push(ANIMATIONEND_EVENT); - } - - startTime = Date.now(); - var timerTime = maxDelayTime + CLOSING_TIME_BUFFER * maxDurationTime; - var endTime = startTime + timerTime; - - var animationsData = element.data(ANIMATE_TIMER_KEY) || []; - var setupFallbackTimer = true; - if (animationsData.length) { - var currentTimerData = animationsData[0]; - setupFallbackTimer = endTime > currentTimerData.expectedEndTime; - if (setupFallbackTimer) { - $timeout.cancel(currentTimerData.timer); - } else { - animationsData.push(close); - } - } - - if (setupFallbackTimer) { - var timer = $timeout(onAnimationExpired, timerTime, false); - animationsData[0] = { - timer: timer, - expectedEndTime: endTime - }; - animationsData.push(close); - element.data(ANIMATE_TIMER_KEY, animationsData); - } - - element.on(events.join(' '), onAnimationProgress); - if (options.to) { - if (options.cleanupStyles) { - registerRestorableStyles(restoreStyles, node, Object.keys(options.to)); - } - applyAnimationToStyles(element, options); - } - } - - function onAnimationExpired() { - var
animationsData = element.data(ANIMATE_TIMER_KEY); - - // this will be false in the event that the element was - // removed from the DOM (via a leave animation or something - // similar) - if (animationsData) { - for (var i = 1; i < animationsData.length; i++) { - animationsData[i](); - } - element.removeData(ANIMATE_TIMER_KEY); - } - } - - function onAnimationProgress(event) { - event.stopPropagation(); - var ev = event.originalEvent || event; - var timeStamp = ev.$manualTimeStamp || ev.timeStamp || Date.now(); - - /* Firefox (or possibly just Gecko) likes to not round values up - * when a ms measurement is used for the animation */ - var elapsedTime = parseFloat(ev.elapsedTime.toFixed(ELAPSED_TIME_MAX_DECIMAL_PLACES)); - - /* $manualTimeStamp is a mocked timeStamp value which is set - * within browserTrigger(). This is only here so that tests can - * mock animations properly. Real events fall back to event.timeStamp, - * or, if they don't, then a timeStamp is automatically created for them. - * We're checking to see if the timeStamp surpasses the expected delay, - * but we're using elapsedTime instead of the timeStamp on the 2nd - * pre-condition since animations sometimes close off early */ - if (Math.max(timeStamp - startTime, 0) >= maxDelayTime && elapsedTime >= maxDuration) { - // we set this flag to ensure that if the transition is paused then, when resumed, - // the animation will automatically close itself since transitions cannot be paused. - animationCompleted = true; - close(); - } - } - } - }; - }]; -}]; - -var $$AnimateCssDriverProvider = ['$$animationProvider', function($$animationProvider) { - $$animationProvider.drivers.push('$$animateCssDriver'); - - var NG_ANIMATE_SHIM_CLASS_NAME = 'ng-animate-shim'; - var NG_ANIMATE_ANCHOR_CLASS_NAME = 'ng-anchor'; - - var NG_OUT_ANCHOR_CLASS_NAME = 'ng-anchor-out'; - var NG_IN_ANCHOR_CLASS_NAME = 'ng-anchor-in'; - - function isDocumentFragment(node) { - return node.parentNode && node.parentNode.nodeType === 11; - } - - this.$get = ['$animateCss', '$rootScope', '$$AnimateRunner', '$rootElement', '$sniffer', '$$jqLite', '$document', - function($animateCss, $rootScope, $$AnimateRunner, $rootElement, $sniffer, $$jqLite, $document) { - - // only browsers that support these properties can render animations - if (!$sniffer.animations && !$sniffer.transitions) return noop; - - var bodyNode = $document[0].body; - var rootNode = getDomNode($rootElement); - - var rootBodyElement = jqLite( - // this is to avoid using something that exists outside of the body - // we also special case the doc fragment case because our unit test code - // appends the $rootElement to the body after the app has been bootstrapped - isDocumentFragment(rootNode) || bodyNode.contains(rootNode) ? rootNode : bodyNode - ); - - var applyAnimationClasses = applyAnimationClassesFactory($$jqLite); - - return function initDriverFn(animationDetails) { - return animationDetails.from && animationDetails.to - ?
prepareFromToAnchorAnimation(animationDetails.from, - animationDetails.to, - animationDetails.classes, - animationDetails.anchors) - : prepareRegularAnimation(animationDetails); - }; - - function filterCssClasses(classes) { - // remove all the `ng-` stuff - return classes.replace(/\bng-\S+\b/g, ''); - } - - function getUniqueValues(a, b) { - if (isString(a)) a = a.split(' '); - if (isString(b)) b = b.split(' '); - return a.filter(function(val) { - return b.indexOf(val) === -1; - }).join(' '); - } - - function prepareAnchoredAnimation(classes, outAnchor, inAnchor) { - var clone = jqLite(getDomNode(outAnchor).cloneNode(true)); - var startingClasses = filterCssClasses(getClassVal(clone)); - - outAnchor.addClass(NG_ANIMATE_SHIM_CLASS_NAME); - inAnchor.addClass(NG_ANIMATE_SHIM_CLASS_NAME); - - clone.addClass(NG_ANIMATE_ANCHOR_CLASS_NAME); - - rootBodyElement.append(clone); - - var animatorIn, animatorOut = prepareOutAnimation(); - - // the user may not end up using the `out` animation and - // only making use of the `in` animation or vice-versa. - // In either case we should allow this and only treat the - // animation as over once neither animation is in use. - if (!animatorOut) { - animatorIn = prepareInAnimation(); - if (!animatorIn) { - return end(); - } - } - - var startingAnimator = animatorOut || animatorIn; - - return { - start: function() { - var runner; - - var currentAnimation = startingAnimator.start(); - currentAnimation.done(function() { - currentAnimation = null; - if (!animatorIn) { - animatorIn = prepareInAnimation(); - if (animatorIn) { - currentAnimation = animatorIn.start(); - currentAnimation.done(function() { - currentAnimation = null; - end(); - runner.complete(); - }); - return currentAnimation; - } - } - // in the event that there is no `in` animation - end(); - runner.complete(); - }); - - runner = new $$AnimateRunner({ - end: endFn, - cancel: endFn - }); - - return runner; - - function endFn() { - if (currentAnimation) { - currentAnimation.end(); - } - } - } - }; - - function calculateAnchorStyles(anchor) { - var styles = {}; - - var coords = getDomNode(anchor).getBoundingClientRect(); - - // we iterate directly since safari messes up and doesn't return - // all the keys for the coords object when iterated - forEach(['width','height','top','left'], function(key) { - var value = coords[key]; - switch (key) { - case 'top': - value += bodyNode.scrollTop; - break; - case 'left': - value += bodyNode.scrollLeft; - break; - } - styles[key] = Math.floor(value) + 'px'; - }); - return styles; - } - - function prepareOutAnimation() { - var animator = $animateCss(clone, { - addClass: NG_OUT_ANCHOR_CLASS_NAME, - delay: true, - from: calculateAnchorStyles(outAnchor) - }); - - // read the comment within `prepareRegularAnimation` to understand - // why this check is necessary - return animator.$$willAnimate ? animator : null; - } - - function getClassVal(element) { - return element.attr('class') || ''; - } - - function prepareInAnimation() { - var endingClasses = filterCssClasses(getClassVal(inAnchor)); - var toAdd = getUniqueValues(endingClasses, startingClasses); - var toRemove = getUniqueValues(startingClasses, endingClasses); - - var animator = $animateCss(clone, { - to: calculateAnchorStyles(inAnchor), - addClass: NG_IN_ANCHOR_CLASS_NAME + ' ' + toAdd, - removeClass: NG_OUT_ANCHOR_CLASS_NAME + ' ' + toRemove, - delay: true - }); - - // read the comment within `prepareRegularAnimation` to understand - // why this check is necessary - return animator.$$willAnimate ?
animator : null; - } - - function end() { - clone.remove(); - outAnchor.removeClass(NG_ANIMATE_SHIM_CLASS_NAME); - inAnchor.removeClass(NG_ANIMATE_SHIM_CLASS_NAME); - } - } - - function prepareFromToAnchorAnimation(from, to, classes, anchors) { - var fromAnimation = prepareRegularAnimation(from, noop); - var toAnimation = prepareRegularAnimation(to, noop); - - var anchorAnimations = []; - forEach(anchors, function(anchor) { - var outElement = anchor['out']; - var inElement = anchor['in']; - var animator = prepareAnchoredAnimation(classes, outElement, inElement); - if (animator) { - anchorAnimations.push(animator); - } - }); - - // no point in doing anything when there are no elements to animate - if (!fromAnimation && !toAnimation && anchorAnimations.length === 0) return; - - return { - start: function() { - var animationRunners = []; - - if (fromAnimation) { - animationRunners.push(fromAnimation.start()); - } - - if (toAnimation) { - animationRunners.push(toAnimation.start()); - } - - forEach(anchorAnimations, function(animation) { - animationRunners.push(animation.start()); - }); - - var runner = new $$AnimateRunner({ - end: endFn, - cancel: endFn // CSS-driven animations cannot be cancelled, only ended - }); - - $$AnimateRunner.all(animationRunners, function(status) { - runner.complete(status); - }); - - return runner; - - function endFn() { - forEach(animationRunners, function(runner) { - runner.end(); - }); - } - } - }; - } - - function prepareRegularAnimation(animationDetails) { - var element = animationDetails.element; - var options = animationDetails.options || {}; - - if (animationDetails.structural) { - options.event = animationDetails.event; - options.structural = true; - options.applyClassesEarly = true; - - // we special case the leave animation since we want to ensure that - // the element is removed as soon as the animation is over. Otherwise - // a flicker might appear or the element may not be removed at all - if (animationDetails.event === 'leave') { - options.onDone = options.domOperation; - } - } - - // We assign the preparationClasses as the actual animation event since - // the internals of $animateCss will just suffix the event token values - // with `-active` to trigger the animation. - if (options.preparationClasses) { - options.event = concatWithSpace(options.event, options.preparationClasses); - } - - var animator = $animateCss(element, options); - - // the driver lookup code inside of $$animation attempts to spawn a - // driver one by one until a driver returns a.$$willAnimate animator object. - // $animateCss will always return an object, however, it will pass in - // a flag as a hint as to whether an animation was detected or not - return animator.$$willAnimate ? animator : null; - } - }]; -}]; - -// TODO(matsko): use caching here to speed things up for detection -// TODO(matsko): add documentation -// by the time... - -var $$AnimateJsProvider = ['$animateProvider', function($animateProvider) { - this.$get = ['$injector', '$$AnimateRunner', '$$jqLite', - function($injector, $$AnimateRunner, $$jqLite) { - - var applyAnimationClasses = applyAnimationClassesFactory($$jqLite); - // $animateJs(element, 'enter'); - return function(element, event, classes, options) { - // the `classes` argument is optional and if it is not used - // then the classes will be resolved from the element's className - // property as well as options.addClass/options.removeClass. 
- if (arguments.length === 3 && isObject(classes)) { - options = classes; - classes = null; - } - - options = prepareAnimationOptions(options); - if (!classes) { - classes = element.attr('class') || ''; - if (options.addClass) { - classes += ' ' + options.addClass; - } - if (options.removeClass) { - classes += ' ' + options.removeClass; - } - } - - var classesToAdd = options.addClass; - var classesToRemove = options.removeClass; - - // the lookupAnimations function returns a series of animation objects that are - // matched up with one or more of the CSS classes. These animation objects are - // defined via the module.animation factory function. If nothing is detected then - // we don't return anything which then makes $animation query the next driver. - var animations = lookupAnimations(classes); - var before, after; - if (animations.length) { - var afterFn, beforeFn; - if (event == 'leave') { - beforeFn = 'leave'; - afterFn = 'afterLeave'; // TODO(matsko): get rid of this - } else { - beforeFn = 'before' + event.charAt(0).toUpperCase() + event.substr(1); - afterFn = event; - } - - if (event !== 'enter' && event !== 'move') { - before = packageAnimations(element, event, options, animations, beforeFn); - } - after = packageAnimations(element, event, options, animations, afterFn); - } - - // no matching animations - if (!before && !after) return; - - function applyOptions() { - options.domOperation(); - applyAnimationClasses(element, options); - } - - return { - start: function() { - var closeActiveAnimations; - var chain = []; - - if (before) { - chain.push(function(fn) { - closeActiveAnimations = before(fn); - }); - } - - if (chain.length) { - chain.push(function(fn) { - applyOptions(); - fn(true); - }); - } else { - applyOptions(); - } - - if (after) { - chain.push(function(fn) { - closeActiveAnimations = after(fn); - }); - } - - var animationClosed = false; - var runner = new $$AnimateRunner({ - end: function() { - endAnimations(); - }, - cancel: function() { - endAnimations(true); - } - }); - - $$AnimateRunner.chain(chain, onComplete); - return runner; - - function onComplete(success) { - animationClosed = true; - applyOptions(); - applyAnimationStyles(element, options); - runner.complete(success); - } - - function endAnimations(cancelled) { - if (!animationClosed) { - (closeActiveAnimations || noop)(cancelled); - onComplete(cancelled); - } - } - } - }; - - function executeAnimationFn(fn, element, event, options, onDone) { - var args; - switch (event) { - case 'animate': - args = [element, options.from, options.to, onDone]; - break; - - case 'setClass': - args = [element, classesToAdd, classesToRemove, onDone]; - break; - - case 'addClass': - args = [element, classesToAdd, onDone]; - break; - - case 'removeClass': - args = [element, classesToRemove, onDone]; - break; - - default: - args = [element, onDone]; - break; - } - - args.push(options); - - var value = fn.apply(fn, args); - if (value) { - if (isFunction(value.start)) { - value = value.start(); - } - - if (value instanceof $$AnimateRunner) { - value.done(onDone); - } else if (isFunction(value)) { - // optional onEnd / onCancel callback - return value; - } - } - - return noop; - } - - function groupEventedAnimations(element, event, options, animations, fnName) { - var operations = []; - forEach(animations, function(ani) { - var animation = ani[fnName]; - if (!animation) return; - - // note that all of these animations will run in parallel - operations.push(function() { - var runner; - var endProgressCb; - - var resolved = false; - 
var onAnimationComplete = function(rejected) { - if (!resolved) { - resolved = true; - (endProgressCb || noop)(rejected); - runner.complete(!rejected); - } - }; - - runner = new $$AnimateRunner({ - end: function() { - onAnimationComplete(); - }, - cancel: function() { - onAnimationComplete(true); - } - }); - - endProgressCb = executeAnimationFn(animation, element, event, options, function(result) { - var cancelled = result === false; - onAnimationComplete(cancelled); - }); - - return runner; - }); - }); - - return operations; - } - - function packageAnimations(element, event, options, animations, fnName) { - var operations = groupEventedAnimations(element, event, options, animations, fnName); - if (operations.length === 0) { - var a,b; - if (fnName === 'beforeSetClass') { - a = groupEventedAnimations(element, 'removeClass', options, animations, 'beforeRemoveClass'); - b = groupEventedAnimations(element, 'addClass', options, animations, 'beforeAddClass'); - } else if (fnName === 'setClass') { - a = groupEventedAnimations(element, 'removeClass', options, animations, 'removeClass'); - b = groupEventedAnimations(element, 'addClass', options, animations, 'addClass'); - } - - if (a) { - operations = operations.concat(a); - } - if (b) { - operations = operations.concat(b); - } - } - - if (operations.length === 0) return; - - // TODO(matsko): add documentation - return function startAnimation(callback) { - var runners = []; - if (operations.length) { - forEach(operations, function(animateFn) { - runners.push(animateFn()); - }); - } - - runners.length ? $$AnimateRunner.all(runners, callback) : callback(); - - return function endFn(reject) { - forEach(runners, function(runner) { - reject ? runner.cancel() : runner.end(); - }); - }; - }; - } - }; - - function lookupAnimations(classes) { - classes = isArray(classes) ? classes : classes.split(' '); - var matches = [], flagMap = {}; - for (var i=0; i < classes.length; i++) { - var klass = classes[i], - animationFactory = $animateProvider.$$registeredAnimations[klass]; - if (animationFactory && !flagMap[klass]) { - matches.push($injector.get(animationFactory)); - flagMap[klass] = true; - } - } - return matches; - } - }]; -}]; - -var $$AnimateJsDriverProvider = ['$$animationProvider', function($$animationProvider) { - $$animationProvider.drivers.push('$$animateJsDriver'); - this.$get = ['$$animateJs', '$$AnimateRunner', function($$animateJs, $$AnimateRunner) { - return function initDriverFn(animationDetails) { - if (animationDetails.from && animationDetails.to) { - var fromAnimation = prepareAnimation(animationDetails.from); - var toAnimation = prepareAnimation(animationDetails.to); - if (!fromAnimation && !toAnimation) return; - - return { - start: function() { - var animationRunners = []; - - if (fromAnimation) { - animationRunners.push(fromAnimation.start()); - } - - if (toAnimation) { - animationRunners.push(toAnimation.start()); - } - - $$AnimateRunner.all(animationRunners, done); - - var runner = new $$AnimateRunner({ - end: endFnFactory(), - cancel: endFnFactory() - }); - - return runner; - - function endFnFactory() { - return function() { - forEach(animationRunners, function(runner) { - // at this point we cannot cancel animations for groups just yet. 
1.5+ - runner.end(); - }); - }; - } - - function done(status) { - runner.complete(status); - } - } - }; - } else { - return prepareAnimation(animationDetails); - } - }; - - function prepareAnimation(animationDetails) { - // TODO(matsko): make sure to check for grouped animations and delegate down to normal animations - var element = animationDetails.element; - var event = animationDetails.event; - var options = animationDetails.options; - var classes = animationDetails.classes; - return $$animateJs(element, event, classes, options); - } - }]; -}]; - -var NG_ANIMATE_ATTR_NAME = 'data-ng-animate'; -var NG_ANIMATE_PIN_DATA = '$ngAnimatePin'; -var $$AnimateQueueProvider = ['$animateProvider', function($animateProvider) { - var PRE_DIGEST_STATE = 1; - var RUNNING_STATE = 2; - - var rules = this.rules = { - skip: [], - cancel: [], - join: [] - }; - - function isAllowed(ruleType, element, currentAnimation, previousAnimation) { - return rules[ruleType].some(function(fn) { - return fn(element, currentAnimation, previousAnimation); - }); - } - - function hasAnimationClasses(options, and) { - options = options || {}; - var a = (options.addClass || '').length > 0; - var b = (options.removeClass || '').length > 0; - return and ? a && b : a || b; - } - - rules.join.push(function(element, newAnimation, currentAnimation) { - // if the new animation is class-based then we can just tack that on - return !newAnimation.structural && hasAnimationClasses(newAnimation.options); - }); - - rules.skip.push(function(element, newAnimation, currentAnimation) { - // there is no need to animate anything if no classes are being added and - // there is no structural animation that will be triggered - return !newAnimation.structural && !hasAnimationClasses(newAnimation.options); - }); - - rules.skip.push(function(element, newAnimation, currentAnimation) { - // why should we trigger a new structural animation if the element will - // be removed from the DOM anyway? 
- return currentAnimation.event == 'leave' && newAnimation.structural; - }); - - rules.skip.push(function(element, newAnimation, currentAnimation) { - // if there is an ongoing current animation then don't even bother running the class-based animation - return currentAnimation.structural && currentAnimation.state === RUNNING_STATE && !newAnimation.structural; - }); - - rules.cancel.push(function(element, newAnimation, currentAnimation) { - // there can never be two structural animations running at the same time - return currentAnimation.structural && newAnimation.structural; - }); - - rules.cancel.push(function(element, newAnimation, currentAnimation) { - // if the previous animation is already running, but the new animation will - // be triggered, but the new animation is structural - return currentAnimation.state === RUNNING_STATE && newAnimation.structural; - }); - - rules.cancel.push(function(element, newAnimation, currentAnimation) { - var nO = newAnimation.options; - var cO = currentAnimation.options; - - // if the exact same CSS class is added/removed then it's safe to cancel it - return (nO.addClass && nO.addClass === cO.removeClass) || (nO.removeClass && nO.removeClass === cO.addClass); - }); - - this.$get = ['$$rAF', '$rootScope', '$rootElement', '$document', '$$HashMap', - '$$animation', '$$AnimateRunner', '$templateRequest', '$$jqLite', '$$forceReflow', - function($$rAF, $rootScope, $rootElement, $document, $$HashMap, - $$animation, $$AnimateRunner, $templateRequest, $$jqLite, $$forceReflow) { - - var activeAnimationsLookup = new $$HashMap(); - var disabledElementsLookup = new $$HashMap(); - var animationsEnabled = null; - - function postDigestTaskFactory() { - var postDigestCalled = false; - return function(fn) { - // we only issue a call to postDigest before - // it has first passed. This prevents any callbacks - // from not firing once the animation has completed - // since it will be out of the digest cycle. - if (postDigestCalled) { - fn(); - } else { - $rootScope.$$postDigest(function() { - postDigestCalled = true; - fn(); - }); - } - }; - } - - // Wait until all directive and route-related templates are downloaded and - // compiled. The $templateRequest.totalPendingRequests variable keeps track of - // all of the remote templates being currently downloaded. If there are no - // templates currently downloading then the watcher will still fire anyway. - var deregisterWatch = $rootScope.$watch( - function() { return $templateRequest.totalPendingRequests === 0; }, - function(isEmpty) { - if (!isEmpty) return; - deregisterWatch(); - - // Now that all templates have been downloaded, $animate will wait until - // the post digest queue is empty before enabling animations. By having two - // calls to $postDigest calls we can ensure that the flag is enabled at the - // very end of the post digest queue. Since all of the animations in $animate - // use $postDigest, it's important that the code below executes at the end. - // This basically means that the page is fully downloaded and compiled before - // any animations are triggered. 
- $rootScope.$$postDigest(function() { - $rootScope.$$postDigest(function() { - // we check for null directly in the event that the application already called - // .enabled() with whatever arguments that it provided it with - if (animationsEnabled === null) { - animationsEnabled = true; - } - }); - }); - } - ); - - var callbackRegistry = {}; - - // remember that the classNameFilter is set during the provider/config - // stage therefore we can optimize here and setup a helper function - var classNameFilter = $animateProvider.classNameFilter(); - var isAnimatableClassName = !classNameFilter - ? function() { return true; } - : function(className) { - return classNameFilter.test(className); - }; - - var applyAnimationClasses = applyAnimationClassesFactory($$jqLite); - - function normalizeAnimationOptions(element, options) { - return mergeAnimationOptions(element, options, {}); - } - - function findCallbacks(parent, element, event) { - var targetNode = getDomNode(element); - var targetParentNode = getDomNode(parent); - - var matches = []; - var entries = callbackRegistry[event]; - if (entries) { - forEach(entries, function(entry) { - if (entry.node.contains(targetNode)) { - matches.push(entry.callback); - } else if (event === 'leave' && entry.node.contains(targetParentNode)) { - matches.push(entry.callback); - } - }); - } - - return matches; - } - - return { - on: function(event, container, callback) { - var node = extractElementNode(container); - callbackRegistry[event] = callbackRegistry[event] || []; - callbackRegistry[event].push({ - node: node, - callback: callback - }); - }, - - off: function(event, container, callback) { - var entries = callbackRegistry[event]; - if (!entries) return; - - callbackRegistry[event] = arguments.length === 1 - ? null - : filterFromRegistry(entries, container, callback); - - function filterFromRegistry(list, matchContainer, matchCallback) { - var containerNode = extractElementNode(matchContainer); - return list.filter(function(entry) { - var isMatch = entry.node === containerNode && - (!matchCallback || entry.callback === matchCallback); - return !isMatch; - }); - } - }, - - pin: function(element, parentElement) { - assertArg(isElement(element), 'element', 'not an element'); - assertArg(isElement(parentElement), 'parentElement', 'not an element'); - element.data(NG_ANIMATE_PIN_DATA, parentElement); - }, - - push: function(element, event, options, domOperation) { - options = options || {}; - options.domOperation = domOperation; - return queueAnimation(element, event, options); - }, - - // this method has four signatures: - // () - global getter - // (bool) - global setter - // (element) - element getter - // (element, bool) - element setter - enabled: function(element, bool) { - var argCount = arguments.length; - - if (argCount === 0) { - // () - Global getter - bool = !!animationsEnabled; - } else { - var hasElement = isElement(element); - - if (!hasElement) { - // (bool) - Global setter - bool = animationsEnabled = !!element; - } else { - var node = getDomNode(element); - var recordExists = disabledElementsLookup.get(node); - - if (argCount === 1) { - // (element) - Element getter - bool = !recordExists; - } else { - // (element, bool) - Element setter - bool = !!bool; - if (!bool) { - disabledElementsLookup.put(node, true); - } else if (recordExists) { - disabledElementsLookup.remove(node); - } - } - } - } - - return bool; - } - }; - - function queueAnimation(element, event, options) { - var node, parent; - element = stripCommentsFromElement(element); - if 
(element) { - node = getDomNode(element); - parent = element.parent(); - } - - options = prepareAnimationOptions(options); - - // we create a fake runner with a working promise. - // These methods will become available after the digest has passed - var runner = new $$AnimateRunner(); - - // this is used to trigger callbacks in postDigest mode - var runInNextPostDigestOrNow = postDigestTaskFactory(); - - if (isArray(options.addClass)) { - options.addClass = options.addClass.join(' '); - } - - if (options.addClass && !isString(options.addClass)) { - options.addClass = null; - } - - if (isArray(options.removeClass)) { - options.removeClass = options.removeClass.join(' '); - } - - if (options.removeClass && !isString(options.removeClass)) { - options.removeClass = null; - } - - if (options.from && !isObject(options.from)) { - options.from = null; - } - - if (options.to && !isObject(options.to)) { - options.to = null; - } - - // there are situations where a directive issues an animation for - // a jqLite wrapper that contains only comment nodes... If this - // happens then there is no way we can perform an animation - if (!node) { - close(); - return runner; - } - - var className = [node.className, options.addClass, options.removeClass].join(' '); - if (!isAnimatableClassName(className)) { - close(); - return runner; - } - - var isStructural = ['enter', 'move', 'leave'].indexOf(event) >= 0; - - // this is a hard disable of all animations for the application or on - // the element itself, therefore there is no need to continue further - // past this point if not enabled - var skipAnimations = !animationsEnabled || disabledElementsLookup.get(node); - var existingAnimation = (!skipAnimations && activeAnimationsLookup.get(node)) || {}; - var hasExistingAnimation = !!existingAnimation.state; - - // there is no point in traversing the same collection of parent ancestors if a followup - // animation will be run on the same element that already did all that checking work - if (!skipAnimations && (!hasExistingAnimation || existingAnimation.state != PRE_DIGEST_STATE)) { - skipAnimations = !areAnimationsAllowed(element, parent, event); - } - - if (skipAnimations) { - close(); - return runner; - } - - if (isStructural) { - closeChildAnimations(element); - } - - var newAnimation = { - structural: isStructural, - element: element, - event: event, - close: close, - options: options, - runner: runner - }; - - if (hasExistingAnimation) { - var skipAnimationFlag = isAllowed('skip', element, newAnimation, existingAnimation); - if (skipAnimationFlag) { - if (existingAnimation.state === RUNNING_STATE) { - close(); - return runner; - } else { - mergeAnimationOptions(element, existingAnimation.options, options); - return existingAnimation.runner; - } - } - - var cancelAnimationFlag = isAllowed('cancel', element, newAnimation, existingAnimation); - if (cancelAnimationFlag) { - if (existingAnimation.state === RUNNING_STATE) { - // this will end the animation right away and it is safe - // to do so since the animation is already running and the - // runner callback code will run in async - existingAnimation.runner.end(); - } else if (existingAnimation.structural) { - // this means that the animation is queued into a digest, but - // hasn't started yet. Therefore it is safe to run the close - // method which will call the runner methods in async. 
- existingAnimation.close(); - } else { - // this will merge the new animation options into existing animation options - mergeAnimationOptions(element, existingAnimation.options, newAnimation.options); - return existingAnimation.runner; - } - } else { - // a joined animation means that this animation will take over the existing one - // so an example would involve a leave animation taking over an enter. Then when - // the postDigest kicks in the enter will be ignored. - var joinAnimationFlag = isAllowed('join', element, newAnimation, existingAnimation); - if (joinAnimationFlag) { - if (existingAnimation.state === RUNNING_STATE) { - normalizeAnimationOptions(element, options); - } else { - applyGeneratedPreparationClasses(element, isStructural ? event : null, options); - - event = newAnimation.event = existingAnimation.event; - options = mergeAnimationOptions(element, existingAnimation.options, newAnimation.options); - - //we return the same runner since only the option values of this animation will - //be fed into the `existingAnimation`. - return existingAnimation.runner; - } - } - } - } else { - // normalization in this case means that it removes redundant CSS classes that - // already exist (addClass) or do not exist (removeClass) on the element - normalizeAnimationOptions(element, options); - } - - // when the options are merged and cleaned up we may end up not having to do - // an animation at all, therefore we should check this before issuing a post - // digest callback. Structural animations will always run no matter what. - var isValidAnimation = newAnimation.structural; - if (!isValidAnimation) { - // animate (from/to) can be quickly checked first, otherwise we check if any classes are present - isValidAnimation = (newAnimation.event === 'animate' && Object.keys(newAnimation.options.to || {}).length > 0) - || hasAnimationClasses(newAnimation.options); - } - - if (!isValidAnimation) { - close(); - clearElementAnimationState(element); - return runner; - } - - // the counter keeps track of cancelled animations - var counter = (existingAnimation.counter || 0) + 1; - newAnimation.counter = counter; - - markElementAnimationState(element, PRE_DIGEST_STATE, newAnimation); - - $rootScope.$$postDigest(function() { - var animationDetails = activeAnimationsLookup.get(node); - var animationCancelled = !animationDetails; - animationDetails = animationDetails || {}; - - // if addClass/removeClass is called before something like enter then the - // registered parent element may not be present. The code below will ensure - // that a final value for parent element is obtained - var parentElement = element.parent() || []; - - // animate/structural/class-based animations all have requirements. Otherwise there - // is no point in performing an animation. The parent node must also be set. 
- var isValidAnimation = parentElement.length > 0 - && (animationDetails.event === 'animate' - || animationDetails.structural - || hasAnimationClasses(animationDetails.options)); - - // this means that the previous animation was cancelled - // even if the follow-up animation is the same event - if (animationCancelled || animationDetails.counter !== counter || !isValidAnimation) { - // if another animation did not take over then we need - // to make sure that the domOperation and options are - // handled accordingly - if (animationCancelled) { - applyAnimationClasses(element, options); - applyAnimationStyles(element, options); - } - - // if the event changed from something like enter to leave then we do - // it, otherwise if it's the same then the end result will be the same too - if (animationCancelled || (isStructural && animationDetails.event !== event)) { - options.domOperation(); - runner.end(); - } - - // in the event that the element animation was not cancelled or a follow-up animation - // isn't allowed to animate from here then we need to clear the state of the element - // so that any future animations won't read the expired animation data. - if (!isValidAnimation) { - clearElementAnimationState(element); - } - - return; - } - - // this combined multiple class to addClass / removeClass into a setClass event - // so long as a structural event did not take over the animation - event = !animationDetails.structural && hasAnimationClasses(animationDetails.options, true) - ? 'setClass' - : animationDetails.event; - - markElementAnimationState(element, RUNNING_STATE); - var realRunner = $$animation(element, event, animationDetails.options); - - realRunner.done(function(status) { - close(!status); - var animationDetails = activeAnimationsLookup.get(node); - if (animationDetails && animationDetails.counter === counter) { - clearElementAnimationState(getDomNode(element)); - } - notifyProgress(runner, event, 'close', {}); - }); - - // this will update the runner's flow-control events based on - // the `realRunner` object. - runner.setHost(realRunner); - notifyProgress(runner, event, 'start', {}); - }); - - return runner; - - function notifyProgress(runner, event, phase, data) { - runInNextPostDigestOrNow(function() { - var callbacks = findCallbacks(parent, element, event); - if (callbacks.length) { - // do not optimize this call here to RAF because - // we don't know how heavy the callback code here will - // be and if this code is buffered then this can - // lead to a performance regression. 
- $$rAF(function() { - forEach(callbacks, function(callback) { - callback(element, phase, data); - }); - }); - } - }); - runner.progress(event, phase, data); - } - - function close(reject) { // jshint ignore:line - clearGeneratedClasses(element, options); - applyAnimationClasses(element, options); - applyAnimationStyles(element, options); - options.domOperation(); - runner.complete(!reject); - } - } - - function closeChildAnimations(element) { - var node = getDomNode(element); - var children = node.querySelectorAll('[' + NG_ANIMATE_ATTR_NAME + ']'); - forEach(children, function(child) { - var state = parseInt(child.getAttribute(NG_ANIMATE_ATTR_NAME)); - var animationDetails = activeAnimationsLookup.get(child); - switch (state) { - case RUNNING_STATE: - animationDetails.runner.end(); - /* falls through */ - case PRE_DIGEST_STATE: - if (animationDetails) { - activeAnimationsLookup.remove(child); - } - break; - } - }); - } - - function clearElementAnimationState(element) { - var node = getDomNode(element); - node.removeAttribute(NG_ANIMATE_ATTR_NAME); - activeAnimationsLookup.remove(node); - } - - function isMatchingElement(nodeOrElmA, nodeOrElmB) { - return getDomNode(nodeOrElmA) === getDomNode(nodeOrElmB); - } - - function areAnimationsAllowed(element, parentElement, event) { - var bodyElement = jqLite($document[0].body); - var bodyElementDetected = isMatchingElement(element, bodyElement) || element[0].nodeName === 'HTML'; - var rootElementDetected = isMatchingElement(element, $rootElement); - var parentAnimationDetected = false; - var animateChildren; - - var parentHost = element.data(NG_ANIMATE_PIN_DATA); - if (parentHost) { - parentElement = parentHost; - } - - while (parentElement && parentElement.length) { - if (!rootElementDetected) { - // angular doesn't want to attempt to animate elements outside of the application - // therefore we need to ensure that the rootElement is an ancestor of the current element - rootElementDetected = isMatchingElement(parentElement, $rootElement); - } - - var parentNode = parentElement[0]; - if (parentNode.nodeType !== ELEMENT_NODE) { - // no point in inspecting the #document element - break; - } - - var details = activeAnimationsLookup.get(parentNode) || {}; - // either an enter, leave or move animation will commence - // therefore we can't allow any animations to take place - // but if a parent animation is class-based then that's ok - if (!parentAnimationDetected) { - parentAnimationDetected = details.structural || disabledElementsLookup.get(parentNode); - } - - if (isUndefined(animateChildren) || animateChildren === true) { - var value = parentElement.data(NG_ANIMATE_CHILDREN_DATA); - if (isDefined(value)) { - animateChildren = value; - } - } - - // there is no need to continue traversing at this point - if (parentAnimationDetected && animateChildren === false) break; - - if (!rootElementDetected) { - // angular doesn't want to attempt to animate elements outside of the application - // therefore we need to ensure that the rootElement is an ancestor of the current element - rootElementDetected = isMatchingElement(parentElement, $rootElement); - if (!rootElementDetected) { - parentHost = parentElement.data(NG_ANIMATE_PIN_DATA); - if (parentHost) { - parentElement = parentHost; - } - } - } - - if (!bodyElementDetected) { - // we also need to ensure that the element is or will be apart of the body element - // otherwise it is pointless to even issue an animation to be rendered - bodyElementDetected = isMatchingElement(parentElement, bodyElement); - } - 
- parentElement = parentElement.parent(); - } - - var allowAnimation = !parentAnimationDetected || animateChildren; - return allowAnimation && rootElementDetected && bodyElementDetected; - } - - function markElementAnimationState(element, state, details) { - details = details || {}; - details.state = state; - - var node = getDomNode(element); - node.setAttribute(NG_ANIMATE_ATTR_NAME, state); - - var oldValue = activeAnimationsLookup.get(node); - var newValue = oldValue - ? extend(oldValue, details) - : details; - activeAnimationsLookup.put(node, newValue); - } - }]; -}]; - -var $$AnimateAsyncRunFactory = ['$$rAF', function($$rAF) { - var waitQueue = []; - - function waitForTick(fn) { - waitQueue.push(fn); - if (waitQueue.length > 1) return; - $$rAF(function() { - for (var i = 0; i < waitQueue.length; i++) { - waitQueue[i](); - } - waitQueue = []; - }); - } - - return function() { - var passed = false; - waitForTick(function() { - passed = true; - }); - return function(callback) { - passed ? callback() : waitForTick(callback); - }; - }; -}]; - -var $$AnimateRunnerFactory = ['$q', '$sniffer', '$$animateAsyncRun', - function($q, $sniffer, $$animateAsyncRun) { - - var INITIAL_STATE = 0; - var DONE_PENDING_STATE = 1; - var DONE_COMPLETE_STATE = 2; - - AnimateRunner.chain = function(chain, callback) { - var index = 0; - - next(); - function next() { - if (index === chain.length) { - callback(true); - return; - } - - chain[index](function(response) { - if (response === false) { - callback(false); - return; - } - index++; - next(); - }); - } - }; - - AnimateRunner.all = function(runners, callback) { - var count = 0; - var status = true; - forEach(runners, function(runner) { - runner.done(onProgress); - }); - - function onProgress(response) { - status = status && response; - if (++count === runners.length) { - callback(status); - } - } - }; - - function AnimateRunner(host) { - this.setHost(host); - - this._doneCallbacks = []; - this._runInAnimationFrame = $$animateAsyncRun(); - this._state = 0; - } - - AnimateRunner.prototype = { - setHost: function(host) { - this.host = host || {}; - }, - - done: function(fn) { - if (this._state === DONE_COMPLETE_STATE) { - fn(); - } else { - this._doneCallbacks.push(fn); - } - }, - - progress: noop, - - getPromise: function() { - if (!this.promise) { - var self = this; - this.promise = $q(function(resolve, reject) { - self.done(function(status) { - status === false ? 
reject() : resolve(); - }); - }); - } - return this.promise; - }, - - then: function(resolveHandler, rejectHandler) { - return this.getPromise().then(resolveHandler, rejectHandler); - }, - - 'catch': function(handler) { - return this.getPromise()['catch'](handler); - }, - - 'finally': function(handler) { - return this.getPromise()['finally'](handler); - }, - - pause: function() { - if (this.host.pause) { - this.host.pause(); - } - }, - - resume: function() { - if (this.host.resume) { - this.host.resume(); - } - }, - - end: function() { - if (this.host.end) { - this.host.end(); - } - this._resolve(true); - }, - - cancel: function() { - if (this.host.cancel) { - this.host.cancel(); - } - this._resolve(false); - }, - - complete: function(response) { - var self = this; - if (self._state === INITIAL_STATE) { - self._state = DONE_PENDING_STATE; - self._runInAnimationFrame(function() { - self._resolve(response); - }); - } - }, - - _resolve: function(response) { - if (this._state !== DONE_COMPLETE_STATE) { - forEach(this._doneCallbacks, function(fn) { - fn(response); - }); - this._doneCallbacks.length = 0; - this._state = DONE_COMPLETE_STATE; - } - } - }; - - return AnimateRunner; -}]; - -var $$AnimationProvider = ['$animateProvider', function($animateProvider) { - var NG_ANIMATE_REF_ATTR = 'ng-animate-ref'; - - var drivers = this.drivers = []; - - var RUNNER_STORAGE_KEY = '$$animationRunner'; - - function setRunner(element, runner) { - element.data(RUNNER_STORAGE_KEY, runner); - } - - function removeRunner(element) { - element.removeData(RUNNER_STORAGE_KEY); - } - - function getRunner(element) { - return element.data(RUNNER_STORAGE_KEY); - } - - this.$get = ['$$jqLite', '$rootScope', '$injector', '$$AnimateRunner', '$$HashMap', '$$rAFScheduler', - function($$jqLite, $rootScope, $injector, $$AnimateRunner, $$HashMap, $$rAFScheduler) { - - var animationQueue = []; - var applyAnimationClasses = applyAnimationClassesFactory($$jqLite); - - function sortAnimations(animations) { - var tree = { children: [] }; - var i, lookup = new $$HashMap(); - - // this is done first beforehand so that the hashmap - // is filled with a list of the elements that will be animated - for (i = 0; i < animations.length; i++) { - var animation = animations[i]; - lookup.put(animation.domNode, animations[i] = { - domNode: animation.domNode, - fn: animation.fn, - children: [] - }); - } - - for (i = 0; i < animations.length; i++) { - processNode(animations[i]); - } - - return flatten(tree); - - function processNode(entry) { - if (entry.processed) return entry; - entry.processed = true; - - var elementNode = entry.domNode; - var parentNode = elementNode.parentNode; - lookup.put(elementNode, entry); - - var parentEntry; - while (parentNode) { - parentEntry = lookup.get(parentNode); - if (parentEntry) { - if (!parentEntry.processed) { - parentEntry = processNode(parentEntry); - } - break; - } - parentNode = parentNode.parentNode; - } - - (parentEntry || tree).children.push(entry); - return entry; - } - - function flatten(tree) { - var result = []; - var queue = []; - var i; - - for (i = 0; i < tree.children.length; i++) { - queue.push(tree.children[i]); - } - - var remainingLevelEntries = queue.length; - var nextLevelEntries = 0; - var row = []; - - for (i = 0; i < queue.length; i++) { - var entry = queue[i]; - if (remainingLevelEntries <= 0) { - remainingLevelEntries = nextLevelEntries; - nextLevelEntries = 0; - result.push(row); - row = []; - } - row.push(entry.fn); - entry.children.forEach(function(childEntry) { - 
nextLevelEntries++; - queue.push(childEntry); - }); - remainingLevelEntries--; - } - - if (row.length) { - result.push(row); - } - - return result; - } - } - - // TODO(matsko): document the signature in a better way - return function(element, event, options) { - options = prepareAnimationOptions(options); - var isStructural = ['enter', 'move', 'leave'].indexOf(event) >= 0; - - // there is no animation at the current moment, however - // these runner methods will get later updated with the - // methods leading into the driver's end/cancel methods - // for now they just stop the animation from starting - var runner = new $$AnimateRunner({ - end: function() { close(); }, - cancel: function() { close(true); } - }); - - if (!drivers.length) { - close(); - return runner; - } - - setRunner(element, runner); - - var classes = mergeClasses(element.attr('class'), mergeClasses(options.addClass, options.removeClass)); - var tempClasses = options.tempClasses; - if (tempClasses) { - classes += ' ' + tempClasses; - options.tempClasses = null; - } - - animationQueue.push({ - // this data is used by the postDigest code and passed into - // the driver step function - element: element, - classes: classes, - event: event, - structural: isStructural, - options: options, - beforeStart: beforeStart, - close: close - }); - - element.on('$destroy', handleDestroyedElement); - - // we only want there to be one function called within the post digest - // block. This way we can group animations for all the animations that - // were apart of the same postDigest flush call. - if (animationQueue.length > 1) return runner; - - $rootScope.$$postDigest(function() { - var animations = []; - forEach(animationQueue, function(entry) { - // the element was destroyed early on which removed the runner - // form its storage. This means we can't animate this element - // at all and it already has been closed due to destruction. - if (getRunner(entry.element)) { - animations.push(entry); - } else { - entry.close(); - } - }); - - // now any future animations will be in another postDigest - animationQueue.length = 0; - - var groupedAnimations = groupAnimations(animations); - var toBeSortedAnimations = []; - - forEach(groupedAnimations, function(animationEntry) { - toBeSortedAnimations.push({ - domNode: getDomNode(animationEntry.from ? animationEntry.from.element : animationEntry.element), - fn: function triggerAnimationStart() { - // it's important that we apply the `ng-animate` CSS class and the - // temporary classes before we do any driver invoking since these - // CSS classes may be required for proper CSS detection. - animationEntry.beforeStart(); - - var startAnimationFn, closeFn = animationEntry.close; - - // in the event that the element was removed before the digest runs or - // during the RAF sequencing then we should not trigger the animation. - var targetElement = animationEntry.anchors - ? (animationEntry.from.element || animationEntry.to.element) - : animationEntry.element; - - if (getRunner(targetElement)) { - var operation = invokeFirstDriver(animationEntry); - if (operation) { - startAnimationFn = operation.start; - } - } - - if (!startAnimationFn) { - closeFn(); - } else { - var animationRunner = startAnimationFn(); - animationRunner.done(function(status) { - closeFn(!status); - }); - updateAnimationRunners(animationEntry, animationRunner); - } - } - }); - }); - - // we need to sort each of the animations in order of parent to child - // relationships. 
This ensures that the child classes are applied at the - // right time. - $$rAFScheduler(sortAnimations(toBeSortedAnimations)); - }); - - return runner; - - // TODO(matsko): change to reference nodes - function getAnchorNodes(node) { - var SELECTOR = '[' + NG_ANIMATE_REF_ATTR + ']'; - var items = node.hasAttribute(NG_ANIMATE_REF_ATTR) - ? [node] - : node.querySelectorAll(SELECTOR); - var anchors = []; - forEach(items, function(node) { - var attr = node.getAttribute(NG_ANIMATE_REF_ATTR); - if (attr && attr.length) { - anchors.push(node); - } - }); - return anchors; - } - - function groupAnimations(animations) { - var preparedAnimations = []; - var refLookup = {}; - forEach(animations, function(animation, index) { - var element = animation.element; - var node = getDomNode(element); - var event = animation.event; - var enterOrMove = ['enter', 'move'].indexOf(event) >= 0; - var anchorNodes = animation.structural ? getAnchorNodes(node) : []; - - if (anchorNodes.length) { - var direction = enterOrMove ? 'to' : 'from'; - - forEach(anchorNodes, function(anchor) { - var key = anchor.getAttribute(NG_ANIMATE_REF_ATTR); - refLookup[key] = refLookup[key] || {}; - refLookup[key][direction] = { - animationID: index, - element: jqLite(anchor) - }; - }); - } else { - preparedAnimations.push(animation); - } - }); - - var usedIndicesLookup = {}; - var anchorGroups = {}; - forEach(refLookup, function(operations, key) { - var from = operations.from; - var to = operations.to; - - if (!from || !to) { - // only one of these is set therefore we can't have an - // anchor animation since all three pieces are required - var index = from ? from.animationID : to.animationID; - var indexKey = index.toString(); - if (!usedIndicesLookup[indexKey]) { - usedIndicesLookup[indexKey] = true; - preparedAnimations.push(animations[index]); - } - return; - } - - var fromAnimation = animations[from.animationID]; - var toAnimation = animations[to.animationID]; - var lookupKey = from.animationID.toString(); - if (!anchorGroups[lookupKey]) { - var group = anchorGroups[lookupKey] = { - structural: true, - beforeStart: function() { - fromAnimation.beforeStart(); - toAnimation.beforeStart(); - }, - close: function() { - fromAnimation.close(); - toAnimation.close(); - }, - classes: cssClassesIntersection(fromAnimation.classes, toAnimation.classes), - from: fromAnimation, - to: toAnimation, - anchors: [] // TODO(matsko): change to reference nodes - }; - - // the anchor animations require that the from and to elements both have at least - // one shared CSS class which effictively marries the two elements together to use - // the same animation driver and to properly sequence the anchor animation. 
- if (group.classes.length) { - preparedAnimations.push(group); - } else { - preparedAnimations.push(fromAnimation); - preparedAnimations.push(toAnimation); - } - } - - anchorGroups[lookupKey].anchors.push({ - 'out': from.element, 'in': to.element - }); - }); - - return preparedAnimations; - } - - function cssClassesIntersection(a,b) { - a = a.split(' '); - b = b.split(' '); - var matches = []; - - for (var i = 0; i < a.length; i++) { - var aa = a[i]; - if (aa.substring(0,3) === 'ng-') continue; - - for (var j = 0; j < b.length; j++) { - if (aa === b[j]) { - matches.push(aa); - break; - } - } - } - - return matches.join(' '); - } - - function invokeFirstDriver(animationDetails) { - // we loop in reverse order since the more general drivers (like CSS and JS) - // may attempt more elements, but custom drivers are more particular - for (var i = drivers.length - 1; i >= 0; i--) { - var driverName = drivers[i]; - if (!$injector.has(driverName)) continue; // TODO(matsko): remove this check - - var factory = $injector.get(driverName); - var driver = factory(animationDetails); - if (driver) { - return driver; - } - } - } - - function beforeStart() { - element.addClass(NG_ANIMATE_CLASSNAME); - if (tempClasses) { - $$jqLite.addClass(element, tempClasses); - } - } - - function updateAnimationRunners(animation, newRunner) { - if (animation.from && animation.to) { - update(animation.from.element); - update(animation.to.element); - } else { - update(animation.element); - } - - function update(element) { - getRunner(element).setHost(newRunner); - } - } - - function handleDestroyedElement() { - var runner = getRunner(element); - if (runner && (event !== 'leave' || !options.$$domOperationFired)) { - runner.end(); - } - } - - function close(rejected) { // jshint ignore:line - element.off('$destroy', handleDestroyedElement); - removeRunner(element); - - applyAnimationClasses(element, options); - applyAnimationStyles(element, options); - options.domOperation(); - - if (tempClasses) { - $$jqLite.removeClass(element, tempClasses); - } - - element.removeClass(NG_ANIMATE_CLASSNAME); - runner.complete(!rejected); - } - }; - }]; -}]; - -/* global angularAnimateModule: true, - - $$AnimateAsyncRunFactory, - $$rAFSchedulerFactory, - $$AnimateChildrenDirective, - $$AnimateRunnerFactory, - $$AnimateQueueProvider, - $$AnimationProvider, - $AnimateCssProvider, - $$AnimateCssDriverProvider, - $$AnimateJsProvider, - $$AnimateJsDriverProvider, -*/ - -/** - * @ngdoc module - * @name ngAnimate - * @description - * - * The `ngAnimate` module provides support for CSS-based animations (keyframes and transitions) as well as JavaScript-based animations via - * callback hooks. Animations are not enabled by default, however, by including `ngAnimate` the animation hooks are enabled for an Angular app. - * - *
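The module description above notes that the animation hooks stay dormant until `ngAnimate` is pulled in as a dependency. A minimal sketch of that wiring (the module name `myApp` is illustrative; AngularJS 1.4.x with `angular-animate.js` loaded after `angular.js` is assumed):

```js
// Listing 'ngAnimate' as a dependency is the only wiring required to enable
// the enter/leave/move and class-based animation hooks described below.
angular.module('myApp', ['ngAnimate']);
```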
    - * - * # Usage - * Simply put, there are two ways to make use of animations when ngAnimate is used: by using **CSS** and **JavaScript**. The former works purely based - * using CSS (by using matching CSS selectors/styles) and the latter triggers animations that are registered via `module.animation()`. For - * both CSS and JS animations the sole requirement is to have a matching `CSS class` that exists both in the registered animation and within - * the HTML element that the animation will be triggered on. - * - * ## Directive Support - * The following directives are "animation aware": - * - * | Directive | Supported Animations | - * |----------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------| - * | {@link ng.directive:ngRepeat#animations ngRepeat} | enter, leave and move | - * | {@link ngRoute.directive:ngView#animations ngView} | enter and leave | - * | {@link ng.directive:ngInclude#animations ngInclude} | enter and leave | - * | {@link ng.directive:ngSwitch#animations ngSwitch} | enter and leave | - * | {@link ng.directive:ngIf#animations ngIf} | enter and leave | - * | {@link ng.directive:ngClass#animations ngClass} | add and remove (the CSS class(es) present) | - * | {@link ng.directive:ngShow#animations ngShow} & {@link ng.directive:ngHide#animations ngHide} | add and remove (the ng-hide class value) | - * | {@link ng.directive:form#animation-hooks form} & {@link ng.directive:ngModel#animation-hooks ngModel} | add and remove (dirty, pristine, valid, invalid & all other validations) | - * | {@link module:ngMessages#animations ngMessages} | add and remove (ng-active & ng-inactive) | - * | {@link module:ngMessages#animations ngMessage} | enter and leave | - * - * (More information can be found by visiting each the documentation associated with each directive.) - * - * ## CSS-based Animations - * - * CSS-based animations with ngAnimate are unique since they require no JavaScript code at all. By using a CSS class that we reference between our HTML - * and CSS code we can create an animation that will be picked up by Angular when an the underlying directive performs an operation. - * - * The example below shows how an `enter` animation can be made possible on an element using `ng-if`: - * - * ```html - *
- * <div ng-if="bool" class="fade">
- *    Fade me in out
- * </div>
- * <button ng-click="bool=true">Fade In!</button>
- * <button ng-click="bool=false">Fade Out!</button>
    - * - * - * ``` - * - * Notice the CSS class **fade**? We can now create the CSS transition code that references this class: - * - * ```css - * /* The starting CSS styles for the enter animation */ - * .fade.ng-enter { - * transition:0.5s linear all; - * opacity:0; - * } - * - * /* The finishing CSS styles for the enter animation */ - * .fade.ng-enter.ng-enter-active { - * opacity:1; - * } - * ``` - * - * The key thing to remember here is that, depending on the animation event (which each of the directives above trigger depending on what's going on) two - * generated CSS classes will be applied to the element; in the example above we have `.ng-enter` and `.ng-enter-active`. For CSS transitions, the transition - * code **must** be defined within the starting CSS class (in this case `.ng-enter`). The destination class is what the transition will animate towards. - * - * If for example we wanted to create animations for `leave` and `move` (ngRepeat triggers move) then we can do so using the same CSS naming conventions: - * - * ```css - * /* now the element will fade out before it is removed from the DOM */ - * .fade.ng-leave { - * transition:0.5s linear all; - * opacity:1; - * } - * .fade.ng-leave.ng-leave-active { - * opacity:0; - * } - * ``` - * - * We can also make use of **CSS Keyframes** by referencing the keyframe animation within the starting CSS class: - * - * ```css - * /* there is no need to define anything inside of the destination - * CSS class since the keyframe will take charge of the animation */ - * .fade.ng-leave { - * animation: my_fade_animation 0.5s linear; - * -webkit-animation: my_fade_animation 0.5s linear; - * } - * - * @keyframes my_fade_animation { - * from { opacity:1; } - * to { opacity:0; } - * } - * - * @-webkit-keyframes my_fade_animation { - * from { opacity:1; } - * to { opacity:0; } - * } - * ``` - * - * Feel free also mix transitions and keyframes together as well as any other CSS classes on the same element. - * - * ### CSS Class-based Animations - * - * Class-based animations (animations that are triggered via `ngClass`, `ngShow`, `ngHide` and some other directives) have a slightly different - * naming convention. Class-based animations are basic enough that a standard transition or keyframe can be referenced on the class being added - * and removed. - * - * For example if we wanted to do a CSS animation for `ngHide` then we place an animation on the `.ng-hide` CSS class: - * - * ```html - *
- * <div ng-show="bool" class="fade">
- *   Show and hide me
- * </div>
- * <button ng-click="bool=!bool">Toggle</button>
- *
- * <style>
- * .fade.ng-hide {
- *   transition:0.5s linear all;
- *   opacity:0;
- * }
- * </style>
    - * - * - * - * ``` - * - * All that is going on here with ngShow/ngHide behind the scenes is the `.ng-hide` class is added/removed (when the hidden state is valid). Since - * ngShow and ngHide are animation aware then we can match up a transition and ngAnimate handles the rest. - * - * In addition the addition and removal of the CSS class, ngAnimate also provides two helper methods that we can use to further decorate the animation - * with CSS styles. - * - * ```html - *
- * <div ng-class="{on:onOff}" class="highlight">
- *   Highlight this box
- * </div>
- * <button ng-click="onOff=!onOff">Toggle</button>
- *
- * <style>
- * .highlight {
- *   transition:0.5s linear all;
- * }
- * .highlight.on-add {
- *   background:white;
- * }
- * .highlight.on {
- *   background:yellow;
- * }
- * .highlight.on-remove {
- *   background:black;
- * }
- * </style>
    - * - * - * - * ``` - * - * We can also make use of CSS keyframes by placing them within the CSS classes. - * - * - * ### CSS Staggering Animations - * A Staggering animation is a collection of animations that are issued with a slight delay in between each successive operation resulting in a - * curtain-like effect. The ngAnimate module (versions >=1.2) supports staggering animations and the stagger effect can be - * performed by creating a **ng-EVENT-stagger** CSS class and attaching that class to the base CSS class used for - * the animation. The style property expected within the stagger class can either be a **transition-delay** or an - * **animation-delay** property (or both if your animation contains both transitions and keyframe animations). - * - * ```css - * .my-animation.ng-enter { - * /* standard transition code */ - * transition: 1s linear all; - * opacity:0; - * } - * .my-animation.ng-enter-stagger { - * /* this will have a 100ms delay between each successive leave animation */ - * transition-delay: 0.1s; - * - * /* As of 1.4.4, this must always be set: it signals ngAnimate - * to not accidentally inherit a delay property from another CSS class */ - * transition-duration: 0s; - * } - * .my-animation.ng-enter.ng-enter-active { - * /* standard transition styles */ - * opacity:1; - * } - * ``` - * - * Staggering animations work by default in ngRepeat (so long as the CSS class is defined). Outside of ngRepeat, to use staggering animations - * on your own, they can be triggered by firing multiple calls to the same event on $animate. However, the restrictions surrounding this - * are that each of the elements must have the same CSS className value as well as the same parent element. A stagger operation - * will also be reset if one or more animation frames have passed since the multiple calls to `$animate` were fired. - * - * The following code will issue the **ng-leave-stagger** event on the element provided: - * - * ```js - * var kids = parent.children(); - * - * $animate.leave(kids[0]); //stagger index=0 - * $animate.leave(kids[1]); //stagger index=1 - * $animate.leave(kids[2]); //stagger index=2 - * $animate.leave(kids[3]); //stagger index=3 - * $animate.leave(kids[4]); //stagger index=4 - * - * window.requestAnimationFrame(function() { - * //stagger has reset itself - * $animate.leave(kids[5]); //stagger index=0 - * $animate.leave(kids[6]); //stagger index=1 - * - * $scope.$digest(); - * }); - * ``` - * - * Stagger animations are currently only supported within CSS-defined animations. - * - * ### The `ng-animate` CSS class - * - * When ngAnimate is animating an element it will apply the `ng-animate` CSS class to the element for the duration of the animation. - * This is a temporary CSS class and it will be removed once the animation is over (for both JavaScript and CSS-based animations). - * - * Therefore, animations can be applied to an element using this temporary class directly via CSS. - * - * ```css - * .zipper.ng-animate { - * transition:0.5s linear all; - * } - * .zipper.ng-enter { - * opacity:0; - * } - * .zipper.ng-enter.ng-enter-active { - * opacity:1; - * } - * .zipper.ng-leave { - * opacity:1; - * } - * .zipper.ng-leave.ng-leave-active { - * opacity:0; - * } - * ``` - * - * (Note that the `ng-animate` CSS class is reserved and it cannot be applied on an element directly since ngAnimate will always remove - * the CSS class once an animation has completed.) 
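The queue service deleted above implements the four `enabled()` signatures spelled out in its own comments (global getter, global setter, element getter, element setter). A minimal sketch of driving them through the public `$animate` service; the module and directive names are illustrative, and AngularJS 1.4.x with ngAnimate is assumed:

```js
angular.module('myApp', ['ngAnimate'])
  .run(['$animate', function($animate) {
    var globallyOn = $animate.enabled(); // () - global getter
    $animate.enabled(false);             // (bool) - global setter: disable everywhere
    $animate.enabled(globallyOn);        // restore the previous state
  }])
  .directive('noAnimations', ['$animate', function($animate) {
    return function(scope, element) {
      // (element, bool) - element setter: the queue records this element in
      // disabledElementsLookup and skips animations issued on it (and, via
      // the parent-traversal checks, on its children)
      $animate.enabled(element, false);
    };
  }]);
```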
- *
- *
- * ## JavaScript-based Animations
- *
- * ngAnimate also allows for animations to be consumed by JavaScript code. The approach is similar to CSS-based animations (where there is a shared
- * CSS class that is referenced in our HTML code) but in addition we need to register the JavaScript animation on the module. By making use of the
- * `module.animation()` module function we can register the animation.
- *
- * Let's see an example of an enter/leave animation using `ngRepeat`:
- *
- * ```html
- * <div ng-repeat="item in items" class="slide">
- *   {{ item }}
- * </div>
    - * ``` - * - * See the **slide** CSS class? Let's use that class to define an animation that we'll structure in our module code by using `module.animation`: - * - * ```js - * myModule.animation('.slide', [function() { - * return { - * // make note that other events (like addClass/removeClass) - * // have different function input parameters - * enter: function(element, doneFn) { - * jQuery(element).fadeIn(1000, doneFn); - * - * // remember to call doneFn so that angular - * // knows that the animation has concluded - * }, - * - * move: function(element, doneFn) { - * jQuery(element).fadeIn(1000, doneFn); - * }, - * - * leave: function(element, doneFn) { - * jQuery(element).fadeOut(1000, doneFn); - * } - * } - * }]); - * ``` - * - * The nice thing about JS-based animations is that we can inject other services and make use of advanced animation libraries such as - * greensock.js and velocity.js. - * - * If our animation code class-based (meaning that something like `ngClass`, `ngHide` and `ngShow` triggers it) then we can still define - * our animations inside of the same registered animation, however, the function input arguments are a bit different: - * - * ```html - *
- * <div ng-class="color" class="colorful">
- *   this box is moody
- * </div>
- * <button ng-click="color='red'">Change to red</button>
- * <button ng-click="color='blue'">Change to blue</button>
- * <button ng-click="color='green'">Change to green</button>
    - * - * - * - * ``` - * - * ```js - * myModule.animation('.colorful', [function() { - * return { - * addClass: function(element, className, doneFn) { - * // do some cool animation and call the doneFn - * }, - * removeClass: function(element, className, doneFn) { - * // do some cool animation and call the doneFn - * }, - * setClass: function(element, addedClass, removedClass, doneFn) { - * // do some cool animation and call the doneFn - * } - * } - * }]); - * ``` - * - * ## CSS + JS Animations Together - * - * AngularJS 1.4 and higher has taken steps to make the amalgamation of CSS and JS animations more flexible. However, unlike earlier versions of Angular, - * defining CSS and JS animations to work off of the same CSS class will not work anymore. Therefore the example below will only result in **JS animations taking - * charge of the animation**: - * - * ```html - *
- * <div ng-if="bool" class="slide">
- *   Slide in and out
- * </div>
    - * ``` - * - * ```js - * myModule.animation('.slide', [function() { - * return { - * enter: function(element, doneFn) { - * jQuery(element).slideIn(1000, doneFn); - * } - * } - * }]); - * ``` - * - * ```css - * .slide.ng-enter { - * transition:0.5s linear all; - * transform:translateY(-100px); - * } - * .slide.ng-enter.ng-enter-active { - * transform:translateY(0); - * } - * ``` - * - * Does this mean that CSS and JS animations cannot be used together? Do JS-based animations always have higher priority? We can make up for the - * lack of CSS animations by using the `$animateCss` service to trigger our own tweaked-out, CSS-based animations directly from - * our own JS-based animation code: - * - * ```js - * myModule.animation('.slide', ['$animateCss', function($animateCss) { - * return { - * enter: function(element) { -* // this will trigger `.slide.ng-enter` and `.slide.ng-enter-active`. - * return $animateCss(element, { - * event: 'enter', - * structural: true - * }); - * } - * } - * }]); - * ``` - * - * The nice thing here is that we can save bandwidth by sticking to our CSS-based animation code and we don't need to rely on a 3rd-party animation framework. - * - * The `$animateCss` service is very powerful since we can feed in all kinds of extra properties that will be evaluated and fed into a CSS transition or - * keyframe animation. For example if we wanted to animate the height of an element while adding and removing classes then we can do so by providing that - * data into `$animateCss` directly: - * - * ```js - * myModule.animation('.slide', ['$animateCss', function($animateCss) { - * return { - * enter: function(element) { - * return $animateCss(element, { - * event: 'enter', - * structural: true, - * addClass: 'maroon-setting', - * from: { height:0 }, - * to: { height: 200 } - * }); - * } - * } - * }]); - * ``` - * - * Now we can fill in the rest via our transition CSS code: - * - * ```css - * /* the transition tells ngAnimate to make the animation happen */ - * .slide.ng-enter { transition:0.5s linear all; } - * - * /* this extra CSS class will be absorbed into the transition - * since the $animateCss code is adding the class */ - * .maroon-setting { background:red; } - * ``` - * - * And `$animateCss` will figure out the rest. Just make sure to have the `done()` callback fire the `doneFn` function to signal when the animation is over. - * - * To learn more about what's possible be sure to visit the {@link ngAnimate.$animateCss $animateCss service}. - * - * ## Animation Anchoring (via `ng-animate-ref`) - * - * ngAnimate in AngularJS 1.4 comes packed with the ability to cross-animate elements between - * structural areas of an application (like views) by pairing up elements using an attribute - * called `ng-animate-ref`. - * - * Let's say for example we have two views that are managed by `ng-view` and we want to show - * that there is a relationship between two components situated in within these views. By using the - * `ng-animate-ref` attribute we can identify that the two components are paired together and we - * can then attach an animation, which is triggered when the view changes. - * - * Say for example we have the following template code: - * - * ```html - * - *
- * <!-- index.html -->
- * <div ng-view class="view-animation"></div>
- *
- * <!-- home.html -->
- * <a href="#/banner-page">
- *   <img src="./banner.jpg" class="banner" ng-animate-ref="banner">
- * </a>
- *
- * <!-- banner-page.html -->
- * <img src="./banner.jpg" class="banner" ng-animate-ref="banner">
    - * - * - * - * - * - * - * - * - * ``` - * - * Now, when the view changes (once the link is clicked), ngAnimate will examine the - * HTML contents to see if there is a match reference between any components in the view - * that is leaving and the view that is entering. It will scan both the view which is being - * removed (leave) and inserted (enter) to see if there are any paired DOM elements that - * contain a matching ref value. - * - * The two images match since they share the same ref value. ngAnimate will now create a - * transport element (which is a clone of the first image element) and it will then attempt - * to animate to the position of the second image element in the next view. For the animation to - * work a special CSS class called `ng-anchor` will be added to the transported element. - * - * We can now attach a transition onto the `.banner.ng-anchor` CSS class and then - * ngAnimate will handle the entire transition for us as well as the addition and removal of - * any changes of CSS classes between the elements: - * - * ```css - * .banner.ng-anchor { - * /* this animation will last for 1 second since there are - * two phases to the animation (an `in` and an `out` phase) */ - * transition:0.5s linear all; - * } - * ``` - * - * We also **must** include animations for the views that are being entered and removed - * (otherwise anchoring wouldn't be possible since the new view would be inserted right away). - * - * ```css - * .view-animation.ng-enter, .view-animation.ng-leave { - * transition:0.5s linear all; - * position:fixed; - * left:0; - * top:0; - * width:100%; - * } - * .view-animation.ng-enter { - * transform:translateX(100%); - * } - * .view-animation.ng-leave, - * .view-animation.ng-enter.ng-enter-active { - * transform:translateX(0%); - * } - * .view-animation.ng-leave.ng-leave-active { - * transform:translateX(-100%); - * } - * ``` - * - * Now we can jump back to the anchor animation. When the animation happens, there are two stages that occur: - * an `out` and an `in` stage. The `out` stage happens first and that is when the element is animated away - * from its origin. Once that animation is over then the `in` stage occurs which animates the - * element to its destination. The reason why there are two animations is to give enough time - * for the enter animation on the new element to be ready. - * - * The example above sets up a transition for both the in and out phases, but we can also target the out or - * in phases directly via `ng-anchor-out` and `ng-anchor-in`. - * - * ```css - * .banner.ng-anchor-out { - * transition: 0.5s linear all; - * - * /* the scale will be applied during the out animation, - * but will be animated away when the in animation runs */ - * transform: scale(1.2); - * } - * - * .banner.ng-anchor-in { - * transition: 1s linear all; - * } - * ``` - * - * - * - * - * ### Anchoring Demo - * - - - Home -
-      <hr />
-      <div class="view-container">
-        <div ng-view class="view"></div>
-      </div>
-    </file>
-    <file name="script.js">
-      angular.module('anchoringExample', ['ngAnimate', 'ngRoute'])
-        .config(['$routeProvider', function($routeProvider) {
-          $routeProvider.when('/', {
-            templateUrl: 'home.html',
-            controller: 'HomeController as home'
-          });
-          $routeProvider.when('/profile/:id', {
-            templateUrl: 'profile.html',
-            controller: 'ProfileController as profile'
-          });
-        }])
-        .run(['$rootScope', function($rootScope) {
-          $rootScope.records = [
-            { id:1, title: "Miss Beulah Roob" },
-            { id:2, title: "Trent Morissette" },
-            { id:3, title: "Miss Ava Pouros" },
-            { id:4, title: "Rod Pouros" },
-            { id:5, title: "Abdul Rice" },
-            { id:6, title: "Laurie Rutherford Sr." },
-            { id:7, title: "Nakia McLaughlin" },
-            { id:8, title: "Jordon Blanda DVM" },
-            { id:9, title: "Rhoda Hand" },
-            { id:10, title: "Alexandrea Sauer" }
-          ];
-        }])
-        .controller('HomeController', [function() {
-          //empty
-        }])
-        .controller('ProfileController', ['$rootScope', '$routeParams',
-            function($rootScope, $routeParams) {
-          var index = parseInt($routeParams.id, 10);
-          var record = $rootScope.records[index - 1];
-
-          this.title = record.title;
-          this.id = record.id;
-        }]);

-    </file>
-    <file name="home.html">
-      <h2>Welcome to the home page</h2>
-      <p>Please click on an element</p>
-      <a class="record"
-         ng-href="#/profile/{{ record.id }}"
-         ng-animate-ref="{{ record.id }}"
-         ng-repeat="record in records">
-        {{ record.title }}
-      </a>
-    </file>
-    <file name="profile.html">
-      <div class="profile record" ng-animate-ref="{{ profile.id }}">
-        {{ profile.title }}
-      </div>
-    </file>
-    <file name="animations.css">
-      .record {
-        display:block;
-        font-size:20px;
-      }
-      .profile {
-        background:black;
-        color:white;
-        font-size:100px;
-      }
-      .view-container {
-        position:relative;
-      }
-      .view-container > .view.ng-animate {
-        position:absolute;
-        top:0;
-        left:0;
-        width:100%;
-        min-height:500px;
-      }
-      .view.ng-enter, .view.ng-leave,
-      .record.ng-anchor {
-        transition:0.5s linear all;
-      }
-      .view.ng-enter {
-        transform:translateX(100%);
-      }
-      .view.ng-enter.ng-enter-active, .view.ng-leave {
-        transform:translateX(0%);
-      }
-      .view.ng-leave.ng-leave-active {
-        transform:translateX(-100%);
-      }
-      .record.ng-anchor-out {
-        background:red;
-      }
-    </file>
-  </example>
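One placement detail the demo does not cover: the queue code above also stores a pinned parent (`NG_ANIMATE_PIN_DATA`) that `areAnimationsAllowed()` consults, exposed publicly as `$animate.pin()`, for animating elements that live outside the application root. A hedged sketch (the directive name is illustrative; AngularJS 1.4.x with ngAnimate is assumed):

```js
angular.module('myApp', ['ngAnimate'])
  .directive('detachedPanel', ['$animate', '$document', function($animate, $document) {
    return function(scope, element) {
      var appHost = element.parent(); // a node that lives inside the ng-app root
      // move the panel out to <body>, outside the bootstrapped root...
      angular.element($document[0].body).append(element);
      // ...then pin it to a host inside the app so the parent traversal in
      // areAnimationsAllowed() still permits its animations
      $animate.pin(element, appHost);
    };
  }]);
```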
    - * - * ### How is the element transported? - * - * When an anchor animation occurs, ngAnimate will clone the starting element and position it exactly where the starting - * element is located on screen via absolute positioning. The cloned element will be placed inside of the root element - * of the application (where ng-app was defined) and all of the CSS classes of the starting element will be applied. The - * element will then animate into the `out` and `in` animations and will eventually reach the coordinates and match - * the dimensions of the destination element. During the entire animation a CSS class of `.ng-animate-shim` will be applied - * to both the starting and destination elements in order to hide them from being visible (the CSS styling for the class - * is: `visibility:hidden`). Once the anchor reaches its destination then it will be removed and the destination element - * will become visible since the shim class will be removed. - * - * ### How is the morphing handled? - * - * CSS Anchoring relies on transitions and keyframes and the internal code is intelligent enough to figure out - * what CSS classes differ between the starting element and the destination element. These different CSS classes - * will be added/removed on the anchor element and a transition will be applied (the transition that is provided - * in the anchor class). Long story short, ngAnimate will figure out what classes to add and remove which will - * make the transition of the element as smooth and automatic as possible. Be sure to use simple CSS classes that - * do not rely on DOM nesting structure so that the anchor element appears the same as the starting element (since - * the cloned element is placed inside of root element which is likely close to the body element). - * - * Note that if the root element is on the `` element then the cloned node will be placed inside of body. - * - * - * ## Using $animate in your directive code - * - * So far we've explored how to feed in animations into an Angular application, but how do we trigger animations within our own directives in our application? - * By injecting the `$animate` service into our directive code, we can trigger structural and class-based hooks which can then be consumed by animations. Let's - * imagine we have a greeting box that shows and hides itself when the data changes - * - * ```html - * Hi there - * ``` - * - * ```js - * ngModule.directive('greetingBox', ['$animate', function($animate) { - * return function(scope, element, attrs) { - * attrs.$observe('active', function(value) { - * value ? $animate.addClass(element, 'on') : $animate.removeClass(element, 'on'); - * }); - * }); - * }]); - * ``` - * - * Now the `on` CSS class is added and removed on the greeting box component. Now if we add a CSS class on top of the greeting box element - * in our HTML code then we can trigger a CSS or JS animation to happen. - * - * ```css - * /* normally we would create a CSS class to reference on the element */ - * greeting-box.on { transition:0.5s linear all; background:green; color:white; } - * ``` - * - * The `$animate` service contains a variety of other methods like `enter`, `leave`, `animate` and `setClass`. To learn more about what's - * possible be sure to visit the {@link ng.$animate $animate service API page}. - * - * - * ### Preventing Collisions With Third Party Libraries - * - * Some third-party frameworks place animation duration defaults across many element or className - * selectors in order to make their code small and reuseable. 
This can lead to issues with ngAnimate, which - * is expecting actual animations on these elements and has to wait for their completion. - * - * You can prevent this unwanted behavior by using a prefix on all your animation classes: - * - * ```css - * /* prefixed with animate- */ - * .animate-fade-add.animate-fade-add-active { - * transition:1s linear all; - * opacity:0; - * } - * ``` - * - * You then configure `$animate` to enforce this prefix: - * - * ```js - * $animateProvider.classNameFilter(/animate-/); - * ``` - * - * This also may provide your application with a speed boost since only specific elements containing CSS class prefix - * will be evaluated for animation when any DOM changes occur in the application. - * - * ## Callbacks and Promises - * - * When `$animate` is called it returns a promise that can be used to capture when the animation has ended. Therefore if we were to trigger - * an animation (within our directive code) then we can continue performing directive and scope related activities after the animation has - * ended by chaining onto the returned promise that animation method returns. - * - * ```js - * // somewhere within the depths of the directive - * $animate.enter(element, parent).then(function() { - * //the animation has completed - * }); - * ``` - * - * (Note that earlier versions of Angular prior to v1.4 required the promise code to be wrapped using `$scope.$apply(...)`. This is not the case - * anymore.) - * - * In addition to the animation promise, we can also make use of animation-related callbacks within our directives and controller code by registering - * an event listener using the `$animate` service. Let's say for example that an animation was triggered on our view - * routing controller to hook into that: - * - * ```js - * ngModule.controller('HomePageController', ['$animate', function($animate) { - * $animate.on('enter', ngViewElement, function(element) { - * // the animation for this route has completed - * }]); - * }]) - * ``` - * - * (Note that you will need to trigger a digest within the callback to get angular to notice any scope-related changes.) - */ - -/** - * @ngdoc service - * @name $animate - * @kind object - * - * @description - * The ngAnimate `$animate` service documentation is the same for the core `$animate` service. - * - * Click here {@link ng.$animate to learn more about animations with `$animate`}. - */ -angular.module('ngAnimate', []) - .directive('ngAnimateChildren', $$AnimateChildrenDirective) - .factory('$$rAFScheduler', $$rAFSchedulerFactory) - - .factory('$$AnimateRunner', $$AnimateRunnerFactory) - .factory('$$animateAsyncRun', $$AnimateAsyncRunFactory) - - .provider('$$animateQueue', $$AnimateQueueProvider) - .provider('$$animation', $$AnimationProvider) - - .provider('$animateCss', $AnimateCssProvider) - .provider('$$animateCssDriver', $$AnimateCssDriverProvider) - - .provider('$$animateJs', $$AnimateJsProvider) - .provider('$$animateJsDriver', $$AnimateJsDriverProvider); - - -})(window, window.angular); diff --git a/awx/ui/client/lib/angular-animate/angular-animate.min.js b/awx/ui/client/lib/angular-animate/angular-animate.min.js deleted file mode 100644 index 9461603d04..0000000000 --- a/awx/ui/client/lib/angular-animate/angular-animate.min.js +++ /dev/null @@ -1,56 +0,0 @@ -/* - AngularJS v1.4.8 - (c) 2010-2015 Google, Inc. 
http://angularjs.org - License: MIT -*/ -(function(H,u,Sa){'use strict';function wa(a,b,c){if(!a)throw ngMinErr("areq",b||"?",c||"required");return a}function xa(a,b){if(!a&&!b)return"";if(!a)return b;if(!b)return a;X(a)&&(a=a.join(" "));X(b)&&(b=b.join(" "));return a+" "+b}function Ia(a){var b={};a&&(a.to||a.from)&&(b.to=a.to,b.from=a.from);return b}function T(a,b,c){var d="";a=X(a)?a:a&&I(a)&&a.length?a.split(/\s+/):[];q(a,function(a,s){a&&0=a&&(a=m,m=0,b.push(e),e=[]);e.push(h.fn);h.children.forEach(function(a){m++;c.push(a)});a--}e.length&&b.push(e);return b}(c)}var $=[],u=N(a);return function(g,C,D){function K(a){a=a.hasAttribute("ng-animate-ref")?[a]:a.querySelectorAll("[ng-animate-ref]");var b=[];q(a,function(a){var c=a.getAttribute("ng-animate-ref");c&&c.length&&b.push(a)});return b} -function l(a){var b=[],c={};q(a,function(a,f){var d=B(a.element),t=0<=["enter","move"].indexOf(a.event),d=a.structural?K(d):[];if(d.length){var m=t?"to":"from";q(d,function(a){var b=a.getAttribute("ng-animate-ref");c[b]=c[b]||{};c[b][m]={animationID:f,element:L(a)}})}else b.push(a)});var f={},d={};q(c,function(c,m){var w=c.from,e=c.to;if(w&&e){var h=a[w.animationID],g=a[e.animationID],x=w.animationID.toString();if(!d[x]){var A=d[x]={structural:!0,beforeStart:function(){h.beforeStart();g.beforeStart()}, -close:function(){h.close();g.close()},classes:y(h.classes,g.classes),from:h,to:g,anchors:[]};A.classes.length?b.push(A):(b.push(h),b.push(g))}d[x].anchors.push({out:w.element,"in":e.element})}else w=w?w.animationID:e.animationID,e=w.toString(),f[e]||(f[e]=!0,b.push(a[w]))});return b}function y(a,b){a=a.split(" ");b=b.split(" ");for(var c=[],f=0;fG.expectedEndTime)?h.cancel(G.timer):l.push(m)}r&&(v=h(d,v,!1),l[0]={timer:v,expectedEndTime:k},l.push(m),a.data("$$animateCss",l));a.on(x.join(" "),g);c.to&&(c.cleanupStyles&&Ea(t,n,Object.keys(c.to)),za(a,c))}}function d(){var b=a.data("$$animateCss");if(b){for(var c=1;c=N&&b>=J&&(va=!0,m())}if(!ga)if(n.parentNode){var A,x=[],l=function(a){if(va)k&&a&&(k=!1,m());else if(k=!a,E.animationDuration)if(a=na(n,k),k)y.push(a);else{var b=y,c=b.indexOf(a);0<=a&&b.splice(c,1)}},v=0", - "license": "MIT", - "bugs": { - "url": "https://github.com/angular/angular.js/issues" - }, - "homepage": "http://angularjs.org" -} diff --git a/awx/ui/client/lib/angular-breadcrumb/.bower.json b/awx/ui/client/lib/angular-breadcrumb/.bower.json deleted file mode 100644 index 3895c9f0a9..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/.bower.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "angular-breadcrumb", - "description": "AngularJS module that generates a breadcrumb from ui-router's states", - "version": "0.4.1", - "main": "release/angular-breadcrumb.js", - "ignore": [ - "sample", - "src", - "test", - ".bowerrc", - ".coveralls.yml", - ".gitignore", - ".jshintrc", - ".travis.yml", - "gruntfile.js", - "bower.json", - "karma.conf.js", - "libpeerconnection.log", - "package.json", - "README.md" - ], - "dependencies": { - "angular": ">=1.0.8", - "angular-ui-router": ">=0.2.0" - }, - "devDependencies": { - "bootstrap": "~2.3.2", - "angular-ui-bootstrap-bower": "~0.8.0", - "underscore": "~1.5.1", - "angular-mocks": ">=1.0.8", - "angular-sanitize": ">=1.0.8" - }, - "homepage": "https://github.com/ncuillery/angular-breadcrumb", - "_release": "0.4.1", - "_resolution": { - "type": "version", - "tag": "v0.4.1", - "commit": "b291e06f4010ebebbb41ea2c14e73e236aa70930" - }, - "_source": "git://github.com/ncuillery/angular-breadcrumb.git", - "_target": "~0.4.1", - "_originalSource": 
"angular-breadcrumb", - "_direct": true -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-breadcrumb/.editorconfig b/awx/ui/client/lib/angular-breadcrumb/.editorconfig deleted file mode 100644 index 16480f1870..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -# EditorConfig helps developers define and maintain consistent -# coding styles between different editors and IDEs -# editorconfig.org - -root = true - -[*] -indent_style = space -indent_size = 4 -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true - -[*.md] -trim_trailing_whitespace = false diff --git a/awx/ui/client/lib/angular-breadcrumb/.npmignore b/awx/ui/client/lib/angular-breadcrumb/.npmignore deleted file mode 100644 index 86b98729db..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/.npmignore +++ /dev/null @@ -1,15 +0,0 @@ -sample -src -test -.idea -bower_components -coverage -testDependencies -.bowerrc -.coveralls.yml -.gitignore -.jshintrc -.travis.yml -gruntfile.js -karma.conf.js -libpeerconnection.log diff --git a/awx/ui/client/lib/angular-breadcrumb/CHANGELOG.md b/awx/ui/client/lib/angular-breadcrumb/CHANGELOG.md deleted file mode 100644 index 6bec7b2db5..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/CHANGELOG.md +++ /dev/null @@ -1,151 +0,0 @@ - -### 0.4.1 (2015-08-09) - - -#### Features - -* add the scope-based ncyBreadcrumbIgnore flag ([934c5523](http://github.com/ncuillery/angular-breadcrumb/commit/934c5523208a9615d7cfa3abcb397bbe131332ac), closes [#42](http://github.com/ncuillery/angular-breadcrumb/issues/42), [#62](http://github.com/ncuillery/angular-breadcrumb/issues/42)) - - - -### 0.4.0 (2015-05-17) - - -#### Bug Fixes - -* **$breadcrumb:** Handle parents provided by StateObject references ([f4288d37](http://github.com/ncuillery/angular-breadcrumb/commit/f4288d375fd1090ffec1d67e85c6300d74d86d37), closes [#82](http://github.com/ncuillery/angular-breadcrumb/issues/82)) -* **ncyBreadcrumb:** - * Prevent memory leak when label is a binding ([264e10f6](http://github.com/ncuillery/angular-breadcrumb/commit/264e10f680e1bbb8d1e00cf500de39cac4222cfd), closes [#88](http://github.com/ncuillery/angular-breadcrumb/issues/88)) - * Removed trailing spaces from breadcrumb items([bc276ed5](http://github.com/ncuillery/angular-breadcrumb/commit/bc276ed5351a586d4a6dc83ada0687e6ca485344), closes [#77](http://github.com/ncuillery/angular-breadcrumb/issues/77)) - -#### Features - -* Add force to ncyBreadcrumb options ([31125a38](http://github.com/ncuillery/angular-breadcrumb/commit/31125a386d706dd76df807b3b02e1fccea38fb59), closes [#77](http://github.com/ncuillery/angular-breadcrumb/issues/78)) -* **ncyBreadcrumbText:** Add ncyBreadcrumbText directive ([82b2b443](http://github.com/ncuillery/angular-breadcrumb/commit/82b2b443fab220cd9ac7d3a8c90c1edc4291e54a), closes [#71](http://github.com/ncuillery/angular-breadcrumb/issues/71), [#83](http://github.com/ncuillery/angular-breadcrumb/issues/83)) - - - -### 0.3.3 (2014-12-16) - - -#### Bug Fixes - -* **ncyBreadcrumb:** define `$$templates` with var instead of attaching it to `window` ([c35c9d25](http://github.com/ncuillery/angular-breadcrumb/commit/c35c9d255b5e2585d225a961d1efdb51d18f6a55), closes [#55](http://github.com/ncuillery/angular-breadcrumb/issues/55)) - - - -### 0.3.2 (2014-11-15) - -* **npm:** nothing, it's only a blank release due to a network problem during the last `npm publish` (f...ing npm doesn't allow a republish with the same version number 
[npm-registry-couchapp#148](https://github.com/npm/npm-registry-couchapp/issues/148)). - - -### 0.3.1 (2014-11-15) - - -#### Bug Fixes - -* **npm:** update package.json after (unclean) npm publish ([ab8161c2](http://github.com/ncuillery/angular-breadcrumb/commit/ab8161c25f98613f725b5e5ff8fe147acd60b365), closes [#52](http://github.com/ncuillery/angular-breadcrumb/issues/52)) -* **sample:** Send correct url params for the room link in booking view ([876de49a](http://github.com/ncuillery/angular-breadcrumb/commit/876de49a9c5d6e2d75714a606238e9041ed49baf)) - - - -## 0.3.0 (2014-10-29) - - -#### Bug Fixes - -* organize state-level options in `ncyBreadcrumb` key instead of `data` ([1ea436d3](http://github.com/ncuillery/angular-breadcrumb/commit/1ea436d3f6d5470b7ae3e71e71259dbd2422bc00), closes [#30](http://github.com/ncuillery/angular-breadcrumb/issues/30)) -* curly braces appearing on title of sample app ([855e76cb](http://github.com/ncuillery/angular-breadcrumb/commit/855e76cb33fda607fa3caa230564b77b48262c40)) - - -#### Features - -* Add a global option to include abstract states ([6f0461ea](http://github.com/ncuillery/angular-breadcrumb/commit/6f0461ea7db36d8e10c29ed10de1f1c08d215a19), closes [#35](http://github.com/ncuillery/angular-breadcrumb/issues/35), [#28](http://github.com/ncuillery/angular-breadcrumb/issues/28)) -* **$breadcrumb:** - * Support url params when using `ncyBreadcrumb.parent` property ([55730045](http://github.com/ncuillery/angular-breadcrumb/commit/55730045dcf3b4fb1048c67f1e18953505563ed4), closes [#46](http://github.com/ncuillery/angular-breadcrumb/issues/46)) - * add the customization of the parent state with a function ([ada09015](http://github.com/ncuillery/angular-breadcrumb/commit/ada09015c49f05a94349dabf078f1ed621811aaa), closes [#32](http://github.com/ncuillery/angular-breadcrumb/issues/32)) -* **ncyBreadcrumbLast:** Add a new directive rendering the last step ([1eef24fb](http://github.com/ncuillery/angular-breadcrumb/commit/1eef24fbe862a1e3308181c38f50755843cf4426), closes [#37](http://github.com/ncuillery/angular-breadcrumb/issues/37)) - - -#### Breaking Changes - -* state-level options have been moved under the custom key -`ncyBreadcrumb` in the state's configuration. - -To migrate the code, follow the example below: -``` -// Before -$stateProvider.state('A', { - url: '/a', - data: { - ncyBreadcrumbLabel: 'State A' - } -}); -``` - -``` -// After -$stateProvider.state('A', { - url: '/a', - ncyBreadcrumb: { - label: 'State A' - } -}); -``` -See [API reference](https://github.com/ncuillery/angular-breadcrumb/wiki/API-Reference) for more information.
- ([1ea436d3](http://github.com/ncuillery/angular-breadcrumb/commit/1ea436d3f6d5470b7ae3e71e71259dbd2422bc00)) - - - -### 0.2.3 (2014-07-26) - - -#### Bug Fixes - -* **$breadcrumb:** use `$stateParams` in case of unhierarchical states ([1c3c05e0](http://github.com/ncuillery/angular-breadcrumb/commit/1c3c05e0acac191fe2e76db2ef18da339caefaaa), closes [#29](http://github.com/ncuillery/angular-breadcrumb/issues/29)) - - - -### 0.2.2 (2014-06-23) - - -#### Bug Fixes - -* catch the `$viewContentLoaded` earlier ([bb47dd54](http://github.com/ncuillery/angular-breadcrumb/commit/bb47dd54deb5efc579ccb9b1575e686803dee1c5), closes [#14](http://github.com/ncuillery/angular-breadcrumb/issues/14)) -* **sample:** - * make the CRU(D) operations on rooms work ([3ca89ec7](http://github.com/ncuillery/angular-breadcrumb/commit/3ca89ec771fd20dc4ab2d733612bdcfb96ced703)) - * prevent direct URL access to a day disabled in the datepicker ([95236916](http://github.com/ncuillery/angular-breadcrumb/commit/95236916e00b19464a3dfe3584ef1b18da9ffb25), closes [#17](http://github.com/ncuillery/angular-breadcrumb/issues/17)) - * use the same variable in the datepicker and from url params for state `booking.day` ([646f7060](http://github.com/ncuillery/angular-breadcrumb/commit/646f70607e494f0e5e3c2483ed69f689684b2742), closes [#16](http://github.com/ncuillery/angular-breadcrumb/issues/16)) - - -#### Features - -* **ncyBreadcrumb:** watch every expression found in labels ([1363515e](http://github.com/ncuillery/angular-breadcrumb/commit/1363515e20977ce2f39a1f5e5e1d701f0d7af296), closes [#20](http://github.com/ncuillery/angular-breadcrumb/issues/20)) - - - -### 0.2.1 (2014-05-16) - - -#### Bug Fixes - -* **$breadcrumb:** check if a state has a parent when looking for an inherited property ([77e668b5](http://github.com/ncuillery/angular-breadcrumb/commit/77e668b5eb759570a64c2a885e81580953af3201), closes [#11](http://github.com/ncuillery/angular-breadcrumb/issues/11)) - - - -### 0.2.0 (2014-05-08) - - -#### Bug Fixes - -* **$breadcrumb:** remove abstract states from breadcrumb ([8a06c5ab](http://github.com/ncuillery/angular-breadcrumb/commit/8a06c5abce749027d48f7309d1aabea1e447dfd5), closes [#8](http://github.com/ncuillery/angular-breadcrumb/issues/8)) -* **ncyBreadcrumb:** display the correct breadcrumb in case of direct access ([e1f455ba](http://github.com/ncuillery/angular-breadcrumb/commit/e1f455ba4def97d3fc76b53772867b5f9daf4232), closes [#10](http://github.com/ncuillery/angular-breadcrumb/issues/10)) - - -#### Features - -* **$breadcrumb:** - * add a configuration property for skipping a state in the breadcrumb ([dd255d90](http://github.com/ncuillery/angular-breadcrumb/commit/dd255d906c4231f44b48f066d4db197a9c6b9e27), closes [#9](http://github.com/ncuillery/angular-breadcrumb/issues/9)) - * allow chain of states customization ([028e493a](http://github.com/ncuillery/angular-breadcrumb/commit/028e493a1ebcae5ae60b8a9d42b949262000d7df), closes [#7](http://github.com/ncuillery/angular-breadcrumb/issues/7)) -* **ncyBreadcrumb:** add 'Element' declaration style '' ([b51441ea](http://github.com/ncuillery/angular-breadcrumb/commit/b51441eafb1659b782fea1f8668c7f455e1d6b4d)) - diff --git a/awx/ui/client/lib/angular-breadcrumb/CONTRIBUTING.md b/awx/ui/client/lib/angular-breadcrumb/CONTRIBUTING.md deleted file mode 100644 index 720c4f142a..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/CONTRIBUTING.md +++ /dev/null @@ -1,52 +0,0 @@ -# Contributing to angular-breadcrumb - -I am very glad to see this project living with PRs from
contributors who believe in it. Here are some guidelines to keep the contributions useful and efficient. - -## Development hints - -### Installation -- Check out the repository -- Run `npm install` -- Run `bower install` - -### Test running -This module uses the classic AngularJS stack with: - -- Karma (test runner) -- Jasmine (assertion framework) -- angular-mocks (AngularJS module for testing) - -Run the tests with the grunt task `grunt test`. It runs the tests with different versions of AngularJS. - -### Test developing -Tests are built around modules with a specific `$stateProvider` configuration: - -- [Basic configuration](https://github.com/ncuillery/angular-breadcrumb/blob/master/test/mock/test-modules.js#L6): Basic definitions (no template, no controller) -- [Interpolation configuration](https://github.com/ncuillery/angular-breadcrumb/blob/master/test/mock/test-modules.js#L21): States with bindings in `ncyBreadcrumbLabel` -- [HTML configuration](https://github.com/ncuillery/angular-breadcrumb/blob/master/test/mock/test-modules.js#L36): States with HTML in `ncyBreadcrumbLabel` -- [Sample configuration](https://github.com/ncuillery/angular-breadcrumb/blob/master/test/mock/test-modules.js#L41): Bridge towards the sample app configuration for use in tests -- [UI-router's configuration](https://github.com/ncuillery/angular-breadcrumb/blob/master/test/mock/test-ui-router-sample.js#L9): Clone of the UI-router sample app (complemented with breadcrumb configuration) - -These modules are loaded by Karma and are available in test specifications. - -Specifications are generally related to the directive `ncyBreadcrumb` or the service `$breadcrumb`. - -### Sample -If you are not familiar with JS testing, you can run the [sample](http://ncuillery.github.io/angular-breadcrumb/#/sample) locally for testing purposes by using `grunt sample`. Sources are live-reloaded after each change. - -## Submitting a Pull Request -- Fork the [repository](https://github.com/ncuillery/angular-breadcrumb/) -- Make your changes in a new git branch following the coding rules below. -- Run the grunt default task (by typing `grunt` or `grunt default`): it will run the tests and build the module in the `dist` directory. -- Commit the changes (including the `dist` directory) using the commit conventions explained below. -- Push and open the PR - - -## Coding rules -- When making changes to the source file, please check that your changes are covered by the tests. If not, create a new test case. - - -## Commit conventions -angular-breadcrumb uses the same strict conventions as AngularJS and UI-router. These conventions are explained [here](https://github.com/angular/angular.js/blob/master/CONTRIBUTING.md#-git-commit-guidelines). - -It is very important to follow these conventions, especially for the types `fix` and `feature`, which are used to generate CHANGELOG.md (via [grunt-conventional-changelog](https://github.com/btford/grunt-conventional-changelog)). diff --git a/awx/ui/client/lib/angular-breadcrumb/Gruntfile.js b/awx/ui/client/lib/angular-breadcrumb/Gruntfile.js deleted file mode 100644 index 744af4d211..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/Gruntfile.js +++ /dev/null @@ -1,258 +0,0 @@ -'use strict'; - -var LIVERELOAD_PORT = 35729; -var lrSnippet = require('connect-livereload')({ port: LIVERELOAD_PORT }); -var mountFolder = function (connect, dir) { - return connect.static(require('path').resolve(dir)); -}; - -module.exports = function (grunt) { - - // Project configuration.
- grunt.initConfig({ - // Metadata. - pkg: grunt.file.readJSON('package.json'), - headerDev: '/*! <%= pkg.name %> - v<%= pkg.version %>-dev-<%= grunt.template.today("yyyy-mm-dd") %>\n', - headerRelease: '/*! <%= pkg.name %> - v<%= pkg.version %>\n', - banner: '<%= pkg.homepage ? "* " + pkg.homepage + "\\n" : "" %>' + - '* Copyright (c) <%= grunt.template.today("yyyy") %> <%= pkg.author.name %>;' + - ' Licensed <%= _.pluck(pkg.licenses, "type").join(", ") %> */\n', - // Task configuration. - concat: { - dev: { - options: { - banner: '<%= headerDev %><%= banner %>\n(function (window, angular, undefined) {\n', - footer: '})(window, window.angular);\n', - stripBanners: true - }, - src: ['src/<%= pkg.name %>.js'], - dest: 'dist/<%= pkg.name %>.js' - }, - release: { - options: { - banner: '<%= headerRelease %><%= banner %>\n(function (window, angular, undefined) {\n', - footer: '})(window, window.angular);\n', - stripBanners: true - }, - src: ['src/<%= pkg.name %>.js'], - dest: 'release/<%= pkg.name %>.js' - } - }, - uglify: { - dev: { - options: { - banner: '<%= headerDev %><%= banner %>' - }, - src: '<%= concat.dev.dest %>', - dest: 'dist/<%= pkg.name %>.min.js' - }, - release: { - options: { - banner: '<%= headerRelease %><%= banner %>' - }, - src: '<%= concat.release.dest %>', - dest: 'release/<%= pkg.name %>.min.js' - } - }, - karma: { - unit: { - configFile: 'karma.conf.js' - } - }, - jshint: { - options: { - jshintrc: '.jshintrc' - }, - gruntfile: { - src: 'Gruntfile.js' - }, - sources: { - options: { - jshintrc: 'src/.jshintrc' - }, - src: ['src/**/*.js'] - }, - test: { - src: ['test/**/*.js'] - } - }, - watch: { - gruntfile: { - files: '<%= jshint.gruntfile.src %>', - tasks: ['jshint:gruntfile'] - }, - sources: { - files: '<%= jshint.sources.src %>', - tasks: ['jshint:sources', 'karma'] - }, - test: { - files: '<%= jshint.test.src %>', - tasks: ['jshint:test', 'karma'] - }, - sample: { - options: { - livereload: LIVERELOAD_PORT - }, - tasks: 'copy:breadcrumb', - files: [ - 'sample/*.{css,js,html}', - 'sample/controllers/*.{css,js,html}', - 'sample/views/*.{css,js,html}', - 'src/*.js' - ] - } - }, - copy: { - breadcrumb: { - files: [ - { - flatten: true, - expand: true, - src: [ - 'src/angular-breadcrumb.js' - ], - dest: 'sample/asset/' - } - ] - }, - asset: { - files: [ - { - flatten: true, - expand: true, - src: [ - 'dist/angular-breadcrumb.js', - 'bower_components/angular/angular.js', - 'bower_components/angular-ui-router/release/angular-ui-router.js', - 'bower_components/angular-ui-bootstrap-bower/ui-bootstrap-tpls.js', - 'bower_components/bootstrap/docs/assets/css/bootstrap.css', - 'bower_components/underscore/underscore.js' - ], - dest: 'sample/asset/' - } - ] - }, - img: { - files: [ - { - flatten: true, - expand: true, - src: [ - 'bower_components/bootstrap.css/img/glyphicons-halflings.png' - ], - dest: 'sample/img/' - } - ] - } - }, - connect: { - options: { - port: 9000, - hostname: 'localhost' - }, - livereload: { - options: { - middleware: function (connect) { - return [ - lrSnippet, - mountFolder(connect, 'sample') - ]; - } - } - } - }, - open: { - server: { - url: 'http://localhost:<%= connect.options.port %>/index.html' - } - }, - bump: { - options: { - files: ['package.json', 'bower.json'], - updateConfigs: ['pkg'] - } - }, - clean: { - release: ["sample/*.zip"], - test: ["testDependencies/*"] - }, - compress: { - release: { - options: { - archive: 'sample/<%= pkg.name %>-<%= pkg.version %>.zip' - }, - files: [ - {expand: true, cwd: 'release/', src: ['*.js']} - ] - } - }, 
- replace: { - release: { - src: ['sample/views/home.html'], - overwrite: true, - replacements: [{ - from: /angular-breadcrumb-[0-9]+\.[0-9]+\.[0-9]+\.zip/g, - to: "angular-breadcrumb-<%= pkg.version %>.zip" - }, - { - from: /\([0-9]+\.[0-9]+\.[0-9]+\)/g, - to: "(<%= pkg.version %>)" - }] - } - }, - shell: { - testMinimal: { - command: 'bower install angular#=1.0.8 angular-mocks#=1.0.8 angular-sanitize#=1.0.8 angular-ui-router#=0.2.0 --config.directory=. --config.cwd=testDependencies' - }, - test1dot2: { - command: 'bower install angular#=1.2.18 angular-mocks#=1.2.18 angular-sanitize#=1.2.18 angular-ui-router#=0.2.15 --config.directory=. --config.cwd=testDependencies' - }, - testLatest: { - command: 'bower install angular angular-mocks angular-sanitize angular-ui-router --config.directory=. --config.cwd=testDependencies' - } - } - - }); - - // These plugins provide necessary tasks. - grunt.loadNpmTasks('grunt-bump'); - grunt.loadNpmTasks('grunt-contrib-clean'); - grunt.loadNpmTasks('grunt-contrib-compress'); - grunt.loadNpmTasks('grunt-contrib-concat'); - grunt.loadNpmTasks('grunt-contrib-uglify'); - grunt.loadNpmTasks('grunt-contrib-jshint'); - grunt.loadNpmTasks('grunt-contrib-watch'); - grunt.loadNpmTasks('grunt-contrib-copy'); - grunt.loadNpmTasks('grunt-contrib-connect'); - grunt.loadNpmTasks('grunt-conventional-changelog'); - grunt.loadNpmTasks('grunt-karma'); - grunt.loadNpmTasks('grunt-open'); - grunt.loadNpmTasks('grunt-shell'); - grunt.loadNpmTasks('grunt-text-replace'); - - grunt.registerTask('test', ['jshint', 'testMin', 'test1dot2', 'testLatest']); - grunt.registerTask('testMin', ['clean:test', 'shell:testMinimal', 'karma']); - grunt.registerTask('test1dot2', ['clean:test', 'shell:test1dot2', 'karma']); - grunt.registerTask('testLatest', ['clean:test', 'shell:testLatest', 'karma']); - - grunt.registerTask('default', ['test', 'concat:dev', 'uglify:dev']); - - grunt.registerTask('sample', ['concat:dev', 'copy:asset', 'copy:img', 'connect:livereload', 'open', 'watch']); - - grunt.registerTask('release-prepare', 'Update all files for a release', function(target) { - if(!target) { - target = 'patch'; - } - grunt.task.run( - 'bump-only:' + target, // Version update - 'test', // Tests - 'concat:release', // Concat with release banner - 'uglify:release', // Minify with release banner - 'changelog', // Changelog update - 'clean:release', // Delete old version download file - 'compress:release', // New version download file - 'replace:release' // Update version in download button (link & label) - ); - }); - -}; diff --git a/awx/ui/client/lib/angular-breadcrumb/LICENSE b/awx/ui/client/lib/angular-breadcrumb/LICENSE deleted file mode 100644 index f0774fcf13..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -The MIT License - -Copyright (c) 2013 Nicolas Cuillery - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/awx/ui/client/lib/angular-breadcrumb/bower.json b/awx/ui/client/lib/angular-breadcrumb/bower.json deleted file mode 100644 index 3a0a2f944c..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/bower.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "angular-breadcrumb", - "description": "AngularJS module that generates a breadcrumb from ui-router's states", - "version": "0.4.1", - "main": "release/angular-breadcrumb.js", - "ignore": [ - "sample", - "src", - "test", - ".bowerrc", - ".coveralls.yml", - ".gitignore", - ".jshintrc", - ".travis.yml", - "gruntfile.js", - "bower.json", - "karma.conf.js", - "libpeerconnection.log", - "package.json", - "README.md" - ], - "dependencies": { - "angular": ">=1.0.8", - "angular-ui-router": ">=0.2.0" - }, - "devDependencies": { - "bootstrap": "~2.3.2", - "angular-ui-bootstrap-bower": "~0.8.0", - "underscore": "~1.5.1", - "angular-mocks": ">=1.0.8", - "angular-sanitize": ">=1.0.8" - } -} diff --git a/awx/ui/client/lib/angular-breadcrumb/dist/angular-breadcrumb.js b/awx/ui/client/lib/angular-breadcrumb/dist/angular-breadcrumb.js deleted file mode 100644 index e20ca158ec..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/dist/angular-breadcrumb.js +++ /dev/null @@ -1,369 +0,0 @@ -/*! angular-breadcrumb - v0.4.0-dev-2015-08-07 -* http://ncuillery.github.io/angular-breadcrumb -* Copyright (c) 2015 Nicolas Cuillery; Licensed MIT */ - -(function (window, angular, undefined) { -'use strict'; - -function isAOlderThanB(scopeA, scopeB) { - if(angular.equals(scopeA.length, scopeB.length)) { - return scopeA > scopeB; - } else { - return scopeA.length > scopeB.length; - } -} - -function parseStateRef(ref) { - var parsed = ref.replace(/\n/g, " ").match(/^([^(]+?)\s*(\((.*)\))?$/); - if (!parsed || parsed.length !== 4) { throw new Error("Invalid state ref '" + ref + "'"); } - return { state: parsed[1], paramExpr: parsed[3] || null }; -} - -function $Breadcrumb() { - - var $$options = { - prefixStateName: null, - template: 'bootstrap3', - templateUrl: null, - includeAbstract : false - }; - - this.setOptions = function(options) { - angular.extend($$options, options); - }; - - this.$get = ['$state', '$stateParams', '$rootScope', function($state, $stateParams, $rootScope) { - - var $lastViewScope = $rootScope; - - // Early catch of $viewContentLoaded event - $rootScope.$on('$viewContentLoaded', function (event) { - // With nested views, the event occur several times, in "wrong" order - if(!event.targetScope.ncyBreadcrumbIgnore && - isAOlderThanB(event.targetScope.$id, $lastViewScope.$id)) { - $lastViewScope = event.targetScope; - } - }); - - // Get the parent state - var $$parentState = function(state) { - // Check if state has explicit parent OR we try guess parent from its name - var parent = state.parent || (/^(.+)\.[^.]+$/.exec(state.name) || [])[1]; - var isObjectParent = typeof parent === "object"; - // if parent is a object reference, then extract the name - return isObjectParent ? 
parent.name : parent; - }; - - // Add the state in the chain if not already in and if not abstract - var $$addStateInChain = function(chain, stateRef) { - var conf, - parentParams, - ref = parseStateRef(stateRef), - force = false, - skip = false; - - for(var i=0, l=chain.length; i' + - '
  • ' + - '{{step.ncyBreadcrumbLabel}}' + - '{{step.ncyBreadcrumbLabel}}' + - '/' + - '
  • ' + - '', - bootstrap3: '' - }; - - return { - restrict: 'AE', - replace: true, - scope: {}, - template: $breadcrumb.getTemplate($$templates), - templateUrl: $breadcrumb.getTemplateUrl(), - link: { - post: function postLink(scope) { - var labelWatchers = []; - - var renderBreadcrumb = function() { - deregisterWatchers(labelWatchers); - labelWatchers = []; - - var viewScope = $breadcrumb.$getLastViewScope(); - scope.steps = $breadcrumb.getStatesChain(); - angular.forEach(scope.steps, function (step) { - if (step.ncyBreadcrumb && step.ncyBreadcrumb.label) { - var parseLabel = $interpolate(step.ncyBreadcrumb.label); - step.ncyBreadcrumbLabel = parseLabel(viewScope); - // Watcher for further viewScope updates - registerWatchers(labelWatchers, parseLabel, viewScope, step); - } else { - step.ncyBreadcrumbLabel = step.name; - } - }); - }; - - $rootScope.$on('$viewContentLoaded', function (event) { - if(!event.targetScope.ncyBreadcrumbIgnore) { - renderBreadcrumb(); - } - }); - - // View(s) may be already loaded while the directive's linking - renderBreadcrumb(); - } - } - }; -} -BreadcrumbDirective.$inject = ['$interpolate', '$breadcrumb', '$rootScope']; - -function BreadcrumbLastDirective($interpolate, $breadcrumb, $rootScope) { - - return { - restrict: 'A', - scope: {}, - template: '{{ncyBreadcrumbLabel}}', - compile: function(cElement, cAttrs) { - - // Override the default template if ncyBreadcrumbLast has a value - var template = cElement.attr(cAttrs.$attr.ncyBreadcrumbLast); - if(template) { - cElement.html(template); - } - - return { - post: function postLink(scope) { - var labelWatchers = []; - - var renderLabel = function() { - deregisterWatchers(labelWatchers); - labelWatchers = []; - - var viewScope = $breadcrumb.$getLastViewScope(); - var lastStep = $breadcrumb.getLastStep(); - if(lastStep) { - scope.ncyBreadcrumbLink = lastStep.ncyBreadcrumbLink; - if (lastStep.ncyBreadcrumb && lastStep.ncyBreadcrumb.label) { - var parseLabel = $interpolate(lastStep.ncyBreadcrumb.label); - scope.ncyBreadcrumbLabel = parseLabel(viewScope); - // Watcher for further viewScope updates - // Tricky last arg: the last step is the entire scope of the directive ! 
- registerWatchers(labelWatchers, parseLabel, viewScope, scope); - } else { - scope.ncyBreadcrumbLabel = lastStep.name; - } - } - }; - - $rootScope.$on('$viewContentLoaded', function (event) { - if(!event.targetScope.ncyBreadcrumbIgnore) { - renderLabel(); - } - }); - - // View(s) may be already loaded while the directive's linking - renderLabel(); - } - }; - - } - }; -} -BreadcrumbLastDirective.$inject = ['$interpolate', '$breadcrumb', '$rootScope']; - -function BreadcrumbTextDirective($interpolate, $breadcrumb, $rootScope) { - - return { - restrict: 'A', - scope: {}, - template: '{{ncyBreadcrumbChain}}', - - compile: function(cElement, cAttrs) { - // Override the default template if ncyBreadcrumbText has a value - var template = cElement.attr(cAttrs.$attr.ncyBreadcrumbText); - if(template) { - cElement.html(template); - } - - var separator = cElement.attr(cAttrs.$attr.ncyBreadcrumbTextSeparator) || ' / '; - - return { - post: function postLink(scope) { - var labelWatchers = []; - - var registerWatchersText = function(labelWatcherArray, interpolationFunction, viewScope) { - angular.forEach(getExpression(interpolationFunction), function(expression) { - var watcher = viewScope.$watch(expression, function(newValue, oldValue) { - if (newValue !== oldValue) { - renderLabel(); - } - }); - labelWatcherArray.push(watcher); - }); - }; - - var renderLabel = function() { - deregisterWatchers(labelWatchers); - labelWatchers = []; - - var viewScope = $breadcrumb.$getLastViewScope(); - var steps = $breadcrumb.getStatesChain(); - var combinedLabels = []; - angular.forEach(steps, function (step) { - if (step.ncyBreadcrumb && step.ncyBreadcrumb.label) { - var parseLabel = $interpolate(step.ncyBreadcrumb.label); - combinedLabels.push(parseLabel(viewScope)); - // Watcher for further viewScope updates - registerWatchersText(labelWatchers, parseLabel, viewScope); - } else { - combinedLabels.push(step.name); - } - }); - - scope.ncyBreadcrumbChain = combinedLabels.join(separator); - }; - - $rootScope.$on('$viewContentLoaded', function (event) { - if(!event.targetScope.ncyBreadcrumbIgnore) { - renderLabel(); - } - }); - - // View(s) may be already loaded while the directive's linking - renderLabel(); - } - }; - - } - }; -} -BreadcrumbTextDirective.$inject = ['$interpolate', '$breadcrumb', '$rootScope']; - -angular.module('ncy-angular-breadcrumb', ['ui.router.state']) - .provider('$breadcrumb', $Breadcrumb) - .directive('ncyBreadcrumb', BreadcrumbDirective) - .directive('ncyBreadcrumbLast', BreadcrumbLastDirective) - .directive('ncyBreadcrumbText', BreadcrumbTextDirective); -})(window, window.angular); diff --git a/awx/ui/client/lib/angular-breadcrumb/dist/angular-breadcrumb.min.js b/awx/ui/client/lib/angular-breadcrumb/dist/angular-breadcrumb.min.js deleted file mode 100644 index 1da045d879..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/dist/angular-breadcrumb.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! 
angular-breadcrumb - v0.4.0-dev-2015-08-07 -* http://ncuillery.github.io/angular-breadcrumb -* Copyright (c) 2015 Nicolas Cuillery; Licensed MIT */ -!function(a,b,c){"use strict";function d(a,c){return b.equals(a.length,c.length)?a>c:a.length>c.length}function e(a){var b=a.replace(/\n/g," ").match(/^([^(]+?)\s*(\((.*)\))?$/);if(!b||4!==b.length)throw new Error("Invalid state ref '"+a+"'");return{state:b[1],paramExpr:b[3]||null}}function f(){var a={prefixStateName:null,template:"bootstrap3",templateUrl:null,includeAbstract:!1};this.setOptions=function(c){b.extend(a,c)},this.$get=["$state","$stateParams","$rootScope",function(b,f,g){var h=g;g.$on("$viewContentLoaded",function(a){!a.targetScope.ncyBreadcrumbIgnore&&d(a.targetScope.$id,h.$id)&&(h=a.targetScope)});var i=function(a){var b=a.parent||(/^(.+)\.[^.]+$/.exec(a.name)||[])[1],c="object"==typeof b;return c?b.name:b},j=function(c,d){for(var g,i,j=e(d),k=!1,l=!1,m=0,n=c.length;n>m;m+=1)if(c[m].name===j.state)return;g=b.get(j.state),g.ncyBreadcrumb&&(g.ncyBreadcrumb.force&&(k=!0),g.ncyBreadcrumb.skip&&(l=!0)),g["abstract"]&&!a.includeAbstract&&!k||l||(j.paramExpr&&(i=h.$eval(j.paramExpr)),g.ncyBreadcrumbLink=b.href(j.state,i||f||{}),c.unshift(g))},k=function(a){var c=e(a),d=b.get(c.state);if(d.ncyBreadcrumb&&d.ncyBreadcrumb.parent){var f="function"==typeof d.ncyBreadcrumb.parent,g=f?d.ncyBreadcrumb.parent(h):d.ncyBreadcrumb.parent;if(g)return g}return i(d)};return{getTemplate:function(b){return a.templateUrl?null:b[a.template]?b[a.template]:a.template},getTemplateUrl:function(){return a.templateUrl},getStatesChain:function(c){for(var d=[],e=b.$current.self.name;e;e=k(e))if(j(d,e),c&&d.length)return d;return a.prefixStateName&&j(d,a.prefixStateName),d},getLastStep:function(){var a=this.getStatesChain(!0);return a.length?a[0]:c},$getLastViewScope:function(){return h}}}]}function g(a,c,d){var e={bootstrap2:'',bootstrap3:''};return{restrict:"AE",replace:!0,scope:{},template:c.getTemplate(e),templateUrl:c.getTemplateUrl(),link:{post:function(e){var f=[],g=function(){l(f),f=[];var d=c.$getLastViewScope();e.steps=c.getStatesChain(),b.forEach(e.steps,function(b){if(b.ncyBreadcrumb&&b.ncyBreadcrumb.label){var c=a(b.ncyBreadcrumb.label);b.ncyBreadcrumbLabel=c(d),k(f,c,d,b)}else b.ncyBreadcrumbLabel=b.name})};d.$on("$viewContentLoaded",function(a){a.targetScope.ncyBreadcrumbIgnore||g()}),g()}}}}function h(a,b,c){return{restrict:"A",scope:{},template:"{{ncyBreadcrumbLabel}}",compile:function(d,e){var f=d.attr(e.$attr.ncyBreadcrumbLast);return f&&d.html(f),{post:function(d){var e=[],f=function(){l(e),e=[];var c=b.$getLastViewScope(),f=b.getLastStep();if(f)if(d.ncyBreadcrumbLink=f.ncyBreadcrumbLink,f.ncyBreadcrumb&&f.ncyBreadcrumb.label){var g=a(f.ncyBreadcrumb.label);d.ncyBreadcrumbLabel=g(c),k(e,g,c,d)}else d.ncyBreadcrumbLabel=f.name};c.$on("$viewContentLoaded",function(a){a.targetScope.ncyBreadcrumbIgnore||f()}),f()}}}}}function i(a,c,d){return{restrict:"A",scope:{},template:"{{ncyBreadcrumbChain}}",compile:function(e,f){var g=e.attr(f.$attr.ncyBreadcrumbText);g&&e.html(g);var h=e.attr(f.$attr.ncyBreadcrumbTextSeparator)||" / ";return{post:function(e){var f=[],g=function(a,c,d){b.forEach(j(c),function(b){var c=d.$watch(b,function(a,b){a!==b&&i()});a.push(c)})},i=function(){l(f),f=[];var d=c.$getLastViewScope(),i=c.getStatesChain(),j=[];b.forEach(i,function(b){if(b.ncyBreadcrumb&&b.ncyBreadcrumb.label){var c=a(b.ncyBreadcrumb.label);j.push(c(d)),g(f,c,d)}else 
j.push(b.name)}),e.ncyBreadcrumbChain=j.join(h)};d.$on("$viewContentLoaded",function(a){a.targetScope.ncyBreadcrumbIgnore||i()}),i()}}}}}var j=function(a){if(a.expressions)return a.expressions;var c=[];return b.forEach(a.parts,function(a){b.isFunction(a)&&c.push(a.exp)}),c},k=function(a,c,d,e){b.forEach(j(c),function(b){var f=d.$watch(b,function(){e.ncyBreadcrumbLabel=c(d)});a.push(f)})},l=function(a){b.forEach(a,function(a){a()})};g.$inject=["$interpolate","$breadcrumb","$rootScope"],h.$inject=["$interpolate","$breadcrumb","$rootScope"],i.$inject=["$interpolate","$breadcrumb","$rootScope"],b.module("ncy-angular-breadcrumb",["ui.router.state"]).provider("$breadcrumb",f).directive("ncyBreadcrumb",g).directive("ncyBreadcrumbLast",h).directive("ncyBreadcrumbText",i)}(window,window.angular); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-breadcrumb/release/angular-breadcrumb.js b/awx/ui/client/lib/angular-breadcrumb/release/angular-breadcrumb.js deleted file mode 100644 index ba6e5dcd39..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/release/angular-breadcrumb.js +++ /dev/null @@ -1,369 +0,0 @@ -/*! angular-breadcrumb - v0.4.1 -* http://ncuillery.github.io/angular-breadcrumb -* Copyright (c) 2015 Nicolas Cuillery; Licensed MIT */ - -(function (window, angular, undefined) { -'use strict'; - -function isAOlderThanB(scopeA, scopeB) { - if(angular.equals(scopeA.length, scopeB.length)) { - return scopeA > scopeB; - } else { - return scopeA.length > scopeB.length; - } -} - -function parseStateRef(ref) { - var parsed = ref.replace(/\n/g, " ").match(/^([^(]+?)\s*(\((.*)\))?$/); - if (!parsed || parsed.length !== 4) { throw new Error("Invalid state ref '" + ref + "'"); } - return { state: parsed[1], paramExpr: parsed[3] || null }; -} - -function $Breadcrumb() { - - var $$options = { - prefixStateName: null, - template: 'bootstrap3', - templateUrl: null, - includeAbstract : false - }; - - this.setOptions = function(options) { - angular.extend($$options, options); - }; - - this.$get = ['$state', '$stateParams', '$rootScope', function($state, $stateParams, $rootScope) { - - var $lastViewScope = $rootScope; - - // Early catch of $viewContentLoaded event - $rootScope.$on('$viewContentLoaded', function (event) { - // With nested views, the event occur several times, in "wrong" order - if(!event.targetScope.ncyBreadcrumbIgnore && - isAOlderThanB(event.targetScope.$id, $lastViewScope.$id)) { - $lastViewScope = event.targetScope; - } - }); - - // Get the parent state - var $$parentState = function(state) { - // Check if state has explicit parent OR we try guess parent from its name - var parent = state.parent || (/^(.+)\.[^.]+$/.exec(state.name) || [])[1]; - var isObjectParent = typeof parent === "object"; - // if parent is a object reference, then extract the name - return isObjectParent ? parent.name : parent; - }; - - // Add the state in the chain if not already in and if not abstract - var $$addStateInChain = function(chain, stateRef) { - var conf, - parentParams, - ref = parseStateRef(stateRef), - force = false, - skip = false; - - for(var i=0, l=chain.length; i' + - '
  • ' + - '{{step.ncyBreadcrumbLabel}}' + - '{{step.ncyBreadcrumbLabel}}' + - '/' + - '
  • ' + - '', - bootstrap3: '' - }; - - return { - restrict: 'AE', - replace: true, - scope: {}, - template: $breadcrumb.getTemplate($$templates), - templateUrl: $breadcrumb.getTemplateUrl(), - link: { - post: function postLink(scope) { - var labelWatchers = []; - - var renderBreadcrumb = function() { - deregisterWatchers(labelWatchers); - labelWatchers = []; - - var viewScope = $breadcrumb.$getLastViewScope(); - scope.steps = $breadcrumb.getStatesChain(); - angular.forEach(scope.steps, function (step) { - if (step.ncyBreadcrumb && step.ncyBreadcrumb.label) { - var parseLabel = $interpolate(step.ncyBreadcrumb.label); - step.ncyBreadcrumbLabel = parseLabel(viewScope); - // Watcher for further viewScope updates - registerWatchers(labelWatchers, parseLabel, viewScope, step); - } else { - step.ncyBreadcrumbLabel = step.name; - } - }); - }; - - $rootScope.$on('$viewContentLoaded', function (event) { - if(!event.targetScope.ncyBreadcrumbIgnore) { - renderBreadcrumb(); - } - }); - - // View(s) may be already loaded while the directive's linking - renderBreadcrumb(); - } - } - }; -} -BreadcrumbDirective.$inject = ['$interpolate', '$breadcrumb', '$rootScope']; - -function BreadcrumbLastDirective($interpolate, $breadcrumb, $rootScope) { - - return { - restrict: 'A', - scope: {}, - template: '{{ncyBreadcrumbLabel}}', - compile: function(cElement, cAttrs) { - - // Override the default template if ncyBreadcrumbLast has a value - var template = cElement.attr(cAttrs.$attr.ncyBreadcrumbLast); - if(template) { - cElement.html(template); - } - - return { - post: function postLink(scope) { - var labelWatchers = []; - - var renderLabel = function() { - deregisterWatchers(labelWatchers); - labelWatchers = []; - - var viewScope = $breadcrumb.$getLastViewScope(); - var lastStep = $breadcrumb.getLastStep(); - if(lastStep) { - scope.ncyBreadcrumbLink = lastStep.ncyBreadcrumbLink; - if (lastStep.ncyBreadcrumb && lastStep.ncyBreadcrumb.label) { - var parseLabel = $interpolate(lastStep.ncyBreadcrumb.label); - scope.ncyBreadcrumbLabel = parseLabel(viewScope); - // Watcher for further viewScope updates - // Tricky last arg: the last step is the entire scope of the directive ! 
- registerWatchers(labelWatchers, parseLabel, viewScope, scope); - } else { - scope.ncyBreadcrumbLabel = lastStep.name; - } - } - }; - - $rootScope.$on('$viewContentLoaded', function (event) { - if(!event.targetScope.ncyBreadcrumbIgnore) { - renderLabel(); - } - }); - - // View(s) may be already loaded while the directive's linking - renderLabel(); - } - }; - - } - }; -} -BreadcrumbLastDirective.$inject = ['$interpolate', '$breadcrumb', '$rootScope']; - -function BreadcrumbTextDirective($interpolate, $breadcrumb, $rootScope) { - - return { - restrict: 'A', - scope: {}, - template: '{{ncyBreadcrumbChain}}', - - compile: function(cElement, cAttrs) { - // Override the default template if ncyBreadcrumbText has a value - var template = cElement.attr(cAttrs.$attr.ncyBreadcrumbText); - if(template) { - cElement.html(template); - } - - var separator = cElement.attr(cAttrs.$attr.ncyBreadcrumbTextSeparator) || ' / '; - - return { - post: function postLink(scope) { - var labelWatchers = []; - - var registerWatchersText = function(labelWatcherArray, interpolationFunction, viewScope) { - angular.forEach(getExpression(interpolationFunction), function(expression) { - var watcher = viewScope.$watch(expression, function(newValue, oldValue) { - if (newValue !== oldValue) { - renderLabel(); - } - }); - labelWatcherArray.push(watcher); - }); - }; - - var renderLabel = function() { - deregisterWatchers(labelWatchers); - labelWatchers = []; - - var viewScope = $breadcrumb.$getLastViewScope(); - var steps = $breadcrumb.getStatesChain(); - var combinedLabels = []; - angular.forEach(steps, function (step) { - if (step.ncyBreadcrumb && step.ncyBreadcrumb.label) { - var parseLabel = $interpolate(step.ncyBreadcrumb.label); - combinedLabels.push(parseLabel(viewScope)); - // Watcher for further viewScope updates - registerWatchersText(labelWatchers, parseLabel, viewScope); - } else { - combinedLabels.push(step.name); - } - }); - - scope.ncyBreadcrumbChain = combinedLabels.join(separator); - }; - - $rootScope.$on('$viewContentLoaded', function (event) { - if(!event.targetScope.ncyBreadcrumbIgnore) { - renderLabel(); - } - }); - - // View(s) may be already loaded while the directive's linking - renderLabel(); - } - }; - - } - }; -} -BreadcrumbTextDirective.$inject = ['$interpolate', '$breadcrumb', '$rootScope']; - -angular.module('ncy-angular-breadcrumb', ['ui.router.state']) - .provider('$breadcrumb', $Breadcrumb) - .directive('ncyBreadcrumb', BreadcrumbDirective) - .directive('ncyBreadcrumbLast', BreadcrumbLastDirective) - .directive('ncyBreadcrumbText', BreadcrumbTextDirective); -})(window, window.angular); diff --git a/awx/ui/client/lib/angular-breadcrumb/release/angular-breadcrumb.min.js b/awx/ui/client/lib/angular-breadcrumb/release/angular-breadcrumb.min.js deleted file mode 100644 index ceb49da17e..0000000000 --- a/awx/ui/client/lib/angular-breadcrumb/release/angular-breadcrumb.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! 
angular-breadcrumb - v0.4.1 -* http://ncuillery.github.io/angular-breadcrumb -* Copyright (c) 2015 Nicolas Cuillery; Licensed MIT */ -!function(a,b,c){"use strict";function d(a,c){return b.equals(a.length,c.length)?a>c:a.length>c.length}function e(a){var b=a.replace(/\n/g," ").match(/^([^(]+?)\s*(\((.*)\))?$/);if(!b||4!==b.length)throw new Error("Invalid state ref '"+a+"'");return{state:b[1],paramExpr:b[3]||null}}function f(){var a={prefixStateName:null,template:"bootstrap3",templateUrl:null,includeAbstract:!1};this.setOptions=function(c){b.extend(a,c)},this.$get=["$state","$stateParams","$rootScope",function(b,f,g){var h=g;g.$on("$viewContentLoaded",function(a){!a.targetScope.ncyBreadcrumbIgnore&&d(a.targetScope.$id,h.$id)&&(h=a.targetScope)});var i=function(a){var b=a.parent||(/^(.+)\.[^.]+$/.exec(a.name)||[])[1],c="object"==typeof b;return c?b.name:b},j=function(c,d){for(var g,i,j=e(d),k=!1,l=!1,m=0,n=c.length;n>m;m+=1)if(c[m].name===j.state)return;g=b.get(j.state),g.ncyBreadcrumb&&(g.ncyBreadcrumb.force&&(k=!0),g.ncyBreadcrumb.skip&&(l=!0)),g["abstract"]&&!a.includeAbstract&&!k||l||(j.paramExpr&&(i=h.$eval(j.paramExpr)),g.ncyBreadcrumbLink=b.href(j.state,i||f||{}),c.unshift(g))},k=function(a){var c=e(a),d=b.get(c.state);if(d.ncyBreadcrumb&&d.ncyBreadcrumb.parent){var f="function"==typeof d.ncyBreadcrumb.parent,g=f?d.ncyBreadcrumb.parent(h):d.ncyBreadcrumb.parent;if(g)return g}return i(d)};return{getTemplate:function(b){return a.templateUrl?null:b[a.template]?b[a.template]:a.template},getTemplateUrl:function(){return a.templateUrl},getStatesChain:function(c){for(var d=[],e=b.$current.self.name;e;e=k(e))if(j(d,e),c&&d.length)return d;return a.prefixStateName&&j(d,a.prefixStateName),d},getLastStep:function(){var a=this.getStatesChain(!0);return a.length?a[0]:c},$getLastViewScope:function(){return h}}}]}function g(a,c,d){var e={bootstrap2:'',bootstrap3:''};return{restrict:"AE",replace:!0,scope:{},template:c.getTemplate(e),templateUrl:c.getTemplateUrl(),link:{post:function(e){var f=[],g=function(){l(f),f=[];var d=c.$getLastViewScope();e.steps=c.getStatesChain(),b.forEach(e.steps,function(b){if(b.ncyBreadcrumb&&b.ncyBreadcrumb.label){var c=a(b.ncyBreadcrumb.label);b.ncyBreadcrumbLabel=c(d),k(f,c,d,b)}else b.ncyBreadcrumbLabel=b.name})};d.$on("$viewContentLoaded",function(a){a.targetScope.ncyBreadcrumbIgnore||g()}),g()}}}}function h(a,b,c){return{restrict:"A",scope:{},template:"{{ncyBreadcrumbLabel}}",compile:function(d,e){var f=d.attr(e.$attr.ncyBreadcrumbLast);return f&&d.html(f),{post:function(d){var e=[],f=function(){l(e),e=[];var c=b.$getLastViewScope(),f=b.getLastStep();if(f)if(d.ncyBreadcrumbLink=f.ncyBreadcrumbLink,f.ncyBreadcrumb&&f.ncyBreadcrumb.label){var g=a(f.ncyBreadcrumb.label);d.ncyBreadcrumbLabel=g(c),k(e,g,c,d)}else d.ncyBreadcrumbLabel=f.name};c.$on("$viewContentLoaded",function(a){a.targetScope.ncyBreadcrumbIgnore||f()}),f()}}}}}function i(a,c,d){return{restrict:"A",scope:{},template:"{{ncyBreadcrumbChain}}",compile:function(e,f){var g=e.attr(f.$attr.ncyBreadcrumbText);g&&e.html(g);var h=e.attr(f.$attr.ncyBreadcrumbTextSeparator)||" / ";return{post:function(e){var f=[],g=function(a,c,d){b.forEach(j(c),function(b){var c=d.$watch(b,function(a,b){a!==b&&i()});a.push(c)})},i=function(){l(f),f=[];var d=c.$getLastViewScope(),i=c.getStatesChain(),j=[];b.forEach(i,function(b){if(b.ncyBreadcrumb&&b.ncyBreadcrumb.label){var c=a(b.ncyBreadcrumb.label);j.push(c(d)),g(f,c,d)}else 
j.push(b.name)}),e.ncyBreadcrumbChain=j.join(h)};d.$on("$viewContentLoaded",function(a){a.targetScope.ncyBreadcrumbIgnore||i()}),i()}}}}}var j=function(a){if(a.expressions)return a.expressions;var c=[];return b.forEach(a.parts,function(a){b.isFunction(a)&&c.push(a.exp)}),c},k=function(a,c,d,e){b.forEach(j(c),function(b){var f=d.$watch(b,function(){e.ncyBreadcrumbLabel=c(d)});a.push(f)})},l=function(a){b.forEach(a,function(a){a()})};g.$inject=["$interpolate","$breadcrumb","$rootScope"],h.$inject=["$interpolate","$breadcrumb","$rootScope"],i.$inject=["$interpolate","$breadcrumb","$rootScope"],b.module("ncy-angular-breadcrumb",["ui.router.state"]).provider("$breadcrumb",f).directive("ncyBreadcrumb",g).directive("ncyBreadcrumbLast",h).directive("ncyBreadcrumbText",i)}(window,window.angular); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-codemirror/.bower.json b/awx/ui/client/lib/angular-codemirror/.bower.json deleted file mode 100644 index c88cb405db..0000000000 --- a/awx/ui/client/lib/angular-codemirror/.bower.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "angular-codemirror", - "version": "1.0.3", - "dependencies": { - "angular": "latest", - "angular-route": "latest", - "jquery": "latest", - "jqueryui": "latest", - "components-font-awesome": "latest", - "twitter": "latest", - "js-yaml": "latest", - "jsonlint": "latest", - "codemirror": "latest" - }, - "homepage": "https://github.com/chouseknecht/angular-codemirror", - "_release": "1.0.3", - "_resolution": { - "type": "version", - "tag": "1.0.3", - "commit": "b94dc86fde8f60a50b324054806d29d742177d21" - }, - "_source": "https://github.com/chouseknecht/angular-codemirror.git", - "_target": "~1.0.3", - "_originalSource": "angular-codemirror" -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-codemirror/.gitignore b/awx/ui/client/lib/angular-codemirror/.gitignore deleted file mode 100644 index c346b13427..0000000000 --- a/awx/ui/client/lib/angular-codemirror/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -bower_components/ -node_modules/ diff --git a/awx/ui/client/lib/angular-codemirror/.jshintrc b/awx/ui/client/lib/angular-codemirror/.jshintrc deleted file mode 100644 index e82866db11..0000000000 --- a/awx/ui/client/lib/angular-codemirror/.jshintrc +++ /dev/null @@ -1,21 +0,0 @@ -{ - // Details: https://github.com/victorporof/Sublime-JSHint#using-your-own-jshintrc-options - // Example: https://github.com/jshint/jshint/blob/master/examples/.jshintrc - // Documentation: http://www.jshint.com/docs/ - - "browser": true, - "jquery": true, - "esnext": true, - "globalstrict": true, - "globals": { "angular":false, "alert":true, "CodeMirror":false, "jsyaml":false }, - "strict": false, - "quotmark": false, - "smarttabs": true, - "trailing": true, - "undef": true, - "unused": true, - "eqeqeq": true, - "indent": 4, - "onevar": true, - "newcap": false -} diff --git a/awx/ui/client/lib/angular-codemirror/LICENSE.md b/awx/ui/client/lib/angular-codemirror/LICENSE.md deleted file mode 100644 index b549e5bf9e..0000000000 --- a/awx/ui/client/lib/angular-codemirror/LICENSE.md +++ /dev/null @@ -1,24 +0,0 @@ -AngularCodeMirror -================ - -Copyright (c) 2014 Chris Houseknecht - -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of angular-codemirror and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of 
the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/awx/ui/client/lib/angular-codemirror/README.md b/awx/ui/client/lib/angular-codemirror/README.md deleted file mode 100644 index e871884187..0000000000 --- a/awx/ui/client/lib/angular-codemirror/README.md +++ /dev/null @@ -1,29 +0,0 @@ -AngularCodeMirror -================= - -Incorporate [CodeMirror](http://www.codemirror.net) into your AngularJS app. Presents the editor in a resizable, draggable modal dialog styled with Twitter Bootstrap. Pass in any valid CodeMirror options to make the editor fit your app needs. - -Installation: -------------- -bower install angular-codemirror - -Example App: ------------- -With [Node.js](http://nodejs.org) installed, you can run the sample app locally. Clone the repo to a local projects directory, install package dependencies, and then run with the included server: - - cd projects - git clone git@github.com:chouseknecht/angular-codemirror.git - cd angular-codemirror - bower install - node ./scripts/web-server.js - -Point your browser to http://localhost:8000/app/index.html. Click the code editor link. - -How To: -------- -If you installed with Bower, then all the dependencies will exist in bower_components. See app/index.html for a template of how to include all the needed .js and .css files. If you want to install dependencies manually, review bower.json for a list of what's needed. - -Check the CodeMirror documentation to see what needs to be included for the mode and options you choose. Again, if you installed with Bower, then everything you need should be found under bower_components. - -Incorporate into your Angular app by following the example in app/js/sampleApp.js. - diff --git a/awx/ui/client/lib/angular-codemirror/app/css/sampleApp.css b/awx/ui/client/lib/angular-codemirror/app/css/sampleApp.css deleted file mode 100644 index c6902629af..0000000000 --- a/awx/ui/client/lib/angular-codemirror/app/css/sampleApp.css +++ /dev/null @@ -1,68 +0,0 @@ -/********************************************* - * Copyright (c) 2013-2014 Chris Houseknecht - * - * SampleForm.js - * - * Demonstrate some of the things you can do with angular-forms.js to - * generate clean, consistent forms in your app. 
- * - */ - -body { - padding-bottom: 80px; -} - -#parse-type-group { - margin: 15px 0; -} - -#parse-type-label { - display: inline-block; - padding-right: 10px; - font-weight: bold; -} - -textarea { - resize: none; -} - -a, -a:active, -a:link, -a:hover { - text-decoration: none; -} - -.external-editor-link { - display: inline-block; - margin-left: 20px; -} - -/*.ui-dialog-titlebar-close { - color: #000; - border: none; - background-color: transparent; -}*/ - -.red-txt { - color: #dd1b16; -} - -.navbar-default .navbar-brand { - font-size: 24px; - color: #000; -} - -.navigation { - margin-top: 20px; - padding-right: 15px; -} - -.footer { - margin-top: 30px; -} - -.footer .navbar-brand { - margin-bottom: 20px; - padding-right: 0; -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-codemirror/app/index.html b/awx/ui/client/lib/angular-codemirror/app/index.html deleted file mode 100644 index 719aebfc05..0000000000 --- a/awx/ui/client/lib/angular-codemirror/app/index.html +++ /dev/null @@ -1,70 +0,0 @@ - - - - - AngularCodeMirror | Sample application - - - - - - - - - - - - - - - - - - - -
-[index.html markup was lost to tag stripping; per the README above, this page was a template showing how to include all of the required .js and .css files for the sample app]
    - - - - - - - - - - - - - - - - - - - - - - diff --git a/awx/ui/client/lib/angular-codemirror/app/js/sampleApp.js b/awx/ui/client/lib/angular-codemirror/app/js/sampleApp.js deleted file mode 100644 index 199cca43ab..0000000000 --- a/awx/ui/client/lib/angular-codemirror/app/js/sampleApp.js +++ /dev/null @@ -1,135 +0,0 @@ -/********************************************** - * sampleApp.js - * - * Copyright (c) 2013-2014 Chris Houseknecht - * - * Distributed under The MIT License (MIT) - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - */ - -'use strict'; - -angular.module('sampleApp', ['ngRoute', 'AngularCodeMirrorModule']) - - .config(['$routeProvider', function($routeProvider) { - $routeProvider - .when('/', { - templateUrl: 'partials/main.html', - controller: 'sampleController' - }) - .otherwise({ - redirectTo: '/' - }); - }]) - - .controller('sampleController', ['$scope', 'AngularCodeMirror', function($scope, AngularCodeMirror) { - - $scope.parseType = 'json'; - $scope.codeValue = '{}'; - - var container = document.getElementById('main-view'), - modes = { - yaml: { - mode:"text/x-yaml", - matchBrackets: true, - autoCloseBrackets: true, - styleActiveLine: true, - lineNumbers: true, - gutters: ["CodeMirror-lint-markers"], - lint: true - }, - json: { - mode: "application/json", - styleActiveLine: true, - matchBrackets: true, - autoCloseBrackets: true, - lineNumbers: true, - gutters: ["CodeMirror-lint-markers"], - lint: true - } - }, - codeMirror = AngularCodeMirror(); - - codeMirror.addModes(modes); - - $scope.parseTypeChange = function() { - var json_obj; - if ($scope.parseType === 'json') { - // converting yaml to json - try { - json_obj = jsyaml.load($scope.codeValue); - if ($.isEmptyObject(json_obj)) { - $scope.codeValue = "{}"; - } - else { - $scope.codeValue = JSON.stringify(json_obj, null, " "); - } - } - catch (e) { - alert('Failed to parse valid YAML. ' + e.message); - setTimeout( function() { $scope.$apply( function() { $scope.parseType = 'yaml'; }); }, 500); - } - } - else { - // convert json to yaml - try { - json_obj = JSON.parse($scope.codeValue); - if ($.isEmptyObject(json_obj)) { - $scope.codeValue = '---'; - } - else { - $scope.codeValue = jsyaml.safeDump(json_obj); - } - } - catch (e) { - alert('Failed to parse valid JSON. 
' + e.message); - setTimeout( function() { $scope.$apply( function() { $scope.parseType = 'json'; }); }, 500 ); - } - } - }; - - $scope.showCodeEditor = function() { - var title = 'Edit ' + $scope.parseType.toUpperCase(); - // showModal is the dialog entry point defined in lib/AngularCodeMirror.js - codeMirror.showModal({ - scope: $scope, - container: container, - mode: $scope.parseType, - model: 'codeValue', - title: title - }); - }; - - }]) - - .directive('afTooltip', [ function() { - return { - link: function(scope, element, attrs) { - var placement = (attrs.placement) ? attrs.placement : 'top'; - $(element).tooltip({ - html: true, - placement: placement, - title: attrs.afTooltip, - trigger: 'hover', - container: 'body' - }); - } - }; - }]); diff --git a/awx/ui/client/lib/angular-codemirror/app/partials/main.html b/awx/ui/client/lib/angular-codemirror/app/partials/main.html deleted file mode 100644 index 6ad0e03aee..0000000000 --- a/awx/ui/client/lib/angular-codemirror/app/partials/main.html +++ /dev/null @@ -1,20 +0,0 @@ - -
-[main.html markup was lost to tag stripping; the 20-line partial rendered a "CodeMirror Example" heading, a "Parse Type:" selector, and a "Code Editor" link]
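The deleted sampleApp.js above exercises this API end to end; as a reading aid, the sketch below is a minimal, hypothetical consumer of the AngularCodeMirror factory (whose implementation follows in lib/AngularCodeMirror.js). It assumes the CodeMirror, jQuery and jQuery UI globals that the sample index.html loaded, and the names `myApp`, `editorController` and `yamlValue` are illustrative, not part of the deleted code.

```
// Hypothetical usage sketch of the AngularCodeMirrorModule factory,
// mirroring the deleted app/js/sampleApp.js.
angular.module('myApp', ['AngularCodeMirrorModule'])
    .controller('editorController', ['$scope', 'AngularCodeMirror', function($scope, AngularCodeMirror) {
        $scope.yamlValue = '---';

        // Pass true to AngularCodeMirror() for a read-only editor.
        var editor = AngularCodeMirror();

        // Register per-mode CodeMirror options; any valid CodeMirror options
        // object is accepted (lint: true would also need the lint addon plus
        // a lint helper such as lib/yaml-lint.js).
        editor.addModes({
            yaml: { mode: 'text/x-yaml', lineNumbers: true }
        });

        $scope.edit = function() {
            // Opens the draggable, resizable jQuery UI dialog; the OK button
            // copies the editor contents back into $scope.yamlValue.
            editor.showModal({
                scope: $scope,
                container: document.getElementById('main-view'), // container id from the sample
                mode: 'yaml',
                model: 'yamlValue',
                title: 'Edit YAML'
            });
        };
    }]);
```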
    diff --git a/awx/ui/client/lib/angular-codemirror/bower.json b/awx/ui/client/lib/angular-codemirror/bower.json deleted file mode 100644 index df7644686c..0000000000 --- a/awx/ui/client/lib/angular-codemirror/bower.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "angular-codemirror", - "version": "1.0.2", - "dependencies": { - "angular": "latest", - "angular-route": "latest", - "jquery": "latest", - "jqueryui": "latest", - "components-font-awesome": "latest", - "twitter": "latest", - "js-yaml": "latest", - "jsonlint": "latest", - "codemirror": "latest" - } -} diff --git a/awx/ui/client/lib/angular-codemirror/lib/AngularCodeMirror.css b/awx/ui/client/lib/angular-codemirror/lib/AngularCodeMirror.css deleted file mode 100644 index f233cc2941..0000000000 --- a/awx/ui/client/lib/angular-codemirror/lib/AngularCodeMirror.css +++ /dev/null @@ -1,111 +0,0 @@ -/********************************************** - * AngularCodeMirror.css - * - * CodeMirror.css overrides - * - * Copyright (c) 2014 Chris Houseknecht - * - * The MIT License (MIT) - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of angular-codemirror and associated files and documentation (the "Software"), - * to deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - */ - - .CodeMirror { - height: auto; - } - - .CodeMirror-activeline-background { - background-color: #f7f7f7; - } - - - -/* Modal dialog overrides to make jqueryui dialog blend in with Twitter. - Why? 
Twitter's modal is not draggable or resizable, which is not very - useful for a code editor */ - - .ui-dialog-title { - font-size: 22px; - color: #1778c3; - font-weight: bold; - line-height: normal; - } - .ui-dialog .close { - font-size: 18px; - font-weight: bold; - font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; - line-height: 1; - opacity: .7; - text-shadow: 0 1px 0 #ffffff; - } - .ui-dialog .ui-widget-header { - border-radius: 0; - border: none; - border-bottom: 1px solid #A9A9A9; - height: 55px; - } - .ui-dialog .ui-dialog-titlebar { - padding-bottom: 0; - padding-top: 12px; - } - .ui-dialog .ui-dialog-titlebar { - background-image: none; - background-color: #ffffff; - border-color: #ffffff; - color: #A9A9A9; - } - - .ui-dialog .ui-resizable-se { - right: 5px; - bottom: 5px; - background-position: -80px -224px; - color: #171717; - } - - .ui-dialog-buttonset button.btn.btn-default.ui-state-hover, - .ui-dialog-buttonset button.btn.btn-default.ui-state-active, - .ui-dialog-buttonset button.btn.btn-default.ui-state-focus { - font-weight: normal; - } - .ui-dialog-buttonset button.btn.btn-primary.ui-state-hover, - .ui-dialog-buttonset button.btn.btn-primary.ui-state-active, - .ui-dialog-buttonset button.btn.btn-primary.ui-state-focus { - background-image: none; - color: #ffffff; - background-color: #2a6496; - border-color: #285e8e; - text-decoration: none; - font-weight: normal; - } - /* Bring the overlay above any TB fixed navs and darken it to match */ - .ui-widget-overlay.ui-front { - background-image: none; - background-color: #000; - opacity: .6; - z-index: 1040; - } - /* Make sure code editor dialog is always at top of stack */ - [aria-describedby=af-code-editor-modal].ui-front { - z-index: 2050; - } - .CodeMirror-lint-tooltip { - z-index: 2060; - } - diff --git a/awx/ui/client/lib/angular-codemirror/lib/AngularCodeMirror.js b/awx/ui/client/lib/angular-codemirror/lib/AngularCodeMirror.js deleted file mode 100644 index 2ef0ad4fe7..0000000000 --- a/awx/ui/client/lib/angular-codemirror/lib/AngularCodeMirror.js +++ /dev/null @@ -1,189 +0,0 @@ -/********************************************** - * AngularCodeMirror.js - * - * Copyright (c) 2014 Chris Houseknecht - * - * The MIT License (MIT) - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of angular-codemirror and associated files and documentation (the "Software"), - * to deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- * - */ - -'use strict'; - -angular.module('AngularCodeMirrorModule', []) - - .factory('AngularCodeMirror', [ function() { - return function(readOnly) { - var fn = function() { - - this.myCodeMirror = null; - this.element = null; - - this.showTextArea = function(params) { - var self = this, - element = (typeof params.element === "object") ? params.element : document.getElementById(params.element), - scope = params.scope, - model = params.model, - mode = params.mode, - onReady = params.onReady, - height = 0; - - self.element = $(element); - - // We don't want to touch the original textarea. Angular likely has a model and other listeners - // attached to it. In prior iterations attaching CodeMirror to it seemed to go bad, so we'll insert a - //
<div> under it, hide the textarea and let CodeMirror attach to the <div>. - if ($('#cm-' + model + '-container').length > 0) { - $('#cm-' + model + '-container').empty(); - } - else { - self.element.after("<div id='cm-" + model + "-container'></div>
    "); - } - - // Calc the height of the text area- our CodeMirror should match. - height += self.element.attr('rows') * parseInt($(self.element).css('line-height').replace(/px/,''),10); - height += parseInt(self.element.css('padding-top').replace(/px|%/,''),10) + - parseInt(self.element.css('padding-bottom').replace(/px|%/,''),10); - height += 2; //for the border - - // hide - self.element.hide(); - - // Initialize CodeMirror - self.modes[mode].value = scope[model]; - - // if readOnly is passed to AngularCodeMirror, set the - // options for all modes to be readOnly - if (readOnly) { - Object.keys(self.modes).forEach(function(val) { - self.modes[val].readOnly = true; - }); - } - - self.myCodeMirror = CodeMirror(document.getElementById('cm-' + model + '-container'), self.modes[mode]); - - // Adjust the height - $('.CodeMirror').css({ 'min-height': height, 'max-height': height }); - self.myCodeMirror.setSize(null, height); - - // This doesn't work without a setTimeout - setTimeout(function() { - self.myCodeMirror.refresh(); - if (onReady) { - onReady(); - } - }, 500); - - // Update the model on change - self.myCodeMirror.on('change', function() { - setTimeout(function() { scope.$apply(function(){ scope[model] = self.myCodeMirror.getValue(); }); }, 500); - }); - }; - - this.getValue = function() { - var self = this; - return self.myCodeMirror.getValue(); - }; - - this.destroy = function() { - // Intended for use with showTextArea. This will get ride of CM and put the - // textarea back to normal - var self = this; - $('.CodeMirror').empty().remove(); - if (self.element) { - self.element.show(); - } - }; - - this.showModal = function(params) { - - var self = this, - scope = params.scope, - target = (typeof params.container === "string") ? document.getElementById(params.container) : params.container, - mode = params.mode, - model = params.model, - title = params.title || 'Code Editor', - modes = self.modes; - - this.html = "
<div id='af-code-editor-modal'>\n<textarea id='af-code'></textarea></div>
    \n"; - if ($('#af-code-editor-modal').length === 0) { - $(target).append(this.html); - } - else { - $('#af-code-editor-modal').remove(); - $(target).append(this.html); - } - - $('#af-code-editor-modal').dialog({ - title: title, - resizable: true, - width: Math.ceil($(window).width() * 0.9), - height: Math.ceil($(window).height() * 0.8), - position: "center", - show: true, - closeOnEscape: true, - modal: true, - autoOpen: true, - buttons: [ - { text: "Cancel", id: "af-code-edit-cancel", click: function() { $(this).dialog('close'); } }, - { text: "OK", id: "af-code-edit-ok", click: - function() { - scope.$apply(function() { scope[model] = self.myCodeMirror.getValue(); }); - $(this).dialog('close'); - } - } - ], - open: function() { - var self = $('.ui-dialog[aria-describedby="af-code-editor-modal"]'), - idx, options; - - // bring the overlay up to just below the new window - idx = self.css('z-index'); - $('.ui-widget-overlay').css({ 'z-index': idx - 1}); - - // fix buttons- make them more twittery - self.find('.ui-dialog-titlebar button').empty().attr({'class': 'close'}).text('x'); - $('#af-code-edit-cancel').attr({ "class": "btn btn-default" }).empty().html(" Cancel"); - $('#af-code-edit-ok').attr({ "class": "btn btn-primary" }).empty().html(" Save"); - - // initialize CodeMirror - options = modes[mode]; - options.value = scope[model]; - self.myCodeMirror = CodeMirror(document.getElementById('af-code'), options); - } - }); - }; - - // Don't maintain modes here. Use this.addModes() to set/override available modes - this.modes = {}; - - // Add or override available modes. - this.addModes = function(obj) { - for (var key in obj) { - if (this.modes[key]) { - delete this.modes[key]; - } - this.modes[key] = angular.copy(obj[key]); - } - }; - }; - return new fn(); - }; - }]); diff --git a/awx/ui/client/lib/angular-codemirror/lib/yaml-lint.js b/awx/ui/client/lib/angular-codemirror/lib/yaml-lint.js deleted file mode 100644 index 99de370eed..0000000000 --- a/awx/ui/client/lib/angular-codemirror/lib/yaml-lint.js +++ /dev/null @@ -1,15 +0,0 @@ -// Add YAML lint support to CodeMirror. 
Submitted pull request #2266 -// Depends on js-yaml.js from https://github.com/nodeca/js-yaml - -// declare global: jsyaml - -CodeMirror.registerHelper("lint", "yaml", function(text) { - var found = []; - try { jsyaml.load(text); } - catch(e) { - var loc = e.mark; - found.push({ from: CodeMirror.Pos(loc.line, loc.column), to: CodeMirror.Pos(loc.line, loc.column), message: e.message }); - } - return found; -}); -CodeMirror.yamlValidator = CodeMirror.lint.yaml; // deprecated diff --git a/awx/ui/client/lib/angular-codemirror/scripts/compile.sh b/awx/ui/client/lib/angular-codemirror/scripts/compile.sh deleted file mode 100755 index 589435328c..0000000000 --- a/awx/ui/client/lib/angular-codemirror/scripts/compile.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# -# Minify angular-forms.js -# -# ./compile.sh -# - -if [ -f ../angular-forms.min.js ]; then - rm ../angular-forms.min.js -fi -java -jar ../bower_components/closure-compiler/compiler.jar --js ../angular-forms.js --js_output_file ../angular-forms.min.js diff --git a/awx/ui/client/lib/angular-codemirror/scripts/web-server.js b/awx/ui/client/lib/angular-codemirror/scripts/web-server.js deleted file mode 100755 index 3f74441e31..0000000000 --- a/awx/ui/client/lib/angular-codemirror/scripts/web-server.js +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/env node - -var util = require('util'), - http = require('http'), - fs = require('fs'), - url = require('url'), - events = require('events'); - -var DEFAULT_PORT = 8000; - -function main(argv) { - new HttpServer({ - 'GET': createServlet(StaticServlet), - 'HEAD': createServlet(StaticServlet) - }).start(Number(argv[2]) || DEFAULT_PORT); -} - -function escapeHtml(value) { - return value.toString(). - replace('<', '<'). - replace('>', '>'). - replace('"', '"'); -} - -function createServlet(Class) { - var servlet = new Class(); - return servlet.handleRequest.bind(servlet); -} - -/** - * An Http server implementation that uses a map of methods to decide - * action routing. - * - * @param {Object} Map of method => Handler function - */ -function HttpServer(handlers) { - this.handlers = handlers; - this.server = http.createServer(this.handleRequest_.bind(this)); -} - -HttpServer.prototype.start = function(port) { - this.port = port; - this.server.listen(port); - util.puts('Http Server running at http://localhost:' + port + '/'); -}; - -HttpServer.prototype.parseUrl_ = function(urlString) { - var parsed = url.parse(urlString); - parsed.pathname = url.resolve('/', parsed.pathname); - return url.parse(url.format(parsed), true); -}; - -HttpServer.prototype.handleRequest_ = function(req, res) { - var logEntry = req.method + ' ' + req.url; - if (req.headers['user-agent']) { - logEntry += ' ' + req.headers['user-agent']; - } - util.puts(logEntry); - req.url = this.parseUrl_(req.url); - var handler = this.handlers[req.method]; - if (!handler) { - res.writeHead(501); - res.end(); - } else { - handler.call(this, req, res); - } -}; - -/** - * Handles static content. 
- */ -function StaticServlet() {} - -StaticServlet.MimeMap = { - 'txt': 'text/plain', - 'html': 'text/html', - 'css': 'text/css', - 'xml': 'application/xml', - 'json': 'application/json', - 'js': 'application/javascript', - 'jpg': 'image/jpeg', - 'jpeg': 'image/jpeg', - 'gif': 'image/gif', - 'png': 'image/png', -  'svg': 'image/svg+xml' -}; - -StaticServlet.prototype.handleRequest = function(req, res) { - var self = this; - var path = ('./' + req.url.pathname).replace('//','/').replace(/%(..)/g, function(match, hex){ - return String.fromCharCode(parseInt(hex, 16)); - }); - var parts = path.split('/'); - if (parts[parts.length-1].charAt(0) === '.') - return self.sendForbidden_(req, res, path); - fs.stat(path, function(err, stat) { - if (err) - return self.sendMissing_(req, res, path); - if (stat.isDirectory()) - return self.sendDirectory_(req, res, path); - return self.sendFile_(req, res, path); - }); -} - -StaticServlet.prototype.sendError_ = function(req, res, error) { - res.writeHead(500, { - 'Content-Type': 'text/html' - }); - res.write('\n'); - res.write('Internal Server Error\n'); - res.write('

<h1>Internal Server Error</h1>'); - res.write('<pre>' + escapeHtml(util.inspect(error)) + '</pre>
    '); - util.puts('500 Internal Server Error'); - util.puts(util.inspect(error)); -}; - -StaticServlet.prototype.sendMissing_ = function(req, res, path) { - path = path.substring(1); - res.writeHead(404, { - 'Content-Type': 'text/html' - }); - res.write('\n'); - res.write('404 Not Found\n'); - res.write('

<h1>Not Found</h1>'); - res.write( - '<p>The requested URL ' + - escapeHtml(path) + - ' was not found on this server.</p>
    ' - ); - res.end(); - util.puts('404 Not Found: ' + path); -}; - -StaticServlet.prototype.sendForbidden_ = function(req, res, path) { - path = path.substring(1); - res.writeHead(403, { - 'Content-Type': 'text/html' - }); - res.write('\n'); - res.write('403 Forbidden\n'); - res.write('

<h1>Forbidden</h1>'); - res.write( - '<p>You do not have permission to access ' + - escapeHtml(path) + ' on this server.</p>
    ' - ); - res.end(); - util.puts('403 Forbidden: ' + path); -}; - -StaticServlet.prototype.sendRedirect_ = function(req, res, redirectUrl) { - res.writeHead(301, { - 'Content-Type': 'text/html', - 'Location': redirectUrl - }); - res.write('\n'); - res.write('301 Moved Permanently\n'); - res.write('

<h1>Moved Permanently</h1>'); - res.write( - '<p>The document has moved <a href="' + redirectUrl + '">here</a>.</p>
    ' - ); - res.end(); - util.puts('301 Moved Permanently: ' + redirectUrl); -}; - -StaticServlet.prototype.sendFile_ = function(req, res, path) { - var self = this; - var file = fs.createReadStream(path); - res.writeHead(200, { - 'Content-Type': StaticServlet. - MimeMap[path.split('.').pop()] || 'text/plain' - }); - if (req.method === 'HEAD') { - res.end(); - } else { - file.on('data', res.write.bind(res)); - file.on('close', function() { - res.end(); - }); - file.on('error', function(error) { - self.sendError_(req, res, error); - }); - } -}; - -StaticServlet.prototype.sendDirectory_ = function(req, res, path) { - var self = this; - if (path.match(/[^\/]$/)) { - req.url.pathname += '/'; - var redirectUrl = url.format(url.parse(url.format(req.url))); - return self.sendRedirect_(req, res, redirectUrl); - } - fs.readdir(path, function(err, files) { - if (err) - return self.sendError_(req, res, error); - - if (!files.length) - return self.writeDirectoryIndex_(req, res, path, []); - - var remaining = files.length; - files.forEach(function(fileName, index) { - fs.stat(path + '/' + fileName, function(err, stat) { - if (err) - return self.sendError_(req, res, err); - if (stat.isDirectory()) { - files[index] = fileName + '/'; - } - if (!(--remaining)) - return self.writeDirectoryIndex_(req, res, path, files); - }); - }); - }); -}; - -StaticServlet.prototype.writeDirectoryIndex_ = function(req, res, path, files) { - path = path.substring(1); - res.writeHead(200, { - 'Content-Type': 'text/html' - }); - if (req.method === 'HEAD') { - res.end(); - return; - } - res.write('\n'); - res.write('' + escapeHtml(path) + '\n'); - res.write('\n'); - res.write('

<h1>Directory: ' + escapeHtml(path) + '</h1>'); - res.write('<ol>'); - files.forEach(function(fileName) { - if (fileName.charAt(0) !== '.') { - res.write('<li><a href="' + - escapeHtml(fileName) + '">' + - escapeHtml(fileName) + '</a></li>'); - } - }); - res.write('</ol>
    '); - res.end(); -}; - -// Must be last, -main(process.argv); diff --git a/awx/ui/client/lib/angular-cookies/.bower.json b/awx/ui/client/lib/angular-cookies/.bower.json deleted file mode 100644 index 30f123e4cd..0000000000 --- a/awx/ui/client/lib/angular-cookies/.bower.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "angular-cookies", - "version": "1.4.3", - "main": "./angular-cookies.js", - "ignore": [], - "dependencies": { - "angular": "1.4.3" - }, - "homepage": "https://github.com/angular/bower-angular-cookies", - "_release": "1.4.3", - "_resolution": { - "type": "version", - "tag": "v1.4.3", - "commit": "1ef7a87fc52ed419322aefe14890bdb24e717fc9" - }, - "_source": "git://github.com/angular/bower-angular-cookies.git", - "_target": "~1.4.3", - "_originalSource": "angular-cookies" -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-cookies/README.md b/awx/ui/client/lib/angular-cookies/README.md deleted file mode 100644 index 7b190d3461..0000000000 --- a/awx/ui/client/lib/angular-cookies/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# packaged angular-cookies - -This repo is for distribution on `npm` and `bower`. The source for this module is in the -[main AngularJS repo](https://github.com/angular/angular.js/tree/master/src/ngCookies). -Please file issues and pull requests against that repo. - -## Install - -You can install this package either with `npm` or with `bower`. - -### npm - -```shell -npm install angular-cookies -``` - -Then add `ngCookies` as a dependency for your app: - -```javascript -angular.module('myApp', [require('angular-cookies')]); -``` - -### bower - -```shell -bower install angular-cookies -``` - -Add a ` -``` - -Then add `ngCookies` as a dependency for your app: - -```javascript -angular.module('myApp', ['ngCookies']); -``` - -## Documentation - -Documentation is available on the -[AngularJS docs site](http://docs.angularjs.org/api/ngCookies). - -## License - -The MIT License - -Copyright (c) 2010-2015 Google, Inc. http://angularjs.org - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/awx/ui/client/lib/angular-cookies/angular-cookies.js b/awx/ui/client/lib/angular-cookies/angular-cookies.js deleted file mode 100644 index ddfcd1469d..0000000000 --- a/awx/ui/client/lib/angular-cookies/angular-cookies.js +++ /dev/null @@ -1,321 +0,0 @@ -/** - * @license AngularJS v1.4.3 - * (c) 2010-2015 Google, Inc. 
http://angularjs.org - * License: MIT - */ -(function(window, angular, undefined) {'use strict'; - -/** - * @ngdoc module - * @name ngCookies - * @description - * - * # ngCookies - * - * The `ngCookies` module provides a convenient wrapper for reading and writing browser cookies. - * - * - *
    - * - * See {@link ngCookies.$cookies `$cookies`} for usage. - */ - - -angular.module('ngCookies', ['ng']). - /** - * @ngdoc provider - * @name $cookiesProvider - * @description - * Use `$cookiesProvider` to change the default behavior of the {@link ngCookies.$cookies $cookies} service. - * */ - provider('$cookies', [function $CookiesProvider() { - /** - * @ngdoc property - * @name $cookiesProvider#defaults - * @description - * - * Object containing default options to pass when setting cookies. - * - * The object may have following properties: - * - * - **path** - `{string}` - The cookie will be available only for this path and its - * sub-paths. By default, this would be the URL that appears in your base tag. - * - **domain** - `{string}` - The cookie will be available only for this domain and - * its sub-domains. For obvious security reasons the user agent will not accept the - * cookie if the current domain is not a sub domain or equals to the requested domain. - * - **expires** - `{string|Date}` - String of the form "Wdy, DD Mon YYYY HH:MM:SS GMT" - * or a Date object indicating the exact date/time this cookie will expire. - * - **secure** - `{boolean}` - The cookie will be available only in secured connection. - * - * Note: by default the address that appears in your `` tag will be used as path. - * This is import so that cookies will be visible for all routes in case html5mode is enabled - * - **/ - var defaults = this.defaults = {}; - - function calcOptions(options) { - return options ? angular.extend({}, defaults, options) : defaults; - } - - /** - * @ngdoc service - * @name $cookies - * - * @description - * Provides read/write access to browser's cookies. - * - *
<div class="alert alert-info"> - * Up until Angular 1.3, `$cookies` exposed properties that represented the - * current browser cookie values. In version 1.4, this behavior has changed, and - * `$cookies` now provides a standard api of getters, setters etc. - * </div>
    - * - * Requires the {@link ngCookies `ngCookies`} module to be installed. - * - * @example - * - * ```js - * angular.module('cookiesExample', ['ngCookies']) - * .controller('ExampleController', ['$cookies', function($cookies) { - * // Retrieving a cookie - * var favoriteCookie = $cookies.get('myFavorite'); - * // Setting a cookie - * $cookies.put('myFavorite', 'oatmeal'); - * }]); - * ``` - */ - this.$get = ['$$cookieReader', '$$cookieWriter', function($$cookieReader, $$cookieWriter) { - return { - /** - * @ngdoc method - * @name $cookies#get - * - * @description - * Returns the value of given cookie key - * - * @param {string} key Id to use for lookup. - * @returns {string} Raw cookie value. - */ - get: function(key) { - return $$cookieReader()[key]; - }, - - /** - * @ngdoc method - * @name $cookies#getObject - * - * @description - * Returns the deserialized value of given cookie key - * - * @param {string} key Id to use for lookup. - * @returns {Object} Deserialized cookie value. - */ - getObject: function(key) { - var value = this.get(key); - return value ? angular.fromJson(value) : value; - }, - - /** - * @ngdoc method - * @name $cookies#getAll - * - * @description - * Returns a key value object with all the cookies - * - * @returns {Object} All cookies - */ - getAll: function() { - return $$cookieReader(); - }, - - /** - * @ngdoc method - * @name $cookies#put - * - * @description - * Sets a value for given cookie key - * - * @param {string} key Id for the `value`. - * @param {string} value Raw value to be stored. - * @param {Object=} options Options object. - * See {@link ngCookies.$cookiesProvider#defaults $cookiesProvider.defaults} - */ - put: function(key, value, options) { - $$cookieWriter(key, value, calcOptions(options)); - }, - - /** - * @ngdoc method - * @name $cookies#putObject - * - * @description - * Serializes and sets a value for given cookie key - * - * @param {string} key Id for the `value`. - * @param {Object} value Value to be stored. - * @param {Object=} options Options object. - * See {@link ngCookies.$cookiesProvider#defaults $cookiesProvider.defaults} - */ - putObject: function(key, value, options) { - this.put(key, angular.toJson(value), options); - }, - - /** - * @ngdoc method - * @name $cookies#remove - * - * @description - * Remove given cookie - * - * @param {string} key Id of the key-value pair to delete. - * @param {Object=} options Options object. - * See {@link ngCookies.$cookiesProvider#defaults $cookiesProvider.defaults} - */ - remove: function(key, options) { - $$cookieWriter(key, undefined, calcOptions(options)); - } - }; - }]; - }]); - -angular.module('ngCookies'). -/** - * @ngdoc service - * @name $cookieStore - * @deprecated - * @requires $cookies - * - * @description - * Provides a key-value (string-object) storage, that is backed by session cookies. - * Objects put or retrieved from this storage are automatically serialized or - * deserialized by angular's toJson/fromJson. - * - * Requires the {@link ngCookies `ngCookies`} module to be installed. - * - *
<div class="alert alert-danger"> - * **Note:** The $cookieStore service is **deprecated**. - * Please use the {@link ngCookies.$cookies `$cookies`} service instead. - * </div>
    - * - * @example - * - * ```js - * angular.module('cookieStoreExample', ['ngCookies']) - * .controller('ExampleController', ['$cookieStore', function($cookieStore) { - * // Put cookie - * $cookieStore.put('myFavorite','oatmeal'); - * // Get cookie - * var favoriteCookie = $cookieStore.get('myFavorite'); - * // Removing a cookie - * $cookieStore.remove('myFavorite'); - * }]); - * ``` - */ - factory('$cookieStore', ['$cookies', function($cookies) { - - return { - /** - * @ngdoc method - * @name $cookieStore#get - * - * @description - * Returns the value of given cookie key - * - * @param {string} key Id to use for lookup. - * @returns {Object} Deserialized cookie value, undefined if the cookie does not exist. - */ - get: function(key) { - return $cookies.getObject(key); - }, - - /** - * @ngdoc method - * @name $cookieStore#put - * - * @description - * Sets a value for given cookie key - * - * @param {string} key Id for the `value`. - * @param {Object} value Value to be stored. - */ - put: function(key, value) { - $cookies.putObject(key, value); - }, - - /** - * @ngdoc method - * @name $cookieStore#remove - * - * @description - * Remove given cookie - * - * @param {string} key Id of the key-value pair to delete. - */ - remove: function(key) { - $cookies.remove(key); - } - }; - - }]); - -/** - * @name $$cookieWriter - * @requires $document - * - * @description - * This is a private service for writing cookies - * - * @param {string} name Cookie name - * @param {string=} value Cookie value (if undefined, cookie will be deleted) - * @param {Object=} options Object with options that need to be stored for the cookie. - */ -function $$CookieWriter($document, $log, $browser) { - var cookiePath = $browser.baseHref(); - var rawDocument = $document[0]; - - function buildCookieString(name, value, options) { - var path, expires; - options = options || {}; - expires = options.expires; - path = angular.isDefined(options.path) ? options.path : cookiePath; - if (value === undefined) { - expires = 'Thu, 01 Jan 1970 00:00:00 GMT'; - value = ''; - } - if (angular.isString(expires)) { - expires = new Date(expires); - } - - var str = encodeURIComponent(name) + '=' + encodeURIComponent(value); - str += path ? ';path=' + path : ''; - str += options.domain ? ';domain=' + options.domain : ''; - str += expires ? ';expires=' + expires.toUTCString() : ''; - str += options.secure ? ';secure' : ''; - - // per http://www.ietf.org/rfc/rfc2109.txt browser must allow at minimum: - // - 300 cookies - // - 20 cookies per unique domain - // - 4096 bytes per cookie - var cookieLength = str.length + 1; - if (cookieLength > 4096) { - $log.warn("Cookie '" + name + - "' possibly not set or overflowed because it was too large (" + - cookieLength + " > 4096 bytes)!"); - } - - return str; - } - - return function(name, value, options) { - rawDocument.cookie = buildCookieString(name, value, options); - }; -} - -$$CookieWriter.$inject = ['$document', '$log', '$browser']; - -angular.module('ngCookies').provider('$$cookieWriter', function $$CookieWriterProvider() { - this.$get = $$CookieWriter; -}); - - -})(window, window.angular); diff --git a/awx/ui/client/lib/angular-cookies/angular-cookies.min.js b/awx/ui/client/lib/angular-cookies/angular-cookies.min.js deleted file mode 100644 index 36b74e96fd..0000000000 --- a/awx/ui/client/lib/angular-cookies/angular-cookies.min.js +++ /dev/null @@ -1,9 +0,0 @@ -/* - AngularJS v1.4.3 - (c) 2010-2015 Google, Inc. 
http://angularjs.org - License: MIT -*/ -(function(p,g,l){'use strict';function m(b,a,f){var c=f.baseHref(),k=b[0];return function(b,d,e){var f,h;e=e||{};h=e.expires;f=g.isDefined(e.path)?e.path:c;d===l&&(h="Thu, 01 Jan 1970 00:00:00 GMT",d="");g.isString(h)&&(h=new Date(h));d=encodeURIComponent(b)+"="+encodeURIComponent(d);d=d+(f?";path="+f:"")+(e.domain?";domain="+e.domain:"");d+=h?";expires="+h.toUTCString():"";d+=e.secure?";secure":"";e=d.length+1;4096 4096 bytes)!");k.cookie=d}}g.module("ngCookies",["ng"]).provider("$cookies",[function(){var b=this.defaults={};this.$get=["$$cookieReader","$$cookieWriter",function(a,f){return{get:function(c){return a()[c]},getObject:function(c){return(c=this.get(c))?g.fromJson(c):c},getAll:function(){return a()},put:function(c,a,n){f(c,a,n?g.extend({},b,n):b)},putObject:function(c,b,a){this.put(c,g.toJson(b),a)},remove:function(a,k){f(a,l,k?g.extend({},b,k):b)}}}]}]);g.module("ngCookies").factory("$cookieStore", -["$cookies",function(b){return{get:function(a){return b.getObject(a)},put:function(a,f){b.putObject(a,f)},remove:function(a){b.remove(a)}}}]);m.$inject=["$document","$log","$browser"];g.module("ngCookies").provider("$$cookieWriter",function(){this.$get=m})})(window,window.angular); -//# sourceMappingURL=angular-cookies.min.js.map diff --git a/awx/ui/client/lib/angular-cookies/angular-cookies.min.js.map b/awx/ui/client/lib/angular-cookies/angular-cookies.min.js.map deleted file mode 100644 index 84e6a5701f..0000000000 --- a/awx/ui/client/lib/angular-cookies/angular-cookies.min.js.map +++ /dev/null @@ -1,8 +0,0 @@ -{ -"version":3, -"file":"angular-cookies.min.js", -"lineCount":8, -"mappings":"A;;;;;aAKC,SAAQ,CAACA,CAAD,CAASC,CAAT,CAAkBC,CAAlB,CAA6B,CA0QtCC,QAASA,EAAc,CAACC,CAAD,CAAYC,CAAZ,CAAkBC,CAAlB,CAA4B,CACjD,IAAIC,EAAaD,CAAAE,SAAA,EAAjB,CACIC,EAAcL,CAAA,CAAU,CAAV,CAmClB,OAAO,SAAQ,CAACM,CAAD,CAAOC,CAAP,CAAcC,CAAd,CAAuB,CAjCW,IAC3CC,CAD2C,CACrCC,CACVF,EAAA,CAgCoDA,CAhCpD,EAAqB,EACrBE,EAAA,CAAUF,CAAAE,QACVD,EAAA,CAAOZ,CAAAc,UAAA,CAAkBH,CAAAC,KAAlB,CAAA,CAAkCD,CAAAC,KAAlC,CAAiDN,CACpDI,EAAJ,GAAcT,CAAd,GACEY,CACA,CADU,+BACV,CAAAH,CAAA,CAAQ,EAFV,CAIIV,EAAAe,SAAA,CAAiBF,CAAjB,CAAJ,GACEA,CADF,CACY,IAAIG,IAAJ,CAASH,CAAT,CADZ,CAIII,EAAAA,CAAMC,kBAAA,CAqB6BT,CArB7B,CAANQ,CAAiC,GAAjCA,CAAuCC,kBAAA,CAAmBR,CAAnB,CAE3CO,EAAA,CADAA,CACA,EADOL,CAAA,CAAO,QAAP,CAAkBA,CAAlB,CAAyB,EAChC,GAAOD,CAAAQ,OAAA,CAAiB,UAAjB,CAA8BR,CAAAQ,OAA9B,CAA+C,EAAtD,CACAF,EAAA,EAAOJ,CAAA,CAAU,WAAV,CAAwBA,CAAAO,YAAA,EAAxB,CAAgD,EACvDH,EAAA,EAAON,CAAAU,OAAA,CAAiB,SAAjB,CAA6B,EAMhCC,EAAAA,CAAeL,CAAAM,OAAfD,CAA4B,CACb,KAAnB,CAAIA,CAAJ,EACElB,CAAAoB,KAAA,CAAU,UAAV,CASqCf,CATrC,CACE,6DADF;AAEEa,CAFF,CAEiB,iBAFjB,CASFd,EAAAiB,OAAA,CAJOR,CAG6B,CArCW,CAxPnDjB,CAAA0B,OAAA,CAAe,WAAf,CAA4B,CAAC,IAAD,CAA5B,CAAAC,SAAA,CAOY,UAPZ,CAOwB,CAACC,QAAyB,EAAG,CAuBjD,IAAIC,EAAW,IAAAA,SAAXA,CAA2B,EAiC/B,KAAAC,KAAA,CAAY,CAAC,gBAAD,CAAmB,gBAAnB,CAAqC,QAAQ,CAACC,CAAD,CAAiBC,CAAjB,CAAiC,CACxF,MAAO,CAWLC,IAAKA,QAAQ,CAACC,CAAD,CAAM,CACjB,MAAOH,EAAA,EAAA,CAAiBG,CAAjB,CADU,CAXd,CAyBLC,UAAWA,QAAQ,CAACD,CAAD,CAAM,CAEvB,MAAO,CADHxB,CACG,CADK,IAAAuB,IAAA,CAASC,CAAT,CACL,EAAQlC,CAAAoC,SAAA,CAAiB1B,CAAjB,CAAR,CAAkCA,CAFlB,CAzBpB,CAuCL2B,OAAQA,QAAQ,EAAG,CACjB,MAAON,EAAA,EADU,CAvCd,CAuDLO,IAAKA,QAAQ,CAACJ,CAAD,CAAMxB,CAAN,CAAaC,CAAb,CAAsB,CACjCqB,CAAA,CAAeE,CAAf,CAAoBxB,CAApB,CAAuCC,CAvFpC,CAAUX,CAAAuC,OAAA,CAAe,EAAf,CAAmBV,CAAnB,CAuF0BlB,CAvF1B,CAAV,CAAkDkB,CAuFrD,CADiC,CAvD9B,CAuELW,UAAWA,QAAQ,CAACN,CAAD,CAAMxB,CAAN,CAAaC,CAAb,CAAsB,CACvC,IAAA2B,IAAA,CAASJ,CAAT,CAAclC,CAAAyC,OAAA,CAAe/B,CAAf,CAAd,CAAqCC,CAArC,CADuC,CAvEp
C,CAsFL+B,OAAQA,QAAQ,CAACR,CAAD,CAAMvB,CAAN,CAAe,CAC7BqB,CAAA,CAAeE,CAAf,CAAoBjC,CAApB,CAA2CU,CAtHxC,CAAUX,CAAAuC,OAAA,CAAe,EAAf,CAAmBV,CAAnB,CAsH8BlB,CAtH9B,CAAV,CAAkDkB,CAsHrD,CAD6B,CAtF1B,CADiF,CAA9E,CAxDqC,CAA7B,CAPxB,CA6JA7B,EAAA0B,OAAA,CAAe,WAAf,CAAAiB,QAAA,CAiCS,cAjCT;AAiCyB,CAAC,UAAD,CAAa,QAAQ,CAACC,CAAD,CAAW,CAErD,MAAO,CAWLX,IAAKA,QAAQ,CAACC,CAAD,CAAM,CACjB,MAAOU,EAAAT,UAAA,CAAmBD,CAAnB,CADU,CAXd,CAyBLI,IAAKA,QAAQ,CAACJ,CAAD,CAAMxB,CAAN,CAAa,CACxBkC,CAAAJ,UAAA,CAAmBN,CAAnB,CAAwBxB,CAAxB,CADwB,CAzBrB,CAsCLgC,OAAQA,QAAQ,CAACR,CAAD,CAAM,CACpBU,CAAAF,OAAA,CAAgBR,CAAhB,CADoB,CAtCjB,CAF8C,CAAhC,CAjCzB,CAqIAhC,EAAA2C,QAAA,CAAyB,CAAC,WAAD,CAAc,MAAd,CAAsB,UAAtB,CAEzB7C,EAAA0B,OAAA,CAAe,WAAf,CAAAC,SAAA,CAAqC,gBAArC,CAAuDmB,QAA+B,EAAG,CACvF,IAAAhB,KAAA,CAAY5B,CAD2E,CAAzF,CAtTsC,CAArC,CAAD,CA2TGH,MA3TH,CA2TWA,MAAAC,QA3TX;", -"sources":["angular-cookies.js"], -"names":["window","angular","undefined","$$CookieWriter","$document","$log","$browser","cookiePath","baseHref","rawDocument","name","value","options","path","expires","isDefined","isString","Date","str","encodeURIComponent","domain","toUTCString","secure","cookieLength","length","warn","cookie","module","provider","$CookiesProvider","defaults","$get","$$cookieReader","$$cookieWriter","get","key","getObject","fromJson","getAll","put","extend","putObject","toJson","remove","factory","$cookies","$inject","$$CookieWriterProvider"] -} diff --git a/awx/ui/client/lib/angular-cookies/bower.json b/awx/ui/client/lib/angular-cookies/bower.json deleted file mode 100644 index 9ab6d5f01b..0000000000 --- a/awx/ui/client/lib/angular-cookies/bower.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "name": "angular-cookies", - "version": "1.4.3", - "main": "./angular-cookies.js", - "ignore": [], - "dependencies": { - "angular": "1.4.3" - } -} diff --git a/awx/ui/client/lib/angular-cookies/index.js b/awx/ui/client/lib/angular-cookies/index.js deleted file mode 100644 index 657667549a..0000000000 --- a/awx/ui/client/lib/angular-cookies/index.js +++ /dev/null @@ -1,2 +0,0 @@ -require('./angular-cookies'); -module.exports = 'ngCookies'; diff --git a/awx/ui/client/lib/angular-cookies/package.json b/awx/ui/client/lib/angular-cookies/package.json deleted file mode 100644 index a9862f9bcc..0000000000 --- a/awx/ui/client/lib/angular-cookies/package.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "name": "angular-cookies", - "version": "1.4.3", - "description": "AngularJS module for cookies", - "main": "index.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "repository": { - "type": "git", - "url": "https://github.com/angular/angular.js.git" - }, - "keywords": [ - "angular", - "framework", - "browser", - "cookies", - "client-side" - ], - "author": "Angular Core Team ", - "license": "MIT", - "bugs": { - "url": "https://github.com/angular/angular.js/issues" - }, - "homepage": "http://angularjs.org" -} diff --git a/awx/ui/client/lib/angular-drag-and-drop-lists/.bower.json b/awx/ui/client/lib/angular-drag-and-drop-lists/.bower.json deleted file mode 100644 index b5afbfffb6..0000000000 --- a/awx/ui/client/lib/angular-drag-and-drop-lists/.bower.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "angular-drag-and-drop-lists", - "main": "angular-drag-and-drop-lists.js", - "version": "1.4.0", - "homepage": "https://github.com/marceljuenemann/angular-drag-and-drop-lists", - "authors": [ - "Marcel Juenemann " - ], - "description": "Angular directives for sorting nested lists using the HTML5 Drag & Drop API", - "keywords": [ - "angular", - "drag", - "drop", - 
"dnd", - "nested", - "sortable", - "lists", - "html5" - ], - "license": "MIT", - "ignore": [ - "**/.*", - "node_modules", - "bower_components", - "demo", - "*.json", - "test", - "tests" - ], - "_release": "1.4.0", - "_resolution": { - "type": "version", - "tag": "v1.4.0", - "commit": "141e13919b30578ed53d079bdd269fb99b20f78f" - }, - "_source": "git://github.com/marceljuenemann/angular-drag-and-drop-lists.git", - "_target": "~1.4.0", - "_originalSource": "angular-drag-and-drop-lists", - "_direct": true -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-drag-and-drop-lists/CHANGELOG.md b/awx/ui/client/lib/angular-drag-and-drop-lists/CHANGELOG.md deleted file mode 100644 index f5c9fdc64d..0000000000 --- a/awx/ui/client/lib/angular-drag-and-drop-lists/CHANGELOG.md +++ /dev/null @@ -1,107 +0,0 @@ -# 1.4.0 (2016-02-06) - -## Features - -- **dnd-handle directive**: This directive can be used in combination with `dnd-nodrag`, so that a `dnd-draggable` can only be dragged by using certain handle elements. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) -- **dnd-drop can handle insertion**: The `dnd-drop` callback can now return true to signalize that it will take care of inserting the dropped element itself. `dnd-list` will then no longer insert any elements into the list, but will still call the `dnd-inserted` callback. - -## Bug Fixes - -- **Fix dnd-disable-if on dnd-draggable**: When you disabled a `dnd-draggable` with `dnd-disable-if`, the user was still able to trigger a drag of that element by selecting some text inside the element. (issue #159) -- **dnd-list now handles the dragenter event**: According to the HTML5 standard dropzones need to handle the `dragenter` event, although there doesn't seem to be any browser that enforces this. (issue #118) - -## Tested browsers - -- Chrome 48 (Mac, Ubuntu & Windows 10) -- Firefox 44 (Ubuntu) -- Safari 9 (Mac) -- Microsoft Edge 20 (Windows 10) -- Internet Explorer 11 (Windows 10) -- Internet Explorer 10 & 9 in compatibility mode (Windows 10) - -# 1.3.0 (2015-08-20) - -## Features - -- **New callbacks**: `dnd-dragend`, `dnd-canceled` and `dnd-inserted`. -- **Custom placeholder elements**: `dnd-list` elements can have custom elements by creating a child element with `dnd-placeholder` class. This is useful for cases where a simple `li` element is not sufficient. -- **dnd-nodrag directive**: This directive can be used inside `dnd-draggable` to prevent dragging certain areas. This is useful for input elements inside the draggable or creating handle elements. - -## Bug Fixes - -- **Fix user selection inside dnd-draggable**: The `selectstart` event is no longer cancelled. -- **Fix click handler compatibility**: Propagation of click events is now only stopped if the `dnd-selected` attribute is present. -- **Fix IE9 glitch**: Double clicks in IE9 previously would trigger the `dnd-moved` callback, and therefore remove items accidentially. (issue #21) - -## Tested browsers - -- Chrome 43 (Win7) -- Chrome 44 (Ubuntu) -- Chrome 44 (Mac) -- Firefox 40 (Win7) -- Firefox 39 (Ubuntu) -- Safari 8.0.8 (Mac) -- Internet Explorer 11 (IE9 & 10 in compatibility mode) - -# 1.2.0 (2014-11-30) - -## Bug Fixes - -- **Fix glitches in Chrome**: When aborting a drag operation or dragging an element on itself, Chrome on Linux sometimes sends `move` as dropEffect instead of `none`. This lead to elements sometimes disappearing. Can be reproduced by dragging an element over itself and aborting with Esc key. 
(issue #14) -- **Fix dnd-allowed-types in nested lists**: When a drop was not allowed due to the wrong element type, the event was correctly propagated to the parent list. Nevertheless, the drop was still executed, because the drop handler didn't check the type again. (issue #16) - -## Features - -- **New callbacks**: The `dnd-draggable` directive now has a new `dnd-dragstart` callback besides the existing `dnd-moved` and `dnd-copied`. The `dnd-list` directive got the callbacks `dnd-dragover` and `dnd-drag` added, which are also able to abort a drop. (issue #11) -- **dnd-horizontal-list**: Lists can be marked as horizontal with this new attribute. The positioning algorithm then positions the placeholder left or right of other list items, instead of above or below. (issue #19) -- **dnd-external-sources**: This attribute allows drag and drop accross browser windows. See documentation for details. (issue #9) -- **pointer-events: none no longer required**: The dragover handler now traverses the DOM until it finds the list item node, therefore it's child elements no longer require the pointer-events: none style. - -## Tested browsers - -- Chrome 38 (Ubuntu) -- Chrome 38 (Win7) -- Chrome 39 (Mac) -- Firefox 31 (Win7) -- Firefox 33 (Ubuntu) -- Safari 7.1 (Mac) -- Internet Explorer 11 (IE9 & 10 in compatibility mode) - -# 1.1.0 (2014-08-31) - -## Bug Fixes - -- **jQuery compatibility**: jQuery wraps browser events in event.originalEvent - -## Features - -- **dnd-disable-if attribute**: allows to dynamically disable the drag and drop functionality -- **dnd-type and dnd-allowed-types**: allows to restrict an item to specifc lists depending on it's type - -## Tested browsers - -- Chrome 34 (Ubuntu) -- Chrome 37 (Mac) -- Chrome 37 (Win7) -- Firefox 28 (Win7) -- Firefox 31 (Ubuntu) -- Safari 7.0.6 (Mac) -- Internet Explorer 11 (IE9 & 10 in compatibility mode) - -# 1.0.0 (2014-04-11) - -Initial release - -# Release checklist - -- Bump versions - - bower.json - - package.json - - JS files -- Minify and test (npm run-script minify) -- Test different OS & browsers (npm start) -- Update README and CHANGELOG -- Merge to master -- Tag release -- Merge to gh-pages -- Publish to npm diff --git a/awx/ui/client/lib/angular-drag-and-drop-lists/LICENSE b/awx/ui/client/lib/angular-drag-and-drop-lists/LICENSE deleted file mode 100644 index 45299cb3c6..0000000000 --- a/awx/ui/client/lib/angular-drag-and-drop-lists/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Marcel Juenemann -Copyright (c) 2014-2016 Google Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/awx/ui/client/lib/angular-drag-and-drop-lists/README.md b/awx/ui/client/lib/angular-drag-and-drop-lists/README.md deleted file mode 100644 index 797ebffc2d..0000000000 --- a/awx/ui/client/lib/angular-drag-and-drop-lists/README.md +++ /dev/null @@ -1,135 +0,0 @@ -angular-drag-and-drop-lists -=========================== -Angular directives that allow you to build sortable lists with the native HTML5 drag & drop API. The directives can also be nested to bring drag & drop to your WYSIWYG editor, your tree, or whatever fancy structure you are building. - -## Demo -* [Nested Lists](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/nested) -* [Simple Lists](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/simple) -* [Typed Lists](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) -* [Advanced Features](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* [Multiselection Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/multi) - - -## Supported browsers - -**Touch devices are not supported**, because they do not implement the HTML5 drag & drop standard. However, you can use a [shim](https://github.com/timruffles/ios-html5-drag-drop-shim) to make it work on touch devices as well. - -Internet Explorer 8 or lower is *not supported*, but all modern browsers are (see changelog for list of tested browsers). - - -## Download & Installation -Download `angular-drag-and-drop-lists.js` (or the minified version) and include it in your application. If you use bower, you can of course just add it via bower. Add the `dndLists` module as dependency to your angular app. - -## dnd-draggable directive -Use the dnd-draggable directive to make your element draggable - -**Attributes** -* `dnd-draggable` Required attribute. The value has to be an object that represents the data of the element. In case of a drag and drop operation the object will be serialized and unserialized on the receiving end. -* `dnd-effect-allowed` Use this attribute to limit the operations that can be performed. Options are: - * `move` The drag operation will move the element. This is the default - * `copy` The drag operation will copy the element. There will be a copy cursor. - * `copyMove` The user can choose between copy and move by pressing the ctrl or shift key. - * *Not supported in IE:* In Internet Explorer this option will be the same as `copy`. - * *Not fully supported in Chrome on Windows:* In the Windows version of Chrome the cursor will always be the move cursor. However, when the user drops an element and has the ctrl key pressed, we will perform a copy anyways. - * HTML5 also specifies the `link` option, but this library does not actively support it yet, so use it at your own risk. - * [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-type` Use this attribute if you have different kinds of items in your application and you want to limit which items can be dropped into which lists. Combine with dnd-allowed-types on the dnd-list(s). This attribute should evaluate to a string, although this restriction is not enforced (at the moment). 
[Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) -* `dnd-disable-if` You can use this attribute to dynamically disable the draggability of the element. This is useful if you have certain list items that you don't want to be draggable, or if you want to disable drag & drop completely without having two different code branches (e.g. only allow for admins). *Note*: If your element is not draggable, the user is probably able to select text or images inside of it. Since a selection is always draggable, this breaks your UI. You most likely want to disable user selection via CSS (see [user-select](http://stackoverflow.com/a/4407335)). [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) - -**Callbacks** -* `dnd-moved` Callback that is invoked when the element was moved. Usually you will remove your element from the original list in this callback, since the directive is not doing that for you automatically. The original dragend event will be provided in the local `event` variable. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-copied` Same as dnd-moved, just that it is called when the element was copied instead of moved. The original dragend event will be provided in the local `event` variable. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-canceled` Callback that is invoked if the element was dragged, but the operation was canceled and the element was not dropped. The original dragend event will be provided in the local event variable. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-dragstart` Callback that is invoked when the element was dragged. The original dragstart event will be provided in the local `event` variable. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-dragend` Callback that is invoked when the drag operation ended. Available local variables are `event` and `dropEffect`. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-selected` Callback that is invoked when the element was clicked but not dragged. The original click event will be provided in the local `event` variable. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/nested) - -**CSS classes** -* `dndDragging` This class will be added to the element while the element is being dragged. It will affect both the element you see while dragging and the source element that stays at it's position. Do not try to hide the source element with this class, because that will abort the drag operation. -* `dndDraggingSource` This class will be added to the element after the drag operation was started, meaning it only affects the original element that is still at it's source position, and not the "element" that the user is dragging with his mouse pointer - -## dnd-list directive - -Use the dnd-list attribute to make your list element a dropzone. Usually you will add a single li element as child with the ng-repeat directive. If you don't do that, we will not be able to position the dropped element correctly. If you want your list to be sortable, also add the dnd-draggable directive to your li element(s). Both the dnd-list and it's direct children must have position: relative CSS style, otherwise the positioning algorithm will not be able to determine the correct placeholder position in all browsers. 
- -**Attributes** -* `dnd-list` Required attribute. The value has to be the array in which the data of the dropped element should be inserted. -* `dnd-allowed-types` Optional array of allowed item types. When used, only items that had a matching dnd-type attribute will be dropable. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) -* `dnd-disable-if` Optional boolean expression. When it evaluates to true, no dropping into the list is possible. Note that this also disables rearranging items inside the list. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) -* `dnd-horizontal-list` Optional boolean expression. When it evaluates to true, the positioning algorithm will use the left and right halfs of the list items instead of the upper and lower halfs. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-external-sources` Optional boolean expression. When it evaluates to true, the list accepts drops from sources outside of the current browser tab. This allows to drag and drop accross different browser tabs. Note that this will allow to drop arbitrary text into the list, thus it is highly recommended to implement the dnd-drop callback to check the incoming element for sanity. Furthermore, the dnd-type of external sources can not be determined, therefore do not rely on restrictions of dnd-allowed-type. Also note that this feature does not work very well in Internet Explorer. [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) - -**Callbacks** -* `dnd-dragover` Optional expression that is invoked when an element is dragged over the list. If the expression is set, but does not return true, the element is not allowed to be dropped. The following variables will be available: - * `event` The original dragover event sent by the browser. - * `index` The position in the list at which the element would be dropped. - * `type` The `dnd-type` set on the dnd-draggable, or undefined if unset. - * `external` Whether the element was dragged from an external source. See `dnd-external-sources`. - * [Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) -* `dnd-drop` Optional expression that is invoked when an element is dropped on the list. - * The following variables will be available: - * `event` The original drop event sent by the browser. - * `index` The position in the list at which the element would be dropped. - * `item` The transferred object. - * `type` The dnd-type set on the dnd-draggable, or undefined if unset. - * `external` Whether the element was dragged from an external source. See `dnd-external-sources`. - * The return value determines the further handling of the drop: - * `false` The drop will be canceled and the element won't be inserted. - * `true` Signalises that the drop is allowed, but the dnd-drop callback will take care of inserting the element. - * Otherwise: All other return values will be treated as the object to insert into the array. In most cases you simply want to return the `item` parameter, but there are no restrictions on what you can return. -* `dnd-inserted` Optional expression that is invoked after a drop if the element was actually inserted into the list. The same local variables as for `dnd-drop` will be available. Note that for reorderings inside the same list the old element will still be in the list due to the fact that `dnd-moved` was not called yet. 
[Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/advanced) - -**CSS classes** -* `dndPlaceholder` When an element is dragged over the list, a new placeholder child element will be added. This element is of type `li` and has the class `dndPlaceholder` set. Alternatively, you can define your own placeholder by creating a child element with `dndPlaceholder` class. -* `dndDragover` This class will be added to the list while an element is being dragged over the list. - -## dnd-nodrag directive - -Use the `dnd-nodrag` attribute inside of `dnd-draggable` elements to prevent them from starting drag operations. This is especially useful if you want to use input elements inside of `dnd-draggable` elements or create specific handle elements. - -**Note:** This directive does not work in Internet Explorer 9. - -[Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) - -## dnd-handle directive - -Use the `dnd-handle` directive within a `dnd-nodrag` element in order to allow dragging of that element after all. Therefore, by combining `dnd-nodrag` and `dnd-handle` you can allow `dnd-draggable` elements to only be dragged via specific *handle* elements. - -**Note:** Internet Explorer will show the handle element as drag image instead of the `dnd-draggable` element. You can work around this by styling the handle element differently when it is being dragged. Use the CSS selector `.dndDragging:not(.dndDraggingSource) [dnd-handle]` for that. - -[Demo](http://marceljuenemann.github.io/angular-drag-and-drop-lists/demo/#/types) - -## Required CSS styles -Both the dnd-list and it's children require relative positioning, so that the directive can determine the mouse position relative to the list and thus calculate the correct drop position. - -
<style>
-ul[dnd-list], ul[dnd-list] > li {
-    position: relative;
-}
-</style>
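For orientation, a minimal sortable list wiring these directives together might look like the sketch below. It is illustrative only, not taken from the library's demos, and assumes a controller scope that exposes an `items` array of objects with a `label` property:

```html
<!-- dnd-list receives drops into `items`; dnd-draggable makes each row
     draggable; dnd-moved removes the original entry after a successful
     move, since the directive does not do that automatically. -->
<ul dnd-list="items">
  <li ng-repeat="item in items"
      dnd-draggable="item"
      dnd-effect-allowed="move"
      dnd-moved="items.splice($index, 1)">
    {{item.label}}
  </li>
</ul>
```

Because both the `ul[dnd-list]` and its `li` children carry `position: relative` from the style block above, the positioning algorithm can compute the correct placeholder position.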
    - - - -## Why another drag & drop library? -There are tons of other drag & drop libraries out there, but none of them met my three requirements: - -* **Angular:** If you use angular.js, you really don't want to throw a bunch of jQuery into your app. Instead you want to use libraries that were build the "angular way" and support **two-way data binding** to update your data model automatically. -* **Nested lists:** If you want to build a **WYSIWYG editor** or have some fancy **tree structure**, the library has to support nested lists. -* **HTML5 drag & drop:** Most drag & drop applications you'll find on the internet use pure JavaScript drag & drop. But with the arrival of HTML5 we can delegate most of the work to the browser. For example: If you want to show the user what he's currently dragging, you'll have to update the position of the element all the time and set it below the mouse pointer. In HTML5 the browser will do that for you! But you can not only save code lines, you can also offer a more **native user experience**: If you click on an element in a pure JavaScript drag & drop implementation, it will usually start the drag operation. But remember what happens when you click an icon on your desktop: The icon will be selected, not dragged! This is the native behaviour you can bring to your web application with HTML5. - -If this doesn't fit your requirements, check out one of the other awesome drag & drop libraries: - -* [angular-ui-tree](https://github.com/JimLiu/angular-ui-tree): Very similar to this library, but does not use the HTML5 API. Therefore you need to write some more markup to see what you are dragging and it will create another DOM node that you have to style. However, if you plan to support touch devices this is probably your best choice. -* [angular-dragdrop](https://github.com/ganarajpr/angular-dragdrop): One of many libraries with the same name. This one uses the HTML5 API, but if you want to build (nested) sortable lists, you're on your own, because it does not calculate the correct element position for you. -* [more...](https://www.google.de/search?q=angular+drag+and+drop) - - -## License - -Copyright (c) 2014 [Marcel Juenemann](mailto:marcel@juenemann.cc) - -Copyright (c) 2014-2016 Google Inc. - -This is not an official Google product (experimental or otherwise), it is just code that happens to be owned by Google. - -[MIT License](https://raw.githubusercontent.com/marceljuenemann/angular-drag-and-drop-lists/master/LICENSE) diff --git a/awx/ui/client/lib/angular-drag-and-drop-lists/angular-drag-and-drop-lists.js b/awx/ui/client/lib/angular-drag-and-drop-lists/angular-drag-and-drop-lists.js deleted file mode 100644 index 4a1f2eccda..0000000000 --- a/awx/ui/client/lib/angular-drag-and-drop-lists/angular-drag-and-drop-lists.js +++ /dev/null @@ -1,590 +0,0 @@ -/** - * angular-drag-and-drop-lists v1.4.0 - * - * Copyright (c) 2014 Marcel Juenemann marcel@juenemann.cc - * Copyright (c) 2014-2016 Google Inc. - * https://github.com/marceljuenemann/angular-drag-and-drop-lists - * - * License: MIT - */ -angular.module('dndLists', []) - - /** - * Use the dnd-draggable attribute to make your element draggable - * - * Attributes: - * - dnd-draggable Required attribute. The value has to be an object that represents the data - * of the element. In case of a drag and drop operation the object will be - * serialized and unserialized on the receiving end. - * - dnd-selected Callback that is invoked when the element was clicked but not dragged. 
- * The original click event will be provided in the local event variable. - * - dnd-effect-allowed Use this attribute to limit the operations that can be performed. Options: - * - "move": The drag operation will move the element. This is the default. - * - "copy": The drag operation will copy the element. Shows a copy cursor. - * - "copyMove": The user can choose between copy and move by pressing the - * ctrl or shift key. *Not supported in IE:* In Internet Explorer this - * option will be the same as "copy". *Not fully supported in Chrome on - * Windows:* In the Windows version of Chrome the cursor will always be the - * move cursor. However, when the user drops an element and has the ctrl - * key pressed, we will perform a copy anyways. - * - HTML5 also specifies the "link" option, but this library does not - * actively support it yet, so use it at your own risk. - * - dnd-moved Callback that is invoked when the element was moved. Usually you will - * remove your element from the original list in this callback, since the - * directive is not doing that for you automatically. The original dragend - * event will be provided in the local event variable. - * - dnd-canceled Callback that is invoked if the element was dragged, but the operation was - * canceled and the element was not dropped. The original dragend event will - * be provided in the local event variable. - * - dnd-copied Same as dnd-moved, just that it is called when the element was copied - * instead of moved. The original dragend event will be provided in the local - * event variable. - * - dnd-dragstart Callback that is invoked when the element was dragged. The original - * dragstart event will be provided in the local event variable. - * - dnd-dragend Callback that is invoked when the drag operation ended. Available local - * variables are event and dropEffect. - * - dnd-type Use this attribute if you have different kinds of items in your - * application and you want to limit which items can be dropped into which - * lists. Combine with dnd-allowed-types on the dnd-list(s). This attribute - * should evaluate to a string, although this restriction is not enforced. - * - dnd-disable-if You can use this attribute to dynamically disable the draggability of the - * element. This is useful if you have certain list items that you don't want - * to be draggable, or if you want to disable drag & drop completely without - * having two different code branches (e.g. only allow for admins). - * **Note**: If your element is not draggable, the user is probably able to - * select text or images inside of it. Since a selection is always draggable, - * this breaks your UI. You most likely want to disable user selection via - * CSS (see user-select). - * - * CSS classes: - * - dndDragging This class will be added to the element while the element is being - * dragged. It will affect both the element you see while dragging and the - * source element that stays at it's position. Do not try to hide the source - * element with this class, because that will abort the drag operation. - * - dndDraggingSource This class will be added to the element after the drag operation was - * started, meaning it only affects the original element that is still at - * it's source position, and not the "element" that the user is dragging with - * his mouse pointer. 
- */ - .directive('dndDraggable', ['$parse', '$timeout', 'dndDropEffectWorkaround', 'dndDragTypeWorkaround', - function($parse, $timeout, dndDropEffectWorkaround, dndDragTypeWorkaround) { - return function(scope, element, attr) { - // Set the HTML5 draggable attribute on the element - element.attr("draggable", "true"); - - // If the dnd-disable-if attribute is set, we have to watch that - if (attr.dndDisableIf) { - scope.$watch(attr.dndDisableIf, function(disabled) { - element.attr("draggable", !disabled); - }); - } - - /** - * When the drag operation is started we have to prepare the dataTransfer object, - * which is the primary way we communicate with the target element - */ - element.on('dragstart', function(event) { - event = event.originalEvent || event; - - // Check whether the element is draggable, since dragstart might be triggered on a child. - if (element.attr('draggable') == 'false') return true; - - // Serialize the data associated with this element. IE only supports the Text drag type - event.dataTransfer.setData("Text", angular.toJson(scope.$eval(attr.dndDraggable))); - - // Only allow actions specified in dnd-effect-allowed attribute - event.dataTransfer.effectAllowed = attr.dndEffectAllowed || "move"; - - // Add CSS classes. See documentation above - element.addClass("dndDragging"); - $timeout(function() { element.addClass("dndDraggingSource"); }, 0); - - // Workarounds for stupid browsers, see description below - dndDropEffectWorkaround.dropEffect = "none"; - dndDragTypeWorkaround.isDragging = true; - - // Save type of item in global state. Usually, this would go into the dataTransfer - // typename, but we have to use "Text" there to support IE - dndDragTypeWorkaround.dragType = attr.dndType ? scope.$eval(attr.dndType) : undefined; - - // Try setting a proper drag image if triggered on a dnd-handle (won't work in IE). - if (event._dndHandle && event.dataTransfer.setDragImage) { - event.dataTransfer.setDragImage(element[0], 0, 0); - } - - // Invoke callback - $parse(attr.dndDragstart)(scope, {event: event}); - - event.stopPropagation(); - }); - - /** - * The dragend event is triggered when the element was dropped or when the drag - * operation was aborted (e.g. hit escape button). Depending on the executed action - * we will invoke the callbacks specified with the dnd-moved or dnd-copied attribute. - */ - element.on('dragend', function(event) { - event = event.originalEvent || event; - - // Invoke callbacks. Usually we would use event.dataTransfer.dropEffect to determine - // the used effect, but Chrome has not implemented that field correctly. On Windows - // it always sets it to 'none', while Chrome on Linux sometimes sets it to something - // else when it's supposed to send 'none' (drag operation aborted). - var dropEffect = dndDropEffectWorkaround.dropEffect; - scope.$apply(function() { - switch (dropEffect) { - case "move": - $parse(attr.dndMoved)(scope, {event: event}); - break; - case "copy": - $parse(attr.dndCopied)(scope, {event: event}); - break; - case "none": - $parse(attr.dndCanceled)(scope, {event: event}); - break; - } - $parse(attr.dndDragend)(scope, {event: event, dropEffect: dropEffect}); - }); - - // Clean up - element.removeClass("dndDragging"); - $timeout(function() { element.removeClass("dndDraggingSource"); }, 0); - dndDragTypeWorkaround.isDragging = false; - event.stopPropagation(); - }); - - /** - * When the element is clicked we invoke the callback function - * specified with the dnd-selected attribute. 
- */ - element.on('click', function(event) { - if (!attr.dndSelected) return; - - event = event.originalEvent || event; - scope.$apply(function() { - $parse(attr.dndSelected)(scope, {event: event}); - }); - - // Prevent triggering dndSelected in parent elements. - event.stopPropagation(); - }); - - /** - * Workaround to make element draggable in IE9 - */ - element.on('selectstart', function() { - if (this.dragDrop) this.dragDrop(); - }); - }; - }]) - - /** - * Use the dnd-list attribute to make your list element a dropzone. Usually you will add a single - * li element as child with the ng-repeat directive. If you don't do that, we will not be able to - * position the dropped element correctly. If you want your list to be sortable, also add the - * dnd-draggable directive to your li element(s). Both the dnd-list and its direct children must - * have position: relative CSS style, otherwise the positioning algorithm will not be able to - * determine the correct placeholder position in all browsers. - * - * Attributes: - * - dnd-list Required attribute. The value has to be the array in which the data of - * the dropped element should be inserted. - * - dnd-allowed-types Optional array of allowed item types. When used, only items that have a - * matching dnd-type attribute will be droppable. - * - dnd-disable-if Optional boolean expression. When it evaluates to true, no dropping - * into the list is possible. Note that this also disables rearranging - * items inside the list. - * - dnd-horizontal-list Optional boolean expression. When it evaluates to true, the positioning - * algorithm will use the left and right halves of the list items instead of - * the upper and lower halves. - * - dnd-dragover Optional expression that is invoked when an element is dragged over the - * list. If the expression is set, but does not return true, the element is - * not allowed to be dropped. The following variables will be available: - * - event: The original dragover event sent by the browser. - * - index: The position in the list at which the element would be dropped. - * - type: The dnd-type set on the dnd-draggable, or undefined if unset. - * - external: Whether the element was dragged from an external source. - * - dnd-drop Optional expression that is invoked when an element is dropped on the - * list. The following variables will be available: - * - event: The original drop event sent by the browser. - * - index: The position in the list at which the element would be dropped. - * - item: The transferred object. - * - type: The dnd-type set on the dnd-draggable, or undefined if unset. - * - external: Whether the element was dragged from an external source. - * The return value determines the further handling of the drop: - * - false: The drop will be canceled and the element won't be inserted. - * - true: Signals that the drop is allowed, but the dnd-drop - * callback already took care of inserting the element. - * - otherwise: All other return values will be treated as the object to - * insert into the array. In most cases you want to simply return the - * item parameter, but there are no restrictions on what you can return. - * - dnd-inserted Optional expression that is invoked after a drop if the element was - * actually inserted into the list. The same local variables as for - * dnd-drop will be available. Note that for reorderings inside the same - * list the old element will still be in the list due to the fact that - * dnd-moved was not called yet. 
- * - dnd-external-sources Optional boolean expression. When it evaluates to true, the list accepts - * drops from sources outside of the current browser tab. This allows you to - * drag and drop across different browser tabs. Note that this will allow - * arbitrary text to be dropped into the list, thus it is highly recommended to - * implement the dnd-drop callback to check the incoming element for - * sanity. Furthermore, the dnd-type of external sources can not be - * determined, therefore do not rely on restrictions of dnd-allowed-types. - * - * CSS classes: - * - dndPlaceholder When an element is dragged over the list, a new placeholder child - * element will be added. This element is of type li and has the class - * dndPlaceholder set. Alternatively, you can define your own placeholder - * by creating a child element with dndPlaceholder class. - * - dndDragover Will be added to the list while an element is dragged over the list. - */ - .directive('dndList', ['$parse', '$timeout', 'dndDropEffectWorkaround', 'dndDragTypeWorkaround', - function($parse, $timeout, dndDropEffectWorkaround, dndDragTypeWorkaround) { - return function(scope, element, attr) { - // While an element is dragged over the list, this placeholder element is inserted - // at the location where the element would be inserted after dropping - var placeholder = getPlaceholderElement(); - var placeholderNode = placeholder[0]; - var listNode = element[0]; - placeholder.remove(); - - var horizontal = attr.dndHorizontalList && scope.$eval(attr.dndHorizontalList); - var externalSources = attr.dndExternalSources && scope.$eval(attr.dndExternalSources); - - /** - * The dragenter event is fired when a dragged element or text selection enters a valid drop - * target. According to the spec, we either need to have a dropzone attribute or listen on - * dragenter events and call preventDefault(). It should be noted though that no browser seems - * to enforce this behaviour. - */ - element.on('dragenter', function (event) { - event = event.originalEvent || event; - if (!isDropAllowed(event)) return true; - event.preventDefault(); - }); - - /** - * The dragover event is triggered "every few hundred milliseconds" while an element - * is being dragged over our list, or over a child element. - */ - element.on('dragover', function(event) { - event = event.originalEvent || event; - - if (!isDropAllowed(event)) return true; - - // First of all, make sure that the placeholder is shown - // This is especially important if the list is empty - if (placeholderNode.parentNode != listNode) { - element.append(placeholder); - } - - if (event.target !== listNode) { - // Try to find the node directly below the list node. - var listItemNode = event.target; - while (listItemNode.parentNode !== listNode && listItemNode.parentNode) { - listItemNode = listItemNode.parentNode; - } - - if (listItemNode.parentNode === listNode && listItemNode !== placeholderNode) { - // If the mouse pointer is in the upper half of the child element, - // we place it before the child element, otherwise below it. - if (isMouseInFirstHalf(event, listItemNode)) { - listNode.insertBefore(placeholderNode, listItemNode); - } else { - listNode.insertBefore(placeholderNode, listItemNode.nextSibling); - } - } - } else { - // This branch is reached when we are dragging directly over the list element. - // Usually we wouldn't need to do anything here, but IE does not fire its - // events for the child element, only for the list directly. 
Therefore, we repeat - // the positioning algorithm for IE here. - if (isMouseInFirstHalf(event, placeholderNode, true)) { - // Check if we should move the placeholder element one spot towards the top. - // Note that display none elements will have offsetTop and offsetHeight set to - // zero, therefore we need a special check for them. - while (placeholderNode.previousElementSibling - && (isMouseInFirstHalf(event, placeholderNode.previousElementSibling, true) - || placeholderNode.previousElementSibling.offsetHeight === 0)) { - listNode.insertBefore(placeholderNode, placeholderNode.previousElementSibling); - } - } else { - // Check if we should move the placeholder element one spot towards the bottom - while (placeholderNode.nextElementSibling && - !isMouseInFirstHalf(event, placeholderNode.nextElementSibling, true)) { - listNode.insertBefore(placeholderNode, - placeholderNode.nextElementSibling.nextElementSibling); - } - } - } - - // At this point we invoke the callback, which still can disallow the drop. - // We can't do this earlier because we want to pass the index of the placeholder. - if (attr.dndDragover && !invokeCallback(attr.dndDragover, event, getPlaceholderIndex())) { - return stopDragover(); - } - - element.addClass("dndDragover"); - event.preventDefault(); - event.stopPropagation(); - return false; - }); - - /** - * When the element is dropped, we use the position of the placeholder element as the - * position where we insert the transferred data. This assumes that the list has exactly - * one child element per array element. - */ - element.on('drop', function(event) { - event = event.originalEvent || event; - - if (!isDropAllowed(event)) return true; - - // The default behavior in Firefox is to interpret the dropped element as URL and - // forward to it. We want to prevent that even if our drop is aborted. - event.preventDefault(); - - // Unserialize the data that was serialized in dragstart. According to the HTML5 specs, - // the "Text" drag type will be converted to text/plain, but IE does not do that. - var data = event.dataTransfer.getData("Text") || event.dataTransfer.getData("text/plain"); - var transferredObject; - try { - transferredObject = JSON.parse(data); - } catch(e) { - return stopDragover(); - } - - // Invoke the callback, which can transform the transferredObject and even abort the drop. - var index = getPlaceholderIndex(); - if (attr.dndDrop) { - transferredObject = invokeCallback(attr.dndDrop, event, index, transferredObject); - if (!transferredObject) { - return stopDragover(); - } - } - - // Insert the object into the array, unless dnd-drop took care of that (returned true). - if (transferredObject !== true) { - scope.$apply(function() { - scope.$eval(attr.dndList).splice(index, 0, transferredObject); - }); - } - invokeCallback(attr.dndInserted, event, index, transferredObject); - - // In Chrome on Windows the dropEffect will always be none... - // We have to determine the actual effect manually from the allowed effects - if (event.dataTransfer.dropEffect === "none") { - if (event.dataTransfer.effectAllowed === "copy" || - event.dataTransfer.effectAllowed === "move") { - dndDropEffectWorkaround.dropEffect = event.dataTransfer.effectAllowed; - } else { - dndDropEffectWorkaround.dropEffect = event.ctrlKey ? 
"copy" : "move"; - } - } else { - dndDropEffectWorkaround.dropEffect = event.dataTransfer.dropEffect; - } - - // Clean up - stopDragover(); - event.stopPropagation(); - return false; - }); - - /** - * We have to remove the placeholder when the element is no longer dragged over our list. The - * problem is that the dragleave event is not only fired when the element leaves our list, - * but also when it leaves a child element -- so practically it's fired all the time. As a - * workaround we wait a few milliseconds and then check if the dndDragover class was added - * again. If it is there, dragover must have been called in the meantime, i.e. the element - * is still dragging over the list. If you know a better way of doing this, please tell me! - */ - element.on('dragleave', function(event) { - event = event.originalEvent || event; - - element.removeClass("dndDragover"); - $timeout(function() { - if (!element.hasClass("dndDragover")) { - placeholder.remove(); - } - }, 100); - }); - - /** - * Checks whether the mouse pointer is in the first half of the given target element. - * - * In Chrome we can just use offsetY, but in Firefox we have to use layerY, which only - * works if the child element has position relative. In IE the events are only triggered - * on the listNode instead of the listNodeItem, therefore the mouse positions are - * relative to the parent element of targetNode. - */ - function isMouseInFirstHalf(event, targetNode, relativeToParent) { - var mousePointer = horizontal ? (event.offsetX || event.layerX) - : (event.offsetY || event.layerY); - var targetSize = horizontal ? targetNode.offsetWidth : targetNode.offsetHeight; - var targetPosition = horizontal ? targetNode.offsetLeft : targetNode.offsetTop; - targetPosition = relativeToParent ? targetPosition : 0; - return mousePointer < targetPosition + targetSize / 2; - } - - /** - * Tries to find a child element that has the dndPlaceholder class set. If none was found, a - * new li element is created. - */ - function getPlaceholderElement() { - var placeholder; - angular.forEach(element.children(), function(childNode) { - var child = angular.element(childNode); - if (child.hasClass('dndPlaceholder')) { - placeholder = child; - } - }); - return placeholder || angular.element("
  • "); - } - - /** - * We use the position of the placeholder node to determine at which position of the array the - * object needs to be inserted - */ - function getPlaceholderIndex() { - return Array.prototype.indexOf.call(listNode.children, placeholderNode); - } - - /** - * Checks various conditions that must be fulfilled for a drop to be allowed - */ - function isDropAllowed(event) { - // Disallow drop from external source unless it's allowed explicitly. - if (!dndDragTypeWorkaround.isDragging && !externalSources) return false; - - // Check mimetype. Usually we would use a custom drag type instead of Text, but IE doesn't - // support that. - if (!hasTextMimetype(event.dataTransfer.types)) return false; - - // Now check the dnd-allowed-types against the type of the incoming element. For drops from - // external sources we don't know the type, so it will need to be checked via dnd-drop. - if (attr.dndAllowedTypes && dndDragTypeWorkaround.isDragging) { - var allowed = scope.$eval(attr.dndAllowedTypes); - if (angular.isArray(allowed) && allowed.indexOf(dndDragTypeWorkaround.dragType) === -1) { - return false; - } - } - - // Check whether droping is disabled completely - if (attr.dndDisableIf && scope.$eval(attr.dndDisableIf)) return false; - - return true; - } - - /** - * Small helper function that cleans up if we aborted a drop. - */ - function stopDragover() { - placeholder.remove(); - element.removeClass("dndDragover"); - return true; - } - - /** - * Invokes a callback with some interesting parameters and returns the callbacks return value. - */ - function invokeCallback(expression, event, index, item) { - return $parse(expression)(scope, { - event: event, - index: index, - item: item || undefined, - external: !dndDragTypeWorkaround.isDragging, - type: dndDragTypeWorkaround.isDragging ? dndDragTypeWorkaround.dragType : undefined - }); - } - - /** - * Check if the dataTransfer object contains a drag type that we can handle. In old versions - * of IE the types collection will not even be there, so we just assume a drop is possible. - */ - function hasTextMimetype(types) { - if (!types) return true; - for (var i = 0; i < types.length; i++) { - if (types[i] === "Text" || types[i] === "text/plain") return true; - } - - return false; - } - }; - }]) - - /** - * Use the dnd-nodrag attribute inside of dnd-draggable elements to prevent them from starting - * drag operations. This is especially useful if you want to use input elements inside of - * dnd-draggable elements or create specific handle elements. Note: This directive does not work - * in Internet Explorer 9. - */ - .directive('dndNodrag', function() { - return function(scope, element, attr) { - // Set as draggable so that we can cancel the events explicitly - element.attr("draggable", "true"); - - /** - * Since the element is draggable, the browser's default operation is to drag it on dragstart. - * We will prevent that and also stop the event from bubbling up. - */ - element.on('dragstart', function(event) { - event = event.originalEvent || event; - - if (!event._dndHandle) { - // If a child element already reacted to dragstart and set a dataTransfer object, we will - // allow that. For example, this is the case for user selections inside of input elements. - if (!(event.dataTransfer.types && event.dataTransfer.types.length)) { - event.preventDefault(); - } - event.stopPropagation(); - } - }); - - /** - * Stop propagation of dragend events, otherwise dnd-moved might be triggered and the element - * would be removed. 
- */ - element.on('dragend', function(event) { - event = event.originalEvent || event; - if (!event._dndHandle) { - event.stopPropagation(); - } - }); - }; - }) - - /** - * Use the dnd-handle directive within a dnd-nodrag element in order to allow dragging with that - * element after all. Therefore, by combining dnd-nodrag and dnd-handle you can allow - * dnd-draggable elements to only be dragged via specific "handle" elements. Note that Internet - * Explorer will show the handle element as drag image instead of the dnd-draggable element. You - * can work around this by styling the handle element differently when it is being dragged. Use - * the CSS selector .dndDragging:not(.dndDraggingSource) [dnd-handle] for that. - */ - .directive('dndHandle', function() { - return function(scope, element, attr) { - element.attr("draggable", "true"); - - element.on('dragstart dragend', function(event) { - event = event.originalEvent || event; - event._dndHandle = true; - }); - }; - }) - - /** - * This workaround handles the fact that Internet Explorer does not support drag types other than - * "Text" and "URL". That means we can not know whether the data comes from one of our elements or - * is just some other data like a text selection. As a workaround we save the isDragging flag in - * here. When a dropover event occurs, we only allow the drop if we are already dragging, because - * that means the element is ours. - */ - .factory('dndDragTypeWorkaround', function(){ return {} }) - - /** - * Chrome on Windows does not set the dropEffect field, which we need in dragend to determine - * whether a drag operation was successful. Therefore we have to maintain it in this global - * variable. The bug report for that has been open for years: - * https://code.google.com/p/chromium/issues/detail?id=39399 - */ - .factory('dndDropEffectWorkaround', function(){ return {} }); diff --git a/awx/ui/client/lib/angular-drag-and-drop-lists/angular-drag-and-drop-lists.min.js b/awx/ui/client/lib/angular-drag-and-drop-lists/angular-drag-and-drop-lists.min.js deleted file mode 100644 index 3532582184..0000000000 --- a/awx/ui/client/lib/angular-drag-and-drop-lists/angular-drag-and-drop-lists.min.js +++ /dev/null @@ -1,35 +0,0 @@ -/** - * angular-drag-and-drop-lists v1.4.0 - * - * Copyright (c) 2014 Marcel Juenemann marcel@juenemann.cc - * Copyright (c) 2014-2016 Google Inc. 
- * https://github.com/marceljuenemann/angular-drag-and-drop-lists - * - * License: MIT - */ -angular.module("dndLists",[]).directive("dndDraggable",["$parse","$timeout","dndDropEffectWorkaround","dndDragTypeWorkaround",function(e,n,r,t){return function(a,d,o){d.attr("draggable","true"),o.dndDisableIf&&a.$watch(o.dndDisableIf,function(e){d.attr("draggable",!e)}),d.on("dragstart",function(i){return i=i.originalEvent||i,"false"==d.attr("draggable")?!0:(i.dataTransfer.setData("Text",angular.toJson(a.$eval(o.dndDraggable))),i.dataTransfer.effectAllowed=o.dndEffectAllowed||"move",d.addClass("dndDragging"),n(function(){d.addClass("dndDraggingSource")},0),r.dropEffect="none",t.isDragging=!0,t.dragType=o.dndType?a.$eval(o.dndType):void 0,i._dndHandle&&i.dataTransfer.setDragImage&&i.dataTransfer.setDragImage(d[0],0,0),e(o.dndDragstart)(a,{event:i}),void i.stopPropagation())}),d.on("dragend",function(i){i=i.originalEvent||i -var f=r.dropEffect -a.$apply(function(){switch(f){case"move":e(o.dndMoved)(a,{event:i}) -break -case"copy":e(o.dndCopied)(a,{event:i}) -break -case"none":e(o.dndCanceled)(a,{event:i})}e(o.dndDragend)(a,{event:i,dropEffect:f})}),d.removeClass("dndDragging"),n(function(){d.removeClass("dndDraggingSource")},0),t.isDragging=!1,i.stopPropagation()}),d.on("click",function(n){o.dndSelected&&(n=n.originalEvent||n,a.$apply(function(){e(o.dndSelected)(a,{event:n})}),n.stopPropagation())}),d.on("selectstart",function(){this.dragDrop&&this.dragDrop()})}}]).directive("dndList",["$parse","$timeout","dndDropEffectWorkaround","dndDragTypeWorkaround",function(e,n,r,t){return function(a,d,o){function i(e,n,r){var t=E?e.offsetX||e.layerX:e.offsetY||e.layerY,a=E?n.offsetWidth:n.offsetHeight,d=E?n.offsetLeft:n.offsetTop -return d=r?d:0,d+a/2>t}function f(){var e -return angular.forEach(d.children(),function(n){var r=angular.element(n) -r.hasClass("dndPlaceholder")&&(e=r)}),e||angular.element("
  • ")}function l(){return Array.prototype.indexOf.call(D.children,v)}function g(e){if(!t.isDragging&&!y)return!1 -if(!c(e.dataTransfer.types))return!1 -if(o.dndAllowedTypes&&t.isDragging){var n=a.$eval(o.dndAllowedTypes) -if(angular.isArray(n)&&-1===n.indexOf(t.dragType))return!1}return o.dndDisableIf&&a.$eval(o.dndDisableIf)?!1:!0}function s(){return p.remove(),d.removeClass("dndDragover"),!0}function u(n,r,d,o){return e(n)(a,{event:r,index:d,item:o||void 0,external:!t.isDragging,type:t.isDragging?t.dragType:void 0})}function c(e){if(!e)return!0 -for(var n=0;n" - ], - "description": "Angular directives for sorting nested lists using the HTML5 Drag & Drop API", - "keywords": [ - "angular", - "drag", - "drop", - "dnd", - "nested", - "sortable", - "lists", - "html5" - ], - "license": "MIT", - "ignore": [ - "**/.*", - "node_modules", - "bower_components", - "demo", - "*.json", - "test", - "tests" - ] -} diff --git a/awx/ui/client/lib/angular-filters/.bower.json b/awx/ui/client/lib/angular-filters/.bower.json deleted file mode 100644 index e64588c093..0000000000 --- a/awx/ui/client/lib/angular-filters/.bower.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "author": "Francesco Pontillo", - "name": "angular-filters", - "description": "A collection of filters for AngularJS.", - "version": "1.1.2", - "homepage": "https://github.com/frapontillo/angular-filters", - "repository": { - "type": "git", - "url": "git://github.com/frapontillo/angular-filters.git" - }, - "main": "./dist/angular-filters.js", - "dependencies": { - "angular": "~1.4.0" - }, - "devDependencies": { - "angular-mocks": "~1.4.0" - }, - "_release": "1.1.2", - "_resolution": { - "type": "version", - "tag": "1.1.2", - "commit": "61c1501beb057ff75fbc4953e32623104c6cd8ad" - }, - "_source": "git://github.com/frapontillo/angular-filters.git", - "_target": "~1.1.2", - "_originalSource": "angular-filters" -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/.gitignore b/awx/ui/client/lib/angular-filters/.gitignore deleted file mode 100644 index aa461ff903..0000000000 --- a/awx/ui/client/lib/angular-filters/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.idea -node_modules -bower_components \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/.jshintrc b/awx/ui/client/lib/angular-filters/.jshintrc deleted file mode 100644 index 861a058c3c..0000000000 --- a/awx/ui/client/lib/angular-filters/.jshintrc +++ /dev/null @@ -1,26 +0,0 @@ -{ - "node": true, - "browser": true, - "esnext": true, - "bitwise": true, - "camelcase": true, - "curly": true, - "eqeqeq": true, - "immed": true, - "indent": 2, - "latedef": true, - "newcap": true, - "noarg": true, - "quotmark": "single", - "regexp": true, - "undef": true, - "unused": true, - "strict": true, - "trailing": true, - "smarttabs": true, - "globals": { - "angular": false, - "$": false, - "jQuery": false - } -} diff --git a/awx/ui/client/lib/angular-filters/.travis.yml b/awx/ui/client/lib/angular-filters/.travis.yml deleted file mode 100644 index 9255758283..0000000000 --- a/awx/ui/client/lib/angular-filters/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ - language: node_js - - node_js: - - '0.10' - - before_script: - - export DISPLAY=:99.0 - - export PHANTOMJS_BIN=/usr/local/phantomjs/bin/phantomjs - - sh -e /etc/init.d/xvfb start - - sleep 3 # give xvfb some time to start - - 'npm install -g bower grunt-cli' - - 'npm install' - - 'bower install' - - script: - - grunt diff --git a/awx/ui/client/lib/angular-filters/CHANGELOG.md 
b/awx/ui/client/lib/angular-filters/CHANGELOG.md deleted file mode 100644 index 60fad95f78..0000000000 --- a/awx/ui/client/lib/angular-filters/CHANGELOG.md +++ /dev/null @@ -1,40 +0,0 @@ -CHANGELOG -========= - -#### v1.1.2 (2015-06-13) - -- Updated to angular 1.4.0 - -#### v1.1.1 - -- Treat `NaN` as an invalid value for the `default` filter - -#### v1.1.0 - -- Added `property` filter -- Added `join` filter -- Updated to angular 1.2.10 - -#### v1.0.1 - -- Improved tests and stability -- Rewritten build process - -#### v1.0.0 - -- Main module renamed to `frapontillo.ex.filters` in order to adhere with the [Angular Component Specification draft](https://github.com/PascalPrecht/angular-component-spec). -- Added `bool` filter. -- Upgraded bower information, node packages and Karma test runner. - -#### v0.0.2 - -- Added `firstNotNull`, `lastNotNull`, `max`, `min`. -- Test set complete. -- TravisCI is working. - -#### v0.0.1 - -- First release. -- `default` filter is the only filter at the moment. -- Unit testing with grunt, testacular and gruntacular configured. -- `defaultSpec` test written. \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/Gruntfile.js b/awx/ui/client/lib/angular-filters/Gruntfile.js deleted file mode 100644 index 5c997d8254..0000000000 --- a/awx/ui/client/lib/angular-filters/Gruntfile.js +++ /dev/null @@ -1,109 +0,0 @@ -'use strict'; - -module.exports = function(grunt) { - require('matchdep').filterDev('grunt-*').forEach(grunt.loadNpmTasks); - - // configurable paths - var yeomanConfig = { - src: 'src', - dist: 'dist', - test: 'test' - }; - - grunt.initConfig({ - yeoman: yeomanConfig, - pkg: grunt.file.readJSON('package.json'), - dev: { - reporters: 'dots' - }, - karma : { - options: { - configFile: 'karma.conf.js', - singleRun: true - }, - travis: { - browsers: ['PhantomJS'] - }, - local: { - browsers: ['Chrome'] - }, - dev: { - singleRun: false - } - }, - jshint: { - options: { - jshintrc: '.jshintrc' - }, - src: [ - 'Gruntfile.js', - '<%= yeoman.src %>/**/*.js' - ], - test: { - src: ['<%= yeoman.test %>/**/*.js'], - options: { - jshintrc: 'test/.jshintrc' - } - } - }, - meta: { - banner: '/**\n' + ' * <%= pkg.description %>\n' + - ' * @version v<%= pkg.version %> - <%= grunt.template.today("yyyy-mm-dd") %>\n' + - ' * @author <%= pkg.author.name %>\n' + - ' * @link <%= pkg.homepage %>\n' + - ' * @license <%= _.pluck(pkg.licenses, "type").join(", ") %>\n**/\n\n' - }, - clean: { - dist: { - files: [{ - dot: true, - src: [ - '<%= yeoman.dist %>/*', - '!<%= yeoman.dist %>/.git*' - ] - }] - }, - temp: { - src: ['<%= yeoman.dist %>/.temp'] - } - }, - ngmin: { - dist: { - expand: true, - cwd: '<%= yeoman.src %>', - src: ['**/*.js'], - dest: '<%= yeoman.dist %>/.temp' - } - }, - concat: { - options: { - banner: '<%= meta.banner %>\'use strict\';\n', - process: function(src, filepath) { - return '// Source: ' + filepath + '\n' + - src.replace(/(^|\n)[ \t]*('use strict'|"use strict");?\s*/g, '$1'); - } - }, - build: { - src: ['common/*.js', '<%= yeoman.dist %>/.temp/**/*.js'], - dest: '<%= yeoman.dist %>/<%= pkg.name %>.js' - } - }, - uglify: { - options: { - banner: '<%= meta.banner %>' - }, - build: { - src: ['<%= yeoman.dist %>/<%= pkg.name %>.js'], - dest: '<%= yeoman.dist %>/<%= pkg.name %>.min.js' - } - } - }); - - grunt.registerTask('test', ['jshint', 'karma:local']); - grunt.registerTask('test-travis', ['jshint', 'karma:travis']); - - grunt.registerTask('build', ['clean', 'ngmin', 'concat', 'uglify', 'clean:temp']); - grunt.registerTask('travis', 
['test-travis', 'build']); - grunt.registerTask('default', ['test-travis', 'build']); - -}; \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/README.md b/awx/ui/client/lib/angular-filters/README.md deleted file mode 100644 index 5667149c14..0000000000 --- a/awx/ui/client/lib/angular-filters/README.md +++ /dev/null @@ -1,201 +0,0 @@ -angular-filters [![Build Status](https://travis-ci.org/frapontillo/angular-filters.png?branch=master)](https://travis-ci.org/frapontillo/angular-filters) -=============== - -A collection of useful filters for [AngularJS](http://angularjs.org/). - -You can install the latest version of `angular-filters` with `bower`: - -```bash -$ bower install angular-filters -``` - -Then, simply include `./dist/angular-filters.js` or `./dist/angular-filters.min.js` in your Web app and inject the module `frapontillo.ex.filters` in your application. - -## Filters specs - -The included filters are: - -- [`bool:trueValue:falseValue`](#bool) -- [`default:defaultValue`](#default) -- [`firstNotNull`](#firstnotnull) -- [`lastNotNull`](#lastnotnull) -- [`max`](#max) -- [`min`](#min) -- [`property`](#property) -- [`join`](#join) - -### bool - -The `bool` filter allows you to **specify true and false values** to show depending on the input value. The second parameter will be returned if and only if the first parameter is `true`; the third parameter will be returned if and only if the first parameter is `false`. - -This filter can be used to print a specific message depending on a boolean value. - -Use it as follows: - -```html -
    {{ someBoolValue | bool:'Candies!':'No candies :(' }}
    -``` - -```javascript - $scope.returnValue = $filter('bool')($scope.someBoolValue, 'Candies!', 'No candies :('); -``` - -### default - -The `default` filter allows you to **specify a default fallback value** if an object is one of the following: - -- `null` -- `undefined` -- empty string, `''` -- `NaN` - -Please note that if a value equals `0`, the default value won't be returned, as `0` is accepted. - -This filter is useful when another filter's return value is not safe and you want to display a fallback value. - -Use it as follows: - -```html -
    {{ someValue | number:2 | default:'No value is available.' }}
    -``` - -```javascript - $scope.returnValue = $filter('default') - ($filter('number')($scope.someValue, 2), 'No value is available.'); -``` - -### firstNotNull - -The `firstNotNull` filter returns the **first element from an array** that is neither `null` nor `undefined`. This means it returns all numbers and strings, even if empty. It returns `undefined` if none of the values are set or if the array is empty. - -Use it as follows: - -```html -
    {{ myValues | firstNotNull }}
    -``` - -```javascript - $scope.firstValue = $filter('firstNotNull')($scope.myValues); -``` - -### lastNotNull - -The `lastNotNull` filter returns the **last element from an array** that is neither `null` nor `undefined`. This means it returns all numbers and strings, even if empty. It returns `undefined` if none of the values are set or if the array is empty. - -Use it as follows: - -```html -
    {{ myValues | lastNotNull }}
    -``` - -```javascript - $scope.lastValue = $filter('lastNotNull')($scope.myValues); -``` - -### max - -The `max` filter returns the **maximum value from an array** that is neither `null` nor `undefined`. It returns `undefined` if none of the values are set or if the array is empty. - -Use it as follows: - -```html -
    {{ myValues | max }}
    -``` - -```javascript - $scope.maxValue = $filter('max')($scope.myValues); -``` - -### min - -The `min` filter returns the **minimum value from an array** that is neither `null` nor `undefined`. It returns `undefined` if none of the values are set or if the array is empty. - -Use it as follows: - -```html -
    {{ myValues | min }}
    -``` - -```javascript - $scope.minValue = $filter('min')($scope.myValues); -``` - -### property - -The `property` filter returns an **array with only the specified property from the original objects**, not altering the `null` or `undefined` values. - -Use it as follows: - -```html -
    {{ myObjects | property:'myText' }}
    -``` - -```javascript - $scope.allTheTexts = $filter('property')($scope.myObjects, 'myText'); -``` - -### join - -The `join` filter returns **the original array as a string, with its elements joined with the specified separator**, if any, otherwise defaulting to the comma `,`. - -Use it as follows: - -```html -
    {{ myValues | join:', ' }}
    -``` - -```javascript - $scope.joinedValues = $filter('join')($scope.myValues, ', '); -``` - -## Development - -### Test and build - -To test and build the distribution files yourself, do the following: - -```shell -npm install -g grunt-cli karma bower -npm install -bower install -grunt -``` - -These are the available grunt tasks: - -* `karma:travis`, run karma tests once, on PhantomJS -* `karma:local`, run karma tests once, on Chrome -* `karma:dev`, run karma tests indefinitely after every file change, on Chrome -* `jshint:src`, run jshint on every source file -* `jshint:test`, run jshint on every test file -* `clean:dist`, clean the distribution directory -* `clean:temp`, clean the temporary directory -* `ngmin`, prepares every angular file into the `dist/.temp` directory -* `concat`, concatenates the module declaration and the `ngmin`-ified file from the `dist/.temp` into the `dist` directory, adding the banner -* `uglify`, minifies the output file in the `dist` directory, adding the banner -* `build`, builds the regular and minified files -* `test-travis`, runs `jshint` and `karma:travis` - -Use the default task by calling `grunt` to run tests on PhantomJS and build the regular and minified files. - -### Contribute - -To contribute, please follow the generic [AngularJS Contributing Guidelines](https://github.com/angular/angular.js/blob/master/CONTRIBUTING.md), with the one exception that you should send the PR to the `develop` branch instead of `master`. - -## License - -``` - Copyright 2014-2015 Francesco Pontillo - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -``` \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/bower.json b/awx/ui/client/lib/angular-filters/bower.json deleted file mode 100644 index a48807293a..0000000000 --- a/awx/ui/client/lib/angular-filters/bower.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "author": "Francesco Pontillo", - "name": "angular-filters", - "description": "A collection of filters for AngularJS.", - "version": "1.1.2", - "homepage": "https://github.com/frapontillo/angular-filters", - "repository": { - "type": "git", - "url": "git://github.com/frapontillo/angular-filters.git" - }, - "main": "./dist/angular-filters.js", - "dependencies": { - "angular": "~1.4.0" - }, - "devDependencies": { - "angular-mocks": "~1.4.0" - } -} diff --git a/awx/ui/client/lib/angular-filters/common/module.js b/awx/ui/client/lib/angular-filters/common/module.js deleted file mode 100644 index 4a284259ee..0000000000 --- a/awx/ui/client/lib/angular-filters/common/module.js +++ /dev/null @@ -1,2 +0,0 @@ -angular.module('frapontillo.ex.filters', []); -angular.module('frapontillo', ['ex.filters']); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/dist/angular-filters.js b/awx/ui/client/lib/angular-filters/dist/angular-filters.js deleted file mode 100644 index 703f40259f..0000000000 --- a/awx/ui/client/lib/angular-filters/dist/angular-filters.js +++ /dev/null @@ -1,101 +0,0 @@ -/** - * A collection of filters for AngularJS. 
- * @version v1.1.2 - 2015-06-13 - * @author Francesco Pontillo - * @link https://github.com/frapontillo/angular-filters - * @license Apache License 2.0 -**/ - -'use strict'; -// Source: common/module.js -angular.module('frapontillo.ex.filters', []); -angular.module('frapontillo', ['ex.filters']); -// Source: dist/.temp/filters/bool/bool.js -angular.module('frapontillo.ex.filters').filter('bool', function () { - return function (input, valueTrue, valueFalse) { - return input !== true ? valueFalse : valueTrue; - }; -}); -// Source: dist/.temp/filters/default/default.js -angular.module('frapontillo.ex.filters').filter('default', function () { - return function (input, value) { - if (!isNaN(input) && input !== null && input !== undefined && (input !== '' || angular.isNumber(input))) { - return input; - } - return value || ''; - }; -}); -// Source: dist/.temp/filters/firstNotNull/firstNotNull.js -angular.module('frapontillo.ex.filters').filter('firstNotNull', function () { - return function (input) { - if (input) { - var l = input.length - 1; - for (var i = 0; i <= l; i++) { - if (input[i] !== undefined && input[i] !== null) { - return input[i]; - } - } - } - }; -}); -// Source: dist/.temp/filters/join/join.js -angular.module('frapontillo.ex.filters').filter('join', function () { - return function (array, separator) { - if (!array) { - return ''; - } - return array.join(separator); - }; -}); -// Source: dist/.temp/filters/lastNotNull/lastNotNull.js -angular.module('frapontillo.ex.filters').filter('lastNotNull', function () { - return function (input) { - if (input) { - var l = input.length - 1; - for (var i = l; i >= 0; i--) { - if (input[i] !== undefined) { - return input[i]; - } - } - } - }; -}); -// Source: dist/.temp/filters/max/max.js -angular.module('frapontillo.ex.filters').filter('max', function () { - return function (input) { - var out; - if (input) { - for (var i in input) { - if (input[i] > out || out === undefined || out === null) { - out = input[i]; - } - } - } - return out; - }; -}); -// Source: dist/.temp/filters/min/min.js -angular.module('frapontillo.ex.filters').filter('min', function () { - return function (input) { - var out; - if (input) { - for (var i in input) { - if (input[i] < out || out === undefined || out === null) { - out = input[i]; - } - } - } - return out; - }; -}); -// Source: dist/.temp/filters/property/property.js -angular.module('frapontillo.ex.filters').filter('property', function () { - return function (array, property) { - var newArray = []; - angular.forEach(array, function (element) { - var evalProperty = element[property]; - newArray.push(evalProperty); - }); - return newArray; - }; -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/dist/angular-filters.min.js b/awx/ui/client/lib/angular-filters/dist/angular-filters.min.js deleted file mode 100644 index 9d24b7a4b4..0000000000 --- a/awx/ui/client/lib/angular-filters/dist/angular-filters.min.js +++ /dev/null @@ -1,9 +0,0 @@ -/** - * A collection of filters for AngularJS. 
- * @version v1.1.2 - 2015-06-13 - * @author Francesco Pontillo - * @link https://github.com/frapontillo/angular-filters - * @license Apache License 2.0 -**/ - -"use strict";angular.module("frapontillo.ex.filters",[]),angular.module("frapontillo",["ex.filters"]),angular.module("frapontillo.ex.filters").filter("bool",function(){return function(a,b,c){return a!==!0?c:b}}),angular.module("frapontillo.ex.filters").filter("default",function(){return function(a,b){return isNaN(a)||null===a||void 0===a||""===a&&!angular.isNumber(a)?b||"":a}}),angular.module("frapontillo.ex.filters").filter("firstNotNull",function(){return function(a){if(a)for(var b=a.length-1,c=0;b>=c;c++)if(void 0!==a[c]&&null!==a[c])return a[c]}}),angular.module("frapontillo.ex.filters").filter("join",function(){return function(a,b){return a?a.join(b):""}}),angular.module("frapontillo.ex.filters").filter("lastNotNull",function(){return function(a){if(a)for(var b=a.length-1,c=b;c>=0;c--)if(void 0!==a[c])return a[c]}}),angular.module("frapontillo.ex.filters").filter("max",function(){return function(a){var b;if(a)for(var c in a)(a[c]>b||void 0===b||null===b)&&(b=a[c]);return b}}),angular.module("frapontillo.ex.filters").filter("min",function(){return function(a){var b;if(a)for(var c in a)(a[c]= 0.10.15" - }, - "dependencies": {}, - "devDependencies": { - "grunt": "~0.4.1", - "grunt-contrib-clean": "~0.5.0", - "grunt-contrib-concat": "~0.3.0", - "grunt-contrib-jshint": "~0.6.0", - "grunt-contrib-uglify": "~0.2.7", - "grunt-karma": "~0.6.2", - "matchdep": "~0.3.0", - "grunt-ngmin": "0.0.3" - } -} diff --git a/awx/ui/client/lib/angular-filters/src/filters/bool/bool.js b/awx/ui/client/lib/angular-filters/src/filters/bool/bool.js deleted file mode 100644 index 78d93893e9..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/bool/bool.js +++ /dev/null @@ -1,9 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('bool', function() { - return function(input, valueTrue, valueFalse) { - return input !== true ? 
valueFalse : valueTrue; - }; - } -); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/src/filters/default/default.js b/awx/ui/client/lib/angular-filters/src/filters/default/default.js deleted file mode 100644 index 97cf488561..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/default/default.js +++ /dev/null @@ -1,12 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('default', function() { - return function(input, value) { - if (!isNaN(input) && input !== null && input !== undefined && (input !== '' || angular.isNumber(input))) { - return input; - } - return value || ''; - }; - } -); diff --git a/awx/ui/client/lib/angular-filters/src/filters/firstNotNull/firstNotNull.js b/awx/ui/client/lib/angular-filters/src/filters/firstNotNull/firstNotNull.js deleted file mode 100644 index a26fe438be..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/firstNotNull/firstNotNull.js +++ /dev/null @@ -1,16 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('firstNotNull', function() { - return function(input) { - if (input) { - var l = input.length - 1; - for (var i = 0; i <= l; i++) { - if (input[i] !== undefined && input[i] !== null) { - return input[i]; - } - } - } - }; - } -); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/src/filters/join/join.js b/awx/ui/client/lib/angular-filters/src/filters/join/join.js deleted file mode 100644 index e760feb4e8..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/join/join.js +++ /dev/null @@ -1,12 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('join', function() { - return function(array, separator) { - if (!array) { - return ''; - } - return array.join(separator); - }; - } -); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/src/filters/lastNotNull/lastNotNull.js b/awx/ui/client/lib/angular-filters/src/filters/lastNotNull/lastNotNull.js deleted file mode 100644 index 5b628c7c9b..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/lastNotNull/lastNotNull.js +++ /dev/null @@ -1,16 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('lastNotNull', function() { - return function(input) { - if (input) { - var l = input.length - 1; - for (var i = l; i >= 0; i--) { - if (input[i] !== undefined) { - return input[i]; - } - } - } - }; - } -); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/src/filters/max/max.js b/awx/ui/client/lib/angular-filters/src/filters/max/max.js deleted file mode 100644 index 435d4f6d6b..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/max/max.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('max', function() { - return function(input) { - var out; - if (input) { - for (var i in input) { - if (input[i] > out || out === undefined || out === null) { - out = input[i]; - } - } - } - return out; - }; - } -); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/src/filters/min/min.js b/awx/ui/client/lib/angular-filters/src/filters/min/min.js deleted file mode 100644 index bfbae9ef8c..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/min/min.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('min', function() { - return function(input) { - var out; - if (input) { - for (var i in input) { - if (input[i] < out || out === undefined || out === null) { 
- out = input[i]; - } - } - } - return out; - }; - } -); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/src/filters/property/property.js b/awx/ui/client/lib/angular-filters/src/filters/property/property.js deleted file mode 100644 index 5f1c1bdc15..0000000000 --- a/awx/ui/client/lib/angular-filters/src/filters/property/property.js +++ /dev/null @@ -1,15 +0,0 @@ -'use strict'; - -angular.module('frapontillo.ex.filters') - .filter('property', function() { - return function(array, property) { - var newArray = []; - // for each object in the array - angular.forEach(array, function(element) { - var evalProperty = element[property]; - newArray.push(evalProperty); - }); - return newArray; - }; - } -); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/test/.jshintrc b/awx/ui/client/lib/angular-filters/test/.jshintrc deleted file mode 100644 index aa37e7a4d0..0000000000 --- a/awx/ui/client/lib/angular-filters/test/.jshintrc +++ /dev/null @@ -1,35 +0,0 @@ -{ - "node": true, - "browser": true, - "esnext": true, - "bitwise": true, - "camelcase": true, - "curly": true, - "eqeqeq": true, - "immed": true, - "indent": 2, - "latedef": true, - "newcap": true, - "noarg": true, - "quotmark": "single", - "regexp": true, - "undef": true, - "unused": true, - "strict": true, - "trailing": true, - "smarttabs": true, - "globals": { - "after": false, - "afterEach": false, - "angular": false, - "before": false, - "beforeEach": false, - "browser": false, - "describe": false, - "expect": false, - "inject": false, - "it": false, - "spyOn": false - } -} - diff --git a/awx/ui/client/lib/angular-filters/test/bool/boolSpec.js b/awx/ui/client/lib/angular-filters/test/bool/boolSpec.js deleted file mode 100644 index 5a3807636f..0000000000 --- a/awx/ui/client/lib/angular-filters/test/bool/boolSpec.js +++ /dev/null @@ -1,34 +0,0 @@ -'use strict'; - -describe('bool', function () { - var boolFilter; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - boolFilter = $filter('bool'); - })); - - it('should match the true value', function () { - expect(boolFilter(true, 1, 0)).toEqual(1); - }); - - it('should match the false value', function () { - expect(boolFilter(false, 1, 0)).toEqual(0); - }); - - it('should match a string to the false value', function () { - expect(boolFilter('true', 1, 0)).toEqual(0); - }); - - it('should match the empty string to the false value', function () { - expect(boolFilter('', 1, 0)).toEqual(0); - }); - - it('should match undefined to the false value', function () { - expect(boolFilter(undefined, 1, 0)).toEqual(0); - }); - - it('should match null to the false value', function () { - expect(boolFilter(null, 1, 0)).toEqual(0); - }); -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/test/default/defaultSpec.js b/awx/ui/client/lib/angular-filters/test/default/defaultSpec.js deleted file mode 100644 index f9442c8b93..0000000000 --- a/awx/ui/client/lib/angular-filters/test/default/defaultSpec.js +++ /dev/null @@ -1,53 +0,0 @@ -'use strict'; - -describe('default', function () { - var defaultFilter; - var numberFilter; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - defaultFilter = $filter('default'); - numberFilter = $filter('number'); - })); - - it('should return the number 1337', function () { - var inputVal = 1337; - expect(defaultFilter(inputVal)).toEqual(1337); - }); - - it('should return a "default" string', function () { - var inputVal; - 
var defaultVal = 'default'; - expect(defaultFilter(inputVal, defaultVal)).toEqual('default'); - }); - - it('should return a "default" string', function () { - var inputVal = null; - var defaultVal = 'default'; - expect(defaultFilter(inputVal, defaultVal)).toEqual('default'); - }); - - it('should return a "default" string', function () { - var inputVal = NaN; - var defaultVal = 'default'; - expect(defaultFilter(inputVal, defaultVal)).toEqual('default'); - }); - - it('should return the number 0', function () { - var inputVal = 0; - var defaultVal = 'default'; - expect(defaultFilter(inputVal, defaultVal)).toEqual(0); - }); - - it('should return the string "13.37"', function () { - var inputVal = '13.3678787'; - var defaultVal = 'N.A.'; - expect(defaultFilter(numberFilter(inputVal, 2), defaultVal)).toEqual('13.37'); - }); - - it('should return a "N.A." string', function () { - var inputVal; - var defaultVal = 'N.A.'; - expect(defaultFilter(numberFilter(inputVal, 2), defaultVal)).toEqual(defaultVal); - }); -}); diff --git a/awx/ui/client/lib/angular-filters/test/firstNotNull/firstNotNullSpec.js b/awx/ui/client/lib/angular-filters/test/firstNotNull/firstNotNullSpec.js deleted file mode 100644 index 6bf7a40f60..0000000000 --- a/awx/ui/client/lib/angular-filters/test/firstNotNull/firstNotNullSpec.js +++ /dev/null @@ -1,28 +0,0 @@ -'use strict'; - -describe('firstNotNull', function () { - var firstNotNullFilter; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - firstNotNullFilter = $filter('firstNotNull'); - })); - - it('should return the number 1337', function () { - expect(firstNotNullFilter([ - null, undefined, 1337, 0 - ])).toEqual(1337); - }); - - it('should return the number 0', function () { - expect(firstNotNullFilter([ - null, 0, undefined, 3 - ])).toEqual(0); - }); - - it('should return undefined', function () { - expect(firstNotNullFilter([ - null, undefined - ])).toEqual(undefined); - }); -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/test/join/joinSpec.js b/awx/ui/client/lib/angular-filters/test/join/joinSpec.js deleted file mode 100644 index 558713c0ee..0000000000 --- a/awx/ui/client/lib/angular-filters/test/join/joinSpec.js +++ /dev/null @@ -1,30 +0,0 @@ -'use strict'; - -describe('join', function () { - var joinFilter; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - joinFilter = $filter('join'); - })); - - it('should return \'this is a simple test\'', function () { - expect(joinFilter(['this', 'is', 'a', 'simple', 'test'], ' ')).toEqual('this is a simple test'); - }); - - it('should return the empty string for an undefined array', function () { - expect(joinFilter(undefined, '')).toEqual(''); - }); - - it('should return the empty string for an empty array', function () { - expect(joinFilter([], '')).toEqual(''); - }); - - it('should return \'0123456789\'', function () { - expect(joinFilter([0,1,2,3,4,5,6,7,8,9], '')).toEqual('0123456789'); - }); - - it('should default to the comma separator and return \'0,1,2,3,4,5,6,7,8,9\'', function () { - expect(joinFilter([0,1,2,3,4,5,6,7,8,9])).toEqual('0,1,2,3,4,5,6,7,8,9'); - }); -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/test/lastNotNull/lastNotNullSpec.js b/awx/ui/client/lib/angular-filters/test/lastNotNull/lastNotNullSpec.js deleted file mode 100644 index 1136150c6e..0000000000 --- a/awx/ui/client/lib/angular-filters/test/lastNotNull/lastNotNullSpec.js +++ /dev/null @@ -1,28 +0,0 @@ 
-'use strict'; - -describe('lastNotNull', function () { - var lastNotNullFilter; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - lastNotNullFilter = $filter('lastNotNull'); - })); - - it('should return the number 0', function () { - expect(lastNotNullFilter([ - null, undefined, 1337, 0 - ])).toEqual(0); - }); - - it('should return the number 1337', function () { - expect(lastNotNullFilter([ - null, 0, 1337, undefined - ])).toEqual(1337); - }); - - it('should return undefined', function () { - expect(lastNotNullFilter([ - null, undefined - ])).toEqual(undefined); - }); -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/test/max/maxSpec.js b/awx/ui/client/lib/angular-filters/test/max/maxSpec.js deleted file mode 100644 index d919a5c42e..0000000000 --- a/awx/ui/client/lib/angular-filters/test/max/maxSpec.js +++ /dev/null @@ -1,28 +0,0 @@ -'use strict'; - -describe('max', function () { - var maxFilter; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - maxFilter = $filter('max'); - })); - - it('should return the number 1337', function () { - expect(maxFilter([ - null, undefined, 1337, 0 - ])).toEqual(1337); - }); - - it('should return the number 1337', function () { - expect(maxFilter([ - null, 0, 1337, undefined - ])).toEqual(1337); - }); - - it('should return undefined', function () { - expect(maxFilter([ - null, undefined - ])).toEqual(undefined); - }); -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/test/min/minSpec.js b/awx/ui/client/lib/angular-filters/test/min/minSpec.js deleted file mode 100644 index 906dbc9c91..0000000000 --- a/awx/ui/client/lib/angular-filters/test/min/minSpec.js +++ /dev/null @@ -1,28 +0,0 @@ -'use strict'; - -describe('min', function () { - var minFilter; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - minFilter = $filter('min'); - })); - - it('should return the number 0', function () { - expect(minFilter([ - null, undefined, 1337, 0 - ])).toEqual(0); - }); - - it('should return the number 1', function () { - expect(minFilter([ - null, 1, 1337, undefined - ])).toEqual(1); - }); - - it('should return undefined', function () { - expect(minFilter([ - null, undefined - ])).toEqual(undefined); - }); -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-filters/test/property/propertySpec.js b/awx/ui/client/lib/angular-filters/test/property/propertySpec.js deleted file mode 100644 index da80421f42..0000000000 --- a/awx/ui/client/lib/angular-filters/test/property/propertySpec.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict'; - -describe('property', function () { - var propertyFilter; - - var testArray = [ - {id:0, text:'zero'}, - {id:1, text:'one'}, - {id:2, text:'two'}, - {id:3, text:'three'}, - {id:4, text:'four'}, - {id:5, text:'five'}, - {id:6, text:'six'} - ]; - - var makeArray = function() { - return angular.copy(testArray); - }; - - beforeEach(module('frapontillo.ex.filters')); - beforeEach(inject(function ($filter) { - propertyFilter = $filter('property'); - })); - - it('should return the \'id\' properties', function () { - var filteredArray = propertyFilter(makeArray(), 'id'); - expect(filteredArray[0]).toEqual(0); - expect(filteredArray.length).toEqual(7); - }); - - it('should return elements with only the \'text\' property', function () { - var filteredArray = propertyFilter(makeArray(), 'text'); - expect(filteredArray[0]).toEqual('zero'); - 
expect(filteredArray.length).toEqual(7); - }); - - it('should return empty elements', function () { - var filteredArray = propertyFilter(makeArray(), 'something else'); - expect(filteredArray[0]).toEqual(undefined); - expect(filteredArray.length).toEqual(7); - }); -}); \ No newline at end of file diff --git a/awx/ui/client/lib/angular-md5/.bower.json b/awx/ui/client/lib/angular-md5/.bower.json deleted file mode 100644 index be6198b13f..0000000000 --- a/awx/ui/client/lib/angular-md5/.bower.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "angular-md5", - "version": "0.1.7", - "main": "angular-md5.js", - "description": "A md5 crypto component for Angular.js", - "author": [ - { - "name": "Patrick Stapleton", - "email": "github@gdi2290.com", - "url": "www.gdi2290.com" - } - ], - "keywords": [ - "PatrickJS", - "gdi2290", - "angular.js", - "angularjs", - "angular", - "crypto", - "md5" - ], - "ignore": [ - "**/.*", - "*.yml", - "*.xml", - "node_modules", - "bower_components", - "test", - "tests" - ], - "dependencies": { - "angular": "*" - }, - "homepage": "https://github.com/gdi2290/angular-md5", - "_release": "0.1.7", - "_resolution": { - "type": "version", - "tag": "v0.1.7", - "commit": "9ab48b3a7911ba19c86670040ae7ded6fd2b14f7" - }, - "_source": "git://github.com/gdi2290/angular-md5.git", - "_target": "~0.1.7", - "_originalSource": "angular-md5", - "_direct": true -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-md5/Gruntfile.js b/awx/ui/client/lib/angular-md5/Gruntfile.js deleted file mode 100644 index 4d5faa5717..0000000000 --- a/awx/ui/client/lib/angular-md5/Gruntfile.js +++ /dev/null @@ -1,129 +0,0 @@ -'use strict'; - -module.exports = function(grunt) { - - require('load-grunt-tasks')(grunt); - require('time-grunt')(grunt); - - // Project configuration. 
- grunt.initConfig({ - pkg: grunt.file.readJSON('package.json'), - bwr: grunt.file.readJSON('bower.json'), - nodeunit: { - files: ['test/**/*_test.js'] - }, - jshint: { - options: { - jshintrc: '.jshintrc' - }, - gruntfile: { - src: 'Gruntfile.js' - }, - lib: { - src: ['<%= bwr.name %>'] - }, - test: { - src: ['test/**/*.js'] - } - }, - clean: ['.tmp/'], - concat: { - dist:{} - }, - ngmin: { - dist: { - files: { - '.tmp/<%= bwr.name %>.js': ['./lib/index.js', './lib/*/*.js'] - } - } - }, - uglify: { - options: { - report: 'min', - enclose: { - 'this': 'window', - 'this.angular': 'angular', - 'void 0': 'undefined' - }, - banner: '/*\n <%= bwr.name %> - v<%= bwr.version %> \n ' + - '<%= grunt.template.today("yyyy-mm-dd") %>\n*/\n'+ - '', - }, - dist: { - options: { - beautify: false, - mangle: true, - compress: { - global_defs: { - 'DEBUG': false - }, - dead_code: true - }, - sourceMap: '<%= bwr.name %>.min.js.map' - }, - files: { - '<%= bwr.name %>.min.js': ['./lib/index.js', './lib/*/*.js'] - } - }, - src: { - options: { - beautify: true, - mangle: false, - compress: false - }, - files: { - '<%= bwr.name %>.js': ['./lib/index.js', './lib/*/*.js'] - } - }, - buildDist: { - options: { - beautify: false, - mangle: true, - compress: { - global_defs: { - 'DEBUG': false - }, - dead_code: true - }, - sourceMap: '<%= bwr.name %>.min.js.map' - }, - files: { - '<%= bwr.name %>.min.js': '.tmp/<%= bwr.name %>.js' - } - }, - buildSrc: { - options: { - beautify: { - indent_level: 2, - beautify: true - }, - mangle: false, - compress: false - }, - files: { - '<%= bwr.name %>.js': '.tmp/<%= bwr.name %>.js' - } - } - - } - }); - - // Testing task - grunt.registerTask('test', [ - ]); - - // Build task - grunt.registerTask('build', [ - 'clean', - 'concat', - 'ngmin:dist', - 'uglify:buildSrc', - 'uglify:buildDist' - ]); - - // Default task - grunt.registerTask('default', [ - 'build' - ]); - -}; diff --git a/awx/ui/client/lib/angular-md5/README.md b/awx/ui/client/lib/angular-md5/README.md deleted file mode 100644 index f56754f1e7..0000000000 --- a/awx/ui/client/lib/angular-md5/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# angular-md5 [![Build Status](https://travis-ci.org/gdi2290/angular-md5.png?branch=master)](https://travis-ci.org/gdi2290/angular-md5) -md5 for Angular.js and Gravatar filter - -# How do I add this to my project? - -You can download angular-md5 by: - -* (preferred) Using bower and running `bower install angular-md5 --save` -* Using npm and running `npm install angular-md5 --save` -* Downloading it manually by clicking [here to download development unminified version](https://raw.github.com/gdi2290/angular-md5/master/angular-md5.js) - - -````html - - - - {{ message }} - - - - - -```` diff --git a/awx/ui/client/lib/angular-md5/angular-md5.js b/awx/ui/client/lib/angular-md5/angular-md5.js deleted file mode 100644 index 7acad5dc0f..0000000000 --- a/awx/ui/client/lib/angular-md5/angular-md5.js +++ /dev/null @@ -1,200 +0,0 @@ -/* - angular-md5 - v0.1.7 - 2014-01-20 -*/ -(function(window, angular, undefined) { - angular.module("angular-md5", [ "gdi2290.md5" ]); - angular.module("ngMd5", [ "gdi2290.md5" ]); - angular.module("gdi2290.md5", [ "gdi2290.gravatar-filter", "gdi2290.md5-service", "gdi2290.md5-filter" ]); - "use strict"; - angular.module("gdi2290.gravatar-filter", []).filter("gravatar", [ "md5", function(md5) { - var cache = {}; - return function(text, defaultText) { - if (!cache[text]) { - defaultText = defaultText ?
md5.createHash(defaultText.toString().toLowerCase()) : ""; - cache[text] = text ? md5.createHash(text.toString().toLowerCase()) : defaultText; - } - return cache[text]; - }; - } ]); - "use strict"; - angular.module("gdi2290.md5-filter", []).filter("md5", [ "md5", function(md5) { - return function(text) { - return text ? md5.createHash(text.toString().toLowerCase()) : text; - }; - } ]); - "use strict"; - angular.module("gdi2290.md5-service", []).factory("md5", [ function() { - var md5 = { - createHash: function(str) { - var xl; - var rotateLeft = function(lValue, iShiftBits) { - return lValue << iShiftBits | lValue >>> 32 - iShiftBits; - }; - var addUnsigned = function(lX, lY) { - var lX4, lY4, lX8, lY8, lResult; - lX8 = lX & 2147483648; - lY8 = lY & 2147483648; - lX4 = lX & 1073741824; - lY4 = lY & 1073741824; - lResult = (lX & 1073741823) + (lY & 1073741823); - if (lX4 & lY4) { - return lResult ^ 2147483648 ^ lX8 ^ lY8; - } - if (lX4 | lY4) { - if (lResult & 1073741824) { - return lResult ^ 3221225472 ^ lX8 ^ lY8; - } else { - return lResult ^ 1073741824 ^ lX8 ^ lY8; - } - } else { - return lResult ^ lX8 ^ lY8; - } - }; - var _F = function(x, y, z) { - return x & y | ~x & z; - }; - var _G = function(x, y, z) { - return x & z | y & ~z; - }; - var _H = function(x, y, z) { - return x ^ y ^ z; - }; - var _I = function(x, y, z) { - return y ^ (x | ~z); - }; - var _FF = function(a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_F(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - var _GG = function(a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_G(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - var _HH = function(a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_H(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - var _II = function(a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_I(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - var convertToWordArray = function(str) { - var lWordCount; - var lMessageLength = str.length; - var lNumberOfWords_temp1 = lMessageLength + 8; - var lNumberOfWords_temp2 = (lNumberOfWords_temp1 - lNumberOfWords_temp1 % 64) / 64; - var lNumberOfWords = (lNumberOfWords_temp2 + 1) * 16; - var lWordArray = new Array(lNumberOfWords - 1); - var lBytePosition = 0; - var lByteCount = 0; - while (lByteCount < lMessageLength) { - lWordCount = (lByteCount - lByteCount % 4) / 4; - lBytePosition = lByteCount % 4 * 8; - lWordArray[lWordCount] = lWordArray[lWordCount] | str.charCodeAt(lByteCount) << lBytePosition; - lByteCount++; - } - lWordCount = (lByteCount - lByteCount % 4) / 4; - lBytePosition = lByteCount % 4 * 8; - lWordArray[lWordCount] = lWordArray[lWordCount] | 128 << lBytePosition; - lWordArray[lNumberOfWords - 2] = lMessageLength << 3; - lWordArray[lNumberOfWords - 1] = lMessageLength >>> 29; - return lWordArray; - }; - var wordToHex = function(lValue) { - var wordToHexValue = "", wordToHexValue_temp = "", lByte, lCount; - for (lCount = 0; lCount <= 3; lCount++) { - lByte = lValue >>> lCount * 8 & 255; - wordToHexValue_temp = "0" + lByte.toString(16); - wordToHexValue = wordToHexValue + wordToHexValue_temp.substr(wordToHexValue_temp.length - 2, 2); - } - return wordToHexValue; - }; - var x = [], k, AA, BB, CC, DD, a, b, c, d, S11 = 7, S12 = 12, S13 = 17, S14 = 22, S21 = 5, S22 = 9, S23 = 14, S24 = 20, S31 = 4, S32 = 11, S33 = 16, S34 = 23, S41 = 6, S42 = 10, S43 = 15, S44 = 21; - x = convertToWordArray(str); - 
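// Note: the decimal literals below are the standard MD5 initialization vector
// written in base 10: 1732584193 = 0x67452301, 4023233417 = 0xEFCDAB89,
// 2562383102 = 0x98BADCFE, 271733878 = 0x10325476. Each 16-word (512-bit)
// block of the message is then mixed into this state by the four rounds of
// _FF/_GG/_HH/_II operations that follow.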
a = 1732584193; - b = 4023233417; - c = 2562383102; - d = 271733878; - xl = x.length; - for (k = 0; k < xl; k += 16) { - AA = a; - BB = b; - CC = c; - DD = d; - a = _FF(a, b, c, d, x[k + 0], S11, 3614090360); - d = _FF(d, a, b, c, x[k + 1], S12, 3905402710); - c = _FF(c, d, a, b, x[k + 2], S13, 606105819); - b = _FF(b, c, d, a, x[k + 3], S14, 3250441966); - a = _FF(a, b, c, d, x[k + 4], S11, 4118548399); - d = _FF(d, a, b, c, x[k + 5], S12, 1200080426); - c = _FF(c, d, a, b, x[k + 6], S13, 2821735955); - b = _FF(b, c, d, a, x[k + 7], S14, 4249261313); - a = _FF(a, b, c, d, x[k + 8], S11, 1770035416); - d = _FF(d, a, b, c, x[k + 9], S12, 2336552879); - c = _FF(c, d, a, b, x[k + 10], S13, 4294925233); - b = _FF(b, c, d, a, x[k + 11], S14, 2304563134); - a = _FF(a, b, c, d, x[k + 12], S11, 1804603682); - d = _FF(d, a, b, c, x[k + 13], S12, 4254626195); - c = _FF(c, d, a, b, x[k + 14], S13, 2792965006); - b = _FF(b, c, d, a, x[k + 15], S14, 1236535329); - a = _GG(a, b, c, d, x[k + 1], S21, 4129170786); - d = _GG(d, a, b, c, x[k + 6], S22, 3225465664); - c = _GG(c, d, a, b, x[k + 11], S23, 643717713); - b = _GG(b, c, d, a, x[k + 0], S24, 3921069994); - a = _GG(a, b, c, d, x[k + 5], S21, 3593408605); - d = _GG(d, a, b, c, x[k + 10], S22, 38016083); - c = _GG(c, d, a, b, x[k + 15], S23, 3634488961); - b = _GG(b, c, d, a, x[k + 4], S24, 3889429448); - a = _GG(a, b, c, d, x[k + 9], S21, 568446438); - d = _GG(d, a, b, c, x[k + 14], S22, 3275163606); - c = _GG(c, d, a, b, x[k + 3], S23, 4107603335); - b = _GG(b, c, d, a, x[k + 8], S24, 1163531501); - a = _GG(a, b, c, d, x[k + 13], S21, 2850285829); - d = _GG(d, a, b, c, x[k + 2], S22, 4243563512); - c = _GG(c, d, a, b, x[k + 7], S23, 1735328473); - b = _GG(b, c, d, a, x[k + 12], S24, 2368359562); - a = _HH(a, b, c, d, x[k + 5], S31, 4294588738); - d = _HH(d, a, b, c, x[k + 8], S32, 2272392833); - c = _HH(c, d, a, b, x[k + 11], S33, 1839030562); - b = _HH(b, c, d, a, x[k + 14], S34, 4259657740); - a = _HH(a, b, c, d, x[k + 1], S31, 2763975236); - d = _HH(d, a, b, c, x[k + 4], S32, 1272893353); - c = _HH(c, d, a, b, x[k + 7], S33, 4139469664); - b = _HH(b, c, d, a, x[k + 10], S34, 3200236656); - a = _HH(a, b, c, d, x[k + 13], S31, 681279174); - d = _HH(d, a, b, c, x[k + 0], S32, 3936430074); - c = _HH(c, d, a, b, x[k + 3], S33, 3572445317); - b = _HH(b, c, d, a, x[k + 6], S34, 76029189); - a = _HH(a, b, c, d, x[k + 9], S31, 3654602809); - d = _HH(d, a, b, c, x[k + 12], S32, 3873151461); - c = _HH(c, d, a, b, x[k + 15], S33, 530742520); - b = _HH(b, c, d, a, x[k + 2], S34, 3299628645); - a = _II(a, b, c, d, x[k + 0], S41, 4096336452); - d = _II(d, a, b, c, x[k + 7], S42, 1126891415); - c = _II(c, d, a, b, x[k + 14], S43, 2878612391); - b = _II(b, c, d, a, x[k + 5], S44, 4237533241); - a = _II(a, b, c, d, x[k + 12], S41, 1700485571); - d = _II(d, a, b, c, x[k + 3], S42, 2399980690); - c = _II(c, d, a, b, x[k + 10], S43, 4293915773); - b = _II(b, c, d, a, x[k + 1], S44, 2240044497); - a = _II(a, b, c, d, x[k + 8], S41, 1873313359); - d = _II(d, a, b, c, x[k + 15], S42, 4264355552); - c = _II(c, d, a, b, x[k + 6], S43, 2734768916); - b = _II(b, c, d, a, x[k + 13], S44, 1309151649); - a = _II(a, b, c, d, x[k + 4], S41, 4149444226); - d = _II(d, a, b, c, x[k + 11], S42, 3174756917); - c = _II(c, d, a, b, x[k + 2], S43, 718787259); - b = _II(b, c, d, a, x[k + 9], S44, 3951481745); - a = addUnsigned(a, AA); - b = addUnsigned(b, BB); - c = addUnsigned(c, CC); - d = addUnsigned(d, DD); - } - var temp = wordToHex(a) + wordToHex(b) + wordToHex(c) + wordToHex(d); - 
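// The 128-bit digest is the hex encoding of the four state words (a, b, c, d),
// each serialized byte-by-byte in little-endian order by wordToHex above and
// concatenated in order.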
return temp.toLowerCase(); - } - }; - return md5; - } ]); -})(window, window.angular, void 0); diff --git a/awx/ui/client/lib/angular-md5/angular-md5.min.js b/awx/ui/client/lib/angular-md5/angular-md5.min.js deleted file mode 100644 index 44da9b18f5..0000000000 --- a/awx/ui/client/lib/angular-md5/angular-md5.min.js +++ /dev/null @@ -1,7 +0,0 @@ -/* - angular-md5 - v0.1.7 - 2014-01-20 -*/ - -!function(a,b){b.module("angular-md5",["gdi2290.md5"]),b.module("ngMd5",["gdi2290.md5"]),b.module("gdi2290.md5",["gdi2290.gravatar-filter","gdi2290.md5-service","gdi2290.md5-filter"]),b.module("gdi2290.gravatar-filter",[]).filter("gravatar",["md5",function(a){var b={};return function(c,d){return b[c]||(d=d?a.createHash(d.toString().toLowerCase()):"",b[c]=c?a.createHash(c.toString().toLowerCase()):d),b[c]}}]),b.module("gdi2290.md5-filter",[]).filter("md5",["md5",function(a){return function(b){return b?a.createHash(b.toString().toLowerCase()):b}}]),b.module("gdi2290.md5-service",[]).factory("md5",[function(){var a={createHash:function(a){var b,c,d,e,f,g,h,i,j,k,l=function(a,b){return a<>>32-b},m=function(a,b){var c,d,e,f,g;return e=2147483648&a,f=2147483648&b,c=1073741824&a,d=1073741824&b,g=(1073741823&a)+(1073741823&b),c&d?2147483648^g^e^f:c|d?1073741824&g?3221225472^g^e^f:1073741824^g^e^f:g^e^f},n=function(a,b,c){return a&b|~a&c},o=function(a,b,c){return a&c|b&~c},p=function(a,b,c){return a^b^c},q=function(a,b,c){return b^(a|~c)},r=function(a,b,c,d,e,f,g){return a=m(a,m(m(n(b,c,d),e),g)),m(l(a,f),b)},s=function(a,b,c,d,e,f,g){return a=m(a,m(m(o(b,c,d),e),g)),m(l(a,f),b)},t=function(a,b,c,d,e,f,g){return a=m(a,m(m(p(b,c,d),e),g)),m(l(a,f),b)},u=function(a,b,c,d,e,f,g){return a=m(a,m(m(q(b,c,d),e),g)),m(l(a,f),b)},v=function(a){for(var b,c=a.length,d=c+8,e=(d-d%64)/64,f=16*(e+1),g=new Array(f-1),h=0,i=0;c>i;)b=(i-i%4)/4,h=i%4*8,g[b]=g[b]|a.charCodeAt(i)<>>29,g},w=function(a){var b,c,d="",e="";for(c=0;3>=c;c++)b=a>>>8*c&255,e="0"+b.toString(16),d+=e.substr(e.length-2,2);return 
d},x=[],y=7,z=12,A=17,B=22,C=5,D=9,E=14,F=20,G=4,H=11,I=16,J=23,K=6,L=10,M=15,N=21;for(x=v(a),h=1732584193,i=4023233417,j=2562383102,k=271733878,b=x.length,c=0;b>c;c+=16)d=h,e=i,f=j,g=k,h=r(h,i,j,k,x[c+0],y,3614090360),k=r(k,h,i,j,x[c+1],z,3905402710),j=r(j,k,h,i,x[c+2],A,606105819),i=r(i,j,k,h,x[c+3],B,3250441966),h=r(h,i,j,k,x[c+4],y,4118548399),k=r(k,h,i,j,x[c+5],z,1200080426),j=r(j,k,h,i,x[c+6],A,2821735955),i=r(i,j,k,h,x[c+7],B,4249261313),h=r(h,i,j,k,x[c+8],y,1770035416),k=r(k,h,i,j,x[c+9],z,2336552879),j=r(j,k,h,i,x[c+10],A,4294925233),i=r(i,j,k,h,x[c+11],B,2304563134),h=r(h,i,j,k,x[c+12],y,1804603682),k=r(k,h,i,j,x[c+13],z,4254626195),j=r(j,k,h,i,x[c+14],A,2792965006),i=r(i,j,k,h,x[c+15],B,1236535329),h=s(h,i,j,k,x[c+1],C,4129170786),k=s(k,h,i,j,x[c+6],D,3225465664),j=s(j,k,h,i,x[c+11],E,643717713),i=s(i,j,k,h,x[c+0],F,3921069994),h=s(h,i,j,k,x[c+5],C,3593408605),k=s(k,h,i,j,x[c+10],D,38016083),j=s(j,k,h,i,x[c+15],E,3634488961),i=s(i,j,k,h,x[c+4],F,3889429448),h=s(h,i,j,k,x[c+9],C,568446438),k=s(k,h,i,j,x[c+14],D,3275163606),j=s(j,k,h,i,x[c+3],E,4107603335),i=s(i,j,k,h,x[c+8],F,1163531501),h=s(h,i,j,k,x[c+13],C,2850285829),k=s(k,h,i,j,x[c+2],D,4243563512),j=s(j,k,h,i,x[c+7],E,1735328473),i=s(i,j,k,h,x[c+12],F,2368359562),h=t(h,i,j,k,x[c+5],G,4294588738),k=t(k,h,i,j,x[c+8],H,2272392833),j=t(j,k,h,i,x[c+11],I,1839030562),i=t(i,j,k,h,x[c+14],J,4259657740),h=t(h,i,j,k,x[c+1],G,2763975236),k=t(k,h,i,j,x[c+4],H,1272893353),j=t(j,k,h,i,x[c+7],I,4139469664),i=t(i,j,k,h,x[c+10],J,3200236656),h=t(h,i,j,k,x[c+13],G,681279174),k=t(k,h,i,j,x[c+0],H,3936430074),j=t(j,k,h,i,x[c+3],I,3572445317),i=t(i,j,k,h,x[c+6],J,76029189),h=t(h,i,j,k,x[c+9],G,3654602809),k=t(k,h,i,j,x[c+12],H,3873151461),j=t(j,k,h,i,x[c+15],I,530742520),i=t(i,j,k,h,x[c+2],J,3299628645),h=u(h,i,j,k,x[c+0],K,4096336452),k=u(k,h,i,j,x[c+7],L,1126891415),j=u(j,k,h,i,x[c+14],M,2878612391),i=u(i,j,k,h,x[c+5],N,4237533241),h=u(h,i,j,k,x[c+12],K,1700485571),k=u(k,h,i,j,x[c+3],L,2399980690),j=u(j,k,h,i,x[c+10],M,4293915773),i=u(i,j,k,h,x[c+1],N,2240044497),h=u(h,i,j,k,x[c+8],K,1873313359),k=u(k,h,i,j,x[c+15],L,4264355552),j=u(j,k,h,i,x[c+6],M,2734768916),i=u(i,j,k,h,x[c+13],N,1309151649),h=u(h,i,j,k,x[c+4],K,4149444226),k=u(k,h,i,j,x[c+11],L,3174756917),j=u(j,k,h,i,x[c+2],M,718787259),i=u(i,j,k,h,x[c+9],N,3951481745),h=m(h,d),i=m(i,e),j=m(j,f),k=m(k,g);var O=w(h)+w(i)+w(j)+w(k);return O.toLowerCase()}};return a}])}(this,this.angular,void 0); -//# sourceMappingURL=angular-md5.min.js.map \ No newline at end of file diff --git a/awx/ui/client/lib/angular-md5/angular-md5.min.js.map b/awx/ui/client/lib/angular-md5/angular-md5.min.js.map deleted file mode 100644 index 10b2f564a6..0000000000 --- a/awx/ui/client/lib/angular-md5/angular-md5.min.js.map +++ /dev/null @@ -1 +0,0 @@ 
-{"version":3,"file":"angular-md5.min.js","sources":["?",".tmp/angular-md5.js"],"names":["window","angular","module","filter","md5","cache","text","defaultText","createHash","toString","toLowerCase","factory","str","xl","k","AA","BB","CC","DD","a","b","c","d","rotateLeft","lValue","iShiftBits","addUnsigned","lX","lY","lX4","lY4","lX8","lY8","lResult","_F","x","y","z","_G","_H","_I","_FF","s","ac","_GG","_HH","_II","convertToWordArray","lWordCount","lMessageLength","length","lNumberOfWords_temp1","lNumberOfWords_temp2","lNumberOfWords","lWordArray","Array","lBytePosition","lByteCount","charCodeAt","wordToHex","lByte","lCount","wordToHexValue","wordToHexValue_temp","substr","S11","S12","S13","S14","S21","S22","S23","S24","S31","S32","S33","S34","S41","S42","S43","S44","temp","this"],"mappings":";;;;;CAAA,SAAUA,EAAOC,GCAjBA,EAAQC,OAAO,eAAgB,gBAC/BD,EAAQC,OAAO,SAAU,gBACzBD,EAAQC,OAAO,eACb,0BACA,sBACA,uBAEFD,EAAQC,OAAO,8BAA+BC,OAAO,YACnD,MACA,SAAUC,GACR,GAAIC,KACJ,OAAO,UAAUC,EAAMC,GAKrB,MAJKF,GAAMC,KACTC,EAAcA,EAAcH,EAAII,WAAWD,EAAYE,WAAWC,eAAiB,GACnFL,EAAMC,GAAQA,EAAOF,EAAII,WAAWF,EAAKG,WAAWC,eAAiBH,GAEhEF,EAAMC,OAInBL,EAAQC,OAAO,yBAA0BC,OAAO,OAC9C,MACA,SAAUC,GACR,MAAO,UAAUE,GACf,MAAOA,GAAOF,EAAII,WAAWF,EAAKG,WAAWC,eAAiBJ,MAIpEL,EAAQC,OAAO,0BAA2BS,QAAQ,OAAQ,WACtD,GAAIP,IACAI,WAAY,SAAUI,GACpB,GAAIC,GAmFQC,EAAGC,EAAIC,EAAIC,EAAIC,EAAIC,EAAGC,EAAGC,EAAGC,EAlFpCC,EAAa,SAAUC,EAAQC,GACjC,MAAOD,IAAUC,EAAaD,IAAW,GAAKC,GAE5CC,EAAc,SAAUC,EAAIC,GAC9B,GAAIC,GAAKC,EAAKC,EAAKC,EAAKC,CAMxB,OALAF,GAAW,WAALJ,EACNK,EAAW,WAALJ,EACNC,EAAW,WAALF,EACNG,EAAW,WAALF,EACNK,GAAgB,WAALN,IAAyB,WAALC,GAC3BC,EAAMC,EACS,WAAVG,EAAuBF,EAAMC,EAElCH,EAAMC,EACM,WAAVG,EACe,WAAVA,EAAuBF,EAAMC,EAEnB,WAAVC,EAAuBF,EAAMC,EAG/BC,EAAUF,EAAMC,GAGvBE,EAAK,SAAUC,EAAGC,EAAGC,GACvB,MAAOF,GAAIC,GAAKD,EAAIE,GAElBC,EAAK,SAAUH,EAAGC,EAAGC,GACvB,MAAOF,GAAIE,EAAID,GAAKC,GAElBE,EAAK,SAAUJ,EAAGC,EAAGC,GACvB,MAAOF,GAAIC,EAAIC,GAEbG,EAAK,SAAUL,EAAGC,EAAGC,GACvB,MAAOD,IAAKD,GAAKE,IAEfI,EAAM,SAAUtB,EAAGC,EAAGC,EAAGC,EAAGa,EAAGO,EAAGC,GAEpC,MADAxB,GAAIO,EAAYP,EAAGO,EAAYA,EAAYQ,EAAGd,EAAGC,EAAGC,GAAIa,GAAIQ,IACrDjB,EAAYH,EAAWJ,EAAGuB,GAAItB,IAEnCwB,EAAM,SAAUzB,EAAGC,EAAGC,EAAGC,EAAGa,EAAGO,EAAGC,GAEpC,MADAxB,GAAIO,EAAYP,EAAGO,EAAYA,EAAYY,EAAGlB,EAAGC,EAAGC,GAAIa,GAAIQ,IACrDjB,EAAYH,EAAWJ,EAAGuB,GAAItB,IAEnCyB,EAAM,SAAU1B,EAAGC,EAAGC,EAAGC,EAAGa,EAAGO,EAAGC,GAEpC,MADAxB,GAAIO,EAAYP,EAAGO,EAAYA,EAAYa,EAAGnB,EAAGC,EAAGC,GAAIa,GAAIQ,IACrDjB,EAAYH,EAAWJ,EAAGuB,GAAItB,IAEnC0B,EAAM,SAAU3B,EAAGC,EAAGC,EAAGC,EAAGa,EAAGO,EAAGC,GAEpC,MADAxB,GAAIO,EAAYP,EAAGO,EAAYA,EAAYc,EAAGpB,EAAGC,EAAGC,GAAIa,GAAIQ,IACrDjB,EAAYH,EAAWJ,EAAGuB,GAAItB,IAEnC2B,EAAqB,SAAUnC,GASjC,IARA,GAAIoC,GACAC,EAAiBrC,EAAIsC,OACrBC,EAAuBF,EAAiB,EACxCG,GAAwBD,EAAuBA,EAAuB,IAAM,GAC5EE,EAA8C,IAA5BD,EAAuB,GACzCE,EAAa,GAAIC,OAAMF,EAAiB,GACxCG,EAAgB,EAChBC,EAAa,EACGR,EAAbQ,GACLT,GAAcS,EAAaA,EAAa,GAAK,EAC7CD,EAAgBC,EAAa,EAAI,EACjCH,EAAWN,GAAcM,EAAWN,GAAcpC,EAAI8C,WAAWD,IAAeD,EAChFC,GAOF,OALAT,IAAcS,EAAaA,EAAa,GAAK,EAC7CD,EAAgBC,EAAa,EAAI,EACjCH,EAAWN,GAAcM,EAAWN,GAAc,KAAOQ,EACzDF,EAAWD,EAAiB,GAAKJ,GAAkB,EACnDK,EAAWD,EAAiB,GAAKJ,IAAmB,GAC7CK,GAELK,EAAY,SAAUnC,GACxB,GAAmDoC,GAAOC,EAAtDC,EAAiB,GAAIC,EAAsB,EAC/C,KAAKF,EAAS,EAAa,GAAVA,EAAaA,IAC5BD,EAAQpC,IAAoB,EAATqC,EAAa,IAChCE,EAAsB,IAAMH,EAAMnD,SAAS,IAC3CqD,GAAkCC,EAAoBC,OAAOD,EAAoBb,OAAS,EAAG,EAE/F,OAAOY,IAEL3B,KAAuC8B,EAAM,EAAGC,EAAM,GAAIC,EAAM,GAAIC,EAAM,GAAIC,EAAM,EAAGC,EAAM,EAAGC,EAAM,GAAIC,EAAM,GAAIC,EAAM,EAAGC,EAAM,GAAIC,EAAM,GAAIC,EAAM,GAAIC,EAAM,EAAGC,EAAM,GAAIC,EAAM,GAAIC,EAAM,EAOlM,KANA7C,EAAIY,EAAmBnC,GACvBO,EAAI,WACJC,EAAI,WACJC,EAAI,WACJC,EAAI,U
ACJT,EAAKsB,EAAEe,OACFpC,EAAI,EAAOD,EAAJC,EAAQA,GAAK,GACvBC,EAAKI,EACLH,EAAKI,EACLH,EAAKI,EACLH,EAAKI,EACLH,EAAIsB,EAAItB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAImD,EAAK,YACnC3C,EAAImB,EAAInB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAIoD,EAAK,YACnC7C,EAAIoB,EAAIpB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAIqD,EAAK,WACnC/C,EAAIqB,EAAIrB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAIsD,EAAK,YACnCjD,EAAIsB,EAAItB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAImD,EAAK,YACnC3C,EAAImB,EAAInB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAIoD,EAAK,YACnC7C,EAAIoB,EAAIpB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAIqD,EAAK,YACnC/C,EAAIqB,EAAIrB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAIsD,EAAK,YACnCjD,EAAIsB,EAAItB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAImD,EAAK,YACnC3C,EAAImB,EAAInB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAIoD,EAAK,YACnC7C,EAAIoB,EAAIpB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAKqD,EAAK,YACpC/C,EAAIqB,EAAIrB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,IAAKsD,EAAK,YACpCjD,EAAIsB,EAAItB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,IAAKmD,EAAK,YACpC3C,EAAImB,EAAInB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,IAAKoD,EAAK,YACpC7C,EAAIoB,EAAIpB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAKqD,EAAK,YACpC/C,EAAIqB,EAAIrB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,IAAKsD,EAAK,YACpCjD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAIuD,EAAK,YACnC/C,EAAIsB,EAAItB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAIwD,EAAK,YACnCjD,EAAIuB,EAAIvB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAKyD,EAAK,WACpCnD,EAAIwB,EAAIxB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAI0D,EAAK,YACnCrD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAIuD,EAAK,YACnC/C,EAAIsB,EAAItB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,IAAKwD,EAAK,UACpCjD,EAAIuB,EAAIvB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAKyD,EAAK,YACpCnD,EAAIwB,EAAIxB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAI0D,EAAK,YACnCrD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAIuD,EAAK,WACnC/C,EAAIsB,EAAItB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,IAAKwD,EAAK,YACpCjD,EAAIuB,EAAIvB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAIyD,EAAK,YACnCnD,EAAIwB,EAAIxB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAI0D,EAAK,YACnCrD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,IAAKuD,EAAK,YACpC/C,EAAIsB,EAAItB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAIwD,EAAK,YACnCjD,EAAIuB,EAAIvB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAIyD,EAAK,YACnCnD,EAAIwB,EAAIxB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,IAAK0D,EAAK,YACpCrD,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAI2D,EAAK,YACnCnD,EAAIuB,EAAIvB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAI4D,EAAK,YACnCrD,EAAIwB,EAAIxB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAK6D,EAAK,YACpCvD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,IAAK8D,EAAK,YACpCzD,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAI2D,EAAK,YACnCnD,EAAIuB,EAAIvB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAI4D,EAAK,YACnCrD,EAAIwB,EAAIxB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAI6D,EAAK,YACnCvD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,IAAK8D,EAAK,YACpCzD,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,IAAK2D,EAAK,WACpCnD,EAAIuB,EAAIvB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAI4D,EAAK,YACnCrD,EAAIwB,EAAIxB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAI6D,EAAK,YACnCvD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAI8D,EAAK,UACnCzD,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAI2D,EAAK,YACnCnD,EAAIuB,EAAIvB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,IAAK4D,EAAK,YACpCrD,EAAIwB,EAAIxB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAK6D,EAAK,WACpCvD,EAAIyB,EAAIzB,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAI8D,EAAK,YACnCzD,EAAI2B,EAAI3B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAI+D,EAAK,YACnCvD,EAAIw
B,EAAIxB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAIgE,EAAK,YACnCzD,EAAIyB,EAAIzB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAKiE,EAAK,YACpC3D,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAIkE,EAAK,YACnC7D,EAAI2B,EAAI3B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,IAAK+D,EAAK,YACpCvD,EAAIwB,EAAIxB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,GAAIgE,EAAK,YACnCzD,EAAIyB,EAAIzB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,IAAKiE,EAAK,YACpC3D,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAIkE,EAAK,YACnC7D,EAAI2B,EAAI3B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAI+D,EAAK,YACnCvD,EAAIwB,EAAIxB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,IAAKgE,EAAK,YACpCzD,EAAIyB,EAAIzB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAIiE,EAAK,YACnC3D,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,IAAKkE,EAAK,YACpC7D,EAAI2B,EAAI3B,EAAGC,EAAGC,EAAGC,EAAGa,EAAErB,EAAI,GAAI+D,EAAK,YACnCvD,EAAIwB,EAAIxB,EAAGH,EAAGC,EAAGC,EAAGc,EAAErB,EAAI,IAAKgE,EAAK,YACpCzD,EAAIyB,EAAIzB,EAAGC,EAAGH,EAAGC,EAAGe,EAAErB,EAAI,GAAIiE,EAAK,WACnC3D,EAAI0B,EAAI1B,EAAGC,EAAGC,EAAGH,EAAGgB,EAAErB,EAAI,GAAIkE,EAAK,YACnC7D,EAAIO,EAAYP,EAAGJ,GACnBK,EAAIM,EAAYN,EAAGJ,GACnBK,EAAIK,EAAYL,EAAGJ,GACnBK,EAAII,EAAYJ,EAAGJ,EAErB,IAAI+D,GAAOtB,EAAUxC,GAAKwC,EAAUvC,GAAKuC,EAAUtC,GAAKsC,EAAUrC,EAClE,OAAO2D,GAAKvE,eAGlB,OAAON,ODvMsC8E,KAAKA,KAAKjF,QAAQ"} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-md5/bower.json b/awx/ui/client/lib/angular-md5/bower.json deleted file mode 100644 index e77d431f23..0000000000 --- a/awx/ui/client/lib/angular-md5/bower.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "name": "angular-md5", - "version": "0.1.7", - "main": "angular-md5.js", - "description": "A md5 crypto component for Angular.js", - "author": [ - { - "name": "Patrick Stapleton", - "email": "github@gdi2290.com", - "url": "www.gdi2290.com" - } - ], - "keywords": [ - "PatrickJS", - "gdi2290", - "angular.js", - "angularjs", - "angular", - "crypto", - "md5" - ], - "ignore": [ - "**/.*", - "*.yml", - "*.xml", - "node_modules", - "bower_components", - "test", - "tests" - ], - "dependencies": { - "angular": "*" - } -} diff --git a/awx/ui/client/lib/angular-md5/example/index.html b/awx/ui/client/lib/angular-md5/example/index.html deleted file mode 100644 index 406a8957a0..0000000000 --- a/awx/ui/client/lib/angular-md5/example/index.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - Angular md5 - - - - - - - Your email Hash is: {{ message }} - - - - diff --git a/awx/ui/client/lib/angular-md5/lib/filters/gravatar.js b/awx/ui/client/lib/angular-md5/lib/filters/gravatar.js deleted file mode 100644 index 74b300b6ee..0000000000 --- a/awx/ui/client/lib/angular-md5/lib/filters/gravatar.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict'; - -angular.module('gdi2290.gravatar-filter', []) -.filter('gravatar', function(md5) { - var cache = {}; - return function(text, defaultText) { - if (!cache[text]) { - defaultText = (defaultText) ? md5.createHash(defaultText.toString().toLowerCase()) : ''; - cache[text] = (text) ? md5.createHash(text.toString().toLowerCase()) : defaultText; - } - return cache[text]; - }; -}); diff --git a/awx/ui/client/lib/angular-md5/lib/filters/md5.js b/awx/ui/client/lib/angular-md5/lib/filters/md5.js deleted file mode 100644 index 23eeecaff7..0000000000 --- a/awx/ui/client/lib/angular-md5/lib/filters/md5.js +++ /dev/null @@ -1,8 +0,0 @@ -'use strict'; - -angular.module('gdi2290.md5-filter', []) -.filter('md5', function(md5) { - return function(text) { - return (text) ? 
md5.createHash(text.toString().toLowerCase()) : text; - }; -}); diff --git a/awx/ui/client/lib/angular-md5/lib/index.js b/awx/ui/client/lib/angular-md5/lib/index.js deleted file mode 100644 index 4227ae5029..0000000000 --- a/awx/ui/client/lib/angular-md5/lib/index.js +++ /dev/null @@ -1,8 +0,0 @@ - -angular.module('angular-md5', ['gdi2290.md5']); -angular.module('ngMd5', ['gdi2290.md5']); -angular.module('gdi2290.md5', [ - 'gdi2290.gravatar-filter', - 'gdi2290.md5-service', - 'gdi2290.md5-filter' -]); diff --git a/awx/ui/client/lib/angular-md5/lib/services/md5.js b/awx/ui/client/lib/angular-md5/lib/services/md5.js deleted file mode 100644 index 736ce77127..0000000000 --- a/awx/ui/client/lib/angular-md5/lib/services/md5.js +++ /dev/null @@ -1,215 +0,0 @@ -'use strict'; - -angular.module('gdi2290.md5-service', []) -.factory('md5', [function() { - - var md5 = { - - createHash: function(str) { - - var xl; - - var rotateLeft = function (lValue, iShiftBits) { - return (lValue << iShiftBits) | (lValue >>> (32 - iShiftBits)); - }; - - var addUnsigned = function (lX, lY) { - var lX4, lY4, lX8, lY8, lResult; - lX8 = (lX & 0x80000000); - lY8 = (lY & 0x80000000); - lX4 = (lX & 0x40000000); - lY4 = (lY & 0x40000000); - lResult = (lX & 0x3FFFFFFF) + (lY & 0x3FFFFFFF); - if (lX4 & lY4) { - return (lResult ^ 0x80000000 ^ lX8 ^ lY8); - } - if (lX4 | lY4) { - if (lResult & 0x40000000) { - return (lResult ^ 0xC0000000 ^ lX8 ^ lY8); - } else { - return (lResult ^ 0x40000000 ^ lX8 ^ lY8); - } - } else { - return (lResult ^ lX8 ^ lY8); - } - }; - - var _F = function (x, y, z) { - return (x & y) | ((~x) & z); - }; - var _G = function (x, y, z) { - return (x & z) | (y & (~z)); - }; - var _H = function (x, y, z) { - return (x ^ y ^ z); - }; - var _I = function (x, y, z) { - return (y ^ (x | (~z))); - }; - - var _FF = function (a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_F(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - - var _GG = function (a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_G(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - - var _HH = function (a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_H(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - - var _II = function (a, b, c, d, x, s, ac) { - a = addUnsigned(a, addUnsigned(addUnsigned(_I(b, c, d), x), ac)); - return addUnsigned(rotateLeft(a, s), b); - }; - - var convertToWordArray = function (str) { - var lWordCount; - var lMessageLength = str.length; - var lNumberOfWords_temp1 = lMessageLength + 8; - var lNumberOfWords_temp2 = (lNumberOfWords_temp1 - (lNumberOfWords_temp1 % 64)) / 64; - var lNumberOfWords = (lNumberOfWords_temp2 + 1) * 16; - var lWordArray = new Array(lNumberOfWords - 1); - var lBytePosition = 0; - var lByteCount = 0; - while (lByteCount < lMessageLength) { - lWordCount = (lByteCount - (lByteCount % 4)) / 4; - lBytePosition = (lByteCount % 4) * 8; - lWordArray[lWordCount] = (lWordArray[lWordCount] | (str.charCodeAt(lByteCount) << lBytePosition)); - lByteCount++; - } - lWordCount = (lByteCount - (lByteCount % 4)) / 4; - lBytePosition = (lByteCount % 4) * 8; - lWordArray[lWordCount] = lWordArray[lWordCount] | (0x80 << lBytePosition); - lWordArray[lNumberOfWords - 2] = lMessageLength << 3; - lWordArray[lNumberOfWords - 1] = lMessageLength >>> 29; - return lWordArray; - }; - - var wordToHex = function (lValue) { - var wordToHexValue = '', - wordToHexValue_temp = '', - lByte, lCount; - for (lCount = 0; lCount 
<= 3; lCount++) { - lByte = (lValue >>> (lCount * 8)) & 255; - wordToHexValue_temp = '0' + lByte.toString(16); - wordToHexValue = wordToHexValue + wordToHexValue_temp.substr(wordToHexValue_temp.length - 2, 2); - } - return wordToHexValue; - }; - - var x = [], - k, AA, BB, CC, DD, a, b, c, d, S11 = 7, - S12 = 12, - S13 = 17, - S14 = 22, - S21 = 5, - S22 = 9, - S23 = 14, - S24 = 20, - S31 = 4, - S32 = 11, - S33 = 16, - S34 = 23, - S41 = 6, - S42 = 10, - S43 = 15, - S44 = 21; - - //str = this.utf8_encode(str); - x = convertToWordArray(str); - a = 0x67452301; - b = 0xEFCDAB89; - c = 0x98BADCFE; - d = 0x10325476; - - xl = x.length; - for (k = 0; k < xl; k += 16) { - AA = a; - BB = b; - CC = c; - DD = d; - a = _FF(a, b, c, d, x[k + 0], S11, 0xD76AA478); - d = _FF(d, a, b, c, x[k + 1], S12, 0xE8C7B756); - c = _FF(c, d, a, b, x[k + 2], S13, 0x242070DB); - b = _FF(b, c, d, a, x[k + 3], S14, 0xC1BDCEEE); - a = _FF(a, b, c, d, x[k + 4], S11, 0xF57C0FAF); - d = _FF(d, a, b, c, x[k + 5], S12, 0x4787C62A); - c = _FF(c, d, a, b, x[k + 6], S13, 0xA8304613); - b = _FF(b, c, d, a, x[k + 7], S14, 0xFD469501); - a = _FF(a, b, c, d, x[k + 8], S11, 0x698098D8); - d = _FF(d, a, b, c, x[k + 9], S12, 0x8B44F7AF); - c = _FF(c, d, a, b, x[k + 10], S13, 0xFFFF5BB1); - b = _FF(b, c, d, a, x[k + 11], S14, 0x895CD7BE); - a = _FF(a, b, c, d, x[k + 12], S11, 0x6B901122); - d = _FF(d, a, b, c, x[k + 13], S12, 0xFD987193); - c = _FF(c, d, a, b, x[k + 14], S13, 0xA679438E); - b = _FF(b, c, d, a, x[k + 15], S14, 0x49B40821); - a = _GG(a, b, c, d, x[k + 1], S21, 0xF61E2562); - d = _GG(d, a, b, c, x[k + 6], S22, 0xC040B340); - c = _GG(c, d, a, b, x[k + 11], S23, 0x265E5A51); - b = _GG(b, c, d, a, x[k + 0], S24, 0xE9B6C7AA); - a = _GG(a, b, c, d, x[k + 5], S21, 0xD62F105D); - d = _GG(d, a, b, c, x[k + 10], S22, 0x2441453); - c = _GG(c, d, a, b, x[k + 15], S23, 0xD8A1E681); - b = _GG(b, c, d, a, x[k + 4], S24, 0xE7D3FBC8); - a = _GG(a, b, c, d, x[k + 9], S21, 0x21E1CDE6); - d = _GG(d, a, b, c, x[k + 14], S22, 0xC33707D6); - c = _GG(c, d, a, b, x[k + 3], S23, 0xF4D50D87); - b = _GG(b, c, d, a, x[k + 8], S24, 0x455A14ED); - a = _GG(a, b, c, d, x[k + 13], S21, 0xA9E3E905); - d = _GG(d, a, b, c, x[k + 2], S22, 0xFCEFA3F8); - c = _GG(c, d, a, b, x[k + 7], S23, 0x676F02D9); - b = _GG(b, c, d, a, x[k + 12], S24, 0x8D2A4C8A); - a = _HH(a, b, c, d, x[k + 5], S31, 0xFFFA3942); - d = _HH(d, a, b, c, x[k + 8], S32, 0x8771F681); - c = _HH(c, d, a, b, x[k + 11], S33, 0x6D9D6122); - b = _HH(b, c, d, a, x[k + 14], S34, 0xFDE5380C); - a = _HH(a, b, c, d, x[k + 1], S31, 0xA4BEEA44); - d = _HH(d, a, b, c, x[k + 4], S32, 0x4BDECFA9); - c = _HH(c, d, a, b, x[k + 7], S33, 0xF6BB4B60); - b = _HH(b, c, d, a, x[k + 10], S34, 0xBEBFBC70); - a = _HH(a, b, c, d, x[k + 13], S31, 0x289B7EC6); - d = _HH(d, a, b, c, x[k + 0], S32, 0xEAA127FA); - c = _HH(c, d, a, b, x[k + 3], S33, 0xD4EF3085); - b = _HH(b, c, d, a, x[k + 6], S34, 0x4881D05); - a = _HH(a, b, c, d, x[k + 9], S31, 0xD9D4D039); - d = _HH(d, a, b, c, x[k + 12], S32, 0xE6DB99E5); - c = _HH(c, d, a, b, x[k + 15], S33, 0x1FA27CF8); - b = _HH(b, c, d, a, x[k + 2], S34, 0xC4AC5665); - a = _II(a, b, c, d, x[k + 0], S41, 0xF4292244); - d = _II(d, a, b, c, x[k + 7], S42, 0x432AFF97); - c = _II(c, d, a, b, x[k + 14], S43, 0xAB9423A7); - b = _II(b, c, d, a, x[k + 5], S44, 0xFC93A039); - a = _II(a, b, c, d, x[k + 12], S41, 0x655B59C3); - d = _II(d, a, b, c, x[k + 3], S42, 0x8F0CCC92); - c = _II(c, d, a, b, x[k + 10], S43, 0xFFEFF47D); - b = _II(b, c, d, a, x[k + 1], S44, 0x85845DD1); - a = _II(a, b, c, d, x[k + 
8], S41, 0x6FA87E4F); - d = _II(d, a, b, c, x[k + 15], S42, 0xFE2CE6E0); - c = _II(c, d, a, b, x[k + 6], S43, 0xA3014314); - b = _II(b, c, d, a, x[k + 13], S44, 0x4E0811A1); - a = _II(a, b, c, d, x[k + 4], S41, 0xF7537E82); - d = _II(d, a, b, c, x[k + 11], S42, 0xBD3AF235); - c = _II(c, d, a, b, x[k + 2], S43, 0x2AD7D2BB); - b = _II(b, c, d, a, x[k + 9], S44, 0xEB86D391); - a = addUnsigned(a, AA); - b = addUnsigned(b, BB); - c = addUnsigned(c, CC); - d = addUnsigned(d, DD); - } - - var temp = wordToHex(a) + wordToHex(b) + wordToHex(c) + wordToHex(d); - - return temp.toLowerCase(); - } - - }; - - return md5; - -}]); diff --git a/awx/ui/client/lib/angular-md5/package.json b/awx/ui/client/lib/angular-md5/package.json deleted file mode 100644 index be431aeac4..0000000000 --- a/awx/ui/client/lib/angular-md5/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "angular-md5", - "version": "0.1.7", - "main": "angular-md5.js", - "description": "A md5 crypto component for Angular.js", - "homepage": "https://github.com/gdi2290/angular-md5", - "bugs": "https://github.com/gdi2290/angular-md5/issues", - "author": { - "name": "Patrick Stapleton", - "email": "github@gdi2290.com", - "url": "www.gdi2290.com" - }, - "repository": { - "type": "git", - "url": "git@github.com:gdi2290/angular-md5.git" - }, - "keywords": [ - "PatrickJS", - "gdi2290", - "angular.js", - "angularjs", - "angular", - "crypto", - "md5" - ], - "licenses": [ - { - "type": "MIT" - } - ], - "dependencies": {}, - "devDependencies": { - "load-grunt-tasks": "~0.1.0", - "time-grunt": "~0.1.0", - "grunt": "*", - "grunt-contrib-copy": "*", - "grunt-contrib-watch": "~0.5.0", - "grunt-contrib-concat": "*", - "grunt-contrib-uglify": "*", - "grunt-contrib-connect": "*", - "grunt-contrib-jshint": "~0.6.0", - "grunt-ngmin": "*", - "grunt-contrib-clean": "~0.5.0" - } -} diff --git a/awx/ui/client/lib/angular-mocks/.bower.json b/awx/ui/client/lib/angular-mocks/.bower.json deleted file mode 100644 index 9fea1563a4..0000000000 --- a/awx/ui/client/lib/angular-mocks/.bower.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "angular-mocks", - "version": "1.4.3", - "main": "./angular-mocks.js", - "ignore": [], - "dependencies": { - "angular": "1.4.3" - }, - "homepage": "https://github.com/angular/bower-angular-mocks", - "_release": "1.4.3", - "_resolution": { - "type": "version", - "tag": "v1.4.3", - "commit": "7e3beec84afceeb060a3c6def0d7ca965727851c" - }, - "_source": "git://github.com/angular/bower-angular-mocks.git", - "_target": "~1.4.3", - "_originalSource": "angular-mocks" -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-mocks/README.md b/awx/ui/client/lib/angular-mocks/README.md deleted file mode 100644 index 440cce9b78..0000000000 --- a/awx/ui/client/lib/angular-mocks/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# packaged angular-mocks - -This repo is for distribution on `npm` and `bower`. The source for this module is in the -[main AngularJS repo](https://github.com/angular/angular.js/tree/master/src/ngMock). -Please file issues and pull requests against that repo. - -## Install - -You can install this package either with `npm` or with `bower`. 
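Every spec deleted earlier in this diff leans on the two globals this package exposes to the test runner — `module()` to register the module under test and `inject()` to resolve services from the injector. A minimal sketch of that pattern, assuming a Jasmine runner with angular-mocks loaded (the `myFilters` module and `capitalize` filter are illustrative, not from this repo):

```js
'use strict';

describe('capitalize', function () {
    var capitalizeFilter;

    // Register the module that owns the filter, then pull the compiled
    // filter out of the injector before each spec runs.
    beforeEach(module('myFilters'));
    beforeEach(inject(function ($filter) {
        capitalizeFilter = $filter('capitalize');
    }));

    it('should upper-case the first character', function () {
        expect(capitalizeFilter('tower')).toEqual('Tower');
    });
});
```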
- -### npm - -```shell -npm install angular-mocks -``` - -You can `require` ngMock modules: - -```js -var angular = require('angular'); -angular.module('myMod', [ - require('angular-animate'), - require('angular-mocks/ngMock'), - require('angular-mocks/ngAnimateMock') -]); -``` - -### bower - -```shell -bower install angular-mocks -``` - -The mocks are then available at `bower_components/angular-mocks/angular-mocks.js`. - -## Documentation - -Documentation is available on the -[AngularJS docs site](https://docs.angularjs.org/guide/unit-testing). - -## License - -The MIT License - -Copyright (c) 2010-2015 Google, Inc. http://angularjs.org - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/awx/ui/client/lib/angular-mocks/angular-mocks.js b/awx/ui/client/lib/angular-mocks/angular-mocks.js deleted file mode 100644 index 9482b702b3..0000000000 --- a/awx/ui/client/lib/angular-mocks/angular-mocks.js +++ /dev/null @@ -1,2436 +0,0 @@ -/** - * @license AngularJS v1.4.3 - * (c) 2010-2015 Google, Inc. http://angularjs.org - * License: MIT - */ -(function(window, angular, undefined) { - -'use strict'; - -/** - * @ngdoc object - * @name angular.mock - * @description - * - * Namespace from 'angular-mocks.js' which contains testing related code. - */ -angular.mock = {}; - -/** - * ! This is a private undocumented service ! - * - * @name $browser - * - * @description - * This service is a mock implementation of {@link ng.$browser}. It provides fake - * implementation for commonly used browser apis that are hard to test, e.g. setTimeout, xhr, - * cookies, etc... - * - * The api of this service is the same as that of the real {@link ng.$browser $browser}, except - * that there are several helper methods available which can be used in tests.
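 *
 * For illustration (not an official API — this service is private, and the names
 * come from the implementation below): a spec can schedule work with
 * `$browser.defer(fn, 100)` and then run everything that is due with
 * `$browser.defer.flush(100)`, which advances the mock clock deterministically.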
- */ -angular.mock.$BrowserProvider = function() { - this.$get = function() { - return new angular.mock.$Browser(); - }; -}; - -angular.mock.$Browser = function() { - var self = this; - - this.isMock = true; - self.$$url = "http://server/"; - self.$$lastUrl = self.$$url; // used by url polling fn - self.pollFns = []; - - // TODO(vojta): remove this temporary api - self.$$completeOutstandingRequest = angular.noop; - self.$$incOutstandingRequestCount = angular.noop; - - - // register url polling fn - - self.onUrlChange = function(listener) { - self.pollFns.push( - function() { - if (self.$$lastUrl !== self.$$url || self.$$state !== self.$$lastState) { - self.$$lastUrl = self.$$url; - self.$$lastState = self.$$state; - listener(self.$$url, self.$$state); - } - } - ); - - return listener; - }; - - self.$$applicationDestroyed = angular.noop; - self.$$checkUrlChange = angular.noop; - - self.deferredFns = []; - self.deferredNextId = 0; - - self.defer = function(fn, delay) { - delay = delay || 0; - self.deferredFns.push({time:(self.defer.now + delay), fn:fn, id: self.deferredNextId}); - self.deferredFns.sort(function(a, b) { return a.time - b.time;}); - return self.deferredNextId++; - }; - - - /** - * @name $browser#defer.now - * - * @description - * Current milliseconds mock time. - */ - self.defer.now = 0; - - - self.defer.cancel = function(deferId) { - var fnIndex; - - angular.forEach(self.deferredFns, function(fn, index) { - if (fn.id === deferId) fnIndex = index; - }); - - if (fnIndex !== undefined) { - self.deferredFns.splice(fnIndex, 1); - return true; - } - - return false; - }; - - - /** - * @name $browser#defer.flush - * - * @description - * Flushes all pending requests and executes the defer callbacks. - * - * @param {number=} number of milliseconds to flush. See {@link #defer.now} - */ - self.defer.flush = function(delay) { - if (angular.isDefined(delay)) { - self.defer.now += delay; - } else { - if (self.deferredFns.length) { - self.defer.now = self.deferredFns[self.deferredFns.length - 1].time; - } else { - throw new Error('No deferred tasks to be flushed'); - } - } - - while (self.deferredFns.length && self.deferredFns[0].time <= self.defer.now) { - self.deferredFns.shift().fn(); - } - }; - - self.$$baseHref = '/'; - self.baseHref = function() { - return this.$$baseHref; - }; -}; -angular.mock.$Browser.prototype = { - -/** - * @name $browser#poll - * - * @description - * run all fns in pollFns - */ - poll: function poll() { - angular.forEach(this.pollFns, function(pollFn) { - pollFn(); - }); - }, - - url: function(url, replace, state) { - if (angular.isUndefined(state)) { - state = null; - } - if (url) { - this.$$url = url; - // Native pushState serializes & copies the object; simulate it. - this.$$state = angular.copy(state); - return this; - } - - return this.$$url; - }, - - state: function() { - return this.$$state; - }, - - notifyWhenNoOutstandingRequests: function(fn) { - fn(); - } -}; - - -/** - * @ngdoc provider - * @name $exceptionHandlerProvider - * - * @description - * Configures the mock implementation of {@link ng.$exceptionHandler} to rethrow or to log errors - * passed to the `$exceptionHandler`. - */ - -/** - * @ngdoc service - * @name $exceptionHandler - * - * @description - * Mock implementation of {@link ng.$exceptionHandler} that rethrows or logs errors passed - * to it. See {@link ngMock.$exceptionHandlerProvider $exceptionHandlerProvider} for configuration - * information. 
- * - * - * ```js - * describe('$exceptionHandlerProvider', function() { - * - * it('should capture log messages and exceptions', function() { - * - * module(function($exceptionHandlerProvider) { - * $exceptionHandlerProvider.mode('log'); - * }); - * - * inject(function($log, $exceptionHandler, $timeout) { - * $timeout(function() { $log.log(1); }); - * $timeout(function() { $log.log(2); throw 'banana peel'; }); - * $timeout(function() { $log.log(3); }); - * expect($exceptionHandler.errors).toEqual([]); - * expect($log.assertEmpty()); - * $timeout.flush(); - * expect($exceptionHandler.errors).toEqual(['banana peel']); - * expect($log.log.logs).toEqual([[1], [2], [3]]); - * }); - * }); - * }); - * ``` - */ - -angular.mock.$ExceptionHandlerProvider = function() { - var handler; - - /** - * @ngdoc method - * @name $exceptionHandlerProvider#mode - * - * @description - * Sets the logging mode. - * - * @param {string} mode Mode of operation, defaults to `rethrow`. - * - * - `log`: Sometimes it is desirable to test that an error is thrown, for this case the `log` - * mode stores an array of errors in `$exceptionHandler.errors`, to allow later - * assertion of them. See {@link ngMock.$log#assertEmpty assertEmpty()} and - * {@link ngMock.$log#reset reset()} - * - `rethrow`: If any errors are passed to the handler in tests, it typically means that there - * is a bug in the application or test, so this mock will make these tests fail. - * For any implementations that expect exceptions to be thrown, the `rethrow` mode - * will also maintain a log of thrown errors. - */ - this.mode = function(mode) { - - switch (mode) { - case 'log': - case 'rethrow': - var errors = []; - handler = function(e) { - if (arguments.length == 1) { - errors.push(e); - } else { - errors.push([].slice.call(arguments, 0)); - } - if (mode === "rethrow") { - throw e; - } - }; - handler.errors = errors; - break; - default: - throw new Error("Unknown mode '" + mode + "', only 'log'/'rethrow' modes are allowed!"); - } - }; - - this.$get = function() { - return handler; - }; - - this.mode('rethrow'); -}; - - -/** - * @ngdoc service - * @name $log - * - * @description - * Mock implementation of {@link ng.$log} that gathers all logged messages in arrays - * (one array per logging level). These arrays are exposed as `logs` property of each of the - * level-specific log function, e.g. for level `error` the array is exposed as `$log.error.logs`. - * - */ -angular.mock.$LogProvider = function() { - var debug = true; - - function concat(array1, array2, index) { - return array1.concat(Array.prototype.slice.call(array2, index)); - } - - this.debugEnabled = function(flag) { - if (angular.isDefined(flag)) { - debug = flag; - return this; - } else { - return debug; - } - }; - - this.$get = function() { - var $log = { - log: function() { $log.log.logs.push(concat([], arguments, 0)); }, - warn: function() { $log.warn.logs.push(concat([], arguments, 0)); }, - info: function() { $log.info.logs.push(concat([], arguments, 0)); }, - error: function() { $log.error.logs.push(concat([], arguments, 0)); }, - debug: function() { - if (debug) { - $log.debug.logs.push(concat([], arguments, 0)); - } - } - }; - - /** - * @ngdoc method - * @name $log#reset - * - * @description - * Reset all of the logging arrays to empty. - */ - $log.reset = function() { - /** - * @ngdoc property - * @name $log#log.logs - * - * @description - * Array of messages logged using {@link ng.$log#log `log()`}. 
- * - * @example - * ```js - * $log.log('Some Log'); - * var first = $log.log.logs.unshift(); - * ``` - */ - $log.log.logs = []; - /** - * @ngdoc property - * @name $log#info.logs - * - * @description - * Array of messages logged using {@link ng.$log#info `info()`}. - * - * @example - * ```js - * $log.info('Some Info'); - * var first = $log.info.logs.unshift(); - * ``` - */ - $log.info.logs = []; - /** - * @ngdoc property - * @name $log#warn.logs - * - * @description - * Array of messages logged using {@link ng.$log#warn `warn()`}. - * - * @example - * ```js - * $log.warn('Some Warning'); - * var first = $log.warn.logs.unshift(); - * ``` - */ - $log.warn.logs = []; - /** - * @ngdoc property - * @name $log#error.logs - * - * @description - * Array of messages logged using {@link ng.$log#error `error()`}. - * - * @example - * ```js - * $log.error('Some Error'); - * var first = $log.error.logs.unshift(); - * ``` - */ - $log.error.logs = []; - /** - * @ngdoc property - * @name $log#debug.logs - * - * @description - * Array of messages logged using {@link ng.$log#debug `debug()`}. - * - * @example - * ```js - * $log.debug('Some Error'); - * var first = $log.debug.logs.unshift(); - * ``` - */ - $log.debug.logs = []; - }; - - /** - * @ngdoc method - * @name $log#assertEmpty - * - * @description - * Assert that all of the logging methods have no logged messages. If any messages are present, - * an exception is thrown. - */ - $log.assertEmpty = function() { - var errors = []; - angular.forEach(['error', 'warn', 'info', 'log', 'debug'], function(logLevel) { - angular.forEach($log[logLevel].logs, function(log) { - angular.forEach(log, function(logItem) { - errors.push('MOCK $log (' + logLevel + '): ' + String(logItem) + '\n' + - (logItem.stack || '')); - }); - }); - }); - if (errors.length) { - errors.unshift("Expected $log to be empty! Either a message was logged unexpectedly, or " + - "an expected log message was not checked and removed:"); - errors.push(''); - throw new Error(errors.join('\n---------\n')); - } - }; - - $log.reset(); - return $log; - }; -}; - - -/** - * @ngdoc service - * @name $interval - * - * @description - * Mock implementation of the $interval service. - * - * Use {@link ngMock.$interval#flush `$interval.flush(millis)`} to - * move forward by `millis` milliseconds and trigger any functions scheduled to run in that - * time. - * - * @param {function()} fn A function that should be called repeatedly. - * @param {number} delay Number of milliseconds between each function call. - * @param {number=} [count=0] Number of times to repeat. If not set, or 0, will repeat - * indefinitely. - * @param {boolean=} [invokeApply=true] If set to `false` skips model dirty checking, otherwise - * will invoke `fn` within the {@link ng.$rootScope.Scope#$apply $apply} block. - * @param {...*=} Pass additional parameters to the executed function. - * @returns {promise} A promise which will be notified on each iteration. - */ -angular.mock.$IntervalProvider = function() { - this.$get = ['$browser', '$rootScope', '$q', '$$q', - function($browser, $rootScope, $q, $$q) { - var repeatFns = [], - nextRepeatId = 0, - now = 0; - - var $interval = function(fn, delay, count, invokeApply) { - var hasParams = arguments.length > 4, - args = hasParams ? Array.prototype.slice.call(arguments, 4) : [], - iteration = 0, - skipApply = (angular.isDefined(invokeApply) && !invokeApply), - deferred = (skipApply ? $$q : $q).defer(), - promise = deferred.promise; - - count = (angular.isDefined(count)) ? 
count : 0; - promise.then(null, null, (!hasParams) ? fn : function() { - fn.apply(null, args); - }); - - promise.$$intervalId = nextRepeatId; - - function tick() { - deferred.notify(iteration++); - - if (count > 0 && iteration >= count) { - var fnIndex; - deferred.resolve(iteration); - - angular.forEach(repeatFns, function(fn, index) { - if (fn.id === promise.$$intervalId) fnIndex = index; - }); - - if (fnIndex !== undefined) { - repeatFns.splice(fnIndex, 1); - } - } - - if (skipApply) { - $browser.defer.flush(); - } else { - $rootScope.$apply(); - } - } - - repeatFns.push({ - nextTime:(now + delay), - delay: delay, - fn: tick, - id: nextRepeatId, - deferred: deferred - }); - repeatFns.sort(function(a, b) { return a.nextTime - b.nextTime;}); - - nextRepeatId++; - return promise; - }; - /** - * @ngdoc method - * @name $interval#cancel - * - * @description - * Cancels a task associated with the `promise`. - * - * @param {promise} promise A promise from calling the `$interval` function. - * @returns {boolean} Returns `true` if the task was successfully cancelled. - */ - $interval.cancel = function(promise) { - if (!promise) return false; - var fnIndex; - - angular.forEach(repeatFns, function(fn, index) { - if (fn.id === promise.$$intervalId) fnIndex = index; - }); - - if (fnIndex !== undefined) { - repeatFns[fnIndex].deferred.reject('canceled'); - repeatFns.splice(fnIndex, 1); - return true; - } - - return false; - }; - - /** - * @ngdoc method - * @name $interval#flush - * @description - * - * Runs interval tasks scheduled to be run in the next `millis` milliseconds. - * - * @param {number=} millis maximum timeout amount to flush up until. - * - * @return {number} The amount of time moved forward. - */ - $interval.flush = function(millis) { - now += millis; - while (repeatFns.length && repeatFns[0].nextTime <= now) { - var task = repeatFns[0]; - task.fn(); - task.nextTime += task.delay; - repeatFns.sort(function(a, b) { return a.nextTime - b.nextTime;}); - } - return millis; - }; - - return $interval; - }]; -}; - - -/* jshint -W101 */ -/* The R_ISO8061_STR regex is never going to fit into the 100 char limit! - * This directive should go inside the anonymous function but a bug in JSHint means that it would - * not be enacted early enough to prevent the warning. - */ -var R_ISO8061_STR = /^(\d{4})-?(\d\d)-?(\d\d)(?:T(\d\d)(?:\:?(\d\d)(?:\:?(\d\d)(?:\.(\d{3}))?)?)?(Z|([+-])(\d\d):?(\d\d)))?$/; - -function jsonStringToDate(string) { - var match; - if (match = string.match(R_ISO8061_STR)) { - var date = new Date(0), - tzHour = 0, - tzMin = 0; - if (match[9]) { - tzHour = toInt(match[9] + match[10]); - tzMin = toInt(match[9] + match[11]); - } - date.setUTCFullYear(toInt(match[1]), toInt(match[2]) - 1, toInt(match[3])); - date.setUTCHours(toInt(match[4] || 0) - tzHour, - toInt(match[5] || 0) - tzMin, - toInt(match[6] || 0), - toInt(match[7] || 0)); - return date; - } - return string; -} - -function toInt(str) { - return parseInt(str, 10); -} - -function padNumber(num, digits, trim) { - var neg = ''; - if (num < 0) { - neg = '-'; - num = -num; - } - num = '' + num; - while (num.length < digits) num = '0' + num; - if (trim) { - num = num.substr(num.length - digits); - } - return neg + num; -} - - -/** - * @ngdoc type - * @name angular.mock.TzDate - * @description - * - * *NOTE*: this is not an injectable instance, just a globally available mock class of `Date`. - * - * Mock of the Date type which has its timezone specified via constructor arg. 
- * - * The main purpose is to create Date-like instances with timezone fixed to the specified timezone - * offset, so that we can test code that depends on local timezone settings without dependency on - * the time zone settings of the machine where the code is running. - * - * @param {number} offset Offset of the *desired* timezone in hours (fractions will be honored) - * @param {(number|string)} timestamp Timestamp representing the desired time in *UTC* - * - * @example - * !!!! WARNING !!!!! - * This is not a complete Date object so only methods that were implemented can be called safely. - * To make matters worse, TzDate instances inherit stuff from Date via a prototype. - * - * We do our best to intercept calls to "unimplemented" methods, but since the list of methods is - * incomplete we might be missing some non-standard methods. This can result in errors like: - * "Date.prototype.foo called on incompatible Object". - * - * ```js - * var newYearInBratislava = new TzDate(-1, '2009-12-31T23:00:00Z'); - * newYearInBratislava.getTimezoneOffset() => -60; - * newYearInBratislava.getFullYear() => 2010; - * newYearInBratislava.getMonth() => 0; - * newYearInBratislava.getDate() => 1; - * newYearInBratislava.getHours() => 0; - * newYearInBratislava.getMinutes() => 0; - * newYearInBratislava.getSeconds() => 0; - * ``` - * - */ -angular.mock.TzDate = function(offset, timestamp) { - var self = new Date(0); - if (angular.isString(timestamp)) { - var tsStr = timestamp; - - self.origDate = jsonStringToDate(timestamp); - - timestamp = self.origDate.getTime(); - if (isNaN(timestamp)) { - throw { - name: "Illegal Argument", - message: "Arg '" + tsStr + "' passed into TzDate constructor is not a valid date string" - }; - } - } else { - self.origDate = new Date(timestamp); - } - - var localOffset = new Date(timestamp).getTimezoneOffset(); - self.offsetDiff = localOffset * 60 * 1000 - offset * 1000 * 60 * 60; - self.date = new Date(timestamp + self.offsetDiff); - - self.getTime = function() { - return self.date.getTime() - self.offsetDiff; - }; - - self.toLocaleDateString = function() { - return self.date.toLocaleDateString(); - }; - - self.getFullYear = function() { - return self.date.getFullYear(); - }; - - self.getMonth = function() { - return self.date.getMonth(); - }; - - self.getDate = function() { - return self.date.getDate(); - }; - - self.getHours = function() { - return self.date.getHours(); - }; - - self.getMinutes = function() { - return self.date.getMinutes(); - }; - - self.getSeconds = function() { - return self.date.getSeconds(); - }; - - self.getMilliseconds = function() { - return self.date.getMilliseconds(); - }; - - self.getTimezoneOffset = function() { - return offset * 60; - }; - - self.getUTCFullYear = function() { - return self.origDate.getUTCFullYear(); - }; - - self.getUTCMonth = function() { - return self.origDate.getUTCMonth(); - }; - - self.getUTCDate = function() { - return self.origDate.getUTCDate(); - }; - - self.getUTCHours = function() { - return self.origDate.getUTCHours(); - }; - - self.getUTCMinutes = function() { - return self.origDate.getUTCMinutes(); - }; - - self.getUTCSeconds = function() { - return self.origDate.getUTCSeconds(); - }; - - self.getUTCMilliseconds = function() { - return self.origDate.getUTCMilliseconds(); - }; - - self.getDay = function() { - return self.date.getDay(); - }; - - // provide this method only on browsers that already have it - if (self.toISOString) { - self.toISOString = function() { - return padNumber(self.origDate.getUTCFullYear(), 
4) + '-' + - padNumber(self.origDate.getUTCMonth() + 1, 2) + '-' + - padNumber(self.origDate.getUTCDate(), 2) + 'T' + - padNumber(self.origDate.getUTCHours(), 2) + ':' + - padNumber(self.origDate.getUTCMinutes(), 2) + ':' + - padNumber(self.origDate.getUTCSeconds(), 2) + '.' + - padNumber(self.origDate.getUTCMilliseconds(), 3) + 'Z'; - }; - } - - //hide all methods not implemented in this mock that the Date prototype exposes - var unimplementedMethods = ['getUTCDay', - 'getYear', 'setDate', 'setFullYear', 'setHours', 'setMilliseconds', - 'setMinutes', 'setMonth', 'setSeconds', 'setTime', 'setUTCDate', 'setUTCFullYear', - 'setUTCHours', 'setUTCMilliseconds', 'setUTCMinutes', 'setUTCMonth', 'setUTCSeconds', - 'setYear', 'toDateString', 'toGMTString', 'toJSON', 'toLocaleFormat', 'toLocaleString', - 'toLocaleTimeString', 'toSource', 'toString', 'toTimeString', 'toUTCString', 'valueOf']; - - angular.forEach(unimplementedMethods, function(methodName) { - self[methodName] = function() { - throw new Error("Method '" + methodName + "' is not implemented in the TzDate mock"); - }; - }); - - return self; -}; - -//make "tzDateInstance instanceof Date" return true -angular.mock.TzDate.prototype = Date.prototype; -/* jshint +W101 */ - -angular.mock.animate = angular.module('ngAnimateMock', ['ng']) - - .config(['$provide', function($provide) { - - var reflowQueue = []; - $provide.value('$$animateReflow', function(fn) { - var index = reflowQueue.length; - reflowQueue.push(fn); - return function cancel() { - reflowQueue.splice(index, 1); - }; - }); - - $provide.decorator('$animate', ['$delegate', '$timeout', '$browser', '$$rAF', - function($delegate, $timeout, $browser, $$rAF) { - var animate = { - queue: [], - cancel: $delegate.cancel, - enabled: $delegate.enabled, - triggerCallbackEvents: function() { - $$rAF.flush(); - }, - triggerCallbackPromise: function() { - $timeout.flush(0); - }, - triggerCallbacks: function() { - this.triggerCallbackEvents(); - this.triggerCallbackPromise(); - }, - triggerReflow: function() { - angular.forEach(reflowQueue, function(fn) { - fn(); - }); - reflowQueue = []; - } - }; - - angular.forEach( - ['animate','enter','leave','move','addClass','removeClass','setClass'], function(method) { - animate[method] = function() { - animate.queue.push({ - event: method, - element: arguments[0], - options: arguments[arguments.length - 1], - args: arguments - }); - return $delegate[method].apply($delegate, arguments); - }; - }); - - return animate; - }]); - - }]); - - -/** - * @ngdoc function - * @name angular.mock.dump - * @description - * - * *NOTE*: this is not an injectable instance, just a globally available function. - * - * Method for serializing common angular objects (scope, elements, etc..) into strings, useful for - * debugging. - * - * This method is also available on window, where it can be used to display objects on debug - * console. - * - * @param {*} object - any object to turn into string. - * @return {string} a serialized string of the argument - */ -angular.mock.dump = function(object) { - return serialize(object); - - function serialize(object) { - var out; - - if (angular.isElement(object)) { - object = angular.element(object); - out = angular.element('
    '); - angular.forEach(object, function(element) { - out.append(angular.element(element).clone()); - }); - out = out.html(); - } else if (angular.isArray(object)) { - out = []; - angular.forEach(object, function(o) { - out.push(serialize(o)); - }); - out = '[ ' + out.join(', ') + ' ]'; - } else if (angular.isObject(object)) { - if (angular.isFunction(object.$eval) && angular.isFunction(object.$apply)) { - out = serializeScope(object); - } else if (object instanceof Error) { - out = object.stack || ('' + object.name + ': ' + object.message); - } else { - // TODO(i): this prevents methods being logged, - // we should have a better way to serialize objects - out = angular.toJson(object, true); - } - } else { - out = String(object); - } - - return out; - } - - function serializeScope(scope, offset) { - offset = offset || ' '; - var log = [offset + 'Scope(' + scope.$id + '): {']; - for (var key in scope) { - if (Object.prototype.hasOwnProperty.call(scope, key) && !key.match(/^(\$|this)/)) { - log.push(' ' + key + ': ' + angular.toJson(scope[key])); - } - } - var child = scope.$$childHead; - while (child) { - log.push(serializeScope(child, offset + ' ')); - child = child.$$nextSibling; - } - log.push('}'); - return log.join('\n' + offset); - } -}; - -/** - * @ngdoc service - * @name $httpBackend - * @description - * Fake HTTP backend implementation suitable for unit testing applications that use the - * {@link ng.$http $http service}. - * - * *Note*: For fake HTTP backend implementation suitable for end-to-end testing or backend-less - * development please see {@link ngMockE2E.$httpBackend e2e $httpBackend mock}. - * - * During unit testing, we want our unit tests to run quickly and have no external dependencies so - * we don’t want to send [XHR](https://developer.mozilla.org/en/xmlhttprequest) or - * [JSONP](http://en.wikipedia.org/wiki/JSONP) requests to a real server. All we really need is - * to verify whether a certain request has been sent or not, or alternatively just let the - * application make requests, respond with pre-trained responses and assert that the end result is - * what we expect it to be. - * - * This mock implementation can be used to respond with static or dynamic responses via the - * `expect` and `when` apis and their shortcuts (`expectGET`, `whenPOST`, etc). - * - * When an Angular application needs some data from a server, it calls the $http service, which - * sends the request to a real server using $httpBackend service. With dependency injection, it is - * easy to inject $httpBackend mock (which has the same API as $httpBackend) and use it to verify - * the requests and respond with some testing data without sending a request to a real server. - * - * There are two ways to specify what test data should be returned as http responses by the mock - * backend when the code under test makes http requests: - * - * - `$httpBackend.expect` - specifies a request expectation - * - `$httpBackend.when` - specifies a backend definition - * - * - * # Request Expectations vs Backend Definitions - * - * Request expectations provide a way to make assertions about requests made by the application and - * to define responses for those requests. The test will fail if the expected requests are not made - * or they are made in the wrong order. - * - * Backend definitions allow you to define a fake backend for your application which doesn't assert - * if a particular request was made or not, it just returns a trained response if a request is made. 
- * The test will pass whether or not the request gets made during testing.
- *
- * <table class="table">
- *   <tr><th width="220px"></th><th>Request expectations</th><th>Backend definitions</th></tr>
- *   <tr><th>Syntax</th><td>.expect(...).respond(...)</td><td>.when(...).respond(...)</td></tr>
- *   <tr><th>Typical usage</th><td>strict unit tests</td><td>loose (black-box) unit testing</td></tr>
- *   <tr><th>Fulfills multiple requests</th><td>NO</td><td>YES</td></tr>
- *   <tr><th>Order of requests matters</th><td>YES</td><td>NO</td></tr>
- *   <tr><th>Request required</th><td>YES</td><td>NO</td></tr>
- *   <tr><th>Response required</th><td>optional (see below)</td><td>YES</td></tr>
- * </table>
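- *
- * For instance, in the minimal sketch below (the `/api/ping` and `/api/config` endpoints
- * are purely illustrative), the `expect` line makes the request mandatory, while the
- * `when` line merely trains a response that may or may not be used:
- *
- * ```js
- * $httpBackend.when('GET', '/api/config').respond({debug: false}); // optional
- * $httpBackend.expect('GET', '/api/ping').respond(200, 'pong');    // required
- *
- * $http.get('/api/ping');
- * $httpBackend.flush();
- * // throws if the expected GET /api/ping was never issued:
- * $httpBackend.verifyNoOutstandingExpectation();
- * ```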
    - * - * In cases where both backend definitions and request expectations are specified during unit - * testing, the request expectations are evaluated first. - * - * If a request expectation has no response specified, the algorithm will search your backend - * definitions for an appropriate response. - * - * If a request didn't match any expectation or if the expectation doesn't have the response - * defined, the backend definitions are evaluated in sequential order to see if any of them match - * the request. The response from the first matched definition is returned. - * - * - * # Flushing HTTP requests - * - * The $httpBackend used in production always responds to requests asynchronously. If we preserved - * this behavior in unit testing, we'd have to create async unit tests, which are hard to write, - * to follow and to maintain. But neither can the testing mock respond synchronously; that would - * change the execution of the code under test. For this reason, the mock $httpBackend has a - * `flush()` method, which allows the test to explicitly flush pending requests. This preserves - * the async api of the backend, while allowing the test to execute synchronously. - * - * - * # Unit testing with mock $httpBackend - * The following code shows how to setup and use the mock backend when unit testing a controller. - * First we create the controller under test: - * - ```js - // The module code - angular - .module('MyApp', []) - .controller('MyController', MyController); - - // The controller code - function MyController($scope, $http) { - var authToken; - - $http.get('/auth.py').success(function(data, status, headers) { - authToken = headers('A-Token'); - $scope.user = data; - }); - - $scope.saveMessage = function(message) { - var headers = { 'Authorization': authToken }; - $scope.status = 'Saving...'; - - $http.post('/add-msg.py', message, { headers: headers } ).success(function(response) { - $scope.status = ''; - }).error(function() { - $scope.status = 'ERROR!'; - }); - }; - } - ``` - * - * Now we setup the mock backend and create the test specs: - * - ```js - // testing controller - describe('MyController', function() { - var $httpBackend, $rootScope, createController, authRequestHandler; - - // Set up the module - beforeEach(module('MyApp')); - - beforeEach(inject(function($injector) { - // Set up the mock http service responses - $httpBackend = $injector.get('$httpBackend'); - // backend definition common for all tests - authRequestHandler = $httpBackend.when('GET', '/auth.py') - .respond({userId: 'userX'}, {'A-Token': 'xxx'}); - - // Get hold of a scope (i.e. 
the root scope) - $rootScope = $injector.get('$rootScope'); - // The $controller service is used to create instances of controllers - var $controller = $injector.get('$controller'); - - createController = function() { - return $controller('MyController', {'$scope' : $rootScope }); - }; - })); - - - afterEach(function() { - $httpBackend.verifyNoOutstandingExpectation(); - $httpBackend.verifyNoOutstandingRequest(); - }); - - - it('should fetch authentication token', function() { - $httpBackend.expectGET('/auth.py'); - var controller = createController(); - $httpBackend.flush(); - }); - - - it('should fail authentication', function() { - - // Notice how you can change the response even after it was set - authRequestHandler.respond(401, ''); - - $httpBackend.expectGET('/auth.py'); - var controller = createController(); - $httpBackend.flush(); - expect($rootScope.status).toBe('Failed...'); - }); - - - it('should send msg to server', function() { - var controller = createController(); - $httpBackend.flush(); - - // now you don’t care about the authentication, but - // the controller will still send the request and - // $httpBackend will respond without you having to - // specify the expectation and response for this request - - $httpBackend.expectPOST('/add-msg.py', 'message content').respond(201, ''); - $rootScope.saveMessage('message content'); - expect($rootScope.status).toBe('Saving...'); - $httpBackend.flush(); - expect($rootScope.status).toBe(''); - }); - - - it('should send auth header', function() { - var controller = createController(); - $httpBackend.flush(); - - $httpBackend.expectPOST('/add-msg.py', undefined, function(headers) { - // check if the header was sent, if it wasn't the expectation won't - // match the request and the test will fail - return headers['Authorization'] == 'xxx'; - }).respond(201, ''); - - $rootScope.saveMessage('whatever'); - $httpBackend.flush(); - }); - }); - ``` - */ -angular.mock.$HttpBackendProvider = function() { - this.$get = ['$rootScope', '$timeout', createHttpBackendMock]; -}; - -/** - * General factory function for $httpBackend mock. - * Returns instance for unit testing (when no arguments specified): - * - passing through is disabled - * - auto flushing is disabled - * - * Returns instance for e2e testing (when `$delegate` and `$browser` specified): - * - passing through (delegating request to real backend) is enabled - * - auto flushing is enabled - * - * @param {Object=} $delegate Real $httpBackend instance (allow passing through if specified) - * @param {Object=} $browser Auto-flushing enabled if specified - * @return {Object} Instance of $httpBackend mock - */ -function createHttpBackendMock($rootScope, $timeout, $delegate, $browser) { - var definitions = [], - expectations = [], - responses = [], - responsesPush = angular.bind(responses, responses.push), - copy = angular.copy; - - function createResponse(status, data, headers, statusText) { - if (angular.isFunction(status)) return status; - - return function() { - return angular.isNumber(status) - ? [status, data, headers, statusText] - : [200, status, data, headers]; - }; - } - - // TODO(vojta): change params to: method, url, data, headers, callback - function $httpBackend(method, url, data, callback, headers, timeout, withCredentials) { - var xhr = new MockXhr(), - expectation = expectations[0], - wasExpected = false; - - function prettyPrint(data) { - return (angular.isString(data) || angular.isFunction(data) || data instanceof RegExp) - ? 
data - : angular.toJson(data); - } - - function wrapResponse(wrapped) { - if (!$browser && timeout) { - timeout.then ? timeout.then(handleTimeout) : $timeout(handleTimeout, timeout); - } - - return handleResponse; - - function handleResponse() { - var response = wrapped.response(method, url, data, headers); - xhr.$$respHeaders = response[2]; - callback(copy(response[0]), copy(response[1]), xhr.getAllResponseHeaders(), - copy(response[3] || '')); - } - - function handleTimeout() { - for (var i = 0, ii = responses.length; i < ii; i++) { - if (responses[i] === handleResponse) { - responses.splice(i, 1); - callback(-1, undefined, ''); - break; - } - } - } - } - - if (expectation && expectation.match(method, url)) { - if (!expectation.matchData(data)) { - throw new Error('Expected ' + expectation + ' with different data\n' + - 'EXPECTED: ' + prettyPrint(expectation.data) + '\nGOT: ' + data); - } - - if (!expectation.matchHeaders(headers)) { - throw new Error('Expected ' + expectation + ' with different headers\n' + - 'EXPECTED: ' + prettyPrint(expectation.headers) + '\nGOT: ' + - prettyPrint(headers)); - } - - expectations.shift(); - - if (expectation.response) { - responses.push(wrapResponse(expectation)); - return; - } - wasExpected = true; - } - - var i = -1, definition; - while ((definition = definitions[++i])) { - if (definition.match(method, url, data, headers || {})) { - if (definition.response) { - // if $browser specified, we do auto flush all requests - ($browser ? $browser.defer : responsesPush)(wrapResponse(definition)); - } else if (definition.passThrough) { - $delegate(method, url, data, callback, headers, timeout, withCredentials); - } else throw new Error('No response defined !'); - return; - } - } - throw wasExpected ? - new Error('No response defined !') : - new Error('Unexpected request: ' + method + ' ' + url + '\n' + - (expectation ? 'Expected ' + expectation : 'No more request expected')); - } - - /** - * @ngdoc method - * @name $httpBackend#when - * @description - * Creates a new backend definition. - * - * @param {string} method HTTP method. - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp|function(string))=} data HTTP request body or function that receives - * data string and returns true if the data is as expected. - * @param {(Object|function(Object))=} headers HTTP headers or function that receives http header - * object and returns true if the headers match the current definition. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - * - * - respond – - * `{function([status,] data[, headers, statusText]) - * | function(function(method, url, data, headers)}` - * – The respond method takes a set of static data to be returned or a function that can - * return an array containing response status (number), response data (string), response - * headers (Object), and the text for the status (string). The respond method returns the - * `requestHandler` object for possible overrides. 
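- *
- * A minimal usage sketch (the `/status` URL is illustrative): saving the returned
- * handler lets a later part of the same spec re-train the response of an
- * already-matched definition:
- *
- * ```js
- * var handler = $httpBackend.when('GET', '/status').respond(200, {ok: true});
- * // ...later in the same spec, make the same matched request fail instead:
- * handler.respond(503, {ok: false});
- * ```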
- */ - $httpBackend.when = function(method, url, data, headers) { - var definition = new MockHttpExpectation(method, url, data, headers), - chain = { - respond: function(status, data, headers, statusText) { - definition.passThrough = undefined; - definition.response = createResponse(status, data, headers, statusText); - return chain; - } - }; - - if ($browser) { - chain.passThrough = function() { - definition.response = undefined; - definition.passThrough = true; - return chain; - }; - } - - definitions.push(definition); - return chain; - }; - - /** - * @ngdoc method - * @name $httpBackend#whenGET - * @description - * Creates a new backend definition for GET requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#whenHEAD - * @description - * Creates a new backend definition for HEAD requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#whenDELETE - * @description - * Creates a new backend definition for DELETE requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#whenPOST - * @description - * Creates a new backend definition for POST requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp|function(string))=} data HTTP request body or function that receives - * data string and returns true if the data is as expected. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#whenPUT - * @description - * Creates a new backend definition for PUT requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. 
- * @param {(string|RegExp|function(string))=} data HTTP request body or function that receives - * data string and returns true if the data is as expected. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#whenJSONP - * @description - * Creates a new backend definition for JSONP requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - createShortMethods('when'); - - - /** - * @ngdoc method - * @name $httpBackend#expect - * @description - * Creates a new request expectation. - * - * @param {string} method HTTP method. - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp|function(string)|Object)=} data HTTP request body or function that - * receives data string and returns true if the data is as expected, or Object if request body - * is in JSON format. - * @param {(Object|function(Object))=} headers HTTP headers or function that receives http header - * object and returns true if the headers match the current expectation. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - * - * - respond – - * `{function([status,] data[, headers, statusText]) - * | function(function(method, url, data, headers)}` - * – The respond method takes a set of static data to be returned or a function that can - * return an array containing response status (number), response data (string), response - * headers (Object), and the text for the status (string). The respond method returns the - * `requestHandler` object for possible overrides. - */ - $httpBackend.expect = function(method, url, data, headers) { - var expectation = new MockHttpExpectation(method, url, data, headers), - chain = { - respond: function(status, data, headers, statusText) { - expectation.response = createResponse(status, data, headers, statusText); - return chain; - } - }; - - expectations.push(expectation); - return chain; - }; - - - /** - * @ngdoc method - * @name $httpBackend#expectGET - * @description - * Creates a new request expectation for GET requests. For more info see `expect()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {Object=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. See #expect for more info. 
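- *
- * For example (URL and payload are illustrative):
- *
- * ```js
- * $httpBackend.expectGET('/api/user/1').respond(200, {id: 1, name: 'jane'});
- * ```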
- */ - - /** - * @ngdoc method - * @name $httpBackend#expectHEAD - * @description - * Creates a new request expectation for HEAD requests. For more info see `expect()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {Object=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#expectDELETE - * @description - * Creates a new request expectation for DELETE requests. For more info see `expect()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {Object=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#expectPOST - * @description - * Creates a new request expectation for POST requests. For more info see `expect()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp|function(string)|Object)=} data HTTP request body or function that - * receives data string and returns true if the data is as expected, or Object if request body - * is in JSON format. - * @param {Object=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#expectPUT - * @description - * Creates a new request expectation for PUT requests. For more info see `expect()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp|function(string)|Object)=} data HTTP request body or function that - * receives data string and returns true if the data is as expected, or Object if request body - * is in JSON format. - * @param {Object=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#expectPATCH - * @description - * Creates a new request expectation for PATCH requests. For more info see `expect()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp|function(string)|Object)=} data HTTP request body or function that - * receives data string and returns true if the data is as expected, or Object if request body - * is in JSON format. - * @param {Object=} headers HTTP headers. 
- * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - - /** - * @ngdoc method - * @name $httpBackend#expectJSONP - * @description - * Creates a new request expectation for JSONP requests. For more info see `expect()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives an url - * and returns true if the url matches the current definition. - * @returns {requestHandler} Returns an object with `respond` method that controls how a matched - * request is handled. You can save this object for later use and invoke `respond` again in - * order to change how a matched request is handled. - */ - createShortMethods('expect'); - - - /** - * @ngdoc method - * @name $httpBackend#flush - * @description - * Flushes all pending requests using the trained responses. - * - * @param {number=} count Number of responses to flush (in the order they arrived). If undefined, - * all pending requests will be flushed. If there are no pending requests when the flush method - * is called an exception is thrown (as this typically a sign of programming error). - */ - $httpBackend.flush = function(count, digest) { - if (digest !== false) $rootScope.$digest(); - if (!responses.length) throw new Error('No pending request to flush !'); - - if (angular.isDefined(count) && count !== null) { - while (count--) { - if (!responses.length) throw new Error('No more pending request to flush !'); - responses.shift()(); - } - } else { - while (responses.length) { - responses.shift()(); - } - } - $httpBackend.verifyNoOutstandingExpectation(digest); - }; - - - /** - * @ngdoc method - * @name $httpBackend#verifyNoOutstandingExpectation - * @description - * Verifies that all of the requests defined via the `expect` api were made. If any of the - * requests were not made, verifyNoOutstandingExpectation throws an exception. - * - * Typically, you would call this method following each test case that asserts requests using an - * "afterEach" clause. - * - * ```js - * afterEach($httpBackend.verifyNoOutstandingExpectation); - * ``` - */ - $httpBackend.verifyNoOutstandingExpectation = function(digest) { - if (digest !== false) $rootScope.$digest(); - if (expectations.length) { - throw new Error('Unsatisfied requests: ' + expectations.join(', ')); - } - }; - - - /** - * @ngdoc method - * @name $httpBackend#verifyNoOutstandingRequest - * @description - * Verifies that there are no outstanding requests that need to be flushed. - * - * Typically, you would call this method following each test case that asserts requests using an - * "afterEach" clause. - * - * ```js - * afterEach($httpBackend.verifyNoOutstandingRequest); - * ``` - */ - $httpBackend.verifyNoOutstandingRequest = function() { - if (responses.length) { - throw new Error('Unflushed requests: ' + responses.length); - } - }; - - - /** - * @ngdoc method - * @name $httpBackend#resetExpectations - * @description - * Resets all request expectations, but preserves all backend definitions. Typically, you would - * call resetExpectations during a multiple-phase test when you want to reuse the same instance of - * $httpBackend mock. 
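- *
- * A sketch of such a multiple-phase test (URLs are illustrative):
- *
- * ```js
- * $httpBackend.expectGET('/phase1').respond(200, '');
- * $http.get('/phase1');
- * $httpBackend.flush();
- *
- * $httpBackend.resetExpectations(); // expectations cleared, `when` definitions kept
- * $httpBackend.expectGET('/phase2').respond(200, '');
- * $http.get('/phase2');
- * $httpBackend.flush();
- * ```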
- */ - $httpBackend.resetExpectations = function() { - expectations.length = 0; - responses.length = 0; - }; - - return $httpBackend; - - - function createShortMethods(prefix) { - angular.forEach(['GET', 'DELETE', 'JSONP', 'HEAD'], function(method) { - $httpBackend[prefix + method] = function(url, headers) { - return $httpBackend[prefix](method, url, undefined, headers); - }; - }); - - angular.forEach(['PUT', 'POST', 'PATCH'], function(method) { - $httpBackend[prefix + method] = function(url, data, headers) { - return $httpBackend[prefix](method, url, data, headers); - }; - }); - } -} - -function MockHttpExpectation(method, url, data, headers) { - - this.data = data; - this.headers = headers; - - this.match = function(m, u, d, h) { - if (method != m) return false; - if (!this.matchUrl(u)) return false; - if (angular.isDefined(d) && !this.matchData(d)) return false; - if (angular.isDefined(h) && !this.matchHeaders(h)) return false; - return true; - }; - - this.matchUrl = function(u) { - if (!url) return true; - if (angular.isFunction(url.test)) return url.test(u); - if (angular.isFunction(url)) return url(u); - return url == u; - }; - - this.matchHeaders = function(h) { - if (angular.isUndefined(headers)) return true; - if (angular.isFunction(headers)) return headers(h); - return angular.equals(headers, h); - }; - - this.matchData = function(d) { - if (angular.isUndefined(data)) return true; - if (data && angular.isFunction(data.test)) return data.test(d); - if (data && angular.isFunction(data)) return data(d); - if (data && !angular.isString(data)) { - return angular.equals(angular.fromJson(angular.toJson(data)), angular.fromJson(d)); - } - return data == d; - }; - - this.toString = function() { - return method + ' ' + url; - }; -} - -function createMockXhr() { - return new MockXhr(); -} - -function MockXhr() { - - // hack for testing $http, $httpBackend - MockXhr.$$lastInstance = this; - - this.open = function(method, url, async) { - this.$$method = method; - this.$$url = url; - this.$$async = async; - this.$$reqHeaders = {}; - this.$$respHeaders = {}; - }; - - this.send = function(data) { - this.$$data = data; - }; - - this.setRequestHeader = function(key, value) { - this.$$reqHeaders[key] = value; - }; - - this.getResponseHeader = function(name) { - // the lookup must be case insensitive, - // that's why we try two quick lookups first and full scan last - var header = this.$$respHeaders[name]; - if (header) return header; - - name = angular.lowercase(name); - header = this.$$respHeaders[name]; - if (header) return header; - - header = undefined; - angular.forEach(this.$$respHeaders, function(headerVal, headerName) { - if (!header && angular.lowercase(headerName) == name) header = headerVal; - }); - return header; - }; - - this.getAllResponseHeaders = function() { - var lines = []; - - angular.forEach(this.$$respHeaders, function(value, key) { - lines.push(key + ': ' + value); - }); - return lines.join('\n'); - }; - - this.abort = angular.noop; -} - - -/** - * @ngdoc service - * @name $timeout - * @description - * - * This service is just a simple decorator for {@link ng.$timeout $timeout} service - * that adds a "flush" and "verifyNoPendingTasks" methods. - */ - -angular.mock.$TimeoutDecorator = ['$delegate', '$browser', function($delegate, $browser) { - - /** - * @ngdoc method - * @name $timeout#flush - * @description - * - * Flushes the queue of pending tasks. 
- * - * @param {number=} delay maximum timeout amount to flush up until - */ - $delegate.flush = function(delay) { - $browser.defer.flush(delay); - }; - - /** - * @ngdoc method - * @name $timeout#verifyNoPendingTasks - * @description - * - * Verifies that there are no pending tasks that need to be flushed. - */ - $delegate.verifyNoPendingTasks = function() { - if ($browser.deferredFns.length) { - throw new Error('Deferred tasks to flush (' + $browser.deferredFns.length + '): ' + - formatPendingTasksAsString($browser.deferredFns)); - } - }; - - function formatPendingTasksAsString(tasks) { - var result = []; - angular.forEach(tasks, function(task) { - result.push('{id: ' + task.id + ', ' + 'time: ' + task.time + '}'); - }); - - return result.join(', '); - } - - return $delegate; -}]; - -angular.mock.$RAFDecorator = ['$delegate', function($delegate) { - var queue = []; - var rafFn = function(fn) { - var index = queue.length; - queue.push(fn); - return function() { - queue.splice(index, 1); - }; - }; - - rafFn.supported = $delegate.supported; - - rafFn.flush = function() { - if (queue.length === 0) { - throw new Error('No rAF callbacks present'); - } - - var length = queue.length; - for (var i = 0; i < length; i++) { - queue[i](); - } - - queue = queue.slice(i); - }; - - return rafFn; -}]; - -/** - * - */ -angular.mock.$RootElementProvider = function() { - this.$get = function() { - return angular.element('
    '); - }; -}; - -/** - * @ngdoc service - * @name $controller - * @description - * A decorator for {@link ng.$controller} with additional `bindings` parameter, useful when testing - * controllers of directives that use {@link $compile#-bindtocontroller- `bindToController`}. - * - * - * ## Example - * - * ```js - * - * // Directive definition ... - * - * myMod.directive('myDirective', { - * controller: 'MyDirectiveController', - * bindToController: { - * name: '@' - * } - * }); - * - * - * // Controller definition ... - * - * myMod.controller('MyDirectiveController', ['log', function($log) { - * $log.info(this.name); - * })]; - * - * - * // In a test ... - * - * describe('myDirectiveController', function() { - * it('should write the bound name to the log', inject(function($controller, $log) { - * var ctrl = $controller('MyDirective', { /* no locals */ }, { name: 'Clark Kent' }); - * expect(ctrl.name).toEqual('Clark Kent'); - * expect($log.info.logs).toEqual(['Clark Kent']); - * }); - * }); - * - * ``` - * - * @param {Function|string} constructor If called with a function then it's considered to be the - * controller constructor function. Otherwise it's considered to be a string which is used - * to retrieve the controller constructor using the following steps: - * - * * check if a controller with given name is registered via `$controllerProvider` - * * check if evaluating the string on the current scope returns a constructor - * * if $controllerProvider#allowGlobals, check `window[constructor]` on the global - * `window` object (not recommended) - * - * The string can use the `controller as property` syntax, where the controller instance is published - * as the specified property on the `scope`; the `scope` must be injected into `locals` param for this - * to work correctly. - * - * @param {Object} locals Injection locals for Controller. - * @param {Object=} bindings Properties to add to the controller before invoking the constructor. This is used - * to simulate the `bindToController` feature and simplify certain kinds of tests. - * @return {Object} Instance of given controller. - */ -angular.mock.$ControllerDecorator = ['$delegate', function($delegate) { - return function(expression, locals, later, ident) { - if (later && typeof later === 'object') { - var create = $delegate(expression, locals, true, ident); - angular.extend(create.instance, later); - return create(); - } - return $delegate(expression, locals, later, ident); - }; -}]; - - -/** - * @ngdoc module - * @name ngMock - * @packageName angular-mocks - * @description - * - * # ngMock - * - * The `ngMock` module provides support to inject and mock Angular services into unit tests. - * In addition, ngMock also extends various core ng services such that they can be - * inspected and controlled in a synchronous manner within test code. - * - * - *
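- *
- * For example, a spec can drain `$timeout` callbacks synchronously (a minimal sketch;
- * the variable names are illustrative):
- *
- * ```js
- * it('flushes timeouts deterministically', inject(function($timeout) {
- *   var fired = false;
- *   $timeout(function() { fired = true; });
- *   $timeout.flush();          // ngMock-decorated $timeout
- *   expect(fired).toBe(true);
- * }));
- * ```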
    - * - */ -angular.module('ngMock', ['ng']).provider({ - $browser: angular.mock.$BrowserProvider, - $exceptionHandler: angular.mock.$ExceptionHandlerProvider, - $log: angular.mock.$LogProvider, - $interval: angular.mock.$IntervalProvider, - $httpBackend: angular.mock.$HttpBackendProvider, - $rootElement: angular.mock.$RootElementProvider -}).config(['$provide', function($provide) { - $provide.decorator('$timeout', angular.mock.$TimeoutDecorator); - $provide.decorator('$$rAF', angular.mock.$RAFDecorator); - $provide.decorator('$rootScope', angular.mock.$RootScopeDecorator); - $provide.decorator('$controller', angular.mock.$ControllerDecorator); -}]); - -/** - * @ngdoc module - * @name ngMockE2E - * @module ngMockE2E - * @packageName angular-mocks - * @description - * - * The `ngMockE2E` is an angular module which contains mocks suitable for end-to-end testing. - * Currently there is only one mock present in this module - - * the {@link ngMockE2E.$httpBackend e2e $httpBackend} mock. - */ -angular.module('ngMockE2E', ['ng']).config(['$provide', function($provide) { - $provide.decorator('$httpBackend', angular.mock.e2e.$httpBackendDecorator); -}]); - -/** - * @ngdoc service - * @name $httpBackend - * @module ngMockE2E - * @description - * Fake HTTP backend implementation suitable for end-to-end testing or backend-less development of - * applications that use the {@link ng.$http $http service}. - * - * *Note*: For fake http backend implementation suitable for unit testing please see - * {@link ngMock.$httpBackend unit-testing $httpBackend mock}. - * - * This implementation can be used to respond with static or dynamic responses via the `when` api - * and its shortcuts (`whenGET`, `whenPOST`, etc) and optionally pass through requests to the - * real $httpBackend for specific requests (e.g. to interact with certain remote apis or to fetch - * templates from a webserver). - * - * As opposed to unit-testing, in an end-to-end testing scenario or in scenario when an application - * is being developed with the real backend api replaced with a mock, it is often desirable for - * certain category of requests to bypass the mock and issue a real http request (e.g. to fetch - * templates or static files from the webserver). To configure the backend with this behavior - * use the `passThrough` request handler of `when` instead of `respond`. - * - * Additionally, we don't want to manually have to flush mocked out requests like we do during unit - * testing. For this reason the e2e $httpBackend flushes mocked out requests - * automatically, closely simulating the behavior of the XMLHttpRequest object. - * - * To setup the application to run with this http backend, you have to create a module that depends - * on the `ngMockE2E` and your application modules and defines the fake backend: - * - * ```js - * myAppDev = angular.module('myAppDev', ['myApp', 'ngMockE2E']); - * myAppDev.run(function($httpBackend) { - * phones = [{name: 'phone1'}, {name: 'phone2'}]; - * - * // returns the current list of phones - * $httpBackend.whenGET('/phones').respond(phones); - * - * // adds a new phone to the phones array - * $httpBackend.whenPOST('/phones').respond(function(method, url, data) { - * var phone = angular.fromJson(data); - * phones.push(phone); - * return [200, phone, {}]; - * }); - * $httpBackend.whenGET(/^\/templates\//).passThrough(); - * //... - * }); - * ``` - * - * Afterwards, bootstrap your app with this new module. 
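- *
- * For example (reusing the `myAppDev` module defined above):
- *
- * ```js
- * angular.bootstrap(document.body, ['myAppDev']);
- * ```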
- */ - -/** - * @ngdoc method - * @name $httpBackend#when - * @module ngMockE2E - * @description - * Creates a new backend definition. - * - * @param {string} method HTTP method. - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp)=} data HTTP request body. - * @param {(Object|function(Object))=} headers HTTP headers or function that receives http header - * object and returns true if the headers match the current definition. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. - * - * - respond – - * `{function([status,] data[, headers, statusText]) - * | function(function(method, url, data, headers)}` - * – The respond method takes a set of static data to be returned or a function that can return - * an array containing response status (number), response data (string), response headers - * (Object), and the text for the status (string). - * - passThrough – `{function()}` – Any request matching a backend definition with - * `passThrough` handler will be passed through to the real backend (an XHR request will be made - * to the server.) - * - Both methods return the `requestHandler` object for possible overrides. - */ - -/** - * @ngdoc method - * @name $httpBackend#whenGET - * @module ngMockE2E - * @description - * Creates a new backend definition for GET requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. - */ - -/** - * @ngdoc method - * @name $httpBackend#whenHEAD - * @module ngMockE2E - * @description - * Creates a new backend definition for HEAD requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. - */ - -/** - * @ngdoc method - * @name $httpBackend#whenDELETE - * @module ngMockE2E - * @description - * Creates a new backend definition for DELETE requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. 
- */ - -/** - * @ngdoc method - * @name $httpBackend#whenPOST - * @module ngMockE2E - * @description - * Creates a new backend definition for POST requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp)=} data HTTP request body. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. - */ - -/** - * @ngdoc method - * @name $httpBackend#whenPUT - * @module ngMockE2E - * @description - * Creates a new backend definition for PUT requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp)=} data HTTP request body. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. - */ - -/** - * @ngdoc method - * @name $httpBackend#whenPATCH - * @module ngMockE2E - * @description - * Creates a new backend definition for PATCH requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @param {(string|RegExp)=} data HTTP request body. - * @param {(Object|function(Object))=} headers HTTP headers. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. - */ - -/** - * @ngdoc method - * @name $httpBackend#whenJSONP - * @module ngMockE2E - * @description - * Creates a new backend definition for JSONP requests. For more info see `when()`. - * - * @param {string|RegExp|function(string)} url HTTP url or function that receives a url - * and returns true if the url matches the current definition. - * @returns {requestHandler} Returns an object with `respond` and `passThrough` methods that - * control how a matched request is handled. You can save this object for later use and invoke - * `respond` or `passThrough` again in order to change how a matched request is handled. - */ -angular.mock.e2e = {}; -angular.mock.e2e.$httpBackendDecorator = - ['$rootScope', '$timeout', '$delegate', '$browser', createHttpBackendMock]; - - -/** - * @ngdoc type - * @name $rootScope.Scope - * @module ngMock - * @description - * {@link ng.$rootScope.Scope Scope} type decorated with helper methods useful for testing. These - * methods are automatically available on any {@link ng.$rootScope.Scope Scope} instance when - * `ngMock` module is loaded. 
- * - * In addition to all the regular `Scope` methods, the following helper methods are available: - */ -angular.mock.$RootScopeDecorator = ['$delegate', function($delegate) { - - var $rootScopePrototype = Object.getPrototypeOf($delegate); - - $rootScopePrototype.$countChildScopes = countChildScopes; - $rootScopePrototype.$countWatchers = countWatchers; - - return $delegate; - - // ------------------------------------------------------------------------------------------ // - - /** - * @ngdoc method - * @name $rootScope.Scope#$countChildScopes - * @module ngMock - * @description - * Counts all the direct and indirect child scopes of the current scope. - * - * The current scope is excluded from the count. The count includes all isolate child scopes. - * - * @returns {number} Total number of child scopes. - */ - function countChildScopes() { - // jshint validthis: true - var count = 0; // exclude the current scope - var pendingChildHeads = [this.$$childHead]; - var currentScope; - - while (pendingChildHeads.length) { - currentScope = pendingChildHeads.shift(); - - while (currentScope) { - count += 1; - pendingChildHeads.push(currentScope.$$childHead); - currentScope = currentScope.$$nextSibling; - } - } - - return count; - } - - - /** - * @ngdoc method - * @name $rootScope.Scope#$countWatchers - * @module ngMock - * @description - * Counts all the watchers of direct and indirect child scopes of the current scope. - * - * The watchers of the current scope are included in the count and so are all the watchers of - * isolate child scopes. - * - * @returns {number} Total number of watchers. - */ - function countWatchers() { - // jshint validthis: true - var count = this.$$watchers ? this.$$watchers.length : 0; // include the current scope - var pendingChildHeads = [this.$$childHead]; - var currentScope; - - while (pendingChildHeads.length) { - currentScope = pendingChildHeads.shift(); - - while (currentScope) { - count += currentScope.$$watchers ? 
currentScope.$$watchers.length : 0; - pendingChildHeads.push(currentScope.$$childHead); - currentScope = currentScope.$$nextSibling; - } - } - - return count; - } -}]; - - -if (window.jasmine || window.mocha) { - - var currentSpec = null, - annotatedFunctions = [], - isSpecRunning = function() { - return !!currentSpec; - }; - - angular.mock.$$annotate = angular.injector.$$annotate; - angular.injector.$$annotate = function(fn) { - if (typeof fn === 'function' && !fn.$inject) { - annotatedFunctions.push(fn); - } - return angular.mock.$$annotate.apply(this, arguments); - }; - - - (window.beforeEach || window.setup)(function() { - annotatedFunctions = []; - currentSpec = this; - }); - - (window.afterEach || window.teardown)(function() { - var injector = currentSpec.$injector; - - annotatedFunctions.forEach(function(fn) { - delete fn.$inject; - }); - - angular.forEach(currentSpec.$modules, function(module) { - if (module && module.$$hashKey) { - module.$$hashKey = undefined; - } - }); - - currentSpec.$injector = null; - currentSpec.$modules = null; - currentSpec = null; - - if (injector) { - injector.get('$rootElement').off(); - } - - // clean up jquery's fragment cache - angular.forEach(angular.element.fragments, function(val, key) { - delete angular.element.fragments[key]; - }); - - MockXhr.$$lastInstance = null; - - angular.forEach(angular.callbacks, function(val, key) { - delete angular.callbacks[key]; - }); - angular.callbacks.counter = 0; - }); - - /** - * @ngdoc function - * @name angular.mock.module - * @description - * - * *NOTE*: This function is also published on window for easy access.
    - * *NOTE*: This function is declared ONLY WHEN running tests with jasmine or mocha - * - * This function registers a module configuration code. It collects the configuration information - * which will be used when the injector is created by {@link angular.mock.inject inject}. - * - * See {@link angular.mock.inject inject} for usage example - * - * @param {...(string|Function|Object)} fns any number of modules which are represented as string - * aliases or as anonymous module initialization functions. The modules are used to - * configure the injector. The 'ng' and 'ngMock' modules are automatically loaded. If an - * object literal is passed they will be registered as values in the module, the key being - * the module name and the value being what is returned. - */ - window.module = angular.mock.module = function() { - var moduleFns = Array.prototype.slice.call(arguments, 0); - return isSpecRunning() ? workFn() : workFn; - ///////////////////// - function workFn() { - if (currentSpec.$injector) { - throw new Error('Injector already created, can not register a module!'); - } else { - var modules = currentSpec.$modules || (currentSpec.$modules = []); - angular.forEach(moduleFns, function(module) { - if (angular.isObject(module) && !angular.isArray(module)) { - modules.push(function($provide) { - angular.forEach(module, function(value, key) { - $provide.value(key, value); - }); - }); - } else { - modules.push(module); - } - }); - } - } - }; - - /** - * @ngdoc function - * @name angular.mock.inject - * @description - * - * *NOTE*: This function is also published on window for easy access.
-  /**
-   * @ngdoc function
-   * @name angular.mock.inject
-   * @description
-   *
-   * *NOTE*: This function is also published on window for easy access.
-   * *NOTE*: This function is declared ONLY WHEN running tests with jasmine or mocha
-   *
-   * The inject function wraps a function into an injectable function. inject() creates a new
-   * instance of {@link auto.$injector $injector} per test, which is then used for
-   * resolving references.
-   *
-   *
-   * ## Resolving References (Underscore Wrapping)
-   * Often, we would like to inject a reference once, in a `beforeEach()` block, and reuse it
-   * in multiple `it()` clauses. To be able to do this we must assign the reference to a variable
-   * that is declared in the scope of the `describe()` block. Since we would, most likely, want
-   * the variable to have the same name as the reference, we have a problem, since the parameter
-   * to the `inject()` function would hide the outer variable.
-   *
-   * To help with this, the injected parameters can, optionally, be enclosed with underscores.
-   * These are ignored by the injector when the reference name is resolved.
-   *
-   * For example, the parameter `_myService_` would be resolved as the reference `myService`.
-   * Since it is available in the function body as `_myService_`, we can then assign it to a variable
-   * defined in an outer scope.
-   *
-   * ```
-   * // Define our reference variable outside
-   * var myService;
-   *
-   * // Wrap the parameter in underscores
-   * beforeEach( inject( function(_myService_){
-   *   myService = _myService_;
-   * }));
-   *
-   * // Use myService in a series of tests.
-   * it('makes use of myService', function() {
-   *   myService.doStuff();
-   * });
-   *
-   * ```
-   *
-   * See also {@link angular.mock.module angular.mock.module}
-   *
-   * ## Example
-   * Example of what a typical jasmine test looks like with the inject method.
-   * ```js
-   *
-   *   angular.module('myApplicationModule', [])
-   *       .value('mode', 'app')
-   *       .value('version', 'v1.0.1');
-   *
-   *
-   *   describe('MyApp', function() {
-   *
-   *     // You need to load modules that you want to test,
-   *     // it loads only the "ng" module by default.
-   *     beforeEach(module('myApplicationModule'));
-   *
-   *
-   *     // inject() is used to inject arguments of all given functions
-   *     it('should provide a version', inject(function(mode, version) {
-   *       expect(version).toEqual('v1.0.1');
-   *       expect(mode).toEqual('app');
-   *     }));
-   *
-   *
-   *     // The inject and module methods can also be used inside of it or beforeEach
-   *     it('should override a version and test the new version is injected', function() {
-   *       // module() takes functions or strings (module aliases)
-   *       module(function($provide) {
-   *         $provide.value('version', 'overridden'); // override version here
-   *       });
-   *
-   *       inject(function(version) {
-   *         expect(version).toEqual('overridden');
-   *       });
-   *     });
-   *   });
-   *
-   * ```
-   *
-   * @param {...Function} fns any number of functions which will be injected using the injector.
-   */
-
-
-
-  var ErrorAddingDeclarationLocationStack = function(e, errorForStack) {
-    this.message = e.message;
-    this.name = e.name;
-    if (e.line) this.line = e.line;
-    if (e.sourceId) this.sourceId = e.sourceId;
-    if (e.stack && errorForStack)
-      this.stack = e.stack + '\n' + errorForStack.stack;
-    if (e.stackArray) this.stackArray = e.stackArray;
-  };
-  ErrorAddingDeclarationLocationStack.prototype.toString = Error.prototype.toString;
-
-  window.inject = angular.mock.inject = function() {
-    var blockFns = Array.prototype.slice.call(arguments, 0);
-    var errorForStack = new Error('Declaration Location');
-    return isSpecRunning() ? workFn.call(currentSpec) : workFn;
-    /////////////////////
-    function workFn() {
-      var modules = currentSpec.$modules || [];
-      var strictDi = !!currentSpec.$injectorStrict;
-      modules.unshift('ngMock');
-      modules.unshift('ng');
-      var injector = currentSpec.$injector;
-      if (!injector) {
-        if (strictDi) {
-          // If strictDi is enabled, annotate the providerInjector blocks
-          angular.forEach(modules, function(moduleFn) {
-            if (typeof moduleFn === "function") {
-              angular.injector.$$annotate(moduleFn);
-            }
-          });
-        }
-        injector = currentSpec.$injector = angular.injector(modules, strictDi);
-        currentSpec.$injectorStrict = strictDi;
-      }
-      for (var i = 0, ii = blockFns.length; i < ii; i++) {
-        if (currentSpec.$injectorStrict) {
-          // If the injector is strict / strictDi, and the spec wants to inject using automatic
-          // annotation, then annotate the function here.
-          injector.annotate(blockFns[i]);
-        }
-        try {
-          /* jshint -W040 *//* Jasmine explicitly provides a `this` object when calling functions */
-          injector.invoke(blockFns[i] || angular.noop, this);
-          /* jshint +W040 */
-        } catch (e) {
-          if (e.stack && errorForStack) {
-            throw new ErrorAddingDeclarationLocationStack(e, errorForStack);
-          }
-          throw e;
-        } finally {
-          errorForStack = null;
-        }
-      }
-    }
-  };
-
-
-  angular.mock.inject.strictDi = function(value) {
-    value = arguments.length ? !!value : true;
-    return isSpecRunning() ? workFn() : workFn;
-
-    function workFn() {
-      if (value !== currentSpec.$injectorStrict) {
-        if (currentSpec.$injector) {
-          throw new Error('Injector already created, can not modify strict annotations');
-        } else {
-          currentSpec.$injectorStrict = value;
-        }
-      }
-    }
-  };
-}
-
-
-})(window, window.angular);
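inject.strictDi(), deleted just above, flips the per-spec injector into strict annotation mode before that injector is created. Note that workFn() inside inject() calls injector.annotate() on the spec's own block functions, so under strictDi only module-level code has to be explicitly annotated. A minimal sketch under the same assumptions as before (jasmine runner, invented 'myApp' module providing a 'greeter' service):

    describe('myApp under strict DI', function() {
      // Like module(), this must run before the first inject() creates
      // the injector; it throws if the injector already exists.
      beforeEach(inject.strictDi(true));
      beforeEach(module('myApp'));

      it('still resolves plain-parameter blocks', inject(function(greeter) {
        // ngMock annotates this block itself, so unannotated spec
        // functions keep working even with strictDi enabled.
        expect(greeter).toBeDefined();
      }));
    });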
"framework", - "browser", - "mocks", - "testing", - "client-side" - ], - "author": "Angular Core Team ", - "license": "MIT", - "bugs": { - "url": "https://github.com/angular/angular.js/issues" - }, - "homepage": "http://angularjs.org" -} diff --git a/awx/ui/client/lib/angular-moment/.bower.json b/awx/ui/client/lib/angular-moment/.bower.json deleted file mode 100644 index aa74eee08f..0000000000 --- a/awx/ui/client/lib/angular-moment/.bower.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "name": "angular-moment", - "version": "0.10.1", - "description": "Moment.JS directives & filters for AngularJS (timeago alternative)", - "author": "Uri Shaked", - "license": "MIT", - "homepage": "http://github.com/urish/angular-moment", - "main": "./angular-moment.js", - "ignore": [], - "dependencies": { - "angular": ">=1.2.0 <1.5.0", - "moment": ">=2.8.0 <2.11.0" - }, - "devDependencies": { - "angular-mocks": "1.3.x", - "moment-timezone": "0.3.1" - }, - "repository": { - "type": "git", - "url": "git://github.com/urish/angular-moment.git" - }, - "_release": "0.10.1", - "_resolution": { - "type": "version", - "tag": "0.10.1", - "commit": "8910240ee1872478a1b318d2d800c1c073526c37" - }, - "_source": "git://github.com/urish/angular-moment.git", - "_target": "~0.10.1", - "_originalSource": "angular-moment", - "_direct": true -} \ No newline at end of file diff --git a/awx/ui/client/lib/angular-moment/.editorconfig b/awx/ui/client/lib/angular-moment/.editorconfig deleted file mode 100644 index 297368244a..0000000000 --- a/awx/ui/client/lib/angular-moment/.editorconfig +++ /dev/null @@ -1,24 +0,0 @@ -# EditorConfig helps developers define and maintain consistent -# coding styles between different editors and IDEs -# editorconfig.org - -root = true - -[*] - -# Change these settings to your own preference -indent_style = tab -indent_size = 4 - -# We recommend you to keep these unchanged -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true - -[{package.json,bower.json}] -indent_style=space -indent_size=2 - -[*.md] -trim_trailing_whitespace = false diff --git a/awx/ui/client/lib/angular-moment/.gitignore b/awx/ui/client/lib/angular-moment/.gitignore deleted file mode 100644 index fccb56b324..0000000000 --- a/awx/ui/client/lib/angular-moment/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/.idea -/bower_components -/node_modules -/coverage \ No newline at end of file diff --git a/awx/ui/client/lib/angular-moment/.jshintrc b/awx/ui/client/lib/angular-moment/.jshintrc deleted file mode 100644 index d430ef2a6b..0000000000 --- a/awx/ui/client/lib/angular-moment/.jshintrc +++ /dev/null @@ -1,26 +0,0 @@ -{ - "node": true, - "browser": true, - "esnext": true, - "bitwise": true, - "camelcase": true, - "curly": true, - "eqeqeq": true, - "immed": true, - "indent": 2, - "latedef": true, - "newcap": true, - "noarg": true, - "quotmark": "single", - "regexp": true, - "undef": true, - "unused": true, - "strict": true, - "trailing": true, - "smarttabs": true, - "maxdepth": 2, - "maxcomplexity": 10, - "globals": { - "angular": false - } -} diff --git a/awx/ui/client/lib/angular-moment/.npmignore b/awx/ui/client/lib/angular-moment/.npmignore deleted file mode 100644 index 2a9c3935f7..0000000000 --- a/awx/ui/client/lib/angular-moment/.npmignore +++ /dev/null @@ -1,4 +0,0 @@ -.idea -bower_components -node_modules -coverage diff --git a/awx/ui/client/lib/angular-moment/.travis.yml b/awx/ui/client/lib/angular-moment/.travis.yml deleted file mode 100644 index c85bdee3c0..0000000000 --- 
+++ /dev/null
@@ -1,7 +0,0 @@
-language: node_js
-node_js:
-  - "0.10"
-before_script:
-  - npm run bower
-after_success:
-  - cat ./coverage/*/lcov.info | ./node_modules/coveralls/bin/coveralls.js
diff --git a/awx/ui/client/lib/angular-moment/CHANGELOG.md b/awx/ui/client/lib/angular-moment/CHANGELOG.md
deleted file mode 100644
index 669057ba3b..0000000000
--- a/awx/ui/client/lib/angular-moment/CHANGELOG.md
+++ /dev/null
@@ -1,131 +0,0 @@
-# Changelog
-
-## 0.10.1 - 2015-05-01
-- Fix broken SystemJS/JSPM support (see [#104](https://github.com/urish/angular-moment/issues/104))
-
-## 0.10.0 - 2015-04-10
-- Breaking change: removed one-time binding for `am-time-ago` in favor of AngularJS 1.3's one time binding ([#122](https://github.com/urish/angular-moment/issues/122))
-- Remove support for AngularJS 1.0.x and 1.1.x.
-- Support moment.js v2.10.x
-- Support for displaying full dates in `am-time-ago` (see [#75](https://github.com/urish/angular-moment/issues/75))
-- Support Angular Core's style CommonJS standard ([#123](https://github.com/urish/angular-moment/pull/123), contributed by [seanhealy](https://github.com/seanhealy))
-- Added an optional timezone parameter to amDateFormat ([#90](https://github.com/urish/angular-moment/pull/90), contributed by [robertbrooker](https://github.com/robertbrooker))
-
-## 0.9.2 - 2015-03-17
-- Critical fix: npm install angular-moment fails ([#121](https://github.com/urish/angular-moment/issues/121))
-
-## 0.9.1 - 2015-03-17
-- Add support for locale strings customization ([#102](https://github.com/urish/angular-moment/pull/102), contributed by [vosi](https://github.com/vosi))
-- Add `amDifference` filter ([#120](https://github.com/urish/angular-moment/pull/120), contributed by [ajhodges](https://github.com/ajhodges))
-- Support for changing the timezone via `amMoment.changeTimezone()` ([#92](https://github.com/urish/angular-moment/issues/92))
-- Support for AngularJS 1.4.x
-- Remove explicit module name for RequireJS ([#112](https://github.com/urish/angular-moment/pull/112), contributed by [WilliamCarter](https://github.com/WilliamCarter))
-
-## 0.9.0 - 2015-01-11
-- Support moment.js v2.9.0. See [here](https://gist.github.com/ichernev/0c9a9b49951111a27ce7) for changelog.
-- Removed support for older moment.js versions. Only 2.8.0 and newer versions are now supported.
-- Removed deprecated method: `amMoment.changeLanguage()`. Use `amMoment.changeLocale()` instead.
-- Removed deprecated event: `amMoment:languageChange`. Listen for `amMoment:localeChange` instead.
-- Filters are now stateful by default (fixes [#97](https://github.com/urish/angular-moment/issues/97)).
-- The project is now available on [NuGet](https://www.nuget.org/packages/angular-moment/) ([#99](https://github.com/urish/angular-moment/pull/99), contributed by [markvp](https://github.com/markvp)).
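For context, a sketch of two filters named in the changelog entries above: amDateFormat and the amDifference filter added in 0.9.1. The 'myApp' module, controller name, and sample date are invented, and the amDifference parameter order (date, date to compare against, units) is an assumption based on the 0.9.1 entry:

    // Hypothetical controller using the angular-moment filters from JS
    // via $filter rather than in a template expression.
    angular.module('myApp', ['angularMoment'])
      .controller('PostCtrl', ['$scope', '$filter', function($scope, $filter) {
        var created = new Date('2015-04-10T12:00:00Z'); // invented sample value

        // amDateFormat: render a date through a moment.js format string.
        $scope.createdDisplay = $filter('amDateFormat')(created, 'MMMM Do YYYY, h:mm a');

        // amDifference: difference between two dates in the given units.
        $scope.ageInDays = $filter('amDifference')(new Date(), created, 'days');
      }]);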
-
-## 0.8.3 - 2014-12-08
-- `amTimeAgo` filter ([#96](https://github.com/urish/angular-moment/pull/96), contributed by [maxklenk](https://github.com/maxklenk))
-- Show formatted time as element title ([#78](https://github.com/urish/angular-moment/pull/78), contributed by [ctesene](https://github.com/ctesene))
-- Support commonjs and browserify ([#95](https://github.com/urish/angular-moment/pull/95), contributed by [Pencroff](https://github.com/Pencroff))
-- SystemJS Loader support ([#85](https://github.com/urish/angular-moment/pull/85), contributed by [capaj](https://github.com/capaj))
-
-## 0.8.2 - 2014-09-07
-- `amMoment.changeLanguage()` was deprecated in favor of `amMoment.changeLocale()` (following [a change](http://momentjs.com/docs/#/i18n/changing-locale/) introduced in moment v2.8.1)
-- Bugfix: changing the locale emitted a deprecation warning (see [#76](https://github.com/urish/angular-moment/issues/76) for details).
-
-## 0.8.1 - 2014-09-01
-- Support moment.js v2.8.0. See [here](https://gist.github.com/ichernev/ac3899324a5fa6c8c9b4) for changelog.
-- Support moment-timezone v0.2.1. See [here](https://github.com/moment/moment-timezone/blob/develop/changelog.md#021-2014-08-02) for changelog.
-- Bugfix: `updateTime()` is called too often for future dates ([#73](https://github.com/urish/angular-moment/issues/73))
-
-## 0.8.0 - 2014-07-26
-- Generate source map for the minified version ([#50](https://github.com/urish/angular-moment/issues/50))
-- Add support HTML `