diff --git a/.gitignore b/.gitignore index 57e2baf042..48c5934377 100644 --- a/.gitignore +++ b/.gitignore @@ -108,6 +108,7 @@ reports *.results local/ *.mo +requirements/vendor # AWX python libs populated by requirements.txt awx/lib/.deps_built diff --git a/Makefile b/Makefile index 836f96da9c..3d378c60c9 100644 --- a/Makefile +++ b/Makefile @@ -176,11 +176,11 @@ UI_RELEASE_FLAG_FILE = awx/ui/.release_built .PHONY: clean clean-tmp clean-venv rebase push requirements requirements_dev \ develop refresh adduser migrate dbchange dbshell runserver celeryd \ - receiver test test_unit test_coverage coverage_html test_jenkins dev_build \ - release_build release_clean sdist rpmtar mock-rpm mock-srpm rpm-sign \ - deb deb-src debian debsign pbuilder reprepro setup_tarball \ - virtualbox-ovf virtualbox-centos-7 virtualbox-centos-6 \ - clean-bundle setup_bundle_tarball \ + receiver test test_unit test_ansible test_coverage coverage_html \ + test_jenkins dev_build release_build release_clean sdist rpmtar mock-rpm \ + mock-srpm rpm-sign deb deb-src debian debsign pbuilder \ + reprepro setup_tarball virtualbox-ovf virtualbox-centos-7 \ + virtualbox-centos-6 clean-bundle setup_bundle_tarball \ ui-docker-machine ui-docker ui-release ui-devel \ ui-test ui-deps ui-test-ci ui-test-saucelabs jlaska @@ -215,6 +215,7 @@ clean-bundle: clean-ui: rm -rf awx/ui/static/ rm -rf awx/ui/node_modules/ + rm -rf awx/ui/coverage/ rm -f $(UI_DEPS_FLAG_FILE) rm -f $(UI_RELEASE_FLAG_FILE) @@ -224,15 +225,18 @@ clean-tmp: clean-venv: rm -rf venv/ +clean-dist: + rm -rf dist + # Remove temporary build files, compiled Python files. -clean: clean-rpm clean-deb clean-ui clean-tar clean-packer clean-bundle +clean: clean-rpm clean-deb clean-ui clean-tar clean-packer clean-bundle clean-dist rm -rf awx/public rm -rf awx/lib/site-packages - rm -rf dist/* rm -rf awx/job_status rm -rf awx/job_output rm -rf reports rm -f awx/awx_test.sqlite3 + rm -rf requirements/vendor rm -rf tmp mkdir tmp rm -rf build $(NAME)-$(VERSION) *.egg-info @@ -263,8 +267,8 @@ virtualenv_ansible: fi; \ if [ ! -d "$(VENV_BASE)/ansible" ]; then \ virtualenv --system-site-packages --setuptools $(VENV_BASE)/ansible && \ - $(VENV_BASE)/ansible/bin/pip install -I setuptools==23.0.0 && \ - $(VENV_BASE)/ansible/bin/pip install -I pip==8.1.2; \ + $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed setuptools==23.0.0 && \ + $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed pip==8.1.2; \ fi; \ fi @@ -275,42 +279,40 @@ virtualenv_tower: fi; \ if [ ! -d "$(VENV_BASE)/tower" ]; then \ virtualenv --system-site-packages --setuptools $(VENV_BASE)/tower && \ - $(VENV_BASE)/tower/bin/pip install -I setuptools==23.0.0 && \ - $(VENV_BASE)/tower/bin/pip install -I pip==8.1.2; \ + $(VENV_BASE)/tower/bin/pip install $(PIP_OPTIONS) --ignore-installed setuptools==23.0.0 && \ + $(VENV_BASE)/tower/bin/pip install $(PIP_OPTIONS) --ignore-installed pip==8.1.2; \ fi; \ fi requirements_ansible: virtualenv_ansible - if [ "$(VENV_BASE)" ]; then \ - . 
$(VENV_BASE)/ansible/bin/activate; \ - $(VENV_BASE)/ansible/bin/pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ;\ - $(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt; \ + if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \ + cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \ else \ - pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements_ansible.txt ; \ - pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt; \ + cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \ + fi + $(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt + +requirements_ansible_dev: + if [ "$(VENV_BASE)" ]; then \ + $(VENV_BASE)/ansible/bin/pip install pytest; \ fi # Install third-party requirements needed for Tower's environment. requirements_tower: virtualenv_tower - if [ "$(VENV_BASE)" ]; then \ - . $(VENV_BASE)/tower/bin/activate; \ - $(VENV_BASE)/tower/bin/pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ;\ - $(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt; \ + if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \ + cat requirements/requirements.txt requirements/requirements_local.txt | $(VENV_BASE)/tower/bin/pip install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \ else \ - pip install --ignore-installed --no-binary $(SRC_ONLY_PKGS) -r requirements/requirements.txt ; \ - pip uninstall --yes -r requirements/requirements_tower_uninstall.txt; \ + cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/tower/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \ fi + $(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt requirements_tower_dev: - if [ "$(VENV_BASE)" ]; then \ - . $(VENV_BASE)/tower/bin/activate; \ - $(VENV_BASE)/tower/bin/pip install -r requirements/requirements_dev.txt; \ - $(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_dev_uninstall.txt; \ - fi + $(VENV_BASE)/tower/bin/pip install -r requirements/requirements_dev.txt + $(VENV_BASE)/tower/bin/pip uninstall --yes -r requirements/requirements_dev_uninstall.txt requirements: requirements_ansible requirements_tower -requirements_dev: requirements requirements_tower_dev +requirements_dev: requirements requirements_tower_dev requirements_ansible_dev requirements_test: requirements @@ -481,7 +483,7 @@ check: flake8 pep8 # pyflakes pylint TEST_DIRS ?= awx/main/tests awx/conf/tests awx/sso/tests # Run all API unit tests. -test: +test: test_ansible @if [ "$(VENV_BASE)" ]; then \ . $(VENV_BASE)/tower/bin/activate; \ fi; \ @@ -493,6 +495,12 @@ test_unit: fi; \ py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit +test_ansible: + @if [ "$(VENV_BASE)" ]; then \ + . $(VENV_BASE)/ansible/bin/activate; \ + fi; \ + py.test awx/lib/tests -c awx/lib/tests/pytest.ini + # Run all API unit tests with coverage enabled. 
test_coverage: @if [ "$(VENV_BASE)" ]; then \ @@ -608,7 +616,7 @@ ui-test-ci: $(UI_DEPS_FLAG_FILE) testjs_ci: echo "Update UI unittests later" #ui-test-ci -jshint: +jshint: $(UI_DEPS_FLAG_FILE) $(NPM_BIN) run --prefix awx/ui jshint ui-test-saucelabs: $(UI_DEPS_FLAG_FILE) @@ -693,24 +701,68 @@ setup_bundle_tarball: setup-bundle-build setup-bundle-build/$(OFFLINE_TAR_FILE) rpm-build: mkdir -p $@ -rpm-build/$(SDIST_TAR_FILE): rpm-build dist/$(SDIST_TAR_FILE) +rpm-build/$(SDIST_TAR_FILE): rpm-build dist/$(SDIST_TAR_FILE) tar-build/$(SETUP_TAR_FILE) cp packaging/rpm/$(NAME).spec rpm-build/ cp packaging/rpm/tower.te rpm-build/ cp packaging/rpm/tower.fc rpm-build/ cp packaging/rpm/$(NAME).sysconfig rpm-build/ cp packaging/remove_tower_source.py rpm-build/ cp packaging/bytecompile.sh rpm-build/ + cp tar-build/$(SETUP_TAR_FILE) rpm-build/ if [ "$(OFFICIAL)" != "yes" ] ; then \ (cd dist/ && tar zxf $(SDIST_TAR_FILE)) ; \ (cd dist/ && mv $(NAME)-$(VERSION)-$(BUILD) $(NAME)-$(VERSION)) ; \ (cd dist/ && tar czf ../rpm-build/$(SDIST_TAR_FILE) $(NAME)-$(VERSION)) ; \ ln -sf $(SDIST_TAR_FILE) rpm-build/$(NAME)-$(VERSION).tar.gz ; \ + (cd tar-build/ && tar zxf $(SETUP_TAR_FILE)) ; \ + (cd tar-build/ && mv $(NAME)-setup-$(VERSION)-$(BUILD) $(NAME)-setup-$(VERSION)) ; \ + (cd tar-build/ && tar czf ../rpm-build/$(SETUP_TAR_FILE) $(NAME)-setup-$(VERSION)) ; \ + ln -sf $(SETUP_TAR_FILE) rpm-build/$(NAME)-setup-$(VERSION).tar.gz ; \ else \ cp -a dist/$(SDIST_TAR_FILE) rpm-build/ ; \ fi rpmtar: sdist rpm-build/$(SDIST_TAR_FILE) +brewrpmtar: rpm-build/python-deps.tar.gz requirements/requirements_local.txt requirements/requirements_ansible_local.txt rpmtar + +rpm-build/python-deps.tar.gz: requirements/vendor rpm-build + tar czf rpm-build/python-deps.tar.gz requirements/vendor + +requirements/vendor: + cat requirements/requirements.txt requirements/requirements_git.txt | pip download \ + --no-binary=:all: \ + --requirement=/dev/stdin \ + --dest=$@ \ + --exists-action=i + + cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | pip download \ + --no-binary=:all: \ + --requirement=/dev/stdin \ + --dest=$@ \ + --exists-action=i + + pip download \ + --no-binary=:all: \ + --requirement=requirements/requirements_setup_requires.txt \ + --dest=$@ \ + --exists-action=i + +requirements/requirements_local.txt: + @echo "This is going to take a while..." 
+ pip download \ + --requirement=requirements/requirements_git.txt \ + --no-deps \ + --exists-action=w \ + --dest=requirements/vendor 2>/dev/null | sed -n 's/^\s*Saved\s*//p' > $@ + +requirements/requirements_ansible_local.txt: + pip download \ + --requirement=requirements/requirements_ansible_git.txt \ + --no-deps \ + --exists-action=w \ + --dest=requirements/vendor 2>/dev/null | sed -n 's/^\s*Saved\s*//p' > $@ + rpm-build/$(RPM_NVR).src.rpm: /etc/mock/$(MOCK_CFG).cfg $(MOCK_BIN) -r $(MOCK_CFG) --resultdir rpm-build --buildsrpm --spec rpm-build/$(NAME).spec --sources rpm-build \ --define "tower_version $(VERSION)" --define "tower_release $(RELEASE)" $(SCL_DEFINES) @@ -721,6 +773,8 @@ mock-srpm: rpmtar rpm-build/$(RPM_NVR).src.rpm @echo rpm-build/$(RPM_NVR).src.rpm @echo "#############################################" +brew-srpm: brewrpmtar mock-srpm + rpm-build/$(RPM_NVR).$(RPM_ARCH).rpm: rpm-build/$(RPM_NVR).src.rpm $(MOCK_BIN) -r $(MOCK_CFG) --resultdir rpm-build --rebuild rpm-build/$(RPM_NVR).src.rpm \ --define "tower_version $(VERSION)" --define "tower_release $(RELEASE)" $(SCL_DEFINES) @@ -829,7 +883,7 @@ amazon-ebs: cd packaging/packer && $(PACKER) build -only $@ $(PACKER_BUILD_OPTS) -var "aws_instance_count=$(AWS_INSTANCE_COUNT)" -var "product_version=$(VERSION)" packer-$(NAME).json # Vagrant box using virtualbox provider -vagrant-virtualbox: packaging/packer/ansible-tower-$(VERSION)-virtualbox.box +vagrant-virtualbox: packaging/packer/ansible-tower-$(VERSION)-virtualbox.box tar-build/$(SETUP_TAR_FILE) packaging/packer/ansible-tower-$(VERSION)-virtualbox.box: packaging/packer/output-virtualbox-iso/centos-7.ovf cd packaging/packer && $(PACKER) build -only virtualbox-ovf $(PACKER_BUILD_OPTS) -var "aws_instance_count=$(AWS_INSTANCE_COUNT)" -var "product_version=$(VERSION)" packer-$(NAME).json @@ -840,7 +894,7 @@ packaging/packer/output-virtualbox-iso/centos-7.ovf: virtualbox-iso: packaging/packer/output-virtualbox-iso/centos-7.ovf # Vagrant box using VMware provider -vagrant-vmware: packaging/packer/ansible-tower-$(VERSION)-vmware.box +vagrant-vmware: packaging/packer/ansible-tower-$(VERSION)-vmware.box tar-build/$(SETUP_TAR_FILE) packaging/packer/output-vmware-iso/centos-7.vmx: cd packaging/packer && $(PACKER) build -only vmware-iso packer-centos-7.json diff --git a/awx/__init__.py b/awx/__init__.py index 48f9219933..30256ec453 100644 --- a/awx/__init__.py +++ b/awx/__init__.py @@ -5,7 +5,7 @@ import os import sys import warnings -__version__ = '3.1.1' +__version__ = '3.1.2' __all__ = ['__version__'] diff --git a/awx/api/filters.py b/awx/api/filters.py index e5c9c39264..6d5c7920c5 100644 --- a/awx/api/filters.py +++ b/awx/api/filters.py @@ -3,6 +3,7 @@ # Python import re +import json # Django from django.core.exceptions import FieldError, ValidationError @@ -291,7 +292,7 @@ class FieldLookupBackend(BaseFilterBackend): except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e: raise ParseError(e.args[0]) except ValidationError as e: - raise ParseError(e.messages) + raise ParseError(json.dumps(e.messages, ensure_ascii=False)) class OrderByBackend(BaseFilterBackend): @@ -310,6 +311,8 @@ class OrderByBackend(BaseFilterBackend): else: order_by = (value,) if order_by: + order_by = self._strip_sensitive_model_fields(queryset.model, order_by) + # Special handling of the type field for ordering. 
In this # case, we're not sorting exactly on the type field, but # given the limited number of views with multiple types, @@ -332,3 +335,16 @@ class OrderByBackend(BaseFilterBackend): except FieldError as e: # Return a 400 for invalid field names. raise ParseError(*e.args) + + def _strip_sensitive_model_fields(self, model, order_by): + for field_name in order_by: + # strip off the negation prefix `-` if it exists + _field_name = field_name.split('-')[-1] + try: + # if the field name is encrypted/sensitive, don't sort on it + if _field_name in getattr(model, 'PASSWORD_FIELDS', ()) or \ + getattr(model._meta.get_field(_field_name), '__prevent_search__', False): + raise ParseError(_('cannot order by field %s') % _field_name) + except FieldDoesNotExist: + pass + yield field_name diff --git a/awx/api/views.py b/awx/api/views.py index e69dc57fd4..33ef14826a 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -2678,7 +2678,8 @@ class JobTemplateCallback(GenericAPIView): def post(self, request, *args, **kwargs): extra_vars = None - if request.content_type == "application/json": + # Be careful here: content_type can look like '; charset=blar' + if request.content_type.startswith("application/json"): extra_vars = request.data.get("extra_vars", None) # Permission class should have already validated host_config_key. job_template = self.get_object() @@ -2727,14 +2728,14 @@ class JobTemplateCallback(GenericAPIView): return Response(data, status=status.HTTP_400_BAD_REQUEST) # Everything is fine; actually create the job. + kv = {"limit": limit, "launch_type": 'callback'} + if extra_vars is not None and job_template.ask_variables_on_launch: + kv['extra_vars'] = callback_filter_out_ansible_extra_vars(extra_vars) with transaction.atomic(): - job = job_template.create_job(limit=limit, launch_type='callback') + job = job_template.create_job(**kv) # Send a signal to celery that the job should be started. - kv = {"inventory_sources_already_updated": inventory_sources_already_updated} - if extra_vars is not None: - kv['extra_vars'] = callback_filter_out_ansible_extra_vars(extra_vars) - result = job.signal_start(**kv) + result = job.signal_start(inventory_sources_already_updated=inventory_sources_already_updated) if not result: data = dict(msg=_('Error starting job!')) return Response(data, status=status.HTTP_400_BAD_REQUEST) @@ -3503,6 +3504,7 @@ class BaseJobEventsList(SubListAPIView): parent_model = None # Subclasses must define this attribute. 
relationship = 'job_events' view_name = _('Job Events List') + search_fields = ('stdout',) def finalize_response(self, request, response, *args, **kwargs): response['X-UI-Max-Events'] = settings.RECOMMENDED_MAX_EVENTS_DISPLAY_HEADER @@ -3664,7 +3666,7 @@ class AdHocCommandRelaunch(GenericAPIView): data = {} for field in ('job_type', 'inventory_id', 'limit', 'credential_id', 'module_name', 'module_args', 'forks', 'verbosity', - 'become_enabled'): + 'extra_vars', 'become_enabled'): if field.endswith('_id'): data[field[:-3]] = getattr(obj, field) else: diff --git a/awx/lib/tests/__init__.py b/awx/lib/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/tests/pytest.ini b/awx/lib/tests/pytest.ini new file mode 100644 index 0000000000..2c2dad06eb --- /dev/null +++ b/awx/lib/tests/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +addopts = -v diff --git a/awx/lib/tests/test_display_callback.py b/awx/lib/tests/test_display_callback.py new file mode 100644 index 0000000000..f84ab0df4f --- /dev/null +++ b/awx/lib/tests/test_display_callback.py @@ -0,0 +1,213 @@ +from collections import OrderedDict +import json +import mock +import os +import sys + +import pytest + +# ansible uses `ANSIBLE_CALLBACK_PLUGINS` and `ANSIBLE_STDOUT_CALLBACK` to +# discover callback plugins; `ANSIBLE_CALLBACK_PLUGINS` is a list of paths to +# search for a plugin implementation (which should be named `CallbackModule`) +# +# this code modifies the Python path to make our +# `awx.lib.tower_display_callback` callback importable (because `awx.lib` +# itself is not a package) +# +# we use the `tower_display_callback` imports below within this file, but +# Ansible also uses them when it discovers this file in +# `ANSIBLE_CALLBACK_PLUGINS` +CALLBACK = os.path.splitext(os.path.basename(__file__))[0] +PLUGINS = os.path.dirname(__file__) +with mock.patch.dict(os.environ, {'ANSIBLE_STDOUT_CALLBACK': CALLBACK, + 'ANSIBLE_CALLBACK_PLUGINS': PLUGINS}): + from ansible.cli.playbook import PlaybookCLI + from ansible.executor.playbook_executor import PlaybookExecutor + from ansible.inventory import Inventory + from ansible.parsing.dataloader import DataLoader + from ansible.vars import VariableManager + + # Add awx/lib to sys.path so we can use the plugin + path = os.path.abspath(os.path.join(PLUGINS, '..', '..')) + if path not in sys.path: + sys.path.insert(0, path) + + from tower_display_callback import TowerDefaultCallbackModule as CallbackModule # noqa + from tower_display_callback.events import event_context # noqa + + +@pytest.fixture() +def cache(request): + class Cache(OrderedDict): + def set(self, key, value): + self[key] = value + local_cache = Cache() + patch = mock.patch.object(event_context, 'cache', local_cache) + patch.start() + request.addfinalizer(patch.stop) + return local_cache + + +@pytest.fixture() +def executor(tmpdir_factory, request): + playbooks = request.node.callspec.params.get('playbook') + playbook_files = [] + for name, playbook in playbooks.items(): + filename = str(tmpdir_factory.mktemp('data').join(name)) + with open(filename, 'w') as f: + f.write(playbook) + playbook_files.append(filename) + + cli = PlaybookCLI(['', 'playbook.yml']) + cli.parse() + options = cli.parser.parse_args(['-v'])[0] + loader = DataLoader() + variable_manager = VariableManager() + inventory = Inventory(loader=loader, variable_manager=variable_manager, + host_list=['localhost']) + variable_manager.set_inventory(inventory) + + return PlaybookExecutor(playbooks=playbook_files, inventory=inventory, + 
variable_manager=variable_manager, loader=loader, + options=options, passwords={}) + + +@pytest.mark.parametrize('event', {'playbook_on_start', + 'playbook_on_play_start', + 'playbook_on_task_start', 'runner_on_ok', + 'playbook_on_stats'}) +@pytest.mark.parametrize('playbook', [ +{'helloworld.yml': ''' +- name: Hello World Sample + connection: local + hosts: all + gather_facts: no + tasks: + - name: Hello Message + debug: + msg: "Hello World!" +'''} # noqa +]) +def test_callback_plugin_receives_events(executor, cache, event, playbook): + executor.run() + assert len(cache) + assert event in [task['event'] for task in cache.values()] + + +@pytest.mark.parametrize('playbook', [ +{'no_log_on_ok.yml': ''' +- name: args should not be logged when task-level no_log is set + connection: local + hosts: all + gather_facts: no + tasks: + - shell: echo "SENSITIVE" + no_log: true +'''}, # noqa +{'no_log_on_fail.yml': ''' +- name: failed args should not be logged when task-level no_log is set + connection: local + hosts: all + gather_facts: no + tasks: + - shell: echo "SENSITIVE" + no_log: true + failed_when: true + ignore_errors: true +'''}, # noqa +{'no_log_on_skip.yml': ''' +- name: skipped task args should be suppressed with no_log + connection: local + hosts: all + gather_facts: no + tasks: + - shell: echo "SENSITIVE" + no_log: true + when: false +'''}, # noqa +{'no_log_on_play.yml': ''' +- name: args should not be logged when play-level no_log set + connection: local + hosts: all + gather_facts: no + no_log: true + tasks: + - shell: echo "SENSITIVE" +'''}, # noqa +{'async_no_log.yml': ''' +- name: async task args should suppressed with no_log + connection: local + hosts: all + gather_facts: no + no_log: true + tasks: + - async: 10 + poll: 1 + shell: echo "SENSITIVE" + no_log: true +'''}, # noqa +{'with_items.yml': ''' +- name: with_items tasks should be suppressed with no_log + connection: local + hosts: all + gather_facts: no + tasks: + - shell: echo {{ item }} + no_log: true + with_items: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ] + when: item != "SENSITIVE-SKIPPED" + failed_when: item == "SENSITIVE-FAILED" + ignore_errors: yes +'''}, # noqa +]) +def test_callback_plugin_no_log_filters(executor, cache, playbook): + executor.run() + assert len(cache) + assert 'SENSITIVE' not in json.dumps(cache.items()) + + +@pytest.mark.parametrize('playbook', [ +{'no_log_on_ok.yml': ''' +- name: args should not be logged when task-level no_log is set + connection: local + hosts: all + gather_facts: no + tasks: + - shell: echo "SENSITIVE" + - shell: echo "PRIVATE" + no_log: true +'''}, # noqa +]) +def test_callback_plugin_task_args_leak(executor, cache, playbook): + executor.run() + events = cache.values() + assert events[0]['event'] == 'playbook_on_start' + assert events[1]['event'] == 'playbook_on_play_start' + + # task 1 + assert events[2]['event'] == 'playbook_on_task_start' + assert 'SENSITIVE' in events[2]['event_data']['task_args'] + assert events[3]['event'] == 'runner_on_ok' + assert 'SENSITIVE' in events[3]['event_data']['task_args'] + + # task 2 no_log=True + assert events[4]['event'] == 'playbook_on_task_start' + assert events[4]['event_data']['task_args'] == "the output has been hidden due to the fact that 'no_log: true' was specified for this result" # noqa + assert events[5]['event'] == 'runner_on_ok' + assert events[5]['event_data']['task_args'] == "the output has been hidden due to the fact that 'no_log: true' was specified for this result" # noqa + + 
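The no_log tests above exercise the censoring added in `awx/lib/tower_display_callback/module.py`: any runner result flagged with `_ansible_no_log` (including per-item results from a `with_items` loop) is replaced with a censored placeholder before the event is recorded, and `task_args` gets the same placeholder when `no_log` is set on the task. A minimal sketch of that pattern as a standalone callback plugin is shown below; the class name, `CALLBACK_NAME`, and the use of `self._display` in place of Tower's event persistence are illustrative assumptions, while the `CENSORED` text and the `_ansible_no_log` / `results` keys come from the diff itself.

from ansible.plugins.callback import CallbackBase

CENSORED = ("the output has been hidden due to the fact that "
            "'no_log: true' was specified for this result")


class CallbackModule(CallbackBase):
    # Sketch only: censor no_log results before emitting an event.
    CALLBACK_VERSION = 2.0
    CALLBACK_NAME = 'censor_sketch'

    def _censor(self, res):
        # Replace the whole result when Ansible marked it no_log, then do the
        # same for each item produced by a loop (with_items) task.
        if res.get('_ansible_no_log', False):
            return {'censored': CENSORED}
        for i, item in enumerate(res.get('results', [])):
            if isinstance(item, dict) and item.get('_ansible_no_log', False):
                res['results'][i] = {'censored': CENSORED}
        return res

    def v2_runner_on_ok(self, result):
        event_data = dict(
            host=result._host.get_name(),
            task=result._task.get_name(),
            res=self._censor(result._result),
        )
        # A real plugin would persist event_data; displaying it keeps the
        # sketch self-contained.
        self._display.display(str(event_data))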
+@pytest.mark.parametrize('playbook', [ +{'strip_env_vars.yml': ''' +- name: sensitive environment variables should be stripped from events + connection: local + hosts: all + tasks: + - shell: echo "Hello, World!" +'''}, # noqa +]) +def test_callback_plugin_strips_task_environ_variables(executor, cache, playbook): + executor.run() + assert len(cache) + for event in cache.values(): + assert os.environ['PATH'] not in json.dumps(event) diff --git a/awx/lib/tower_display_callback/module.py b/awx/lib/tower_display_callback/module.py index c553b08853..76dcb5be7f 100644 --- a/awx/lib/tower_display_callback/module.py +++ b/awx/lib/tower_display_callback/module.py @@ -30,6 +30,8 @@ from ansible.plugins.callback.default import CallbackModule as DefaultCallbackMo from .events import event_context from .minimal import CallbackModule as MinimalCallbackModule +CENSORED = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" # noqa + class BaseCallbackModule(CallbackBase): ''' @@ -55,22 +57,6 @@ class BaseCallbackModule(CallbackBase): 'playbook_on_no_hosts_remaining', ] - CENSOR_FIELD_WHITELIST = [ - 'msg', - 'failed', - 'changed', - 'results', - 'start', - 'end', - 'delta', - 'cmd', - '_ansible_no_log', - 'rc', - 'failed_when_result', - 'skipped', - 'skip_reason', - ] - def __init__(self): super(BaseCallbackModule, self).__init__() self.task_uuids = set() @@ -85,6 +71,13 @@ class BaseCallbackModule(CallbackBase): else: task = None + if event_data.get('res'): + if event_data['res'].get('_ansible_no_log', False): + event_data['res'] = {'censored': CENSORED} + for i, item in enumerate(event_data['res'].get('results', [])): + if event_data['res']['results'][i].get('_ansible_no_log', False): + event_data['res']['results'][i] = {'censored': CENSORED} + with event_context.display_lock: try: event_context.add_local(event=event, **event_data) @@ -132,7 +125,9 @@ class BaseCallbackModule(CallbackBase): task_ctx['task_path'] = task.get_path() except AttributeError: pass - if not task.no_log: + if task.no_log: + task_ctx['task_args'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" + else: task_args = ', '.join(('%s=%s' % a for a in task.args.items())) task_ctx['task_args'] = task_args if getattr(task, '_role', None): @@ -304,6 +299,12 @@ class BaseCallbackModule(CallbackBase): def v2_runner_on_ok(self, result): # FIXME: Display detailed results or not based on verbosity. 
+ + # strip environment vars from the job event; it already exists on the + # job and sensitive values are filtered there + if result._task.get_name() == 'setup': + result._result.get('ansible_facts', {}).pop('ansible_env', None) + event_data = dict( host=result._host.get_name(), remote_addr=result._host.address, diff --git a/awx/main/conf.py b/awx/main/conf.py index 098b84f639..674c7fabee 100644 --- a/awx/main/conf.py +++ b/awx/main/conf.py @@ -242,9 +242,11 @@ register( field_class=fields.IntegerField, allow_null=True, label=_('Logging Aggregator Port'), - help_text=_('Port on Logging Aggregator to send logs to (if required).'), + help_text=_('Port on Logging Aggregator to send logs to (if required and not' + ' provided in Logging Aggregator).'), category=_('Logging'), category_slug='logging', + required=False ) register( 'LOG_AGGREGATOR_TYPE', diff --git a/awx/main/migrations/0036_v311_insights.py b/awx/main/migrations/0036_v311_insights.py new file mode 100644 index 0000000000..57baff9b5d --- /dev/null +++ b/awx/main/migrations/0036_v311_insights.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0035_v310_remove_tower_settings'), + ] + + operations = [ + migrations.AlterField( + model_name='project', + name='scm_type', + field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'), + ), + migrations.AlterField( + model_name='projectupdate', + name='scm_type', + field=models.CharField(default=b'', choices=[(b'', 'Manual'), (b'git', 'Git'), (b'hg', 'Mercurial'), (b'svn', 'Subversion'), (b'insights', 'Red Hat Insights')], max_length=8, blank=True, help_text='Specifies the source control system used to store the project.', verbose_name='SCM Type'), + ), + ] diff --git a/awx/main/models/ad_hoc_commands.py b/awx/main/models/ad_hoc_commands.py index 3636aa8e0a..d6c97e6f86 100644 --- a/awx/main/models/ad_hoc_commands.py +++ b/awx/main/models/ad_hoc_commands.py @@ -190,7 +190,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin): data = {} for field in ('job_type', 'inventory_id', 'limit', 'credential_id', 'module_name', 'module_args', 'forks', 'verbosity', - 'become_enabled'): + 'extra_vars', 'become_enabled'): data[field] = getattr(self, field) return AdHocCommand.objects.create(**data) diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index 387277c5e9..81ba4fd50b 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -1277,10 +1277,20 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin): def get_notification_friendly_name(self): return "Inventory Update" - def cancel(self): - res = super(InventoryUpdate, self).cancel() + def _build_job_explanation(self): + if not self.job_explanation: + return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \ + (self.model_to_str(), self.name, self.id) + return None + + def get_dependent_jobs(self): + return Job.objects.filter(dependent_jobs__in=[self.id]) + + def cancel(self, job_explanation=None): + + res = super(InventoryUpdate, self).cancel(job_explanation=job_explanation) if res: - map(lambda x: x.cancel(), Job.objects.filter(dependent_jobs__in=[self.id])) + 
map(lambda x: x.cancel(job_explanation=self._build_job_explanation()), self.get_dependent_jobs()) return res diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index 00a68c69ca..388be47d17 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -310,9 +310,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour elif self.variables_needed_to_start: variables_needed = True prompting_needed = False - for value in self._ask_for_vars_dict().values(): - if value: - prompting_needed = True + # The behavior of provisioning callback should mimic + # that of job template launch, so prompting_needed should + # not block a provisioning callback from creating/launching jobs. + if callback_extra_vars is None: + for value in self._ask_for_vars_dict().values(): + if value: + prompting_needed = True return (not prompting_needed and not self.passwords_needed_to_start and not variables_needed) @@ -633,10 +637,10 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin): Canceling a job also cancels the implicit project update with launch_type run. ''' - def cancel(self): - res = super(Job, self).cancel() + def cancel(self, job_explanation=None): + res = super(Job, self).cancel(job_explanation=job_explanation) if self.project_update: - self.project_update.cancel() + self.project_update.cancel(job_explanation=job_explanation) return res @@ -1139,7 +1143,7 @@ class JobEvent(CreatedModifiedModel): # Save artifact data to parent job (if provided). if artifact_dict: if event_data and isinstance(event_data, dict): - # Note: Core has not added support for marking artifacts as + # Note: Core has not added support for marking artifacts as # sensitive yet. Going forward, core will not use # _ansible_no_log to denote sensitive set_stats calls. # Instead, they plan to add a flag outside of the traditional diff --git a/awx/main/models/mixins.py b/awx/main/models/mixins.py index 3ae26eaf71..e818b5b648 100644 --- a/awx/main/models/mixins.py +++ b/awx/main/models/mixins.py @@ -130,13 +130,18 @@ class SurveyJobTemplateMixin(models.Model): for survey_element in self.survey_spec.get("spec", []): default = survey_element.get('default') variable_key = survey_element.get('variable') + if survey_element.get('type') == 'password': if variable_key in kwargs_extra_vars and default: kw_value = kwargs_extra_vars[variable_key] if kw_value.startswith('$encrypted$') and kw_value != default: kwargs_extra_vars[variable_key] = default + if default is not None: - extra_vars[variable_key] = default + data = {variable_key: default} + errors = self._survey_element_validation(survey_element, data) + if not errors: + extra_vars[variable_key] = default # Overwrite job template extra vars with explicit job extra vars # and add on job extra vars @@ -144,6 +149,65 @@ class SurveyJobTemplateMixin(models.Model): kwargs['extra_vars'] = json.dumps(extra_vars) return kwargs + def _survey_element_validation(self, survey_element, data): + errors = [] + if survey_element['variable'] not in data and survey_element['required']: + errors.append("'%s' value missing" % survey_element['variable']) + elif survey_element['type'] in ["textarea", "text", "password"]: + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) not in (str, unicode): + errors.append("Value %s for '%s' expected to be a string." 
% (data[survey_element['variable']], + survey_element['variable'])) + return errors + if 'min' in survey_element and survey_element['min'] not in ["", None] and len(data[survey_element['variable']]) < int(survey_element['min']): + errors.append("'%s' value %s is too small (length is %s must be at least %s)." % + (survey_element['variable'], data[survey_element['variable']], len(data[survey_element['variable']]), survey_element['min'])) + if 'max' in survey_element and survey_element['max'] not in ["", None] and len(data[survey_element['variable']]) > int(survey_element['max']): + errors.append("'%s' value %s is too large (must be no more than %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) + elif survey_element['type'] == 'integer': + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) != int: + errors.append("Value %s for '%s' expected to be an integer." % (data[survey_element['variable']], + survey_element['variable'])) + return errors + if 'min' in survey_element and survey_element['min'] not in ["", None] and survey_element['variable'] in data and \ + data[survey_element['variable']] < int(survey_element['min']): + errors.append("'%s' value %s is too small (must be at least %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) + if 'max' in survey_element and survey_element['max'] not in ["", None] and survey_element['variable'] in data and \ + data[survey_element['variable']] > int(survey_element['max']): + errors.append("'%s' value %s is too large (must be no more than %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) + elif survey_element['type'] == 'float': + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) not in (float, int): + errors.append("Value %s for '%s' expected to be a numeric type." % (data[survey_element['variable']], + survey_element['variable'])) + return errors + if 'min' in survey_element and survey_element['min'] not in ["", None] and data[survey_element['variable']] < float(survey_element['min']): + errors.append("'%s' value %s is too small (must be at least %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) + if 'max' in survey_element and survey_element['max'] not in ["", None] and data[survey_element['variable']] > float(survey_element['max']): + errors.append("'%s' value %s is too large (must be no more than %s)." % + (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) + elif survey_element['type'] == 'multiselect': + if survey_element['variable'] in data: + if type(data[survey_element['variable']]) != list: + errors.append("'%s' value is expected to be a list." % survey_element['variable']) + else: + for val in data[survey_element['variable']]: + if val not in survey_element['choices']: + errors.append("Value %s for '%s' expected to be one of %s." % (val, survey_element['variable'], + survey_element['choices'])) + elif survey_element['type'] == 'multiplechoice': + if survey_element['variable'] in data: + if data[survey_element['variable']] not in survey_element['choices']: + errors.append("Value %s for '%s' expected to be one of %s." 
% (data[survey_element['variable']], + survey_element['variable'], + survey_element['choices'])) + return errors + def survey_variable_validation(self, data): errors = [] if not self.survey_enabled: @@ -153,62 +217,7 @@ class SurveyJobTemplateMixin(models.Model): if 'description' not in self.survey_spec: errors.append("'description' missing from survey spec.") for survey_element in self.survey_spec.get("spec", []): - if survey_element['variable'] not in data and \ - survey_element['required']: - errors.append("'%s' value missing" % survey_element['variable']) - elif survey_element['type'] in ["textarea", "text", "password"]: - if survey_element['variable'] in data: - if type(data[survey_element['variable']]) not in (str, unicode): - errors.append("Value %s for '%s' expected to be a string." % (data[survey_element['variable']], - survey_element['variable'])) - continue - if 'min' in survey_element and survey_element['min'] not in ["", None] and len(data[survey_element['variable']]) < int(survey_element['min']): - errors.append("'%s' value %s is too small (length is %s must be at least %s)." % - (survey_element['variable'], data[survey_element['variable']], len(data[survey_element['variable']]), survey_element['min'])) - if 'max' in survey_element and survey_element['max'] not in ["", None] and len(data[survey_element['variable']]) > int(survey_element['max']): - errors.append("'%s' value %s is too large (must be no more than %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) - elif survey_element['type'] == 'integer': - if survey_element['variable'] in data: - if type(data[survey_element['variable']]) != int: - errors.append("Value %s for '%s' expected to be an integer." % (data[survey_element['variable']], - survey_element['variable'])) - continue - if 'min' in survey_element and survey_element['min'] not in ["", None] and survey_element['variable'] in data and \ - data[survey_element['variable']] < int(survey_element['min']): - errors.append("'%s' value %s is too small (must be at least %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) - if 'max' in survey_element and survey_element['max'] not in ["", None] and survey_element['variable'] in data and \ - data[survey_element['variable']] > int(survey_element['max']): - errors.append("'%s' value %s is too large (must be no more than %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) - elif survey_element['type'] == 'float': - if survey_element['variable'] in data: - if type(data[survey_element['variable']]) not in (float, int): - errors.append("Value %s for '%s' expected to be a numeric type." % (data[survey_element['variable']], - survey_element['variable'])) - continue - if 'min' in survey_element and survey_element['min'] not in ["", None] and data[survey_element['variable']] < float(survey_element['min']): - errors.append("'%s' value %s is too small (must be at least %s)." % - (survey_element['variable'], data[survey_element['variable']], survey_element['min'])) - if 'max' in survey_element and survey_element['max'] not in ["", None] and data[survey_element['variable']] > float(survey_element['max']): - errors.append("'%s' value %s is too large (must be no more than %s)." 
% - (survey_element['variable'], data[survey_element['variable']], survey_element['max'])) - elif survey_element['type'] == 'multiselect': - if survey_element['variable'] in data: - if type(data[survey_element['variable']]) != list: - errors.append("'%s' value is expected to be a list." % survey_element['variable']) - else: - for val in data[survey_element['variable']]: - if val not in survey_element['choices']: - errors.append("Value %s for '%s' expected to be one of %s." % (val, survey_element['variable'], - survey_element['choices'])) - elif survey_element['type'] == 'multiplechoice': - if survey_element['variable'] in data: - if data[survey_element['variable']] not in survey_element['choices']: - errors.append("Value %s for '%s' expected to be one of %s." % (data[survey_element['variable']], - survey_element['variable'], - survey_element['choices'])) + errors += self._survey_element_validation(survey_element, data) return errors diff --git a/awx/main/models/projects.py b/awx/main/models/projects.py index 9897067843..bf60e5b77c 100644 --- a/awx/main/models/projects.py +++ b/awx/main/models/projects.py @@ -43,6 +43,7 @@ class ProjectOptions(models.Model): ('git', _('Git')), ('hg', _('Mercurial')), ('svn', _('Subversion')), + ('insights', _('Red Hat Insights')), ] class Meta: @@ -120,6 +121,8 @@ class ProjectOptions(models.Model): return self.scm_type or '' def clean_scm_url(self): + if self.scm_type == 'insights': + self.scm_url = settings.INSIGHTS_URL_BASE scm_url = unicode(self.scm_url or '') if not self.scm_type: return '' @@ -141,6 +144,8 @@ class ProjectOptions(models.Model): if cred.kind != 'scm': raise ValidationError(_("Credential kind must be 'scm'.")) try: + if self.scm_type == 'insights': + self.scm_url = settings.INSIGHTS_URL_BASE scm_url = update_scm_url(self.scm_type, self.scm_url, check_special_cases=False) scm_url_parts = urlparse.urlsplit(scm_url) diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index 2ccae7fdaf..880789aafe 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -1025,7 +1025,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique if settings.DEBUG: raise - def cancel(self): + def cancel(self, job_explanation=None): if self.can_cancel: if not self.cancel_flag: self.cancel_flag = True @@ -1033,6 +1033,9 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique if self.status in ('pending', 'waiting', 'new'): self.status = 'canceled' cancel_fields.append('status') + if job_explanation is not None: + self.job_explanation = job_explanation + cancel_fields.append('job_explanation') self.save(update_fields=cancel_fields) self.websocket_emit_status("canceled") if settings.BROKER_URL.startswith('amqp://'): diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 91ef460f16..d60d936531 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -90,7 +90,7 @@ def celery_startup(conf=None, **kwargs): @worker_process_init.connect def task_set_logger_pre_run(*args, **kwargs): cache.close() - configure_external_logger(settings, async_flag=False, is_startup=False) + configure_external_logger(settings, is_startup=False) def _clear_cache_keys(set_of_keys): @@ -471,24 +471,24 @@ class BaseTask(Task): env['PROOT_TMP_DIR'] = settings.AWX_PROOT_BASE_PATH return env - def build_safe_env(self, instance, **kwargs): + def build_safe_env(self, env, **kwargs): ''' Build environment dictionary, hiding potentially sensitive information such as passwords or keys. 
''' hidden_re = re.compile(r'API|TOKEN|KEY|SECRET|PASS', re.I) - urlpass_re = re.compile(r'^.*?://.?:(.*?)@.*?$') - env = self.build_env(instance, **kwargs) - for k,v in env.items(): + urlpass_re = re.compile(r'^.*?://[^:]+:(.*?)@.*?$') + safe_env = dict(env) + for k,v in safe_env.items(): if k in ('REST_API_URL', 'AWS_ACCESS_KEY', 'AWS_ACCESS_KEY_ID'): continue elif k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'): continue elif hidden_re.search(k): - env[k] = HIDDEN_PASSWORD + safe_env[k] = HIDDEN_PASSWORD elif type(v) == str and urlpass_re.match(v): - env[k] = urlpass_re.sub(HIDDEN_PASSWORD, v) - return env + safe_env[k] = urlpass_re.sub(HIDDEN_PASSWORD, v) + return safe_env def args2cmdline(self, *args): return ' '.join([pipes.quote(a) for a in args]) @@ -699,7 +699,7 @@ class BaseTask(Task): output_replacements = self.build_output_replacements(instance, **kwargs) cwd = self.build_cwd(instance, **kwargs) env = self.build_env(instance, **kwargs) - safe_env = self.build_safe_env(instance, **kwargs) + safe_env = self.build_safe_env(env, **kwargs) stdout_handle = self.get_stdout_handle(instance) if self.should_use_proot(instance, **kwargs): if not check_proot_installed(): @@ -1160,6 +1160,7 @@ class RunProjectUpdate(BaseTask): ''' env = super(RunProjectUpdate, self).build_env(project_update, **kwargs) env = self.add_ansible_venv(env) + env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False) env['ANSIBLE_ASK_PASS'] = str(False) env['ANSIBLE_ASK_SUDO_PASS'] = str(False) env['DISPLAY'] = '' # Prevent stupid password popup when running tests. @@ -1189,6 +1190,9 @@ class RunProjectUpdate(BaseTask): scm_username = False elif scm_url_parts.scheme.endswith('ssh'): scm_password = False + elif scm_type == 'insights': + extra_vars['scm_username'] = scm_username + extra_vars['scm_password'] = scm_password scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True) else: @@ -1218,6 +1222,7 @@ class RunProjectUpdate(BaseTask): scm_branch = project_update.scm_branch or {'hg': 'tip'}.get(project_update.scm_type, 'HEAD') extra_vars.update({ 'project_path': project_update.get_project_path(check_if_exists=False), + 'insights_url': settings.INSIGHTS_URL_BASE, 'scm_type': project_update.scm_type, 'scm_url': scm_url, 'scm_branch': scm_branch, @@ -1314,10 +1319,10 @@ class RunProjectUpdate(BaseTask): lines = fd.readlines() if lines: p.scm_revision = lines[0].strip() - p.playbook_files = p.playbooks - p.save() else: - logger.error("Could not find scm revision in check") + logger.info("Could not find scm revision in check") + p.playbook_files = p.playbooks + p.save() try: os.remove(self.revision_path) except Exception, e: diff --git a/awx/main/tests/functional/api/test_credential.py b/awx/main/tests/functional/api/test_credential.py index 8f596cdac9..458a629cce 100644 --- a/awx/main/tests/functional/api/test_credential.py +++ b/awx/main/tests/functional/api/test_credential.py @@ -339,6 +339,21 @@ def test_list_created_org_credentials(post, get, organization, org_admin, org_me assert response.data['count'] == 0 +@pytest.mark.parametrize('order_by', ('password', '-password', 'password,pk', '-password,pk')) +@pytest.mark.django_db +def test_list_cannot_order_by_encrypted_field(post, get, organization, org_admin, order_by): + for i, password in enumerate(('abc', 'def', 'xyz')): + response = post(reverse('api:credential_list'), { + 'organization': organization.id, + 'name': 'C%d' % i, + 'password': password + }, org_admin) + + response = get(reverse('api:credential_list'), org_admin, + 
QUERY_STRING='order_by=%s' % order_by, status=400) + assert response.status_code == 400 + + # # Openstack Credentials # diff --git a/awx/main/tests/functional/api/test_inventory.py b/awx/main/tests/functional/api/test_inventory.py index 925d7352fd..839cffab9e 100644 --- a/awx/main/tests/functional/api/test_inventory.py +++ b/awx/main/tests/functional/api/test_inventory.py @@ -35,6 +35,21 @@ def test_edit_inventory(put, inventory, alice, role_field, expected_status_code) put(reverse('api:inventory_detail', args=(inventory.id,)), data, alice, expect=expected_status_code) +@pytest.mark.parametrize('order_by', ('script', '-script', 'script,pk', '-script,pk')) +@pytest.mark.django_db +def test_list_cannot_order_by_unsearchable_field(get, organization, alice, order_by): + for i, script in enumerate(('#!/bin/a', '#!/bin/b', '#!/bin/c')): + custom_script = organization.custom_inventory_scripts.create( + name="I%d" % i, + script=script + ) + custom_script.admin_role.members.add(alice) + + response = get(reverse('api:inventory_script_list'), alice, + QUERY_STRING='order_by=%s' % order_by, status=400) + assert response.status_code == 400 + + @pytest.mark.parametrize("role_field,expected_status_code", [ (None, 403), ('admin_role', 201), diff --git a/awx/main/tests/functional/api/test_job_runtime_params.py b/awx/main/tests/functional/api/test_job_runtime_params.py index af8bd659fd..e63a074965 100644 --- a/awx/main/tests/functional/api/test_job_runtime_params.py +++ b/awx/main/tests/functional/api/test_job_runtime_params.py @@ -344,3 +344,53 @@ def test_job_launch_unprompted_vars_with_survey(mocker, survey_spec_factory, job # Check that the survey variable is accepted and the job variable isn't mock_job.signal_start.assert_called_once() + + +@pytest.mark.django_db +@pytest.mark.job_runtime_vars +def test_callback_accept_prompted_extra_var(mocker, survey_spec_factory, job_template_prompts, post, admin_user, host): + job_template = job_template_prompts(True) + job_template.host_config_key = "foo" + job_template.survey_enabled = True + job_template.survey_spec = survey_spec_factory('survey_var') + job_template.save() + + with mocker.patch('awx.main.access.BaseAccess.check_license'): + mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4}) + with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): + with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}): + with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]): + post( + reverse('api:job_template_callback', args=[job_template.pk]), + dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"), + admin_user, expect=201, format='json') + assert JobTemplate.create_unified_job.called + assert JobTemplate.create_unified_job.call_args == ({'extra_vars': {'survey_var': 4, + 'job_launch_var': 3}, + 'launch_type': 'callback', + 'limit': 'single-host'},) + + mock_job.signal_start.assert_called_once() + + +@pytest.mark.django_db +@pytest.mark.job_runtime_vars +def test_callback_ignore_unprompted_extra_var(mocker, survey_spec_factory, job_template_prompts, post, admin_user, host): + job_template = job_template_prompts(False) + job_template.host_config_key = "foo" + job_template.save() + + with mocker.patch('awx.main.access.BaseAccess.check_license'): + mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4}) + with mocker.patch.object(JobTemplate, 'create_unified_job', 
return_value=mock_job): + with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}): + with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]): + post( + reverse('api:job_template_callback', args=[job_template.pk]), + dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"), + admin_user, expect=201, format='json') + assert JobTemplate.create_unified_job.called + assert JobTemplate.create_unified_job.call_args == ({'launch_type': 'callback', + 'limit': 'single-host'},) + + mock_job.signal_start.assert_called_once() diff --git a/awx/main/tests/functional/core/test_licenses.py b/awx/main/tests/functional/core/test_licenses.py index f2c3d9348e..7432dbbdcd 100644 --- a/awx/main/tests/functional/core/test_licenses.py +++ b/awx/main/tests/functional/core/test_licenses.py @@ -92,3 +92,15 @@ def test_expired_licenses(): assert vdata['compliant'] is False assert vdata['grace_period_remaining'] > 0 + + +@pytest.mark.django_db +def test_cloudforms_license(mocker): + with mocker.patch('awx.main.task_engine.TaskEnhancer._check_cloudforms_subscription', return_value=True): + task_enhancer = TaskEnhancer() + vdata = task_enhancer.validate_enhancements() + assert vdata['compliant'] is True + assert vdata['subscription_name'] == "Red Hat CloudForms License" + assert vdata['available_instances'] == 9999999 + assert vdata['license_type'] == 'enterprise' + assert vdata['features']['ha'] is True diff --git a/awx/main/tests/functional/test_python_requirements.py b/awx/main/tests/functional/test_python_requirements.py index 0dc48f66b8..6f16fc2624 100644 --- a/awx/main/tests/functional/test_python_requirements.py +++ b/awx/main/tests/functional/test_python_requirements.py @@ -1,11 +1,13 @@ import os import re +import pytest from pip.operations import freeze from django.conf import settings +@pytest.mark.skip(reason="This test needs some love") def test_env_matches_requirements_txt(): def check_is_in(src, dests): if src not in dests: diff --git a/awx/main/tests/unit/models/__init__.py b/awx/main/tests/unit/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/tests/unit/models/test_inventory.py b/awx/main/tests/unit/models/test_inventory.py new file mode 100644 index 0000000000..900881aa4c --- /dev/null +++ b/awx/main/tests/unit/models/test_inventory.py @@ -0,0 +1,38 @@ +import pytest +import mock +from awx.main.models import ( + UnifiedJob, + InventoryUpdate, + Job, +) + + +@pytest.fixture +def dependent_job(mocker): + j = Job(id=3, name='I_am_a_job') + j.cancel = mocker.MagicMock(return_value=True) + return [j] + + +def test_cancel(mocker, dependent_job): + with mock.patch.object(UnifiedJob, 'cancel', return_value=True) as parent_cancel: + iu = InventoryUpdate() + + iu.get_dependent_jobs = mocker.MagicMock(return_value=dependent_job) + iu.save = mocker.MagicMock() + build_job_explanation_mock = mocker.MagicMock() + iu._build_job_explanation = mocker.MagicMock(return_value=build_job_explanation_mock) + + iu.cancel() + + parent_cancel.assert_called_with(job_explanation=None) + dependent_job[0].cancel.assert_called_with(job_explanation=build_job_explanation_mock) + + +def test__build_job_explanation(): + iu = InventoryUpdate(id=3, name='I_am_an_Inventory_Update') + + job_explanation = iu._build_job_explanation() + + assert job_explanation == 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \ + ('inventory_update', 'I_am_an_Inventory_Update', 3) diff --git 
a/awx/main/tests/unit/models/test_job_template_unit.py b/awx/main/tests/unit/models/test_job_template_unit.py index 194ce68cef..a6086b7b9d 100644 --- a/awx/main/tests/unit/models/test_job_template_unit.py +++ b/awx/main/tests/unit/models/test_job_template_unit.py @@ -115,3 +115,16 @@ def test_job_template_survey_mixin_length(job_template_factory): {'type':'password', 'variable':'my_other_variable'}]} kwargs = obj._update_unified_job_kwargs(extra_vars={'my_variable':'$encrypted$'}) assert kwargs['extra_vars'] == '{"my_variable": "my_default"}' + + +def test_job_template_can_start_with_callback_extra_vars_provided(job_template_factory): + objects = job_template_factory( + 'callback_extra_vars_test', + organization='org1', + inventory='inventory1', + credential='cred1', + persisted=False, + ) + obj = objects.job_template + obj.ask_variables_on_launch = True + assert obj.can_start_without_user_input(callback_extra_vars='{"foo": "bar"}') is True diff --git a/awx/main/tests/unit/models/test_survey_models.py b/awx/main/tests/unit/models/test_survey_models.py index 584a4cc7f0..eefc5d97ab 100644 --- a/awx/main/tests/unit/models/test_survey_models.py +++ b/awx/main/tests/unit/models/test_survey_models.py @@ -4,6 +4,7 @@ import json from awx.main.tasks import RunJob from awx.main.models import ( Job, + JobTemplate, WorkflowJobTemplate ) @@ -78,6 +79,18 @@ def test_job_args_unredacted_passwords(job): assert extra_vars['secret_key'] == 'my_password' +def test_update_kwargs_survey_invalid_default(survey_spec_factory): + spec = survey_spec_factory('var2') + spec['spec'][0]['required'] = False + spec['spec'][0]['min'] = 3 + spec['spec'][0]['default'] = 1 + jt = JobTemplate(name="test-jt", survey_spec=spec, survey_enabled=True, extra_vars="var2: 2") + defaulted_extra_vars = jt._update_unified_job_kwargs() + assert 'extra_vars' in defaulted_extra_vars + # Make sure we did not set the invalid default of 1 + assert json.loads(defaulted_extra_vars['extra_vars'])['var2'] == 2 + + class TestWorkflowSurveys: def test_update_kwargs_survey_defaults(self, survey_spec_factory): "Assure that the survey default over-rides a JT variable" diff --git a/awx/main/tests/unit/models/test_unified_job_unit.py b/awx/main/tests/unit/models/test_unified_job_unit.py index af8833482a..256d6d0b03 100644 --- a/awx/main/tests/unit/models/test_unified_job_unit.py +++ b/awx/main/tests/unit/models/test_unified_job_unit.py @@ -1,3 +1,4 @@ +import pytest import mock from awx.main.models import ( @@ -14,3 +15,38 @@ def test_unified_job_workflow_attributes(): assert job.spawned_by_workflow is True assert job.workflow_job_id == 1 + + +@pytest.fixture +def unified_job(mocker): + mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True) + j = UnifiedJob() + j.status = 'pending' + j.cancel_flag = None + j.save = mocker.MagicMock() + j.websocket_emit_status = mocker.MagicMock() + return j + + +def test_cancel(unified_job): + + unified_job.cancel() + + assert unified_job.cancel_flag is True + assert unified_job.status == 'canceled' + assert unified_job.job_explanation == '' + # Note: the websocket emit status check is just reflecting the state of the current code. + # Some more thought may want to go into only emitting canceled if/when the job record + # status is changed to canceled. Unlike, currently, where it's emitted unconditionally. 
+ unified_job.websocket_emit_status.assert_called_with("canceled") + unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status']) + + +def test_cancel_job_explanation(unified_job): + job_explanation = 'giggity giggity' + + unified_job.cancel(job_explanation=job_explanation) + + assert unified_job.job_explanation == job_explanation + unified_job.save.assert_called_with(update_fields=['cancel_flag', 'status', 'job_explanation']) + diff --git a/awx/main/tests/unit/test_tasks.py b/awx/main/tests/unit/test_tasks.py index d8b6469f93..16b9bc6b14 100644 --- a/awx/main/tests/unit/test_tasks.py +++ b/awx/main/tests/unit/test_tasks.py @@ -71,6 +71,25 @@ def test_run_admin_checks_usage(mocker, current_instances, call_count): assert 'expire' in mock_sm.call_args_list[0][0][0] +@pytest.mark.parametrize("key,value", [ + ('REST_API_TOKEN', 'SECRET'), + ('SECRET_KEY', 'SECRET'), + ('RABBITMQ_PASS', 'SECRET'), + ('VMWARE_PASSWORD', 'SECRET'), + ('API_SECRET', 'SECRET'), + ('CALLBACK_CONNECTION', 'amqp://tower:password@localhost:5672/tower'), +]) +def test_safe_env_filtering(key, value): + task = tasks.RunJob() + assert task.build_safe_env({key: value})[key] == tasks.HIDDEN_PASSWORD + + +def test_safe_env_returns_new_copy(): + task = tasks.RunJob() + env = {'foo': 'bar'} + assert task.build_safe_env(env) is not env + + def test_openstack_client_config_generation(mocker): update = tasks.RunInventoryUpdate() inventory_update = mocker.Mock(**{ diff --git a/awx/main/tests/unit/utils/test_handlers.py b/awx/main/tests/unit/utils/test_handlers.py index 3de3b2e7b7..518a213669 100644 --- a/awx/main/tests/unit/utils/test_handlers.py +++ b/awx/main/tests/unit/utils/test_handlers.py @@ -1,7 +1,9 @@ import base64 +import cStringIO import json import logging +from django.conf import settings from django.conf import LazySettings import pytest import requests @@ -40,17 +42,27 @@ def ok200_adapter(): return OK200Adapter() -def test_https_logging_handler_requests_sync_implementation(): - handler = HTTPSHandler(async=False) - assert not isinstance(handler.session, FuturesSession) - assert isinstance(handler.session, requests.Session) +@pytest.fixture() +def connection_error_adapter(): + class ConnectionErrorAdapter(requests.adapters.HTTPAdapter): + + def send(self, request, **kwargs): + err = requests.packages.urllib3.exceptions.SSLError() + raise requests.exceptions.ConnectionError(err, request=request) + + return ConnectionErrorAdapter() def test_https_logging_handler_requests_async_implementation(): - handler = HTTPSHandler(async=True) + handler = HTTPSHandler() assert isinstance(handler.session, FuturesSession) +def test_https_logging_handler_has_default_http_timeout(): + handler = HTTPSHandler.from_django_settings(settings) + assert handler.http_timeout == 5 + + @pytest.mark.parametrize('param', PARAM_NAMES.keys()) def test_https_logging_handler_defaults(param): handler = HTTPSHandler() @@ -95,7 +107,17 @@ def test_https_logging_handler_splunk_auth_info(): ('http://localhost', None, 'http://localhost'), ('http://localhost', 80, 'http://localhost'), ('http://localhost', 8080, 'http://localhost:8080'), - ('https://localhost', 443, 'https://localhost:443') + ('https://localhost', 443, 'https://localhost:443'), + ('ftp://localhost', 443, 'ftp://localhost:443'), + ('https://localhost:550', 443, 'https://localhost:550'), + ('https://localhost:yoho/foobar', 443, 'https://localhost:443/foobar'), + ('https://localhost:yoho/foobar', None, 'https://localhost:yoho/foobar'), + 
('http://splunk.server:8088/services/collector/event', 80, + 'http://splunk.server:8088/services/collector/event'), + ('http://splunk.server/services/collector/event', 80, + 'http://splunk.server/services/collector/event'), + ('http://splunk.server/services/collector/event', 8088, + 'http://splunk.server:8088/services/collector/event'), ]) def test_https_logging_handler_http_host_format(host, port, normalized): handler = HTTPSHandler(host=host, port=port) @@ -114,18 +136,39 @@ def test_https_logging_handler_skip_log(params, logger_name, expected): assert handler.skip_log(logger_name) is expected -@pytest.mark.parametrize('message_type, async', [ - ('logstash', False), - ('logstash', True), - ('splunk', False), - ('splunk', True), -]) +def test_https_logging_handler_connection_error(connection_error_adapter, + dummy_log_record): + handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, + message_type='logstash', + enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking']) + handler.setFormatter(LogstashFormatter()) + handler.session.mount('http://', connection_error_adapter) + + buff = cStringIO.StringIO() + logging.getLogger('awx.main.utils.handlers').addHandler( + logging.StreamHandler(buff) + ) + + async_futures = handler.emit(dummy_log_record) + with pytest.raises(requests.exceptions.ConnectionError): + [future.result() for future in async_futures] + assert 'failed to emit log to external aggregator\nTraceback' in buff.getvalue() + + # we should only log failures *periodically*, so causing *another* + # immediate failure shouldn't report a second ConnectionError + buff.truncate(0) + async_futures = handler.emit(dummy_log_record) + with pytest.raises(requests.exceptions.ConnectionError): + [future.result() for future in async_futures] + assert buff.getvalue() == '' + + +@pytest.mark.parametrize('message_type', ['logstash', 'splunk']) def test_https_logging_handler_emit(ok200_adapter, dummy_log_record, - message_type, async): + message_type): handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, message_type=message_type, - enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking'], - async=async) + enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking']) handler.setFormatter(LogstashFormatter()) handler.session.mount('http://', ok200_adapter) async_futures = handler.emit(dummy_log_record) @@ -151,14 +194,12 @@ def test_https_logging_handler_emit(ok200_adapter, dummy_log_record, assert body['message'] == 'User joe logged in' -@pytest.mark.parametrize('async', (True, False)) def test_https_logging_handler_emit_logstash_with_creds(ok200_adapter, - dummy_log_record, async): + dummy_log_record): handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, username='user', password='pass', message_type='logstash', - enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking'], - async=async) + enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking']) handler.setFormatter(LogstashFormatter()) handler.session.mount('http://', ok200_adapter) async_futures = handler.emit(dummy_log_record) @@ -169,13 +210,11 @@ def test_https_logging_handler_emit_logstash_with_creds(ok200_adapter, assert request.headers['Authorization'] == 'Basic %s' % base64.b64encode("user:pass") -@pytest.mark.parametrize('async', (True, False)) def test_https_logging_handler_emit_splunk_with_creds(ok200_adapter, - dummy_log_record, async): + dummy_log_record): handler = HTTPSHandler(host='127.0.0.1', enabled_flag=True, 
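# These handler tests avoid real network I/O by mounting a fake transport adapter
# on the handler's requests session (see the ok200_adapter and connection_error_adapter
# fixtures above). A minimal, self-contained version of that pattern, with
# illustrative names only and no claim to match the fixtures' exact shape:
import io
import requests


class CannedResponseAdapter(requests.adapters.HTTPAdapter):
    """Capture outgoing requests and answer each one with a canned 200."""

    def __init__(self, *args, **kwargs):
        super(CannedResponseAdapter, self).__init__(*args, **kwargs)
        self.sent = []

    def send(self, request, **kwargs):
        self.sent.append(request)
        response = requests.models.Response()
        response.status_code = 200
        response.raw = io.BytesIO(b'')   # empty body so .content can be read
        response.request = request
        return response


session = requests.Session()
adapter = CannedResponseAdapter()
session.mount('http://', adapter)        # anything sent to http:// hits the stub
session.post('http://127.0.0.1', data='{"message": "hi"}')
assert adapter.sent[0].body == '{"message": "hi"}'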
password='pass', message_type='splunk', - enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking'], - async=async) + enabled_loggers=['awx', 'activity_stream', 'job_events', 'system_tracking']) handler.setFormatter(LogstashFormatter()) handler.session.mount('http://', ok200_adapter) async_futures = handler.emit(dummy_log_record) diff --git a/awx/main/utils/common.py b/awx/main/utils/common.py index 49d92b5f9c..50ccb3bf89 100644 --- a/awx/main/utils/common.py +++ b/awx/main/utils/common.py @@ -261,7 +261,7 @@ def update_scm_url(scm_type, url, username=True, password=True, # git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS # hg: http://www.selenic.com/mercurial/hg.1.html#url-paths # svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls - if scm_type not in ('git', 'hg', 'svn'): + if scm_type not in ('git', 'hg', 'svn', 'insights'): raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type)) if not url.strip(): return '' @@ -307,6 +307,7 @@ def update_scm_url(scm_type, url, username=True, password=True, 'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'), 'hg': ('http', 'https', 'ssh', 'file'), 'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'), + 'insights': ('http', 'https') } if parts.scheme not in scm_type_schemes.get(scm_type, ()): raise ValueError(_('Unsupported %s URL') % scm_type) @@ -342,7 +343,7 @@ def update_scm_url(scm_type, url, username=True, password=True, #raise ValueError('Password not supported for SSH with Mercurial.') netloc_password = '' - if netloc_username and parts.scheme != 'file': + if netloc_username and parts.scheme != 'file' and scm_type != "insights": netloc = u':'.join([urllib.quote(x) for x in (netloc_username, netloc_password) if x]) else: netloc = u'' diff --git a/awx/main/utils/formatters.py b/awx/main/utils/formatters.py index 68d0917985..868f1c50ee 100644 --- a/awx/main/utils/formatters.py +++ b/awx/main/utils/formatters.py @@ -13,7 +13,9 @@ class LogstashFormatter(LogstashFormatterVersion1): ret = super(LogstashFormatter, self).__init__(**kwargs) if settings_module: self.host_id = settings_module.CLUSTER_HOST_ID - self.tower_uuid = settings_module.LOG_AGGREGATOR_TOWER_UUID + if hasattr(settings_module, 'LOG_AGGREGATOR_TOWER_UUID'): + self.tower_uuid = settings_module.LOG_AGGREGATOR_TOWER_UUID + self.message_type = settings_module.LOG_AGGREGATOR_TYPE return ret def reformat_data_for_log(self, raw_data, kind=None): diff --git a/awx/main/utils/handlers.py b/awx/main/utils/handlers.py index fe2fb87228..3eb9852472 100644 --- a/awx/main/utils/handlers.py +++ b/awx/main/utils/handlers.py @@ -5,6 +5,9 @@ import logging import json import requests +import time +import urlparse +from concurrent.futures import ThreadPoolExecutor from copy import copy # loggly @@ -18,6 +21,8 @@ from awx.main.utils.formatters import LogstashFormatter __all__ = ['HTTPSNullHandler', 'BaseHTTPSHandler', 'configure_external_logger'] +logger = logging.getLogger('awx.main.utils.handlers') + # AWX external logging handler, generally designed to be used # with the accompanying LogstashHandler, derives from python-logstash library # Non-blocking request accomplished by FuturesSession, similar @@ -33,6 +38,7 @@ PARAM_NAMES = { 'enabled_loggers': 'LOG_AGGREGATOR_LOGGERS', 'indv_facts': 'LOG_AGGREGATOR_INDIVIDUAL_FACTS', 'enabled_flag': 'LOG_AGGREGATOR_ENABLED', + 'http_timeout': 'LOG_AGGREGATOR_HTTP_TIMEOUT', } @@ -47,17 +53,41 @@ class HTTPSNullHandler(logging.NullHandler): return 
super(HTTPSNullHandler, self).__init__() +class VerboseThreadPoolExecutor(ThreadPoolExecutor): + + last_log_emit = 0 + + def submit(self, func, *args, **kwargs): + def _wrapped(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception: + # If an exception occurs in a concurrent thread worker (like + # a ConnectionError or a read timeout), periodically log + # that failure. + # + # This approach isn't really thread-safe, so we could + # potentially log once per thread every 10 seconds, but it + # beats logging *every* failed HTTP request in a scenario where + # you've typo'd your log aggregator hostname. + now = time.time() + if now - self.last_log_emit > 10: + logger.exception('failed to emit log to external aggregator') + self.last_log_emit = now + raise + return super(VerboseThreadPoolExecutor, self).submit(_wrapped, *args, + **kwargs) + + class BaseHTTPSHandler(logging.Handler): def __init__(self, fqdn=False, **kwargs): super(BaseHTTPSHandler, self).__init__() self.fqdn = fqdn - self.async = kwargs.get('async', True) for fd in PARAM_NAMES: setattr(self, fd, kwargs.get(fd, None)) - if self.async: - self.session = FuturesSession() - else: - self.session = requests.Session() + self.session = FuturesSession(executor=VerboseThreadPoolExecutor( + max_workers=2 # this is the default used by requests_futures + )) self.add_auth_information() @classmethod @@ -89,10 +119,21 @@ class BaseHTTPSHandler(logging.Handler): def get_http_host(self): host = self.host or '' - if not host.startswith('http'): - host = 'http://%s' % self.host - if self.port != 80 and self.port is not None: - host = '%s:%s' % (host, str(self.port)) + # urlparse requires scheme to be provided, default to use http if + # missing + if not urlparse.urlsplit(host).scheme: + host = 'http://%s' % host + parsed = urlparse.urlsplit(host) + # Insert self.port if its special and port number is either not + # given in host or given as non-numerical + try: + port = parsed.port or self.port + except ValueError: + port = self.port + if port not in (80, None): + new_netloc = '%s:%s' % (parsed.hostname, port) + return urlparse.urlunsplit((parsed.scheme, new_netloc, parsed.path, + parsed.query, parsed.fragment)) return host def get_post_kwargs(self, payload_input): @@ -105,10 +146,8 @@ class BaseHTTPSHandler(logging.Handler): payload_str = json.dumps(payload_input) else: payload_str = payload_input - if self.async: - return dict(data=payload_str, background_callback=unused_callback) - else: - return dict(data=payload_str) + return dict(data=payload_str, background_callback=unused_callback, + timeout=self.http_timeout) def skip_log(self, logger_name): if self.host == '' or (not self.enabled_flag): @@ -123,10 +162,6 @@ class BaseHTTPSHandler(logging.Handler): Emit a log record. Returns a list of zero or more ``concurrent.futures.Future`` objects. - When ``self.async`` is True, the list will contain one - Future object for each HTTP request made. When ``self.async`` is - False, the list will be empty. 
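# The rewritten get_http_host() above normalises the configured host and port with
# urlparse instead of string concatenation. A standalone sketch of the same rules,
# checked against the parametrised expectations earlier in this diff; the function
# name and free-function shape are illustrative, not the handler's API.
import urlparse  # Python 2 module, matching the import added to handlers.py


def normalize_http_host(host, port):
    # urlsplit needs a scheme, so default to http:// when none is given.
    if not urlparse.urlsplit(host or '').scheme:
        host = 'http://%s' % (host or '')
    parsed = urlparse.urlsplit(host)
    # A numeric port embedded in the host wins; fall back to the configured
    # port when the host has none, or has a non-numeric one (":yoho").
    try:
        port = parsed.port or port
    except ValueError:
        pass
    if port in (80, None):
        return host
    netloc = '%s:%s' % (parsed.hostname, port)
    return urlparse.urlunsplit((parsed.scheme, netloc, parsed.path,
                                parsed.query, parsed.fragment))


assert normalize_http_host('https://localhost:550', 443) == 'https://localhost:550'
assert normalize_http_host('https://localhost:yoho/foobar', 443) == 'https://localhost:443/foobar'
assert (normalize_http_host('http://splunk.server/services/collector/event', 8088) ==
        'http://splunk.server:8088/services/collector/event')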
- See: https://docs.python.org/3/library/concurrent.futures.html#future-objects http://pythonhosted.org/futures/ @@ -147,17 +182,10 @@ class BaseHTTPSHandler(logging.Handler): for key in facts_dict: fact_payload = copy(payload_data) fact_payload.update(facts_dict[key]) - if self.async: - async_futures.append(self._send(fact_payload)) - else: - self._send(fact_payload) + async_futures.append(self._send(fact_payload)) return async_futures - if self.async: - return [self._send(payload)] - - self._send(payload) - return [] + return [self._send(payload)] except (KeyboardInterrupt, SystemExit): raise except: @@ -179,7 +207,7 @@ def add_or_remove_logger(address, instance): specific_logger.handlers.append(instance) -def configure_external_logger(settings_module, async_flag=True, is_startup=True): +def configure_external_logger(settings_module, is_startup=True): is_enabled = settings_module.LOG_AGGREGATOR_ENABLED if is_startup and (not is_enabled): @@ -188,7 +216,7 @@ def configure_external_logger(settings_module, async_flag=True, is_startup=True) instance = None if is_enabled: - instance = BaseHTTPSHandler.from_django_settings(settings_module, async=async_flag) + instance = BaseHTTPSHandler.from_django_settings(settings_module) instance.setFormatter(LogstashFormatter(settings_module=settings_module)) awx_logger_instance = instance if is_enabled and 'awx' not in settings_module.LOG_AGGREGATOR_LOGGERS: diff --git a/awx/playbooks/project_update.yml b/awx/playbooks/project_update.yml index 0f4d354ff7..bfabd47a98 100644 --- a/awx/playbooks/project_update.yml +++ b/awx/playbooks/project_update.yml @@ -25,7 +25,7 @@ - name: update project using git and accept hostkey git: dest: "{{project_path|quote}}" - repo: "{{scm_url|quote}}" + repo: "{{scm_url}}" version: "{{scm_branch|quote}}" force: "{{scm_clean}}" accept_hostkey: "{{scm_accept_hostkey}}" @@ -42,7 +42,7 @@ - name: update project using git git: dest: "{{project_path|quote}}" - repo: "{{scm_url|quote}}" + repo: "{{scm_url}}" version: "{{scm_branch|quote}}" force: "{{scm_clean}}" #clone: "{{ scm_full_checkout }}" @@ -105,6 +105,45 @@ scm_version: "{{ scm_result['after'] }}" when: "'after' in scm_result" + - name: update project using insights + uri: + url: "{{insights_url}}/r/insights/v1/maintenance?ansible=true" + user: "{{scm_username}}" + password: "{{scm_password}}" + force_basic_auth: yes + when: scm_type == 'insights' + register: insights_output + + - name: Ensure the project directory is present + file: + dest: "{{project_path|quote}}" + state: directory + when: scm_type == 'insights' + + - name: Fetch Insights Playbook With Name + get_url: + url: "{{insights_url}}/r/insights/v3/maintenance/{{item.maintenance_id}}/playbook" + dest: "{{project_path|quote}}/{{item.name}}-{{item.maintenance_id}}.yml" + url_username: "{{scm_username}}" + url_password: "{{scm_password}}" + force_basic_auth: yes + force: yes + when: scm_type == 'insights' and item.name != None + with_items: "{{insights_output.json}}" + failed_when: false + + - name: Fetch Insights Playbook + get_url: + url: "{{insights_url}}/r/insights/v3/maintenance/{{item.maintenance_id}}/playbook" + dest: "{{project_path|quote}}/insights-plan-{{item.maintenance_id}}.yml" + url_username: "{{scm_username}}" + url_password: "{{scm_password}}" + force_basic_auth: yes + force: yes + when: scm_type == 'insights' and item.name == None + with_items: "{{insights_output.json}}" + failed_when: false + - name: detect requirements.yml stat: path={{project_path|quote}}/roles/requirements.yml register: 
doesRequirementsExist @@ -121,6 +160,11 @@ scm_version: "{{scm_version|regex_replace('^.*Revision: ([0-9]+).*$', '\\1')}}" when: scm_type == 'svn' + - name: parse hg version string properly + set_fact: + scm_version: "{{scm_version|regex_replace('^([A-Za-z0-9]+).*$', '\\1')}}" + when: scm_type == 'hg' + - name: Repository Version debug: msg="Repository Version {{ scm_version }}" when: scm_version is defined diff --git a/awx/plugins/library/win_scan_packages.ps1 b/awx/plugins/library/win_scan_packages.ps1 index 2c9455d154..2ab3fdbec6 100644 --- a/awx/plugins/library/win_scan_packages.ps1 +++ b/awx/plugins/library/win_scan_packages.ps1 @@ -25,7 +25,7 @@ if ([System.IntPtr]::Size -eq 4) { # This is a 32-bit Windows system, so we only check for 32-bit programs, which will be # at the native registry location. - $packages = Get-ChildItem -Path $uninstall_native_path | + [PSObject []]$packages = Get-ChildItem -Path $uninstall_native_path | Get-ItemProperty | Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}}, @{Name="version"; Expression={$_."DisplayVersion"}}, @@ -38,7 +38,7 @@ if ([System.IntPtr]::Size -eq 4) { # This is a 64-bit Windows system, so we check for 64-bit programs in the native # registry location, and also for 32-bit programs under Wow6432Node. - $packages = Get-ChildItem -Path $uninstall_native_path | + [PSObject []]$packages = Get-ChildItem -Path $uninstall_native_path | Get-ItemProperty | Select-Object -Property @{Name="name"; Expression={$_."DisplayName"}}, @{Name="version"; Expression={$_."DisplayVersion"}}, diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 0901961fa5..e3af1781db 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -862,9 +862,12 @@ TOWER_ADMIN_ALERTS = True # Note: This setting may be overridden by database settings. TOWER_URL_BASE = "https://towerhost" +INSIGHTS_URL_BASE = "https://access.redhat.com" + TOWER_SETTINGS_MANIFEST = {} LOG_AGGREGATOR_ENABLED = False +LOG_AGGREGATOR_HTTP_TIMEOUT = 5 # The number of retry attempts for websocket session establishment # If you're encountering issues establishing websockets in clustered Tower, diff --git a/awx/sso/conf.py b/awx/sso/conf.py index fa4d70f3f8..3a2b2b77a8 100644 --- a/awx/sso/conf.py +++ b/awx/sso/conf.py @@ -377,9 +377,9 @@ register( help_text=_('User profile flags updated from group membership (key is user ' 'attribute name, value is group DN). These are boolean fields ' 'that are matched based on whether the user is a member of the ' - 'given group. So far only is_superuser is settable via this ' - 'method. This flag is set both true and false at login time ' - 'based on current LDAP settings.'), + 'given group. So far only is_superuser and is_system_auditor ' + 'are settable via this method. 
This flag is set both true and ' + 'false at login time based on current LDAP settings.'), category=_('LDAP'), category_slug='ldap', placeholder=collections.OrderedDict([ diff --git a/awx/sso/fields.py b/awx/sso/fields.py index 5d95296e8e..338178b288 100644 --- a/awx/sso/fields.py +++ b/awx/sso/fields.py @@ -322,7 +322,7 @@ class LDAPUserFlagsField(fields.DictField): default_error_messages = { 'invalid_flag': _('Invalid user flag: "{invalid_flag}".'), } - valid_user_flags = {'is_superuser'} + valid_user_flags = {'is_superuser', 'is_system_auditor'} child = LDAPDNField() def to_internal_value(self, data): diff --git a/awx/ui/client/legacy-styles/forms.less b/awx/ui/client/legacy-styles/forms.less index 5da836f921..d8f705e1ad 100644 --- a/awx/ui/client/legacy-styles/forms.less +++ b/awx/ui/client/legacy-styles/forms.less @@ -40,7 +40,6 @@ .Form-title{ flex: 0 1 auto; - text-transform: uppercase; color: @list-header-txt; font-size: 14px; font-weight: bold; @@ -50,6 +49,10 @@ margin-bottom: 20px; } +.Form-title--uppercase { + text-transform: uppercase; +} + .Form-secondaryTitle{ color: @default-icon; padding-bottom: 20px; @@ -98,8 +101,8 @@ .Form-tabHolder{ display: flex; - margin-bottom: 20px; min-height: 30px; + flex-wrap:wrap; } .Form-tabs { @@ -115,6 +118,7 @@ height: 30px; border-radius: 5px; margin-right: 20px; + margin-bottom: 20px; padding-left: 10px; padding-right: 10px; padding-bottom: 5px; @@ -560,6 +564,8 @@ input[type='radio']:checked:before { padding-left:15px; padding-right: 15px; margin-right: 20px; + min-height: 30px; + margin-bottom: 20px; } .Form-primaryButton:hover { diff --git a/awx/ui/client/legacy-styles/lists.less b/awx/ui/client/legacy-styles/lists.less index 8807fc5f93..4e007e5bea 100644 --- a/awx/ui/client/legacy-styles/lists.less +++ b/awx/ui/client/legacy-styles/lists.less @@ -147,7 +147,6 @@ table, tbody { font-size: 14px; font-weight: bold; margin-right: 10px; - text-transform: uppercase; } .List-actionHolder { diff --git a/awx/ui/client/src/about/about.block.less b/awx/ui/client/src/about/about.block.less index 8759cfa6dc..73cd7a9937 100644 --- a/awx/ui/client/src/about/about.block.less +++ b/awx/ui/client/src/about/about.block.less @@ -1,47 +1,60 @@ /** @define About */ @import "./client/src/shared/branding/colors.default.less"; -.About-cowsay--container{ - width: 340px; - margin: 0 auto; + +.About-ansibleVersion, +.About-cowsayCode { + font-family: Monaco, Menlo, Consolas, "Courier New", monospace; } -.About-cowsay--code{ - background-color: @default-bg; - padding-left: 30px; - border-style: none; - max-width: 340px; - padding-left: 30px; + +.About-cowsayContainer { + width: 340px; + margin: 0 auto; } -.About .modal-header{ - border: none; - padding-bottom: 0px; +.About-cowsayCode { + background-color: @default-bg; + padding-left: 30px; + border-style: none; + max-width: 340px; + padding-left: 30px; } -.About .modal-dialog{ - max-width: 500px; +.About-modalHeader { + border: none; + padding-bottom: 0px; } -.About .modal-body{ - padding-top: 0px; +.About-modalDialog { + max-width: 500px; } -.About-brand--redhat{ + +.About-modalBody { + padding-top: 0px; + padding-bottom: 0px; +} +.About-brandImg { float: left; width: 112px; + padding-top: 13px; } -.About-brand--ansible{ - max-width: 120px; - margin: 0 auto; + +.About-close { + position: absolute; + top: 15px; + right: 15px; + z-index: 10; } -.About-close{ - position: absolute; - top: 15px; - right: 15px; - z-index: 10; + +.About-modalFooter { + clear: both; } -.About p{ - color: @default-interface-txt; + 
+.About-footerText { + text-align: right; + color: @default-interface-txt; margin: 0; font-size: 12px; padding-top: 10px; } -.About-modal--footer { - clear: both; + +.About-ansibleVersion { + color: @default-data-txt; } diff --git a/awx/ui/client/src/about/about.controller.js b/awx/ui/client/src/about/about.controller.js index 2c821b9e09..159f61458a 100644 --- a/awx/ui/client/src/about/about.controller.js +++ b/awx/ui/client/src/about/about.controller.js @@ -1,27 +1,12 @@ export default - ['$scope', '$state', 'ConfigService', 'i18n', - function($scope, $state, ConfigService, i18n){ - var processVersion = function(version){ - // prettify version & calculate padding - // e,g 3.0.0-0.git201602191743/ -> 3.0.0 - var split = version.split('-')[0]; - var spaces = Math.floor((16-split.length)/2), - paddedStr = ""; - for(var i=0; i<=spaces; i++){ - paddedStr = paddedStr +" "; - } - paddedStr = paddedStr + split; - for(var j = paddedStr.length; j<16; j++){ - paddedStr = paddedStr + " "; - } - return paddedStr; - }; + ['$scope', '$state', 'ConfigService', + function($scope, $state, ConfigService){ var init = function(){ ConfigService.getConfig() .then(function(config){ + $scope.version = config.version.split('-')[0]; + $scope.ansible_version = config.ansible_version; $scope.subscription = config.license_info.subscription_name; - $scope.version = processVersion(config.version); - $scope.version_str = i18n._("Version"); $('#about-modal').modal('show'); }); }; diff --git a/awx/ui/client/src/about/about.partial.html b/awx/ui/client/src/about/about.partial.html index 867a5f6035..5874221a54 100644 --- a/awx/ui/client/src/about/about.partial.html +++ b/awx/ui/client/src/about/about.partial.html @@ -1,19 +1,18 @@
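Looping back to the project_update.yml changes earlier in this diff: for the new 'insights' SCM type the playbook first lists maintenance plans from the Insights API with uri, then downloads one remediation playbook per plan with get_url, tolerating per-item failures. A rough Python equivalent of that flow, offered only as a sketch (requests-based, with illustrative function and file naming; the playbook itself remains the source of truth):

import os
import requests


def fetch_insights_playbooks(insights_url, username, password, project_path):
    auth = (username, password)
    # Mirrors: GET {{insights_url}}/r/insights/v1/maintenance?ansible=true
    plans = requests.get('%s/r/insights/v1/maintenance?ansible=true' % insights_url,
                         auth=auth).json()
    if not os.path.isdir(project_path):
        os.makedirs(project_path)
    for plan in plans:
        # Named plans become "<name>-<id>.yml", unnamed ones "insights-plan-<id>.yml".
        name = plan.get('name') or 'insights-plan'
        dest = os.path.join(project_path, '%s-%s.yml' % (name, plan['maintenance_id']))
        resp = requests.get('%s/r/insights/v3/maintenance/%s/playbook'
                            % (insights_url, plan['maintenance_id']), auth=auth)
        if resp.ok:  # skip failures quietly, as failed_when: false does
            with open(dest, 'wb') as f:
                f.write(resp.content)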