Mirror of https://github.com/ansible/awx.git
Make tests pass with current versions of things

parent 241931309e
commit de82c613fc

Makefile | 7
@@ -362,7 +362,7 @@ TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/ss
 
 # Run all API unit tests.
 test:
-    @if [ "$(VENV_BASE)" ]; then \
+    if [ "$(VENV_BASE)" ]; then \
         . $(VENV_BASE)/awx/bin/activate; \
     fi; \
     PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)

@@ -377,10 +377,11 @@ COLLECTION_NAMESPACE ?= awx
 
 COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
 
 test_collection:
-    @if [ "$(VENV_BASE)" ]; then \
+    rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
+    if [ "$(VENV_BASE)" ]; then \
         . $(VENV_BASE)/awx/bin/activate; \
     fi; \
-    PYTHONPATH=$(PYTHONPATH):$(VENV_BASE)/awx/lib/python3.6/site-packages:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
+    py.test $(COLLECTION_TEST_DIRS) -v
     # The python path needs to be modified so that the tests can find Ansible within the container
     # First we will use anything expility set as PYTHONPATH
     # Second we will load any libraries out of the virtualenv (if it's unspecified that should be ok because python should not load out of an empty directory)

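Note: virtualenvs created by the old virtualenv tool without system site-packages drop a no-global-site-packages.txt marker into their lib/pythonX.Y directory; removing it (the added rm -f line above) is the usual way to let the interpreter fall back to the system site-packages, where Ansible is installed inside the dev container, so an explicit PYTHONPATH override is no longer needed. A minimal, illustrative check from Python, assuming Ansible is installed system-wide in that container:

    import sys

    # With the marker file removed, the system site-packages directory shows up
    # on sys.path, so packages installed system-wide (Ansible, in the dev
    # container) become importable from inside the virtualenv.
    print([p for p in sys.path if p.endswith('site-packages')])

    try:
        import ansible  # assumption: installed in the container's system site-packages
        print(ansible.__file__)
    except ImportError:
        print('ansible not importable; system site-packages still hidden?')
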
@@ -29,9 +29,10 @@ def reg(request):
     # as "defined in a settings file". This is analogous to manually
     # specifying a setting on the filesystem (e.g., in a local_settings.py in
     # development, or in /etc/tower/conf.d/<something>.py)
-    defaults = request.node.get_marker('defined_in_file')
-    if defaults:
-        settings.configure(**defaults.kwargs)
+    for marker in request.node.own_markers:
+        if marker.name == 'defined_in_file':
+            settings.configure(**marker.kwargs)
 
     settings._wrapped = SettingsWrapper(settings._wrapped,
                                         cache,
                                         registry)

@@ -41,13 +41,16 @@ def settings(request):
     cache = LocMemCache(str(uuid4()), {})  # make a new random cache each time
     settings = LazySettings()
     registry = SettingsRegistry(settings)
+    defaults = {}
 
     # @pytest.mark.defined_in_file can be used to mark specific setting values
     # as "defined in a settings file". This is analogous to manually
     # specifying a setting on the filesystem (e.g., in a local_settings.py in
     # development, or in /etc/tower/conf.d/<something>.py)
-    in_file_marker = request.node.get_marker('defined_in_file')
-    defaults = in_file_marker.kwargs if in_file_marker else {}
+    for marker in request.node.own_markers:
+        if marker.name == 'defined_in_file':
+            defaults = marker.kwargs
 
     defaults['DEFAULTS_SNAPSHOT'] = {}
     settings.configure(**defaults)
     settings._wrapped = SettingsWrapper(settings._wrapped,

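Note: request.node.get_marker() was removed in pytest 4; marker data is now read from request.node.own_markers (or request.node.iter_markers()), which is what both fixtures above switch to. A minimal sketch of the same pattern outside AWX, using an illustrative example_setting marker:

    import pytest

    @pytest.fixture
    def marked_defaults(request):
        # Collect kwargs from any @pytest.mark.example_setting(...) on the test.
        defaults = {}
        for marker in request.node.iter_markers('example_setting'):
            defaults.update(marker.kwargs)
        return defaults

    @pytest.mark.example_setting(FOO='bar')
    def test_marker_kwargs(marked_defaults):
        assert marked_defaults['FOO'] == 'bar'
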
@@ -50,8 +50,6 @@ class TestSwaggerGeneration():
         data.update(response.accepted_renderer.get_customizations() or {})
 
         data['host'] = None
-        if not pytest.config.getoption("--genschema"):
-            data['modified'] = datetime.datetime.utcnow().isoformat()
         data['schemes'] = ['https']
         data['consumes'] = ['application/json']

@@ -79,10 +77,14 @@ class TestSwaggerGeneration():
         data['paths'] = revised_paths
         self.__class__.JSON = data
 
-    def test_sanity(self, release):
+    def test_sanity(self, release, request):
         JSON = self.__class__.JSON
         JSON['info']['version'] = release
 
+        if not request.config.getoption('--genschema'):
+            JSON['modified'] = datetime.datetime.utcnow().isoformat()
+
         # Make some basic assertions about the rendered JSON so we can
         # be sure it doesn't break across DRF upgrades and view/serializer
         # changes.

@@ -115,7 +117,7 @@ class TestSwaggerGeneration():
         # hit a couple important endpoints so we always have example data
         get(path, user=admin, expect=200)
 
-    def test_autogen_response_examples(self, swagger_autogen):
+    def test_autogen_response_examples(self, swagger_autogen, request):
         for pattern, node in TestSwaggerGeneration.JSON['paths'].items():
             pattern = pattern.replace('{id}', '[0-9]+')
             pattern = pattern.replace(r'{category_slug}', r'[a-zA-Z0-9\-]+')

@@ -138,7 +140,7 @@ class TestSwaggerGeneration():
                 for param in node[method].get('parameters'):
                     if param['in'] == 'body':
                         node[method]['parameters'].remove(param)
-                if pytest.config.getoption("--genschema"):
+                if request.config.getoption("--genschema"):
                     pytest.skip("In schema generator skipping swagger generator", allow_module_level=True)
                 else:
                     node[method].setdefault('parameters', []).append({

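Note: the global pytest.config object was deprecated in pytest 4 and removed in 5.0; command line options are now read through the request (or pytestconfig) fixture, which is what the swagger tests above switch to. A minimal sketch, assuming a custom --genschema flag registered in a conftest.py:

    import pytest

    # conftest.py (assumed) would register the flag:
    #     def pytest_addoption(parser):
    #         parser.addoption('--genschema', action='store_true', default=False)

    def test_respects_genschema(request):
        # Passing a default keeps the sketch runnable even without the conftest.
        if request.config.getoption('--genschema', default=False):
            pytest.skip('only generating the schema')
        assert True
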
@@ -121,21 +121,24 @@ def credential_kind(source):
 
 
 @pytest.fixture
-def fake_credential_factory(source):
-    ct = CredentialType.defaults[credential_kind(source)]()
-    ct.save()
-
-    inputs = {}
-    var_specs = {}  # pivoted version of inputs
-    for element in ct.inputs.get('fields'):
-        var_specs[element['id']] = element
-    for var in var_specs.keys():
-        inputs[var] = generate_fake_var(var_specs[var])
-
-    return Credential.objects.create(
-        credential_type=ct,
-        inputs=inputs
-    )
+def fake_credential_factory():
+    def wrap(source):
+        ct = CredentialType.defaults[credential_kind(source)]()
+        ct.save()
+
+        inputs = {}
+        var_specs = {}  # pivoted version of inputs
+        for element in ct.inputs.get('fields'):
+            var_specs[element['id']] = element
+        for var in var_specs.keys():
+            inputs[var] = generate_fake_var(var_specs[var])
+
+        return Credential.objects.create(
+            credential_type=ct,
+            inputs=inputs
+        )
+    return wrap
 
 
 def read_content(private_data_dir, raw_env, inventory_update):

@@ -248,7 +251,7 @@ def create_reference_data(source_dir, env, content):
 @pytest.mark.django_db
 @pytest.mark.parametrize('this_kind', CLOUD_PROVIDERS)
 @pytest.mark.parametrize('script_or_plugin', ['scripts', 'plugins'])
-def test_inventory_update_injected_content(this_kind, script_or_plugin, inventory):
+def test_inventory_update_injected_content(this_kind, script_or_plugin, inventory, fake_credential_factory):
     src_vars = dict(base_source_var='value_of_var')
     if this_kind in INI_TEST_VARS:
         src_vars.update(INI_TEST_VARS[this_kind])

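Note: the rewritten fixture follows pytest's "factory as fixture" pattern: instead of expecting source to be resolved as a fixture argument, fake_credential_factory now returns a callable, and test_inventory_update_injected_content requests the fixture and calls it with whichever source it is exercising. A standalone sketch of the pattern (names are illustrative, not from the AWX code):

    import pytest

    @pytest.fixture
    def record_factory():
        created = []

        def make(kind):
            # Build and remember one fake record per call.
            record = {'kind': kind, 'inputs': {}}
            created.append(record)
            return record

        yield make
        created.clear()  # room for per-test cleanup of whatever was created

    def test_record_factory(record_factory):
        assert record_factory('ec2')['kind'] == 'ec2'
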
@@ -158,7 +158,7 @@ def test_cred_type_injectors_schema(injectors, valid):
     )
     field = CredentialType._meta.get_field('injectors')
     if valid is False:
-        with pytest.raises(ValidationError, message="Injector was supposed to throw a validation error, data: {}".format(injectors)):
+        with pytest.raises(ValidationError):
             field.clean(injectors, type_)
     else:
         field.clean(injectors, type_)

@@ -2373,7 +2373,7 @@ def test_aquire_lock_open_fail_logged(logging_getLogger, os_open):
 
     ProjectUpdate = tasks.RunProjectUpdate()
 
-    with pytest.raises(OSError, message='dummy message'):
+    with pytest.raises(OSError):
         ProjectUpdate.acquire_lock(instance)
     assert logger.err.called_with("I/O error({0}) while trying to open lock file [{1}]: {2}".format(3, 'this_file_does_not_exist', 'dummy message'))

@@ -2399,7 +2399,7 @@ def test_aquire_lock_acquisition_fail_logged(fcntl_lockf, logging_getLogger, os_
     fcntl_lockf.side_effect = err
 
     ProjectUpdate = tasks.RunProjectUpdate()
-    with pytest.raises(IOError, message='dummy message'):
+    with pytest.raises(IOError):
         ProjectUpdate.acquire_lock(instance)
     os_close.assert_called_with(3)
     assert logger.err.called_with("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(3, 'this_file_does_not_exist', 'dummy message'))

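Note: the message= argument to pytest.raises() (dropped in all three call sites above) was deprecated in pytest 4 and removed in 5.0; it only changed the failure text shown when no exception was raised and was frequently mistaken for a match on the exception message. When the exception text itself matters, match= is the supported replacement; a minimal sketch:

    import pytest

    def open_lock_file(path):
        # Stand-in for the real acquire_lock(); always fails for the sketch.
        raise OSError(3, 'dummy message', path)

    def test_lock_open_failure_message():
        # match= runs a regex search against str(exception)
        with pytest.raises(OSError, match='dummy message'):
            open_lock_file('this_file_does_not_exist')
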
@@ -213,6 +213,7 @@ RUN chmod u+s /usr/bin/bwrap ; \
 {% if build_dev|bool %}
 RUN for dir in \
       /venv \
+      /venv/awx/lib/python3.6 \
       /var/lib/awx/projects \
       /var/lib/awx/rsyslog \
       /var/run/awx-rsyslog \

@@ -8,4 +8,8 @@ markers =
     ac: access control test
     survey: tests related to survey feature
     inventory_import: tests of code used by inventory import command
+    defined_in_file:
+    job_permissions:
+    activity_stream_access:
+    job_runtime_vars:
 junit_family=xunit2

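Note: newer pytest versions warn about markers that are not declared under markers = (and reject them outright with --strict-markers), so the custom markers used by the test suite are registered above. A minimal sketch of declaring and applying one such marker (the description text is illustrative):

    # pytest.ini (illustrative)
    #   [pytest]
    #   markers =
    #       job_runtime_vars: tests that exercise runtime job variables

    import pytest

    @pytest.mark.job_runtime_vars
    def test_runtime_vars_marker_is_registered():
        # With the declaration above, this does not trigger PytestUnknownMarkWarning.
        assert True
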
@@ -60,7 +60,7 @@ jaraco.functools==3.0.0 # via irc, jaraco.text, tempora
 jaraco.logging==3.0.0     # via irc
 jaraco.stream==3.0.0      # via irc
 jaraco.text==3.2.0        # via irc, jaraco.collections
-jinja2==2.11.1            # via -r /awx_devel/requirements/requirements.in, openshift
+jinja2==2.11.2            # via -r /awx_devel/requirements/requirements.in, openshift
 jsonschema==3.2.0         # via -r /awx_devel/requirements/requirements.in
 kubernetes==11.0.0        # via openshift
 lockfile==0.12.2          # via python-daemon

@@ -5,15 +5,14 @@ ipython==5.2.1
 unittest2
 pep8
 flake8
-pluggy==0.6.0
 pyflakes
-pytest==3.6.0
+pytest
 pytest-cov
 pytest-django
 pytest-pythonpath
 pytest-mock==1.11.1
 pytest-timeout
-pytest-xdist<1.28.0
+pytest-xdist
 tox # for awxkit
 logutils
 jupyter