mirror of https://github.com/ansible/awx.git
synced 2026-02-05 03:24:50 -03:30

Compare commits (17 Commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 19e3cba35c |  |
|  | e4646ae611 |  |
|  | c11ff49a56 |  |
|  | 7dc77546f4 |  |
|  | f5f85666c8 |  |
|  | 47a061eb39 |  |
|  | 51bcf82cf4 |  |
|  | c760577855 |  |
|  | 814ceb0d06 |  |
|  | f178c84728 |  |
|  | c0f71801f6 |  |
|  | 4e8e1398d7 |  |
|  | 3d6a8fd4ef |  |
|  | e873bb1304 |  |
|  | 672f1eb745 |  |
|  | 199507c6f1 |  |
|  | a176c04c14 |  |
```diff
@@ -80,7 +80,7 @@ If any of those items are missing your pull request will still get the `needs_tr
 Currently you can expect awxbot to add common labels such as `state:needs_triage`, `type:bug`, `component:docs`, etc...
 These labels are determined by the template data. Please use the template and fill it out as accurately as possible.
 
-The `state:needs_triage` label will will remain on your pull request until a person has looked at it.
+The `state:needs_triage` label will remain on your pull request until a person has looked at it.
 
 You can also expect the bot to CC maintainers of specific areas of the code, this will notify them that there is a pull request by placing a comment on the pull request.
 The comment will look something like `CC @matburt @wwitzel3 ...`.
```
Makefile

```diff
@@ -616,7 +616,7 @@ docker-clean:
 	-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
 
 docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
-	docker volume rm -f tools_var_lib_awx tools_awx_db tools_awx_db_15 tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
+	docker volume rm -f tools_var_lib_awx tools_awx_db tools_awx_db_15 tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(shell docker volume ls --filter name=tools_redis_socket_ -q)
 
 docker-refresh: docker-clean docker-compose
```
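The fix swaps `$(docker volume ls ...)`, which GNU Make parses as a reference to an (undefined) variable and expands to nothing, for `$(shell docker volume ls ...)`, which actually runs the command. A quick standalone way to see the two expansions (a sketch of mine, not part of the change; it assumes GNU make is on PATH):

```python
# Feed two one-rule makefiles to `make -f -`: the first interpolates an
# undefined Make variable (expands empty), the second runs the command
# through $(shell ...).
import subprocess

broken = 'all:\n\t@echo "[$(echo hi)]"\n'       # variable named "echo hi": expands empty
fixed = 'all:\n\t@echo "[$(shell echo hi)]"\n'  # shell call: expands to "hi"

for mk in (broken, fixed):
    out = subprocess.run(['make', '-sf', '-'], input=mk, capture_output=True, text=True)
    print(out.stdout.strip())  # prints "[]" then "[hi]"
```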
```diff
@@ -95,7 +95,9 @@ class LoggedLoginView(auth_views.LoginView):
         ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
         if request.user.is_authenticated:
             logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
-            ret.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False))
+            ret.set_cookie(
+                'userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False), samesite=getattr(settings, 'USER_COOKIE_SAMESITE', 'Lax')
+            )
             ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))
 
         return ret
```
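For context, Django's `HttpResponse.set_cookie` writes the `samesite` argument straight into the `Set-Cookie` header. A minimal sketch against a bare Django install (nothing AWX-specific):

```python
from django.conf import settings

settings.configure()  # minimal settings so HttpResponse can be used standalone

from django.http import HttpResponse

resp = HttpResponse()
resp.set_cookie('userLoggedIn', 'true', secure=True, samesite='Lax')
print(resp.cookies['userLoggedIn'].output())
# prints something like: Set-Cookie: userLoggedIn=true; Path=/; SameSite=Lax; Secure
```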
```diff
@@ -2,6 +2,7 @@
 import logging
 
 # Django
+from django.core.checks import Error
 from django.utils.translation import gettext_lazy as _
 
 # Django REST Framework
@@ -954,3 +955,27 @@ def logging_validate(serializer, attrs):
 
 
 register_validate('logging', logging_validate)
+
+
+def csrf_trusted_origins_validate(serializer, attrs):
+    if not serializer.instance or not hasattr(serializer.instance, 'CSRF_TRUSTED_ORIGINS'):
+        return attrs
+    if 'CSRF_TRUSTED_ORIGINS' not in attrs:
+        return attrs
+    errors = []
+    for origin in attrs['CSRF_TRUSTED_ORIGINS']:
+        if "://" not in origin:
+            errors.append(
+                Error(
+                    "As of Django 4.0, the values in the CSRF_TRUSTED_ORIGINS "
+                    "setting must start with a scheme (usually http:// or "
+                    "https://) but found %s. See the release notes for details." % origin,
+                )
+            )
+    if errors:
+        error_messages = [error.msg for error in errors]
+        raise serializers.ValidationError(_('\n'.join(error_messages)))
+    return attrs
+
+
+register_validate('system', csrf_trusted_origins_validate)
```
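The rule the new validator enforces is small enough to exercise on its own; a sketch of the same scheme check outside the serializer machinery, with hypothetical origin values:

```python
def has_scheme(origin: str) -> bool:
    # CSRF_TRUSTED_ORIGINS entries must carry an explicit scheme since Django 4.0
    return "://" in origin

assert has_scheme("https://awx.example.org")
assert not has_scheme("awx.example.org")  # this value would raise a ValidationError above
```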
```diff
@@ -2,10 +2,11 @@ import json
 import os
 import sys
+import re
 
 from typing import Any
 
 from django.core.management.base import BaseCommand
 from django.conf import settings
 
 from awx.conf import settings_registry
 
@@ -40,6 +41,15 @@ class Command(BaseCommand):
         "USER_SEARCH": False,
     }
 
+    def is_enabled(self, settings, keys):
+        missing_fields = []
+        for key, required in keys.items():
+            if required and not settings.get(key):
+                missing_fields.append(key)
+        if missing_fields:
+            return False, missing_fields
+        return True, None
+
     def get_awx_ldap_settings(self) -> dict[str, dict[str, Any]]:
         awx_ldap_settings = {}
 
@@ -64,15 +74,17 @@ class Command(BaseCommand):
 
+                if new_key == "SERVER_URI" and value:
+                    value = value.split(", ")
                 grouped_settings[index][new_key] = value
 
+                if type(value).__name__ == "LDAPSearch":
+                    data = []
+                    data.append(value.base_dn)
+                    data.append("SCOPE_SUBTREE")
+                    data.append(value.filterstr)
+                    grouped_settings[index][new_key] = data
+
         return grouped_settings
 
-    def is_enabled(self, settings, keys):
-        for key, required in keys.items():
-            if required and not settings.get(key):
-                return False
-        return True
-
     def get_awx_saml_settings(self) -> dict[str, Any]:
         awx_saml_settings = {}
         for awx_saml_setting in settings_registry.get_registered_settings(category_slug='saml'):
@@ -82,7 +94,7 @@ class Command(BaseCommand):
 
     def format_config_data(self, enabled, awx_settings, type, keys, name):
         config = {
-            "type": f"awx.authentication.authenticator_plugins.{type}",
+            "type": f"ansible_base.authentication.authenticator_plugins.{type}",
             "name": name,
             "enabled": enabled,
             "create_objects": True,
@@ -130,7 +142,7 @@ class Command(BaseCommand):
 
         # dump SAML settings
         awx_saml_settings = self.get_awx_saml_settings()
-        awx_saml_enabled = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS)
+        awx_saml_enabled, saml_missing_fields = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS)
         if awx_saml_enabled:
             awx_saml_name = awx_saml_settings["ENABLED_IDPS"]
             data.append(
@@ -142,21 +154,25 @@ class Command(BaseCommand):
                     awx_saml_name,
                 )
             )
+        else:
+            data.append({"SAML_missing_fields": saml_missing_fields})
 
         # dump LDAP settings
         awx_ldap_group_settings = self.get_awx_ldap_settings()
-        for awx_ldap_name, awx_ldap_settings in enumerate(awx_ldap_group_settings.values()):
-            enabled = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS)
-            if enabled:
+        for awx_ldap_name, awx_ldap_settings in awx_ldap_group_settings.items():
+            awx_ldap_enabled, ldap_missing_fields = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS)
+            if awx_ldap_enabled:
                 data.append(
                     self.format_config_data(
-                        enabled,
+                        awx_ldap_enabled,
                         awx_ldap_settings,
                         "ldap",
                         self.DAB_LDAP_AUTHENTICATOR_KEYS,
-                        str(awx_ldap_name),
+                        f"LDAP_{awx_ldap_name}",
                     )
                 )
+            else:
+                data.append({f"LDAP_{awx_ldap_name}_missing_fields": ldap_missing_fields})
 
         # write to file if requested
         if options["output_file"]:
```
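The reworked `is_enabled` returns a pair instead of a bare boolean so the command can report exactly which required keys are unconfigured. A standalone sketch of that contract, using a made-up settings dict:

```python
def is_enabled(settings, keys):
    missing = [key for key, required in keys.items() if required and not settings.get(key)]
    return (False, missing) if missing else (True, None)

enabled, missing = is_enabled(
    {"SERVER_URI": "ldaps://ldap.example.org"},
    {"SERVER_URI": True, "BIND_DN": True, "USER_SEARCH": False},
)
print(enabled, missing)  # False ['BIND_DN']
```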
```diff
@@ -165,11 +165,10 @@ class Command(BaseCommand):
             return
 
         WebsocketsMetricsServer().start()
-        websocket_relay_manager = WebSocketRelayManager()
 
         while True:
             try:
-                asyncio.run(websocket_relay_manager.run())
+                asyncio.run(WebSocketRelayManager().run())
             except KeyboardInterrupt:
                 logger.info('Shutting down Websocket Relayer')
                 break
```
```diff
@@ -58,7 +58,7 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
             response['X-API-Profile-File'] = self.prof.stop()
         perf_logger.debug(
             f'request: {request}, response_time: {response["X-API-Total-Time"]}',
-            extra=dict(python_objects=dict(request=request, response=response, X_API_TOTAL_TIME=response["X-API-Total-Time"])),
+            extra=dict(python_objects=dict(request=request, response=response, X_API_TOTAL_TIME=response["X-API-Total-Time"], x_request_id=request.get('x-request-id', 'not-set'))),
         )
         return response
```
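The added `x_request_id` field follows the usual correlation-ID pattern: tag every log record for a request with a caller-supplied identifier (or a placeholder when absent) so entries can be grouped per request. A minimal generic sketch of the idea, not AWX's actual middleware:

```python
import logging

logger = logging.getLogger('perf')

class RequestIDLoggingMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        # plain Django exposes incoming headers via request.headers;
        # 'not-set' mirrors the fallback used in the change above
        rid = request.headers.get('x-request-id', 'not-set')
        logger.debug('request finished', extra={'x_request_id': rid})
        return response
```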
```diff
@@ -4,7 +4,6 @@ from django.db import migrations, models
 
 
 class Migration(migrations.Migration):
-
     dependencies = [
         ('main', '0189_inbound_hop_nodes'),
     ]
```
```diff
@@ -140,6 +140,17 @@ def get_permissions_for_role(role_field, children_map, apps):
     return perm_list
 
 
+def model_class(ct, apps):
+    """
+    You can not use model methods in migrations, so this duplicates
+    what ContentType.model_class does, using current apps
+    """
+    try:
+        return apps.get_model(ct.app_label, ct.model)
+    except LookupError:
+        return None
+
+
 def migrate_to_new_rbac(apps, schema_editor):
     """
     This method moves the assigned permissions from the old rbac.py models
@@ -197,7 +208,7 @@ def migrate_to_new_rbac(apps, schema_editor):
             role_definition = managed_definitions[permissions]
         else:
             action = role.role_field.rsplit('_', 1)[0]  # remove the _field ending of the name
-            role_definition_name = f'{role.content_type.model_class().__name__} {action.title()}'
+            role_definition_name = f'{model_class(role.content_type, apps).__name__} {action.title()}'
 
             description = role_descriptions[role.role_field]
             if type(description) == dict:
```
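The helper exists because a data migration must resolve models through the frozen `apps` registry it receives, which reflects the schema at that point in migration history, while the live `ContentType.model_class()` resolves against current code and can disagree with the migration state. A sketch of the same lookup pattern in a generic data migration (illustrative function, not from the change):

```python
def forwards(apps, schema_editor):
    ContentType = apps.get_model('contenttypes', 'ContentType')
    for ct in ContentType.objects.all():
        try:
            model = apps.get_model(ct.app_label, ct.model)  # historical model state
        except LookupError:
            continue  # the content type points at a model that no longer exists
        print(model.__name__)
```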
```diff
@@ -1232,6 +1232,14 @@ ManagedCredentialType(
             'multiline': True,
             'help_text': gettext_noop('Terraform backend config as Hashicorp configuration language.'),
         },
+        {
+            'id': 'gce_credentials',
+            'label': gettext_noop('Google Cloud Platform account credentials'),
+            'type': 'string',
+            'secret': True,
+            'multiline': True,
+            'help_text': gettext_noop('Google Cloud Platform account credentials in JSON format.'),
+        },
     ],
     'required': ['configuration'],
 },
```
```diff
@@ -130,3 +130,10 @@ def terraform(cred, env, private_data_dir):
         os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
         f.write(cred.get_input('configuration'))
     env['TF_BACKEND_CONFIG_FILE'] = to_container_path(path, private_data_dir)
+    # Handle env variables for GCP account credentials
+    if 'gce_credentials' in cred.inputs:
+        handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
+        with os.fdopen(handle, 'w') as f:
+            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
+            f.write(cred.get_input('gce_credentials'))
+        env['GOOGLE_BACKEND_CREDENTIALS'] = to_container_path(path, private_data_dir)
```
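The injector follows the usual write-a-secret-to-disk pattern: create the file with `mkstemp`, clamp permissions to the owner before writing, and hand back only the path. The same pattern in isolation (hypothetical helper, assuming the `env/` subdirectory already exists):

```python
import os
import stat
import tempfile

def write_secret(private_data_dir: str, contents: str) -> str:
    handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
    with os.fdopen(handle, 'w') as f:
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)  # 0600: owner read/write only
        f.write(contents)
    return path  # callers map this to a container path before handing it on
```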
```diff
@@ -11,6 +11,8 @@ import os.path
 from urllib.parse import urljoin
 
 import yaml
+import tempfile
+import stat
 
 # Django
 from django.conf import settings
@@ -1638,17 +1640,39 @@ class satellite6(PluginFileInjector):
 
 class terraform(PluginFileInjector):
     plugin_name = 'terraform_state'
     base_injector = 'managed'
     namespace = 'cloud'
     collection = 'terraform'
     use_fqcn = True
 
     def inventory_as_dict(self, inventory_update, private_data_dir):
-        env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, None)
         ret = super().inventory_as_dict(inventory_update, private_data_dir)
-        ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"]
+        credential = inventory_update.get_cloud_credential()
+        config_cred = credential.get_input('configuration')
+        if config_cred:
+            handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
+            with os.fdopen(handle, 'w') as f:
+                os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
+                f.write(config_cred)
+            ret['backend_config_files'] = to_container_path(path, private_data_dir)
         return ret
 
+    def build_plugin_private_data(self, inventory_update, private_data_dir):
+        credential = inventory_update.get_cloud_credential()
+
+        private_data = {'credentials': {}}
+        gce_cred = credential.get_input('gce_credentials')
+        if gce_cred:
+            private_data['credentials'][credential] = gce_cred
+        return private_data
+
+    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
+        env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
+        credential = inventory_update.get_cloud_credential()
+        cred_data = private_data_files['credentials']
+        if cred_data[credential]:
+            env['GOOGLE_BACKEND_CREDENTIALS'] = to_container_path(cred_data[credential], private_data_dir)
+        return env
+
 
 class controller(PluginFileInjector):
     plugin_name = 'tower'  # TODO: relying on routing for now, update after EEs pick up revised collection
```
```diff
@@ -1,3 +1,3 @@
 {
-    "TF_BACKEND_CONFIG_FILE": "{{ file_reference }}"
+    "GOOGLE_BACKEND_CREDENTIALS": "{{ file_reference }}"
 }
```
```diff
@@ -40,10 +40,25 @@ def test_custom_system_roles_prohibited(admin_user, post):
 
 
 @pytest.mark.django_db
-def test_assign_managed_role(admin_user, alice, rando, inventory, post, managed_roles):
+def test_assignment_to_invisible_user(admin_user, alice, rando, inventory, post, managed_roles):
+    "Alice can not see rando, and so can not give them a role assignment"
     rd = RoleDefinition.objects.get(name='Inventory Admin')
     rd.give_permission(alice, inventory)
     # Now that alice has full permissions to the inventory, she will give rando permission
     url = django_reverse('roleuserassignment-list')
-    post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": inventory.id}, user=alice, expect=201)
-    assert rando.has_obj_perm(inventory, 'change') is True
+    r = post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": inventory.id}, user=alice, expect=400)
+    assert 'does not exist' in str(r.data)
+    assert not rando.has_obj_perm(inventory, 'change')
+
+
+@pytest.mark.django_db
+def test_assign_managed_role(admin_user, alice, rando, inventory, post, managed_roles, organization):
+    rd = RoleDefinition.objects.get(name='Inventory Admin')
+    rd.give_permission(alice, inventory)
+    # When alice and rando are members of the same org, they can see each other
+    member_rd = RoleDefinition.objects.get(name='Organization Member')
+    for u in (alice, rando):
+        member_rd.give_permission(u, organization)
+    # Now that alice has full permissions to the inventory, and can see rando, she will give rando permission
+    url = django_reverse('roleuserassignment-list')
+    post(url=url, data={"user": rando.id, "role_definition": rd.id, "object_id": inventory.id}, user=alice, expect=201)
+    assert rando.has_obj_perm(inventory, 'change') is True
```
```diff
@@ -1,6 +1,7 @@
 import pytest
 
 from django_test_migrations.plan import all_migrations, nodes_to_tuples
+from django.utils.timezone import now
 
 """
 Most tests that live in here can probably be deleted at some point. They are mainly
@@ -68,3 +69,19 @@ class TestMigrationSmoke:
         bar_peers = bar.peers.all()
         assert len(bar_peers) == 1
         assert fooaddr in bar_peers
+
+    def test_migrate_DAB_RBAC(self, migrator):
+        old_state = migrator.apply_initial_migration(('main', '0190_alter_inventorysource_source_and_more'))
+        Organization = old_state.apps.get_model('main', 'Organization')
+        User = old_state.apps.get_model('auth', 'User')
+
+        org = Organization.objects.create(name='arbitrary-org', created=now(), modified=now())
+        user = User.objects.create(username='random-user')
+        org.read_role.members.add(user)
+
+        new_state = migrator.apply_tested_migration(
+            ('main', '0192_custom_roles'),
+        )
+
+        RoleUserAssignment = new_state.apps.get_model('dab_rbac', 'RoleUserAssignment')
+        assert RoleUserAssignment.objects.filter(user=user.id, object_id=org.id).exists()
```
```diff
@@ -52,7 +52,7 @@ class TestDumpAuthConfigCommand(TestCase):
         super().setUp()
         self.expected_config = [
             {
-                "type": "awx.authentication.authenticator_plugins.saml",
+                "type": "ansible_base.authentication.authenticator_plugins.saml",
                 "name": "Keycloak",
                 "enabled": True,
                 "create_objects": True,
@@ -94,14 +94,14 @@ class TestDumpAuthConfigCommand(TestCase):
                 },
             },
             {
-                "type": "awx.authentication.authenticator_plugins.ldap",
-                "name": "1",
+                "type": "ansible_base.authentication.authenticator_plugins.ldap",
+                "name": "LDAP_1",
                 "enabled": True,
                 "create_objects": True,
                 "users_unique": False,
                 "remove_users": True,
                 "configuration": {
-                    "SERVER_URI": "SERVER_URI",
+                    "SERVER_URI": ["SERVER_URI"],
                     "BIND_DN": "BIND_DN",
                     "BIND_PASSWORD": "BIND_PASSWORD",
                     "CONNECTION_OPTIONS": {},
@@ -119,4 +119,14 @@ class TestDumpAuthConfigCommand(TestCase):
     def test_json_returned_from_cmd(self):
         output = StringIO()
         call_command("dump_auth_config", stdout=output)
-        assert json.loads(output.getvalue()) == self.expected_config
+        cmmd_output = json.loads(output.getvalue())
+
+        # check configured SAML return
+        assert cmmd_output[0] == self.expected_config[0]
+
+        # check configured LDAP return
+        assert cmmd_output[2] == self.expected_config[1]
+
+        # check unconfigured LDAP return
+        assert "LDAP_0_missing_fields" in cmmd_output[1]
+        assert cmmd_output[1]["LDAP_0_missing_fields"] == ['SERVER_URI', 'GROUP_TYPE', 'GROUP_TYPE_PARAMS', 'USER_DN_TEMPLATE', 'USER_ATTR_MAP']
```
```diff
@@ -1106,6 +1106,44 @@ class TestJobCredentials(TestJobExecution):
         config = open(local_path, 'r').read()
         assert config == hcl_config
 
+    def test_terraform_gcs_backend_credentials(self, job, private_data_dir, mock_me):
+        terraform = CredentialType.defaults['terraform']()
+        hcl_config = '''
+        backend "gcs" {
+            bucket = "gce_storage"
+        }
+        '''
+        gce_backend_credentials = '''
+        {
+            "type": "service_account",
+            "project_id": "sample",
+            "private_key_id": "eeeeeeeeeeeeeeeeeeeeeeeeeee",
+            "private_key": "-----BEGIN PRIVATE KEY-----\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n-----END PRIVATE KEY-----\n",
+            "client_email": "sample@sample.iam.gserviceaccount.com",
+            "client_id": "0123456789",
+            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+            "token_uri": "https://oauth2.googleapis.com/token",
+            "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+            "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/cloud-content-robot%40sample.iam.gserviceaccount.com",
+        }
+        '''
+        credential = Credential(pk=1, credential_type=terraform, inputs={'configuration': hcl_config, 'gce_credentials': gce_backend_credentials})
+        credential.inputs['configuration'] = encrypt_field(credential, 'configuration')
+        credential.inputs['gce_credentials'] = encrypt_field(credential, 'gce_credentials')
+        job.credentials.add(credential)
+
+        env = {}
+        safe_env = {}
+        credential.credential_type.inject_credential(credential, env, safe_env, [], private_data_dir)
+
+        local_path = to_host_path(env['TF_BACKEND_CONFIG_FILE'], private_data_dir)
+        config = open(local_path, 'r').read()
+        assert config == hcl_config
+
+        credentials_path = to_host_path(env['GOOGLE_BACKEND_CREDENTIALS'], private_data_dir)
+        credentials = open(credentials_path, 'r').read()
+        assert credentials == gce_backend_credentials
+
     def test_custom_environment_injectors_with_jinja_syntax_error(self, private_data_dir, mock_me):
         some_cloud = CredentialType(
             kind='cloud',
```
```diff
@@ -242,7 +242,7 @@ class WebSocketRelayManager(object):
                 # In this case, we'll be sharing a redis, no need to relay.
                 if payload.get("hostname") == self.local_hostname:
                     hostname = payload.get("hostname")
-                    logger.debug("Received a heartbeat request for {hostname}. Skipping as we use redis for local host.")
+                    logger.debug(f"Received a heartbeat request for {hostname}. Skipping as we use redis for local host.")
                     continue
 
                 action = payload.get("action")
@@ -285,6 +285,8 @@ class WebSocketRelayManager(object):
             except asyncio.CancelledError:
                 # Handle the case where the task was already cancelled by the time we got here.
                 pass
+            except Exception as e:
+                logger.warning(f"Failed to cancel relay connection for {hostname}: {e}")
 
             del self.relay_connections[hostname]
@@ -295,6 +297,8 @@ class WebSocketRelayManager(object):
             self.stats_mgr.delete_remote_host_stats(hostname)
         except KeyError:
             pass
+        except Exception as e:
+            logger.warning(f"Failed to delete stats for {hostname}: {e}")
 
     async def run(self):
         event_loop = asyncio.get_running_loop()
@@ -316,57 +320,77 @@ class WebSocketRelayManager(object):
 
         task = None
 
+        # Managing the async_conn here so that we can close it if we need to restart the connection
+        async_conn = None
+
         # Establishes a websocket connection to /websocket/relay on all API servers
-        while True:
-            if not task or task.done():
-                try:
-                    async_conn = await psycopg.AsyncConnection.connect(
-                        dbname=database_conf['NAME'],
-                        host=database_conf['HOST'],
-                        user=database_conf['USER'],
-                        port=database_conf['PORT'],
-                        **database_conf.get("OPTIONS", {}),
-                    )
-                    await async_conn.set_autocommit(True)
-
-                    task = event_loop.create_task(self.on_ws_heartbeat(async_conn), name="on_ws_heartbeat")
-                    logger.info("Creating `on_ws_heartbeat` task in event loop.")
-
-                    await async_conn.close()
-                except Exception as e:
-                    logger.warning(f"Failed to connect to database for pg_notify: {e}")
-
-            future_remote_hosts = self.known_hosts.keys()
-            current_remote_hosts = self.relay_connections.keys()
-            deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
-            new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)
-
-            # This loop handles if we get an advertisement from a host we already know about but
-            # the advertisement has a different IP than we are currently connected to.
-            for hostname, address in self.known_hosts.items():
-                if hostname not in self.relay_connections:
-                    # We've picked up a new hostname that we don't know about yet.
-                    continue
-
-                if address != self.relay_connections[hostname].remote_host:
-                    deleted_remote_hosts.add(hostname)
-                    new_remote_hosts.add(hostname)
-
-            # Delete any hosts with closed connections
-            for hostname, relay_conn in self.relay_connections.items():
-                if not relay_conn.connected:
-                    deleted_remote_hosts.add(hostname)
-
-            if deleted_remote_hosts:
-                logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
-                await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])
-
-            if new_remote_hosts:
-                logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")
-
-            for h in new_remote_hosts:
-                stats = self.stats_mgr.new_remote_host_stats(h)
-                relay_connection = WebsocketRelayConnection(name=self.local_hostname, stats=stats, remote_host=self.known_hosts[h])
-                relay_connection.start()
-                self.relay_connections[h] = relay_connection
-
-            await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)
+        try:
+            while True:
+                if not task or task.done():
+                    try:
+                        # Try to close the connection if it's open
+                        if async_conn:
+                            try:
+                                await async_conn.close()
+                            except Exception as e:
+                                logger.warning(f"Failed to close connection to database for pg_notify: {e}")
+
+                        # and re-establish the connection
+                        async_conn = await psycopg.AsyncConnection.connect(
+                            dbname=database_conf['NAME'],
+                            host=database_conf['HOST'],
+                            user=database_conf['USER'],
+                            port=database_conf['PORT'],
+                            **database_conf.get("OPTIONS", {}),
+                        )
+                        await async_conn.set_autocommit(True)
+
+                        # before creating the task that uses the connection
+                        task = event_loop.create_task(self.on_ws_heartbeat(async_conn), name="on_ws_heartbeat")
+                        logger.info("Creating `on_ws_heartbeat` task in event loop.")
+
+                    except Exception as e:
+                        logger.warning(f"Failed to connect to database for pg_notify: {e}")
+
+                future_remote_hosts = self.known_hosts.keys()
+                current_remote_hosts = self.relay_connections.keys()
+                deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
+                new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)
+
+                # This loop handles if we get an advertisement from a host we already know about but
+                # the advertisement has a different IP than we are currently connected to.
+                for hostname, address in self.known_hosts.items():
+                    if hostname not in self.relay_connections:
+                        # We've picked up a new hostname that we don't know about yet.
+                        continue
+
+                    if address != self.relay_connections[hostname].remote_host:
+                        deleted_remote_hosts.add(hostname)
+                        new_remote_hosts.add(hostname)
+
+                # Delete any hosts with closed connections
+                for hostname, relay_conn in self.relay_connections.items():
+                    if not relay_conn.connected:
+                        deleted_remote_hosts.add(hostname)
+
+                if deleted_remote_hosts:
+                    logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
+                    await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])
+
+                if new_remote_hosts:
+                    logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")
+
+                for h in new_remote_hosts:
+                    stats = self.stats_mgr.new_remote_host_stats(h)
+                    relay_connection = WebsocketRelayConnection(name=self.local_hostname, stats=stats, remote_host=self.known_hosts[h])
+                    relay_connection.start()
+                    self.relay_connections[h] = relay_connection
+
+                await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)
+        finally:
+            if async_conn:
+                logger.info("Shutting down db connection for wsrelay.")
+                try:
+                    await async_conn.close()
+                except Exception as e:
+                    logger.info(f"Failed to close connection to database for pg_notify: {e}")
```
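Stripped of the relay bookkeeping, the lifecycle the rewrite adopts is: keep one long-lived connection owned by the loop, replace it only when its consumer task has died, and close it exactly once on the way out. A condensed sketch of that shape with generic `connect`/`consume` callables (my reading of the pattern, not AWX code):

```python
import asyncio

async def supervise(connect, consume, poll_seconds=1.0):
    task, conn = None, None
    try:
        while True:
            if not task or task.done():
                if conn:  # drop the stale connection before reconnecting
                    try:
                        await conn.close()
                    except Exception:
                        pass
                conn = await connect()
                task = asyncio.get_running_loop().create_task(consume(conn))
            await asyncio.sleep(poll_seconds)  # periodic bookkeeping would go here
    finally:
        if conn:  # single shutdown path, even on cancellation
            await conn.close()
```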
```diff
@@ -277,6 +277,9 @@ SESSION_COOKIE_SECURE = True
 # Note: This setting may be overridden by database settings.
 SESSION_COOKIE_AGE = 1800
 
+# Option to change userLoggedIn cookie SameSite policy.
+USER_COOKIE_SAMESITE = 'Lax'
+
 # Name of the cookie that contains the session information.
 # Note: Changing this value may require changes to any clients.
 SESSION_COOKIE_NAME = 'awx_sessionid'
```
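Both login views read this setting defensively via `getattr(settings, 'USER_COOKIE_SAMESITE', 'Lax')`, so a deployment can override it in local settings; a hypothetical override ('Lax', 'Strict', and 'None' are the values Django accepts for cookies):

```python
# local_settings.py (hypothetical): forbid the userLoggedIn cookie on all
# cross-site requests instead of the default top-level-navigation allowance
USER_COOKIE_SAMESITE = 'Strict'
```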
```diff
@@ -38,7 +38,9 @@ class CompleteView(BaseRedirectView):
         response = super(CompleteView, self).dispatch(request, *args, **kwargs)
         if self.request.user and self.request.user.is_authenticated:
             logger.info(smart_str(u"User {} logged in".format(self.request.user.username)))
-            response.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False))
+            response.set_cookie(
+                'userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False), samesite=getattr(settings, 'USER_COOKIE_SAMESITE', 'Lax')
+            )
             response.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))
         return response
```
```diff
@@ -12,7 +12,7 @@ import AssociateModal from 'components/AssociateModal';
 import ErrorDetail from 'components/ErrorDetail';
 import AlertModal from 'components/AlertModal';
 import useToast, { AlertVariant } from 'hooks/useToast';
-import { getQSConfig, parseQueryString, mergeParams } from 'util/qs';
+import { getQSConfig, parseQueryString } from 'util/qs';
 import { useLocation, useParams } from 'react-router-dom';
 import useRequest, { useDismissableError } from 'hooks/useRequest';
 import DataListToolbar from 'components/DataListToolbar';
@@ -106,62 +106,38 @@ function InstancePeerList({ setBreadcrumb }) {
   const { selected, isAllSelected, handleSelect, clearSelected, selectAll } =
     useSelected(peers);
 
-  const fetchInstancesToAssociate = useCallback(
+  const fetchPeersToAssociate = useCallback(
     async (params) => {
       const address_list = [];
 
-      const instances = await InstancesAPI.read(
-        mergeParams(params, {
-          ...{ not__node_type: ['control', 'hybrid'] },
-        })
-      );
-      const receptors = (await ReceptorAPI.read()).data.results;
+      // do not show this instance or instances that are already peered
+      // to this instance (reverse_peers)
+      const not_instances = instance.reverse_peers;
+      not_instances.push(instance.id);
 
-      // get instance ids of the current peered receptor ids
-      const already_peered_instance_ids = [];
-      for (let h = 0; h < instance.peers.length; h++) {
-        const matched = receptors.filter((obj) => obj.id === instance.peers[h]);
-        matched.forEach((element) => {
-          already_peered_instance_ids.push(element.instance);
-        });
-      }
+      params.not__instance = not_instances;
+      params.is_internal = false;
+      // do not show the current peers
+      if (instance.peers.length > 0) {
+        params.not__id__in = instance.peers.join(',');
+      }
 
-      for (let q = 0; q < receptors.length; q++) {
-        const receptor = receptors[q];
+      const receptoraddresses = await ReceptorAPI.read(params);
 
-        if (already_peered_instance_ids.includes(receptor.instance)) {
-          // ignore reverse peers
-          continue;
-        }
+      // retrieve the instances that are associated with those receptor addresses
+      const instance_ids = receptoraddresses.data.results.map(
+        (obj) => obj.instance
+      );
+      const instance_ids_str = instance_ids.join(',');
+      const instances = await InstancesAPI.read({ id__in: instance_ids_str });
 
-        if (instance.peers.includes(receptor.id)) {
-          // no links to existing links
-          continue;
-        }
-
-        if (instance.id === receptor.instance) {
-          // no links to thy self
-          continue;
-        }
-
-        if (instance.managed) {
-          // no managed nodes
-          continue;
-        }
+      for (let q = 0; q < receptoraddresses.data.results.length; q++) {
+        const receptor = receptoraddresses.data.results[q];
 
         const host = instances.data.results.filter(
           (obj) => obj.id === receptor.instance
         )[0];
 
         if (host === undefined) {
           // no hosts
           continue;
         }
 
-        if (receptor.is_internal) {
-          continue;
-        }
-
         const copy = receptor;
         copy.hostname = host.hostname;
         copy.node_type = host.node_type;
@@ -169,9 +145,9 @@ function InstancePeerList({ setBreadcrumb }) {
         address_list.push(copy);
       }
 
-      instances.data.results = address_list;
+      receptoraddresses.data.results = address_list;
 
-      return instances;
+      return receptoraddresses;
     },
     [instance]
   );
@@ -191,7 +167,7 @@ function InstancePeerList({ setBreadcrumb }) {
       fetchPeers();
      addToast({
        id: instancesPeerToAssociate,
-        title: t`Please be sure to run the install bundle for the selected instance(s) again in order to see changes take effect.`,
+        title: t`Please be sure to run the install bundle for ${instance.hostname} again in order to see changes take effect.`,
        variant: AlertVariant.success,
        hasTimeout: true,
      });
@@ -315,13 +291,13 @@ function InstancePeerList({ setBreadcrumb }) {
       {isModalOpen && (
         <AssociateModal
           header={t`Instances`}
-          fetchRequest={fetchInstancesToAssociate}
+          fetchRequest={fetchPeersToAssociate}
           isModalOpen={isModalOpen}
           onAssociate={handlePeerAssociate}
           onClose={() => setIsModalOpen(false)}
           title={t`Select Peer Addresses`}
           optionsRequest={readInstancesOptions}
-          displayKey="hostname"
+          displayKey="address"
           columns={[
             { key: 'hostname', name: t`Name` },
             { key: 'address', name: t`Address` },
```
```diff
@@ -78,12 +78,14 @@ function MiscAuthenticationEdit() {
       default: OAUTH2_PROVIDER_OPTIONS.default.ACCESS_TOKEN_EXPIRE_SECONDS,
       type: OAUTH2_PROVIDER_OPTIONS.child.type,
       label: t`Access Token Expiration`,
+      help_text: t`Access Token Expiration in seconds`,
     },
     REFRESH_TOKEN_EXPIRE_SECONDS: {
       ...OAUTH2_PROVIDER_OPTIONS,
       default: OAUTH2_PROVIDER_OPTIONS.default.REFRESH_TOKEN_EXPIRE_SECONDS,
       type: OAUTH2_PROVIDER_OPTIONS.child.type,
       label: t`Refresh Token Expiration`,
+      help_text: t`Refresh Token Expiration in seconds`,
     },
     AUTHORIZATION_CODE_EXPIRE_SECONDS: {
       ...OAUTH2_PROVIDER_OPTIONS,
@@ -91,6 +93,7 @@ function MiscAuthenticationEdit() {
         OAUTH2_PROVIDER_OPTIONS.default.AUTHORIZATION_CODE_EXPIRE_SECONDS,
       type: OAUTH2_PROVIDER_OPTIONS.child.type,
       label: t`Authorization Code Expiration`,
+      help_text: t`Authorization Code Expiration in seconds`,
     },
   };
```
```diff
@@ -17,7 +17,7 @@ This section describes setting up authentication for the following enterprise s
 
 For LDAP authentication, see :ref:`ag_auth_ldap`.
 
-SAML, RADIUS, and TACACS+ users are categorized as 'Enterprise' users. The following rules apply to Enterprise users:
+Azure, RADIUS, SAML, and TACACS+ users are categorized as 'Enterprise' users. The following rules apply to Enterprise users:
 
 - Enterprise users can only be created via the first successful login attempt from remote authentication backend.
 - Enterprise users cannot be created/authenticated if non-enterprise users with the same name has already been created in AWX.
```
```diff
@@ -17,7 +17,9 @@ Administrators use LDAP as a source for account authentication information for A
 
 When so configured, a user who logs in with an LDAP username and password automatically gets an AWX account created for them and they can be automatically placed into organizations as either regular users or organization administrators.
 
-Users created via an LDAP login cannot change their username, first name, last name, or set a local password for themselves. This is also tunable to restrict editing of other field names.
+Users created locally in the user interface, take precedence over those logging into controller for their first time with an alternative authentication solution. You must delete the local user if you want to re-use it with another authentication method, such as LDAP.
+
+Users created through an LDAP login cannot change their username, given name, surname, or set a local password for themselves. You can also configure this to restrict editing of other field names.
 
 To configure LDAP integration for AWX:
@@ -84,7 +86,7 @@ Here ``CN=josie,CN=users,DC=website,DC=com`` is the Distinguished Name of the co
 
 .. _`django-auth-ldap library`: https://django-auth-ldap.readthedocs.io/en/latest/groups.html#types-of-groups
 
-7. The **LDAP Start TLS** is disabled by default. To enable TLS when the LDAP connection is not using SSL, click the toggle to **ON**.
+7. The **LDAP Start TLS** is disabled by default. To enable TLS when the LDAP connection is not using SSL/TLS, click the toggle to **ON**.
 
 .. image:: ../common/images/configure-awx-auth-ldap-start-tls.png
```
```diff
@@ -150,7 +150,7 @@ The easiest and most common way to obtain an OAuth 2 token is to create a person
 
 ::
 
-    curl -XPOST -k -H "Content-type: application/json" -d '{"description":"Personal AWX CLI token", "application":null, "scope":"write"}' https://<USERNAME>:<PASSWORD>@<AWX_SERVER>/api/v2/users/<USER_ID>/personal_tokens/ | python -m json.tool
+    curl -H "Content-type: application/json" -d '{"description":"Personal AWX CLI token", "application":null, "scope":"write"}' https://<USERNAME>:<PASSWORD>@<AWX_SERVER>/api/v2/users/<USER_ID>/personal_tokens/ | python -m json.tool
 
 You could also pipe the JSON output through ``jq``, if installed.
@@ -159,7 +159,7 @@ Following is an example of using the personal token to access an API endpoint us
 
 ::
 
-    curl -k -H "Authorization: Bearer <token>" -H "Content-Type: application/json" -X POST -d '{}' https://awx/api/v2/job_templates/5/launch/
+    curl -H "Authorization: Bearer <token>" -H "Content-Type: application/json" -d '{}' https://awx/api/v2/job_templates/5/launch/
 
 In AWX, the OAuth 2 system is built on top of the `Django Oauth Toolkit`_, which provides dedicated endpoints for authorizing, revoking, and refreshing tokens. These endpoints can be found under the ``/api/v2/users/<USER_ID>/personal_tokens/`` endpoint, which also provides detailed examples on some typical usage of those endpoints. These special OAuth 2 endpoints only support using the ``x-www-form-urlencoded`` **Content-type**, so none of the ``api/o/*`` endpoints accept ``application/json``.
@@ -217,7 +217,7 @@ This returns a <token-value> that you can use to authenticate with for future re
 
 ::
 
-    curl -H "Authorization: Bearer <token-value>" -H "Content-Type: application/json" -X GET https://<awx>/api/v2/users/
+    curl -H "Authorization: Bearer <token-value>" -H "Content-Type: application/json" https://<awx>/api/v2/users/
 
 The ``-k`` flag may be needed if you have not set up a CA yet and are using SSL.
@@ -227,14 +227,14 @@ To revoke a token, you can make a DELETE on the detail page for that token, usin
 
 ::
 
-    curl -ku <user>:<password> -X DELETE https://<awx>/api/v2/tokens/<pk>/
+    curl -u <user>:<password> -X DELETE https://<awx>/api/v2/tokens/<pk>/
 
 Similarly, using a token:
 
 ::
 
-    curl -H "Authorization: Bearer <token-value>" -X DELETE https://<awx>/api/v2/tokens/<pk>/ -k
+    curl -H "Authorization: Bearer <token-value>" -X DELETE https://<awx>/api/v2/tokens/<pk>/
 
 .. _ag_oauth2_token_auth_grant_types:
@@ -336,8 +336,7 @@ Logging in is not required for ``password`` grant type, so you can simply use cu
 
 .. code-block:: text
 
-   curl -k --user <user>:<password> -H "Content-type: application/json" \
-   -X POST \
+   curl --user <user>:<password> -H "Content-type: application/json" \
    --data '{
        "description": "Token for Nagios Monitoring app",
        "application": 1,
@@ -398,7 +397,7 @@ The ``/api/o/token/`` endpoint is used for refreshing the access token:
 
 ::
 
-    curl -X POST \
+    curl \
     -d "grant_type=refresh_token&refresh_token=AL0NK9TTpv0qp54dGbC4VUZtsZ9r8z" \
     -u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
     http://<awx>/api/o/token/ -i
@@ -441,7 +440,7 @@ Revoking an access token by this method is the same as deleting the token resour
 
 ::
 
-    curl -X POST -d "token=rQONsve372fQwuc2pn76k3IHDCYpi7" \
+    curl -d "token=rQONsve372fQwuc2pn76k3IHDCYpi7" \
     -u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
     http://<awx>/api/o/revoke_token/ -i
@@ -455,7 +454,7 @@ Revoking an access token by this method is the same as deleting the token resour
 The **Allow External Users to Create Oauth2 Tokens** (``ALLOW_OAUTH2_FOR_EXTERNAL_USERS`` in the API) setting is disabled by default. External users refer to users authenticated externally with a service like LDAP, or any of the other SSO services. This setting ensures external users cannot *create* their own tokens. If you enable then disable it, any tokens created by external users in the meantime will still exist, and are not automatically revoked.
 
-Alternatively, you can use the ``manage`` utility, :ref:`ag_manage_utility_revoke_tokens`, to revoke tokens as described in the the :ref:`ag_token_utility` section.
+Alternatively, you can use the ``manage`` utility, :ref:`ag_manage_utility_revoke_tokens`, to revoke tokens as described in the :ref:`ag_token_utility` section.
 
 This setting can be configured at the system-level in the AWX User Interface:
```
```diff
@@ -11,13 +11,14 @@ Authentication methods help simplify logins for end users--offering single sign-
 
 Account authentication can be configured in the AWX User Interface and saved to the PostgreSQL database. For instructions, refer to the :ref:`ag_configure_awx` section.
 
-Account authentication in AWX can be configured to centrally use OAuth2, while enterprise-level account authentication can be configured for SAML, RADIUS, or even LDAP as a source for authentication information. See :ref:`ag_ent_auth`.
+Account authentication in AWX can be configured to centrally use OAuth2, while enterprise-level account authentication can be configured for :ref:`Azure <ag_auth_azure>`, :ref:`RADIUS <ag_auth_radius>`, :ref:`SAML <ag_auth_saml>`, or even :ref:`LDAP <ag_auth_ldap>` as a source for authentication information. See :ref:`ag_ent_auth` for more detail.
 
 For websites, such as Microsoft Azure, Google or GitHub, that provide account information, account information is often implemented using the OAuth standard. OAuth is a secure authorization protocol which is commonly used in conjunction with account authentication to grant 3rd party applications a "session token" allowing them to make API calls to providers on the user’s behalf.
 
-SAML (Security Assertion Markup Language) is an XML-based, open-standard data format for exchanging account authentication and authorization data between an identity provider and a service provider.
+Security Assertion Markup Language (:ref:`SAML <ag_auth_saml>`) is an XML-based, open-standard data format for exchanging account authentication and authorization data between an identity provider and a service provider.
 
-The RADIUS distributed client/server system allows you to secure networks against unauthorized access and can be implemented in network environments requiring high levels of security while maintaining network access for remote users.
+The :ref:`RADIUS <ag_auth_radius>` distributed client/server system allows you to secure networks against unauthorized access and can be implemented in network environments requiring high levels of security while maintaining network access for remote users.
 
 .. _ag_auth_github:
@@ -378,7 +379,7 @@ the team will always be assigned to the single default organization.
    }
 
-Team mappings may be specified separately for each account authentication backend, based on which of these you setup. When defined, these configurations take precedence over the the global configuration above.
+Team mappings may be specified separately for each account authentication backend, based on which of these you setup. When defined, these configurations take precedence over the global configuration above.
 
 ::
```
```diff
@@ -271,7 +271,7 @@ First, determine which is the updated module you want to use from the available
 
 Next, create a new directory, at the same directory level of your Ansible source playbooks, named ``/library``.
 
-Once this is created, copy the module you want to use and drop it into the ``/library`` directory--it will be consumed first over your system modules and can be removed once you have updated the the stable version via your normal package manager.
+Once this is created, copy the module you want to use and drop it into the ``/library`` directory--it will be consumed first over your system modules and can be removed once you have updated the stable version via your normal package manager.
 
 Using callback plugins with AWX
```
```diff
@@ -94,10 +94,10 @@ In some situations, you can modify the following:
 
 - A new Host manually created on Inventory w/ inventory sources
 - In Groups that were created as a result of inventory source syncs
-- Variables on Host and Group are changeable
 
 Hosts associated with the Smart Inventory are manifested at view time. If the results of a Smart Inventory contains more than one host with identical hostnames, only one of the matching hosts will be included as part of the Smart Inventory, ordered by Host ID.
 
+Variables on Host and Group are not changeable even as the local system admin user.
 
 .. _ug_host_filters:
```
```diff
@@ -961,6 +961,8 @@ Extra Variables
 
 When you pass survey variables, they are passed as extra variables (``extra_vars``) within AWX. This can be tricky, as passing extra variables to a job template (as you would do with a survey) can override other variables being passed from the inventory and project.
 
+By default, ``extra_vars`` are marked as ``!unsafe`` unless you specify them on the job template’s Extra Variables section. These are trusted, because they can only be added by users with enough privileges to add or edit a Job Template. For example, nested variables do not expand when entered as a prompt, as the Jinja brackets are treated as a string. For more information about unsafe variables, see `unsafe or raw strings <https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_advanced_syntax.html#unsafe-or-raw-strings>`_.
+
 For example, say that you have a defined variable for an inventory for ``debug = true``. It is entirely possible that this variable, ``debug = true``, can be overridden in a job template survey.
 
 To ensure that the variables you need to pass are not overridden, ensure they are included by redefining them in the survey. Keep in mind that extra variables can be defined at the inventory, group, and host levels.
@@ -979,7 +981,7 @@ If specifying the ``ALLOW_JINJA_IN_EXTRA_VARS`` parameter, refer to the :ref:`AW
 
 The Job Template extra variables dictionary is merged with the Survey variables.
 
-Here are some simplified examples of extra_vars in YAML and JSON formats:
+Here are some simplified examples of ``extra_vars`` in YAML and JSON formats:
 
 The configuration in YAML format:
```
```diff
@@ -387,7 +387,7 @@ Now we are ready to configure and plumb Keycloak with AWX. To do this we have pr
 * Backup and configure the SMAL and OIDC adapter in AWX. NOTE: the private key of any existing SAML or OIDC adapters can not be backed up through the API, you need a DB backup to recover this.
 
 Before we can run the playbook we need to understand that SAML works by sending redirects between AWX and Keycloak through the browser. Because of this we have to tell both AWX and Keycloak how they will construct the redirect URLs. On the Keycloak side, this is done within the realm configuration and on the AWX side its done through the SAML settings. The playbook requires a variable called `container_reference` to be set. The container_reference variable needs to be how your browser will be able to talk to the running containers. Here are some examples of how to choose a proper container_reference.
-* If you develop on a mac which runs a Fedora VM which has AWX running within that and the browser you use to access AWX runs on the mac. The the VM with the container has its own IP that is mapped to a name like `tower.home.net`. In this scenario your "container_reference" could be either the IP of the VM or the tower.home.net friendly name.
+* If you develop on a mac which runs a Fedora VM which has AWX running within that and the browser you use to access AWX runs on the mac. The VM with the container has its own IP that is mapped to a name like `tower.home.net`. In this scenario your "container_reference" could be either the IP of the VM or the tower.home.net friendly name.
 * If you are on a Fedora work station running AWX and also using a browser on your workstation you could use localhost, your work stations IP or hostname as the container_reference.
 
 In addition, OIDC works similar but slightly differently. OIDC has browser redirection but OIDC will also communicate from the AWX docker instance to the Keycloak docker instance directly. Any hostnames you might have are likely not propagated down into the AWX container. So we need a method for both the browser and AWX container to talk to Keycloak. For this we will likely use your machines IP address. This can be passed in as a variable called `oidc_reference`. If unset this will default to container_reference which may be viable for some configurations.
```