Compare commits


2 Commits

Author SHA1 Message Date
Hao Liu
0a655cabe8 Merge branch 'devel' into dependabot/pip/docs/docsite/sphinx-ansible-theme-0.10.3 2024-04-15 10:10:37 -04:00
dependabot[bot]
a4e7826631 Bump sphinx-ansible-theme from 0.10.2 to 0.10.3 in /docs/docsite
Bumps [sphinx-ansible-theme](https://github.com/ansible-community/sphinx_ansible_theme) from 0.10.2 to 0.10.3.
- [Release notes](https://github.com/ansible-community/sphinx_ansible_theme/releases)
- [Commits](https://github.com/ansible-community/sphinx_ansible_theme/compare/v0.10.2...v0.10.3)

---
updated-dependencies:
- dependency-name: sphinx-ansible-theme
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-12-11 08:59:22 +00:00
21 changed files with 150 additions and 256 deletions

View File

@@ -80,7 +80,7 @@ If any of those items are missing your pull request will still get the `needs_tr
Currently you can expect awxbot to add common labels such as `state:needs_triage`, `type:bug`, `component:docs`, etc...
These labels are determined by the template data. Please use the template and fill it out as accurately as possible.
The `state:needs_triage` label will remain on your pull request until a person has looked at it.
The `state:needs_triage` label will will remain on your pull request until a person has looked at it.
You can also expect the bot to CC maintainers of specific areas of the code; this notifies them that there is a pull request by placing a comment on it.
The comment will look something like `CC @matburt @wwitzel3 ...`.

View File

@@ -616,7 +616,7 @@ docker-clean:
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_var_lib_awx tools_awx_db tools_awx_db_15 tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(shell docker volume ls --filter name=tools_redis_socket_ -q)
docker volume rm -f tools_var_lib_awx tools_awx_db tools_awx_db_15 tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker-refresh: docker-clean docker-compose

View File

@@ -2,11 +2,10 @@ import json
import os
import sys
import re
from typing import Any
from typing import Any
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.conf import settings_registry
@@ -41,15 +40,6 @@ class Command(BaseCommand):
"USER_SEARCH": False,
}
def is_enabled(self, settings, keys):
missing_fields = []
for key, required in keys.items():
if required and not settings.get(key):
missing_fields.append(key)
if missing_fields:
return False, missing_fields
return True, None
def get_awx_ldap_settings(self) -> dict[str, dict[str, Any]]:
awx_ldap_settings = {}
@@ -74,17 +64,15 @@ class Command(BaseCommand):
if new_key == "SERVER_URI" and value:
value = value.split(", ")
grouped_settings[index][new_key] = value
if type(value).__name__ == "LDAPSearch":
data = []
data.append(value.base_dn)
data.append("SCOPE_SUBTREE")
data.append(value.filterstr)
grouped_settings[index][new_key] = data
return grouped_settings
def is_enabled(self, settings, keys):
for key, required in keys.items():
if required and not settings.get(key):
return False
return True
def get_awx_saml_settings(self) -> dict[str, Any]:
awx_saml_settings = {}
for awx_saml_setting in settings_registry.get_registered_settings(category_slug='saml'):
@@ -94,7 +82,7 @@ class Command(BaseCommand):
def format_config_data(self, enabled, awx_settings, type, keys, name):
config = {
"type": f"ansible_base.authentication.authenticator_plugins.{type}",
"type": f"awx.authentication.authenticator_plugins.{type}",
"name": name,
"enabled": enabled,
"create_objects": True,
@@ -142,7 +130,7 @@ class Command(BaseCommand):
# dump SAML settings
awx_saml_settings = self.get_awx_saml_settings()
awx_saml_enabled, saml_missing_fields = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS)
awx_saml_enabled = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS)
if awx_saml_enabled:
awx_saml_name = awx_saml_settings["ENABLED_IDPS"]
data.append(
@@ -154,25 +142,21 @@ class Command(BaseCommand):
awx_saml_name,
)
)
else:
data.append({"SAML_missing_fields": saml_missing_fields})
# dump LDAP settings
awx_ldap_group_settings = self.get_awx_ldap_settings()
for awx_ldap_name, awx_ldap_settings in awx_ldap_group_settings.items():
awx_ldap_enabled, ldap_missing_fields = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS)
if awx_ldap_enabled:
for awx_ldap_name, awx_ldap_settings in enumerate(awx_ldap_group_settings.values()):
enabled = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS)
if enabled:
data.append(
self.format_config_data(
awx_ldap_enabled,
enabled,
awx_ldap_settings,
"ldap",
self.DAB_LDAP_AUTHENTICATOR_KEYS,
f"LDAP_{awx_ldap_name}",
str(awx_ldap_name),
)
)
else:
data.append({f"LDAP_{awx_ldap_name}_missing_fields": ldap_missing_fields})
# write to file if requested
if options["output_file"]:
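The two `is_enabled` variants visible in this hunk differ only in what they report to the caller; the tuple-returning form feeds the `*_missing_fields` entries seen further down. A minimal side-by-side sketch, assuming `settings` is a dict of resolved auth settings and `keys` maps setting names to a required flag, as in the `DAB_*_AUTHENTICATOR_KEYS` dicts:

```python
# Sketch of the two is_enabled() variants shown in the hunk above.

def is_enabled_simple(settings, keys):
    # Boolean-only form: any missing required key disables the authenticator.
    for key, required in keys.items():
        if required and not settings.get(key):
            return False
    return True

def is_enabled_with_report(settings, keys):
    # Tuple form: also report which required keys are missing, so the dump
    # can emit entries like {"SAML_missing_fields": [...]} for
    # unconfigured backends.
    missing_fields = [key for key, required in keys.items() if required and not settings.get(key)]
    if missing_fields:
        return False, missing_fields
    return True, None
```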

View File

@@ -165,10 +165,11 @@ class Command(BaseCommand):
return
WebsocketsMetricsServer().start()
websocket_relay_manager = WebSocketRelayManager()
while True:
try:
asyncio.run(WebSocketRelayManager().run())
asyncio.run(websocket_relay_manager.run())
except KeyboardInterrupt:
logger.info('Shutting down Websocket Relayer')
break
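The change in this hunk constructs the relay manager once, outside the retry loop, so its accumulated state survives a restart of the event loop. A stripped-down sketch of the pattern, with `WebSocketRelayManager` stubbed out for illustration:

```python
# Minimal sketch of the restart loop above; the manager stub is illustrative.
# Creating the manager once outside the loop preserves its state (known
# hosts, relay connections) across asyncio.run() restarts, which a fresh
# WebSocketRelayManager() per iteration would discard.
import asyncio
import logging

logger = logging.getLogger('awx.main.wsrelay')

class WebSocketRelayManager:  # stub
    async def run(self):
        ...

def main():
    websocket_relay_manager = WebSocketRelayManager()
    while True:
        try:
            asyncio.run(websocket_relay_manager.run())
        except KeyboardInterrupt:
            logger.info('Shutting down Websocket Relayer')
            break
```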

View File

@@ -1232,14 +1232,6 @@ ManagedCredentialType(
'multiline': True,
'help_text': gettext_noop('Terraform backend config as Hashicorp configuration language.'),
},
{
'id': 'gce_credentials',
'label': gettext_noop('Google Cloud Platform account credentials'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('Google Cloud Platform account credentials in JSON format.'),
},
],
'required': ['configuration'],
},

View File

@@ -130,10 +130,3 @@ def terraform(cred, env, private_data_dir):
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
f.write(cred.get_input('configuration'))
env['TF_BACKEND_CONFIG_FILE'] = to_container_path(path, private_data_dir)
# Handle env variables for GCP account credentials
if 'gce_credentials' in cred.inputs:
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
with os.fdopen(handle, 'w') as f:
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
f.write(cred.get_input('gce_credentials'))
env['GOOGLE_BACKEND_CREDENTIALS'] = to_container_path(path, private_data_dir)
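The removed lines follow the usual AWX credential-injection pattern: write the secret to a job-scoped temp file with owner-only permissions, then hand the tool the file's path via an environment variable. A hedged sketch of that pattern in isolation; `to_container_path` is assumed to behave like the AWX helper of the same name:

```python
# Sketch of the injector pattern above (simplified for illustration).
import os
import stat
import tempfile

def inject_secret_as_file(secret, private_data_dir, env, var_name, to_container_path):
    # mkstemp creates the file atomically inside the per-job env directory.
    handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
    with os.fdopen(handle, 'w') as f:
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)  # 0600: owner read/write only
        f.write(secret)
    # The consuming process sees the container-side path, not the host path.
    env[var_name] = to_container_path(path, private_data_dir)
```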

View File

@@ -11,8 +11,6 @@ import os.path
from urllib.parse import urljoin
import yaml
import tempfile
import stat
# Django
from django.conf import settings
@@ -1640,39 +1638,17 @@ class satellite6(PluginFileInjector):
class terraform(PluginFileInjector):
plugin_name = 'terraform_state'
base_injector = 'managed'
namespace = 'cloud'
collection = 'terraform'
use_fqcn = True
def inventory_as_dict(self, inventory_update, private_data_dir):
env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, None)
ret = super().inventory_as_dict(inventory_update, private_data_dir)
credential = inventory_update.get_cloud_credential()
config_cred = credential.get_input('configuration')
if config_cred:
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
with os.fdopen(handle, 'w') as f:
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
f.write(config_cred)
ret['backend_config_files'] = to_container_path(path, private_data_dir)
ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"]
return ret
def build_plugin_private_data(self, inventory_update, private_data_dir):
credential = inventory_update.get_cloud_credential()
private_data = {'credentials': {}}
gce_cred = credential.get_input('gce_credentials')
if gce_cred:
private_data['credentials'][credential] = gce_cred
return private_data
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
credential = inventory_update.get_cloud_credential()
cred_data = private_data_files['credentials']
if cred_data[credential]:
env['GOOGLE_BACKEND_CREDENTIALS'] = to_container_path(cred_data[credential], private_data_dir)
return env
class controller(PluginFileInjector):
plugin_name = 'tower' # TODO: relying on routing for now, update after EEs pick up revised collection
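Net effect of this hunk: the terraform injector no longer writes a second copy of the backend configuration inside `inventory_as_dict()`; it reuses the file the managed credential injector already wrote and exported as `TF_BACKEND_CONFIG_FILE`, and the GCE-specific `build_plugin_private_data`/`get_plugin_env` overrides go away. A condensed sketch of the surviving method, assuming the base-class helpers behave as used in the diff:

```python
# Condensed sketch of the refactored inventory_as_dict() above; base-class
# behavior (get_plugin_env, inventory_as_dict) is assumed from the diff.
def inventory_as_dict(self, inventory_update, private_data_dir):
    env = super().get_plugin_env(inventory_update, private_data_dir, None)
    ret = super().inventory_as_dict(inventory_update, private_data_dir)
    credential = inventory_update.get_cloud_credential()
    if credential.get_input('configuration'):
        # Reuse the backend config file the credential injector already wrote.
        ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"]
    return ret
```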

View File

@@ -1,3 +1,3 @@
{
"GOOGLE_BACKEND_CREDENTIALS": "{{ file_reference }}"
"TF_BACKEND_CONFIG_FILE": "{{ file_reference }}"
}

View File

@@ -52,7 +52,7 @@ class TestDumpAuthConfigCommand(TestCase):
super().setUp()
self.expected_config = [
{
"type": "ansible_base.authentication.authenticator_plugins.saml",
"type": "awx.authentication.authenticator_plugins.saml",
"name": "Keycloak",
"enabled": True,
"create_objects": True,
@@ -94,14 +94,14 @@ class TestDumpAuthConfigCommand(TestCase):
},
},
{
"type": "ansible_base.authentication.authenticator_plugins.ldap",
"name": "LDAP_1",
"type": "awx.authentication.authenticator_plugins.ldap",
"name": "1",
"enabled": True,
"create_objects": True,
"users_unique": False,
"remove_users": True,
"configuration": {
"SERVER_URI": ["SERVER_URI"],
"SERVER_URI": "SERVER_URI",
"BIND_DN": "BIND_DN",
"BIND_PASSWORD": "BIND_PASSWORD",
"CONNECTION_OPTIONS": {},
@@ -119,14 +119,4 @@ class TestDumpAuthConfigCommand(TestCase):
def test_json_returned_from_cmd(self):
output = StringIO()
call_command("dump_auth_config", stdout=output)
cmmd_output = json.loads(output.getvalue())
# check configured SAML return
assert cmmd_output[0] == self.expected_config[0]
# check configured LDAP return
assert cmmd_output[2] == self.expected_config[1]
# check unconfigured LDAP return
assert "LDAP_0_missing_fields" in cmmd_output[1]
assert cmmd_output[1]["LDAP_0_missing_fields"] == ['SERVER_URI', 'GROUP_TYPE', 'GROUP_TYPE_PARAMS', 'USER_DN_TEMPLATE', 'USER_ATTR_MAP']
assert json.loads(output.getvalue()) == self.expected_config

View File

@@ -1106,44 +1106,6 @@ class TestJobCredentials(TestJobExecution):
config = open(local_path, 'r').read()
assert config == hcl_config
def test_terraform_gcs_backend_credentials(self, job, private_data_dir, mock_me):
terraform = CredentialType.defaults['terraform']()
hcl_config = '''
backend "gcs" {
bucket = "gce_storage"
}
'''
gce_backend_credentials = '''
{
"type": "service_account",
"project_id": "sample",
"private_key_id": "eeeeeeeeeeeeeeeeeeeeeeeeeee",
"private_key": "-----BEGIN PRIVATE KEY-----\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n-----END PRIVATE KEY-----\n",
"client_email": "sample@sample.iam.gserviceaccount.com",
"client_id": "0123456789",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/cloud-content-robot%40sample.iam.gserviceaccount.com",
}
'''
credential = Credential(pk=1, credential_type=terraform, inputs={'configuration': hcl_config, 'gce_credentials': gce_backend_credentials})
credential.inputs['configuration'] = encrypt_field(credential, 'configuration')
credential.inputs['gce_credentials'] = encrypt_field(credential, 'gce_credentials')
job.credentials.add(credential)
env = {}
safe_env = {}
credential.credential_type.inject_credential(credential, env, safe_env, [], private_data_dir)
local_path = to_host_path(env['TF_BACKEND_CONFIG_FILE'], private_data_dir)
config = open(local_path, 'r').read()
assert config == hcl_config
credentials_path = to_host_path(env['GOOGLE_BACKEND_CREDENTIALS'], private_data_dir)
credentials = open(credentials_path, 'r').read()
assert credentials == gce_backend_credentials
def test_custom_environment_injectors_with_jinja_syntax_error(self, private_data_dir, mock_me):
some_cloud = CredentialType(
kind='cloud',

View File

@@ -242,7 +242,7 @@ class WebSocketRelayManager(object):
# In this case, we'll be sharing a redis, no need to relay.
if payload.get("hostname") == self.local_hostname:
hostname = payload.get("hostname")
logger.debug(f"Received a heartbeat request for {hostname}. Skipping as we use redis for local host.")
logger.debug("Received a heartbeat request for {hostname}. Skipping as we use redis for local host.")
continue
action = payload.get("action")
@@ -285,8 +285,6 @@ class WebSocketRelayManager(object):
except asyncio.CancelledError:
# Handle the case where the task was already cancelled by the time we got here.
pass
except Exception as e:
logger.warning(f"Failed to cancel relay connection for {hostname}: {e}")
del self.relay_connections[hostname]
@@ -297,8 +295,6 @@ class WebSocketRelayManager(object):
self.stats_mgr.delete_remote_host_stats(hostname)
except KeyError:
pass
except Exception as e:
logger.warning(f"Failed to delete stats for {hostname}: {e}")
async def run(self):
event_loop = asyncio.get_running_loop()
@@ -320,77 +316,57 @@ class WebSocketRelayManager(object):
task = None
# Managing the async_conn here so that we can close it if we need to restart the connection
async_conn = None
# Establishes a websocket connection to /websocket/relay on all API servers
try:
while True:
if not task or task.done():
try:
# Try to close the connection if it's open
if async_conn:
try:
await async_conn.close()
except Exception as e:
logger.warning(f"Failed to close connection to database for pg_notify: {e}")
# and re-establish the connection
async_conn = await psycopg.AsyncConnection.connect(
dbname=database_conf['NAME'],
host=database_conf['HOST'],
user=database_conf['USER'],
port=database_conf['PORT'],
**database_conf.get("OPTIONS", {}),
)
await async_conn.set_autocommit(True)
# before creating the task that uses the connection
task = event_loop.create_task(self.on_ws_heartbeat(async_conn), name="on_ws_heartbeat")
logger.info("Creating `on_ws_heartbeat` task in event loop.")
except Exception as e:
logger.warning(f"Failed to connect to database for pg_notify: {e}")
future_remote_hosts = self.known_hosts.keys()
current_remote_hosts = self.relay_connections.keys()
deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)
# This loop handles if we get an advertisement from a host we already know about but
# the advertisement has a different IP than we are currently connected to.
for hostname, address in self.known_hosts.items():
if hostname not in self.relay_connections:
# We've picked up a new hostname that we don't know about yet.
continue
if address != self.relay_connections[hostname].remote_host:
deleted_remote_hosts.add(hostname)
new_remote_hosts.add(hostname)
# Delete any hosts with closed connections
for hostname, relay_conn in self.relay_connections.items():
if not relay_conn.connected:
deleted_remote_hosts.add(hostname)
if deleted_remote_hosts:
logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])
if new_remote_hosts:
logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")
for h in new_remote_hosts:
stats = self.stats_mgr.new_remote_host_stats(h)
relay_connection = WebsocketRelayConnection(name=self.local_hostname, stats=stats, remote_host=self.known_hosts[h])
relay_connection.start()
self.relay_connections[h] = relay_connection
await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)
finally:
if async_conn:
logger.info("Shutting down db connection for wsrelay.")
while True:
if not task or task.done():
try:
await async_conn.close()
async_conn = await psycopg.AsyncConnection.connect(
dbname=database_conf['NAME'],
host=database_conf['HOST'],
user=database_conf['USER'],
port=database_conf['PORT'],
**database_conf.get("OPTIONS", {}),
)
await async_conn.set_autocommit(True)
task = event_loop.create_task(self.on_ws_heartbeat(async_conn), name="on_ws_heartbeat")
logger.info("Creating `on_ws_heartbeat` task in event loop.")
except Exception as e:
logger.info(f"Failed to close connection to database for pg_notify: {e}")
logger.warning(f"Failed to connect to database for pg_notify: {e}")
future_remote_hosts = self.known_hosts.keys()
current_remote_hosts = self.relay_connections.keys()
deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)
# This loop handles if we get an advertisement from a host we already know about but
# the advertisement has a different IP than we are currently connected to.
for hostname, address in self.known_hosts.items():
if hostname not in self.relay_connections:
# We've picked up a new hostname that we don't know about yet.
continue
if address != self.relay_connections[hostname].remote_host:
deleted_remote_hosts.add(hostname)
new_remote_hosts.add(hostname)
# Delete any hosts with closed connections
for hostname, relay_conn in self.relay_connections.items():
if not relay_conn.connected:
deleted_remote_hosts.add(hostname)
if deleted_remote_hosts:
logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])
if new_remote_hosts:
logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")
for h in new_remote_hosts:
stats = self.stats_mgr.new_remote_host_stats(h)
relay_connection = WebsocketRelayConnection(name=self.local_hostname, stats=stats, remote_host=self.known_hosts[h])
relay_connection.start()
self.relay_connections[h] = relay_connection
await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)
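Both versions of `run()` share the same reconnect idiom for the pg_notify listener: whenever the `on_ws_heartbeat` task is missing or has finished, open a fresh autocommit connection and spawn the task again. That idiom in isolation, using the psycopg 3 async API as the diff does (`on_ws_heartbeat` is the coroutine that consumes heartbeats; the wrapper function itself is illustrative):

```python
# Sketch of the reconnect idiom from run() above; database_conf keys and the
# psycopg 3 calls mirror the diff.
import asyncio
import psycopg

async def ensure_heartbeat_task(task, database_conf, on_ws_heartbeat):
    if task and not task.done():
        return task  # listener task still healthy; nothing to do
    async_conn = await psycopg.AsyncConnection.connect(
        dbname=database_conf['NAME'],
        host=database_conf['HOST'],
        user=database_conf['USER'],
        port=database_conf['PORT'],
        **database_conf.get("OPTIONS", {}),
    )
    await async_conn.set_autocommit(True)  # LISTEN/NOTIFY requires autocommit
    loop = asyncio.get_running_loop()
    return loop.create_task(on_ws_heartbeat(async_conn), name="on_ws_heartbeat")
```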

View File

@@ -12,7 +12,7 @@ import AssociateModal from 'components/AssociateModal';
import ErrorDetail from 'components/ErrorDetail';
import AlertModal from 'components/AlertModal';
import useToast, { AlertVariant } from 'hooks/useToast';
import { getQSConfig, parseQueryString } from 'util/qs';
import { getQSConfig, parseQueryString, mergeParams } from 'util/qs';
import { useLocation, useParams } from 'react-router-dom';
import useRequest, { useDismissableError } from 'hooks/useRequest';
import DataListToolbar from 'components/DataListToolbar';
@@ -106,38 +106,62 @@ function InstancePeerList({ setBreadcrumb }) {
const { selected, isAllSelected, handleSelect, clearSelected, selectAll } =
useSelected(peers);
const fetchPeersToAssociate = useCallback(
const fetchInstancesToAssociate = useCallback(
async (params) => {
const address_list = [];
// do not show this instance or instances that are already peered
// to this instance (reverse_peers)
const not_instances = instance.reverse_peers;
not_instances.push(instance.id);
const instances = await InstancesAPI.read(
mergeParams(params, {
...{ not__node_type: ['control', 'hybrid'] },
})
);
const receptors = (await ReceptorAPI.read()).data.results;
params.not__instance = not_instances;
params.is_internal = false;
// do not show the current peers
if (instance.peers.length > 0) {
params.not__id__in = instance.peers.join(',');
// get instance ids of the current peered receptor ids
const already_peered_instance_ids = [];
for (let h = 0; h < instance.peers.length; h++) {
const matched = receptors.filter((obj) => obj.id === instance.peers[h]);
matched.forEach((element) => {
already_peered_instance_ids.push(element.instance);
});
}
const receptoraddresses = await ReceptorAPI.read(params);
for (let q = 0; q < receptors.length; q++) {
const receptor = receptors[q];
// retrieve the instances that are associated with those receptor addresses
const instance_ids = receptoraddresses.data.results.map(
(obj) => obj.instance
);
const instance_ids_str = instance_ids.join(',');
const instances = await InstancesAPI.read({ id__in: instance_ids_str });
if (already_peered_instance_ids.includes(receptor.instance)) {
// ignore reverse peers
continue;
}
for (let q = 0; q < receptoraddresses.data.results.length; q++) {
const receptor = receptoraddresses.data.results[q];
if (instance.peers.includes(receptor.id)) {
// no links to existing links
continue;
}
if (instance.id === receptor.instance) {
// no links to thy self
continue;
}
if (instance.managed) {
// no managed nodes
continue;
}
const host = instances.data.results.filter(
(obj) => obj.id === receptor.instance
)[0];
if (host === undefined) {
// no hosts
continue;
}
if (receptor.is_internal) {
continue;
}
const copy = receptor;
copy.hostname = host.hostname;
copy.node_type = host.node_type;
@@ -145,9 +169,9 @@ function InstancePeerList({ setBreadcrumb }) {
address_list.push(copy);
}
receptoraddresses.data.results = address_list;
instances.data.results = address_list;
return receptoraddresses;
return instances;
},
[instance]
);
@@ -167,7 +191,7 @@ function InstancePeerList({ setBreadcrumb }) {
fetchPeers();
addToast({
id: instancesPeerToAssociate,
title: t`Please be sure to run the install bundle for ${instance.hostname} again in order to see changes take effect.`,
title: t`Please be sure to run the install bundle for the selected instance(s) again in order to see changes take effect.`,
variant: AlertVariant.success,
hasTimeout: true,
});
@@ -291,13 +315,13 @@ function InstancePeerList({ setBreadcrumb }) {
{isModalOpen && (
<AssociateModal
header={t`Instances`}
fetchRequest={fetchPeersToAssociate}
fetchRequest={fetchInstancesToAssociate}
isModalOpen={isModalOpen}
onAssociate={handlePeerAssociate}
onClose={() => setIsModalOpen(false)}
title={t`Select Peer Addresses`}
optionsRequest={readInstancesOptions}
displayKey="address"
displayKey="hostname"
columns={[
{ key: 'hostname', name: t`Name` },
{ key: 'address', name: t`Address` },
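Condensed, the new `fetchInstancesToAssociate` walks every receptor address and drops self-links, already-peered addresses, managed nodes, internal addresses, and addresses with no matching instance, then decorates the survivors with the owning instance's hostname and node type. The same filtering, sketched in Python for brevity (field names mirror the diff; the data shapes are illustrative, not the actual UI code):

```python
# Python sketch of the filtering logic above; not the actual JS component.
def peerable_addresses(instance, receptor_addresses, hosts_by_id):
    # instance: the instance being edited; hosts_by_id: instance id -> host dict.
    address_list = []
    for receptor in receptor_addresses:
        if receptor['id'] in instance['peers']:
            continue  # no links to existing links
        if receptor['instance'] == instance['id']:
            continue  # no links to thy self
        if instance['managed']:
            continue  # no managed nodes
        host = hosts_by_id.get(receptor['instance'])
        if host is None:
            continue  # no matching host
        if receptor['is_internal']:
            continue
        address_list.append(dict(receptor, hostname=host['hostname'], node_type=host['node_type']))
    return address_list
```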

View File

@@ -52,7 +52,7 @@ sphinx==7.2.6
# sphinxcontrib-jquery
# sphinxcontrib-qthelp
# sphinxcontrib-serializinghtml
sphinx-ansible-theme==0.10.2
sphinx-ansible-theme==0.10.3
# via -r requirements.in
sphinx-rtd-theme==1.3.0
# via sphinx-ansible-theme

View File

@@ -17,7 +17,7 @@ This section describes setting up authentication for the following enterprise sy
For LDAP authentication, see :ref:`ag_auth_ldap`.
Azure, RADIUS, SAML, and TACACS+ users are categorized as 'Enterprise' users. The following rules apply to Enterprise users:
SAML, RADIUS, and TACACS+ users are categorized as 'Enterprise' users. The following rules apply to Enterprise users:
- Enterprise users can only be created via the first successful login attempt from the remote authentication backend.
- Enterprise users cannot be created/authenticated if a non-enterprise user with the same name has already been created in AWX.

View File

@@ -17,9 +17,7 @@ Administrators use LDAP as a source for account authentication information for A
When so configured, a user who logs in with an LDAP username and password automatically gets an AWX account created for them and they can be automatically placed into organizations as either regular users or organization administrators.
Users created locally in the user interface take precedence over those logging into the controller for the first time with an alternative authentication solution. You must delete the local user if you want to re-use it with another authentication method, such as LDAP.
Users created through an LDAP login cannot change their username, given name, surname, or set a local password for themselves. You can also configure this to restrict editing of other field names.
Users created via an LDAP login cannot change their username, first name, last name, or set a local password for themselves. This is also tunable to restrict editing of other field names.
To configure LDAP integration for AWX:
@@ -86,7 +84,7 @@ Here ``CN=josie,CN=users,DC=website,DC=com`` is the Distinguished Name of the co
.. _`django-auth-ldap library`: https://django-auth-ldap.readthedocs.io/en/latest/groups.html#types-of-groups
7. The **LDAP Start TLS** is disabled by default. To enable TLS when the LDAP connection is not using SSL/TLS, click the toggle to **ON**.
7. The **LDAP Start TLS** is disabled by default. To enable TLS when the LDAP connection is not using SSL, click the toggle to **ON**.
.. image:: ../common/images/configure-awx-auth-ldap-start-tls.png

View File

@@ -150,7 +150,7 @@ The easiest and most common way to obtain an OAuth 2 token is to create a person
::
curl -H "Content-type: application/json" -d '{"description":"Personal AWX CLI token", "application":null, "scope":"write"}' https://<USERNAME>:<PASSWORD>@<AWX_SERVER>/api/v2/users/<USER_ID>/personal_tokens/ | python -m json.tool
curl -XPOST -k -H "Content-type: application/json" -d '{"description":"Personal AWX CLI token", "application":null, "scope":"write"}' https://<USERNAME>:<PASSWORD>@<AWX_SERVER>/api/v2/users/<USER_ID>/personal_tokens/ | python -m json.tool
You could also pipe the JSON output through ``jq``, if installed.
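For readers who script this instead of using curl, a hedged Python equivalent of the token-creation call above. The endpoint and payload come from the docs; the `requests` usage is illustrative, and `verify=False` mirrors curl's `-k` for test instances without a trusted CA:

```python
# Hypothetical Python equivalent of the curl example above.
import requests

resp = requests.post(
    "https://AWX_SERVER/api/v2/users/USER_ID/personal_tokens/",
    auth=("USERNAME", "PASSWORD"),
    json={"description": "Personal AWX CLI token", "application": None, "scope": "write"},
    verify=False,  # like curl -k; only for test setups without a trusted CA
)
resp.raise_for_status()
print(resp.json())
```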
@@ -159,7 +159,7 @@ Following is an example of using the personal token to access an API endpoint us
::
curl -H "Authorization: Bearer <token>" -H "Content-Type: application/json" -d '{}' https://awx/api/v2/job_templates/5/launch/
curl -k -H "Authorization: Bearer <token>" -H "Content-Type: application/json" -X POST -d '{}' https://awx/api/v2/job_templates/5/launch/
In AWX, the OAuth 2 system is built on top of the `Django Oauth Toolkit`_, which provides dedicated endpoints for authorizing, revoking, and refreshing tokens. These endpoints can be found under the ``/api/v2/users/<USER_ID>/personal_tokens/`` endpoint, which also provides detailed examples on some typical usage of those endpoints. These special OAuth 2 endpoints only support using the ``x-www-form-urlencoded`` **Content-type**, so none of the ``api/o/*`` endpoints accept ``application/json``.
@@ -217,7 +217,7 @@ This returns a <token-value> that you can use to authenticate with for future re
::
curl -H "Authorization: Bearer <token-value>" -H "Content-Type: application/json" https://<awx>/api/v2/users/
curl -H "Authorization: Bearer <token-value>" -H "Content-Type: application/json" -X GET https://<awx>/api/v2/users/
The ``-k`` flag may be needed if you have not set up a CA yet and are using SSL.
@@ -227,14 +227,14 @@ To revoke a token, you can make a DELETE on the detail page for that token, usin
::
curl -u <user>:<password> -X DELETE https://<awx>/api/v2/tokens/<pk>/
curl -ku <user>:<password> -X DELETE https://<awx>/api/v2/tokens/<pk>/
Similarly, using a token:
::
curl -H "Authorization: Bearer <token-value>" -X DELETE https://<awx>/api/v2/tokens/<pk>/
curl -H "Authorization: Bearer <token-value>" -X DELETE https://<awx>/api/v2/tokens/<pk>/ -k
.. _ag_oauth2_token_auth_grant_types:
@@ -336,7 +336,8 @@ Logging in is not required for ``password`` grant type, so you can simply use cu
.. code-block:: text
curl --user <user>:<password> -H "Content-type: application/json" \
curl -k --user <user>:<password> -H "Content-type: application/json" \
-X POST \
--data '{
"description": "Token for Nagios Monitoring app",
"application": 1,
@@ -397,7 +398,7 @@ The ``/api/o/token/`` endpoint is used for refreshing the access token:
::
curl \
curl -X POST \
-d "grant_type=refresh_token&refresh_token=AL0NK9TTpv0qp54dGbC4VUZtsZ9r8z" \
-u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
http://<awx>/api/o/token/ -i
@@ -440,7 +441,7 @@ Revoking an access token by this method is the same as deleting the token resour
::
curl -d "token=rQONsve372fQwuc2pn76k3IHDCYpi7" \
curl -X POST -d "token=rQONsve372fQwuc2pn76k3IHDCYpi7" \
-u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
http://<awx>/api/o/revoke_token/ -i
@@ -454,7 +455,7 @@ Revoking an access token by this method is the same as deleting the token resour
The **Allow External Users to Create Oauth2 Tokens** (``ALLOW_OAUTH2_FOR_EXTERNAL_USERS`` in the API) setting is disabled by default. External users refer to users authenticated externally with a service like LDAP, or any of the other SSO services. This setting ensures external users cannot *create* their own tokens. If you enable then disable it, any tokens created by external users in the meantime will still exist, and are not automatically revoked.
Alternatively, you can use the ``manage`` utility, :ref:`ag_manage_utility_revoke_tokens`, to revoke tokens as described in the :ref:`ag_token_utility` section.
Alternatively, you can use the ``manage`` utility, :ref:`ag_manage_utility_revoke_tokens`, to revoke tokens as described in the the :ref:`ag_token_utility` section.
This setting can be configured at the system-level in the AWX User Interface:

View File

@@ -11,14 +11,13 @@ Authentication methods help simplify logins for end users--offering single sign-
Account authentication can be configured in the AWX User Interface and saved to the PostgreSQL database. For instructions, refer to the :ref:`ag_configure_awx` section.
Account authentication in AWX can be configured to centrally use OAuth2, while enterprise-level account authentication can be configured for :ref:`Azure <ag_auth_azure>`, :ref:`RADIUS <ag_auth_radius>`, :ref:`SAML <ag_auth_saml>`, or even :ref:`LDAP <ag_auth_ldap>` as a source for authentication information. See :ref:`ag_ent_auth` for more detail.
Account authentication in AWX can be configured to centrally use OAuth2, while enterprise-level account authentication can be configured for SAML, RADIUS, or even LDAP as a source for authentication information. See :ref:`ag_ent_auth`.
For websites that provide account information, such as Microsoft Azure, Google or GitHub, this is often implemented using the OAuth standard. OAuth is a secure authorization protocol which is commonly used in conjunction with account authentication to grant third-party applications a "session token" allowing them to make API calls to providers on the user's behalf.
Security Assertion Markup Language (:ref:`SAML <ag_auth_saml>`) is an XML-based, open-standard data format for exchanging account authentication and authorization data between an identity provider and a service provider.
The :ref:`RADIUS <ag_auth_radius>` distributed client/server system allows you to secure networks against unauthorized access and can be implemented in network environments requiring high levels of security while maintaining network access for remote users.
SAML (Security Assertion Markup Language) is an XML-based, open-standard data format for exchanging account authentication and authorization data between an identity provider and a service provider.
The RADIUS distributed client/server system allows you to secure networks against unauthorized access and can be implemented in network environments requiring high levels of security while maintaining network access for remote users.
.. _ag_auth_github:
@@ -379,7 +378,7 @@ the team will always be assigned to the single default organization.
}
Team mappings may be specified separately for each account authentication backend, based on which of these you setup. When defined, these configurations take precedence over the global configuration above.
Team mappings may be specified separately for each account authentication backend, based on which of these you setup. When defined, these configurations take precedence over the the global configuration above.
::

View File

@@ -271,7 +271,7 @@ First, determine which is the updated module you want to use from the available
Next, create a new directory, at the same directory level as your Ansible source playbooks, named ``/library``.
Once this is created, copy the module you want to use and drop it into the ``/library`` directory--it will be consumed first over your system modules and can be removed once you have updated the stable version via your normal package manager.
Once this is created, copy the module you want to use and drop it into the ``/library`` directory--it will be consumed first over your system modules and can be removed once you have updated the the stable version via your normal package manager.
Using callback plugins with AWX

View File

@@ -94,10 +94,10 @@ In some situations, you can modify the following:
- A new Host manually created on Inventory w/ inventory sources
- In Groups that were created as a result of inventory source syncs
- Variables on Host and Group are changeable
Hosts associated with the Smart Inventory are manifested at view time. If the results of a Smart Inventory contains more than one host with identical hostnames, only one of the matching hosts will be included as part of the Smart Inventory, ordered by Host ID.
Variables on Host and Group are not changeable even as the local system admin user.
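The Smart Inventory hostname rule above (identical hostnames collapse to a single host, ordered by Host ID) can be illustrated with a small sketch. This is illustrative only, not AWX's actual query, and it assumes "ordered by Host ID" means the lowest ID wins:

```python
# Illustrative sketch of the Smart Inventory rule above, not AWX internals:
# among hosts sharing a hostname, keep the one with the lowest Host ID.
def collapse_matching_hosts(hosts):
    chosen = {}
    for host in sorted(hosts, key=lambda h: h['id']):
        chosen.setdefault(host['hostname'], host)
    return list(chosen.values())

# e.g. two hosts named "web01" with ids 3 and 7 -> only the id-3 host remains
```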
.. _ug_host_filters:

View File

@@ -961,8 +961,6 @@ Extra Variables
When you pass survey variables, they are passed as extra variables (``extra_vars``) within AWX. This can be tricky, as passing extra variables to a job template (as you would do with a survey) can override other variables being passed from the inventory and project.
By default, ``extra_vars`` are marked as ``!unsafe`` unless you specify them on the job template's Extra Variables section. These are trusted, because they can only be added by users with enough privileges to add or edit a Job Template. For example, nested variables do not expand when entered as a prompt, as the Jinja brackets are treated as a string. For more information about unsafe variables, see `unsafe or raw strings <https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_advanced_syntax.html#unsafe-or-raw-strings>`_.
For example, say that you have a variable ``debug = true`` defined for an inventory. It is entirely possible that this variable can be overridden in a job template survey.
To make sure that the variables you need to pass are not overridden, include them by redefining them in the survey. Keep in mind that extra variables can be defined at the inventory, group, and host levels.
@@ -981,7 +979,7 @@ If specifying the ``ALLOW_JINJA_IN_EXTRA_VARS`` parameter, refer to the :ref:`AW
The Job Template extra variables dictionary is merged with the Survey variables.
Here are some simplified examples of ``extra_vars`` in YAML and JSON formats:
Here are some simplified examples of extra_vars in YAML and JSON formats:
The configuration in YAML format:

View File

@@ -387,7 +387,7 @@ Now we are ready to configure and plumb Keycloak with AWX. To do this we have pr
* Back up and configure the SAML and OIDC adapter in AWX. NOTE: the private key of any existing SAML or OIDC adapters cannot be backed up through the API; you need a DB backup to recover this.
Before we can run the playbook we need to understand that SAML works by sending redirects between AWX and Keycloak through the browser. Because of this we have to tell both AWX and Keycloak how they will construct the redirect URLs. On the Keycloak side, this is done within the realm configuration, and on the AWX side it's done through the SAML settings. The playbook requires a variable called `container_reference` to be set. The container_reference variable needs to be an address through which your browser can reach the running containers. Here are some examples of how to choose a proper container_reference.
* If you develop on a mac which runs a Fedora VM which has AWX running within that and the browser you use to access AWX runs on the mac. The VM with the container has its own IP that is mapped to a name like `tower.home.net`. In this scenario your "container_reference" could be either the IP of the VM or the tower.home.net friendly name.
* If you develop on a mac which runs a Fedora VM which has AWX running within that and the browser you use to access AWX runs on the mac. The the VM with the container has its own IP that is mapped to a name like `tower.home.net`. In this scenario your "container_reference" could be either the IP of the VM or the tower.home.net friendly name.
* If you are on a Fedora workstation running AWX and also using a browser on your workstation, you could use localhost, your workstation's IP, or its hostname as the container_reference.
In addition, OIDC works similarly but slightly differently. OIDC has browser redirection, but OIDC will also communicate from the AWX docker instance to the Keycloak docker instance directly. Any hostnames you might have are likely not propagated down into the AWX container, so we need a method for both the browser and the AWX container to talk to Keycloak. For this we will likely use your machine's IP address. This can be passed in as a variable called `oidc_reference`. If unset, this defaults to container_reference, which may be viable for some configurations.