Assorted renaming and string changes

Bill Nottingham 2021-04-30 14:14:38 -04:00
parent e0d6b138b0
commit c8cf28f266
97 changed files with 730 additions and 707 deletions

View File

@ -1,7 +1,7 @@
Coding Standards and Practices
==============================
This is not meant to be a style document so much as a practices document for ensuring performance and convention in the Ansible Tower API.
This is not meant to be a style document so much as a practices document for ensuring performance and convention in the AWX API.
Paginate Everything
===================
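
A consumer-side sketch of what "paginate everything" implies, assuming the standard count/next/previous/results envelope; the host, endpoint, and credentials below are placeholders:

# Minimal sketch of consuming a paginated list endpoint.
# Assumes the standard count/next/previous/results envelope;
# host, endpoint, and credentials are placeholders.
import requests

def fetch_all(base_url, endpoint, auth, page_size=100):
    """Follow 'next' links until every page has been collected."""
    results = []
    url = f"{base_url}{endpoint}?page_size={page_size}"
    while url:
        resp = requests.get(url, auth=auth, timeout=30)
        resp.raise_for_status()
        payload = resp.json()
        results.extend(payload["results"])
        # 'next' is a relative URL for the following page, or None on the last page
        url = f"{base_url}{payload['next']}" if payload.get("next") else None
    return results

jobs = fetch_all("https://awx.example.com", "/api/v2/jobs/", ("admin", "password"))
print(len(jobs))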

View File

@ -3,7 +3,7 @@
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is one of the upstream projects for [Red Hat Ansible Automation Platform](https://www.ansible.com/products/automation-platform).
To install AWX, please view the [Install guide](./INSTALL.md).

View File

@ -85,7 +85,7 @@ def oauth2_getattribute(self, attr):
# setting lookups for references to model classes (e.g.,
# oauth2_settings.REFRESH_TOKEN_MODEL)
# If we're doing an OAuth2 setting lookup *while running* a migration,
# don't do our usual "Configure Tower in Tower" database setting lookup
# don't do our usual database settings lookup
val = settings.OAUTH2_PROVIDER.get(attr)
if val is None:
val = object.__getattribute__(self, attr)

View File

@ -77,7 +77,7 @@ register(
required=False,
default='',
label=_('Login redirect override URL'),
help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the Tower login page.'),
help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'),
category=_('Authentication'),
category_slug='authentication',
)

View File

@ -62,7 +62,7 @@ class SwaggerSchemaView(APIView):
renderer_classes = [CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]
def get(self, request):
generator = SuperUserSchemaGenerator(title='Ansible Tower API', patterns=None, urlconf=None)
generator = SuperUserSchemaGenerator(title='Ansible Automation Platform controller API', patterns=None, urlconf=None)
schema = generator.get_schema(request=request)
# python core-api doesn't support the deprecation yet, so track it
# ourselves and return it in a response header

View File

@ -4250,13 +4250,13 @@ class NotificationTemplateTest(GenericAPIView):
def post(self, request, *args, **kwargs):
obj = self.get_object()
msg = "Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
msg = "Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
if obj.notification_type in ('email', 'pagerduty'):
body = "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
body = "Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
elif obj.notification_type in ('webhook', 'grafana'):
body = '{{"body": "Ansible Tower Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
body = '{{"body": "Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
else:
body = {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}
body = {"body": "Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}
notification = obj.generate_notification(msg, body)
if not notification:

View File

@ -30,8 +30,8 @@ if MODE == 'production':
except FileNotFoundError:
pass
except ValueError as e:
logger.error("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.")
raise Exception("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.") from e
logger.error("Missing or incorrect metadata for controller version. Ensure controller was installed using the setup playbook.")
raise Exception("Missing or incorrect metadata for controller version. Ensure controller was installed using the setup playbook.") from e
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings")

View File

@ -4,7 +4,7 @@
# Django
from django.utils.module_loading import autodiscover_modules
# Tower
# AWX
from .registry import settings_registry
default_app_config = 'awx.conf.apps.ConfConfig'

View File

@ -4,7 +4,7 @@
# Django
from django.db.models import Q
# Tower
# AWX
from awx.main.access import BaseAccess, register_access
from awx.conf.models import Setting

View File

@ -2,7 +2,7 @@
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# Tower
# AWX
from awx.conf import fields, register
from awx.conf import settings_registry

View File

@ -11,5 +11,5 @@ def _get_validated_license_data():
def get_license():
"""Return a dictionary representing the active license on this Tower instance."""
"""Return a dictionary representing the active license on this instance."""
return _get_validated_license_data()

View File

@ -7,7 +7,7 @@ import json
# Django
from django.db import models
# Tower
# AWX
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.fields import JSONField
from awx.main.utils import encrypt_field

View File

@ -1,7 +1,7 @@
# Django REST Framework
from rest_framework import serializers
# Tower
# AWX
from awx.api.fields import VerbatimField
from awx.api.serializers import BaseSerializer
from awx.conf.models import Setting

View File

@ -20,7 +20,7 @@ from rest_framework.fields import empty, SkipField
import cachetools
# Tower
# AWX
from awx.main.utils import encrypt_field, decrypt_field
from awx.conf import settings_registry
from awx.conf.models import Setting

View File

@ -8,7 +8,7 @@ from django.db.models.signals import post_save, pre_delete, post_delete
from django.core.cache import cache
from django.dispatch import receiver
# Tower
# AWX
from awx.conf import settings_registry
from awx.conf.models import Setting

View File

@ -21,7 +21,7 @@ from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
# Tower
# AWX
from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdateDestroyAPIView
from awx.api.permissions import IsSuperUser
from awx.api.versioning import reverse

View File

@ -40,8 +40,8 @@ def metrics():
registry=REGISTRY,
)
CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the Tower system', registry=REGISTRY)
PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the Tower system', registry=REGISTRY)
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the system', registry=REGISTRY)
PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the system', registry=REGISTRY)
STATUS = Gauge(
'awx_status_total',
'Status of Job launched',
@ -53,7 +53,7 @@ def metrics():
INSTANCE_CAPACITY = Gauge(
'awx_instance_capacity',
'Capacity of each node in a Tower system',
'Capacity of each node in the system',
[
'hostname',
'instance_uuid',
@ -62,7 +62,7 @@ def metrics():
)
INSTANCE_CPU = Gauge(
'awx_instance_cpu',
'CPU cores on each node in a Tower system',
'CPU cores on each node in the system',
[
'hostname',
'instance_uuid',
@ -71,7 +71,7 @@ def metrics():
)
INSTANCE_MEMORY = Gauge(
'awx_instance_memory',
'RAM (Kb) on each node in a Tower system',
'RAM (Kb) on each node in the system',
[
'hostname',
'instance_uuid',
@ -80,7 +80,7 @@ def metrics():
)
INSTANCE_INFO = Info(
'awx_instance',
'Info about each node in a Tower system',
'Info about each node in the system',
[
'hostname',
'instance_uuid',
@ -107,7 +107,7 @@ def metrics():
)
INSTANCE_CONSUMED_CAPACITY = Gauge(
'awx_instance_consumed_capacity',
'Consumed capacity of each node in a Tower system',
'Consumed capacity of each node in the system',
[
'hostname',
'instance_uuid',
@ -116,7 +116,7 @@ def metrics():
)
INSTANCE_REMAINING_CAPACITY = Gauge(
'awx_instance_remaining_capacity',
'Remaining capacity of each node in a Tower system',
'Remaining capacity of each node in the system',
[
'hostname',
'instance_uuid',
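
A hedged sketch of reading the gauges above, assuming they are exposed in Prometheus text format at the usual /api/v2/metrics/ endpoint; host and credentials are placeholders:

# Hedged sketch: scrape the gauges defined above.
import requests

resp = requests.get(
    "https://awx.example.com/api/v2/metrics/",
    auth=("admin", "password"),
    headers={"Accept": "text/plain"},  # ask for the Prometheus exposition format
    timeout=30,
)
resp.raise_for_status()
for line in resp.text.splitlines():
    # lines look like: awx_running_jobs_total 3.0
    if line.startswith(("awx_running_jobs_total", "awx_pending_jobs_total")):
        print(line)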

View File

@ -7,7 +7,7 @@ from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework import serializers
# Tower
# AWX
from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment
@ -58,8 +58,8 @@ register(
field_class=fields.URLField,
schemes=('http', 'https'),
allow_plain_hostname=True, # Allow hostname only without TLD.
label=_('Base URL of the Tower host'),
help_text=_('This setting is used by services like notifications to render ' 'a valid url to the Tower host.'),
label=_('Base URL of the service'),
help_text=_('This setting is used by services like notifications to render ' 'a valid url to the service.'),
category=_('System'),
category_slug='system',
)
@ -84,8 +84,8 @@ register(
field_class=fields.StringListField,
label=_('Proxy IP Allowed List'),
help_text=_(
"If Tower is behind a reverse proxy/load balancer, use this setting "
"to configure the proxy IP addresses from which Tower should trust "
"If the service is behind a reverse proxy/load balancer, use this setting "
"to configure the proxy IP addresses from which the service should trust "
"custom REMOTE_HOST_HEADERS header values. "
"If this setting is an empty list (the default), the headers specified by "
"REMOTE_HOST_HEADERS will be trusted unconditionally')"
@ -172,7 +172,7 @@ register(
register(
'INSTALL_UUID',
field_class=fields.CharField,
label=_('Unique identifier for an AWX/Tower installation'),
label=_('Unique identifier for an installation'),
category=_('System'),
category_slug='system',
read_only=True,
@ -223,7 +223,7 @@ register(
help_text=_(
'Ansible allows variable substitution via the Jinja2 templating '
'language for --extra-vars. This poses a potential security '
'risk where Tower users with the ability to specify extra vars at job '
'risk where users with the ability to specify extra vars at job '
'launch time can use Jinja2 templates to run arbitrary Python. It is '
'recommended that this value be set to "template" or "never".'
),
@ -235,7 +235,7 @@ register(
'AWX_ISOLATION_BASE_PATH',
field_class=fields.CharField,
label=_('Job execution path'),
help_text=_('The directory in which Tower will create new temporary directories for job execution and isolation (such as credential files).'),
help_text=_('The directory in which the service will create new temporary directories for job execution and isolation (such as credential files).'),
category=_('Jobs'),
category_slug='jobs',
)
@ -266,7 +266,7 @@ register(
field_class=fields.BooleanField,
default=False,
label=_('Gather data for Automation Analytics'),
help_text=_('Enables Tower to gather data on automation and send it to Red Hat.'),
help_text=_('Enables the service to gather data on automation and send it to Red Hat Insights.'),
category=_('System'),
category_slug='system',
)
@ -537,8 +537,8 @@ register(
field_class=fields.CharField,
allow_blank=True,
default='',
label=_('Cluster-wide Tower unique identifier.'),
help_text=_('Useful to uniquely identify Tower instances.'),
label=_('Cluster-wide unique identifier.'),
help_text=_('Useful to uniquely identify instances.'),
category=_('Logging'),
category_slug='logging',
)
@ -573,7 +573,7 @@ register(
label=_('Enable/disable HTTPS certificate verification'),
help_text=_(
'Flag to control enable/disable of certificate verification'
' when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, Tower\'s'
' when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the'
' log handler will verify certificate sent by external log aggregator'
' before establishing connection.'
),

View File

@ -18,7 +18,7 @@ def reap_job(j, status):
j.start_args = '' # blank field to remove encrypted passwords
j.job_explanation += ' '.join(
(
'Task was marked as running in Tower but was not present in',
'Task was marked as running but was not present in',
'the job queue, so it has been marked as failed.',
)
)
@ -37,7 +37,7 @@ def reap(instance=None, status='failed', excluded_uuids=[]):
if me is None:
(changed, me) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(me.hostname))
logger.info("Registered node '{}'".format(me.hostname))
now = tz_now()
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter(

View File

@ -10,7 +10,7 @@ from awx.main.utils.pglock import advisory_lock
class Command(BaseCommand):
"""
Deprovision a Tower cluster node
Deprovision a cluster node
"""
help = 'Remove instance from the database. ' 'Specify `--hostname` to use this command.'

View File

@ -16,8 +16,7 @@ from awx.main.utils.encryption import encrypt_field, decrypt_field, encrypt_valu
class Command(BaseCommand):
"""
Regenerate a new SECRET_KEY value and re-encrypt every secret in the
Tower database.
Regenerate a new SECRET_KEY value and re-encrypt every secret in the database.
"""
@transaction.atomic

View File

@ -0,0 +1,90 @@
# Generated by Django 2.2.16 on 2021-04-27 18:07
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0139_isolated_removal'),
]
operations = [
migrations.AlterField(
model_name='credential',
name='credential_type',
field=models.ForeignKey(
help_text='Specify the type of credential you want to create. Refer to the documentation for details on each type.',
on_delete=django.db.models.deletion.CASCADE,
related_name='credentials',
to='main.CredentialType',
),
),
migrations.AlterField(
model_name='credential',
name='inputs',
field=awx.main.fields.CredentialInputField(
blank=True, default=dict, help_text='Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.'
),
),
migrations.AlterField(
model_name='credentialtype',
name='injectors',
field=awx.main.fields.CredentialTypeInjectorField(
blank=True, default=dict, help_text='Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax.'
),
),
migrations.AlterField(
model_name='credentialtype',
name='inputs',
field=awx.main.fields.CredentialTypeInputField(
blank=True, default=dict, help_text='Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.'
),
),
migrations.AlterField(
model_name='inventorysource',
name='enabled_value',
field=models.TextField(
blank=True,
default='',
help_text='Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state" and enabled_value="powered_on" with host variables: { "status": { "power_state": "powered_on", "created": "2020-08-04T18:13:04+00:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"} The host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled',
),
),
migrations.AlterField(
model_name='inventorysource',
name='host_filter',
field=models.TextField(blank=True, default='', help_text='Regex where only matching hosts will be imported.'),
),
migrations.AlterField(
model_name='inventoryupdate',
name='enabled_value',
field=models.TextField(
blank=True,
default='',
help_text='Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state" and enabled_value="powered_on" with host variables: { "status": { "power_state": "powered_on", "created": "2020-08-04T18:13:04+00:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"} The host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled',
),
),
migrations.AlterField(
model_name='inventoryupdate',
name='host_filter',
field=models.TextField(blank=True, default='', help_text='Regex where only matching hosts will be imported.'),
),
migrations.AlterField(
model_name='job',
name='use_fact_cache',
field=models.BooleanField(
default=False,
help_text='If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
),
),
migrations.AlterField(
model_name='jobtemplate',
name='use_fact_cache',
field=models.BooleanField(
default=False,
help_text='If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
),
),
]

View File

@ -27,7 +27,7 @@ def migrate_galaxy_settings(apps, schema_editor):
galaxy_type = CredentialType.objects.get(kind='galaxy')
private_galaxy_url = Setting.objects.filter(key='PRIMARY_GALAXY_URL').first()
# by default, prior versions of AWX/Tower automatically pulled content
# by default, prior versions of AWX automatically pulled content
# from galaxy.ansible.com
public_galaxy_enabled = True
public_galaxy_setting = Setting.objects.filter(key='PUBLIC_GALAXY_ENABLED').first()

View File

@ -1,7 +1,7 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Tower
# AWX
from awx.api.versioning import reverse
from awx.main.fields import JSONField
from awx.main.models.base import accepts_json

View File

@ -89,7 +89,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
related_name='credentials',
null=False,
on_delete=models.CASCADE,
help_text=_('Specify the type of credential you want to create. Refer ' 'to the Ansible Tower documentation for details on each type.'),
help_text=_('Specify the type of credential you want to create. Refer ' 'to the documentation for details on each type.'),
)
managed_by_tower = models.BooleanField(default=False, editable=False)
organization = models.ForeignKey(
@ -101,7 +101,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
related_name='credentials',
)
inputs = CredentialInputField(
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the Ansible Tower documentation for example syntax.')
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
)
admin_role = ImplicitRoleField(
parent_role=[
@ -343,12 +343,12 @@ class CredentialType(CommonModelNameNotUnique):
managed_by_tower = models.BooleanField(default=False, editable=False)
namespace = models.CharField(max_length=1024, null=True, default=None, editable=False)
inputs = CredentialTypeInputField(
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the Ansible Tower documentation for example syntax.')
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
)
injectors = CredentialTypeInjectorField(
blank=True,
default=dict,
help_text=_('Enter injectors using either JSON or YAML syntax. ' 'Refer to the Ansible Tower documentation for example syntax.'),
help_text=_('Enter injectors using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.'),
)
@classmethod
@ -752,7 +752,7 @@ ManagedCredentialType(
'help_text': ugettext_noop(
'OpenStack domains define administrative boundaries. '
'It is only needed for Keystone v3 authentication '
'URLs. Refer to Ansible Tower documentation for '
'URLs. Refer to the documentation for '
'common scenarios.'
),
},
@ -1032,9 +1032,7 @@ ManagedCredentialType(
'label': ugettext_noop('OAuth Token'),
'type': 'string',
'secret': True,
'help_text': ugettext_noop(
'An OAuth token to use to authenticate to Tower with.' 'This should not be set if username/password are being used.'
),
'help_text': ugettext_noop('An OAuth token to use to authenticate with. ' 'This should not be set if username/password are being used.'),
},
{'id': 'verify_ssl', 'label': ugettext_noop('Verify SSL'), 'type': 'boolean', 'secret': False},
],

View File

@ -877,14 +877,14 @@ class InventorySourceOptions(BaseModel):
'}'
'The host would be marked enabled. If power_state were any '
'value other than powered_on then the host would be disabled '
'when imported into Tower. If the key is not found then the '
'when imported. If the key is not found then the '
'host will be enabled'
),
)
host_filter = models.TextField(
blank=True,
default='',
help_text=_('Regex where only matching hosts will be imported into Tower.'),
help_text=_('Regex where only matching hosts will be imported.'),
)
overwrite = models.BooleanField(
default=False,

View File

@ -162,7 +162,7 @@ class JobOptions(BaseModel):
use_fact_cache = models.BooleanField(
default=False,
help_text=_(
"If enabled, Tower will act as an Ansible Fact Cache Plugin; persisting "
"If enabled, the service will act as an Ansible Fact Cache Plugin; persisting "
"facts at the end of a playbook run to the database and caching facts for use by Ansible."
),
)

View File

@ -9,7 +9,7 @@ from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from awx.main.notifications.base import AWXBaseEmailBackend
from awx.main.utils import get_awx_version
from awx.main.utils import get_awx_http_client_headers
from awx.main.notifications.custom_notification_base import CustomNotificationBase
logger = logging.getLogger('awx.main.notifications.webhook_backend')
@ -61,9 +61,6 @@ class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):
def send_messages(self, messages):
sent_messages = 0
self.headers['Content-Type'] = 'application/json'
if 'User-Agent' not in self.headers:
self.headers['User-Agent'] = "Tower {}".format(get_awx_version())
if self.http_method.lower() not in ['put', 'post']:
raise ValueError("HTTP method must be either 'POST' or 'PUT'.")
chosen_method = getattr(requests, self.http_method.lower(), None)
@ -75,7 +72,7 @@ class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):
"{}".format(m.recipients()[0]),
auth=auth,
data=json.dumps(m.body, ensure_ascii=False).encode('utf-8'),
headers=self.headers,
headers=get_awx_http_client_headers(),
verify=(not self.disable_ssl_verification),
)
if r.status_code >= 400:

View File

@ -11,5 +11,5 @@ logger = logging.getLogger('awx.main.scheduler')
@task(queue=get_local_queuename)
def run_task_manager():
logger.debug("Running Tower task manager.")
logger.debug("Running task manager.")
TaskManager().schedule()

View File

@ -220,7 +220,7 @@ def get_awx_http_client_headers():
license = get_license().get('license_type', 'UNLICENSED')
headers = {
'Content-Type': 'application/json',
'User-Agent': '{} {} ({})'.format('AWX' if license == 'open' else 'Red Hat Ansible Tower', get_awx_version(), license),
'User-Agent': '{} {} ({})'.format('AWX' if license == 'open' else 'Red Hat Ansible Automation Platform', get_awx_version(), license),
}
return headers
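
A brief usage sketch for the helper above; the target URL is a placeholder and the User-Agent value shown in the comment is illustrative:

# Usage sketch: send an outbound request with the shared headers so the
# call identifies the product, version, and license type.
import requests
from awx.main.utils import get_awx_http_client_headers

headers = get_awx_http_client_headers()
# e.g. {'Content-Type': 'application/json', 'User-Agent': 'AWX 19.1.0 (open)'}
requests.post("https://hooks.example.com/notify", json={"ok": True}, headers=headers)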
@ -234,7 +234,7 @@ def get_licenser(*args, **kwargs):
else:
return OpenLicense()
except Exception as e:
raise ValueError(_('Error importing Tower License: %s') % e)
raise ValueError(_('Error importing License: %s') % e)
def update_scm_url(scm_type, url, username=True, password=True, check_special_cases=True, scp_format=False):

View File

@ -231,7 +231,7 @@ class Licenser(object):
raise error
except OSError as error:
raise OSError(
'Unable to open certificate bundle {}. Check that Ansible Tower is running on Red Hat Enterprise Linux.'.format(verify)
'Unable to open certificate bundle {}. Check that the service is running on Red Hat Enterprise Linux.'.format(verify)
) from error # noqa
subs.raise_for_status()
@ -258,7 +258,7 @@ class Licenser(object):
raise error
except OSError as error:
raise OSError(
'Unable to open certificate bundle {}. Check that Ansible Tower is running on Red Hat Enterprise Linux.'.format(verify)
'Unable to open certificate bundle {}. Check that the service is running on Red Hat Enterprise Linux.'.format(verify)
) from error # noqa
orgs.raise_for_status()

View File

@ -50,7 +50,7 @@ class ActionModule(ActionBase):
session.auth = requests.auth.HTTPBasicAuth(username, password)
headers = {
'Content-Type': 'application/json',
'User-Agent': '{} {} ({})'.format('AWX' if license == 'open' else 'Red Hat Ansible Tower', awx_version, license),
'User-Agent': '{} {} ({})'.format('AWX' if license == 'open' else 'Red Hat Ansible Automation Platform', awx_version, license),
}
url = '/api/remediations/v1/remediations'
while url:

View File

@ -137,7 +137,7 @@ JOBOUTPUT_ROOT = '/var/lib/awx/job_status/'
# Absolute filesystem path to the directory to store logs
LOG_ROOT = '/var/log/tower/'
# The heartbeat file for the tower scheduler
# The heartbeat file for the scheduler
SCHEDULE_METADATA_LOCATION = os.path.join(BASE_DIR, '.tower_cycle')
# Django gettext files path: locale/<lang-code>/LC_MESSAGES/django.po, django.mo
@ -167,7 +167,7 @@ ALLOWED_HOSTS = []
# reverse proxy.
REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
# If Tower is behind a reverse proxy/load balancer, use this setting to
# If the service is behind a reverse proxy/load balancer, use this setting to
# allow the proxy IP addresses from which Tower should trust custom
# REMOTE_HOST_HEADERS header values
# REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR', 'REMOTE_HOST']
@ -178,7 +178,7 @@ PROXY_IP_ALLOWED_LIST = []
CUSTOM_VENV_PATHS = []
# Warning: this is a placeholder for a configure tower-in-tower setting
# Warning: this is a placeholder for a database setting
# This should not be set via a file.
DEFAULT_EXECUTION_ENVIRONMENT = None
@ -568,7 +568,7 @@ GALAXY_IGNORE_CERTS = False
# Note: This setting may be overridden by database settings.
AWX_ISOLATION_SHOW_PATHS = []
# The directory in which Tower will create new temporary directories for job
# The directory in which the service will create new temporary directories for job
# execution and isolation (such as credential files and custom
# inventory scripts).
# Note: This setting may be overridden by database settings.
@ -585,7 +585,7 @@ AWX_AUTO_DEPROVISION_INSTANCES = False
# Note: This setting may be overridden by database settings.
PENDO_TRACKING_STATE = "off"
# Enables Insights data collection for Ansible Tower.
# Enables Insights data collection.
# Note: This setting may be overridden by database settings.
INSIGHTS_TRACKING_STATE = False
@ -738,7 +738,7 @@ LOG_AGGREGATOR_RSYSLOGD_DEBUG = False
LOG_AGGREGATOR_RSYSLOGD_ERROR_LOG_FILE = '/var/log/tower/rsyslog.err'
# The number of retry attempts for websocket session establishment
# If you're encountering issues establishing websockets in clustered Tower,
# If you're encountering issues establishing websockets in a cluster,
# raising this value can help
CHANNEL_LAYER_RECEIVE_MAX_RETRY = 10

View File

@ -30,14 +30,14 @@ SECRET_KEY = None
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# The heartbeat file for the tower scheduler
# The heartbeat file for the scheduler
SCHEDULE_METADATA_LOCATION = '/var/lib/awx/.tower_cycle'
# Ansible base virtualenv paths and enablement
BASE_VENV_PATH = os.path.realpath("/var/lib/awx/venv")
ANSIBLE_VENV_PATH = os.path.join(BASE_VENV_PATH, "ansible")
# Tower base virtualenv paths and enablement
# Base virtualenv paths and enablement
AWX_VENV_PATH = os.path.join(BASE_VENV_PATH, "awx")
# Store a snapshot of default settings at this point before loading any

View File

@ -10,7 +10,7 @@ from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework import serializers
# Tower
# AWX
from awx.conf import register, register_validate, fields
from awx.sso.fields import (
AuthenticationBackendsField,
@ -51,9 +51,9 @@ class SocialAuthCallbackURL(object):
SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT = _(
'''\
Mapping to organization admins/users from social auth accounts. This setting
controls which users are placed into which Tower organizations based on their
username and email address. Configuration details are available in the Ansible
Tower documentation.\
controls which users are placed into which organizations based on their
username and email address. Configuration details are available in the
documentation.\
'''
)
@ -80,7 +80,7 @@ SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER = collections.OrderedDict(
SOCIAL_AUTH_TEAM_MAP_HELP_TEXT = _(
'''\
Mapping of team members (users) from social auth accounts. Configuration
details are available in Tower documentation.\
details are available in the documentation.\
'''
)
@ -182,7 +182,7 @@ def _register_ldap(append=None):
help_text=_(
'DN (Distinguished Name) of user to bind for all search queries. This'
' is the system user account we will use to login to query LDAP for other'
' user information. Refer to the Ansible Tower documentation for example syntax.'
' user information. Refer to the documentation for example syntax.'
),
category=_('LDAP'),
category_slug='ldap',
@ -235,11 +235,11 @@ def _register_ldap(append=None):
label=_('LDAP User Search'),
help_text=_(
'LDAP search query to find users. Any user that matches the given '
'pattern will be able to login to Tower. The user should also be '
'mapped into a Tower organization (as defined in the '
'pattern will be able to login to the service. The user should also be '
'mapped into an organization (as defined in the '
'AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries '
'need to be supported use of "LDAPUnion" is possible. See '
'Tower documentation for details.'
'the documentation for details.'
),
category=_('LDAP'),
category_slug='ldap',
@ -271,10 +271,10 @@ def _register_ldap(append=None):
default={},
label=_('LDAP User Attribute Map'),
help_text=_(
'Mapping of LDAP user schema to Tower API user attributes. The default'
'Mapping of LDAP user schema to API user attributes. The default'
' setting is valid for ActiveDirectory but users with other LDAP'
' configurations may need to change the values. Refer to the Ansible'
' Tower documentation for additional details.'
' configurations may need to change the values. Refer to the'
' documentation for additional details.'
),
category=_('LDAP'),
category_slug='ldap',
@ -333,12 +333,12 @@ def _register_ldap(append=None):
help_text=_(
'Group DN required to login. If specified, user must be a member '
'of this group to login via LDAP. If not set, everyone in LDAP '
'that matches the user search will be able to login via Tower. '
'that matches the user search will be able to login to the service. '
'Only one require group is supported.'
),
category=_('LDAP'),
category_slug='ldap',
placeholder='CN=Tower Users,OU=Users,DC=example,DC=com',
placeholder='CN=Service Users,OU=Users,DC=example,DC=com',
)
register(
@ -363,7 +363,7 @@ def _register_ldap(append=None):
label=_('LDAP User Flags By Group'),
help_text=_(
'Retrieve users from a given group. At this time, superuser and system'
' auditors are the only groups supported. Refer to the Ansible Tower'
' auditors are the only groups supported. Refer to the'
' documentation for more detail.'
),
category=_('LDAP'),
@ -380,9 +380,9 @@ def _register_ldap(append=None):
label=_('LDAP Organization Map'),
help_text=_(
'Mapping between organization admins/users and LDAP groups. This '
'controls which users are placed into which Tower organizations '
'controls which users are placed into which organizations '
'relative to their LDAP group memberships. Configuration details '
'are available in the Ansible Tower documentation.'
'are available in the documentation.'
),
category=_('LDAP'),
category_slug='ldap',
@ -415,7 +415,7 @@ def _register_ldap(append=None):
field_class=LDAPTeamMapField,
default={},
label=_('LDAP Team Map'),
help_text=_('Mapping between team members (users) and LDAP groups. Configuration' ' details are available in the Ansible Tower documentation.'),
help_text=_('Mapping between team members (users) and LDAP groups. Configuration' ' details are available in the documentation.'),
category=_('LDAP'),
category_slug='ldap',
placeholder=collections.OrderedDict(
@ -554,9 +554,7 @@ register(
default=SocialAuthCallbackURL('google-oauth2'),
label=_('Google OAuth2 Callback URL'),
help_text=_(
'Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'
'Provide this URL as the callback URL for your application as part ' 'of your registration process. Refer to the ' 'documentation for more detail.'
),
category=_('Google OAuth2'),
category_slug='google-oauth2',
@ -607,7 +605,7 @@ register(
help_text=_(
'Extra arguments for Google OAuth2 login. You can restrict it to'
' only allow a single domain to authenticate, even if the user is'
' logged in with multple Google accounts. Refer to the Ansible Tower'
' logged in with multiple Google accounts. Refer to the'
' documentation for more detail.'
),
category=_('Google OAuth2'),
@ -650,9 +648,7 @@ register(
default=SocialAuthCallbackURL('github'),
label=_('GitHub OAuth2 Callback URL'),
help_text=_(
'Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'
'Provide this URL as the callback URL for your application as part ' 'of your registration process. Refer to the ' 'documentation for more detail.'
),
category=_('GitHub OAuth2'),
category_slug='github',
@ -717,9 +713,7 @@ register(
default=SocialAuthCallbackURL('github-org'),
label=_('GitHub Organization OAuth2 Callback URL'),
help_text=_(
'Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'
'Provide this URL as the callback URL for your application as part ' 'of your registration process. Refer to the ' 'documentation for more detail.'
),
category=_('GitHub Organization OAuth2'),
category_slug='github-org',
@ -874,9 +868,7 @@ register(
default=SocialAuthCallbackURL('github-enterprise'),
label=_('GitHub Enterprise OAuth2 Callback URL'),
help_text=_(
'Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'
'Provide this URL as the callback URL for your application as part ' 'of your registration process. Refer to the ' 'documentation for more detail.'
),
category=_('GitHub Enterprise OAuth2'),
category_slug='github-enterprise',
@ -965,9 +957,7 @@ register(
default=SocialAuthCallbackURL('github-enterprise-org'),
label=_('GitHub Enterprise Organization OAuth2 Callback URL'),
help_text=_(
'Provide this URL as the callback URL for your application as part '
'of your registration process. Refer to the Ansible Tower '
'documentation for more detail.'
'Provide this URL as the callback URL for your application as part ' 'of your registration process. Refer to the ' 'documentation for more detail.'
),
category=_('GitHub Enterprise Organization OAuth2'),
category_slug='github-enterprise-org',
@ -1170,9 +1160,7 @@ register(
default=SocialAuthCallbackURL('azuread-oauth2'),
label=_('Azure AD OAuth2 Callback URL'),
help_text=_(
'Provide this URL as the callback URL for your application as part'
' of your registration process. Refer to the Ansible Tower'
' documentation for more detail. '
'Provide this URL as the callback URL for your application as part' ' of your registration process. Refer to the' ' documentation for more detail. '
),
category=_('Azure AD OAuth2'),
category_slug='azuread-oauth2',
@ -1256,7 +1244,7 @@ register(
default=SocialAuthCallbackURL('saml'),
label=_('SAML Assertion Consumer Service (ACS) URL'),
help_text=_(
'Register Tower as a service provider (SP) with each identity '
'Register the service as a service provider (SP) with each identity '
'provider (IdP) you have configured. Provide your SP Entity ID '
'and this ACS URL for your application.'
),
@ -1285,7 +1273,7 @@ register(
help_text=_(
'The application-defined unique identifier used as the '
'audience of the SAML service provider (SP) configuration. '
'This is usually the URL for Tower.'
'This is usually the URL for the service.'
),
category=_('SAML'),
category_slug='saml',
@ -1299,7 +1287,7 @@ register(
required=True,
validators=[validate_certificate],
label=_('SAML Service Provider Public Certificate'),
help_text=_('Create a keypair for Tower to use as a service provider (SP) ' 'and include the certificate content here.'),
help_text=_('Create a keypair to use as a service provider (SP) ' 'and include the certificate content here.'),
category=_('SAML'),
category_slug='saml',
)
@ -1311,7 +1299,7 @@ register(
required=True,
validators=[validate_private_key],
label=_('SAML Service Provider Private Key'),
help_text=_('Create a keypair for Tower to use as a service provider (SP) ' 'and include the private key content here.'),
help_text=_('Create a keypair to use as a service provider (SP) ' 'and include the private key content here.'),
category=_('SAML'),
category_slug='saml',
encrypted=True,
@ -1322,7 +1310,7 @@ register(
field_class=SAMLOrgInfoField,
required=True,
label=_('SAML Service Provider Organization Info'),
help_text=_('Provide the URL, display name, and the name of your app. Refer to' ' the Ansible Tower documentation for example syntax.'),
help_text=_('Provide the URL, display name, and the name of your app. Refer to' ' the documentation for example syntax.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict(
@ -1336,11 +1324,7 @@ register(
allow_blank=True,
required=True,
label=_('SAML Service Provider Technical Contact'),
help_text=_(
'Provide the name and email address of the technical contact for'
' your service provider. Refer to the Ansible Tower documentation'
' for example syntax.'
),
help_text=_('Provide the name and email address of the technical contact for' ' your service provider. Refer to the documentation' ' for example syntax.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict([('givenName', 'Technical Contact'), ('emailAddress', 'techsup@example.com')]),
@ -1352,11 +1336,7 @@ register(
allow_blank=True,
required=True,
label=_('SAML Service Provider Support Contact'),
help_text=_(
'Provide the name and email address of the support contact for your'
' service provider. Refer to the Ansible Tower documentation for'
' example syntax.'
),
help_text=_('Provide the name and email address of the support contact for your' ' service provider. Refer to the documentation for' ' example syntax.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict([('givenName', 'Support Contact'), ('emailAddress', 'support@example.com')]),
@ -1500,7 +1480,7 @@ register(
allow_null=True,
default=None,
label=_('SAML Organization Attribute Mapping'),
help_text=_('Used to translate user organization membership into Tower.'),
help_text=_('Used to translate user organization membership.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict(
@ -1521,7 +1501,7 @@ register(
allow_null=True,
default=None,
label=_('SAML Team Attribute Mapping'),
help_text=_('Used to translate user team membership into Tower.'),
help_text=_('Used to translate user team membership.'),
category=_('SAML'),
category_slug='saml',
placeholder=collections.OrderedDict(

View File

@ -22,7 +22,7 @@ from rest_framework.fields import empty, Field, SkipField
# This must be imported so get_subclasses picks it up
from awx.sso.ldap_group_types import PosixUIDGroupType # noqa
# Tower
# AWX
from awx.conf import fields
from awx.main.validators import validate_certificate
from awx.sso.validators import ( # noqa

View File

@ -8,7 +8,7 @@ from django.utils.translation import ugettext_lazy as _
class UserEnterpriseAuth(models.Model):
"""Tower Enterprise Auth association model"""
"""Enterprise Auth association model"""
PROVIDER_CHOICES = (('radius', _('RADIUS')), ('tacacs+', _('TACACS+')), ('saml', _('SAML')))

View File

@ -2,7 +2,7 @@
import pytest
from unittest import mock
# Tower
# AWX
from awx.sso.backends import _get_or_set_enterprise_user

View File

@ -39,8 +39,8 @@
{% else %}
<li><a href="{% url 'api:login' %}?next={{ request.get_full_path }}" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="Log in"><span class="glyphicon glyphicon-log-in"></span>Log in</a></li>
{% endif %}
<li><a href="//docs.ansible.com/ansible-tower/{{short_tower_version}}/html/towerapi/index.html" target="_blank" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'Ansible Tower API Guide' %}"><span class="glyphicon glyphicon-question-sign"></span><span class="visible-xs-inline">{% trans 'Ansible Tower API Guide' %}</span></a></li>
<li><a href="/" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'Back to Ansible Tower' %}"><span class="glyphicon glyphicon-circle-arrow-left"></span><span class="visible-xs-inline">{% trans 'Back to Ansible Tower' %}</span></a></li>
<li><a href="//docs.ansible.com/ansible-tower/{{short_tower_version}}/html/towerapi/index.html" target="_blank" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'API Guide' %}"><span class="glyphicon glyphicon-question-sign"></span><span class="visible-xs-inline">{% trans 'API Guide' %}</span></a></li>
<li><a href="/" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'Back to application' %}"><span class="glyphicon glyphicon-circle-arrow-left"></span><span class="visible-xs-inline">{% trans 'Back to application' %}</span></a></li>
<li class="hidden-xs"><a href="#" class="resize" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'Resize' %}"><span class="glyphicon glyphicon-resize-full"></span></a></li>
</ul>
</div>
@ -64,7 +64,7 @@
<div class="col-sm-6">
</div>
<div class="col-sm-6 footer-copyright">
Copyright &copy; 2019 <a href="http://www.redhat.com" target="_blank">Red Hat</a>, Inc. All Rights Reserved.
Copyright &copy; 2021 <a href="http://www.redhat.com" target="_blank">Red Hat</a>, Inc. All Rights Reserved.
</div>
</div>
</div>

View File

@ -4,7 +4,7 @@
# Django
from django.utils.translation import ugettext_lazy as _
# Tower
# AWX
from awx.conf import register, fields
from awx.ui.fields import PendoTrackingStateField, CustomLogoField # noqa

View File

@ -9,7 +9,7 @@ import re
# Django
from django.utils.translation import ugettext_lazy as _
# Tower
# AWX
from awx.conf import fields

View File

@ -2,7 +2,7 @@
## UX Considerations
Historically, the code that powers search in the AngularJS version of the AWX/Tower UI is very complex and prone to bugs. In order to reduce that complexity, we've made some UX decisions to help make the code easier to maintain.
Historically, the code that powers search in the AngularJS version of the AWX UI is very complex and prone to bugs. In order to reduce that complexity, we've made some UX decisions to help make the code easier to maintain.
**ALL query params namespaced and in url bar**
@ -311,7 +311,7 @@ It is okay to only make this typing representation available initially (i.e. the
when you click through or type in the search bar for the various phases of crafting the query ("not", "related resource project", "related resource key name", "value foo") which might be represented in the top bar as a series of tags that can be added and removed before submitting the tag.
We will try to form options data from a static file. Because options data is static, we may be able to generate and store as a static file of some sort (that we can use for managing smart search). Alan had ideas around this. If we do this it will mean we don't have to make a ton of requests as we craft smart search filters. It sounds like tower cli may start using something similar.
We will try to form options data from a static file. Because options data is static, we may be able to generate and store as a static file of some sort (that we can use for managing smart search). Alan had ideas around this. If we do this it will mean we don't have to make a ton of requests as we craft smart search filters. It sounds like the cli may start using something similar.
## Smart search flow

View File

@ -287,7 +287,7 @@ function HostFilterLookup({
content={i18n._(
t`Populate the hosts for this inventory by using a search
filter. Example: ansible_facts.ansible_distribution:"RedHat".
Refer to the Ansible Tower documentation for further syntax and
Refer to the documentation for further syntax and
examples.`
)}
/>

View File

@ -90,7 +90,7 @@
"label": "OAuth Token",
"type": "string",
"secret": true,
"help_text": "An OAuth token to use to authenticate to Tower with.This should not be set if username/password are being used."
"help_text": "An OAuth token to use to authenticate with.This should not be set if username/password are being used."
},
{
"id": "verify_ssl",
@ -280,7 +280,7 @@
"id": "domain",
"label": "Domain Name",
"type": "string",
"help_text": "OpenStack domains define administrative boundaries. It is only needed for Keystone v3 authentication URLs. Refer to Ansible Tower documentation for common scenarios."
"help_text": "OpenStack domains define administrative boundaries. It is only needed for Keystone v3 authentication URLs. Refer to the documentation for common scenarios."
},
{
"id": "region",

View File

@ -36,7 +36,7 @@ function CredentialTypeFormFields({ i18n }) {
<FormFullWidthLayout>
<VariablesField
tooltip={i18n._(
t`Enter inputs using either JSON or YAML syntax. Refer to the Ansible Tower documentation for example syntax.`
t`Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.`
)}
id="credential-type-inputs-configuration"
name="inputs"
@ -46,7 +46,7 @@ function CredentialTypeFormFields({ i18n }) {
<FormFullWidthLayout>
<VariablesField
tooltip={i18n._(
t`Enter injectors using either JSON or YAML syntax. Refer to the Ansible Tower documentation for example syntax.`
t`Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax.`
)}
id="credential-type-injectors-configuration"
name="injectors"

View File

@ -124,7 +124,7 @@ function InventorySourceDetail({ inventorySource, i18n }) {
<>
{i18n._(t`If checked, any hosts and groups that were
previously present on the external source but are now removed
will be removed from the Tower inventory. Hosts and groups
will be removed from the inventory. Hosts and groups
that were not managed by the inventory source will be promoted
to the next manually created group or if there is no manually
created group to promote them into, they will be left in the "all"

View File

@ -82,7 +82,7 @@ function InventoryFormFields({ i18n, credentialTypeId, inventory }) {
<FormFullWidthLayout>
<VariablesField
tooltip={i18n._(
t`Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax`
t`Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the documentation for example syntax`
)}
id="inventory-variables"
name="variables"

View File

@ -134,7 +134,7 @@ export const OptionsField = withI18n()(
<>
{i18n._(t`If checked, any hosts and groups that were
previously present on the external source but are now removed
will be removed from the Tower inventory. Hosts and groups
will be removed from the inventory. Hosts and groups
that were not managed by the inventory source will be promoted
to the next manually created group or if there is no manually
created group to promote them into, they will be left in the "all"

View File

@ -99,7 +99,7 @@ const SmartInventoryFormFields = withI18n()(({ i18n, inventory }) => {
tooltip={i18n._(
t`Enter inventory variables using either JSON or YAML syntax.
Use the radio button to toggle between the two. Refer to the
Ansible Tower documentation for example syntax.`
documentation for example syntax.`
)}
/>
</FormFullWidthLayout>

View File

@ -96,7 +96,7 @@ function CustomMessagesSubForm({ defaultMessages, type, i18n }) {
config
)}/html/userguide/notifications.html#create-custom-notifications`}
>
{i18n._(t`Ansible Tower Documentation.`)}
{i18n._(t`Documentation.`)}
</a>
</small>
</Text>

View File

@ -476,7 +476,7 @@ function WebhookFields({ i18n }) {
label={i18n._(t`HTTP Headers`)}
mode="javascript"
tooltip={i18n._(t`Specify HTTP Headers in JSON format. Refer to
the Ansible Tower documentation for example syntax.`)}
the documentation for example syntax.`)}
rows={5}
/>
</FormFullWidthLayout>

View File

@ -88,7 +88,7 @@ const GitSubForm = ({
config
)}/html/userguide/projects.html#manage-playbooks-using-source-control`}
>
{i18n._(t`Ansible Tower Documentation.`)}
{i18n._(t`Documentation.`)}
</a>
</span>
}

View File

@ -83,7 +83,7 @@ describe('<LDAPDetail />', () => {
assertDetail(
wrapper,
'LDAP Require Group',
'CN=Tower Users,OU=Users,DC=example,DC=com'
'CN=Service Users,OU=Users,DC=example,DC=com'
);
assertDetail(wrapper, 'LDAP Deny Group', 'Not configured');
assertVariableDetail(wrapper, 'LDAP User Search', '[]');

View File

@ -169,7 +169,7 @@ describe('<LDAPEdit />', () => {
AUTH_LDAP_GROUP_TYPE: 'MemberDNGroupType',
AUTH_LDAP_GROUP_TYPE_PARAMS: { name_attr: 'cn', member_attr: 'member' },
AUTH_LDAP_ORGANIZATION_MAP: {},
AUTH_LDAP_REQUIRE_GROUP: 'CN=Tower Users,OU=Users,DC=example,DC=com',
AUTH_LDAP_REQUIRE_GROUP: 'CN=Service Users,OU=Users,DC=example,DC=com',
AUTH_LDAP_SERVER_URI: 'ldap://mock.example.com',
AUTH_LDAP_START_TLS: false,
AUTH_LDAP_USER_ATTR_MAP: {},

View File

@ -94,7 +94,7 @@ describe('<MiscSystemDetail />', () => {
'Automation Analytics upload URL',
'https://example.com'
);
assertDetail(wrapper, 'Base URL of the Tower host', 'https://towerhost');
assertDetail(wrapper, 'Base URL of the service', 'https://awxhost');
assertDetail(wrapper, 'Enable HTTP Basic Auth', 'On');
assertDetail(wrapper, 'Gather data for Automation Analytics', 'Off');
assertDetail(wrapper, 'Idle Time Force Log Out', '30000000000 seconds');

View File

@ -45,8 +45,8 @@ function AnalyticsStep({ i18n }) {
<Trans>User and Insights analytics</Trans>
<p>
<Trans>
By default, Tower collects and transmits analytics data on Tower usage
to Red Hat. There are two categories of data collected by Tower. For
By default, we collect and transmit analytics data on service usage
to Red Hat. There are two categories of data collected by the service. For
more information, see{' '}
<Button
component="a"
@ -71,7 +71,7 @@ function AnalyticsStep({ i18n }) {
label={i18n._(t`User analytics`)}
id="pendo-field"
description={i18n._(t`This data is used to enhance
future releases of the Tower Software and help
future releases of the Software and help
streamline customer experience and success.`)}
/>
</FormGroup>
@ -83,8 +83,8 @@ function AnalyticsStep({ i18n }) {
label={i18n._(t`Insights Analytics`)}
id="insights-field"
description={i18n._(t`This data is used to enhance
future releases of the Tower Software and to provide
Insights Analytics to Tower subscribers.`)}
future releases of the Software and to provide
Insights Analytics to subscribers.`)}
/>
</FormGroup>
{requireCredentialFields && (

View File

@ -113,7 +113,7 @@
"AUTH_LDAP_GROUP_SEARCH":["DC=example,DC=com","SCOPE_SUBTREE","(objectClass=group)"],
"AUTH_LDAP_GROUP_TYPE":"MemberDNGroupType",
"AUTH_LDAP_GROUP_TYPE_PARAMS":{"name_attr":"cn","member_attr":"member"},
"AUTH_LDAP_REQUIRE_GROUP":"CN=Tower Users,OU=Users,DC=example,DC=com",
"AUTH_LDAP_REQUIRE_GROUP":"CN=Service Users,OU=Users,DC=example,DC=com",
"AUTH_LDAP_DENY_GROUP":null,
"AUTH_LDAP_USER_FLAGS_BY_GROUP":{"is_superuser":["cn=superusers"]},
"AUTH_LDAP_ORGANIZATION_MAP":{},

View File

@ -16,7 +16,7 @@
"name_attr": "cn",
"member_attr": "member"
},
"AUTH_LDAP_REQUIRE_GROUP": "CN=Tower Users,OU=Users,DC=example,DC=com",
"AUTH_LDAP_REQUIRE_GROUP": "CN=Service Users,OU=Users,DC=example,DC=com",
"AUTH_LDAP_DENY_GROUP": null,
"AUTH_LDAP_USER_FLAGS_BY_GROUP": {},
"AUTH_LDAP_ORGANIZATION_MAP": {},

View File

@ -35,7 +35,7 @@ function AnswerTypeField({ i18n }) {
<Popover
content={i18n._(
t`Choose an answer type or format you want as the prompt for the user.
Refer to the Ansible Tower Documentation for more additional
Refer to the Documentation for additional
information about each option.`
)}
/>

View File

@ -352,7 +352,7 @@ function JobTemplateForm({
label={i18n._(t`Credentials`)}
promptId="template-ask-credential-on-launch"
promptName="ask_credential_on_launch"
tooltip={i18n._(t`Select credentials that allow Tower to access the nodes this job will be ran
tooltip={i18n._(t`Select credentials for accessing the nodes this job will be run
against. You can only select one credential of each type. For machine credentials (SSH),
checking "Prompt on launch" without selecting credentials will require you to select a machine
credential at run time. If you select credentials and check "Prompt on launch", the selected
@ -393,7 +393,7 @@ function JobTemplateForm({
t`Pass extra command line variables to the playbook. This is the
-e or --extra-vars command line parameter for ansible-playbook.
Provide key/value pairs using either YAML or JSON. Refer to the
Ansible Tower documentation for example syntax.`
documentation for example syntax.`
)}
/>
<FormColumnLayout>
@ -502,7 +502,7 @@ function JobTemplateForm({
tooltip={i18n._(t`Tags are useful when you have a large
playbook, and you want to run a specific part of a
play or task. Use commas to separate multiple tags.
Refer to Ansible Tower documentation for details on
Refer to the documentation for details on
the usage of tags.`)}
>
<TagMultiSelect
@ -518,7 +518,7 @@ function JobTemplateForm({
tooltip={i18n._(t`Skip tags are useful when you have a
large playbook, and you want to skip specific parts of a
play or task. Use commas to separate multiple tags. Refer
to Ansible Tower documentation for details on the usage
to the documentation for details on the usage
of tags.`)}
>
<TagMultiSelect

View File

@ -239,7 +239,7 @@ function WorkflowJobTemplateForm({
label={i18n._(t`Variables`)}
promptId="template-ask-variables-on-launch"
tooltip={i18n._(
t`Pass extra command line variables to the playbook. This is the -e or --extra-vars command line parameter for ansible-playbook. Provide key/value pairs using either YAML or JSON. Refer to the Ansible Tower documentation for example syntax.`
t`Pass extra command line variables to the playbook. This is the -e or --extra-vars command line parameter for ansible-playbook. Provide key/value pairs using either YAML or JSON. Refer to the documentation for example syntax.`
)}
/>
</FormFullWidthLayout>

View File

@ -34,8 +34,8 @@ if MODE == 'production':
except FileNotFoundError:
pass
except ValueError as e:
logger.error("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.")
raise Exception("Missing or incorrect metadata for Tower version. Ensure Tower was installed using the setup playbook.") from e
logger.error("Missing or incorrect metadata for controller version. Ensure controller was installed using the setup playbook.")
raise Exception("Missing or incorrect metadata for controller version. Ensure controller was installed using the setup playbook.") from e
# Return the default Django WSGI application.

View File

@ -81,7 +81,7 @@ class ApiV2(base.Base):
# Export methods
def _export(self, _page, post_fields):
# Drop any (credential_type) assets that are being managed by the Tower instance.
# Drop any (credential_type) assets that are being managed by the instance.
if _page.json.get('managed_by_tower'):
log.debug("%s is managed by Tower, skipping.", _page.endpoint)
return None

View File

@ -45,7 +45,7 @@ def render():
# Sphinx document from.
for e in ('TOWER_HOST', 'TOWER_USERNAME', 'TOWER_PASSWORD'):
if not os.environ.get(e):
raise SystemExit('Please specify a valid {} for a real (running) Tower install.'.format(e)) # noqa
raise SystemExit('Please specify a valid {} for a real (running) installation.'.format(e)) # noqa
cli = CLI()
cli.parse_args(['awx', '--help'])
cli.connect()

View File

@ -1,4 +0,0 @@
import sys
if sys.prefix != '/var/lib/awx/venv/tower':
raise RuntimeError('Tower virtualenv not activated. Check WSGIPythonHome in Apache configuration.')
from awx.wsgi import application # NOQA

View File

@ -1,6 +1,6 @@
This folder describes third-party authentications supported by Ansible Tower. These authentications can be configured and enabled inside Tower.
This folder describes the third-party authentication methods supported by AWX. These methods can be configured and enabled inside AWX.
When a user wants to log into Tower, she can explicitly choose some of the supported authentications to log in instead of Tower's own authentication using username and password. Here is a list of such authentications:
When a user wants to log into AWX, they can explicitly choose one of the supported authentication methods instead of AWX's own username-and-password authentication. Here is a list of such methods:
* Google OAuth2
* Github OAuth2
* Github Organization OAuth2
@ -10,18 +10,18 @@ When a user wants to log into Tower, she can explicitly choose some of the suppo
* Github Enterprise Team OAuth2
* Microsoft Azure Active Directory (AD) OAuth2
On the other hand, the other authentication methods use the same types of login info as Tower (username and password), but authenticate using external auth systems rather than Tower's own database. If some of these methods are enabled, Tower will try authenticating using the enabled methods *before Tower's own authentication method*. The order of precedence is:
On the other hand, the other authentication methods use the same types of login info (username and password), but authenticate using external auth systems rather than AWX's own database. If some of these methods are enabled, AWX will try authenticating using the enabled methods *before AWX's own authentication method*. The order of precedence is:
* LDAP
* RADIUS
* TACACS+
* SAML
Tower will try authenticating against each enabled authentication method *in the specified order*, meaning if the same username and password is valid in multiple enabled auth methods (*e.g.*, both LDAP and TACACS+), Tower will only use the first positive match (in the above example, log a user in via LDAP and skip TACACS+).
AWX will try authenticating against each enabled authentication method *in the specified order*, meaning that if the same username and password are valid in multiple enabled auth methods (*e.g.*, both LDAP and TACACS+), AWX will only use the first positive match (in the above example, log the user in via LDAP and skip TACACS+).
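As a rough illustration of that precedence, the selection logic behaves like this sketch (backend names and signatures are illustrative, not AWX's actual code):

```python
# Illustrative only: try each enabled backend in order; the first backend
# that returns a user wins, and any later backends are skipped.
def authenticate(username, password, backends):
    # e.g. backends = [ldap_auth, radius_auth, tacacs_auth, saml_auth, db_auth]
    for backend in backends:
        user = backend(username, password)
        if user is not None:
            return user
    return None
```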
## Notes:
SAML users, RADIUS users and TACACS+ users are categorized as 'Enterprise' users. The following rules apply to Enterprise users:
* Enterprise users can only be created via the first successful login attempt from a remote authentication backend.
* Enterprise users cannot be created/authenticated if non-enterprise users with the same name has already been created in Tower.
* Tower passwords of Enterprise users should always be empty and cannot be set by any user if there are enterprise backends enabled.
* If enterprise backends are disabled, an Enterprise user can be converted to a normal Tower user by setting password field. But this operation is irreversible (the converted Tower user can no longer be treated as Enterprise user).
* Enterprise users cannot be created/authenticated if a non-enterprise user with the same name has already been created in AWX.
* AWX passwords of Enterprise users should always be empty and cannot be set by any user if there are enterprise backends enabled.
* If enterprise backends are disabled, an Enterprise user can be converted to a normal AWX user by setting the password field. But this operation is irreversible (the converted AWX user can no longer be treated as an Enterprise user).

View File

@ -1,5 +1,5 @@
## Introduction
Starting from Tower 3.3, OAuth2 will be used as the new means of token-based authentication. Users
OAuth2 is AWX's means of token-based authentication. Users
will be able to manage OAuth2 tokens as well as applications, a server-side representation of API
clients used to generate tokens. With OAuth2, a user can authenticate by passing a token as part of
the HTTP authentication header. The token can be scoped to have more restrictive permissions on top of
@ -166,7 +166,7 @@ For an OAuth2 token, the only fully mutable fields are `scope` and `description`
field is *immutable on update*, and all other fields are totally immutable, and will be auto-populated
during creation.
* `user` - this field corresponds to the user the token is created for
* `expires` will be generated according to Tower configuration setting `OAUTH2_PROVIDER`
* `expires` will be generated according to the configuration setting `OAUTH2_PROVIDER`
* `token` and `refresh_token` will be auto-generated to be non-clashing random strings.
Both application tokens and personal access tokens will be shown at the `/api/v2/tokens/`
@ -398,6 +398,6 @@ at `/api/v2/tokens/`.
* Incoming requests using unexpired OAuth2 token correctly in authentication header should be able
to successfully authenticate themselves.
* Token scope mask over RBAC should work as described.
* Tower configuration setting `OAUTH2_PROVIDER` should be configurable and function as described.
* AWX configuration setting `OAUTH2_PROVIDER` should be configurable and function as described.
* `/api/o/` endpoint should work as expected. In specific, all examples given in the description
help text should be working (a user following the steps should get expected result).

View File

@ -1,8 +1,6 @@
## Introduction
Before Tower 3.3, an auth token was used as the main authentication method. Starting from Tower 3.3,
session-based authentication will take its place as the main authentication method, and auth token
will be replaced by OAuth 2 tokens.
Session-based authentication is the main authentication method, and auth tokens have been replaced by OAuth 2 tokens.
Session authentication is a safer way of utilizing HTTP(S) cookies. Theoretically, the user can provide authentication information, like username and password, as part of the
`Cookie` header, but this method is vulnerable to cookie hijacks, where crackers can see and steal user

View File

@ -1,7 +1,7 @@
# TACACS+
[Terminal Access Controller Access-Control System Plus (TACACS+)](https://en.wikipedia.org/wiki/TACACS) is a protocol developed by Cisco to handle remote authentication and related services for networked access control through a centralized server. In specific, TACACS+ provides authentication, authorization and accounting (AAA) services. Ansible Tower currently utilizes its authentication service.
[Terminal Access Controller Access-Control System Plus (TACACS+)](https://en.wikipedia.org/wiki/TACACS) is a protocol developed by Cisco to handle remote authentication and related services for networked access control through a centralized server. Specifically, TACACS+ provides authentication, authorization and accounting (AAA) services. AWX currently utilizes its authentication service.
TACACS+ is configured by Tower configuration and is available under `/api/v2/settings/tacacsplus/`. Here is a typical configuration with every configurable field included:
TACACS+ is configured via AWX settings and is available under `/api/v2/settings/tacacsplus/`. Here is a typical configuration with every configurable field included:
```
{
"TACACSPLUS_HOST": "127.0.0.1",
@ -21,7 +21,7 @@ Each field is explained below:
| `TACACSPLUS_SESSION_TIMEOUT` | Integer | 5 | TACACS+ session timeout value in seconds. |
| `TACACSPLUS_AUTH_PROTOCOL` | String with choices | 'ascii' | The authentication protocol used by TACACS+ client (choices are `ascii` and `pap`). |
Under the hood, Tower uses [open-source TACACS+ python client](https://github.com/ansible/tacacs_plus) to communicate with the remote TACACS+ server. During authentication, Tower passes username and password to TACACS+ client, which packs up auth information and sends it to the TACACS+ server. Based on what the server returns, Tower will invalidate login attempt if authentication fails. If authentication passes, Tower will create a user if she does not exist in database, and log the user in.
Under the hood, AWX uses the [open-source TACACS+ python client](https://github.com/ansible/tacacs_plus) to communicate with the remote TACACS+ server. During authentication, AWX passes the username and password to the TACACS+ client, which packs up the auth information and sends it to the TACACS+ server. Based on what the server returns, AWX will invalidate the login attempt if authentication fails. If authentication passes, AWX will create the user if they do not exist in the database, and log the user in.
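For reference, here is a minimal sketch of that client interaction based on the `tacacs_plus` package's documented usage (host, secret, and credentials are placeholders):

```python
from tacacs_plus.client import TACACSClient

# Placeholder connection details; the secret must match the server's key.
client = TACACSClient('127.0.0.1', 49, 'super-secret', timeout=5)
reply = client.authenticate('bob', 'bob-password')
print('PASS' if reply.valid else 'FAIL')
```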
## Test Environment Setup
@ -41,9 +41,9 @@ The playbook creates a user named 'tower' with ascii password default to 'login'
## Acceptance Criteria
* All specified Tower configuration fields should be shown and configurable as documented.
* A user defined by the TACACS+ server should be able to log into Tower.
* User not defined by TACACS+ server should not be able to log into Tower via TACACS+.
* A user existing in TACACS+ server but not in Tower should be created after the first successful log in.
* All configuration fields specified above should be shown and configurable as documented.
* A user defined by the TACACS+ server should be able to log into AWX.
* User not defined by TACACS+ server should not be able to log into AWX via TACACS+.
* A user existing in TACACS+ server but not in AWX should be created after the first successful log in.
* TACACS+ backend should stop an authentication attempt after configured timeout and should not block the authentication pipeline in any case.
* If exceptions occur on TACACS+ server side, the exception details should be logged in Tower, and Tower should not authenticate that user via TACACS+.
* If exceptions occur on TACACS+ server side, the exception details should be logged in AWX, and AWX should not authenticate that user via TACACS+.

View File

@ -1,6 +1,6 @@
## Ansible Tower Capacity Determination and Job Impact
## AWX Capacity Determination and Job Impact
The Ansible Tower capacity system determines how many jobs can run on an Instance given the amount of resources
The AWX capacity system determines how many jobs can run on an Instance given the amount of resources
available to the Instance and the size of the jobs that are running (referred to hereafter as `Impact`).
The algorithm used to determine this is based entirely on two things:
@ -25,18 +25,18 @@ These concepts mean that, in general, Capacity and Impact is not a zero-sum syst
### Resource Determination For Capacity Algorithm
The capacity algorithms are defined in order to determine how many `forks` a system is capable of running at the same time. This controls how
many systems Ansible itself will communicate with simultaneously. Increasing the number of forks a Tower system is running will, in general,
many systems Ansible itself will communicate with simultaneously. Increasing the number of forks an AWX system is running will, in general,
allow jobs to run faster by performing more work in parallel. The tradeoff is that this will increase the load on the system which could cause work
to slow down overall.
Tower can operate in two modes when determining capacity. `mem_capacity` (the default) will allow you to overcommit CPU resources while protecting the system
AWX can operate in two modes when determining capacity. `mem_capacity` (the default) will allow you to overcommit CPU resources while protecting the system
from running out of memory. If most of your work is not CPU-bound, then selecting this mode will maximize the number of forks.
#### Memory Relative Capacity
`mem_capacity` is calculated relative to the amount of memory needed per-fork. Taking into account the overhead for Tower's internal components, this comes out
`mem_capacity` is calculated relative to the amount of memory needed per-fork. Taking into account the overhead for AWX's internal components, this comes out
to be about `100MB` per fork. When considering the amount of memory available to Ansible jobs the capacity algorithm will reserve 2GB of memory to account
for the presence of other Tower services. The algorithm itself looks like this:
for the presence of other AWX services. The algorithm itself looks like this:
(mem - 2048) / mem_per_fork
@ -44,7 +44,7 @@ As an example:
(4096 - 2048) / 100 == ~20
So a system with 4GB of memory would be capable of running 20 forks. The value `mem_per_fork` can be controlled by setting the Tower settings value
So a system with 4GB of memory would be capable of running 20 forks. The value `mem_per_fork` can be controlled by setting the AWX settings value
(or environment variable) `SYSTEM_TASK_FORKS_MEM` which defaults to `100`.
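Expressed as a quick sketch (values follow the documented defaults above):

```python
def mem_capacity(total_mem_mb, mem_per_fork=100, reserved_mb=2048):
    # Reserve 2GB for other AWX services, then divide the rest per fork.
    return max((total_mem_mb - reserved_mb) // mem_per_fork, 0)

assert mem_capacity(4096) == 20  # the 4GB example above
```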
@ -53,7 +53,7 @@ So a system with 4GB of memory would be capable of running 20 forks. The value `
Oftentimes, Ansible workloads can be fairly CPU-bound. In these cases, sometimes reducing the simultaneous workload allows more tasks to run faster and reduces
the average time-to-completion of those jobs.
Just as the Tower `mem_capacity` algorithm uses the amount of memory needed per-fork, the `cpu_capacity` algorithm looks at the amount of CPU resources is needed
Just as the AWX `mem_capacity` algorithm uses the amount of memory needed per-fork, the `cpu_capacity` algorithm looks at the amount of CPU resources needed
per fork. The baseline value for this is `4` forks per core. The algorithm itself looks like this:
cpus * fork_per_cpu
@ -62,7 +62,7 @@ For example, in a 4-core system:
4 * 4 == 16
The value `fork_per_cpu` can be controlled by setting the Tower settings value (or environment variable) `SYSTEM_TASK_FORKS_CPU`, which defaults to `4`.
The value `fork_per_cpu` can be controlled by setting the AWX settings value (or environment variable) `SYSTEM_TASK_FORKS_CPU`, which defaults to `4`.
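The CPU-based counterpart as a sketch:

```python
def cpu_capacity(cpu_count, forks_per_cpu=4):
    # SYSTEM_TASK_FORKS_CPU defaults to 4 forks per core.
    return cpu_count * forks_per_cpu

assert cpu_capacity(4) == 16  # the 4-core example above
```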
### Job Impacts Relative To Capacity
@ -70,13 +70,13 @@ When selecting the capacity, it's important to understand how each job type affe
It's helpful to understand what `forks` mean to Ansible: http://docs.ansible.com/ansible/latest/intro_configuration.html#forks
The default forks value for ansible is `5`. However, if Tower knows that you're running against fewer systems than that, then the actual concurrency value
The default forks value for Ansible is `5`. However, if AWX knows that you're running against fewer systems than that, then the actual concurrency value
will be lower.
When a job is made to run, Tower will add `1` to the number of forks selected to compensate for the Ansible parent process. So if you are running a playbook against `5`
When a job is made to run, AWX will add `1` to the number of forks selected to compensate for the Ansible parent process. So if you are running a playbook against `5`
systems with a `forks` value of `5`, then the actual `forks` value from the perspective of Job Impact will be 6.
#### Impact of Job Types in Tower
#### Impact of Job Types in AWX
Jobs and Ad-hoc jobs follow the above model, `forks + 1`.
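As a hedged sketch of that calculation (the capping behavior is inferred from the description above):

```python
def job_impact(forks, host_count):
    # Effective concurrency cannot exceed the number of targeted hosts;
    # add 1 for the Ansible parent process.
    return min(forks, host_count) + 1

assert job_impact(5, 5) == 6  # the example from the text
assert job_impact(5, 2) == 3  # fewer hosts than forks lowers the impact
```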
@ -88,7 +88,7 @@ Other job types have a fixed impact:
### Selecting the Right Capacity
Selecting between a memory-focused capacity algorithm and a CPU-focused capacity for your Tower use means you'll be selecting between a minimum
Selecting between a memory-focused capacity algorithm and a CPU-focused capacity for your AWX use means you'll be selecting between a minimum
and maximum value. In the above examples, the CPU capacity would allow a maximum of 16 forks while the Memory capacity would allow 20. For some systems,
the disparity between these can be large and oftentimes you may want to have a balance between these two.

View File

@ -1,10 +1,10 @@
## Tower Clustering/HA Overview
## AWX Clustering/HA Overview
Prior to 3.1, the Ansible Tower HA solution was not a true high-availability system. This system has been entirely rewritten in 3.1 with a focus towards a proper highly-available clustered system. This has been extended further in 3.2 to allow grouping of clustered instances into different pools/queues.
* Each instance should be able to act as an entry point for UI and API Access.
This should enable Tower administrators to use load balancers in front of as many instances as they wish and maintain good data visibility.
* Each instance should be able to join the Tower cluster and expand its ability to execute jobs.
This should enable AWX administrators to use load balancers in front of as many instances as they wish and maintain good data visibility.
* Each instance should be able to join the AWX cluster and expand its ability to execute jobs.
* Provisioning a new instance should be as simple as updating the `inventory` file and re-running the setup playbook.
* Instances can be de-provisioned with a simple management command.
* Instances can be grouped into one or more Instance Groups to share resources for topical purposes.
@ -18,7 +18,7 @@ Prior to 3.1, the Ansible Tower HA solution was not a true high-availability sys
It's important to point out a few existing things:
* PostgreSQL is still a standalone instance and is not clustered. Replica configuration will not be managed. If the user configures standby replicas, database failover will also not be managed.
* All instances should be reachable from all other instances and they should be able to reach the database. It's also important for the hosts to have a stable address and/or hostname (depending on how you configure the Tower host).
* All instances should be reachable from all other instances and they should be able to reach the database. It's also important for the hosts to have a stable address and/or hostname (depending on how you configure the AWX host).
* Existing old-style HA deployments will be transitioned automatically to the new HA system during the upgrade process to 3.1.
* Manual projects will need to be synced to all instances by the customer.
@ -27,8 +27,8 @@ Ansible Tower 3.3 adds support for container-based clusters using Openshift or K
## Important Changes
* There is no concept of primary/secondary in the new Tower system. *All* systems are primary.
* The `inventory` file for Tower deployments should be saved/persisted. If new instances are to be provisioned, the passwords and configuration options as well as host names will need to be available to the installer.
* There is no concept of primary/secondary in the new AWX system. *All* systems are primary.
* The `inventory` file for AWX deployments should be saved/persisted. If new instances are to be provisioned, the passwords and configuration options as well as host names will need to be available to the installer.
## Concepts and Configuration
@ -70,18 +70,17 @@ Recommendations and constraints:
- Do not create a group named `instance_group_tower`.
- Do not name any instance the same as a group name.
### Provisioning and Deprovisioning Instances and Groups
* **Provisioning** - Provisioning Instances after installation is supported by updating the `inventory` file and re-running the setup playbook. It's important that this file contain all passwords and information used when installing the cluster, or other instances may be reconfigured (this can be done intentionally).
* **Deprovisioning** - Tower does not automatically de-provision instances since it cannot distinguish between an instance that was taken offline intentionally or due to failure. Instead, the procedure for de-provisioning an instance is to shut it down (or stop the `automation-controller-service`) and run the Tower de-provision command:
* **Deprovisioning** - AWX does not automatically de-provision instances since it cannot distinguish between an instance that was taken offline intentionally or due to failure. Instead, the procedure for de-provisioning an instance is to shut it down (or stop the `automation-controller-service`) and run the AWX de-provision command:
```
$ awx-manage deprovision_instance --hostname=<hostname>
```
* **Removing/Deprovisioning Instance Groups** - Tower does not automatically de-provision or remove instance groups, even though re-provisioning will often cause these to be unused. They may still show up in API endpoints and stats monitoring. These groups can be removed with the following command:
* **Removing/Deprovisioning Instance Groups** - AWX does not automatically de-provision or remove instance groups, even though re-provisioning will often cause these to be unused. They may still show up in API endpoints and stats monitoring. These groups can be removed with the following command:
```
$ awx-manage unregister_queue --queuename=<name>
@ -102,12 +101,12 @@ An `Instance` that is added to an `InstanceGroup` will automatically reconfigure
### Instance Group Policies
Tower `Instances` can be configured to automatically join `Instance Groups` when they come online by defining a policy. These policies are evaluated for
AWX `Instances` can be configured to automatically join `Instance Groups` when they come online by defining a policy. These policies are evaluated for
every new Instance that comes online.
Instance Group Policies are controlled by three optional fields on an `Instance Group`:
* `policy_instance_percentage`: This is a number between 0 - 100. It guarantees that this percentage of active Tower instances will be added to this `Instance Group`. As new instances come online, if the number of Instances in this group relative to the total number of instances is fewer than the given percentage, then new ones will be added until the percentage condition is satisfied.
* `policy_instance_percentage`: This is a number between 0 - 100. It guarantees that this percentage of active AWX instances will be added to this `Instance Group`. As new instances come online, if the number of Instances in this group relative to the total number of instances is fewer than the given percentage, then new ones will be added until the percentage condition is satisfied.
* `policy_instance_minimum`: This policy attempts to keep at least this many `Instances` in the `Instance Group`. If the number of available instances is lower than this minimum, then all `Instances` will be placed in this `Instance Group`.
* `policy_instance_list`: This is a fixed list of `Instance` names to always include in this `Instance Group`.
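A rough sketch of how these three fields might combine when a new instance comes online (the actual evaluation logic lives inside AWX and may differ):

```python
def should_join(group, instance_name, group_size, total_instances):
    # A fixed membership list always wins.
    if instance_name in group["policy_instance_list"]:
        return True
    # Keep at least the configured minimum number of instances in the group.
    if group_size < group["policy_instance_minimum"]:
        return True
    # Otherwise, top up until the percentage condition is satisfied.
    pct = group["policy_instance_percentage"]
    return (100 * group_size) / max(total_instances, 1) < pct
```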
@ -144,7 +143,7 @@ HTTP PATCH /api/v2/instances/X/
### Status and Monitoring
Tower itself reports as much status as it can via the API at `/api/v2/ping` in order to provide validation of the health of the Cluster. This includes:
AWX itself reports as much status as it can via the API at `/api/v2/ping` in order to provide validation of the health of the Cluster. This includes:
* The instance servicing the HTTP request.
* The last heartbeat time of all other instances in the cluster.
@ -156,25 +155,25 @@ information can be seen at `/api/v2/instances/` and `/api/v2/instance_groups`.
### Instance Services and Failure Behavior
Each Tower instance is made up of several different services working collaboratively:
Each AWX instance is made up of several different services working collaboratively:
* **HTTP Services** - This includes the Tower application itself as well as external web services.
* **HTTP Services** - This includes the AWX application itself as well as external web services.
* **Callback Receiver** - Receives job events that result from running Ansible jobs.
* **Celery** - The worker queue that processes and runs all jobs.
* **Redis** - this is used as a queue for AWX to process ansible playbook callback events.
Tower is configured in such a way that if any of these services or their components fail, then all services are restarted. If these fail sufficiently (often in a short span of time), then the entire instance will be placed offline in an automated fashion in order to allow remediation without causing unexpected behavior.
AWX is configured in such a way that if any of these services or their components fail, then all services are restarted. If these fail sufficiently (often in a short span of time), then the entire instance will be placed offline in an automated fashion in order to allow remediation without causing unexpected behavior.
### Job Runtime Behavior
Ideally a regular user of Tower should not notice any semantic difference to the way jobs are run and reported. Behind the scenes it is worth pointing out the differences in how the system behaves.
Ideally a regular user of AWX should not notice any semantic difference to the way jobs are run and reported. Behind the scenes it is worth pointing out the differences in how the system behaves.
When a job is submitted from the API interface, it gets pushed into the dispatcher queue via postgres notify/listen (https://www.postgresql.org/docs/10/sql-notify.html), and the task is handled by the dispatcher process running on that specific Tower node. If an instance fails while executing jobs, then the work is marked as permanently failed.
When a job is submitted from the API interface, it gets pushed into the dispatcher queue via postgres notify/listen (https://www.postgresql.org/docs/10/sql-notify.html), and the task is handled by the dispatcher process running on that specific AWX node. If an instance fails while executing jobs, then the work is marked as permanently failed.
If a cluster is divided into separate Instance Groups, then the behavior is similar to the cluster as a whole. If two instances are assigned to a group then either one is just as likely to receive a job as any other in the same group.
As Tower instances are brought online, it effectively expands the work capacity of the Tower system. If those instances are also placed into Instance Groups, then they also expand that group's capacity. If an instance is performing work and it is a member of multiple groups, then capacity will be reduced from all groups for which it is a member. De-provisioning an instance will remove capacity from the cluster wherever that instance was assigned.
As AWX instances are brought online, they effectively expand the work capacity of the AWX system. If those instances are also placed into Instance Groups, then they also expand that group's capacity. If an instance is performing work and it is a member of multiple groups, then capacity will be reduced from all groups for which it is a member. De-provisioning an instance will remove capacity from the cluster wherever that instance was assigned.
It's important to note that not all instances are required to be provisioned with an equal capacity.
@ -219,8 +218,8 @@ When this property is disabled, no jobs will be assigned to that `Instance`. Exi
When verifying acceptance, we should ensure that the following statements are true:
* Tower should install as a standalone Instance
* Tower should install in a Clustered fashion
* AWX should install as a standalone Instance
* AWX should install in a Clustered fashion
* Instances should, optionally, be able to be grouped arbitrarily into different Instance Groups
* Capacity should be tracked at the group level and capacity impact should make sense relative to what instance a job is running on and what groups that instance is a member of
* Provisioning should be supported via the setup playbook
@ -228,8 +227,8 @@ When verifying acceptance, we should ensure that the following statements are tr
* All jobs, inventory updates, and project updates should run successfully
* Jobs should be able to run on hosts for which they are targeted; if assigned implicitly or directly to groups, then they should only run on instances in those Instance Groups
* Project updates should manifest their data on the host that will run the job immediately prior to the job running
* Tower should be able to reasonably survive the removal of all instances in the cluster
* Tower should behave in a predictable fashion during network partitioning
* AWX should be able to reasonably survive the removal of all instances in the cluster
* AWX should behave in a predictable fashion during network partitioning
## Testing Considerations

View File

@ -1 +1 @@
This folder contains documentation related to credentials in AWX / Ansible Tower.
This folder contains documentation related to credentials in AWX.

View File

@ -4,10 +4,10 @@ Extracting Credential Values
AWX stores a variety of secrets in the database that are either used for automation or are a result of automation. These secrets include:
- all secret fields of all credential types (passwords, secret keys, authentication tokens, secret cloud credentials)
- secret tokens and passwords for external services defined in Ansible Tower settings
- secret tokens and passwords for external services defined in AWX settings
- "password" type survey fields entries
To encrypt secret fields, Tower uses AES in CBC mode with a 256-bit key for encryption, PKCS7 padding, and HMAC using SHA256 for authentication.
To encrypt secret fields, AWX uses AES in CBC mode with a 256-bit key for encryption, PKCS7 padding, and HMAC using SHA256 for authentication.
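As an illustration of that encrypt-then-MAC construction, here is a generic sketch using the `cryptography` package; this is not AWX's actual implementation, and key handling and field formats differ in practice:

```python
import os
from cryptography.hazmat.primitives import hashes, hmac, padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def encrypt_field(key: bytes, plaintext: bytes) -> bytes:
    # key must be 32 bytes for AES-256
    iv = os.urandom(16)
    padder = padding.PKCS7(128).padder()
    padded = padder.update(plaintext) + padder.finalize()
    encryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).encryptor()
    ciphertext = iv + encryptor.update(padded) + encryptor.finalize()
    mac = hmac.HMAC(key, hashes.SHA256())  # authenticate IV + ciphertext
    mac.update(ciphertext)
    return ciphertext + mac.finalize()
```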
If necessary, credentials and encrypted settings can be extracted using the AWX shell:

View File

@ -168,7 +168,7 @@ Vault credentials in AWX now have an optional field, `vault_id`, which is
analogous to the `--vault-id` argument to `ansible-playbook`. To run
a playbook which makes use of multiple Vault passwords:
1. Make a Vault credential in Tower for each Vault password; specify the Vault
1. Make a Vault credential in AWX for each Vault password; specify the Vault
ID as a field on the credential and input the password (which will be
encrypted and stored).
2. Assign multiple Vault credentials to the job template via the new

View File

@ -131,12 +131,12 @@ WHERE (now() - pg_stat_activity.query_start) > interval '5 minutes';
Remote Debugging
----------------
Python processes in Tower's development environment are kept running in the
Python processes in AWX's development environment are kept running in the
background via supervisord. As such, interacting with them via Python's
standard `pdb.set_trace()` isn't possible.
Bundled in our container environment is a remote debugging tool, `sdb`. You
can use it to set remote breakpoints in Tower code and debug interactively over
can use it to set remote breakpoints in AWX code and debug interactively over
a telnet session:
```python
@ -147,7 +147,7 @@ a telnet session:
def run(self, pk, **kwargs):
# This will set a breakpoint and open an interactive Python
# debugger exposed on a random port between 7899-7999. The chosen
# port will be reported as a warning in the Tower logs, e.g.,
# port will be reported as a warning in the AWX logs, e.g.,
#
# [2017-01-30 22:26:04,366: WARNING/Worker-11] Remote Debugger:7900: Please telnet into 0.0.0.0 7900.
#
@ -163,7 +163,7 @@ that encounters a breakpoint will wait until an active client is established
(it won't handle additional tasks) and concludes the debugging session with
a `continue` command.
To simplify remote debugging session management, Tower's development
To simplify remote debugging session management, AWX's development
environment comes with tooling that can automatically discover open
remote debugging sessions and automatically connect to them. From your *host*
machine (*i.e.*, _outside_ of the development container), you can run:

View File

@ -1 +1 @@
This folder contains documentation related to inventories in AWX / Ansible Tower.
This folder contains documentation related to inventories in AWX.

View File

@ -1,5 +1,5 @@
# Inventory Refresh Overview
Tower should have an inventory view that is more aligned towards systems management
AWX should have an inventory view that is more aligned towards systems management
rather than merely maintaining inventory for automation.
## Inventory Source Promotion
@ -7,7 +7,7 @@ Starting with Tower 3.2, `InventorySource` will be associated directly with an `
## Fact Searching
Facts generated by an Ansible playbook during a Job Template run are stored by Tower into the database
Facts generated by an Ansible playbook during a Job Template run are stored by AWX into the database
whenever `use_fact_cache=True` is set per-Job-Template. New facts are merged with existing
facts and are per-host. These stored facts can be used to filter hosts via the
`/api/v2/hosts` endpoint, using the GET query parameter `host_filter` *i.e.*,
@ -77,7 +77,7 @@ Creating a new _Smart Inventory_ for all of our GCE and EC2 groups might look li
### More On Searching
The `host_filter` that is set will search over the entirety of the hosts the user has
access to in Tower. If the user wants to restrict their search in anyway, they will
access to in AWX. If the user wants to restrict their search in any way, they will
want to declare that in their host filter.
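A hypothetical query using the `host_filter` parameter might look like this (URL, credentials, and filter expression are placeholders):

```python
import requests

resp = requests.get(
    "https://awx.example.com/api/v2/hosts/",
    params={"host_filter": 'ansible_facts__ansible_distribution="CentOS"'},
    auth=("admin", "password"),
)
for host in resp.json()["results"]:
    print(host["name"])
```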
For example, if wanting to restrict the search to only hosts in an inventory

View File

@ -82,7 +82,7 @@ Some test scenarios to look at:
`group_vars`, `host_vars`, etc.
- Test scripts in the project repo
- Test scripts that use environment variables provided by a credential
in Tower
in AWX
- Test multiple inventories that use the same project, pointing to different
files / directories inside of the project
- Feature works correctly even if project doesn't have any playbook files
@ -127,7 +127,7 @@ access to the source-control, a user may want to restrict which instance
groups the inventory update runs on.
If the inventory source is set to update on project update, it will run
on the same instance (inside of the Tower cluster) as the project update.
on the same instance (inside of the AWX cluster) as the project update.
This can be restricted by limiting the instance groups of the organization
that contains the `source_project` of the inventory source.

View File

@ -1,6 +1,6 @@
## Ansible Callback and Job Events
There is no concept of a job event in Ansible. Job Events are JSON structures, created when Ansible calls the Tower callback plugin hooks (*i.e.*, `v2_playbook_on_task_start`, `v2_runner_on_ok`, etc.). The Job Event data structures contain data from the parameters of the callback hooks plus unique IDs that reference other Job Events. There is usually a one-to-one relationship between a Job Event and an Ansible callback plugin function call.
There is no concept of a job event in Ansible. Job Events are JSON structures, created when Ansible calls the runner callback plugin hooks (*i.e.*, `v2_playbook_on_task_start`, `v2_runner_on_ok`, etc.). The Job Event data structures contain data from the parameters of the callback hooks plus unique IDs that reference other Job Events. There is usually a one-to-one relationship between a Job Event and an Ansible callback plugin function call.
## Job Event Relationships
@ -9,7 +9,7 @@ The Job Event relationship is strictly hierarchical. In the example details belo
* There is always one and only one `v2_playbook_on_start` event and it is the first event.
* `v2_playbook_on_play_start` is generated once per-play in the playbook; two such events would be generated from the playbook example below.
* The `v2_playbook_on_task_start` function is called once for each task under the default execution strategy. Other execution strategies (*i.e.*, free or serial) can result in the `v2_playbook_on_task_start` function being called multiple times, one for each host. Tower only creates a Job Event for the **first** `v2_playbook_on_task_start` call. Subsequent calls for the same task do **not** result in Job Events being created.
* The `v2_playbook_on_task_start` function is called once for each task under the default execution strategy. Other execution strategies (*i.e.*, free or serial) can result in the `v2_playbook_on_task_start` function being called multiple times, one for each host. AWX only creates a Job Event for the **first** `v2_playbook_on_task_start` call. Subsequent calls for the same task do **not** result in Job Events being created.
* `v2_runner_on_[ok, failed, skipped, unreachable, retry, item_on_ok, item_on_failed, item_on_skipped]`; one `v2_runner_on_...` Job Event will be created for each `v2_playbook_on_task_start` event.
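To make the parent/child relationship concrete, here is a hedged sketch of job events as JSON-ready structures (field names are illustrative, not AWX's exact schema):

```python
import uuid

def make_job_event(event_type, parent_uuid=None, **event_data):
    return {
        "event": event_type,
        "uuid": str(uuid.uuid4()),
        "parent_uuid": parent_uuid,  # links the event into the hierarchy
        "event_data": event_data,
    }

play = make_job_event("playbook_on_play_start")
task = make_job_event("playbook_on_task_start", parent_uuid=play["uuid"])
ok = make_job_event("runner_on_ok", parent_uuid=task["uuid"], host="web01")
```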
@ -115,4 +115,4 @@ A management command for event replay exists for replaying jobs at varying speed
## Code References
* For a more comprehensive list of Job Events and the hierarchy they form, go here: https://github.com/ansible/awx/blob/devel/awx/main/models/jobs.py#L870
* Exhaustive list of Job Events in Tower: https://github.com/ansible/awx/blob/devel/awx/main/models/jobs.py#L900
* Exhaustive list of Job Events in AWX: https://github.com/ansible/awx/blob/devel/awx/main/models/jobs.py#L900

View File

@ -7,7 +7,7 @@ Job Slicing solves this problem by adding a Job Template field `job_slice_count`
## Implications for Job Execution
When jobs are sliced, they can run on any Tower node; however, some may not run at the same time. Because of this, anything that relies on setting/sliced state (using modules such as `set_fact`) will not work as expected. It's reasonable to expect that not all jobs will actually run at the same time (*e.g.*, if there is not enough capacity in the system)
When jobs are sliced, they can run on any AWX node; however, some may not run at the same time. Because of this, anything that relies on setting/sliced state (using modules such as `set_fact`) will not work as expected. It's reasonable to expect that not all jobs will actually run at the same time (*e.g.*, if there is not enough capacity in the system)
## Simultaneous Execution Behavior

View File

@ -1,11 +1,10 @@
The components listed herein have been integrated into Red Hat, Inc.'s
distribution of Ansible Tower. Red Hat, Inc. supports Towers use of and
interactions with these components for both development and production
purposes, subject to applicable terms and conditions. Unless otherwise
agreed to in writing, the use of Ansible Tower is subject to the Red Hat End
User License Agreement located at
https://www.redhat.com/en/about/red-hat-end-user-license-agreements
and Red Hat Enterprise Agreement located at
https://www.redhat.com/en/about/licenses-us. Ansible Tower is a proprietary
product offered by Red Hat, Inc. and its use is not intended to prohibit
the rights under any open source license.
distribution of Ansible Automation Platform. Red Hat, Inc. supports the
platform's use of and interactions with these components for both
development and production purposes, subject to applicable terms and
conditions. Unless otherwise agreed to in writing, the use of Ansible
Automation Platform is subject to the Red Hat End User License Agreement
located at
https://www.redhat.com/en/about/red-hat-end-user-license-agreements and Red
Hat Enterprise Agreement located at
https://www.redhat.com/en/about/licenses-us.

View File

@ -2,7 +2,7 @@
This feature builds in the capability to send detailed logs to several kinds
of third party external log aggregation services. Services connected to this
data feed should be useful in order to gain insights into Tower usage
data feed should be useful in order to gain insights into AWX usage
or technical trends. The data is intended to be
sent in JSON format in one of three ways: over an HTTP connection, a direct TCP
connection, or a direct UDP connection. It uses minimal service-specific
@ -17,14 +17,14 @@ following the same structure as one would expect if obtaining the data
from the API. These data loggers are the following:
- `awx.analytics.job_events` - Data returned from the Ansible callback module
- `awx.analytics.activity_stream` - Record of changes to the objects within the Ansible Tower app
- `awx.analytics.activity_stream` - Record of changes to the objects within the AWX app
- `awx.analytics.system_tracking` - Data gathered by Ansible scan modules ran by scan job templates
These loggers only use log-level of `INFO`. Additionally, the standard Tower logs are deliverable through this
These loggers only use a log level of `INFO`. Additionally, the standard AWX logs are deliverable through this
same mechanism. It should be obvious to the user how to enable or disable
each of these five sources of data without manipulating a complex dictionary
in their local settings file, as well as adjust the log level consumed
from the standard Tower logs.
from the standard AWX logs.
## Supported Services
@ -92,8 +92,8 @@ Common schema for all loggers:
| Field | Information |
|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `cluster_host_id` | (string) Unique identifier of the host within the Tower cluster |
| `level` | (choice of `DEBUG`, `INFO`, `WARNING`, `ERROR`, etc.) Standard python log level, roughly reflecting the significance of the event; all of the data loggers (as a part of this feature) use `INFO` level, but the other Tower logs will use different levels as appropriate |
| `cluster_host_id` | (string) Unique identifier of the host within the AWX cluster |
| `level` | (choice of `DEBUG`, `INFO`, `WARNING`, `ERROR`, etc.) Standard python log level, roughly reflecting the significance of the event; all of the data loggers (as a part of this feature) use `INFO` level, but the other AWX logs will use different levels as appropriate |
| `logger_name` | (string) Name of the logger we use in the settings, *e.g.*, "`awx.analytics.activity_stream`" |
| `@timestamp` | (datetime) Time of log |
| `path` | (string) File path in code where the log was generated |
@ -105,7 +105,7 @@ Common schema for all loggers:
|-------------------|-------------------------------------------------------------------------------------------------------------------------|
| (common) | This uses all the fields common to all loggers listed above |
| actor | (string) Username of the user who took the action documented in the log |
| changes | (string) Summary of the new and changed values for the object the action was taken on |
| changes | (string) Unique identifier of the host within the AWX cluster |
| operation | (choice of several options) The basic category of the change logged in the Activity Stream, for instance, "associate". |
| object1 | (string) Information about the primary object being operated on, consistent with what we show in the Activity Stream |
| object2 | (string) If applicable, the second object involved in the action |
@ -149,7 +149,7 @@ In addition to common fields, these logs include fields present on
the job model.
## Tower Logs
## AWX Logs
In addition to the common fields, this will contain a `msg` field with
the log message. Errors contain a separate `traceback` field.
These logs can be enabled or disabled in the settings interface by adding or removing
them from the setting `LOG_AGGREGATOR_LOGGERS`.
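A hypothetical settings snippet (the exact default list may differ between releases):

```python
LOG_AGGREGATOR_LOGGERS = [
    'awx',              # standard AWX logs
    'activity_stream',  # awx.analytics.activity_stream
    'job_events',       # awx.analytics.job_events
    'system_tracking',  # awx.analytics.system_tracking
]
```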
# Configuring Inside of Tower
# Configuring Inside of AWX
Parameters needed in order to configure the connection to the log
aggregation service will include most of the following for all
@ -178,7 +178,7 @@ Some settings for the log handler will not be exposed to the user via
this mechanism. For example, threading (enabled).
Parameters for the items listed above should be configurable through
the Configure-Tower-in-Tower interface.
the settings interface.
One note on configuring Host and Port: When entering a URL, it is customary to
include the port number, like `https://localhost:4399/foo/bar`. So for the convenience
@ -196,7 +196,7 @@ portion will be extracted as the actual hostname.
**Connection:** Testers need to replicate the documented steps for setting up
and connecting with a destination log aggregation service, if that is
an officially supported service. That will involve 1) configuring the
settings, as documented, 2) taking some action in Tower that causes a log
settings, as documented, 2) taking some action in AWX that causes a log
message from each type of data logger to be sent and 3) verifying that
the content is present in the log aggregation service.
@ -206,7 +206,7 @@ It also needs to be confirmed that the schema is consistent with the
documentation. In the case of Splunk, we need basic confirmation that
the data is compatible with the existing app schema.
**Tower logs:** Formatting of Traceback message is a known issue in several
**AWX logs:** Formatting of Traceback message is a known issue in several
open-source log handlers, so we should confirm that server errors result
in the log aggregator receiving a well-formatted multi-line string
with the traceback message.
@ -215,4 +215,4 @@ Log messages should be sent outside of the
request-response cycle. For example, Loggly examples use
rsyslog, which handles these messages without interfering with other
operations. A timeout on the part of the log aggregation service should
not cause Tower operations to hang.
not cause AWX operations to hang.

View File

@ -1,9 +1,9 @@
Starting from API V2, the Named URL feature lets users access Tower resources via resource-specific human-readable identifiers. Previously, the only way of accessing a resource object without auxiliary query string was via resource primary key number(*e.g.*, via URL path `/api/v2/hosts/2/`). Now users can use named URL to do the same thing, for example, via URL path `/api/v2/hosts/host_name++inv_name++org_name/`.
Starting from API V2, the Named URL feature lets users access AWX resources via resource-specific human-readable identifiers. Previously, the only way of accessing a resource object without an auxiliary query string was via its primary key number (*e.g.*, via URL path `/api/v2/hosts/2/`). Now users can use a named URL to do the same thing, for example, via URL path `/api/v2/hosts/host_name++inv_name++org_name/`.
## Usage
There are two named-URL-related Tower configuration settings available under `/api/v2/settings/named-url/`: `NAMED_URL_FORMATS` and `NAMED_URL_GRAPH_NODES`.
There are two named-URL-related AWX configuration settings available under `/api/v2/settings/named-url/`: `NAMED_URL_FORMATS` and `NAMED_URL_GRAPH_NODES`.
`NAMED_URL_FORMATS` is a *read only* key-value pair list of all available named URL identifier formats. A typical `NAMED_URL_FORMATS` looks like this:
```
@ -42,21 +42,21 @@ An important aspect of generating unique identifiers for named URL is dealing wi
`NAMED_URL_FORMATS` exclusively lists every resource that can have named URL; any resource not listed there has no named URL. `NAMED_URL_FORMATS` alone should be instructive enough for users to compose human-readable unique identifier and named URL themselves. For more convenience, every object of a resource that can have named URL will have a related field `named_url` that displays that object's named URL. Users can simply copy-paste that field for their custom usages. Also, users are expected to see indications in the help text of the API browser if a resource object has named URL.
Although `NAMED_URL_FORMATS` is immutable on the user side, it will be automatically modified and expanded over time, reflecting underlying resource modification and expansion. Please consult `NAMED_URL_FORMATS` on the same Tower cluster where you want to use the named URL feature against.
Although `NAMED_URL_FORMATS` is immutable on the user side, it will be automatically modified and expanded over time, reflecting underlying resource modification and expansion. Please consult `NAMED_URL_FORMATS` on the AWX cluster against which you intend to use the named URL feature.
`NAMED_URL_GRAPH_NODES` is another *read-only* list of key-value pairs that exposes the internal graph data structure that Tower uses to manage named URLs. This is not supposed to be human-readable but should be used for programmatically generating named URLs. An example script of generating a named URL given the primary key of arbitrary resource objects that can have named URL (using info provided by `NAMED_URL_GRAPH_NODES`) can be found as `/tools/scripts/pk_to_named_url.py`.
`NAMED_URL_GRAPH_NODES` is another *read-only* list of key-value pairs that exposes the internal graph data structure that AWX uses to manage named URLs. This is not supposed to be human-readable but should be used for programmatically generating named URLs. An example script that generates a named URL from the primary key of an arbitrary resource object that can have a named URL (using info provided by `NAMED_URL_GRAPH_NODES`) can be found at `/tools/scripts/pk_to_named_url.py`.
## Identifier Format Protocol
Resources in Tower are identifiable by their unique keys, which are basically tuples of resource fields. Every Tower resource is guaranteed to have its primary key number alone as a unique key, but there might be multiple other unique keys.
Resources in AWX are identifiable by their unique keys, which are basically tuples of resource fields. Every AWX resource is guaranteed to have its primary key number alone as a unique key, but there might be multiple other unique keys.
A resource can generate identifier formats and thus have a named URL if it contains at least one unique key that satisfies the rules below:
1. The key *contains and only contains* fields that are either the `name` field, or text fields with a finite number of possible choices (like credential type resource's `kind` field).
2. The only exceptional field allowed to break the first rule is a many-to-one related field relating to a resource *other than self* that is also allowed to have a slug.
Here is an example for understanding the rules: Suppose Tower has resources `Foo` and `Bar`; both `Foo` and `Bar` contain a `name` field and a `choice` field that can only have value `'yes'` or `'no'`. Additionally, resource `Foo` contains a many-to-one field (a foreign key) relating to `Bar`, say `fk`. `Foo` has a unique key tuple `(name, choice, fk)` and `Bar` has a unique key tuple `(name, choice)`. Apparently `Bar` can have named URL because it satisfies rule 1. On the other hand, `Foo` can also have named URL, because although `Foo` breaks rule 1, the extra field breaking rule 1 is a `fk` field, which is many-to-one-related to `Bar` and `Bar` can have named URL.
Here is an example for understanding the rules: Suppose AWX has resources `Foo` and `Bar`; both `Foo` and `Bar` contain a `name` field and a `choice` field that can only have the value `'yes'` or `'no'`. Additionally, resource `Foo` contains a many-to-one field (a foreign key) relating to `Bar`, say `fk`. `Foo` has a unique key tuple `(name, choice, fk)` and `Bar` has a unique key tuple `(name, choice)`. Clearly `Bar` can have a named URL because it satisfies rule 1. On the other hand, `Foo` can also have a named URL: although `Foo` breaks rule 1, the extra field breaking rule 1 is the `fk` field, which is many-to-one-related to `Bar`, and `Bar` can have a named URL.
For resources satisfying rule 1 above, their human-readable unique identifiers are combinations of foreign key fields, delimited by `+`. Specifically, resource `Bar` above will have the slug format `<name>+<choice>`. Note the field order matters in slug format: `name` field always comes first if present, followed by all the rest of the fields arranged in lexicographic order of field name. For example, if `Bar` also has an `a_choice` field satisfying rule 1 and the unique key becomes `(name, choice, a_choice)`, its slug format becomes `<name>+<a_choice>+<choice>`.
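That ordering rule can be summarized in a short sketch:

```python
def slug_format(unique_key_fields):
    # `name` comes first if present; remaining fields sort lexicographically.
    rest = sorted(f for f in unique_key_fields if f != "name")
    ordered = (["name"] if "name" in unique_key_fields else []) + rest
    return "+".join("<%s>" % f for f in ordered)

assert slug_format(("name", "choice")) == "<name>+<choice>"
assert slug_format(("name", "choice", "a_choice")) == "<name>+<a_choice>+<choice>"
```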
@ -74,9 +74,9 @@ When generating identifiers according to the given identifier format, there are
## Implementation Overview
Module `awx.main.utils.named_url_graph` stands at the core of named URL implementation. It exposes a single public function, `generate_graph`. `generate_graph` accepts a list of Tower models in Tower that might have named URL (meaning they have corresponding endpoints under `/api/v2/`), filter out those that are unable to have named URLs, and connect the rest together into a named URL graph. The graph is available as a settings option, `NAMED_URL_GRAPH`, and each node of it contains all info needed to generate named URL identifier formats and parse incoming named URL identifiers.
Module `awx.main.utils.named_url_graph` stands at the core of the named URL implementation. It exposes a single public function, `generate_graph`. `generate_graph` accepts a list of AWX models that might have named URLs (meaning they have corresponding endpoints under `/api/v2/`), filters out those that cannot have named URLs, and connects the rest together into a named URL graph. The graph is available as a settings option, `NAMED_URL_GRAPH`, and each node of it contains all info needed to generate named URL identifier formats and parse incoming named URL identifiers.
`generate_graph` will run only once for each Tower WSGI process. This is guaranteed by putting the function call inside `__init__` of `URLModificationMiddleware`. When an incoming request enters `URLModificationMiddleware`, the part of its URL path that could contain a valid named URL identifier is extracted and processed to find (possible) corresponding resource objects. The internal process is basically crawling against part of the named URL graph. If the object is found, the identifier part of the URL path is converted to the object's primary key. Going forward, Tower can treat the request with the old-styled URL.
`generate_graph` will run only once for each AWX WSGI process. This is guaranteed by putting the function call inside `__init__` of `URLModificationMiddleware`. When an incoming request enters `URLModificationMiddleware`, the part of its URL path that could contain a valid named URL identifier is extracted and processed to find (possible) corresponding resource objects. The internal process is basically crawling against part of the named URL graph. If the object is found, the identifier part of the URL path is converted to the object's primary key. Going forward, AWX can treat the request as if it used the old-style URL.
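Conceptually, the conversion step resembles this sketch; `lookup_pk_from_named_url` is a hypothetical stand-in for the graph crawl described above:

```python
def lookup_pk_from_named_url(identifier):
    # Hypothetical stand-in for the named URL graph crawl.
    return {"host_name++inv_name++org_name": 2}.get(identifier)

def convert_named_url(path, prefix="/api/v2/hosts/"):
    # If the identifier segment is not a plain primary key, resolve it.
    if path.startswith(prefix):
        identifier = path[len(prefix):].rstrip("/")
        if identifier and not identifier.isdigit():
            pk = lookup_pk_from_named_url(identifier)
            return "%s%s/" % (prefix, pk)
    return path

assert convert_named_url("/api/v2/hosts/host_name++inv_name++org_name/") == "/api/v2/hosts/2/"
```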
## Job Template Organization Changes
@ -93,7 +93,7 @@ If multiple job templates with the same name exist, the oldest one will be retur
In general, acceptance should follow what's in the "Usage" section. The contents in the "Identifier Format Protocol" section should not be relevant.
* The classical way of getting objects via primary keys should behave the same.
* Tower configuration for named URL should work as described. Particularly, `NAMED_URL_FORMATS` should be immutable on the user's side and display accurately-named URL identifier format info.
* AWX configuration for named URL should work as described. Particularly, `NAMED_URL_FORMATS` should be immutable on the user's side and display accurate named URL identifier format info.
* `NAMED_URL_FORMATS` should be exclusive, meaning resources specified in `NAMED_URL_FORMATS` should have named URL, and resources not specified there should *not* have named URL.
* If a resource can have named URL, its objects should have a `named_url` field which represents the object-specific named URL. That field should only be visible under detail view, not list view.
* A user following the rules specified in `NAMED_URL_FORMATS` should be able to generate named URL exactly the same as the `named_url` field.

View File

@ -14,7 +14,7 @@ At a high level, the typical notification task flow is:
## Templated notification messages
When creating a notification template, the user can optionally provide their own custom messages for each notification event (start, success, error). If a message is not provided, the default message generated by Tower will be used.
When creating a notification template, the user can optionally provide their own custom messages for each notification event (start, success, error). If a message is not provided, the default message generated by AWX will be used.
The notification message can include templated fields written using Jinja templates. The templates may reference a set of white-listed fields found in the associated job's serialization.
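For example, a hypothetical custom message rendered with Jinja (field names are illustrative, not the exact whitelist):

```python
from jinja2 import Template

template = Template("{{ job.name }} finished with status {{ job.status }}: {{ url }}")
print(template.render(
    job={"name": "Demo Job", "status": "successful"},
    url="https://awx.example.com/#/jobs/42",
))
```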
@ -115,7 +115,7 @@ The following should be performed for good acceptance:
## Mattermost
The Mattermost notification integration uses Incoming Webhooks. A password is not required because the webhook URL itself is the secret. Webhooks must be enabled in the System Console of Mattermost. If the user wishes to allow Ansible Tower notifications to modify the Icon URL and username of the notification, then they must enable these options as well.
The Mattermost notification integration uses Incoming Webhooks. A password is not required because the webhook URL itself is the secret. Webhooks must be enabled in the System Console of Mattermost. If the user wishes to allow AWX notifications to modify the Icon URL and username of the notification, then they must enable these options as well.
In order to enable these settings in Mattermost:
1. Go to System Console > Integrations > Custom Integrations. Check "Enable Incoming Webhooks".
@ -160,9 +160,9 @@ The following fields are available for the Rocket.Chat notification type:
## Pagerduty
Pagerduty is a fairly straightforward integration. The user will create an API Key in the Pagerduty system (this will be the token that is given to Tower) and then create a "Service" which will provide an "Integration Key" that will also be given to Tower. The other options of note are:
Pagerduty is a fairly straightforward integration. The user will create an API Key in the Pagerduty system (this will be the token that is given to AWX) and then create a "Service" which will provide an "Integration Key" that will also be given to AWX. The other options of note are:
* `subdomain`: When you sign up for the Pagerduty account, you will get a unique subdomain to communicate with. For instance, if you signed up as "towertest", the web dashboard will be at *towertest.pagerduty.com* and you will give the Tower API "towertest" as the subdomain (not the full domain).
* `subdomain`: When you sign up for the Pagerduty account, you will get a unique subdomain to communicate with. For instance, if you signed up as "towertest", the web dashboard will be at *towertest.pagerduty.com* and you will give the AWX API "towertest" as the subdomain (not the full domain).
* `client_name`: This will be sent along with the alert content to the Pagerduty service to help identify the service that is using the API key/service. This is helpful if multiple integrations are using the same API key and service.
### Testing considerations
@ -178,7 +178,7 @@ Pagerduty allows you to sign up for a free trial with the service.
Twilio is a Voice and SMS automation service. Once you are signed in, you'll need to create a phone number from which the message will be sent. You'll then define a "Messaging Service" under Programmable SMS and associate the number (the one you created for this purpose) with it. Note that you may need to verify this number or other account information before you are allowed to send messages to any numbers. The Messaging Service does not need a status callback URL, nor does it need the ability to process inbound messages.
Under your individual (or sub) account settings, you will have API credentials. The Account SID and AuthToken are what will be given to Tower. There are a couple of other important fields:
Under your individual (or sub) account settings, you will have API credentials. The Account SID and AuthToken are what will be given to AWX. There are a couple of other important fields:
* `from_number`: This is the number associated with the messaging service above and must be given in the form of "+15556667777".
* `to_numbers`: This will be the list of numbers to receive the SMS and should be the 10-digit phone number.
@ -194,7 +194,7 @@ Twilio is fairly straightforward to sign up for but there may not be a free tria
## IRC
The Tower IRC notification takes the form of an IRC bot that will connect, deliver its messages to channel(s) or individual user(s), and then disconnect. The Tower notification bot also supports SSL authentication. The Tower bot does not currently support Nickserv identification. If a channel or user does not exist or is not online, then the Notification will not fail; the failure scenario is reserved specifically for connectivity.
The AWX IRC notification takes the form of an IRC bot that will connect, deliver its messages to channel(s) or individual user(s), and then disconnect. The AWX notification bot also supports SSL authentication. The AWX bot does not currently support Nickserv identification. If a channel or user does not exist or is not online, then the Notification will not fail; the failure scenario is reserved specifically for connectivity.
Connectivity information is straightforward:
@ -216,7 +216,7 @@ There are a few modern IRC servers to choose from. [InspIRCd](http://www.inspirc
## Webhook
The webhook notification type in Ansible Tower provides a simple interface for sending `POST`s to a predefined web service. Tower will `POST` to this address using `application/json` content type with the data payload containing all relevant details in JSON format.
The webhook notification type in AWX provides a simple interface for sending `POST`s to a predefined web service. AWX will `POST` to this address using `application/json` content type with the data payload containing all relevant details in JSON format.
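The shape of that request can be sketched as follows; the target URL is a placeholder and the payload is abbreviated, since the real payload carries the job's full details:

```python
# Sketch of the webhook-style POST; URL and payload keys are placeholders.
import json
from urllib.request import Request, urlopen

payload = {'id': 42, 'name': 'Demo Job Template', 'status': 'successful'}
req = Request(
    'http://httpbin/post',
    data=json.dumps(payload).encode('utf-8'),
    headers={'Content-Type': 'application/json'},
)
print(urlopen(req).status)  # 200 if the receiver accepted the POST
```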
The parameters are fairly straightforward:
@ -256,7 +256,7 @@ This will create an `httpbin` service reachable from the AWX container at `http:
## Grafana
The Grafana notification type allows you to create Grafana annotations. Details about this feature of Grafana are available at http://docs.grafana.org/reference/annotations/. In order to allow Tower to add annotations, an API Key needs to be created in Grafana. Note that the created annotations are region events with start and endtime of the associated Tower Job. The annotation description is also provided by the subject of the associated Tower Job, for example:
The Grafana notification type allows you to create Grafana annotations. Details about this feature of Grafana are available at http://docs.grafana.org/reference/annotations/. In order to allow AWX to add annotations, an API Key needs to be created in Grafana. Note that the created annotations are region events whose start and end times match the associated AWX Job. The annotation description is taken from the subject of the associated AWX Job, for example:
```
Job #1 'Ping Macbook' succeeded: https://towerhost/#/jobs/playbook/1

View File

@ -1,7 +1,7 @@
# Role-Based Access Control (RBAC)
This document describes the RBAC implementation of the Ansible Tower Software.
The intended audience of this document is the Ansible Tower developer.
This document describes the RBAC implementation of the AWX Software.
The intended audience of this document is the AWX developer.
## Overview

View File

@ -16,7 +16,7 @@ The `schedule()` function is run (a) periodically by a background task and (b) o
`schedule()` is triggered via both mechanisms because of the following properties:
1. It reduces the time from launch to running, resulting in a better user experience.
2. It is a fail-safe in case we miss code-paths, in the present and future, that change the scheduling considerations for which we should call `schedule()` (_i.e._, adding new nodes to Tower changes the capacity, obscure job error handling that fails a job).
2. It is a fail-safe in case we miss code-paths, in the present and future, that change the scheduling considerations for which we should call `schedule()` (_e.g._, adding new nodes to AWX changes the capacity, obscure job error handling that fails a job).
Empirically, the periodic task manager has been effective in the past and will continue to be relied upon with the added event-triggered `schedule()`.
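The dual-trigger pattern can be sketched as below; the function and hook names are made up for illustration and are not the actual AWX task names:

```python
# Illustrative dual trigger for the scheduling pass; names are made up.
def schedule():
    """Stand-in for the task manager's full scheduling pass."""

def on_task_status_change(task_status):
    # Event trigger: a state change that alters scheduling considerations.
    if task_status in ('pending', 'successful', 'failed', 'error'):
        schedule()

def periodic_beat():
    # Fail-safe trigger: fires every N seconds even if an event was missed.
    schedule()
```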
@ -35,7 +35,7 @@ Empirically, the periodic task manager has been effective in the past and will c
|:----------:|:------------------------------------------------------------------------------------------------------------------:|
| pending | Job has been launched. <br>1. Hasn't yet been seen by the scheduler <br>2. Is blocked by another task <br>3. Not enough capacity |
| waiting | Job published to an AMQP queue. |
| running | Job is running on a Tower node.
| running | Job is running on an AWX node. |
| successful | Job finished with `ansible-playbook` return code 0. |
| failed | Job finished with `ansible-playbook` return code other than 0. |
| error | System failure. |
@ -48,7 +48,7 @@ The Task Manager decides which exact node a job will run on. It does so by consi
## Code Composition
The main goal of the new task manager is to run in our HA environment. This translates to making the task manager logic run on any Tower node. To support this, we need to remove any reliance on the state between task manager schedule logic runs. A future goal of AWX is to design the task manager to have limited/no access to the database for this feature. This secondary requirement, combined with performance needs, led to the creation of partial models that wrap dict database model data.
The main goal of the new task manager is to run in our HA environment. This translates to making the task manager logic run on any AWX node. To support this, we need to remove any reliance on the state between task manager schedule logic runs. A future goal of AWX is to design the task manager to have limited/no access to the database for this feature. This secondary requirement, combined with performance needs, led to the creation of partial models that wrap dict database model data.
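The partial-model idea can be sketched as a thin wrapper over pre-fetched row data; the class and field names below are illustrative, not the actual partial models:

```python
# Illustrative partial model: wraps a dict of pre-fetched row data so the
# scheduling logic never touches the database mid-run.
class PartialJob:
    FIELDS = ('id', 'status', 'job_template_id', 'allow_simultaneous')

    def __init__(self, row):
        self.data = {f: row[f] for f in self.FIELDS}

    def __getattr__(self, name):
        try:
            return self.data[name]
        except KeyError:
            raise AttributeError(name)

job = PartialJob({'id': 1, 'status': 'pending', 'job_template_id': 7, 'allow_simultaneous': False})
print(job.status)  # 'pending'
```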
### Blocking Logic
@ -73,7 +73,7 @@ The new task manager should, in essence, work like the old one. Old task manager
### Update on Launch Logic
This is a feature in Tower where dynamic inventory and projects associated with Job Templates may be set to invoke and update when related Job Templates are launched. Related to this feature is a cache feature on dynamic inventory updates and project updates. The rules for these two intertwined features are below:
This is a feature in AWX where dynamic inventories and projects associated with Job Templates may be set to update when related Job Templates are launched. Related to this feature is a cache feature on dynamic inventory updates and project updates; a sketch of the cache check follows the rules below. The rules for these two intertwined features are:
* Projects marked as `update on launch` should trigger a project update when a related job template is launched.
* Inventory sources marked as `update on launch` should trigger an inventory update when a related job template is launched.
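The cache portion can be sketched as a simple timestamp check; the field names (`cache_timeout`, `last_updated`) are illustrative:

```python
# Sketch of the update-on-launch cache check; field names are illustrative.
from datetime import datetime, timedelta, timezone

def needs_update(source):
    if not source['update_on_launch']:
        return False
    age = datetime.now(timezone.utc) - source['last_updated']
    return age > timedelta(seconds=source['cache_timeout'])

src = {'update_on_launch': True, 'cache_timeout': 300,
       'last_updated': datetime.now(timezone.utc) - timedelta(seconds=30)}
print(needs_update(src))  # False: the last update is still fresh
```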

View File

@ -1,11 +1,11 @@
Tower configuration gives Tower users the ability to adjust multiple runtime parameters of Tower, which enables much more fine-grained control over Tower runs.
AWX configuration gives AWX users the ability to adjust multiple runtime parameters of AWX, which enables much more fine-grained control over AWX runs.
## Usage manual
#### To Use:
The REST endpoint for CRUD operations against Tower configurations can be found at `/api/v2/settings/`. GETing to that endpoint will return a list of available Tower configuration categories and their URLs, such as `"system": "/api/v2/settings/system/"`. The URL given to each category is the endpoint for CRUD operations against individual settings under that category.
The REST endpoint for CRUD operations against AWX configurations can be found at `/api/v2/settings/`. A GET to that endpoint will return a list of available AWX configuration categories and their URLs, such as `"system": "/api/v2/settings/system/"`. The URL given to each category is the endpoint for CRUD operations against individual settings under that category.
Here is a typical Tower configuration category GET response:
Here is a typical AWX configuration category GET response:
```
GET /api/v2/settings/github-team/
HTTP 200 OK
@ -27,10 +27,10 @@ X-API-Time: 0.026s
}
```
The returned body is a JSON of key-value pairs, where the key is the name of the Tower configuration setting, and the value is the value of that setting. To update the settings, simply update setting values and PUT/PATCH to the same endpoint.
The returned body is a JSON object of key-value pairs, where the key is the name of the AWX configuration setting and the value is the value of that setting. To update the settings, simply update setting values and PUT/PATCH to the same endpoint, as sketched below.
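For example, with the `requests` library (host, credentials, and the setting name are placeholders):

```python
# Sketch: PATCH a single setting in the category endpoint shown above.
# Host, credentials, and the setting name are placeholders.
import requests

resp = requests.patch(
    'https://awx.example.com/api/v2/settings/github-team/',
    auth=('admin', 'password'),
    json={'SOCIAL_AUTH_GITHUB_TEAM_ID': '12345'},
)
resp.raise_for_status()
```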
#### To Develop:
Each Django app in Tower should have a `conf.py` file where related settings get registered. Below is the general format for `conf.py`:
Each Django app in AWX should have a `conf.py` file where related settings get registered. Below is the general format for `conf.py`:
```python
# Other dependencies
@ -39,7 +39,7 @@ Each Django app in Tower should have a `conf.py` file where related settings get
# Django
from django.utils.translation import ugettext_lazy as _
# Tower
# AWX
from awx.conf import fields, register
# Other dependencies
@ -52,7 +52,7 @@ register(
# Other setting registries
```
`register` is the endpoint API for registering individual Tower configurations:
`register` is the API for registering individual AWX configuration settings:
```
register(
setting,
@ -75,16 +75,16 @@ Here are the details for each argument:
| `**field_related_kwargs` | `**kwargs` | Key-worded arguments needed to initialize an instance of `field_class`. |
| `category_slug` | `str` | The actual identifier used for finding individual setting categories. |
| `category` | transformable string, like `_('foobar')` | The human-readable form of `category_slug`, mainly for display. |
| `depends_on` | `list` of `str`s | A list of setting names this setting depends on. A setting this setting depends on is another Tower configuration setting whose changes may affect the value of this setting. |
| `depends_on` | `list` of `str`s | A list of setting names this setting depends on. Each dependency is another AWX configuration setting whose changes may affect the value of this setting. |
| `placeholder` | transformable string, like `_('foobar')` | A human-readable string displaying a typical value for the setting, mainly used by the UI. |
| `encrypted` | `boolean` | A flag which determines whether the setting value should be encrypted. |
| `defined_in_file` | `boolean` | A flag which determines whether a value has been manually set in the settings file. |
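Putting the arguments together, a registration might look like the following sketch; the setting name, field class, and help text are examples following the skeleton above, not a real AWX setting:

```python
# Illustrative registration following the conf.py skeleton above.
register(
    'EXAMPLE_GREETING',
    field_class=fields.CharField,
    allow_blank=True,
    default='hello',
    label=_('Example greeting'),
    help_text=_('Greeting text used by a hypothetical feature.'),
    category=_('System'),
    category_slug='system',
)
```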
During Tower bootstrapping, **all** settings registered in `conf.py` modules of Tower Django apps will be loaded (registered). This set of Tower configuration settings will form a new top-level of the `django.conf.settings` object. Later, all Tower configuration settings will be available as attributes of it, just like the normal Django settings. Note that Tower configuration settings take higher priority over normal settings, meaning if a setting `FOOBAR` is both defined in a settings file *and* registered in `conf.py`, the registered attribute will be used over the defined attribute every time.
During AWX bootstrapping, **all** settings registered in `conf.py` modules of AWX Django apps will be loaded (registered). This set of AWX configuration settings will form a new top layer of the `django.conf.settings` object. Later, all AWX configuration settings will be available as attributes of it, just like the normal Django settings. Note that AWX configuration settings take priority over normal settings, meaning if a setting `FOOBAR` is both defined in a settings file *and* registered in `conf.py`, the registered attribute will be used over the defined attribute every time.
Please note that when registering new configurations, it is recommended to provide a default value if it is possible to do so, as the Tower configuration UI has a 'revert all' functionality that reverts all settings to its default value.
Please note that when registering new configurations, it is recommended to provide a default value if it is possible to do so, as the AWX configuration UI has a 'revert all' functionality that reverts all settings to their default values.
Starting with version 3.2, Tower configuration supports category-specific validation functions. They should also be defined under `conf.py` in the form
Configuration supports category-specific validation functions. They should also be defined under `conf.py` in the form
```python
def custom_validate(serializer, attrs):
'''
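    A sketch of what a category validator might contain; the
    (serializer, attrs) signature follows the skeleton above. Raise a
    ValidationError to reject a bad combination of submitted settings,
    otherwise return attrs.
    '''
    # Illustrative body only: the setting names are made up, and this
    # assumes DRF's `serializers` module is imported in conf.py.
    if attrs.get('EXAMPLE_ENABLED') and not attrs.get('EXAMPLE_HOST'):
        raise serializers.ValidationError(_('EXAMPLE_HOST is required when EXAMPLE_ENABLED is true.'))
    return attrs
```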

View File

@ -1,6 +1,6 @@
## Tower Workflow Overview
## AWX Workflow Overview
Workflows are structured compositions of Tower job resources. The only job of a workflow is to trigger other jobs in specific orders to achieve certain goals, such as tracking the full set of jobs that were part of a release process as a single unit.
Workflows are structured compositions of AWX job resources. The only job of a workflow is to trigger other jobs in specific orders to achieve certain goals, such as tracking the full set of jobs that were part of a release process as a single unit.
A workflow has an associated tree-graph that is composed of multiple nodes. Each node in the tree has one associated template (job template, inventory update, project update, approval template, or workflow job template) along with related resources that, if defined, will override the associated job template resources (*i.e.*, credential, inventory, etc.) if the job template associated with the node is selected to run.
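The node bookkeeping can be pictured as a small structure like the sketch below; the field names are illustrative, and the real `WorkflowJobTemplateNode` is a Django model with many more fields:

```python
# Illustrative node shape; not the actual model definition.
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Node:
    unified_job_template: str        # what to run: JT, project update, approval, ...
    inventory: Optional[str] = None  # optional prompt overriding the template's inventory
    success_nodes: List['Node'] = field(default_factory=list)
    failure_nodes: List['Node'] = field(default_factory=list)
    always_nodes: List['Node'] = field(default_factory=list)

root = Node('Project Update', success_nodes=[Node('Deploy JT', inventory='prod')])
```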
@ -128,7 +128,7 @@ Workflow job summary:
...
```
Starting from Tower 3.2, Workflow jobs support simultaneous job runs just like that of ordinary jobs. It is controlled by the `allow_simultaneous` field of underlying workflow job template. By default, simultaneous workflow job runs are disabled and users should be prudent in enabling this functionality, since the performance boost of simultaneous workflow runs will only manifest when a large portion of jobs contained by a workflow allow simultaneous runs. Otherwise, it is expected to have some long-running workflow jobs since its spawned jobs can be in pending state for a long time.
Workflow jobs support simultaneous job runs just like ordinary jobs. It is controlled by the `allow_simultaneous` field of the underlying workflow job template. By default, simultaneous workflow job runs are disabled, and users should be prudent in enabling this functionality, since the performance boost of simultaneous workflow runs will only manifest when a large portion of the jobs contained by a workflow allow simultaneous runs. Otherwise, some workflow jobs are expected to run for a long time, since their spawned jobs can sit in pending state for a long time.
A workflow job is marked as failed if a job spawned by a workflow job fails without a failure handler. A failure handler is a `failure` or `always` link in the workflow job template. A job that is canceled is, effectively, considered a failure for the purposes of determining whether a job node has failed.
@ -144,7 +144,7 @@ Workflow jobs cannot be copied directly; instead, a workflow job is implicitly c
### Artifacts
Support for artifacts starts in Ansible and is carried through in Tower. The `set_stats` module is invoked by users, in a playbook, to register facts. Facts are passed in via the `data:` argument. Note that the default `set_stats` parameters are the correct ones to work with Tower (*i.e.*, `per_host: no`). Now that facts are registered, we will describe how facts are used. In Ansible, registered facts are "returned" to the callback plugin(s) via the `playbook_on_stats` event. Ansible users can configure whether or not they want the facts displayed through the global `show_custom_stats` configuration. Note that the `show_custom_stats` does not effect the artifact feature of Tower. This only controls the displaying of `set_stats` fact data in Ansible output (also the output in Ansible playbooks that get run in Tower). Tower uses a custom callback plugin that gathers the fact data set via `set_stats` in the `playbook_on_stats` handler and "ships" it back to Tower, saves it in the database, and makes it available on the job endpoint via the variable `artifacts`. The semantics and usage of `artifacts` throughout a workflow is described elsewhere in this document.
Support for artifacts starts in Ansible and is carried through in AWX. The `set_stats` module is invoked by users, in a playbook, to register facts. Facts are passed in via the `data:` argument. Note that the default `set_stats` parameters are the correct ones to work with AWX (*i.e.*, `per_host: no`). Now that facts are registered, we will describe how facts are used. In Ansible, registered facts are "returned" to the callback plugin(s) via the `playbook_on_stats` event. Ansible users can configure whether or not they want the facts displayed through the global `show_custom_stats` configuration. Note that `show_custom_stats` does not affect the artifact feature of AWX; it only controls the display of `set_stats` fact data in Ansible output (including output from Ansible playbooks run in AWX). AWX uses a custom callback plugin that gathers the fact data set via `set_stats` in the `playbook_on_stats` handler and "ships" it back to AWX, saves it in the database, and makes it available on the job endpoint via the variable `artifacts`. The semantics and usage of `artifacts` throughout a workflow are described elsewhere in this document.
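Downstream use of artifacts can be pictured as a merge of upstream `artifacts` into the next spawned job's variables; this is a conceptual sketch, not the actual implementation:

```python
# Conceptual sketch: fold artifacts from finished upstream jobs into the
# extra variables of the next spawned job.
def build_extra_vars(upstream_jobs, node_extra_vars):
    combined = {}
    for job in upstream_jobs:           # in run order, so later jobs win
        combined.update(job.get('artifacts') or {})
    combined.update(node_extra_vars)    # explicit prompts take precedence
    return combined

print(build_extra_vars([{'artifacts': {'build_id': 17}}], {'env': 'prod'}))
# -> {'build_id': 17, 'env': 'prod'}
```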
### Workflow Run Example

View File

@ -77,11 +77,10 @@ based on the value of `settings.AUTHENTICATION_BACKENDS` *at import time*:
https://github.com/python-social-auth/social-app-django/blob/c1e2795b00b753d58a81fa6a0261d8dae1d9c73d/social_django/utils.py#L13
Our `settings.AUTHENTICATION_BACKENDS` can *change*
dynamically as Tower settings are changed (i.e., if somebody
dynamically as settings are changed (e.g., if somebody
configures Github OAuth2 integration), so we need to
_overwrite_ this in-memory value at the top of every request so
that we have the latest version
see: https://github.com/ansible/tower/issues/1979
### django-oauth-toolkit

View File

@ -27,7 +27,7 @@ def get_version():
if os.path.exists("/etc/debian_version"):
sysinit = "/etc/init.d"
webconfig = "/etc/nginx"
webconfig = "/etc/nginx"
siteconfig = "/etc/nginx/sites-enabled"
# sosreport-3.1 (and newer) look in '/usr/share/sosreport/sos/plugins'
# sosreport-3.0 looks in '/usr/lib/python2.7/dist-packages/sos/plugins'
@ -35,7 +35,7 @@ if os.path.exists("/etc/debian_version"):
sosconfig = "/usr/share/sosreport/sos/plugins"
else:
sysinit = "/etc/rc.d/init.d"
webconfig = "/etc/nginx"
webconfig = "/etc/nginx"
siteconfig = "/etc/nginx/sites-enabled"
# The .spec will create symlinks to support multiple versions of sosreport
sosconfig = "/usr/share/sosreport/sos/plugins"
@ -71,7 +71,7 @@ def proc_data_files(data_files):
if hasattr(sys, 'real_prefix'):
return result
for dir,files in data_files:
for dir, files in data_files:
includes = []
for item in files:
includes.extend(explode_glob_path(item))
@ -97,8 +97,7 @@ setup(
author='Ansible, Inc.',
author_email='info@ansible.com',
description='awx: API, UI and Task Engine for Ansible',
long_description='AWX provides a web-based user interface, REST API and '
'task engine built on top of Ansible',
long_description='AWX provides a web-based user interface, REST API and ' 'task engine built on top of Ansible',
license='Apache License 2.0',
keywords='ansible',
url='http://github.com/ansible/awx',
@ -112,8 +111,7 @@ setup(
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators'
'License :: Apache License 2.0',
'Intended Audience :: System Administrators',
'License :: Apache License 2.0',
'Natural Language :: English',
'Operating System :: OS Independent',
'Operating System :: POSIX',
@ -121,7 +119,7 @@ setup(
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
],
entry_points = {
entry_points={
'console_scripts': [
'awx-manage = awx:manage',
],
@ -131,29 +129,38 @@ setup(
'hashivault_ssh = awx.main.credential_plugins.hashivault:hashivault_ssh_plugin',
'azure_kv = awx.main.credential_plugins.azure_kv:azure_keyvault_plugin',
'aim = awx.main.credential_plugins.aim:aim_plugin',
'centrify_vault_kv = awx.main.credential_plugins.centrify_vault:centrify_plugin'
]
'centrify_vault_kv = awx.main.credential_plugins.centrify_vault:centrify_plugin',
],
},
data_files = proc_data_files([
("%s" % homedir, ["config/wsgi.py",
"awx/static/favicon.ico"]),
("%s" % siteconfig, ["config/awx-nginx.conf"]),
# ("%s" % webconfig, ["config/uwsgi_params"]),
("%s" % sharedir, ["tools/scripts/request_tower_configuration.sh","tools/scripts/request_tower_configuration.ps1"]),
("%s" % docdir, ["docs/licenses/*",]),
("%s" % bindir, ["tools/scripts/automation-controller-service",
"tools/scripts/failure-event-handler",
"tools/scripts/awx-python",
"tools/scripts/ansible-tower-setup"]),
("%s" % sosconfig, ["tools/sosreport/tower.py"])]),
options = {
'aliases': {
'dev_build': 'clean --all egg_info sdist',
'release_build': 'clean --all egg_info -b "" sdist'
},
data_files=proc_data_files(
[
("%s" % homedir, ["awx/static/favicon.ico"]),
("%s" % siteconfig, ["config/awx-nginx.conf"]),
# ("%s" % webconfig, ["config/uwsgi_params"]),
("%s" % sharedir, ["tools/scripts/request_tower_configuration.sh", "tools/scripts/request_tower_configuration.ps1"]),
(
"%s" % docdir,
[
"docs/licenses/*",
],
),
(
"%s" % bindir,
[
"tools/scripts/automation-controller-service",
"tools/scripts/failure-event-handler",
"tools/scripts/awx-python",
"tools/scripts/ansible-tower-setup",
],
),
("%s" % sosconfig, ["tools/sosreport/tower.py"]),
]
),
options={
'aliases': {'dev_build': 'clean --all egg_info sdist', 'release_build': 'clean --all egg_info -b "" sdist'},
'build_scripts': {
'executable': '/usr/bin/awx-python',
},
},
cmdclass={'egg_info_dev': egg_info_dev}
cmdclass={'egg_info_dev': egg_info_dev},
)

View File

@ -18,75 +18,64 @@ from django.utils.timezone import now
base_dir = os.path.abspath( # Convert into absolute path string
os.path.join( # Current file's grandparent directory
os.path.join( # Current file's parent directory
os.path.dirname( # Current file's directory
os.path.abspath(__file__) # Current file path
),
os.pardir
),
os.pardir
os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir),  # parent directory of this file's directory
os.pardir,
)
)
if base_dir not in sys.path:
sys.path.insert(1, base_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings.development") # noqa
django.setup() # noqa
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings.development") # noqa
django.setup() # noqa
from django.db import transaction # noqa
from django.db import transaction # noqa
# awx
from awx.main.models import ( # noqa
Credential, CredentialType, Group, Host, Inventory, Job, JobEvent,
JobHostSummary, JobTemplate, Label, Organization, PrimordialModel, Project,
Team, User, WorkflowJobTemplate, WorkflowJobTemplateNode,
Credential,
CredentialType,
Group,
Host,
Inventory,
Job,
JobEvent,
JobHostSummary,
JobTemplate,
Label,
Organization,
PrimordialModel,
Project,
Team,
User,
WorkflowJobTemplate,
WorkflowJobTemplateNode,
batch_role_ancestor_rebuilding,
)
from awx.main.signals import ( # noqa
disable_activity_stream,
disable_computed_fields
)
from awx.main.signals import disable_activity_stream, disable_computed_fields # noqa
option_list = [
make_option('--organizations', action='store', type='int', default=3,
help='Number of organizations to create'),
make_option('--users', action='store', type='int', default=10,
help='Number of users to create'),
make_option('--teams', action='store', type='int', default=5,
help='Number of teams to create'),
make_option('--projects', action='store', type='int', default=10,
help='Number of projects to create'),
make_option('--job-templates', action='store', type='int', default=20,
help='Number of job templates to create'),
make_option('--credentials', action='store', type='int', default=5,
help='Number of credentials to create'),
make_option('--inventories', action='store', type='int', default=5,
help='Number of credentials to create'),
make_option('--inventory-groups', action='store', type='int', default=10,
help='Number of credentials to create'),
make_option('--inventory-hosts', action='store', type='int', default=40,
help='number of credentials to create'),
make_option('--wfjts', action='store', type='int', default=15,
help='number of workflow job templates to create'),
make_option('--nodes', action='store', type='int', default=200,
help='number of workflow job template nodes to create'),
make_option('--labels', action='store', type='int', default=100,
help='labels to create, will associate 10x as many'),
make_option('--jobs', action='store', type='int', default=200,
help='number of job entries to create'),
make_option('--job-events', action='store', type='int', default=500,
help='number of job event entries to create'),
make_option('--pretend', action='store_true',
help="Don't commit the data to the database"),
make_option('--preset', action='store', type='string', default='',
help="Preset data set to use"),
make_option('--prefix', action='store', type='string', default='',
help="Prefix generated names with this string"),
#make_option('--spread-bias', action='store', type='string', default='exponential',
make_option('--organizations', action='store', type='int', default=3, help='Number of organizations to create'),
make_option('--users', action='store', type='int', default=10, help='Number of users to create'),
make_option('--teams', action='store', type='int', default=5, help='Number of teams to create'),
make_option('--projects', action='store', type='int', default=10, help='Number of projects to create'),
make_option('--job-templates', action='store', type='int', default=20, help='Number of job templates to create'),
make_option('--credentials', action='store', type='int', default=5, help='Number of credentials to create'),
make_option('--inventories', action='store', type='int', default=5, help='Number of inventories to create'),
make_option('--inventory-groups', action='store', type='int', default=10, help='Number of inventory groups to create'),
make_option('--inventory-hosts', action='store', type='int', default=40, help='Number of inventory hosts to create'),
make_option('--wfjts', action='store', type='int', default=15, help='number of workflow job templates to create'),
make_option('--nodes', action='store', type='int', default=200, help='number of workflow job template nodes to create'),
make_option('--labels', action='store', type='int', default=100, help='labels to create, will associate 10x as many'),
make_option('--jobs', action='store', type='int', default=200, help='number of job entries to create'),
make_option('--job-events', action='store', type='int', default=500, help='number of job event entries to create'),
make_option('--pretend', action='store_true', help="Don't commit the data to the database"),
make_option('--preset', action='store', type='string', default='', help="Preset data set to use"),
make_option('--prefix', action='store', type='string', default='', help="Prefix generated names with this string"),
# make_option('--spread-bias', action='store', type='string', default='exponential',
# help='"exponential" to bias associations exponentially front loaded for - for ex'),
]
parser = OptionParser(option_list=option_list)
@ -97,8 +86,7 @@ options = vars(options)
if options['preset']:
print(' Using preset data numbers set ' + str(options['preset']))
# Read the numbers of resources from presets file, if provided
presets_filename = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'presets.tsv'))
presets_filename = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'presets.tsv'))
with open(presets_filename) as f:
text = f.read()
@ -117,36 +105,36 @@ if options['preset']:
options['prefix'] = options['preset']
n_organizations = int(options['organizations'])
n_users = int(options['users'])
n_teams = int(options['teams'])
n_projects = int(options['projects'])
n_job_templates = int(options['job_templates'])
n_credentials = int(options['credentials'])
n_inventories = int(options['inventories'])
n_organizations = int(options['organizations'])
n_users = int(options['users'])
n_teams = int(options['teams'])
n_projects = int(options['projects'])
n_job_templates = int(options['job_templates'])
n_credentials = int(options['credentials'])
n_inventories = int(options['inventories'])
n_inventory_groups = int(options['inventory_groups'])
n_inventory_hosts = int(options['inventory_hosts'])
n_wfjts = int(options['wfjts'])
n_nodes = int(options['nodes'])
n_labels = int(options['labels'])
n_jobs = int(options['jobs'])
n_job_events = int(options['job_events'])
prefix = options['prefix']
n_inventory_hosts = int(options['inventory_hosts'])
n_wfjts = int(options['wfjts'])
n_nodes = int(options['nodes'])
n_labels = int(options['labels'])
n_jobs = int(options['jobs'])
n_job_events = int(options['job_events'])
prefix = options['prefix']
organizations = []
users = []
teams = []
projects = []
job_templates = []
credentials = []
inventories = []
organizations = []
users = []
teams = []
projects = []
job_templates = []
credentials = []
inventories = []
inventory_groups = []
inventory_hosts = []
wfjts = []
nodes = []
labels = []
jobs = []
#job_events = []
inventory_hosts = []
wfjts = []
nodes = []
labels = []
jobs = []
# job_events = []
def spread(n, m):
@ -162,7 +150,7 @@ def spread(n, m):
for i in range(m):
n_in_this_slot = n // 2
n-= n_in_this_slot
n -= n_in_this_slot
ret[i] += n_in_this_slot
if n > 0 and len(ret):
ret[0] += n
@ -170,7 +158,7 @@ def spread(n, m):
ids = defaultdict(lambda: 0)
bulk_data_description = 'From Tower bulk-data script'
bulk_data_description = 'From AWX bulk-data script'
# function to cycle through a list
@ -211,39 +199,38 @@ startTime = datetime.now()
def make_the_data():
with disable_activity_stream():
with batch_role_ancestor_rebuilding(), disable_computed_fields():
admin, created = User.objects.get_or_create(username = 'admin', is_superuser=True)
admin, created = User.objects.get_or_create(username='admin', is_superuser=True)
if created:
admin.is_superuser = True
admin.save()
admin.set_password('test')
admin.save()
org_admin, created = User.objects.get_or_create(username = 'org_admin')
org_admin, created = User.objects.get_or_create(username='org_admin')
if created:
org_admin.set_password('test')
org_admin.save()
org_member, created = User.objects.get_or_create(username = 'org_member')
org_member, created = User.objects.get_or_create(username='org_member')
if created:
org_member.set_password('test')
org_member.save()
prj_admin, created = User.objects.get_or_create(username = 'prj_admin')
prj_admin, created = User.objects.get_or_create(username='prj_admin')
if created:
prj_admin.set_password('test')
prj_admin.save()
jt_admin, created = User.objects.get_or_create(username = 'jt_admin')
jt_admin, created = User.objects.get_or_create(username='jt_admin')
if created:
jt_admin.set_password('test')
jt_admin.save()
inv_admin, created = User.objects.get_or_create(username = 'inv_admin')
inv_admin, created = User.objects.get_or_create(username='inv_admin')
if created:
inv_admin.set_password('test')
inv_admin.save()
print('# Creating %d organizations' % n_organizations)
for i in range(n_organizations):
sys.stdout.write('\r%d ' % (i + 1))
@ -266,7 +253,7 @@ def make_the_data():
for i in range(n):
ids['user'] += 1
user_id = ids['user']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, organizations[org_idx].name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, organizations[org_idx].name, i + 1))
sys.stdout.flush()
user, _ = User.objects.get_or_create(username='%suser-%d' % (prefix, user_id))
organizations[org_idx].member_role.members.add(user)
@ -286,12 +273,12 @@ def make_the_data():
for i in range(n):
ids['team'] += 1
team_id = ids['team']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i + 1))
sys.stdout.flush()
team, _ = Team.objects.get_or_create(
name='%s Team %d Org %d' % (prefix, team_id, org_idx), organization=org,
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen))
name='%s Team %d Org %d' % (prefix, team_id, org_idx),
organization=org,
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
)
teams.append(team)
org_idx += 1
@ -318,7 +305,6 @@ def make_the_data():
for team in org_teams:
team.member_role.members.add(org_users[0])
print('# Creating %d credentials for users' % (n_credentials - n_credentials // 2))
user_idx = 0
for n in spread(n_credentials - n_credentials // 2, n_users):
@ -330,9 +316,8 @@ def make_the_data():
credential_id = ids['credential']
credential, _ = Credential.objects.get_or_create(
name='%s Credential %d User %d' % (prefix, credential_id, user_idx),
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen)),
credential_type=CredentialType.objects.filter(namespace='ssh').first()
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
credential_type=CredentialType.objects.filter(namespace='ssh').first(),
)
credential.admin_role.members.add(user)
credentials.append(credential)
@ -353,9 +338,8 @@ def make_the_data():
credential_id = ids['credential']
credential, _ = Credential.objects.get_or_create(
name='%s Credential %d team %d' % (prefix, credential_id, team_idx),
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen)),
credential_type=CredentialType.objects.filter(namespace='ssh').first()
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
credential_type=CredentialType.objects.filter(namespace='ssh').first(),
)
credential.admin_role.parents.add(team.member_role)
credentials.append(credential)
@ -369,22 +353,33 @@ def make_the_data():
for i in range(n):
ids['project'] += 1
project_id = ids['project']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i + 1))
sys.stdout.flush()
project, _ = Project.objects.get_or_create(
name='%s Project %d Org %d' % (prefix, project_id, org_idx),
organization=org,
defaults=dict(
created_by=next(creator_gen), modified_by=next(modifier_gen),
created_by=next(creator_gen),
modified_by=next(modifier_gen),
scm_url='https://github.com/ansible/test-playbooks.git',
scm_type='git',
playbook_files=[
"check.yml", "debug-50.yml", "debug.yml", "debug2.yml",
"debug_extra_vars.yml", "dynamic_inventory.yml",
"environ_test.yml", "fail_unless.yml", "pass_unless.yml",
"pause.yml", "ping-20.yml", "ping.yml",
"setfact_50.yml", "vault.yml"
])
"check.yml",
"debug-50.yml",
"debug.yml",
"debug2.yml",
"debug_extra_vars.yml",
"dynamic_inventory.yml",
"environ_test.yml",
"fail_unless.yml",
"pass_unless.yml",
"pause.yml",
"ping-20.yml",
"ping.yml",
"setfact_50.yml",
"vault.yml",
],
),
)
projects.append(project)
if org_idx == 0 and i == 0:
@ -393,7 +388,6 @@ def make_the_data():
org_idx += 1
print('')
print('# Creating %d inventories' % n_inventories)
org_idx = 0
for n in spread(n_inventories, min(n_inventories // 4 + 1, n_organizations)):
@ -401,14 +395,13 @@ def make_the_data():
for i in range(n):
ids['inventory'] += 1
inventory_id = ids['inventory']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i + 1))
sys.stdout.flush()
inventory, _ = Inventory.objects.get_or_create(
name='%s Inventory %d Org %d' % (prefix, inventory_id, org_idx),
organization=org,
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen)),
variables='{"ansible_connection": "local"}'
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
variables='{"ansible_connection": "local"}',
)
inventories.append(inventory)
if org_idx == 0 and i == 0:
@ -417,7 +410,6 @@ def make_the_data():
org_idx += 1
print('')
print('# Creating %d inventory_groups' % n_inventory_groups)
inv_idx = 0
for n in spread(n_inventory_groups, n_inventories):
@ -426,13 +418,12 @@ def make_the_data():
for i in range(n):
ids['group'] += 1
group_id = ids['group']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, inventory.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, inventory.name, i + 1))
sys.stdout.flush()
group, _ = Group.objects.get_or_create(
name='%s Group %d Inventory %d' % (prefix, group_id, inv_idx),
inventory=inventory,
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen))
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
)
# Have each group have up to 3 parent groups
for parent_n in range(3):
@ -447,7 +438,6 @@ def make_the_data():
inv_idx += 1
print('')
print('# Creating %d inventory_hosts' % n_inventory_hosts)
group_idx = 0
for n in spread(n_inventory_hosts, n_inventory_groups):
@ -455,13 +445,12 @@ def make_the_data():
for i in range(n):
ids['host'] += 1
host_id = ids['host']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, group.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, group.name, i + 1))
sys.stdout.flush()
host, _ = Host.objects.get_or_create(
name='%s.host-%06d.group-%05d.dummy' % (prefix, host_id, group_idx),
inventory=group.inventory,
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen))
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
)
# Add the host to up to 3 groups
host.groups.add(group)
@ -482,7 +471,7 @@ def make_the_data():
for i in range(n):
ids['job_template'] += 1
job_template_id = ids['job_template']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, project.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, project.name, i + 1))
sys.stdout.flush()
inventory = None
@ -499,7 +488,8 @@ def make_the_data():
created_by=next(creator_gen),
modified_by=next(modifier_gen),
playbook="debug.yml",
**extra_kwargs)
**extra_kwargs
),
)
job_template.credentials.add(next(credential_gen))
if ids['job_template'] % 7 == 0:
@ -522,14 +512,13 @@ def make_the_data():
for i in range(n):
ids['wfjts'] += 1
wfjt_id = ids['wfjts']
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, org.name, i + 1))
sys.stdout.flush()
wfjt, _ = WorkflowJobTemplate.objects.get_or_create(
name='%s WFJT %d Org %d' % (prefix, wfjt_id, org_idx),
description=bulk_data_description,
organization=org,
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen))
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
)
wfjt._is_new = _
wfjts.append(wfjt)
@ -550,19 +539,13 @@ def make_the_data():
wfjt_nodes = []
for i in range(n):
ids['nodes'] += 1
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, wfjt.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, wfjt.name, i + 1))
sys.stdout.flush()
kwargs = dict(
workflow_job_template=wfjt,
unified_job_template=next(jt_gen),
modified=now()
)
kwargs = dict(workflow_job_template=wfjt, unified_job_template=next(jt_gen), modified=now())
if i % 2 == 0:
# only apply inventories for every other node
kwargs['inventory'] = next(inv_gen)
node, _ = WorkflowJobTemplateNode.objects.get_or_create(
**kwargs
)
node, _ = WorkflowJobTemplateNode.objects.get_or_create(**kwargs)
if i % 3 == 0:
# only apply prompted credential every 3rd node
node.credentials.add(next(cred_gen))
@ -599,8 +582,7 @@ def make_the_data():
label, _ = Label.objects.get_or_create(
name='%sL_%do%d' % (prefix, label_id, org_idx),
organization=org,
defaults=dict(created_by=next(creator_gen),
modified_by=next(modifier_gen))
defaults=dict(created_by=next(creator_gen), modified_by=next(modifier_gen)),
)
labels.append(label)
org_idx += 1
@ -643,7 +625,7 @@ def make_the_data():
for n in spread(n_jobs, n_job_templates):
job_template = job_templates[job_template_idx]
for i in range(n):
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, job_template.name, i+ 1))
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, job_template.name, i + 1))
sys.stdout.flush()
if len(jobs) % 4 == 0:
job_stat = 'failed'
@ -653,8 +635,10 @@ def make_the_data():
job_stat = 'successful'
job, _ = Job.objects.get_or_create(
job_template=job_template,
status=job_stat, name="%s-%d" % (job_template.name, job_i),
project=job_template.project, inventory=job_template.inventory,
status=job_stat,
name="%s-%d" % (job_template.name, job_i),
project=job_template.project,
inventory=job_template.inventory,
)
for ec in job_template.credentials.all():
job.credentials.add(ec)
@ -675,13 +659,12 @@ def make_the_data():
if job_template.inventory:
inv_groups = [g for g in job_template.inventory.groups.all()]
if len(inv_groups):
JobHostSummary.objects.bulk_create([
JobHostSummary(
job=job, host=h, host_name=h.name, processed=1,
created=now(), modified=now()
)
for h in inv_groups[group_idx % len(inv_groups)].hosts.all()[:100]
])
JobHostSummary.objects.bulk_create(
[
JobHostSummary(job=job, host=h, host_name=h.name, processed=1, created=now(), modified=now())
for h in inv_groups[group_idx % len(inv_groups)].hosts.all()[:100]
]
)
group_idx += 1
job_template_idx += 1
if n:
@ -703,15 +686,7 @@ def make_the_data():
n_subgroup = n % MAX_BULK_CREATE
sys.stdout.write('\r Creating %d job events for job %d, subgroup: %d' % (n, job.id, j + 1))
sys.stdout.flush()
JobEvent.objects.bulk_create([
JobEvent(
created=now(),
modified=now(),
job=job,
event='runner_on_ok'
)
for i in range(n_subgroup)
])
JobEvent.objects.bulk_create([JobEvent(created=now(), modified=now(), job=job, event='runner_on_ok') for i in range(n_subgroup)])
job_idx += 1
if n:
print('')

View File

@ -1,7 +1,7 @@
---
version: '2'
services:
# Primary Tower Development Container link
# Primary AWX Development Container link
awx_1:
links:
- hashivault

View File

@ -95,7 +95,7 @@ and
"LOG_AGGREGATOR_TCP_TIMEOUT": 5
}
```
These can be entered via Configure-Tower-in-Tower by making a POST to
These can be entered via the settings endpoint by making a POST to
`/api/v2/settings/logging/`.
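For example, with the `requests` library (host and credentials are placeholders):

```python
# Sketch: submit the logging settings shown above; host and credentials
# are placeholders.
import requests

resp = requests.post(
    'https://awx.example.com/api/v2/settings/logging/',
    auth=('admin', 'password'),
    json={'LOG_AGGREGATOR_TCP_TIMEOUT': 5},
)
resp.raise_for_status()
```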
### Connecting Logstash to 3rd Party Receivers

View File

@ -1,7 +1,7 @@
---
version: '2'
services:
# Tower Development Cluster
# AWX Development Cluster
tower_1:
links:
- logstash

View File

@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Enable Tower virtualenv
# Enable AWX virtualenv
for venv_path in /var/lib/awx/venv/awx; do
if [ -f $venv_path/bin/activate ]; then
. $venv_path/bin/activate

View File

@ -14,12 +14,12 @@ for c in ';/?:@=&[]':
def _get_named_url_graph(url, auth):
"""Get the graph data structure Tower used to manage all named URLs.
"""Get the graph data structure AWX used to manage all named URLs.
Args:
url: String representing the URL of the configuration endpoint from which
to fetch graph information.
auth: Tuple of username + password to authenticate connection to Tower.
auth: Tuple of username + password to authenticate connection to AWX.
Return:
A dict of graph nodes that together represent the graph structure. Each
@ -48,8 +48,7 @@ def _encode_uri(text):
for c in URL_PATH_RESERVED_CHARSET:
if c in text:
text = text.replace(c, URL_PATH_RESERVED_CHARSET[c])
text = text.replace(NAMED_URL_RES_INNER_DILIMITER,
'[%s]' % NAMED_URL_RES_INNER_DILIMITER)
text = text.replace(NAMED_URL_RES_INNER_DILIMITER, '[%s]' % NAMED_URL_RES_INNER_DILIMITER)
return text
@ -81,8 +80,8 @@ def _get_named_url_identifier(url, named_url_graph, resource, tower_host, auth,
component from.
named_url_graph: The graph structure used to DFS against.
resource: Key name of the current graph node.
tower_host: String representing the host name of Tower backend.
auth: Tuple of username + password to authenticate connection to Tower.
tower_host: String representing the host name of the AWX backend.
auth: Tuple of username + password to authenticate the connection to AWX.
ret: list of strings storing components that would later be joined into
the final named URL identifier.
@ -97,8 +96,7 @@ def _get_named_url_identifier(url, named_url_graph, resource, tower_host, auth,
for next_ in named_url_graph[resource]['adj_list']:
next_fk, next_res = tuple(next_)
if next_fk in r['related']:
_get_named_url_identifier(tower_host.strip('/') + r['related'][next_fk],
named_url_graph, next_res, tower_host, auth, ret)
_get_named_url_identifier(tower_host.strip('/') + r['related'][next_fk], named_url_graph, next_res, tower_host, auth, ret)
else:
ret.append('')
@ -107,9 +105,9 @@ def main(username=None, password=None, tower_host=None, resource=None, pk=None):
"""Main function for generating and printing named URL of a resource object given its pk.
Args:
username: String representing the username needed to authenticating Tower.
password: String representing the password needed to authenticating Tower.
tower_host: String representing the host name of Tower backend.
username: String representing the username needed to authenticate to AWX.
password: String representing the password needed to authenticate to AWX.
tower_host: String representing the host name of the AWX backend.
resource: REST API name of a specific resource, e.g. name for resource inventory
is 'inventories'.
pk: Primary key of the resource object whose named URL will be derived.
@ -125,27 +123,15 @@ def main(username=None, password=None, tower_host=None, resource=None, pk=None):
auth = (username, password)
named_url_graph = _get_named_url_graph(conf_url, auth)
named_url_identifier = []
_get_named_url_identifier(start_url, named_url_graph, resource,
tower_host, auth, named_url_identifier)
print('%s/api/v2/%s/%s/' % (tower_host.strip('/'), resource.strip('/'),
NAMED_URL_RES_DILIMITER.join(named_url_identifier)))
_get_named_url_identifier(start_url, named_url_graph, resource, tower_host, auth, named_url_identifier)
print('%s/api/v2/%s/%s/' % (tower_host.strip('/'), resource.strip('/'), NAMED_URL_RES_DILIMITER.join(named_url_identifier)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--username', type=str, required=True,
help='Name of the Tower user for making requests',
dest='username', metavar='STR')
parser.add_argument('--password', type=str, required=True,
help='Password of the Tower user for making requests',
dest='password', metavar='STR')
parser.add_argument('--tower-host', type=str, required=True,
help='Tower host name, like "http://127.0.0.1"',
dest='tower_host', metavar='STR')
parser.add_argument('--resource', type=str, required=True,
help='Name of the resource in REST endpoints',
dest='resource', metavar='STR')
parser.add_argument('--pk', type=int, required=True,
help='Primary key of resource object whose named URL will be derived',
dest='pk', metavar='INT')
parser.add_argument('--username', type=str, required=True, help='Name of the user for making requests', dest='username', metavar='STR')
parser.add_argument('--password', type=str, required=True, help='Password of the user for making requests', dest='password', metavar='STR')
parser.add_argument('--tower-host', type=str, required=True, help='API host name, like "http://127.0.0.1"', dest='tower_host', metavar='STR')
parser.add_argument('--resource', type=str, required=True, help='Name of the resource in REST endpoints', dest='resource', metavar='STR')
parser.add_argument('--pk', type=int, required=True, help='Primary key of resource object whose named URL will be derived', dest='pk', metavar='INT')
main(**vars(parser.parse_args()))