Mirror of https://github.com/ansible/awx.git

Merge pull request #13423 from ansible/feature_web-task-split

Allow the web and task containers to be deployed as separate deployments on Kubernetes.

commit acb22f0131

Makefile (35 changed lines)
@@ -217,12 +217,6 @@ daphne:
 	fi; \
 	daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer
 
-wsbroadcast:
-	@if [ "$(VENV_BASE)" ]; then \
-		. $(VENV_BASE)/awx/bin/activate; \
-	fi; \
-	$(PYTHON) manage.py run_wsbroadcast
-
 ## Run to start the background task dispatcher for development.
 dispatcher:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -230,7 +224,6 @@ dispatcher:
 	fi; \
 	$(PYTHON) manage.py run_dispatcher
 
-
 ## Run to start the zeromq callback receiver
 receiver:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -247,6 +240,34 @@ jupyter:
 	fi; \
 	$(MANAGEMENT_COMMAND) shell_plus --notebook
 
+## Start the rsyslog configurer process in background in development environment.
+run-rsyslog-configurer:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_rsyslog_configurer
+
+## Start cache_clear process in background in development environment.
+run-cache-clear:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_cache_clear
+
+## Start the wsrelay process in background in development environment.
+run-wsrelay:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_wsrelay
+
+## Start the heartbeat process in background in development environment.
+run-heartbeet:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_heartbeet
+
 reports:
 	mkdir -p $@
 
@@ -5,11 +5,13 @@ import threading
 import time
 import os
 
+from concurrent.futures import ThreadPoolExecutor
+
 # Django
 from django.conf import LazySettings
 from django.conf import settings, UserSettingsHolder
 from django.core.cache import cache as django_cache
-from django.core.exceptions import ImproperlyConfigured
+from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
 from django.db import transaction, connection
 from django.db.utils import Error as DBError, ProgrammingError
 from django.utils.functional import cached_property
@@ -157,7 +159,7 @@ class EncryptedCacheProxy(object):
         obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
         if obj_id is empty:
             logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key))
-            obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
+            obj_id = getattr(_get_setting_from_db(self.registry, key), 'pk', None)
         elif obj_id == SETTING_CACHE_NONE:
             obj_id = None
         return method(TransientSetting(pk=obj_id, value=value), 'value')
@@ -166,11 +168,6 @@ class EncryptedCacheProxy(object):
         # a no-op; it just returns the provided value
         return value
 
-    def _get_setting_from_db(self, key):
-        field = self.registry.get_setting_field(key)
-        if not field.read_only:
-            return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
-
     def __getattr__(self, name):
         return getattr(self.cache, name)
 
@@ -186,6 +183,22 @@ def get_settings_to_cache(registry):
     return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)])
 
 
+# Will first attempt to get the setting from the database in synchronous mode.
+# If called from an async context, it will attempt to get the setting from the database in a thread.
+def _get_setting_from_db(registry, key):
+    def get_settings_from_db_sync(registry, key):
+        field = registry.get_setting_field(key)
+        if not field.read_only or key == 'INSTALL_UUID':
+            return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
+
+    try:
+        return get_settings_from_db_sync(registry, key)
+    except SynchronousOnlyOperation:
+        with ThreadPoolExecutor(max_workers=1) as executor:
+            future = executor.submit(get_settings_from_db_sync, registry, key)
+            return future.result()
+
+
 def get_cache_value(value):
     """Returns the proper special cache setting for a value
     based on instance type.
@@ -345,7 +358,7 @@ class SettingsWrapper(UserSettingsHolder):
             setting_id = None
             # this value is read-only, however we *do* want to fetch its value from the database
             if not field.read_only or name == 'INSTALL_UUID':
-                setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
+                setting = _get_setting_from_db(self.registry, name)
                 if setting:
                     if getattr(field, 'encrypted', False):
                         value = decrypt_field(setting, 'value')
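The ThreadPoolExecutor fallback above works because Django raises SynchronousOnlyOperation only when the ORM is touched from a thread with a running event loop; a worker thread has no loop, so the same query succeeds there. A minimal, self-contained sketch of that pattern (the fetch_row function is a hypothetical stand-in for the ORM call):

    import asyncio
    from concurrent.futures import ThreadPoolExecutor

    class SynchronousOnlyOperation(Exception):
        """Stand-in for django.core.exceptions.SynchronousOnlyOperation."""

    def fetch_row():
        # Hypothetical blocking lookup; like Django's ORM, it refuses to run
        # inside a thread that has a running event loop.
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            return {'key': 'INSTALL_UUID', 'value': 'abc123'}  # no loop here: safe
        raise SynchronousOnlyOperation('You cannot call this from an async context!')

    def fetch_row_safely():
        # Same shape as _get_setting_from_db: try inline, fall back to a thread.
        try:
            return fetch_row()
        except SynchronousOnlyOperation:
            with ThreadPoolExecutor(max_workers=1) as executor:
                return executor.submit(fetch_row).result()

    async def main():
        print(fetch_row_safely())  # works even though a loop is running here

    asyncio.run(main())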
@@ -94,9 +94,7 @@ def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):
 
 @pytest.mark.django_db
 def test_setting_singleton_update(api_request, dummy_setting):
-    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
-        'awx.conf.views.handle_setting_changes'
-    ):
+    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
         api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3})
         response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
         assert response.data['FOO_BAR'] == 3
@@ -112,7 +110,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, dummy_setting):
     # sure that the _Forbidden validator doesn't get used for the
     # fields. See also https://github.com/ansible/awx/issues/4099.
     with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch(
-        'awx.conf.views.handle_setting_changes'
+        'awx.conf.views.clear_setting_cache'
     ):
         api_request(
             'patch',
@@ -126,7 +124,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, dummy_setting):
 @pytest.mark.django_db
 def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
     with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch(
-        'awx.conf.views.handle_setting_changes'
+        'awx.conf.views.clear_setting_cache'
     ):
         api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5})
         response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
@@ -136,7 +134,7 @@ def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
 @pytest.mark.django_db
 def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
     with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch(
-        'awx.conf.views.handle_setting_changes'
+        'awx.conf.views.clear_setting_cache'
     ):
         api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'})
         assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
@@ -155,16 +153,14 @@ def test_setting_singleton_update_runs_custom_validate(api_request, dummy_setting):
 
     with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate(
         'foobar', func_raising_exception
-    ), mock.patch('awx.conf.views.handle_setting_changes'):
+    ), mock.patch('awx.conf.views.clear_setting_cache'):
         response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23})
         assert response.status_code == 400
 
 
 @pytest.mark.django_db
 def test_setting_singleton_delete(api_request, dummy_setting):
-    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
-        'awx.conf.views.handle_setting_changes'
-    ):
+    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
         api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
         response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
         assert not response.data['FOO_BAR']
@@ -173,7 +169,7 @@ def test_setting_singleton_delete(api_request, dummy_setting):
 @pytest.mark.django_db
 def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
     with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch(
-        'awx.conf.views.handle_setting_changes'
+        'awx.conf.views.clear_setting_cache'
    ):
         api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
         response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
@@ -26,10 +26,11 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdateDestroyAPIView
 from awx.api.permissions import IsSystemAdminOrAuditor
 from awx.api.versioning import reverse
 from awx.main.utils import camelcase_to_underscore
-from awx.main.tasks.system import handle_setting_changes
+from awx.main.tasks.system import clear_setting_cache
 from awx.conf.models import Setting
 from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
 from awx.conf import settings_registry
+from awx.main.utils.external_logging import reconfigure_rsyslog
 
 
 SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
@@ -118,7 +119,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
                 setting.save(update_fields=['value'])
             settings_change_list.append(key)
         if settings_change_list:
-            connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
+            connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
+            if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
+                # call notify to rsyslog. no data is needed so payload is empty
+                reconfigure_rsyslog.delay()
 
     def destroy(self, request, *args, **kwargs):
         instance = self.get_object()
@@ -133,7 +137,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
                 setting.delete()
                 settings_change_list.append(setting.key)
         if settings_change_list:
-            connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
+            connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
+            if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
+                # call notify to rsyslog. no data is needed so payload is empty
+                reconfigure_rsyslog.delay()
 
         # When TOWER_URL_BASE is deleted from the API, reset it to the hostname
         # used to make the request as a default.
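Both handlers defer the cache-clear message with connection.on_commit rather than calling .delay() inline: the publish happens only after the settings transaction commits, so whichever process handles clear_setting_cache re-reads the new rows rather than the pre-commit state. A sketch of the ordering, with a print standing in for the real dispatched task:

    from django.db import connection, transaction

    class clear_setting_cache:  # stand-in for the real @task-decorated function
        @staticmethod
        def delay(keys):
            print(f'publish clear_setting_cache({keys}) via pg_notify')

    def patch_settings(changed_keys):
        with transaction.atomic():
            # ... Setting rows are saved here ...
            # Registered callbacks run only after COMMIT; if the transaction
            # rolls back, no message is ever published.
            connection.on_commit(lambda: clear_setting_cache.delay(changed_keys))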
@@ -4,11 +4,11 @@ import logging
 # AWX
 from awx.main.analytics.subsystem_metrics import Metrics
 from awx.main.dispatch.publish import task
-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 
 logger = logging.getLogger('awx.main.scheduler')
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def send_subsystem_metrics():
     Metrics().send_metrics()
@@ -65,7 +65,7 @@ class FixedSlidingWindow:
         return sum(self.buckets.values()) or 0
 
 
-class BroadcastWebsocketStatsManager:
+class RelayWebsocketStatsManager:
     def __init__(self, event_loop, local_hostname):
         self._local_hostname = local_hostname
 
@@ -74,7 +74,7 @@ class BroadcastWebsocketStatsManager:
         self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
 
     def new_remote_host_stats(self, remote_hostname):
-        self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname, remote_hostname)
+        self._stats[remote_hostname] = RelayWebsocketStats(self._local_hostname, remote_hostname)
         return self._stats[remote_hostname]
 
     def delete_remote_host_stats(self, remote_hostname):
@@ -107,7 +107,7 @@ class BroadcastWebsocketStatsManager:
         return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))
 
 
-class BroadcastWebsocketStats:
+class RelayWebsocketStats:
     def __init__(self, local_hostname, remote_hostname):
         self._local_hostname = local_hostname
         self._remote_hostname = remote_hostname
@@ -9,7 +9,7 @@ from django.apps import apps
 from awx.main.consumers import emit_channel_notification
 from awx.main.utils import is_testing
 
-root_key = 'awx_metrics'
+root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
 logger = logging.getLogger('awx.main.analytics')
 
 
@@ -264,13 +264,6 @@ class Metrics:
             data[field] = self.METRICS[field].decode(self.conn)
         return data
 
-    def store_metrics(self, data_json):
-        # called when receiving metrics from other instances
-        data = json.loads(data_json)
-        if self.instance_name != data['instance']:
-            logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
-        self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])
-
     def should_pipe_execute(self):
         if self.metrics_have_changed is False:
             return False
@@ -309,9 +302,9 @@ class Metrics:
                     'instance': self.instance_name,
                     'metrics': self.serialize_local_metrics(),
                 }
-                # store a local copy as well
-                self.store_metrics(json.dumps(payload))
+
                 emit_channel_notification("metrics", payload)
+
                 self.previous_send_metrics.set(current_time)
                 self.previous_send_metrics.store_value(self.conn)
         finally:
@@ -65,7 +65,7 @@ ENV_BLOCKLIST = frozenset(
         'INVENTORY_HOSTVARS',
         'AWX_HOST',
         'PROJECT_REVISION',
-        'SUPERVISOR_WEB_CONFIG_PATH',
+        'SUPERVISOR_CONFIG_PATH',
     )
 )
 
@@ -3,6 +3,7 @@ import logging
 import time
 import hmac
 import asyncio
+import redis
 
 from django.core.serializers.json import DjangoJSONEncoder
 from django.conf import settings
@@ -80,7 +81,7 @@ class WebsocketSecretAuthHelper:
         WebsocketSecretAuthHelper.verify_secret(secret)
 
 
-class BroadcastConsumer(AsyncJsonWebsocketConsumer):
+class RelayConsumer(AsyncJsonWebsocketConsumer):
     async def connect(self):
         try:
             WebsocketSecretAuthHelper.is_authorized(self.scope)
@@ -100,6 +101,21 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
     async def internal_message(self, event):
         await self.send(event['text'])
 
+    async def receive_json(self, data):
+        (group, message) = unwrap_broadcast_msg(data)
+        if group == "metrics":
+            message = json.loads(message['text'])
+            conn = redis.Redis.from_url(settings.BROKER_URL)
+            conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
+        else:
+            await self.channel_layer.group_send(group, message)
+
+    async def consumer_subscribe(self, event):
+        await self.send_json(event)
+
+    async def consumer_unsubscribe(self, event):
+        await self.send_json(event)
+
 
 class EventConsumer(AsyncJsonWebsocketConsumer):
     async def connect(self):
@@ -128,6 +144,11 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
             self.channel_name,
         )
 
+        await self.channel_layer.group_send(
+            settings.BROADCAST_WEBSOCKET_GROUP_NAME,
+            {"type": "consumer.unsubscribe", "groups": list(current_groups), "origin_channel": self.channel_name},
+        )
+
     @database_sync_to_async
     def user_can_see_object_id(self, user_access, oid):
         # At this point user is a channels.auth.UserLazyObject object
@@ -176,9 +197,20 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
             self.channel_name,
         )
 
+        if len(old_groups):
+            await self.channel_layer.group_send(
+                settings.BROADCAST_WEBSOCKET_GROUP_NAME,
+                {"type": "consumer.unsubscribe", "groups": list(old_groups), "origin_channel": self.channel_name},
+            )
+
         new_groups_exclusive = new_groups - current_groups
         for group_name in new_groups_exclusive:
             await self.channel_layer.group_add(group_name, self.channel_name)
 
+        await self.channel_layer.group_send(
+            settings.BROADCAST_WEBSOCKET_GROUP_NAME,
+            {"type": "consumer.subscribe", "groups": list(new_groups), "origin_channel": self.channel_name},
+        )
         self.scope['session']['groups'] = new_groups
         await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)})
 
@@ -200,9 +232,11 @@ def _dump_payload(payload):
     return None
 
 
-def emit_channel_notification(group, payload):
-    from awx.main.wsbroadcast import wrap_broadcast_msg  # noqa
+def unwrap_broadcast_msg(payload: dict):
+    return (payload['group'], payload['message'])
+
 
+def emit_channel_notification(group, payload):
     payload_dumped = _dump_payload(payload)
     if payload_dumped is None:
         return
@@ -212,16 +246,6 @@ def emit_channel_notification(group, payload):
     run_sync(
         channel_layer.group_send(
             group,
-            {"type": "internal.message", "text": payload_dumped},
-        )
-    )
-
-    run_sync(
-        channel_layer.group_send(
-            settings.BROADCAST_WEBSOCKET_GROUP_NAME,
-            {
-                "type": "internal.message",
-                "text": wrap_broadcast_msg(group, payload_dumped),
-            },
+            {"type": "internal.message", "text": payload_dumped, "needs_relay": True},
         )
     )
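The net effect of these changes is that a web node publishes each notification exactly once, to its local group, carrying a needs_relay flag, instead of publishing a second wrapped copy onto a global broadcast group; the relay process forwards flagged messages to other web nodes, whose RelayConsumer.receive_json unwraps them. A small sketch of the unwrap step (the sample payload is invented for illustration):

    import json

    def unwrap_broadcast_msg(payload: dict):
        # Mirrors the helper added above: relayed frames carry a target group
        # plus the original channel-layer message.
        return (payload['group'], payload['message'])

    incoming = {
        'group': 'jobs-status_changed',
        'message': {'type': 'internal.message', 'text': json.dumps({'status': 'running'})},
    }

    group, message = unwrap_broadcast_msg(incoming)
    # 'metrics' frames are stored in Redis; everything else is re-broadcast
    # to the local channel-layer group of the same name.
    print(group, message)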
@@ -63,7 +63,7 @@ class RecordedQueryLog(object):
         if not os.path.isdir(self.dest):
             os.makedirs(self.dest)
         progname = ' '.join(sys.argv)
-        for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
+        for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsrelay'):
             if match in progname:
                 progname = match
                 break
@@ -1,3 +1,4 @@
+import os
 import psycopg2
 import select
 
@@ -6,7 +7,6 @@ from contextlib import contextmanager
 from django.conf import settings
 from django.db import connection as pg_connection
 
-
 NOT_READY = ([], [], [])
 
 
@@ -14,6 +14,29 @@ def get_local_queuename():
     return settings.CLUSTER_HOST_ID
 
 
+def get_task_queuename():
+    if os.getenv('AWX_COMPONENT') != 'web':
+        return settings.CLUSTER_HOST_ID
+
+    from awx.main.models.ha import Instance
+
+    random_task_instance = (
+        Instance.objects.filter(
+            node_type__in=(Instance.Types.CONTROL, Instance.Types.HYBRID),
+            node_state=Instance.States.READY,
+            enabled=True,
+        )
+        .only('hostname')
+        .order_by('?')
+        .first()
+    )
+
+    if random_task_instance is None:
+        raise ValueError('No task instances are READY and Enabled.')
+
+    return random_task_instance.hostname
+
+
 class PubSub(object):
     def __init__(self, conn):
         self.conn = conn
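Note that @task(queue=get_task_queuename) passes the function itself, not its result: the queue name is resolved each time a message is published. On a task or hybrid pod (AWX_COMPONENT unset or not 'web') it resolves to the local CLUSTER_HOST_ID; on a web pod it picks a random READY control-plane instance. A simplified sketch of that deferred resolution (assuming, as awx.main.dispatch.publish appears to do, that callable queues are invoked at publish time):

    import random

    def get_task_queuename():
        # Stand-in: the real function queries the Instance table on web pods.
        return random.choice(['awx-task-0', 'awx-task-1'])

    def task(queue=None):
        def decorator(fn):
            def delay(*args, **kwargs):
                # Resolve the queue per publish, not at import time.
                queuename = queue() if callable(queue) else queue
                print(f'publish {fn.__name__}{args} -> queue {queuename!r}')
            fn.delay = delay
            return fn
        return decorator

    @task(queue=get_task_queuename)
    def cluster_node_heartbeat():
        pass

    cluster_node_heartbeat.delay()  # may land on a different queue each call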
@@ -6,7 +6,7 @@ from django.conf import settings
 from django.db import connection
 import redis
 
-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 
 from . import pg_bus_conn
 
@@ -21,7 +21,7 @@ class Control(object):
         if service not in self.services:
             raise RuntimeError('{} must be in {}'.format(service, self.services))
         self.service = service
-        self.queuename = host or get_local_queuename()
+        self.queuename = host or get_task_queuename()
 
     def status(self, *args, **kwargs):
         r = redis.Redis.from_url(settings.BROKER_URL)
@@ -26,8 +26,8 @@ class TaskWorker(BaseWorker):
     `awx.main.dispatch.publish`.
     """
 
-    @classmethod
-    def resolve_callable(cls, task):
+    @staticmethod
+    def resolve_callable(task):
         """
         Transform a dotted notation task into an imported, callable function, e.g.,
 
@@ -46,7 +46,8 @@ class TaskWorker(BaseWorker):
 
         return _call
 
-    def run_callable(self, body):
+    @staticmethod
+    def run_callable(body):
         """
         Given some AMQP message, import the correct Python code and run it.
         """
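Making resolve_callable and run_callable static lets the new single-purpose daemons execute a dispatcher envelope without constructing a TaskWorker or a worker pool, which is exactly what the run_cache_clear and run_rsyslog_configurer commands below do. Roughly (the envelope fields shown are an assumption based on the dotted-path convention in the docstring):

    import json

    from awx.main.dispatch.worker.task import TaskWorker

    body = json.loads('{"task": "awx.main.tasks.system.clear_setting_cache", "args": [["FOO"]], "kwargs": {}}')
    TaskWorker.run_callable(body)  # no TaskWorker() instance needed anymore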
@@ -1,5 +1,6 @@
-from django.core.management.base import BaseCommand, CommandError
+from awx.main.tasks.system import clear_setting_cache
 from django.conf import settings
+from django.core.management.base import BaseCommand, CommandError
 
 
 class Command(BaseCommand):
@@ -31,5 +32,7 @@ class Command(BaseCommand):
         else:
             raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth')
 
+        clear_setting_cache.delay(['DISABLE_LOCAL_AUTH'])
+
     def handle(self, **options):
         self._enable_disable_auth(options.get('enable'), options.get('disable'))
awx/main/management/commands/run_cache_clear.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+import logging
+import json
+
+from django.core.management.base import BaseCommand
+from awx.main.dispatch import pg_bus_conn
+from awx.main.dispatch.worker.task import TaskWorker
+
+logger = logging.getLogger('awx.main.cache_clear')
+
+
+class Command(BaseCommand):
+    """
+    Cache Clear
+    Runs as a management command and starts a daemon that listens for a pg_notify message to clear the cache.
+    """
+
+    help = 'Launch the cache clear daemon'
+
+    def handle(self, *arg, **options):
+        try:
+            with pg_bus_conn(new_connection=True) as conn:
+                conn.listen("tower_settings_change")
+                for e in conn.events(yield_timeouts=True):
+                    if e is not None:
+                        body = json.loads(e.payload)
+                        logger.info(f"Cache clear request received. Clearing now, payload: {e.payload}")
+                        TaskWorker.run_callable(body)
+
+        except Exception:
+            # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
+            logger.exception('Encountered unhandled error in cache clear main loop')
+            raise
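The daemon's loop is deliberately thin: the pg_notify payload is the same envelope that clear_setting_cache.delay() publishes to the tower_settings_change queue, so TaskWorker.run_callable(body) can import and invoke the task directly. A hedged sketch of the producing side (the exact envelope fields are an assumption; the real ones come from awx.main.dispatch.publish):

    import json

    from awx.main.dispatch import pg_bus_conn

    envelope = {
        'task': 'awx.main.tasks.system.clear_setting_cache',  # dotted path for resolve_callable
        'args': [['DISABLE_LOCAL_AUTH']],
        'kwargs': {},
    }

    with pg_bus_conn(new_connection=True) as conn:
        conn.notify('tower_settings_change', json.dumps(envelope))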
@@ -8,7 +8,7 @@ from django.core.cache import cache as django_cache
 from django.core.management.base import BaseCommand
 from django.db import connection as django_connection
 
-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.control import Control
 from awx.main.dispatch.pool import AutoscalePool
 from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -76,7 +76,7 @@ class Command(BaseCommand):
         consumer = None
 
         try:
-            queues = ['tower_broadcast_all', get_local_queuename()]
+            queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
             consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
             consumer.run()
         except KeyboardInterrupt:
awx/main/management/commands/run_heartbeet.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+import json
+import logging
+import os
+import time
+
+from django.core.management.base import BaseCommand
+from django.conf import settings
+
+from awx.main.dispatch import pg_bus_conn
+
+logger = logging.getLogger('awx.main.commands.run_heartbeet')
+
+
+class Command(BaseCommand):
+    help = 'Launch the web server beacon (heartbeet)'
+
+    def print_banner(self):
+        heartbeet = r"""
+        **********     **********
+      *************   *************
+      *****************************
+       ***********HEART***********
+         *************************
+            *******************
+              ***************        _._
+                ***********        /`._ `'.     __
+                  *******          \  .\|  \  _'` `)
+                    ***          (``_)  \| ).'`  /`- /
+                     *            `\ `;\_  `\\//`-'`  /
+                                    \  `'.'.|  /  __/`
+                                     `'--v_|/`'`
+                                       __||-._
+                                     /'` `-`` `'\\
+                                    /    .'`     )
+                                    \  BEET  '  )
+                                     \.        /
+                                      '.     /'`
+                                        `)  |
+                                        //
+                                       '(.
+                                        `\`.
+                                         ``"""
+        print(heartbeet)
+
+    def construct_payload(self, action='online'):
+        payload = {
+            'hostname': settings.CLUSTER_HOST_ID,
+            'ip': os.environ.get('MY_POD_IP'),
+            'action': action,
+        }
+        return json.dumps(payload)
+
+    def do_hearbeat_loop(self):
+        with pg_bus_conn(new_connection=True) as conn:
+            while True:
+                logger.debug('Sending heartbeat')
+                conn.notify('web_heartbeet', self.construct_payload())
+                time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)
+
+    # TODO: Send a message with action=offline if we notice a SIGTERM or SIGINT
+    # (wsrelay can use this to remove the node quicker)
+    def handle(self, *arg, **options):
+        self.print_banner()
+
+        # Note: We don't really try any reconnect logic to pg_notify here,
+        # just let supervisor restart if we fail.
+        self.do_hearbeat_loop()
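The beacon publishes a small JSON payload on the web_heartbeet pg_notify channel at a fixed rate, which lets the relay process discover live web pods (and their pod IPs) without polling the Instance table. A sketch of a matching listener, using the same pg_bus_conn API as the commands above (the real consumer presumably lives in wsrelay, per the TODO):

    import json

    from awx.main.dispatch import pg_bus_conn

    with pg_bus_conn(new_connection=True) as conn:
        conn.listen('web_heartbeet')
        for e in conn.events(yield_timeouts=True):
            if e is None:
                continue  # timeout tick; nothing was received
            beat = json.loads(e.payload)
            print(f"web node {beat['hostname']} ({beat['ip']}) is {beat['action']}")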
awx/main/management/commands/run_rsyslog_configurer.py (new file, 41 lines)
@@ -0,0 +1,41 @@
+import logging
+import json
+
+from django.core.management.base import BaseCommand
+from django.conf import settings
+from django.core.cache import cache
+from awx.main.dispatch import pg_bus_conn
+from awx.main.dispatch.worker.task import TaskWorker
+from awx.main.utils.external_logging import reconfigure_rsyslog
+
+logger = logging.getLogger('awx.main.rsyslog_configurer')
+
+
+class Command(BaseCommand):
+    """
+    Rsyslog Configurer
+    Runs as a management command and starts rsyslog configurer daemon. Daemon listens
+    for pg_notify then calls reconfigure_rsyslog
+    """
+
+    help = 'Launch the rsyslog_configurer daemon'
+
+    def handle(self, *arg, **options):
+        try:
+            with pg_bus_conn(new_connection=True) as conn:
+                conn.listen("rsyslog_configurer")
+                # reconfigure rsyslog on start up
+                reconfigure_rsyslog()
+                for e in conn.events(yield_timeouts=True):
+                    if e is not None:
+                        logger.info("Change in logging settings found. Restarting rsyslogd")
+                        # clear the cache of relevant settings then restart
+                        setting_keys = [k for k in dir(settings) if k.startswith('LOG_AGGREGATOR')]
+                        cache.delete_many(setting_keys)
+                        settings._awx_conf_memoizedcache.clear()
+                        body = json.loads(e.payload)
+                        TaskWorker.run_callable(body)
+        except Exception:
+            # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
+            logger.exception('Encountered unhandled error in rsyslog_configurer main loop')
+            raise
@@ -13,13 +13,13 @@ from django.db import connection
 from django.db.migrations.executor import MigrationExecutor
 
 from awx.main.analytics.broadcast_websocket import (
-    BroadcastWebsocketStatsManager,
+    RelayWebsocketStatsManager,
     safe_name,
 )
-from awx.main.wsbroadcast import BroadcastWebsocketManager
+from awx.main.wsrelay import WebSocketRelayManager
 
 
-logger = logging.getLogger('awx.main.wsbroadcast')
+logger = logging.getLogger('awx.main.wsrelay')
 
 
 class Command(BaseCommand):
@@ -99,7 +99,7 @@ class Command(BaseCommand):
             executor = MigrationExecutor(connection)
             migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
         except Exception as exc:
-            logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
+            logger.warning(f'Error on startup of run_wsrelay (error: {exc}), retry in 10s...')
             time.sleep(10)
             return
 
@@ -130,9 +130,9 @@ class Command(BaseCommand):
 
         if options.get('status'):
             try:
-                stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
+                stats_all = RelayWebsocketStatsManager.get_stats_sync()
             except redis.exceptions.ConnectionError as e:
-                print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
+                print(f"Unable to get Relay Websocket Status. Failed to connect to redis {e}")
                 return
 
             data = {}
@@ -151,22 +151,19 @@ class Command(BaseCommand):
             host_stats = Command.get_connection_status(hostnames, data)
             lines = Command._format_lines(host_stats)
 
-            print(f'Broadcast websocket connection status from "{my_hostname}" to:')
+            print(f'Relay websocket connection status from "{my_hostname}" to:')
             print('\n'.join(lines))
 
             host_stats = Command.get_connection_stats(hostnames, data)
             lines = Command._format_lines(host_stats)
 
-            print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:')
+            print(f'\nRelay websocket connection stats from "{my_hostname}" to:')
             print('\n'.join(lines))
 
             return
 
         try:
-            broadcast_websocket_mgr = BroadcastWebsocketManager()
-            task = broadcast_websocket_mgr.start()
-
-            loop = asyncio.get_event_loop()
-            loop.run_until_complete(task)
+            websocket_relay_manager = WebSocketRelayManager()
+            asyncio.run(websocket_relay_manager.run())
         except KeyboardInterrupt:
-            logger.debug('Terminating Websocket Broadcaster')
+            logger.info('Terminating Websocket Relayer')
@@ -32,7 +32,7 @@ from polymorphic.models import PolymorphicModel
 
 # AWX
 from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.control import Control as ControlDispatcher
 from awx.main.registrar import activity_stream_registrar
 from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
@@ -1567,7 +1567,7 @@ class UnifiedJob(
         return r
 
     def get_queue_name(self):
-        return self.controller_node or self.execution_node or get_local_queuename()
+        return self.controller_node or self.execution_node or get_task_queuename()
 
     @property
     def is_container_group_task(self):
@@ -28,7 +28,7 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
 
 websocket_urlpatterns = [
     re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
-    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
+    re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
 ]
 
 application = AWXProtocolTypeRouter(
@@ -8,7 +8,7 @@ from django.conf import settings
 from awx import MODE
 from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
 from awx.main.dispatch.publish import task
-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 
 logger = logging.getLogger('awx.main.scheduler')
 
@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
     manager().schedule()
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def task_manager():
     run_manager(TaskManager, "task")
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def dependency_manager():
     run_manager(DependencyManager, "dependency")
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def workflow_manager():
     run_manager(WorkflowManager, "workflow")
@@ -29,7 +29,7 @@ from gitdb.exc import BadName as BadGitName
 
 # AWX
 from awx.main.dispatch.publish import task
-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 from awx.main.constants import (
     PRIVILEGE_ESCALATION_METHODS,
     STANDARD_INVENTORY_UPDATE_ENV,
@@ -806,7 +806,7 @@ class SourceControlMixin(BaseTask):
         self.release_lock(project)
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 class RunJob(SourceControlMixin, BaseTask):
     """
     Run a job using ansible-playbook.
@@ -1121,7 +1121,7 @@ class RunJob(SourceControlMixin, BaseTask):
             update_inventory_computed_fields.delay(inventory.id)
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 class RunProjectUpdate(BaseTask):
     model = ProjectUpdate
     event_model = ProjectUpdateEvent
@@ -1443,7 +1443,7 @@ class RunProjectUpdate(BaseTask):
         return params
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 class RunInventoryUpdate(SourceControlMixin, BaseTask):
     model = InventoryUpdate
     event_model = InventoryUpdateEvent
@@ -1706,7 +1706,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
         raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 class RunAdHocCommand(BaseTask):
     """
     Run an ad hoc command using ansible.
@@ -1859,7 +1859,7 @@ class RunAdHocCommand(BaseTask):
         return d
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 class RunSystemJob(BaseTask):
     model = SystemJob
     event_model = SystemJobEvent
@@ -28,7 +28,7 @@ from awx.main.utils.common import (
 from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
 from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
 from awx.main.models import Instance, InstanceLink, UnifiedJob
-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.publish import task
 
 # Receptorctl
@@ -713,7 +713,7 @@ def write_receptor_config():
     links.update(link_state=InstanceLink.States.ESTABLISHED)
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def remove_deprovisioned_node(hostname):
     InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
     InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
@@ -50,7 +50,7 @@ from awx.main.models import (
 )
 from awx.main.constants import ACTIVE_STATES
 from awx.main.dispatch.publish import task
-from awx.main.dispatch import get_local_queuename, reaper
+from awx.main.dispatch import get_task_queuename, reaper
 from awx.main.utils.common import (
     get_type_for_model,
     ignore_inventory_computed_fields,
@@ -59,7 +59,6 @@ from awx.main.utils.common import (
     ScheduleTaskManager,
 )
 
-from awx.main.utils.external_logging import reconfigure_rsyslog
 from awx.main.utils.reload import stop_local_services
 from awx.main.utils.pglock import advisory_lock
 from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
@@ -115,9 +114,6 @@ def dispatch_startup():
     m = Metrics()
     m.reset_values()
 
-    # Update Tower's rsyslog.conf file based on loggins settings in the db
-    reconfigure_rsyslog()
-
 
 def inform_cluster_of_shutdown():
     try:
@@ -132,7 +128,7 @@ def inform_cluster_of_shutdown():
         logger.exception('Encountered problem with normal shutdown signal.')
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def apply_cluster_membership_policies():
     from awx.main.signals import disable_activity_stream
 
@@ -244,8 +240,10 @@ def apply_cluster_membership_policies():
     logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
 
 
-@task(queue='tower_broadcast_all')
-def handle_setting_changes(setting_keys):
+@task(queue='tower_settings_change')
+def clear_setting_cache(setting_keys):
+    # log that cache is being cleared
+    logger.info(f"clear_setting_cache of keys {setting_keys}")
     orig_len = len(setting_keys)
     for i in range(orig_len):
         for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
@@ -254,9 +252,6 @@ def handle_setting_changes(setting_keys):
     logger.debug('cache delete_many(%r)', cache_keys)
     cache.delete_many(cache_keys)
 
-    if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
-        reconfigure_rsyslog()
-
 
 @task(queue='tower_broadcast_all')
 def delete_project_files(project_path):
@@ -286,7 +281,7 @@ def profile_sql(threshold=1, minutes=1):
     logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def send_notifications(notification_list, job_id=None):
     if not isinstance(notification_list, list):
         raise TypeError("notification_list should be of type list")
@@ -317,7 +312,7 @@ def send_notifications(notification_list, job_id=None):
             logger.exception('Error saving notification {} result.'.format(notification.id))
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def gather_analytics():
     from awx.conf.models import Setting
     from rest_framework.fields import DateTimeField
@@ -330,7 +325,7 @@ def gather_analytics():
         analytics.gather()
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def purge_old_stdout_files():
     nowtime = time.time()
     for f in os.listdir(settings.JOBOUTPUT_ROOT):
@@ -378,12 +373,12 @@ def handle_removed_image(remove_images=None):
     _cleanup_images_and_files(remove_images=remove_images, file_pattern='')
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def cleanup_images_and_files():
     _cleanup_images_and_files()
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def cluster_node_health_check(node):
     """
     Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
@@ -402,7 +397,7 @@ def cluster_node_health_check(node):
     this_inst.local_health_check()
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def execution_node_health_check(node):
     if node == '':
         logger.warning('Remote health check incorrectly called with blank string')
@@ -496,7 +491,7 @@ def inspect_execution_nodes(instance_list):
             execution_node_health_check.apply_async([hostname])
 
 
-@task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
+@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
 def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
     logger.debug("Cluster node heartbeat task.")
     nowtime = now()
@@ -586,7 +581,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
             reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def awx_receptor_workunit_reaper():
     """
     When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
@@ -622,7 +617,7 @@ def awx_receptor_workunit_reaper():
     administrative_workunit_reaper(receptor_work_list)
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def awx_k8s_reaper():
     if not settings.RECEPTOR_RELEASE_WORK:
         return
@@ -642,7 +637,7 @@ def awx_k8s_reaper():
             logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def awx_periodic_scheduler():
     with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
         if acquired is False:
@@ -708,7 +703,7 @@ def schedule_manager_success_or_error(instance):
         ScheduleWorkflowManager().schedule()
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def handle_work_success(task_actual):
     try:
         instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -720,7 +715,7 @@ def handle_work_success(task_actual):
     schedule_manager_success_or_error(instance)
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def handle_work_error(task_actual):
     try:
         instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -760,7 +755,7 @@ def handle_work_error(task_actual):
     schedule_manager_success_or_error(instance)
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def update_inventory_computed_fields(inventory_id):
     """
     Signal handler and wrapper around inventory.update_computed_fields to
@@ -801,7 +796,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
     return False
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def update_host_smart_inventory_memberships():
     smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
     changed_inventories = set([])
@@ -817,7 +812,7 @@ def update_host_smart_inventory_memberships():
         smart_inventory.update_computed_fields()
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def delete_inventory(inventory_id, user_id, retries=5):
     # Delete inventory as user
     if user_id is None:
@@ -882,7 +877,7 @@ def _reconstruct_relationships(copy_mapping):
         new_obj.save()
 
 
-@task(queue=get_local_queuename)
+@task(queue=get_task_queuename)
 def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
     sub_obj_list = cache.get(uuid)
     if sub_obj_list is None:
@@ -6,6 +6,7 @@ import urllib.parse as urlparse
 from django.conf import settings
 
 from awx.main.utils.reload import supervisor_service_command
+from awx.main.dispatch.publish import task
 
 
 def construct_rsyslog_conf_template(settings=settings):
@@ -114,6 +115,7 @@ def construct_rsyslog_conf_template(settings=settings):
     return tmpl
 
 
+@task(queue='rsyslog_configurer')
 def reconfigure_rsyslog():
     tmpl = construct_rsyslog_conf_template()
     # Write config to a temp file then move it to preserve atomicity
 
@@ -17,7 +17,7 @@ def supervisor_service_command(command, service='*', communicate=True):
     """
     args = ['supervisorctl']
 
-    supervisor_config_path = os.getenv('SUPERVISOR_WEB_CONFIG_PATH', None)
+    supervisor_config_path = os.getenv('SUPERVISOR_CONFIG_PATH', None)
     if supervisor_config_path:
         args.extend(['-c', supervisor_config_path])
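With the @task decorator attached, reconfigure_rsyslog gains a .delay() that publishes onto the dedicated rsyslog_configurer queue, while a plain call still runs inline; callers that only changed settings use .delay() so each pod's run_rsyslog_configurer daemon performs the supervisorctl restart locally. Roughly (names from this diff; behavior summarized, not verified):

    from awx.main.utils.external_logging import reconfigure_rsyslog

    reconfigure_rsyslog()        # runs inline in the current process
    reconfigure_rsyslog.delay()  # publishes to the 'rsyslog_configurer' channel;
                                 # the per-pod daemon executes it where it runs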
awx/main/wsbroadcast.py (deleted, 208 lines)
@@ -1,208 +0,0 @@
-import json
-import logging
-import asyncio
-
-import aiohttp
-from aiohttp import client_exceptions
-from asgiref.sync import sync_to_async
-
-from channels.layers import get_channel_layer
-
-from django.conf import settings
-from django.apps import apps
-from django.core.serializers.json import DjangoJSONEncoder
-
-from awx.main.analytics.broadcast_websocket import (
-    BroadcastWebsocketStats,
-    BroadcastWebsocketStatsManager,
-)
-import awx.main.analytics.subsystem_metrics as s_metrics
-
-logger = logging.getLogger('awx.main.wsbroadcast')
-
-
-def wrap_broadcast_msg(group, message: str):
-    # TODO: Maybe wrap as "group","message" so that we don't need to
-    # encode/decode as json.
-    return json.dumps(dict(group=group, message=message), cls=DjangoJSONEncoder)
-
-
-def unwrap_broadcast_msg(payload: dict):
-    return (payload['group'], payload['message'])
-
-
-@sync_to_async
-def get_broadcast_hosts():
-    Instance = apps.get_model('main', 'Instance')
-    instances = (
-        Instance.objects.exclude(hostname=Instance.objects.my_hostname())
-        .exclude(node_type='execution')
-        .exclude(node_type='hop')
-        .order_by('hostname')
-        .values('hostname', 'ip_address')
-        .distinct()
-    )
-    return {i['hostname']: i['ip_address'] or i['hostname'] for i in instances}
-
-
-def get_local_host():
-    Instance = apps.get_model('main', 'Instance')
-    return Instance.objects.my_hostname()
-
-
-class WebsocketTask:
-    def __init__(
-        self,
-        name,
-        event_loop,
-        stats: BroadcastWebsocketStats,
-        remote_host: str,
-        remote_port: int = settings.BROADCAST_WEBSOCKET_PORT,
-        protocol: str = settings.BROADCAST_WEBSOCKET_PROTOCOL,
-        verify_ssl: bool = settings.BROADCAST_WEBSOCKET_VERIFY_CERT,
-        endpoint: str = 'broadcast',
-    ):
-        self.name = name
-        self.event_loop = event_loop
-        self.stats = stats
-        self.remote_host = remote_host
-        self.remote_port = remote_port
-        self.endpoint = endpoint
-        self.protocol = protocol
-        self.verify_ssl = verify_ssl
-        self.channel_layer = None
-        self.subsystem_metrics = s_metrics.Metrics(instance_name=name)
-
-    async def run_loop(self, websocket: aiohttp.ClientWebSocketResponse):
-        raise RuntimeError("Implement me")
-
-    async def connect(self, attempt):
-        from awx.main.consumers import WebsocketSecretAuthHelper  # noqa
-
-        logger.debug(f"Connection from {self.name} to {self.remote_host} attempt number {attempt}.")
-
-        '''
-        Can not put get_channel_layer() in the init code because it is in the init
-        path of channel layers i.e. RedisChannelLayer() calls our init code.
-        '''
-        if not self.channel_layer:
-            self.channel_layer = get_channel_layer()
-
-        try:
-            if attempt > 0:
-                await asyncio.sleep(settings.BROADCAST_WEBSOCKET_RECONNECT_RETRY_RATE_SECONDS)
-        except asyncio.CancelledError:
-            logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled")
-            raise
-
-        uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/{self.endpoint}/"
-        timeout = aiohttp.ClientTimeout(total=10)
-
-        secret_val = WebsocketSecretAuthHelper.construct_secret()
-        try:
-            async with aiohttp.ClientSession(headers={'secret': secret_val}, timeout=timeout) as session:
-                async with session.ws_connect(uri, ssl=self.verify_ssl, heartbeat=20) as websocket:
-                    logger.info(f"Connection from {self.name} to {self.remote_host} established.")
-                    self.stats.record_connection_established()
-                    attempt = 0
-                    await self.run_loop(websocket)
-        except asyncio.CancelledError:
-            # TODO: Check if connected and disconnect
-            # Possibly use run_until_complete() if disconnect is async
-            logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled.")
-            self.stats.record_connection_lost()
-            raise
-        except client_exceptions.ClientConnectorError as e:
-            logger.warning(f"Connection from {self.name} to {self.remote_host} failed: '{e}'.")
-        except asyncio.TimeoutError:
-            logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
-        except Exception as e:
-            # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
-            logger.exception(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
-        else:
-            logger.warning(f"Connection from {self.name} to {self.remote_host} list.")
-
-        self.stats.record_connection_lost()
-        self.start(attempt=attempt + 1)
-
-    def start(self, attempt=0):
-        self.async_task = self.event_loop.create_task(self.connect(attempt=attempt))
-
-    def cancel(self):
-        self.async_task.cancel()
-
-
-class BroadcastWebsocketTask(WebsocketTask):
-    async def run_loop(self, websocket: aiohttp.ClientWebSocketResponse):
-        async for msg in websocket:
-            self.stats.record_message_received()
-
-            if msg.type == aiohttp.WSMsgType.ERROR:
-                break
-            elif msg.type == aiohttp.WSMsgType.TEXT:
-                try:
-                    payload = json.loads(msg.data)
-                except json.JSONDecodeError:
-                    logmsg = "Failed to decode broadcast message"
-                    if logger.isEnabledFor(logging.DEBUG):
-                        logmsg = "{} {}".format(logmsg, payload)
-                    logger.warning(logmsg)
-                    continue
-                (group, message) = unwrap_broadcast_msg(payload)
-                if group == "metrics":
-                    self.subsystem_metrics.store_metrics(message)
-                    continue
-                await self.channel_layer.group_send(group, {"type": "internal.message", "text": message})
-
-
-class BroadcastWebsocketManager(object):
-    def __init__(self):
-        self.event_loop = asyncio.get_event_loop()
-        '''
-        {
-            'hostname1': BroadcastWebsocketTask(),
-            'hostname2': BroadcastWebsocketTask(),
-            'hostname3': BroadcastWebsocketTask(),
-        }
-        '''
-        self.broadcast_tasks = dict()
-        self.local_hostname = get_local_host()
-        self.stats_mgr = BroadcastWebsocketStatsManager(self.event_loop, self.local_hostname)
-
-    async def run_per_host_websocket(self):
-        while True:
-            known_hosts = await get_broadcast_hosts()
-            future_remote_hosts = known_hosts.keys()
-            current_remote_hosts = self.broadcast_tasks.keys()
-            deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
-            new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)
-
-            remote_addresses = {k: v.remote_host for k, v in self.broadcast_tasks.items()}
-            for hostname, address in known_hosts.items():
-                if hostname in self.broadcast_tasks and address != remote_addresses[hostname]:
-                    deleted_remote_hosts.add(hostname)
-                    new_remote_hosts.add(hostname)
-
-            if deleted_remote_hosts:
-                logger.warning(f"Removing {deleted_remote_hosts} from websocket broadcast list")
-            if new_remote_hosts:
-                logger.warning(f"Adding {new_remote_hosts} to websocket broadcast list")
-
-            for h in deleted_remote_hosts:
-                self.broadcast_tasks[h].cancel()
-                del self.broadcast_tasks[h]
-                self.stats_mgr.delete_remote_host_stats(h)
-
-            for h in new_remote_hosts:
-                stats = self.stats_mgr.new_remote_host_stats(h)
-                broadcast_task = BroadcastWebsocketTask(name=self.local_hostname, event_loop=self.event_loop, stats=stats, remote_host=known_hosts[h])
-                broadcast_task.start()
-                self.broadcast_tasks[h] = broadcast_task
-
-            await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)
-
-    def start(self):
-        self.stats_mgr.start()
-
-        self.async_task = self.event_loop.create_task(self.run_per_host_websocket())
-        return self.async_task
awx/main/wsrelay.py (new file, 305 lines)
@ -0,0 +1,305 @@
|
||||
import json
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import Dict
|
||||
|
||||
import aiohttp
|
||||
from aiohttp import client_exceptions
|
||||
|
||||
from channels.layers import get_channel_layer
|
||||
|
||||
from django.conf import settings
|
||||
from django.apps import apps
|
||||
|
||||
import psycopg
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import (
|
||||
RelayWebsocketStats,
|
||||
RelayWebsocketStatsManager,
|
||||
)
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
|
||||
logger = logging.getLogger('awx.main.wsrelay')
|
||||
|
||||
|
||||
def wrap_broadcast_msg(group, message: str):
|
||||
# TODO: Maybe wrap as "group","message" so that we don't need to
|
||||
# encode/decode as json.
|
||||
return dict(group=group, message=message)
|
||||
|
||||
|
||||
def get_local_host():
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
return Instance.objects.my_hostname()
|
||||
|
||||
|
||||
class WebsocketRelayConnection:
    def __init__(
        self,
        name,
        stats: RelayWebsocketStats,
        remote_host: str,
        remote_port: int = settings.BROADCAST_WEBSOCKET_PORT,
        protocol: str = settings.BROADCAST_WEBSOCKET_PROTOCOL,
        verify_ssl: bool = settings.BROADCAST_WEBSOCKET_VERIFY_CERT,
    ):
        self.name = name
        self.event_loop = asyncio.get_event_loop()
        self.stats = stats
        self.remote_host = remote_host
        self.remote_port = remote_port
        self.protocol = protocol
        self.verify_ssl = verify_ssl
        self.channel_layer = None
        self.subsystem_metrics = s_metrics.Metrics(instance_name=name)
        self.producers = dict()
        self.connected = False

    async def run_loop(self, websocket: aiohttp.ClientWebSocketResponse):
        raise RuntimeError("Implement me")

    async def connect(self):
        from awx.main.consumers import WebsocketSecretAuthHelper  # noqa

        logger.debug(f"Connection attempt from {self.name} to {self.remote_host}")

        '''
        Can not put get_channel_layer() in the init code because it is in the init
        path of channel layers i.e. RedisChannelLayer() calls our init code.
        '''
        if not self.channel_layer:
            self.channel_layer = get_channel_layer()

        uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/relay/"
        timeout = aiohttp.ClientTimeout(total=10)

        secret_val = WebsocketSecretAuthHelper.construct_secret()
        try:
            async with aiohttp.ClientSession(headers={'secret': secret_val}, timeout=timeout) as session:
                async with session.ws_connect(uri, ssl=self.verify_ssl, heartbeat=20) as websocket:
                    logger.info(f"Connection from {self.name} to {self.remote_host} established.")
                    self.stats.record_connection_established()
                    self.connected = True
                    await self.run_connection(websocket)
        except asyncio.CancelledError:
            # TODO: Check if connected and disconnect
            # Possibly use run_until_complete() if disconnect is async
            logger.warning(f"Connection from {self.name} to {self.remote_host} cancelled.")
        except client_exceptions.ClientConnectorError as e:
            logger.warning(f"Connection from {self.name} to {self.remote_host} failed: '{e}'.", exc_info=True)
        except asyncio.TimeoutError:
            logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
        except Exception as e:
            # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
            logger.warning(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.", exc_info=True)
        else:
            logger.debug(f"Connection from {self.name} to {self.remote_host} lost, but no exception was raised.")
        finally:
            self.connected = False
            self.stats.record_connection_lost()

    def start(self):
        self.async_task = self.event_loop.create_task(self.connect())
        return self.async_task

    def cancel(self):
        self.async_task.cancel()

    async def run_connection(self, websocket: aiohttp.ClientWebSocketResponse):
        # create a dedicated subsystem metric producer to handle local subsystem
        # metrics messages
        # the "metrics" group is not subscribed to in the typical fashion, so we
        # just explicitly create it
        producer = self.event_loop.create_task(self.run_producer("metrics", websocket, "metrics"))
        self.producers["metrics"] = {"task": producer, "subscriptions": {"metrics"}}
        async for msg in websocket:
            self.stats.record_message_received()

            if msg.type == aiohttp.WSMsgType.ERROR:
                break
            elif msg.type == aiohttp.WSMsgType.TEXT:
                try:
                    payload = json.loads(msg.data)
                except json.JSONDecodeError:
                    logmsg = "Failed to decode message from web node"
                    if logger.isEnabledFor(logging.DEBUG):
                        # payload is undefined when decoding fails; log the raw frame instead
                        logmsg = "{} {}".format(logmsg, msg.data)
                    logger.warning(logmsg)
                    continue

                if payload.get("type") == "consumer.subscribe":
                    for group in payload['groups']:
                        name = f"{self.remote_host}-{group}"
                        origin_channel = payload['origin_channel']
                        if not self.producers.get(name):
                            producer = self.event_loop.create_task(self.run_producer(name, websocket, group))
                            self.producers[name] = {"task": producer, "subscriptions": {origin_channel}}
                            logger.debug(f"Producer {name} started.")
                        else:
                            self.producers[name]["subscriptions"].add(origin_channel)
                            logger.debug(f"Connection from {self.name} to {self.remote_host} added subscription to {group}.")

                if payload.get("type") == "consumer.unsubscribe":
                    for group in payload['groups']:
                        name = f"{self.remote_host}-{group}"
                        origin_channel = payload['origin_channel']
                        try:
                            self.producers[name]["subscriptions"].remove(origin_channel)
                            logger.debug(f"Unsubscribed {origin_channel} from {name}")
                        except KeyError:
                            logger.warning(f"Producer {name} not found.")

    async def run_producer(self, name, websocket, group):
        try:
            logger.info(f"Starting producer for {name}")

            consumer_channel = await self.channel_layer.new_channel()
            await self.channel_layer.group_add(group, consumer_channel)
            logger.debug(f"Producer {name} added to group {group} and is now awaiting messages.")

            while True:
                try:
                    msg = await asyncio.wait_for(self.channel_layer.receive(consumer_channel), timeout=10)
                    if not msg.get("needs_relay"):
                        # This is added in by emit_channel_notification(). It prevents us from looping
                        # in the event that we are sharing a redis with a web instance. We'll see the
                        # message once (it'll have needs_relay=True), we'll delete that, and then forward
                        # the message along. The web instance will add it back to the same channels group,
                        # but it won't have needs_relay=True, so we'll ignore it.
                        continue

                    # We need to copy the message because we're going to delete the needs_relay key
                    # and we don't want to modify the original message because other producers may
                    # still need to act on it. It seems weird, but it's necessary.
                    msg = dict(msg)
                    del msg["needs_relay"]
                except asyncio.TimeoutError:
                    current_subscriptions = self.producers[name]["subscriptions"]
                    if len(current_subscriptions) == 0:
                        logger.info(f"Producer {name} has no subscribers, shutting down.")
                        return

                    continue

                await websocket.send_json(wrap_broadcast_msg(group, msg))
        except ConnectionResetError:
            # This can be hit when a web node is scaling down and we try to write to it.
            # There's really nothing to do in this case and it's a fairly typical thing to happen.
            # We'll log it as debug, but it's not really a problem.
            logger.debug(f"Producer {name} connection reset.")
        except Exception:
            # Note, this is very intentional and important since we do not otherwise
            # ever check the result of this future. Without this line you will not see an error if
            # something goes wrong in here.
            logger.exception(f"Event relay producer {name} crashed")
        finally:
            await self.channel_layer.group_discard(group, consumer_channel)
            del self.producers[name]


class WebSocketRelayManager(object):
    def __init__(self):
        self.local_hostname = get_local_host()
        self.relay_connections = dict()
        # hostname -> ip
        self.known_hosts: Dict[str, str] = dict()

    async def pg_consumer(self, conn):
        try:
            await conn.execute("LISTEN web_heartbeet")
            async for notif in conn.notifies():
                if notif is not None and notif.channel == "web_heartbeet":
                    try:
                        payload = json.loads(notif.payload)
                    except json.JSONDecodeError:
                        logmsg = "Failed to decode message from pg_notify channel `web_heartbeet`"
                        if logger.isEnabledFor(logging.DEBUG):
                            # payload is undefined when decoding fails; log the raw notification
                            logmsg = "{} {}".format(logmsg, notif.payload)
                        logger.warning(logmsg)
                        continue

                    # Skip if the message comes from the same host we are running on
                    # In this case, we'll be sharing a redis, no need to relay.
                    if payload.get("hostname") == self.local_hostname:
                        continue

                    if payload.get("action") == "online":
                        hostname = payload["hostname"]
                        ip = payload["ip"]
                        if ip is None:
                            # If we don't get an IP, just try the hostname, maybe it resolves
                            ip = hostname
                        self.known_hosts[hostname] = ip
                        logger.debug(f"Web host {hostname} ({ip}) online heartbeat received.")
                    elif payload.get("action") == "offline":
                        hostname = payload["hostname"]
                        del self.known_hosts[hostname]
                        logger.debug(f"Web host {hostname} offline heartbeat received.")
        except Exception as e:
            # This catch-all is the same as the one above. asyncio will eat the exception
            # but we want to know about it.
            logger.exception(f"pg_consumer exception: {e}")

    async def run(self):
        event_loop = asyncio.get_running_loop()

        stats_mgr = RelayWebsocketStatsManager(event_loop, self.local_hostname)
        stats_mgr.start()

        # Set up a pg_notify consumer for allowing web nodes to "provision" and "deprovision" themselves gracefully.
        database_conf = settings.DATABASES['default']
        async_conn = await psycopg.AsyncConnection.connect(
            dbname=database_conf['NAME'],
            host=database_conf['HOST'],
            user=database_conf['USER'],
            password=database_conf['PASSWORD'],
            port=database_conf['PORT'],
            **database_conf.get("OPTIONS", {}),
        )
        await async_conn.set_autocommit(True)
        event_loop.create_task(self.pg_consumer(async_conn))

        # Establishes a websocket connection to /websocket/relay on all API servers
        while True:
            # logger.info("Current known hosts: {}".format(self.known_hosts))
            future_remote_hosts = self.known_hosts.keys()
            current_remote_hosts = self.relay_connections.keys()
            deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
            new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)

            # This loop handles if we get an advertisement from a host we already know about but
            # the advertisement has a different IP than we are currently connected to.
            for hostname, address in self.known_hosts.items():
                if hostname not in self.relay_connections:
                    # We've picked up a new hostname that we don't know about yet.
                    continue

                if address != self.relay_connections[hostname].remote_host:
                    deleted_remote_hosts.add(hostname)
                    new_remote_hosts.add(hostname)

            # Delete any hosts with closed connections
            for hostname, relay_conn in self.relay_connections.items():
                if not relay_conn.connected:
                    deleted_remote_hosts.add(hostname)

            if deleted_remote_hosts:
                logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")

            if new_remote_hosts:
                logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")

            for h in deleted_remote_hosts:
                self.relay_connections[h].cancel()
                del self.relay_connections[h]
                if h not in new_remote_hosts:
                    # Keep the freshly-advertised entry when the host is only being
                    # reconnected (e.g. its IP changed); drop it otherwise.
                    del self.known_hosts[h]
                stats_mgr.delete_remote_host_stats(h)

            for h in new_remote_hosts:
                stats = stats_mgr.new_remote_host_stats(h)
                relay_connection = WebsocketRelayConnection(name=self.local_hostname, stats=stats, remote_host=self.known_hosts[h])
                relay_connection.start()
                self.relay_connections[h] = relay_connection

            await asyncio.sleep(settings.BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS)
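For orientation, a minimal sketch of how this manager is driven by the `run_wsrelay` management command added in this PR (assumed structure; the real command class wraps this in Django command plumbing, and `run_relay` is a hypothetical helper):

```python
import asyncio

from awx.main.wsrelay import WebSocketRelayManager

def run_relay():  # hypothetical helper, not the actual Command.handle()
    # The command ultimately just runs the relay manager's asyncio loop.
    relay_manager = WebSocketRelayManager()
    asyncio.run(relay_manager.run())
```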
@ -228,6 +228,9 @@ JOB_EVENT_MAX_QUEUE_SIZE = 10000
# The number of job events to migrate per-transaction when moving from int -> bigint
JOB_EVENT_MIGRATION_CHUNK_SIZE = 1000000

# The prefix of the redis key that stores metrics
SUBSYSTEM_METRICS_REDIS_KEY_PREFIX = "awx_metrics"

# Histogram buckets for the callback_receiver_batch_events_insert_db metric
SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS = [10, 50, 150, 350, 650, 2000]

@ -866,7 +869,9 @@ LOGGING = {
    'awx.main.commands.run_callback_receiver': {'handlers': ['callback_receiver']},  # level handled by dynamic_level_filter
    'awx.main.dispatch': {'handlers': ['dispatcher']},
    'awx.main.consumers': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'INFO'},
    'awx.main.wsbroadcast': {'handlers': ['wsbroadcast']},
    'awx.main.rsyslog_configurer': {'handlers': ['rsyslog_configurer']},
    'awx.main.cache_clear': {'handlers': ['cache_clear']},
    'awx.main.wsrelay': {'handlers': ['wsrelay']},
    'awx.main.commands.inventory_import': {'handlers': ['inventory_import'], 'propagate': False},
    'awx.main.tasks': {'handlers': ['task_system', 'external_logger'], 'propagate': False},
    'awx.main.analytics': {'handlers': ['task_system', 'external_logger'], 'level': 'INFO', 'propagate': False},
@ -875,7 +880,7 @@ LOGGING = {
    'awx.main.signals': {'level': 'INFO'},  # very verbose debug-level logs
    'awx.api.permissions': {'level': 'INFO'},  # very verbose debug-level logs
    'awx.analytics': {'handlers': ['external_logger'], 'level': 'INFO', 'propagate': False},
    'awx.analytics.broadcast_websocket': {'handlers': ['console', 'file', 'wsbroadcast', 'external_logger'], 'level': 'INFO', 'propagate': False},
    'awx.analytics.broadcast_websocket': {'handlers': ['console', 'file', 'wsrelay', 'external_logger'], 'level': 'INFO', 'propagate': False},
    'awx.analytics.performance': {'handlers': ['console', 'file', 'tower_warnings', 'external_logger'], 'level': 'DEBUG', 'propagate': False},
    'awx.analytics.job_lifecycle': {'handlers': ['console', 'job_lifecycle'], 'level': 'DEBUG', 'propagate': False},
    'django_auth_ldap': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG'},
@ -893,10 +898,12 @@ handler_config = {
    'tower_warnings': {'filename': 'tower.log'},
    'callback_receiver': {'filename': 'callback_receiver.log'},
    'dispatcher': {'filename': 'dispatcher.log', 'formatter': 'dispatcher'},
    'wsbroadcast': {'filename': 'wsbroadcast.log'},
    'wsrelay': {'filename': 'wsrelay.log'},
    'task_system': {'filename': 'task_system.log'},
    'rbac_migrations': {'filename': 'tower_rbac_migrations.log'},
    'job_lifecycle': {'filename': 'job_lifecycle.log', 'formatter': 'job_lifecycle'},
    'rsyslog_configurer': {'filename': 'rsyslog_configurer.log'},
    'cache_clear': {'filename': 'cache_clear.log'},
}

# If running on a VM, we log to files. When running in a container, we log to stdout.
@ -1007,6 +1014,9 @@ BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS = 10
# How often websocket process will generate stats
BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS = 5

# How often should web instances advertise themselves?
BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS = 15

DJANGO_GUID = {'GUID_HEADER_NAME': 'X-API-Request-Id'}

# Name of the default task queue

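For context on the beacon format: `pg_consumer()` above expects JSON on the `web_heartbeet` channel with `hostname`, `ip`, and an `action` of `online` or `offline`. A hedged sketch of how a web node could emit such a beacon at the rate above (the helper name and DSN handling are illustrative, not the actual `run_heartbeet` code):

```python
import json

import psycopg

def send_web_heartbeat(dsn: str, hostname: str, ip: str, action: str = "online"):
    # action is "online" while the web node serves traffic and "offline"
    # during a graceful shutdown, matching what pg_consumer() handles.
    payload = json.dumps({"hostname": hostname, "ip": ip, "action": action})
    with psycopg.connect(dsn, autocommit=True) as conn:
        conn.execute("SELECT pg_notify('web_heartbeet', %s)", (payload,))
```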
@ -116,8 +116,7 @@ def main():

    if result['status_code'] == 405:
        module.fail_json(
            msg="Unable to trigger a project update because the project scm_type ({0}) does not support it.".format(project['scm_type']),
            response=result
            msg="Unable to trigger a project update because the project scm_type ({0}) does not support it.".format(project['scm_type']), response=result
        )
    elif result['status_code'] != 202:
        module.fail_json(msg="Failed to update project, see response for details", response=result)

@ -6,73 +6,4 @@ Our channels/websocket implementation handles the communication between AWX API

AWX enlists the help of the `django-channels` library to create our communications layer. `django-channels` provides us with per-client messaging integration in our application by implementing the Asynchronous Server Gateway Interface (ASGI).

To communicate between our different services we use websockets. Every AWX node is fully connected via a special websocket endpoint that forwards any local websocket data to all other nodes. Local websockets are backed by Redis, the channels2 default service.

Inside AWX we use the `emit_channel_notification` function which places messages onto the queue. The messages are given an explicit event group and event type which we later use in our wire protocol to control message delivery to the client.
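A minimal sketch of the producer side (assuming the `emit_channel_notification(group, payload)` helper in `awx.main.consumers`; the group name and payload fields below are illustrative):

```python
from awx.main.consumers import emit_channel_notification

# Clients subscribed to 'jobs': ['status_changed'] receive this payload;
# the helper also tags the message so it gets relayed to other nodes.
emit_channel_notification(
    "jobs-status_changed",
    {"group_name": "jobs", "unified_job_id": 123, "status": "running"},
)
```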
### Broadcast Backplane

Previously, AWX leveraged RabbitMQ to deliver Ansible events that emanated from one AWX node to all other AWX nodes, so that any client listening and subscribed to the websockets could get events from any running playbook. We have since moved off of RabbitMQ and onto a per-node local Redis instance. To maintain the requirement that any websocket connection can receive events from any playbook running on any AWX node, we still need to deliver every event to every AWX node. AWX does this via a fully connected websocket backplane.

#### Broadcast Backplane Token

AWX nodes connect to every other node via the websocket backplane. The backplane websockets initiate from the `wsbroadcast` process and connect to other nodes via the same nginx process that serves webpage websocket connections and marshals incoming web/API requests. If you have configured AWX to run with an SSL-terminated connection in front of nginx, then you likely will have nginx configured to handle http traffic, and thus the websocket connection will flow unencrypted over http. If you have nginx configured with SSL enabled, then the websocket traffic will flow encrypted.

Authentication is accomplished via a shared secret that is generated and set at playbook install time. The shared secret is used to derive a payload that is exchanged via the http(s) header `secret`. The shared secret payload consists of a `secret`, containing the shared secret, and a `nonce`, which is used to mitigate replay attacks.

Note that the nonce timestamp is considered valid if it is within a `300`-second threshold. This allows for machine clock skew.
```
{
    "secret": settings.BROADCAST_WEBSOCKET_SECRET,
    "nonce": time.now()
}
```

The payload is signed using `HMAC-SHA256` with `settings.BROADCAST_WEBSOCKET_SECRET` as the key. The final payload that is sent, including the http header, is of the form: `secret: nonce_plaintext:HMAC_SHA256({"secret": settings.BROADCAST_WEBSOCKET_SECRET, "nonce": nonce_plaintext})`.

Upon receiving the payload, AWX recomputes the `HMAC-SHA256` digest from the plaintext nonce and the known shared secret, `settings.BROADCAST_WEBSOCKET_SECRET`, and ensures it matches the digest that was sent. If it does not match, the connection is closed. If it does match, the `nonce` is compared to the current time. If the nonce is off by more than `300` seconds, the connection is closed. If both tests pass, the connection is accepted.
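A hedged sketch of this scheme (the real logic lives in `awx.main.consumers.WebsocketSecretAuthHelper`; the exact payload serialization it uses may differ):

```python
import hmac
import json
import time
from hashlib import sha256

def construct_secret(shared_secret: bytes) -> str:
    # Header value: plaintext nonce, then the HMAC-SHA256 digest of the payload.
    nonce = str(int(time.time()))
    payload = json.dumps({"secret": shared_secret.decode(), "nonce": nonce})
    digest = hmac.new(shared_secret, payload.encode(), sha256).hexdigest()
    return f"{nonce}:{digest}"

def verify_secret(shared_secret: bytes, header: str, max_skew: int = 300) -> bool:
    nonce, _, digest = header.partition(":")
    payload = json.dumps({"secret": shared_secret.decode(), "nonce": nonce})
    expected = hmac.new(shared_secret, payload.encode(), sha256).hexdigest()
    if not hmac.compare_digest(digest, expected):
        return False  # not signed with the shared secret
    return abs(time.time() - int(nonce)) <= max_skew  # replay-window check
```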

## Protocol

You can connect to the AWX channels implementation using any standard websocket library by pointing it to `/websocket/`. You must provide a valid Auth Token in the request URL.

Once you've connected, you are not subscribed to any event groups. You subscribe by sending a `json` request that looks like the following:

```
'groups': {
    'jobs': ['status_changed', 'summary'],
    'schedules': ['changed'],
    'ad_hoc_command_events': [ids...],
    'job_events': [ids...],
    'workflow_events': [ids...],
    'project_update_events': [ids...],
    'inventory_update_events': [ids...],
    'system_job_events': [ids...],
    'control': ['limit_reached_<user_id>'],
}
```

These map to the event group and event type that the user is interested in. Sending in a new groups dictionary will clear all previously-subscribed groups before subscribing to the newly requested ones. This is intentional, and makes single-page navigation much easier since users only need to care about current subscriptions.
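A hedged client-side example using the third-party `websockets` library (the host and token are placeholders; the subscription payload mirrors the format above):

```python
import asyncio
import json

import websockets

async def watch_jobs():
    # The Auth Token goes in the request URL, as described above.
    uri = "wss://awx.example.com/websocket/?token=<auth-token>"
    async with websockets.connect(uri) as ws:
        await ws.send(json.dumps({"groups": {"jobs": ["status_changed", "summary"]}}))
        async for message in ws:
            print(json.loads(message))

asyncio.run(watch_jobs())
```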

## Deployment

This section will specifically discuss deployment in the context of websockets and the path those requests take through the system.

**Note:** The deployment of AWX changes slightly with the introduction of `django-channels` and websockets. There are some minor differences between production and development deployments that will be pointed out in this document, but the actual services that run the code and handle the requests are identical between the two environments.

### Services

| Name | Details |
|:-----------:|:-----------------------------------------------------------------------------------------------------------:|
| `nginx` | listens on ports 80/443, handles HTTPS proxying, serves static assets, routes requests for `daphne` and `uwsgi` |
| `uwsgi` | listens on port 8050, handles API requests |
| `daphne` | listens on port 8051, handles websocket requests |
| `wsbroadcast` | no listening port, forwards all group messages to all cluster nodes |
| `supervisord` | (production-only) handles the process management of all the services except `nginx` |

When a request comes in to `nginx`, has the `Upgrade` header, and is for the path `/websocket`, `nginx` knows that it should route that request to our `daphne` service.

`daphne` handles websocket connections proxied by nginx.

`wsbroadcast` fully connects each cluster node to every other cluster node via the `/websocket/broadcast/` endpoint and sends a copy of all group websocket messages (i.e. job event type messages) to all other cluster nodes.

### Development
- `nginx` listens on 8013/8043 instead of 80/443

**TODO: The information previously contained within this document has become outdated. This document will be rewritten in the near future.**

BIN licenses/psycopg-3.1.4.tar.gz (Normal file): Binary file not shown.
165 licenses/psycopg.txt (Normal file)
@ -0,0 +1,165 @@
                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library.  The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library.  You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library.  A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
@ -36,6 +36,7 @@ openshift
pexpect==4.7.0  # see library notes
prometheus_client
psycopg2
psycopg  # psycopg3 is used to listen for pg_notify messages from web servers in awx.main.wsrelay where asyncio is used
psutil
pygerduty
pyparsing==2.4.6  # Upgrading to v3 of pyparsing introduce errors on smart host filtering: Expected 'or' term, found 'or' (at char 15), (line:1, col:16)

@ -265,6 +265,8 @@ prometheus-client==0.15.0
    # via -r /awx_devel/requirements/requirements.in
psutil==5.9.4
    # via -r /awx_devel/requirements/requirements.in
psycopg==3.1.4
    # via -r /awx_devel/requirements/requirements.in
psycopg2==2.9.5
    # via -r /awx_devel/requirements/requirements.in
ptyprocess==0.7.0
@ -425,7 +427,7 @@ txaio==22.2.1
typing-extensions==4.4.0
    # via
    #   azure-core
    #   pydantic
    #   psycopg
    #   setuptools-rust
    #   setuptools-scm
    #   twisted

@ -24,4 +24,5 @@ cat << EOF > /var/lib/awx/rsyslog/rsyslog.conf
action(type="omfile" file="/dev/null")
EOF

exec supervisord -c /etc/supervisord.conf
exec supervisord -c /etc/supervisor_rsyslog.conf

20 tools/ansible/roles/dockerfile/files/launch_awx_web.sh (Executable file)
@ -0,0 +1,20 @@
#!/usr/bin/env bash
if [ `id -u` -ge 500 ]; then
    echo "awx:x:`id -u`:`id -g`:,,,:/var/lib/awx:/bin/bash" >> /tmp/passwd
    cat /tmp/passwd > /etc/passwd
    rm /tmp/passwd
fi

if [ -n "${AWX_KUBE_DEVEL}" ]; then
    pushd /awx_devel
    make awx-link
    popd

    export SDB_NOTIFY_HOST=$MY_POD_IP
fi

set -e

wait-for-migrations

exec supervisord -c /etc/supervisord_web.conf

@ -9,8 +9,9 @@
    src: "{{ item }}.j2"
    dest: "{{ dockerfile_dest }}/{{ template_dest }}/{{ item }}"
  with_items:
    - "supervisor.conf"
    - "supervisor_web.conf"
    - "supervisor_task.conf"
    - "supervisor_rsyslog.conf"

- name: Render Dockerfile
  template:

@ -194,7 +194,6 @@ ENV _CONTAINERS_USERNS_CONFIGURED=""
RUN mkdir -p /etc/containers/registries.conf.d/ && echo "unqualified-search-registries = []" >> /etc/containers/registries.conf.d/force-fully-qualified-images.conf && chmod 644 /etc/containers/registries.conf.d/force-fully-qualified-images.conf
{% endif %}

# Create default awx rsyslog config
ADD tools/ansible/roles/dockerfile/files/rsyslog.conf /var/lib/awx/rsyslog/rsyslog.conf
ADD tools/ansible/roles/dockerfile/files/wait-for-migrations /usr/local/bin/wait-for-migrations
ADD tools/ansible/roles/dockerfile/files/stop-supervisor /usr/local/bin/stop-supervisor
@ -211,10 +210,12 @@ ADD tools/scripts/config-watcher /usr/bin/config-watcher
ADD tools/docker-compose/containers.conf /etc/containers/containers.conf
ADD tools/docker-compose/podman-containers.conf /var/lib/awx/.config/containers/containers.conf
{% else %}
ADD tools/ansible/roles/dockerfile/files/launch_awx.sh /usr/bin/launch_awx.sh
ADD tools/ansible/roles/dockerfile/files/launch_awx_web.sh /usr/bin/launch_awx_web.sh
ADD tools/ansible/roles/dockerfile/files/launch_awx_task.sh /usr/bin/launch_awx_task.sh
ADD {{ template_dest }}/supervisor.conf /etc/supervisord.conf
ADD tools/ansible/roles/dockerfile/files/launch_awx_rsyslog.sh /usr/bin/launch_awx_rsyslog.sh
ADD {{ template_dest }}/supervisor_web.conf /etc/supervisord_web.conf
ADD {{ template_dest }}/supervisor_task.conf /etc/supervisord_task.conf
ADD {{ template_dest }}/supervisor_rsyslog.conf /etc/supervisor_rsyslog.conf
{% endif %}
{% if (build_dev|bool) or (kube_dev|bool) %}
ADD tools/docker-compose/awx.egg-link /tmp/awx.egg-link
@ -296,7 +297,6 @@ USER 1000
EXPOSE 8052

ENTRYPOINT ["dumb-init", "--"]
CMD /usr/bin/launch_awx.sh
VOLUME /var/lib/nginx
VOLUME /var/lib/awx/.local/share/containers
{% endif %}

@ -0,0 +1,56 @@
[supervisord]
nodaemon = True
umask = 022
logfile = /dev/stdout
logfile_maxbytes = 0
pidfile = /var/run/supervisor/supervisor.rsyslog.pid

[program:awx-rsyslogd]
command = rsyslogd -n -i /var/run/awx-rsyslog/rsyslog.pid -f /var/lib/awx/rsyslog/rsyslog.conf
autorestart = true
startsecs = 30
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:awx-rsyslog-configurer]
{% if kube_dev | bool %}
command = make run-rsyslog-configurer
directory = /awx_devel
{% else %}
command = awx-manage run_rsyslog_configurer
directory = /var/lib/awx
{% endif %}
autorestart = true
startsecs = 30
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[group:tower-processes]
programs=awx-rsyslog-configurer,awx-rsyslogd
priority=5

[eventlistener:superwatcher]
command=stop-supervisor
events=PROCESS_STATE_FATAL
autorestart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[unix_http_server]
file=/var/run/supervisor/supervisor.rsyslog.sock

[supervisorctl]
serverurl=unix:///var/run/supervisor/supervisor.rsyslog.sock ; use a unix:// URL for a unix socket

[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
@ -22,6 +22,23 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:wsrelay]
{% if kube_dev | bool %}
command = make run-wsrelay
directory = /awx_devel
{% else %}
command = awx-manage run_wsrelay
directory = /var/lib/awx
{% endif %}
autorestart = true
startsecs = 30
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:callback-receiver]
{% if kube_dev | bool %}
command = make receiver
@ -40,7 +57,7 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[group:tower-processes]
programs=dispatcher,callback-receiver
programs=dispatcher,callback-receiver,wsrelay
priority=5

[eventlistener:superwatcher]

@ -22,12 +22,11 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:uwsgi]

{% if kube_dev | bool %}
command = make uwsgi
directory = /awx_devel
environment =
    DEV_RELOAD_COMMAND='supervisorctl -c /etc/supervisord_task.conf restart all; supervisorctl restart tower-processes:daphne tower-processes:wsbroadcast'
    DEV_RELOAD_COMMAND='supervisorctl -c /etc/supervisord_task.conf restart all; supervisorctl restart tower-processes:daphne'
{% else %}
command = /var/lib/awx/venv/awx/bin/uwsgi /etc/tower/uwsgi.ini
{% endif %}
@ -58,12 +57,12 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:wsbroadcast]
[program:heartbeet]
{% if kube_dev | bool %}
command = make wsbroadcast
command = make run-heartbeet
directory = /awx_devel
{% else %}
command = awx-manage run_wsbroadcast
command = awx-manage run_heartbeet
directory = /var/lib/awx
{% endif %}
autorestart = true
@ -75,8 +74,14 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:awx-rsyslogd]
command = rsyslogd -n -i /var/run/awx-rsyslog/rsyslog.pid -f /var/lib/awx/rsyslog/rsyslog.conf
[program:awx-cache-clear]
{% if kube_dev | bool %}
command = make run-cache-clear
directory = /awx_devel
{% else %}
command = awx-manage run_cache_clear
directory = /var/lib/awx
{% endif %}
autorestart = true
startsecs = 30
stopasgroup=true
@ -87,7 +92,7 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[group:tower-processes]
programs=nginx,uwsgi,daphne,wsbroadcast,awx-rsyslogd
programs=nginx,uwsgi,daphne,awx-cache-clear,heartbeet
priority=5

[eventlistener:superwatcher]

@ -293,7 +293,7 @@ Certain features or bugs are only applicable when running a cluster of AWX nodes

`CONTROL_PLANE_NODE_COUNT` is configurable and defaults to 1, effectively a non-clustered AWX.

Note that you may see multiple messages of the form `2021-03-04 20:11:47,666 WARNING [-] awx.main.wsbroadcast Connection from awx_2 to awx_5 failed: 'Cannot connect to host awx_5:8013 ssl:False [Name or service not known]'.`. This can happen when you bring up a cluster of many nodes, say 10, and then bring up a cluster of fewer nodes, say 3. In this example, there will be 7 `Instance` records in the database that represent AWX instances. The AWX development environment mimics the VM deployment (vs. Kubernetes) and expects the missing nodes to be brought back to healthy by the admin. The warning messages you are seeing are all of the AWX nodes trying to connect to the websocket backplane. You can manually delete the `Instance` records from the database, i.e. `Instance.objects.get(hostname='awx_9').delete()`, to stop the warnings.
Note that you may see multiple messages of the form `2021-03-04 20:11:47,666 WARNING [-] awx.main.wsrelay Connection from awx_2 to awx_5 failed: 'Cannot connect to host awx_5:8013 ssl:False [Name or service not known]'.`. This can happen when you bring up a cluster of many nodes, say 10, and then bring up a cluster of fewer nodes, say 3. In this example, there will be 7 `Instance` records in the database that represent AWX instances. The AWX development environment mimics the VM deployment (vs. Kubernetes) and expects the missing nodes to be brought back to healthy by the admin. The warning messages you are seeing are all of the AWX nodes trying to connect to the websocket backplane. You can manually delete the `Instance` records from the database, i.e. `Instance.objects.get(hostname='awx_9').delete()`, to stop the warnings.

### Start with Minikube

@ -32,7 +32,7 @@ backend nodes
    option httpchk HEAD / HTTP/1.1\r\nHost:localhost
{% for i in range(control_plane_node_count|int) %}
{% set container_postfix = loop.index %}
    server tools_awx_{{ container_postfix }} tools_awx_{{ container_postfix }}:8013 check
    server tools_awx_{{ container_postfix }} tools_awx_{{ container_postfix }}:8013 check inter 10s
{% endfor %}

backend nodes_ssl
@ -40,7 +40,7 @@ backend nodes_ssl
    balance roundrobin
{% for i in range(control_plane_node_count|int) %}
{% set container_postfix = loop.index %}
    server tools_awx_{{ container_postfix }} tools_awx_{{ container_postfix }}:8043 check
    server tools_awx_{{ container_postfix }} tools_awx_{{ container_postfix }}:8043 check inter 10s
{% endfor %}

listen stats

@ -8,24 +8,47 @@ command = make dispatcher
autorestart = true
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true

[program:awx-receiver]
command = make receiver
autorestart = true
stopasgroup=true
killasgroup=true
stdout_events_enabled = true
stderr_events_enabled = true

[program:awx-wsrelay]
command = make run-wsrelay
autorestart = true
stopasgroup=true
killasgroup=true
stdout_events_enabled = true
stderr_events_enabled = true

[program:awx-heartbeet]
command = make run-heartbeet
autorestart = true
stopasgroup=true
killasgroup=true
stdout_events_enabled = true
stderr_events_enabled = true

[program:awx-rsyslog-configurer]
command = make run-rsyslog-configurer
autorestart = true
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:awx-wsbroadcast]
command = make wsbroadcast
autorestart = true
[program:awx-cache-clear]
command = make run-cache-clear
autorestart = true
stopasgroup=true
killasgroup=true
@ -41,30 +64,24 @@ stopwaitsecs = 1
stopsignal=KILL
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true

[program:awx-daphne]
command = make daphne
autorestart = true
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true

[program:awx-nginx]
command = make nginx
autorestart = true
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true

[program:awx-rsyslogd]
command = rsyslogd -n -i /var/run/awx-rsyslog/rsyslog.pid -f /var/lib/awx/rsyslog/rsyslog.conf
@ -80,13 +97,11 @@ command = receptor --config /etc/receptor/receptor.conf
autorestart = true
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true

[group:tower-processes]
programs=awx-dispatcher,awx-receiver,awx-uwsgi,awx-daphne,awx-nginx,awx-wsbroadcast,awx-rsyslogd
programs=awx-dispatcher,awx-receiver,awx-uwsgi,awx-daphne,awx-nginx,awx-wsrelay,awx-rsyslogd,awx-heartbeet,awx-rsyslog-configurer
priority=5

[program:awx-autoreload]
@ -95,10 +110,6 @@ autostart = true
autorestart = true
stopasgroup=true
killasgroup=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true

@ -107,9 +118,6 @@ command=stop-supervisor
events=PROCESS_STATE_FATAL
autorestart = true
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0

[unix_http_server]
file=/var/run/supervisor/supervisor.sock