Merge pull request #4242 from ansible/logging_int

Logging Integration
Alan Rominger 2016-12-02 16:51:00 -05:00 committed by GitHub
commit ef87a58239
31 changed files with 766 additions and 46 deletions

View File

@ -433,7 +433,7 @@ celeryd:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/tower/bin/activate; \
fi; \
$(PYTHON) manage.py celeryd -l DEBUG -B --autoreload --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default,scheduler,$(COMPOSE_HOST)
$(PYTHON) manage.py celeryd -l DEBUG -B --autoreload --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default,scheduler,broadcast_all,$(COMPOSE_HOST)
#$(PYTHON) manage.py celery multi show projects jobs default -l DEBUG -Q:projects projects -Q:jobs jobs -Q:default default -c:projects 1 -c:jobs 3 -c:default 3 -Ofair -B --schedule=$(CELERY_SCHEDULE_FILE)
# Run to start the zeromq callback receiver
@ -878,6 +878,21 @@ docker-clean:
docker-refresh: docker-clean docker-compose
# Docker Development Environment with Elastic Stack Connected
docker-compose-elk: docker-auth
TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose-cluster-elk: docker-auth
TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
clean-elk:
docker stop tools_kibana_1
docker stop tools_logstash_1
docker stop tools_elasticsearch_1
docker rm tools_logstash_1
docker rm tools_elasticsearch_1
docker rm tools_kibana_1
mongo-debug-ui:
docker run -it --rm --name mongo-express --link tools_mongo_1:mongo -e ME_CONFIG_OPTIONS_EDITORTHEME=ambiance -e ME_CONFIG_BASICAUTH_USERNAME=admin -e ME_CONFIG_BASICAUTH_PASSWORD=password -p 8081:8081 knickers/mongo-express

View File

@ -259,14 +259,16 @@ class ApiV1ConfigView(APIView):
try:
data_actual = json.dumps(request.data)
except Exception:
# FIX: Log
logger.info(smart_text(u"Invalid JSON submitted for Tower license."),
extra=dict(actor=request.user.username))
return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST)
try:
from awx.main.task_engine import TaskEnhancer
license_data = json.loads(data_actual)
license_data_validated = TaskEnhancer(**license_data).validate_enhancements()
except Exception:
# FIX: Log
logger.warning(smart_text(u"Invalid Tower license submitted."),
extra=dict(actor=request.user.username))
return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)
# If the license is valid, write it to the database.
@ -275,6 +277,8 @@ class ApiV1ConfigView(APIView):
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
return Response(license_data_validated)
logger.warning(smart_text(u"Invalid Tower license submitted."),
extra=dict(actor=request.user.username))
return Response({"error": _("Invalid license")}, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request):
@ -541,12 +545,14 @@ class AuthTokenView(APIView):
reason='')[0]
token.refresh()
if 'username' in request.data:
logger.info(smart_text(u"User {} logged in".format(request.data['username'])))
logger.info(smart_text(u"User {} logged in".format(request.data['username'])),
extra=dict(actor=request.data['username']))
except IndexError:
token = AuthToken.objects.create(user=serializer.validated_data['user'],
request_hash=request_hash)
if 'username' in request.data:
logger.info(smart_text(u"User {} logged in".format(request.data['username'])))
logger.info(smart_text(u"User {} logged in".format(request.data['username'])),
extra=dict(actor=request.data['username']))
# Get user un-expired tokens that are not invalidated that are
# over the configured limit.
# Mark them as invalid and inform the user
@ -564,7 +570,8 @@ class AuthTokenView(APIView):
}
return Response({'token': token.key, 'expires': token.expires}, headers=headers)
if 'username' in request.data:
logger.warning(smart_text(u"Login failed for user {}".format(request.data['username'])))
logger.warning(smart_text(u"Login failed for user {}".format(request.data['username'])),
extra=dict(actor=request.data['username']))
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

View File

@ -2,6 +2,8 @@
from django.apps import AppConfig
# from django.core import checks
from django.utils.translation import ugettext_lazy as _
from django.utils.log import configure_logging
from django.conf import settings
class ConfConfig(AppConfig):
@ -13,4 +15,8 @@ class ConfConfig(AppConfig):
self.module.autodiscover()
from .settings import SettingsWrapper
SettingsWrapper.initialize()
if settings.LOG_AGGREGATOR_ENABLED:
LOGGING = settings.LOGGING
LOGGING['handlers']['http_receiver']['class'] = 'awx.main.utils.handlers.HTTPSHandler'
configure_logging(settings.LOGGING_CONFIG, LOGGING)
# checks.register(SettingsWrapper._check_settings)

View File

@ -3,9 +3,9 @@ import logging
# Django
from django.conf import settings
from django.core.cache import cache
from django.core.signals import setting_changed
from django.db.models.signals import post_save, pre_delete, post_delete
from django.core.cache import cache
from django.dispatch import receiver
# Tower
@ -13,6 +13,7 @@ import awx.main.signals
from awx.conf import settings_registry
from awx.conf.models import Setting
from awx.conf.serializers import SettingSerializer
from awx.main.tasks import clear_cache_keys
logger = logging.getLogger('awx.conf.signals')
@ -25,12 +26,16 @@ def handle_setting_change(key, for_delete=False):
# When a setting changes or is deleted, remove its value from cache along
# with any other settings that depend on it.
setting_keys = [key]
setting_key_dict = {}
setting_key_dict[key] = key
for dependent_key in settings_registry.get_dependent_settings(key):
# Note: Doesn't handle multiple levels of dependencies!
setting_keys.append(dependent_key)
setting_key_dict[dependent_key] = dependent_key
cache_keys = set([Setting.get_cache_key(k) for k in setting_keys])
logger.debug('cache delete_many(%r)', cache_keys)
logger.debug('sending signals to delete cache keys(%r)', cache_keys)
cache.delete_many(cache_keys)
clear_cache_keys.delay(setting_key_dict)
# Send setting_changed signal with new value for each setting.
for setting_key in setting_keys:

View File

@ -223,3 +223,85 @@ register(
category=_('Jobs'),
category_slug='jobs',
)
register(
'LOG_AGGREGATOR_HOST',
field_class=fields.CharField,
allow_null=True,
label=_('Logging Aggregator Receiving Host'),
help_text=_('External host that maintains a log collector to send logs to'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_PORT',
field_class=fields.IntegerField,
allow_null=True,
label=_('Logging Aggregator Receiving Port'),
help_text=_('Port that the log collector is listening on'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_TYPE',
field_class=fields.ChoiceField,
choices=['logstash', 'splunk', 'loggly', 'sumologic', 'other'],
allow_null=True,
label=_('Logging Aggregator Type: Logstash, Splunk, Loggly, etc'),
help_text=_('The type of log aggregator service to format messages for'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_USERNAME',
field_class=fields.CharField,
allow_null=True,
label=_('Logging Aggregator Username to Authenticate With'),
help_text=_('Username for Logstash or others (basic auth)'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_PASSWORD',
field_class=fields.CharField,
allow_null=True,
label=_('Logging Aggregator Password to Authenticate With'),
help_text=_('Password for Logstash or others (basic auth)'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_LOGGERS',
field_class=fields.StringListField,
default=['awx', 'activity_stream', 'job_events', 'system_tracking'],
label=_('Loggers to send data to the log aggregator from'),
help_text=_('List of loggers that will send HTTP logs to the collector; these can '
'include any or all of:\n'
'activity_stream - duplicates of records entered in the activity stream\n'
'job_events - callback data from Ansible job events\n'
'system_tracking - data generated from scan jobs\n'
'Sending generic Tower logs must be configured through local_settings.py '
'instead of this mechanism.'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_INDIVIDUAL_FACTS',
field_class=fields.BooleanField,
default=False,
label=_('Flag denoting whether to send an individual message for each system tracking fact'),
help_text=_('If not set, the data from system tracking will be sent inside '
'of a single dictionary, but if set, separate requests will be sent '
'for each package, service, etc. that is found in the scan.'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_ENABLED',
field_class=fields.BooleanField,
default=False,
label=_('Flag denoting whether to use the external logging system'),
help_text=_('If disabled, logs will not be sent to the external aggregator and only the normal logging settings will be used.'),
category=_('Logging'),
category_slug='logging',
)
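
For orientation, the names accepted by `LOG_AGGREGATOR_LOGGERS` correspond to the `awx.analytics.*` loggers used later in this diff; the external handler matches on the last dotted component of the logger name. A minimal standalone sketch (not tied to a configured Django environment):

```python
# Sketch: how names in LOG_AGGREGATOR_LOGGERS line up with logger usage.
# The external handler compares record.name.split('.')[-1] against the list,
# so 'job_events' in the list matches the 'awx.analytics.job_events' logger.
import logging

analytics_logger = logging.getLogger('awx.analytics.job_events')
analytics_logger.info('Job event data saved.',
                      extra=dict(event_model_data={'id': 1, 'event': 'runner_on_ok'}))
```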

View File

@ -1,5 +1,6 @@
import json
import urlparse
import logging
from channels import Group
from channels.sessions import channel_session
@ -8,6 +9,9 @@ from django.contrib.auth.models import User
from awx.main.models.organization import AuthToken
logger = logging.getLogger('awx.main.consumers')
def discard_groups(message):
if 'groups' in message.channel_session:
for group in message.channel_session['groups']:
@ -52,11 +56,13 @@ def ws_receive(message):
auth_token = validate_token(token)
if auth_token is None:
logger.error("Authentication Failure validating user")
message.reply_channel.send({"text": json.dumps({"error": "invalid auth token"})})
return None
user = user_from_token(auth_token)
if user is None:
logger.error("No valid user corresponding to submitted auth_token")
message.reply_channel.send({"text": json.dumps({"error": "no valid user"})})
return None

View File

@ -18,6 +18,7 @@ from awx.main.models.fact import Fact
from awx.main.models.inventory import Host
logger = logging.getLogger('awx.main.commands.run_fact_cache_receiver')
analytics_logger = logging.getLogger('awx.analytics.system_tracking')
class FactBrokerWorker(ConsumerMixin):
@ -51,8 +52,6 @@ class FactBrokerWorker(ConsumerMixin):
return (module, facts)
def process_fact_message(self, body, message):
print body
print type(body)
hostname = body['host']
inventory_id = body['inventory_id']
facts_data = body['facts']
@ -83,6 +82,8 @@ class FactBrokerWorker(ConsumerMixin):
# Create new Fact entry
fact_obj = Fact.add_fact(host_obj.id, module_name, self.timestamp, facts)
logger.info('Created new fact <fact_id, module> <%s, %s>' % (fact_obj.id, module_name))
analytics_logger.info('Received message with fact data', extra=dict(
module_name=module_name, facts_data=facts))
return fact_obj

View File

@ -16,6 +16,7 @@ from awx.api.authentication import TokenAuthentication
logger = logging.getLogger('awx.main.middleware')
analytics_logger = logging.getLogger('awx.analytics.activity_stream')
class ActivityStreamMiddleware(threading.local):
@ -46,6 +47,10 @@ class ActivityStreamMiddleware(threading.local):
instance.actor = drf_user
try:
instance.save(update_fields=['actor'])
analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1),
extra=dict(changes=instance.changes, relationship=instance.object_relationship_type,
actor=drf_user.username, operation=instance.operation,
object1=instance.object1, object2=instance.object2))
except IntegrityError:
logger.debug("Integrity Error saving Activity Stream instance for id : " + str(instance.id))
# else:

View File

@ -43,6 +43,7 @@ from awx.main.consumers import emit_channel_notification
logger = logging.getLogger('awx.main.models.jobs')
analytics_logger = logging.getLogger('awx.analytics.job_events')
__all__ = ['JobTemplate', 'Job', 'JobHostSummary', 'JobEvent', 'SystemJobOptions', 'SystemJobTemplate', 'SystemJob']
@ -1186,6 +1187,8 @@ class JobEvent(CreatedModifiedModel):
if parent_id:
kwargs['parent_id'] = parent_id
analytics_logger.info('Job event data saved.', extra=dict(event_model_data=kwargs))
job_event = JobEvent.objects.create(**kwargs)
# Cache this job event ID vs. UUID for future parent lookups.

View File

@ -42,6 +42,8 @@ from django.utils.encoding import smart_str
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from django.utils.log import configure_logging
# AWX
from awx.main.constants import CLOUD_PROVIDERS
@ -83,6 +85,22 @@ def celery_startup(conf=None, **kwargs):
logger.error("Failed to rebuild schedule {}: {}".format(sch, e))
@task(queue='broadcast_all')
def clear_cache_keys(cache_keys):
set_of_keys = set([key for key in cache_keys])
logger.debug('cache delete_many(%r)', set_of_keys)
cache.delete_many(set_of_keys)
for setting_key in set_of_keys:
if setting_key.startswith('LOG_AGGREGATOR_'):
LOGGING = settings.LOGGING
if settings.LOG_AGGREGATOR_ENABLED:
LOGGING['handlers']['http_receiver']['class'] = 'awx.main.utils.handlers.HTTPSHandler'
else:
LOGGING['handlers']['http_receiver']['class'] = 'awx.main.utils.handlers.HTTPSNullHandler'
configure_logging(settings.LOGGING_CONFIG, LOGGING)
break
@task(queue='default')
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):

View File

@ -0,0 +1,25 @@
# Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.
# AWX
from awx.main.utils.common import * # noqa
# Fields that didn't get included in __all__
# TODO: after initial commit of file move to devel, these can be added
# to common.py __all__ and removed here
from awx.main.utils.common import ( # noqa
RequireDebugTrueOrTest,
encrypt_field,
parse_yaml_or_json,
decrypt_field,
build_url,
timestamp_apiformat,
model_instance_diff,
model_to_dict,
check_proot_installed,
build_proot_temp_dir,
wrap_args_with_proot,
get_system_task_capacity,
decrypt_field_value
)

View File

@ -0,0 +1,117 @@
# Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.
from logstash.formatter import LogstashFormatterVersion1
from django.conf import settings
from copy import copy
import json
import time
class LogstashFormatter(LogstashFormatterVersion1):
def __init__(self, **kwargs):
ret = super(LogstashFormatter, self).__init__(**kwargs)
self.host_id = settings.CLUSTER_HOST_ID
return ret
def reformat_data_for_log(self, raw_data, kind=None):
'''
Process dictionaries from various contexts (job events, activity stream
changes, etc.) to give meaningful information
Output a dictionary which will be passed in logstash or syslog format
to the logging receiver
'''
if kind == 'activity_stream':
return raw_data
rename_fields = set((
'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
'msecs', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
'processName', 'relativeCreated', 'thread', 'threadName', 'extra',
'auth_token', 'tags', 'host', 'host_id', 'level', 'port', 'uuid'))
if kind == 'system_tracking':
data = copy(raw_data['facts_data'])
elif kind == 'job_events':
data = copy(raw_data['event_model_data'])
else:
data = copy(raw_data)
if isinstance(data, basestring):
data = json.loads(data)
skip_fields = ('res', 'password', 'event_data', 'stdout')
data_for_log = {}
def index_by_name(alist):
"""Takes a list of dictionaries with `name` as a key in each dict
and returns a dictionary indexed by those names"""
adict = {}
for item in alist:
subdict = copy(item)
if 'name' in subdict:
name = subdict.get('name', None)
elif 'path' in subdict:
name = subdict.get('path', None)
if name:
# Logstash v2 can not accept '.' in a name
name = name.replace('.', '_')
adict[name] = subdict
return adict
if kind == 'job_events':
data.update(data.get('event_data', {}))
for fd in data:
if fd in skip_fields:
continue
key = fd
if fd in rename_fields:
key = 'event_%s' % fd
val = data[fd]
if key.endswith('created'):
time_float = time.mktime(data[fd].timetuple())
val = self.format_timestamp(time_float)
data_for_log[key] = val
elif kind == 'system_tracking':
module_name = raw_data['module_name']
if module_name in ['services', 'packages', 'files']:
data_for_log[module_name] = index_by_name(data)
elif module_name == 'ansible':
data_for_log['ansible'] = data
# Remove sub-keys with data type conflicts in elastic search
data_for_log['ansible'].pop('ansible_python_version', None)
data_for_log['ansible']['ansible_python'].pop('version_info', None)
else:
data_for_log['facts'] = data
data_for_log['module_name'] = module_name
return data_for_log
def get_extra_fields(self, record):
fields = super(LogstashFormatter, self).get_extra_fields(record)
if record.name.startswith('awx.analytics'):
log_kind = record.name.split('.')[-1]
fields = self.reformat_data_for_log(fields, kind=log_kind)
return fields
def format(self, record):
message = {
# Fields not included, but exist in related logs
# 'path': record.pathname
# '@version': '1', # from python-logstash
# 'tags': self.tags,
'@timestamp': self.format_timestamp(record.created),
'message': record.getMessage(),
'host': self.host,
'type': self.message_type,
# Extra Fields
'level': record.levelname,
'logger_name': record.name,
'cluster_host_id': self.host_id
}
# Add extra fields
message.update(self.get_extra_fields(record))
# If exception, add debug info
if record.exc_info:
message.update(self.get_debug_fields(record))
return self.serialize(message)
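
To make the renaming rule above concrete, here is a standalone sketch that mimics the `job_events` branch of `reformat_data_for_log` rather than invoking the formatter itself (the class requires Django settings to be configured); the field names are illustrative:

```python
# Standalone mimic of the job_events handling: fields that collide with
# reserved log-record names get an 'event_' prefix, and noisy fields such
# as 'stdout' and 'event_data' are skipped entirely.
RENAME_FIELDS = {'id', 'host', 'created', 'name', 'level'}   # subset of the real set
SKIP_FIELDS = ('res', 'password', 'event_data', 'stdout')

event = {'id': 42, 'host': 'node-1', 'play': 'deploy', 'task': 'copy files', 'stdout': '...'}
data_for_log = {}
for field, value in event.items():
    if field in SKIP_FIELDS:
        continue
    key = 'event_%s' % field if field in RENAME_FIELDS else field
    data_for_log[key] = value

# data_for_log == {'event_id': 42, 'event_host': 'node-1', 'play': 'deploy', 'task': 'copy files'}
```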

awx/main/utils/handlers.py (new file, 131 lines)
View File

@ -0,0 +1,131 @@
# Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.
# Python
import logging
import json
import requests
from copy import copy
# loggly
import traceback
from requests_futures.sessions import FuturesSession
# custom
from django.conf import settings as django_settings
from django.utils.log import NullHandler
# AWX external logging handler, generally designed to be used
# with the accompanying LogstashHandler, derives from python-logstash library
# Non-blocking request accomplished by FuturesSession, similar
# to the loggly-python-handler library (not used)
# Translation of parameter names to names in Django settings
PARAM_NAMES = {
'host': 'LOG_AGGREGATOR_HOST',
'port': 'LOG_AGGREGATOR_PORT',
'message_type': 'LOG_AGGREGATOR_TYPE',
'username': 'LOG_AGGREGATOR_USERNAME',
'password': 'LOG_AGGREGATOR_PASSWORD',
'enabled_loggers': 'LOG_AGGREGATOR_LOGGERS',
'indv_facts': 'LOG_AGGREGATOR_INDIVIDUAL_FACTS',
}
def unused_callback(sess, resp):
pass
class HTTPSNullHandler(NullHandler):
"Placeholder null handler to allow loading without database access"
def __init__(self, host, **kwargs):
return super(HTTPSNullHandler, self).__init__()
class HTTPSHandler(logging.Handler):
def __init__(self, fqdn=False, **kwargs):
super(HTTPSHandler, self).__init__()
self.fqdn = fqdn
for fd in PARAM_NAMES:
# settings values take precedence over the input params
settings_name = PARAM_NAMES[fd]
settings_val = getattr(django_settings, settings_name, None)
if settings_val:
setattr(self, fd, settings_val)
elif fd in kwargs:
setattr(self, fd, kwargs[fd])
else:
setattr(self, fd, None)
self.session = FuturesSession()
self.add_auth_information()
def get_full_message(self, record):
if record.exc_info:
return '\n'.join(traceback.format_exception(*record.exc_info))
else:
return record.getMessage()
def add_auth_information(self):
if self.message_type == 'logstash':
if not self.username:
# Logstash authentication not enabled
return
logstash_auth = requests.auth.HTTPBasicAuth(self.username, self.password)
self.session.auth = logstash_auth
elif self.message_type == 'splunk':
auth_header = "Splunk %s" % self.password
headers = {
"Authorization": auth_header,
"Content-Type": "application/json"
}
self.session.headers.update(headers)
def get_http_host(self):
host = self.host
if not host.startswith('http'):
host = 'http://%s' % self.host
if self.port != 80 and self.port is not None:
host = '%s:%s' % (host, str(self.port))
return host
def get_post_kwargs(self, payload_input):
if self.message_type == 'splunk':
# Splunk needs data nested under key "event"
if not isinstance(payload_input, dict):
payload_input = json.loads(payload_input)
payload_input = {'event': payload_input}
if isinstance(payload_input, dict):
payload_str = json.dumps(payload_input)
else:
payload_str = payload_input
return dict(data=payload_str, background_callback=unused_callback)
def emit(self, record):
if (self.host == '' or self.enabled_loggers is None or
record.name.split('.')[-1] not in self.enabled_loggers):
return
try:
payload = self.format(record)
host = self.get_http_host()
# Special action for System Tracking, queue up multiple log messages
if self.indv_facts:
payload_data = json.loads(payload)
if record.name.startswith('awx.analytics.system_tracking'):
module_name = payload_data['module_name']
if module_name in ['services', 'packages', 'files']:
facts_dict = payload_data.pop(module_name)
for key in facts_dict:
fact_payload = copy(payload_data)
fact_payload.update(facts_dict[key])
self.session.post(host, **self.get_post_kwargs(fact_payload))
return
self.session.post(host, **self.get_post_kwargs(payload))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
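
For context on the "non-blocking request" note above, this is a minimal standalone sketch of the `requests-futures` pattern the handler builds on; the URL is a placeholder, not a value defined in this changeset:

```python
# Sketch of the fire-and-forget POST used by the handler: FuturesSession.post()
# returns a Future immediately and the request completes on a worker thread,
# so the thread emitting the log record never blocks on network I/O.
import json
from requests_futures.sessions import FuturesSession

session = FuturesSession()
payload = json.dumps({'message': 'example log line', 'level': 'INFO'})
future = session.post('http://localhost:8085', data=payload)
# .result() is only needed if the caller wants to inspect the response;
# the logging handler never waits on it.
# response = future.result()
```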

View File

@ -9,6 +9,7 @@ import djcelery
from datetime import timedelta
from kombu import Queue, Exchange
from kombu.common import Broadcast
# global settings
from django.conf import global_settings
@ -374,6 +375,7 @@ CELERY_QUEUES = (
Queue('default', Exchange('default'), routing_key='default'),
Queue('jobs', Exchange('jobs'), routing_key='jobs'),
Queue('scheduler', Exchange('scheduler', type='topic'), routing_key='scheduler.job.#', durable=False),
Broadcast('broadcast_all')
# Projects use a fanout queue, this isn't super well supported
)
CELERY_ROUTES = {'awx.main.tasks.run_job': {'queue': 'jobs',
@ -824,6 +826,8 @@ TOWER_URL_BASE = "https://towerhost"
TOWER_SETTINGS_MANIFEST = {}
LOG_AGGREGATOR_ENABLED = False
# Logging configuration.
LOGGING = {
'version': 1,
@ -843,6 +847,9 @@ LOGGING = {
'simple': {
'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s',
},
'json': {
'()': 'awx.main.utils.formatters.LogstashFormatter'
}
},
'handlers': {
'console': {
@ -864,6 +871,12 @@ LOGGING = {
'class': 'django.utils.log.NullHandler',
'formatter': 'simple',
},
'http_receiver': {
'class': 'awx.main.utils.handlers.HTTPSNullHandler',
'level': 'INFO',
'formatter': 'json',
'host': '',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
@ -940,7 +953,6 @@ LOGGING = {
'django.request': {
'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'],
'level': 'WARNING',
'propagate': False,
},
'rest_framework.request': {
'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'],
@ -955,29 +967,30 @@ LOGGING = {
'level': 'DEBUG',
},
'awx.conf': {
'handlers': ['console', 'file', 'tower_warnings'],
'handlers': ['null'],
'level': 'WARNING',
'propagate': False,
},
'awx.conf.settings': {
'handlers': ['null'],
'level': 'WARNING',
},
'awx.main': {
'handlers': ['null']
},
'awx.main.commands.run_callback_receiver': {
'handlers': ['console', 'file', 'callback_receiver'],
'propagate': False
},
'awx.main.commands.run_socketio_service': {
'handlers': ['console', 'file', 'socketio_service'],
'propagate': False
'handlers': ['callback_receiver'],
},
'awx.main.tasks': {
'handlers': ['console', 'file', 'task_system'],
'propagate': False
'handlers': ['task_system']
},
'awx.main.scheduler': {
'handlers': ['console', 'file', 'task_system'],
'propagate': False
'handlers': ['task_system'],
},
'awx.main.consumers': {
'handlers': ['null']
},
'awx.main.commands.run_fact_cache_receiver': {
'handlers': ['console', 'file', 'fact_receiver'],
'propagate': False
'handlers': ['fact_receiver'],
},
'awx.main.access': {
'handlers': ['null'],
@ -991,6 +1004,23 @@ LOGGING = {
'handlers': ['null'],
'propagate': False,
},
'awx.analytics': {
'handlers': ['null'],
'level': 'INFO',
'propagate': False
},
'awx.analytics.job_events': {
'handlers': ['null'],
'level': 'INFO'
},
'awx.analytics.activity_stream': {
'handlers': ['null'],
'level': 'INFO'
},
'awx.analytics.system_tracking': {
'handlers': ['null'],
'level': 'INFO'
},
'django_auth_ldap': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',

View File

@ -52,7 +52,6 @@ TOWER_VENV_PATH = "/var/lib/awx/venv/tower"
LOGGING['handlers']['tower_warnings']['filename'] = '/var/log/tower/tower.log'
LOGGING['handlers']['callback_receiver']['filename'] = '/var/log/tower/callback_receiver.log'
LOGGING['handlers']['socketio_service']['filename'] = '/var/log/tower/socketio_service.log'
LOGGING['handlers']['task_system']['filename'] = '/var/log/tower/task_system.log'
LOGGING['handlers']['fact_receiver']['filename'] = '/var/log/tower/fact_receiver.log'
LOGGING['handlers']['system_tracking_migrations']['filename'] = '/var/log/tower/tower_system_tracking_migrations.log'

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Anthony Lapenna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -33,12 +33,14 @@ psphere==0.5.2
psutil==5.0.0
pygerduty==0.35.1
pyOpenSSL==16.2.0
python-logstash==0.4.6
python-memcached==1.58
python-radius==1.0
python-saml==2.2.0
python-social-auth==0.2.21
pyzmq==14.5.0
redbaron==0.6.2
requests-futures==0.9.7
shade==1.13.1
slackclient==1.0.2
twilio==5.6.0

View File

@ -4,11 +4,6 @@
#
# pip-compile --output-file requirements.txt requirements.in
#
git+https://github.com/ansible/ansiconv.git@tower_1.0.0#egg=ansiconv
git+https://github.com/ansible/django-qsstats-magic.git@tower_0.7.2#egg=django-qsstats-magic
git+https://github.com/ansible/dm.xmlsec.binding.git@master#egg=dm.xmlsec.binding
git+https://github.com/ansible/django-jsonbfield@master#egg=jsonbfield
git+https://github.com/chrismeyersfsu/pyrax@tower#egg=pyrax
amqp==1.4.9 # via kombu
anyjson==0.3.3 # via kombu
apache-libcloud==1.3.0
@ -17,7 +12,7 @@ asgi-amqp==0.3.1
asgiref==1.0.0 # via asgi-amqp, channels, daphne
autobahn==0.16.1 # via daphne
azure-batch==1.0.0 # via azure
azure-common[autorest]==1.1.4 # via azure-batch, azure-mgmt-batch, azure-mgmt-compute, azure-mgmt-keyvault, azure-mgmt-logic, azure-mgmt-network, azure-mgmt-redis, azure-mgmt-resource, azure-mgmt-scheduler, azure-mgmt-storage, azure-servicebus, azure-servicemanagement-legacy, azure-storage
azure-common==1.1.4 # via azure-batch, azure-mgmt-batch, azure-mgmt-compute, azure-mgmt-keyvault, azure-mgmt-logic, azure-mgmt-network, azure-mgmt-redis, azure-mgmt-resource, azure-mgmt-scheduler, azure-mgmt-storage, azure-servicebus, azure-servicemanagement-legacy, azure-storage
azure-mgmt-batch==1.0.0 # via azure-mgmt
azure-mgmt-compute==0.30.0rc6 # via azure-mgmt
azure-mgmt-keyvault==0.30.0rc6 # via azure-mgmt
@ -48,7 +43,7 @@ chardet==2.3.0 # via msrest
cliff==2.3.0 # via osc-lib, python-designateclient, python-heatclient, python-mistralclient, python-neutronclient, python-openstackclient
cmd2==0.6.9 # via cliff
constantly==15.1.0 # via twisted
cryptography==1.5.3 # via azure-storage, pyopenssl, python-magnumclient, secretstorage
cryptography==1.6 # via azure-storage, pyopenssl, python-magnumclient, secretstorage
daphne==0.15.0 # via channels
debtcollector==1.9.0 # via oslo.config, oslo.utils, python-designateclient, python-keystoneclient, python-neutronclient
decorator==4.0.10 # via python-magnumclient, shade
@ -71,7 +66,7 @@ dogpile.cache==0.6.2 # via python-ironicclient, shade
enum34==1.1.6 # via cryptography, msrest
funcsigs==1.0.2 # via debtcollector, mock, oslo.utils
functools32==3.2.3.post2 # via jsonschema
futures==3.0.5 # via azure-storage, python-swiftclient
futures==3.0.5 # via azure-storage, python-swiftclient, requests-futures
gevent-websocket==0.9.5
gevent==1.1.2 # via gevent-websocket
greenlet==0.4.10 # via gevent
@ -111,7 +106,7 @@ msrestazure==0.4.4 # via azure-common
munch==2.0.4 # via shade
netaddr==0.7.18 # via oslo.config, oslo.utils, python-neutronclient
netifaces==0.10.5 # via oslo.utils, shade
oauthlib==2.0.0 # via python-social-auth, requests-oauthlib
oauthlib==2.0.1 # via python-social-auth, requests-oauthlib
openstacksdk==0.9.10 # via python-openstackclient
ordereddict==1.1
os-client-config==1.24.0 # via openstacksdk, osc-lib, python-magnumclient, python-neutronclient, shade
@ -141,17 +136,18 @@ python-cinderclient==1.9.0 # via python-openstackclient, shade
python-dateutil==2.6.0 # via azure-storage
python-designateclient==2.3.0 # via shade
python-glanceclient==2.5.0 # via python-openstackclient, shade
python-heatclient==1.6.0 # via shade
python-heatclient==1.6.1 # via shade
python-ironicclient==1.8.0 # via shade
python-keystoneclient==3.6.0 # via python-glanceclient, python-mistralclient, python-openstackclient, shade
python-ldap==2.4.27 # via django-auth-ldap
python-keystoneclient==3.7.0 # via python-glanceclient, python-mistralclient, python-openstackclient, shade
python-ldap==2.4.28 # via django-auth-ldap
python-logstash==0.4.6
python-magnumclient==2.3.1 # via shade
python-memcached==1.58
python-mistralclient==2.1.1 # via python-troveclient
python-neutronclient==6.0.0 # via shade
python-novaclient==6.0.0 # via ip-associations-python-novaclient-ext, os-diskconfig-python-novaclient-ext, os-networksv2-python-novaclient-ext, os-virtual-interfacesv2-python-novaclient-ext, python-openstackclient, rackspace-auth-openstack, rackspace-novaclient, rax-default-network-flags-python-novaclient-ext, rax-scheduled-images-python-novaclient-ext, shade
python-openid==2.2.5 # via python-social-auth
python-openstackclient==3.4.0 # via python-ironicclient
python-openstackclient==3.4.1 # via python-ironicclient
python-radius==1.0
python-saml==2.2.0
python-social-auth==0.2.21
@ -165,8 +161,9 @@ rackspace-novaclient==2.1
rax-default-network-flags-python-novaclient-ext==0.4.0 # via rackspace-novaclient
rax-scheduled-images-python-novaclient-ext==0.3.1 # via rackspace-novaclient
redbaron==0.6.2
requests-futures==0.9.7
requests-oauthlib==0.7.0 # via msrest, python-social-auth
requests==2.12.1 # via azure-servicebus, azure-servicemanagement-legacy, azure-storage, keystoneauth1, msrest, python-cinderclient, python-designateclient, python-glanceclient, python-heatclient, python-ironicclient, python-keystoneclient, python-magnumclient, python-mistralclient, python-neutronclient, python-novaclient, python-social-auth, python-swiftclient, python-troveclient, requests-oauthlib, slackclient
requests==2.12.1 # via azure-servicebus, azure-servicemanagement-legacy, azure-storage, keystoneauth1, msrest, python-cinderclient, python-designateclient, python-glanceclient, python-heatclient, python-ironicclient, python-keystoneclient, python-magnumclient, python-mistralclient, python-neutronclient, python-novaclient, python-social-auth, python-swiftclient, python-troveclient, requests-futures, requests-oauthlib, slackclient
requestsexceptions==1.1.3 # via os-client-config, shade
rfc3986==0.4.1 # via oslo.config
rply==0.7.4 # via baron
@ -179,7 +176,7 @@ stevedore==1.18.0 # via cliff, keystoneauth1, openstacksdk, osc-lib, osl
suds==0.4 # via psphere
tempora==1.6.1 # via irc, jaraco.logging
twilio==5.6.0
twisted==16.5.0 # via daphne
twisted==16.6.0 # via daphne
txaio==2.5.2 # via autobahn
typing==3.5.2.2 # via m2crypto
unicodecsv==0.14.1 # via cliff

View File

@ -1,6 +1,7 @@
-e git+https://github.com/chrismeyersfsu/pyrax@tower#egg=pyrax
apache-libcloud==1.3.0
azure==2.0.0rc6
kombu==3.0.35
boto==2.43.0
psutil==5.0.0
shade==1.13.1

View File

@ -4,7 +4,8 @@
#
# pip-compile --output-file requirements_ansible.txt requirements_ansible.in
#
git+https://github.com/chrismeyersfsu/pyrax@tower#egg=pyrax
amqp==1.4.9 # via kombu
anyjson==0.3.3 # via kombu
apache-libcloud==1.3.0
appdirs==1.4.0 # via os-client-config, python-ironicclient
azure-batch==1.0.0 # via azure
@ -32,7 +33,7 @@ cffi==1.9.1 # via cryptography
chardet==2.3.0 # via msrest
cliff==2.3.0 # via osc-lib, python-designateclient, python-heatclient, python-mistralclient, python-neutronclient, python-openstackclient
cmd2==0.6.9 # via cliff
cryptography==1.5.3 # via azure-storage, python-magnumclient, secretstorage
cryptography==1.6 # via azure-storage, python-magnumclient, secretstorage
debtcollector==1.9.0 # via oslo.config, oslo.utils, python-designateclient, python-keystoneclient, python-neutronclient
decorator==4.0.10 # via python-magnumclient, shade
dogpile.cache==0.6.2 # via python-ironicclient, shade
@ -51,6 +52,7 @@ jsonpointer==1.10 # via jsonpatch
jsonschema==2.5.1 # via python-designateclient, python-ironicclient, warlock
keyring==10.0.2 # via msrest
keystoneauth1==2.15.0 # via openstacksdk, os-client-config, osc-lib, python-cinderclient, python-designateclient, python-heatclient, python-ironicclient, python-keystoneclient, python-magnumclient, python-neutronclient, python-novaclient, python-openstackclient, python-troveclient, shade
kombu==3.0.35
mock==2.0.0
monotonic==1.2 # via oslo.utils
msgpack-python==0.4.8 # via oslo.serialization
@ -59,7 +61,7 @@ msrestazure==0.4.4 # via azure-common
munch==2.0.4 # via shade
netaddr==0.7.18 # via oslo.config, oslo.utils, python-neutronclient
netifaces==0.10.5 # via oslo.utils, shade
oauthlib==2.0.0 # via requests-oauthlib
oauthlib==2.0.1 # via requests-oauthlib
openstacksdk==0.9.10 # via python-openstackclient
os-client-config==1.24.0 # via openstacksdk, osc-lib, python-magnumclient, python-neutronclient, shade
os-diskconfig-python-novaclient-ext==0.1.3 # via rackspace-novaclient
@ -81,9 +83,9 @@ python-cinderclient==1.9.0 # via python-openstackclient, shade
python-dateutil==2.6.0 # via azure-storage
python-designateclient==2.3.0 # via shade
python-glanceclient==2.5.0 # via python-openstackclient, shade
python-heatclient==1.6.0 # via shade
python-heatclient==1.6.1 # via shade
python-ironicclient==1.8.0 # via shade
python-keystoneclient==3.6.0 # via python-glanceclient, python-mistralclient, python-openstackclient, shade
python-keystoneclient==3.7.0 # via python-glanceclient, python-mistralclient, python-openstackclient, shade
python-magnumclient==2.3.1 # via shade
python-mistralclient==2.1.1 # via python-troveclient
python-neutronclient==6.0.0 # via shade

tools/elastic/README.md (new file, 56 lines)
View File

@ -0,0 +1,56 @@
# Docker ELK / Elastic Stack Development Tools
These are tools to run a containerized version of the ELK stack, comprising
Logstash, Elasticsearch, and Kibana. In some cases only a subset of these
services needs to run.
A copy of the license is in `docs/licenses/docker-elk.txt`
## Instructions
Due to requirements from the upstream Elasticsearch container, there
is a prerequisite to get the containers running: the Docker _host_ machine
must have its `max_map_count` kernel setting increased. For a developer using
docker-machine with something like VirtualBox or VMware, this can be
done by opening a shell in the running Docker machine. Example:
```bash
$ docker-machine ssh default
docker@default:~$ sudo sysctl -w vm.max_map_count=262144
vm.max_map_count = 262144
```
After this, the containers can be started up with commands like:
```bash
make docker-compose-elk
```
```bash
make docker-compose-cluster-elk
```
These are run from the root folder of the ansible-tower repository.
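
Once the containers are up, Tower can be pointed at the bundled Logstash through the `LOG_AGGREGATOR_*` settings introduced in this changeset. A sketch of plausible values, taken from the Logstash http input configuration at the end of this diff and the compose link (host `logstash`, port 8085); adjust to your environment:

```python
LOG_AGGREGATOR_ENABLED = True
LOG_AGGREGATOR_TYPE = 'logstash'
LOG_AGGREGATOR_HOST = 'logstash'        # linked container hostname
LOG_AGGREGATOR_PORT = 8085
LOG_AGGREGATOR_USERNAME = 'awx_logger'  # matches the http input's basic auth
LOG_AGGREGATOR_PASSWORD = 'workflows'
LOG_AGGREGATOR_LOGGERS = ['awx', 'activity_stream', 'job_events', 'system_tracking']
```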
### Connecting Logstash to 3rd Party Receivers
To send these logs to an external consumer of logstash-format
messages, replace the output section in the `logstash.conf` file.
```
output {
elasticsearch {
hosts => "elasticsearch:9200"
}
}
```
## Changelog
Current branch point `a776151221182dcfaec7df727459e208c895d25b`
Nov 18, 2016
- Original branch point `b5a4deee142b152d4f9232ebac5bbabb2d2cef3c`
Sep 25, 2016, before X-Pack support

View File

@ -0,0 +1,49 @@
# Structure for the Elastic Stack docker configuration came from docker-elk:
# https://github.com/deviantony/docker-elk
# docker-elk is under the MIT License,
# a copy of its license is provided in docs/licenses/docker-elk.txt
# contents modified
version: '2'
services:
# Components of ELK stack for logging
elasticsearch:
build: elastic/elasticsearch/
ports:
- "9200:9200"
- "9300:9300"
environment:
ES_JAVA_OPTS: "-Xms1g -Xmx1g"
# networks: # add back in when a connection to tower_tools is possible
# - docker_elk
logstash:
build: elastic/logstash/
command: -f /etc/logstash/conf.d/
volumes:
- ./elastic/logstash/config:/etc/logstash/conf.d
ports:
- "8085:8085"
links:
- elasticsearch
# networks:
# - docker_elk
depends_on:
- elasticsearch
kibana:
build: elastic/kibana/
volumes:
- ./elastic/kibana/config/:/opt/kibana/config/
ports:
- "5601:5601"
links:
- elasticsearch
# networks:
# - docker_elk
depends_on:
- elasticsearch
# networks:
# docker_elk:
# driver: bridge

View File

@ -0,0 +1,12 @@
version: '2'
services:
# Tower Development Cluster
tower_1:
links:
- logstash
tower_2:
links:
- logstash
tower_3:
links:
- logstash

View File

@ -0,0 +1,6 @@
version: '2'
services:
# Primary Tower Development Container
tower:
links:
- logstash

View File

@ -0,0 +1,5 @@
FROM elasticsearch:5
ENV ES_JAVA_OPTS="-Des.path.conf=/etc/elasticsearch"
CMD ["-E", "network.host=0.0.0.0", "-E", "discovery.zen.minimum_master_nodes=1"]

View File

@ -0,0 +1 @@
Ensure the existence of the parent folder.

View File

@ -0,0 +1 @@
FROM kibana:5

View File

@ -0,0 +1,92 @@
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601
# This setting specifies the IP address of the back end server.
server.host: "0.0.0.0"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This setting
# cannot end in a slash.
# server.basePath: ""
# The maximum payload size in bytes for incoming server requests.
# server.maxPayloadBytes: 1048576
# The Kibana server's name. This is used for display purposes.
# server.name: "your-hostname"
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://elasticsearch:9200"
# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
# elasticsearch.preserveHost: true
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
# kibana.index: ".kibana"
# The default application to load.
# kibana.defaultAppId: "discover"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
# elasticsearch.username: "user"
# elasticsearch.password: "pass"
# Paths to the PEM-format SSL certificate and SSL key files, respectively. These
# files enable SSL for outgoing requests from the Kibana server to the browser.
# server.ssl.cert: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
# elasticsearch.ssl.cert: /path/to/your/client.crt
# elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
# elasticsearch.ssl.ca: /path/to/your/CA.pem
# To disregard the validity of SSL certificates, change this setting's value to false.
# elasticsearch.ssl.verify: true
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
# elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
# elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
# elasticsearch.requestHeadersWhitelist: [ authorization ]
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
# elasticsearch.shardTimeout: 0
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
# elasticsearch.startupTimeout: 5000
# Specifies the path where Kibana creates the process ID file.
# pid.file: /var/run/kibana.pid
# Enables you to specify a file where Kibana stores log output.
# logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
# logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
# logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
# logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 10000.
# ops.interval: 10000

View File

@ -0,0 +1,4 @@
FROM logstash:5
# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json

View File

@ -0,0 +1,21 @@
input {
http {
port => 8085
user => awx_logger
password => "workflows"
}
}
## Add your filters / logstash plugins configuration here
filter {
json {
source => "message"
}
}
output {
elasticsearch {
hosts => "elasticsearch:9200"
}
}
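
As a quick sanity check of this pipeline, a plain HTTP POST to the input above should produce a document in Elasticsearch. A hedged sketch, assuming the published port 8085 is reachable from wherever the script runs:

```python
# Post a raw JSON string to the Logstash http input; without a JSON content
# type the body lands in the "message" field, which the json filter above
# then parses into event fields.
import json
import requests

payload = json.dumps({'note': 'test entry', 'level': 'INFO'})
resp = requests.post('http://localhost:8085',
                     auth=('awx_logger', 'workflows'),
                     data=payload)
print(resp.status_code)   # expect 200 when the event is accepted
```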