Logging Integration, ELK docker-compose as update file
parent dc032489c2
commit f3427d1359

15  Makefile
@@ -878,6 +878,21 @@ docker-clean:

docker-refresh: docker-clean docker-compose

# Docker Development Environment with Elastic Stack Connected
docker-compose-elk: docker-auth
	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

docker-compose-cluster-elk: docker-auth
	TAG=$(COMPOSE_TAG) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

clean-elk:
	docker stop tools_kibana_1
	docker stop tools_logstash_1
	docker stop tools_elasticsearch_1
	docker rm tools_logstash_1
	docker rm tools_elasticsearch_1
	docker rm tools_kibana_1

mongo-debug-ui:
	docker run -it --rm --name mongo-express --link tools_mongo_1:mongo -e ME_CONFIG_OPTIONS_EDITORTHEME=ambiance -e ME_CONFIG_BASICAUTH_USERNAME=admin -e ME_CONFIG_BASICAUTH_PASSWORD=password -p 8081:8081 knickers/mongo-express
@@ -259,14 +259,16 @@ class ApiV1ConfigView(APIView):
        try:
            data_actual = json.dumps(request.data)
        except Exception:
            # FIX: Log
            logger.info(smart_text(u"Invalid JSON submitted for Tower license."),
                        extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST)
        try:
            from awx.main.task_engine import TaskEnhancer
            license_data = json.loads(data_actual)
            license_data_validated = TaskEnhancer(**license_data).validate_enhancements()
        except Exception:
            # FIX: Log
            logger.warning(smart_text(u"Invalid Tower license submitted."),
                           extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)

        # If the license is valid, write it to the database.
@@ -275,6 +277,8 @@ class ApiV1ConfigView(APIView):
            settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
            return Response(license_data_validated)

        logger.warning(smart_text(u"Invalid Tower license submitted."),
                       extra=dict(actor=request.user.username))
        return Response({"error": _("Invalid license")}, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request):
@@ -541,12 +545,14 @@ class AuthTokenView(APIView):
                                                reason='')[0]
                token.refresh()
                if 'username' in request.data:
                    logger.info(smart_text(u"User {} logged in".format(request.data['username'])))
                    logger.info(smart_text(u"User {} logged in".format(request.data['username'])),
                                extra=dict(actor=request.data['username']))
            except IndexError:
                token = AuthToken.objects.create(user=serializer.validated_data['user'],
                                                 request_hash=request_hash)
                if 'username' in request.data:
                    logger.info(smart_text(u"User {} logged in".format(request.data['username'])))
                    logger.info(smart_text(u"User {} logged in".format(request.data['username'])),
                                extra=dict(actor=request.data['username']))
            # Get user un-expired tokens that are not invalidated that are
            # over the configured limit.
            # Mark them as invalid and inform the user
@@ -564,7 +570,8 @@ class AuthTokenView(APIView):
            }
            return Response({'token': token.key, 'expires': token.expires}, headers=headers)
        if 'username' in request.data:
            logger.warning(smart_text(u"Login failed for user {}".format(request.data['username'])))
            logger.warning(smart_text(u"Login failed for user {}".format(request.data['username'])),
                           extra=dict(actor=request.data['username']))
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@@ -223,3 +223,36 @@ register(
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'LOG_AGGREGATOR_HOST',
    field_class=fields.CharField,
    label=_('Logging Aggregator Receiving Host'),
    help_text=_('External host maintaining a log collector to send logs to'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_PORT',
    field_class=fields.CharField,
    label=_('Logging Aggregator Receiving Port'),
    help_text=_('Port that the log collector is listening on'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_TYPE',
    field_class=fields.CharField,
    label=_('Logging Aggregator Type: Logstash, Loggly, Datadog, etc'),
    help_text=_('The type of log aggregator service to format messages for'),
    category=_('Logging'),
    category_slug='logging',
)
register(
    'LOG_AGGREGATOR_USERNAME',
    field_class=fields.CharField,
    label=_('Logging Aggregator Receiver Username'),
    help_text=_('Username for Logstash or others'),
    category=_('Logging'),
    category_slug='logging',
)
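The LOG_AGGREGATOR_* keys above are registered with the Tower configuration system under a 'Logging' category. As a rough sketch (an assumption based on how awx.conf normally exposes registered keys through django.conf.settings, not something shown in this diff), other code could read them like this:

```python
# Sketch only: reading the registered aggregator settings at runtime.
# Assumes a configured AWX/Django environment; names match the register() calls above.
from django.conf import settings

aggregator = {
    'host': getattr(settings, 'LOG_AGGREGATOR_HOST', None),
    'port': getattr(settings, 'LOG_AGGREGATOR_PORT', None),
    'type': getattr(settings, 'LOG_AGGREGATOR_TYPE', None),
    'username': getattr(settings, 'LOG_AGGREGATOR_USERNAME', None),
}
```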
@@ -1,5 +1,6 @@
import json
import urlparse
import logging

from channels import Group
from channels.sessions import channel_session
@@ -8,6 +9,8 @@ from django.contrib.auth.models import User
from awx.main.models.organization import AuthToken


logger = logging.getLogger('awx.main.consumers')

def discard_groups(message):
    if 'groups' in message.channel_session:
        for group in message.channel_session['groups']:
@@ -52,11 +55,13 @@ def ws_receive(message):

    auth_token = validate_token(token)
    if auth_token is None:
        logger.error("Authentication Failure validating user")
        message.reply_channel.send({"text": json.dumps({"error": "invalid auth token"})})
        return None

    user = user_from_token(auth_token)
    if user is None:
        logger.error("No valid user corresponding to submitted auth_token")
        message.reply_channel.send({"text": json.dumps({"error": "no valid user"})})
        return None
2  awx/main/log_utils/__init__.py  Normal file

@@ -0,0 +1,2 @@
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
59  awx/main/log_utils/formatters.py  Normal file

@@ -0,0 +1,59 @@
# Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.

from logstash.formatter import LogstashFormatterVersion1
from django.conf import settings


# # Loggly example
# 'json': {
#     'format': '{
#         "loggerName":"%(name)s",
#         "asciTime":"%(asctime)s",
#         "fileName":"%(filename)s",
#         "logRecordCreationTime":"%(created)f",
#         "functionName":"%(funcName)s",
#         "levelNo":"%(levelno)s",
#         "lineNo":"%(lineno)d",
#         "time":"%(msecs)d",
#         "levelName":"%(levelname)s",
#         "message":"%(message)s"}',
# },

class LogstashFormatter(LogstashFormatterVersion1):
    def __init__(self, **kwargs):
        ret = super(LogstashFormatter, self).__init__(**kwargs)
        self.host_id = settings.CLUSTER_HOST_ID
        return ret

    def get_extra_fields(self, record):
        fields = super(LogstashFormatter, self).get_extra_fields(record)
        fields['cluster_host_id'] = self.host_id
        return fields

    def format(self, record):
        # Create message dict
        # message = record.getMessage()
        # print ' message ' + str(message)
        message = {
            '@timestamp': self.format_timestamp(record.created),
            '@version': '1',
            'message': record.getMessage(),
            'host': self.host,
            'path': record.pathname,
            'tags': self.tags,
            'type': self.message_type,

            # Extra Fields
            'level': record.levelname,
            'logger_name': record.name,
        }

        # Add extra fields
        message.update(self.get_extra_fields(record))

        # If exception, add debug info
        if record.exc_info:
            message.update(self.get_debug_fields(record))

        return self.serialize(message)
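The formatter is meant to be instantiated by Django's logging machinery through the '()' key, as the LOGGING changes further down in this commit do. A minimal sketch of that wiring (illustrative only; it assumes an AWX environment where awx.main.log_utils is importable and settings.CLUSTER_HOST_ID is defined):

```python
# Sketch: hooking the formatter into stdlib logging via dictConfig.
# The '()' key tells dictConfig to instantiate the named class directly.
import logging
import logging.config

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'json': {'()': 'awx.main.log_utils.formatters.LogstashFormatter'},
    },
    'handlers': {
        'console': {'class': 'logging.StreamHandler', 'formatter': 'json'},
    },
    'loggers': {
        'awx.analytics': {'handlers': ['console'], 'level': 'INFO'},
    },
})

# Each record is serialized as a logstash v1 JSON document with cluster_host_id added.
logging.getLogger('awx.analytics.job_events').info('job event', extra={'actor': 'admin'})
```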
210  awx/main/log_utils/handlers.py  Normal file

@@ -0,0 +1,210 @@
# Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.

# common
import socket
import logging
import logging.handlers

# Splunk
import urllib
import json
import gzip
import cStringIO

import requests

from .utils import parse_config_file, get_config_from_env

# loggly
import traceback

from requests_futures.sessions import FuturesSession

# Logstash
from logstash import formatter


ENABLED_LOGS = ['ansible']

# Logstash
# https://github.com/vklochan/python-logstash
class TCPLogstashHandler(logging.handlers.SocketHandler, object):
    """Python logging handler for Logstash. Sends events over TCP.
    :param host: The host of the logstash server.
    :param port: The port of the logstash server (default 5959).
    :param message_type: The type of the message (default logstash).
    :param fqdn: Indicates whether to show fully qualified domain name or not (default False).
    :param version: version of logstash event schema (default is 0).
    :param tags: list of tags for a logger (default is None).
    """

    def __init__(self, host, port=5959, message_type='logstash', tags=None, fqdn=False, version=0):
        super(TCPLogstashHandler, self).__init__(host, port)
        if version == 1:
            self.formatter = formatter.LogstashFormatterVersion1(message_type, tags, fqdn)
        else:
            self.formatter = formatter.LogstashFormatterVersion0(message_type, tags, fqdn)

    def makePickle(self, record):
        return self.formatter.format(record) + b'\n'


# loggly
# https://github.com/varshneyjayant/loggly-python-handler

session = FuturesSession()


def bg_cb(sess, resp):
    """ Don't do anything with the response """
    pass

# add port for a generic handler
class HTTPSHandler(logging.Handler):
    def __init__(self, host, port=80, message_type='logstash', fqdn=False):
        super(HTTPSHandler, self).__init__()
        self.host_saved = host
        self.port = port
        # self.port = port
        self.message_type = message_type
        self.fqdn = fqdn

    def get_full_message(self, record):
        if record.exc_info:
            return '\n'.join(traceback.format_exception(*record.exc_info))
        else:
            return record.getMessage()

    def emit(self, record):
        try:
            payload = self.format(record)
            # TODO: move this enablement logic to rely on individual loggers once
            # the enablement config variable is hooked up
            payload_data = json.loads(payload)
            if payload_data['logger_name'].startswith('awx.analytics.system_tracking'):
                st_type = None
                for fd in ['services', 'packages', 'files', 'ansible']:
                    if fd in payload_data:
                        st_type = fd
                        break
                if st_type not in ENABLED_LOGS:
                    return
            host = self.host_saved
            if not host.startswith('http'):
                host = 'http://%s' % self.host_saved
            if self.port != 80:
                host = '%s:%s' % (host, str(self.port))
            session.post(host, data=payload, background_callback=bg_cb)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)


# splunk
# https://github.com/andresriancho/splunk-logger

class SplunkLogger(logging.Handler):
    """
    A class to send messages to splunk storm using their API
    """
    # Required format for splunk storm
    INPUT_URL_FMT = 'https://%s/1/inputs/http'

    def __init__(self, access_token=None, project_id=None, api_domain=None):
        logging.Handler.__init__(self)

        self._set_auth(access_token, project_id, api_domain)
        self.url = self.INPUT_URL_FMT % self.api_domain

        self._set_url_opener()

        # Handle errors in authentication
        self._auth_failed = False

    def _set_auth(self, access_token, project_id, api_domain):
        # The access token and project id passed as parameter override the ones
        # configured in the .splunk_logger file.
        if access_token is not None\
                and project_id is not None\
                and api_domain is not None:
            self.project_id = project_id
            self.access_token = access_token
            self.api_domain = api_domain

        else:
            # Try to get the credentials from the configuration file
            self.project_id, self.access_token, self.api_domain = parse_config_file()

            if self.project_id is None\
                    or self.access_token is None\
                    or self.api_domain is None:
                # Try to get the credentials from the environment variables
                self.project_id, self.access_token, self.api_domain = get_config_from_env()

            if self.access_token is None or self.project_id is None:
                raise ValueError('Access token, project id and API endpoint domain'
                                 ' need to be set.')

    def _set_url_opener(self):
        # We disable the logging of the requests module to avoid some infinite
        # recursion errors that might appear.
        requests_log = logging.getLogger("requests")
        requests_log.setLevel(logging.CRITICAL)

        self.session = requests.Session()
        self.session.auth = ('x', self.access_token)
        self.session.headers.update({'Content-Encoding': 'gzip'})

    def usesTime(self):
        return False

    def _compress(self, input_str):
        """
        Compress the log message in order to send less bytes to the wire.
        """
        compressed_bits = cStringIO.StringIO()

        f = gzip.GzipFile(fileobj=compressed_bits, mode='wb')
        f.write(input_str)
        f.close()

        return compressed_bits.getvalue()

    def emit(self, record):

        if self._auth_failed:
            # Don't send anything else once a 401 was returned
            return

        try:
            response = self._send_to_splunk(record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # All errors end here.
            self.handleError(record)
        else:
            if response.status_code == 401:
                self._auth_failed = True

    def _send_to_splunk(self, record):
        # http://docs.splunk.com/Documentation/Storm/latest/User/Sourcesandsourcetypes
        sourcetype = 'json_no_timestamp'

        host = socket.gethostname()

        event_dict = {'data': self.format(record),
                      'level': record.levelname,
                      'module': record.module,
                      'line': record.lineno}
        event = json.dumps(event_dict)
        event = self._compress(event)

        params = {'index': self.project_id,
                  'sourcetype': sourcetype,
                  'host': host}

        url = '%s?%s' % (self.url, urllib.urlencode(params))
        return self.session.post(url, data=event)
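For manual experimentation outside of the settings files, the HTTP handler above can be attached to a logger directly. A sketch, assuming the logstash container from tools/elastic is reachable as `logstash` on port 8085 and Django settings are configured (the formatter reads CLUSTER_HOST_ID):

```python
# Illustrative sketch, not part of this commit.
import logging

from awx.main.log_utils.formatters import LogstashFormatter
from awx.main.log_utils.handlers import HTTPSHandler

handler = HTTPSHandler(host='logstash', port=8085, message_type='logstash', fqdn=True)
handler.setFormatter(LogstashFormatter())
handler.setLevel(logging.INFO)

logger = logging.getLogger('awx.analytics.activity_stream')
logger.setLevel(logging.INFO)
logger.addHandler(handler)

# emit() POSTs the JSON payload to http://logstash:8085 in the background
logger.info('Activity Stream update entry', extra=dict(actor='admin', operation='update'))
```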
57  awx/main/log_utils/utils.py  Normal file

@@ -0,0 +1,57 @@
import os
import yaml


def parse_config_file():
    """
    Find the .splunk_logger config file in the current directory, or in the
    user's home and parse it. The one in the current directory has precedence.

    :return: A tuple with:
        - project_id
        - access_token
        - api_domain
    """
    for filename in ('.splunk_logger', os.path.expanduser('~/.splunk_logger')):

        project_id, access_token, api_domain = _parse_config_file_impl(filename)

        if project_id is not None\
                and access_token is not None\
                and api_domain is not None:
            return project_id, access_token, api_domain

    else:
        return None, None, None


def _parse_config_file_impl(filename):
    """
    Format for the file is:

        credentials:
            project_id: ...
            access_token: ...
            api_domain: ...

    :param filename: The filename to parse
    :return: A tuple with:
        - project_id
        - access_token
        - api_domain
    """
    try:
        doc = yaml.load(file(filename).read())

        project_id = doc["credentials"]["project_id"]
        access_token = doc["credentials"]["access_token"]
        api_domain = doc["credentials"]["api_domain"]

        return project_id, access_token, api_domain
    except:
        return None, None, None


def get_config_from_env():
    return (os.environ.get('SPLUNK_PROJECT_ID', None),
            os.environ.get('SPLUNK_ACCESS_TOKEN', None),
            os.environ.get('SPLUNK_API_DOMAIN', None))
@@ -16,8 +16,10 @@ from django.utils import timezone
# AWX
from awx.main.models.fact import Fact
from awx.main.models.inventory import Host
from awx.main.utils import format_for_log

logger = logging.getLogger('awx.main.commands.run_fact_cache_receiver')
data_logger = logging.getLogger('awx.analytics.system_tracking')


class FactBrokerWorker(ConsumerMixin):
@@ -83,6 +85,7 @@ class FactBrokerWorker(ConsumerMixin):
        # Create new Fact entry
        fact_obj = Fact.add_fact(host_obj.id, module_name, self.timestamp, facts)
        logger.info('Created new fact <fact_id, module> <%s, %s>' % (fact_obj.id, module_name))
        data_logger.info('Received message with fact data', extra=format_for_log({module_name: facts}, kind="fact"))
        return fact_obj
@@ -23,7 +23,7 @@ from socketio import socketio_manage
from socketio.server import SocketIOServer
from socketio.namespace import BaseNamespace

logger = logging.getLogger('awx.main.commands.run_socketio_service')
logger = logging.getLogger('awx.main.consumers')


class SocketSession(object):
@@ -16,6 +16,7 @@ from awx.api.authentication import TokenAuthentication


logger = logging.getLogger('awx.main.middleware')
analytics_logger = logging.getLogger('awx.analytics.activity_stream')


class ActivityStreamMiddleware(threading.local):
@@ -46,6 +47,10 @@ class ActivityStreamMiddleware(threading.local):
            instance.actor = drf_user
            try:
                instance.save(update_fields=['actor'])
                analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1),
                                      extra=dict(changes=instance.changes, relationship=instance.object_relationship_type,
                                                 actor=drf_user.username, operation=instance.operation,
                                                 object1=instance.object1, object2=instance.object2))
            except IntegrityError:
                logger.debug("Integrity Error saving Activity Stream instance for id : " + str(instance.id))
        # else:
@@ -32,6 +32,7 @@ from awx.main.models.notifications import (
from awx.main.utils import (
    ignore_inventory_computed_fields,
    parse_yaml_or_json,
    format_for_log
)
from awx.main.redact import PlainTextCleaner
from awx.main.fields import ImplicitRoleField
@@ -43,6 +44,7 @@ from awx.main.consumers import emit_channel_notification


logger = logging.getLogger('awx.main.models.jobs')
event_logger = logging.getLogger('awx.analytics.job_events')

__all__ = ['JobTemplate', 'Job', 'JobHostSummary', 'JobEvent', 'SystemJobOptions', 'SystemJobTemplate', 'SystemJob']

@@ -1186,6 +1188,9 @@ class JobEvent(CreatedModifiedModel):
        if parent_id:
            kwargs['parent_id'] = parent_id

        # event_logger.info('Body: {}'.format(str(data_for_log)), extra=data_for_log)
        event_logger.info('Job event data saved.', extra=format_for_log(kwargs, kind='event'))

        job_event = JobEvent.objects.create(**kwargs)

        # Cache this job event ID vs. UUID for future parent lookups.
@@ -1196,7 +1201,6 @@ class JobEvent(CreatedModifiedModel):
        # Save artifact data to parent job (if provided).
        if artifact_data:
            artifact_dict = json.loads(artifact_data)
            event_data = kwargs.get('event_data', None)
            if event_data and isinstance(event_data, dict):
                res = event_data.get('res', None)
                if res and isinstance(res, dict):
@@ -20,7 +20,7 @@ from crum.signals import current_user_getter
from awx.main.models import * # noqa
from awx.api.serializers import * # noqa
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates, format_for_log
from awx.main.tasks import update_inventory_computed_fields

from awx.main.consumers import emit_channel_notification
@@ -28,6 +28,7 @@ from awx.main.consumers import emit_channel_notification
__all__ = []

logger = logging.getLogger('awx.main.signals')
analytics_logger = logging.getLogger('awx.analytics.activity_stream')

# Update has_active_failures for inventory/groups when a Host/Group is deleted,
# when a Host-Group or Group-Group relationship is updated, or when a Job is deleted
@@ -369,11 +370,15 @@ def activity_stream_create(sender, instance, created, **kwargs):
    if type(instance) == Job:
        if 'extra_vars' in changes:
            changes['extra_vars'] = instance.display_extra_vars()
    changes_dict = json.dumps(changes)
    activity_entry = ActivityStream(
        operation='create',
        object1=object1,
        changes=json.dumps(changes))
        changes=changes_dict)
    activity_entry.save()
    # analytics_logger.info('Activity Stream create entry for %s' % str(object1),
    #                       extra=format_for_log(changes, kind='activity_stream',
    #                       actor=activity_entry.actor, operation='update', object1=object1))
    #TODO: Weird situation where cascade SETNULL doesn't work
    # it might actually be a good idea to remove all of these FK references since
    # we don't really use them anyway.
@@ -401,6 +406,9 @@ def activity_stream_update(sender, instance, **kwargs):
        object1=object1,
        changes=json.dumps(changes))
    activity_entry.save()
    # analytics_logger.info('Activity Stream update entry for %s' % str(object1),
    #                       extra=format_for_log(changes, kind='activity_stream',
    #                       actor=activity_entry.actor, operation='update', object1=object1))
    if instance._meta.model_name != 'setting': # Is not conf.Setting instance
        getattr(activity_entry, object1).add(instance)
31  awx/main/tests/unit/test_log_formatter.py  Normal file

@@ -0,0 +1,31 @@
from awx.main.utils import format_for_log
import datetime

# Example data
event_data = {
    'stdout': u'\x1b[0;36mskipping: [host1]\x1b[0m\r\n', u'uuid': u'ffe4858c-ac38-4cab-9192-b07bdbe80502',
    u'created': datetime.datetime(2016, 11, 10, 14, 59, 16, 376051), 'counter': 17, u'job_id': 209,
    u'event': u'runner_on_skipped', 'parent_id': 1937, 'end_line': 24, 'start_line': 23,
    u'event_data': {u'play_pattern': u'all', u'play': u'all', u'task': u'Scan files (Windows)',
                    u'task_args': u'paths={{ scan_file_paths }}, recursive={{ scan_use_recursive }}, get_checksum={{ scan_use_checksum }}',
                    u'remote_addr': u'host1', u'pid': 1427, u'play_uuid': u'da784361-3811-4ea7-9cc8-46ec758fde66',
                    u'task_uuid': u'4f9525fd-bc25-4ace-9eb2-adad9fa21a94', u'event_loop': None,
                    u'playbook_uuid': u'653fd95e-f718-428e-9df0-3f279df9f07e', u'playbook': u'scan_facts.yml',
                    u'task_action': u'win_scan_files', u'host': u'host1', u'task_path': None}}

event_stats = {
    'stdout': u'asdf', u'created': datetime.datetime(2016, 11, 10, 14, 59, 16, 385416),
    'counter': 18, u'job_id': 209, u'event': u'playbook_on_stats', 'parent_id': 1923,
    'end_line': 28, 'start_line': 24, u'event_data': {
        u'skipped': {u'host1': 4}, u'ok': {u'host2': 3}, u'changed': {},
        u'pid': 1427, u'dark': {}, u'playbook_uuid': u'653fd95e-f718-428e-9df0-3f279df9f07e',
        u'playbook': u'scan_facts.yml', u'failures': {}, u'processed': {u'duck': 1}
    }
}


def test_format_event():
    log_data = format_for_log(event_data, kind='event')
    assert log_data['event_host'] == 'host1'
@@ -17,6 +17,7 @@ import urlparse
import threading
import contextlib
import tempfile
from copy import copy

# Decorator
from decorator import decorator
@@ -824,3 +825,64 @@ class OutputEventFilter(object):
            self._current_event_data = next_event_data
        else:
            self._current_event_data = None


def format_for_log(raw_data, kind=None, **kwargs):
    '''
    Process dictionaries from various contexts (job events, activity stream
    changes, etc.) to give meaningful information
    Output a dictionary which will be passed in logstash or syslog format
    to the logging receiver
    '''
    rename_fields = set((
        'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
        'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
        'msecs', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
        'processName', 'relativeCreated', 'thread', 'threadName', 'extra',
        'auth_token', 'tags', 'host', 'host_id', 'level', 'port', 'uuid'))
    data = copy(raw_data)
    if isinstance(data, basestring):
        data = json.loads(data)
    data.update(kwargs)
    skip_fields = ('res', 'password', 'event_data', 'stdout')
    data_for_log = {}

    def index_by_name(alist):
        """Takes a list of dictionaries with `name` as a key in each dict
        and returns a dictionary indexed by those names"""
        adict = {}
        for item in alist:
            subdict = copy(item)
            name = subdict.pop('name', None)
            if name:
                # Logstash v2 can not accept '.' in a name
                name = name.replace('.', '_')
                adict[name] = subdict
        return adict

    if kind == 'event':
        data.update(data.get('event_data', {}))
        for fd in data:
            if fd in skip_fields:
                continue
            key = fd
            if fd in rename_fields:
                key = 'event_%s' % fd
            if type(data[fd]) is dict:
                data_for_log[key] = len(data[fd])
            else:
                data_for_log[key] = data[fd]
    elif kind == 'fact':
        if 'services' in data:
            data_for_log['services'] = index_by_name(data['services'])
        elif 'packages' in data:
            data_for_log['packages'] = index_by_name(data['packages'])
        elif 'files' in data:
            data_for_log['files'] = index_by_name(data['files'])
        elif 'ansible' in data:
            data_for_log['ansible'] = data['ansible']
            # Remove sub-keys with data type conflicts in elastic search
            data_for_log['ansible'].pop('ansible_python_version', None)
            data_for_log['ansible']['ansible_python'].pop('version_info', None)
        else:
            data_for_log['facts'] = data
    return data_for_log
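As a quick illustration of the renaming behavior (mirroring the unit test added above), fields that collide with reserved LogRecord attribute names, such as `host`, are prefixed with `event_`, and nested `event_data` is merged into the top level:

```python
# Sketch: flattening a job event dict for the log aggregator.
from awx.main.utils import format_for_log

payload = format_for_log(
    {'event': 'runner_on_skipped', 'host': 'host1', 'event_data': {'play': 'all'}},
    kind='event')

assert payload['event_host'] == 'host1'   # 'host' is a reserved field, so it is renamed
assert payload['play'] == 'all'           # event_data is merged into the top level
assert 'event_data' not in payload        # the raw nested dict itself is skipped
```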
@@ -842,6 +842,19 @@ LOGGING = {
        'simple': {
            'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s',
        },
        'logstash': {
            '()': 'awx.main.log_utils.formatters.LogstashFormatter'
        },
        'json': {
            '()': 'awx.main.log_utils.formatters.LogstashFormatter'
        }
        # From loggly examples
        # 'json': {
        #     'format': '{ "loggerName":"%(name)s", "asciTime":"%(asctime)s", "fileName":"%(filename)s", "logRecordCreationTime":"%(created)f", "functionName":"%(funcName)s", "levelNo":"%(levelno)s", "lineNo":"%(lineno)d", "time":"%(msecs)d", "levelName":"%(levelname)s", "message":"%(message)s"}',
        # },
        # 'json': {
        #     'format': '{"message": %(message)s}',
        # },
    },
    'handlers': {
        'console': {
@@ -863,6 +876,24 @@ LOGGING = {
            'class': 'django.utils.log.NullHandler',
            'formatter': 'simple',
        },
        'http_receiver': {
            'class': 'awx.main.log_utils.handlers.HTTPSHandler',
            'level': 'INFO',
            'formatter': 'json',
            'host': '',
        },
        'logstash': {
            'level': 'INFO',
            'class': 'awx.main.log_utils.handlers.HTTPSHandler',
            'host': 'logstash',  # IP/name of our Logstash EC2 instance
            'port': 8085,
            # 'port': 5000,
            # 'version': 1,
            'message_type': 'logstash',
            'fqdn': True,
            # 'tags': ['tower'],
            'formatter': 'json'
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
@@ -939,7 +970,6 @@ LOGGING = {
        'django.request': {
            'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'],
            'level': 'WARNING',
            'propagate': False,
        },
        'rest_framework.request': {
            'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'],
@@ -954,29 +984,30 @@ LOGGING = {
            'level': 'DEBUG',
        },
        'awx.conf': {
            'handlers': ['console', 'file', 'tower_warnings'],
            'handlers': ['null'],
            'level': 'WARNING',
            'propagate': False,
        },
        'awx.conf.settings': {
            'handlers': ['null'],
            'level': 'WARNING',
        },
        'awx.main': {
            'handlers': ['null']
        },
        'awx.main.commands.run_callback_receiver': {
            'handlers': ['console', 'file', 'callback_receiver'],
            'propagate': False
        },
        'awx.main.commands.run_socketio_service': {
            'handlers': ['console', 'file', 'socketio_service'],
            'propagate': False
            'handlers': ['callback_receiver'],
        },
        'awx.main.tasks': {
            'handlers': ['console', 'file', 'task_system'],
            'propagate': False
            'handlers': ['task_system']
        },
        'awx.main.scheduler': {
            'handlers': ['console', 'file', 'task_system'],
            'propagate': False
            'handlers': ['task_system'],
        },
        'awx.main.consumers': {
            'handlers': ['null']
        },
        'awx.main.commands.run_fact_cache_receiver': {
            'handlers': ['console', 'file', 'fact_receiver'],
            'propagate': False
            'handlers': ['fact_receiver'],
        },
        'awx.main.access': {
            'handlers': ['null'],
@@ -990,6 +1021,23 @@ LOGGING = {
            'handlers': ['null'],
            'propagate': False,
        },
        'awx.analytics': {
            'handlers': ['null'],
            'level': 'INFO',
            'propagate': False
        },
        'awx.analytics.job_events': {
            'handlers': ['null'],
            'level': 'INFO'
        },
        'awx.analytics.activity_stream': {
            'handlers': ['null'],
            'level': 'INFO'
        },
        'awx.analytics.system_tracking': {
            'handlers': ['null'],
            'level': 'INFO'
        },
        'django_auth_ldap': {
            'handlers': ['console', 'file', 'tower_warnings'],
            'level': 'DEBUG',
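Note that the new awx.analytics loggers default to the 'null' handler. A local settings override could point them at the 'logstash' handler defined above instead; a sketch of a hypothetical local settings snippet (names taken from the LOGGING dict in this diff):

```python
# Hypothetical local settings snippet, illustrative only.
# 'logstash' and 'awx.analytics' are the handler/logger names defined in LOGGING above.
LOGGING['handlers']['logstash']['host'] = 'logstash'   # container name from tools/elastic
LOGGING['handlers']['logstash']['port'] = 8085
LOGGING['loggers']['awx.analytics']['handlers'] = ['logstash']
```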
@@ -52,7 +52,6 @@ TOWER_VENV_PATH = "/var/lib/awx/venv/tower"

LOGGING['handlers']['tower_warnings']['filename'] = '/var/log/tower/tower.log'
LOGGING['handlers']['callback_receiver']['filename'] = '/var/log/tower/callback_receiver.log'
LOGGING['handlers']['socketio_service']['filename'] = '/var/log/tower/socketio_service.log'
LOGGING['handlers']['task_system']['filename'] = '/var/log/tower/task_system.log'
LOGGING['handlers']['fact_receiver']['filename'] = '/var/log/tower/fact_receiver.log'
LOGGING['handlers']['system_tracking_migrations']['filename'] = '/var/log/tower/tower_system_tracking_migrations.log'
21  docs/licenses/docker-elk.txt  Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Anthony Lapenna

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
56  tools/elastic/README.md  Normal file

@@ -0,0 +1,56 @@
# Docker ELK / Elastic Stack Development Tools

These are tools to run a containerized version of the ELK stack, comprising
Logstash, Elasticsearch, and Kibana. Some use cases only need a subset of
these services.

A copy of the license is in `docs/licenses/docker-elk.txt`.

## Instructions

Due to complex requirements from the upstream Elasticsearch container, there
is a prerequisite to get the containers running. The Docker _host_ machine
must have the `max_map_count` kernel setting increased. For a developer using
docker-machine with something like VirtualBox or VMware, this can be
done by opening a shell on the running Docker machine. Example:

```bash
$ docker-machine ssh default
docker@default:~$ sudo sysctl -w vm.max_map_count=262144
vm.max_map_count = 262144
```

After this, the containers can be started up with commands like:

```bash
make docker-compose-elk
```

```bash
make docker-compose-cluster-elk
```

These are run from the root folder of the ansible-tower repository.

### Connecting Logstash to 3rd Party Receivers

In order to send these logs to an external consumer of logstash-format
messages, replace the output section in the logstash.conf file.

```
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
  }
}
```
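For example, the same output section could point at an external Elasticsearch endpoint instead of the bundled container (the hostname below is a placeholder, not a real endpoint):

```
output {
  elasticsearch {
    hosts => "external-elasticsearch.example.com:9200"
  }
}
```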
## Changelog

- Current branch point `a776151221182dcfaec7df727459e208c895d25b`,
  Nov 18, 2016
- Original branch point `b5a4deee142b152d4f9232ebac5bbabb2d2cef3c`,
  Sep 25, 2016, before X-Pack support
49  tools/elastic/docker-compose.elastic-override.yml  Normal file

@@ -0,0 +1,49 @@
# Structure for the Elastic Stack docker configuration came from docker-elk:
# https://github.com/deviantony/docker-elk
# docker-elk is under the MIT License,
# a copy of its license is provided in docs/licenses/docker-elk.txt
# contents modified

version: '2'
services:

  # Components of ELK stack for logging
  elasticsearch:
    build: elastic/elasticsearch/
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xms1g -Xmx1g"
    # networks:  # add back in when a connection to tower_tools is possible
    #   - docker_elk

  logstash:
    build: elastic/logstash/
    command: -f /etc/logstash/conf.d/
    volumes:
      - ./elastic/logstash/config:/etc/logstash/conf.d
    ports:
      - "8085:8085"
    links:
      - elasticsearch
    # networks:
    #   - docker_elk
    depends_on:
      - elasticsearch
  kibana:
    build: elastic/kibana/
    volumes:
      - ./elastic/kibana/config/:/opt/kibana/config/
    ports:
      - "5601:5601"
    links:
      - elasticsearch
    # networks:
    #   - docker_elk
    depends_on:
      - elasticsearch

# networks:
#   docker_elk:
#     driver: bridge
12  tools/elastic/docker-compose.logstash-link-cluster.yml  Normal file

@@ -0,0 +1,12 @@
version: '2'
services:
  # Tower Development Cluster
  tower_1:
    links:
      - logstash
  tower_2:
    links:
      - logstash
  tower_3:
    links:
      - logstash
6  tools/elastic/docker-compose.logstash-link.yml  Normal file

@@ -0,0 +1,6 @@
version: '2'
services:
  # Primary Tower Development Container
  tower:
    links:
      - logstash
5  tools/elastic/elasticsearch/Dockerfile  Normal file

@@ -0,0 +1,5 @@
FROM elasticsearch:5

ENV ES_JAVA_OPTS="-Des.path.conf=/etc/elasticsearch"

CMD ["-E", "network.host=0.0.0.0", "-E", "discovery.zen.minimum_master_nodes=1"]
1  tools/elastic/elasticsearch/config/.placeholder  Normal file

@@ -0,0 +1 @@
Ensure the existence of the parent folder.
1  tools/elastic/kibana/Dockerfile  Normal file

@@ -0,0 +1 @@
FROM kibana:5
92  tools/elastic/kibana/config/kibana.yml  Normal file

@@ -0,0 +1,92 @@
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601

# This setting specifies the IP address of the back end server.
server.host: "0.0.0.0"

# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This setting
# cannot end in a slash.
# server.basePath: ""

# The maximum payload size in bytes for incoming server requests.
# server.maxPayloadBytes: 1048576

# The Kibana server's name. This is used for display purposes.
# server.name: "your-hostname"

# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://elasticsearch:9200"

# When this setting’s value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
# elasticsearch.preserveHost: true

# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn’t already exist.
# kibana.index: ".kibana"

# The default application to load.
# kibana.defaultAppId: "discover"

# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
# elasticsearch.username: "user"
# elasticsearch.password: "pass"

# Paths to the PEM-format SSL certificate and SSL key files, respectively. These
# files enable SSL for outgoing requests from the Kibana server to the browser.
# server.ssl.cert: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key

# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
# elasticsearch.ssl.cert: /path/to/your/client.crt
# elasticsearch.ssl.key: /path/to/your/client.key

# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
# elasticsearch.ssl.ca: /path/to/your/CA.pem

# To disregard the validity of SSL certificates, change this setting’s value to false.
# elasticsearch.ssl.verify: true

# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
# elasticsearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
# elasticsearch.requestTimeout: 30000

# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
# elasticsearch.requestHeadersWhitelist: [ authorization ]

# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
# elasticsearch.shardTimeout: 0

# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
# elasticsearch.startupTimeout: 5000

# Specifies the path where Kibana creates the process ID file.
# pid.file: /var/run/kibana.pid

# Enables you specify a file where Kibana stores log output.
# logging.dest: stdout

# Set the value of this setting to true to suppress all logging output.
# logging.silent: false

# Set the value of this setting to true to suppress all logging output other than error messages.
# logging.quiet: false

# Set the value of this setting to true to log all events, including system usage information
# and all requests.
# logging.verbose: false

# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 10000.
# ops.interval: 10000
4  tools/elastic/logstash/Dockerfile  Normal file

@@ -0,0 +1,4 @@
FROM logstash:5

# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json
19  tools/elastic/logstash/config/logstash.conf  Normal file

@@ -0,0 +1,19 @@
input {
  http {
    port => 8085
  }
}

## Add your filters / logstash plugins configuration here

filter {
  json {
    source => "message"
  }
}

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
  }
}
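Since this pipeline's input is a plain HTTP listener on port 8085 (the same endpoint that awx.main.log_utils.handlers.HTTPSHandler posts to), it can be sanity-checked with a simple JSON POST. A sketch, assuming the containers are up and port 8085 is published locally:

```python
# Illustrative check of the http -> json filter -> elasticsearch pipeline above.
import json
import requests

event = {'message': 'test event', 'logger_name': 'awx.analytics.job_events', 'level': 'INFO'}
resp = requests.post('http://localhost:8085', data=json.dumps(event))
print(resp.status_code)  # expect 200 once logstash has accepted the event
```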