mirror of
https://github.com/ansible/awx.git
synced 2026-03-16 08:27:29 -02:30
Merge branch 'devel' into patch-1
This commit is contained in:
@@ -17,4 +17,11 @@ set -e
|
||||
|
||||
wait-for-migrations
|
||||
|
||||
supervisord -c /etc/supervisord.conf
|
||||
# This file will be re-written when the dispatcher calls reconfigure_rsyslog(),
|
||||
# but it needs to exist when supervisor initially starts rsyslog to prevent the
|
||||
# container from crashing. This was the most minimal config I could get working.
|
||||
cat << EOF > /var/lib/awx/rsyslog/rsyslog.conf
|
||||
action(type="omfile" file="/dev/null")
|
||||
EOF
|
||||
|
||||
exec supervisord -c /etc/supervisord.conf
|
||||
|
||||
@@ -17,4 +17,6 @@ set -e
|
||||
|
||||
wait-for-migrations
|
||||
|
||||
supervisord -c /etc/supervisord_task.conf
|
||||
awx-manage provision_instance
|
||||
|
||||
exec supervisord -c /etc/supervisord_task.conf
|
||||
|
||||
8
tools/ansible/roles/dockerfile/files/stop-supervisor
Executable file
8
tools/ansible/roles/dockerfile/files/stop-supervisor
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
printf "READY\n";
|
||||
|
||||
while read line; do
|
||||
echo "Processing Event: $line" >&2;
|
||||
kill -SIGQUIT $PPID
|
||||
done < /dev/stdin
|
||||
19
tools/ansible/roles/dockerfile/files/uwsgi.ini
Normal file
19
tools/ansible/roles/dockerfile/files/uwsgi.ini
Normal file
@@ -0,0 +1,19 @@
|
||||
[uwsgi]
|
||||
socket = 127.0.0.1:8050
|
||||
processes = 5
|
||||
master = true
|
||||
vacuum = true
|
||||
no-orphans = true
|
||||
lazy-apps = true
|
||||
manage-script-name = true
|
||||
master-fifo = /var/lib/awx/awxfifo
|
||||
max-requests = 1000
|
||||
buffer-size = 32768
|
||||
|
||||
if-env = UWSGI_MOUNT_PATH
|
||||
mount = %(_)=awx.wsgi:application
|
||||
endif =
|
||||
|
||||
if-not-env = UWSGI_MOUNT_PATH
|
||||
mount = /=awx.wsgi:application
|
||||
endif =
|
||||
@@ -5,7 +5,7 @@
|
||||
###
|
||||
|
||||
# Build container
|
||||
FROM quay.io/centos/centos:8 as builder
|
||||
FROM quay.io/centos/centos:stream9 as builder
|
||||
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
@@ -15,15 +15,13 @@ ENV LC_ALL en_US.UTF-8
|
||||
USER root
|
||||
|
||||
# Install build dependencies
|
||||
RUN dnf -y module enable 'postgresql:12'
|
||||
RUN dnf -y update && \
|
||||
dnf -y install epel-release 'dnf-command(config-manager)' && \
|
||||
dnf module -y enable 'postgresql:12' && \
|
||||
dnf config-manager --set-enabled powertools && \
|
||||
RUN dnf -y update && dnf install -y 'dnf-command(config-manager)' && \
|
||||
dnf config-manager --set-enabled crb && \
|
||||
dnf -y install \
|
||||
gcc \
|
||||
gcc-c++ \
|
||||
git-core \
|
||||
gettext \
|
||||
glibc-langpack-en \
|
||||
libffi-devel \
|
||||
libtool-ltdl-devel \
|
||||
@@ -34,18 +32,18 @@ RUN dnf -y update && \
|
||||
nss \
|
||||
openldap-devel \
|
||||
patch \
|
||||
@postgresql:12 \
|
||||
postgresql \
|
||||
postgresql-devel \
|
||||
python38-devel \
|
||||
python38-pip \
|
||||
python38-psycopg2 \
|
||||
python38-setuptools \
|
||||
python3-devel \
|
||||
python3-pip \
|
||||
python3-psycopg2 \
|
||||
python3-setuptools \
|
||||
swig \
|
||||
unzip \
|
||||
xmlsec1-devel \
|
||||
xmlsec1-openssl-devel
|
||||
|
||||
RUN python3.8 -m ensurepip && pip3 install "virtualenv < 20"
|
||||
RUN pip3 install virtualenv
|
||||
|
||||
|
||||
# Install & build requirements
|
||||
@@ -69,7 +67,7 @@ RUN cd /tmp && make requirements_awx_dev
|
||||
# Use the distro provided npm to bootstrap our required version of node
|
||||
|
||||
{% if not headless|bool %}
|
||||
RUN npm install -g n && n 14.15.1
|
||||
RUN npm install -g n && n 16.13.1
|
||||
{% endif %}
|
||||
|
||||
# Copy source into builder, build sdist, install it into awx venv
|
||||
@@ -85,7 +83,7 @@ RUN SKIP_PG_VERSION_CHECK=yes /var/lib/awx/venv/awx/bin/awx-manage collectstatic
|
||||
{% endif %}
|
||||
|
||||
# Final container(s)
|
||||
FROM quay.io/centos/centos:8
|
||||
FROM quay.io/centos/centos:stream9
|
||||
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
@@ -94,43 +92,35 @@ ENV LC_ALL en_US.UTF-8
|
||||
USER root
|
||||
|
||||
# Install runtime requirements
|
||||
RUN dnf -y module enable 'postgresql:12'
|
||||
RUN dnf -y update && \
|
||||
dnf -y install epel-release 'dnf-command(config-manager)' && \
|
||||
dnf module -y enable 'postgresql:12' && \
|
||||
dnf config-manager --set-enabled powertools && \
|
||||
RUN dnf -y update && dnf install -y 'dnf-command(config-manager)' && \
|
||||
dnf config-manager --set-enabled crb && \
|
||||
dnf -y install acl \
|
||||
git-core \
|
||||
git-lfs \
|
||||
glibc-langpack-en \
|
||||
krb5-workstation \
|
||||
libcgroup-tools \
|
||||
nginx \
|
||||
@postgresql:12 \
|
||||
postgresql \
|
||||
python3-devel \
|
||||
python3-libselinux \
|
||||
python38-pip \
|
||||
python38-psycopg2 \
|
||||
python38-setuptools \
|
||||
python3-pip \
|
||||
python3-psycopg2 \
|
||||
python3-setuptools \
|
||||
rsync \
|
||||
"rsyslog >= 8.1911.0" \
|
||||
subversion \
|
||||
sudo \
|
||||
vim-minimal \
|
||||
which \
|
||||
unzip \
|
||||
xmlsec1-openssl && \
|
||||
dnf -y install centos-release-stream && dnf -y install "rsyslog >= 8.1911.0" && dnf -y remove centos-release-stream && \
|
||||
dnf -y clean all
|
||||
|
||||
RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && \
|
||||
chmod 700 get_helm.sh && \
|
||||
./get_helm.sh
|
||||
|
||||
# Install tini
|
||||
RUN curl -L -o /usr/bin/tini https://github.com/krallin/tini/releases/download/v0.19.0/tini-{{ tini_architecture | default('amd64') }} && \
|
||||
chmod +x /usr/bin/tini
|
||||
|
||||
RUN python3.8 -m ensurepip && pip3 install "virtualenv < 20" supervisor
|
||||
RUN pip3 install virtualenv supervisor dumb-init
|
||||
|
||||
RUN rm -rf /root/.cache && rm -rf /tmp/*
|
||||
|
||||
@@ -141,9 +131,12 @@ RUN dnf -y install \
|
||||
gdb \
|
||||
gtk3 \
|
||||
gettext \
|
||||
hostname \
|
||||
procps \
|
||||
alsa-lib \
|
||||
libX11-xcb \
|
||||
libXScrnSaver \
|
||||
iproute \
|
||||
strace \
|
||||
vim \
|
||||
nmap-ncat \
|
||||
@@ -157,30 +150,18 @@ RUN dnf -y install \
|
||||
wget \
|
||||
diffutils \
|
||||
unzip && \
|
||||
npm install -g n && n 14.15.1 && npm install -g npm@7.20.3 && dnf remove -y nodejs
|
||||
npm install -g n && n 16.13.1 && npm install -g npm@8.5.0 && dnf remove -y nodejs
|
||||
|
||||
RUN pip3 install black git+https://github.com/coderanger/supervisor-stdout
|
||||
|
||||
# This package randomly fails to download.
|
||||
# It is nice to have in the dev env, but not necessary.
|
||||
# Add it back to the list above if the repo ever straighten up.
|
||||
RUN dnf --enablerepo=debuginfo -y install python3-debuginfo || :
|
||||
{% endif %}
|
||||
RUN dnf --enablerepo=baseos-debug -y install python3-debuginfo || :
|
||||
|
||||
{% if build_dev|bool %}
|
||||
RUN dnf install -y podman
|
||||
RUN echo -e '[engine]\ncgroup_manager = "cgroupfs"\nevents_logger = "file"\nruntime = "crun"' > /etc/containers/containers.conf
|
||||
RUN dnf install -y epel-next-release && dnf install -y inotify-tools && dnf remove -y epel-next-release
|
||||
{% endif %}
|
||||
|
||||
# Fix overlay filesystem issue
|
||||
{% if build_dev|bool %}
|
||||
RUN sed -i '/^#mount_program/s/^#//' /etc/containers/storage.conf
|
||||
{% endif %}
|
||||
|
||||
# Ensure we must use fully qualified image names
|
||||
# This prevents podman prompt that hangs when trying to pull unqualified images
|
||||
RUN mkdir -p /etc/containers/registries.conf.d/ && echo "unqualified-search-registries = []" >> /etc/containers/registries.conf.d/force-fully-qualified-images.conf && chmod 644 /etc/containers/registries.conf.d/force-fully-qualified-images.conf
|
||||
|
||||
# Copy app from builder
|
||||
COPY --from=builder /var/lib/awx /var/lib/awx
|
||||
|
||||
@@ -188,15 +169,30 @@ RUN ln -s /var/lib/awx/venv/awx/bin/awx-manage /usr/bin/awx-manage
|
||||
|
||||
{%if build_dev|bool %}
|
||||
COPY --from={{ receptor_image }} /usr/bin/receptor /usr/bin/receptor
|
||||
|
||||
RUN openssl req -nodes -newkey rsa:2048 -keyout /etc/nginx/nginx.key -out /etc/nginx/nginx.csr \
|
||||
-subj "/C=US/ST=North Carolina/L=Durham/O=Ansible/OU=AWX Development/CN=awx.localhost" && \
|
||||
openssl x509 -req -days 365 -in /etc/nginx/nginx.csr -signkey /etc/nginx/nginx.key -out /etc/nginx/nginx.crt && \
|
||||
chmod 640 /etc/nginx/nginx.{csr,key,crt}
|
||||
{% endif %}
|
||||
|
||||
{% if build_dev|bool %}
|
||||
RUN dnf install -y podman && rpm --restore shadow-utils 2>/dev/null
|
||||
|
||||
# chmod containers.conf and adjust storage.conf to enable Fuse storage.
|
||||
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf
|
||||
|
||||
ENV _CONTAINERS_USERNS_CONFIGURED=""
|
||||
|
||||
# Ensure we must use fully qualified image names
|
||||
# This prevents podman prompt that hangs when trying to pull unqualified images
|
||||
RUN mkdir -p /etc/containers/registries.conf.d/ && echo "unqualified-search-registries = []" >> /etc/containers/registries.conf.d/force-fully-qualified-images.conf && chmod 644 /etc/containers/registries.conf.d/force-fully-qualified-images.conf
|
||||
{% endif %}
|
||||
|
||||
# Create default awx rsyslog config
|
||||
ADD tools/ansible/roles/dockerfile/files/rsyslog.conf /var/lib/awx/rsyslog/rsyslog.conf
|
||||
ADD tools/ansible/roles/dockerfile/files/wait-for-migrations /usr/local/bin/wait-for-migrations
|
||||
ADD tools/ansible/roles/dockerfile/files/stop-supervisor /usr/local/bin/stop-supervisor
|
||||
|
||||
## File mappings
|
||||
{% if build_dev|bool %}
|
||||
@@ -206,13 +202,16 @@ ADD tools/docker-compose/nginx.vh.default.conf /etc/nginx/conf.d/nginx.vh.defaul
|
||||
ADD tools/docker-compose/start_tests.sh /start_tests.sh
|
||||
ADD tools/docker-compose/bootstrap_development.sh /usr/bin/bootstrap_development.sh
|
||||
ADD tools/docker-compose/entrypoint.sh /entrypoint.sh
|
||||
ADD tools/scripts/config-watcher /usr/bin/config-watcher
|
||||
ADD https://raw.githubusercontent.com/containers/libpod/master/contrib/podmanimage/stable/containers.conf /etc/containers/containers.conf
|
||||
ADD https://raw.githubusercontent.com/containers/libpod/master/contrib/podmanimage/stable/podman-containers.conf /var/lib/awx/.config/containers/containers.conf
|
||||
{% else %}
|
||||
ADD tools/ansible/roles/dockerfile/files/launch_awx.sh /usr/bin/launch_awx.sh
|
||||
ADD tools/ansible/roles/dockerfile/files/launch_awx_task.sh /usr/bin/launch_awx_task.sh
|
||||
ADD tools/ansible/roles/dockerfile/files/settings.py /etc/tower/settings.py
|
||||
ADD tools/ansible/roles/dockerfile/files/uwsgi.ini /etc/tower/uwsgi.ini
|
||||
ADD {{ template_dest }}/supervisor.conf /etc/supervisord.conf
|
||||
ADD {{ template_dest }}/supervisor_task.conf /etc/supervisord_task.conf
|
||||
ADD tools/scripts/config-watcher /usr/bin/config-watcher
|
||||
{% endif %}
|
||||
{% if (build_dev|bool) or (kube_dev|bool) %}
|
||||
ADD tools/docker-compose/awx.egg-link /tmp/awx.egg-link
|
||||
@@ -233,7 +232,7 @@ RUN for dir in \
|
||||
/var/run/supervisor \
|
||||
/var/run/awx-receptor \
|
||||
/var/lib/nginx ; \
|
||||
do mkdir -m 0775 -p $dir ; chmod g+rw $dir ; chgrp root $dir ; done && \
|
||||
do mkdir -m 0775 -p $dir ; chmod g+rwx $dir ; chgrp root $dir ; done && \
|
||||
for file in \
|
||||
/etc/subuid \
|
||||
/etc/subgid \
|
||||
@@ -244,19 +243,32 @@ RUN for dir in \
|
||||
|
||||
{% if (build_dev|bool) or (kube_dev|bool) %}
|
||||
RUN for dir in \
|
||||
/etc/containers \
|
||||
/var/lib/awx/.config/containers \
|
||||
/var/lib/awx/.config/cni \
|
||||
/var/lib/awx/venv \
|
||||
/var/lib/awx/venv/awx/bin \
|
||||
/var/lib/awx/venv/awx/lib/python3.8 \
|
||||
/var/lib/awx/venv/awx/lib/python3.8/site-packages \
|
||||
/var/lib/awx/venv/awx/lib/python3.9 \
|
||||
/var/lib/awx/venv/awx/lib/python3.9/site-packages \
|
||||
/var/lib/awx/projects \
|
||||
/var/lib/awx/rsyslog \
|
||||
/var/run/awx-rsyslog \
|
||||
/.ansible \
|
||||
/var/lib/shared/overlay-images \
|
||||
/var/lib/shared/overlay-layers \
|
||||
/var/lib/shared/vfs-images \
|
||||
/var/lib/shared/vfs-layers \
|
||||
/var/lib/awx/vendor ; \
|
||||
do mkdir -m 0775 -p $dir ; chmod g+rw $dir ; chgrp root $dir ; done && \
|
||||
do mkdir -m 0775 -p $dir ; chmod g+rwx $dir ; chgrp root $dir ; done && \
|
||||
for file in \
|
||||
/etc/containers/containers.conf \
|
||||
/var/lib/awx/.config/containers/containers.conf \
|
||||
/var/lib/shared/overlay-images/images.lock \
|
||||
/var/lib/shared/overlay-layers/layers.lock \
|
||||
/var/lib/shared/vfs-images/images.lock \
|
||||
/var/lib/shared/vfs-layers/layers.lock \
|
||||
/var/run/nginx.pid \
|
||||
/var/lib/awx/venv/awx/lib/python3.8/site-packages/awx.egg-link ; \
|
||||
/var/lib/awx/venv/awx/lib/python3.9/site-packages/awx.egg-link ; \
|
||||
do touch $file ; chmod g+rw $file ; done
|
||||
{% endif %}
|
||||
|
||||
@@ -279,8 +291,8 @@ CMD ["/bin/bash"]
|
||||
USER 1000
|
||||
EXPOSE 8052
|
||||
|
||||
ENTRYPOINT ["/usr/bin/tini", "--"]
|
||||
ENTRYPOINT ["dumb-init", "--"]
|
||||
CMD /usr/bin/launch_awx.sh
|
||||
VOLUME /var/lib/nginx
|
||||
VOLUME /var/lib/awx/.local/share/containers/storage
|
||||
VOLUME /var/lib/awx/.local/share/containers
|
||||
{% endif %}
|
||||
|
||||
@@ -12,9 +12,10 @@ directory = /awx_devel
|
||||
{% else %}
|
||||
command = nginx -g "daemon off;"
|
||||
{% endif %}
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 5
|
||||
startsecs = 30
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
@@ -26,17 +27,15 @@ stderr_logfile_maxbytes=0
|
||||
command = make uwsgi
|
||||
directory = /awx_devel
|
||||
environment =
|
||||
UWSGI_DEV_RELOAD_COMMAND='supervisorctl -c /etc/supervisord_task.conf restart all; supervisorctl restart tower-processes:daphne tower-processes:wsbroadcast'
|
||||
DEV_RELOAD_COMMAND='supervisorctl -c /etc/supervisord_task.conf restart all; supervisorctl restart tower-processes:daphne tower-processes:wsbroadcast'
|
||||
{% else %}
|
||||
command = /var/lib/awx/venv/awx/bin/uwsgi --socket 127.0.0.1:8050 --module=awx.wsgi:application --vacuum --processes=5 --harakiri=120 --no-orphans --master --max-requests=1000 --master-fifo=/var/lib/awx/awxfifo --lazy-apps -b 32768
|
||||
command = /var/lib/awx/venv/awx/bin/uwsgi /etc/tower/uwsgi.ini
|
||||
directory = /var/lib/awx
|
||||
{% endif %}
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 15
|
||||
startsecs = 30
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stopsignal=KILL
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
@@ -50,10 +49,8 @@ directory = /awx_devel
|
||||
command = /var/lib/awx/venv/awx/bin/daphne -b 127.0.0.1 -p 8051 --websocket_timeout -1 awx.asgi:channel_layer
|
||||
directory = /var/lib/awx
|
||||
{% endif %}
|
||||
autostart = true
|
||||
stopsignal=KILL
|
||||
autorestart = true
|
||||
stopwaitsecs = 5
|
||||
startsecs = 30
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
@@ -69,9 +66,8 @@ directory = /awx_devel
|
||||
command = awx-manage run_wsbroadcast
|
||||
directory = /var/lib/awx
|
||||
{% endif %}
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 5
|
||||
startsecs = 30
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
@@ -81,31 +77,26 @@ stderr_logfile_maxbytes=0
|
||||
|
||||
[program:awx-rsyslogd]
|
||||
command = rsyslogd -n -i /var/run/awx-rsyslog/rsyslog.pid -f /var/lib/awx/rsyslog/rsyslog.conf
|
||||
autostart = true
|
||||
autorestart = true
|
||||
startretries = 10
|
||||
stopwaitsecs = 5
|
||||
stopsignal=TERM
|
||||
startsecs = 30
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
redirect_stderr=true
|
||||
stdout_logfile=/dev/stderr
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[group:tower-processes]
|
||||
programs=nginx,uwsgi,daphne,wsbroadcast,awx-rsyslogd
|
||||
priority=5
|
||||
|
||||
# TODO: Exit Handler
|
||||
|
||||
[eventlistener:awx-config-watcher]
|
||||
command=/usr/bin/config-watcher
|
||||
stderr_logfile=/dev/stdout
|
||||
stderr_logfile_maxbytes=0
|
||||
[eventlistener:superwatcher]
|
||||
command=stop-supervisor
|
||||
events=PROCESS_STATE_FATAL
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
events=TICK_60
|
||||
priority=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[unix_http_server]
|
||||
file=/var/run/supervisor/supervisor.web.sock
|
||||
|
||||
@@ -13,9 +13,8 @@ directory = /awx_devel
|
||||
command = awx-manage run_dispatcher
|
||||
directory = /var/lib/awx
|
||||
{% endif %}
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 5
|
||||
startsecs = 30
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
@@ -31,9 +30,8 @@ directory = /awx_devel
|
||||
command = awx-manage run_callback_receiver
|
||||
directory = /var/lib/awx
|
||||
{% endif %}
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 5
|
||||
startsecs = 30
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
@@ -45,16 +43,14 @@ stderr_logfile_maxbytes=0
|
||||
programs=dispatcher,callback-receiver
|
||||
priority=5
|
||||
|
||||
# TODO: Exit Handler
|
||||
|
||||
[eventlistener:awx-config-watcher]
|
||||
command=/usr/bin/config-watcher
|
||||
stderr_logfile=/dev/stdout
|
||||
stderr_logfile_maxbytes=0
|
||||
[eventlistener:superwatcher]
|
||||
command=stop-supervisor
|
||||
events=PROCESS_STATE_FATAL
|
||||
autorestart = true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
events=TICK_60
|
||||
priority=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[unix_http_server]
|
||||
file=/var/run/supervisor/supervisor.sock
|
||||
|
||||
@@ -12,11 +12,11 @@
|
||||
|
||||
- name: Tag and Push Container Images
|
||||
docker_image:
|
||||
name: "{{ awx_image }}:{{ awx_version }}"
|
||||
name: "{{ awx_image }}:{{ awx_image_tag }}"
|
||||
repository: "{{ registry }}/{{ awx_image }}:{{ item }}"
|
||||
force_tag: yes
|
||||
push: true
|
||||
source: local
|
||||
with_items:
|
||||
- "latest"
|
||||
- "{{ awx_version }}"
|
||||
- "{{ awx_image_tag }}"
|
||||
|
||||
@@ -4,6 +4,7 @@ users 500 5000 3 3 3 3 3 3 110
|
||||
teams 200 500 2 2 2 2 2 2 100
|
||||
projects 150 1000 30 30 30 30 30 30 110
|
||||
job_templates 300 2000 127 127 127 127 127 127 110
|
||||
schedules 50 1 5 8 1 1 1 1 1
|
||||
credentials 150 2000 50 50 50 50 50 50 110
|
||||
inventories 150 2000 6 6 6 6 6 6 110
|
||||
inventory_groups 700 500 15 15 15 15 15 15 110
|
||||
@@ -12,4 +13,4 @@ wfjts 50 100 0 0 0 0 0 0 0
|
||||
nodes 1000 1000 0 0 0 0 0 0 0
|
||||
labels 1000 1000 0 0 0 0 0 0 0
|
||||
jobs 2000 5000 157208 1000 10000 50000 100000 200000 1000
|
||||
job_events 40000 100000 3370942 20000 200000 1000000 2000000 4000000 20000
|
||||
job_events 40000 100000 3370942 20000 200000 1000000 2000000 4000000 20000
|
||||
|
||||
|
@@ -53,6 +53,7 @@ from awx.main.models import ( # noqa
|
||||
WorkflowJobTemplateNode,
|
||||
batch_role_ancestor_rebuilding,
|
||||
)
|
||||
from awx.main.models.schedules import Schedule #noqa
|
||||
|
||||
from awx.main.signals import disable_activity_stream, disable_computed_fields # noqa
|
||||
|
||||
@@ -63,6 +64,7 @@ option_list = [
|
||||
make_option('--teams', action='store', type='int', default=5, help='Number of teams to create'),
|
||||
make_option('--projects', action='store', type='int', default=10, help='Number of projects to create'),
|
||||
make_option('--job-templates', action='store', type='int', default=20, help='Number of job templates to create'),
|
||||
make_option('--schedules', action='store', type='int', default=50, help='Number of schedules to create'),
|
||||
make_option('--credentials', action='store', type='int', default=5, help='Number of credentials to create'),
|
||||
make_option('--inventories', action='store', type='int', default=5, help='Number of credentials to create'),
|
||||
make_option('--inventory-groups', action='store', type='int', default=10, help='Number of credentials to create'),
|
||||
@@ -110,6 +112,7 @@ n_users = int(options['users'])
|
||||
n_teams = int(options['teams'])
|
||||
n_projects = int(options['projects'])
|
||||
n_job_templates = int(options['job_templates'])
|
||||
n_schedules = int(options['schedules'])
|
||||
n_credentials = int(options['credentials'])
|
||||
n_inventories = int(options['inventories'])
|
||||
n_inventory_groups = int(options['inventory_groups'])
|
||||
@@ -126,6 +129,7 @@ users = []
|
||||
teams = []
|
||||
projects = []
|
||||
job_templates = []
|
||||
schedules = []
|
||||
credentials = []
|
||||
inventories = []
|
||||
inventory_groups = []
|
||||
@@ -570,6 +574,29 @@ def make_the_data():
|
||||
if n:
|
||||
print('')
|
||||
|
||||
print('# Creating %d Schedules' % n_schedules)
|
||||
jt_idx = 0
|
||||
for n in spread(n_schedules, n_job_templates):
|
||||
jt = job_templates[0]
|
||||
for i in range(n):
|
||||
ids['schedules'] += 1
|
||||
schedules_id = ids['schedules']
|
||||
unified_job_template = job_templates[jt_idx]
|
||||
|
||||
sys.stdout.write('\r Assigning %d to %s: %d ' % (n, jt, i + 1))
|
||||
sys.stdout.flush()
|
||||
schedule, _ = Schedule.objects.get_or_create(
|
||||
name='%s Schedule %d' % (prefix, schedules_id),
|
||||
rrule="DTSTART;TZID=America/New_York:20220505T111500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY",
|
||||
created_by=next(creator_gen),
|
||||
modified_by=next(modifier_gen),
|
||||
unified_job_template=unified_job_template,
|
||||
)
|
||||
schedule._is_new = _
|
||||
schedules.append(schedule)
|
||||
|
||||
|
||||
|
||||
print('# Creating %d Labels' % n_labels)
|
||||
org_idx = 0
|
||||
for n in spread(n_labels, n_organizations):
|
||||
|
||||
@@ -24,7 +24,7 @@ rules:
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "create", "delete"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ minikube_service_account_name }}
|
||||
|
||||
@@ -241,6 +241,11 @@ $ make docker-compose
|
||||
- [Start a shell](#start-a-shell)
|
||||
- [Start AWX from the container shell](#start-awx-from-the-container-shell)
|
||||
- [Using Logstash](./docs/logstash.md)
|
||||
- [Start a Cluster](#start-a-cluster)
|
||||
- [Start with Minikube](#start-with-minikube)
|
||||
- [Keycloak Integration](#keycloak-integration)
|
||||
- [OpenLDAP Integration](#openldap-integration)
|
||||
- [Splunk Integration](#splunk-integration)
|
||||
|
||||
### Start a Shell
|
||||
|
||||
@@ -311,3 +316,163 @@ If you want to clean all things once your are done, you can do:
|
||||
```bash
|
||||
(host)$ make docker-compose-container-group-clean
|
||||
```
|
||||
|
||||
### Keycloak Integration
|
||||
Keycloak is a SAML provider and can be used to test AWX social auth. This section describes how to build a reference Keycloak instance and plumb it with AWX for testing purposes.
|
||||
|
||||
First, be sure that you have the awx.awx collection installed by running `make install_collection`.
|
||||
Next, make sure you have your containers running by running `make docker-compose`.
|
||||
|
||||
Note: The following instructions assume we are using the built-in postgres database container. If you are not using the internal database you can use this guide as a reference, updating the database fields as required for your connection.
|
||||
|
||||
We are now ready to run two one time commands to build and pre-populate the Keycloak database.
|
||||
|
||||
The first one time command will be creating a Keycloak database in your postgres database by running:
|
||||
```bash
|
||||
docker exec tools_postgres_1 /usr/bin/psql -U awx --command "create database keycloak with encoding 'UTF8';"
|
||||
```
|
||||
|
||||
After running this command the following message should appear and you should be returned to your prompt:
|
||||
```bash
|
||||
CREATE DATABASE
|
||||
```
|
||||
|
||||
The second one time command will be to start a Keycloak container to build our admin user; be sure to set pg_username and pg_password to work for your installation. Note: the command below sets the username as admin with a password of admin, you can change this if you want. Also, if you are using your own container or have changed the pg_username please update the command accordingly.
|
||||
```bash
|
||||
PG_PASSWORD=`cat tools/docker-compose/_sources/secrets/pg_password.yml | cut -f 2 -d \'`
|
||||
docker run --rm -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=admin --net=_sources_default \
|
||||
-e DB_VENDOR=postgres -e DB_ADDR=postgres -e DB_DATABASE=keycloak -e DB_USER=awx -e DB_PASSWORD=${PG_PASSWORD} \
|
||||
quay.io/keycloak/keycloak:15.0.2
|
||||
```
|
||||
|
||||
Once you see a message like: `WFLYSRV0051: Admin console listening on http://127.0.0.1:9990` you can stop the container.
|
||||
|
||||
Now that we have performed the one time setup anytime you want to run a Keycloak instance alongside AWX we can start docker-compose with the KEYCLOAK option to get a Keycloak instance with the command:
|
||||
```bash
|
||||
KEYCLOAK=true make docker-compose
|
||||
```
|
||||
|
||||
Go ahead and stop your existing docker-compose run and restart with Keycloak before proceeding to the next steps.
|
||||
|
||||
Once the containers come up a new port (8443) should be exposed and the Keycloak interface should be running on that port. Connect to this through a url like `https://localhost:8443` to confirm that Keycloak has started. If you wanted to log in and look at Keycloak itself you could select the "Administration console" link and log into the UI with the username/password set in the previous `docker run` command. For more information about Keycloak and links to their documentation see their project at https://github.com/keycloak/keycloak.
|
||||
|
||||
Now we are ready to configure and plumb Keycloak with AWX. To do this we have provided a playbook which will:
|
||||
* Create a certificate for data exchange between Keycloak and AWX.
|
||||
* Create a realm in Keycloak with a client for AWX and 3 users.
|
||||
* Back up and configure the SAML adapter in AWX. NOTE: the private key of any existing SAML adapters can not be backed up through the API, you need a DB backup to recover this.
|
||||
|
||||
Before we can run the playbook we need to understand that SAML works by sending redirects between AWX and Keycloak through the browser. Because of this we have to tell both AWX and Keycloak how they will construct the redirect URLs. On the Keycloak side, this is done within the realm configuration and on the AWX side it's done through the SAML settings. The playbook requires a variable called `container_reference` to be set. The container_reference variable needs to be how your browser will be able to talk to the running containers. Here are some examples of how to choose a proper container_reference.
|
||||
* If you develop on a mac which runs a Fedora VM which has AWX running within that and the browser you use to access AWX runs on the mac. The VM with the container has its own IP that is mapped to a name like `tower.home.net`. In this scenario your "container_reference" could be either the IP of the VM or the tower.home.net friendly name.
|
||||
* If you are on a Fedora workstation running AWX and also using a browser on your workstation you could use localhost, your workstation's IP or hostname as the container_reference.
|
||||
|
||||
In addition to container_reference, there are some additional variables which you can override if you need/choose to do so. Here are their names and default values:
|
||||
```yaml
|
||||
keycloak_user: admin
|
||||
keycloak_pass: admin
|
||||
cert_subject: "/C=US/ST=NC/L=Durham/O=awx/CN="
|
||||
```
|
||||
|
||||
* keycloak_(user|pass) need to change if you modified the user when starting the initial container above.
|
||||
* cert_subject will be the subject line of the certificate shared between AWX and keycloak you can change this if you like or just use the defaults.
|
||||
|
||||
To override any of the variables above you can add more `-e` arguments to the playbook run below. For example, if you simply need to change the `keycloak_pass` add the argument `-e keycloak_pass=my_secret_pass` to the next command.
|
||||
|
||||
In addition, you may need to override the username or password to get into your AWX instance. We log into AWX in order to read and write the SAML settings. This can be done in several ways because we are using the awx.awx collection. The easiest way is to set environment variables such as `CONTROLLER_USERNAME`. See the awx.awx documentation for more information on setting environment variables. In the example provided below we are showing an example of specifying a username/password for authentication.
|
||||
|
||||
Now that we have all of our variables covered we can run the playbook like:
|
||||
```bash
|
||||
export CONTROLLER_USERNAME=<your username>
|
||||
export CONTROLLER_PASSWORD=<your password>
|
||||
ansible-playbook tools/docker-compose/ansible/plumb_keycloak.yml -e container_reference=<your container_reference here>
|
||||
```
|
||||
|
||||
Once the playbook is done running SAML should now be setup in your development environment. This realm has three users with the following username/passwords:
|
||||
1. awx_unpriv:unpriv123
|
||||
2. awx_admin:admin123
|
||||
3. awx_auditor:audit123
|
||||
|
||||
The first account is a normal user. The second account has the attribute is_superuser set in Keycloak so will be a super user in AWX. The third account has the is_system_auditor attribute in Keycloak so it will be a system auditor in AWX. To log in with one of these Keycloak users go to the AWX login screen and click the small "Sign In With SAML Keycloak" button at the bottom of the login box.
|
||||
|
||||
### OpenLDAP Integration
|
||||
|
||||
OpenLDAP is an LDAP provider that can be used to test AWX with LDAP integration. This section describes how to build a reference OpenLDAP instance and plumb it with your AWX for testing purposes.
|
||||
|
||||
First, be sure that you have the awx.awx collection installed by running `make install_collection`.
|
||||
|
||||
Anytime you want to run an OpenLDAP instance alongside AWX we can start docker-compose with the LDAP option to get an LDAP instance with the command:
|
||||
```bash
|
||||
LDAP=true make docker-compose
|
||||
```
|
||||
|
||||
Once the containers come up two new ports (389, 636) should be exposed and the LDAP server should be running on those ports. The first port (389) is non-SSL and the second port (636) is SSL enabled.
|
||||
|
||||
Now we are ready to configure and plumb OpenLDAP with AWX. To do this we have provided a playbook which will:
|
||||
* Backup and configure the LDAP adapter in AWX. NOTE: this will back up your existing settings but the password fields can not be backed up through the API, you need a DB backup to recover this.
|
||||
|
||||
Note: The default configuration will utilize the non-tls connection. If you want to use the tls configuration you will need to work through TLS negotiation issues because the LDAP server is using a self signed certificate.
|
||||
|
||||
Before we can run the playbook we need to understand that LDAP will be communicated to from within the AWX container. Because of this, we have to tell AWX how to route traffic to the LDAP container through the `LDAP Server URI` settings. The playbook requires a variable called container_reference to be set. The container_reference variable needs to be how your AWX container will be able to talk to the LDAP container. See the SAML section for some examples for how to select a `container_reference`.
|
||||
|
||||
Once you have your container reference you can run the playbook like:
|
||||
```bash
|
||||
export CONTROLLER_USERNAME=<your username>
|
||||
export CONTROLLER_PASSWORD=<your password>
|
||||
ansible-playbook tools/docker-compose/ansible/plumb_ldap.yml -e container_reference=<your container_reference here>
|
||||
```
|
||||
|
||||
|
||||
Once the playbook is done running LDAP should now be setup in your development environment. This realm has four users with the following username/passwords:
|
||||
1. awx_ldap_unpriv:unpriv123
|
||||
2. awx_ldap_admin:admin123
|
||||
3. awx_ldap_auditor:audit123
|
||||
4. awx_ldap_org_admin:orgadmin123
|
||||
|
||||
The first account is a normal user. The second account will be a super user in AWX. The third account will be a system auditor in AWX. The fourth account is an org admin. All users belong to an org called "LDAP Organization". To log in with one of these users go to the AWX login screen enter the username/password.
|
||||
|
||||
|
||||
### Splunk Integration
|
||||
|
||||
Splunk is a log aggregation tool that can be used to test AWX with external logging integration. This section describes how to build a reference Splunk instance and plumb it with your AWX for testing purposes.
|
||||
|
||||
First, be sure that you have the awx.awx collection installed by running `make install_collection`.
|
||||
|
||||
Next, install the splunk.es collection by running `ansible-galaxy collection install splunk.es`.
|
||||
|
||||
Anytime you want to run a Splunk instance alongside AWX we can start docker-compose with the SPLUNK option to get a Splunk instance with the command:
|
||||
```bash
|
||||
SPLUNK=true make docker-compose
|
||||
```
|
||||
|
||||
Once the containers come up three new ports (8000, 8089 and 9199) should be exposed and the Splunk server should be running on some of those ports (the 9199 will be created later by the plumbing playbook). The first port (8000) is the non-SSL admin port and you can log into splunk with the credentials admin/splunk_admin. The url will be like http://<server>:8000/ this will be referenced below. The 8089 is the API port that the ansible modules will use to connect to and configure splunk. The 9199 port will be used to construct a TCP listener in Splunk that AWX will forward messages to.
|
||||
|
||||
Once the containers are up we are ready to configure and plumb Splunk with AWX. To do this we have provided a playbook which will:
|
||||
* Backup and configure the External Logging adapter in AWX. NOTE: this will back up your existing settings but the password fields can not be backed up through the API, you need a DB backup to recover this.
|
||||
* Create a TCP port in Splunk for log forwarding
|
||||
|
||||
For routing traffic between AWX and Splunk we will use the internal docker compose network. The `Logging Aggregator` will be configured using the internal network machine name of `splunk`.
|
||||
|
||||
Once you have the collections installed (from above) you can run the playbook like:
|
||||
```bash
|
||||
export CONTROLLER_USERNAME=<your username>
|
||||
export CONTROLLER_PASSWORD=<your password>
|
||||
ansible-playbook tools/docker-compose/ansible/plumb_splunk.yml
|
||||
```
|
||||
|
||||
Once the playbook is done running Splunk should now be setup in your development environment. You can log into the admin console (see above for username/password) and click on "Searching and Reporting" in the left hand navigation. In the search box enter `source="http:tower_logging_collections"` and click search.
|
||||
|
||||
|
||||
### Prometheus and Grafana integration
|
||||
|
||||
Prometheus is a metrics collecting tool, and we support prometheus formatted data at the `api/v2/metrics` endpoint.
|
||||
|
||||
1. Change the `username` and `password` in `tools/prometheus/prometheus.yml`. You can also change the scrape interval.
|
||||
2. (optional) if you are in a clustered environment, you can change the target to `haproxy:8043` so that the incoming prometheus requests go through the load balancer. Leaving it set to `awx1` also works.
|
||||
3. run `make prometheus`
|
||||
4. navigate to `http://localhost:9090/targets` and check that the metrics endpoint State is Up.
|
||||
5. Click the Graph tab, start typing a metric name, or use the Open metrics explorer button to find a metric to display (next to `Execute` button)
|
||||
|
||||
Prometheus can display basic graphs of your data, but it is minimal. Often Prometheus is paired with an app like Grafana for better visualization features.
|
||||
|
||||
1. `make grafana` to run a local docker grafana instance.
|
||||
2. Navigate to `http://localhost:3001`. Sign in, using `admin` for both username and password.
|
||||
3. Now you can create a dashboard and add panels for whichever metrics you like.
|
||||
|
||||
81
tools/docker-compose/ansible/plumb_keycloak.yml
Normal file
81
tools/docker-compose/ansible/plumb_keycloak.yml
Normal file
@@ -0,0 +1,81 @@
|
||||
---
# Configure a running AWX dev environment to authenticate via SAML against the
# docker-compose Keycloak container. Steps: generate a SAML keypair, back up
# and replace the AWX SAML settings, then create an "awx" realm in Keycloak.
- name: Plumb a keycloak instance
  hosts: localhost
  connection: local
  gather_facts: False
  vars:
    private_key_file: ../_sources/keycloak.key
    public_key_file: ../_sources/keycloak.cert
    awx_host: "https://localhost:8043"
    keycloak_realm_template: ../_sources/keycloak.awx.realm.json
    keycloak_user: admin
    keycloak_pass: admin
    cert_subject: "/C=US/ST=NC/L=Durham/O=awx/CN="
  tasks:
    # Self-signed cert used both by AWX (SP) and the Keycloak realm (IdP).
    - name: Generate certificates for keycloak
      command: 'openssl req -new -x509 -days 365 -nodes -out {{ public_key_file }} -keyout {{ private_key_file }} -subj "{{ cert_subject }}"'
      args:
        creates: "{{ public_key_file }}"

    - name: Load certs, existing and new SAML settings
      set_fact:
        private_key: "{{ private_key_content }}"
        public_key: "{{ public_key_content }}"
        public_key_trimmed: "{{ public_key_content | regex_replace('-----BEGIN CERTIFICATE-----\\\\n', '') | regex_replace('\\\\n-----END CERTIFICATE-----', '') }}"
        existing_saml: "{{ lookup('awx.awx.controller_api', 'settings/saml', host=awx_host, verify_ssl=false) }}"
        new_saml: "{{ lookup('template', 'saml_settings.json.j2') }}"
      vars:
        # We add the extra \\ in here so that when jinja is templating out the files we end up with \n in the strings.
        public_key_content: "{{ lookup('file', public_key_file) | regex_replace('\n', '\\\\n') }}"
        private_key_content: "{{ lookup('file', private_key_file) | regex_replace('\n', '\\\\n') }}"

    - name: Display existing SAML configuration
      debug:
        msg:
          - "Here is your existing SAML configuration for reference:"
          - "{{ existing_saml }}"

    # Give the operator a chance to abort before settings are overwritten.
    - pause:
        prompt: "Continuing to run this will replace your existing saml settings (displayed above). They will all be captured except for your private key. Be sure that is backed up before continuing"

    - name: Write out the existing content
      copy:
        dest: "../_sources/existing_saml_adapter_settings.json"
        content: "{{ existing_saml }}"

    - name: Configure AWX SAML adapter
      awx.awx.settings:
        settings: "{{ new_saml }}"
        controller_host: "{{ awx_host }}"
        validate_certs: False

    # Admin token for the Keycloak REST API (admin-cli password grant).
    - name: Get a keycloak token
      uri:
        url: "https://localhost:8443/auth/realms/master/protocol/openid-connect/token"
        method: POST
        body_format: form-urlencoded
        body:
          client_id: "admin-cli"
          username: "{{ keycloak_user }}"
          password: "{{ keycloak_pass }}"
          grant_type: "password"
        validate_certs: False
      register: keycloak_response

    - name: Template the AWX realm
      template:
        src: keycloak.awx.realm.json.j2
        dest: "{{ keycloak_realm_template }}"

    # Keycloak returns 201 on successful realm creation.
    - name: Create the AWX realm
      uri:
        url: "https://localhost:8443/auth/admin/realms"
        method: POST
        body_format: json
        body: "{{ lookup('file', keycloak_realm_template) }}"
        validate_certs: False
        headers:
          Authorization: "Bearer {{ keycloak_response.json.access_token }}"
        status_code: 201
      register: realm_creation
      changed_when: True
|
||||
32
tools/docker-compose/ansible/plumb_ldap.yml
Normal file
32
tools/docker-compose/ansible/plumb_ldap.yml
Normal file
@@ -0,0 +1,32 @@
|
||||
---
# Point a running AWX instance at the docker-compose OpenLDAP container.
# The playbook saves the current LDAP settings to disk before replacing them.
- name: Plumb an ldap instance
  hosts: localhost
  connection: local
  gather_facts: False
  vars:
    awx_host: "https://localhost:8043"
  tasks:
    # Fetch the current settings from the AWX API and render the
    # replacement settings from the bundled Jinja2 template.
    - name: Load existing and new LDAP settings
      set_fact:
        existing_ldap: "{{ lookup('awx.awx.controller_api', 'settings/ldap', host=awx_host, verify_ssl=false) }}"
        new_ldap: "{{ lookup('template', 'ldap_settings.json.j2') }}"

    - name: Display existing LDAP configuration
      debug:
        msg:
          - "Here is your existing LDAP configuration for reference:"
          - "{{ existing_ldap }}"

    # Let the operator abort before anything is modified.
    - pause:
        prompt: "Continuing to run this will replace your existing ldap settings (displayed above). They will all be captured. Be sure that is backed up before continuing"

    # Keep a backup copy of the old settings next to the other generated sources.
    - name: Write out the existing content
      copy:
        dest: "../_sources/existing_ldap_adapter_settings.json"
        content: "{{ existing_ldap }}"

    - name: Configure AWX LDAP adapter
      awx.awx.settings:
        settings: "{{ new_ldap }}"
        controller_host: "{{ awx_host }}"
        validate_certs: False
|
||||
51
tools/docker-compose/ansible/plumb_splunk.yml
Normal file
51
tools/docker-compose/ansible/plumb_splunk.yml
Normal file
@@ -0,0 +1,51 @@
|
||||
---
# Wire a running AWX instance to the docker-compose Splunk container:
# create a TCP data input in Splunk, then back up and replace the AWX
# external-logging settings.
- name: Plumb a splunk instance
  hosts: localhost
  connection: local
  gather_facts: False
  vars:
    awx_host: "https://localhost:8043"
  collections:
    - splunk.es

  tasks:
    # TCP listener on 9199 that AWX will forward log messages to.
    - name: create splunk_data_input_network
      splunk.es.data_input_network:
        name: "9199"
        protocol: "tcp"
        source: "http:tower_logging_collections"
        sourcetype: "httpevent"
        state: "present"
      vars:
        ansible_network_os: splunk.es.splunk
        ansible_user: admin
        ansible_httpapi_pass: splunk_admin
        ansible_httpapi_port: 8089
        ansible_httpapi_use_ssl: yes
        ansible_httpapi_validate_certs: False
        ansible_connection: httpapi

    - name: Load existing and new Logging settings
      set_fact:
        existing_logging: "{{ lookup('awx.awx.controller_api', 'settings/logging', host=awx_host, verify_ssl=false) }}"
        new_logging: "{{ lookup('template', 'logging.json.j2') }}"

    - name: Display existing Logging configuration
      debug:
        msg:
          # Fixed: this message previously said "SAML configuration" — this
          # playbook deals with the logging settings, not SAML.
          - "Here is your existing Logging configuration for reference:"
          - "{{ existing_logging }}"

    # Let the operator abort before anything is modified.
    - pause:
        prompt: "Continuing to run this will replace your existing logging settings (displayed above). They will all be captured except for your connection password. Be sure that is backed up before continuing"

    - name: Write out the existing content
      copy:
        dest: "../_sources/existing_logging.json"
        content: "{{ existing_logging }}"

    - name: Configure AWX logging adapter
      awx.awx.settings:
        settings: "{{ new_logging }}"
        controller_host: "{{ awx_host }}"
        validate_certs: False
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
sources_dest: '../_sources'
|
||||
compose_name: 'docker-compose.yml'
|
||||
awx_image: 'quay.io/ansible/awx_devel'
|
||||
awx_image: 'ghcr.io/ansible/awx_devel'
|
||||
pg_port: 5432
|
||||
pg_username: 'awx'
|
||||
pg_database: 'awx'
|
||||
@@ -16,3 +16,16 @@ receptor_work_sign_reconfigure: false
|
||||
work_sign_key_dir: '../_sources/receptor'
|
||||
work_sign_private_keyfile: "{{ work_sign_key_dir }}/work_private_key.pem"
|
||||
work_sign_public_keyfile: "{{ work_sign_key_dir }}/work_public_key.pem"
|
||||
|
||||
enable_keycloak: false
|
||||
|
||||
enable_ldap: false
|
||||
ldap_public_key_file_name: 'ldap.cert'
|
||||
ldap_private_key_file_name: 'ldap.key'
|
||||
ldap_cert_dir: '{{ sources_dest }}/ldap_certs'
|
||||
ldap_diff_dir: '{{ sources_dest }}/ldap_diffs'
|
||||
ldap_public_key_file: '{{ ldap_cert_dir }}/{{ ldap_public_key_file_name }}'
|
||||
ldap_private_key_file: '{{ ldap_cert_dir }}/{{ ldap_private_key_file_name }}'
|
||||
ldap_cert_subject: "/C=US/ST=NC/L=Durham/O=awx/CN="
|
||||
|
||||
enable_splunk: false
|
||||
|
||||
86
tools/docker-compose/ansible/roles/sources/files/ldap.ldif
Normal file
86
tools/docker-compose/ansible/roles/sources/files/ldap.ldif
Normal file
@@ -0,0 +1,86 @@
|
||||
# Seed data for the docker-compose OpenLDAP container.
# Defines the example.org base, four test users, and the groups that the
# AWX LDAP adapter maps to superuser / auditor / org-admin privileges.

# Base domain object.
dn: dc=example,dc=org
objectClass: dcObject
objectClass: organization
dc: example
o: example

# Container for all test users.
dn: ou=users,dc=example,dc=org
ou: users
objectClass: organizationalUnit

# User that becomes an AWX superuser (via the awx_admins group below).
dn: cn=awx_ldap_admin,ou=users,dc=example,dc=org
mail: admin@example.org
sn: LdapAdmin
cn: awx_ldap_admin
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
userPassword: admin123
givenName: awx

# User that becomes an AWX system auditor (via the awx_auditors group below).
dn: cn=awx_ldap_auditor,ou=users,dc=example,dc=org
mail: auditor@example.org
sn: LdapAuditor
cn: awx_ldap_auditor
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
userPassword: audit123
givenName: awx

# Unprivileged user — member of awx_users only.
dn: cn=awx_ldap_unpriv,ou=users,dc=example,dc=org
mail: unpriv@example.org
sn: LdapUnpriv
cn: awx_ldap_unpriv
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
givenName: awx
userPassword: unpriv123

# Container for all groups.
dn: ou=groups,dc=example,dc=org
ou: groups
objectClass: top
objectClass: organizationalUnit

# Every test user belongs to this group (used as the AWX "require group").
# NOTE(review): awx_ldap_org_admin is referenced here but its entry appears
# later in this file; groupOfNames membership is not referentially checked
# by default, so load order is presumably harmless — confirm against the
# OpenLDAP container's constraints if membership appears missing.
dn: cn=awx_users,ou=groups,dc=example,dc=org
cn: awx_users
objectClass: top
objectClass: groupOfNames
member: cn=awx_ldap_admin,ou=users,dc=example,dc=org
member: cn=awx_ldap_auditor,ou=users,dc=example,dc=org
member: cn=awx_ldap_unpriv,ou=users,dc=example,dc=org
member: cn=awx_ldap_org_admin,ou=users,dc=example,dc=org

# Members of this group are mapped to is_superuser in AWX.
dn: cn=awx_admins,ou=groups,dc=example,dc=org
cn: awx_admins
objectClass: top
objectClass: groupOfNames
member: cn=awx_ldap_admin,ou=users,dc=example,dc=org

# Members of this group are mapped to is_system_auditor in AWX.
dn: cn=awx_auditors,ou=groups,dc=example,dc=org
cn: awx_auditors
objectClass: top
objectClass: groupOfNames
member: cn=awx_ldap_auditor,ou=users,dc=example,dc=org

# User that becomes an admin of the "LDAP Organization" in AWX.
dn: cn=awx_ldap_org_admin,ou=users,dc=example,dc=org
mail: org.admin@example.org
sn: LdapOrgAdmin
cn: awx_ldap_org_admin
objectClass: top
objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
givenName: awx
userPassword: orgadmin123

# Members of this group are mapped to organization admins in AWX.
dn: cn=awx_org_admins,ou=groups,dc=example,dc=org
cn: awx_org_admins
objectClass: top
objectClass: groupOfNames
member: cn=awx_ldap_org_admin,ou=users,dc=example,dc=org
|
||||
|
||||
18
tools/docker-compose/ansible/roles/sources/tasks/ldap.yml
Normal file
18
tools/docker-compose/ansible/roles/sources/tasks/ldap.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
---
# Prepare the LDAP artifacts consumed by the docker-compose template:
# cert/ldif directories, a self-signed TLS keypair, and the seed LDIF.
- name: Create LDAP cert directory
  file:
    path: "{{ item }}"
    state: directory
  loop:
    - "{{ ldap_cert_dir }}"
    - "{{ ldap_diff_dir }}"

# Fixed task name: previously "General LDAP cert".
- name: Generate LDAP cert
  command: 'openssl req -new -x509 -days 365 -nodes -out {{ ldap_public_key_file }} -keyout {{ ldap_private_key_file }} -subj "{{ ldap_cert_subject }}"'
  args:
    # Only generate once; re-runs keep the existing keypair.
    creates: "{{ ldap_public_key_file }}"

# Fixed task name: the file copied is ldap.ldif, not "ldap.diff".
- name: Copy ldap.ldif
  copy:
    src: "ldap.ldif"
    dest: "{{ ldap_diff_dir }}/ldap.ldif"
|
||||
@@ -26,6 +26,8 @@
|
||||
mode: '0600'
|
||||
when: not lookup('vars', item.item, default='') and not item.stat.exists
|
||||
loop: "{{ secrets.results }}"
|
||||
loop_control:
|
||||
label: '{{ item.item }}'
|
||||
|
||||
- name: Include generated secrets unless they are explicitly passed in
|
||||
include_vars: "{{ sources_dest }}/secrets/{{ item.item }}.yml"
|
||||
@@ -89,6 +91,10 @@
|
||||
args:
|
||||
creates: "{{ work_sign_public_keyfile }}"
|
||||
|
||||
- name: Include LDAP tasks if enabled
|
||||
include_tasks: ldap.yml
|
||||
when: enable_ldap | bool
|
||||
|
||||
- name: Render Docker-Compose
|
||||
template:
|
||||
src: docker-compose.yml.j2
|
||||
|
||||
@@ -19,6 +19,8 @@ services:
|
||||
AWX_GROUP_QUEUES: tower
|
||||
MAIN_NODE_TYPE: "${MAIN_NODE_TYPE:-hybrid}"
|
||||
RECEPTORCTL_SOCKET: {{ receptor_socket_file }}
|
||||
CONTROL_PLANE_NODE_COUNT: {{ control_plane_node_count|int }}
|
||||
EXECUTION_NODE_COUNT: {{ execution_node_count|int }}
|
||||
{% if loop.index == 1 %}
|
||||
RUN_MIGRATIONS: 1
|
||||
{% endif %}
|
||||
@@ -54,6 +56,7 @@ services:
|
||||
- "8013:8013" # http
|
||||
- "8043:8043" # https
|
||||
- "2222:2222" # receptor foo node
|
||||
- "3000:3001" # used by the UI dev env
|
||||
{% endif %}
|
||||
redis_{{ container_postfix }}:
|
||||
image: redis:latest
|
||||
@@ -79,6 +82,59 @@ services:
|
||||
{% set container_postfix = loop.index %}
|
||||
- "awx_{{ container_postfix }}"
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if enable_keycloak|bool %}
|
||||
keycloak:
|
||||
image: quay.io/keycloak/keycloak:15.0.2
|
||||
container_name: tools_keycloak_1
|
||||
hostname: keycloak
|
||||
user: "{{ ansible_user_uid }}"
|
||||
ports:
|
||||
- "8443:8443"
|
||||
environment:
|
||||
DB_VENDOR: postgres
|
||||
DB_ADDR: postgres
|
||||
DB_DATABASE: keycloak
|
||||
DB_USER: {{ pg_username }}
|
||||
DB_PASSWORD: {{ pg_password }}
|
||||
depends_on:
|
||||
- postgres
|
||||
{% endif %}
|
||||
{% if enable_ldap|bool %}
|
||||
ldap:
|
||||
image: bitnami/openldap:2
|
||||
container_name: tools_ldap_1
|
||||
hostname: ldap
|
||||
user: "{{ ansible_user_uid }}"
|
||||
ports:
|
||||
- "389:1389"
|
||||
- "636:1636"
|
||||
environment:
|
||||
LDAP_ADMIN_USERNAME: admin
|
||||
LDAP_ADMIN_PASSWORD: admin
|
||||
LDAP_CUSTOM_LDIF_DIR: /opt/bitnami/openldap/ldiffs
|
||||
LDAP_ENABLE_TLS: "yes"
|
||||
LDAP_LDAPS_PORT_NUMBER: 1636
|
||||
LDAP_TLS_CERT_FILE: /opt/bitnami/openldap/certs/{{ ldap_public_key_file_name }}
|
||||
LDAP_TLS_CA_FILE: /opt/bitnami/openldap/certs/{{ ldap_public_key_file_name }}
|
||||
LDAP_TLS_KEY_FILE: /opt/bitnami/openldap/certs/{{ ldap_private_key_file_name }}
|
||||
volumes:
|
||||
- 'openldap_data:/bitnami/openldap'
|
||||
- '../../docker-compose/_sources/ldap_certs:/opt/bitnami/openldap/certs'
|
||||
- '../../docker-compose/_sources/ldap_diffs:/opt/bitnami/openldap/ldiffs'
|
||||
{% endif %}
|
||||
{% if enable_splunk|bool %}
|
||||
splunk:
|
||||
image: splunk/splunk:latest
|
||||
container_name: tools_splunk_1
|
||||
hostname: splunk
|
||||
ports:
|
||||
- "8000:8000"
|
||||
- "8089:8089"
|
||||
- "9199:9199"
|
||||
environment:
|
||||
SPLUNK_START_ARGS: --accept-license
|
||||
SPLUNK_PASSWORD: splunk_admin
|
||||
{% endif %}
|
||||
# A useful container that simply passes through log messages to the console
|
||||
# helpful for testing awx/tower logging
|
||||
@@ -121,6 +177,7 @@ services:
|
||||
links:
|
||||
- receptor-hop
|
||||
volumes:
|
||||
- "../../../:/awx_devel" # not used, but mounted so that any in-place installs can be used for whole cluster
|
||||
- "../../docker-compose/_sources/receptor/receptor-worker-{{ loop.index }}.conf:/etc/receptor/receptor.conf"
|
||||
- "/sys/fs/cgroup:/sys/fs/cgroup"
|
||||
- "../../docker-compose/_sources/receptor/work_public_key.pem:/etc/receptor/work_public_key.pem"
|
||||
@@ -136,6 +193,11 @@ volumes:
|
||||
redis_socket_{{ container_postfix }}:
|
||||
name: tools_redis_socket_{{ container_postfix }}
|
||||
{% endfor -%}
|
||||
{% if enable_ldap %}
|
||||
openldap_data:
|
||||
name: tools_ldap_1
|
||||
driver: local
|
||||
{% endif %}
|
||||
{% if minikube_container_group|bool %}
|
||||
networks:
|
||||
default:
|
||||
|
||||
@@ -46,4 +46,3 @@ listen stats
|
||||
bind *:1936
|
||||
stats enable
|
||||
stats uri /
|
||||
|
||||
|
||||
@@ -10,3 +10,6 @@
|
||||
|
||||
- tcp-listener:
|
||||
port: 5555
|
||||
|
||||
- control-service:
|
||||
service: control
|
||||
|
||||
77
tools/docker-compose/ansible/smoke-test.yml
Normal file
77
tools/docker-compose/ansible/smoke-test.yml
Normal file
@@ -0,0 +1,77 @@
|
||||
---
#
# This is used by a CI check in GitHub Actions and isn't really
# meant to be run locally.
#
# The development environment does some unfortunate things to
# make rootless podman work inside of a docker container.
# The goal here is essentially to test that the awx user is
# able to run `podman run`.
#
- name: Test that the development environment is able to launch a job
  hosts: localhost
  tasks:
    - name: Boot the development environment
      command: |
        make docker-compose
      environment:
        COMPOSE_UP_OPTS: -d
      args:
        chdir: "{{ repo_dir }}"

    # Takes a while for migrations to finish
    - name: Wait for the dev environment to be ready
      uri:
        url: "http://localhost:8013/api/v2/ping/"
      register: _result
      until: _result.status == 200
      retries: 120
      delay: 5

    - name: Reset admin password
      shell: |
        docker exec -i tools_awx_1 bash <<EOSH
        awx-manage update_password --username=admin --password=password
        awx-manage create_preload_data
        EOSH

    - block:
        - name: Launch Demo Job Template
          awx.awx.job_launch:
            name: Demo Job Template
            wait: yes
            validate_certs: no
            controller_host: "http://localhost:8013"
            controller_username: "admin"
            controller_password: "password"
      rescue:
        # On failure, dump every project update and job for diagnostics,
        # then re-raise the original error.
        - name: Get list of project updates and jobs
          uri:
            url: "http://localhost:8013/api/v2/{{ resource }}/"
            user: admin
            password: "password"
            force_basic_auth: yes
          register: job_lists
          loop:
            - project_updates
            - jobs
          loop_control:
            loop_var: resource

        - name: Get all job and project details
          uri:
            url: "http://localhost:8013{{ endpoint }}"
            user: admin
            password: "password"
            force_basic_auth: yes
          loop: |
            {{ job_lists.results | map(attribute='json') | map(attribute='results') | flatten | map(attribute='url') }}
          loop_control:
            loop_var: endpoint

        - name: Re-emit failure
          vars:
            failed_task:
              result: '{{ ansible_failed_result }}'
          fail:
            msg: '{{ failed_task }}'
|
||||
1770
tools/docker-compose/ansible/templates/keycloak.awx.realm.json.j2
Normal file
1770
tools/docker-compose/ansible/templates/keycloak.awx.realm.json.j2
Normal file
File diff suppressed because it is too large
Load Diff
52
tools/docker-compose/ansible/templates/ldap_settings.json.j2
Normal file
52
tools/docker-compose/ansible/templates/ldap_settings.json.j2
Normal file
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"AUTH_LDAP_1_SERVER_URI": "ldap://{{ container_reference }}:389",
|
||||
"AUTH_LDAP_1_BIND_DN": "cn=admin,dc=example,dc=org",
|
||||
"AUTH_LDAP_1_BIND_PASSWORD": "admin",
|
||||
"AUTH_LDAP_1_START_TLS": false,
|
||||
"AUTH_LDAP_1_CONNECTION_OPTIONS": {
|
||||
"OPT_REFERRALS": 0,
|
||||
"OPT_NETWORK_TIMEOUT": 30
|
||||
},
|
||||
"AUTH_LDAP_1_USER_SEARCH": [
|
||||
"ou=users,dc=example,dc=org",
|
||||
"SCOPE_SUBTREE",
|
||||
"(cn=%(user)s)"
|
||||
],
|
||||
"AUTH_LDAP_1_USER_DN_TEMPLATE": "cn=%(user)s,ou=users,dc=example,dc=org",
|
||||
"AUTH_LDAP_1_USER_ATTR_MAP": {
|
||||
"first_name": "givenName",
|
||||
"last_name": "sn",
|
||||
"email": "mail"
|
||||
},
|
||||
"AUTH_LDAP_1_GROUP_SEARCH": [
|
||||
"ou=groups,dc=example,dc=org",
|
||||
"SCOPE_SUBTREE",
|
||||
"(objectClass=groupOfNames)"
|
||||
],
|
||||
"AUTH_LDAP_1_GROUP_TYPE": "MemberDNGroupType",
|
||||
"AUTH_LDAP_1_GROUP_TYPE_PARAMS": {
|
||||
"member_attr": "member",
|
||||
"name_attr": "cn"
|
||||
},
|
||||
"AUTH_LDAP_1_REQUIRE_GROUP": "cn=awx_users,ou=groups,dc=example,dc=org",
|
||||
"AUTH_LDAP_1_DENY_GROUP": null,
|
||||
"AUTH_LDAP_1_USER_FLAGS_BY_GROUP": {
|
||||
"is_superuser": [
|
||||
"cn=awx_admins,ou=groups,dc=example,dc=org"
|
||||
],
|
||||
"is_system_auditor": [
|
||||
"cn=awx_auditors,ou=groups,dc=example,dc=org"
|
||||
]
|
||||
},
|
||||
"AUTH_LDAP_1_ORGANIZATION_MAP": {
|
||||
"LDAP Organization": {
|
||||
"users": true,
|
||||
"remove_admins": false,
|
||||
"remove_users": true,
|
||||
"admins": [
|
||||
"cn=awx_org_admins,ou=groups,dc=example,dc=org"
|
||||
]
|
||||
}
|
||||
},
|
||||
"AUTH_LDAP_1_TEAM_MAP": {}
|
||||
}
|
||||
10
tools/docker-compose/ansible/templates/logging.json.j2
Normal file
10
tools/docker-compose/ansible/templates/logging.json.j2
Normal file
@@ -0,0 +1,10 @@
|
||||
{
  "LOG_AGGREGATOR_HOST": "splunk",
  "LOG_AGGREGATOR_PORT": 9199,
  "LOG_AGGREGATOR_TYPE": "splunk",
  "LOG_AGGREGATOR_USERNAME": "admin",
  "LOG_AGGREGATOR_PASSWORD": "splunk_admin",
  "LOG_AGGREGATOR_ENABLED": true,
  "LOG_AGGREGATOR_PROTOCOL": "tcp",
  "LOG_AGGREGATOR_VERIFY_CERT": false
}
|
||||
51
tools/docker-compose/ansible/templates/saml_settings.json.j2
Normal file
51
tools/docker-compose/ansible/templates/saml_settings.json.j2
Normal file
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"SAML_AUTO_CREATE_OBJECTS": true,
|
||||
"SOCIAL_AUTH_SAML_SP_ENTITY_ID": "{{ container_reference }}:8043",
|
||||
"SOCIAL_AUTH_SAML_SP_PUBLIC_CERT": "{{ public_key_content | regex_replace('\\n', '') }}",
|
||||
"SOCIAL_AUTH_SAML_SP_PRIVATE_KEY": "{{ private_key_content | regex_replace('\\n', '') }}",
|
||||
"SOCIAL_AUTH_SAML_ORG_INFO": {
|
||||
"en-US": {
|
||||
"url": "https://{{ container_reference }}:8443",
|
||||
"name": "Keycloak",
|
||||
"displayname": "Keycloak Solutions Engineering"
|
||||
}
|
||||
},
|
||||
"SOCIAL_AUTH_SAML_TECHNICAL_CONTACT": {
|
||||
"givenName": "Me Myself",
|
||||
"emailAddress": "noone@nowhere.com"
|
||||
},
|
||||
"SOCIAL_AUTH_SAML_SUPPORT_CONTACT": {
|
||||
"givenName": "Me Myself",
|
||||
"emailAddress": "noone@nowhere.com"
|
||||
},
|
||||
"SOCIAL_AUTH_SAML_ENABLED_IDPS": {
|
||||
"Keycloak": {
|
||||
"attr_user_permanent_id": "name_id",
|
||||
"entity_id": "https://{{ container_reference }}:8443/auth/realms/awx",
|
||||
"attr_groups": "groups",
|
||||
"url": "https://{{ container_reference }}:8443/auth/realms/awx/protocol/saml",
|
||||
"attr_first_name": "first_name",
|
||||
"x509cert": "{{ public_key_content | regex_replace('\\n', '') }}",
|
||||
"attr_email": "email",
|
||||
"attr_last_name": "last_name",
|
||||
"attr_username": "username"
|
||||
}
|
||||
},
|
||||
"SOCIAL_AUTH_SAML_SECURITY_CONFIG": {
|
||||
"requestedAuthnContext": false
|
||||
},
|
||||
"SOCIAL_AUTH_SAML_SP_EXTRA": null,
|
||||
"SOCIAL_AUTH_SAML_EXTRA_DATA": null,
|
||||
"SOCIAL_AUTH_SAML_ORGANIZATION_MAP": {
|
||||
"Default": {
|
||||
"users": true
|
||||
}
|
||||
},
|
||||
"SOCIAL_AUTH_SAML_TEAM_MAP": null,
|
||||
"SOCIAL_AUTH_SAML_ORGANIZATION_ATTR": {},
|
||||
"SOCIAL_AUTH_SAML_TEAM_ATTR": {},
|
||||
"SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR": {
|
||||
"is_superuser_attr": "is_superuser",
|
||||
"is_system_auditor_attr": "is_system_auditor"
|
||||
}
|
||||
}
|
||||
20
tools/docker-compose/awx-autoreload
Executable file
20
tools/docker-compose/awx-autoreload
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
#
# Watch a directory tree and re-run a command whenever a Python file
# changes, debounced to at most one run per second.
#
# Usage: awx-autoreload <directory> <command>
#
# Fixed: shebang was '#!/bin/env bash' — env is /usr/bin/env on
# mainstream distros, so the script failed to start there.

if [ $# -lt 2 ]; then
  echo "Usage:"
  echo "  autoreload directory command"
  exit 1
fi

last_reload=$(date +%s)

# -m: monitor forever, -r: recursive, -q: quiet.
# The UI tree is excluded; it is rebuilt by its own tooling.
inotifywait -mrq -e create,delete,attrib,close_write,move \
    --exclude '/awx_devel/awx/ui' "$1" | while read -r directory action file; do
  this_reload=$(date +%s)
  since_last=$((this_reload - last_reload))
  # Only react to *.py changes, and skip events within 1s of the last run.
  if [[ "$file" =~ .*py$ ]] && [[ "$since_last" -gt 1 ]]; then
    echo "File changed: $file"
    echo "Running command: $2"
    eval "$2"
    last_reload=$(date +%s)
  fi
done
|
||||
@@ -19,9 +19,6 @@ else
|
||||
wait-for-migrations
|
||||
fi
|
||||
|
||||
make init
|
||||
|
||||
|
||||
if output=$(awx-manage createsuperuser --noinput --username=admin --email=admin@localhost 2> /dev/null); then
|
||||
echo $output
|
||||
admin_password=$(openssl rand -base64 12)
|
||||
@@ -35,6 +32,27 @@ mkdir -p /awx_devel/awx/public/static
|
||||
mkdir -p /awx_devel/awx/ui/static
|
||||
mkdir -p /awx_devel/awx/ui/build/static
|
||||
|
||||
awx-manage provision_instance --hostname="$(hostname)" --node_type="$MAIN_NODE_TYPE"
|
||||
awx-manage register_queue --queuename=controlplane --instance_percent=100
|
||||
awx-manage register_queue --queuename=default --instance_percent=100
|
||||
|
||||
if [[ -n "$RUN_MIGRATIONS" ]]; then
|
||||
for (( i=1; i<$CONTROL_PLANE_NODE_COUNT; i++ )); do
|
||||
for (( j=i + 1; j<=$CONTROL_PLANE_NODE_COUNT; j++ )); do
|
||||
awx-manage register_peers "awx_$i" --peers "awx_$j"
|
||||
done
|
||||
done
|
||||
|
||||
if [[ $EXECUTION_NODE_COUNT > 0 ]]; then
|
||||
awx-manage provision_instance --hostname="receptor-hop" --node_type="hop"
|
||||
awx-manage register_peers "receptor-hop" --peers "awx_1"
|
||||
for (( e=1; e<=$EXECUTION_NODE_COUNT; e++ )); do
|
||||
awx-manage provision_instance --hostname="receptor-$e" --node_type="execution"
|
||||
awx-manage register_peers "receptor-$e" --peers "receptor-hop"
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create resource entries when using Minikube
|
||||
if [[ -n "$MINIKUBE_CONTAINER_GROUP" ]]; then
|
||||
awx-manage shell < /awx_devel/tools/docker-compose-minikube/_sources/bootstrap_minikube.py
|
||||
|
||||
@@ -15,5 +15,5 @@ localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env pyth
|
||||
# pg_username=""
|
||||
# pg_hostname=""
|
||||
|
||||
# awx_image="quay.io/awx/awx_devel"
|
||||
# awx_image="ghcr.io/ansible/awx_devel"
|
||||
# migrate_local_docker=false
|
||||
|
||||
@@ -5,4 +5,4 @@ bootstrap_development.sh
|
||||
|
||||
cd /awx_devel
|
||||
# Start the services
|
||||
exec tini -- make supervisor
|
||||
exec make supervisor
|
||||
|
||||
@@ -22,6 +22,7 @@ server {
|
||||
|
||||
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
|
||||
add_header Strict-Transport-Security max-age=15768000;
|
||||
add_header X-Content-Type-Options nosniff;
|
||||
|
||||
location /static/ {
|
||||
root /awx_devel;
|
||||
@@ -84,6 +85,7 @@ server {
|
||||
|
||||
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
|
||||
add_header Strict-Transport-Security max-age=15768000;
|
||||
add_header X-Content-Type-Options nosniff;
|
||||
|
||||
location /static/ {
|
||||
root /awx_devel;
|
||||
|
||||
@@ -6,4 +6,9 @@ make clean
|
||||
make awx-link
|
||||
|
||||
cp tools/docker-compose/ansible/roles/sources/files/local_settings.py awx/settings/local_settings.py
|
||||
make "${1:-test}"
|
||||
|
||||
if [[ ! $@ ]]; then
|
||||
make test
|
||||
else
|
||||
make $@
|
||||
fi
|
||||
|
||||
@@ -5,79 +5,70 @@ nodaemon=true
|
||||
|
||||
[program:awx-dispatcher]
|
||||
command = make dispatcher
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 1
|
||||
stopsignal=KILL
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
redirect_stderr=true
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:awx-receiver]
|
||||
command = make receiver
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 1
|
||||
stopsignal=KILL
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
redirect_stderr=true
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:awx-wsbroadcast]
|
||||
command = make wsbroadcast
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 1
|
||||
stopsignal=KILL
|
||||
autorestart = true
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
redirect_stderr=true
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:awx-uwsgi]
|
||||
command = make uwsgi
|
||||
autostart = true
|
||||
autorestart = true
|
||||
redirect_stderr=true
|
||||
stopwaitsecs = 1
|
||||
stopsignal=KILL
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:awx-daphne]
|
||||
command = make daphne
|
||||
autostart = true
|
||||
autorestart = true
|
||||
redirect_stderr=true
|
||||
stopwaitsecs = 1
|
||||
stopsignal=KILL
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:awx-nginx]
|
||||
command = make nginx
|
||||
autostart = true
|
||||
autorestart = true
|
||||
redirect_stderr=true
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:awx-rsyslogd]
|
||||
command = rsyslogd -n -i /var/run/awx-rsyslog/rsyslog.pid -f /var/lib/awx/rsyslog/rsyslog.conf
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopwaitsecs = 5
|
||||
stopsignal=TERM
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
redirect_stderr=true
|
||||
@@ -86,19 +77,40 @@ stderr_events_enabled = true
|
||||
|
||||
[program:awx-receptor]
|
||||
command = receptor --config /etc/receptor/receptor.conf
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopsignal = KILL
|
||||
stopasgroup = true
|
||||
killasgroup = true
|
||||
redirect_stderr=true
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[group:tower-processes]
|
||||
programs=awx-dispatcher,awx-receiver,awx-uwsgi,awx-daphne,awx-nginx,awx-wsbroadcast,awx-rsyslogd
|
||||
priority=5
|
||||
|
||||
[program:awx-autoreload]
|
||||
command = make awx-autoreload
|
||||
autostart = true
|
||||
autorestart = true
|
||||
stopasgroup=true
|
||||
killasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
stdout_events_enabled = true
|
||||
stderr_events_enabled = true
|
||||
|
||||
[eventlistener:superwatcher]
|
||||
command=stop-supervisor
|
||||
events=PROCESS_STATE_FATAL
|
||||
autorestart = true
|
||||
stderr_logfile=/dev/stdout
|
||||
stderr_logfile_maxbytes=0
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
|
||||
[unix_http_server]
|
||||
file=/var/run/supervisor/supervisor.sock
|
||||
|
||||
|
||||
8
tools/grafana/dashboards/awx_dashboard.yml
Normal file
8
tools/grafana/dashboards/awx_dashboard.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: awx
|
||||
allowUiUpdates: true
|
||||
options:
|
||||
path: /etc/grafana/provisioning/dashboards/demo_dashboard.json
|
||||
134
tools/grafana/dashboards/demo_dashboard.json
Normal file
134
tools/grafana/dashboards/demo_dashboard.json
Normal file
@@ -0,0 +1,134 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"target": {
|
||||
"limit": 100,
|
||||
"matchAny": false,
|
||||
"tags": [],
|
||||
"type": "dashboard"
|
||||
},
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": 1,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "PBFA97CFB590B2093"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 8,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "PBFA97CFB590B2093"
|
||||
},
|
||||
"expr": "awx_status_total",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "job status",
|
||||
"type": "timeseries"
|
||||
}
|
||||
],
|
||||
"schemaVersion": 36,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-30m",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "",
|
||||
"title": "awx-demo",
|
||||
"uid": "GISWZOXnk",
|
||||
"version": 6,
|
||||
"weekStart": ""
|
||||
}
|
||||
10
tools/grafana/datasources/prometheus_source.yml
Normal file
10
tools/grafana/datasources/prometheus_source.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
isDefault: true
|
||||
access: proxy
|
||||
url: http://prometheus:9090
|
||||
editable: true
|
||||
1
tools/prometheus/.gitignore
vendored
1
tools/prometheus/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
./data
|
||||
@@ -1,46 +1,18 @@
|
||||
---
|
||||
# prometheus.yml
|
||||
# my global config
|
||||
global:
|
||||
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
|
||||
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
|
||||
# scrape_timeout is set to the global default (10s).
|
||||
|
||||
# Alertmanager configuration
|
||||
alerting:
|
||||
alertmanagers:
|
||||
- static_configs:
|
||||
- targets:
|
||||
# - alertmanager:9093
|
||||
|
||||
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
|
||||
rule_files:
|
||||
# - "first_rules.yml"
|
||||
# - "second_rules.yml"
|
||||
|
||||
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs:
|
||||
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
|
||||
- job_name: 'prometheus'
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
static_configs:
|
||||
- targets: ['127.0.0.1:9090']
|
||||
|
||||
- job_name: 'awx'
|
||||
static_configs:
|
||||
- targets: ['awx1:8043'] # or haproxy:8043 in cluster env
|
||||
tls_config:
|
||||
insecure_skip_verify: true
|
||||
metrics_path: /api/v2/metrics
|
||||
scrape_interval: 5s
|
||||
scheme: http
|
||||
scheme: https
|
||||
params:
|
||||
format: ['txt']
|
||||
basic_auth:
|
||||
username: admin
|
||||
password: password
|
||||
# bearer_token: oauth-token
|
||||
static_configs:
|
||||
- targets:
|
||||
- awxweb:8013
|
||||
username: awxuser # change this
|
||||
password: password # change this
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
exec /var/lib/awx/setup/setup.sh "$@"
|
||||
@@ -2,24 +2,72 @@
|
||||
import sys
|
||||
import os
|
||||
import signal
|
||||
import datetime
|
||||
|
||||
from datetime import timezone
|
||||
|
||||
|
||||
def write_stdout(s):
|
||||
sys.stdout.write(s)
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def write_stderr(s):
|
||||
sys.stderr.write(s)
|
||||
sys.stderr.flush()
|
||||
|
||||
|
||||
def main():
|
||||
while 1:
|
||||
write_stdout('READY\n')
|
||||
write_stdout("READY\n")
|
||||
|
||||
# read header line and print it to stderr
|
||||
line = sys.stdin.readline()
|
||||
headers = dict([ x.split(':') for x in line.split() ])
|
||||
headers.update(dict([ x.split(':') for x in sys.stdin.read(int(headers['len'])).split()]))
|
||||
if headers['eventname'] == 'PROCESS_STATE_FATAL':
|
||||
os.kill(os.getppid(), signal.SIGTERM)
|
||||
write_stdout('RESULT 2\nOK')
|
||||
|
||||
# parse header line and decide what to do with it
|
||||
try:
|
||||
headers = dict([x.split(":") for x in line.split()])
|
||||
data = sys.stdin.read(int(headers["len"]))
|
||||
except ValueError as e:
|
||||
write_stderr(str(e))
|
||||
|
||||
# now decide what do to based on eventnames
|
||||
if headers["eventname"] == "PROCESS_STATE_FATAL":
|
||||
headers.update(
|
||||
dict(
|
||||
[x.split(":") for x in sys.stdin.read(int(headers["len"])).split()]
|
||||
)
|
||||
)
|
||||
|
||||
try:
|
||||
# incoming event that produced PROCESS_STATE_FATAL will have a PID. SIGTERM it!
|
||||
write_stderr(
|
||||
f"{datetime.datetime.now(timezone.utc)} - sending SIGTERM to proc={headers} with data={headers}\n"
|
||||
)
|
||||
os.kill(headers["pid"], signal.SIGTERM)
|
||||
except Exception as e:
|
||||
write_stderr(str(e))
|
||||
|
||||
# awx-rsyslog PROCESS_LOG_STDERR handler
|
||||
if headers["eventname"] == "PROCESS_LOG_STDERR":
|
||||
# pertinent data to process that produced PROCES_LOG_STDERR is in the first line of the data payload; so lets extract it
|
||||
proc_details = dict([x.split(":") for x in (data.split("\n")[0]).split()])
|
||||
|
||||
if proc_details["processname"] == "awx-rsyslogd":
|
||||
log_message = "".join(data.split("\n")[1:])
|
||||
|
||||
# look for a 4XX HTTP CODE in the log message. if found, issue a sigkill
|
||||
if any(str(x) in log_message.split() for x in range(400, 420)):
|
||||
try:
|
||||
write_stderr(
|
||||
f"{datetime.datetime.now(timezone.utc)} - sending SIGTERM to proc=[{proc_details['processname']}] with pid=[{int(proc_details['pid'])}] due to log_message=[{log_message}]\n"
|
||||
)
|
||||
os.kill(int(proc_details["pid"]), signal.SIGTERM)
|
||||
except Exception as e:
|
||||
write_stderr(str(e))
|
||||
|
||||
write_stdout("RESULT 2\nOK")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -45,11 +45,7 @@ from django.db import connection
|
||||
from django.db.models.sql import InsertQuery
|
||||
from django.utils.timezone import now
|
||||
|
||||
db = json.loads(
|
||||
subprocess.check_output(
|
||||
['awx-manage', 'print_settings', 'DATABASES', '--format', 'json']
|
||||
)
|
||||
)
|
||||
db = json.loads(subprocess.check_output(['awx-manage', 'print_settings', 'DATABASES', '--format', 'json']))
|
||||
name = db['DATABASES']['default']['NAME']
|
||||
user = db['DATABASES']['default']['USER']
|
||||
pw = db['DATABASES']['default']['PASSWORD']
|
||||
@@ -67,38 +63,39 @@ MODULE_OPTIONS = ('yup', 'stonchronize', 'templotz', 'deboog')
|
||||
|
||||
|
||||
class YieldedRows(StringIO):
|
||||
|
||||
def __init__(self, job_id, rows, created_stamp, modified_stamp, *args, **kwargs):
|
||||
self.rows = rows
|
||||
self.rowlist = []
|
||||
for (event, module) in itertools.product(EVENT_OPTIONS, MODULE_OPTIONS):
|
||||
event_data_json = {
|
||||
"task_action": module,
|
||||
"name": "Do a {} thing".format(module),
|
||||
"task": "Do a {} thing".format(module)
|
||||
}
|
||||
row = "\t".join([
|
||||
f"{created_stamp}",
|
||||
f"{modified_stamp}",
|
||||
event,
|
||||
json.dumps(event_data_json),
|
||||
str(event in ('runner_on_failed', 'runner_on_unreachable')),
|
||||
str(event == 'runner_on_changed'),
|
||||
"localhost",
|
||||
"Example Play",
|
||||
"Hello World",
|
||||
"",
|
||||
"0",
|
||||
"1",
|
||||
job_id,
|
||||
u,
|
||||
"",
|
||||
"1",
|
||||
"hello_world.yml",
|
||||
"0",
|
||||
"X",
|
||||
"1",
|
||||
]) + '\n'
|
||||
event_data_json = {"task_action": module, "name": "Do a {} thing".format(module), "task": "Do a {} thing".format(module)}
|
||||
row = (
|
||||
"\t".join(
|
||||
[
|
||||
f"{created_stamp}",
|
||||
f"{modified_stamp}",
|
||||
f"{created_stamp}",
|
||||
event,
|
||||
json.dumps(event_data_json),
|
||||
str(event in ('runner_on_failed', 'runner_on_unreachable')),
|
||||
str(event == 'runner_on_changed'),
|
||||
"localhost",
|
||||
"Example Play",
|
||||
"Hello World",
|
||||
"",
|
||||
"0",
|
||||
"1",
|
||||
job_id,
|
||||
u,
|
||||
"",
|
||||
"1",
|
||||
"hello_world.yml",
|
||||
"0",
|
||||
"X",
|
||||
"1",
|
||||
]
|
||||
)
|
||||
+ '\n'
|
||||
)
|
||||
self.rowlist.append(row)
|
||||
|
||||
def read(self, x):
|
||||
@@ -117,15 +114,19 @@ def firehose(job, count, created_stamp, modified_stamp):
|
||||
conn = psycopg2.connect(dsn)
|
||||
f = YieldedRows(job, count, created_stamp, modified_stamp)
|
||||
with conn.cursor() as cursor:
|
||||
cursor.copy_expert((
|
||||
'COPY '
|
||||
'main_jobevent('
|
||||
'created, modified, event, event_data, failed, changed, '
|
||||
'host_name, play, role, task, counter, host_id, job_id, uuid, '
|
||||
'parent_uuid, end_line, playbook, start_line, stdout, verbosity'
|
||||
') '
|
||||
'FROM STDIN'
|
||||
), f, size=1024 * 1000)
|
||||
cursor.copy_expert(
|
||||
(
|
||||
'COPY '
|
||||
'main_jobevent('
|
||||
'created, modified, job_created, event, event_data, failed, changed, '
|
||||
'host_name, play, role, task, counter, host_id, job_id, uuid, '
|
||||
'parent_uuid, end_line, playbook, start_line, stdout, verbosity'
|
||||
') '
|
||||
'FROM STDIN'
|
||||
),
|
||||
f,
|
||||
size=1024 * 1000,
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
@@ -143,10 +144,12 @@ def generate_jobs(jobs, batch_size, time_delta):
|
||||
print(f'inserting {jobs} job(s)')
|
||||
sys.path.insert(0, pkg_resources.get_distribution('awx').module_path)
|
||||
from awx import prepare_env
|
||||
|
||||
prepare_env()
|
||||
setup_django()
|
||||
|
||||
from awx.main.models import UnifiedJob, Job, JobTemplate
|
||||
|
||||
fields = list(set(Job._meta.fields) - set(UnifiedJob._meta.fields))
|
||||
job_field_names = set([f.attname for f in fields])
|
||||
# extra unified job field names from base class
|
||||
@@ -161,9 +164,7 @@ def generate_jobs(jobs, batch_size, time_delta):
|
||||
jt = JobTemplate.objects.all()[jt_pos % jt_count]
|
||||
except IndexError as e:
|
||||
# seems to happen every now and then due to some race condition
|
||||
print('Warning: IndexError on {} JT, error: {}'.format(
|
||||
jt_pos % jt_count, e
|
||||
))
|
||||
print('Warning: IndexError on {} JT, error: {}'.format(jt_pos % jt_count, e))
|
||||
jt_pos += 1
|
||||
jt_defaults = dict(
|
||||
(f.attname, getattr(jt, f.attname))
|
||||
@@ -176,22 +177,36 @@ def generate_jobs(jobs, batch_size, time_delta):
|
||||
jobs = [
|
||||
Job(
|
||||
status=STATUS_OPTIONS[i % len(STATUS_OPTIONS)],
|
||||
started=now() - time_delta, created=now() - time_delta, modified=now() - time_delta, finished=now() - time_delta,
|
||||
elapsed=0., **jt_defaults)
|
||||
started=now() - time_delta,
|
||||
created=now() - time_delta,
|
||||
modified=now() - time_delta,
|
||||
finished=now() - time_delta,
|
||||
elapsed=0.0,
|
||||
**jt_defaults,
|
||||
)
|
||||
for i in range(N)
|
||||
]
|
||||
ujs = UnifiedJob.objects.bulk_create(jobs)
|
||||
for uj in ujs:
|
||||
uj.unifiedjob_ptr_id = uj.id # hack around the polymorphic id field not being picked up
|
||||
query = InsertQuery(Job)
|
||||
query.insert_values(fields, ujs)
|
||||
with connection.cursor() as cursor:
|
||||
query, params = query.sql_with_params()[0]
|
||||
cursor.execute(query, params)
|
||||
return ujs[-1], jt_pos, [ujs[i].pk for i in range(len(ujs))]
|
||||
return ujs[-1], jt_pos, [uj.pk for uj in ujs]
|
||||
|
||||
i = 1
|
||||
jt_pos = 0
|
||||
created_job_ids = []
|
||||
s = time()
|
||||
|
||||
from awx.main.models import JobEvent
|
||||
from awx.main.utils.common import create_partition
|
||||
|
||||
start_partition = (now() - time_delta).replace(minute=0, second=0, microsecond=0)
|
||||
create_partition(JobEvent._meta.db_table, start_partition)
|
||||
|
||||
while jobs > 0:
|
||||
s_loop = time()
|
||||
print('running batch {}, runtime {}'.format(i, time() - s))
|
||||
@@ -200,21 +215,20 @@ def generate_jobs(jobs, batch_size, time_delta):
|
||||
i += 1
|
||||
jobs -= batch_size
|
||||
created_job_ids += ujs_pk
|
||||
print('Creted Job IDS: {}'.format(created_job_ids))
|
||||
#return created
|
||||
print('Created Job IDS: {}'.format(created_job_ids))
|
||||
# return created
|
||||
return created_job_ids
|
||||
|
||||
|
||||
def generate_events(events, job, time_delta):
|
||||
conn = psycopg2.connect(dsn)
|
||||
cursor = conn.cursor()
|
||||
|
||||
|
||||
created_time = datetime.datetime.today() - time_delta - datetime.timedelta(seconds=5)
|
||||
modified_time = datetime.datetime.today() - time_delta
|
||||
created_stamp = created_time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
modified_stamp = modified_time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
# get all the indexes for main_jobevent
|
||||
|
||||
print(f'attaching {events} events to job {job}')
|
||||
cores = multiprocessing.cpu_count()
|
||||
workers = []
|
||||
@@ -244,7 +258,7 @@ def generate_events(events, job, time_delta):
|
||||
cursor.execute('ALTER SEQUENCE firehose_line_seq RESTART WITH 0;')
|
||||
cursor.execute("SELECT nextval('firehose_line_seq')")
|
||||
conn.commit()
|
||||
|
||||
|
||||
cursor.execute(
|
||||
"UPDATE main_jobevent SET "
|
||||
"counter=nextval('firehose_seq')::integer,"
|
||||
@@ -258,18 +272,10 @@ def generate_events(events, job, time_delta):
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
||||
parser.add_argument(
|
||||
'--jobs-per-hour', type=int, help='Number of jobs to create.',
|
||||
default=1000) # 1M by default
|
||||
parser.add_argument(
|
||||
'--events-per-job', type=int, help='Number of events to create.',
|
||||
default=1345) # 1B by default
|
||||
parser.add_argument(
|
||||
'--batch-size', type=int, help='Number of jobs to create in a single batch.',
|
||||
default=100)
|
||||
parser.add_argument(
|
||||
'--days-delta', type=int, help='Number of days old to create the events. Defaults to 31.',
|
||||
default=31)
|
||||
parser.add_argument('--jobs-per-hour', type=int, help='Number of jobs to create.', default=1000) # 1M by default
|
||||
parser.add_argument('--events-per-job', type=int, help='Number of events to create.', default=1345) # 1B by default
|
||||
parser.add_argument('--batch-size', type=int, help='Number of jobs to create in a single batch.', default=100)
|
||||
parser.add_argument('--days-delta', type=int, help='Number of days old to create the events. Defaults to 31.', default=31)
|
||||
params = parser.parse_args()
|
||||
jobs = params.jobs_per_hour
|
||||
events = params.events_per_job
|
||||
@@ -279,7 +285,7 @@ if __name__ == '__main__':
|
||||
conn = psycopg2.connect(dsn)
|
||||
cursor = conn.cursor()
|
||||
|
||||
#Drop all the indexes before generating jobs
|
||||
# Drop all the indexes before generating jobs
|
||||
print('removing indexes and constraints')
|
||||
# get all the indexes for main_jobevent
|
||||
# disable WAL to drastically increase write speed
|
||||
@@ -287,17 +293,20 @@ if __name__ == '__main__':
|
||||
# insert data as quickly as possible without concern for the risk of
|
||||
# data loss on crash
|
||||
# see: https://www.compose.com/articles/faster-performance-with-unlogged-tables-in-postgresql/
|
||||
|
||||
|
||||
cursor.execute('ALTER TABLE main_jobevent SET UNLOGGED')
|
||||
cursor.execute("SELECT indexname, indexdef FROM pg_indexes WHERE tablename='main_jobevent' AND indexname != 'main_jobevent_pkey1';")
|
||||
indexes = cursor.fetchall()
|
||||
|
||||
|
||||
cursor.execute(
|
||||
"SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = 'main_jobevent'::regclass AND conname != 'main_jobevent_pkey1';") # noqa
|
||||
"SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = 'main_jobevent'::regclass AND conname != 'main_jobevent_pkey1';"
|
||||
) # noqa
|
||||
constraints = cursor.fetchall()
|
||||
|
||||
|
||||
# drop all indexes for speed
|
||||
for indexname, indexdef in indexes:
|
||||
if indexname == 'main_jobevent_pkey_new': # Dropped by the constraint
|
||||
continue
|
||||
cursor.execute(f'DROP INDEX IF EXISTS {indexname}')
|
||||
print(f'DROP INDEX IF EXISTS {indexname}')
|
||||
for conname, contype, condef in constraints:
|
||||
@@ -305,34 +314,35 @@ if __name__ == '__main__':
|
||||
print(f'ALTER TABLE main_jobevent DROP CONSTRAINT IF EXISTS {conname}')
|
||||
conn.commit()
|
||||
|
||||
|
||||
for i_day in range(days_delta,0,-1):
|
||||
for i_day in range(days_delta, 0, -1):
|
||||
for j_hour in range(24):
|
||||
time_delta = datetime.timedelta(days=i_day, hours=j_hour, seconds=0)
|
||||
created_job_ids = generate_jobs(jobs, batch_size=batch_size, time_delta=time_delta)
|
||||
for k_id in created_job_ids:
|
||||
generate_events(events, str(k_id), time_delta)
|
||||
if events > 0:
|
||||
for k_id in created_job_ids:
|
||||
generate_events(events, str(k_id), time_delta)
|
||||
print(datetime.datetime.utcnow().isoformat())
|
||||
conn.close()
|
||||
|
||||
|
||||
finally:
|
||||
# restore all indexes
|
||||
print(datetime.datetime.utcnow().isoformat())
|
||||
print('restoring indexes and constraints (this may take awhile)')
|
||||
|
||||
|
||||
workers = []
|
||||
for indexname, indexdef in indexes:
|
||||
if indexname == 'main_jobevent_pkey_new': # Created by the constraint
|
||||
continue
|
||||
p = multiprocessing.Process(target=cleanup, args=(indexdef,))
|
||||
p.daemon = True
|
||||
workers.append(p)
|
||||
|
||||
|
||||
for w in workers:
|
||||
w.start()
|
||||
|
||||
|
||||
for w in workers:
|
||||
w.join()
|
||||
|
||||
|
||||
for conname, contype, condef in constraints:
|
||||
if contype == 'c':
|
||||
# if there are any check constraints, don't add them back
|
||||
@@ -340,6 +350,8 @@ if __name__ == '__main__':
|
||||
# worthless, because Ansible doesn't emit counters, line
|
||||
# numbers, verbosity, etc... < 0)
|
||||
continue
|
||||
sql = f'ALTER TABLE main_jobevent ADD CONSTRAINT {conname} {condef}'
|
||||
cleanup(sql)
|
||||
|
||||
sql = f'ALTER TABLE main_jobevent ADD CONSTRAINT {conname} {condef}'
|
||||
cleanup(sql)
|
||||
|
||||
print(datetime.datetime.utcnow().isoformat())
|
||||
|
||||
136
tools/scripts/post_webhook.py
Executable file
136
tools/scripts/post_webhook.py
Executable file
@@ -0,0 +1,136 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from hashlib import sha1
|
||||
from sys import exit
|
||||
import click
|
||||
import hmac
|
||||
import http.client as http_client
|
||||
import json
|
||||
import logging
|
||||
import requests
|
||||
import urllib3
|
||||
import uuid
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.option('--file', required=True, help='File containing the post data.')
|
||||
@click.option('--key', "webhook_key", required=True, help='The webhook key for the job template.')
|
||||
@click.option('--url', required=True, help='The webhook url for the job template (i.e. https://tower.jowestco.net:8043/api/v2/job_templates/637/github/.')
|
||||
@click.option('--event-type', help='Specific value for Event header, defaults to "issues" for GitHub and "Push Hook" for GitLab')
|
||||
@click.option('--verbose', is_flag=True, help='Dump HTTP communication for debugging')
|
||||
@click.option('--insecure', is_flag=True, help='Ignore SSL certs if true')
|
||||
def post_webhook(file, webhook_key, url, verbose, event_type, insecure):
|
||||
"""
|
||||
Helper command for submitting POST requests to Webhook endpoints.
|
||||
|
||||
We have two sample webhooks in tools/scripts/webhook_examples for gitlab and github.
|
||||
These or any other file can be pointed to with the --file parameter.
|
||||
|
||||
\b
|
||||
Additional example webhook events can be found online.
|
||||
For GitLab see:
|
||||
https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html
|
||||
|
||||
\b
|
||||
For GitHub see:
|
||||
https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
|
||||
|
||||
\b
|
||||
For setting up webhooks in AWX see:
|
||||
https://docs.ansible.com/ansible-tower/latest/html/userguide/webhooks.html
|
||||
|
||||
\b
|
||||
Example usage for GitHub:
|
||||
./post_webhook.py \\
|
||||
--file webhook_examples/github_push.json \\
|
||||
--url https://tower.jowestco.net:8043/api/v2/job_templates/637/github/ \\
|
||||
--key AvqBR19JDFaLTsbF3p7FmiU9WpuHsJKdHDfTqKXyzv1HtwDGZ8 \\
|
||||
--insecure \\
|
||||
--type github
|
||||
|
||||
\b
|
||||
Example usage for GitLab:
|
||||
./post_webhook.py \\
|
||||
--file webhook_examples/gitlab_push.json \\
|
||||
--url https://tower.jowestco.net:8043/api/v2/job_templates/638/gitlab/ \\
|
||||
--key fZ8vUpfHfb1Dn7zHtyaAsyZC5IHFcZf2a2xiBc2jmrBDptCOL2 \\
|
||||
--insecure \\
|
||||
--type=gitlab
|
||||
|
||||
\b
|
||||
NOTE: GitLab webhooks are stored in the DB with a UID of the hash of the POST body.
|
||||
After submitting one post GitLab post body a second POST of the same payload
|
||||
can result in a response like:
|
||||
Response code: 202
|
||||
Response body:
|
||||
{
|
||||
"message": "Webhook previously received, aborting."
|
||||
}
|
||||
|
||||
If you need to test multiple GitLab posts simply change your payload slightly
|
||||
|
||||
"""
|
||||
if insecure:
|
||||
# Disable insecure warnings
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
if verbose:
|
||||
# Enable HTTP debugging
|
||||
http_client.HTTPConnection.debuglevel = 1
|
||||
# Configure logging
|
||||
logging.basicConfig()
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
requests_log = logging.getLogger("requests.packages.urllib3")
|
||||
requests_log.setLevel(logging.DEBUG)
|
||||
requests_log.propagate = True
|
||||
|
||||
# read webhook payload
|
||||
with open(file, 'r') as f:
|
||||
post_data = json.loads(f.read())
|
||||
|
||||
# Construct Headers
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
}
|
||||
|
||||
# Encode key and post_data
|
||||
key_bytes = webhook_key.encode('utf-8', 'strict')
|
||||
data_bytes = str(json.dumps(post_data)).encode('utf-8', 'strict')
|
||||
|
||||
# Compute sha1 mac
|
||||
mac = hmac.new(key_bytes, msg=data_bytes, digestmod=sha1)
|
||||
|
||||
if url.endswith('/github/'):
|
||||
headers.update(
|
||||
{
|
||||
'X-Hub-Signature': 'sha1={}'.format(mac.hexdigest()),
|
||||
'X-GitHub-Event': 'issues' if event_type == 'default' else event_type,
|
||||
'X-GitHub-Delivery': str(uuid.uuid4()),
|
||||
}
|
||||
)
|
||||
elif url.endswith('/gitlab/'):
|
||||
mac = hmac.new(key_bytes, msg=data_bytes, digestmod=sha1)
|
||||
headers.update(
|
||||
{
|
||||
'X-GitLab-Event': 'Push Hook' if event_type == 'default' else event_type,
|
||||
'X-GitLab-Token': webhook_key,
|
||||
}
|
||||
)
|
||||
else:
|
||||
click.echo("This utility only knows how to support URLs that end in /github/ or /gitlab/.")
|
||||
exit(250)
|
||||
|
||||
# Make post
|
||||
r = requests.post(url, data=json.dumps(post_data), headers=headers, verify=(not insecure))
|
||||
|
||||
if not verbose:
|
||||
click.echo("Response code: {}".format(r.status_code))
|
||||
click.echo("Response body:")
|
||||
try:
|
||||
click.echo(json.dumps(r.json(), indent=4))
|
||||
except:
|
||||
click.echo(r.text)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
post_webhook()
|
||||
156
tools/scripts/webhook_examples/github_push.json
Normal file
156
tools/scripts/webhook_examples/github_push.json
Normal file
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"ref": "refs/tags/simple-tag",
|
||||
"before": "0000000000000000000000000000000000000000",
|
||||
"after": "6113728f27ae82c7b1a177c8d03f9e96e0adf246",
|
||||
"created": true,
|
||||
"deleted": false,
|
||||
"forced": false,
|
||||
"base_ref": "refs/heads/main",
|
||||
"compare": "https://github.com/Codertocat/Hello-World/compare/simple-tag",
|
||||
"commits": [],
|
||||
"head_commit": {
|
||||
"id": "6113728f27ae82c7b1a177c8d03f9e96e0adf246",
|
||||
"tree_id": "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
|
||||
"distinct": true,
|
||||
"message": "Adding a .gitignore file",
|
||||
"timestamp": "2019-05-15T15:20:41Z",
|
||||
"url": "https://github.com/Codertocat/Hello-World/commit/6113728f27ae82c7b1a177c8d03f9e96e0adf246",
|
||||
"author": {
|
||||
"name": "Codertocat",
|
||||
"email": "21031067+Codertocat@users.noreply.github.com",
|
||||
"username": "Codertocat"
|
||||
},
|
||||
"committer": {
|
||||
"name": "Codertocat",
|
||||
"email": "21031067+Codertocat@users.noreply.github.com",
|
||||
"username": "Codertocat"
|
||||
},
|
||||
"added": [
|
||||
".gitignore"
|
||||
],
|
||||
"removed": [],
|
||||
"modified": []
|
||||
},
|
||||
"repository": {
|
||||
"id": 186853002,
|
||||
"node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=",
|
||||
"name": "Hello-World",
|
||||
"full_name": "Codertocat/Hello-World",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"name": "Codertocat",
|
||||
"email": "21031067+Codertocat@users.noreply.github.com",
|
||||
"login": "Codertocat",
|
||||
"id": 21031067,
|
||||
"node_id": "MDQ6VXNlcjIxMDMxMDY3",
|
||||
"avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Codertocat",
|
||||
"html_url": "https://github.com/Codertocat",
|
||||
"followers_url": "https://api.github.com/users/Codertocat/followers",
|
||||
"following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Codertocat/orgs",
|
||||
"repos_url": "https://api.github.com/users/Codertocat/repos",
|
||||
"events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Codertocat/received_events",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Codertocat/Hello-World",
|
||||
"description": null,
|
||||
"fork": false,
|
||||
"url": "https://github.com/Codertocat/Hello-World",
|
||||
"forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks",
|
||||
"keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}",
|
||||
"collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}",
|
||||
"teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams",
|
||||
"hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks",
|
||||
"issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}",
|
||||
"events_url": "https://api.github.com/repos/Codertocat/Hello-World/events",
|
||||
"assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}",
|
||||
"branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}",
|
||||
"tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags",
|
||||
"blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}",
|
||||
"git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}",
|
||||
"git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}",
|
||||
"trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}",
|
||||
"statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}",
|
||||
"languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages",
|
||||
"stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers",
|
||||
"contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors",
|
||||
"subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers",
|
||||
"subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription",
|
||||
"commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}",
|
||||
"git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}",
|
||||
"comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}",
|
||||
"issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}",
|
||||
"contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}",
|
||||
"compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}",
|
||||
"merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges",
|
||||
"archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}",
|
||||
"downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads",
|
||||
"issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}",
|
||||
"pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}",
|
||||
"milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}",
|
||||
"notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}",
|
||||
"labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}",
|
||||
"releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}",
|
||||
"deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments",
|
||||
"created_at": 1557933565,
|
||||
"updated_at": "2019-05-15T15:20:41Z",
|
||||
"pushed_at": 1557933657,
|
||||
"git_url": "git://github.com/Codertocat/Hello-World.git",
|
||||
"ssh_url": "git@github.com:Codertocat/Hello-World.git",
|
||||
"clone_url": "https://github.com/Codertocat/Hello-World.git",
|
||||
"svn_url": "https://github.com/Codertocat/Hello-World",
|
||||
"homepage": null,
|
||||
"size": 0,
|
||||
"stargazers_count": 0,
|
||||
"watchers_count": 0,
|
||||
"language": "Ruby",
|
||||
"has_issues": true,
|
||||
"has_projects": true,
|
||||
"has_downloads": true,
|
||||
"has_wiki": true,
|
||||
"has_pages": true,
|
||||
"forks_count": 1,
|
||||
"mirror_url": null,
|
||||
"archived": false,
|
||||
"disabled": false,
|
||||
"open_issues_count": 2,
|
||||
"license": null,
|
||||
"forks": 1,
|
||||
"open_issues": 2,
|
||||
"watchers": 0,
|
||||
"default_branch": "master",
|
||||
"stargazers": 0,
|
||||
"master_branch": "master"
|
||||
},
|
||||
"pusher": {
|
||||
"name": "Codertocat",
|
||||
"email": "21031067+Codertocat@users.noreply.github.com"
|
||||
},
|
||||
"sender": {
|
||||
"login": "Codertocat",
|
||||
"id": 21031067,
|
||||
"node_id": "MDQ6VXNlcjIxMDMxMDY3",
|
||||
"avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Codertocat",
|
||||
"html_url": "https://github.com/Codertocat",
|
||||
"followers_url": "https://api.github.com/users/Codertocat/followers",
|
||||
"following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Codertocat/orgs",
|
||||
"repos_url": "https://api.github.com/users/Codertocat/repos",
|
||||
"events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Codertocat/received_events",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
}
|
||||
}
|
||||
71
tools/scripts/webhook_examples/gitlab_push.json
Normal file
71
tools/scripts/webhook_examples/gitlab_push.json
Normal file
@@ -0,0 +1,71 @@
|
||||
{
|
||||
"object_kind": "push",
|
||||
"event_name": "push",
|
||||
"before": "95790bf891e76fee5e1747ab589903a6a1f80f22",
|
||||
"after": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
|
||||
"ref": "refs/heads/master",
|
||||
"checkout_sha": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
|
||||
"user_id": 4,
|
||||
"user_name": "John Smith",
|
||||
"user_username": "jsmith",
|
||||
"user_email": "john@example.com",
|
||||
"user_avatar": "https://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=8://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=80",
|
||||
"project_id": 15,
|
||||
"project":{
|
||||
"id": 15,
|
||||
"name":"Diaspora",
|
||||
"description":"",
|
||||
"web_url":"http://example.com/mike/diaspora",
|
||||
"avatar_url":null,
|
||||
"git_ssh_url":"git@example.com:mike/diaspora.git",
|
||||
"git_http_url":"http://example.com/mike/diaspora.git",
|
||||
"namespace":"Mike",
|
||||
"visibility_level":0,
|
||||
"path_with_namespace":"mike/diaspora",
|
||||
"default_branch":"master",
|
||||
"homepage":"http://example.com/mike/diaspora",
|
||||
"url":"git@example.com:mike/diaspora.git",
|
||||
"ssh_url":"git@example.com:mike/diaspora.git",
|
||||
"http_url":"http://example.com/mike/diaspora.git"
|
||||
},
|
||||
"repository":{
|
||||
"name": "Diaspora",
|
||||
"url": "git@example.com:mike/diaspora.git",
|
||||
"description": "",
|
||||
"homepage": "http://example.com/mike/diaspora",
|
||||
"git_http_url":"http://example.com/mike/diaspora.git",
|
||||
"git_ssh_url":"git@example.com:mike/diaspora.git",
|
||||
"visibility_level":0
|
||||
},
|
||||
"commits": [
|
||||
{
|
||||
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
|
||||
"message": "Update Catalan translation to e38cb41.\n\nSee https://gitlab.com/gitlab-org/gitlab for more information",
|
||||
"title": "Update Catalan translation to e38cb41.",
|
||||
"timestamp": "2011-12-12T14:27:31+02:00",
|
||||
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
|
||||
"author": {
|
||||
"name": "Jordi Mallach",
|
||||
"email": "jordi@softcatala.org"
|
||||
},
|
||||
"added": ["CHANGELOG"],
|
||||
"modified": ["app/controller/application.rb"],
|
||||
"removed": []
|
||||
},
|
||||
{
|
||||
"id": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
|
||||
"message": "fixed readme",
|
||||
"title": "fixed readme",
|
||||
"timestamp": "2012-01-03T23:36:29+02:00",
|
||||
"url": "http://example.com/mike/diaspora/commit/da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
|
||||
"author": {
|
||||
"name": "GitLab dev user",
|
||||
"email": "gitlabdev@dv6700.(none)"
|
||||
},
|
||||
"added": ["CHANGELOG"],
|
||||
"modified": ["app/controller/application.rb"],
|
||||
"removed": []
|
||||
}
|
||||
],
|
||||
"total_commits_count": 4
|
||||
}
|
||||
18
tools/sosreport/TESTING.md
Normal file
18
tools/sosreport/TESTING.md
Normal file
@@ -0,0 +1,18 @@
|
||||
Create a RHEL box and then do the following.
|
||||
```bash
|
||||
sudo mkdir -p /usr/share/sosreport/sos/plugins
|
||||
sudo yum install sos
|
||||
sudo cp controller.py /usr/share/sosreport/sos/plugins
|
||||
sudo chmod 644 /usr/share/sosreport/sos/plugins/controller.py
|
||||
sudo ln -s /usr/share/sosreport/sos/plugins/controller.py "$(find "$(find /usr/lib -name sos)" -name plugins)"
|
||||
sosreport -l | grep controller
|
||||
```
|
||||
|
||||
The results should be:
|
||||
```bash
|
||||
# sosreport -l | grep controller
|
||||
controller Ansible Automation Platform controller information
|
||||
```
|
||||
|
||||
To run only the controller plugin run: `sosreport --only-plugins controller`
|
||||
|
||||
@@ -22,15 +22,16 @@ SOSREPORT_CONTROLLER_COMMANDS = [
|
||||
"ls -ll /var/lib/awx", # check permissions
|
||||
"ls -ll /var/lib/awx/venv", # list all venvs
|
||||
"ls -ll /etc/tower",
|
||||
"ls -ll /var/run/awx-receptor", # list contents of dirctory where receptor socket should be
|
||||
"ls -ll /var/run/awx-receptor", # list contents of dirctory where receptor socket should be
|
||||
"ls -ll /etc/receptor",
|
||||
"receptorctl --socket /var/run/awx-receptor/receptor.sock status", # Get information about the status of the mesh
|
||||
"receptorctl --socket /var/run/awx-receptor/receptor.sock status", # Get information about the status of the mesh
|
||||
"umask -p", # check current umask
|
||||
]
|
||||
|
||||
SOSREPORT_CONTROLLER_DIRS = [
|
||||
"/etc/tower/",
|
||||
"/etc/receptor/",
|
||||
"/etc/supervisord.conf",
|
||||
"/etc/supervisord.d/",
|
||||
"/etc/nginx/",
|
||||
"/var/log/tower",
|
||||
@@ -54,7 +55,7 @@ SOSREPORT_FORBIDDEN_PATHS = [
|
||||
"/etc/tower/awx.cert",
|
||||
"/var/log/tower/profile",
|
||||
"/etc/receptor/tls/ca/*.key",
|
||||
"/etc/receptor/tls/*.key"
|
||||
"/etc/receptor/tls/*.key",
|
||||
]
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user