diff --git a/installer/roles/kubernetes/defaults/main.yml b/installer/roles/kubernetes/defaults/main.yml index fae302f780..9100e9e537 100644 --- a/installer/roles/kubernetes/defaults/main.yml +++ b/installer/roles/kubernetes/defaults/main.yml @@ -1,6 +1,19 @@ --- -dockerhub_web_image: "{{ dockerhub_base | default('ansible') }}/awx_web:{{ dockerhub_version | default('latest') }}" -dockerhub_task_image: "{{ dockerhub_base | default('ansible') }}/awx_task:{{ dockerhub_version | default('latest') }}" +admin_user: 'admin' +admin_email: 'root@localhost' +admin_password: 'password' + +rabbitmq_user: 'awx' +rabbitmq_password: 'password' +rabbitmq_erlang_cookie: 'cookiemonster' + +kubernetes_base_path: "{{ local_base_config_path|default('/tmp') }}/{{ kubernetes_deployment_name }}-config" + +kubernetes_task_version: "{{ tower_package_version | default(dockerhub_version) }}" +kubernetes_task_image: "{{ tower_package_name | default(dockerhub_base+'/awx_task') }}" + +kubernetes_web_version: "{{ tower_package_version | default(dockerhub_version) }}" +kubernetes_web_image: "{{ tower_package_name | default(dockerhub_base+'/awx_web') }}" web_mem_request: 1 web_cpu_request: 500 @@ -25,4 +38,4 @@ openshift_pg_pvc_name: postgresql kubernetes_deployment_name: awx -tiller_namespace: kube-system +kubernetes_deployment_replica_size: 1 diff --git a/installer/roles/kubernetes/tasks/backup.yml b/installer/roles/kubernetes/tasks/backup.yml new file mode 100644 index 0000000000..e01b740dba --- /dev/null +++ b/installer/roles/kubernetes/tasks/backup.yml @@ -0,0 +1,80 @@ +--- +- name: Determine the timestamp for the backup. 
+ set_fact: + now: '{{ lookup("pipe", "date +%F-%T") }}' + +- include_tasks: openshift_auth.yml + when: openshift_host is defined + +- include_tasks: kubernetes_auth.yml + when: kubernetes_context is defined + +- name: Use kubectl or oc + set_fact: + kubectl_or_oc: "{{ openshift_oc_bin if openshift_oc_bin is defined else 'kubectl' }}" + +- name: Delete any existing management pod + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + delete pod ansible-tower-management --grace-period=0 --ignore-not-found + +- name: Template management pod + set_fact: + management_pod: "{{ lookup('template', 'management-pod.yml.j2') }}" + +- name: Create management pod + shell: | + echo {{ management_pod | quote }} | {{ kubectl_or_oc }} apply -f - + +- name: Wait for management pod to start + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + get pod ansible-tower-management -o jsonpath="{.status.phase}" + register: result + until: result.stdout == "Running" + retries: 60 + +- name: Create directory for backup + file: + state: directory + path: "{{ playbook_dir }}/tower-openshift-backup-{{ now }}" + +- name: Precreate file for database dump + file: + path: "{{ playbook_dir }}/tower-openshift-backup-{{ now }}/tower.db" + state: touch + mode: 0600 + +- name: Dump database + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} exec ansible-tower-management -- \ + bash -c "PGPASSWORD={{ pg_password }} \ + pg_dump --clean --create \ + --host='{{ pg_hostname | default('postgresql') }}' \ + --port={{ pg_port | default('5432') }} \ + --username='{{ pg_username }}' \ + --dbname='{{ pg_database }}'" > {{ playbook_dir }}/tower-openshift-backup-{{ now }}/tower.db + +- name: Copy inventory into backup directory + copy: + src: "{{ inventory_file }}" + dest: "{{ playbook_dir }}/tower-openshift-backup-{{ now }}/" + mode: 0600 + +- name: Delete management pod + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + delete pod ansible-tower-management 
--grace-period=0 --ignore-not-found
+
+- name: Create backup archive
+  archive:
+    path: "{{ playbook_dir }}/tower-openshift-backup-{{ now }}"
+    dest: "{{ item }}"
+  with_items:
+    - "{{ playbook_dir }}/tower-openshift-backup-{{ now }}.tar.gz"
+    - "{{ playbook_dir }}/tower-openshift-backup-latest.tar.gz"
+
+- name: Remove temporary backup directory
+  file:
+    path: "{{ playbook_dir }}/tower-openshift-backup-{{ now }}"
+    state: absent
diff --git a/installer/roles/kubernetes/tasks/kubernetes.yml b/installer/roles/kubernetes/tasks/kubernetes.yml
index a11ed63272..717ee4bcd0 100644
--- a/installer/roles/kubernetes/tasks/kubernetes.yml
+++ b/installer/roles/kubernetes/tasks/kubernetes.yml
@@ -1,6 +1,4 @@
-- name: Set the Kubernetes Context
-  shell: "kubectl config use-context {{ kubernetes_context }}"
-
+---
 - name: Get Namespace Detail
   shell: "kubectl get namespace {{ kubernetes_namespace }}"
   register: namespace_details
@@ -14,3 +12,4 @@
   set_fact:
     postgresql_service_name: "{{ kubernetes_deployment_name }}-postgresql"
   when: "pg_hostname is not defined or pg_hostname == ''"
+
diff --git a/installer/roles/kubernetes/tasks/kubernetes_auth.yml b/installer/roles/kubernetes/tasks/kubernetes_auth.yml
new file mode 100644
index 0000000000..a84490cfac
--- /dev/null
+++ b/installer/roles/kubernetes/tasks/kubernetes_auth.yml
@@ -0,0 +1,3 @@
+---
+- name: Set the Kubernetes Context
+  shell: "kubectl config use-context {{ kubernetes_context }}"
diff --git a/installer/roles/kubernetes/tasks/main.yml b/installer/roles/kubernetes/tasks/main.yml
index 4cec23ad4e..1bbccc55fb 100644
--- a/installer/roles/kubernetes/tasks/main.yml
+++ b/installer/roles/kubernetes/tasks/main.yml
@@ -3,20 +3,48 @@
     msg: "Only set one of kubernetes_context or openshift_host"
   when: openshift_host is defined and kubernetes_context is defined
 
-- name: Set kubernetes base path
-  set_fact:
-    kubernetes_base_path: "{{ local_base_config_path|default('/tmp') }}/{{ kubernetes_deployment_name }}-config"
-
-- include_tasks: 
openshift.yml +- include_tasks: "{{ tasks }}" + with_items: + - openshift_auth.yml + - openshift.yml + loop_control: + loop_var: tasks when: openshift_host is defined -- include_tasks: kubernetes.yml +- include_tasks: "{{ tasks }}" + with_items: + - kubernetes_auth.yml + - kubernetes.yml + loop_control: + loop_var: tasks when: kubernetes_context is defined - name: Use kubectl or oc set_fact: kubectl_or_oc: "{{ openshift_oc_bin if openshift_oc_bin is defined else 'kubectl' }}" +- set_fact: + deployment_object: "{{ 'dc' if openshift_host is defined else 'deployment' }}" + +- name: Record deployment size + shell: | + {{ kubectl_or_oc }} get {{ deployment_object }} \ + {{ kubernetes_deployment_name }} \ + -n {{ kubernetes_namespace }} -o=jsonpath='{.status.replicas}' + register: deployment_details + ignore_errors: yes + +- name: Set expected post-deployment Replicas value + set_fact: + kubernetes_deployment_replica_size: "{{ deployment_details.stdout | int }}" + when: deployment_details.rc == 0 + +- name: Delete existing Deployment + shell: | + {{ kubectl_or_oc }} delete {{ deployment_object }} \ + {{ kubernetes_deployment_name }} -n {{ kubernetes_namespace }} + when: deployment_details.rc == 0 + - name: Get Postgres Service Detail shell: "{{ kubectl_or_oc }} describe svc {{ postgresql_service_name }} -n {{ kubernetes_namespace }}" register: postgres_svc_details @@ -39,10 +67,12 @@ -e POSTGRESQL_MAX_CONNECTIONS={{ pg_max_connections|default(1024) }} \ -e POSTGRESQL_USER={{ pg_username }} \ -e POSTGRESQL_PASSWORD={{ pg_password }} \ + -e POSTGRESQL_ADMIN_PASSWORD={{ pg_password }} \ -e POSTGRESQL_DATABASE={{ pg_database }} \ -e POSTGRESQL_VERSION=9.5 \ -n {{ kubernetes_namespace }} register: openshift_pg_activate + no_log: yes when: - pg_hostname is not defined or pg_hostname == '' - postgres_svc_details is defined and postgres_svc_details.rc != 0 @@ -51,7 +81,6 @@ - name: Deploy and Activate Postgres (Kubernetes) shell: | helm install --name {{ 
kubernetes_deployment_name }} --namespace {{ kubernetes_namespace }} \ - --tiller-namespace {{ tiller_namespace }} \ --set postgresUser={{ pg_username }} \ --set postgresPassword={{ pg_password }} \ --set postgresDatabase={{ pg_database }} \ @@ -62,6 +91,7 @@ - postgres_svc_details is defined and postgres_svc_details.rc != 0 - kubernetes_context is defined register: kubernetes_pg_activate + no_log: yes - name: Set postgresql hostname to helm package service set_fact: @@ -75,17 +105,6 @@ seconds: 60 when: openshift_pg_activate.changed or kubernetes_pg_activate.changed -- name: Ensure directory exists - file: - path: "{{ kubernetes_base_path }}" - state: directory - -- name: Template Kubernetes AWX Config - template: - src: configmap.yml.j2 - dest: "{{ kubernetes_base_path }}/configmap.yml" - mode: '0600' - - name: Set image names if using custom registry block: - name: Set task image name @@ -99,14 +118,88 @@ when: kubernetes_web_image is not defined when: docker_registry is defined -- name: Template Kubernetes AWX Deployment - template: - src: deployment.yml.j2 - dest: "{{ kubernetes_base_path }}/deployment.yml" - mode: '0600' - -- name: Apply Configmap - shell: "{{ kubectl_or_oc }} apply -f {{ kubernetes_base_path }}/configmap.yml" +- name: Render deployment templates + set_fact: + "{{ item }}": "{{ lookup('template', item + '.yml.j2') }}" + with_items: + - 'configmap' + - 'deployment' + - 'secret' + no_log: yes - name: Apply Deployment - shell: "{{ kubectl_or_oc }} apply -f {{ kubernetes_base_path }}/deployment.yml" + shell: | + echo {{ item | quote }} | {{ kubectl_or_oc }} apply -f - + with_items: + - "{{ configmap }}" + - "{{ deployment }}" + - "{{ secret }}" + no_log: yes + +- name: Delete any existing management pod + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + delete pod ansible-tower-management --grace-period=0 --ignore-not-found + +- name: Template management pod + set_fact: + management_pod: "{{ lookup('template', 
'management-pod.yml.j2') }}" + +- name: Create management pod + shell: | + echo {{ management_pod | quote }} | {{ kubectl_or_oc }} apply -f - + +- name: Wait for management pod to start + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + get pod ansible-tower-management -o jsonpath="{.status.phase}" + register: result + until: result.stdout == "Running" + retries: 60 + +- name: Migrate database + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} exec ansible-tower-management -- \ + bash -c "awx-manage migrate --noinput" + +- name: Check for Tower Super users + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} exec ansible-tower-management -- \ + bash -c "echo 'from django.contrib.auth.models import User; nsu = User.objects.filter(is_superuser=True).count(); exit(0 if nsu > 0 else 1)' | awx-manage shell" + register: super_check + ignore_errors: yes + changed_when: super_check.rc > 0 + +- name: create django super user if it does not exist + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} exec ansible-tower-management -- \ + bash -c "echo \"from django.contrib.auth.models import User; User.objects.create_superuser('{{ admin_user }}', '{{ admin_email }}', '{{ admin_password }}')\" | awx-manage shell" + no_log: yes + when: super_check.rc > 0 + +- name: update django super user password + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} exec ansible-tower-management -- \ + bash -c "awx-manage update_password --username='{{ admin_user }}' --password='{{ admin_password }}'" + no_log: yes + register: result + changed_when: "'Password updated' in result.stdout" + +- name: Create the default organization if it is needed. 
+ shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} exec ansible-tower-management -- \ + bash -c "awx-manage create_preload_data" + register: cdo + changed_when: "'added' in cdo.stdout" + +- name: Delete management pod + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + delete pod ansible-tower-management --grace-period=0 --ignore-not-found + +- name: Scale up deployment + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + scale {{ deployment_object }} {{ kubernetes_deployment_name }} --replicas=0 + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + scale {{ deployment_object }} {{ kubernetes_deployment_name }} --replicas={{ kubernetes_deployment_replica_size }} diff --git a/installer/roles/kubernetes/tasks/openshift.yml b/installer/roles/kubernetes/tasks/openshift.yml index f22c905182..4e4ba8f2f9 100644 --- a/installer/roles/kubernetes/tasks/openshift.yml +++ b/installer/roles/kubernetes/tasks/openshift.yml @@ -1,53 +1,4 @@ --- -- include_vars: openshift.yml - -- name: Set kubernetes_namespace - set_fact: - kubernetes_namespace: "{{ openshift_project }}" - -- name: Ensure workspace directories exist - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ kubernetes_base_path }}" - - "{{ openshift_oc_config_file | dirname }}" - -- name: Authenticate with OpenShift via user and password - shell: | - {{ openshift_oc_bin }} login {{ openshift_host }} \ - -u {{ openshift_user }} \ - -p {{ openshift_password }} \ - --insecure-skip-tls-verify={{ openshift_skip_tls_verify | default(false) | bool }} - when: - - openshift_user is defined - - openshift_password is defined - - openshift_token is not defined - register: openshift_auth_result - ignore_errors: true - no_log: true - -- name: OpenShift authentication failed on TLS verification - fail: - msg: "Failed to verify TLS, consider settings openshift_skip_tls_verify=True {{ openshift_auth_result.stderr }}" - when: - - openshift_skip_tls_verify is not defined or not 
openshift_skip_tls_verify - - openshift_auth_result.rc != 0 - - openshift_auth_result.stderr | search("certificate that does not match its hostname") - -- name: OpenShift authentication failed - fail: - msg: "{{ openshift_auth_result.stderr }}" - when: openshift_auth_result.rc != 0 - -- name: Authenticate with OpenShift via token - shell: | - {{ openshift_oc_bin }} login {{ openshift_host }} \ - --token {{ openshift_token }} \ - --insecure-skip-tls-verify={{ openshift_skip_tls_verify | default(false) | bool }} - when: openshift_token is defined - no_log: true - - name: Get Project Detail shell: "{{ openshift_oc_bin }} get project {{ openshift_project }}" register: project_details @@ -77,3 +28,7 @@ set_fact: postgresql_service_name: "postgresql" when: "pg_hostname is not defined or pg_hostname == ''" + +- name: Add privileged SCC to service account + shell: | + {{ openshift_oc_bin }} adm policy add-scc-to-user privileged system:serviceaccount:{{ openshift_project }}:awx diff --git a/installer/roles/kubernetes/tasks/openshift_auth.yml b/installer/roles/kubernetes/tasks/openshift_auth.yml new file mode 100644 index 0000000000..23cd59cb0c --- /dev/null +++ b/installer/roles/kubernetes/tasks/openshift_auth.yml @@ -0,0 +1,34 @@ +--- +- include_vars: openshift.yml + +- name: Set kubernetes_namespace + set_fact: + kubernetes_namespace: "{{ openshift_project }}" + +- name: Ensure workspace directories exist + file: + path: "{{ item }}" + state: directory + with_items: + - "{{ kubernetes_base_path }}" + - "{{ openshift_oc_config_file | dirname }}" + +- name: Authenticate with OpenShift via user and password + shell: | + {{ openshift_oc_bin }} login {{ openshift_host }} \ + -u {{ openshift_user }} \ + -p {{ openshift_password }} \ + --insecure-skip-tls-verify={{ openshift_skip_tls_verify | default(false) | bool }} + when: + - openshift_user is defined + - openshift_password is defined + - openshift_token is not defined + no_log: true + +- name: Authenticate with OpenShift via 
token + shell: | + {{ openshift_oc_bin }} login {{ openshift_host }} \ + --token {{ openshift_token }} \ + --insecure-skip-tls-verify={{ openshift_skip_tls_verify | default(false) | bool }} + when: openshift_token is defined + no_log: true diff --git a/installer/roles/kubernetes/tasks/restore.yml b/installer/roles/kubernetes/tasks/restore.yml new file mode 100644 index 0000000000..84967896d4 --- /dev/null +++ b/installer/roles/kubernetes/tasks/restore.yml @@ -0,0 +1,123 @@ +--- +- include_tasks: openshift_auth.yml + when: openshift_host is defined + +- include_tasks: kubernetes_auth.yml + when: kubernetes_context is defined + +- name: Use kubectl or oc + set_fact: + kubectl_or_oc: "{{ openshift_oc_bin if openshift_oc_bin is defined else 'kubectl' }}" + +- name: Remove any present restore directories + file: + state: absent + path: "{{ playbook_dir }}/tower-openshift-restore" + +- name: Create directory for restore data + file: + state: directory + path: "{{ playbook_dir }}/tower-openshift-restore" + +- name: Unarchive Tower backup + unarchive: + src: tower-openshift-backup-latest.tar.gz + dest: "{{ playbook_dir }}/tower-openshift-restore" + extra_opts: [--strip-components=1] + +- set_fact: + deployment_object: "{{ 'dc' if openshift_host is defined else 'deployment' }}" + +- name: Record deployment size + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + get {{ deployment_object }} {{ kubernetes_deployment_name }} -o jsonpath="{.status.replicas}" + register: deployment_size + +- name: Scale deployment down + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + scale {{ deployment_object }} {{ kubernetes_deployment_name }} --replicas=0 + +- name: Delete any existing management pod + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + delete pod ansible-tower-management --grace-period=0 --ignore-not-found + +- name: Wait for scale down + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} get pods \ + -o 
jsonpath='{.items[*].metadata.name}' \
+      | tr -s '[[:space:]]' '\n' \
+      | grep {{ kubernetes_deployment_name }} \
+      | grep -v postgres | wc -l
+  register: tower_pods
+  until: (tower_pods.stdout | trim) == '0'
+  retries: 30
+
+- name: Template management pod
+  set_fact:
+    management_pod: "{{ lookup('template', 'management-pod.yml.j2') }}"
+
+- name: Create management pod
+  shell: |
+    echo {{ management_pod | quote }} | {{ kubectl_or_oc }} apply -f -
+
+- name: Wait for management pod to start
+  shell: |
+    {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \
+      get pod ansible-tower-management -o jsonpath="{.status.phase}"
+  register: result
+  until: result.stdout == "Running"
+  retries: 60
+
+- name: Temporarily grant createdb role
+  shell: |
+    {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \
+      exec -i ansible-tower-management -- bash -c "PGPASSWORD={{ pg_password }} \
+      psql \
+      --host={{ pg_hostname | default('postgresql') }} \
+      --port={{ pg_port | default('5432') }} \
+      --username=postgres \
+      --dbname=template1 -c 'ALTER USER {{ pg_username }} CREATEDB;'"
+  no_log: true
+  when: pg_hostname is not defined or pg_hostname == ''
+
+- name: Perform a PostgreSQL restore
+  shell: |
+    {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \
+      exec -i ansible-tower-management -- bash -c "PGPASSWORD={{ pg_password }} \
+      psql \
+      --host={{ pg_hostname | default('postgresql') }} \
+      --port={{ pg_port | default('5432') }} \
+      --username={{ pg_username }} \
+      --dbname=template1" < {{ playbook_dir }}/tower-openshift-restore/tower.db
+  no_log: true
+
+- name: Revoke createdb role
+  shell: |
+    {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \
+      exec -i ansible-tower-management -- bash -c "PGPASSWORD={{ pg_password }} \
+      psql \
+      --host={{ pg_hostname | default('postgresql') }} \
+      --port={{ pg_port | default('5432') }} \
+      --username=postgres \
+      --dbname=template1 -c 'ALTER USER {{ pg_username }} NOCREATEDB;'"
+  no_log: true
+  when: pg_hostname is not defined or pg_hostname == ''
+
+- name: Delete 
management pod + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + delete pod ansible-tower-management --grace-period=0 --ignore-not-found + +- name: Remove restore directory + file: + state: absent + path: "{{ playbook_dir }}/tower-openshift-restore" + +- name: Scale deployment back up + shell: | + {{ kubectl_or_oc }} -n {{ kubernetes_namespace }} \ + scale {{ deployment_object }} {{ kubernetes_deployment_name }} --replicas={{ deployment_size.stdout }} + when: deployment_size.stdout != '' diff --git a/installer/roles/kubernetes/templates/configmap.yml.j2 b/installer/roles/kubernetes/templates/configmap.yml.j2 index a2432ec54c..ae67be2b11 100644 --- a/installer/roles/kubernetes/templates/configmap.yml.j2 +++ b/installer/roles/kubernetes/templates/configmap.yml.j2 @@ -10,8 +10,7 @@ data: import socket ADMINS = () - # Container environments don't like chroots - AWX_PROOT_ENABLED = False + AWX_PROOT_ENABLED = True # Automatically deprovision pods that go offline AWX_AUTO_DEPROVISION_INSTANCES = True @@ -51,10 +50,11 @@ data: LOGGING['loggers']['django.request']['handlers'] = ['console'] LOGGING['loggers']['rest_framework.request']['handlers'] = ['console'] - LOGGING['loggers']['awx']['handlers'] = ['console', 'external_logger'] + LOGGING['loggers']['awx']['handlers'] = ['console'] LOGGING['loggers']['awx.main.commands.run_callback_receiver']['handlers'] = ['console'] - LOGGING['loggers']['awx.main.tasks']['handlers'] = ['console', 'external_logger'] - LOGGING['loggers']['awx.main.scheduler']['handlers'] = ['console', 'external_logger'] + LOGGING['loggers']['awx.main.commands.inventory_import']['handlers'] = ['console'] + LOGGING['loggers']['awx.main.tasks']['handlers'] = ['console'] + LOGGING['loggers']['awx.main.scheduler']['handlers'] = ['console'] LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console'] LOGGING['loggers']['social']['handlers'] = ['console'] LOGGING['loggers']['system_tracking_migrations']['handlers'] = ['console'] @@ -68,28 
+68,6 @@ data: LOGGING['handlers']['system_tracking_migrations'] = {'class': 'logging.NullHandler'} LOGGING['handlers']['management_playbooks'] = {'class': 'logging.NullHandler'} - DATABASES = { - 'default': { - 'ATOMIC_REQUESTS': True, - 'ENGINE': 'django.db.backends.postgresql', - 'NAME': "{{ pg_database }}", - 'USER': "{{ pg_username }}", - 'PASSWORD': "{{ pg_password }}", - 'HOST': "{{ pg_hostname|default('postgresql') }}", - 'PORT': "{{ pg_port }}", - } - } - BROKER_URL = 'amqp://{}:{}@{}:{}/{}'.format( - "awx", - "abcdefg", - "localhost", - "5672", - "awx") - CHANNEL_LAYERS = { - 'default': {'BACKEND': 'asgi_amqp.AMQPChannelLayer', - 'ROUTING': 'awx.main.routing.channel_routing', - 'CONFIG': {'url': BROKER_URL}} - } CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', @@ -99,3 +77,5 @@ data: 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, } + + USE_X_FORWARDED_PORT = True diff --git a/installer/roles/kubernetes/templates/credentials.py.j2 b/installer/roles/kubernetes/templates/credentials.py.j2 new file mode 100644 index 0000000000..abb73c06ec --- /dev/null +++ b/installer/roles/kubernetes/templates/credentials.py.j2 @@ -0,0 +1,22 @@ +DATABASES = { + 'default': { + 'ATOMIC_REQUESTS': True, + 'ENGINE': 'django.db.backends.postgresql', + 'NAME': "{{ pg_database }}", + 'USER': "{{ pg_username }}", + 'PASSWORD': "{{ pg_password }}", + 'HOST': "{{ pg_hostname|default('postgresql') }}", + 'PORT': "{{ pg_port }}", + } +} +BROKER_URL = 'amqp://{}:{}@{}:{}/{}'.format( + "{{ rabbitmq_user }}", + "{{ rabbitmq_password }}", + "localhost", + "5672", + "awx") +CHANNEL_LAYERS = { + 'default': {'BACKEND': 'asgi_amqp.AMQPChannelLayer', + 'ROUTING': 'awx.main.routing.channel_routing', + 'CONFIG': {'url': BROKER_URL}} +} diff --git a/installer/roles/kubernetes/templates/deployment.yml.j2 b/installer/roles/kubernetes/templates/deployment.yml.j2 index 7387b5e8d1..88485205f0 100644 --- 
a/installer/roles/kubernetes/templates/deployment.yml.j2 +++ b/installer/roles/kubernetes/templates/deployment.yml.j2 @@ -1,3 +1,10 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: awx + namespace: {{ kubernetes_namespace }} + --- kind: Service apiVersion: v1 @@ -31,8 +38,8 @@ data: enabled_plugins: | [rabbitmq_management,rabbitmq_peer_discovery_k8s]. rabbitmq.conf: | - default_user = awx - default_pass = abcdefg + default_user = {{ rabbitmq_user }} + default_pass = {{ rabbitmq_password }} default_vhost = awx ## Clustering @@ -47,13 +54,6 @@ data: ## enable guest user loopback_users.guest = false ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rabbitmq - namespace: {{ kubernetes_namespace }} - {% if kubernetes_context is defined %} --- kind: Role @@ -73,7 +73,7 @@ metadata: namespace: {{ kubernetes_namespace }} subjects: - kind: ServiceAccount - name: rabbitmq + name: awx roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -102,10 +102,10 @@ roleRef: namespace: {{ kubernetes_namespace }} subjects: - kind: ServiceAccount - name: rabbitmq + name: awx namespace: {{ kubernetes_namespace }} userNames: - - system:serviceaccount:{{ kubernetes_namespace }}:rabbitmq + - system:serviceaccount:{{ kubernetes_namespace }}:awx {% endif %} --- @@ -128,10 +128,10 @@ spec: service: django app: rabbitmq spec: - serviceAccountName: rabbitmq + serviceAccountName: awx containers: - name: {{ kubernetes_deployment_name }}-web - image: {{ kubernetes_web_image | default(dockerhub_web_image) }} + image: "{{ kubernetes_web_image }}:{{ kubernetes_web_version }}" imagePullPolicy: Always env: - name: DATABASE_USER @@ -143,7 +143,10 @@ spec: - name: DATABASE_PORT value: "{{ pg_port|default('5432') }}" - name: DATABASE_PASSWORD - value: {{ pg_password }} + valueFrom: + secretKeyRef: + name: "{{ kubernetes_deployment_name }}-secrets" + key: pg_password - name: MEMCACHED_HOST value: {{ memcached_hostname|default('localhost') }} - name: RABBITMQ_HOST @@ -151,19 
+154,35 @@ spec: ports: - containerPort: 8052 volumeMounts: - - mountPath: /etc/tower - name: {{ kubernetes_deployment_name }}-application-config + - name: {{ kubernetes_deployment_name }}-application-config + mountPath: "/etc/tower" + readOnly: true + + - name: "{{ kubernetes_deployment_name }}-confd" + mountPath: "/etc/tower/conf.d/" + readOnly: true resources: requests: memory: "{{ web_mem_request }}Gi" cpu: "{{ web_cpu_request }}m" - name: {{ kubernetes_deployment_name }}-celery - image: {{ kubernetes_task_image | default(dockerhub_task_image) }} + securityContext: + privileged: true + image: "{{ kubernetes_task_image }}:{{ kubernetes_task_version }}" + command: + - /usr/bin/launch_awx_task.sh imagePullPolicy: Always volumeMounts: - - mountPath: /etc/tower - name: {{ kubernetes_deployment_name }}-application-config + - name: {{ kubernetes_deployment_name }}-application-config + mountPath: "/etc/tower" + readOnly: true + + - name: "{{ kubernetes_deployment_name }}-confd" + mountPath: "/etc/tower/conf.d/" + readOnly: true env: + - name: AWX_SKIP_MIGRATIONS + value: "1" - name: DATABASE_USER value: {{ pg_username }} - name: DATABASE_NAME @@ -173,15 +192,21 @@ spec: - name: DATABASE_PORT value: "{{ pg_port|default('5432') }}" - name: DATABASE_PASSWORD - value: {{ pg_password }} + valueFrom: + secretKeyRef: + name: "{{ kubernetes_deployment_name }}-secrets" + key: pg_password - name: MEMCACHED_HOST value: {{ memcached_hostname|default('localhost') }} - name: RABBITMQ_HOST value: {{ rabbitmq_hostname|default('localhost') }} - name: AWX_ADMIN_USER - value: {{ default_admin_user|default('admin') }} + value: {{ admin_user }} - name: AWX_ADMIN_PASSWORD - value: {{ default_admin_password|default('password') }} + valueFrom: + secretKeyRef: + name: "{{ kubernetes_deployment_name }}-secrets" + key: admin_password resources: requests: memory: "{{ task_mem_request }}Gi" @@ -215,10 +240,13 @@ spec: value: "true" - name: RABBITMQ_NODENAME value: "rabbit@$(MY_POD_IP)" + - name: 
RABBITMQ_ERLANG_COOKIE
+          valueFrom:
+            secretKeyRef:
+              name: "{{ kubernetes_deployment_name }}-secrets"
+              key: rabbitmq_erlang_cookie
         - name: K8S_SERVICE_NAME
           value: "rabbitmq"
-        - name: RABBITMQ_ERLANG_COOKIE
-          value: "cookiemonster"
         volumeMounts:
         - name: rabbitmq-config
           mountPath: /etc/rabbitmq
@@ -242,6 +270,14 @@
           path: settings.py
         - key: secret_key
           path: SECRET_KEY
+
+      - name: "{{ kubernetes_deployment_name }}-confd"
+        secret:
+          secretName: "{{ kubernetes_deployment_name }}-secrets"
+          items:
+            - key: confd_contents
+              path: 'secrets.py'
+
       - name: rabbitmq-config
         configMap:
           name: rabbitmq-config
diff --git a/installer/roles/kubernetes/templates/management-pod.yml.j2 b/installer/roles/kubernetes/templates/management-pod.yml.j2
new file mode 100644
index 0000000000..c808b72620
--- /dev/null
+++ b/installer/roles/kubernetes/templates/management-pod.yml.j2
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: ansible-tower-management
+  namespace: {{ kubernetes_namespace }}
+spec:
+  containers:
+    - name: ansible-tower-management
+      image: {{ kubernetes_task_image }}:{{ kubernetes_task_version }}
+      command: ["sleep", "infinity"]
+      volumeMounts:
+        - name: {{ kubernetes_deployment_name }}-application-config
+          mountPath: "/etc/tower"
+          readOnly: true
+
+        - name: "{{ kubernetes_deployment_name }}-confd"
+          mountPath: "/etc/tower/conf.d/"
+          readOnly: true
+  volumes:
+    - name: {{ kubernetes_deployment_name }}-application-config
+      configMap:
+        name: {{ kubernetes_deployment_name }}-config
+        items:
+          - key: {{ kubernetes_deployment_name }}_settings
+            path: settings.py
+          - key: secret_key
+            path: SECRET_KEY
+
+    - name: "{{ kubernetes_deployment_name }}-confd"
+      secret:
+        secretName: "{{ kubernetes_deployment_name }}-secrets"
+        items:
+          - key: confd_contents
+            path: 'secrets.py'
+
+  restartPolicy: Never
diff --git a/installer/roles/kubernetes/templates/postgresql-persistent.yml.j2 b/installer/roles/kubernetes/templates/postgresql-persistent.yml.j2
index 1a31b2d88c..798152df69 100644
--- 
a/installer/roles/kubernetes/templates/postgresql-persistent.yml.j2 +++ b/installer/roles/kubernetes/templates/postgresql-persistent.yml.j2 @@ -35,11 +35,13 @@ objects: annotations: template.openshift.io/expose-database_name: '{.data[''database-name'']}' template.openshift.io/expose-password: '{.data[''database-password'']}' + template.openshift.io/expose-admin_password: '{.data[''database-admin-password'']}' template.openshift.io/expose-username: '{.data[''database-user'']}' name: ${DATABASE_SERVICE_NAME} stringData: database-name: ${POSTGRESQL_DATABASE} database-password: ${POSTGRESQL_PASSWORD} + database-admin-password: ${POSTGRESQL_PASSWORD} database-user: ${POSTGRESQL_USER} - apiVersion: v1 kind: Service @@ -90,6 +92,11 @@ objects: secretKeyRef: key: database-password name: ${DATABASE_SERVICE_NAME} + - name: POSTGRESQL_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + key: database-admin-password + name: ${DATABASE_SERVICE_NAME} - name: POSTGRESQL_DATABASE valueFrom: secretKeyRef: @@ -97,7 +104,7 @@ objects: name: ${DATABASE_SERVICE_NAME} - name: POSTGRESQL_MAX_CONNECTIONS value: ${POSTGRESQL_MAX_CONNECTIONS} - image: ' ' + image: registry.access.redhat.com/rhscl/postgresql-96-rhel7 imagePullPolicy: IfNotPresent livenessProbe: initialDelaySeconds: 30 @@ -114,7 +121,7 @@ objects: - /bin/sh - -i - -c - - psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE + - psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d template1 -c 'SELECT 1' initialDelaySeconds: 5 timeoutSeconds: 1 @@ -139,16 +146,6 @@ objects: claimName: {{ openshift_pg_pvc_name }} {% endif %} triggers: - - imageChangeParams: - automatic: true - containerNames: - - postgresql - from: - kind: ImageStreamTag - name: postgresql:${POSTGRESQL_VERSION} - namespace: ${NAMESPACE} - lastTriggeredImage: "" - type: ImageChange - type: ConfigChange status: {} parameters: @@ -179,13 +176,14 @@ parameters: generate: expression name: POSTGRESQL_PASSWORD required: true +- description: Password for the PostgreSQL 
connection admin user. + displayName: PostgreSQL Connection Admin Password + from: '[a-zA-Z0-9]{16}' + generate: expression + name: POSTGRESQL_ADMIN_PASSWORD + required: true - description: Name of the PostgreSQL database accessed. displayName: PostgreSQL Database Name name: POSTGRESQL_DATABASE required: true value: sampledb -- description: Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest). - displayName: Version of PostgreSQL Image - name: POSTGRESQL_VERSION - required: true - value: "9.5" diff --git a/installer/roles/kubernetes/templates/secret.yml.j2 b/installer/roles/kubernetes/templates/secret.yml.j2 new file mode 100644 index 0000000000..f85f75e295 --- /dev/null +++ b/installer/roles/kubernetes/templates/secret.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: {{ kubernetes_namespace }} + name: "{{ kubernetes_deployment_name }}-secrets" +type: Opaque +data: + admin_password: "{{ admin_password | b64encode }}" + pg_password: "{{ pg_password | b64encode }}" + rabbitmq_password: "{{ rabbitmq_password | b64encode }}" + rabbitmq_erlang_cookie: "{{ rabbitmq_erlang_cookie | b64encode }}" + confd_contents: "{{ lookup('template', 'credentials.py.j2') | b64encode }}"