Fixed conflicts, ipip: true as default and added ipip_mode

AtzeDeVries
2017-07-08 14:36:44 +02:00
74 changed files with 466 additions and 357 deletions

View File

@@ -2,3 +2,4 @@
pypy_version: 2.4.0
pip_python_modules:
- httplib2
- six

View File

@@ -30,3 +30,6 @@ dns_memory_requests: 50Mi
# Autoscaler parameters
dnsmasq_nodes_per_replica: 10
dnsmasq_min_replicas: 1
# Custom name servers
dnsmasq_upstream_dns_servers: []
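To use custom resolvers, override the new empty default in your inventory — a minimal sketch with illustrative addresses:

dnsmasq_upstream_dns_servers:
  - 8.8.8.8
  - 1.1.1.1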

View File

@@ -11,6 +11,11 @@ server=/{{ dns_domain }}/{{ skydns_server }}
local=/{{ bogus_domains }}
#Set upstream dns servers
{% if dnsmasq_upstream_dns_servers|length > 0 %}
{% for srv in dnsmasq_upstream_dns_servers %}
server={{ srv }}
{% endfor %}
{% endif %}
{% if system_and_upstream_dns_servers|length > 0 %}
{% for srv in system_and_upstream_dns_servers %}
server={{ srv }}
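Rendered, the template emits one server= line per entry — a sketch assuming dns_domain=cluster.local, skydns_server=10.233.0.3, and dnsmasq_upstream_dns_servers: [8.8.8.8, 1.1.1.1] (all values depend on your inventory):

server=/cluster.local/10.233.0.3
server=8.8.8.8
server=1.1.1.1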

View File

@@ -19,7 +19,7 @@ spec:
labels:
k8s-app: dnsmasq
kubernetes.io/cluster-service: "true"
kargo/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
spec:
containers:
- name: dnsmasq

View File

@@ -8,3 +8,5 @@ docker_repo_key_info:
docker_repo_info:
repos:
docker_dns_servers_strict: yes

View File

@@ -52,8 +52,13 @@
- name: check number of nameservers
fail:
msg: "Too many nameservers"
when: docker_dns_servers|length > 3
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
- name: rtrim number of nameservers to 3
set_fact:
docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool
- name: check number of search domains
fail:
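An illustrative inventory override showing the effect of the new toggle (variable names as in the tasks above):

# group_vars/all.yml -- illustrative
docker_dns_servers_strict: no
# With docker_dns_servers = [1.1.1.1, 2.2.2.2, 3.3.3.3, 4.4.4.4] the play no
# longer fails; the slice docker_dns_servers[0:3] keeps only the first three.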

View File

@@ -52,9 +52,6 @@ calico_policy_image_repo: "calico/kube-policy-controller"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "v0.3.0"
exechealthz_version: 1.1
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"
hyperkube_image_repo: "quay.io/coreos/hyperkube"
hyperkube_image_tag: "{{ kube_version }}_coreos.0"
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
@@ -74,12 +71,16 @@ nginx_image_tag: 1.11.4-alpine
dnsmasq_version: 2.72
dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"
kubednsmasq_version: 1.3
kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
kubedns_version: 1.7
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_version: 1.14.2
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
kubednsautoscaler_version: 1.1.1
kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
elasticsearch_version: "v2.4.1"
@@ -193,26 +194,31 @@ downloads:
repo: "{{ dnsmasq_image_repo }}"
tag: "{{ dnsmasq_image_tag }}"
sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
kubednsmasq:
container: true
repo: "{{ kubednsmasq_image_repo }}"
tag: "{{ kubednsmasq_image_tag }}"
sha256: "{{ kubednsmasq_digest_checksum|default(None) }}"
kubedns:
container: true
repo: "{{ kubedns_image_repo }}"
tag: "{{ kubedns_image_tag }}"
sha256: "{{ kubedns_digest_checksum|default(None) }}"
dnsmasq_nanny:
container: true
repo: "{{ dnsmasq_nanny_image_repo }}"
tag: "{{ dnsmasq_nanny_image_tag }}"
sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
dnsmasq_sidecar:
container: true
repo: "{{ dnsmasq_sidecar_image_repo }}"
tag: "{{ dnsmasq_sidecar_image_tag }}"
sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
kubednsautoscaler:
container: true
repo: "{{ kubednsautoscaler_image_repo }}"
tag: "{{ kubednsautoscaler_image_tag }}"
sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
testbox:
container: true
repo: "{{ test_image_repo }}"
tag: "{{ test_image_tag }}"
sha256: "{{ testbox_digest_checksum|default(None) }}"
exechealthz:
container: true
repo: "{{ exechealthz_image_repo }}"
tag: "{{ exechealthz_image_tag }}"
sha256: "{{ exechealthz_digest_checksum|default(None) }}"
elasticsearch:
container: true
repo: "{{ elasticsearch_image_repo }}"

View File

@@ -2,6 +2,7 @@
# Set to false to only do certificate management
etcd_cluster_setup: true
etcd_backup_prefix: "/var/backups"
etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
etcd_data_dir: "/var/lib/etcd"

View File

@@ -3,7 +3,6 @@
command: /bin/true
notify:
- Refresh Time Fact
- Set etcd Backup Directory Prefix
- Set Backup Directory
- Create Backup Directory
- Backup etcd v2 data
@@ -13,10 +12,6 @@
- name: Refresh Time Fact
setup: filter=ansible_date_time
- name: Set etcd Backup Directory Prefix
set_fact:
etcd_backup_prefix: '/var/backups'
- name: Set Backup Directory
set_fact:
etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}"

View File

@@ -1,5 +0,0 @@
- name: Configure defaults
debug:
msg: "Check roles/kargo-defaults/defaults/main.yml"
tags:
- always

View File

@@ -1,23 +1,23 @@
# Versions
kubedns_version: 1.9
kubednsmasq_version: 1.3
exechealthz_version: 1.1
kubedns_version: 1.14.2
kubednsautoscaler_version: 1.1.1
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 50Mi
kubedns_min_replicas: 1
dns_cpu_requests: 100m
dns_memory_requests: 70Mi
kubedns_min_replicas: 2
kubedns_nodes_per_replica: 10
# Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"
kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
# Netchecker
deploy_netchecker: false
@@ -40,3 +40,4 @@ netchecker_server_memory_requests: 64M
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"

View File

@@ -1,7 +1,7 @@
---
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: http://localhost:8080/healthz
url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
register: result
until: result.status == 200
retries: 10
@@ -13,8 +13,8 @@
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: kubedns, file: kubedns-deploy.yml, type: deployment}
- {name: kubedns, file: kubedns-svc.yml, type: svc}
- {name: kube-dns, file: kubedns-deploy.yml, type: deployment}
- {name: kube-dns, file: kubedns-svc.yml, type: svc}
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]

View File

@@ -32,7 +32,7 @@ spec:
spec:
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
resources:
requests:
cpu: "20m"
@@ -42,7 +42,7 @@ spec:
- --namespace=kube-system
- --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kubedns
- --target=Deployment/kube-dns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2
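For reference, linear mode in the cluster-proportional-autoscaler sizes the Deployment roughly as follows (a sketch of its documented formula, with the defaults introduced by this commit, nodesPerReplica=10 and min=2):

replicas = max(ceil(nodes / nodesPerReplica), min)
# e.g. a 25-node cluster: max(ceil(25 / 10), 2) = 3 kube-dns replicas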

View File

@@ -1,25 +1,39 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kubedns
namespace: {{ system_namespace }}
name: kube-dns
namespace: "{{system_namespace}}"
labels:
k8s-app: kubedns
version: v19
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: {{ kubedns_min_replicas }}
# replicas: not specified here:
# 1. In order to make the Addon Manager not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kubedns
version: v19
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kubedns
version: v19
kubernetes.io/cluster-service: "true"
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
@@ -30,15 +44,14 @@ spec:
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
livenessProbe:
httpGet:
path: /healthz
port: 8080
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
@@ -51,13 +64,16 @@ spec:
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 30
initialDelaySeconds: 3
timeoutSeconds: 5
args:
# command = "/kube-dns"
- --domain={{ dns_domain }}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v={{ kube_log_level }}
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
@@ -65,25 +81,36 @@ spec:
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
image: "{{ dnsmasq_nanny_image_repo }}:{{ dnsmasq_nanny_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --log-facility=-
- -v={{ kube_log_level }}
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
{% if kube_log_level == '4' %}
- --log-queries
{% endif %}
- --local=/{{ bogus_domains }}
- --log-facility=-
- --server=/{{ dns_domain }}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
@@ -91,26 +118,37 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- name: healthz
image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 10m
memory: 50Mi
requests:
cpu: 10m
# Note that this container shouldn't really need 50Mi of memory. The
# limits are set higher than expected pending investigation on #29688.
# The extra memory was stolen from the kubedns container to keep the
# net memory requested by the pod constant.
memory: 50Mi
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: "{{ dnsmasq_sidecar_image_repo }}:{{ dnsmasq_sidecar_image_tag }}"
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
- -port=8080
- -quiet
- --v={{ kube_log_level }}
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ dns_domain }},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ dns_domain }},5,A
ports:
- containerPort: 8080
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
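Two details of the new manifest are easy to miss. In the dnsmasq container, the arguments before the bare -- are consumed by the nanny itself, while everything after it is passed through to the dnsmasq process it supervises. The sidecar's --probe flag packs a whole probe definition into one comma-separated value; rendered with an illustrative dns_domain of cluster.local it reads:

- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
# name, server to query, record to look up, interval in seconds, record type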

View File

@@ -1,15 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: kubedns
name: kube-dns
namespace: {{ system_namespace }}
labels:
k8s-app: kubedns
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "kubedns"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kubedns
k8s-app: kube-dns
clusterIP: {{ skydns_server }}
ports:
- name: dns
@@ -18,3 +19,4 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP

View File

@@ -39,7 +39,7 @@
- name: Master | wait for the apiserver to be running
uri:
url: http://localhost:8080/healthz
url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
register: result
until: result.status == 200
retries: 20

View File

@@ -5,7 +5,7 @@ metadata:
namespace: {{system_namespace}}
labels:
k8s-app: kube-apiserver
kargo: v2
kubespray: v2
spec:
hostNetwork: true
{% if kube_version | version_compare('v1.6', '>=') %}
@@ -92,7 +92,7 @@ spec:
httpGet:
host: 127.0.0.1
path: /healthz
port: 8080
port: {{ kube_apiserver_insecure_port }}
initialDelaySeconds: 30
timeoutSeconds: 10
volumeMounts:
@@ -124,4 +124,4 @@ spec:
- hostPath:
path: /etc/ssl/certs/ca-bundle.crt
name: rhel-ca-bundle
{% endif %}
{% endif %}

View File

@@ -22,12 +22,15 @@ dependencies:
file: "{{ downloads.netcheck_agent }}"
when: deploy_netchecker
tags: [download, netchecker]
- role: download
file: "{{ downloads.kubednsmasq }}"
tags: [download, dnsmasq]
- role: download
file: "{{ downloads.kubedns }}"
tags: [download, dnsmasq]
- role: download
file: "{{ downloads.exechealthz }}"
file: "{{ downloads.dnsmasq_nanny }}"
tags: [download, dnsmasq]
- role: download
file: "{{ downloads.dnsmasq_sidecar }}"
tags: [download, dnsmasq]
- role: download
file: "{{ downloads.kubednsautoscaler }}"
tags: [download, dnsmasq]

View File

@@ -32,7 +32,7 @@ openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
# Documentation regarting these values can be found
# Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
@@ -49,3 +49,6 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('')
# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
# All inventory hostnames will be written into each /etc/hosts file.
populate_inventory_to_hosts_file: true
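With the flag left at its default, the hosts task writes a managed block along these lines (addresses and hostnames are illustrative; the BEGIN/END markers come from the blockinfile task's marker, shown two files below):

# Ansible inventory hosts BEGIN
10.0.0.10 node1
10.0.0.11 node2
# Ansible inventory hosts END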

View File

@@ -1,9 +1,9 @@
---
# These tasks will undo changes done by kargo in the past if needed (e.g. when upgrading from kargo 2.0.x
# These tasks will undo changes done by kubespray in the past if needed (e.g. when upgrading from kubespray 2.0.x
# or when changing resolvconf_mode)
- name: Remove kargo specific config from dhclient config
- name: Remove kubespray specific config from dhclient config
blockinfile:
dest: "{{dhclientconffile}}"
state: absent
@@ -13,7 +13,7 @@
when: dhclientconffile is defined
notify: Preinstall | restart network
- name: Remove kargo specific dhclient hook
- name: Remove kubespray specific dhclient hook
file:
path: "{{ dhclienthookfile }}"
state: absent

View File

@@ -9,6 +9,7 @@
create: yes
backup: yes
marker: "# Ansible inventory hosts {mark}"
when: populate_inventory_to_hosts_file
- name: Hosts | populate kubernetes loadbalancer address into hosts file
lineinfile:

View File

@@ -0,0 +1,5 @@
- name: Configure defaults
debug:
msg: "Check roles/kubespray-defaults/defaults/main.yml"
tags:
- always

View File

@@ -4,6 +4,7 @@ nat_outgoing: true
# Use IP-over-IP encapsulation across hosts
ipip: true
ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets
# Set to true if you want your calico cni binaries to overwrite the
# ones from hyperkube while leaving other cni plugins intact.

View File

@@ -94,7 +94,7 @@
shell: >
echo '{
"kind": "ipPool",
"spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}},
"spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
"nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
"apiVersion": "v1",
"metadata": {"cidr": "{{ kube_pods_subnet }}"}

View File

@@ -83,6 +83,15 @@
- /etc/dhcp/dhclient.d/zdnsupdate.sh
- /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
- "{{ bin_dir }}/kubelet"
- "{{ bin_dir }}/kubernetes-scripts"
- /run/flannel
- /etc/flannel
- /run/kubernetes
- /usr/local/share/ca-certificates/kube-ca.crt
- /usr/local/share/ca-certificates/etcd-ca.crt
- /etc/ssl/certs/kube-ca.pem
- /etc/ssl/certs/etcd-ca.pem
- /var/log/pods/
tags: ['files']

View File

@@ -3,4 +3,5 @@
- name: Uncordon node
command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
delegate_to: "{{ groups['kube-master'][0] }}"
when: needs_cordoning|default(false)
when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} )
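The inline Jinja block above simply resolves to a literal true or false; an equivalent, more readable condition (an illustrative rewrite, not part of this commit) would be:

when: needs_cordoning|default(false) and inventory_hostname in groups['kube-node']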

View File

@@ -7,11 +7,11 @@
- set_fact:
needs_cordoning: >-
{% if " Ready" in kubectl_nodes.stdout %}
{% if " Ready" in kubectl_nodes.stdout -%}
true
{% else %}
{%- else -%}
false
{% endif %}
{%- endif %}
- name: Cordon node
command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}"

View File

@@ -1,6 +1,6 @@
---
- name: bootstrap/ca_trust | pull CA from cert from groups.vault|first
- name: "bootstrap/ca_trust | pull CA from cert from {{groups.vault|first}}"
command: "cat {{ vault_cert_dir }}/ca.pem"
register: vault_cert_file_cat
delegate_to: "{{ groups['vault']|first }}"

View File

@@ -26,7 +26,7 @@
mode: "{{ issue_cert_dir_mode | d('0755') }}"
owner: "{{ issue_cert_file_owner | d('root') }}"
- name: issue_cert | Generate the cert
- name: "issue_cert | Generate the cert for {{ issue_cert_role }}"
uri:
url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}"
headers: "{{ issue_cert_headers }}"
@@ -40,7 +40,7 @@
register: issue_cert_result
when: inventory_hostname == issue_cert_hosts|first
- name: issue_cert | Copy the cert to all hosts
- name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts"
copy:
content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['certificate'] }}"
dest: "{{ issue_cert_path }}"
@@ -48,7 +48,7 @@
mode: "{{ issue_cert_file_mode | d('0644') }}"
owner: "{{ issue_cert_file_owner | d('root') }}"
- name: issue_cert | Copy the key to all hosts
- name: "issue_cert | Copy key for {{ issue_cert_path }} to all hosts"
copy:
content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['private_key'] }}"
dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}"

View File

@@ -28,7 +28,7 @@
state: directory
when: inventory_hostname not in sync_file_srcs
- name: "sync_file | Copy the file to hosts that don't have it"
- name: "sync_file | Copy {{ sync_file_path }} to hosts that don't have it"
copy:
content: "{{ sync_file_contents }}"
dest: "{{ sync_file_path }}"
@@ -37,7 +37,7 @@
owner: "{{ sync_file_owner|d('root') }}"
when: inventory_hostname not in sync_file_srcs
- name: "sync_file | Copy the key file to hosts that don't have it"
- name: "sync_file | Copy {{ sync_file_key_path }} to hosts that don't have it"
copy:
content: "{{ sync_file_key_contents }}"
dest: "{{ sync_file_key_path }}"

View File

@@ -19,12 +19,12 @@
when: >-
sync_file_is_cert|d() and (sync_file_key_path is not defined or sync_file_key_path == '')
- name: "sync_file | Check if file exists"
- name: "sync_file | Check if {{sync_file_path}} file exists"
stat:
path: "{{ sync_file_path }}"
register: sync_file_stat
- name: "sync_file | Check if key file exists"
- name: "sync_file | Check if {{ sync_file_key_path }} key file exists"
stat:
path: "{{ sync_file_key_path }}"
register: sync_file_key_stat