project: fix var-spacing ansible rule (#10266)

* project: fix var-spacing ansible rule

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing on the beginning/end of jinja template

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing of default filter

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing between filter arguments

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix double space at beginning/end of jinja

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix remaining jinja[spacing] ansible-lint warning

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
Author: Arthur Outhenin-Chalandre
Date: 2023-07-05 05:36:54 +02:00
Committed by: GitHub
Parent: f8b93fa88a
Commit: 5d00b851ce
178 changed files with 767 additions and 733 deletions
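
For context, the rule behind this change (var-spacing, reported by current ansible-lint as jinja[spacing]) expects a single space just inside the {{ }} delimiters, around each | pipe, and after commas in filter arguments; multi-line folded templates that cannot be reformatted cleanly are instead suppressed with a "# noqa: jinja[spacing]" comment, as a few hunks below do. The task below is an illustrative sketch of that convention, reusing variable names that appear in this diff; it is not part of the commit, and the fact names (nat_outgoing_effective, calico_pre_3_22_3) are hypothetical.

# Hypothetical example only -- shows the spacing jinja[spacing] expects.
# Old style flagged by the rule:
#   "{{ retry_count|int + 1 }}" / "{{ nat_outgoing|default(false) }}"
- name: Example of jinja[spacing]-compliant expressions
  set_fact:
    retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}"
    nat_outgoing_effective: "{{ nat_outgoing | default(false) }}"
    # filter/test arguments also get a space after the comma
    calico_pre_3_22_3: "{{ calico_version is version('v3.22.3', '<') }}"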

View File

@@ -4,7 +4,7 @@
- block:
- name: Set the retry count
set_fact:
retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}"
- name: Calico | Set label for route reflector # noqa command-instead-of-shell
shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
@@ -24,6 +24,7 @@
retries: 10
- name: Calico-rr | Set route reflector cluster ID
# noqa: jinja[spacing]
set_fact:
calico_rr_node_patched: >-
{{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
@@ -38,7 +39,7 @@
- name: Fail if retry limit is reached
fail:
msg: Ended after 10 retries
when: retry_count|int == 10
when: retry_count | int == 10
- name: Retrying node configuration
debug:

View File

@@ -168,7 +168,7 @@
- name: "Check if inventory match current cluster configuration"
assert:
that:
- calico_pool_conf.spec.blockSize|int == (calico_pool_blocksize | default(kube_network_node_prefix) | int)
- calico_pool_conf.spec.blockSize | int == (calico_pool_blocksize | default(kube_network_node_prefix) | int)
- calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet))
- not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode
- not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode

View File

@@ -122,7 +122,7 @@
- block:
- name: Calico | Check if extra directory is needed
stat:
path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3','<')) else 'crd' }}"
path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3', '<')) else 'crd' }}"
register: kdd_path
- name: Calico | Set kdd path when calico < v3.22.3
set_fact:
@@ -196,7 +196,7 @@
- name: Calico | Configure calico FelixConfiguration
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config|to_json) }}"
stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -222,7 +222,7 @@
"cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
"ipipMode": "{{ calico_ipip_mode }}",
"vxlanMode": "{{ calico_vxlan_mode }}",
"natOutgoing": {{ nat_outgoing|default(false) }}
"natOutgoing": {{ nat_outgoing | default(false) }}
}
}
@@ -235,7 +235,7 @@
- name: Calico | Configure calico network pool
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool|to_json) }}"
stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -261,7 +261,7 @@
"cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}",
"ipipMode": "{{ calico_ipip_mode_ipv6 }}",
"vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
"natOutgoing": {{ nat_outgoing_ipv6|default(false) }}
"natOutgoing": {{ nat_outgoing_ipv6 | default(false) }}
}
}
@@ -274,7 +274,7 @@
- name: Calico | Configure calico ipv6 network pool
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6|to_json) }}"
stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -282,13 +282,13 @@
- name: Populate Service External IPs
set_fact:
_service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}"
_service_external_ips: "{{ _service_external_ips | default([]) + [{'cidr': item}] }}"
with_items: "{{ calico_advertise_service_external_ips }}"
run_once: yes
- name: Populate Service LoadBalancer IPs
set_fact:
_service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}"
_service_loadbalancer_ips: "{{ _service_loadbalancer_ips | default([]) + [{'cidr': item}] }}"
with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
run_once: yes
@@ -296,7 +296,7 @@
set_fact:
nodeToNodeMeshEnabled: "false"
when:
- peer_with_router|default(false) or peer_with_calico_rr|default(false)
- peer_with_router | default(false) or peer_with_calico_rr | default(false)
- inventory_hostname in groups['k8s_cluster']
run_once: yes
@@ -309,6 +309,7 @@
- name: Calico | Set kubespray BGP Configuration
set_fact:
# noqa: jinja[spacing]
_bgp_config: >
{
"kind": "BGPConfiguration",
@@ -319,12 +320,12 @@
"spec": {
"listenPort": {{ calico_bgp_listen_port }},
"logSeverityScreen": "Info",
{% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %}
"nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
{% if calico_advertise_cluster_ips|default(false) %}
{% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %}
"nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} ,
{% if calico_advertise_cluster_ips | default(false) %}
"serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %}
{% if calico_advertise_service_loadbalancer_ips|length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
"serviceExternalIPs": {{ _service_external_ips|default([]) }}
{% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
"serviceExternalIPs": {{ _service_external_ips | default([]) }}
}
}
@@ -337,7 +338,7 @@
- name: Calico | Set up BGP Configuration
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config|to_json) }}"
stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}"
changed_when: False
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -464,8 +465,8 @@
- include_tasks: peer_with_calico_rr.yml
when:
- peer_with_calico_rr|default(false)
- peer_with_calico_rr | default(false)
- include_tasks: peer_with_router.yml
when:
- peer_with_router|default(false)
- peer_with_router | default(false)

View File

@@ -13,7 +13,7 @@
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
# revert when it's already a string
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
@@ -38,7 +38,7 @@
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
# revert when it's already a string
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
@@ -64,7 +64,7 @@
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
# revert when it's already a string
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",

View File

@@ -2,13 +2,13 @@
- name: Calico | Configure peering with router(s) at global scope
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
"name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}"
"name": "global-{{ item.name | default(item.router_id | replace(':', '-')) }}"
},
"spec": {
"asNumber": "{{ item.as }}",
@@ -19,14 +19,14 @@
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
with_items:
- "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
- "{{ peers | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'global') | list | default([]) }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Configure node asNumber for per node peering
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
@@ -52,26 +52,26 @@
- name: Calico | Configure peering with router(s) at node scope
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
vars:
stdin: >
{"apiVersion": "projectcalico.org/v3",
"kind": "BGPPeer",
"metadata": {
"name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}"
"name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id | replace(':', '-')) }}"
},
"spec": {
"asNumber": "{{ item.as }}",
"node": "{{ inventory_hostname }}",
"peerIP": "{{ item.router_id }}",
"sourceAddress": "{{ item.sourceaddress|default('UseNodeIP') }}"
"sourceAddress": "{{ item.sourceaddress | default('UseNodeIP') }}"
}}
register: output
retries: 4
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
with_items:
- "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
- "{{ peers | selectattr('scope', 'undefined') | list | default([]) | union(peers | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'node') | list | default([])) }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- inventory_hostname in groups['k8s_cluster']

View File

@@ -32,12 +32,12 @@
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}.yml"
- "{{ ansible_os_family | lower }}-{{ ansible_architecture }}.yml"
- "{{ ansible_os_family | lower }}.yml"
- defaults.yml
paths:
- ../vars

View File

@@ -22,8 +22,8 @@ data:
cluster_type: "kubespray"
calico_backend: "{{ calico_network_backend }}"
{% endif %}
{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %}
as: "{{ local_as|default(global_as_num) }}"
{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router | default(false) %}
as: "{{ local_as | default(global_as_num) }}"
{% endif -%}
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
@@ -73,7 +73,7 @@ data:
"allow_ip_forwarding": true
},
{% endif %}
{% if (calico_feature_control is defined) and (calico_feature_control|length > 0) %}
{% if (calico_feature_control is defined) and (calico_feature_control | length > 0) %}
"feature_control": {
{% for fc in calico_feature_control -%}
{% set fcval = calico_feature_control[fc] -%}

View File

@@ -211,7 +211,7 @@ spec:
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{ calico_endpoint_to_host_action|default('RETURN') }}"
value: "{{ calico_endpoint_to_host_action | default('RETURN') }}"
- name: FELIX_HEALTHHOST
value: "{{ calico_healthhost }}"
{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %}
@@ -286,7 +286,7 @@ spec:
- name: IP6
value: autodetect
{% endif %}
{% if calico_use_default_route_src_ipaddr|default(false) %}
{% if calico_use_default_route_src_ipaddr | default(false) %}
- name: FELIX_DEVICEROUTESOURCEADDRESS
valueFrom:
fieldRef:

View File

@@ -8,7 +8,7 @@ cilium_enable_ipv4: true
cilium_enable_ipv6: false
# Cilium agent health port
cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}"
cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879{%- else -%}9876{%- endif -%}"
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".

View File

@@ -124,7 +124,7 @@ spec:
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{cilium_cert_dir}}"
mountPath: "{{ cilium_cert_dir }}"
readOnly: true
{% endif %}
{% for volume_mount in cilium_operator_extra_volume_mounts %}
@@ -163,7 +163,7 @@ spec:
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"
path: "{{ cilium_cert_dir }}"
{% endif %}
{% for volume in cilium_operator_extra_volumes %}
- {{ volume | to_nice_yaml(indent=2) | indent(10) }}

View File

@@ -104,7 +104,7 @@ data:
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}"
preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
@@ -251,6 +251,6 @@ data:
{% for cidr in cilium_non_masquerade_cidrs %}
- {{ cidr }}
{% endfor %}
masqLinkLocal: {{ cilium_masq_link_local|bool }}
masqLinkLocal: {{ cilium_masq_link_local | bool }}
resyncInterval: "{{ cilium_ip_masq_resync_interval }}"
{% endif %}

View File

@@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: cilium-agent
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- cilium-agent
@@ -160,7 +160,7 @@ spec:
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{cilium_cert_dir}}"
mountPath: "{{ cilium_cert_dir }}"
readOnly: true
{% endif %}
- name: clustermesh-secrets
@@ -201,7 +201,7 @@ spec:
initContainers:
{% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %}
- name: mount-cgroup
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: CGROUP_ROOT
@@ -230,7 +230,7 @@ spec:
{% endif %}
{% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %}
- name: apply-sysctl-overwrites
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: BIN_PATH
@@ -256,7 +256,7 @@ spec:
privileged: true
{% endif %}
- name: clean-cilium-state
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- /init-container.sh
@@ -309,7 +309,7 @@ spec:
{% if cilium_version | regex_replace('v') is version('1.13.1', '>=') %}
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/install-plugin.sh"
@@ -398,7 +398,7 @@ spec:
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{cilium_cert_dir}}"
path: "{{ cilium_cert_dir }}"
{% endif %}
# To read the clustermesh configuration
- name: clustermesh-secrets

View File

@@ -2,7 +2,7 @@
# Flannel public IP
# The address that flannel should advertise as how to access the system
# Disabled until https://github.com/coreos/flannel/issues/712 is fixed
# flannel_public_ip: "{{ access_ip|default(ip|default(fallback_ips[inventory_hostname])) }}"
# flannel_public_ip: "{{ access_ip | default(ip | default(fallback_ips[inventory_hostname])) }}"
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item

View File

@@ -42,23 +42,23 @@ spec:
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /kube-ovn/start-controller.sh
- --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{''}}
- --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{''}}
- --default-gateway-check={{ kube_ovn_default_gateway_check|string }}
- --default-logical-gateway={{ kube_ovn_default_logical_gateway|string }}
- --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{ '' }}
- --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }}
- --default-gateway-check={{ kube_ovn_default_gateway_check | string }}
- --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }}
- --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }}
- --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{''}}
- --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{''}}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}}
- --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }}
- --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{ '' }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
- --network-type={{ kube_ovn_network_type }}
- --default-interface-name={{ kube_ovn_default_interface_name|default('') }}
- --default-interface-name={{ kube_ovn_default_interface_name | default('') }}
- --default-vlan-id={{ kube_ovn_default_vlan_id }}
- --ls-dnat-mod-dl-dst={{ kube_ovn_ls_dnat_mod_dl_dst }}
- --pod-nic-type={{ kube_ovn_pod_nic_type }}
- --enable-lb={{ kube_ovn_enable_lb|string }}
- --enable-np={{ kube_ovn_enable_np|string }}
- --enable-lb={{ kube_ovn_enable_lb | string }}
- --enable-np={{ kube_ovn_enable_np | string }}
- --enable-eip-snat={{ kube_ovn_eip_snat_enabled }}
- --enable-external-vpc={{ kube_ovn_enable_external_vpc|string }}
- --enable-external-vpc={{ kube_ovn_enable_external_vpc | string }}
- --logtostderr=false
- --alsologtostderr=true
- --gc-interval=360
@@ -187,11 +187,11 @@ spec:
args:
- --enable-mirror={{ kube_ovn_traffic_mirror | lower }}
- --encap-checksum={{ kube_ovn_encap_checksum | lower }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}}
- --iface={{ kube_ovn_iface|default('') }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
- --iface={{ kube_ovn_iface | default('') }}
- --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }}
- --network-type={{ kube_ovn_network_type }}
- --default-interface-name={{ kube_ovn_default_interface_name|default('') }}
- --default-interface-name={{ kube_ovn_default_interface_name | default('') }}
{% if kube_ovn_mtu is defined %}
- --mtu={{ kube_ovn_mtu }}
{% endif %}
@@ -359,7 +359,7 @@ spec:
command:
- /kube-ovn/kube-ovn-pinger
args:
- --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{''}}
- --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{ '' }}
- --external-dns={{ kube_ovn_external_dns }}
- --logtostderr=false
- --alsologtostderr=true
@@ -668,6 +668,6 @@ data:
ic-db-host: "{{ kube_ovn_ic_dbhost }}"
ic-nb-port: "6645"
ic-sb-port: "6646"
gw-nodes: "{{ kube_ovn_central_hosts|join(',') }}"
gw-nodes: "{{ kube_ovn_central_hosts | join(',') }}"
auto-route: "{{ kube_ovn_ic_autoroute | lower }}"
{% endif %}

View File

@@ -7,6 +7,7 @@
- name: Macvlan | reload network
service:
# noqa: jinja[spacing]
name: >-
{% if ansible_os_family == "RedHat" -%}
network

View File

@@ -3,7 +3,7 @@ multus_conf_file: "auto"
multus_cni_conf_dir_host: "/etc/cni/net.d"
multus_cni_bin_dir_host: "/opt/cni/bin"
multus_cni_run_dir_host: "/run"
multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}"
multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}"
multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}"
multus_cni_run_dir: "{{ ('/host', multus_cni_run_dir_host) | join }}"
multus_cni_version: "0.4.0"

View File

@@ -14,7 +14,7 @@
- name: Multus | Check container engine type
set_fact:
container_manager_types: "{{ ansible_play_hosts_all|map('extract', hostvars, ['container_manager'])|list|unique }}"
container_manager_types: "{{ ansible_play_hosts_all | map('extract', hostvars, ['container_manager']) | list | unique }}"
- name: Multus | Copy manifest templates
template:
@@ -28,7 +28,7 @@
register: multus_manifest_2
vars:
query: "*|[?container_manager=='{{ container_manager }}']|[0].inventory_hostname"
vars_from_node: "{{ hostvars|json_query(query) }}"
vars_from_node: "{{ hostvars | json_query(query) }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- item.engine in container_manager_types

View File

@@ -2,7 +2,7 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
{% if container_manager_types|length >= 2 %}
{% if container_manager_types | length >= 2 %}
name: kube-multus-{{ container_manager }}-{{ image_arch }}
{% else %}
name: kube-multus-ds-{{ image_arch }}
@@ -26,7 +26,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/arch: {{ image_arch }}
{% if container_manager_types|length >= 2 %}
{% if container_manager_types | length >= 2 %}
kubespray.io/container_manager: {{ container_manager }}
{% endif %}
tolerations:
@@ -62,7 +62,7 @@ spec:
mountPropagation: HostToContainer
{% endif %}
- name: cni
mountPath: {{ multus_cni_conf_dir }}
mountPath: {{ multus_cni_conf_dir }}
- name: cnibin
mountPath: {{ multus_cni_bin_dir }}
volumes: