refact ip stack (#11953)

This commit is contained in:
Boris
2025-02-11 14:37:58 +03:00
committed by GitHub
parent c557adf911
commit a51e7dd07d
64 changed files with 470 additions and 208 deletions

View File

@@ -146,12 +146,16 @@
check_mode: false
register: calico
run_once: true
when: ipv4_stack | bool
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Set calico_pool_conf"
set_fact:
calico_pool_conf: '{{ calico.stdout | from_json }}'
when: calico.rc == 0 and calico.stdout
when:
- ipv4_stack | bool
- calico is defined
- calico.rc == 0 and calico.stdout
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -164,10 +168,45 @@
- not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode
msg: "Your inventory doesn't match the current cluster configuration"
when:
- ipv4_stack | bool
- calico_pool_conf is defined
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Get Calico {{ calico_pool_name }}-ipv6 configuration"
command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }}-ipv6 -o json"
failed_when: false
changed_when: false
check_mode: false
register: calico_ipv6
run_once: true
when: ipv6_stack | bool
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Set calico_pool_ipv6_conf"
set_fact:
calico_pool_ipv6_conf: '{{ calico_ipv6.stdout | from_json }}'
when:
- ipv6_stack | bool
- calico_ipv6 is defined
- calico_ipv6.rc == 0 and calico_ipv6.stdout
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check if ipv6 inventory matches current cluster configuration"
assert:
that:
- calico_pool_ipv6_conf.spec.blockSize | int == calico_pool_blocksize_ipv6 | int
- calico_pool_ipv6_conf.spec.cidr == (calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6))
- not calico_pool_ipv6_conf.spec.ipipMode is defined or calico_pool_ipv6_conf.spec.ipipMode == calico_ipip_mode_ipv6
- not calico_pool_ipv6_conf.spec.vxlanMode is defined or calico_pool_ipv6_conf.spec.vxlanMode == calico_vxlan_mode_ipv6
msg: "Your ipv6 inventory doesn't match the current cluster configuration"
when:
- ipv6_stack | bool
- calico_pool_ipv6_conf is defined
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check kdd calico_datastore if calico_apiserver_enabled"
assert:
that: calico_datastore == "kdd"
@@ -191,7 +230,6 @@
that:
- "calico_ipip_mode_ipv6 in ['Never']"
msg: "Calico doesn't support ipip tunneling for the IPv6"
when:
- enable_dual_stack_networks
when: ipv6_stack | bool
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"

View File

@@ -84,6 +84,7 @@
changed_when: false
when:
- inventory_hostname == groups['kube_control_plane'][0]
- ipv4_stack | bool
- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
assert:
@@ -91,8 +92,9 @@
msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- 'calico_conf.stdout == "0"'
- ipv4_stack | bool
- calico_pool_cidr is defined
- 'calico_conf.stdout == "0"'
- name: Calico | Check if calico IPv6 network pool has already been configured
# noqa risky-shell-pipe - grep will exit 1 if no match found
@@ -107,7 +109,7 @@
changed_when: false
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks
- ipv6_stack
- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
assert:
@@ -115,9 +117,9 @@
msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- ipv6_stack | bool
- calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
- calico_pool_cidr_ipv6 is defined
- enable_dual_stack_networks
- name: Calico | kdd specific configuration
when:
@@ -206,6 +208,7 @@
- name: Calico | Configure Calico IP Pool
when:
- inventory_hostname == groups['kube_control_plane'][0]
- ipv4_stack | bool
block:
- name: Calico | Get existing calico network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
@@ -256,7 +259,7 @@
- name: Calico | Configure Calico IPv6 Pool
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks | bool
- ipv6_stack | bool
block:
- name: Calico | Get existing calico ipv6 network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
@@ -350,7 +353,15 @@
{% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %}
"nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} ,
{% if calico_advertise_cluster_ips | default(false) %}
"serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %}
"serviceClusterIPs": >-
{%- if ipv4_stack and ipv6_stack -%}
[{"cidr": "{{ kube_service_addresses }}"}, {"cidr": "{{ kube_service_addresses_ipv6 }}"}],
{%- elif ipv6_stack -%}
[{"cidr": "{{ kube_service_addresses_ipv6 }}"}],
{%- else -%}
[{"cidr": "{{ kube_service_addresses }}"}],
{%- endif -%}
{% endif %}
{% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
"serviceExternalIPs": {{ _service_external_ips | default([]) }}
}

View File

@@ -53,13 +53,15 @@ data:
"type": "host-local",
"subnet": "usePodCidr"
},
{% else %}
{% else %}
"ipam": {
"type": "calico-ipam",
{% if enable_dual_stack_networks %}
"assign_ipv6": "true",
{% endif %}
"assign_ipv4": "true"
{% if ipv4_stack %}
"assign_ipv4": "true"{{ ',' if (ipv6_stack and ipv4_stack) }}
{% endif %}
{% if ipv6_stack %}
"assign_ipv6": "true"
{% endif %}
},
{% endif %}
{% if calico_allow_ip_forwarding %}

View File

@@ -265,7 +265,7 @@ spec:
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
- name: FELIX_IPV6SUPPORT
value: "{{ enable_dual_stack_networks | default(false) }}"
value: "{{ ipv6_stack | default(false) }}"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{ calico_loglevel }}"
@@ -308,9 +308,18 @@ spec:
- name: IP_AUTODETECTION_METHOD
value: "can-reach=$(NODEIP)"
{% endif %}
{% if ipv4_stack %}
- name: IP
value: "autodetect"
{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %}
{% else %}
- name: IP
value: none
{% endif %}
{% if ipv6_stack %}
- name: IP6
value: autodetect
{% endif %}
{% if calico_ip6_auto_method is defined and ipv6_stack %}
- name: IP6_AUTODETECTION_METHOD
value: "{{ calico_ip6_auto_method }}"
{% endif %}
@@ -318,10 +327,6 @@ spec:
- name: FELIX_MTUIFACEPATTERN
value: "{{ calico_felix_mtu_iface_pattern }}"
{% endif %}
{% if enable_dual_stack_networks %}
- name: IP6
value: autodetect
{% endif %}
{% if calico_use_default_route_src_ipaddr | default(false) %}
- name: FELIX_DEVICEROUTESOURCEADDRESS
valueFrom:

View File

@@ -22,7 +22,7 @@ calico_pool_blocksize: 26
# Calico doesn't support ipip tunneling for the IPv6.
calico_ipip_mode_ipv6: Never
calico_vxlan_mode_ipv6: Never
calico_vxlan_mode_ipv6: Always
# add default ipv6 ippool blockSize
calico_pool_blocksize_ipv6: 122

View File

@@ -4,8 +4,8 @@ cilium_min_version_required: "1.10"
cilium_debug: false
cilium_mtu: ""
cilium_enable_ipv4: true
cilium_enable_ipv6: false
cilium_enable_ipv4: "{{ ipv4_stack }}"
cilium_enable_ipv6: "{{ ipv6_stack }}"
# Enable l2 announcement from cilium to replace Metallb Ref: https://docs.cilium.io/en/v1.14/network/l2-announcements/
cilium_l2announcements: false

View File

@@ -2,7 +2,7 @@
# Flannel public IP
# The address that flannel should advertise as how to access the system
# Disabled until https://github.com/coreos/flannel/issues/712 is fixed
# flannel_public_ip: "{{ access_ip | default(ip | default(fallback_ip)) }}"
# flannel_public_ip: "{{ main_access_ip }}"
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item

View File

@@ -30,12 +30,14 @@ data:
}
net-conf.json: |
{
{% if ipv4_stack %}
"Network": "{{ kube_pods_subnet }}",
"EnableIPv4": true,
{% if enable_dual_stack_networks %}
{% endif %}
{% if ipv6_stack %}
"EnableIPv6": true,
"IPv6Network": "{{ kube_pods_subnet_ipv6 }}",
{% endif %}
{% endif %}
"Backend": {
"Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %},
"VNI": {{ flannel_vxlan_vni }},

View File

@@ -33,7 +33,7 @@ kube_ovn_central_replics: "{{ kube_ovn_central_hosts | length }}"
kube_ovn_controller_replics: "{{ kube_ovn_central_hosts | length }}"
kube_ovn_central_ips: |-
{% for item in kube_ovn_central_hosts -%}
{{ hostvars[item]['ip'] | default(hostvars[item]['fallback_ip']) }}{% if not loop.last %},{% endif %}
{{ hostvars[item]['main_ip'] }}{% if not loop.last %},{% endif %}
{%- endfor %}
kube_ovn_ic_enable: false
@@ -62,6 +62,15 @@ kube_ovn_traffic_mirror: false
kube_ovn_external_address: 8.8.8.8
kube_ovn_external_address_ipv6: 2400:3200::1
kube_ovn_external_address_merged: >-
{%- if ipv4_stack and ipv6_stack -%}
{{ kube_ovn_external_address }},{{ kube_ovn_external_address_ipv6 }}
{%- elif ipv4_stack -%}
{{ kube_ovn_external_address }}
{%- else -%}
{{ kube_ovn_external_address_ipv6 }}
{%- endif -%}
kube_ovn_external_dns: alauda.cn
# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0
@@ -74,6 +83,14 @@ kube_ovn_u2o_interconnection: false
# kube_ovn_default_exclude_ips: 10.16.0.1
kube_ovn_node_switch_cidr: 100.64.0.0/16
kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64
kube_ovn_node_switch_cidr_merged: >-
{%- if ipv4_stack and ipv6_stack -%}
{{ kube_ovn_node_switch_cidr }},{{ kube_ovn_node_switch_cidr_ipv6 }}
{%- elif ipv4_stack -%}
{{ kube_ovn_node_switch_cidr }}
{%- else -%}
{{ kube_ovn_node_switch_cidr_ipv6 }}
{%- endif -%}
## vlan config, set default interface name and vlan id
# kube_ovn_default_interface_name: eth0

View File

@@ -240,14 +240,14 @@ spec:
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /kube-ovn/start-controller.sh
- --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{ '' }}
- --default-cidr={{ kube_pods_subnets }}
- --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }}
- --default-gateway-check={{ kube_ovn_default_gateway_check | string }}
- --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }}
- --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }}
- --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }}
- --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{ '' }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
- --node-switch-cidr={{ kube_ovn_node_switch_cidr_merged }}
- --service-cluster-ip-range={{ kube_service_subnets }}
- --network-type={{ kube_ovn_network_type }}
- --default-interface-name={{ kube_ovn_default_interface_name | default('') }}
- --default-vlan-id={{ kube_ovn_default_vlan_id }}
@@ -403,7 +403,7 @@ spec:
args:
- --enable-mirror={{ kube_ovn_traffic_mirror | lower }}
- --encap-checksum={{ kube_ovn_encap_checksum | lower }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
- --service-cluster-ip-range={{ kube_service_subnets }}
- --iface={{ kube_ovn_iface | default('') }}
- --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }}
- --network-type={{ kube_ovn_network_type }}
@@ -588,7 +588,7 @@ spec:
command:
- /kube-ovn/kube-ovn-pinger
args:
- --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{ '' }}
- --external-address={{ kube_ovn_external_address_merged }}
- --external-dns={{ kube_ovn_external_dns }}
- --logtostderr=false
- --alsologtostderr=true
@@ -837,7 +837,7 @@ spec:
- name: metrics
port: 10661
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -852,7 +852,7 @@ metadata:
labels:
app: kube-ovn-pinger
spec:
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -869,7 +869,7 @@ metadata:
labels:
app: kube-ovn-controller
spec:
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -886,7 +886,7 @@ metadata:
labels:
app: kube-ovn-cni
spec:
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:

View File

@@ -260,7 +260,7 @@ spec:
port: 6641
targetPort: 6641
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -280,7 +280,7 @@ spec:
port: 6642
targetPort: 6642
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -300,7 +300,7 @@ spec:
port: 6643
targetPort: 6643
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:

View File

@@ -1,6 +1,6 @@
apiVersion: v1
kind: Config
clusterCIDR: {{ kube_pods_subnet }}
clusterCIDR: {{ kube_pods_subnets }}
clusters:
- name: cluster
cluster:

View File

@@ -18,7 +18,7 @@ weave_hairpin_mode: true
# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
weave_ipalloc_range: "{{ kube_pods_subnet }}"
weave_ipalloc_range: "{{ kube_pods_subnets }}"
# Set to 0 to disable Network Policy Controller (default is on)
weave_expect_npc: "{{ enable_network_policy }}"