Max Gautier 8abf49ae13
Disable podCIDR allocation from control-plane when using calico (#10639)
* Disable control plane allocating podCIDR for nodes when using calico

Calico does not use the .spec.podCIDR field for its IP address
management.
Furthermore, it can cause false positives from the kube controller
manager if kube_network_node_prefix and calico_pool_blocksize are
unaligned, which is the case with the defaults shipped by kubespray.

If the subnets obtained from kube_network_node_prefix are bigger, the
control plane would at some point conclude that it has no subnets left
for a new node, while calico itself keeps working without problems.
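
For illustration, with the usual kubespray defaults (the concrete
values below are recalled from the project defaults and should be
treated as assumptions, not as part of this change):

kube_pods_subnet: 10.233.64.0/18   # cluster-wide pod CIDR (assumed value)
kube_network_node_prefix: 24       # controller manager can hand out only 2^(24-18) = 64 node podCIDRs
calico_pool_blocksize: 26          # calico allocates /26 blocks on demand and never looks at podCIDR

Past 64 nodes the controller manager would report that it cannot
allocate a podCIDR for new nodes, even though calico still has plenty
of /26 blocks available.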

Explicitly set a default value of false for calico_ipam_host_local to
facilitate its use in templates.
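
A minimal sketch of what that explicit default could look like (file
path and comment are assumptions, not part of this excerpt):

# roles/network_plugin/calico/defaults/main.yml (assumed location)
# false selects the "calico-ipam" branch in the ConfigMap template below;
# true keeps the old host-local/usePodCidr behaviour.
calico_ipam_host_local: false

With the variable always defined, templates can test it directly, as
the ConfigMap below does with {% if calico_ipam_host_local %}, instead
of repeating a | default(false) filter at every use.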

* Don't default to kube_network_node_prefix for calico_pool_blocksize

They have different semantics: kube_network_node_prefix is intended to
be the size of the subnet for all pods on a node, while there can be
more than one calico block of the specified size per node (they are
allocated on demand).

Besides, this commit does not actually change any behavior, because the
current code is buggy: the fallback to kube_network_node_prefix never
takes effect, since calico_pool_blocksize is already defined in the
role defaults.
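
A hypothetical illustration of the dead fallback described above (not
the literal line from the role):

# calico_pool_blocksize is set in the role defaults, so it is always
# defined and the | default() fallback below can never take effect.
blockSize: {{ calico_pool_blocksize | default(kube_network_node_prefix) }}
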
2023-12-12 14:38:36 +01:00


kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
{% if calico_datastore == "etcd" %}
  etcd_endpoints: "{{ etcd_access_addresses }}"
  etcd_ca: "/calico-secrets/ca_cert.crt"
  etcd_cert: "/calico-secrets/cert.crt"
  etcd_key: "/calico-secrets/key.pem"
{% elif calico_datastore == "kdd" and typha_enabled %}
  # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas
  # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is
  # essential.
  typha_service_name: "calico-typha"
{% endif %}
{% if calico_network_backend == 'bird' %}
  cluster_type: "kubespray,bgp"
  calico_backend: "bird"
{% else %}
  cluster_type: "kubespray"
  calico_backend: "{{ calico_network_backend }}"
{% endif %}
{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router | default(false) %}
  as: "{{ local_as | default(global_as_num) }}"
{% endif -%}
  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "{{ calico_cni_name }}",
      "cniVersion":"0.3.1",
      "plugins":[
        {
{% if calico_datastore == "kdd" %}
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
{% else %}
{% if cloud_provider is defined %}
          "nodename": "{{ calico_kubelet_name.stdout }}",
{% else %}
          "nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
{% endif %}
          "type": "calico",
          "log_level": "info",
{% if calico_cni_log_file_path %}
          "log_file_path": "{{ calico_cni_log_file_path }}",
{% endif %}
{% if calico_datastore == "etcd" %}
          "etcd_endpoints": "{{ etcd_access_addresses }}",
          "etcd_cert_file": "{{ calico_cert_dir }}/cert.crt",
          "etcd_key_file": "{{ calico_cert_dir }}/key.pem",
          "etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt",
{% endif %}
{% if calico_ipam_host_local %}
          "ipam": {
            "type": "host-local",
            "subnet": "usePodCidr"
          },
{% else %}
          "ipam": {
            "type": "calico-ipam",
{% if enable_dual_stack_networks %}
            "assign_ipv6": "true",
{% endif %}
            "assign_ipv4": "true"
          },
{% endif %}
{% if calico_allow_ip_forwarding %}
          "container_settings": {
            "allow_ip_forwarding": true
          },
{% endif %}
{% if (calico_feature_control is defined) and (calico_feature_control | length > 0) %}
          "feature_control": {
{% for fc in calico_feature_control -%}
{% set fcval = calico_feature_control[fc] -%}
            "{{ fc }}": {{ (fcval | string | lower) if (fcval == true or fcval == false) else "\"" + fcval + "\"" }}{{ "," if not loop.last else "" }}
{% endfor -%}
{{- "" }}
          },
{% endif %}
{% if enable_network_policy %}
          "policy": {
            "type": "k8s"
          },
{% endif %}
{% if calico_mtu is defined and calico_mtu is number %}
          "mtu": {{ calico_mtu }},
{% endif %}
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type":"portmap",
          "capabilities": {
            "portMappings": true
          }
        },
        {
          "type":"bandwidth",
          "capabilities": {
            "bandwidth": true
          }
        }
      ]
    }
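
For reference, a sketch of how the data section could render with a
plain kdd/bird setup and the new calico_ipam_host_local: false default
(Typha, etcd, dual stack, IP forwarding, feature_control, MTU and the
CNI log path all left unset, network policy enabled; the CNI name is
an assumed value and comments are trimmed):

kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  cluster_type: "kubespray,bgp"
  calico_backend: "bird"
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion":"0.3.1",
      "plugins":[
        {
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "type": "calico",
          "log_level": "info",
          "ipam": {
            "type": "calico-ipam",
            "assign_ipv4": "true"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        { "type":"portmap", "capabilities": { "portMappings": true } },
        { "type":"bandwidth", "capabilities": { "bandwidth": true } }
      ]
    }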