Merge pull request #12101 from tico88612/refactor/cilium-install

Refactor Cilium CNI installation
Commit 019cf2ab42 by Kubernetes Prow Robot, 2025-05-20 01:01:15 -07:00 (committed by GitHub)
32 changed files with 250 additions and 2083 deletions

@ -119,7 +119,7 @@ Note:
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1
- [calico](https://github.com/projectcalico/calico) 3.29.3
- [cilium](https://github.com/cilium/cilium) 1.15.9
- [cilium](https://github.com/cilium/cilium) 1.17.3
- [flannel](https://github.com/flannel-io/flannel) 0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
- [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1

@ -237,7 +237,7 @@ cilium_operator_extra_volume_mounts:
## Choose Cilium version
```yml
cilium_version: "1.15.9"
cilium_version: "1.17.3"
```
## Add variable to config

@ -113,7 +113,7 @@ flannel_cni_version: 1.1.2
weave_version: 2.8.7
cni_version: "{{ (cni_binary_checksums['amd64'] | dict2items)[0].key }}"
cilium_version: "1.15.9"
cilium_version: "1.17.3"
cilium_cli_version: "{{ (ciliumcli_binary_checksums['amd64'] | dict2items)[0].key }}"
cilium_enable_hubble: false
@ -261,13 +261,13 @@ cilium_operator_image_tag: "v{{ cilium_version }}"
cilium_hubble_relay_image_repo: "{{ quay_image_repo }}/cilium/hubble-relay"
cilium_hubble_relay_image_tag: "v{{ cilium_version }}"
cilium_hubble_certgen_image_repo: "{{ quay_image_repo }}/cilium/certgen"
cilium_hubble_certgen_image_tag: "v0.1.8"
cilium_hubble_certgen_image_tag: "v0.2.1"
cilium_hubble_ui_image_repo: "{{ quay_image_repo }}/cilium/hubble-ui"
cilium_hubble_ui_image_tag: "v0.11.0"
cilium_hubble_ui_image_tag: "v0.13.2"
cilium_hubble_ui_backend_image_repo: "{{ quay_image_repo }}/cilium/hubble-ui-backend"
cilium_hubble_ui_backend_image_tag: "v0.11.0"
cilium_hubble_envoy_image_repo: "{{ docker_image_repo }}/envoyproxy/envoy"
cilium_hubble_envoy_image_tag: "v1.22.5"
cilium_hubble_ui_backend_image_tag: "v0.13.2"
cilium_hubble_envoy_image_repo: "{{ quay_image_repo }}/cilium/cilium-envoy"
cilium_hubble_envoy_image_tag: "v1.32.5-1744305768-f9ddca7dcd91f7ca25a505560e655c47d3dec2cf"
kube_ovn_container_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn"
kube_ovn_container_image_tag: "v{{ kube_ovn_version }}"
kube_ovn_vpc_container_image_repo: "{{ docker_image_repo }}/kubeovn/vpc-nat-gateway"

@ -556,6 +556,11 @@ calicoctl_binary_checksums:
3.27.0: sha256:3de46d8bc30c6f9d9387d484ed62a5655c1f204b1b831b5a90f0a0d1c1ffd752
ciliumcli_binary_checksums:
arm64:
0.18.3: sha256:e0588268fc9ab6e0b7a363c4e15ecf69ed2a4cade956ab272745262e456f0e54
0.18.2: sha256:db3fae09ba005d6d345858655777bb5c972c9c841f98dc3fad3455d3084dba61
0.18.1: sha256:e6556fc7ccd071d7612446945d361c869dfeb423e0738147e0b46b2550bc2bf9
0.18.0: sha256:fd20a79875c8089694fb9b5dc3a0bf89d51711f9239637931ff0ace76ce78816
0.17.0: sha256:dee29ad27f3958882b450019e2021698282e8fcf8b136c27397798102cc1ad13
0.16.24: sha256:cf7f1276bbcf4aa5e6347d5619efe990cf1340d5898f8405931e277a1f76c670
0.16.23: sha256:7973302bead01c3f2e1d0f03e2766a0d6e76d3c52c666c750b9871a28b9afb32
0.16.22: sha256:b70c15e40b36ac34d59597f2448c5b4e0033964c517f926dbb9654aa07fb1e5b
@ -591,6 +596,11 @@ ciliumcli_binary_checksums:
0.15.16: sha256:86ed6a2e796c39dd00072e7c141fc35b68d63392d1ac5e183a7ce9d7263e23a0
0.15.15: sha256:5c1693ea163b094a92ebc6997b6e678cc8c24a52040c22433b58b419de74b28f
amd64:
0.18.3: sha256:5fe565f3b98b5846b867319aa76bc057fca37894d80db56edc20e4e809d10b25
0.18.2: sha256:1b4bd5fd5c96ab1195cd4eb56841c983a21149c62ee39922b7955f1cd0eda23a
0.18.1: sha256:c472639d460173e8d807a3f57048f9d1bcdb325e9edba320550d7ec62b72f956
0.18.0: sha256:3ac8bd270763e40a7853c73f8c7ec9e49707e1723801884a083dc25469b6b4ba
0.17.0: sha256:4ba0687ff7d47e182a7328409fb0eae123e64fa6099cd6f8b9bf240c0012ecf4
0.16.24: sha256:019c9c765222b3db5786f7b3a0bff2cd62944a8ce32681acfb47808330f405a7
0.16.23: sha256:e7cd3b982eca9b6214226536a147490ebb6ea3caad40d5a724daeea0bec5e3be
0.16.22: sha256:8bd9faae272aef2e75c686a55de782018013098b66439a1ee0c8ff1e05c5d32c

@ -1,9 +1,9 @@
---
cilium_min_version_required: "1.10"
cilium_min_version_required: "1.15"
# Log-level
cilium_debug: false
cilium_mtu: ""
cilium_mtu: "0"
cilium_enable_ipv4: "{{ ipv4_stack }}"
cilium_enable_ipv6: "{{ ipv6_stack }}"
@ -11,7 +11,7 @@ cilium_enable_ipv6: "{{ ipv6_stack }}"
cilium_l2announcements: false
# Cilium agent health port
cilium_agent_health_port: "{%- if cilium_version is version('1.11.6', '>=') -%}9879{%- else -%}9876{%- endif -%}"
cilium_agent_health_port: "9879"
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
@ -26,7 +26,7 @@ cilium_agent_health_port: "{%- if cilium_version is version('1.11.6', '>=') -%}9
# - --synchronize-k8s-nodes
# - --identity-allocation-mode=kvstore
# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
cilium_identity_allocation_mode: kvstore
cilium_identity_allocation_mode: crd
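# Example (a sketch, using the operator flags noted above): to store identities
# in etcd instead of CRDs, override the mode and pass the matching flags, e.g.:
# cilium_identity_allocation_mode: kvstore
# cilium_operator_extra_args:
#   - --synchronize-k8s-nodes
#   - --identity-allocation-mode=kvstore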
# Etcd SSL dirs
cilium_cert_dir: /etc/cilium/certs
@ -55,20 +55,20 @@ cilium_enable_prometheus: false
cilium_enable_portmap: false
# Monitor aggregation level (none/low/medium/maximum)
cilium_monitor_aggregation: medium
# Kube Proxy Replacement mode (strict/partial)
cilium_kube_proxy_replacement: partial
# Kube Proxy Replacement mode (true/false)
cilium_kube_proxy_replacement: false
# If `cilium_dns_proxy_enable_transparent_mode` is not defined, Cilium's default behavior is used.
# When Cilium is configured to replace kube-proxy, it automatically enables dnsProxy, which conflicts with nodelocaldns.
# You can set it to `false` to avoid the conflict with nodelocaldns.
# https://github.com/cilium/cilium/issues/33144
# cilium_dns_proxy_enable_transparent_mode:
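# Example (sketch): when nodelocaldns is enabled together with kube-proxy
# replacement, explicitly disabling transparent mode avoids the conflict:
# cilium_dns_proxy_enable_transparent_mode: false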
# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
cilium_preallocate_bpf_maps: false
# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
cilium_tofqdns_enable_poller: false
# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
cilium_enable_legacy_services: false
# Auto direct node routes can be used to advertise pod routes in your cluster
# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
# This works only if you have L2 connectivity between all your nodes.
@ -100,8 +100,8 @@ cilium_encryption_enabled: false
cilium_encryption_type: "ipsec"
# Enable encryption for pure node to node traffic.
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
cilium_ipsec_node_encryption: false
# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
cilium_encryption_node_encryption: false
# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation.
# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
@ -115,6 +115,7 @@ cilium_wireguard_userspace_fallback: false
# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.
# Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
cilium_enable_bandwidth_manager: false
cilium_enable_bandwidth_manager_bbr: false
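# Example (a sketch): with the bandwidth manager enabled, egress rate limits
# are declared per Pod via the standard annotation, e.g.:
#   metadata:
#     annotations:
#       kubernetes.io/egress-bandwidth: "10M"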
# IP Masquerade Agent
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
@ -137,6 +138,7 @@ cilium_non_masquerade_cidrs:
### Indicates whether to masquerade traffic to the link-local prefix.
### If masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
cilium_masq_link_local: false
cilium_masq_link_local_ipv6: false
### A time interval at which the agent attempts to reload config from disk
cilium_ip_masq_resync_interval: 60s
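# Example (a sketch, values assumed): a typical override keeps the RFC1918
# ranges unmasqueraded, e.g.:
# cilium_ip_masq_agent_enable: true
# cilium_non_masquerade_cidrs:
#   - 10.0.0.0/8
#   - 172.16.0.0/12
#   - 192.168.0.0/16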
@ -145,10 +147,10 @@ cilium_ip_masq_resync_interval: 60s
cilium_enable_hubble: false
### Enable Hubble-ui
cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}"
### Enable Hubble Metrics
### Enable Hubble Metrics (deprecated)
cilium_enable_hubble_metrics: false
### if cilium_enable_hubble_metrics: true
cilium_hubble_metrics: {}
cilium_hubble_metrics: []
# - dns
# - drop
# - tcp
@ -160,12 +162,25 @@ cilium_hubble_install: false
### Enable auto generate certs if cilium_hubble_install: true
cilium_hubble_tls_generate: false
cilium_hubble_export_file_max_backups: "5"
cilium_hubble_export_file_max_size_mb: "10"
cilium_hubble_export_dynamic_enabled: false
cilium_hubble_export_dynamic_config_content:
- name: all
fieldMask: []
includeFilters: []
excludeFilters: []
filePath: "/var/run/cilium/hubble/events.log"
### Capacity of Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535
### (i.e. 1, 3, ..., 2047, 4095, ..., 65535) (default 4095)
# cilium_hubble_event_buffer_capacity: 4095
### Buffer size of the channel to receive monitor events.
# cilium_hubble_event_queue_size: 50
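# Example (sketch): valid capacities are 2^n - 1, so to double the default
# 4095 (= 2^12 - 1) buffer, use 8191 (= 2^13 - 1):
# cilium_hubble_event_buffer_capacity: 8191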
cilium_gateway_api_enabled: false
# The default IP address management mode is "Cluster Scope".
# https://docs.cilium.io/en/stable/concepts/networking/ipam/
cilium_ipam_mode: cluster-pool
@ -190,7 +205,8 @@ cilium_ipam_mode: cluster-pool
# Extra arguments for the Cilium agent
cilium_agent_custom_args: []
cilium_agent_custom_args: [] # deprecated
cilium_agent_extra_args: []
# For adding and mounting extra volumes to the cilium agent
cilium_agent_extra_volumes: []
@ -214,13 +230,19 @@ cilium_operator_extra_volumes: []
cilium_operator_extra_volume_mounts: []
# Extra arguments for the Cilium Operator
cilium_operator_custom_args: []
cilium_operator_custom_args: [] # deprecated
cilium_operator_extra_args: []
# Tolerations of the cilium operator
cilium_operator_tolerations:
- operator: "Exists"
# Unique ID of the cluster. Must be unique across all connected
# clusters and in the range of 1 to 255. Only required for Cluster Mesh,
# may be 0 if Cluster Mesh is not used.
cilium_cluster_id: 0
# Name of the cluster. Only relevant when building a mesh of clusters.
# The "default" name cannot be used if the Cluster ID is different from 0.
cilium_cluster_name: default
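# Example (a sketch; the name "cluster-east" is hypothetical): a member of a
# two-cluster mesh needs a unique non-zero ID and a non-default name, e.g.:
# cilium_cluster_id: 1
# cilium_cluster_name: cluster-east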
# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
@ -263,7 +285,7 @@ cilium_enable_bpf_masquerade: false
# host stack (true) or directly and more efficiently out of BPF (false) if
# the kernel supports it. The latter has the implication that it will also
# bypass netfilter in the host namespace.
cilium_enable_host_legacy_routing: true
cilium_enable_host_legacy_routing: false
# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
@ -307,9 +329,9 @@ cilium_rolling_restart_wait_retries_count: 30
cilium_rolling_restart_wait_retries_delay_seconds: 10
# Cilium changed the default metrics exporter ports in 1.12
cilium_agent_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9962', '9090') }}"
cilium_operator_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9963', '6942') }}"
cilium_hubble_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9965', '9091') }}"
cilium_agent_scrape_port: "9962"
cilium_operator_scrape_port: "9963"
cilium_hubble_scrape_port: "9965"
# Cilium certgen args for generate certificate for hubble mTLS
cilium_certgen_args:
@ -328,26 +350,5 @@ cilium_certgen_args:
hubble-relay-client-cert-secret-name: hubble-relay-client-certs
hubble-relay-server-cert-generate: false
# A list of extra rules variables to add to clusterrole for cilium operator, formatted like:
# cilium_clusterrole_rules_operator_extra_vars:
# - apiGroups:
# - '""'
# resources:
# - pods
# verbs:
# - delete
# - apiGroups:
# - '""'
# resources:
# - nodes
# verbs:
# - list
# - watch
# resourceNames:
# - toto
cilium_clusterrole_rules_operator_extra_vars: []
cilium_enable_host_firewall: false
cilium_policy_audit_mode: false
cilium_hubble_export_file_max_backups: "5"
cilium_hubble_export_file_max_size_mb: "10"

@ -1,14 +1,7 @@
---
- name: Cilium | Start Resources
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.name }}-{{ item.item.file }}"
state: "latest"
loop: "{{ cilium_node_manifests.results }}"
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Cilium | Install
command: "{{ bin_dir }}/cilium install --version {{ cilium_version }} -f {{ kube_config_dir }}/cilium-values.yaml"
when: inventory_hostname == groups['kube_control_plane'][0]
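# A follow-up check could use the CLI's built-in readiness gate, for example
# (a sketch; `cilium status --wait` blocks until all components report ready):
# - name: Cilium | Verify status
#   command: "{{ bin_dir }}/cilium status --wait"
#   when: inventory_hostname == groups['kube_control_plane'][0]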
- name: Cilium | Wait for pods to run
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa literal-compare
@ -19,19 +12,6 @@
failed_when: false
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cilium | Hubble install
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/hubble/{{ item.item.name }}-{{ item.item.file }}"
state: "latest"
loop: "{{ cilium_hubble_manifests.results }}"
when:
- inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- cilium_enable_hubble and cilium_hubble_install
- name: Cilium | Wait for CiliumLoadBalancerIPPool CRD to be present
command: "{{ kubectl }} wait --for condition=established --timeout=60s crd/ciliumloadbalancerippools.cilium.io"
register: cillium_lbippool_crd_ready
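# Example (a sketch; pool name and CIDR assumed): the CRD being waited on
# backs resources such as:
# apiVersion: cilium.io/v2alpha1
# kind: CiliumLoadBalancerIPPool
# metadata:
#   name: example-pool
# spec:
#   blocks:
#     - cidr: 10.0.10.0/24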

@ -48,7 +48,7 @@
msg: "cilium_encryption_type must be either 'ipsec' or 'wireguard'"
when: cilium_encryption_enabled
- name: Stop if cilium_version is < 1.10.0
- name: Stop if cilium_version is < {{ cilium_min_version_required }}
assert:
that: cilium_version is version(cilium_min_version_required, '>=')
msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}"

@ -30,58 +30,6 @@
when:
- cilium_identity_allocation_mode == "kvstore"
- name: Cilium | Create hubble dir
file:
path: "{{ kube_config_dir }}/addons/hubble"
state: directory
owner: root
group: root
mode: "0755"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- cilium_hubble_install
- name: Cilium | Create Cilium node manifests
template:
src: "{{ item.name }}/{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}"
mode: "0644"
loop:
- {name: cilium, file: config.yml, type: cm}
- {name: cilium-operator, file: crb.yml, type: clusterrolebinding}
- {name: cilium-operator, file: cr.yml, type: clusterrole}
- {name: cilium, file: crb.yml, type: clusterrolebinding}
- {name: cilium, file: cr.yml, type: clusterrole}
- {name: cilium, file: secret.yml, type: secret, when: "{{ cilium_encryption_enabled and cilium_encryption_type == 'ipsec' }}"}
- {name: cilium, file: ds.yml, type: ds}
- {name: cilium-operator, file: deploy.yml, type: deploy}
- {name: cilium-operator, file: sa.yml, type: sa}
- {name: cilium, file: sa.yml, type: sa}
register: cilium_node_manifests
when:
- ('kube_control_plane' in group_names)
- item.when | default(True) | bool
- name: Cilium | Create Cilium Hubble manifests
template:
src: "{{ item.name }}/{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/hubble/{{ item.name }}-{{ item.file }}"
mode: "0644"
loop:
- {name: hubble, file: config.yml, type: cm}
- {name: hubble, file: crb.yml, type: clusterrolebinding}
- {name: hubble, file: cr.yml, type: clusterrole}
- {name: hubble, file: cronjob.yml, type: cronjob, when: "{{ cilium_hubble_tls_generate }}"}
- {name: hubble, file: deploy.yml, type: deploy}
- {name: hubble, file: job.yml, type: job, when: "{{ cilium_hubble_tls_generate }}"}
- {name: hubble, file: sa.yml, type: sa}
- {name: hubble, file: service.yml, type: service}
register: cilium_hubble_manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]
- cilium_enable_hubble and cilium_hubble_install
- item.when | default(True) | bool
- name: Cilium | Enable portmap addon
template:
src: 000-cilium-portmap.conflist.j2
@ -89,6 +37,14 @@
mode: "0644"
when: cilium_enable_portmap
- name: Cilium | Render values
template:
src: values.yaml.j2
dest: "{{ kube_config_dir }}/cilium-values.yaml"
mode: "0644"
when:
- inventory_hostname == groups['kube_control_plane'][0]
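# The rendered file feeds `cilium install -f`; a minimal sketch of what
# values.yaml.j2 might emit (Helm value names assumed from the defaults above):
# cluster:
#   id: {{ cilium_cluster_id }}
#   name: {{ cilium_cluster_name }}
# kubeProxyReplacement: {{ cilium_kube_proxy_replacement }}
# ipam:
#   mode: {{ cilium_ipam_mode }}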
- name: Cilium | Copy Ciliumcli binary from download dir
copy:
src: "{{ local_release_dir }}/cilium"

@ -1,193 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
rules:
- apiGroups:
- ""
resources:
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- pods
verbs:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform LB IP allocation for BGP
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/status
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
{% if cilium_version is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
{% if cilium_version is version('1.12', '>=') %}
- ciliumbgploadbalancerippools
- ciliumloadbalancerippools
- ciliumloadbalancerippools/status
- ciliumbgppeeringpolicies
- ciliumenvoyconfigs
{% endif %}
{% if cilium_version is version('1.15', '>=') %}
- ciliumbgppeerconfigs
- ciliumbgpadvertisements
- ciliumbgpnodeconfigs
{% endif %}
{% if cilium_version is version('1.16', '>=') %}
- ciliumbgpclusterconfigs
- ciliumbgpclusterconfigs/status
- ciliumbgpnodeconfigoverrides
{% endif %}
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- update
- watch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
{% if cilium_version is version('1.12', '>=') %}
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumbgploadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
{% if cilium_version is version('1.14', '>=') %}
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
- ciliumloadbalancerippools.cilium.io
{% endif %}
{% if cilium_version is version('1.15', '>=') %}
- ciliumbgpclusterconfigs.cilium.io
- ciliumbgppeerconfigs.cilium.io
- ciliumbgpadvertisements.cilium.io
- ciliumbgpnodeconfigs.cilium.io
- ciliumbgpnodeconfigoverrides.cilium.io
{% endif %}
{% endif %}
{% for rules in cilium_clusterrole_rules_operator_extra_vars %}
- apiGroups:
{% for api in rules['apiGroups'] %}
- {{ api }}
{% endfor %}
resources:
{% for resource in rules['resources'] %}
- {{ resource }}
{% endfor %}
verbs:
{% for verb in rules['verbs'] %}
- {{ verb }}
{% endfor %}
{% if 'resourceNames' in rules %}
resourceNames:
{% for resourceName in rules['resourceNames'] %}
- {{ resourceName }}
{% endfor %}
{% endif %}
{% endfor %}

@ -1,13 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system

@ -1,170 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
spec:
{% if groups.k8s_cluster | length == 1 %}
replicas: 1
{% else %}
replicas: {{ cilium_operator_replicas }}
{% endif %}
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
{% if cilium_enable_prometheus %}
annotations:
prometheus.io/port: "{{ cilium_operator_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- name: cilium-operator
image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- cilium-operator
args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
{% if cilium_operator_custom_args is string %}
- {{ cilium_operator_custom_args }}
{% else %}
{% for flag in cilium_operator_custom_args %}
- {{ flag }}
{% endfor %}
{% endif %}
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_ACCESS_KEY_ID
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_SECRET_ACCESS_KEY
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
name: cilium-aws
key: AWS_DEFAULT_REGION
optional: true
{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
{% if cilium_enable_prometheus %}
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
ports:
- name: prometheus
containerPort: {{ cilium_operator_scrape_port }}
hostPort: {{ cilium_operator_scrape_port }}
protocol: TCP
{% endif %}
livenessProbe:
httpGet:
{% if cilium_enable_ipv4 %}
host: 127.0.0.1
{% else %}
host: '::1'
{% endif %}
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
{% if cilium_identity_allocation_mode == "kvstore" %}
- name: etcd-config-path
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{ cilium_cert_dir }}"
readOnly: true
{% endif %}
{% for volume_mount in cilium_operator_extra_volume_mounts %}
- {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }}
{% endfor %}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
io.cilium/app: operator
tolerations:
{{ cilium_operator_tolerations | list | to_nice_yaml(indent=2) | indent(8) }}
volumes:
- name: cilium-config-path
configMap:
name: cilium-config
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{ cilium_cert_dir }}"
{% endif %}
{% for volume in cilium_operator_extra_volumes %}
- {{ volume | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}

@ -1,6 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system

@ -1,301 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
identity-allocation-mode: {{ cilium_identity_allocation_mode }}
{% if cilium_identity_allocation_mode == "kvstore" %}
# This etcd-config contains the etcd endpoints of your cluster. If you use
# TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
etcd-config: |-
---
endpoints:
{% for ip_addr in etcd_access_addresses.split(',') %}
- {{ ip_addr }}
{% endfor %}
# In case you want to use TLS in etcd, uncomment the 'ca-file' line
# and create a kubernetes secret by following the tutorial in
# https://cilium.link/etcd-config
{% if cilium_version | regex_replace('v') is version('1.17.0', '>=') %}
trusted-ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
{% else %}
ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
{% endif %}
# In case you want client to server authentication, uncomment the following
# lines and create a kubernetes secret by following the tutorial in
# https://cilium.link/etcd-config
key-file: "{{ cilium_cert_dir }}/key.pem"
cert-file: "{{ cilium_cert_dir }}/cert.crt"
# kvstore
# https://docs.cilium.io/en/latest/cmdref/kvstore/
kvstore: etcd
kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
{% endif %}
# If you want metrics enabled in all of your Cilium agents, set the port for
# which the Cilium agents will have their metrics exposed.
# This option deprecates the "prometheus-serve-addr" in the
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
{% if cilium_enable_prometheus %}
prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}"
operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}"
enable-metrics: "true"
{% endif %}
# If you want to run cilium in debug mode change this value to true
debug: "{{ cilium_debug }}"
enable-ipv4: "{{ cilium_enable_ipv4 }}"
enable-ipv6: "{{ cilium_enable_ipv6 }}"
# If a serious issue occurs during Cilium startup, this
# invasive option may be set to true to remove all persistent
# state. Endpoints will not be restored using knowledge from a
# prior Cilium run, so they may receive new IP addresses upon
# restart. This also triggers clean-cilium-bpf-state.
clean-cilium-state: "false"
# If you want to clean cilium BPF state, set this to true;
# Removes all BPF maps from the filesystem. Upon restart,
# endpoints are restored with the same IP addresses, however
# any ongoing connections may be disrupted briefly.
# Loadbalancing decisions will be reset, so any ongoing
# connections via a service may be loadbalanced to a different
# backend after restart.
clean-cilium-bpf-state: "false"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
{% if cilium_version is version('1.14.0', '>=') %}
# Tell the agent to generate and write a CNI configuration file
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
cni-exclusive: "{{ cilium_cni_exclusive }}"
cni-log-file: "{{ cilium_cni_log_file }}"
{% endif %}
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
monitor-aggregation: "{{ cilium_monitor_aggregation }}"
# ct-global-max-entries-* specifies the maximum number of connections
# supported across all endpoints, split by protocol: tcp or other. One pair
# of maps uses these values for IPv4 connections, and another pair of maps
# uses these values for IPv6 connections.
#
# If these values are modified, then during the next Cilium startup the
# tracking of ongoing connections may be disrupted. This may lead to brief
# policy drops or a change in loadbalancing decisions for a connection.
#
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, comment out these options.
bpf-ct-global-tcp-max: "524288"
bpf-ct-global-any-max: "262144"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
{% if cilium_version is version('1.14.0', '<') %}
tunnel: "{{ cilium_tunnel_mode }}"
{% elif cilium_version is version('1.14.0', '>=') and cilium_tunnel_mode == 'disabled' %}
routing-mode: 'native'
{% elif cilium_version is version('1.14.0', '>=') and cilium_tunnel_mode != 'disabled' %}
routing-mode: 'tunnel'
tunnel-protocol: "{{ cilium_tunnel_mode }}"
{% endif %}
## DSR setting
bpf-lb-mode: "{{ cilium_loadbalancer_mode }}"
# l2
enable-l2-announcements: "{{ cilium_l2announcements }}"
# Enable Bandwidth Manager
# Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.
# Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
{% if cilium_enable_bandwidth_manager %}
enable-bandwidth-manager: "true"
{% endif %}
# Host Firewall and Policy Audit Mode
enable-host-firewall: "{{ cilium_enable_host_firewall | capitalize }}"
policy-audit-mode: "{{ cilium_policy_audit_mode | capitalize }}"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: "{{ cilium_cluster_name }}"
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
#cluster-id: 1
{% if cilium_cluster_id is defined %}
cluster-id: "{{ cilium_cluster_id }}"
{% endif %}
# `wait-bpf-mount` is removed after v1.10.4
# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da
{% if cilium_version is version('1.10.4', '<') %}
# wait-bpf-mount makes init container wait until bpf filesystem is mounted
wait-bpf-mount: "false"
{% endif %}
# `kube-proxy-replacement=partial|strict|disabled` is deprecated since January 2024 and unsupported in 1.16.
# Replaced by `kube-proxy-replacement=true|false`
# https://github.com/cilium/cilium/pull/31286
{% if cilium_version is version('1.16', '<') %}
kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}"
{% else %}
kube-proxy-replacement: "{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %}true{% else %}false{% endif %}"
{% endif %}
# `native-routing-cidr` is deprecated in 1.10, removed in 1.12.
# Replaced by `ipv4-native-routing-cidr`
# https://github.com/cilium/cilium/pull/16695
{% if cilium_version is version('1.12', '<') %}
native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% else %}
{% if cilium_native_routing_cidr | length %}
ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}"
{% endif %}
{% if cilium_native_routing_cidr_ipv6 | length %}
ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}"
{% endif %}
{% endif %}
auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}"
operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}"
# Hubble settings
{% if cilium_enable_hubble %}
enable-hubble: "true"
{% if cilium_enable_hubble_metrics %}
hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}"
hubble-metrics:
{% for hubble_metrics_cycle in cilium_hubble_metrics %}
{{ hubble_metrics_cycle }}
{% endfor %}
{% endif %}
{% if cilium_hubble_event_buffer_capacity is defined %}
hubble-event-buffer-capacity: "{{ cilium_hubble_event_buffer_capacity }}"
{% endif %}
{% if cilium_hubble_event_queue_size is defined %}
hubble-event-queue-size: "{{ cilium_hubble_event_queue_size }}"
{% endif %}
hubble-listen-address: ":4244"
{% if cilium_enable_hubble and cilium_hubble_install %}
hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
{% endif %}
hubble-export-file-max-backups: "{{ cilium_hubble_export_file_max_backups }}"
hubble-export-file-max-size-mb: "{{ cilium_hubble_export_file_max_size_mb }}"
{% endif %}
# IP Masquerade Agent
enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}"
{% for key, value in cilium_config_extra_vars.items() %}
{{ key }}: "{{ value }}"
{% endfor %}
# Enable transparent network encryption
{% if cilium_encryption_enabled %}
{% if cilium_encryption_type == "ipsec" %}
enable-ipsec: "true"
ipsec-key-file: /etc/ipsec/keys
encrypt-node: "{{ cilium_ipsec_node_encryption }}"
{% endif %}
{% if cilium_encryption_type == "wireguard" %}
enable-wireguard: "true"
enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}"
{% endif %}
{% endif %}
# IPAM settings
ipam: "{{ cilium_ipam_mode }}"
{% if cilium_ipam_mode == "cluster-pool" %}
cluster-pool-ipv4-cidr: "{{ cilium_pool_cidr | default(kube_pods_subnet) }}"
cluster-pool-ipv4-mask-size: "{{ cilium_pool_mask_size | default(kube_network_node_prefix) }}"
{% if cilium_enable_ipv6 %}
cluster-pool-ipv6-cidr: "{{ cilium_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}"
cluster-pool-ipv6-mask-size: "{{ cilium_pool_mask_size_ipv6 | default(kube_network_node_prefix_ipv6) }}"
{% endif %}
{% endif %}
agent-health-port: "{{ cilium_agent_health_port }}"
{% if cilium_version is version('1.11', '>=') and cilium_cgroup_host_root != '' %}
cgroup-root: "{{ cilium_cgroup_host_root }}"
{% endif %}
bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}"
enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}"
enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}"
enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}"
enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}"
enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}"
enable-well-known-identities: "{{ cilium_enable_well_known_identities }}"
monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}"
enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}"
enable-bgp-control-plane: "{{ cilium_enable_bgp_control_plane }}"
disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}"
{% if cilium_ip_masq_agent_enable %}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ip-masq-agent
namespace: kube-system
data:
config: |
nonMasqueradeCIDRs:
{% for cidr in cilium_non_masquerade_cidrs %}
- {{ cidr }}
{% endfor %}
masqLinkLocal: {{ cilium_masq_link_local | bool }}
resyncInterval: "{{ cilium_ip_masq_resync_interval }}"
{% endif %}

@ -1,166 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- pods
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
{% if cilium_version is version('1.12', '<') %}
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- update
{% endif %}
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
# Deprecated for removal in v1.10
- create
- list
- watch
- update
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then
# should be removed ideally.
- get
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumegressnatpolicies
{% if cilium_version is version('1.11', '>=') %}
- ciliumendpointslices
{% endif %}
{% if cilium_version is version('1.12', '>=') %}
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
{% if cilium_version is version('1.13', '>=') %}
- ciliumloadbalancerippools
{% endif %}
{% endif %}
{% if cilium_version is version('1.11.5', '<') %}
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints/finalizers
- ciliumnodes/finalizers
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies/finalizers
{% endif %}
{% if cilium_version is version('1.14', '>=') %}
- ciliuml2announcementpolicies/status
{% endif %}
{% if cilium_version is version('1.15', '>=') %}
- ciliumbgpnodeconfigs
- ciliumbgpnodeconfigs/status
- ciliumbgpadvertisements
- ciliumbgppeerconfigs
{% endif %}
{% if cilium_version is version('1.16', '>=') %}
- ciliumbgpclusterconfigs
{% endif %}
verbs:
- '*'
{% if cilium_version is version('1.12', '>=') %}
- apiGroups:
- cilium.io
resources:
- ciliumclusterwideenvoyconfigs
- ciliumenvoyconfigs
- ciliumegressgatewaypolicies
verbs:
- list
- watch
{% endif %}
{% if cilium_version is version('1.14', '>=') %}
- apiGroups:
- cilium.io
resources:
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
- ciliumloadbalancerippools
- ciliuml2announcementpolicies/status
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- list
- delete
{% endif %}

@ -1,13 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system

@ -1,446 +0,0 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
labels:
k8s-app: cilium
spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
# Specifies the maximum number of Pods that can be unavailable during the update process.
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
{% if cilium_enable_prometheus %}
prometheus.io/port: "{{ cilium_agent_scrape_port }}"
prometheus.io/scrape: "true"
{% endif %}
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels:
k8s-app: cilium
spec:
containers:
- name: cilium-agent
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- cilium-agent
args:
- --config-dir=/tmp/cilium/config-map
{% if cilium_mtu != "" %}
- --mtu={{ cilium_mtu }}
{% endif %}
{% if cilium_agent_custom_args is string %}
- {{ cilium_agent_custom_args }}
{% else %}
{% for flag in cilium_agent_custom_args %}
- {{ flag }}
{% endfor %}
{% endif %}
startupProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
livenessProbe:
httpGet:
host: '127.0.0.1'
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: {{ cilium_agent_health_port }}
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
{% for env_var in cilium_agent_extra_env_vars %}
- {{ env_var | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
lifecycle:
{% if cilium_version is version('1.14', '<') %}
postStart:
exec:
command:
- "/cni-install.sh"
- "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}"
{% if cilium_version is version('1.12', '>=') %}
- "--enable-debug={{ cilium_debug | string | lower }}"
- "--log-file={{ cilium_cni_log_file }}"
{% endif %}
{% endif %}
preStop:
exec:
command:
- /cni-uninstall.sh
resources:
limits:
cpu: {{ cilium_cpu_limit }}
memory: {{ cilium_memory_limit }}
requests:
cpu: {{ cilium_cpu_requests }}
memory: {{ cilium_memory_requests }}
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
ports:
{% endif %}
{% if cilium_enable_prometheus %}
- name: prometheus
containerPort: {{ cilium_agent_scrape_port }}
hostPort: {{ cilium_agent_scrape_port }}
protocol: TCP
{% endif %}
{% if cilium_enable_hubble_metrics %}
- name: hubble-metrics
containerPort: {{ cilium_hubble_scrape_port }}
hostPort: {{ cilium_hubble_scrape_port }}
protocol: TCP
{% endif %}
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: cilium-run
mountPath: /var/run/cilium
{% if cilium_version is version('1.13.1', '<') %}
- name: cni-path
mountPath: /host/opt/cni/bin
{% endif %}
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
{% if cilium_identity_allocation_mode == "kvstore" %}
- name: etcd-config-path
mountPath: /var/lib/etcd-config
readOnly: true
- name: etcd-secrets
mountPath: "{{ cilium_cert_dir }}"
readOnly: true
{% endif %}
- name: clustermesh-secrets
mountPath: /var/lib/cilium/clustermesh
readOnly: true
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
{% if cilium_ip_masq_agent_enable %}
- name: ip-masq-agent
mountPath: /etc/config
readOnly: true
{% endif %}
# Needed to be able to load kernel modules
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- name: cilium-ipsec-secrets
mountPath: /etc/ipsec
readOnly: true
{% endif %}
{% if cilium_hubble_install %}
- name: hubble-tls
mountPath: /var/lib/cilium/tls/hubble
readOnly: true
{% endif %}
{% for volume_mount in cilium_agent_extra_volume_mounts %}
- {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }}
{% endfor %}
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
{% if cilium_identity_allocation_mode == "kvstore" %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
hostNetwork: true
initContainers:
{% if cilium_version is version('1.11', '>=') and cilium_cgroup_auto_mount %}
- name: mount-cgroup
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: CGROUP_ROOT
value: {{ cilium_cgroup_host_root }}
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh and mount that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
securityContext:
privileged: true
{% endif %}
{% if cilium_version is version('1.11.7', '>=') %}
- name: apply-sysctl-overwrites
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
securityContext:
privileged: true
{% endif %}
- name: clean-cilium-state
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
# Removed in 1.11 and up.
# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9
{% if cilium_version is version('1.11', '<') %}
- name: CILIUM_WAIT_BPF_MOUNT
valueFrom:
configMapKeyRef:
key: wait-bpf-mount
name: cilium-config
optional: true
{% endif %}
{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %}
- name: KUBERNETES_SERVICE_HOST
value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
- name: KUBERNETES_SERVICE_PORT
value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
{% if cilium_version is version('1.11', '>=') %}
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: {{ cilium_cgroup_host_root }}
mountPropagation: HostToContainer
{% endif %}
- name: cilium-run
mountPath: /var/run/cilium
resources:
requests:
cpu: 100m
memory: 100Mi
{% if cilium_version is version('1.13.1', '>=') %}
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/install-plugin.sh"
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: cni-path
mountPath: /host/opt/cni/bin
{% endif %}
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
hostNetwork: true
# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service
{% if cilium_identity_allocation_mode == "kvstore" %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
k8s-app: cilium
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
{% if cilium_version is version('1.11', '>=') %}
# To mount cgroup2 filesystem on the host
- name: hostproc
hostPath:
path: /proc
type: Directory
# To keep state between restarts / upgrades for cgroup2 filesystem
- name: cilium-cgroup
hostPath:
path: {{ cilium_cgroup_host_root }}
type: DirectoryOrCreate
{% endif %}
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
# To be able to load kernel modules
- name: lib-modules
hostPath:
path: /lib/modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
{% if cilium_identity_allocation_mode == "kvstore" %}
# To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: etcd-config
path: etcd.config
# To read the k8s etcd secrets in case the user might want to use TLS
- name: etcd-secrets
hostPath:
path: "{{ cilium_cert_dir }}"
{% endif %}
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
secretName: cilium-clustermesh
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
optional: true
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config
{% if cilium_ip_masq_agent_enable %}
- name: ip-masq-agent
configMap:
name: ip-masq-agent
optional: true
items:
- key: config
path: ip-masq-agent
{% endif %}
{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %}
- name: cilium-ipsec-secrets
secret:
secretName: cilium-ipsec-keys
{% endif %}
{% if cilium_hubble_install %}
- name: hubble-tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-server-certs
optional: true
items:
- key: ca.crt
path: client-ca.crt
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
{% endif %}

@ -1,6 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system

@ -1,9 +0,0 @@
---
apiVersion: v1
data:
keys: {{ cilium_ipsec_key }}
kind: Secret
metadata:
name: cilium-ipsec-keys
namespace: kube-system
type: Opaque

@ -1,71 +0,0 @@
#jinja2: trim_blocks:False
---
# Source: cilium helm chart: cilium/templates/hubble-relay/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-relay-config
namespace: kube-system
data:
config.yaml: |
cluster-name: "{{ cilium_cluster_name }}"
peer-service: "hubble-peer.kube-system.svc.{{ dns_domain }}:443"
listen-address: :4245
metrics-listen-address: ":9966"
dial-timeout:
retry-timeout:
sort-buffer-len-max:
sort-buffer-drain-timeout:
tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
tls-client-key-file: /var/lib/hubble-relay/tls/client.key
tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt
tls-server-key-file: /var/lib/hubble-relay/tls/server.key
tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
---
# Source: cilium/templates/hubble-ui/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-ui-nginx
namespace: kube-system
data:
nginx.conf: |
server {
listen 8081;
{% if cilium_enable_ipv6 %}
listen [::]:8081;
{% endif %}
server_name localhost;
root /app;
index index.html;
client_max_body_size 1G;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
# CORS
add_header Access-Control-Allow-Methods "GET, POST, PUT, HEAD, DELETE, OPTIONS";
add_header Access-Control-Allow-Origin *;
add_header Access-Control-Max-Age 1728000;
add_header Access-Control-Expose-Headers content-length,grpc-status,grpc-message;
add_header Access-Control-Allow-Headers range,keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout;
if ($request_method = OPTIONS) {
return 204;
}
# /CORS
location /api {
proxy_http_version 1.1;
proxy_pass_request_headers on;
proxy_hide_header Access-Control-Allow-Origin;
proxy_pass http://127.0.0.1:8090;
}
location / {
try_files $uri $uri/ /index.html;
}
}
}

@ -1,108 +0,0 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hubble-generate-certs
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- hubble-server-certs
- hubble-relay-client-certs
- hubble-relay-server-certs
verbs:
- update
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- hubble-ca-cert
verbs:
- update
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- hubble-ca-secret
verbs:
- get
{% endif %}
---
# Source: cilium/templates/hubble-relay-clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-relay
rules:
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
{% if cilium_enable_hubble_ui %}
---
# Source: cilium/templates/hubble-ui-clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- "*"
verbs:
- get
- list
- watch
{% endif %}

@ -1,46 +0,0 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: hubble-generate-certs
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-generate-certs
subjects:
- kind: ServiceAccount
name: hubble-generate-certs
namespace: kube-system
{% endif %}
---
# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-relay
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-relay
subjects:
- kind: ServiceAccount
namespace: kube-system
name: hubble-relay
{% if cilium_enable_hubble_ui %}
---
# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-ui
subjects:
- kind: ServiceAccount
namespace: kube-system
name: hubble-ui
{% endif %}

View File

@ -1,38 +0,0 @@
---
# Source: cilium/templates/hubble-generate-certs-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: hubble-generate-certs
namespace: kube-system
labels:
k8s-app: hubble-generate-certs
spec:
schedule: "0 0 1 */4 *"
concurrencyPolicy: Forbid
jobTemplate:
spec:
template:
metadata:
labels:
k8s-app: hubble-generate-certs
spec:
serviceAccount: hubble-generate-certs
serviceAccountName: hubble-generate-certs
containers:
- name: certgen
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/usr/bin/cilium-certgen"
# Because this is executed as a job, we pass the values as command
# line args instead of via config map. This allows users to inspect
# the values used in past runs by inspecting the completed pod.
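              # The schedule above re-issues the Hubble TLS certificates roughly
              # every four months. Illustrative only: a hypothetical entry such as
              # cilium_certgen_args: {"cilium-namespace": "kube-system"} (not set
              # by this commit) would render as "--cilium-namespace=kube-system".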
args:
{% for key, value in cilium_certgen_args.items() -%}
- "--{{ key }}={{ value }}"
{% endfor %}
hostNetwork: true
restartPolicy: OnFailure
ttlSecondsAfterFinished: 1800

View File

@ -1,203 +0,0 @@
---
# Source: cilium/templates/hubble-relay-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hubble-relay
labels:
k8s-app: hubble-relay
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-relay
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: hubble-relay
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "k8s-app"
operator: In
values:
- cilium
topologyKey: "kubernetes.io/hostname"
containers:
- name: hubble-relay
image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- hubble-relay
args:
- serve
ports:
- name: grpc
containerPort: 4245
{% if cilium_enable_prometheus %}
- name: prometheus
containerPort: 9966
protocol: TCP
{% endif %}
readinessProbe:
tcpSocket:
port: grpc
livenessProbe:
tcpSocket:
port: grpc
volumeMounts:
- mountPath: /var/run/cilium
name: hubble-sock-dir
readOnly: true
- mountPath: /etc/hubble-relay
name: config
readOnly: true
{% if cilium_hubble_tls_generate -%}
- mountPath: /var/lib/hubble-relay/tls
name: tls
readOnly: true
{%- endif %}
restartPolicy: Always
serviceAccount: hubble-relay
serviceAccountName: hubble-relay
terminationGracePeriodSeconds: 0
volumes:
- configMap:
name: hubble-relay-config
items:
- key: config.yaml
path: config.yaml
name: config
- hostPath:
path: /var/run/cilium
type: Directory
name: hubble-sock-dir
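      # hubble-relay-client-certs holds the client keypair relay presents to the
      # per-node Hubble servers; hubble-server-certs terminates relay's own TLS.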
{% if cilium_hubble_tls_generate -%}
- projected:
sources:
- secret:
name: hubble-relay-client-certs
items:
- key: ca.crt
path: hubble-server-ca.crt
- key: tls.crt
path: client.crt
- key: tls.key
path: client.key
- secret:
name: hubble-server-certs
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
name: tls
{%- endif %}
{% if cilium_enable_hubble_ui %}
---
# Source: cilium/templates/hubble-ui/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: kube-system
labels:
k8s-app: hubble-ui
name: hubble-ui
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-ui
template:
metadata:
annotations:
labels:
k8s-app: hubble-ui
spec:
securityContext:
runAsUser: 1001
serviceAccount: hubble-ui
serviceAccountName: hubble-ui
containers:
- name: frontend
image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
ports:
- containerPort: 8081
name: http
volumeMounts:
- name: hubble-ui-nginx-conf
mountPath: /etc/nginx/conf.d/default.conf
subPath: nginx.conf
- name: tmp-dir
mountPath: /tmp
resources:
{}
- name: backend
image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: EVENTS_SERVER_PORT
value: "8090"
{% if cilium_hubble_tls_generate -%}
- name: TLS_TO_RELAY_ENABLED
value: "true"
- name: FLOWS_API_ADDR
value: "hubble-relay:443"
- name: TLS_RELAY_SERVER_NAME
value: ui.{{ cilium_cluster_name }}.hubble-grpc.cilium.io
- name: TLS_RELAY_CA_CERT_FILES
value: /var/lib/hubble-ui/certs/hubble-server-ca.crt
- name: TLS_RELAY_CLIENT_CERT_FILE
value: /var/lib/hubble-ui/certs/client.crt
- name: TLS_RELAY_CLIENT_KEY_FILE
value: /var/lib/hubble-ui/certs/client.key
{% else -%}
- name: FLOWS_API_ADDR
value: "hubble-relay:80"
{% endif %}
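          # FLOWS_API_ADDR must match the hubble-relay Service port: 443 when
          # TLS certs are generated, plain 80 otherwise (see the Service template).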
{% if cilium_hubble_tls_generate -%}
volumeMounts:
- name: tls
mountPath: /var/lib/hubble-ui/certs
readOnly: true
{%- endif %}
ports:
- containerPort: 8090
name: grpc
resources:
{}
volumes:
- configMap:
defaultMode: 420
name: hubble-ui-nginx
name: hubble-ui-nginx-conf
{% if cilium_hubble_tls_generate -%}
- projected:
sources:
- secret:
name: hubble-relay-client-certs
items:
- key: ca.crt
path: hubble-server-ca.crt
- key: tls.crt
path: client.crt
- key: tls.key
path: client.key
name: tls
{%- endif %}
- emptyDir: {}
name: tmp-dir
{% endif %}

View File

@ -1,34 +0,0 @@
---
# Source: cilium/templates/hubble-generate-certs-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: hubble-generate-certs
namespace: kube-system
labels:
k8s-app: hubble-generate-certs
spec:
template:
metadata:
labels:
k8s-app: hubble-generate-certs
spec:
serviceAccount: hubble-generate-certs
serviceAccountName: hubble-generate-certs
containers:
- name: certgen
image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- "/usr/bin/cilium-certgen"
# Because this is executed as a job, we pass the values as command
# line args instead of via config map. This allows users to inspect
# the values used in past runs by inspecting the completed pod.
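          # Unlike the CronJob variant of this manifest, this one-shot Job
          # creates the initial certificates when Hubble is first deployed.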
args:
{% for key, value in cilium_certgen_args.items() -%}
- "--{{ key }}={{ value }}"
{% endfor %}
hostNetwork: true
restartPolicy: OnFailure
ttlSecondsAfterFinished: 1800

View File

@ -1,25 +0,0 @@
{% if cilium_hubble_tls_generate %}
---
# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-generate-certs
namespace: kube-system
{% endif %}
---
# Source: cilium/templates/hubble-relay-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-relay
namespace: kube-system
{% if cilium_enable_hubble_ui %}
---
# Source: cilium/templates/hubble-ui-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hubble-ui
namespace: kube-system
{% endif %}

View File

@ -1,106 +0,0 @@
{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %}
---
# Source: cilium/templates/cilium-agent-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-metrics
namespace: kube-system
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: "{{ cilium_hubble_scrape_port }}"
labels:
k8s-app: hubble
spec:
clusterIP: None
type: ClusterIP
ports:
- name: hubble-metrics
port: 9091
protocol: TCP
targetPort: hubble-metrics
selector:
k8s-app: cilium
---
# Source: cilium/templates/hubble-relay/metrics-service.yaml
# We use a separate service from hubble-relay which can be exposed externally
kind: Service
apiVersion: v1
metadata:
name: hubble-relay-metrics
namespace: kube-system
labels:
k8s-app: hubble-relay
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: "9966"
spec:
clusterIP: None
type: ClusterIP
selector:
k8s-app: hubble-relay
ports:
- name: metrics
port: 9966
protocol: TCP
targetPort: prometheus
{% endif %}
---
# Source: cilium/templates/hubble-relay-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-relay
namespace: kube-system
labels:
k8s-app: hubble-relay
spec:
type: ClusterIP
selector:
k8s-app: hubble-relay
ports:
- protocol: TCP
{% if cilium_hubble_tls_generate -%}
port: 443
{% else -%}
port: 80
{% endif -%}
targetPort: 4245
---
{% if cilium_enable_hubble_ui %}
# Source: cilium/templates/hubble-ui-service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-ui
labels:
k8s-app: hubble-ui
namespace: kube-system
spec:
selector:
k8s-app: hubble-ui
ports:
- name: http
port: 80
targetPort: 8081
type: ClusterIP
---
{% endif %}
# Source: cilium/templates/hubble/peer-service.yaml
apiVersion: v1
kind: Service
metadata:
name: hubble-peer
namespace: kube-system
labels:
k8s-app: cilium
spec:
selector:
k8s-app: cilium
ports:
- name: peer-service
port: 443
protocol: TCP
targetPort: 4244
internalTrafficPolicy: Local
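  # Port 4244 is the Hubble peer gRPC endpoint served by every Cilium agent;
  # internalTrafficPolicy: Local restricts lookups to endpoints on the same node.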

View File

@ -0,0 +1,164 @@
MTU: {{ cilium_mtu }}
debug:
enabled: {{ cilium_debug }}
image:
repository: {{ cilium_image_repo }}
tag: {{ cilium_image_tag }}
k8sServiceHost: "auto"
k8sServicePort: "auto"
ipv4:
enabled: {{ cilium_enable_ipv4 }}
ipv6:
enabled: {{ cilium_enable_ipv6 }}
l2announcements:
enabled: {{ cilium_l2announcements }}
healthPort: {{ cilium_agent_health_port }}
identityAllocationMode: {{ cilium_identity_allocation_mode }}
tunnelProtocol: {{ cilium_tunnel_mode }}
loadBalancer:
mode: {{ cilium_loadbalancer_mode }}
kubeProxyReplacement: {{ cilium_kube_proxy_replacement }}
{% if cilium_dns_proxy_enable_transparent_mode is defined %}
dnsProxy:
enableTransparentMode: {{ cilium_dns_proxy_enable_transparent_mode }}
{% endif %}
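{# The extra* settings below are lists rendered via to_nice_yaml; Jinja's indent
   filter re-indents every line after the first, so its argument must match the
   key's nesting depth. #}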
extraVolumes:
{{ cilium_agent_extra_volumes | to_nice_yaml(indent=2) | indent(2) }}
extraVolumeMounts:
{{ cilium_agent_extra_volume_mounts | to_nice_yaml(indent=2) | indent(2) }}
extraArgs:
{{ cilium_agent_extra_args | to_nice_yaml(indent=2) | indent(2) }}
bpf:
masquerade: {{ cilium_enable_bpf_masquerade }}
hostLegacyRouting: {{ cilium_enable_host_legacy_routing }}
monitorAggregation: {{ cilium_monitor_aggregation }}
preallocateMaps: {{ cilium_preallocate_bpf_maps }}
mapDynamicSizeRatio: {{ cilium_bpf_map_dynamic_size_ratio }}
cni:
exclusive: {{ cilium_cni_exclusive }}
logFile: {{ cilium_cni_log_file }}
autoDirectNodeRoutes: {{ cilium_auto_direct_node_routes }}
ipv4NativeRoutingCIDR: {{ cilium_native_routing_cidr }}
ipv6NativeRoutingCIDR: {{ cilium_native_routing_cidr_ipv6 }}
encryption:
enabled: {{ cilium_encryption_enabled }}
{% if cilium_encryption_enabled %}
type: {{ cilium_encryption_type }}
{% if cilium_encryption_type == 'wireguard' %}
nodeEncryption: {{ cilium_encryption_node_encryption }}
{% endif %}
{% endif %}
bandwidthManager:
enabled: {{ cilium_enable_bandwidth_manager }}
bbr: {{ cilium_enable_bandwidth_manager_bbr }}
ipMasqAgent:
enabled: {{ cilium_ip_masq_agent_enable }}
{% if cilium_ip_masq_agent_enable %}
config:
nonMasqueradeCIDRs: {{ cilium_non_masquerade_cidrs }}
masqLinkLocal: {{ cilium_masq_link_local }}
masqLinkLocalIPv6: {{ cilium_masq_link_local_ipv6 }}
# cilium_ip_masq_resync_interval
{% endif %}
hubble:
enabled: {{ cilium_enable_hubble }}
relay:
enabled: {{ cilium_enable_hubble }}
image:
repository: {{ cilium_hubble_relay_image_repo }}
tag: {{ cilium_hubble_relay_image_tag }}
ui:
enabled: {{ cilium_enable_hubble_ui }}
backend:
image:
repository: {{ cilium_hubble_ui_backend_image_repo }}
tag: {{ cilium_hubble_ui_backend_image_tag }}
frontend:
image:
repository: {{ cilium_hubble_ui_image_repo }}
tag: {{ cilium_hubble_ui_image_tag }}
metrics:
enabled: {{ cilium_hubble_metrics }}
export:
fileMaxBackups: {{ cilium_hubble_export_file_max_backups }}
fileMaxSizeMb: {{ cilium_hubble_export_file_max_size_mb }}
dynamic:
enabled: {{ cilium_hubble_export_dynamic_enabled }}
config:
content:
{{ cilium_hubble_export_dynamic_config_content | to_nice_yaml(indent=10) | indent(10) }}
gatewayAPI:
enabled: {{ cilium_gateway_api_enabled }}
ipam:
mode: {{ cilium_ipam_mode }}
operator:
clusterPoolIPv4PodCIDRList:
- {{ cilium_pool_cidr | default(kube_pods_subnet) }}
clusterPoolIPv4MaskSize: {{ cilium_pool_mask_size | default(kube_network_node_prefix) }}
clusterPoolIPv6PodCIDRList:
- {{ cilium_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}
clusterPoolIPv6MaskSize: {{ cilium_pool_mask_size_ipv6 | default(kube_network_node_prefix_ipv6) }}
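{# The pool CIDRs and mask sizes fall back to the cluster-wide pod subnets and
   node prefix lengths when no Cilium-specific values are set. #}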
cgroup:
autoMount:
enabled: {{ cilium_cgroup_auto_mount }}
hostRoot: {{ cilium_cgroup_host_root }}
operator:
image:
repository: {{ cilium_operator_image_repo }}
tag: {{ cilium_operator_image_tag }}
replicas: {{ cilium_operator_replicas }}
extraArgs:
{{ cilium_operator_extra_args | to_nice_yaml(indent=2) | indent(4) }}
extraVolumes:
{{ cilium_operator_extra_volumes | to_nice_yaml(indent=2) | indent(4) }}
extraVolumeMounts:
{{ cilium_operator_extra_volume_mounts | to_nice_yaml(indent=2) | indent(4) }}
tolerations:
{{ cilium_operator_tolerations | to_nice_yaml(indent=2) | indent(4) }}
cluster:
id: {{ cilium_cluster_id }}
name: {{ cilium_cluster_name }}
enableIPv4Masquerade: {{ cilium_enable_ipv4_masquerade }}
enableIPv6Masquerade: {{ cilium_enable_ipv6_masquerade }}
hostFirewall:
enabled: {{ cilium_enable_host_firewall }}
certgen:
image:
    repository: {{ cilium_hubble_certgen_image_repo }}
tag: {{ cilium_hubble_certgen_image_tag }}
envoy:
image:
    repository: {{ cilium_hubble_envoy_image_repo }}
tag: {{ cilium_hubble_envoy_image_tag }}

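For context, a rendered values file like the one above is what the Cilium Helm chart (or the cilium-cli, which drives the same chart) consumes. A minimal sketch, assuming the template has been rendered to a local values.yaml; the release name and file path are illustrative, not taken from this commit:

```sh
# Add the official Cilium chart repo, then render the chart locally to
# inspect the manifests these values would produce (illustrative).
helm repo add cilium https://helm.cilium.io
helm template cilium cilium/cilium --version 1.17.3 \
  --namespace kube-system --values values.yaml

# cilium-cli also accepts Helm values files (illustrative).
cilium install --version 1.17.3 --values values.yaml
```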
View File

@ -7,4 +7,6 @@ mode: ha
kube_network_plugin: cilium
enable_network_policy: true
cilium_kube_proxy_replacement: strict
cilium_kube_proxy_replacement: true
kube_owner: root

View File

@ -8,3 +8,5 @@ kube_network_plugin: cilium
# ntp settings
ntp_enabled: true
ntp_package: ntp
kube_owner: root

View File

@ -5,6 +5,8 @@ cloud_image: opensuse-leap-15-6
# Kubespray settings
kube_network_plugin: cilium
kube_owner: root
# Docker specific settings:
container_manager: docker
etcd_deployment_type: docker

View File

@ -6,7 +6,9 @@ vm_memory: 3072
# Kubespray settings
kube_network_plugin: cilium
cilium_kube_proxy_replacement: strict
cilium_kube_proxy_replacement: true
kube_owner: root
# Node Feature Discovery
node_feature_discovery_enabled: true

View File

@ -7,3 +7,5 @@ mode: separate
kube_network_plugin: cilium
enable_network_policy: true
auto_renew_certificates: true
kube_owner: root