Compare commits

7 Commits

Author SHA1 Message Date
Max Gautier
3aa0c0cc64 coredns: allow to customize service name (#12951) 2026-02-06 09:52:29 +05:30
chun
9bbef44e32 Bump: Prometheus Operator CRD to 0.88.1 (#12968)
Signed-off-by: hcc429 <dev.hcc29@gmail.com>
2026-02-06 08:36:30 +05:30
Srishti Jaiswal
03cfdbf2a9 add removed var validation to validate_inventory (#12942) 2026-02-05 15:34:31 +05:30
Jordan Liggitt
b5b599ecf8 Clean up unused nodes/proxy permission from node-feature-discovery-gc (#12955) 2026-02-05 15:30:34 +05:30
Max Gautier
4245ddcee8 Make etcd node removal idempotent (#12949) 2026-02-05 11:40:28 +05:30
Joshua N Haupt
422e7366ec Fix Gluster image_id and update openstack_blockstorage_volume_v3 (#12910)
This fixes the Terraform Gluster Compute image_id bug and updates the openstack_blockstorage_volume_v2 to
openstack_blockstorage_volume_v3.

Resolves:
[Bug] OpenStack Compute variable handling of image_id and image_name for Gluster nodes is broken

https://github.com/kubernetes-sigs/kubespray/issues/12902

Update openstack_blockstorage_volume_v2 to openstack_blockstorage_volume_v3

https://github.com/kubernetes-sigs/kubespray/issues/12901

Signed-off-by: Joshua Nathaniel Haupt <joshua@hauptj.com>
2026-02-04 11:08:26 +05:30
Tushar240503
bf69e67240 refactor/dynamic-role-loading-network (#12933)
Signed-off-by: Tushar Sharma <tusharkumargzb6@gmail.com>
2026-02-03 21:58:29 +05:30
9 changed files with 75 additions and 55 deletions

View File

@@ -1006,7 +1006,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
count = var.number_of_gfs_nodes_no_floating_ip
availability_zone = element(var.az_list, count.index)
image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
image_id = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
flavor_id = var.flavor_gfs_node
key_pair = openstack_compute_keypair_v2.k8s.name
@@ -1078,7 +1078,7 @@ resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
resource "openstack_blockstorage_volume_v3" "glusterfs_volume" {
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
description = "Non-ephemeral volume for GlusterFS"
@@ -1088,5 +1088,5 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)
volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)
volume_id = element(openstack_blockstorage_volume_v3.glusterfs_volume.*.id, count.index)
}

View File

@@ -11,6 +11,7 @@ dns_nodes_per_replica: 16
 dns_cores_per_replica: 256
 dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas | int > 1 else 'false' }}"
 enable_coredns_reverse_dns_lookups: true
+coredns_svc_name: "coredns"
 coredns_ordinal_suffix: ""
 # dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
 coredns_affinity:

View File

@@ -2,7 +2,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: coredns{{ coredns_ordinal_suffix }}
+  name: {{ coredns_svc_name }}{{ coredns_ordinal_suffix }}
   namespace: kube-system
   labels:
     k8s-app: kube-dns{{ coredns_ordinal_suffix }}
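
The new coredns_svc_name default (see the defaults change above) keeps the rendered Service named coredns, so existing clusters are unaffected. A hedged usage sketch of the new knob, assuming it is overridden in group_vars the usual Kubespray way; the file path and chosen value are illustrative only:

# group_vars/k8s_cluster/k8s-cluster.yml (illustrative location)
# Render the CoreDNS Service as "kube-dns" instead of "coredns"; any
# coredns_ordinal_suffix for a secondary deployment is still appended.
coredns_svc_name: "kube-dns"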

View File

@@ -58,12 +58,6 @@ rules:
     verbs:
       - list
       - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/proxy
-    verbs:
-      - get
   - apiGroups:
       - topology.node.k8s.io
     resources:

View File

@@ -1407,6 +1407,15 @@ gateway_api_experimental_crds_checksums:
     1.0.0: sha256:6c601dced7872a940d76fa667ae126ba718cb4c6db970d0bab49128ecc1192a3
 prometheus_operator_crds_checksums:
   no_arch:
+    0.88.1: sha256:b827b8ec478e6b31cc1b85c1736570a3575953fe9f470fc29d0ffdb2803d94c4
+    0.88.0: sha256:11ee66653657f3abc1bc8c41e17aa950eadb66035edb7f84cd3a1cbe4c67b2a4
+    0.87.1: sha256:62490f7c1863539d61295f53784e27d70deec96a3b465832ba3cf96120e298b5
+    0.87.0: sha256:a5282133ffa634405b0414d2fdc07e6fe393124d1d5072073af363689dac6a62
+    0.86.2: sha256:7c9d455333ac5ea7837d5f0e4edd966698e44edd79108bafdd8508f2da503b5b
+    0.86.1: sha256:9a30912ba9970a2968d7a8bf030a9f6579a5e8b312961018b5fe4c1153fc5fce
+    0.86.0: sha256:0d2a590b288c79a98515e9fc4315451cfbde964c7977eb527696f7c2ebf47f58
+    0.85.0: sha256:30e1b1b034ebc750d50a77dc19841176d698d524edf677276a760f9e228e1208
+    0.84.1: sha256:f4a186ac58f354793e27a0b4b6f8baf5a31a9d10045e5085c23b0570dbfd30dd
     0.84.0: sha256:8990f6837ccff4461df9abe19d31d532fef11386d85d861b392249fff2502255
 argocd_install_checksums:
   no_arch:

View File

@@ -1,44 +0,0 @@
----
-dependencies:
-  - role: network_plugin/cni
-    when: kube_network_plugin != 'none'
-  - role: network_plugin/cilium
-    when: kube_network_plugin == 'cilium' or cilium_deploy_additionally
-    tags:
-      - cilium
-  - role: network_plugin/calico
-    when: kube_network_plugin == 'calico'
-    tags:
-      - calico
-  - role: network_plugin/flannel
-    when: kube_network_plugin == 'flannel'
-    tags:
-      - flannel
-  - role: network_plugin/macvlan
-    when: kube_network_plugin == 'macvlan'
-    tags:
-      - macvlan
-  - role: network_plugin/kube-ovn
-    when: kube_network_plugin == 'kube-ovn'
-    tags:
-      - kube-ovn
-  - role: network_plugin/kube-router
-    when: kube_network_plugin == 'kube-router'
-    tags:
-      - kube-router
-  - role: network_plugin/custom_cni
-    when: kube_network_plugin == 'custom_cni'
-    tags:
-      - custom_cni
-  - role: network_plugin/multus
-    when: kube_network_plugin_multus
-    tags:
-      - multus

View File

@@ -0,0 +1,47 @@
+---
+- name: Container Network Interface plugin
+  include_role:
+    name: network_plugin/cni
+  when: kube_network_plugin != 'none'
+
+- name: Network plugin
+  include_role:
+    name: "network_plugin/{{ kube_network_plugin }}"
+    apply:
+      tags:
+        - "{{ kube_network_plugin }}"
+        - network
+  when:
+    - kube_network_plugin != 'none'
+  tags:
+    - cilium
+    - calico
+    - flannel
+    - macvlan
+    - kube-ovn
+    - kube-router
+    - custom_cni
+
+- name: Cilium additional
+  include_role:
+    name: network_plugin/cilium
+    apply:
+      tags:
+        - cilium
+        - network
+  when:
+    - kube_network_plugin != 'cilium'
+    - cilium_deploy_additionally
+  tags:
+    - cilium
+
+- name: Multus
+  include_role:
+    name: network_plugin/multus
+    apply:
+      tags:
+        - multus
+        - network
+  when: kube_network_plugin_multus
+  tags:
+    - multus

View File

@@ -21,6 +21,10 @@
- "{{ bin_dir }}/etcdctl"
- member
- remove
- "{{ '%x' | format(((etcd_members.stdout | from_json).members | selectattr('peerURLs.0', '==', etcd_peer_url))[0].ID) }}"
- "{{ '%x' | format(etcd_removed_nodes[0].ID) }}"
vars:
etcd_removed_nodes: "{{ (etcd_members.stdout | from_json).members | selectattr('peerURLs.0', '==', etcd_peer_url) }}"
# This should always have at most one member, since the etcd_peer_url should be unique in the etcd cluster
when: etcd_removed_nodes != []
register: etcd_removal_output
changed_when: "'Removed member' in etcd_removal_output.stdout"

View File

@@ -6,6 +6,15 @@
 # -> nothing depending on facts or similar cluster state
 # Checks depending on current state (of the nodes or the cluster)
 # should be in roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+
+- name: Fail if removed variables are used
+  vars:
+    removed_vars: []
+    removed_vars_found: "{{ query('varnames', '^' + (removed_vars | join('|')) + '$') }}"
+  assert:
+    that: removed_vars_found | length == 0
+    fail_msg: "Removed variables present: {{ removed_vars_found | join(', ') }}"
+  run_once: true
 - name: Stop if kube_control_plane group is empty
   assert:
     that: groups.get( 'kube_control_plane' )
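
As merged, removed_vars above is an empty list, so the assert is effectively a no-op until entries are added. A hedged sketch of how the check behaves once populated; the listed variable is only an example of an option Kubespray has removed in the past and is not part of this change:

# Hypothetical future entry: any inventory or group_vars file that still
# defines etcd_kubeadm_enabled makes the varnames lookup return its name, and
# the assert fails during inventory validation with a message naming it.
- name: Fail if removed variables are used
  vars:
    removed_vars:
      - etcd_kubeadm_enabled
    removed_vars_found: "{{ query('varnames', '^' + (removed_vars | join('|')) + '$') }}"
  assert:
    that: removed_vars_found | length == 0
    fail_msg: "Removed variables present: {{ removed_vars_found | join(', ') }}"
  run_once: true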