Mirror of https://github.com/kubernetes-sigs/kubespray.git
Synced 2026-02-02 01:58:12 -03:30

Compare commits: component_... -> master (8 commits)
Commits:

- 20da3bb1b0
- 4d4058ee8e
- f071fccc33
- 70daea701a
- 3e42b84e94
- 868ff3cea9
- 0b69a18e35
- e30076016c
@@ -20,7 +20,7 @@ jobs:
           query get_release_branches($owner:String!, $name:String!) {
             repository(owner:$owner, name:$name) {
               refs(refPrefix: "refs/heads/",
-                   first: 2, # TODO increment once we have release branch with the new checksums format
+                   first: 3,
                    query: "release-",
                    orderBy: {
                      field: ALPHABETICAL,
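For context, the `get_release_branches` query above can also be exercised directly against the GitHub GraphQL API to see which `release-*` branches the workflow will now pick up. This is only an illustrative sketch, not part of the change; it assumes a `GITHUB_TOKEN` environment variable and `jq` on the PATH.

```bash
# Fetch the first 3 "release-" branches of kubernetes-sigs/kubespray, alphabetically,
# mirroring the refs() arguments used in the workflow query above.
curl -s -X POST https://api.github.com/graphql \
  -H "Authorization: bearer ${GITHUB_TOKEN}" \
  -d '{"query":"query { repository(owner: \"kubernetes-sigs\", name: \"kubespray\") { refs(refPrefix: \"refs/heads/\", first: 3, query: \"release-\", orderBy: {field: ALPHABETICAL, direction: ASC}) { nodes { name } } } }"}' \
  | jq -r '.data.repository.refs.nodes[].name'
```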
@@ -37,7 +37,6 @@ terraform_validate:
     - hetzner
     - vsphere
     - upcloud
-    - nifcloud

 .terraform_apply:
   extends: .terraform_install
@@ -1,7 +1,7 @@
 # syntax=docker/dockerfile:1

-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b

 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
@@ -29,7 +29,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \

 RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
     --mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
-    pip install --no-compile --no-cache-dir -r requirements.txt \
+    pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt \
     && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
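The switch to `ubuntu:noble` (24.04) is what forces the new `--break-system-packages` flag: noble marks its system Python as externally managed per PEP 668, so a plain `pip install` into it is refused. A quick, illustrative way to observe that behaviour outside the image build (the package name is arbitrary):

```bash
# Reproduce the PEP 668 refusal on a stock ubuntu:24.04 container.
docker run --rm ubuntu:24.04 bash -c '
  apt-get update -qq && apt-get install -y -qq python3-pip >/dev/null
  python3 -m pip install jmespath || true                  # fails: externally-managed-environment
  python3 -m pip install --break-system-packages jmespath  # explicit override, as in this Dockerfile
'
```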
contrib/terraform/nifcloud/.gitignore (vendored)
@@ -1,5 +0,0 @@
-*.tfstate*
-.terraform.lock.hcl
-.terraform
-
-sample-inventory/inventory.ini
@@ -1,138 +0,0 @@
-# Kubernetes on NIFCLOUD with Terraform
-
-Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray
-
-## Overview
-
-The setup looks like following
-
-```text
-                              Kubernetes cluster
-                        +----------------------------+
-+---------------+       |   +--------------------+   |
-|               |       |   | +--------------------+ |
-| API server LB +---------> | |                    | |
-|               |       |   | | Control Plane/etcd | |
-+---------------+       |   | | node(s)            | |
-                        |   +-+                    | |
-                        |     +--------------------+ |
-                        |           ^                |
-                        |           |                |
-                        |           v                |
-                        |   +--------------------+   |
-                        |   | +--------------------+ |
-                        |   | |                    | |
-                        |   | | Worker             | |
-                        |   | | node(s)            | |
-                        |   +-+                    | |
-                        |     +--------------------+ |
-                        +----------------------------+
-```
-
-## Requirements
-
-* Terraform 1.3.7
-
-## Quickstart
-
-### Export Variables
-
-* Your NIFCLOUD credentials:
-
-  ```bash
-  export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
-  export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
-  ```
-
-* The SSH KEY used to connect to the instance:
-  * FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)
-
-  ```bash
-  export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
-  ```
-
-* The IP address to connect to bastion server:
-
-  ```bash
-  export TF_VAR_working_instance_ip=$(curl ifconfig.me)
-  ```
-
-### Create The Infrastructure
-
-* Run terraform:
-
-  ```bash
-  terraform init
-  terraform apply -var-file ./sample-inventory/cluster.tfvars
-  ```
-
-### Setup The Kubernetes
-
-* Generate cluster configuration file:
-
-  ```bash
-  ./generate-inventory.sh > sample-inventory/inventory.ini
-  ```
-
-* Export Variables:
-
-  ```bash
-  BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
-  API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
-  CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
-  export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
-  ```
-
-* Set ssh-agent"
-
-  ```bash
-  eval `ssh-agent`
-  ssh-add <THE PATH TO YOUR SSH KEY>
-  ```
-
-* Run cluster.yml playbook:
-
-  ```bash
-  cd ./../../../
-  ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
-  ```
-
-### Connecting to Kubernetes
-
-* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
-* Fetching kubeconfig file:
-
-  ```bash
-  mkdir -p ~/.kube
-  scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
-  ```
-
-* Rewrite /etc/hosts
-
-  ```bash
-  sudo echo "${API_LB_IP} lb-apiserver.kubernetes.local" >> /etc/hosts
-  ```
-
-* Run kubectl
-
-  ```bash
-  kubectl get node
-  ```
-
-## Variables
-
-* `region`: Region where to run the cluster
-* `az`: Availability zone where to run the cluster
-* `private_ip_bn`: Private ip address of bastion server
-* `private_network_cidr`: Subnet of private network
-* `instances_cp`: Machine to provision as Control Plane. Key of this object will be used as part of the machine' name
-  * `private_ip`: private ip address of machine
-* `instances_wk`: Machine to provision as Worker Node. Key of this object will be used as part of the machine' name
-  * `private_ip`: private ip address of machine
-* `instance_key_name`: The key name of the Key Pair to use for the instance
-* `instance_type_bn`: The instance type of bastion server
-* `instance_type_wk`: The instance type of worker node
-* `instance_type_cp`: The instance type of control plane
-* `image_name`: OS image used for the instance
-* `working_instance_ip`: The IP address to connect to bastion server
-* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
@@ -1,64 +0,0 @@
-#!/bin/bash
-
-#
-# Generates a inventory file based on the terraform output.
-# After provisioning a cluster, simply run this command and supply the terraform state file
-# Default state file is terraform.tfstate
-#
-
-set -e
-
-TF_OUT=$(terraform output -json)
-
-CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
-WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
-mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
-mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
-
-API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
-
-echo "[all]"
-# Generate control plane hosts
-i=1
-for name in "${CONTROL_PLANE_NAMES[@]}"; do
-  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
-  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
-  i=$(( i + 1 ))
-done
-
-# Generate worker hosts
-for name in "${WORKER_NAMES[@]}"; do
-  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
-  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
-done
-
-API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
-
-echo ""
-echo "[all:vars]"
-echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
-echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"
-
-
-echo ""
-echo "[kube_control_plane]"
-for name in "${CONTROL_PLANE_NAMES[@]}"; do
-  echo "${name}"
-done
-
-echo ""
-echo "[etcd]"
-for name in "${CONTROL_PLANE_NAMES[@]}"; do
-  echo "${name}"
-done
-
-echo ""
-echo "[kube_node]"
-for name in "${WORKER_NAMES[@]}"; do
-  echo "${name}"
-done
-
-echo ""
-echo "[k8s_cluster:children]"
-echo "kube_control_plane"
-echo "kube_node"
@@ -1,36 +0,0 @@
-provider "nifcloud" {
-  region = var.region
-}
-
-module "kubernetes_cluster" {
-  source = "./modules/kubernetes-cluster"
-
-  availability_zone = var.az
-  prefix            = "dev"
-
-  private_network_cidr = var.private_network_cidr
-
-  instance_key_name = var.instance_key_name
-  instances_cp      = var.instances_cp
-  instances_wk      = var.instances_wk
-  image_name        = var.image_name
-
-  instance_type_bn = var.instance_type_bn
-  instance_type_cp = var.instance_type_cp
-  instance_type_wk = var.instance_type_wk
-
-  private_ip_bn = var.private_ip_bn
-
-  additional_lb_filter = [var.working_instance_ip]
-}
-
-resource "nifcloud_security_group_rule" "ssh_from_bastion" {
-  security_group_names = [
-    module.kubernetes_cluster.security_group_name.bastion
-  ]
-  type      = "IN"
-  from_port = 22
-  to_port   = 22
-  protocol  = "TCP"
-  cidr_ip   = var.working_instance_ip
-}
@@ -1,301 +0,0 @@
-#################################################
-##
-## Local variables
-##
-locals {
-  # e.g. east-11 is 11
-  az_num = reverse(split("-", var.availability_zone))[0]
-  # e.g. east-11 is e11
-  az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"
-
-  # Port used by the protocol
-  port_ssh     = 22
-  port_kubectl = 6443
-  port_kubelet = 10250
-
-  # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
-  port_bgp   = 179
-  port_vxlan = 4789
-  port_etcd  = 2379
-}
-
-#################################################
-##
-## General
-##
-
-# data
-data "nifcloud_image" "this" {
-  image_name = var.image_name
-}
-
-# private lan
-resource "nifcloud_private_lan" "this" {
-  private_lan_name  = "${var.prefix}lan"
-  availability_zone = var.availability_zone
-  cidr_block        = var.private_network_cidr
-  accounting_type   = var.accounting_type
-}
-
-#################################################
-##
-## Bastion
-##
-resource "nifcloud_security_group" "bn" {
-  group_name        = "${var.prefix}bn"
-  description       = "${var.prefix} bastion"
-  availability_zone = var.availability_zone
-}
-
-resource "nifcloud_instance" "bn" {
-
-  instance_id    = "${local.az_short_name}${var.prefix}bn01"
-  security_group = nifcloud_security_group.bn.group_name
-  instance_type  = var.instance_type_bn
-
-  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
-    private_ip_address = var.private_ip_bn
-    ssh_port           = local.port_ssh
-    hostname           = "${local.az_short_name}${var.prefix}bn01"
-  })
-
-  availability_zone = var.availability_zone
-  accounting_type   = var.accounting_type
-  image_id          = data.nifcloud_image.this.image_id
-  key_name          = var.instance_key_name
-
-  network_interface {
-    network_id = "net-COMMON_GLOBAL"
-  }
-  network_interface {
-    network_id = nifcloud_private_lan.this.network_id
-    ip_address = "static"
-  }
-
-  # The image_id changes when the OS image type is demoted from standard to public.
-  lifecycle {
-    ignore_changes = [
-      image_id,
-      user_data,
-    ]
-  }
-}
-
-#################################################
-##
-## Control Plane
-##
-resource "nifcloud_security_group" "cp" {
-  group_name        = "${var.prefix}cp"
-  description       = "${var.prefix} control plane"
-  availability_zone = var.availability_zone
-}
-
-resource "nifcloud_instance" "cp" {
-  for_each = var.instances_cp
-
-  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
-  security_group = nifcloud_security_group.cp.group_name
-  instance_type  = var.instance_type_cp
-  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
-    private_ip_address = each.value.private_ip
-    ssh_port           = local.port_ssh
-    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
-  })
-
-  availability_zone = var.availability_zone
-  accounting_type   = var.accounting_type
-  image_id          = data.nifcloud_image.this.image_id
-  key_name          = var.instance_key_name
-
-  network_interface {
-    network_id = "net-COMMON_GLOBAL"
-  }
-  network_interface {
-    network_id = nifcloud_private_lan.this.network_id
-    ip_address = "static"
-  }
-
-  # The image_id changes when the OS image type is demoted from standard to public.
-  lifecycle {
-    ignore_changes = [
-      image_id,
-      user_data,
-    ]
-  }
-}
-
-resource "nifcloud_load_balancer" "this" {
-  load_balancer_name = "${local.az_short_name}${var.prefix}cp"
-  accounting_type    = var.accounting_type
-  balancing_type     = 1 // Round-Robin
-  load_balancer_port = local.port_kubectl
-  instance_port      = local.port_kubectl
-  instances          = [for v in nifcloud_instance.cp : v.instance_id]
-  filter = concat(
-    [for k, v in nifcloud_instance.cp : v.public_ip],
-    [for k, v in nifcloud_instance.wk : v.public_ip],
-    var.additional_lb_filter,
-  )
-  filter_type = 1 // Allow
-}
-
-#################################################
-##
-## Worker
-##
-resource "nifcloud_security_group" "wk" {
-  group_name        = "${var.prefix}wk"
-  description       = "${var.prefix} worker"
-  availability_zone = var.availability_zone
-}
-
-resource "nifcloud_instance" "wk" {
-  for_each = var.instances_wk
-
-  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
-  security_group = nifcloud_security_group.wk.group_name
-  instance_type  = var.instance_type_wk
-  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
-    private_ip_address = each.value.private_ip
-    ssh_port           = local.port_ssh
-    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
-  })
-
-  availability_zone = var.availability_zone
-  accounting_type   = var.accounting_type
-  image_id          = data.nifcloud_image.this.image_id
-  key_name          = var.instance_key_name
-
-  network_interface {
-    network_id = "net-COMMON_GLOBAL"
-  }
-  network_interface {
-    network_id = nifcloud_private_lan.this.network_id
-    ip_address = "static"
-  }
-
-  # The image_id changes when the OS image type is demoted from standard to public.
-  lifecycle {
-    ignore_changes = [
-      image_id,
-      user_data,
-    ]
-  }
-}
-
-#################################################
-##
-## Security Group Rule: Kubernetes
-##
-
-# ssh
-resource "nifcloud_security_group_rule" "ssh_from_bastion" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_ssh
-  to_port                    = local.port_ssh
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.bn.group_name
-}
-
-# kubectl
-resource "nifcloud_security_group_rule" "kubectl_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_kubectl
-  to_port                    = local.port_kubectl
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-# kubelet
-resource "nifcloud_security_group_rule" "kubelet_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_kubelet
-  to_port                    = local.port_kubelet
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_kubelet
-  to_port                    = local.port_kubelet
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.cp.group_name
-}
-
-#################################################
-##
-## Security Group Rule: calico
-##
-
-# vslan
-resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_vxlan
-  to_port                    = local.port_vxlan
-  protocol                   = "UDP"
-  source_security_group_name = nifcloud_security_group.cp.group_name
-}
-
-resource "nifcloud_security_group_rule" "vxlan_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_vxlan
-  to_port                    = local.port_vxlan
-  protocol                   = "UDP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-# bgp
-resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_bgp
-  to_port                    = local.port_bgp
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.cp.group_name
-}
-
-resource "nifcloud_security_group_rule" "bgp_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_bgp
-  to_port                    = local.port_bgp
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-# etcd
-resource "nifcloud_security_group_rule" "etcd_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_etcd
-  to_port                    = local.port_etcd
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
@@ -1,48 +0,0 @@
-output "control_plane_lb" {
-  description = "The DNS name of LB for control plane"
-  value       = nifcloud_load_balancer.this.dns_name
-}
-
-output "security_group_name" {
-  description = "The security group used in the cluster"
-  value = {
-    bastion       = nifcloud_security_group.bn.group_name,
-    control_plane = nifcloud_security_group.cp.group_name,
-    worker        = nifcloud_security_group.wk.group_name,
-  }
-}
-
-output "private_network_id" {
-  description = "The private network used in the cluster"
-  value       = nifcloud_private_lan.this.id
-}
-
-output "bastion_info" {
-  description = "The basion information in cluster"
-  value = { (nifcloud_instance.bn.instance_id) : {
-    instance_id = nifcloud_instance.bn.instance_id,
-    unique_id   = nifcloud_instance.bn.unique_id,
-    private_ip  = nifcloud_instance.bn.private_ip,
-    public_ip   = nifcloud_instance.bn.public_ip,
-  } }
-}
-
-output "worker_info" {
-  description = "The worker information in cluster"
-  value = { for v in nifcloud_instance.wk : v.instance_id => {
-    instance_id = v.instance_id,
-    unique_id   = v.unique_id,
-    private_ip  = v.private_ip,
-    public_ip   = v.public_ip,
-  } }
-}
-
-output "control_plane_info" {
-  description = "The control plane information in cluster"
-  value = { for v in nifcloud_instance.cp : v.instance_id => {
-    instance_id = v.instance_id,
-    unique_id   = v.unique_id,
-    private_ip  = v.private_ip,
-    public_ip   = v.public_ip,
-  } }
-}
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-#################################################
-##
-## IP Address
-##
-configure_private_ip_address () {
-  cat << EOS > /etc/netplan/01-netcfg.yaml
-network:
-  version: 2
-  renderer: networkd
-  ethernets:
-    ens192:
-      dhcp4: yes
-      dhcp6: yes
-      dhcp-identifier: mac
-    ens224:
-      dhcp4: no
-      dhcp6: no
-      addresses: [${private_ip_address}]
-EOS
-  netplan apply
-}
-configure_private_ip_address
-
-#################################################
-##
-## SSH
-##
-configure_ssh_port () {
-  sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
-}
-configure_ssh_port
-
-#################################################
-##
-## Hostname
-##
-hostnamectl set-hostname ${hostname}
-
-#################################################
-##
-## Disable swap files genereated by systemd-gpt-auto-generator
-##
-systemctl mask "dev-sda3.swap"
@@ -1,9 +0,0 @@
-terraform {
-  required_version = ">=1.3.7"
-  required_providers {
-    nifcloud = {
-      source  = "nifcloud/nifcloud"
-      version = ">= 1.8.0, < 2.0.0"
-    }
-  }
-}
@@ -1,81 +0,0 @@
-variable "availability_zone" {
-  description = "The availability zone"
-  type        = string
-}
-
-variable "prefix" {
-  description = "The prefix for the entire cluster"
-  type        = string
-  validation {
-    condition     = length(var.prefix) <= 5
-    error_message = "Must be a less than 5 character long."
-  }
-}
-
-variable "private_network_cidr" {
-  description = "The subnet of private network"
-  type        = string
-  validation {
-    condition     = can(cidrnetmask(var.private_network_cidr))
-    error_message = "Must be a valid IPv4 CIDR block address."
-  }
-}
-
-variable "private_ip_bn" {
-  description = "Private IP of bastion server"
-  type        = string
-}
-
-variable "instances_cp" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instances_wk" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instance_key_name" {
-  description = "The key name of the Key Pair to use for the instance"
-  type        = string
-}
-
-variable "instance_type_bn" {
-  description = "The instance type of bastion server"
-  type        = string
-}
-
-variable "instance_type_wk" {
-  description = "The instance type of worker"
-  type        = string
-}
-
-variable "instance_type_cp" {
-  description = "The instance type of control plane"
-  type        = string
-}
-
-variable "image_name" {
-  description = "The name of image"
-  type        = string
-}
-
-variable "additional_lb_filter" {
-  description = "Additional LB filter"
-  type        = list(string)
-}
-
-variable "accounting_type" {
-  type    = string
-  default = "1"
-  validation {
-    condition = anytrue([
-      var.accounting_type == "1", // Monthly
-      var.accounting_type == "2", // Pay per use
-    ])
-    error_message = "Must be a 1 or 2."
-  }
-}
@@ -1,3 +0,0 @@
-output "kubernetes_cluster" {
-  value = module.kubernetes_cluster
-}
@@ -1,22 +0,0 @@
-region = "jp-west-1"
-az     = "west-11"
-
-instance_key_name = "deployerkey"
-
-instance_type_bn = "e-medium"
-instance_type_cp = "e-medium"
-instance_type_wk = "e-medium"
-
-private_network_cidr = "192.168.30.0/24"
-instances_cp = {
-  "cp01" : { private_ip : "192.168.30.11/24" }
-  "cp02" : { private_ip : "192.168.30.12/24" }
-  "cp03" : { private_ip : "192.168.30.13/24" }
-}
-instances_wk = {
-  "wk01" : { private_ip : "192.168.30.21/24" }
-  "wk02" : { private_ip : "192.168.30.22/24" }
-}
-private_ip_bn = "192.168.30.10/24"
-
-image_name = "Ubuntu Server 22.04 LTS"
@@ -1 +0,0 @@
-../../../../inventory/sample/group_vars
@@ -1,9 +0,0 @@
-terraform {
-  required_version = ">=1.3.7"
-  required_providers {
-    nifcloud = {
-      source  = "nifcloud/nifcloud"
-      version = "1.8.0"
-    }
-  }
-}
@@ -1,77 +0,0 @@
-variable "region" {
-  description = "The region"
-  type        = string
-}
-
-variable "az" {
-  description = "The availability zone"
-  type        = string
-}
-
-variable "private_ip_bn" {
-  description = "Private IP of bastion server"
-  type        = string
-}
-
-variable "private_network_cidr" {
-  description = "The subnet of private network"
-  type        = string
-  validation {
-    condition     = can(cidrnetmask(var.private_network_cidr))
-    error_message = "Must be a valid IPv4 CIDR block address."
-  }
-}
-
-variable "instances_cp" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instances_wk" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instance_key_name" {
-  description = "The key name of the Key Pair to use for the instance"
-  type        = string
-}
-
-variable "instance_type_bn" {
-  description = "The instance type of bastion server"
-  type        = string
-}
-
-variable "instance_type_wk" {
-  description = "The instance type of worker"
-  type        = string
-}
-
-variable "instance_type_cp" {
-  description = "The instance type of control plane"
-  type        = string
-}
-
-variable "image_name" {
-  description = "The name of image"
-  type        = string
-}
-
-variable "working_instance_ip" {
-  description = "The IP address to connect to bastion server."
-  type        = string
-}
-
-variable "accounting_type" {
-  type    = string
-  default = "2"
-  validation {
-    condition = anytrue([
-      var.accounting_type == "1", // Monthly
-      var.accounting_type == "2", // Pay per use
-    ])
-    error_message = "Must be a 1 or 2."
-  }
-}
@@ -2,7 +2,7 @@
 namespace: kubernetes_sigs
 description: Deploy a production ready Kubernetes cluster
 name: kubespray
-version: 2.30.0
+version: 2.31.0
 readme: README.md
 authors:
   - The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)
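The version field above is what ends up in the built collection artifact. A hedged way to check the bump locally, with the output filename inferred from the namespace, name, and version fields rather than stated in the diff:

```bash
# Build the Ansible collection from the repository root; with galaxy.yml at 2.31.0
# this should produce a kubernetes_sigs-kubespray-2.31.0.tar.gz tarball.
ansible-galaxy collection build --force
ls kubernetes_sigs-kubespray-*.tar.gz
```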
@@ -1,5 +1,5 @@
-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:jammy-20230308
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
 # (and potentially other packages)
@@ -44,9 +44,8 @@ ADD ./requirements.txt /kubespray/requirements.txt
 ADD ./tests/requirements.txt /kubespray/tests/requirements.txt

 RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
-    && pip install --no-compile --no-cache-dir pip -U \
-    && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
-    && pip install --no-compile --no-cache-dir -r requirements.txt \
+    && pip install --break-system-packages --ignore-installed --no-compile --no-cache-dir pip -U \
+    && pip install --break-system-packages --no-compile --no-cache-dir -r tests/requirements.txt \
    && curl -L https://dl.k8s.io/release/v1.34.3/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/v1.34.3/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl \
@@ -56,5 +55,5 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
    && rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
    && vagrant plugin install vagrant-libvirt \
    # Install Kubernetes collections
-    && pip install --no-compile --no-cache-dir kubernetes \
+    && pip install --break-system-packages --no-compile --no-cache-dir kubernetes \
    && ansible-galaxy collection install kubernetes.core
@@ -1,6 +1,6 @@
 ansible==10.7.0
 # Needed for community.crypto module
-cryptography==46.0.3
+cryptography==46.0.4
 # Needed for jinja2 json_query templating
 jmespath==1.1.0
 # Needed for ansible.utils.ipaddr
@@ -8,3 +8,4 @@ local_path_provisioner_is_default_storageclass: "true"
 local_path_provisioner_debug: false
 local_path_provisioner_helper_image_repo: "busybox"
 local_path_provisioner_helper_image_tag: "latest"
+local_path_provisioner_resources: {}
@@ -35,6 +35,10 @@ spec:
             valueFrom:
               fieldRef:
                 fieldPath: metadata.namespace
+{% if local_path_provisioner_resources %}
+        resources:
+          {{ local_path_provisioner_resources | to_nice_yaml | indent(10) | trim }}
+{% endif %}
       volumes:
         - name: config-volume
           configMap:
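Together with the new `local_path_provisioner_resources: {}` default shown above, this template block lets an inventory attach resource requests and limits to the provisioner Deployment. A sketch of one way to set it, assuming the usual copied-sample inventory layout; the path and the numbers are illustrative, not taken from the diff:

```bash
# Append a resources override for the local-path provisioner to a cluster's group_vars.
cat >> inventory/mycluster/group_vars/k8s_cluster/addons.yml <<'EOF'
local_path_provisioner_resources:
  requests:
    cpu: 100m
    memory: 64Mi
  limits:
    memory: 128Mi
EOF
```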
@@ -20,7 +20,7 @@
   when:
     - not ignore_assert_errors

-- name: Warn if `kube_network_plugin` is `none
+- name: Warn if `kube_network_plugin` is `none`
   debug:
     msg: |
       "WARNING! => `kube_network_plugin` is set to `none`. The network configuration will be skipped.
@@ -1,7 +1,7 @@
 # syntax=docker/dockerfile:1

-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b

 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
@@ -29,7 +29,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \

 RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
     --mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
-    pip install --no-compile --no-cache-dir -r requirements.txt \
+    pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt \
     && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
@@ -116,4 +116,9 @@ infos = {
         "graphql_id": "R_kgDODQ6RZw",
         "binary": True,
     },
+    "prometheus_operator_crds": {
+        "url": "https://github.com/prometheus-operator/prometheus-operator/releases/download/v{version}/stripped-down-crds.yaml",
+        "graphql_id": "R_kgDOBBxPpw",
+        "binary": True,
+    },
 }
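The new `prometheus_operator_crds` entry teaches the checksum/download tooling about one more release artifact. Fetching it by hand looks roughly like the sketch below; the version number is a placeholder, not something stated in the diff:

```bash
# Download the stripped-down CRDs manifest described by the new entry and print its sha256,
# the kind of value the checksum tooling records. VERSION is illustrative.
VERSION=0.79.2
curl -fsSLO "https://github.com/prometheus-operator/prometheus-operator/releases/download/v${VERSION}/stripped-down-crds.yaml"
sha256sum stripped-down-crds.yaml
```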
@@ -1,5 +1,5 @@
-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:jammy-20230308
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
 # (and potentially other packages)
@@ -44,9 +44,8 @@ ADD ./requirements.txt /kubespray/requirements.txt
 ADD ./tests/requirements.txt /kubespray/tests/requirements.txt

 RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
-    && pip install --no-compile --no-cache-dir pip -U \
-    && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
-    && pip install --no-compile --no-cache-dir -r requirements.txt \
+    && pip install --break-system-packages --ignore-installed --no-compile --no-cache-dir pip -U \
+    && pip install --break-system-packages --no-compile --no-cache-dir -r tests/requirements.txt \
    && curl -L https://dl.k8s.io/release/v{{ kube_version }}/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/v{{ kube_version }}/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl \
@@ -56,5 +55,5 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
    && rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
    && vagrant plugin install vagrant-libvirt \
    # Install Kubernetes collections
-    && pip install --no-compile --no-cache-dir kubernetes \
+    && pip install --break-system-packages --no-compile --no-cache-dir kubernetes \
    && ansible-galaxy collection install kubernetes.core
@@ -18,7 +18,7 @@ if [ "${UPGRADE_TEST}" != "false" ]; then
   # Checkout the current tests/ directory ; even when testing old version,
   # we want the up-to-date test setup/provisionning
   git checkout "${CI_COMMIT_SHA}" -- tests/
-  pip install --no-compile --no-cache-dir -r requirements.txt
+  pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt
 fi

 export ANSIBLE_BECOME=true
@@ -58,7 +58,7 @@ fi
 if [ "${UPGRADE_TEST}" != "false" ]; then
   git checkout "${CI_COMMIT_SHA}"

-  pip install --no-compile --no-cache-dir -r requirements.txt
+  pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt

   case "${UPGRADE_TEST}" in
     "basic")