Compare commits

..

3 Commits

Author  SHA1  Message  Date
Antoine Legrand  a222be7fae  fix tag push  2017-02-07 00:29:45 +01:00
Antoine Legrand  9d43cd86be  WIP  2017-02-06 22:02:30 +01:00
Antoine Legrand  6ed99f1f44  WIP  2017-02-06 19:59:11 +01:00
631 changed files with 4378 additions and 21597 deletions


@@ -24,7 +24,7 @@ explain why.
- **Version of Ansible** (`ansible --version`):
**Kubespray version (commit) (`git rev-parse --short HEAD`):**
**Kargo version (commit) (`git rev-parse --short HEAD`):**
**Network plugin used**:

.gitignore (vendored, 88 lines changed)

@@ -1,96 +1,14 @@
.vagrant
*.retry
inventory/vagrant_ansible_inventory
inventory/credentials/
inventory/group_vars/fake_hosts.yml
inventory/host_vars/
temp
.idea
.tox
.cache
*.bak
*.egg-info
*.pyc
*.pyo
*.tfstate
*.tfstate.backup
contrib/terraform/aws/credentials.tfvars
**/*.sw[pon]
/ssh-bastion.conf
**/*.sw[pon]
vagrant/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
.Python
inventory/*/artifacts/
env/
build/
credentials/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# dotenv
.env
# virtualenv
venv/
ENV/


@@ -1,31 +1,14 @@
stages:
- unit-tests
- moderator
- deploy-part1
- deploy-part2
- deploy-special
- unit-tests
- deploy-gce-part1
- deploy-gce-part2
- deploy-gce-special
variables:
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-incubator__kubespray'
# DOCKER_HOST: tcp://localhost:2375
ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
CONTAINER_ENGINE: docker
SSH_USER: root
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
KUBEADM_ENABLED: "false"
LOG_LEVEL: "-vv"
# asia-east1-a
# asia-northeast1-a
@@ -35,14 +18,18 @@ variables:
# us-west1-a
before_script:
- /usr/bin/python -m pip install -r tests/requirements.txt
- pip install ansible==2.2.1.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- mkdir -p /.ssh
- cp tests/ansible.cfg .
.job: &job
tags:
- kubernetes
- docker
image: quay.io/kubespray/kubespray:latest
image: quay.io/ant31/kargo:master
.docker_service: &docker_service
services:
@@ -55,17 +42,22 @@ before_script:
.gce_variables: &gce_variables
GCE_USER: travis
SSH_USER: $GCE_USER
CLOUD_MACHINE_TYPE: "g1-small"
CI_PLATFORM: "gce"
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
CONTAINER_ENGINE: docker
PRIVATE_KEY: $GCE_PRIVATE_KEY
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
ANSIBLE_KEEP_REMOTE_FILES: "1"
BOOTSTRAP_OS: none
IDEMPOT_CHECK: "false"
RESOLVCONF_MODE: docker_dns
LOG_LEVEL: "-vv"
ETCD_DEPLOYMENT: "docker"
KUBELET_DEPLOYMENT: "docker"
WEAVE_CPU_LIMIT: "100m"
MAGIC: "ci check this"
.do_variables: &do_variables
PRIVATE_KEY: $DO_PRIVATE_KEY
CI_PLATFORM: "do"
SSH_USER: root
.testcases: &testcases
.gce: &gce
<<: *job
<<: *docker_service
cache:
@@ -75,271 +67,278 @@ before_script:
- $HOME/.cache
before_script:
- docker info
- /usr/bin/python -m pip install -r requirements.txt
- /usr/bin/python -m pip install -r tests/requirements.txt
- pip install ansible==2.2.1.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- mkdir -p /.ssh
- cp tests/ansible.cfg .
- mkdir -p $HOME/.ssh
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
- chmod 400 $HOME/.ssh/id_rsa
- ansible-playbook --version
- export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
- echo "CI_JOB_NAME is $CI_JOB_NAME"
- echo "PYPATH is $PYPATH"
- cp tests/ansible.cfg .
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
script:
- pwd
- ls
- echo ${PWD}
- echo "${STARTUP_SCRIPT}"
- cd tests && make create-${CI_PLATFORM} -s ; cd -
# Check out latest tag if testing upgrade
# Uncomment when gitlab kubespray repo has tags
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
- test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
# Checkout the CI vars file so it is available
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
- 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'
- >
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
${LOG_LEVEL}
-e cloud_image=${CLOUD_IMAGE}
-e cloud_region=${CLOUD_REGION}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e inventory_path=${PWD}/inventory/inventory.ini
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
# Create cluster
- >
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_ssh_user=${SSH_USER}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e deploy_netchecker=true
-e download_localhost=true
-e download_run_once=true
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
-e resolvconf_mode=${RESOLVCONF_MODE}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
cluster.yml
# Repeat deployment if testing upgrade
- >
if [ "${UPGRADE_TEST}" != "false" ]; then
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}";
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_ssh_user=${SSH_USER}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
$PLAYBOOK;
fi
# Tests Cases
## Test Master API
- >
ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping between 2 pods
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi
## Idempotency checks 2/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
--limit "all:!fake_hosts"
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
## Idempotency checks 3/5 (reset deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e reset_confirmation=yes
--limit "all:!fake_hosts"
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
reset.yml;
fi
## Idempotency checks 4/5 (redeploy after reset)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
-e ansible_python_interpreter=${PYPATH}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
after_script:
- cd tests && make delete-${CI_PLATFORM} -s ; cd -
.gce: &gce
<<: *testcases
variables:
<<: *gce_variables
.do: &do
variables:
<<: *do_variables
<<: *testcases
- >
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.coreos_calico_sep_variables: &coreos_calico_sep_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
CLOUD_REGION: us-west1-b
CLUSTER_MODE: separate
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
# stage: deploy-part1
UPGRADE_TEST: "graceful"
.debian8_canal_ha_variables: &debian8_canal_ha_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: debian-8-kubespray
CLOUD_REGION: us-east1-b
CLUSTER_MODE: ha
.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-part1
UPGRADE_TEST: "graceful"
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.coreos_cilium_variables: &coreos_cilium_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: rhel-7
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: default
.centos7_flannel_addons_variables: &centos7_flannel_addons_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
.centos7_flannel_variables: &centos7_flannel_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: centos-7
CLOUD_REGION: us-west1-a
CLUSTER_MODE: default
.debian8_calico_variables: &debian8_calico_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: debian-8-kubespray
CLOUD_REGION: us-central1-b
CLUSTER_MODE: default
.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
CLOUD_REGION: us-east1-b
CLUSTER_MODE: default
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
IDEMPOT_CHECK: "true"
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: rhel-7
CLOUD_REGION: us-east1-b
CLUSTER_MODE: separate
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
IDEMPOT_CHECK: "false"
.centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: centos-7
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha
IDEMPOT_CHECK: "true"
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: coreos-alpha
CLOUD_REGION: us-west1-a
CLUSTER_MODE: ha
BOOTSTRAP_OS: coreos
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
.ubuntu_flannel_variables: &ubuntu_flannel_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
.opensuse_canal_variables: &opensuse_canal_variables
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
ETCD_DEPLOYMENT: rkt
KUBELET_DEPLOYMENT: rkt
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
### PR JOBS PART1
gce_coreos-calico-aio:
stage: deploy-part1
<<: *job
<<: *gce
variables:
<<: *coreos_calico_aio_variables
<<: *gce_variables
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
### PR JOBS PART2
gce_centos7-flannel-addons:
stage: deploy-part2
coreos-calico-sep:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_flannel_addons_variables
<<: *coreos_calico_sep_variables
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
gce_ubuntu-weave-sep:
stage: deploy-part2
coreos-calico-sep-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_calico_sep_variables
when: on_success
only: ['triggers']
centos7-flannel:
stage: deploy-gce-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_flannel_variables
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
centos7-flannel-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_flannel_variables
when: on_success
only: ['triggers']
ubuntu-weave-sep:
stage: deploy-gce-special
<<: *job
<<: *gce
variables:
@@ -349,40 +348,8 @@ gce_ubuntu-weave-sep:
except: ['triggers']
only: [/^pr-.*$/]
### MANUAL JOBS
gce_coreos-calico-sep-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_calico_aio_variables
when: on_success
only: ['triggers']
gce_ubuntu-canal-ha-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_variables
when: on_success
only: ['triggers']
gce_centos7-flannel-addons-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos7_flannel_addons_variables
when: on_success
only: ['triggers']
gce_ubuntu-weave-sep-triggers:
stage: deploy-part2
ubuntu-weave-sep-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -392,104 +359,29 @@ gce_ubuntu-weave-sep-triggers:
only: ['triggers']
# More builds for PRs/merges (manual) and triggers (auto)
do_ubuntu-canal-ha:
stage: deploy-part2
<<: *job
<<: *do
variables:
<<: *do_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-ha:
stage: deploy-part2
debian8-canal-ha:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_variables
<<: *debian8_canal_ha_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm:
stage: deploy-part2
debian8-canal-ha-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
<<: *debian8_canal_ha_variables
when: on_success
only: ['triggers']
gce_centos-weave-kubeadm:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos_weave_kubeadm_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos-weave-kubeadm-triggers:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos_weave_kubeadm_variables
when: on_success
only: ['triggers']
gce_ubuntu-contiv-sep:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_contiv_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-cilium:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_cilium_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-cilium-sep:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_cilium_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave:
stage: deploy-part2
rhel7-weave:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -499,8 +391,8 @@ gce_rhel7-weave:
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave-triggers:
stage: deploy-part2
rhel7-weave-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -509,8 +401,8 @@ gce_rhel7-weave-triggers:
when: on_success
only: ['triggers']
gce_debian8-calico-upgrade:
stage: deploy-part2
debian8-calico:
stage: deploy-gce-part2
<<: *job
<<: *gce
variables:
@@ -520,8 +412,8 @@ gce_debian8-calico-upgrade:
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_debian8-calico-triggers:
stage: deploy-part2
debian8-calico-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -530,8 +422,8 @@ gce_debian8-calico-triggers:
when: on_success
only: ['triggers']
gce_coreos-canal:
stage: deploy-part2
coreos-canal:
stage: deploy-gce-part2
<<: *job
<<: *gce
variables:
@@ -541,8 +433,8 @@ gce_coreos-canal:
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-canal-triggers:
stage: deploy-part2
coreos-canal-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -551,8 +443,8 @@ gce_coreos-canal-triggers:
when: on_success
only: ['triggers']
gce_rhel7-canal-sep:
stage: deploy-special
rhel7-canal-sep:
stage: deploy-gce-special
<<: *job
<<: *gce
variables:
@@ -562,8 +454,8 @@ gce_rhel7-canal-sep:
except: ['triggers']
only: ['master', /^pr-.*$/,]
gce_rhel7-canal-sep-triggers:
stage: deploy-part2
rhel7-canal-sep-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -572,8 +464,8 @@ gce_rhel7-canal-sep-triggers:
when: on_success
only: ['triggers']
gce_centos7-calico-ha:
stage: deploy-special
centos7-calico-ha:
stage: deploy-gce-special
<<: *job
<<: *gce
variables:
@@ -583,8 +475,8 @@ gce_centos7-calico-ha:
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos7-calico-ha-triggers:
stage: deploy-part2
centos7-calico-ha-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -593,20 +485,9 @@ gce_centos7-calico-ha-triggers:
when: on_success
only: ['triggers']
gce_opensuse-canal:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *opensuse_canal_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
stage: deploy-special
coreos-alpha-weave-ha:
stage: deploy-gce-special
<<: *job
<<: *gce
variables:
@@ -616,8 +497,8 @@ gce_coreos-alpha-weave-ha:
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-rkt-sep:
stage: deploy-part2
ubuntu-rkt-sep:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
@@ -627,28 +508,6 @@ gce_ubuntu-rkt-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-vault-sep:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_vault_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-flannel-sep:
stage: deploy-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_flannel_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# Premoderated with manual actions
ci-authorized:
<<: *job
@@ -658,22 +517,12 @@ ci-authorized:
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
syntax-check:
<<: *job
stage: unit-tests
script:
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
except: ['triggers', 'master']
yamllint:
<<: *job
stage: unit-tests
script:
- yamllint roles
except: ['triggers', 'master']
tox-inventory-builder:

.travis.yml.bak (new file, 161 lines)

@@ -0,0 +1,161 @@
sudo: required
services:
- docker
git:
depth: 5
env:
global:
GCE_USER=travis
SSH_USER=$GCE_USER
TEST_ID=$TRAVIS_JOB_NUMBER
CONTAINER_ENGINE=docker
PRIVATE_KEY=$GCE_PRIVATE_KEY
GS_ACCESS_KEY_ID=$GS_KEY
GS_SECRET_ACCESS_KEY=$GS_SECRET
ANSIBLE_KEEP_REMOTE_FILES=1
CLUSTER_MODE=default
BOOTSTRAP_OS=none
matrix:
# Debian Jessie
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=asia-east1-a
CLUSTER_MODE=ha
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=europe-west1-c
CLUSTER_MODE=default
# Centos 7
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=centos-7
CLOUD_REGION=asia-northeast1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=centos-7
CLOUD_REGION=us-central1-b
CLUSTER_MODE=ha
# Redhat 7
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=rhel-7
CLOUD_REGION=us-east1-c
CLUSTER_MODE=default
# CoreOS stable
#- >-
# KUBE_NETWORK_PLUGIN=weave
# CLOUD_IMAGE=coreos-stable
# CLOUD_REGION=europe-west1-b
# CLUSTER_MODE=ha
# BOOTSTRAP_OS=coreos
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=coreos-stable
CLOUD_REGION=us-west1-b
CLUSTER_MODE=default
BOOTSTRAP_OS=coreos
# Extra cases for separated roles
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=rhel-7
CLOUD_REGION=asia-northeast1-b
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=europe-west1-d
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=coreos-stable
CLOUD_REGION=us-central1-f
CLUSTER_MODE=separate
BOOTSTRAP_OS=coreos
matrix:
allow_failures:
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
before_install:
# Install Ansible.
- pip install --user ansible
- pip install --user netaddr
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
- pip install --user apache-libcloud==0.20.1
- pip install --user boto==2.9.0 -U
# Load cached docker images
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
cache:
- directories:
- $HOME/.cache/pip
- $HOME/.local
- /var/tmp/releases
before_script:
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
- mkdir -p $HOME/.ssh
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
- chmod 400 $HOME/.ssh/id_rsa
- chmod 755 $HOME/.local/bin/ansible-playbook
- $HOME/.local/bin/ansible-playbook --version
- cp tests/ansible.cfg .
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
script:
- >
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_pem_file=${HOME}/.ssh/gce
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
# Create cluster with netchecker app deployed
- >
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e local_release_dir=/var/tmp/releases
-e deploy_netchecker=true
cluster.yml
# Tests Cases
## Test Master API
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping between 2 pods
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
after_script:
- >
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_pem_file=${HOME}/.ssh/gce
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}


@@ -1,16 +0,0 @@
---
extends: default
rules:
braces:
min-spaces-inside: 0
max-spaces-inside: 1
brackets:
min-spaces-inside: 0
max-spaces-inside: 1
indentation:
spaces: 2
indent-sequences: consistent
line-length: disable
new-line-at-end-of-file: disable
truthy: disable


@@ -1,16 +0,0 @@
FROM ubuntu:16.04
RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
apt install -y \
libssl-dev python-dev sshpass apt-transport-https \
ca-certificates curl gnupg2 software-properties-common python-pip
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable" \
&& apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt

README.md (179 lines changed)

@@ -1,160 +1,101 @@
![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
Deploy a Production Ready Kubernetes Cluster
============================================
##Deploy a production ready kubernetes cluster
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
-   Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
- **High available** cluster
- **Composable** (Choice of the network plugin for instance)
- Support most popular **Linux distributions**
- **Continuous integration tests**
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
- **High available** cluster
- **Composable** (Choice of the network plugin for instance)
- Support most popular **Linux distributions**
- **Continuous integration tests**
Quick Start
-----------
To deploy the cluster you can use :
### Ansible
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster
# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
* [Requirements](#requirements)
* [Kargo vs ...](docs/comparisons.md)
* [Getting started](docs/getting-started.md)
* [Ansible inventory and tags](docs/ansible.md)
* [Deployment data variables](docs/vars.md)
* [DNS stack](docs/dns-stack.md)
* [HA mode](docs/ha-mode.md)
* [Network plugins](#network-plugins)
* [Vagrant install](docs/vagrant.md)
* [CoreOS bootstrap](docs/coreos.md)
* [Downloaded artifacts](docs/downloads.md)
* [Cloud providers](docs/cloud.md)
* [OpenStack](docs/openstack.md)
* [AWS](docs/aws.md)
* [Azure](docs/azure.md)
* [Large deployments](docs/large-deployments.md)
* [Upgrades basics](docs/upgrades.md)
* [Roadmap](docs/roadmap.md)
# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all.yml
cat inventory/mycluster/group_vars/k8s-cluster.yml
Supported Linux distributions
===============
# Deploy Kubespray with Ansible Playbook
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
### Vagrant
# Simply running `vagrant up` (for tests purposes)
vagrant up
Documents
---------
- [Requirements](#requirements)
- [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md)
- [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md)
- [DNS stack](docs/dns-stack.md)
- [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md)
- [CoreOS bootstrap](docs/coreos.md)
- [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md)
- [Cloud providers](docs/cloud.md)
- [OpenStack](docs/openstack.md)
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Large deployments](docs/large-deployments.md)
- [Upgrades basics](docs/upgrades.md)
- [Roadmap](docs/roadmap.md)
Supported Linux Distributions
-----------------------------
- **Container Linux by CoreOS**
- **Debian** Jessie, Stretch, Wheezy
- **Ubuntu** 16.04
- **CentOS/RHEL** 7
- **Fedora/CentOS** Atomic
- **openSUSE** Leap 42.3/Tumbleweed
* **Container Linux by CoreOS**
* **Debian** Jessie
* **Ubuntu** 16.04
* **CentOS/RHEL** 7
Note: Upstart/SysV init based OS types are not supported.
Versions of supported components
--------------------------------
- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5
- [etcd](https://github.com/coreos/etcd/releases) v3.2.4
- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
- [contiv](https://github.com/contiv/install/releases) v1.1.7
- [weave](http://weave.works/) v2.2.1
- [docker](https://www.docker.com/) v17.03 (see note)
- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
[weave](http://weave.works/) v1.6.1 <br>
[docker](https://www.docker.com/) v1.12.5 <br>
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
Note: kubernetes doesn't support newer docker versions. Among other things, kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
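A minimal sketch of that pinning approach; the docker-ce package name below is an assumption and may differ per distribution and repository:
# Debian/Ubuntu: hold the installed Docker package at its current version
sudo apt-mark hold docker-ce
# CentOS/RHEL: lock the version with the yum versionlock plugin
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add docker-ce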
Note 2: rkt support as docker alternative is limited to control plane (etcd and
Note: rkt support as docker alternative is limited to control plane (etcd and
kubelet). Docker is still used for Kubernetes cluster workloads and network
plugins' related OS services. Also note, only one of the supported network
plugins can be deployed for a given single cluster.
Requirements
------------
--------------
- **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
that will run Ansible commands**
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- The target servers must have **access to the Internet** in order to pull docker images.
- The target servers are configured to allow **IPv4 forwarding**.
- **Your ssh key must be copied** to all the servers part of your inventory.
- The **firewalls are not managed**: you'll need to implement your own rules the way you used to. In order to avoid any issues during deployment you should disable your firewall.
- If Kubespray is run from a non-root user account, a correct privilege escalation method should be configured on the target servers. Then the `ansible_become` flag or the command parameters `--become` or `-b` should be specified (see the example command after this list).
* The target servers must have **access to the Internet** in order to pull docker images.
* The **firewalls are not managed**: you'll need to implement your own rules the way you used to. In order to avoid any issues during deployment you should disable your firewall.
* The target servers are configured to allow **IPv4 forwarding**.
* **Copy your ssh keys** to all the servers part of your inventory.
* **Ansible v2.2 (or newer) and python-netaddr**
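As referenced above, a minimal sketch of a deployment run with privilege escalation from a non-root account; the inventory path follows the Quick Start above, and <remote-user> is a placeholder:
ansible-playbook -i inventory/mycluster/hosts.ini -u <remote-user> --become --become-user=root cluster.yml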
Network Plugins
---------------
You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
## Network plugins
You can choose between 4 network plugins. (default: `flannel` with vxlan backend)
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
- [calico](docs/calico.md): bgp (layer 3) networking.
* [**calico**](docs/calico.md): bgp (layer 3) networking.
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
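For illustration, the plugin can be selected at deploy time with an extra variable; this is a sketch assuming the sample inventory copied in the Quick Start above:
ansible-playbook -i inventory/mycluster/hosts.ini -e kube_network_plugin=weave cluster.yml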
See also [Network checker](docs/netcheck.md).
Community docs and resources
----------------------------
- [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
Tools and projects on top of Kubespray
--------------------------------------
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
CI Tests
--------
## CI Tests
![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
CI/end-to-end tests sponsored by Google (GCE)
CI/end-to-end tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
See the [test matrix](docs/test_cases.md) for details.


@@ -1,16 +1,16 @@
# Release Process
The Kubespray Project is released on an as-needed basis. The process is as follows:
The Kargo Project is released on an as-needed basis. The process is as follows:
1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [OWNERS](OWNERS) must LGTM this release
2. At least on of the [OWNERS](OWNERS) must LGTM this release
3. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION` (see the sketch after this list)
4. The release issue is closed
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
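A sketch of the tagging step above; the version number and the remote name origin are assumptions, not values taken from this file:
# sign the tag and attach the changelog as the tag message (illustrative version number)
git tag -s v2.1.0 -m "Kubespray v2.1.0: <changelog summary>"
# push the tag to the shared remote (assumed to be named origin)
git push origin v2.1.0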
## Major/minor releases, merge freezes and milestones
* Kubespray does not maintain stable branches for releases. Releases are tags, not
* Kargo does not maintain stable branches for releases. Releases are tags, not
branches, and there are no backports. Therefore, there is no need for merge
freezes as well.
@@ -20,21 +20,24 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
support lifetime, which ends once the milestone closed. Then only a next major
or minor release can be done.
* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
version numbers and other components' arbitrary versions, like etcd or network plugins.
Older or newer versions are not supported and not tested for the given release.
* There are no unstable releases and no APIs, thus Kubespray doesn't follow
* There are no unstable releases and no APIs, thus Kargo doesn't follow
[semver](http://semver.org/). Every version describes only a stable release.
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
playbooks, shall be described in the release notes. Other breaking changes, if any in
the contributed addons or bound versions of Kubernetes and other components, are
considered out of Kubespray scope and are up to the components' teams to deal with and
considered out of Kargo scope and are up to the components' teams to deal with and
document.
* Minor releases can change components' versions, but not the major ``kube_version``.
Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
foo
foo
foo

Vagrantfile (vendored, 95 lines changed)

@@ -3,48 +3,20 @@
require 'fileutils'
Vagrant.require_version ">= 2.0.0"
Vagrant.require_version ">= 1.8.0"
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i
SUPPORTED_OS = {
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
"centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
"opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
}
# Defaults for config options defined in CONFIG
$num_instances = 3
$instance_name_prefix = "k8s"
$vm_gui = false
$vm_memory = 2048
$vm_memory = 1536
$vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$os = "ubuntu"
$network_plugin = "flannel"
# The first three nodes are etcd servers
$etcd_instances = $num_instances
# The first two nodes are kube masters
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances = $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks = false
$kube_node_instances_with_disks_size = "20G"
$kube_node_instances_with_disks_number = 2
$local_release_dir = "/vagrant/temp"
$box = "bento/ubuntu-16.04"
host_vars = {}
@@ -52,9 +24,8 @@ if File.exist?(CONFIG)
require CONFIG
end
$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
# if $inventory has a hosts file use it, otherwise copy over vars etc
# to where vagrant expects dynamic inventory to be.
@@ -63,7 +34,7 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
"provisioners", "ansible")
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
if ! File.exist?(File.join($vagrant_ansible,"inventory"))
FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
FileUtils.ln_s($inventory, $vagrant_ansible)
end
end
@@ -78,14 +49,12 @@ Vagrant.configure("2") do |config|
# always use Vagrants insecure key
config.ssh.insert_key = false
config.vm.box = $box
if SUPPORTED_OS[$os].has_key? :box_url
config.vm.box_url = SUPPORTED_OS[$os][:box_url]
end
config.ssh.username = SUPPORTED_OS[$os][:user]
# plugin conflict
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
end
(1..$num_instances).each do |i|
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
config.vm.hostname = vm_name
@@ -111,48 +80,23 @@ Vagrant.configure("2") do |config|
end
end
config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
$shared_folders.each do |src, dst|
config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
end
config.vm.provider :virtualbox do |vb|
vb.gui = $vm_gui
vb.memory = $vm_memory
vb.cpus = $vm_cpus
end
config.vm.provider :libvirt do |lv|
lv.memory = $vm_memory
end
ip = "#{$subnet}.#{i+100}"
host_vars[vm_name] = {
"ip": ip,
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
"local_release_dir" => $local_release_dir,
"download_run_once": "False",
"kube_network_plugin": $network_plugin
"ip" => ip,
#"access_ip" => ip,
"flannel_interface" => ip,
"flannel_backend_type" => "host-gw",
"local_release_dir" => "/vagrant/temp",
"download_run_once" => "False"
}
config.vm.network :private_network, ip: ip
# Disable swap for each vm
config.vm.provision "shell", inline: "swapoff -a"
if $kube_node_instances_with_disks
# Libvirt
driverletters = ('a'..'z').to_a
config.vm.provider :libvirt do |lv|
# always make /dev/sd{a/b/c} so that CI can ensure that
# virtualbox and libvirt will have the same devices to use for OSDs
(1..$kube_node_instances_with_disks_number).each do |d|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
end
end
end
# Only execute once the Ansible provisioner,
# when all the machines are up and ready.
if i == $num_instances
@@ -161,16 +105,19 @@ Vagrant.configure("2") do |config|
if File.exist?(File.join(File.dirname($inventory), "hosts"))
ansible.inventory_path = $inventory
end
ansible.become = true
ansible.sudo = true
ansible.limit = "all"
ansible.host_key_checking = false
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
ansible.raw_arguments = ["--forks=#{$num_instances}"]
ansible.host_vars = host_vars
#ansible.tags = ['download']
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
"kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
# The first three nodes should be etcd servers
"etcd" => ["#{$instance_name_prefix}-0[1:3]"],
# The first two nodes should be masters
"kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
# all nodes should be kube nodes
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
"k8s-cluster:children" => ["kube-master", "kube-node"],
}
end


@@ -1,6 +1,6 @@
[ssh_connection]
pipelining=True
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
host_key_checking=False
@@ -9,7 +9,3 @@ fact_caching = jsonfile
fact_caching_connection = /tmp
stdout_callback = skippy
library = ./library
callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds


@@ -2,123 +2,64 @@
- hosts: localhost
gather_facts: False
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- bastion-ssh-config
tags: [localhost, bastion]
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
gather_facts: false
vars:
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false
roles:
- { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
- bootstrap-os
tags:
- bootstrap-os
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
vars:
ansible_ssh_pipelining: true
gather_facts: true
pre_tasks:
- name: gather facts from all instances
setup:
delegate_to: "{{item}}"
delegate_facts: True
with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- { role: download, tags: download, skip_downloads: false }
environment: "{{proxy_env}}"
- { role: rkt, tags: rkt, when: "'rkt' in [ etcd_deployment_type, kubelet_deployment_type ]" }
- hosts: etcd:k8s-cluster:vault:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
- hosts: etcd:!k8s-cluster
any_errors_fatal: true
roles:
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
environment: "{{proxy_env}}"
- hosts: etcd
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
- hosts: etcd:k8s-cluster:vault:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
environment: "{{proxy_env}}"
- { role: etcd, tags: etcd }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd }
- { role: kubernetes/node, tags: node }
environment: "{{proxy_env}}"
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/master, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
- { role: network_plugin, tags: network }
- hosts: kube-master[0]
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: kubernetes/master, tags: master }
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: network }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
environment: "{{proxy_env}}"
- hosts: kube-master[0]
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps, tags: apps }


@@ -1,3 +1,59 @@
# Kubernetes Community Code of Conduct
## Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
### Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
opening an issue or contacting one or more of the project maintainers.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/
### Kubernetes Events Code of Conduct
Kubernetes events are working conferences intended for professional networking and collaboration in the
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
with their employer's policies on appropriate workplace behavior.
While at Kubernetes events or related social networking opportunities, attendees should not engage in
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
be especially aware of these concerns.
The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
be engaging in discriminatory or offensive speech or actions.
Please bring any concerns to the immediate attention of Kubernetes event staff.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]()

View File

@@ -1,27 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
}
]
}

View File

@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -1,45 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}

View File

@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env python
import boto3
import os
import argparse
import json
class SearchEC2Tags(object):
def __init__(self):
self.parse_args()
if self.args.list:
self.search_tags()
if self.args.host:
data = {}
print json.dumps(data, indent=2)
def parse_args(self):
##Check if VPC_VISIBILITY is set, if not default to private
if "VPC_VISIBILITY" in os.environ:
self.vpc_visibility = os.environ['VPC_VISIBILITY']
else:
self.vpc_visibility = "private"
##Support --list and --host flags. We largely ignore the host one.
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true', default=False, help='List instances')
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def search_tags(self):
hosts = {}
hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube-master", "kube-node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
region = os.environ['REGION']
ec2 = boto3.resource('ec2', region)
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
if self.vpc_visibility == "public":
hosts[group].append(instance.public_dns_name)
hosts['_meta']['hostvars'][instance.public_dns_name] = {
'ansible_ssh_host': instance.public_ip_address
}
else:
hosts[group].append(instance.private_dns_name)
hosts['_meta']['hostvars'][instance.private_dns_name] = {
'ansible_ssh_host': instance.private_ip_address
}
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
print json.dumps(hosts, sort_keys=True, indent=2)
SearchEC2Tags()
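
For comparison with the script above, a minimal Python 3 sketch of the same tag-based lookup; it assumes boto3 credentials are configured, keeps the `kubespray-role` tag and group names used above, and only reports private addresses for brevity.

```python
#!/usr/bin/env python3
# Illustration only: tag-based EC2 discovery like the script above,
# trimmed to private addresses. Assumes boto3 credentials are configured.
import json
import os

import boto3


def discover(region):
    hosts = {"_meta": {"hostvars": {}}}
    ec2 = boto3.resource("ec2", region_name=region)
    for group in ("kube-master", "kube-node", "etcd"):
        hosts[group] = []
        filters = [
            {"Name": "tag:kubespray-role", "Values": ["*" + group + "*"]},
            {"Name": "instance-state-name", "Values": ["running"]},
        ]
        for instance in ec2.instances.filter(Filters=filters):
            hosts[group].append(instance.private_dns_name)
            hosts["_meta"]["hostvars"][instance.private_dns_name] = {
                "ansible_ssh_host": instance.private_ip_address
            }
    hosts["k8s-cluster"] = {"children": ["kube-master", "kube-node"]}
    return hosts


if __name__ == "__main__":
    print(json.dumps(discover(os.environ.get("REGION", "us-east-1")), indent=2))
```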

View File

@@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
## Status
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself; this has to be done in a later step by yourself (using kubespray, of course).
Resource Group. It will not install Kubernetes itself; this has to be done in a later step by yourself (using kargo, of course).
## Requirements
@@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
## Generating an inventory for kubespray
## Generating an inventory for kargo
After you have applied the templates, you can generate an inventory with this call:
@@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
$ ./generate-inventory.sh <resource_group_name>
```
It will create the file ./inventory which can then be used with kubespray, e.g.:
It will create the file ./inventory which can then be used with kargo, e.g.:
```shell
$ cd kubespray-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all.yml" cluster.yml
$ cd kargo-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
```

View File

@@ -9,18 +9,11 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
ansible-playbook generate-templates.yml
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
else
echo "Azure cli not found"
fi
ansible-playbook generate-templates.yml
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP

View File

@@ -1,19 +0,0 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP

View File

@@ -9,10 +9,6 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
else
ansible-playbook generate-templates.yml
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
fi
ansible-playbook generate-templates.yml
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete

View File

@@ -8,11 +8,5 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
# check if azure cli 2.0 exists else use azure cli 1.0
if az &>/dev/null; then
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
else
echo "Azure cli not found"
fi
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"

View File

@@ -1,5 +0,0 @@
---
- hosts: localhost
gather_facts: False
roles:
- generate-inventory_2

View File

@@ -1,6 +1,5 @@
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
# this name must be globally unique - it will be used as a prefix for azure components
# Due to some Azure limitations, this name must be globally unique
cluster_name: example
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
@@ -18,29 +17,10 @@ minions_os_disk_size: 1000
admin_username: devops
admin_password: changeme
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
# Disable using ssh using password. Change it to false to allow to connect to ssh by password
disablePasswordAuthentication: true
# Azure CIDRs
azure_vnet_cidr: 10.0.0.0/8
azure_admin_cidr: 10.241.2.0/24
azure_masters_cidr: 10.0.4.0/24
azure_minions_cidr: 10.240.0.0/16
# Azure loadbalancer port to use to access your cluster
kube_apiserver_port: 6443
# Azure Networking and storage naming to use with inventory/all.yml
#azure_virtual_network_name: KubeVNET
#azure_subnet_admin_name: ad-subnet
#azure_subnet_masters_name: master-subnet
#azure_subnet_minions_name: minion-subnet
#azure_route_table_name: routetable
#azure_security_group_name: secgroup
# Storage types available are: "Standard_LRS","Premium_LRS"
#azure_storage_account_type: Standard_LRS

View File

@@ -8,4 +8,4 @@
vm_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"

View File

@@ -1,16 +0,0 @@
---
- name: Query Azure VMs IPs
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd
- name: Query Azure VMs Roles
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- set_fact:
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
vm_roles_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"

View File

@@ -1,34 +0,0 @@
{% for vm in vm_ip_list %}
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% else %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% endif %}
{% endfor %}
[kube-master]
{% for vm in vm_roles_list %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[etcd]
{% for vm in vm_roles_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[kube-node]
{% for vm in vm_roles_list %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[k8s-cluster:children]
kube-node
kube-master
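
A rough Python sketch of what the Jinja2 template above renders for the role-based groups, assuming the `az vm list` JSON used by the tasks above; the resource group name is a placeholder.

```python
# Illustration only: group Azure VMs by their "roles" tag into the same
# sections as the inventory template above.
import json
import subprocess


def generate_inventory(resource_group):
    raw = subprocess.check_output(
        ["az", "vm", "list", "-o", "json", "--resource-group", resource_group]
    )
    vms = json.loads(raw.decode())
    sections = {"kube-master": [], "etcd": [], "kube-node": []}
    for vm in vms:
        roles = (vm.get("tags") or {}).get("roles", "")
        for group in sections:
            if group in roles:
                sections[group].append(vm["name"])
    lines = []
    for group in ("kube-master", "etcd", "kube-node"):
        lines += ["[%s]" % group] + sections[group] + [""]
    lines += ["[k8s-cluster:children]", "kube-node", "kube-master"]
    return "\n".join(lines)


if __name__ == "__main__":
    print(generate_inventory("my-resource-group"))
```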

View File

@@ -1,15 +1,15 @@
apiVersion: "2015-06-15"
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
virtualNetworkName: "KubVNET"
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
subnetAdminName: "ad-subnet"
subnetMastersName: "master-subnet"
subnetMinionsName: "minion-subnet"
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
routeTableName: "routetable"
securityGroupName: "secgroup"
nameSuffix: "{{ cluster_name }}"
nameSuffix: "{{cluster_name}}"
availabilitySetMasters: "master-avs"
availabilitySetMinions: "minion-avs"
@@ -33,5 +33,5 @@ imageReference:
imageReferenceJson: "{{imageReference|to_json}}"
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
storageAccountType: "Standard_LRS"

View File

@@ -62,8 +62,8 @@
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
},
"protocol": "tcp",
"frontendPort": "{{kube_apiserver_port}}",
"backendPort": "{{kube_apiserver_port}}",
"frontendPort": 443,
"backendPort": 443,
"enableFloatingIP": false,
"idleTimeoutInMinutes": 5,
"probe": {
@@ -77,7 +77,7 @@
"name": "kube-api",
"properties": {
"protocol": "tcp",
"port": "{{kube_apiserver_port}}",
"port": 443,
"intervalInSeconds": 5,
"numberOfProbes": 2
}
@@ -193,4 +193,4 @@
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}
}

View File

@@ -92,7 +92,7 @@
"description": "Allow secure kube-api",
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "{{kube_apiserver_port}}",
"destinationPortRange": "443",
"sourceAddressPrefix": "Internet",
"destinationAddressPrefix": "*",
"access": "Allow",
@@ -106,4 +106,4 @@
"dependsOn": []
}
]
}
}

View File

@@ -41,7 +41,7 @@ import re
import sys
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
'calico-rr', 'vault']
'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
@@ -54,7 +54,7 @@ def get_var_as_bool(name, default):
# Configurable as shell vars start
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.ini")
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
@@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
# Configurable as shell vars end
class KubesprayInventory(object):
class KargoInventory(object):
def __init__(self, changed_hosts=None, config_file=None):
self.config = configparser.ConfigParser(allow_no_value=True,
@@ -250,7 +250,6 @@ class KubesprayInventory(object):
def set_etcd(self, hosts):
for host in hosts:
self.add_host_to_group('etcd', host)
self.add_host_to_group('vault', host)
def load_file(self, files=None):
'''Directly loads JSON, or YAML file to inventory.'''
@@ -318,7 +317,7 @@ Delete a host by id: inventory.py -node1
Configurable env vars:
DEBUG Enable debug printing. Default: True
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.ini
CONFIG_FILE File to write config to Default: ./inventory.cfg
HOST_PREFIX Host prefix for generated hosts. Default: node
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
@@ -338,7 +337,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
def main(argv=None):
if not argv:
argv = sys.argv[1:]
KubesprayInventory(argv, CONFIG_FILE)
KargoInventory(argv, CONFIG_FILE)
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,3 +1,3 @@
[metadata]
name = kubespray-inventory-builder
name = kargo-inventory-builder
version = 0.1

View File

@@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
sys_mock.exit = mock.Mock()
super(TestInventory, self).setUp()
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
self.inv = inventory.KubesprayInventory()
self.inv = inventory.KargoInventory()
def test_get_ip_from_opts(self):
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"

View File

@@ -1,11 +0,0 @@
# Kubespray on KVM Virtual Machines hypervisor preparation
A simple playbook to ensure your system has the right settings to enable Kubespray
deployment on VMs.
This playbook does not create Virtual Machines, nor does it run Kubespray itself.
### User creation
If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.

View File

@@ -1,3 +0,0 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa

View File

@@ -1,8 +0,0 @@
---
- hosts: localhost
gather_facts: False
become: yes
vars:
- bootstrap_os: none
roles:
- kvm-setup

View File

@@ -1,46 +0,0 @@
---
- name: Upgrade all packages to the latest version (yum)
yum:
name: '*'
state: latest
when: ansible_os_family == "RedHat"
- name: Install required packages
yum:
name: "{{ item }}"
state: latest
with_items:
- bind-utils
- ntp
when: ansible_os_family == "RedHat"
- name: Install required packages
apt:
upgrade: yes
update_cache: yes
cache_valid_time: 3600
name: "{{ item }}"
state: latest
install_recommends: no
with_items:
- dnsutils
- ntp
when: ansible_os_family == "Debian"
- name: Upgrade all packages to the latest version (apt)
shell: apt-get -o \
Dpkg::Options::=--force-confdef -o \
Dpkg::Options::=--force-confold -q -y \
dist-upgrade
environment:
DEBIAN_FRONTEND: noninteractive
when: ansible_os_family == "Debian"
# Create deployment user if required
- include: user.yml
when: k8s_deployment_user is defined
# Set proper sysctl values
- include: sysctl.yml

View File

@@ -1,46 +0,0 @@
---
- name: Load br_netfilter module
modprobe:
name: br_netfilter
state: present
register: br_netfilter
- name: Add br_netfilter into /etc/modules
lineinfile:
dest: /etc/modules
state: present
line: 'br_netfilter'
when: br_netfilter is defined and ansible_os_family == 'Debian'
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
copy:
dest: /etc/modules-load.d/kubespray.conf
content: |-
### This file is managed by Ansible
br-netfilter
owner: root
group: root
mode: 0644
when: br_netfilter is defined
- name: Enable net.ipv4.ip_forward in sysctl
sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
state: present
reload: yes
- name: Set bridge-nf-call-{arptables,iptables} to 0
sysctl:
name: "{{ item }}"
state: present
value: 0
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
reload: yes
with_items:
- net.bridge.bridge-nf-call-arptables
- net.bridge.bridge-nf-call-ip6tables
- net.bridge.bridge-nf-call-iptables
when: br_netfilter is defined

View File

@@ -1,46 +0,0 @@
---
- name: Create user {{ k8s_deployment_user }}
user:
name: "{{ k8s_deployment_user }}"
groups: adm
shell: /bin/bash
- name: Ensure that .ssh exists
file:
path: "/home/{{ k8s_deployment_user }}/.ssh"
state: directory
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
- name: Configure sudo for deployment user
copy:
content: |
%{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
dest: "/etc/sudoers.d/55-k8s-deployment"
owner: root
group: root
mode: 0644
- name: Write private SSH key
copy:
src: "{{ k8s_deployment_user_pkey_path }}"
dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
mode: 0400
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined
- name: Write public SSH key
shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
> /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
args:
creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
when: k8s_deployment_user_pkey_path is defined
- name: Fix ssh-pub-key permissions
file:
path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
mode: 0600
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined

View File

@@ -1,4 +1,4 @@
# Deploying a Kubespray Kubernetes Cluster with GlusterFS
# Deploying a Kargo Kubernetes Cluster with GlusterFS
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.
@@ -6,16 +6,16 @@ You can either deploy using Ansible on its own by supplying your own inventory f
In the same directory as this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
Change that file to reflect your local setup (adding more machines or removing them and setting the appropriate IP addresses), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu):
Change that file to reflect your local setup (adding more machines or removing them and setting the appropriate IP addresses), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings in `inventory/group_vars/all.yml` make sense for your deployment. Then change to the kargo root folder and execute (supposing that the machines are all using ubuntu):
```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
@@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
## Using Terraform and Ansible
First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
```
cluster_name = "cluster1"
@@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```
Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
Then, provision your Kubernetes (Kargo) cluster with the following ansible call:
```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
@@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
If you need to destroy the cluster, you can run:
```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```
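
Not part of the upstream docs: a small sanity-check sketch that verifies the inventory file you created above defines the groups this setup expects; the default path is only an example, pass whichever inventory you chose.

```python
# Illustration only: check that the inventory defines the groups the
# GlusterFS playbooks expect before running them.
import sys

REQUIRED_GROUPS = ("kube-master", "kube-node", "etcd", "gfs-cluster")


def missing_groups(inventory_path):
    with open(inventory_path) as f:
        text = f.read()
    return [g for g in REQUIRED_GROUPS if "[%s]" % g not in text]


if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "inventory/sample/k8s_gfs_inventory"
    missing = missing_groups(path)
    if missing:
        sys.exit("missing inventory groups: %s" % ", ".join(missing))
    print("inventory defines all expected groups")
```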

View File

@@ -1,17 +1,8 @@
---
- hosts: gfs-cluster
gather_facts: false
vars:
ansible_ssh_pipelining: false
roles:
- { role: bootstrap-os, tags: bootstrap-os}
- hosts: all
gather_facts: true
- hosts: gfs-cluster
vars:
ansible_ssh_pipelining: true
roles:
- { role: glusterfs/server }
@@ -21,5 +12,6 @@
- hosts: kube-master[0]
roles:
- { role: kubernetes-pv/lib }
- { role: kubernetes-pv }

View File

@@ -1 +0,0 @@
../../../inventory/group_vars

View File

@@ -1 +0,0 @@
../../../../roles/bootstrap-os

View File

@@ -4,7 +4,6 @@
with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
register: gluster_pv
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

View File

@@ -1,12 +0,0 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "glusterfs"
},
"spec": {
"ports": [
{"port": 1}
]
}
}

View File

@@ -1,62 +0,0 @@
%global srcname kubespray
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
Name: kubespray
Version: master
Release: %(git describe | sed -r 's/v(\S+-?)-(\S+)-(\S+)/\1.dev\2+\3/')
Summary: Ansible modules for installing Kubernetes
Group: System Environment/Libraries
License: ASL 2.0
Url: https://github.com/kubernetes-incubator/kubespray
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
BuildArch: noarch
BuildRequires: git
BuildRequires: python2
BuildRequires: python2-devel
BuildRequires: python2-setuptools
BuildRequires: python-d2to1
BuildRequires: python2-pbr
Requires: ansible >= 2.4.0
Requires: python-jinja2 >= 2.10
Requires: python-netaddr
Requires: python-pbr
%description
Ansible-kubespray is a set of Ansible modules and playbooks for
installing a Kubernetes cluster. If you have questions, join us
on the https://slack.k8s.io, channel '#kubespray'.
%prep
%autosetup -n %{name}-%{upstream_version} -S git
%build
export PBR_VERSION=%{release}
%{__python2} setup.py build bdist_rpm
%install
export PBR_VERSION=%{release}
export SKIP_PIP_INSTALL=1
%{__python2} setup.py install --skip-build --root %{buildroot} bdist_rpm
%files
%doc %{_docdir}/%{name}/README.md
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
%license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/
%{_datarootdir}/%{name}/playbooks/
%defattr(-,root,root)
%changelog

View File

@@ -1,2 +1,2 @@
*.tfstate*
.terraform
inventory

View File

@@ -0,0 +1,261 @@
variable "deploymentName" {
type = "string"
description = "The desired name of your deployment."
}
variable "numControllers"{
type = "string"
description = "Desired # of controllers."
}
variable "numEtcd" {
type = "string"
description = "Desired # of etcd nodes. Should be an odd number."
}
variable "numNodes" {
type = "string"
description = "Desired # of nodes."
}
variable "volSizeController" {
type = "string"
description = "Volume size for the controllers (GB)."
}
variable "volSizeEtcd" {
type = "string"
description = "Volume size for etcd (GB)."
}
variable "volSizeNodes" {
type = "string"
description = "Volume size for nodes (GB)."
}
variable "subnet" {
type = "string"
description = "The subnet in which to put your cluster."
}
variable "securityGroups" {
type = "string"
description = "The sec. groups in which to put your cluster."
}
variable "ami"{
type = "string"
description = "AMI to use for all VMs in cluster."
}
variable "SSHKey" {
type = "string"
description = "SSH key to use for VMs."
}
variable "master_instance_type" {
type = "string"
description = "Size of VM to use for masters."
}
variable "etcd_instance_type" {
type = "string"
description = "Size of VM to use for etcd."
}
variable "node_instance_type" {
type = "string"
description = "Size of VM to use for nodes."
}
variable "terminate_protect" {
type = "string"
default = "false"
}
variable "awsRegion" {
type = "string"
}
provider "aws" {
region = "${var.awsRegion}"
}
variable "iam_prefix" {
type = "string"
description = "Prefix name for IAM profiles"
}
resource "aws_iam_instance_profile" "kubernetes_master_profile" {
name = "${var.iam_prefix}_kubernetes_master_profile"
roles = ["${aws_iam_role.kubernetes_master_role.name}"]
}
resource "aws_iam_role" "kubernetes_master_role" {
name = "${var.iam_prefix}_kubernetes_master_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_iam_role_policy" "kubernetes_master_policy" {
name = "${var.iam_prefix}_kubernetes_master_policy"
role = "${aws_iam_role.kubernetes_master_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_instance_profile" "kubernetes_node_profile" {
name = "${var.iam_prefix}_kubernetes_node_profile"
roles = ["${aws_iam_role.kubernetes_node_role.name}"]
}
resource "aws_iam_role" "kubernetes_node_role" {
name = "${var.iam_prefix}_kubernetes_node_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_iam_role_policy" "kubernetes_node_policy" {
name = "${var.iam_prefix}_kubernetes_node_policy"
role = "${aws_iam_role.kubernetes_node_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
}
]
}
EOF
}
resource "aws_instance" "master" {
count = "${var.numControllers}"
ami = "${var.ami}"
instance_type = "${var.master_instance_type}"
subnet_id = "${var.subnet}"
vpc_security_group_ids = ["${var.securityGroups}"]
key_name = "${var.SSHKey}"
disable_api_termination = "${var.terminate_protect}"
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
root_block_device {
volume_size = "${var.volSizeController}"
}
tags {
Name = "${var.deploymentName}-master-${count.index + 1}"
}
}
resource "aws_instance" "etcd" {
count = "${var.numEtcd}"
ami = "${var.ami}"
instance_type = "${var.etcd_instance_type}"
subnet_id = "${var.subnet}"
vpc_security_group_ids = ["${var.securityGroups}"]
key_name = "${var.SSHKey}"
disable_api_termination = "${var.terminate_protect}"
root_block_device {
volume_size = "${var.volSizeEtcd}"
}
tags {
Name = "${var.deploymentName}-etcd-${count.index + 1}"
}
}
resource "aws_instance" "minion" {
count = "${var.numNodes}"
ami = "${var.ami}"
instance_type = "${var.node_instance_type}"
subnet_id = "${var.subnet}"
vpc_security_group_ids = ["${var.securityGroups}"]
key_name = "${var.SSHKey}"
disable_api_termination = "${var.terminate_protect}"
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
root_block_device {
volume_size = "${var.volSizeNodes}"
}
tags {
Name = "${var.deploymentName}-minion-${count.index + 1}"
}
}
output "kubernetes_master_profile" {
value = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
}
output "kubernetes_node_profile" {
value = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
}
output "master-ip" {
value = "${join(", ", aws_instance.master.*.private_ip)}"
}
output "etcd-ip" {
value = "${join(", ", aws_instance.etcd.*.private_ip)}"
}
output "minion-ip" {
value = "${join(", ", aws_instance.minion.*.private_ip)}"
}

View File

@@ -0,0 +1,37 @@
variable "SSHUser" {
type = "string"
description = "SSH User for VMs."
}
resource "null_resource" "ansible-provision" {
depends_on = ["aws_instance.master","aws_instance.etcd","aws_instance.minion"]
##Create Master Inventory
provisioner "local-exec" {
command = "echo \"[kube-master]\" > inventory"
}
provisioner "local-exec" {
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory"
}
##Create ETCD Inventory
provisioner "local-exec" {
command = "echo \"\n[etcd]\" >> inventory"
}
provisioner "local-exec" {
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory"
}
##Create Nodes Inventory
provisioner "local-exec" {
command = "echo \"\n[kube-node]\" >> inventory"
}
provisioner "local-exec" {
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory"
}
provisioner "local-exec" {
command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\" >> inventory"
}
}
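
The chain of `local-exec` provisioners above just appends group headers and `formatlist`-rendered host lines to a flat file; an equivalent Python sketch, with illustrative IPs and default user (not part of the Terraform code):

```python
# Illustration only: rebuild the flat inventory the local-exec chain above
# writes, from plain lists of private IPs (example values).
def render_inventory(masters, etcds, nodes, ssh_user="core"):
    def section(name, ips):
        return ["[%s]" % name] + ["%s ansible_ssh_user=%s" % (ip, ssh_user) for ip in ips]

    lines = section("kube-master", masters)
    lines += [""] + section("etcd", etcds)
    lines += [""] + section("kube-node", nodes)
    lines += ["", "[k8s-cluster:children]", "kube-node", "kube-master"]
    return "\n".join(lines) + "\n"


if __name__ == "__main__":
    print(render_inventory(["10.0.1.10"], ["10.0.2.10"], ["10.0.3.10", "10.0.3.11"]))
```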

View File

@@ -2,124 +2,27 @@
**Overview:**
This project will create:
* VPC with Public and Private Subnets in # Availability Zones
* Bastion Hosts and NAT Gateways in the Public Subnet
* A dynamic number of masters, etcd, and worker nodes in the Private Subnet
* even distributed over the # of Availability Zones
* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
- This will create nodes in a VPC inside of AWS
**Requirements**
- Terraform 0.8.7 or newer
- A dynamic number of masters, etcd, and nodes can be created
- These scripts currently expect private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC, or to run these scripts from a VM inside the VPC. We will be looking into how to work around this later.
**How to Use:**
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
- Export the variables for your Amazon credentials:
```
export AWS_ACCESS_KEY_ID="www"
export AWS_SECRET_ACCESS_KEY ="xxx"
export AWS_SSH_KEY_NAME="yyy"
export AWS_DEFAULT_REGION="zzz"
```
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as the base image. If you want to change this behaviour, see the note "Using other distrib than CoreOs" below.
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
Example:
```commandline
terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
export AWS_ACCESS_KEY_ID="xxx"
export AWS_SECRET_ACCESS_KEY="yyy"
```
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Update contrib/terraform/aws/terraform.tfvars with your data
- Ansible will automatically generate an SSH config file for your bastion hosts. To connect to hosts over SSH through the bastion host, use the generated ssh-bastion.conf.
Ansible automatically detects the bastion and adjusts ssh_args accordingly.
```commandline
ssh -F ./ssh-bastion.conf user@$ip
```
- Run with `terraform apply`
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag.
Example (this one assumes you are using CoreOS)
```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
```
***Using other distrib than CoreOs***
If you want to use a distribution other than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' block in variables.tf.
**Future Work:**
For example, to use:
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
data "aws_ami" "distro" {
most_recent = true
filter {
name = "name"
values = ["debian-jessie-amd64-hvm-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["379101102735"]
}
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
data "aws_ami" "distro" {
most_recent = true
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["099720109477"]
}
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
data "aws_ami" "distro" {
most_recent = true
filter {
name = "name"
values = ["dcos-centos7-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["688023202711"]
}
**Troubleshooting**
***Remaining AWS IAM Instance Profile***:
If the cluster was destroyed without using Terraform it is possible that
the AWS IAM Instance Profiles still remain. To delete them you can use
the `AWS CLI` with the following command:
```
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
```
***Ansible Inventory doesn't get created:***
It can happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into that file.
**Architecture**
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
![AWS Infrastructure with Terraform ](docs/aws_kubespray.png)
- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform now; using terraform.py or something similar may make sense in the future.
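
For the troubleshooting case above, the same recovery can be scripted; a sketch that assumes the Terraform state exposes the `inventory` output defined in the Terraform files in this diff, with example paths:

```python
# Illustration only: recover the rendered inventory from `terraform output`
# and write it to inventory/hosts. Paths are examples; assumes the state
# file defines an `inventory` output.
import subprocess


def write_inventory(state_path="contrib/terraform/aws/terraform.tfstate",
                    dest="inventory/hosts"):
    rendered = subprocess.check_output(
        ["terraform", "output", "-state=" + state_path, "inventory"]
    )
    with open(dest, "wb") as f:
        f.write(rendered)


if __name__ == "__main__":
    write_inventory()
```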

View File

@@ -1,191 +0,0 @@
terraform {
required_version = ">= 0.8.7"
}
provider "aws" {
access_key = "${var.AWS_ACCESS_KEY_ID}"
secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
region = "${var.AWS_DEFAULT_REGION}"
}
data "aws_availability_zones" "available" {}
/*
* Calling modules who create the initial AWS VPC / AWS ELB
* and AWS IAM Roles for Kubernetes Deployment
*/
module "aws-vpc" {
source = "modules/vpc"
aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
default_tags="${var.default_tags}"
}
module "aws-elb" {
source = "modules/elb"
aws_cluster_name="${var.aws_cluster_name}"
aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
aws_elb_api_port = "${var.aws_elb_api_port}"
k8s_secure_api_port = "${var.k8s_secure_api_port}"
default_tags="${var.default_tags}"
}
module "aws-iam" {
source = "modules/iam"
aws_cluster_name="${var.aws_cluster_name}"
}
/*
* Create Bastion Instances in AWS
*
*/
resource "aws_instance" "bastion-server" {
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_bastion_size}"
count = "${length(var.aws_cidr_subnets_public)}"
associate_public_ip_address = true
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
"Cluster", "${var.aws_cluster_name}",
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
))}"
}
/*
* Create K8s Master and worker nodes and etcd instances
*
*/
resource "aws_instance" "k8s-master" {
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_kube_master_size}"
count = "${var.aws_kube_master_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
iam_instance_profile = "${module.aws-iam.kube-master-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "master"
))}"
}
resource "aws_elb_attachment" "attach_master_nodes" {
count = "${var.aws_kube_master_num}"
elb = "${module.aws-elb.aws_elb_api_id}"
instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
}
resource "aws_instance" "k8s-etcd" {
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_etcd_size}"
count = "${var.aws_etcd_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "etcd"
))}"
}
resource "aws_instance" "k8s-worker" {
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_kube_worker_size}"
count = "${var.aws_kube_worker_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "worker"
))}"
}
/*
* Create Kubespray Inventory File
*
*/
data "template_file" "inventory" {
template = "${file("${path.module}/templates/inventory.tpl")}"
vars {
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
}
}
resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
}
triggers {
template = "${data.template_file.inventory.rendered}"
}
}

View File

@@ -1,8 +0,0 @@
#AWS Access Key
AWS_ACCESS_KEY_ID = ""
#AWS Secret Key
AWS_SECRET_ACCESS_KEY = ""
#EC2 SSH Key Name
AWS_SSH_KEY_NAME = ""
#AWS Region
AWS_DEFAULT_REGION = "eu-central-1"

Binary file not shown (image, 114 KiB, removed)

View File

@@ -1,58 +0,0 @@
resource "aws_security_group" "aws-elb" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
vpc_id = "${var.aws_vpc_id}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
))}"
}
resource "aws_security_group_rule" "aws-allow-api-access" {
type = "ingress"
from_port = "${var.aws_elb_api_port}"
to_port = "${var.k8s_secure_api_port}"
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.aws-elb.id}"
}
resource "aws_security_group_rule" "aws-allow-api-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.aws-elb.id}"
}
# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = ["${var.aws_subnet_ids_public}"]
security_groups = ["${aws_security_group.aws-elb.id}"]
listener {
instance_port = "${var.k8s_secure_api_port}"
instance_protocol = "tcp"
lb_port = "${var.aws_elb_api_port}"
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "TCP:${var.k8s_secure_api_port}"
interval = 30
}
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
))}"
}

View File

@@ -1,7 +0,0 @@
output "aws_elb_api_id" {
value = "${aws_elb.aws-elb-api.id}"
}
output "aws_elb_api_fqdn" {
value = "${aws_elb.aws-elb-api.dns_name}"
}

View File

@@ -1,33 +0,0 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_vpc_id" {
description = "AWS VPC ID"
}
variable "aws_elb_api_port" {
description = "Port for AWS ELB"
}
variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "aws_avail_zones" {
description = "Availability Zones Used"
type = "list"
}
variable "aws_subnet_ids_public" {
description = "IDs of Public Subnets"
type = "list"
}
variable "default_tags" {
description = "Tags for all resources"
type = "map"
}

View File

@@ -1,138 +0,0 @@
#Add AWS Roles for Kubernetes
resource "aws_iam_role" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
#Add AWS Policies for Kubernetes
resource "aws_iam_role_policy" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master"
role = "${aws_iam_role.kube-master.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
}
]
}
EOF
}
resource "aws_iam_role_policy" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
role = "${aws_iam_role.kube-worker.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}
EOF
}
#Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube-master" {
name = "kube_${var.aws_cluster_name}_master_profile"
role = "${aws_iam_role.kube-master.name}"
}
resource "aws_iam_instance_profile" "kube-worker" {
name = "kube_${var.aws_cluster_name}_node_profile"
role = "${aws_iam_role.kube-worker.name}"
}

View File

@@ -1,7 +0,0 @@
output "kube-master-profile" {
value = "${aws_iam_instance_profile.kube-master.name }"
}
output "kube-worker-profile" {
value = "${aws_iam_instance_profile.kube-worker.name }"
}

View File

@@ -1,3 +0,0 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}

View File

@@ -1,142 +0,0 @@
resource "aws_vpc" "cluster-vpc" {
cidr_block = "${var.aws_vpc_cidr_block}"
#DNS Related Entries
enable_dns_support = true
enable_dns_hostnames = true
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
))}"
}
resource "aws_eip" "cluster-nat-eip" {
count = "${length(var.aws_cidr_subnets_public)}"
vpc = true
}
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
))}"
}
resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
count="${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
))}"
}
resource "aws_nat_gateway" "cluster-nat-gateway" {
count = "${length(var.aws_cidr_subnets_public)}"
allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
}
resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
count="${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
))}"
}
#Routing in VPC
#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
resource "aws_route_table" "kubernetes-public" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
}
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
))}"
}
resource "aws_route_table" "kubernetes-private" {
count = "${length(var.aws_cidr_subnets_private)}"
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
}
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
))}"
}
resource "aws_route_table_association" "kubernetes-public" {
count = "${length(var.aws_cidr_subnets_public)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
route_table_id = "${aws_route_table.kubernetes-public.id}"
}
resource "aws_route_table_association" "kubernetes-private" {
count = "${length(var.aws_cidr_subnets_private)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
}
#Kubernetes Security Groups
resource "aws_security_group" "kubernetes" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
vpc_id = "${aws_vpc.cluster-vpc.id}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
))}"
}
resource "aws_security_group_rule" "allow-all-ingress" {
type = "ingress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks= ["${var.aws_vpc_cidr_block}"]
security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-all-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-ssh-connections" {
type = "ingress"
from_port = 22
to_port = 22
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.kubernetes.id}"
}

View File

@@ -1,21 +0,0 @@
output "aws_vpc_id" {
value = "${aws_vpc.cluster-vpc.id}"
}
output "aws_subnet_ids_private" {
value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
}
output "aws_subnet_ids_public" {
value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
}
output "aws_security_group" {
value = ["${aws_security_group.kubernetes.*.id}"]
}
output "default_tags" {
value = "${var.default_tags}"
}

View File

@@ -1,29 +0,0 @@
variable "aws_vpc_cidr_block" {
description = "CIDR Blocks for AWS VPC"
}
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_avail_zones" {
description = "AWS Availability Zones Used"
type = "list"
}
variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability zones"
type = "list"
}
variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability zones"
type = "list"
}
variable "default_tags" {
description = "Default tags for all resources"
type = "map"
}

View File

@@ -1,28 +0,0 @@
output "bastion_ip" {
value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
}
output "masters" {
value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
}
output "workers" {
value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
}
output "etcd" {
value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
}
output "aws_elb_api_fqdn" {
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
}
output "inventory" {
value = "${data.template_file.inventory.rendered}"
}
output "default_tags" {
value = "${var.default_tags}"
}

View File

@@ -1,27 +0,0 @@
[all]
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}
${public_ip_address_bastion}
[kube-master]
${list_master}
[kube-node]
${list_node}
[etcd]
${list_etcd}
[k8s-cluster:children]
kube-node
kube-master
[k8s-cluster:vars]
${elb_api_fqdn}

View File

@@ -1,33 +1,22 @@
#Global Vars
aws_cluster_name = "devtest"
deploymentName="test-kube-deploy"
#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
numControllers="2"
numEtcd="3"
numNodes="2"
#Bastion Host
aws_bastion_size = "t2.medium"
volSizeController="20"
volSizeEtcd="20"
volSizeNodes="20"
awsRegion="us-west-2"
subnet="subnet-xxxxx"
ami="ami-32a85152"
securityGroups="sg-xxxxx"
SSHUser="core"
SSHKey="my-key"
#Kubernetes Cluster
master_instance_type="m3.xlarge"
etcd_instance_type="m3.xlarge"
node_instance_type="m3.xlarge"
aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
#Settings AWS ELB
aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"
default_tags = {
# Env = "devtest"
# Product = "kubernetes"
}
terminate_protect="false"

View File

@@ -1,32 +0,0 @@
#Global Vars
aws_cluster_name = "devtest"
#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
#Bastion Host
aws_bastion_ami = "ami-5900cc36"
aws_bastion_size = "t2.small"
#Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
aws_cluster_ami = "ami-903df7ff"
#Settings AWS ELB
aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"

View File

@@ -1,105 +0,0 @@
variable "AWS_ACCESS_KEY_ID" {
description = "AWS Access Key"
}
variable "AWS_SECRET_ACCESS_KEY" {
description = "AWS Secret Key"
}
variable "AWS_SSH_KEY_NAME" {
description = "Name of the SSH keypair to use in AWS."
}
variable "AWS_DEFAULT_REGION" {
description = "AWS Region"
}
//General Cluster Settings
variable "aws_cluster_name" {
description = "Name of AWS Cluster"
}
data "aws_ami" "distro" {
most_recent = true
filter {
name = "name"
values = ["CoreOS-stable-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["595879546273"] #CoreOS
}
//AWS VPC Variables
variable "aws_vpc_cidr_block" {
description = "CIDR Block for VPC"
}
variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability Zones"
type = "list"
}
variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability Zones"
type = "list"
}
//AWS EC2 Settings
variable "aws_bastion_size" {
description = "EC2 Instance Size of Bastion Host"
}
/*
* AWS EC2 Settings
* The number should be divisible by the number of AWS Availability
* Zones used, without a remainder.
*/
variable "aws_kube_master_num" {
description = "Number of Kubernetes Master Nodes"
}
variable "aws_kube_master_size" {
description = "Instance size of Kube Master Nodes"
}
variable "aws_etcd_num" {
description = "Number of etcd Nodes"
}
variable "aws_etcd_size" {
description = "Instance size of etcd Nodes"
}
variable "aws_kube_worker_num" {
description = "Number of Kubernetes Worker Nodes"
}
variable "aws_kube_worker_size" {
description = "Instance size of Kubernetes Worker Nodes"
}
/*
* AWS ELB Settings
*
*/
variable "aws_elb_api_port" {
description = "Port for AWS ELB"
}
variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "default_tags" {
description = "Default tags for all resources"
type = "map"
}

View File

@@ -1 +0,0 @@
../../inventory/group_vars

View File

@@ -1,4 +0,0 @@
.terraform
*.tfvars
*.tfstate
*.tfstate.backup

View File

@@ -5,287 +5,53 @@ Openstack.
## Status
This will install a Kubernetes cluster on an Openstack Cloud. It should work on
most modern installs of OpenStack that support the basic services.
This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on an
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
services.
## Approach
The terraform configuration inspects variables found in
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
There is a [python script](../terraform.py) that reads the generated `.tfstate`
file to generate a dynamic inventory that is consumed by the main ansible script
to actually install kubernetes and stand up the cluster.
There are some assumptions made to try and ensure it will work on your openstack cluster.
### Networking
The configuration includes creating a private subnet with a router to the
external net. It will allocate floating IPs from a pool and assign them to the
hosts where that makes sense. You have the option of creating bastion hosts
inside the private subnet to access the nodes there. Alternatively, a node with
a floating IP can be used as a jump host to nodes without.
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least one floating IP, which we would suggest is used on a master.
* you already have a suitable OS image in glance
* you already have both an internal network and a floating-ip pool created
* you have security-groups enabled
### Kubernetes Nodes
You can create many different kubernetes topologies by setting the number of
different classes of hosts. For each class there are options for allocating
floating IP addresses or not.
- Master nodes with etcd
- Master nodes without etcd
- Standalone etcd hosts
- Kubernetes worker nodes
Note that the Ansible script will report an invalid configuration if you wind up
with an even number of etcd instances, since that is not a supported configuration.
### GlusterFS
The Terraform configuration supports provisioning of an optional GlusterFS
shared file system based on a separate set of VMs. To enable this, you need to
specify:
- the number of Gluster hosts (minimum 2)
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
- Other properties related to provisioning the hosts
Even if you are using Container Linux by CoreOS for your cluster, you will still
need the GlusterFS VMs to be based on either Debian or RedHat based images.
Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through
binaries available on hyperkube v1.4.3_coreos.0 or higher.
## Requirements
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
- you already have a suitable OS image in Glance
- you already have a floating IP pool created
- you have security groups enabled
- you have a pair of keys generated that can be used to secure the new hosts
## Module Architecture
The configuration is divided into three modules:
- Network
- IPs
- Compute
The main reason for splitting the configuration up in this way is to easily
accommodate situations where floating IPs are limited by a quota or if you have
any external references to the floating IP (e.g. DNS) that would otherwise have
to be updated.
You can force your existing IPs by modifying the compute variables in
`kubespray.tf` as follows:
```
k8s_master_fips = ["151.101.129.67"]
k8s_node_fips = ["151.101.129.68"]
```
## Terraform
Terraform will be used to provision all of the OpenStack resources with base software as appropriate.
### Configuration
Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
requirements.
#### Inventory files
### Prep
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
#### OpenStack
```ShellSession
$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
$ cd inventory/$CLUSTER
$ ln -s ../../contrib/terraform/openstack/hosts
```
This will be the base for subsequent Terraform commands.
#### OpenStack access and credentials
No provider variables are hardcoded inside `variables.tf` because Terraform
supports various authentication methods for OpenStack: the older script and
environment method (using `openrc`) as well as a newer declarative method, and
different OpenStack environments may support Identity API version 2 or 3.
These are examples and may vary depending on your OpenStack cloud provider,
for an exhaustive list on how to authenticate on OpenStack with Terraform
please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/).
##### Declarative method (recommended)
The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in:
* the current directory
* `~/.config/openstack`
* `/etc/openstack`
`clouds.yaml`:
Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
```
clouds:
mycloud:
auth:
auth_url: https://openstack:5000/v3
username: "username"
project_name: "projectname"
project_id: projectid
user_domain_name: "Default"
password: "password"
region_name: "RegionOne"
interface: "public"
identity_api_version: 3
$ source ~/.stackrc
```
If you have multiple clouds defined in your `clouds.yaml` file you can choose
the one you want to use with the environment variable `OS_CLOUD`:
You will need two networks before installing, an internal network and
an external (floating IP pool) network. The internal network can be shared, as
we use security groups to provide network segregation. Due to the many
differences between OpenStack installs, Terraform does not attempt to create
these for you.
```
export OS_CLOUD=mycloud
```
By default Terraform will expect that your networks are called `internal` and
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables (see the sketch below).
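A minimal sketch of the environment-variable route (Terraform reads any variable exported as `TF_VAR_<name>`; the values here are only examples):

```ShellSession
$ export TF_VAR_network_name=my-internal-net
$ export TF_VAR_floatingip_pool=my-external-pool
```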
##### Openrc method (deprecated)
A full list of variables you can change can be found at [variables.tf](variables.tf).
When using classic environment variables, Terraform uses default `OS_*`
environment variables. A script suitable for your environment may be available
from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*.
All OpenStack resources will use the Terraform variable `cluster_name` (
default `example`) in their name to make it easier to track. For example the
first compute resource will be named `example-kubernetes-1`.
With identity v2:
```
source openrc
env | grep OS
OS_AUTH_URL=https://openstack:5000/v2.0
OS_PROJECT_ID=projectid
OS_PROJECT_NAME=projectname
OS_USERNAME=username
OS_PASSWORD=password
OS_REGION_NAME=RegionOne
OS_INTERFACE=public
OS_IDENTITY_API_VERSION=2
```
With identity v3:
```
source openrc
env | grep OS
OS_AUTH_URL=https://openstack:5000/v3
OS_PROJECT_ID=projectid
OS_PROJECT_NAME=username
OS_PROJECT_DOMAIN_ID=default
OS_USERNAME=username
OS_PASSWORD=password
OS_REGION_NAME=RegionOne
OS_INTERFACE=public
OS_IDENTITY_API_VERSION=3
OS_USER_DOMAIN_NAME=Default
```
Terraform does not support a mix of DomainName and DomainID, choose one or the
other:
```
* provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
```
```
unset OS_USER_DOMAIN_NAME
export OS_USER_DOMAIN_ID=default
or
unset OS_PROJECT_DOMAIN_ID
set OS_PROJECT_DOMAIN_NAME=Default
```
#### Cluster variables
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).
For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|Variable | Description |
|---------|-------------|
|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example the first compute resource will be named `example-kubernetes-1`. |
|`network_name` | The name to be given to the internal network that will be generated |
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your OpenStack installation; you can get available flavor IDs through `nova flavor-list` |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses|
|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses |
|`number_of_etcd` | Number of pure etcd nodes |
|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating ip addresses. |
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
#### Terraform state files
In the cluster's inventory folder, the following files might be created (either by Terraform
or manually). To prevent you from pushing them accidentally, they are listed in a
`.gitignore` file in the `terraform/openstack` directory:
* `.terraform`
* `.tfvars`
* `.tfstate`
* `.tfstate.backup`
You can still add them manually if you want to.
### Initialization
Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:
```ShellSession
$ cd inventory/$CLUSTER
$ terraform init ../../contrib/terraform/openstack
```
This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
### Provisioning cluster
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
```
If you chose to create a bastion host, this script will create
`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
be able to access your machines tunneling through the bastion's IP address. If
you want to manually handle the ssh tunneling to these machines, please delete
or move that file. If you want to use this, just leave it there, as Ansible will
pick it up automatically.
### Destroying cluster
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession
$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup (a combined example follows this list):
* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
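A combined cleanup might look like the following sketch (the node address is a placeholder for each destroyed host):

```ShellSession
$ ssh-keygen -R <node-floating-ip>   # drop the stale host key from ~/.ssh/known_hosts
$ rm /tmp/$CLUSTER-*                 # remove temporary cache files
```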
### Debugging
You can enable debugging output from Terraform by setting
`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the Terraform command.
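For example, a debugging run from the cluster's inventory directory might look like this (same layout as the commands above):

```ShellSession
$ export OS_DEBUG=1
$ export TF_LOG=DEBUG
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
```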
### Terraform output
Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment (an example of reading them follows the list):
- `private_subnet_id`: the subnet where your instances are running; used for `openstack_lbaas_subnet_id`
- `floating_network_id`: the network ID where the floating IPs are provisioned; used for `openstack_lbaas_floating_network_id`
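A quick way to read these values after provisioning is `terraform output`, run from the cluster's inventory directory (a sketch, assuming the layout above):

```ShellSession
$ cd inventory/$CLUSTER
$ terraform output private_subnet_id
$ terraform output floating_network_id
```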
## Ansible
### Node access
#### SSH
#### Terraform
Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:
@@ -295,22 +61,77 @@ $ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
```
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
#### Bastion host
If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
Ensure that you have your Openstack credentials loaded into Terraform
environment variables. Likely via a command similar to:
```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
$ echo Setting up Terraform creds && \
export TF_VAR_username=${OS_USERNAME} && \
export TF_VAR_password=${OS_PASSWORD} && \
export TF_VAR_tenant=${OS_TENANT_NAME} && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```
#### Test access
Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
If you want to provision master or node VMs that don't use floating IPs, write a `my-terraform-vars.tfvars` file, for example:
```
$ ansible -i inventory/$CLUSTER/hosts -m ping all
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "1"
number_of_k8s_nodes = "0"
```
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.
Additionally, the Terraform-based installation now supports provisioning a GlusterFS shared file system on a separate set of VMs running either a Debian or RedHat based image. To enable this, add the following variables to your `my-terraform-vars.tfvars`:
```
# Flavour depends on your openstack installation, you can get available flavours through `nova flavor-list`
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
# This is the name of an image already available in your openstack installation.
image_gfs = "Ubuntu 15.10"
number_of_gfs_nodes_no_floating_ip = "3"
# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
gfs_volume_size_in_gb = "50"
# The user needed for the image chosen for GlusterFS.
ssh_user_gfs = "ubuntu"
```
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs need to be either Debian or RedHat based, since Container Linux by CoreOS cannot serve GlusterFS but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
# Provision a Kubernetes Cluster on OpenStack
If not using a tfvars file for your setup, then execute:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
openstack_compute_secgroup_v2.k8s_master: Creating...
description: "" => "example - Kubernetes Master"
name: "" => "example-k8s-master"
rule.#: "" => "<computed>"
...
...
Apply complete! Resources: 9 added, 0 changed, 0 destroyed.
The state of your infrastructure has been saved to the path
below. This state is required to modify and destroy your
infrastructure, so keep it safe. To inspect the complete state
use the `terraform show` command.
State path: contrib/terraform/openstack/terraform.tfstate
```
Alternatively, if you wrote your terraform variables on a file `my-terraform-vars.tfvars`, your command would look like:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
```
If you choose to add masters or nodes without floating IPs (only internal IPs on your OpenStack tenancy), this script will also create a file `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to be able to access your machines by tunneling through the first floating IP used. If you want to manually handle the ssh tunneling to these machines, please delete or move that file. If you want to use this, just leave it there, as Ansible will pick it up automatically.
Make sure you can connect to the hosts:
```
$ ansible -i contrib/terraform/openstack/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
"changed": false,
"ping": "pong"
@@ -325,113 +146,26 @@ example-k8s-master-1 | SUCCESS => {
}
```
If it fails try to connect manually via SSH. It could be something as simple as a stale host key.
if you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.
### Configure cluster variables
if it fails try to connect manually via SSH ... it could be something as simple as a stale host key.
Edit `inventory/$CLUSTER/group_vars/all.yml`:
- Set variable **bootstrap_os** appropriately for your desired image:
```
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: coreos
```
- **bin_dir**:
```
# Directory where the binaries will be installed
# Default:
# bin_dir: /usr/local/bin
# For Container Linux by CoreOS:
bin_dir: /opt/bin
```
- and **cloud_provider**:
```
cloud_provider: openstack
```
Edit `inventory/$CLUSTER/group_vars/k8s-cluster.yml`:
- Set variable **kube_network_plugin** to your desired networking plugin.
- **flannel** works out-of-the-box
- **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
```
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
```
- Set variable **resolvconf_mode**
```
# Can be docker_dns, host_resolvconf or none
# Default:
# resolvconf_mode: docker_dns
# For Container Linux by CoreOS:
resolvconf_mode: host_resolvconf
```
### Deploy Kubernetes
Deploy kubernetes:
```
$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
```
This will take some time as there are many tasks to run.
# clean up:
## Kubernetes
```
$ terraform destroy
Do you really want to destroy?
Terraform will delete all your managed infrastructure.
There is no undo. Only 'yes' will be accepted to confirm.
### Set up kubectl
1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
2. Add a route to the internal IP of a master node (if needed):
Enter a value: yes
...
...
Apply complete! Resources: 0 added, 0 changed, 12 destroyed.
```
sudo route add [master-internal-ip] gw [router-ip]
```
or
```
sudo route add -net [internal-subnet]/24 gw [router-ip]
```
3. List Kubernetes certificates & keys:
```
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
```
4. Get `admin`'s certificates and keys:
```
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
```
5. Configure kubectl:
```ShellSession
$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
--certificate-authority=ca.pem
$ kubectl config set-credentials default-admin \
--certificate-authority=ca.pem \
--client-key=admin-key.pem \
--client-certificate=admin.pem
$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
$ kubectl config use-context default-system
```
6. Check it:
```
kubectl version
```
If you are using floating ip addresses then you may get this error:
```
Unable to connect to the server: x509: certificate is valid for 10.0.0.6, 10.0.0.6, 10.233.0.1, 127.0.0.1, not 132.249.238.25
```
You can tell kubectl to ignore this condition by adding the
`--insecure-skip-tls-verify` option.
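For example, the check from the previous step can be repeated with the flag added (a workaround sketch, not a recommendation to skip TLS verification in general):

```ShellSession
$ kubectl --insecure-skip-tls-verify version
```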
## GlusterFS
GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
for instructions.
Basically you will install Gluster as
```ShellSession
$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```
## What's next
Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).

View File

@@ -1 +1 @@
ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'

View File

@@ -0,0 +1 @@
../../../../inventory/group_vars/all.yml

View File

@@ -1,77 +1,167 @@
module "network" {
source = "modules/network"
external_net = "${var.external_net}"
network_name = "${var.network_name}"
cluster_name = "${var.cluster_name}"
dns_nameservers = "${var.dns_nameservers}"
resource "openstack_networking_floatingip_v2" "k8s_master" {
count = "${var.number_of_k8s_masters}"
pool = "${var.floatingip_pool}"
}
module "ips" {
source = "modules/ips"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
floatingip_pool = "${var.floatingip_pool}"
number_of_bastions = "${var.number_of_bastions}"
external_net = "${var.external_net}"
network_name = "${var.network_name}"
router_id = "${module.network.router_id}"
resource "openstack_networking_floatingip_v2" "k8s_node" {
count = "${var.number_of_k8s_nodes}"
pool = "${var.floatingip_pool}"
}
module "compute" {
source = "modules/compute"
cluster_name = "${var.cluster_name}"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
number_of_etcd = "${var.number_of_etcd}"
number_of_k8s_masters_no_floating_ip = "${var.number_of_k8s_masters_no_floating_ip}"
number_of_k8s_masters_no_floating_ip_no_etcd = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
number_of_bastions = "${var.number_of_bastions}"
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
public_key_path = "${var.public_key_path}"
image = "${var.image}"
image_gfs = "${var.image_gfs}"
ssh_user = "${var.ssh_user}"
ssh_user_gfs = "${var.ssh_user_gfs}"
flavor_k8s_master = "${var.flavor_k8s_master}"
flavor_k8s_node = "${var.flavor_k8s_node}"
flavor_etcd = "${var.flavor_etcd}"
flavor_gfs_node = "${var.flavor_gfs_node}"
network_name = "${var.network_name}"
flavor_bastion = "${var.flavor_bastion}"
k8s_master_fips = "${module.ips.k8s_master_fips}"
k8s_node_fips = "${module.ips.k8s_node_fips}"
bastion_fips = "${module.ips.bastion_fips}"
network_id = "${module.network.router_id}"
resource "openstack_compute_keypair_v2" "k8s" {
name = "kubernetes-${var.cluster_name}"
public_key = "${file(var.public_key_path)}"
}
output "private_subnet_id" {
value = "${module.network.subnet_id}"
resource "openstack_compute_secgroup_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master"
description = "${var.cluster_name} - Kubernetes Master"
}
output "floating_network_id" {
value = "${var.external_net}"
resource "openstack_compute_secgroup_v2" "k8s" {
name = "${var.cluster_name}-k8s"
description = "${var.cluster_name} - Kubernetes"
rule {
ip_protocol = "tcp"
from_port = "22"
to_port = "22"
cidr = "0.0.0.0/0"
}
rule {
ip_protocol = "icmp"
from_port = "-1"
to_port = "-1"
cidr = "0.0.0.0/0"
}
rule {
ip_protocol = "tcp"
from_port = "1"
to_port = "65535"
self = true
}
rule {
ip_protocol = "udp"
from_port = "1"
to_port = "65535"
self = true
}
rule {
ip_protocol = "icmp"
from_port = "-1"
to_port = "-1"
self = true
}
}
output "router_id" {
value = "${module.network.router_id}"
resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index+1}"
count = "${var.number_of_k8s_masters}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}" ]
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
}
}
output "k8s_master_fips" {
value = "${module.ips.k8s_master_fips}"
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
}
}
output "k8s_node_fips" {
value = "${module.ips.k8s_node_fips}"
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.number_of_k8s_nodes}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster"
}
}
output "bastion_fips" {
value = "${module.ips.bastion_fips}"
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.number_of_k8s_nodes_no_floating_ip}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
}
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
count = "${var.number_of_gfs_nodes_no_floating_ip}"
description = "Non-ephemeral volume for GlusterFS"
size = "${var.gfs_volume_size_in_gb}"
}
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
count = "${var.number_of_gfs_nodes_no_floating_ip}"
image_name = "${var.image_gfs}"
flavor_id = "${var.flavor_gfs_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user_gfs}"
kubespray_groups = "gfs-cluster,network-storage"
}
volume {
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
}
}
#output "msg" {
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
#}

View File

@@ -1,306 +0,0 @@
resource "openstack_compute_keypair_v2" "k8s" {
name = "kubernetes-${var.cluster_name}"
public_key = "${chomp(file(var.public_key_path))}"
}
resource "openstack_compute_secgroup_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master"
description = "${var.cluster_name} - Kubernetes Master"
rule {
ip_protocol = "tcp"
from_port = "6443"
to_port = "6443"
cidr = "0.0.0.0/0"
}
}
resource "openstack_compute_secgroup_v2" "bastion" {
name = "${var.cluster_name}-bastion"
description = "${var.cluster_name} - Bastion Server"
rule {
ip_protocol = "tcp"
from_port = "22"
to_port = "22"
cidr = "0.0.0.0/0"
}
}
resource "openstack_compute_secgroup_v2" "k8s" {
name = "${var.cluster_name}-k8s"
description = "${var.cluster_name} - Kubernetes"
rule {
ip_protocol = "icmp"
from_port = "-1"
to_port = "-1"
cidr = "0.0.0.0/0"
}
rule {
ip_protocol = "tcp"
from_port = "1"
to_port = "65535"
self = true
}
rule {
ip_protocol = "udp"
from_port = "1"
to_port = "65535"
self = true
}
rule {
ip_protocol = "icmp"
from_port = "-1"
to_port = "-1"
self = true
}
}
resource "openstack_compute_instance_v2" "bastion" {
name = "${var.cluster_name}-bastion-${count.index+1}"
count = "${var.number_of_bastions}"
image_name = "${var.image}"
flavor_id = "${var.flavor_bastion}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "bastion"
depends_on = "${var.network_id}"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
}
}
resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index+1}"
count = "${var.number_of_k8s_masters}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
"${openstack_compute_secgroup_v2.k8s.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
count = "${var.number_of_k8s_masters_no_etcd}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,k8s-cluster,vault"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index+1}"
count = "${var.number_of_etcd}"
image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}"]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,vault,no-floating"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.number_of_k8s_nodes}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.number_of_k8s_nodes_no_floating_ip}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,no-floating"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_floatingip_associate_v2" "bastion" {
count = "${var.number_of_bastions}"
floating_ip = "${var.bastion_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
count = "${var.number_of_k8s_masters}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
floating_ip = "${var.k8s_master_fips[count.index]}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
count = "${var.number_of_k8s_nodes}"
floating_ip = "${var.k8s_node_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
count = "${var.number_of_gfs_nodes_no_floating_ip}"
description = "Non-ephemeral volume for GlusterFS"
size = "${var.gfs_volume_size_in_gb}"
}
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
count = "${var.number_of_gfs_nodes_no_floating_ip}"
image_name = "${var.image_gfs}"
flavor_id = "${var.flavor_gfs_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user_gfs}"
kubespray_groups = "gfs-cluster,network-storage,no-floating"
depends_on = "${var.network_id}"
}
}
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
count = "${var.number_of_gfs_nodes_no_floating_ip}"
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
}

View File

@@ -1,57 +0,0 @@
variable "cluster_name" {}
variable "number_of_k8s_masters" {}
variable "number_of_k8s_masters_no_etcd" {}
variable "number_of_etcd" {}
variable "number_of_k8s_masters_no_floating_ip" {}
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {}
variable "number_of_k8s_nodes" {}
variable "number_of_k8s_nodes_no_floating_ip" {}
variable "number_of_bastions" {}
variable "number_of_gfs_nodes_no_floating_ip" {}
variable "gfs_volume_size_in_gb" {}
variable "public_key_path" {}
variable "image" {}
variable "image_gfs" {}
variable "ssh_user" {}
variable "ssh_user_gfs" {}
variable "flavor_k8s_master" {}
variable "flavor_k8s_node" {}
variable "flavor_etcd" {}
variable "flavor_gfs_node" {}
variable "network_name" {}
variable "flavor_bastion" {}
variable "network_id" {}
variable "k8s_master_fips" {
type = "list"
}
variable "k8s_node_fips" {
type = "list"
}
variable "bastion_fips" {
type = "list"
}

View File

@@ -1,23 +0,0 @@
resource "null_resource" "dummy_dependency" {
triggers {
dependency_id = "${var.router_id}"
}
}
resource "openstack_networking_floatingip_v2" "k8s_master" {
count = "${var.number_of_k8s_masters}"
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}
resource "openstack_networking_floatingip_v2" "k8s_node" {
count = "${var.number_of_k8s_nodes}"
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}
resource "openstack_networking_floatingip_v2" "bastion" {
count = "${var.number_of_bastions}"
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}

View File

@@ -1,11 +0,0 @@
output "k8s_master_fips" {
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
}
output "k8s_node_fips" {
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
}
output "bastion_fips" {
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
}

View File

@@ -1,15 +0,0 @@
variable "number_of_k8s_masters" {}
variable "number_of_k8s_masters_no_etcd" {}
variable "number_of_k8s_nodes" {}
variable "floatingip_pool" {}
variable "number_of_bastions" {}
variable "external_net" {}
variable "network_name" {}
variable "router_id" {}

View File

@@ -1,23 +0,0 @@
resource "openstack_networking_router_v2" "k8s" {
name = "${var.cluster_name}-router"
admin_state_up = "true"
external_network_id = "${var.external_net}"
}
resource "openstack_networking_network_v2" "k8s" {
name = "${var.network_name}"
admin_state_up = "true"
}
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
network_id = "${openstack_networking_network_v2.k8s.id}"
cidr = "10.0.0.0/24"
ip_version = 4
dns_nameservers = "${var.dns_nameservers}"
}
resource "openstack_networking_router_interface_v2" "k8s" {
router_id = "${openstack_networking_router_v2.k8s.id}"
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
}

View File

@@ -1,7 +0,0 @@
output "router_id" {
value = "${openstack_networking_router_interface_v2.k8s.id}"
}
output "subnet_id" {
value = "${openstack_networking_subnet_v2.k8s.id}"
}

View File

@@ -1,9 +0,0 @@
variable "external_net" {}
variable "network_name" {}
variable "cluster_name" {}
variable "dns_nameservers" {
type = "list"
}

View File

@@ -1,45 +0,0 @@
# your Kubernetes cluster name here
cluster_name = "i-didnt-read-the-docs"
# SSH key to use for access to nodes
public_key_path = "~/.ssh/id_rsa.pub"
# image to use for bastion, masters, standalone etcd instances, and nodes
image = "<image name>"
# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.)
ssh_user = "<cloud-provisioned user>"
# 0|1 bastion nodes
number_of_bastions = 0
#flavor_bastion = "<UUID>"
# standalone etcds
number_of_etcd = 0
# masters
number_of_k8s_masters = 1
number_of_k8s_masters_no_etcd = 0
number_of_k8s_masters_no_floating_ip = 0
number_of_k8s_masters_no_floating_ip_no_etcd = 0
flavor_k8s_master = "<UUID>"
# nodes
number_of_k8s_nodes = 2
number_of_k8s_nodes_no_floating_ip = 4
#flavor_k8s_node = "<UUID>"
# GlusterFS
# either 0 or more than one
#number_of_gfs_nodes_no_floating_ip = 0
#gfs_volume_size_in_gb = 150
# Container Linux does not support GlusterFS
#image_gfs = "<image name>"
# May be different from other nodes
#ssh_user_gfs = "ubuntu"
#flavor_gfs_node = "<UUID>"
# networking
network_name = "<network>"
external_net = "<UUID>"
floatingip_pool = "<pool>"

View File

@@ -1 +0,0 @@
../../../../inventory/sample/group_vars

View File

@@ -2,30 +2,14 @@ variable "cluster_name" {
default = "example"
}
variable "number_of_bastions" {
default = 1
}
variable "number_of_k8s_masters" {
default = 2
}
variable "number_of_k8s_masters_no_etcd" {
default = 2
}
variable "number_of_etcd" {
default = 2
}
variable "number_of_k8s_masters_no_floating_ip" {
default = 2
}
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {
default = 2
}
variable "number_of_k8s_nodes" {
default = 1
}
@@ -44,70 +28,63 @@ variable "gfs_volume_size_in_gb" {
variable "public_key_path" {
description = "The path of the ssh pub key"
default = "~/.ssh/id_rsa.pub"
default = "~/.ssh/id_rsa.pub"
}
variable "image" {
description = "the image to use"
default = "ubuntu-14.04"
default = "ubuntu-14.04"
}
variable "image_gfs" {
description = "Glance image to use for GlusterFS"
default = "ubuntu-16.04"
default = "ubuntu-16.04"
}
variable "ssh_user" {
description = "used to fill out tags for ansible inventory"
default = "ubuntu"
default = "ubuntu"
}
variable "ssh_user_gfs" {
description = "used to fill out tags for ansible inventory"
default = "ubuntu"
}
variable "flavor_bastion" {
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
default = 3
default = "ubuntu"
}
variable "flavor_k8s_master" {
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
default = 3
default = 3
}
variable "flavor_k8s_node" {
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
default = 3
}
variable "flavor_etcd" {
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
default = 3
default = 3
}
variable "flavor_gfs_node" {
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
default = 3
default = 3
}
variable "network_name" {
description = "name of the internal network to use"
default = "internal"
}
variable "dns_nameservers" {
description = "An array of DNS name server names used by hosts in this subnet."
type = "list"
default = []
default = "internal"
}
variable "floatingip_pool" {
description = "name of the floating ip pool to use"
default = "external"
default = "external"
}
variable "external_net" {
description = "uuid of the external/public network"
variable "username" {
description = "Your openstack username"
}
variable "password" {
description = "Your openstack password"
}
variable "tenant" {
description = "Your openstack tenant/project"
}
variable "auth_url" {
description = "Your openstack auth URL"
}

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python
#
# Copyright 2015 Cisco Systems, Inc.
#
@@ -70,14 +70,6 @@ def iterhosts(resources):
yield parser(resource, module_name)
def iterips(resources):
'''yield ip tuples of (instance_id, ip)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
if resource_type == 'openstack_compute_floatingip_associate_v2':
yield openstack_floating_ips(resource)
def parses(prefix):
def inner(func):
PARSERS[prefix] = func
@@ -306,17 +298,6 @@ def softlayer_host(resource, module_name):
return name, attrs, groups
def openstack_floating_ips(resource):
raw_attrs = resource['primary']['attributes']
attrs = {
'ip': raw_attrs['floating_ip'],
'instance_id': raw_attrs['instance_id'],
}
return attrs
def openstack_floating_ips(resource):
raw_attrs = resource['primary']['attributes']
return raw_attrs['instance_id'], raw_attrs['floating_ip']
@parses('openstack_compute_instance_v2')
@calculate_mantl_vars
@@ -362,8 +343,6 @@ def openstack_host(resource, module_name):
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
# attrs specific to Ansible
if 'metadata.ssh_user' in raw_attrs:
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
@@ -677,19 +656,6 @@ def clc_server(resource, module_name):
return name, attrs, groups
def iter_host_ips(hosts, ips):
'''Update hosts that have an entry in the floating IP list'''
for host in hosts:
host_id = host[1]['id']
if host_id in ips:
ip = ips[host_id]
host[1].update({
'access_ip_v4': ip,
'public_ipv4': ip,
'ansible_ssh_host': ip,
})
yield host
## QUERY TYPES
def query_host(hosts, target):
@@ -761,13 +727,6 @@ def main():
parser.exit()
hosts = iterhosts(iterresources(tfstates(args.root)))
# Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts
ips = dict(iterips(iterresources(tfstates(args.root))))
if ips:
hosts = iter_host_ips(hosts, ips)
if args.list:
output = query_list(hosts)
if args.nometa:

View File

@@ -8,7 +8,7 @@ The inventory is composed of 3 groups:
* **kube-node** : list of kubernetes nodes where the pods will run.
* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
* **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.
* **etcd**: list of server to compose the etcd server. you should have at least 3 servers for failover purposes.
Note: do not modify the children of _k8s-cluster_, like putting
the _etcd_ group into the _k8s-cluster_, unless you are certain
@@ -27,7 +27,7 @@ not _kube-node_.
There are also two special groups:
* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
* **bastion** : configure a bastion host if your nodes are not directly reachable
Below is a complete inventory example:
@@ -66,34 +66,32 @@ kube-master
Group vars and overriding variables precedence
----------------------------------------------
The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
Optional variables are located in the `inventory/sample/group_vars/all.yml`.
Mandatory variables that are common for at least one role (or a node group) can be found in the
`inventory/sample/group_vars/k8s-cluster.yml`.
The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
those cannot be overridden from the group vars. In order to override, one should use
the `-e` runtime flag (the simplest way) or other layers described in the docs.
Kubespray uses only a few layers to override things (or expect them to
Kargo uses only a few layers to override things (or expect them to
be overridden for roles):
Layer | Comment
------|--------
**role defaults** | provides best UX to override things for Kubespray deployments
**role defaults** | provides best UX to override things for Kargo deployments
inventory vars | Unused
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
inventory host_vars | Unused
playbook group_vars | Unused
playbook group_vars | Unuses
playbook host_vars | Unused
**host facts** | Kubespray overrides for internal roles' logic, like state flags
**host facts** | Kargo overrides for internal roles' logic, like state flags
play vars | Unused
play vars_prompt | Unused
play vars_files | Unused
registered vars | Unused
set_facts | Kubespray overrides those, for some places
set_facts | Kargo overrides those, for some places
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
block vars (only for tasks in block) | Kargo overrides for internal roles' logic
task vars (only for the task) | Unused for roles, but only for helper scripts
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
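As a minimal sketch of the extra-vars layer in practice (the override file name and the inventory path are only examples):

```ShellSession
$ ansible-playbook -i inventory/sample/hosts.ini cluster.yml -e @my-overrides.yml
```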
@@ -124,12 +122,12 @@ The following tags are defined in playbooks:
| k8s-pre-upgrade | Upgrading K8s cluster
| k8s-secrets | Configuring K8s certs/keys
| kpm | Installing K8s apps definitions with KPM
| kube-apiserver | Configuring static pod kube-apiserver
| kube-controller-manager | Configuring static pod kube-controller-manager
| kube-apiserver | Configuring self-hosted kube-apiserver
| kube-controller-manager | Configuring self-hosted kube-controller-manager
| kubectl | Installing kubectl and bash completion
| kubelet | Configuring kubelet service
| kube-proxy | Configuring static pod kube-proxy
| kube-scheduler | Configuring static pod kube-scheduler
| kube-proxy | Configuring self-hosted kube-proxy
| kube-scheduler | Configuring self-hosted kube-scheduler
| localhost | Special steps for the localhost (ansible runner)
| master | Configuring K8s master node role
| netchecker | Installing netchecker K8s app
@@ -153,16 +151,16 @@ Example command to filter and apply only DNS configuration tasks and skip
everything else related to host OS configuration and downloading images of containers:
```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
```
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
```
ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags resolvconf
```
And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related stuff or trying to upload containers to K8s cluster nodes:
```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
ansible-playbook -i inventory/inventory.ini cluster.yaml \
-e download_run_once=true -e download_localhost=true \
--tags download --skip-tags upload,upgrade
```

View File

@@ -1,23 +0,0 @@
Atomic host bootstrap
=====================
Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel` (or pass it as an extra var, see the example below).
Note: Flannel is the only plugin that has currently been tested with Atomic.
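If you prefer not to edit the inventory, the same setting can be passed as an extra var at run time (a sketch; the inventory path is only an example):

```ShellSession
$ ansible-playbook -i inventory/inventory.ini --become -e kube_network_plugin=flannel cluster.yml
```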
### Vagrant
* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
* Update `vm_memory = 2048` and `vm_cpus = 2`
* Networking on vagrant hosts has to be brought up manually once they are booted.
```
vagrant ssh
sudo /sbin/ifup enp0s8
```
* For users of vagrant-libvirt download centos/atomic-host qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from https://getfedora.org/en/atomic/download/
Then you can proceed to [cluster deployment](#run-deployment)

View File

@@ -3,58 +3,8 @@ AWS
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
You will also need to tag the resources in your VPC so that the aws provider can use them. Tag the subnets and all instances that Kubernetes will run on with the key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb`, and those for internal ELBs with the key `kubernetes.io/role/internal-elb`.
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
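For example, a minimal static inventory sketch using those internal hostnames (the hostnames, IPs, and SSH user shown are placeholders):
```
# inventory/hosts.ini (sketch; values are placeholders)
[kube-master]
ip-10-250-1-10.us-west-2.compute.internal ansible_ssh_host=10.250.1.10 ansible_ssh_user=admin

[etcd]
ip-10-250-1-10.us-west-2.compute.internal

[kube-node]
ip-10-250-1-11.us-west-2.compute.internal ansible_ssh_host=10.250.1.11 ansible_ssh_user=admin

[k8s-cluster:children]
kube-master
kube-node
```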
You can now create your cluster!
### Dynamic Inventory ###
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all the use cases and groups that we may use as part of more advanced deployments. Additions are welcome.
This will produce an inventory that is passed into Ansible that looks like the following:
```
{
  "_meta": {
    "hostvars": {
      "ip-172-31-3-xxx.us-east-2.compute.internal": {
        "ansible_ssh_host": "172.31.3.xxx"
      },
      "ip-172-31-8-xxx.us-east-2.compute.internal": {
        "ansible_ssh_host": "172.31.8.xxx"
      }
    }
  },
  "etcd": [
    "ip-172-31-3-xxx.us-east-2.compute.internal"
  ],
  "k8s-cluster": {
    "children": [
      "kube-master",
      "kube-node"
    ]
  },
  "kube-master": [
    "ip-172-31-3-xxx.us-east-2.compute.internal"
  ],
  "kube-node": [
    "ip-172-31-8-xxx.us-east-2.compute.internal"
  ]
}
```
Guide:
- Create instances in AWS as needed.
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also combine roles on one instance, e.g. `kube-master, etcd`.
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal:
```
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2"
```
- We will now create our cluster, with one or two small changes to the usual command. The first is that we specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The second is conditional: if your AWS instances are public-facing, you can set the `VPC_VISIBILITY` variable to `public`, which passes the public IPs and DNS names into the inventory. This makes your cluster.yml command look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
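A fuller invocation sketch, assuming the environment variables above are already exported; the SSH user and privilege-escalation flags are assumptions that depend on your AMI, not something the inventory script prescribes:
```
VPC_VISIBILITY="public" ansible-playbook -i inventory/kubespray-aws-inventory.py \
    -u centos -b --become-user=root cluster.yml
```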

View File

@@ -1,7 +1,7 @@
Azure
===============
To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
@@ -49,8 +49,8 @@ This is the AppId from the last command
- Create the role assignment with:
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
azure\_aad\_client\_id musst be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
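A sketch of the resulting inventory variables (the values are placeholders; set them wherever your group_vars live):
```yaml
# group_vars/all.yml (sketch)
cloud_provider: azure
azure_aad_client_id: "<AppId from the previous step>"
azure_aad_client_secret: "<your chosen secret>"
```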
## Provisioning Azure with Resource Group Templates
You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
You'll find Resource Group Templates and scripts to provision the required infrastructore to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)

View File

@@ -96,7 +96,7 @@ You need to edit your inventory and add:
* `cluster_id` per route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/))
Here's an example of Kubespray inventory with route reflectors:
Here's an example of Kargo inventory with route reflectors:
```
[all]
@@ -145,27 +145,9 @@ cluster_id="1.0.0.1"
The inventory above will deploy the following topology assuming that calico's
`global_as_num` is set to `65400`:
![Image](figures/kubespray-calico-rr.png?raw=true)
##### Optional : Define default endpoint to host action
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in Kubernetes, the action has to be changed to RETURN (the default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise, all network packets from pods (with hostNetwork=False) to service endpoints (with hostNetwork=True) within the same node are dropped.
To re-define default action please set the following variable in your inventory:
```
calico_endpoint_to_host_action: "ACCEPT"
```
![Image](figures/kargo-calico-rr.png?raw=true)
Cloud providers configuration
=============================
Please refer to the official documentation; for example, the [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for Calico IP-in-IP tunnels. Note: Calico is always configured with ``ipip: true`` if a cloud provider is defined.
##### Optional : Ignore kernel's RPF check setting
By default the felix agent (calico-node) will abort if the kernel RPF setting is not 'strict'. If you want Calico to ignore the kernel setting:
```
calico_node_ignorelooserpf: true
```

View File

@@ -3,11 +3,20 @@ Cloud providers
#### Provisioning
You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation.
You can use kargo-cli to start new instances on cloud providers. Here's an example:
```
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
```
#### Deploy kubernetes
With ansible-playbook command
With kargo-cli
```
kargo deploy [--aws|--gce] -u admin
```
Or ansible-playbook command
```
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
```

View File

@@ -1,25 +1,25 @@
Kubespray vs [Kops](https://github.com/kubernetes/kops)
Kargo vs [Kops](https://github.com/kubernetes/kops)
---------------
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
Kargo runs on bare metal and most clouds, using Ansible as its substrate for
provisioning and orchestration. Kops performs the provisioning and orchestration
itself, and as such is less flexible in deployment platforms. For people with
familiarity with Ansible, existing Ansible deployments or the desire to run a
Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
however, is more tightly integrated with the unique features of the clouds it
supports so it could be a better choice if you know that you will only be using
one platform for the foreseeable future.
Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
------------------
Kubeadm provides domain knowledge of Kubernetes clusters' life cycle
management, including self-hosted layouts, dynamic discovery services and so
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
it may have been named a "Kubernetes cluster operator". Kubespray however,
on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html),
it would've likely been named a "Kubernetes cluster operator". Kargo however,
does generic configuration management tasks from the "OS operators" ansible
world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
to adopt kubeadm as a tool in order to consume life cycle management domain
knowledge from it and offload generic OS configuration things from it, which
hopefully benefits both sides.

View File

@@ -1,74 +0,0 @@
Contiv
======
Here is the [Contiv documentation](http://contiv.github.io/documents/).
## Administrate Contiv
There are two ways to manage Contiv:
* a web UI managed by the api proxy service
* a CLI named `netctl`
### Interfaces
#### The Web Interface
This UI is hosted on all kubernetes master nodes. The service is available at `https://<one of your master node>:10000`.
You can configure the api proxy by overriding the following variables:
```yaml
contiv_enable_api_proxy: true
contiv_api_proxy_port: 10000
contiv_generate_certificate: true
```
The default credentials to log in are: admin/admin.
#### The Command Line Interface
The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:
```bash
export NETMASTER=http://127.0.0.1:9999
```
The port can be changed by overriding the following variable:
```yaml
contiv_netmaster_port: 9999
```
The CLI doesn't use the authentication process needed by the web interface.
### Network configuration
The default configuration uses VXLAN to create an overlay. Two networks are created by default:
* `contivh1`: an infrastructure network. It allows nodes to access the pod IPs. It is mandatory in a Kubernetes environment that uses VXLAN.
* `default-net` : the default network that hosts pods.
You can change the default network configuration by overriding the `contiv_networks` variable.
The default forward mode is set to routing:
```yaml
contiv_fwd_mode: routing
```
The following is an example of how you can use VLAN instead of VXLAN:
```yaml
contiv_fwd_mode: bridge
contiv_vlan_interface: eth0
contiv_networks:
- name: default-net
subnet: "{{ kube_pods_subnet }}"
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
encap: vlan
pkt_tag: 10
```

View File

@@ -1,14 +1,24 @@
CoreOS bootstrap
===============
Example with Ansible:
Example with **kargo-cli**:
```
kargo deploy --gce --coreos
```
Or with Ansible:
Before running the cluster playbook you must satisfy the following requirements:
General CoreOS Pre-Installation Notes:
- You should set the bootstrap_os variable to `coreos`
- Ensure that the bin_dir is set to `/opt/bin`
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
* On each CoreOS node, a writable directory **/opt/bin** (~400M of disk space)
* Uncomment the variable **ansible\_python\_interpreter** in the file `inventory/group_vars/all.yml`
* run the Python bootstrap playbook
```
ansible-playbook -u smana -e ansible_ssh_user=smana -b --become-user=root -i inventory/inventory.cfg coreos-bootstrap.yml
```
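Putting the notes above together, a sketch of the CoreOS-related inventory variables (the exact file they live in depends on your inventory layout):
```yaml
# inventory/group_vars/all.yml (sketch)
bootstrap_os: coreos
bin_dir: /opt/bin
ansible_python_interpreter: /opt/bin/python
resolvconf_mode: host_resolvconf
```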
Then you can proceed to [cluster deployment](#run-deployment)

View File

@@ -1,38 +0,0 @@
Debian Jessie
===============
Debian Jessie installation Notes:
- Add
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
to /etc/default/grub. Then update with
```
sudo update-grub
sudo update-grub2
sudo reboot
```
- Add the [backports](https://backports.debian.org/Instructions/), which contain systemd 230, and update systemd:
```apt-get -t jessie-backports install systemd```
(Necessary because the default systemd version (215) does not support the "Delegate" directive in service files.)
- Add the Ansible repository and install Ansible to get a proper version
```
sudo add-apt-repository ppa:ansible/ansible
sudo apt-get update
sudo apt-get install ansible
```
- Install Jinja2 and Python-Netaddr
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)

View File

@@ -1,7 +1,7 @@
K8s DNS stack by Kubespray
K8s DNS stack by Kargo
======================
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
@@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode``
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
DNS modes supported by Kubespray
DNS modes supported by kargo
============================
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
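Both are plain inventory variables. A sketch with the defaults spelled out (the upstream resolver addresses are just examples):
```yaml
dns_mode: dnsmasq_kubedns      # default
resolvconf_mode: docker_dns    # default
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
```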
## dns_mode
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
``dns_mode`` configures how kargo will setup cluster DNS. There are three modes available:
#### dnsmasq_kubedns (default)
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
@@ -62,26 +62,12 @@ other queries are forwarded to the nameservers found in ``upstream_dns_servers``
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.
#### coredns
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
all queries.
#### coredns_dual
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
all queries. It will also deploy a secondary CoreDNS stack
#### manual
This does not install dnsmasq or kubedns, but allows you to specify
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
Use this method if you plan to install your own DNS server in the cluster after
initial deployment.
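A sketch of that configuration (the address is a placeholder for the IP of the DNS server you plan to deploy):
```yaml
dns_mode: manual
manual_dns_server: 10.1.2.3   # placeholder IP of your own DNS server
```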
#### none
This installs neither dnsmasq nor kubedns/skydns. This basically disables cluster DNS completely and
leaves you with a non-functional cluster.
## resolvconf_mode
``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
``resolvconf_mode`` configures how kargo will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available:
#### docker_dns (default)
@@ -114,7 +100,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS
servers, which in turn will forward queries to the system nameserver if required.
#### host_resolvconf
This activates the classic Kubespray behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
This activates the classic kargo behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
As cluster DNS is not available at the early deployment stage, this mode is split into 2 stages. In the first
@@ -134,7 +120,7 @@ cluster service names.
Limitations
-----------
* Kubespray does not yet provide a way to configure the Kubedns addon to forward requests that SkyDns can
* Kargo does not yet provide a way to configure the Kubedns addon to forward requests that SkyDns can
not answer with authority to arbitrary recursive resolvers. This task is left
for the future. See the [official SkyDns docs](https://github.com/skynetservices/skydns)
for details.

View File

@@ -1,7 +1,7 @@
Downloading binaries and containers
===================================
Kubespray supports several download/upload modes. The default is:
Kargo supports several download/upload modes. The default is:
* Each node downloads binaries and container images on its own, which is
``download_run_once: False``.
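For example, a sketch of flipping that behaviour so artifacts are fetched once and pushed to the nodes (both variables also appear in the download example earlier in this document):
```yaml
download_run_once: true    # download on a single delegate instead of every node
download_localhost: true   # use the Ansible runner node as that delegate
```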

View File

(binary image file changed: 40 KiB before and after)

View File

@@ -23,6 +23,13 @@ ip a show dev flannel.1
valid_lft forever preferred_lft forever
```
* Docker must be configured with a bridge IP in the flannel subnet.
```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```
* Try to run a container and check its IP address
```

Some files were not shown because too many files have changed in this diff.