Remove contiv related files (#6964)

Florian Ruynat
2020-11-30 15:48:50 +01:00
committed by GitHub
parent 4a8a52bad9
commit f6eed8091e
42 changed files with 48 additions and 1304 deletions


@@ -1,55 +0,0 @@
---
contiv_config_dir: "{{ kube_config_dir }}/contiv"
contiv_etcd_conf_dir: "/etc/contiv/etcd"
contiv_etcd_data_dir: "/var/lib/etcd/contiv-data"
contiv_netmaster_port: 9999
contiv_cni_version: 0.3.1
# No need to download it by default, but must be defined
contiv_etcd_image_repo: "{{ etcd_image_repo }}"
contiv_etcd_image_tag: "{{ etcd_image_tag }}"
contiv_etcd_listen_port: 6666
contiv_etcd_peer_port: 6667
contiv_etcd_endpoints: |-
{% for host in groups['kube-master'] -%}
contiv_etcd{{ loop.index }}=http://{{ hostvars[host]['ip'] | default(fallback_ips[host]) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
{%- endfor %}
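# For illustration (hypothetical inventory): with two masters, node1
# (10.0.0.1) and node2 (10.0.0.2), and the default peer port, the
# template above renders to:
#   contiv_etcd1=http://10.0.0.1:6667,contiv_etcd2=http://10.0.0.2:6667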
# Parameters for Contiv api-proxy
contiv_enable_api_proxy: true
contiv_api_proxy_port: 10000
contiv_generate_certificate: true
# Forwarding mode: bridge or routing
contiv_fwd_mode: routing
# Fabric mode: aci, aci-opflex or default
contiv_fabric_mode: default
# Default netmode: vxlan or vlan
contiv_net_mode: vxlan
# Dataplane interface
contiv_vlan_interface: ""
# Contiv components log at INFO by default; set to WARN here to reduce noise
contiv_netmaster_loglevel: "WARN"
contiv_netplugin_loglevel: "WARN"
contiv_ovsdb_server_loglevel: "warn"
contiv_ovs_vswitchd_loglevel: "warn"
# VxLAN port
contiv_vxlan_port: 4789
# Default network configuration
contiv_networks:
- name: contivh1
subnet: "10.233.128.0/18"
gateway: "10.233.128.1"
nw_type: infra
- name: default-net
subnet: "{{ kube_pods_subnet }}"
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
pkt_tag: 10


@@ -1,10 +0,0 @@
#!/bin/bash
set -e
echo "Starting cleanup"
# Delete every OVS bridge created by contiv
ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
# Remove the veth vport devices contiv created for pods
for p in $(ifconfig | grep vport | awk '{print $1}'); do
  ip link delete "$p" type veth
done
# Signal completion for the readiness probe, then keep the pod alive
# long enough for the probe to observe the marker file
touch /tmp/cleanup.done
sleep 60


@@ -1,6 +0,0 @@
---
- name: Contiv | Reload kernel modules
service:
name: systemd-modules-load
state: restarted
enabled: yes


@@ -1,3 +0,0 @@
---
dependencies:
- role: network_plugin/cni


@@ -1,156 +0,0 @@
---
- name: Contiv | Load openvswitch kernel module
copy:
dest: /etc/modules-load.d/openvswitch.conf
content: "openvswitch"
notify:
- Contiv | Reload kernel modules
- name: Contiv | Create contiv etcd directories
file:
dest: "{{ item }}"
state: directory
mode: 0750
owner: root
group: root
with_items:
- "{{ contiv_etcd_conf_dir }}"
- "{{ contiv_etcd_data_dir }}"
when: inventory_hostname in groups['kube-master']
- name: Contiv | Workaround https://github.com/contiv/netplugin/issues/1152
set_fact:
kube_apiserver_endpoint_for_contiv: |-
{% if not is_kube_master and loadbalancer_apiserver_localhost -%}
https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
{%- if loadbalancer_apiserver.port|string != "443" -%}
:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
{%- endif -%}
{%- else -%}
https://{{ first_kube_master }}:{{ kube_apiserver_port }}
{%- endif %}
when: inventory_hostname in groups['kube-master']
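# For illustration, with hypothetical inventory values the fact above
# resolves to one of three forms:
#   https://localhost:6443                       (localhost apiserver LB on a non-master)
#   https://lb-apiserver.kubernetes.local:8443   (external LB on a non-443 port)
#   https://10.0.0.1:6443                        (no LB: first master, default port)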
# Re-register these role defaults as host facts so that later roles and
# delegated tasks resolve them consistently
- name: Contiv | Set necessary facts
set_fact:
contiv_config_dir: "{{ contiv_config_dir }}"
contiv_enable_api_proxy: "{{ contiv_enable_api_proxy }}"
contiv_fabric_mode: "{{ contiv_fabric_mode }}"
contiv_fwd_mode: "{{ contiv_fwd_mode }}"
contiv_netmaster_port: "{{ contiv_netmaster_port }}"
contiv_networks: "{{ contiv_networks }}"
contiv_manifests:
- {name: contiv-config, file: contiv-config.yml, type: configmap}
- {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
- {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
- {name: contiv-ovs, file: contiv-ovs.yml, type: daemonset}
- {name: contiv-netmaster, file: contiv-netmaster-clusterrolebinding.yml, type: clusterrolebinding}
- {name: contiv-netmaster, file: contiv-netmaster-clusterrole.yml, type: clusterrole}
- {name: contiv-netmaster, file: contiv-netmaster-serviceaccount.yml, type: serviceaccount}
- {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
- {name: contiv-netplugin, file: contiv-netplugin-clusterrolebinding.yml, type: clusterrolebinding}
- {name: contiv-netplugin, file: contiv-netplugin-clusterrole.yml, type: clusterrole}
- {name: contiv-netplugin, file: contiv-netplugin-serviceaccount.yml, type: serviceaccount}
- {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
when: inventory_hostname in groups['kube-master']
- name: Contiv | Add another manifest if contiv_enable_api_proxy is true
set_fact:
contiv_manifests: |-
{% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
{{ contiv_manifests }}
when:
- contiv_enable_api_proxy
- inventory_hostname in groups['kube-master']
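# The append above works because Jinja evaluates the `set` statement's
# side effect before emitting the final expression; a minimal standalone
# equivalent (hypothetical variable):
#   {% set _ = my_list.append({"name": "extra"}) %}
#   {{ my_list }}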
- name: Contiv | Create /var/contiv
file:
path: /var/contiv
state: directory
- name: Contiv | Create contiv config directory
file:
dest: "{{ contiv_config_dir }}"
state: directory
mode: 0755
owner: root
group: root
when: inventory_hostname in groups['kube-master']
- name: Contiv | Install all Kubernetes resources
template:
src: "{{ item.file }}.j2"
dest: "{{ contiv_config_dir }}/{{ item.file }}"
with_items: "{{ contiv_manifests }}"
register: contiv_manifests_results
when: inventory_hostname in groups['kube-master']
- name: Contiv | Copy certs generation script
template:
src: "generate-certificate.sh.j2"
dest: "/var/contiv/generate-certificate.sh"
mode: 0700
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Check for cert key existence
stat:
path: /var/contiv/auth_proxy_key.pem
register: contiv_certificate_key_state
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Generate contiv-api-proxy certificates
command: /var/contiv/generate-certificate.sh
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
- (not contiv_certificate_key_state.stat.exists)
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Fetch the generated certificate
fetch:
src: "/var/contiv/{{ item }}"
dest: "/tmp/kubespray-contiv-{{ item }}"
flat: yes
with_items:
- auth_proxy_key.pem
- auth_proxy_cert.pem
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Copy the generated certificate on nodes
copy:
src: "/tmp/kubespray-contiv-{{ item }}"
dest: "/var/contiv/{{ item }}"
with_items:
- auth_proxy_key.pem
- auth_proxy_cert.pem
when:
- inventory_hostname != groups['kube-master'][0]
- inventory_hostname in groups['kube-master']
- contiv_enable_api_proxy
- contiv_generate_certificate
- name: Contiv | Copy netctl binary from docker container
command: sh -c "{{ docker_bin_dir }}/docker rm -f netctl-binarycopy;
{{ docker_bin_dir }}/docker create --name netctl-binarycopy {{ contiv_image_repo }}:{{ contiv_image_tag }} &&
{{ docker_bin_dir }}/docker cp netctl-binarycopy:/contiv/bin/netctl {{ bin_dir }}/netctl &&
{{ docker_bin_dir }}/docker rm -f netctl-binarycopy"
register: contiv_task_result
until: contiv_task_result.rc == 0
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
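The netctl task above uses the create-then-copy pattern to extract a
binary from an image without running it; as a standalone sketch (image
tag and destination path assumed for illustration):

  docker rm -f netctl-binarycopy 2>/dev/null || true
  docker create --name netctl-binarycopy contiv/netplugin:latest
  docker cp netctl-binarycopy:/contiv/bin/netctl /usr/local/bin/netctl
  docker rm -f netctl-binarycopy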


@@ -1,66 +0,0 @@
---
- name: reset | Check that kubectl is still here
stat:
path: "{{ bin_dir }}/kubectl"
register: contiv_kubectl
- name: reset | Delete contiv netplugin and netmaster daemonsets
kube:
name: "{{ item }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "ds"
state: absent
with_items:
- contiv-netplugin
- contiv-netmaster
register: contiv_cleanup_deletion
tags:
- network
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]
- name: reset | Copy contiv temporary cleanup script
copy:
src: ../files/contiv-cleanup.sh # noqa 404 - the file lives outside role_path, so a relative path is required
dest: /opt/cni/bin/cleanup
owner: root
group: root
mode: 0750
when:
- contiv_kubectl.stat.exists
- name: reset | Lay down contiv cleanup template
template:
src: ../templates/contiv-cleanup.yml.j2 # noqa 404 - the template lives outside role_path, so a relative path is required
dest: "{{ kube_config_dir }}/contiv-cleanup.yml" # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
register: contiv_cleanup_manifest
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]
- name: reset | Start contiv cleanup resources
kube:
name: "contiv-cleanup"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "ds"
state: latest
filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]
ignore_errors: true
- name: reset | Wait until contiv cleanup is done
command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
register: cleanup_done_all_nodes
until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
retries: 5
delay: 5
ignore_errors: true
changed_when: false
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]
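Run by hand, the readiness check in the wait loop looks like this
(output shown for a hypothetical three-node cluster):

  $ kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'
  3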


@@ -1,9 +0,0 @@
---
- name: reset | Check contiv vxlan_sys network device
stat:
path: "/sys/class/net/vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
register: contiv_vxlan_sys
- name: reset | Remove the vxlan_sys network device created by contiv
command: "ip link del vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
when: contiv_vxlan_sys.stat.exists


@@ -1,62 +0,0 @@
# This manifest deploys the Contiv API Proxy Server on Kubernetes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: contiv-api-proxy
namespace: kube-system
labels:
k8s-app: contiv-api-proxy
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: contiv-api-proxy
template:
metadata:
name: contiv-api-proxy
namespace: kube-system
labels:
k8s-app: contiv-api-proxy
spec:
priorityClassName: system-node-critical
# The API proxy must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- operator: Exists
serviceAccountName: contiv-netmaster
containers:
- name: contiv-api-proxy
image: {{ contiv_auth_proxy_image_repo }}:{{ contiv_auth_proxy_image_tag }}
args:
- --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
- --tls-key-file=/var/contiv/auth_proxy_key.pem
- --tls-certificate=/var/contiv/auth_proxy_cert.pem
- --data-store-driver=$(STORE_DRIVER)
- --data-store-address=$(CONTIV_ETCD)
- --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
env:
- name: NO_NETMASTER_STARTUP_CHECK
value: "0"
- name: STORE_DRIVER
value: etcd
- name: CONTIV_ETCD
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_etcd
securityContext:
privileged: false
volumeMounts:
- mountPath: /var/contiv
name: var-contiv
readOnly: false
volumes:
- name: var-contiv
hostPath:
path: /var/contiv


@@ -1,58 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-cleanup
namespace: kube-system
labels:
k8s-app: contiv-cleanup
spec:
selector:
matchLabels:
k8s-app: contiv-cleanup
template:
metadata:
labels:
k8s-app: contiv-cleanup
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
tolerations:
- operator: Exists
serviceAccountName: contiv-netplugin
containers:
- name: contiv-ovs-cleanup
image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
command: ["/opt/cni/bin/cleanup"]
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/openvswitch
name: etc-openvswitch
readOnly: false
- mountPath: /var/run
name: var-run
readOnly: false
- mountPath: /opt/cni/bin
name: cni-bin-dir
readOnly: false
readinessProbe:
exec:
command:
- cat
- /tmp/cleanup.done
initialDelaySeconds: 3
periodSeconds: 3
successThreshold: 1
volumes:
- name: etc-openvswitch
hostPath:
path: /etc/openvswitch
- name: var-run
hostPath:
path: /var/run
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin


@@ -1,31 +0,0 @@
# This ConfigMap is used to configure a self-hosted Contiv installation.
# It can be used with an external cluster store (etcd or consul) or
# with the etcd instance installed as contiv-etcd.
kind: ConfigMap
apiVersion: v1
metadata:
name: contiv-config
namespace: kube-system
data:
contiv_netmaster_loglevel: {{ contiv_netmaster_loglevel }}
contiv_netplugin_loglevel: {{ contiv_netplugin_loglevel }}
contiv_ovsdb_server_extra_flags: "--verbose={{ contiv_ovsdb_server_loglevel }}"
contiv_ovs_vswitchd_extra_flags: "--verbose={{ contiv_ovs_vswitchd_loglevel }}"
contiv_fwdmode: {{ contiv_fwd_mode }}
contiv_netmode: {{ contiv_net_mode }}
contiv_etcd: "http://127.0.0.1:{{ contiv_etcd_listen_port }}"
contiv_cni_config: |-
{
"cniVersion": "{{ contiv_cni_version }}",
"name": "contiv-net",
"type": "contivk8s"
}
contiv_k8s_config: |-
{
"K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
"K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
"K8S_KEY": "",
"K8S_CERT": "",
"K8S_TOKEN": "",
"SVC_SUBNET": "{{ kube_service_addresses }}"
}


@@ -1,38 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-etcd-proxy
namespace: kube-system
labels:
k8s-app: contiv-etcd-proxy
spec:
selector:
matchLabels:
k8s-app: contiv-etcd-proxy
template:
metadata:
labels:
k8s-app: contiv-etcd-proxy
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: DoesNotExist
containers:
- name: contiv-etcd-proxy
image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
env:
- name: ETCD_LISTEN_CLIENT_URLS
value: 'http://127.0.0.1:{{ contiv_etcd_listen_port }}'
- name: ETCD_PROXY
value: "on"
- name: ETCD_INITIAL_CLUSTER
value: '{{ contiv_etcd_endpoints }}'


@@ -1,65 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-etcd
namespace: kube-system
labels:
k8s-app: contiv-etcd
spec:
selector:
matchLabels:
k8s-app: contiv-etcd
template:
metadata:
labels:
k8s-app: contiv-etcd
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- operator: Exists
initContainers:
- name: contiv-etcd-init
image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: ETCD_INIT_ARGSFILE
value: '{{ contiv_etcd_conf_dir }}/contiv-etcd-args'
- name: ETCD_INIT_LISTEN_PORT
value: '{{ contiv_etcd_listen_port }}'
- name: ETCD_INIT_PEER_PORT
value: '{{ contiv_etcd_peer_port }}'
- name: ETCD_INIT_CLUSTER
value: '{{ contiv_etcd_endpoints }}'
- name: ETCD_INIT_DATA_DIR
value: '{{ contiv_etcd_data_dir }}'
volumeMounts:
- name: contiv-etcd-conf-dir
mountPath: {{ contiv_etcd_conf_dir }}
containers:
- name: contiv-etcd
image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
command:
- sh
- -c
- "/usr/local/bin/etcd $(cat $ETCD_INIT_ARGSFILE)"
env:
- name: ETCD_INIT_ARGSFILE
value: {{ contiv_etcd_conf_dir }}/contiv-etcd-args
volumeMounts:
- name: contiv-etcd-conf-dir
mountPath: {{ contiv_etcd_conf_dir }}
- name: contiv-etcd-data-dir
mountPath: {{ contiv_etcd_data_dir }}
volumes:
- name: contiv-etcd-data-dir
hostPath:
path: {{ contiv_etcd_data_dir }}
- name: contiv-etcd-conf-dir
hostPath:
path: {{ contiv_etcd_conf_dir }}


@@ -1,27 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: contiv-netmaster
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- nodes
- namespaces
- networkpolicies
verbs:
- get
- watch
- list
- update
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use


@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: contiv-netmaster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: contiv-netmaster
subjects:
- kind: ServiceAccount
name: contiv-netmaster
namespace: kube-system


@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: contiv-netmaster
namespace: kube-system


@@ -1,71 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-netmaster
namespace: kube-system
labels:
k8s-app: contiv-netmaster
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: contiv-netmaster
template:
metadata:
name: contiv-netmaster
namespace: kube-system
labels:
k8s-app: contiv-netmaster
spec:
priorityClassName: system-node-critical
# The netmaster must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- operator: Exists
serviceAccountName: contiv-netmaster
containers:
- name: contiv-netmaster
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
env:
- name: CONTIV_ROLE
value: netmaster
- name: CONTIV_NETMASTER_MODE
value: kubernetes
- name: CONTIV_NETMASTER_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_etcd
- name: CONTIV_NETMASTER_FORWARD_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_fwdmode
- name: CONTIV_NETMASTER_NET_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netmode
- name: CONTIV_NETMASTER_LOG_LEVEL
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netmaster_loglevel
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/contiv
name: var-contiv
readOnly: false
volumes:
# Used by contiv-netmaster
- name: var-contiv
hostPath:
path: /var/contiv


@@ -1,29 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: contiv-netplugin
rules:
- apiGroups:
- ""
- extensions
resources:
- endpoints
- nodes
- namespaces
- networkpolicies
- pods
- services
verbs:
- watch
- list
- update
- get
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use


@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: contiv-netplugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: contiv-netplugin
subjects:
- kind: ServiceAccount
name: contiv-netplugin
namespace: kube-system


@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: contiv-netplugin
namespace: kube-system


@@ -1,128 +0,0 @@
---
# This manifest installs the contiv-netplugin container, as well as
# the Contiv CNI plugins and network config, on each master and
# worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-netplugin
namespace: kube-system
labels:
k8s-app: contiv-netplugin
spec:
selector:
matchLabels:
k8s-app: contiv-netplugin
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: contiv-netplugin
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
tolerations:
- operator: Exists
serviceAccountName: contiv-netplugin
initContainers:
- name: contiv-netplugin-init
image: {{ contiv_init_image_repo }}:{{ contiv_init_image_tag }}
env:
- name: CONTIV_ROLE
value: netplugin
- name: CONTIV_MODE
value: kubernetes
- name: CONTIV_K8S_CONFIG
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_k8s_config
- name: CONTIV_CNI_CONFIG
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_cni_config
volumeMounts:
- mountPath: /var/contiv
name: var-contiv
readOnly: false
- mountPath: /etc/cni/net.d/
name: etc-cni-dir
readOnly: false
- name: contiv-cni
image: {{ contiv_image_repo }}:{{ contiv_version }}
command: ["cp", "/contiv/bin/contivk8s", "/opt/cni/bin/contivk8s"]
volumeMounts:
- mountPath: /opt/cni/bin
name: cni-bin-dir
readOnly: false
containers:
# Runs netplugin container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: contiv-netplugin
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
env:
- name: VLAN_IF
value: {{ contiv_vlan_interface }}
- name: CONTIV_NETPLUGIN_VLAN_UPLINKS
value: {{ contiv_vlan_interface }}
- name: CONTIV_NETPLUGIN_VXLAN_PORT
value: "{{ contiv_vxlan_port }}"
- name: CONTIV_ROLE
value: netplugin
- name: CONTIV_NETPLUGIN_MODE
value: kubernetes
- name: CONTIV_NETPLUGIN_VTEP_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_etcd
- name: CONTIV_NETPLUGIN_FORWARD_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_fwdmode
- name: CONTIV_NETPLUGIN_NET_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netmode
- name: CONTIV_NETPLUGIN_LOG_LEVEL
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netplugin_loglevel
resources:
requests:
cpu: 250m
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/run
name: var-run
readOnly: false
- mountPath: /var/contiv
name: var-contiv
readOnly: false
volumes:
# Used by contiv-netplugin
- name: var-run
hostPath:
path: /var/run
- name: var-contiv
hostPath:
path: /var/contiv
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: etc-cni-dir
hostPath:
path: /etc/cni/net.d/


@@ -1,79 +0,0 @@
---
# This manifest deploys the contiv-ovs pod.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-ovs
namespace: kube-system
labels:
k8s-app: contiv-ovs
spec:
selector:
matchLabels:
k8s-app: contiv-ovs
template:
metadata:
labels:
k8s-app: contiv-ovs
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
tolerations:
- operator: Exists
containers:
# Runs ovs containers on each Kubernetes node.
- name: contiv-ovsdb-server
image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
command: ["/scripts/start-ovsdb-server.sh"]
securityContext:
privileged: false
# Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and the image is rebuilt
env:
- name: OVSDBSERVER_EXTRA_FLAGS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_ovsdb_server_extra_flags
volumeMounts:
- mountPath: /etc/openvswitch
name: etc-openvswitch
readOnly: false
- mountPath: /var/run
name: var-run
readOnly: false
- name: contiv-ovs-vswitchd
image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
command: ["/scripts/start-ovs-vswitchd.sh"]
securityContext:
privileged: true
# Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and the image is rebuilt
env:
- name: OVSVSWITCHD_EXTRA_FLAGS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_ovs_vswitchd_extra_flags
volumeMounts:
- mountPath: /etc/openvswitch
name: etc-openvswitch
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run
name: var-run
readOnly: false
volumes:
# Used by contiv-ovs
- name: etc-openvswitch
hostPath:
path: /etc/openvswitch
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run
hostPath:
path: /var/run


@@ -1,23 +0,0 @@
#!/bin/bash
set -euo pipefail
PREFIX="/var/contiv"
KEY_PATH="$PREFIX/auth_proxy_key.pem"
CERT_PATH="$PREFIX/auth_proxy_cert.pem"
# If both files already exist, keep them and exit
if [[ -f "$KEY_PATH" && -f "$CERT_PATH" ]]; then
  exit 0
fi
mkdir -p "$PREFIX"
rm -f "$KEY_PATH" "$CERT_PATH"
# Generate a self-signed key/certificate pair for the auth proxy
openssl genrsa -out "$KEY_PATH" {{ certificates_key_size }} >/dev/null 2>&1
openssl req -new -x509 -sha256 -days {{ certificates_duration }} \
  -key "$KEY_PATH" \
  -out "$CERT_PATH" \
  -subj "/C=US/ST=CA/L=San Jose/O=CPSG/OU=IT Department/CN=auth-local.cisco.com"
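To inspect the certificate the script produces, standard openssl flags
can be used, e.g.:

  openssl x509 -in /var/contiv/auth_proxy_cert.pem -noout -subject -dates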


@@ -35,11 +35,6 @@ dependencies:
tags:
- macvlan
- role: network_plugin/contiv
when: kube_network_plugin == 'contiv'
tags:
- contiv
- role: network_plugin/kube-ovn
when: kube_network_plugin == 'kube-ovn'
tags: