Cloud provider support for OCI (Oracle Cloud Infrastructure)

Signed-off-by: Jeff Bornemann <jeff.bornemann@oracle.com>
This commit is contained in:
Jeff Bornemann
2018-07-20 10:56:38 -04:00
parent 2c5781ace1
commit 94df70be98
17 changed files with 247 additions and 7 deletions

View File

@@ -0,0 +1,5 @@
---
# Defaults for the OCI (Oracle Cloud Infrastructure) cloud controller role.

# How the cloud controller manager handles security lists for load balancer
# services. Valid values (validated in credentials-check.yml): All, Frontend, None.
oci_security_list_management: All
# When true, authenticate with instance principals instead of user credentials
# (oci_private_key / oci_user_id / etc. are then not required).
oci_use_instance_principals: false
# Release tag of oracle/oci-cloud-controller-manager whose manifest is deployed.
oci_cloud_controller_version: 0.5.0

View File

@@ -0,0 +1,56 @@
---
# Fail fast with a clear message when a variable required by the OCI cloud
# controller is missing. User-credential checks are skipped when instance
# principals are in use; compartment, VCN and subnet variables are always
# mandatory. List-form `when:` items are AND-ed together by Ansible.
- name: "OCI Cloud Controller | Credentials Check | oci_private_key"
  fail:
    msg: "oci_private_key is missing"
  when:
    - not oci_use_instance_principals
    - oci_private_key is not defined or oci_private_key == ""

- name: "OCI Cloud Controller | Credentials Check | oci_region_id"
  fail:
    msg: "oci_region_id is missing"
  when:
    - not oci_use_instance_principals
    - oci_region_id is not defined or oci_region_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id"
  fail:
    msg: "oci_tenancy_id is missing"
  when:
    - not oci_use_instance_principals
    - oci_tenancy_id is not defined or oci_tenancy_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_user_id"
  fail:
    msg: "oci_user_id is missing"
  when:
    - not oci_use_instance_principals
    - oci_user_id is not defined or oci_user_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint"
  fail:
    msg: "oci_user_fingerprint is missing"
  when:
    - not oci_use_instance_principals
    - oci_user_fingerprint is not defined or oci_user_fingerprint == ""

- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id"
  fail:
    msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides"
  when: oci_compartment_id is not defined or oci_compartment_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id"
  fail:
    # typo fix: was "missin"
    msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides"
  when: oci_vnc_id is not defined or oci_vnc_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id"
  fail:
    msg: "oci_subnet1_id is missing. This is the first subnet to which loadbalancers will be added"
  when: oci_subnet1_id is not defined or oci_subnet1_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id"
  fail:
    msg: "oci_subnet2_id is missing. Two subnets are required for load balancer high availability"
  when: oci_subnet2_id is not defined or oci_subnet2_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management"
  fail:
    msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)."
  when: oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]

View File

@@ -0,0 +1,51 @@
---
# Deploys the OCI cloud controller manager. Every templating/kubectl task is
# restricted to the first kube-master so cluster-wide resources are created
# exactly once.
- include: credentials-check.yml
  tags: oci

- name: "OCI Cloud Controller | Generate Configuration"
  template:
    src: controller-manager-config.yml.j2
    dest: /tmp/controller-manager-config.yml
  register: controller_manager_config
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

# The rendered config is embedded base64-encoded into a Secret (see
# cloud-provider.yml.j2, which consumes controller_manager_config_base64).
- name: "OCI Cloud Controller | Encode Configuration"
  set_fact:
    controller_manager_config_base64: "{{ lookup('file', '/tmp/controller-manager-config.yml') | b64encode }}"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Apply Configuration To Secret"
  template:
    src: cloud-provider.yml.j2
    dest: /tmp/cloud-provider.yml
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Apply Configuration"
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "/tmp/cloud-provider.yml"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Download Controller Manifest"
  get_url:
    url: "https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/{{ oci_cloud_controller_version }}/manifests/oci-cloud-controller-manager.yaml"
    dest: "/tmp/oci-cloud-controller-manager.yml"
    force: true
  register: result
  # get_url reports "OK" in msg on success; retry transient download failures.
  until: "'OK' in result.msg"
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Apply Controller Manifest"
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "/tmp/oci-cloud-controller-manager.yml"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: oci-cloud-controller-manager
  namespace: kube-system
type: Opaque
data:
  # Base64-encoded cloud-provider configuration rendered by this role
  # (controller_manager_config_base64 is set in tasks/main.yml). Quoted so the
  # templated value is always parsed as a string.
  cloud-provider.yaml: "{{ controller_manager_config_base64 }}"

View File

@@ -0,0 +1,56 @@
auth:
{% if oci_use_instance_principals %}
  # Instance principal authentication
  # (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm).
  # Ensure you have setup the following OCI policies and your kubernetes nodes are running within them
  # allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name]
  # allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name]
  # allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name]
  useInstancePrincipals: true
{% else %}
  useInstancePrincipals: false
  region: "{{ oci_region_id }}"
  tenancy: "{{ oci_tenancy_id }}"
  user: "{{ oci_user_id }}"
  # The indent filter re-indents every line after the first of the (typically
  # multi-line PEM) private key so the whole value stays inside the block
  # scalar. A bare substitution would leave lines 2+ at column 0, escaping the
  # block scalar and producing invalid YAML.
  key: |
    {{ oci_private_key | indent(4) }}
{% if oci_private_key_passphrase is defined %}
  passphrase: "{{ oci_private_key_passphrase }}"
{% endif %}
  fingerprint: "{{ oci_user_fingerprint }}"
{% endif %}

# compartment configures Compartment within which the cluster resides.
compartment: "{{ oci_compartment_id }}"

# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides.
vcn: "{{ oci_vnc_id }}"

loadBalancer:
  # subnet1 configures one of two subnets to which load balancers will be added.
  # OCI load balancers require two subnets to ensure high availability.
  subnet1: "{{ oci_subnet1_id }}"

  # subnet2 configures the second of two subnets to which load balancers will be
  # added. OCI load balancers require two subnets to ensure high availability.
  subnet2: "{{ oci_subnet2_id }}"

  # SecurityListManagementMode configures how security lists are managed by the CCM.
  # "All" (default): Manage all required security list rules for load balancer services.
  # "Frontend": Manage only security list rules for ingress to the load
  #             balancer. Requires that the user has setup a rule that
  #             allows inbound traffic to the appropriate ports for kube
  #             proxy health port, node port ranges, and health check port ranges.
  #             E.g. 10.82.0.0/16 30000-32000.
  # "None": Disables all security list management. Requires that the
  #         user has setup a rule that allows inbound traffic to the
  #         appropriate ports for kube proxy health port, node port
  #         ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000.
  #         Additionally requires the user to manage rules to allow
  #         inbound traffic to load balancers.
  securityListManagementMode: "{{ oci_security_list_management }}"