From c11d466732d385a54a1c50b0b4f4dfd79e7b97f2 Mon Sep 17 00:00:00 2001 From: Cristian Calin Date: Mon, 16 May 2022 17:07:50 +0300 Subject: [PATCH] add kubernetes cluster sample --- kubernetes-cluster/README.md | 7 +++ kubernetes-cluster/credentials.tf | 15 +++++ kubernetes-cluster/data.tf | 3 + kubernetes-cluster/firewall.tf | 67 +++++++++++++++++++++ kubernetes-cluster/instances.tf | 36 +++++++++++ kubernetes-cluster/providers.tf | 12 ++++ kubernetes-cluster/resources.tf | 6 ++ kubernetes-cluster/templates/cloud-init.yml | 12 ++++ kubernetes-cluster/variables.tf | 32 ++++++++++ 9 files changed, 190 insertions(+) create mode 100644 kubernetes-cluster/README.md create mode 100644 kubernetes-cluster/credentials.tf create mode 100644 kubernetes-cluster/data.tf create mode 100644 kubernetes-cluster/firewall.tf create mode 100644 kubernetes-cluster/instances.tf create mode 100644 kubernetes-cluster/providers.tf create mode 100644 kubernetes-cluster/resources.tf create mode 100644 kubernetes-cluster/templates/cloud-init.yml create mode 100644 kubernetes-cluster/variables.tf diff --git a/kubernetes-cluster/README.md b/kubernetes-cluster/README.md new file mode 100644 index 0000000..5ddca64 --- /dev/null +++ b/kubernetes-cluster/README.md @@ -0,0 +1,7 @@ +# Kubernetes Cluster Example + +This example uses the triton and ansible terraform providers to create a Kubernetes cluster. + +The initial version is able to create a single type of node, a mixed control plane, etcd and worker node. + +This is ideal for PoC (single node) or small scale clusters with collocated control and worker functions (3 nodes). 
diff --git a/kubernetes-cluster/credentials.tf b/kubernetes-cluster/credentials.tf new file mode 100644 index 0000000..6e63211 --- /dev/null +++ b/kubernetes-cluster/credentials.tf @@ -0,0 +1,15 @@ +provider "triton" { + account = var.spearhead_account + key_id = var.spearhead_key_id + key_material = var.spearhead_key_material + + # If using a private installation of Triton, specify the URL, otherwise + # set the URL according to the region you wish to provision. + url = "https://eu-ro-1.api.spearhead.cloud" + + # If using a test Triton installation (self-signed certificate), use: + insecure_skip_tls_verify = false +} + +provider "ansible" { +} diff --git a/kubernetes-cluster/data.tf b/kubernetes-cluster/data.tf new file mode 100644 index 0000000..5ab1098 --- /dev/null +++ b/kubernetes-cluster/data.tf @@ -0,0 +1,3 @@ +data "template_file" "cloud_init" { + template = file("${path.module}/templates/cloud-init.yml") +} diff --git a/kubernetes-cluster/firewall.tf b/kubernetes-cluster/firewall.tf new file mode 100644 index 0000000..0368501 --- /dev/null +++ b/kubernetes-cluster/firewall.tf @@ -0,0 +1,67 @@ +resource "triton_firewall_rule" "allow_ssh" { + description = "Allow ssh traffic on port tcp/22 to all machines." + rule = "FROM any TO all vms ALLOW tcp PORT 22" + enabled = true +} + +resource "triton_firewall_rule" "allow_http" { + description = "Allow http traffic on port tcp/80 to all kube_node machines." + rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 80" + enabled = true +} + +resource "triton_firewall_rule" "allow_https" { + description = "Allow https traffic on port tcp/443 to all kube_node machines." + rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 443" + enabled = true +} + +resource "triton_firewall_rule" "allow_kube_proxy" { + description = "Allow kube_proxy traffic on port tcp/10250 to all kube_node machines." 
+ rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 10250" + enabled = true +} + +resource "triton_firewall_rule" "allow_kube_api" { + description = "Allow kube-api traffic on port tcp/6443 to all machines." + rule = "FROM any TO all vms ALLOW tcp PORT 6443" + enabled = true +} + + +# resource "triton_firewall_rule" "allow_etcd_api" { +# description = "Allow etcd traffic on port tcp/2380 to all kube_node machines." +# rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 2380" +# enabled = true +# } + +# resource "triton_firewall_rule" "allow_etcd_api_2" { +# description = "Allow etcd traffic on port tcp/2379 to all kube_node machines." +# rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 2379" +# enabled = true +# } + +# resource "triton_firewall_rule" "allow_calico_vxlan" { +# description = "Allow vxlan traffic on port udp/4789 to all kube_node machines." +# rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW udp PORT 4789" +# enabled = true +#} + +resource "triton_firewall_rule" "allow_internal_udp" { + description = "Allow udp internal traffic between all kube_node machines." + rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW udp PORT all" + enabled = true +} + +resource "triton_firewall_rule" "allow_internal_tcp" { + description = "Allow tcp internal traffic between all kube_node machines." + rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW tcp PORT all" + enabled = true +} + +resource "triton_firewall_rule" "allow_internal_icmp" { + description = "Allow icmp internal traffic between all kube_node machines." 
+ rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW icmp TYPE 8 CODE 0" + enabled = true +} + diff --git a/kubernetes-cluster/instances.tf b/kubernetes-cluster/instances.tf new file mode 100644 index 0000000..148d66c --- /dev/null +++ b/kubernetes-cluster/instances.tf @@ -0,0 +1,36 @@ +resource "triton_machine" "kube_nodes" { + count = var.number_of_kube_nodes + name = format("kube-node-%s", count.index) + package = "hvm-2cpu-4ram-100disk" + image = "${data.triton_image.base_image.id}" + firewall_enabled = true + + cloud_config = data.template_file.cloud_init.rendered + + tags = { + kube_node = "true" + } + + cns { + services = ["kube-api", "ghost-blog"] + } + + affinity = ["kube_node!=~true"] +} + +resource "ansible_host" "kube_nodes" { + count = var.number_of_kube_nodes + inventory_hostname = element(resource.triton_machine.kube_nodes.*.name, count.index) + groups = [ "kube_control_plane", "kube_node", "etcd" ] + + vars = { + ansible_user = "ubuntu" + ansible_ssh_host = element(resource.triton_machine.kube_nodes.*.primaryip, count.index) + ip = element(element(resource.triton_machine.kube_nodes.*.ips, count.index), 1) + } +} + +resource "ansible_group" "k8s_cluster" { + inventory_group_name = "k8s_cluster" + children = [ "kube_control_plane", "kube_node", "etcd" ] +} diff --git a/kubernetes-cluster/providers.tf b/kubernetes-cluster/providers.tf new file mode 100644 index 0000000..ff09503 --- /dev/null +++ b/kubernetes-cluster/providers.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + triton = { + source = "joyent/triton" + version = "0.8.2" + } + ansible = { + source = "nbering/ansible" + version = "1.0.4" + } + } +} diff --git a/kubernetes-cluster/resources.tf b/kubernetes-cluster/resources.tf new file mode 100644 index 0000000..41a9f6e --- /dev/null +++ b/kubernetes-cluster/resources.tf @@ -0,0 +1,6 @@ +data "triton_image" "base_image" { + name = var.image_name + # "ubuntu-20.04.04" + version = var.image_version + # "20220405.02" +} diff --git 
a/kubernetes-cluster/templates/cloud-init.yml b/kubernetes-cluster/templates/cloud-init.yml new file mode 100644 index 0000000..cd56844 --- /dev/null +++ b/kubernetes-cluster/templates/cloud-init.yml @@ -0,0 +1,12 @@ +#cloud-config +package_update: true +package_upgrade: true +swap: +bootcmd: + - swapoff -a + - growpart /dev/vda 3 + - pvresize /dev/vda3 + - lvcreate ubuntu-vg -n longhorn-lv -L 40G + - mkfs -t ext4 /dev/ubuntu-vg/longhorn-lv +mounts: + - [ /dev/ubuntu-vg/longhorn-lv, /var/lib/longhorn, ext4, defaults, 0, 0 ] diff --git a/kubernetes-cluster/variables.tf b/kubernetes-cluster/variables.tf new file mode 100644 index 0000000..a68542b --- /dev/null +++ b/kubernetes-cluster/variables.tf @@ -0,0 +1,32 @@ +variable "spearhead_account" { + description = "Spearhead Account Name" + type = string +} + +variable "spearhead_key_id" { + description = "Spearhead Key Id (ssh fingerprint)" + type = string +} + +variable "spearhead_key_material" { + description = "Spearhead Key Material (location of ssh key file)" + type = string +} + +variable "image_name" { + description = "Spearhead Cloud Image Name" + type = string + default = "ubuntu-20.04.04" +} + +variable "image_version" { + description = "Spearhead Cloud Image Version" + type = string + default = "20220405.02" +} + +variable "number_of_kube_nodes" { + description = "Number of kubernetes nodes" + type = number + default = 3 +}