add kubernetes cluster sample #1

kubernetes-cluster/README.md

# Kubernetes Cluster Example

This example uses the Triton and Ansible Terraform providers to create a Kubernetes cluster.

The initial version creates a single node type: a combined control plane, etcd, and worker node.

This is ideal for a proof of concept (single node) or for small clusters with colocated control-plane and worker functions (3 nodes).
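
To try the example, provide your account details (for example in a `terraform.tfvars` file) and run `terraform init` followed by `terraform apply`. A minimal sketch, with placeholder values to replace with your own:

```hcl
# terraform.tfvars -- illustrative values only
spearhead_account      = "my-account"        # your Spearhead account name
spearhead_key_id       = "aa:bb:cc:dd:..."   # fingerprint of an SSH key on the account
spearhead_key_material = "~/.ssh/id_rsa"     # path to the matching private key file
number_of_kube_nodes   = 3
```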

kubernetes-cluster/credentials.tf

provider "triton" {
|
||||
account = var.spearhead_account
|
||||
key_id = var.spearhead_key_id
|
||||
key_material = var.spearhead_key_material
|
||||
|
||||
# If using a private installation of Triton, specify the URL, otherwise
|
||||
# set the URL according to the region you wish to provision.
|
||||
url = "https://eu-ro-1.api.spearhead.cloud"
|
||||
|
||||
# If using a test Triton installation (self-signed certifcate), use:
|
||||
insecure_skip_tls_verify = false
|
||||
}
|
||||
|
||||
provider "ansible" {
|
||||
}
|

kubernetes-cluster/data.tf

data "template_file" "cloud_init" {
|
||||
template = file("${path.module}/templates/cloud-init.yml")
|
||||
}
|
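
Note that the hashicorp/template provider behind `template_file` is archived; on Terraform 0.12 and later the built-in `templatefile()` function is the usual replacement. A sketch of the equivalent (hypothetical) form, used directly where `cloud_config` is set:

# Hypothetical alternative in instances.tf, with no template provider required:
# cloud_config = templatefile("${path.module}/templates/cloud-init.yml", {})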

kubernetes-cluster/firewall.tf

resource "triton_firewall_rule" "allow_ssh" {
|
||||
description = "Allow ssh traffic on port tcp/22 to all machines."
|
||||
rule = "FROM any TO all vms ALLOW tcp PORT 22"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
resource "triton_firewall_rule" "allow_http" {
|
||||
description = "Allow http traffic on port tcp/80 to all machines."
|
||||
rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 80"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
resource "triton_firewall_rule" "allow_https" {
|
||||
description = "Allow https traffic on port tcp/443 to all machines."
|
||||
rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 443"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
resource "triton_firewall_rule" "allow_kube_proxy" {
|
||||
description = "Allow kube_proxy traffic on port tcp/10250 to all machines."
|
||||
rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 10250"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
resource "triton_firewall_rule" "allow_kube_api" {
|
||||
description = "Allow kube-api traffic on port tcp/6443 to all kube_node machines."
|
||||
rule = "FROM any TO all vms ALLOW tcp PORT 6443"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
|
||||
# resource "triton_firewall_rule" "allow_etcd_api" {
|
||||
# description = "Allow kube-api traffic on port tcp/2380 to all kube_node machines."
|
||||
# rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 2380"
|
||||
# enabled = true
|
||||
# }
|
||||
|
||||
# resource "triton_firewall_rule" "allow_etcd_api_2" {
|
||||
# description = "Allow kube-api traffic on port tcp/2379 to all kube_node machines."
|
||||
# rule = "FROM any TO tag \"kube_node\" ALLOW tcp PORT 2379"
|
||||
# enabled = true
|
||||
# }
|
||||
|
||||
# resource "triton_firewall_rule" "allow_calico_vxlan" {
|
||||
# description = "Allow vxlan traffic on port udp/47809 to all kube_node machines."
|
||||
# rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW udp PORT 4789"
|
||||
# enabled = true
|
||||
#}
|
||||
|
||||
resource "triton_firewall_rule" "allow_internal_udp" {
|
||||
description = "Allow udp internal traffic between all kube_node machines."
|
||||
rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW udp PORT all"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
resource "triton_firewall_rule" "allow_internal_tcp" {
|
||||
description = "Allow tcp internal traffic between all kube_node machines."
|
||||
rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW tcp PORT all"
|
||||
enabled = true
|
||||
}
|
||||
|
||||
resource "triton_firewall_rule" "allow_internal_icmp" {
|
||||
description = "Allow icmp internal traffic between all kube_node machines."
|
||||
rule = "FROM tag \"kube_node\" TO tag \"kube_node\" ALLOW icmp TYPE 8 CODE 0"
|
||||
enabled = true
|
||||
}
|
||||
|

kubernetes-cluster/instances.tf

resource "triton_machine" "kube_nodes" {
|
||||
count = var.number_of_kube_nodes
|
||||
name = format("kube-node-%s", count.index)
|
||||
package = "hvm-2cpu-4ram-100disk"
|
||||
image = "${data.triton_image.base_image.id}"
|
||||
firewall_enabled = true
|
||||
|
||||
cloud_config = data.template_file.cloud_init.rendered
|
||||
|
||||
tags = {
|
||||
kube_node = "true"
|
||||
}
|
||||
|
||||
cns {
|
||||
services = ["kube-api", "ghost-blog"]
|
||||
}
|
||||
|
||||
affinity = ["kube_node!=~true"]
|
||||
}
|
||||
|
||||
resource "ansible_host" "kube_nodes" {
|
||||
count = var.number_of_kube_nodes
|
||||
inventory_hostname = element(resource.triton_machine.kube_nodes.*.name, count.index)
|
||||
groups = [ "kube_control_plane", "kube_node", "etcd" ]
|
||||
|
||||
vars = {
|
||||
ansible_user = "ubuntu"
|
||||
ansible_ssh_host = element(resource.triton_machine.kube_nodes.*.primaryip, count.index)
|
||||
ip = element(element(resource.triton_machine.kube_nodes.*.ips, count.index), 1)
|
||||
}
|
||||
}
|
||||
|
||||
resource "ansible_group" "k8s_cluster" {
|
||||
inventory_group_name = "k8s_cluster"
|
||||
children = [ "kube_control_plane", "kube_node", "etcd" ]
|
||||
}
|
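
For convenience, the node addresses could be exposed after `terraform apply`. A minimal sketch of a hypothetical outputs.tf (not part of this example), using only attributes already referenced above:

# outputs.tf (hypothetical): print provisioned node names and public IPs.
output "kube_node_names" {
  value = triton_machine.kube_nodes.*.name
}

output "kube_node_ips" {
  value = triton_machine.kube_nodes.*.primaryip
}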

kubernetes-cluster/providers.tf

terraform {
  required_providers {
    triton = {
      source  = "joyent/triton"
      version = "0.8.2"
    }
    ansible = {
      source  = "nbering/ansible"
      version = "1.0.4"
    }
  }
}

kubernetes-cluster/resources.tf

data "triton_image" "base_image" {
|
||||
name = var.image_name
|
||||
# "ubuntu-20.04.04"
|
||||
version = var.image_version
|
||||
"20220405.02"
|
||||
}
|

kubernetes-cluster/templates/cloud-init.yml

#cloud-config
package_update: true
package_upgrade: true
swap:
bootcmd:
  # Kubernetes requires swap to be disabled.
  - swapoff -a
  # Grow the root partition to fill the disk, extend the LVM physical volume,
  # then carve out a 40G logical volume for Longhorn storage.
  - growpart /dev/vda 3
  - pvresize /dev/vda3
  - lvcreate ubuntu-vg -n longhorn-lv -L 40G
  - mkfs -t ext4 /dev/ubuntu-vg/longhorn-lv
mounts:
  - [ /dev/ubuntu-vg/longhorn-lv, /var/lib/longhorn, ext4, defaults, 0, 0 ]

kubernetes-cluster/variables.tf

variable "spearhead_account" {
|
||||
description = "Spearhead Account Name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "spearhead_key_id" {
|
||||
description = "Spearhead Key Id (ssh fingerprint)"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "spearhead_key_material" {
|
||||
description = "Spearhead Key Material (location of ssh key file)"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "image_name" {
|
||||
description = "Spearhead Cloud Image Name"
|
||||
type = string
|
||||
default = "ubuntu-20.04.04"
|
||||
}
|
||||
|
||||
variable "image_version" {
|
||||
description = "Spearhead Cloud Image Version"
|
||||
type = string
|
||||
default = "20220405.02"
|
||||
}
|
||||
|
||||
variable "number_of_kube_nodes" {
|
||||
description = "Number of kubernetes nodes"
|
||||
type = number
|
||||
default = 3
|
||||
}
|