Commit 9e07b66: Define worker nodes with sample workload

1 parent: f2220a2
12 files changed: +261 −47

Readme.md (+11)

```diff
@@ -2,3 +2,14 @@
 - add packages: libvirt-daemon-system qemu-utils qemu-system-x86
 - ssh-keygen -f id_ed25519 -t ed25519 -C terraform@main
 - https://askubuntu.com/a/1293019 AppArmor preventing access to storage pool
+
+Run the command below to access the sample workload:
+``` bash
+sudo KUBECONFIG=/etc/kubernetes/admin.conf kubectl port-forward --address 0.0.0.0 svc/frontend 8080:80
+```
+
+``` bash
+mkdir -p $HOME/.kube
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+```
```
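With the port-forward running, the guestbook should answer on port 8080 from outside the VM. A minimal check, assuming the client can resolve `cluster-endpoint.k8s.lab` (the endpoint defined in controlplane.tf); substitute the control-plane node's IP if that name is only resolvable inside the libvirt network:

``` bash
# assumes the kubectl port-forward from the block above is still active
curl -s http://cluster-endpoint.k8s.lab:8080/ | head
```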

controlplane.tf (+4 −2)

```diff
@@ -13,6 +13,7 @@ locals {
   controleplane_ips = [for i in range(2, 2 + var.controlplane_count) : cidrhost(local.controleplane_network, i)]
   domainname = "k8s.lab"
   cluster_endpoint = "cluster-endpoint.${local.domainname}"
+  cluster_endpoint_ip = module.control_plane.ip_address[0]
   cluster_endpoint_with_user = "${var.ssh_admin}@${local.cluster_endpoint}"
   kubeadm_token_id = substr(random_password.kubeadm_token.result, 0, 6)
   kubeadm_token = join(".", [local.kubeadm_token_id, substr(random_password.kubeadm_token.result, 6, 16)])
@@ -51,18 +52,19 @@ module "control_plane" {
   ]
 
   runcmd = [
-    "install-kubeadm.sh ${local.cluster_endpoint}:6443 ${local.kubeadm_token} ${local.kubeadm_certificate_key} --control-plane --discovery-token-unsafe-skip-ca-verification"
+    "install-kubeadm.sh ${local.cluster_endpoint}:6443 ${local.kubeadm_token} --certificate-key ${local.kubeadm_certificate_key} --control-plane --discovery-token-unsafe-skip-ca-verification"
   ]
 }
 
 resource "ssh_resource" "control_plane_certs" {
-  host        = module.control_plane.ip_address[0]
+  host        = local.cluster_endpoint_ip
   user        = var.ssh_admin
   private_key = var.ssh_private_key
   timeout     = "1m"
 
   triggers = {
     count_changes = length(local.controleplane_ips)
+    workers       = var.worker_count
   }
   commands = [
     "sudo kubeadm init phase upload-certs --upload-certs --certificate-key ${local.kubeadm_certificate_key}",
```

main.tf (+72)

```diff
@@ -27,3 +27,75 @@ resource "libvirt_network" "default" {
     }
   }
 }
+
+module "workers" {
+  source = "./modules/vm"
+
+  autostart          = false
+  vm_hostname_prefix = "worker"
+  vm_count           = var.worker_count
+  memory             = "1024"
+  vcpu               = 1
+  system_volume      = 10
+
+  time_zone = "CET"
+
+  os_img_url = var.os_img_url
+  pool       = libvirt_pool.cluster.name
+
+  dhcp = true
+  # vm_domain     = local.domainname
+  # ip_address    = local.controleplane_ips
+  # ip_gateway    = cidrhost(local.controleplane_network, 1)
+  # ip_nameserver = cidrhost(local.controleplane_network, 1)
+
+  bridge = libvirt_network.default.bridge
+
+  http_proxy = var.http_proxy
+
+  ssh_admin       = var.ssh_admin
+  ssh_private_key = var.ssh_private_key
+  ssh_keys = [
+    file("${var.ssh_private_key}.pub"),
+  ]
+
+  runcmd = [
+    "install-kubeadm.sh ${local.cluster_endpoint}:6443 ${local.kubeadm_token} --discovery-token-unsafe-skip-ca-verification"
+  ]
+}
+
+resource "ssh_resource" "workers_destroy" {
+  count       = var.worker_count
+  host        = local.cluster_endpoint_ip
+  user        = var.ssh_admin
+  private_key = var.ssh_private_key
+  when        = "destroy"
+  timeout     = "30s"
+
+  commands = [
+    "sudo /usr/local/bin/remove-node.sh ${module.workers.name[count.index]}"
+  ]
+}
+
+resource "ssh_resource" "sample_work" {
+  host        = local.cluster_endpoint_ip
+  user        = var.ssh_admin
+  private_key = var.ssh_private_key
+  timeout     = "11s"
+
+  file {
+    source      = "sample_work.yaml"
+    destination = "/tmp/sample_work.yaml"
+  }
+
+  commands = [
+    "sudo KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f /tmp/sample_work.yaml"
+  ]
+}
+
+output "worker" {
+  value = module.workers
+}
+output "run" {
+  value = ssh_resource.sample_work.result
+}
```
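With `var.worker_count` driving both the `workers` module and the per-node `workers_destroy` provisioners, resizing the pool is a single variable change. A sketch of the expected workflow (standard Terraform CLI; the variable name is the one declared in this repo):

``` bash
# add a worker; install-kubeadm.sh joins it to the cluster on first boot
terraform apply -var 'worker_count=3'

# shrink the pool; workers_destroy runs remove-node.sh on the control plane
# for each removed node before its VM is destroyed
terraform apply -var 'worker_count=2'
```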

modules/vm/main.tf (+4)

```diff
@@ -70,6 +70,10 @@ resource "libvirt_domain" "virt-machine" {
     autoport = true
   }
 
+  timeouts {
+    create = "10m"
+  }
+
   provisioner "remote-exec" {
     inline = [
       "echo \"Virtual Machine \"$(hostname)\" is UP!\"",
```

modules/vm/output.tf (+1 −1)

```diff
@@ -2,5 +2,5 @@ output "name" {
   value = libvirt_domain.virt-machine[*].name
 }
 output "ip_address" {
-  value = libvirt_domain.virt-machine[*].network_interface[0].addresses[0]
+  value = element(libvirt_domain.virt-machine[*].network_interface[0].addresses, 0)
 }
```

modules/vm/templates/10-containerd-net.conflist (−33)

This file was deleted.

modules/vm/templates/cloud_init.tpl (+1 −4)

```diff
@@ -30,9 +30,9 @@ packages:
 runcmd:
   - [ systemctl, daemon-reload ]
   - [ systemctl, enable, qemu-guest-agent ]
-  - [ systemctl, start, qemu-guest-agent ]
   - [ systemctl, restart, systemd-networkd ]
 ${runcmd}
+  - [ systemctl, start, qemu-guest-agent ]
 
 fqdn: ${hostname}
 
@@ -105,9 +105,6 @@ write_files:
   - path: /etc/containerd/config.toml
     content: |
      ${ indent(8, file("${path}/templates/containerd-config.toml")) }
-  - path: /etc/cni/net.d/10-containerd-net.conflist
-    content: |
-      ${ indent(8, file("${path}/templates/10-containerd-net.conflist")) }
   - path: /usr/local/bin/install-kubeadm.sh
     permissions: 0o755
     content: |
```

modules/vm/templates/containerd-config.toml (+1 −1)

```diff
@@ -4,7 +4,7 @@ version = 2
 [plugins."io.containerd.grpc.v1.cri"]
   sandbox_image = "registry.k8s.io/pause:3.9"
   [plugins."io.containerd.grpc.v1.cri".cni]
-    bin_dir = "/usr/lib/cni"
+    bin_dir = "/opt/cni/bin"
     conf_dir = "/etc/cni/net.d"
 [plugins."io.containerd.internal.v1.opt"]
   path = "/var/lib/containerd/opt"
```

modules/vm/templates/install-kubeadm.sh (+5 −3)

```diff
@@ -12,14 +12,16 @@ sysctl --system
 
 ENDPOINT=$1
 TOKEN=$2
-CERT_KEY=$3
-OTHER_JOIN_ARGS=${*:4}
+CERT_KEY=$4
+OTHER_JOIN_ARGS=${*:3}
 
 INIT=0
 if [ `hostname` = 'controlplane-01' ]; then INIT=1 ; fi
 
 if [ $INIT -eq 1 ]; then
   kubeadm init --control-plane-endpoint=$ENDPOINT --upload-certs --certificate-key $CERT_KEY --token $TOKEN
+  export KUBECONFIG=/etc/kubernetes/admin.conf
+  kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml
 else
-  kubeadm join $ENDPOINT --certificate-key $CERT_KEY --token $TOKEN $OTHER_JOIN_ARGS
+  kubeadm join $ENDPOINT --token $TOKEN $OTHER_JOIN_ARGS
 fi
```
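The positional shuffle is easy to misread: `--certificate-key` now travels inside the join arguments, so `$3` is the flag and `$4` its value, which is why `CERT_KEY=$4` and `OTHER_JOIN_ARGS=${*:3}` pair up. A sketch of the two invocations as generated by the Terraform `runcmd`s above (`<token>` and `<cert-key>` are placeholders):

``` bash
# control-plane node (from controlplane.tf): $4 picks up the key,
# which the kubeadm init path on controlplane-01 needs
install-kubeadm.sh cluster-endpoint.k8s.lab:6443 <token> \
  --certificate-key <cert-key> --control-plane --discovery-token-unsafe-skip-ca-verification

# worker node (from main.tf): no key is passed, so CERT_KEY stays empty;
# that is harmless because only the init branch ever reads it
install-kubeadm.sh cluster-endpoint.k8s.lab:6443 <token> \
  --discovery-token-unsafe-skip-ca-verification
```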

modules/vm/templates/remove-node.sh (+11 −3)

```diff
@@ -2,13 +2,21 @@
 
 set -e
 
-NODE_NAME=`hostname`
+NODE_NAME=${1:-`hostname`}
 
 export KUBECONFIG=/etc/kubernetes/admin.conf
 
 kubectl drain $NODE_NAME --delete-emptydir-data --force --ignore-daemonsets
 kubectl delete node $NODE_NAME
 
-ETCD_ID=$(etcdctl --endpoints=127.0.0.1:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt endpoint status | awk -F, '{print $2}')
+# having param $1 set implies that it is a worker node
+if [ -v 1 ]; then exit 0 ; fi
 
-etcdctl --endpoints=127.0.0.1:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt member remove $ETCD_ID
+ETCD_COUNT=$(etcdctl --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt member list | wc -l)
+
+# check for the last control-plane node
+if [ $ETCD_COUNT -eq 1 ]; then exit 0 ; fi
+
+ETCD_ID=$(etcdctl --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt endpoint status | awk -F, '{print $2}')
+
+etcdctl --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt member remove $ETCD_ID
```
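The optional first argument lets the same script serve both teardown paths; the worker form matches the `workers_destroy` command in main.tf. A sketch of the two call patterns (the node name `worker-01` is hypothetical, standing in for whatever the vm module names the instance):

``` bash
# worker removal, run on a control-plane node: drain and delete,
# then exit before the etcd steps (workers run no etcd member)
sudo /usr/local/bin/remove-node.sh worker-01

# no argument: remove the local control-plane node, including its
# etcd member unless it is the last one
sudo /usr/local/bin/remove-node.sh
```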

sample_work.yaml (+145, new file)

```yaml
# from https://kubernetes.io/docs/tutorials/stateless-application/guestbook/
apiVersion: v1
kind: Service
metadata:
  name: redis-leader
  labels:
    app: redis
    role: leader
    tier: backend
spec:
  ports:
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    role: leader
    tier: backend
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-leader
  labels:
    app: redis
    role: leader
    tier: backend
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
        role: leader
        tier: backend
    spec:
      containers:
      - name: leader
        image: "docker.io/redis:6.0.5"
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: redis-follower
  labels:
    app: redis
    role: follower
    tier: backend
spec:
  ports:
    # the port that this service should serve on
  - port: 6379
  selector:
    app: redis
    role: follower
    tier: backend
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-follower
  labels:
    app: redis
    role: follower
    tier: backend
spec:
  replicas: 2
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
        role: follower
        tier: backend
    spec:
      containers:
      - name: follower
        image: gcr.io/google_samples/gb-redis-follower:v2
        env:
        - name: GET_HOSTS_FROM
          value: "dns"
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
    # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: guestbook
      tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v5
        env:
        - name: GET_HOSTS_FROM
          value: "dns"
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 80
```
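Before port-forwarding, it is worth confirming the guestbook converged; a minimal check using the same admin kubeconfig as elsewhere in this commit (label selectors come from the manifest above):

``` bash
# expect deployments frontend 3/3, redis-leader 1/1, redis-follower 2/2
sudo KUBECONFIG=/etc/kubernetes/admin.conf kubectl get deployments
sudo KUBECONFIG=/etc/kubernetes/admin.conf kubectl get pods -l app=guestbook
```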
