@@ -27,3 +27,75 @@ resource "libvirt_network" "default" {
27
27
}
28
28
}
29
29
}
30
# Worker node VMs, created via the shared ./modules/vm libvirt module.
# One VM is created per var.worker_count; each joins the cluster on first boot.
module "workers" {
  source = "./modules/vm"

  autostart          = false
  vm_hostname_prefix = "worker"
  vm_count           = var.worker_count
  # Memory in MiB, passed as a string because the module forwards it to libvirt as-is.
  memory = "1024"
  vcpu   = 1
  # System (root) volume size — presumably GiB; confirm against the module's variable docs.
  system_volume = 10

  time_zone = "CET"

  os_img_url = var.os_img_url
  pool       = libvirt_pool.cluster.name

  # Workers get their addresses from DHCP; the static-IP settings below are
  # intentionally kept commented out for reference.
  dhcp = true
  # vm_domain     = local.domainname
  # ip_address    = local.controleplane_ips
  # ip_gateway    = cidrhost(local.controleplane_network, 1)
  # ip_nameserver = cidrhost(local.controleplane_network, 1)

  bridge = libvirt_network.default.bridge

  http_proxy = var.http_proxy

  ssh_admin       = var.ssh_admin
  ssh_private_key = var.ssh_private_key
  ssh_keys = [
    file("${var.ssh_private_key}.pub"),
  ]

  # Cloud-init runcmd: join this worker to the cluster via kubeadm.
  # NOTE(review): --discovery-token-unsafe-skip-ca-verification bypasses CA
  # pinning of the API server; acceptable for a lab, not for production.
  runcmd = [
    "install-kubeadm.sh ${local.cluster_endpoint}:6443 ${local.kubeadm_token} --discovery-token-unsafe-skip-ca-verification"
  ]
}
66
# Destroy-time cleanup: when a worker VM is removed, drain and delete its
# node object from the cluster by running remove-node.sh on the control plane.
resource "ssh_resource" "workers_destroy" {
  count       = var.worker_count
  host        = local.cluster_endpoint_ip
  user        = var.ssh_admin
  private_key = var.ssh_private_key
  # Run the commands only during destroy of this resource instance.
  when    = "destroy"
  timeout = "30s"

  commands = [
    "sudo /usr/local/bin/remove-node.sh ${module.workers.name[count.index]}"
  ]
}
79
# Smoke test: copy a sample workload manifest to the control plane and apply it.
resource "ssh_resource" "sample_work" {
  host        = local.cluster_endpoint_ip
  user        = var.ssh_admin
  private_key = var.ssh_private_key
  timeout     = "11s"

  # Upload the manifest before the commands below run.
  file {
    source      = "sample_work.yaml"
    destination = "/tmp/sample_work.yaml"
  }

  commands = [
    "sudo KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f /tmp/sample_work.yaml"
  ]
}
95
# All attributes exported by the workers module (hostnames, addresses, …).
# NOTE(review): this exposes everything the module outputs — verify it does
# not leak the private key material passed in via ssh_private_key.
output "worker" {
  value = module.workers
}

# stdout of the sample-workload kubectl apply, for quick inspection.
output "run" {
  value = ssh_resource.sample_work.result
}
0 commit comments