@@ -8,27 +8,6 @@ resource "libvirt_pool" "cluster" {
8
8
path = var.pool_path
9
9
}
10
10
11
- resource "random_password" "kubeadm_token" {
12
- length = 22
13
- special = false
14
- upper = false
15
- }
16
-
17
- resource "random_id" "kubeadm_certificate_key" {
18
- byte_length = 32
19
- }
20
-
21
- locals {
22
- controleplane_network = var.networks[0]
23
- controleplane_ips = [for i in range(2, 2 + var.controlplane_count) : cidrhost(local.controleplane_network, i)]
24
- domainname = "k8s.lab"
25
- cluster_endpoint = "cluster-endpoint.${local.domainname}"
26
- cluster_endpoint_with_user = "${var.ssh_admin}@${local.cluster_endpoint}"
27
- kubeadm_token_id = substr(random_password.kubeadm_token.result, 0, 6)
28
- kubeadm_token = join(".", [local.kubeadm_token_id, substr(random_password.kubeadm_token.result, 6, 16)])
29
- kubeadm_certificate_key = random_id.kubeadm_certificate_key.hex
30
- }
31
-
32
11
resource "libvirt_network" "default" {
33
12
name = "default"
34
13
addresses = var.networks
@@ -48,95 +27,3 @@ resource "libvirt_network" "default" {
48
27
}
49
28
}
50
29
}
51
-
52
- module "control_plane" {
53
- source = "./modules/vm"
54
-
55
- autostart = false
56
- vm_hostname_prefix = "controlplane-"
57
- vm_count = length(local.controleplane_ips)
58
- memory = "2048"
59
- vcpu = 2
60
- system_volume = 10
61
-
62
- time_zone = "CET"
63
-
64
- os_img_url = var.os_img_url
65
- pool = libvirt_pool.cluster.name
66
-
67
- # dhcp = true
68
- vm_domain = local.domainname
69
- ip_address = local.controleplane_ips
70
- ip_gateway = cidrhost(local.controleplane_network, 1)
71
- ip_nameserver = cidrhost(local.controleplane_network, 1)
72
-
73
- bridge = libvirt_network.default.bridge
74
-
75
- http_proxy = var.http_proxy
76
-
77
- ssh_admin = var.ssh_admin
78
- ssh_private_key = var.ssh_private_key
79
- ssh_keys = [
80
- file("${var.ssh_private_key}.pub"),
81
- ]
82
- }
83
- resource "ssh_resource" "control_plane_certs" {
84
- host = module.control_plane.ip_address[0]
85
- user = var.ssh_admin
86
- private_key = var.ssh_private_key
87
- timeout = "1m"
88
-
89
- triggers = {
90
- count_changes = length(local.controleplane_ips)
91
- }
92
- commands = [
93
- "sudo kubeadm init phase upload-certs --upload-certs --certificate-key ${local.kubeadm_certificate_key}",
94
- "sudo kubeadm token create ${local.kubeadm_token} || true",
95
- ]
96
- }
97
- resource "ssh_resource" "control_plane" {
98
- count = length(local.controleplane_ips)
99
- host = module.control_plane.ip_address[count.index]
100
- user = var.ssh_admin
101
- private_key = var.ssh_private_key
102
-
103
- commands = [
104
- "sudo /usr/local/bin/install-kubeadm.sh cluster-endpoint.k8s.lab:6443 ${local.kubeadm_token} ${local.kubeadm_certificate_key} --control-plane --discovery-token-unsafe-skip-ca-verification"
105
- ]
106
- }
107
-
108
- resource "ssh_resource" "control_plane_destroy" {
109
- count = length(local.controleplane_ips)
110
- host = module.control_plane.ip_address[count.index]
111
- user = var.ssh_admin
112
- private_key = var.ssh_private_key
113
- when = "destroy"
114
- timeout = "30s"
115
-
116
- file {
117
- source = "remove-node.sh"
118
- destination = "/tmp/remove-node.sh"
119
- permissions = "0700"
120
- }
121
-
122
- commands = [
123
- "sudo /tmp/remove-node.sh"
124
- ]
125
- }
126
-
127
- # kubeadm init phase upload-certs --upload-certs --certificate-key d9456efcc50c12d8f5fff93c097a16d2495fb5df9cb17cd2fd26f8022a926af4
128
- # kubeadm token create qahkjs.ru8katsu52fep1ea
129
-
130
- # # kubectl cordon controlplane-02
131
- # kubectl drain controlplane-02 --ignore-daemonsets
132
- # kubectl delete node controlplane-02
133
-
134
- # sudo etcdctl --endpoints=127.0.0.1:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt endpoint status
135
- # sudo etcdctl --endpoints=cluster-endpoint.k8s.lab:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt member remove c7b9a74f4a348e3d
136
-
137
- output "outputs" {
138
- value = module.control_plane
139
- }
140
- output "run" {
141
- value = ssh_resource.control_plane[*].result
142
- }
0 commit comments