##### App lifecycle management
# create deployment
kubectl create deployment webapp1 --image=nginx:1.16-alpine-perl --dry-run=client -o yaml > webapp1.yml
# create pod
kubectl run nginx --image=nginx --restart=Never --dry-run=client -o yaml
# rollout
k apply -f webapp2.yml   # webapp2.yml updates the image to nginx:1.17-alpine-perl
kubectl rollout status deployment webapp1
deployment "webapp1" successfully rolled out
kubectl get deployment -o wide | grep webapp1
webapp1 8/10 5 8 27h nginx nginx:1.17-alpine app=webapp
kubectl rollout undo deploy webapp1
deployment.apps/webapp1 rolled back
kubectl get deployment -o wide | grep webapp1
webapp1 10/10 10 10 27h nginx nginx:1.16-alpine app=webapp
kp -w   # kp: shell alias (here presumably 'kubectl get pods'), -w to watch
kubectl scale --replicas=5 deployment/webapp1
# CronJobs / ConfigMap
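# quick imperative sketch for a CronJob and ConfigMaps; names, image, schedule and file below are illustrative, not from the course
kubectl create cronjob hello --image=busybox --schedule="*/5 * * * *" --dry-run=client -o yaml -- /bin/sh -c 'date' > cronjob.yml
kubectl create configmap app-config --from-literal=APP_MODE=prod
kubectl create configmap app-config-file --from-file=app.properties   # app.properties is a placeholder file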
#### Installation, configuration, Validation (kubeadm)
# kubeadm init, kubeadm join, kubeadm reset
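# rough bootstrap sketch with kubeadm; pod CIDR, API server address, token and hash are placeholders
kubeadm init --pod-network-cidr=10.244.0.0/16
mkdir -p $HOME/.kube && sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
# on each worker node, using the token/hash printed by 'kubeadm init':
kubeadm join 172.17.0.21:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>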
kubectl get componentstatus
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
scheduler Healthy ok
kubectl cluster-info
Kubernetes master is running at https://1a733cd8-97a3-452d-b3e9-8968c5836625.k8s.ondigitalocean.com
CoreDNS is running at https://1a733cd8-97a3-452d-b3e9-8968c5836625.k8s.ondigitalocean.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
kubectl get nodes
NAME STATUS ROLES AGE VERSION
pool-nyqch4qlz-3p9k6 Ready <none> 5d11h v1.18.8
pool-nyqch4qlz-3pjhg Ready <none> 12d v1.18.8
#### Networking
$ kubectl expose deployment webapp1 --port=80
service/webapp1 exposed
$ kubectl get svc | grep webapp1
webapp1 ClusterIP 10.245.61.106 <none> 80/TCP 25s
$ kubectl get svc -o=wide | grep webapp1
webapp1 ClusterIP 10.245.61.106 <none> 80/TCP 80s app=webapp
$ kubectl expose deployment webapp1 --port=80 --dry-run -o yaml
W1027 22:26:38.363187 23564 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: webapp1
  name: webapp1
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webapp
status:
  loadBalancer: {}
#### scheduling
kubectl label nodes node1 disktype=ssd
kubectl get nodes --show-labels=true
# in the deployment's pod template spec, select the labelled node
nodeSelector:
  disktype: "ssd"
# multiple schedulers
# manually schedule a pod without a scheduler (nodeName sketch below)
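# minimal sketch of a manually scheduled pod, assuming a node named node1 (as labelled above)
apiVersion: v1
kind: Pod
metadata:
  name: nginx-manual
spec:
  nodeName: node1          # binds directly to this node, bypassing the scheduler
  containers:
  - name: nginx
    image: nginx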
kubectl get events -w # watch event as it happens
#### Security
#### Cluster Maintenance
# Upgrade (control plane first, then workers; command sketch below)
backup (etcd snapshot / cluster config)
upgrade kubeadm
kubeadm upgrade plan
drain the control plane node
kubeadm upgrade apply v1.18.x
upgrade kubelet and kubectl
systemctl restart kubelet
kubectl uncordon <control plane node>
upgrade worker nodes the same way (drain, kubeadm upgrade node, upgrade kubelet, uncordon)
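# minimal control-plane command sketch, assuming Debian/Ubuntu packages and a node named "controlplane"; 1.18.8-00 is an illustrative version
apt-get update && apt-get install -y kubeadm=1.18.8-00
kubeadm upgrade plan
kubectl drain controlplane --ignore-daemonsets
kubeadm upgrade apply v1.18.8
apt-get install -y kubelet=1.18.8-00 kubectl=1.18.8-00
systemctl daemon-reload && systemctl restart kubelet
kubectl uncordon controlplane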
## backup/restore
kubectl get all --all-namespaces -o yaml > all-deployment-services.yaml
(Velero, formerly called Ark)
# back up both the master node's kubernetes config and the etcd db
# back up etcd in 2 ways: etcd built-in snapshot / volume snapshot
etcd: when launched, the --data-dir param defines where etcd stores its data; back up that directory.
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key \
snapshot save /opt/snapshot-pre-boot.db
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key \
--data-dir /var/lib/etcd-from-backup \
snapshot restore /opt/snapshot-pre-boot.db
Command (the running etcd flags; in a kubeadm cluster these come from the static pod manifest /etc/kubernetes/manifests/etcd.yaml):
etcd
--advertise-client-urls=https://172.17.0.21:2379
--cert-file=/etc/kubernetes/pki/etcd/server.crt
--client-cert-auth=true
--data-dir=/var/lib/etcd          # ===> change to /var/lib/etcd-from-backup after the restore
--initial-advertise-peer-urls=https://172.17.0.21:2380
--initial-cluster=controlplane=https://172.17.0.21:2380
--key-file=/etc/kubernetes/pki/etcd/server.key
--listen-client-urls=https://127.0.0.1:2379,https://172.17.0.21:2379
--listen-metrics-urls=http://127.0.0.1:2381
--listen-peer-urls=https://172.17.0.21:2380
--name=controlplane
--peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
--peer-client-cert-auth=true
--peer-key-file=/etc/kubernetes/pki/etcd/peer.key
--peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
--snapshot-count=10000
--trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
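# after restoring, point the etcd static pod at the new data dir; a sketch of the matching edits in /etc/kubernetes/manifests/etcd.yaml (volume name may differ in your manifest)
    volumeMounts:
    - mountPath: /var/lib/etcd-from-backup
      name: etcd-data
  volumes:
  - hostPath:
      path: /var/lib/etcd-from-backup
      type: DirectoryOrCreate
    name: etcd-data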
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/etcd/ca.crt --cert=/etc/etcd/etcd.crt --key=/etc/etcd/etcd.key put course "Today is great"
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/etcd/ca.crt --cert=/etc/etcd/etcd.crt --key=/etc/etcd/etcd.key get course
#### Monitor and Logging
## Monitor cluster
# Node problem detector
# Metrics-server
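# install sketch using the upstream metrics-server release manifest (needed before 'kubectl top' works)
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml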
kubectl top nodes
kubectl top pods
## monitor app
Liveness probe
Readiness probe
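# minimal container-level probe sketch; path, port and timings below are illustrative
  containers:
  - name: webapp
    image: nginx
    livenessProbe:
      httpGet:
        path: /healthz
        port: 80
    readinessProbe:
      httpGet:
        path: /ready
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10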
## Cluster component log
# on each node: /var/log (container logs under /var/log/containers)
kube-apiserver.log, kube-scheduler.log, kube-controller-manager.log
kubelet.log, kube-proxy.log
## app logs
kubectl logs pod1
kubectl describe pod pod1
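# useful log variants (standard kubectl flags; pod/container names are placeholders)
kubectl logs -f pod1                 # follow the log stream
kubectl logs pod1 -c container1      # specific container in a multi-container pod
kubectl logs pod1 --previous         # previous (crashed) instance of the container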
### Storage
# Static Provisioning
# create storage, pv, pvc, pod (using pvc)
--pod
    volumeMounts:
    - mountPath: /log
      name: log-volume
  volumes:
  - name: log-volume
    hostPath:
      path: "/var/log/webapp"
--pv
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-log
spec:
  storageClassName: manual
  capacity:
    storage: 100Mi
  accessModes:
  - ReadWriteMany
  hostPath:
    path: "/pv/log"
--pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: claim-log-1
spec:
  storageClassName: manual
  accessModes:
  - ReadWriteOnce      # note: pv-log above offers ReadWriteMany; the claim stays Pending unless this matches
  resources:
    requests:
      storage: 30Mi
--pod
    volumeMounts:
    - mountPath: /log
      name: log-volume
  volumes:
  - name: log-volume
    persistentVolumeClaim:
      claimName: claim-log-1
# Dynamic Provisioning
StorageClass (dynamically provisions storage), pvc, pod (using pvc); sketch below
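# minimal dynamic provisioning sketch; the class name and provisioner are illustrative (cloud-specific in practice)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast
provisioner: kubernetes.io/gce-pd      # example in-tree provisioner
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: claim-dynamic
spec:
  storageClassName: fast
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi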
### JSONPath
kubectl get pods -o=jsonpath='{ .items[1].spec.containers[0].image }'
nginx
kubectl get nodes -o=jsonpath='{.items[*].metadata.name}'
master node1 node2
kubectl get nodes -o=jsonpath='{.items[*].status.nodeInfo.architecture}'
amd64 amd64 amd64
kubectl get nodes -o=jsonpath='{.items[*].status.capacity.cpu}'
12 12 12
kubectl get nodes -o=jsonpath='{.items[*].metadata.name} {.items[*].status.capacity.cpu}'
master node1 node2 12 12 12
kubectl get nodes -o=jsonpath='{.items[*].metadata.name} {"\n"} {.items[*].status.capacity.cpu}'
master node1 node2
12 12 12
kubectl get nodes -o=jsonpath='{range .items[*]} {.metadata.name} {"\t"} {.status.capacity.cpu}{"\n"}{end}'
master 12
node1 12
node2 12
kubectl get nodes -o=custom-columns='NODE:.metadata.name,CPU:.status.capacity.cpu'
NODE     CPU
master   12
node1    12
node2    12
kubectl get nodes --sort-by=.metadata.name
kubectl get nodes --sort-by=.status.capacity.cpu
k -n postgresql get secret postgresql -o=jsonpath='{.data.postgresql-password}' | base64 --decode | more
Run the command kubectl expose deployment hr-web-app --type=NodePort --port=8080 --name=hr-web-app-service --dry-run=client -o yaml > hr-web-app-service.yaml to generate a service definition file. Then edit the nodePort in it (see the sketch below) and create the service.
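# sketch of the edited ports section; 30080 is an illustrative nodePort (any free port in 30000-32767)
spec:
  type: NodePort
  ports:
  - port: 8080
    targetPort: 8080
    nodePort: 30080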
Use a JSONPath query to retrieve the osImage of all the nodes and store it in the file /opt/outputs/nodes_os_x43kj56.txt
# get osimage for each node
kubectl get nodes -o jsonpath='{.items[*].status.nodeInfo.osImage}' > /opt/outputs/nodes_os_x43kj56.txt