Skip to content

Commit 4ca91af

Browse files
authored
Issue #SB-30208 feat: Helm chart & Ansible changes for Kafka provision on Kubernetes (#1491)
Issue #TG-1149 feat: Helm chart & Ansible changes for Kafka provision on Kubernetes
1 parent c628323 commit 4ca91af

30 files changed

+3769
-0
lines changed
+10
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
---
# Provision a Kafka cluster on Kubernetes by applying the `kafka` role
# against the local inventory host. Secrets (e.g. kubeconfig path) come
# from the inventory's encrypted secrets file.
- hosts: local
  # No facts are needed; the role only templates values and runs helm.
  gather_facts: false
  vars_files:
    - "{{ inventory_dir }}/secrets.yml"
  environment:
    # Point every helm/kubectl invocation in the role at the target cluster.
    KUBECONFIG: "{{ kubeconfig_path }}"
  roles:
    - kafka
  tags: kafka
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
---
# Defaults for the kafka role: deploys the Bitnami Kafka chart on Kubernetes.
# Override any of these per-inventory as needed.

# Kubernetes namespace the chart is installed into (created if missing).
kafka_namespace: "kafka"

# Broker container image.
kafka_image_repository: "bitnami/kafka"
kafka_image_tag: "2.8.1-debian-10-r31"

# Allow topics to be deleted through the admin API.
kafka_delete_topic_enable: true

# Number of Kafka broker replicas.
kafka_replica_count: 3

# Kubernetes Service type for external access. It can be NodePort or LoadBalancer.
service_type: "LoadBalancer"
service_port: 9092

# PV config — persistent volume claim size per broker (Kubernetes quantity).
kafka_persistence_size: "50Gi"

# Zookeeper configs
zookeeper_enabled: true
# Heap size in MB passed to the Zookeeper JVM.
zookeeper_heapsize: 256
zookeeper_replica_count: 3
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
---
# Render the chart's values file from the Jinja template so role variables
# (replica counts, image tag, service type, ...) flow into the Helm release.
- name: template values.yaml file
  ansible.builtin.template:
    src: "{{ chart_base_path }}/kafka/values.j2"
    dest: "{{ chart_base_path }}/kafka/values.yaml"

# Pull the chart's declared dependencies (bitnami common, zookeeper)
# before installing. NOTE(review): plain shell is used rather than the
# kubernetes.core.helm module, so these tasks always report "changed".
- name: Install dependencies
  ansible.builtin.shell: helm dependency update {{ chart_base_path }}/kafka

# Idempotent install/upgrade of the release into the role's namespace.
- name: Install kafka cluster
  ansible.builtin.shell: helm upgrade --install -n {{ kafka_namespace }} kafka-cluster {{ chart_base_path }}/kafka --create-namespace
+32
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
# Helm chart metadata for the vendored Bitnami Kafka chart (chart 14.0.0,
# Kafka 2.8.0). Dependency charts are fetched from the Bitnami repository;
# zookeeper is only pulled in when `zookeeper.enabled` is true in values.
annotations:
  category: Infrastructure
apiVersion: v2
appVersion: 2.8.0
dependencies:
  - name: common
    repository: https://charts.bitnami.com/bitnami
    tags:
      - bitnami-common
    version: 1.x.x
  - condition: zookeeper.enabled
    name: zookeeper
    repository: https://charts.bitnami.com/bitnami
    version: 7.x.x
description: Apache Kafka is a distributed streaming platform.
engine: gotpl
home: https://github.com/bitnami/charts/tree/master/bitnami/kafka
icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png
keywords:
  - kafka
  - zookeeper
  - streaming
  - producer
  - consumer
maintainers:
  - name: Bitnami
name: kafka
sources:
  - https://github.com/bitnami/bitnami-docker-kafka
  - https://kafka.apache.org/
version: 14.0.0
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,256 @@
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}

{{- if .Values.diagnosticMode.enabled }}
The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:

  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}

Get the list of pods by executing:

  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}

Access the pod you want to debug by executing

  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash

In order to replicate the container startup scripts execute this command:

  /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh

{{- else }}

{{- $replicaCount := int .Values.replicaCount -}}
{{- $releaseNamespace := .Release.Namespace -}}
{{- $clusterDomain := .Values.clusterDomain -}}
{{- $fullname := include "kafka.fullname" . -}}
{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}}
{{- $saslMechanisms := coalesce .Values.auth.sasl.mechanisms .Values.auth.saslMechanisms -}}
{{- $tlsEndpointIdentificationAlgorithm := default "" (coalesce .Values.auth.tls.endpointIdentificationAlgorithm .Values.auth.tlsEndpointIdentificationAlgorithm) -}}
{{- $tlsPassword := coalesce .Values.auth.tls.password .Values.auth.jksPassword -}}
{{- $servicePort := int .Values.service.port -}}
{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}}
{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }}

###############################################################################
### ERROR: You enabled external access to Kafka brokers without specifying ###
### the array of load balancer IPs for Kafka brokers. ###
###############################################################################

This deployment will be incomplete until you configure the array of load balancer
IPs for Kafka brokers. To complete your deployment follow the steps below:

1. Wait for the load balancer IPs (it may take a few minutes for them to be available):

  kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w

2. Obtain the load balancer IPs and upgrade your chart:

{{- range $i, $e := until $replicaCount }}
  LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
{{- end }}

3. Upgrade you chart:

  helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/{{ .Chart.Name }} \
    --set replicaCount={{ $replicaCount }} \
    --set externalAccess.enabled=true \
{{- range $i, $e := until $replicaCount }}
    --set externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \
{{- end }}
    --set externalAccess.service.type=LoadBalancer

{{- else }}

{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $clientProtocol "PLAINTEXT") }}
---------------------------------------------------------------------------------------------
 WARNING

    By specifying "serviceType=LoadBalancer" and not configuring the authentication
    you have most likely exposed the Kafka service externally without any
    authentication mechanism.

    For security reasons, we strongly suggest that you switch to "ClusterIP" or
    "NodePort". As alternative, you can also configure the Kafka authentication.

---------------------------------------------------------------------------------------------
{{- end }}

** Please be patient while the chart is being deployed **

Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster:

    {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}

Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster:

{{- $brokerList := list }}
{{- range $e, $i := until $replicaCount }}
{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }}
{{- end }}
{{ join "\n" $brokerList | nindent 4 }}
{{- if (include "kafka.client.saslAuthentication" .) }}

You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below:

    - kafka_jaas.conf:

KafkaClient {
{{- if $saslMechanisms | regexFind "scram" }}
org.apache.kafka.common.security.scram.ScramLoginModule required
{{- else }}
org.apache.kafka.common.security.plain.PlainLoginModule required
{{- end }}
username="{{ index (coalesce .Values.auth.sasl.jaas.clientUsers .Values.auth.jaas.clientUsers) 0 }}"
password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 --decode | cut -d , -f 1)";
};

    - client.properties:

security.protocol={{ $clientProtocol }}
{{- if $saslMechanisms | regexFind "scram-sha-256" }}
sasl.mechanism=SCRAM-SHA-256
{{- else if $saslMechanisms | regexFind "scram-sha-512" }}
sasl.mechanism=SCRAM-SHA-512
{{- else }}
sasl.mechanism=PLAIN
{{- end }}
{{- if eq $clientProtocol "SASL_SSL" }}
ssl.truststore.type={{ upper .Values.auth.tls.type }}
{{- if eq .Values.auth.tls.type "jks" }}
ssl.truststore.location=/tmp/kafka.truststore.jks
{{- if not (empty $tlsPassword) }}
ssl.truststore.password={{ $tlsPassword }}
{{- end }}
{{- else if eq .Values.auth.tls.type "pem" }}
ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
... \
-----END CERTIFICATE-----
{{- end }}
{{- if eq $tlsEndpointIdentificationAlgorithm "" }}
ssl.endpoint.identification.algorithm=
{{- end }}
{{- end }}

{{- else if (include "kafka.client.tlsEncryption" .) }}

You need to configure your Kafka client to access using TLS authentication. To do so, you need to create the 'client.properties' configuration file with the content below:

security.protocol={{ $clientProtocol }}
ssl.truststore.type={{ upper .Values.auth.tls.type }}
{{- if eq .Values.auth.tls.type "jks" }}
ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }}
{{- if not (empty $tlsPassword) }}
ssl.truststore.password={{ $tlsPassword }}
{{- end }}
{{- else if eq .Values.auth.tls.type "pem" }}
ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
... \
-----END CERTIFICATE-----
{{- end }}
{{- if eq .Values.auth.clientProtocol "mtls" }}
ssl.keystore.type={{ upper .Values.auth.tls.type }}
{{- if eq .Values.auth.tls.type "jks" }}
ssl.keystore.location=/tmp/client.keystore.jks
{{- if not (empty $tlsPassword) }}
ssl.keystore.password={{ $tlsPassword }}
{{- end }}
{{- else if eq .Values.auth.tls.type "pem" }}
ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \
... \
-----END CERTIFICATE-----
ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \
... \
-----END ENCRYPTED PRIVATE KEY-----
{{- end }}
{{- end }}
{{- if eq $tlsEndpointIdentificationAlgorithm "" }}
ssl.endpoint.identification.algorithm=
{{- end }}

{{- end }}

To create a pod that you can use as a Kafka client run the following commands:

    kubectl run {{ $fullname }}-client --restart='Never' --image {{ template "kafka.image" . }} --namespace {{ $releaseNamespace }} --command -- sleep infinity
{{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}
    kubectl cp --namespace {{ $releaseNamespace }} /path/to/client.properties {{ $fullname }}-client:/tmp/client.properties
{{- end }}
{{- if (include "kafka.client.saslAuthentication" .) }}
    kubectl cp --namespace {{ $releaseNamespace }} /path/to/kafka_jaas.conf {{ $fullname }}-client:/tmp/kafka_jaas.conf
{{- end }}
{{- if and (include "kafka.client.tlsEncryption" .) (eq .Values.auth.tls.type "jks") }}
    kubectl cp --namespace {{ $releaseNamespace }} ./kafka.truststore.jks {{ $fullname }}-client:/tmp/kafka.truststore.jks
{{- if eq .Values.auth.clientProtocol "mtls" }}
    kubectl cp --namespace {{ $releaseNamespace }} ./client.keystore.jks {{ $fullname }}-client:/tmp/client.keystore.jks
{{- end }}
{{- end }}
    kubectl exec --tty -i {{ $fullname }}-client --namespace {{ $releaseNamespace }} -- bash
{{- if (include "kafka.client.saslAuthentication" .) }}
    export KAFKA_OPTS="-Djava.security.auth.login.config=/tmp/kafka_jaas.conf"
{{- end }}

    PRODUCER:
        kafka-console-producer.sh \
            {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--producer.config /tmp/client.properties \{{ end }}
            --broker-list {{ join "," $brokerList }} \
            --topic test

    CONSUMER:
        kafka-console-consumer.sh \
            {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--consumer.config /tmp/client.properties \{{ end }}
            --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.port }} \
            --topic test \
            --from-beginning

{{- if .Values.externalAccess.enabled }}

To connect to your Kafka server from outside the cluster, follow the instructions below:

{{- if eq "NodePort" .Values.externalAccess.service.type }}
{{- if .Values.externalAccess.service.domain }}

    Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }}

{{- else }}

    Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener)

        1. Obtain the pod name:

        kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka"

        2. Obtain pod configuration:

        kubectl exec -it KAFKA_POD -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners

{{- end }}

    Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below:

        echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')"

{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }}

  NOTE: It may take a few minutes for the LoadBalancer IPs to be available.
  Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w'

    Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below:

        echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')"

    Kafka Brokers port: {{ .Values.externalAccess.service.port }}

{{- end }}
{{- end }}
{{- end }}
{{- end }}

{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }}
{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }}
{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }}
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
{{- include "kafka.validateValues" . }}

0 commit comments

Comments
 (0)