Skip to content

Commit

Permalink
Run webhook only on master nodes (#361)
Browse files Browse the repository at this point in the history
Currently all instances of the handler on all nodes serve as a webhook.
That includes both masters and workers.

Since workers are expected to be less stable than masters this commit
changes the behaviour to run webhook servers only on masters.

Since the webhooks are behind a Service we need to deploy two sets of
daemonsets, one will run only on workers, one only on masters.
The masters' 'name' label is selected in the Service selector.

Also, a RUN_WEBHOOK_SERVER="" env variable was added to the master pods.
In case the env var exists, the handler won't register+run the webhook.


Signed-off-by: Alona Kaplan <[email protected]>
  • Loading branch information
AlonaKaplan authored Feb 2, 2020
1 parent 20c3530 commit 176f29a
Show file tree
Hide file tree
Showing 3 changed files with 109 additions and 13 deletions.
10 changes: 6 additions & 4 deletions cmd/manager/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,10 +120,12 @@ func main() {
os.Exit(1)
}

// Setup webhook
if err := webhook.AddToManager(mgr); err != nil {
log.Error(err, "Cannot initialize webhook")
os.Exit(1)
// Setup webhook on master only
if _, runWebhookServer := os.LookupEnv("RUN_WEBHOOK_SERVER"); runWebhookServer {
if err := webhook.AddToManager(mgr); err != nil {
log.Error(err, "Cannot initialize webhook")
os.Exit(1)
}
}

if err = serveCRMetrics(cfg); err != nil {
Expand Down
79 changes: 78 additions & 1 deletion deploy/operator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ spec:
serviceAccountName: nmstate-handler
nodeSelector:
beta.kubernetes.io/arch: amd64
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
Expand All @@ -37,6 +38,8 @@ spec:
env:
- name: WATCH_NAMESPACE
value: ""
- name: RUN_WEBHOOK_SERVER
value: ""
- name: POD_NAME
valueFrom:
fieldRef:
Expand Down Expand Up @@ -68,6 +71,80 @@ spec:
path: /run/dbus/system_bus_socket
type: Socket
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nmstate-handler-worker
  namespace: nmstate
spec:
  selector:
    matchLabels:
      name: nmstate-handler-worker
  template:
    metadata:
      labels:
        app: kubernetes-nmstate
        name: nmstate-handler-worker
    spec:
      # Needed to force vlan filtering config with iproute commands until
      # future nmstate/NM is in place.
      # https://github.com/nmstate/nmstate/pull/440
      hostNetwork: true
      serviceAccountName: nmstate-handler
      # Keep this worker DaemonSet off master nodes; masters are covered by
      # the separate master DaemonSet that runs the webhook server.
      # NOTE(review): NotIn also matches nodes where the label key is absent —
      # that is what excludes only nodes labeled node-role.kubernetes.io/master="".
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: node-role.kubernetes.io/master
                operator: NotIn
                values:
                - ""
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      containers:
      - name: nmstate-handler
        args:
        - --v=production
        # Replace this with the built image name
        image: REPLACE_IMAGE
        imagePullPolicy: Always
        command:
        - kubernetes-nmstate
        env:
        - name: WATCH_NAMESPACE
          value: ""
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: OPERATOR_NAME
          value: "nmstate-handler"
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: NODE_NETWORK_STATE_REFRESH_INTERVAL
          valueFrom:
            configMapKeyRef:
              name: nmstate-config
              key: node_network_state_refresh_interval
        - name: INTERFACES_FILTER
          valueFrom:
            configMapKeyRef:
              name: nmstate-config
              key: interfaces_filter
        volumeMounts:
        - name: dbus-socket
          mountPath: /run/dbus/system_bus_socket
        securityContext:
          privileged: true
      volumes:
      - name: dbus-socket
        hostPath:
          path: /run/dbus/system_bus_socket
          type: Socket
---
apiVersion: v1
kind: ConfigMap
metadata:
Expand All @@ -90,7 +167,7 @@ spec:
- port: 443
targetPort: 8443
selector:
app: kubernetes-nmstate
name: nmstate-handler
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
Expand Down
33 changes: 25 additions & 8 deletions hack/cluster-sync-handler.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,21 +8,38 @@ ${KUBECTL} delete --ignore-not-found -f ${local_handler_manifest}
# Set debug verbosity level for logs when using cluster-sync
sed "s#--v=production#--v=debug#" ${local_handler_manifest} | ${KUBECTL} create -f -

for i in {300..0}; do
# We have to re-check desired number, sometimes takes some time to be filled in
desiredNumberScheduled=$(${KUBECTL} get daemonset -n nmstate nmstate-handler -o=jsonpath='{.status.desiredNumberScheduled}')
# Print .status.desiredNumberScheduled for the DaemonSet named in $1
# (namespace nmstate). May print an empty string until the DaemonSet
# controller fills in the status.
# Fixes: quote "$1"; drop the useless `echo $(...)` wrapper (the command's
# stdout is the function's stdout already, and the echo would word-split).
function getDesiredNumberScheduled {
    ${KUBECTL} get daemonset -n nmstate "$1" -o=jsonpath='{.status.desiredNumberScheduled}'
}

# Print .status.numberAvailable for the DaemonSet named in $1
# (namespace nmstate). May print an empty string until pods become available.
# Fixes: quote "$1"; drop the useless `echo $(...)` wrapper.
function getNumberAvailable {
    ${KUBECTL} get daemonset -n nmstate "$1" -o=jsonpath='{.status.numberAvailable}'
}

# Return 0 (and print "<name> DS is ready") when the DaemonSet named in $1
# has as many available pods as desired, 1 otherwise.
#
# Fixes relative to the pasted hunk:
#  - removed interleaved stale diff lines (duplicate `numberAvailable=` that
#    hard-coded nmstate-handler, a second `if`, and a stray `break`) which
#    left the function syntactically broken;
#  - quote "$1" and make the counters `local`;
#  - guard against the not-yet-populated case: when desiredNumberScheduled is
#    still empty, "" == "" would have reported the DS as ready prematurely.
function isOk {
    local desiredNumberScheduled numberAvailable
    desiredNumberScheduled=$(getDesiredNumberScheduled "$1")
    numberAvailable=$(getNumberAvailable "$1")

    if [ -n "$desiredNumberScheduled" ] && [ "$desiredNumberScheduled" == "$numberAvailable" ]; then
        echo "$1 DS is ready"
        return 0
    else
        return 1
    fi
}

for i in {300..0}; do
# We have to re-check desired number, sometimes takes some time to be filled in
if isOk nmstate-handler && isOk nmstate-handler-worker; then
break
fi

if [ $i -eq 0 ]; then
echo "nmstate-handler DS haven't turned ready within the given timeout"
echo "nmstate-handler or nmstate-handler-worker DS haven't turned ready within the given timeout"
exit 1
fi


sleep 1
done

0 comments on commit 176f29a

Please sign in to comment.