Fixes to allow Kubernetes dashboard (UI) to work
The UI didn't work with the vSphere kube-up implementation. This fixes
that by making the following changes (a rough sketch of the net effect
is shown below):

* Configure the apiserver with admission controls, in particular
  ServiceAccount. This provides the dashboard pod with the token it
  needs to talk to the apiserver, and also benefits any other pods
  that require service accounts.
* Add routes on the master so it can communicate with the pods, so
  that hitting the https://MASTER/ui URL lets the master reach the
  dashboard pod.
* Add an extra subject alternative name (SAN) for the cluster IP to
  the apiserver certificate, so that when the dashboard talks to the
  apiserver, the certificate matches the IP address it is using.
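
Roughly, the net effect on the master looks like the following sketch. The flag
name assumes the Salt template passes the admission_control pillar through to
kube-apiserver as --admission-control, and the node CIDR and IP in the route
command are illustrative values, not taken from this commit:

    # Admission controllers handed to kube-apiserver; ServiceAccount is the one
    # that injects API tokens into pods such as the dashboard.
    kube-apiserver ... --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota

    # A route on the master toward one node's pod bridge network, mirroring the
    # route add call added to setup-pod-routes in cluster/vsphere/util.sh.
    sudo route add -net 10.244.1.0/24 gw 10.20.30.11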
Alain Roy committed Apr 27, 2016
1 parent 9db40b6 commit 10545d7
Showing 4 changed files with 26 additions and 2 deletions.
6 changes: 6 additions & 0 deletions cluster/vsphere/config-default.sh
@@ -57,5 +57,11 @@ DNS_REPLICAS=1
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"

# We need to configure subject alternate names (SANs) for the master's certificate
# we generate. While users will connect via the external IP, pods (like the UI)
# will connect via the cluster IP, from the SERVICE_CLUSTER_IP_RANGE.
# In addition to the extra SANs here, we'll also add one for the service IP.
MASTER_EXTRA_SANS="DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN}"

# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
3 changes: 2 additions & 1 deletion cluster/vsphere/templates/create-dynamic-salt-files.sh
@@ -112,7 +112,7 @@ node_instance_prefix: $NODE_INSTANCE_PREFIX
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
enable_cluster_monitoring: "${ENABLE_CLUSTER_MONITORING:-none}"
enable_cluster_logging: "${ENABLE_CLUSTER_LOGGING:false}"
enable_cluster_ui: "${ENABLE_CLUSTER_UI:false}"
enable_cluster_ui: "${ENABLE_CLUSTER_UI:true}"
enable_node_logging: "${ENABLE_NODE_LOGGING:false}"
logging_destination: $LOGGING_DESTINATION
elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
@@ -123,6 +123,7 @@ dns_domain: $DNS_DOMAIN
e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}"
cluster_cidr: "$NODE_IP_RANGES"
allocate_node_cidrs: "${ALLOCATE_NODE_CIDRS:-true}"
admission_control: NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
EOF

mkdir -p /srv/salt-overlay/salt/nginx
1 change: 1 addition & 0 deletions cluster/vsphere/templates/salt-master.sh
@@ -27,6 +27,7 @@ grains:
- kubernetes-master
cbr-cidr: $MASTER_IP_RANGE
cloud: vsphere
master_extra_sans: $MASTER_EXTRA_SANS
EOF

# Auto accept all keys from minions that try to join
18 changes: 17 additions & 1 deletion cluster/vsphere/util.sh
@@ -326,10 +326,12 @@ function setup-pod-routes {
done


# make the pods visible to each other.
# Make the pods visible to each other and to the master.
# The master needs to have routes to the pods for the UI to work.
local j
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "setting up routes for ${NODE_NAMES[$i]}"
kube-ssh "${KUBE_MASTER_IP}" "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[${i}]} gw ${KUBE_NODE_IP_ADDRESSES[${i}]}"
for (( j=0; j<${#NODE_NAMES[@]}; j++)); do
if [[ $i != $j ]]; then
kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[$j]} gw ${KUBE_NODE_IP_ADDRESSES[$j]}"
@@ -355,6 +357,18 @@ function kube-up {
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

# This calculation of the service IP should work, but if you choose an
# alternate subnet, there's a small chance you'd need to modify the
# service_ip, below. We'll choose an IP like 10.244.240.1 by taking
# the first three octets of the SERVICE_CLUSTER_IP_RANGE and tacking
# on a .1
local octets
local service_ip
octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
MASTER_EXTRA_SANS="IP:${service_ip},DNS:${MASTER_NAME},${MASTER_EXTRA_SANS}"
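# Illustration (assumed example values, not computed by this script): with
# SERVICE_CLUSTER_IP_RANGE=10.244.240.0/20 and MASTER_NAME=kubernetes-master,
# service_ip is 10.244.240.1 and MASTER_EXTRA_SANS becomes
#   "IP:10.244.240.1,DNS:kubernetes-master,DNS:kubernetes,DNS:kubernetes.default,..."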

echo "Starting master VM (this can take a minute)..."

(
@@ -371,6 +385,7 @@ function kube-up {
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "readonly ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI:-false}'"
echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "readonly KUBE_USER='${KUBE_USER:-}'"
@@ -379,6 +394,7 @@
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
echo "readonly MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-master.sh"
