|
| 1 | +# Conformance testing Amazon EKS-D |
| 2 | + |
| 3 | +## Setup EKS-D Cluster |
| 4 | + |
| 5 | +Setup EKS-D cluster according to the [EKS-D documentation](https://distro.eks.amazonaws.com/). |
| 6 | + |
| 7 | +By following these steps, you may reproduce the EKS-D Conformance e2e results using |
| 8 | +[kops](https://github.com/kubernetes/kops). |
| 9 | + |
| 10 | +## Requirements |
| 11 | +There are several packages you will need to install and configure. |
| 12 | + |
| 13 | +### Kubectl |
| 14 | + |
| 15 | +Install and configure the Kubernetes command-line tool |
| 16 | +[kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). |
| 17 | + |
| 18 | +### AWS CLI |
| 19 | + |
| 20 | +Install and configure the [AWS CLI](https://aws.amazon.com/cli/). |
| 21 | + |
| 22 | +### Sonobuoy |
| 23 | + |
| 24 | +Download a binary release of [sonobuoy](https://github.com/vmware-tanzu/sonobuoy/releases/). |
| 25 | + |
| 26 | +If you are on a Mac, you may need to open the Security & Privacy settings and approve sonobuoy for
| 27 | +execution.
| 28 | + |
| 29 | +```shell |
| 30 | +sonobuoy_version="0.57.1" |
| 31 | +if [[ "$(uname)" == "Darwin" ]] |
| 32 | +then |
| 33 | + SONOBUOY=https://github.com/vmware-tanzu/sonobuoy/releases/download/v${sonobuoy_version}/sonobuoy_${sonobuoy_version}_darwin_amd64.tar.gz |
| 34 | +else |
| 35 | +    SONOBUOY=https://github.com/vmware-tanzu/sonobuoy/releases/download/v${sonobuoy_version}/sonobuoy_${sonobuoy_version}_linux_amd64.tar.gz
| 36 | +fi |
| 37 | +wget -qO- ${SONOBUOY} |tar -xz sonobuoy |
| 38 | +chmod 755 sonobuoy |
| 39 | +``` |
| 40 | + |
| 41 | +### kops |
| 42 | + |
| 43 | +Install kops: |
| 44 | + |
| 45 | +```shell |
| 46 | +# os_arch="linux-amd64" |
| 47 | +os_arch="darwin-amd64" |
| 48 | +kops_version="v1.29.2" |
| 49 | +wget -qO ./kops "https://github.com/kubernetes/kops/releases/download/${kops_version}/kops-${os_arch}" |
| 50 | +chmod +x ./kops |
| 51 | +``` |
| 52 | + |
| 53 | +Validate `kops` is working correctly: |
| 54 | + |
| 55 | +```shell |
| 56 | +./kops version |
| 57 | +``` |
| 58 | + |
| 59 | +Some macOS systems may prevent the unsigned binary from running. Open macOS Security & |
| 60 | +Privacy settings and approve kops for execution. |
| 61 | + |
| 62 | +## Create kops Cluster |
| 63 | + |
| 64 | +Use this shell script to create a cluster: |
| 65 | + |
| 66 | +```shell |
| 67 | +#!/usr/bin/env bash |
| 68 | + |
| 69 | +CLUSTER_NAME="${CLUSTER_NAME:-${1?First required argument is cluster name. Cluster name must be an FQDN}}" |
| 70 | + |
| 71 | +RELEASE_BRANCH=1-34 |
| 72 | +RELEASE=7 |
| 73 | +KUBERNETES_VERSION=v1.34.1 |
| 74 | +CNI_VERSION=v1.7.1 |
| 75 | +METRICS_SERVER_VERSION=v0.7.2 |
| 76 | +AWS_AUTH_VERSION=v0.7.7 |
| 77 | +COREDNS_VERSION=v1.12.2 |
| 78 | + |
| 79 | +export AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION:-us-west-2} |
| 80 | +S3_BUCKET="kops-state-store-${RELEASE_BRANCH}-${RELEASE}" |
| 81 | +export KOPS_STATE_STORE=s3://${S3_BUCKET} |
| 82 | +export CNI_VERSION_URL=https://distro.eks.amazonaws.com/kubernetes-${RELEASE_BRANCH}/releases/${RELEASE}/artifacts/plugins/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tar.gz |
| 83 | +export CNI_ASSET_HASH_STRING=sha256:4726c1b545ab016115a9153e48e9e5a98d367bad19d578117a1f4be1ef4b9346 |
| 84 | + |
| 85 | +echo "Create bucket if it does not exist..." |
| 86 | +aws s3api create-bucket --bucket $S3_BUCKET --create-bucket-configuration LocationConstraint=$AWS_DEFAULT_REGION |
| 87 | + |
| 88 | +cat << EOF > ./values.yaml |
| 89 | +kubernetesVersion: https://distro.eks.amazonaws.com/kubernetes-${RELEASE_BRANCH}/releases/${RELEASE}/artifacts/kubernetes/${KUBERNETES_VERSION} |
| 90 | +clusterName: $CLUSTER_NAME |
| 91 | +configBase: $KOPS_STATE_STORE/$CLUSTER_NAME |
| 92 | +awsRegion: $AWS_DEFAULT_REGION |
| 93 | +pause: |
| 94 | + repository: public.ecr.aws/eks-distro/kubernetes/pause |
| 95 | + tag: ${KUBERNETES_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 96 | +kube_apiserver: |
| 97 | + repository: public.ecr.aws/eks-distro/kubernetes/kube-apiserver |
| 98 | + tag: ${KUBERNETES_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 99 | +kube_controller_manager: |
| 100 | + repository: public.ecr.aws/eks-distro/kubernetes/kube-controller-manager |
| 101 | + tag: ${KUBERNETES_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 102 | +kube_scheduler: |
| 103 | + repository: public.ecr.aws/eks-distro/kubernetes/kube-scheduler |
| 104 | + tag: ${KUBERNETES_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 105 | +kube_proxy: |
| 106 | + repository: public.ecr.aws/eks-distro/kubernetes/kube-proxy |
| 107 | + tag: ${KUBERNETES_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 108 | +metrics_server: |
| 109 | + repository: public.ecr.aws/eks-distro/kubernetes-sigs/metrics-server |
| 110 | + tag: ${METRICS_SERVER_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 111 | +awsiamauth: |
| 112 | + repository: public.ecr.aws/eks-distro/kubernetes-sigs/aws-iam-authenticator |
| 113 | + tag: ${AWS_AUTH_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 114 | +coredns: |
| 115 | + repository: public.ecr.aws/eks-distro/coredns/coredns |
| 116 | + tag: ${COREDNS_VERSION}-eks-${RELEASE_BRANCH}-${RELEASE} |
| 117 | +EOF |
| 118 | + |
| 119 | +cat << EOF >./aws-iam-authenticator.yaml |
| 120 | +apiVersion: v1 |
| 121 | +kind: ConfigMap |
| 122 | +metadata: |
| 123 | + name: aws-iam-authenticator |
| 124 | + namespace: kube-system |
| 125 | + labels: |
| 126 | + k8s-app: aws-iam-authenticator |
| 127 | +data: |
| 128 | + config.yaml: | |
| 129 | +    clusterID: $CLUSTER_NAME
| 130 | +EOF |
| 131 | + |
| 132 | +cat << EOF >./eks-d.tpl |
| 133 | +apiVersion: kops.k8s.io/v1alpha2 |
| 134 | +kind: Cluster |
| 135 | +metadata: |
| 136 | + name: {{ .clusterName }} |
| 137 | +spec: |
| 138 | + api: |
| 139 | + dns: {} |
| 140 | + authorization: |
| 141 | + rbac: {} |
| 142 | + channel: stable |
| 143 | + cloudProvider: aws |
| 144 | + configBase: {{ .configBase }} |
| 145 | +  containerRuntime: containerd
| 146 | + etcdClusters: |
| 147 | + - cpuRequest: 200m |
| 148 | + etcdMembers: |
| 149 | + - instanceGroup: control-plane-{{.awsRegion}}a |
| 150 | + name: a |
| 151 | + memoryRequest: 100Mi |
| 152 | + name: main |
| 153 | + - cpuRequest: 100m |
| 154 | + etcdMembers: |
| 155 | + - instanceGroup: control-plane-{{.awsRegion}}a |
| 156 | + name: a |
| 157 | + memoryRequest: 100Mi |
| 158 | + name: events |
| 159 | + iam: |
| 160 | + allowContainerRegistry: true |
| 161 | + legacy: false |
| 162 | + kubernetesApiAccess: |
| 163 | + - 0.0.0.0/0 |
| 164 | + kubernetesVersion: {{ .kubernetesVersion }} |
| 165 | + masterPublicName: api.{{ .clusterName }} |
| 166 | + networkCIDR: 172.20.0.0/16 |
| 167 | + networking: |
| 168 | + kubenet: {} |
| 169 | + nonMasqueradeCIDR: 100.64.0.0/10 |
| 170 | + sshAccess: |
| 171 | + - 0.0.0.0/0 |
| 172 | + subnets: |
| 173 | + - cidr: 172.20.32.0/19 |
| 174 | + name: {{.awsRegion}}a |
| 175 | + type: Public |
| 176 | + zone: {{.awsRegion}}a |
| 177 | + - cidr: 172.20.64.0/19 |
| 178 | + name: {{.awsRegion}}b |
| 179 | + type: Public |
| 180 | + zone: {{.awsRegion}}b |
| 181 | + - cidr: 172.20.96.0/19 |
| 182 | + name: {{.awsRegion}}c |
| 183 | + type: Public |
| 184 | + zone: {{.awsRegion}}c |
| 185 | + topology: |
| 186 | + dns: |
| 187 | + type: Public |
| 188 | + masters: public |
| 189 | + nodes: public |
| 190 | + kubeAPIServer: |
| 191 | + image: {{ .kube_apiserver.repository }}:{{ .kube_apiserver.tag }} |
| 192 | + kubeControllerManager: |
| 193 | + image: {{ .kube_controller_manager.repository }}:{{ .kube_controller_manager.tag }} |
| 194 | + kubeScheduler: |
| 195 | + image: {{ .kube_scheduler.repository }}:{{ .kube_scheduler.tag }} |
| 196 | + kubeProxy: |
| 197 | + image: {{ .kube_proxy.repository }}:{{ .kube_proxy.tag }} |
| 198 | + metricsServer: |
| 199 | + enabled: true |
| 200 | + insecure: true |
| 201 | + image: {{ .metrics_server.repository }}:{{ .metrics_server.tag }} |
| 202 | + authentication: |
| 203 | + aws: |
| 204 | + image: {{ .awsiamauth.repository }}:{{ .awsiamauth.tag }} |
| 205 | + kubeDNS: |
| 206 | + provider: CoreDNS |
| 207 | + coreDNSImage: {{ .coredns.repository }}:{{ .coredns.tag }} |
| 208 | + masterKubelet: |
| 209 | + podInfraContainerImage: {{ .pause.repository }}:{{ .pause.tag }} |
| 210 | + kubelet: |
| 211 | + podInfraContainerImage: {{ .pause.repository }}:{{ .pause.tag }} |
| 212 | + anonymousAuth: false |
| 213 | + authorizationMode: Webhook |
| 214 | + authenticationTokenWebhook: true |
| 215 | +
|
| 216 | +--- |
| 217 | +
|
| 218 | +apiVersion: kops.k8s.io/v1alpha2 |
| 219 | +kind: InstanceGroup |
| 220 | +metadata: |
| 221 | + labels: |
| 222 | + kops.k8s.io/cluster: {{.clusterName}} |
| 223 | + name: control-plane-{{.awsRegion}}a |
| 224 | +spec: |
| 225 | + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20201026 |
| 226 | + machineType: t3.medium |
| 227 | + maxSize: 1 |
| 228 | + minSize: 1 |
| 229 | + nodeLabels: |
| 230 | + kops.k8s.io/instancegroup: control-plane-{{.awsRegion}}a |
| 231 | + role: Master |
| 232 | + subnets: |
| 233 | + - {{.awsRegion}}a |
| 234 | +
|
| 235 | +--- |
| 236 | +
|
| 237 | +apiVersion: kops.k8s.io/v1alpha2 |
| 238 | +kind: InstanceGroup |
| 239 | +metadata: |
| 240 | + labels: |
| 241 | + kops.k8s.io/cluster: {{.clusterName}} |
| 242 | + name: nodes |
| 243 | +spec: |
| 244 | + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20201026 |
| 245 | + machineType: t3.medium |
| 246 | + maxSize: 3 |
| 247 | + minSize: 3 |
| 248 | + nodeLabels: |
| 249 | + kops.k8s.io/instancegroup: nodes |
| 250 | + role: Node |
| 251 | + subnets: |
| 252 | + - {{.awsRegion}}a |
| 253 | + - {{.awsRegion}}b |
| 254 | + - {{.awsRegion}}c |
| 255 | +EOF |
| 256 | + |
| 257 | +./kops toolbox template --template ./eks-d.tpl --values ./values.yaml >${CLUSTER_NAME}.yaml |
| 258 | +./kops create -f ./${CLUSTER_NAME}.yaml |
| 259 | +./kops create secret --name ${CLUSTER_NAME} sshpublickey admin -i ~/.ssh/id_rsa.pub |
| 260 | +./kops update cluster --admin --name ${CLUSTER_NAME} --yes |
| 261 | + |
| 262 | +export KOPS_FEATURE_FLAGS=SpecOverrideFlag
| 263 | +./kops set cluster --name "${CLUSTER_NAME}" 'cluster.spec.nodePortAccess=0.0.0.0/0'
| 264 | +./kops update cluster --name "${CLUSTER_NAME}" --yes
| 265 | + |
| 266 | +while ! kubectl --context $CLUSTER_NAME apply -f ./aws-iam-authenticator.yaml |
| 267 | +do |
| 268 | + sleep 5 |
| 269 | + echo 'Waiting for cluster to come up...' |
| 270 | +done |
| 271 | + |
| 272 | +cat << EOF >./core_dns_cluster_role.yaml |
| 273 | +--- |
| 274 | +apiVersion: rbac.authorization.k8s.io/v1 |
| 275 | +kind: ClusterRole |
| 276 | +metadata: |
| 277 | + labels: |
| 278 | + kubernetes.io/bootstrapping: rbac-defaults |
| 279 | + name: system:coredns |
| 280 | +rules: |
| 281 | + - apiGroups: |
| 282 | + - "" |
| 283 | + resources: |
| 284 | + - endpoints |
| 285 | + - services |
| 286 | + - pods |
| 287 | + - namespaces |
| 288 | + verbs: |
| 289 | + - list |
| 290 | + - watch |
| 291 | + - apiGroups: |
| 292 | + - discovery.k8s.io |
| 293 | + resources: |
| 294 | + - endpointslices |
| 295 | + verbs: |
| 296 | + - list |
| 297 | + - watch |
| 298 | +EOF |
| 299 | +while ! kubectl --context $CLUSTER_NAME apply -f ./core_dns_cluster_role.yaml
| 300 | +do |
| 301 | + sleep 5 |
| 302 | + echo 'Waiting for coredns to come up...' |
| 303 | +done |
| 304 | + |
| 305 | +./kops validate cluster --name ${CLUSTER_NAME} --wait 15m
| 306 | +``` |
| 307 | + |
| 308 | +## Run Sonobuoy e2e |
| 309 | +```shell |
| 310 | +./sonobuoy run --mode=certified-conformance --wait --kube-conformance-image registry.k8s.io/conformance:${KUBERNETES_VERSION}
| 311 | +results=$(./sonobuoy retrieve) |
| 312 | +mkdir ./results |
| 313 | +tar xzf $results -C ./results |
| 314 | +./sonobuoy results ${results}
| 315 | +mv results/plugins/e2e/results/global/* . |
| 316 | +``` |
| 317 | + |
| 318 | +## Cleanup |
| 319 | +```shell |
| 320 | +# Cleanup your cluster: |
| 321 | +./kops delete cluster --name ${CLUSTER_NAME} --yes |
| 322 | +rm -rf sonobuoy* kops *tar.gz aws-iam-authenticator.yaml core_dns_cluster_role.yaml eks-d.tpl results ${CLUSTER_NAME}.yaml values.yaml |
| 323 | +``` |
0 commit comments