Skip to content
Open
112 changes: 112 additions & 0 deletions tests/assets/karpenter/controller-role-policy-document.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
{
"Statement": [
{
"Action": [
"ssm:GetParameter",
"ec2:DescribeImages",
"ec2:RunInstances",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeLaunchTemplates",
"ec2:DescribeInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstanceTypeOfferings",
"ec2:DeleteLaunchTemplate",
"ec2:CreateTags",
"ec2:CreateLaunchTemplate",
"ec2:CreateFleet",
"ec2:DescribeSpotPriceHistory",
"pricing:GetProducts"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "Karpenter"
},
{
"Action": "ec2:TerminateInstances",
"Condition": {
"StringLike": {
"ec2:ResourceTag/karpenter.sh/nodepool": "*"
}
},
"Effect": "Allow",
"Resource": "*",
"Sid": "ConditionalEC2Termination"
},
{
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}",
"Sid": "PassNodeIAMRole"
},
{
"Effect": "Allow",
"Action": "eks:DescribeCluster",
"Resource": "arn:${AWS_PARTITION}:eks:${AWS_REGION}:${AWS_ACCOUNT_ID}:cluster/${CLUSTER_NAME}",
"Sid": "EKSClusterEndpointLookup"
},
{
"Sid": "AllowScopedInstanceProfileCreationActions",
"Effect": "Allow",
"Resource": "*",
"Action": [
"iam:CreateInstanceProfile"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned",
"aws:RequestTag/topology.kubernetes.io/region": "${AWS_REGION}"
},
"StringLike": {
"aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*"
}
}
},
{
"Sid": "AllowScopedInstanceProfileTagActions",
"Effect": "Allow",
"Resource": "*",
"Action": [
"iam:TagInstanceProfile"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned",
"aws:ResourceTag/topology.kubernetes.io/region": "${AWS_REGION}",
"aws:RequestTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned",
"aws:RequestTag/topology.kubernetes.io/region": "${AWS_REGION}"
},
"StringLike": {
"aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*",
"aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*"
}
}
},
{
"Sid": "AllowScopedInstanceProfileActions",
"Effect": "Allow",
"Resource": "*",
"Action": [
"iam:AddRoleToInstanceProfile",
"iam:RemoveRoleFromInstanceProfile",
"iam:DeleteInstanceProfile"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned",
"aws:ResourceTag/topology.kubernetes.io/region": "${AWS_REGION}"
},
"StringLike": {
"aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*"
}
}
},
{
"Sid": "AllowInstanceProfileReadActions",
"Effect": "Allow",
"Resource": "*",
"Action": "iam:GetInstanceProfile"
}
],
"Version": "2012-10-17"
}
18 changes: 18 additions & 0 deletions tests/assets/karpenter/controller-role-trust-policy-document.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${OIDC_ENDPOINT}:aud": "sts.amazonaws.com",
"${OIDC_ENDPOINT}:sub": "system:serviceaccount:karpenter:karpenter"
}
}
}
]
}
12 changes: 12 additions & 0 deletions tests/assets/karpenter/node-role-policy-document.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
60 changes: 60 additions & 0 deletions tests/assets/karpenter/nodeclass.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
apiVersion: karpenter.k8s.aws/v1
kind: EC2NodeClass
metadata:
  name: default
spec:
  # NOTE(review): amiFamily Custom combined with an al2023@ alias selector is
  # unusual — with an alias, Karpenter normally derives the AMI family itself.
  # Confirm the Karpenter version under test accepts this combination.
  amiFamily: Custom
  instanceProfile: "KarpenterNodeInstanceProfile-${CLUSTER_NAME}"
  amiSelectorTerms:
    - alias: "al2023@${ALIAS_VERSION}"
  # Selector terms are ORed: a subnet/SG matches if ANY term matches.
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
    - tags:
        "aws:cloudformation:stack-name": "${CLUSTER_NAME}"
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
    - tags:
        "aws:cloudformation:stack-name": "${CLUSTER_NAME}"
    - tags:
        kubernetes.io/cluster/${CLUSTER_NAME}: owned
  kubelet:
    maxPods: 110
    systemReserved:
      cpu: 100m
      memory: 100Mi
      ephemeral-storage: 1Gi
    kubeReserved:
      cpu: 100m
      memory: 100Mi
      ephemeral-storage: 1Gi
    evictionHard:
      memory.available: 5%
      nodefs.available: 10%
      nodefs.inodesFree: 10%
  # MIME multipart user data consumed by nodeadm on AL2023. The blank lines
  # between the MIME headers and the parts are significant — they must be
  # truly empty (the previous revision contained a mis-encoded non-space
  # character on them, which would break multipart parsing).
  userData: |
    MIME-Version: 1.0
    Content-Type: multipart/mixed; boundary="BOUNDARY"

    --BOUNDARY
    Content-Type: application/node.eks.aws

    apiVersion: node.eks.aws/v1alpha1
    kind: NodeConfig
    spec:
      cluster:
        name: ${CLUSTER_NAME}
        apiServerEndpoint: ${CLUSTER_ENDPOINT} # Using the actual cluster endpoint
        certificateAuthority: ${CLUSTER_CA}
        cidr: "172.20.0.0/16"
      kubelet:
        config:
          nodeStatusReportFrequency: "60m"
          nodeLeaseDurationSeconds: 60
          maxPods: 110
          clusterDNS: ["172.20.0.10"]
        flags:
          # NOTE(review): karpenter.sh/nodepool=titan-pool is hard-coded, but
          # nodepool.yaml names its pool ${CLUSTER_NAME}-${AZ} — confirm the
          # label is intentional or parameterize it.
          - --node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=titan-pool
          - --register-with-taints=karpenter.sh/unregistered:NoExecute
    --BOUNDARY--
47 changes: 47 additions & 0 deletions tests/assets/karpenter/nodepool.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  # One NodePool per availability zone, keyed by cluster name and AZ.
  name: ${CLUSTER_NAME}-${AZ}
spec:
  disruption:
    budgets:
      # Allow at most 5% of the pool's nodes to be disrupted at once.
      - nodes: 5%
    consolidateAfter: 0s
    consolidationPolicy: WhenEmptyOrUnderutilized
  # NOTE(review): replicas is not part of the upstream karpenter.sh/v1
  # NodePool schema — presumably consumed by a test harness or a
  # static-capacity feature; confirm the build under test accepts it.
  replicas: 0
  template:
    spec:
      # Nodes are expired (and replaced) after 30 days.
      expireAfter: 720h
      nodeClassRef:
        group: karpenter.k8s.aws
        kind: EC2NodeClass
        name: default
      # Scheduling constraints: single AZ, amd64 Linux, on-demand capacity,
      # general-purpose instance categories, medium/large sizes only.
      requirements:
        - key: topology.kubernetes.io/zone
          operator: In
          values:
            - ${AZ}
        - key: kubernetes.io/arch
          operator: In
          values:
            - amd64
        - key: kubernetes.io/os
          operator: In
          values:
            - linux
        - key: karpenter.sh/capacity-type
          operator: In
          values:
            - on-demand
        - key: node.kubernetes.io/instance-category
          operator: In
          values:
            - c
            - m
            - r
            - t
        - key: karpenter.k8s.aws/instance-size
          operator: In
          values:
            - medium
            - large
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: node-condition-wait
  namespace: scalability
spec:
  description: "waits for there to be no nodes with the specific condition"
  params:
    - name: cluster-name
      description: The name of the cluster
    - name: endpoint
      description: eks endpoint to use
    - name: aws-region
    - name: initial-delay
      default: 30m
    - name: condition
      description: condition to check
    - name: value
      description: value of the condition to validate
  steps:
    # NOTE(review): step name looks like a copy-paste from the drift task;
    # kept as-is in case anything references it by name.
    - name: drift-nodepool
      # NOTE(review): amazon/aws-cli does not ship kubectl or jq by default —
      # confirm the image used in this environment includes them.
      image: amazon/aws-cli
      script: |
        # Initial grace period before the first check, then configure
        # kubectl access once (the call is idempotent; no need to repeat
        # it on every iteration).
        sleep $(params.initial-delay)
        aws eks update-kubeconfig --name $(params.cluster-name) --endpoint $(params.endpoint)
        CHECK_INTERVAL=300
        # Poll until no node reports condition==value. The loop only exits
        # successfully; the task's timeout is the effective failure path.
        while true; do
          echo "$(date): Checking node conditions..."
          # Quote the Tekton substitutions so values survive shell
          # word-splitting before jq sees them.
          nodes_with_condition=$(kubectl get nodes -o json | jq -r --arg type "$(params.condition)" --arg status "$(params.value)" '
            .items[] |
            select(.status.conditions[] | select(.type == $type and .status == $status)) |
            .metadata.name
          ')
          if [ -z "$nodes_with_condition" ]; then
            echo "$(date): All nodes are clear of condition $(params.condition)=$(params.value)"
            echo "Condition check completed successfully!"
            exit 0
          fi
          echo "$(date): The following nodes still have $(params.condition)=$(params.value):"
          echo "$nodes_with_condition"
          echo "Waiting 5 minutes before next check..."
          sleep $CHECK_INTERVAL
        done
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: nodepool-drift
  namespace: scalability
spec:
  description: "drift a nodepool by adding a new label to the specified nodepool"
  params:
    - name: nodepool
      description: Name of the nodepool to drift
    - name: cluster-name
      description: The name of the cluster
    - name: endpoint
      description: eks endpoint to use
    - name: aws-region
  steps:
    - name: drift-nodepool
      image: amazon/aws-cli
      script: |
        aws eks update-kubeconfig --name $(params.cluster-name) --endpoint $(params.endpoint)
        # Tekton substitutes $(params.nodepool); the previous ${params.nodepool}
        # form reached the shell unsubstituted and expanded to an empty string,
        # so kubectl patch had no target.
        kubectl patch nodepool $(params.nodepool) --patch '{"spec": {"template": {"metadata": {"labels": {"myLabel": "myValue"}}}}}'
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: nodepool-scale
  namespace: scalability
spec:
  # Description fixed: this task scales a nodepool; the previous text
  # ("drift a cluster...") was copy-pasted from the drift task.
  description: "scale the specified nodepool to the requested number of replicas"
  params:
    - name: replicas
      description: Number of replicas to scale to
    - name: nodepool
      description: Name of the nodepool to scale
    - name: cluster-name
      description: The name of the cluster
    - name: endpoint
      description: eks endpoint to use
  steps:
    - name: scale-nodepool
      image: amazon/aws-cli
      script: |
        aws eks update-kubeconfig --name $(params.cluster-name) --endpoint $(params.endpoint)
        # Tekton substitutes $(params.nodepool); the previous ${params.nodepool}
        # form reached the shell unsubstituted and expanded to an empty string,
        # so kubectl scale had no target.
        kubectl scale nodepool $(params.nodepool) --replicas $(params.replicas)
Loading
Loading