e2e: use labels from label package
replace all plain-text labels with the aliases from the
label package.

There are more labels to be added, but this is a good start.

Signed-off-by: Talor Itzhak <[email protected]>
Tal-or committed Jan 28, 2025
1 parent 4425e41 commit 8923e8b
Showing 17 changed files with 110 additions and 93 deletions.
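
The changed tests all follow the same pattern: a plain-text Ginkgo label such as `Label("tier2")` becomes a reference to a shared constant such as `Label(label.Tier2)`. The commit does not show the `label` package itself, so here is a minimal sketch of what `test/e2e/label` could export, inferred from the constants used in the diffs below (anything beyond `Tier0`–`Tier3` and `Slow` would be an assumption):

```go
// Package label centralizes the Ginkgo label strings shared across the
// e2e suites, so specs reference named constants instead of repeating
// plain-text literals. Sketch only: inferred from usage in this commit.
package label

const (
	// Tier labels classify tests by priority/coverage tier.
	// The values must match the former plain-text labels, so the
	// constants are drop-in replacements inside Label(...) calls.
	Tier0 = "tier0"
	Tier1 = "tier1"
	Tier2 = "tier2"
	Tier3 = "tier3"

	// Slow marks long-running tests.
	Slow = "slow"
)
```
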
15 changes: 8 additions & 7 deletions test/e2e/serial/tests/configuration.go
@@ -64,6 +64,7 @@ import (
 "github.com/openshift-kni/numaresources-operator/pkg/status"
 "github.com/openshift-kni/numaresources-operator/pkg/validation"
 rteconfig "github.com/openshift-kni/numaresources-operator/rte/pkg/config"
+"github.com/openshift-kni/numaresources-operator/test/e2e/label"
 e2eclient "github.com/openshift-kni/numaresources-operator/test/utils/clients"
 "github.com/openshift-kni/numaresources-operator/test/utils/configuration"
 e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
@@ -132,7 +133,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 Context("cluster has at least one suitable node", func() {
 timeout := 5 * time.Minute

-It("[test_id:47674][reboot_required][slow][images][tier2] should be able to modify the configurable values under the NUMAResourcesOperator CR", Label("reboot_required", "slow", "images", "tier2"), func() {
+It("[test_id:47674][reboot_required][slow][images][tier2] should be able to modify the configurable values under the NUMAResourcesOperator CR", Label("reboot_required", label.Slow, "images", label.Tier2), func() {
 fxt.IsRebootTest = true
 nroOperObj := &nropv1.NUMAResourcesOperator{}
 nroKey := objects.NROObjectKey()
@@ -356,7 +357,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",

 })

-It("[test_id:54916][tier2][schedrst] should be able to modify the configurable values under the NUMAResourcesScheduler CR", Label("tier2", "schedrst"), Label("feature:schedrst"), func() {
+It("[test_id:54916][tier2][schedrst] should be able to modify the configurable values under the NUMAResourcesScheduler CR", Label(label.Tier2, "schedrst"), Label("feature:schedrst"), func() {
 initialNroSchedObj := &nropv1.NUMAResourcesScheduler{}
 nroSchedKey := objects.NROSchedObjectKey()
 err := fxt.Client.Get(context.TODO(), nroSchedKey, initialNroSchedObj)
@@ -422,7 +423,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.SchedulerTestName)
 })

-It("[test_id:47585][reboot_required][slow] can change kubeletconfig and controller should adapt", Label("reboot_required", "slow"), func() {
+It("[test_id:47585][reboot_required][slow] can change kubeletconfig and controller should adapt", Label("reboot_required", label.Slow), func() {
 fxt.IsRebootTest = true
 var performanceProfile perfprof.PerformanceProfile
 var targetedKC *machineconfigv1.KubeletConfig
@@ -774,7 +775,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 Expect(nrsGot).To(Equal(nrsExpected), "mismatching related objects for NUMAResourcesScheduler")
 })

-It("[slow][tier1] ignores non-matching kubeletconfigs", Label("slow", "tier1"), func(ctx context.Context) {
+It("[slow][tier1] ignores non-matching kubeletconfigs", Label(label.Slow, label.Tier1), func(ctx context.Context) {
 By("getting the NROP object")
 nroOperObj := &nropv1.NUMAResourcesOperator{}
 nroKey := objects.NROObjectKey()
@@ -809,7 +810,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 }).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Equal(kcCmNamesPre))
 })

-It("[test_id:75354][reboot_required][slow][unsched][schedrst][tier2] should be able to correctly identify topology manager policy without scheduler restarting", Label("reboot_required", "slow", "unsched", "schedrst", "tier2"), Label("feature:schedattrwatch", "feature:schedrst"), func(ctx context.Context) {
+It("[test_id:75354][reboot_required][slow][unsched][schedrst][tier2] should be able to correctly identify topology manager policy without scheduler restarting", Label("reboot_required", label.Slow, "unsched", "schedrst", label.Tier2), Label("feature:schedattrwatch", "feature:schedrst"), func(ctx context.Context) {
 // https://issues.redhat.com/browse/OCPBUGS-34583
 fxt.IsRebootTest = true
 By("getting the number of cpus that is required for a numa zone to create a Topology Affinity Error deployment")
@@ -1121,7 +1122,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 initialOperObj := &nropv1.NUMAResourcesOperator{}
 nroKey := objects.NROObjectKey()

-It("[tier2] should not allow configuring PoolName and MCP selector on same node group", Label("tier2"), func(ctx context.Context) {
+It("[tier2] should not allow configuring PoolName and MCP selector on same node group", Label(label.Tier2), func(ctx context.Context) {
 Expect(fxt.Client.Get(ctx, nroKey, initialOperObj)).To(Succeed(), "cannot get %q in the cluster", nroKey.String())

 labelSel := &metav1.LabelSelector{
@@ -1168,7 +1169,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 Expect(strings.Contains(cond.Message, expectedCondMsg)).To(BeTrue(), "different degrade message was found: expected to contains %q but found %q", "must have only a single specifier set", expectedCondMsg, cond.Message)
 })

-It("[tier1] should report the NodeGroupConfig in the NodeGroupStatus with NodePool set and allow updates", func(ctx context.Context) {
+It("[tier1] should report the NodeGroupConfig in the NodeGroupStatus with NodePool set and allow updates", Label(label.Tier1), func(ctx context.Context) {
 Expect(fxt.Client.Get(ctx, nroKey, initialOperObj)).To(Succeed(), "cannot get %q in the cluster", nroKey.String())

 mcp := objects.TestMCP()
7 changes: 4 additions & 3 deletions test/e2e/serial/tests/non_regression.go
@@ -43,6 +43,7 @@ import (

 "github.com/openshift-kni/numaresources-operator/internal/baseload"
 intnrt "github.com/openshift-kni/numaresources-operator/internal/noderesourcetopology"
+"github.com/openshift-kni/numaresources-operator/test/e2e/label"
 serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
 e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
 "github.com/openshift-kni/numaresources-operator/test/utils/images"
@@ -140,7 +141,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
 }
 })

-It("[test_id:47584][tier2][nonreg] should be able to schedule guaranteed pod in selective way", Label("tier2", "nonreg"), func() {
+It("[test_id:47584][tier2][nonreg] should be able to schedule guaranteed pod in selective way", Label(label.Tier2, "nonreg"), func() {
 nodesNameSet := e2enrt.AccumulateNames(nrts)
 targetNodeName, ok := e2efixture.PopNodeName(nodesNameSet)
 Expect(ok).To(BeTrue())
@@ -202,7 +203,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
 Expect(ok).To(BeTrue(), "NRT resources not restored correctly on %q", targetNodeName)
 })

-It("[test_id:48964][tier3][nonreg] should be able to schedule a guaranteed deployment pod to a specific node", Label("tier3", "nonreg"), func() {
+It("[test_id:48964][tier3][nonreg] should be able to schedule a guaranteed deployment pod to a specific node", Label(label.Tier3, "nonreg"), func() {
 nrtInitialList := nrtv1alpha2.NodeResourceTopologyList{}

 err := fxt.Client.List(context.TODO(), &nrtInitialList)
@@ -324,7 +325,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
 })

 Context("Requesting resources that are greater than allocatable at numa level", func() {
-It("[test_id:47613][tier3][nonreg][unsched] should not schedule a pod requesting resources that are not allocatable at numa level", Label("tier3", "nonreg", "unsched"), Label("feature:unsched"), func() {
+It("[test_id:47613][tier3][nonreg][unsched] should not schedule a pod requesting resources that are not allocatable at numa level", Label(label.Tier3, "nonreg", "unsched"), Label("feature:unsched"), func() {
 //the test can run on node with any numa number, so no need to filter the nrts
 nrtNames := e2enrt.AccumulateNames(nrts)

13 changes: 7 additions & 6 deletions test/e2e/serial/tests/non_regression_fundamentals.go
@@ -33,6 +33,7 @@ import (

 nropv1 "github.com/openshift-kni/numaresources-operator/api/v1"
 "github.com/openshift-kni/numaresources-operator/internal/wait"
+"github.com/openshift-kni/numaresources-operator/test/e2e/label"
 serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
 e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
 e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
@@ -147,20 +148,20 @@ var _ = Describe("[serial][fundamentals][scheduler][nonreg] numaresources fundam
 Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
 }
 },
-Entry("should handle a burst of qos=guaranteed pods [tier0]", Label("tier0"), func(pod *corev1.Pod) {
+Entry("should handle a burst of qos=guaranteed pods [tier0]", Label(label.Tier0), func(pod *corev1.Pod) {
 pod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{
 corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI),
 corev1.ResourceMemory: resource.MustParse("64Mi"),
 }
 }),
-Entry("should handle a burst of qos=burstable pods [tier1]", Label("tier1"), func(pod *corev1.Pod) {
+Entry("should handle a burst of qos=burstable pods [tier1]", Label(label.Tier1), func(pod *corev1.Pod) {
 pod.Spec.Containers[0].Resources.Requests = corev1.ResourceList{
 corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI),
 corev1.ResourceMemory: resource.MustParse("64Mi"),
 }
 }),
 // this is REALLY REALLY to prevent the most catastrophic regressions
-Entry("should handle a burst of qos=best-effort pods [tier2]", Label("tier2"), func(pod *corev1.Pod) {}),
+Entry("should handle a burst of qos=best-effort pods [tier2]", Label(label.Tier2), func(pod *corev1.Pod) {}),
 )

 DescribeTable("[nodeAll] against all the available worker nodes", Label("nodeAll"),
@@ -243,20 +244,20 @@ var _ = Describe("[serial][fundamentals][scheduler][nonreg] numaresources fundam
 Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
 }
 },
-Entry("should handle a burst of qos=guaranteed pods [tier0]", Label("tier0"), func(pod *corev1.Pod) {
+Entry("should handle a burst of qos=guaranteed pods [tier0]", Label(label.Tier0), func(pod *corev1.Pod) {
 pod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{
 corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI),
 corev1.ResourceMemory: resource.MustParse("64Mi"),
 }
 }),
-Entry("should handle a burst of qos=burstable pods [tier1]", Label("tier1"), func(pod *corev1.Pod) {
+Entry("should handle a burst of qos=burstable pods [tier1]", Label(label.Tier1), func(pod *corev1.Pod) {
 pod.Spec.Containers[0].Resources.Requests = corev1.ResourceList{
 corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI),
 corev1.ResourceMemory: resource.MustParse("64Mi"),
 }
 }),
 // this is REALLY REALLY to prevent the most catastrophic regressions
-Entry("should handle a burst of qos=best-effort pods [tier2]", Label("tier2"), func(pod *corev1.Pod) {}),
+Entry("should handle a burst of qos=best-effort pods [tier2]", Label(label.Tier2), func(pod *corev1.Pod) {}),
 )

 // TODO: mixed
15 changes: 8 additions & 7 deletions test/e2e/serial/tests/resource_accounting.go
@@ -42,6 +42,7 @@ import (
 e2ereslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist"
 "github.com/openshift-kni/numaresources-operator/internal/wait"

+"github.com/openshift-kni/numaresources-operator/test/e2e/label"
 e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
 "github.com/openshift-kni/numaresources-operator/test/utils/images"
 e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
@@ -112,7 +113,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
 }
 })

-It("[placement][test_id:49068][tier2] should keep the pod pending if not enough resources available, then schedule when resources are freed", Label("placement", "tier2"), func() {
+It("[placement][test_id:49068][tier2] should keep the pod pending if not enough resources available, then schedule when resources are freed", Label("placement", label.Tier2), func() {
 // make sure this is > 1 and LESS than required Res!
 unsuitableFreeRes := corev1.ResourceList{
 corev1.ResourceCPU: resource.MustParse("2"),
@@ -408,7 +409,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
 klog.Infof("reference NRT target: %s", intnrt.ToString(*targetNrtReference))
 })

-It("[test_id:48685][tier1] should properly schedule a best-effort pod with no changes in NRTs", Label("tier1"), func() {
+It("[test_id:48685][tier1] should properly schedule a best-effort pod with no changes in NRTs", Label(label.Tier1), func() {
 By("create a best-effort pod")

 pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-be")
@@ -437,7 +438,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
 Expect(err).ToNot(HaveOccurred())
 })

-It("[test_id:48686][tier1] should properly schedule a burstable pod with no changes in NRTs", Label("tier1"), func() {
+It("[test_id:48686][tier1] should properly schedule a burstable pod with no changes in NRTs", Label(label.Tier1), func() {
 By("create a burstable pod")

 pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-bu")
@@ -468,7 +469,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
 Expect(err).ToNot(HaveOccurred())
 })

-It("[test_id:47618][tier2] should properly schedule deployment with burstable pod with no changes in NRTs", Label("tier2"), func() {
+It("[test_id:47618][tier2] should properly schedule deployment with burstable pod with no changes in NRTs", Label(label.Tier2), func() {
 By("create a deployment with one burstable pod")
 deploymentName := "test-dp"
 var replicas int32 = 1
@@ -509,7 +510,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
 Expect(e2enrt.CheckEqualAvailableResources(*targetNrtReference, *targetNrtCurrent)).To(BeTrue(), "new resources are accounted in NRT although scheduling burstable pod")
 })

-It("[tier2] should properly schedule a burstable pod when one of the containers is asking for requests=limits, with no changes in NRTs", Label("tier2"), func() {
+It("[tier2] should properly schedule a burstable pod when one of the containers is asking for requests=limits, with no changes in NRTs", Label(label.Tier2), func() {
 By("create a burstable pod")
 pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-bu")
 pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
@@ -579,7 +580,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
 Expect(err).ToNot(HaveOccurred())
 })

-It("[test_id:47620][tier2] should properly schedule a burstable pod with no changes in NRTs followed by a guaranteed pod that stays pending till burstable pod is deleted", Label("tier2"), func() {
+It("[test_id:47620][tier2] should properly schedule a burstable pod with no changes in NRTs followed by a guaranteed pod that stays pending till burstable pod is deleted", Label(label.Tier2), func() {
 By("create a burstable pod")

 podBurstable := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-first-bu")
@@ -718,7 +719,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa

 })

-It("[test_id:49071][tier2] should properly schedule daemonset with burstable pod with no changes in NRTs", Label("tier2"), func() {
+It("[test_id:49071][tier2] should properly schedule daemonset with burstable pod with no changes in NRTs", Label(label.Tier2), func() {
 By("create a daemonset with one burstable pod")
 dsName := "test-ds"

11 changes: 6 additions & 5 deletions test/e2e/serial/tests/resource_hostlevel.go
@@ -33,6 +33,7 @@ import (
 intnrt "github.com/openshift-kni/numaresources-operator/internal/noderesourcetopology"
 intreslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist"
 "github.com/openshift-kni/numaresources-operator/internal/wait"
+"github.com/openshift-kni/numaresources-operator/test/e2e/label"
 serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
 e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
 e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
@@ -60,12 +61,12 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria
 Expect(e2efixture.Teardown(fxt)).To(Succeed())
 })

-Context("with at least two nodes suitable", Label("tier0"), func() {
+Context("with at least two nodes suitable", Label(label.Tier0), func() {
 // testing scope=container is pointless in this case: 1 pod with 1 container.
 // It should behave exactly like scope=pod. But we keep these tests as non-regression
 // to have a signal the system is behaving as expected.
 // This is the reason we don't filter for scope, but only by policy.
-DescribeTable("[tier0][hostlevel] a pod should be placed and aligned on the node", Label("tier0", "hostlevel"),
+DescribeTable("[tier0][hostlevel] a pod should be placed and aligned on the node", Label(label.Tier0, "hostlevel"),
 func(tmPolicy string, requiredRes []corev1.ResourceList, expectedQOS corev1.PodQOSClass) {
 ctx := context.TODO()
 nrtCandidates := filterNodes(fxt, desiredNodesState{
@@ -345,7 +346,7 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria
 Expect(isFailed).To(BeTrue(), "pod %s/%s with scheduler %s did NOT fail", updatedPod.Namespace, updatedPod.Name, updatedPod.Spec.SchedulerName)
 },
 Entry("[test_id:74253][tier2][qos:gu][unsched] with ephemeral storage, multi-container",
-Label("tier2", "qos:gu", "unsched"),
+Label(label.Tier2, "qos:gu", "unsched"),
 intnrt.SingleNUMANode,
 // required resources for the test pod
 []corev1.ResourceList{
@@ -363,7 +364,7 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria
 corev1.PodQOSGuaranteed,
 ),
 Entry("[test_id:74254][tier2][qos:bu][unsched] with ephemeral storage, multi-container",
-Label("tier2", "qos:bu", "unsched"),
+Label(label.Tier2, "qos:bu", "unsched"),
 intnrt.SingleNUMANode,
 // required resources for the test pod
 []corev1.ResourceList{
@@ -380,7 +381,7 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria
 corev1.PodQOSBurstable,
 ),
 Entry("[test_id:74255][tier3][qos:be][unsched] with ephemeral storage, multi-container",
-Label("tier3", "qos:be", "unsched"),
+Label(label.Tier3, "qos:be", "unsched"),
 intnrt.SingleNUMANode,
 // required resources for the test pod
 []corev1.ResourceList{
3 changes: 2 additions & 1 deletion test/e2e/serial/tests/scheduler_cache.go
@@ -37,6 +37,7 @@ import (
 e2enrtint "github.com/openshift-kni/numaresources-operator/internal/noderesourcetopology"
 e2ereslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist"
 "github.com/openshift-kni/numaresources-operator/internal/wait"
+"github.com/openshift-kni/numaresources-operator/test/e2e/label"
 serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
 e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
 "github.com/openshift-kni/numaresources-operator/test/utils/images"
@@ -64,7 +65,7 @@ type interferenceDesc struct {
 ratio int
 }

-var _ = Describe("[serial][scheduler][cache][tier0] scheduler cache", Serial, Label("scheduler", "cache", "tier0"), Label("feature:cache"), func() {
+var _ = Describe("[serial][scheduler][cache][tier0] scheduler cache", Serial, Label("scheduler", "cache", label.Tier0), Label("feature:cache"), func() {
 var fxt *e2efixture.Fixture
 var nrtList nrtv1alpha2.NodeResourceTopologyList

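
Labels not yet migrated (for example "nonreg", "unsched", "schedrst", "reboot_required") keep their plain-text form, matching the commit message's note that more labels remain to be added. Beyond the specs themselves, the shared constants could also drive run-time test selection; a hypothetical helper, not part of this commit, sketching how a Ginkgo label-filter expression might be built from them:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/openshift-kni/numaresources-operator/test/e2e/label"
)

// filterExpr joins label constants into a label-filter expression,
// e.g. "tier0 || tier1". Hypothetical helper, shown only to illustrate
// consuming the shared constants outside Label(...) calls.
func filterExpr(labels ...string) string {
	return strings.Join(labels, " || ")
}

func main() {
	// Prints: tier0 || tier1
	fmt.Println(filterExpr(label.Tier0, label.Tier1))
}
```

Such an expression could then be passed to Ginkgo v2's `--label-filter` flag to run only the selected tiers, assuming the suites are invoked through the ginkgo CLI.
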