From a3177e2e095264259d80e5745ba40b49c8e0c29a Mon Sep 17 00:00:00 2001
From: Talor Itzhak
Date: Tue, 28 Jan 2025 09:57:24 +0200
Subject: [PATCH] e2e: use labels from label package

replace all plain-text labels with the aliases from the label package.
There are more labels to be added, but this is a good start.

Signed-off-by: Talor Itzhak
---
 test/e2e/serial/tests/configuration.go | 15 ++---
 test/e2e/serial/tests/non_regression.go | 7 ++-
 .../tests/non_regression_fundamentals.go | 13 ++--
 test/e2e/serial/tests/resource_accounting.go | 15 ++---
 test/e2e/serial/tests/resource_hostlevel.go | 11 ++--
 test/e2e/serial/tests/scheduler_cache.go | 3 +-
 .../e2e/serial/tests/scheduler_cache_stall.go | 7 ++-
 test/e2e/serial/tests/scheduler_removal.go | 7 ++-
 test/e2e/serial/tests/tolerations.go | 21 ++++---
 test/e2e/serial/tests/workload_overhead.go | 5 +-
 test/e2e/serial/tests/workload_placement.go | 5 +-
 .../serial/tests/workload_placement_no_nrt.go | 7 ++-
 .../tests/workload_placement_nodelabel.go | 5 +-
 .../tests/workload_placement_resources.go | 3 +-
 .../serial/tests/workload_placement_taint.go | 3 +-
 .../serial/tests/workload_placement_tmpol.go | 61 ++++++++++---------
 .../serial/tests/workload_unschedulable.go | 15 ++---
 17 files changed, 110 insertions(+), 93 deletions(-)

diff --git a/test/e2e/serial/tests/configuration.go b/test/e2e/serial/tests/configuration.go
index 87fb0dd26..d6391f037 100644
--- a/test/e2e/serial/tests/configuration.go
+++ b/test/e2e/serial/tests/configuration.go
@@ -68,6 +68,7 @@ import (
 	"github.com/openshift-kni/numaresources-operator/test/utils/configuration"
 	e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
 	"github.com/openshift-kni/numaresources-operator/test/utils/images"
+	"github.com/openshift-kni/numaresources-operator/test/utils/label"
 	e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
 	"github.com/openshift-kni/numaresources-operator/test/utils/nrosched"
 	"github.com/openshift-kni/numaresources-operator/test/utils/objects"
@@ -132,7 +133,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 	Context("cluster has at least one suitable node", func() {
 		timeout := 5 * time.Minute
 
-		It("[test_id:47674][reboot_required][slow][images][tier2] should be able to modify the configurable values under the NUMAResourcesOperator CR", Label("reboot_required", "slow", "images", "tier2"), func() {
+		It("[test_id:47674][reboot_required][slow][images][tier2] should be able to modify the configurable values under the NUMAResourcesOperator CR", Label("reboot_required", label.Slow, "images", label.Tier2), func() {
 			fxt.IsRebootTest = true
 			nroOperObj := &nropv1.NUMAResourcesOperator{}
 			nroKey := objects.NROObjectKey()
@@ -356,7 +357,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 
 		})
 
-		It("[test_id:54916][tier2][schedrst] should be able to modify the configurable values under the NUMAResourcesScheduler CR", Label("tier2", "schedrst"), Label("feature:schedrst"), func() {
+		It("[test_id:54916][tier2][schedrst] should be able to modify the configurable values under the NUMAResourcesScheduler CR", Label(label.Tier2, "schedrst"), Label("feature:schedrst"), func() {
 			initialNroSchedObj := &nropv1.NUMAResourcesScheduler{}
 			nroSchedKey := objects.NROSchedObjectKey()
 			err := fxt.Client.Get(context.TODO(), nroSchedKey, initialNroSchedObj)
@@ -422,7 +423,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management",
 				Expect(schedOK).To(BeTrue(),
"pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.SchedulerTestName) }) - It("[test_id:47585][reboot_required][slow] can change kubeletconfig and controller should adapt", Label("reboot_required", "slow"), func() { + It("[test_id:47585][reboot_required][slow] can change kubeletconfig and controller should adapt", Label("reboot_required", label.Slow), func() { fxt.IsRebootTest = true var performanceProfile perfprof.PerformanceProfile var targetedKC *machineconfigv1.KubeletConfig @@ -774,7 +775,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management", Expect(nrsGot).To(Equal(nrsExpected), "mismatching related objects for NUMAResourcesScheduler") }) - It("[slow][tier1] ignores non-matching kubeletconfigs", Label("slow", "tier1"), func(ctx context.Context) { + It("[slow][tier1] ignores non-matching kubeletconfigs", Label(label.Slow, label.Tier1), func(ctx context.Context) { By("getting the NROP object") nroOperObj := &nropv1.NUMAResourcesOperator{} nroKey := objects.NROObjectKey() @@ -809,7 +810,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management", }).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Equal(kcCmNamesPre)) }) - It("[test_id:75354][reboot_required][slow][unsched][schedrst][tier2] should be able to correctly identify topology manager policy without scheduler restarting", Label("reboot_required", "slow", "unsched", "schedrst", "tier2"), Label("feature:schedattrwatch", "feature:schedrst"), func(ctx context.Context) { + It("[test_id:75354][reboot_required][slow][unsched][schedrst][tier2] should be able to correctly identify topology manager policy without scheduler restarting", Label("reboot_required", label.Slow, "unsched", "schedrst", label.Tier2), Label("feature:schedattrwatch", "feature:schedrst"), func(ctx context.Context) { // https://issues.redhat.com/browse/OCPBUGS-34583 fxt.IsRebootTest = true By("getting the number of cpus that is required for a numa zone to create a Topology Affinity Error deployment") @@ -1121,7 +1122,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management", initialOperObj := &nropv1.NUMAResourcesOperator{} nroKey := objects.NROObjectKey() - It("[tier2] should not allow configuring PoolName and MCP selector on same node group", Label("tier2"), func(ctx context.Context) { + It("[tier2] should not allow configuring PoolName and MCP selector on same node group", Label(label.Tier2), func(ctx context.Context) { Expect(fxt.Client.Get(ctx, nroKey, initialOperObj)).To(Succeed(), "cannot get %q in the cluster", nroKey.String()) labelSel := &metav1.LabelSelector{ @@ -1168,7 +1169,7 @@ var _ = Describe("[serial][disruptive] numaresources configuration management", Expect(strings.Contains(cond.Message, expectedCondMsg)).To(BeTrue(), "different degrade message was found: expected to contains %q but found %q", "must have only a single specifier set", expectedCondMsg, cond.Message) }) - It("[tier1] should report the NodeGroupConfig in the NodeGroupStatus with NodePool set and allow updates", func(ctx context.Context) { + It("[tier1] should report the NodeGroupConfig in the NodeGroupStatus with NodePool set and allow updates", Label(label.Tier1), func(ctx context.Context) { Expect(fxt.Client.Get(ctx, nroKey, initialOperObj)).To(Succeed(), "cannot get %q in the cluster", nroKey.String()) mcp := objects.TestMCP() diff --git a/test/e2e/serial/tests/non_regression.go 
b/test/e2e/serial/tests/non_regression.go index 85a7412cc..453891fc4 100644 --- a/test/e2e/serial/tests/non_regression.go +++ b/test/e2e/serial/tests/non_regression.go @@ -46,6 +46,7 @@ import ( serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -140,7 +141,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme } }) - It("[test_id:47584][tier2][nonreg] should be able to schedule guaranteed pod in selective way", Label("tier2", "nonreg"), func() { + It("[test_id:47584][tier2][nonreg] should be able to schedule guaranteed pod in selective way", Label(label.Tier2, "nonreg"), func() { nodesNameSet := e2enrt.AccumulateNames(nrts) targetNodeName, ok := e2efixture.PopNodeName(nodesNameSet) Expect(ok).To(BeTrue()) @@ -202,7 +203,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme Expect(ok).To(BeTrue(), "NRT resources not restored correctly on %q", targetNodeName) }) - It("[test_id:48964][tier3][nonreg] should be able to schedule a guaranteed deployment pod to a specific node", Label("tier3", "nonreg"), func() { + It("[test_id:48964][tier3][nonreg] should be able to schedule a guaranteed deployment pod to a specific node", Label(label.Tier3, "nonreg"), func() { nrtInitialList := nrtv1alpha2.NodeResourceTopologyList{} err := fxt.Client.List(context.TODO(), &nrtInitialList) @@ -324,7 +325,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }) Context("Requesting resources that are greater than allocatable at numa level", func() { - It("[test_id:47613][tier3][nonreg][unsched] should not schedule a pod requesting resources that are not allocatable at numa level", Label("tier3", "nonreg", "unsched"), Label("feature:unsched"), func() { + It("[test_id:47613][tier3][nonreg][unsched] should not schedule a pod requesting resources that are not allocatable at numa level", Label(label.Tier3, "nonreg", "unsched"), Label("feature:unsched"), func() { //the test can run on node with any numa number, so no need to filter the nrts nrtNames := e2enrt.AccumulateNames(nrts) diff --git a/test/e2e/serial/tests/non_regression_fundamentals.go b/test/e2e/serial/tests/non_regression_fundamentals.go index b0c7acc50..1bc44b6cc 100644 --- a/test/e2e/serial/tests/non_regression_fundamentals.go +++ b/test/e2e/serial/tests/non_regression_fundamentals.go @@ -35,6 +35,7 @@ import ( "github.com/openshift-kni/numaresources-operator/internal/wait" serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -147,20 +148,20 @@ var _ = Describe("[serial][fundamentals][scheduler][nonreg] numaresources fundam Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with 
expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName) } }, - Entry("should handle a burst of qos=guaranteed pods [tier0]", Label("tier0"), func(pod *corev1.Pod) { + Entry("should handle a burst of qos=guaranteed pods [tier0]", Label(label.Tier0), func(pod *corev1.Pod) { pod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI), corev1.ResourceMemory: resource.MustParse("64Mi"), } }), - Entry("should handle a burst of qos=burstable pods [tier1]", Label("tier1"), func(pod *corev1.Pod) { + Entry("should handle a burst of qos=burstable pods [tier1]", Label(label.Tier1), func(pod *corev1.Pod) { pod.Spec.Containers[0].Resources.Requests = corev1.ResourceList{ corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI), corev1.ResourceMemory: resource.MustParse("64Mi"), } }), // this is REALLY REALLY to prevent the most catastrophic regressions - Entry("should handle a burst of qos=best-effort pods [tier2]", Label("tier2"), func(pod *corev1.Pod) {}), + Entry("should handle a burst of qos=best-effort pods [tier2]", Label(label.Tier2), func(pod *corev1.Pod) {}), ) DescribeTable("[nodeAll] against all the available worker nodes", Label("nodeAll"), @@ -243,20 +244,20 @@ var _ = Describe("[serial][fundamentals][scheduler][nonreg] numaresources fundam Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName) } }, - Entry("should handle a burst of qos=guaranteed pods [tier0]", Label("tier0"), func(pod *corev1.Pod) { + Entry("should handle a burst of qos=guaranteed pods [tier0]", Label(label.Tier0), func(pod *corev1.Pod) { pod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI), corev1.ResourceMemory: resource.MustParse("64Mi"), } }), - Entry("should handle a burst of qos=burstable pods [tier1]", Label("tier1"), func(pod *corev1.Pod) { + Entry("should handle a burst of qos=burstable pods [tier1]", Label(label.Tier1), func(pod *corev1.Pod) { pod.Spec.Containers[0].Resources.Requests = corev1.ResourceList{ corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI), corev1.ResourceMemory: resource.MustParse("64Mi"), } }), // this is REALLY REALLY to prevent the most catastrophic regressions - Entry("should handle a burst of qos=best-effort pods [tier2]", Label("tier2"), func(pod *corev1.Pod) {}), + Entry("should handle a burst of qos=best-effort pods [tier2]", Label(label.Tier2), func(pod *corev1.Pod) {}), ) // TODO: mixed diff --git a/test/e2e/serial/tests/resource_accounting.go b/test/e2e/serial/tests/resource_accounting.go index d35efe834..c6ff225d2 100644 --- a/test/e2e/serial/tests/resource_accounting.go +++ b/test/e2e/serial/tests/resource_accounting.go @@ -44,6 +44,7 @@ import ( e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -112,7 +113,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa } }) - It("[placement][test_id:49068][tier2] should keep 
the pod pending if not enough resources available, then schedule when resources are freed", Label("placement", "tier2"), func() { + It("[placement][test_id:49068][tier2] should keep the pod pending if not enough resources available, then schedule when resources are freed", Label("placement", label.Tier2), func() { // make sure this is > 1 and LESS than required Res! unsuitableFreeRes := corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2"), @@ -408,7 +409,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa klog.Infof("reference NRT target: %s", intnrt.ToString(*targetNrtReference)) }) - It("[test_id:48685][tier1] should properly schedule a best-effort pod with no changes in NRTs", Label("tier1"), func() { + It("[test_id:48685][tier1] should properly schedule a best-effort pod with no changes in NRTs", Label(label.Tier1), func() { By("create a best-effort pod") pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-be") @@ -437,7 +438,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa Expect(err).ToNot(HaveOccurred()) }) - It("[test_id:48686][tier1] should properly schedule a burstable pod with no changes in NRTs", Label("tier1"), func() { + It("[test_id:48686][tier1] should properly schedule a burstable pod with no changes in NRTs", Label(label.Tier1), func() { By("create a burstable pod") pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-bu") @@ -468,7 +469,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa Expect(err).ToNot(HaveOccurred()) }) - It("[test_id:47618][tier2] should properly schedule deployment with burstable pod with no changes in NRTs", Label("tier2"), func() { + It("[test_id:47618][tier2] should properly schedule deployment with burstable pod with no changes in NRTs", Label(label.Tier2), func() { By("create a deployment with one burstable pod") deploymentName := "test-dp" var replicas int32 = 1 @@ -509,7 +510,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa Expect(e2enrt.CheckEqualAvailableResources(*targetNrtReference, *targetNrtCurrent)).To(BeTrue(), "new resources are accounted in NRT although scheduling burstable pod") }) - It("[tier2] should properly schedule a burstable pod when one of the containers is asking for requests=limits, with no changes in NRTs", Label("tier2"), func() { + It("[tier2] should properly schedule a burstable pod when one of the containers is asking for requests=limits, with no changes in NRTs", Label(label.Tier2), func() { By("create a burstable pod") pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-bu") pod.Spec.SchedulerName = serialconfig.Config.SchedulerName @@ -579,7 +580,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa Expect(err).ToNot(HaveOccurred()) }) - It("[test_id:47620][tier2] should properly schedule a burstable pod with no changes in NRTs followed by a guaranteed pod that stays pending till burstable pod is deleted", Label("tier2"), func() { + It("[test_id:47620][tier2] should properly schedule a burstable pod with no changes in NRTs followed by a guaranteed pod that stays pending till burstable pod is deleted", Label(label.Tier2), func() { By("create a burstable pod") podBurstable := objects.NewTestPodPause(fxt.Namespace.Name, "testpod-first-bu") @@ -718,7 +719,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa }) - It("[test_id:49071][tier2] should properly schedule daemonset with 
burstable pod with no changes in NRTs", Label("tier2"), func() { + It("[test_id:49071][tier2] should properly schedule daemonset with burstable pod with no changes in NRTs", Label(label.Tier2), func() { By("create a daemonset with one burstable pod") dsName := "test-ds" diff --git a/test/e2e/serial/tests/resource_hostlevel.go b/test/e2e/serial/tests/resource_hostlevel.go index 89b33b61b..ba47dea15 100644 --- a/test/e2e/serial/tests/resource_hostlevel.go +++ b/test/e2e/serial/tests/resource_hostlevel.go @@ -35,6 +35,7 @@ import ( "github.com/openshift-kni/numaresources-operator/internal/wait" serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -60,12 +61,12 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria Expect(e2efixture.Teardown(fxt)).To(Succeed()) }) - Context("with at least two nodes suitable", Label("tier0"), func() { + Context("with at least two nodes suitable", Label(label.Tier0), func() { // testing scope=container is pointless in this case: 1 pod with 1 container. // It should behave exactly like scope=pod. But we keep these tests as non-regression // to have a signal the system is behaving as expected. // This is the reason we don't filter for scope, but only by policy. - DescribeTable("[tier0][hostlevel] a pod should be placed and aligned on the node", Label("tier0", "hostlevel"), + DescribeTable("[tier0][hostlevel] a pod should be placed and aligned on the node", Label(label.Tier0, "hostlevel"), func(tmPolicy string, requiredRes []corev1.ResourceList, expectedQOS corev1.PodQOSClass) { ctx := context.TODO() nrtCandidates := filterNodes(fxt, desiredNodesState{ @@ -345,7 +346,7 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria Expect(isFailed).To(BeTrue(), "pod %s/%s with scheduler %s did NOT fail", updatedPod.Namespace, updatedPod.Name, updatedPod.Spec.SchedulerName) }, Entry("[test_id:74253][tier2][qos:gu][unsched] with ephemeral storage, multi-container", - Label("tier2", "qos:gu", "unsched"), + Label(label.Tier2, "qos:gu", "unsched"), intnrt.SingleNUMANode, // required resources for the test pod []corev1.ResourceList{ @@ -363,7 +364,7 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria corev1.PodQOSGuaranteed, ), Entry("[test_id:74254][tier2][qos:bu][unsched] with ephemeral storage, multi-container", - Label("tier2", "qos:bu", "unsched"), + Label(label.Tier2, "qos:bu", "unsched"), intnrt.SingleNUMANode, // required resources for the test pod []corev1.ResourceList{ @@ -380,7 +381,7 @@ var _ = Describe("[serial][hostlevel] numaresources host-level resources", Seria corev1.PodQOSBurstable, ), Entry("[test_id:74255][tier3][qos:be][unsched] with ephemeral storage, multi-container", - Label("tier3", "qos:be", "unsched"), + Label(label.Tier3, "qos:be", "unsched"), intnrt.SingleNUMANode, // required resources for the test pod []corev1.ResourceList{ diff --git a/test/e2e/serial/tests/scheduler_cache.go b/test/e2e/serial/tests/scheduler_cache.go index ba734a95d..568e3a70f 100644 --- a/test/e2e/serial/tests/scheduler_cache.go +++ b/test/e2e/serial/tests/scheduler_cache.go @@ -40,6 
+40,7 @@ import ( serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -64,7 +65,7 @@ type interferenceDesc struct { ratio int } -var _ = Describe("[serial][scheduler][cache][tier0] scheduler cache", Serial, Label("scheduler", "cache", "tier0"), Label("feature:cache"), func() { +var _ = Describe("[serial][scheduler][cache][tier0] scheduler cache", Serial, Label("scheduler", "cache", label.Tier0), Label("feature:cache"), func() { var fxt *e2efixture.Fixture var nrtList nrtv1alpha2.NodeResourceTopologyList diff --git a/test/e2e/serial/tests/scheduler_cache_stall.go b/test/e2e/serial/tests/scheduler_cache_stall.go index c2c462fad..e456a93fd 100644 --- a/test/e2e/serial/tests/scheduler_cache_stall.go +++ b/test/e2e/serial/tests/scheduler_cache_stall.go @@ -41,6 +41,7 @@ import ( serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -110,7 +111,7 @@ var _ = Describe("[serial][scheduler][cache] scheduler cache stall", Label("sche klog.Infof("using MCP %q - refresh period %v", mcpName, refreshPeriod) }) - When("there are jobs in the cluster [tier0]", Label("job", "generic", "tier0"), func() { + When("there are jobs in the cluster [tier0]", Label("job", "generic", label.Tier0), func() { var idleJob *batchv1.Job var hostsRequired int var NUMAZonesRequired int @@ -303,13 +304,13 @@ var _ = Describe("[serial][scheduler][cache] scheduler cache stall", Label("sche Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName) } }, - Entry("should handle a burst of qos=guaranteed pods [tier0]", Label("tier0"), func(pod *corev1.Pod) { + Entry("should handle a burst of qos=guaranteed pods [tier0]", Label(label.Tier0), func(pod *corev1.Pod) { pod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI), corev1.ResourceMemory: resource.MustParse("64Mi"), } }), - Entry("should handle a burst of qos=burstable pods [tier0]", Label("tier0"), func(pod *corev1.Pod) { + Entry("should handle a burst of qos=burstable pods [tier0]", Label(label.Tier0), func(pod *corev1.Pod) { pod.Spec.Containers[0].Resources.Requests = corev1.ResourceList{ corev1.ResourceCPU: *resource.NewQuantity(cpusPerPod, resource.DecimalSI), corev1.ResourceMemory: resource.MustParse("64Mi"), diff --git a/test/e2e/serial/tests/scheduler_removal.go b/test/e2e/serial/tests/scheduler_removal.go index ca3f98983..fffc85ab0 100644 --- a/test/e2e/serial/tests/scheduler_removal.go +++ b/test/e2e/serial/tests/scheduler_removal.go @@ -36,6 
+36,7 @@ import ( e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -65,7 +66,7 @@ var _ = Describe("[serial][disruptive][scheduler][schedrst] numaresources schedu }) When("removing the topology aware scheduler from a live cluster", func() { - It("[case:1][test_id:47593][tier1] should keep existing workloads running", Label("tier1"), func() { + It("[case:1][test_id:47593][tier1] should keep existing workloads running", Label(label.Tier1), func() { var err error dp := createDeploymentSync(fxt, "testdp", serialconfig.Config.SchedulerName) @@ -88,7 +89,7 @@ var _ = Describe("[serial][disruptive][scheduler][schedrst] numaresources schedu } }) - It("[case:2][test_id:49093][tier1][unsched] should keep new scheduled workloads pending", Label("tier1", "unsched"), Label("feature:unsched"), func() { + It("[case:2][test_id:49093][tier1][unsched] should keep new scheduled workloads pending", Label(label.Tier1, "unsched"), Label("feature:unsched"), func() { var err error By(fmt.Sprintf("deleting the NRO Scheduler object: %s", serialconfig.Config.NROSchedObj.Name)) @@ -140,7 +141,7 @@ var _ = Describe("[serial][disruptive][scheduler][schedrst] numaresources schedu }) When("restarting the topology aware scheduler in a live cluster", func() { - It("[case:1][test_id:48069][tier2] should schedule any pending workloads submitted while the scheduler was unavailable", Label("tier2"), func() { + It("[case:1][test_id:48069][tier2] should schedule any pending workloads submitted while the scheduler was unavailable", Label(label.Tier2), func() { var err error dpNName := nroSchedObj.Status.Deployment // shortcut diff --git a/test/e2e/serial/tests/tolerations.go b/test/e2e/serial/tests/tolerations.go index c4572b08c..05d3139be 100644 --- a/test/e2e/serial/tests/tolerations.go +++ b/test/e2e/serial/tests/tolerations.go @@ -49,6 +49,7 @@ import ( "github.com/openshift-kni/numaresources-operator/test/utils/deploy" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/k8simported/taints" + "github.com/openshift-kni/numaresources-operator/test/utils/label" "github.com/openshift-kni/numaresources-operator/test/utils/objects" serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config" @@ -107,7 +108,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su Expect(err).ToNot(HaveOccurred(), "cannot get %q in the cluster", dsKey.String()) }) - When("[tier2] invalid tolerations are submitted ", Label("tier2"), func() { + When("[tier2] invalid tolerations are submitted ", Label(label.Tier2), func() { It("should handle invalid field: operator", func(ctx context.Context) { By("adding extra invalid tolerations with wrong operator field") _ = setRTETolerations(ctx, fxt.Client, nroKey, []corev1.Toleration{ @@ -167,7 +168,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su }) }) - It("[tier3] should enable to change tolerations in the RTE daemonsets", Label("tier3"), func(ctx context.Context) { + It("[tier3] should enable to change tolerations in the RTE daemonsets", Label(label.Tier3), func(ctx context.Context) { By("getting RTE manifests object") // TODO: 
this is similar but not quite what the main operator does rteManifests, err := rtemanifests.GetManifests(configuration.Plat, configuration.PlatVersion, "", true, true) @@ -229,7 +230,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su Expect(int(updatedDs.Status.NumberReady)).To(Equal(len(workers)), "RTE DS ready=%v original worker nodes=%d", updatedDs.Status.NumberReady, len(workers)) }) - It("[tier2][slow][test_id:72857] should handle untolerations of tainted nodes while RTEs are running", Label("slow", "tier2"), func(ctx context.Context) { + It("[tier2][slow][test_id:72857] should handle untolerations of tainted nodes while RTEs are running", Label(label.Slow, label.Tier2), func(ctx context.Context) { var err error By("adding extra tolerations") _ = setRTETolerations(ctx, fxt.Client, nroKey, testToleration()) @@ -285,7 +286,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su Expect(int(updatedDs.Status.NumberReady)).To(Equal(len(workers)-1), "updated DS ready=%v original worker nodes=%d", updatedDs.Status.NumberReady, len(workers)-1) }) - It("[tier3][slow] should evict running RTE pod if taint-toleration matching criteria is shaken - NROP CR toleration update", Label("tier3", "slow"), func(ctx context.Context) { + It("[tier3][slow] should evict running RTE pod if taint-toleration matching criteria is shaken - NROP CR toleration update", Label(label.Tier3, label.Slow), func(ctx context.Context) { By("add toleration with value to the NROP CR") tolerateVal := corev1.Toleration{ Key: testKey, @@ -340,7 +341,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su Expect(err).ToNot(HaveOccurred(), "pod %s/%s still exists", podOnNode.Namespace, podOnNode.Name) }) - It("[tier3][slow] should evict running RTE pod if taint-tolartion matching criteria is shaken - node taints update", Label("tier3", "slow"), func(ctx context.Context) { + It("[tier3][slow] should evict running RTE pod if taint-tolartion matching criteria is shaken - node taints update", Label(label.Tier3, label.Slow), func(ctx context.Context) { By("taint one node with taint value and NoExecute effect") var err error workers, err = nodes.GetWorkers(fxt.DEnv()) @@ -424,7 +425,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su Expect(err).ToNot(HaveOccurred(), "failed to get the daemonset %s: %v", dsKey.String(), err) }) - It("[tier2][test_id:72861] should tolerate partial taints and not schedule or evict the pod on the tainted node", Label("tier2"), func(ctx context.Context) { + It("[tier2][test_id:72861] should tolerate partial taints and not schedule or evict the pod on the tainted node", Label(label.Tier2), func(ctx context.Context) { var err error By("getting the worker nodes") workers, err = nodes.GetWorkers(fxt.DEnv()) @@ -473,7 +474,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) }) - It("[tier3][test_id:72859] should not restart a running RTE pod on tainted node with NoSchedule effect", Label("tier3"), func(ctx context.Context) { + It("[tier3][test_id:72859] should not restart a running RTE pod on tainted node with NoSchedule effect", Label(label.Tier3), func(ctx context.Context) { By("taint one worker node") var err error workers, err = nodes.GetWorkers(fxt.DEnv()) @@ -597,7 +598,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su } }) - 
It("[test_id:72854][reboot_required][slow][tier2] should add tolerations in-place while RTEs are running", Label("reboot_required", "slow", "tier2"), func(ctx context.Context) { + It("[test_id:72854][reboot_required][slow][tier2] should add tolerations in-place while RTEs are running", Label("reboot_required", label.Slow, label.Tier2), func(ctx context.Context) { fxt.IsRebootTest = true By("create NROP CR with no tolerations to the tainted node") nropNewObj := nroOperObj.DeepCopy() @@ -637,7 +638,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su Expect(found).To(BeTrue(), "no RTE pod was found on node %q", taintedNode.Name) }) - It("[test_id:72855][reboot_required][slow][tier2] should tolerate node taint on NROP CR creation", Label("reboot_required", "slow", "tier2"), func(ctx context.Context) { + It("[test_id:72855][reboot_required][slow][tier2] should tolerate node taint on NROP CR creation", Label("reboot_required", label.Slow, label.Tier2), func(ctx context.Context) { fxt.IsRebootTest = true By("add tolerations to NROP CR to tolerate the taint - no RTE running yet on any node") nropNewObj := nroOperObj.DeepCopy() @@ -670,7 +671,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su }) }) - It("[tier3] should evict RTE pod on tainted node with NoExecute effect and restore it when taint is removed", Label("tier3"), func(ctx context.Context) { + It("[tier3] should evict RTE pod on tainted node with NoExecute effect and restore it when taint is removed", Label(label.Tier3), func(ctx context.Context) { By("taint one worker node with NoExecute effect") var err error workers, err = nodes.GetWorkers(fxt.DEnv()) diff --git a/test/e2e/serial/tests/workload_overhead.go b/test/e2e/serial/tests/workload_overhead.go index bd1565704..fed3486ae 100644 --- a/test/e2e/serial/tests/workload_overhead.go +++ b/test/e2e/serial/tests/workload_overhead.go @@ -43,6 +43,7 @@ import ( e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -138,7 +139,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload overhea } } }) - It("[test_id:47582][tier2] schedule a guaranteed Pod in a single NUMA zone and check overhead is not accounted in NRT", Label("tier2"), func() { + It("[test_id:47582][tier2] schedule a guaranteed Pod in a single NUMA zone and check overhead is not accounted in NRT", Label(label.Tier2), func() { // even if it is not a hard rule, and even if there are a LOT of edge cases, a good starting point is usually // in the ballpark of 5x the base load. 
We start like this @@ -273,7 +274,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload overhea } }) - It("[test_id:53819][tier2][unsched] Pod pending when resources requested + pod overhead don't fit on the target node; NRT objects are not updated", Label("tier2", "unsched"), Label("feature:unsched"), func() { + It("[test_id:53819][tier2][unsched] Pod pending when resources requested + pod overhead don't fit on the target node; NRT objects are not updated", Label(label.Tier2, "unsched"), Label("feature:unsched"), func() { var targetNodeName string var targetNrtInitial *nrtv1alpha2.NodeResourceTopology var targetNrtListInitial nrtv1alpha2.NodeResourceTopologyList diff --git a/test/e2e/serial/tests/workload_placement.go b/test/e2e/serial/tests/workload_placement.go index 258a4131a..21404e4bd 100644 --- a/test/e2e/serial/tests/workload_placement.go +++ b/test/e2e/serial/tests/workload_placement.go @@ -46,6 +46,7 @@ import ( "github.com/k8stopologyawareschedwg/deployer/pkg/flagcodec" "github.com/openshift-kni/numaresources-operator/pkg/loglevel" + "github.com/openshift-kni/numaresources-operator/test/utils/label" intbaseload "github.com/openshift-kni/numaresources-operator/internal/baseload" "github.com/openshift-kni/numaresources-operator/internal/podlist" @@ -152,7 +153,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme } }) - It("[test_id:47591][tier1] should modify workload post scheduling while keeping the resource requests available", Label("tier1"), func() { + It("[test_id:47591][tier1] should modify workload post scheduling while keeping the resource requests available", Label(label.Tier1), func() { paddedNodeNames := sets.New[string](padder.GetPaddedNodes()...) nodesNameSet := e2enrt.AccumulateNames(nrts) // the only node which was not padded is the targetedNode @@ -542,7 +543,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme } }) - It("[test_id:48746][tier2] should modify workload post scheduling while keeping the resource requests available across all NUMA node", Label("tier2"), func() { + It("[test_id:48746][tier2] should modify workload post scheduling while keeping the resource requests available across all NUMA node", Label(label.Tier2), func() { paddedNodeNames := sets.New[string](padder.GetPaddedNodes()...) 
nodesNameSet := e2enrt.AccumulateNames(nrts) // the only node which was not padded is the targetedNode diff --git a/test/e2e/serial/tests/workload_placement_no_nrt.go b/test/e2e/serial/tests/workload_placement_no_nrt.go index 289ddd322..3fffc3e4d 100644 --- a/test/e2e/serial/tests/workload_placement_no_nrt.go +++ b/test/e2e/serial/tests/workload_placement_no_nrt.go @@ -35,6 +35,7 @@ import ( serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config" e2eclient "github.com/openshift-kni/numaresources-operator/test/utils/clients" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/objects" ) @@ -88,7 +89,7 @@ var _ = Describe("[serial] numaresources profile update", Serial, Label("feature updateInfoRefreshPause(fxt, initialInfoRefreshPause, nropObjInitial) }) - It("[tier1] should make a best-effort pod running", Label("tier1"), func() { + It("[tier1] should make a best-effort pod running", Label(label.Tier1), func() { By("create best-effort pod expect it to start running") testPod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod") testPod.Spec.SchedulerName = serialconfig.Config.SchedulerName @@ -102,7 +103,7 @@ var _ = Describe("[serial] numaresources profile update", Serial, Label("feature Expect(err).ToNot(HaveOccurred()) }) - It("[tier1] should make a burstable pod running", Label("tier1"), func() { + It("[tier1] should make a burstable pod running", Label(label.Tier1), func() { By("create burstable pod and expect it to run") testPod = objects.NewTestPodPause(fxt.Namespace.Name, "testpod") testPod.Spec.SchedulerName = serialconfig.Config.SchedulerName @@ -119,7 +120,7 @@ var _ = Describe("[serial] numaresources profile update", Serial, Label("feature Expect(err).ToNot(HaveOccurred()) }) - It("[tier1][test_id:47611] should make a guaranteed pod running", Label("tier1"), func() { + It("[tier1][test_id:47611] should make a guaranteed pod running", Label(label.Tier1), func() { By("create guaranteed pod and expect it to run") testPod = objects.NewTestPodPause(fxt.Namespace.Name, "testpod") testPod.Spec.SchedulerName = serialconfig.Config.SchedulerName diff --git a/test/e2e/serial/tests/workload_placement_nodelabel.go b/test/e2e/serial/tests/workload_placement_nodelabel.go index d8cd4ffa3..f4aa5ee24 100644 --- a/test/e2e/serial/tests/workload_placement_nodelabel.go +++ b/test/e2e/serial/tests/workload_placement_nodelabel.go @@ -39,6 +39,7 @@ import ( "github.com/openshift-kni/numaresources-operator/internal/podlist" e2ereslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist" "github.com/openshift-kni/numaresources-operator/internal/wait" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" @@ -182,7 +183,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme Expect(err).NotTo(HaveOccurred()) }) - It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", Label("tier2"), func() { + It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", Label(label.Tier2), 
func() { By(fmt.Sprintf("Labeling nodes %q and %q with label %q:%q", targetNodeName, alternativeNodeName, labelName, labelValueMedium)) unlabelTarget, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, targetNodeName) @@ -278,7 +279,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme } }) - DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources", Serial, Label("tier2"), + DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources", Serial, Label(label.Tier2), func(getNodeAffFunc getNodeAffinityFunc) { affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn) By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity)) diff --git a/test/e2e/serial/tests/workload_placement_resources.go b/test/e2e/serial/tests/workload_placement_resources.go index 02f38807e..323510934 100644 --- a/test/e2e/serial/tests/workload_placement_resources.go +++ b/test/e2e/serial/tests/workload_placement_resources.go @@ -36,6 +36,7 @@ import ( intnrt "github.com/openshift-kni/numaresources-operator/internal/noderesourcetopology" e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -86,7 +87,7 @@ var _ = Describe("[serial][disruptive][scheduler][byres] numaresources workload // FIXME: this is a slight abuse of DescribeTable, but we need to run // the same code with a different test_id per tmscope DescribeTable("[tier0][ressched] a guaranteed pod with one container should be placed and aligned on the node", - Label("tier0", "ressched"), + Label(label.Tier0, "ressched"), func(tmPolicy, tmScope string, requiredRes, expectedFreeRes corev1.ResourceList) { ctx := context.TODO() diff --git a/test/e2e/serial/tests/workload_placement_taint.go b/test/e2e/serial/tests/workload_placement_taint.go index 2befa7d31..9770c7c62 100644 --- a/test/e2e/serial/tests/workload_placement_taint.go +++ b/test/e2e/serial/tests/workload_placement_taint.go @@ -43,6 +43,7 @@ import ( e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/k8simported/taints" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -165,7 +166,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme checkNodesUntainted(fxt.Client, nodeNames) }) - It("[test_id:47594][tier1] should make a pod with a toleration land on a node with enough resources on a specific NUMA zone", Label("tier1"), func() { + It("[test_id:47594][tier1] should make a pod with a toleration land on a node with enough resources on a specific NUMA zone", Label(label.Tier1), func() { paddedNodeNames := sets.New[string](padder.GetPaddedNodes()...) 
nodesNameSet := e2enrt.AccumulateNames(nrts) // the only node which was not padded is the targetedNode diff --git a/test/e2e/serial/tests/workload_placement_tmpol.go b/test/e2e/serial/tests/workload_placement_tmpol.go index e8e212617..860ce55b4 100644 --- a/test/e2e/serial/tests/workload_placement_tmpol.go +++ b/test/e2e/serial/tests/workload_placement_tmpol.go @@ -45,6 +45,7 @@ import ( e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -174,7 +175,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme // FIXME: this is a slight abuse of DescribeTable, but we need to run // the same code which a different test_id per tmscope DescribeTable("[tier1] a guaranteed pod with one container should be scheduled into one NUMA zone", - Label("tier1"), + Label(label.Tier1), func(tmPolicy, tmScope string, requiredRes, paddingRes corev1.ResourceList) { setupCluster(requiredRes, paddingRes, tmPolicy, tmScope) @@ -272,7 +273,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme // FIXME: this is a slight abuse of DescribeTable, but we need to run // the same code which a different test_id per tmscope DescribeTable("[tier0] a deployment with a guaranteed pod with one container should be scheduled into one NUMA zone", - Label("tier0"), + Label(label.Tier0), func(tmPolicy, tmScope string, requiredRes, paddingRes corev1.ResourceList) { setupCluster(requiredRes, paddingRes, tmPolicy, tmScope) @@ -521,7 +522,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, Entry("[test_id:47575][tmscope:cnt][tier0] should make a pod with two gu cnt land on a node with enough resources on a specific NUMA zone, each cnt on a different zone", - Label("tmscope:cnt", "tier0"), + Label("tmscope:cnt", label.Tier0), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -551,7 +552,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme []corev1.ResourceList{}, ), Entry("[test_id:47577][tmscope:pod][tier0] should make a pod with two gu cnt land on a node with enough resources on a specific NUMA zone, all cnt on the same zone", - Label("tmscope:pod", "tier0"), + Label("tmscope:pod", label.Tier0), tmSingleNUMANodeFuncsHandler[intnrt.Pod], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -661,7 +662,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier1][testtype4][tmscope:container] should make a pod with three gu cnt land on a node with enough resources, containers should be spread on a different zone", - Label("tier1", "tmscope:cnt"), + Label(label.Tier1, "tmscope:cnt"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -705,7 +706,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier1][testtype4][tmscope:container][cpu] pod with two gu cnt land on a node with enough resources, containers should be spread on a different zone", - Label("tier1", "tmscope:cnt", "cpu"), + Label(label.Tier1, "tmscope:cnt", 
"cpu"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -757,7 +758,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier1][testtype4][tmscope:container][memory] pod with two gu cnt land on a node with enough resources, containers should be spread on a different zone", - Label("tier1", "tmscope:cnt", "memory"), + Label(label.Tier1, "tmscope:cnt", "memory"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -809,7 +810,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier1][testtype4][tmscope:container][hugepages2Mi] pod with two gu cnt land on a node with enough resources, containers should be spread on a different zone", - Label("tier1", "tmscope:cnt", "hugepages2Mi"), + Label(label.Tier1, "tmscope:cnt", "hugepages2Mi"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -861,7 +862,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier1][testtype4][tmscope:container][hugepages1Gi] pod with two gu cnt land on a node with enough resources, containers should be spread on a different zone", - Label("tier1", "tmscope:cnt", "hugepages1Gi"), + Label(label.Tier1, "tmscope:cnt", "hugepages1Gi"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -912,7 +913,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54021][tier1][testtype4][tmscope:container][devices] pod with two gu cnt land on a node with enough resources, containers should be spread on a different zone", - Label("tier1", "tmscope:cnt", "devices"), + Label(label.Tier1, "tmscope:cnt", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -962,7 +963,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier1][testtype11][tmscope:container] should make a pod with one init cnt and three gu cnt land on a node with enough resources, containers should be spread on a different zone", - Label("tier1", "tmscope:cnt"), + Label(label.Tier1, "tmscope:cnt"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ initCnt: []corev1.ResourceList{ @@ -1012,7 +1013,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier1][testtype29][tmscope:container] should make a pod with 3 gu cnt and 3 init cnt land on a node with enough resources, when sum of init and app cnt resources are more than node resources", - Label("tier1", "tmscope:cnt"), + Label(label.Tier1, "tmscope:cnt"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ initCnt: []corev1.ResourceList{ @@ -1115,7 +1116,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54016][tmscope:pod][tier0][devices] should make a pod with one gu cnt requesting devices land on a node with enough resources on a specific NUMA zone", - Label("tier0", "tmscope:pod", "devices"), + Label(label.Tier0, "tmscope:pod", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Pod], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -1151,7 +1152,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), 
Entry("[test_id:54025][tmscope:cnt][tier2][devices] should make a besteffort pod requesting devices land on a node with enough resources on a specific NUMA zone, containers should be spread on a different zone", - Label("tier2", "tmscope:cnt", "devices"), + Label(label.Tier2, "tmscope:cnt", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -1189,7 +1190,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:55431][tmscope:pod][tier0][devices] should make a besteffort pod requesting devices land on a node with enough resources on a specific NUMA zone", - Label("tier0", "tmscope:pod", "devices"), + Label(label.Tier0, "tmscope:pod", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Pod], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -1222,7 +1223,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:55450][tmscope:pod][tier2][devices][hostlevel] should make a burstable pod requesting devices land on a node with enough resources on a specific NUMA zone", - Label("tier2", "tmscope:pod", "devices", "hostlevel"), + Label(label.Tier2, "tmscope:pod", "devices", "hostlevel"), tmSingleNUMANodeFuncsHandler[intnrt.Pod], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -1260,7 +1261,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54024][tmscope:cnt][tier2][devices][hostlevel] should make a burstable pod requesting devices land on a node with enough resources on a specific NUMA zone, containers should be spread on a different zone", - Label("tier2", "tmscope:cnt", "devices", "hostlevel"), + Label(label.Tier2, "tmscope:cnt", "devices", "hostlevel"), tmSingleNUMANodeFuncsHandler[intnrt.Container], podResourcesRequest{ appCnt: []corev1.ResourceList{ @@ -1445,7 +1446,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme // to see the reason for not scheduling the pod on that target node as "cannot align container: testcnt-1", because the other worker nodes have insufficient // free resources to accommodate the pod thus they will be rejected as candidates at earlier stage Entry("[tier0][unsched][tmscope:container][cpu] pod with two gu cnt keep on pending because cannot align the second container to a single numa node", - Label("tier0", "unsched", "tmscope:cnt", "cpu"), + Label(label.Tier0, "unsched", "tmscope:cnt", "cpu"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ @@ -1506,7 +1507,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:74256][tier3][unsched][tmscope:pod][cpu] guaranteed pod with multi cnt with fractional cpus keep on pending because cannot align the second container to a single numa node", - Label("tier3", "unsched", "tmscope:pod", "cpu"), + Label(label.Tier3, "unsched", "tmscope:pod", "cpu"), tmSingleNUMANodeFuncsHandler[intnrt.Pod], nrosched.ErrorCannotAlignPod, podResourcesRequest{ @@ -1551,7 +1552,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:74257][tier3][unsched][tmscope:pod][cpu][nonreg] burstable pod with multi cnt with fractional cpus keep on pending because of not enough free cpus", - Label("tier3", "unsched", "tmscope:pod", "cpu", "nonreg"), + Label(label.Tier3, "unsched", "tmscope:pod", "cpu", "nonreg"), Label("feature:nonreg"), 
tmSingleNUMANodeFuncsHandler[intnrt.Pod], "0.* nodes are available: [0-9]* Insufficient cpu", @@ -1594,7 +1595,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier0][unsched][tmscope:container][memory] pod with two gu cnt keep on pending because cannot align the second container to a single numa node", - Label("tier0", "unsched", "tmscope:cnt", "memory"), + Label(label.Tier0, "unsched", "tmscope:cnt", "memory"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ @@ -1653,7 +1654,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier0][unsched][tmscope:container][hugepages2Mi] pod with two gu cnt keep on pending because cannot align the second container to a single numa node", - Label("tier0", "unsched", "tmscope:cnt", "hugepages2Mi"), + Label(label.Tier0, "unsched", "tmscope:cnt", "hugepages2Mi"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ @@ -1711,7 +1712,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[tier0][unsched][tmscope:container][hugepages1Gi] pod with two gu cnt keep on pending because cannot align the second container to a single numa node", - Label("tier0", "unsched", "tmscope:cnt", "hugepages1Gi"), + Label(label.Tier0, "unsched", "tmscope:cnt", "hugepages1Gi"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ @@ -1768,7 +1769,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54020][tier2][unsched][tmscope:container][devices] pod with two gu cnt requesting multiple device types keep on pending because cannot align the second container to a single numa node", - Label("tier2", "unsched", "tmscope:cnt", "devices"), + Label(label.Tier2, "unsched", "tmscope:cnt", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ @@ -1821,7 +1822,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54019][tier1][unsched][tmscope:container][devices] pod with two gu cnt keep on pending because cannot align the second container to a single numa node", - Label("tier1", "unsched", "tmscope:cnt", "devices"), + Label(label.Tier1, "unsched", "tmscope:cnt", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ @@ -1866,7 +1867,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54017][tier1][unsched][tmscope:pod][devices] pod with two gu cnt keep on pending because cannot align the both containers on single numa", - Label("tier1", "unsched", "tmscope:pod", "devices"), + Label(label.Tier1, "unsched", "tmscope:pod", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Pod], nrosched.ErrorCannotAlignPod, podResourcesRequest{ @@ -1911,7 +1912,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:55430][tier2][unsched][tmscope:pod][devices] besteffort pod requesting multiple device types keep on pending because cannot align the container to a single numa node", - Label("tier2", "unsched", "tmscope:pod", "devices"), + Label(label.Tier2, "unsched", "tmscope:pod", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Pod], nrosched.ErrorCannotAlignPod, 
podResourcesRequest{ @@ -1957,7 +1958,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:55429][tier2][unsched][tmscope:pod][devices] burstable pod requesting multiple device types keep on pending because cannot align the container to a single numa node", - Label("tier2", "unsched", "tmscope:pod", "devices"), + Label(label.Tier2, "unsched", "tmscope:pod", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Pod], nrosched.ErrorCannotAlignPod, podResourcesRequest{ @@ -2008,7 +2009,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54023][tier2][unsched][tmscope:container][devices] besteffort pod requesting multiple device types keep on pending because cannot align the container to a single numa node", - Label("tier2", "unsched", "tmscope:cnt", "devices"), + Label(label.Tier2, "unsched", "tmscope:cnt", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ @@ -2055,7 +2056,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme }, ), Entry("[test_id:54022][tier2][unsched][tmscope:container][devices] burstable pod requesting multiple device types keep on pending because cannot align the container to a single numa node", - Label("tier2", "unsched", "tmscope:cnt", "devices"), + Label(label.Tier2, "unsched", "tmscope:cnt", "devices"), tmSingleNUMANodeFuncsHandler[intnrt.Container], nrosched.ErrorCannotAlignContainer, podResourcesRequest{ diff --git a/test/e2e/serial/tests/workload_unschedulable.go b/test/e2e/serial/tests/workload_unschedulable.go index 69a6ccf59..7b9bd60c2 100644 --- a/test/e2e/serial/tests/workload_unschedulable.go +++ b/test/e2e/serial/tests/workload_unschedulable.go @@ -44,6 +44,7 @@ import ( e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture" "github.com/openshift-kni/numaresources-operator/test/utils/images" + "github.com/openshift-kni/numaresources-operator/test/utils/label" e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies" "github.com/openshift-kni/numaresources-operator/test/utils/nrosched" "github.com/openshift-kni/numaresources-operator/test/utils/objects" @@ -277,7 +278,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload unsched } }) - It("[test_id:47619][tier3][unsched][default-scheduler] a deployment with a guaranteed pod resources available on one node but not on a single numa; scheduled by default scheduler", Label("tier2", "unsched", "default-scheduler"), func() { + It("[test_id:47619][tier3][unsched][default-scheduler] a deployment with a guaranteed pod resources available on one node but not on a single numa; scheduled by default scheduler", Label(label.Tier2, "unsched", "default-scheduler"), func() { By("Scheduling the testing deployment") deploymentName := "test-dp-with-default-sched" var replicas int32 = 1 @@ -317,7 +318,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload unsched }) Context("with at least two nodes with two numa zones and enough resources in one numa zone", func() { - It("[test_id:47592][tier2][unsched][failalign] a daemonset with a guaranteed pod resources available on one node/one single numa zone but not in any other node", Label("tier2", "unsched", "failalign"), func() { + It("[test_id:47592][tier2][unsched][failalign] a daemonset with a guaranteed pod resources available on one node/one single numa zone but not in any 
other node", Label(label.Tier2, "unsched", "failalign"), func() { requiredNUMAZones := 2 By(fmt.Sprintf("filtering available nodes with at least %d NUMA zones", requiredNUMAZones)) nrtCandidates := e2enrt.FilterZoneCountEqual(nrts, requiredNUMAZones) @@ -446,7 +447,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload unsched }) Context("with at least one node", func() { - It("[test_id:47616][tier2][tmscope:pod][failalign] pod with two containers each on one numa zone can NOT be scheduled", Label("tier2", "tmscope:pod", "failalign"), func() { + It("[test_id:47616][tier2][tmscope:pod][failalign] pod with two containers each on one numa zone can NOT be scheduled", Label(label.Tier2, "tmscope:pod", "failalign"), func() { // Requirements: // Need at least this nodes neededNodes := 1 @@ -619,7 +620,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload unsched // other than the other tests, here we expect all the worker nodes (including none-bm hosts) to be padded Context("with zero suitable nodes", func() { - It("[test_id:47615][tier2][unsched] a deployment with multiple guaranteed pods resources that doesn't fit at the NUMA level", Label("tier2", "unsched"), func() { + It("[test_id:47615][tier2][unsched] a deployment with multiple guaranteed pods resources that doesn't fit at the NUMA level", Label(label.Tier2, "unsched"), func() { neededNodes := 1 numOfnrtCandidates := len(nrts) if numOfnrtCandidates < neededNodes { @@ -843,7 +844,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload unsched e2efixture.MustSettleNRT(fxt) }) - It("[test_id:47614][tier3][unsched][pod] workload requests guaranteed pod resources available on one node but not on a single numa", Label("tier3", "unsched", "pod"), func() { + It("[test_id:47614][tier3][unsched][pod] workload requests guaranteed pod resources available on one node but not on a single numa", Label(label.Tier3, "unsched", "pod"), func() { By("Scheduling the testing pod") pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod") @@ -861,7 +862,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload unsched Expect(err).ToNot(HaveOccurred()) }) - It("[test_id:47614][tier3][unsched][deployment] a deployment with a guaranteed pod resources available on one node but not on a single numa", Label("tier3", "unsched", "deployment"), func() { + It("[test_id:47614][tier3][unsched][deployment] a deployment with a guaranteed pod resources available on one node but not on a single numa", Label(label.Tier3, "unsched", "deployment"), func() { By("Scheduling the testing deployment") deploymentName := "test-dp" @@ -896,7 +897,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload unsched } }) - It("[test_id:47614][tier3][unsched][daemonset] a daemonset with a guaranteed pod resources available on one node but not on a single numa", Label("tier3", "unsched", "daemonset"), func() { + It("[test_id:47614][tier3][unsched][daemonset] a daemonset with a guaranteed pod resources available on one node but not on a single numa", Label(label.Tier3, "unsched", "daemonset"), func() { By("Scheduling the testing daemonset") dsName := "test-ds"