diff --git a/pkg/controllers/internalmembercluster/v1beta1/member_controller_integration_test.go b/pkg/controllers/internalmembercluster/v1beta1/member_controller_integration_test.go index 12e11e88a..8f92baeb3 100644 --- a/pkg/controllers/internalmembercluster/v1beta1/member_controller_integration_test.go +++ b/pkg/controllers/internalmembercluster/v1beta1/member_controller_integration_test.go @@ -2,220 +2,834 @@ Copyright (c) Microsoft Corporation. Licensed under the MIT license. */ + package v1beta1 import ( - "context" - "strings" + "fmt" "time" + "github.com/google/go-cmp/cmp" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" - "go.goms.io/fleet/pkg/controllers/work" "go.goms.io/fleet/pkg/propertyprovider" - "go.goms.io/fleet/pkg/utils" ) -var _ = Describe("Test Internal Member Cluster Controller", Serial, func() { - var ( - ctx context.Context - HBPeriod int - memberClusterName string - memberClusterNamespace string - memberClusterNamespacedName types.NamespacedName - nodes corev1.NodeList - r *Reconciler - ) - - BeforeEach(func() { - ctx = context.Background() - HBPeriod = int(utils.RandSecureInt(600)) - memberClusterName = "rand-" + strings.ToLower(utils.RandStr()) + "-mc" - memberClusterNamespace = "fleet-" + memberClusterName - memberClusterNamespacedName = types.NamespacedName{ - Name: memberClusterName, - Namespace: memberClusterNamespace, - } - - By("create the member cluster namespace") - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: memberClusterNamespace, - }, - } - Expect(k8sClient.Create(ctx, &ns)).Should(Succeed()) - - By("creating member cluster nodes") - nodes = corev1.NodeList{Items: utils.NewTestNodes(memberClusterNamespace)} - for _, node := range nodes.Items { - node := node // prevent Implicit memory aliasing in for loop - Expect(k8sClient.Create(ctx, &node)).Should(Succeed()) - } - - By("create the internalMemberCluster reconciler") - workController := work.NewApplyWorkReconciler( - k8sClient, nil, k8sClient, nil, nil, 5, memberClusterNamespace) - var err error - r, err = NewReconciler(ctx, k8sClient, mgr.GetConfig(), k8sClient, workController, nil) - Expect(err).ToNot(HaveOccurred()) - err = r.SetupWithManager(mgr) - Expect(err).ToNot(HaveOccurred()) - }) +const ( + propertiesManuallyUpdatedConditionType = "PropertiesManuallyUpdated" + propertiesManuallyUpdatedConditionReason1 = "NewPropertiesPushed" + propertiesManuallyUpdatedConditionMsg1 = "Properties have been manually updated" + propertiesManuallyUpdatedConditionReason2 = "NewPropertiesPushedAgain" + propertiesManuallyUpdatedConditionMsg2 = "Properties have been manually updated again" +) - AfterEach(func() { - By("delete member cluster namespace") - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: memberClusterNamespace, - }, - } - Expect(k8sClient.Delete(ctx, &ns)).Should(Succeed()) - - By("delete member cluster nodes") - for _, node := range nodes.Items { - node := node - Expect(k8sClient.Delete(ctx, &node)).Should(Succeed()) - } - - By("delete member cluster") - internalMemberCluster := clusterv1beta1.InternalMemberCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: memberClusterName, - Namespace: memberClusterNamespace, - }, - } - Expect(k8sClient.Delete(ctx, &internalMemberCluster)).Should(SatisfyAny(Succeed(), 
&utils.NotFoundMatcher{})) - }) +const ( + eventuallyTimeout = time.Second * 30 + eventuallyInterval = time.Millisecond * 500 +) + +var _ = Describe("Test InternalMemberCluster Controller", func() { + // Note that specs in this context run in serial, however, they might run in parallel with + // the other contexts if parallelization is enabled. + // + // This is safe as the controller managers have been configured to watch only their own + // respective namespaces. + Context("Test setup with property provider", Ordered, func() { + var ( + // Add an offset of -1 second to avoid flakiness caused by approximation. + timeStarted = metav1.Time{Time: time.Now().Add(-time.Second)} - Context("join", func() { - BeforeEach(func() { - internalMemberCluster := clusterv1beta1.InternalMemberCluster{ + // The timestamps below are set in later steps. + timeUpdated metav1.Time + timeLeft metav1.Time + ) + + BeforeAll(func() { + // Create the InternalMemberCluster object. + imc := &clusterv1beta1.InternalMemberCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: memberClusterName, - Namespace: memberClusterNamespace, + Name: member1Name, + Namespace: member1ReservedNSName, }, Spec: clusterv1beta1.InternalMemberClusterSpec{ - State: clusterv1beta1.ClusterStateJoin, - HeartbeatPeriodSeconds: int32(HBPeriod), + State: clusterv1beta1.ClusterStateJoin, + // Use a shorter heartbeat period to improve responsiveness. + HeartbeatPeriodSeconds: 2, }, } - Expect(k8sClient.Create(ctx, &internalMemberCluster)).Should(Succeed()) - }) + Expect(hubClient.Create(ctx, imc)).Should(Succeed()) - It("should update internalMemberCluster to joined", func() { - result, err := r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, + // Report properties via the property provider. 
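The BeforeAll below pushes a fully populated PropertyCollectionResponse, and later specs repeat the same literal with different numbers; a small helper along these lines could assemble it (newPropertyResponse is a hypothetical name, not part of this change):

// Hypothetical helper: build the response pushed to the fake provider so specs
// do not have to repeat the full literal.
func newPropertyResponse(nodeCount string, capacity, allocatable, available corev1.ResourceList, conds []metav1.Condition) *propertyprovider.PropertyCollectionResponse {
	now := metav1.Now()
	return &propertyprovider.PropertyCollectionResponse{
		Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{
			propertyprovider.NodeCountProperty: {Value: nodeCount, ObservationTime: now},
		},
		Resources: clusterv1beta1.ResourceUsage{
			Capacity:        capacity,
			Allocatable:     allocatable,
			Available:       available,
			ObservationTime: now,
		},
		Conditions: conds,
	}
}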
+ observationTime := metav1.Now() + propertyProvider1.Update(&propertyprovider.PropertyCollectionResponse{ + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "1", + ObservationTime: observationTime, + }, + }, + Resources: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("10Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("8"), + corev1.ResourceMemory: resource.MustParse("8Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + ObservationTime: observationTime, + }, + Conditions: []metav1.Condition{ + { + Type: propertiesManuallyUpdatedConditionType, + Status: metav1.ConditionTrue, + Reason: propertiesManuallyUpdatedConditionReason1, + Message: propertiesManuallyUpdatedConditionMsg1, + }, + }, }) - // take into account the +- jitter - upperBoundOfWantRequeueAfter := (1000 + 1000*jitterPercent/2/100) * time.Millisecond.Milliseconds() * int64(HBPeriod) - lowerBoundOfWantRequeueAfter := (1000 - 1000*jitterPercent/2/100) * time.Millisecond.Milliseconds() * int64(HBPeriod) - Expect(result.RequeueAfter.Milliseconds() <= upperBoundOfWantRequeueAfter).Should(BeTrue(), "Reconcile() RequeueAfter got %v, want <= %v", result.RequeueAfter, upperBoundOfWantRequeueAfter) - Expect(result.RequeueAfter.Milliseconds() >= lowerBoundOfWantRequeueAfter).Should(BeTrue(), "Reconcile() RequeueAfter got %v, want >= %v", result.RequeueAfter, lowerBoundOfWantRequeueAfter) - Expect(err).Should(Not(HaveOccurred())) - - var imc clusterv1beta1.InternalMemberCluster - Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &imc)).Should(Succeed()) - - By("checking updated join condition") - updatedJoinedCond := imc.GetConditionWithType(clusterv1beta1.MemberAgent, string(clusterv1beta1.AgentJoined)) - Expect(updatedJoinedCond.Status).To(Equal(metav1.ConditionTrue)) - Expect(updatedJoinedCond.Reason).To(Equal(EventReasonInternalMemberClusterJoined)) - - By("checking updated heartbeat condition") - agentStatus := imc.Status.AgentStatus[0] - Expect(agentStatus.LastReceivedHeartbeat).ToNot(Equal(metav1.Now())) - - By("checking updated health condition") - updatedHealthCond := imc.GetConditionWithType(clusterv1beta1.MemberAgent, string(clusterv1beta1.AgentHealthy)) - Expect(updatedHealthCond.Status).To(Equal(metav1.ConditionTrue)) - Expect(updatedHealthCond.Reason).To(Equal(EventReasonInternalMemberClusterHealthy)) - - By("checking updated member cluster usage") - Expect(imc.Status.Properties[propertyprovider.NodeCountProperty].Value).ShouldNot(BeEmpty()) - Expect(imc.Status.ResourceUsage.Allocatable).ShouldNot(BeNil()) - Expect(imc.Status.ResourceUsage.Capacity).ShouldNot(BeNil()) - Expect(imc.Status.ResourceUsage.Available).ShouldNot(BeNil()) - Expect(imc.Status.ResourceUsage.ObservationTime).ToNot(Equal(metav1.Now())) }) - It("last received heart beat gets updated after heartbeat", func() { - result, err := r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, - }) - // take into account the +- jitter - upperBoundOfWantRequeueAfter := (1000 + 1000*jitterPercent/2/100) * time.Millisecond.Milliseconds() * int64(HBPeriod) - lowerBoundOfWantRequeueAfter := (1000 - 1000*jitterPercent/2/100) * time.Millisecond.Milliseconds() * int64(HBPeriod) - Expect(result.RequeueAfter.Milliseconds() <= 
upperBoundOfWantRequeueAfter).Should(BeTrue(), "Reconcile() RequeueAfter got %v, want <= %v", result.RequeueAfter, upperBoundOfWantRequeueAfter) - Expect(result.RequeueAfter.Milliseconds() >= lowerBoundOfWantRequeueAfter).Should(BeTrue(), "Reconcile() RequeueAfter got %v, want >= %v", result.RequeueAfter, lowerBoundOfWantRequeueAfter) - Expect(err).Should(Not(HaveOccurred())) + It("should join the cluster", func() { + // Verify that the agent status has been updated. + Eventually(func() error { + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member1Name, + Namespace: member1ReservedNSName, + } + if err := hubClient.Get(ctx, objKey, imc); err != nil { + return fmt.Errorf("failed to get InternalMemberCluster: %w", err) + } + + wantIMCStatus := clusterv1beta1.InternalMemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + Reason: ClusterPropertyCollectionSucceededReason, + Message: ClusterPropertyCollectionSucceededMessage, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyProviderStarted), + Status: metav1.ConditionTrue, + Reason: ClusterPropertyProviderStartedReason, + Message: ClusterPropertyProviderStartedMessage, + ObservedGeneration: imc.Generation, + }, + { + Type: propertiesManuallyUpdatedConditionType, + Status: metav1.ConditionTrue, + Reason: propertiesManuallyUpdatedConditionReason1, + Message: propertiesManuallyUpdatedConditionMsg1, + ObservedGeneration: imc.Generation, + }, + }, + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "1", + }, + }, + ResourceUsage: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("10Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("8"), + corev1.ResourceMemory: resource.MustParse("8Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + AgentStatus: []clusterv1beta1.AgentStatus{ + { + Type: clusterv1beta1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterJoined, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.AgentHealthy), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterHealthy, + ObservedGeneration: imc.Generation, + }, + }, + }, + }, + } + + if diff := cmp.Diff( + imc.Status, wantIMCStatus, + ignoreAllTimeFields, + sortByConditionType, + ); diff != "" { + return fmt.Errorf("InternalMemberCluster status diff (-got, +want):\n%s", diff) + } + + // Verify the timestamps. + + // Verify the last transition timestamps in the conditions. + // + // Note that at this point the structure of the InternalMemberCluster status + // object is already known. 
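The Get-then-diff body repeated inside each Eventually could be factored into a helper along these lines for a fixed want status (checkIMCStatus is a hypothetical name; the specs as written also need the fetched generation, so this is only a sketch):

// Hypothetical helper: fetch the InternalMemberCluster and diff its status
// against a want value, returning an error suitable for Eventually.
func checkIMCStatus(key types.NamespacedName, want clusterv1beta1.InternalMemberClusterStatus) func() error {
	return func() error {
		imc := &clusterv1beta1.InternalMemberCluster{}
		if err := hubClient.Get(ctx, key, imc); err != nil {
			return fmt.Errorf("failed to get InternalMemberCluster: %w", err)
		}
		if diff := cmp.Diff(imc.Status, want, ignoreAllTimeFields, sortByConditionType); diff != "" {
			return fmt.Errorf("InternalMemberCluster status diff (-got, +want):\n%s", diff)
		}
		return nil
	}
}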
+ if cond := imc.Status.Conditions[0]; cond.LastTransitionTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeStarted) + } - var imc clusterv1beta1.InternalMemberCluster - Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &imc)).Should(Succeed()) + conds := imc.Status.AgentStatus[0].Conditions + for idx := range conds { + cond := conds[idx] + if cond.LastTransitionTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster agent status condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeStarted) + } + } - memberAgentStatus := imc.GetAgentStatus(clusterv1beta1.MemberAgent) - lastReceivedHeartbeat := memberAgentStatus.LastReceivedHeartbeat + // Verify the observation timestamps in the properties. + for pn, pv := range imc.Status.Properties { + if pv.ObservationTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster property %s has observation time %v, want before %v", pn, pv.ObservationTime, timeStarted) + } + } + if u := imc.Status.ResourceUsage; u.ObservationTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster resource usage has observation time %v, want before %v", u.ObservationTime, timeStarted) + } - time.Sleep(time.Second) + return nil + }, eventuallyTimeout, eventuallyInterval).Should(Succeed(), "Failed to update the agent status") + }) + + It("can apply a new property update", func() { + // Add an offset of -1 second to avoid flakiness caused by approximation. + timeUpdated = metav1.Time{Time: time.Now().Add(-time.Second)} - By("trigger reconcile which should update last received heart beat time") - result, err = r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, + observationTime := metav1.Now() + propertyProvider1.Update(&propertyprovider.PropertyCollectionResponse{ + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "2", + ObservationTime: observationTime, + }, + }, + Resources: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("15"), + corev1.ResourceMemory: resource.MustParse("30Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("12"), + corev1.ResourceMemory: resource.MustParse("24Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("6Gi"), + }, + ObservationTime: observationTime, + }, + Conditions: []metav1.Condition{ + { + Type: propertiesManuallyUpdatedConditionType, + Status: metav1.ConditionFalse, + Reason: propertiesManuallyUpdatedConditionReason2, + Message: propertiesManuallyUpdatedConditionMsg2, + }, + }, }) - // take into account the +- jitter - Expect(result.RequeueAfter.Milliseconds() <= upperBoundOfWantRequeueAfter).Should(BeTrue(), "Reconcile() RequeueAfter got %v, want <= %v", result.RequeueAfter, upperBoundOfWantRequeueAfter) - Expect(result.RequeueAfter.Milliseconds() >= lowerBoundOfWantRequeueAfter).Should(BeTrue(), "Reconcile() RequeueAfter got %v, want >= %v", result.RequeueAfter, lowerBoundOfWantRequeueAfter) - Expect(err).Should(Not(HaveOccurred())) - Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &imc)).Should(Succeed()) - Expect(lastReceivedHeartbeat).ShouldNot(Equal(imc.Status.AgentStatus[0].LastReceivedHeartbeat)) + }) + + It("should update the properties", func() { 
+ // Verify that the agent status has been updated. + Eventually(func() error { + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member1Name, + Namespace: member1ReservedNSName, + } + if err := hubClient.Get(ctx, objKey, imc); err != nil { + return fmt.Errorf("failed to get InternalMemberCluster: %w", err) + } + + wantIMCStatus := clusterv1beta1.InternalMemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + Reason: ClusterPropertyCollectionSucceededReason, + Message: ClusterPropertyCollectionSucceededMessage, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyProviderStarted), + Status: metav1.ConditionTrue, + Reason: ClusterPropertyProviderStartedReason, + Message: ClusterPropertyProviderStartedMessage, + ObservedGeneration: imc.Generation, + }, + { + Type: propertiesManuallyUpdatedConditionType, + Status: metav1.ConditionFalse, + Reason: propertiesManuallyUpdatedConditionReason2, + Message: propertiesManuallyUpdatedConditionMsg2, + ObservedGeneration: imc.Generation, + }, + }, + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "2", + }, + }, + ResourceUsage: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("15"), + corev1.ResourceMemory: resource.MustParse("30Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("12"), + corev1.ResourceMemory: resource.MustParse("24Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("6Gi"), + }, + }, + AgentStatus: []clusterv1beta1.AgentStatus{ + { + Type: clusterv1beta1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterJoined, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.AgentHealthy), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterHealthy, + ObservedGeneration: imc.Generation, + }, + }, + }, + }, + } + + if diff := cmp.Diff( + imc.Status, wantIMCStatus, + ignoreAllTimeFields, + sortByConditionType, + ); diff != "" { + return fmt.Errorf("InternalMemberCluster status diff (-got, +want):\n%s", diff) + } + + // Verify the timestamps. + + // Verify the last transition timestamps in the conditions. + // + // Note that at this point the structure of the InternalMemberCluster status + // object is already known. + if cond := imc.Status.Conditions[0]; cond.LastTransitionTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeStarted) + } + + conds := imc.Status.AgentStatus[0].Conditions + for idx := range conds { + cond := conds[idx] + if cond.LastTransitionTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster agent status condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeStarted) + } + } + + // Verify the observation timestamps in the properties. 
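The timestamp assertions here and in the other specs all reduce to "not earlier than a reference time"; a shared helper could express the check once (checkNoEarlierThan is a hypothetical name, not part of this change):

// Hypothetical helper: report an error if an observed time is earlier than a
// reference time.
func checkNoEarlierThan(what string, got, ref metav1.Time) error {
	if got.Before(&ref) {
		return fmt.Errorf("%s has timestamp %v, want no earlier than %v", what, got, ref)
	}
	return nil
}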
+ for pn, pv := range imc.Status.Properties { + if pv.ObservationTime.Before(&timeUpdated) { + return fmt.Errorf("InternalMemberCluster property %s has observation time %v, want before %v", pn, pv.ObservationTime, timeStarted) + } + } + if u := imc.Status.ResourceUsage; u.ObservationTime.Before(&timeUpdated) { + return fmt.Errorf("InternalMemberCluster resource usage has observation time %v, want before %v", u.ObservationTime, timeStarted) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).Should(Succeed(), "Failed to update the agent status") + }) + + It("can mark the cluster as left", func() { + // Add an offset of -1 second to avoid flakiness caused by approximation. + timeLeft = metav1.Time{Time: time.Now().Add(-time.Second)} + + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member1Name, + Namespace: member1ReservedNSName, + } + Expect(hubClient.Get(ctx, objKey, imc)).Should(Succeed()) + + imc.Spec.State = clusterv1beta1.ClusterStateLeave + Expect(hubClient.Update(ctx, imc)).Should(Succeed()) + }) + + It("should let the cluster go", func() { + Eventually(func() error { + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member1Name, + Namespace: member1ReservedNSName, + } + if err := hubClient.Get(ctx, objKey, imc); err != nil { + return fmt.Errorf("failed to get InternalMemberCluster: %w", err) + } + + wantIMCStatus := clusterv1beta1.InternalMemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + Reason: ClusterPropertyCollectionSucceededReason, + Message: ClusterPropertyCollectionSucceededMessage, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyProviderStarted), + Status: metav1.ConditionTrue, + Reason: ClusterPropertyProviderStartedReason, + Message: ClusterPropertyProviderStartedMessage, + ObservedGeneration: imc.Generation, + }, + { + Type: propertiesManuallyUpdatedConditionType, + Status: metav1.ConditionFalse, + Reason: propertiesManuallyUpdatedConditionReason2, + Message: propertiesManuallyUpdatedConditionMsg2, + ObservedGeneration: imc.Generation, + }, + }, + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "2", + }, + }, + ResourceUsage: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("15"), + corev1.ResourceMemory: resource.MustParse("30Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("12"), + corev1.ResourceMemory: resource.MustParse("24Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("6Gi"), + }, + }, + AgentStatus: []clusterv1beta1.AgentStatus{ + { + Type: clusterv1beta1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: EventReasonInternalMemberClusterLeft, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.AgentHealthy), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterHealthy, + ObservedGeneration: imc.Generation, + }, + }, + }, + }, + } + + if diff := cmp.Diff( + imc.Status, wantIMCStatus, + ignoreAllTimeFields, + sortByConditionType, + ); diff != "" { + return fmt.Errorf("InternalMemberCluster status diff (-got, 
+want):\n%s", diff) + } + + // Verify the timestamp; for this spec only the last transition time of the + // AgentJoined condition needs to be checked. + + // Note that at this point the structure of the InternalMemberCluster status object + // is already known; this condition is guaranteed to be present. + cond := imc.GetConditionWithType(clusterv1beta1.MemberAgent, string(clusterv1beta1.AgentJoined)) + if cond.LastTransitionTime.Before(&timeLeft) { + return fmt.Errorf("InternalMemberCluster agent status condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeLeft) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(Succeed(), "Failed to let the cluster go") }) }) - Context("leave", func() { - BeforeEach(func() { - By("create internalMemberCluster CR") - internalMemberCluster := clusterv1beta1.InternalMemberCluster{ + // Note that specs in this context run in serial, however, they might run in parallel with + // the other contexts if parallelization is enabled. + // + // This is safe as the controller managers have been configured to watch only their own + // respective namespaces. + Context("Test setup with no property provider", Ordered, func() { + var ( + // Add an offset of -1 second to avoid flakiness caused by approximation. + timeStarted = metav1.Time{Time: time.Now().Add(-time.Second)} + + // The timestamps below are set in later steps. + timeUpdated metav1.Time + timeLeft metav1.Time + ) + + BeforeAll(func() { + // Create the InternalMemberCluster object. + imc := &clusterv1beta1.InternalMemberCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: memberClusterName, - Namespace: memberClusterNamespace, + Name: member2Name, + Namespace: member2ReservedNSName, }, Spec: clusterv1beta1.InternalMemberClusterSpec{ - State: clusterv1beta1.ClusterStateLeave, - HeartbeatPeriodSeconds: int32(HBPeriod), + State: clusterv1beta1.ClusterStateJoin, + // Use a shorter heartbeat period to improve responsiveness. 
+ HeartbeatPeriodSeconds: 2, }, } - Expect(k8sClient.Create(ctx, &internalMemberCluster)).Should(Succeed()) - - By("update internalMemberCluster CR with random usage status") - internalMemberCluster.Status = clusterv1beta1.InternalMemberClusterStatus{ - ResourceUsage: clusterv1beta1.ResourceUsage{ - Capacity: utils.NewResourceList(), - Allocatable: utils.NewResourceList(), - ObservationTime: metav1.Now(), + Expect(hubClient.Create(ctx, imc)).Should(Succeed()) + }) + + It("should join the cluster", func() { + Eventually(func() error { + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member2Name, + Namespace: member2ReservedNSName, + } + if err := hubClient.Get(ctx, objKey, imc); err != nil { + return fmt.Errorf("failed to get InternalMemberCluster: %w", err) + } + + wantIMCStatus := clusterv1beta1.InternalMemberClusterStatus{ + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "2", + }, + }, + ResourceUsage: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("14"), + corev1.ResourceMemory: resource.MustParse("48Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("12"), + corev1.ResourceMemory: resource.MustParse("42Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("36Gi"), + }, + }, + AgentStatus: []clusterv1beta1.AgentStatus{ + { + Type: clusterv1beta1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterJoined, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.AgentHealthy), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterHealthy, + ObservedGeneration: imc.Generation, + }, + }, + }, + }, + } + + if diff := cmp.Diff( + imc.Status, wantIMCStatus, + ignoreAllTimeFields, + sortByConditionType, + ); diff != "" { + return fmt.Errorf("InternalMemberCluster status diff (-got, +want):\n%s", diff) + } + + // Verify the timestamps. + + // Verify the last transition timestamps in the conditions. + // + // Note that at this point the structure of the InternalMemberCluster status + // object is already known. + conds := imc.Status.AgentStatus[0].Conditions + for idx := range conds { + cond := conds[idx] + if cond.LastTransitionTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster agent status condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeStarted) + } + } + + // Verify the observation timestamps in the properties. + for pn, pv := range imc.Status.Properties { + if pv.ObservationTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster property %s has observation time %v, want before %v", pn, pv.ObservationTime, timeStarted) + } + } + + if u := imc.Status.ResourceUsage; u.ObservationTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster resource usage has observation time %v, want before %v", u.ObservationTime, timeStarted) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).Should(Succeed(), "Failed to update the agent status") + }) + + It("can add more nodes/pods", func() { + // Add an offset of -1 second to avoid flakiness caused by approximation. 
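As a cross-check of the Available figures in the spec above and the updated ones below: the expected numbers are consistent with available being allocatable minus the requests of running pods; this is an inference from the want values, not something the change states explicitly. A quick sketch for member cluster 2's CPU:

// Rough cross-check (assumed accounting: available = allocatable - requests of running pods).
func expectedAvailableCPUForMember2() resource.Quantity {
	avail := resource.MustParse("12")  // allocatable CPU across node-1 and node-2 (3 + 9)
	avail.Sub(resource.MustParse("2")) // pod-1, the only running pod, requests 1 + 1 CPU
	return avail                       // 10, matching the Available figure in the want status
}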
+ timeUpdated = metav1.Time{Time: time.Now().Add(-time.Second)} + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName3, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, }, } - Expect(k8sClient.Status().Update(ctx, &internalMemberCluster)).Should(Succeed()) + + Expect(member2Client.Create(ctx, node.DeepCopy())).Should(Succeed()) + Expect(member2Client.Status().Update(ctx, node.DeepCopy())).Should(Succeed()) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName5, + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: nodeName1, + Containers: []corev1.Container{ + { + Name: containerName1, + Image: imageName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + } + + Expect(member2Client.Create(ctx, pod.DeepCopy())).Should(Succeed()) + Expect(member2Client.Status().Update(ctx, pod.DeepCopy())).Should(Succeed()) }) - It("should update internalMemberCluster to Left", func() { - result, err := r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, - }) - Expect(result).Should(Equal(ctrl.Result{})) - Expect(err).Should(Not(HaveOccurred())) + It("should update the properties", func() { + Eventually(func() error { + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member2Name, + Namespace: member2ReservedNSName, + } + if err := hubClient.Get(ctx, objKey, imc); err != nil { + return fmt.Errorf("failed to get InternalMemberCluster: %w", err) + } + + wantIMCStatus := clusterv1beta1.InternalMemberClusterStatus{ + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "3", + }, + }, + ResourceUsage: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("15"), + corev1.ResourceMemory: resource.MustParse("49Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("13"), + corev1.ResourceMemory: resource.MustParse("43Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("36Gi"), + }, + }, + AgentStatus: []clusterv1beta1.AgentStatus{ + { + Type: clusterv1beta1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterJoined, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.AgentHealthy), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterHealthy, + ObservedGeneration: imc.Generation, + }, + }, + }, + }, + } + + if diff := cmp.Diff( + imc.Status, wantIMCStatus, + ignoreAllTimeFields, + sortByConditionType, + ); diff != "" { + return fmt.Errorf("InternalMemberCluster status diff (-got, +want):\n%s", diff) + } + + // Verify the last transition timestamps in the conditions. + // + // Note that at this point the structure of the InternalMemberCluster status + // object is already known. 
+ conds := imc.Status.AgentStatus[0].Conditions + for idx := range conds { + cond := conds[idx] + if cond.LastTransitionTime.Before(&timeStarted) { + return fmt.Errorf("InternalMemberCluster agent status condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeStarted) + } + } + + // Verify the observation timestamps in the properties. + for pn, pv := range imc.Status.Properties { + if pv.ObservationTime.Before(&timeUpdated) { + return fmt.Errorf("InternalMemberCluster property %s has observation time %v, want before %v", pn, pv.ObservationTime, timeStarted) + } + } + + if u := imc.Status.ResourceUsage; u.ObservationTime.Before(&timeUpdated) { + return fmt.Errorf("InternalMemberCluster resource usage has observation time %v, want before %v", u.ObservationTime, timeStarted) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).Should(Succeed(), "Failed to update the agent status") + }) + + It("can mark the cluster as left", func() { + // Add an offset of -1 second to avoid flakiness caused by approximation. + timeLeft = metav1.Time{Time: time.Now().Add(-time.Second)} + + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member2Name, + Namespace: member2ReservedNSName, + } + Expect(hubClient.Get(ctx, objKey, imc)).Should(Succeed()) + + imc.Spec.State = clusterv1beta1.ClusterStateLeave + Expect(hubClient.Update(ctx, imc)).Should(Succeed()) + }) + + It("should let the cluster go", func() { + Eventually(func() error { + imc := &clusterv1beta1.InternalMemberCluster{} + objKey := types.NamespacedName{ + Name: member2Name, + Namespace: member2ReservedNSName, + } + if err := hubClient.Get(ctx, objKey, imc); err != nil { + return fmt.Errorf("failed to get InternalMemberCluster: %w", err) + } + + wantIMCStatus := clusterv1beta1.InternalMemberClusterStatus{ + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.NodeCountProperty: { + Value: "3", + }, + }, + ResourceUsage: clusterv1beta1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("15"), + corev1.ResourceMemory: resource.MustParse("49Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("13"), + corev1.ResourceMemory: resource.MustParse("43Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("36Gi"), + }, + }, + AgentStatus: []clusterv1beta1.AgentStatus{ + { + Type: clusterv1beta1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: EventReasonInternalMemberClusterLeft, + ObservedGeneration: imc.Generation, + }, + { + Type: string(clusterv1beta1.AgentHealthy), + Status: metav1.ConditionTrue, + Reason: EventReasonInternalMemberClusterHealthy, + ObservedGeneration: imc.Generation, + }, + }, + }, + }, + } + + if diff := cmp.Diff( + imc.Status, wantIMCStatus, + ignoreAllTimeFields, + sortByConditionType, + ); diff != "" { + return fmt.Errorf("InternalMemberCluster status diff (-got, +want):\n%s", diff) + } + + // Verify the timestamp; for this spec only the last transition time of the + // AgentJoined condition needs to be checked. 
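For reference, the comparison options used throughout, ignoreAllTimeFields (pre-existing) and sortByConditionType (added in member_controller_test.go below), make the status checks ignore time-typed fields and compare conditions order-insensitively; in isolation the combination behaves like this hypothetical predicate:

// Hypothetical illustration of the cmp options: true even if the slices are
// ordered differently or carry different transition times.
func conditionsMatch(got, want []metav1.Condition) bool {
	return cmp.Diff(got, want, ignoreAllTimeFields, sortByConditionType) == ""
}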
- var internalMemberCluster clusterv1beta1.InternalMemberCluster - Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &internalMemberCluster)).Should(Succeed()) + // Note that at this point the structure of the InternalMemberCluster status object + // is already known; this condition is guaranteed to be present. + cond := imc.GetConditionWithType(clusterv1beta1.MemberAgent, string(clusterv1beta1.AgentJoined)) + if cond.LastTransitionTime.Before(&timeLeft) { + return fmt.Errorf("InternalMemberCluster agent status condition %s has last transition time %v, want before %v", cond.Type, cond.LastTransitionTime, timeLeft) + } - By("checking updated join condition") - updatedJoinedCond := internalMemberCluster.GetConditionWithType(clusterv1beta1.MemberAgent, string(clusterv1beta1.AgentJoined)) - Expect(updatedJoinedCond.Status).Should(Equal(metav1.ConditionFalse)) - Expect(updatedJoinedCond.Reason).Should(Equal(EventReasonInternalMemberClusterLeft)) + return nil + }, eventuallyTimeout, eventuallyInterval).Should(Succeed(), "Failed to let the cluster go") }) }) }) diff --git a/pkg/controllers/internalmembercluster/v1beta1/member_controller_test.go b/pkg/controllers/internalmembercluster/v1beta1/member_controller_test.go index 7f6507042..f49ed6b1f 100644 --- a/pkg/controllers/internalmembercluster/v1beta1/member_controller_test.go +++ b/pkg/controllers/internalmembercluster/v1beta1/member_controller_test.go @@ -49,6 +49,8 @@ const ( podName1 = "pod-1" podName2 = "pod-2" podName3 = "pod-3" + podName4 = "pod-4" + podName5 = "pod-5" containerName1 = "container-1" containerName2 = "container-2" @@ -57,6 +59,10 @@ const ( var ( ignoreLTTConditionField = cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime") ignoreAllTimeFields = cmpopts.IgnoreTypes(time.Time{}, metav1.Time{}) + + sortByConditionType = cmpopts.SortSlices(func(a, b metav1.Condition) bool { + return a.Type < b.Type + }) ) func TestMarkInternalMemberClusterJoined(t *testing.T) { diff --git a/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go b/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go index 9d5a97d24..9d46f3e30 100644 --- a/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go +++ b/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go @@ -5,33 +5,67 @@ Licensed under the MIT license. package v1beta1 import ( - "flag" + "context" "os" "path/filepath" + "sync" "testing" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" "k8s.io/klog/v2/textlogger" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" - metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" - placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/controllers/work" + "go.goms.io/fleet/pkg/propertyprovider" +) + +const ( + member1Name = "cluster-1" + member2Name = "cluster-2" + member1ReservedNSName = "fleet-member-cluster-1" + member2ReservedNSName = "fleet-member-cluster-2" + + imageName = "nginx" ) var ( - cfg *rest.Config - mgr manager.Manager - k8sClient client.Client - testEnv *envtest.Environment + // This test suite features three clusters: + // + // * Hub cluster + // * Member cluster 1: the member cluster that has a property provider set up + // * Member cluster 2: the member cluster that does not have a property provider set up + hubCfg *rest.Config + member1Cfg *rest.Config + member2Cfg *rest.Config + hubEnv *envtest.Environment + member1Env *envtest.Environment + member2Env *envtest.Environment + member1Mgr manager.Manager + member2Mgr manager.Manager + hubClient client.Client + member1Client client.Client + member2Client client.Client + workApplier1 *work.ApplyWorkReconciler + workApplier2 *work.ApplyWorkReconciler + propertyProvider1 *manuallyUpdatedProvider + + ctx context.Context + cancel context.CancelFunc ) func TestMain(m *testing.M) { @@ -39,65 +73,346 @@ func TestMain(m *testing.M) { // since the unit tests in this package is not using the Ginkgo framework, and some of the // tests might emit error logs, here the configuration happens at the test entrypoint to // avoid R/W data races on the logger. - klog.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)) + klog.SetLogger(logger) + ctrllog.SetLogger(logger) os.Exit(m.Run()) } -func TestInternalMemberCluster(t *testing.T) { +func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) + RunSpecs(t, "Internal Member Cluster Controller Integration Test Suite") } +type manuallyUpdatedProvider struct { + mu sync.Mutex + lastUpdatedRes *propertyprovider.PropertyCollectionResponse +} + +var _ propertyprovider.PropertyProvider = &manuallyUpdatedProvider{} + +func (m *manuallyUpdatedProvider) Start(_ context.Context, _ *rest.Config) error { + return nil +} + +func (m *manuallyUpdatedProvider) Collect(_ context.Context) propertyprovider.PropertyCollectionResponse { + m.mu.Lock() + defer m.mu.Unlock() + + if m.lastUpdatedRes != nil { + return *m.lastUpdatedRes + } + return propertyprovider.PropertyCollectionResponse{} +} + +func (m *manuallyUpdatedProvider) Update(res *propertyprovider.PropertyCollectionResponse) { + m.mu.Lock() + defer m.mu.Unlock() + + m.lastUpdatedRes = res +} + +func setupResources() { + // Create the namespaces. 
+ member1ReservedNS := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: member1ReservedNSName, + }, + } + Expect(hubClient.Create(ctx, &member1ReservedNS)).To(Succeed()) + + member2ReservedNS := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: member2ReservedNSName, + }, + } + Expect(hubClient.Create(ctx, &member2ReservedNS)).To(Succeed()) + + memberClients := []client.Client{member1Client, member2Client} + + // Create the nodes in the clusters. + nodes := []*corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName1, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("16Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("12Gi"), + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName2, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("32Gi"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("9"), + corev1.ResourceMemory: resource.MustParse("30Gi"), + }, + }, + }, + } + + for nidx := range nodes { + node := nodes[nidx] + + for cidx := range memberClients { + nodeCopy := node.DeepCopy() + client := memberClients[cidx] + Expect(client.Create(ctx, nodeCopy)).To(Succeed()) + + nodeCopy.Status = *node.Status.DeepCopy() + Expect(client.Status().Update(ctx, nodeCopy)).To(Succeed()) + } + } + + // Create the pods in the clusters. + pods := []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: podName1, + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: nodeName1, + Containers: []corev1.Container{ + { + Name: containerName1, + Image: imageName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + }, + { + Name: containerName2, + Image: imageName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: podName2, + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: nodeName2, + Containers: []corev1.Container{ + { + Name: containerName1, + Image: imageName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: podName3, + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: nodeName2, + Containers: []corev1.Container{ + { + Name: containerName1, + Image: imageName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: podName4, + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: containerName1, + Image: 
imageName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + } + + for pidx := range pods { + pod := pods[pidx] + + for cidx := range memberClients { + podCopy := pod.DeepCopy() + client := memberClients[cidx] + Expect(client.Create(ctx, podCopy)).To(Succeed()) + + podCopy.Status = *pod.Status.DeepCopy() + Expect(client.Status().Update(ctx, podCopy)).To(Succeed()) + } + } +} + var _ = BeforeSuite(func() { - done := make(chan interface{}) + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environments") + hubEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("../../../../", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + member1Env = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("../../../../", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + member2Env = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("../../../../", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + hubCfg, err = hubEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(hubCfg).NotTo(BeNil()) + + member1Cfg, err = member1Env.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(member1Cfg).NotTo(BeNil()) + + member2Cfg, err = member2Env.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(member2Cfg).NotTo(BeNil()) + + err = clusterv1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + By("building the K8s clients") + hubClient, err = client.New(hubCfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(hubClient).NotTo(BeNil()) + + member1Client, err = client.New(member1Cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(member1Client).NotTo(BeNil()) + + member2Client, err = client.New(member2Cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(member2Client).NotTo(BeNil()) + + By("setting up the resources") + setupResources() + + By("starting the controller managers and setting up the controllers") + member1Mgr, err = ctrl.NewManager(hubCfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: server.Options{ + BindAddress: "0", + }, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + member1ReservedNSName: {}, + }, + }, + Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))), + }) + Expect(err).NotTo(HaveOccurred()) + + // This controller is created for testing purposes only; no reconciliation loop is actually + // run. + workApplier1 = work.NewApplyWorkReconciler(hubClient, nil, nil, nil, nil, 0, "") + + propertyProvider1 = &manuallyUpdatedProvider{} + member1Reconciler, err := NewReconciler(ctx, hubClient, member1Cfg, member1Client, workApplier1, propertyProvider1) + Expect(err).NotTo(HaveOccurred()) + Expect(member1Reconciler.SetupWithManager(member1Mgr)).To(Succeed()) + + member2Mgr, err = ctrl.NewManager(hubCfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: server.Options{ + BindAddress: "0", + }, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + member2ReservedNSName: {}, + }, + }, + Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))), + }) + Expect(err).NotTo(HaveOccurred()) + + // This controller is created for testing purposes only; no reconciliation loop is actually + // run. 
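For reference, the manager wiring used for both member agents follows the same pattern: connect to the hub, turn off the metrics endpoint to avoid port clashes, and cache only the agent's own reserved namespace. A hypothetical helper capturing it (newMemberAgentManager is not part of this change):

// Hypothetical sketch of the per-member manager configuration used above.
func newMemberAgentManager(reservedNS string) (manager.Manager, error) {
	return ctrl.NewManager(hubCfg, ctrl.Options{
		Scheme: scheme.Scheme,
		Metrics: server.Options{
			BindAddress: "0", // no metrics endpoint, so the two managers cannot clash
		},
		Cache: cache.Options{
			DefaultNamespaces: map[string]cache.Config{
				reservedNS: {}, // watch only this member's reserved namespace on the hub
			},
		},
	})
}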
+ workApplier2 = work.NewApplyWorkReconciler(hubClient, nil, nil, nil, nil, 0, "") + + member2Reconciler, err := NewReconciler(ctx, hubClient, member2Cfg, member2Client, workApplier2, nil) + Expect(err).NotTo(HaveOccurred()) + Expect(member2Reconciler.SetupWithManager(member2Mgr)).To(Succeed()) + go func() { - // GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` Since Gomega - // assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that calls out - // to Gomega. - // Source: https://pkg.go.dev/github.com/onsi/ginkgo#GinkgoRecover defer GinkgoRecover() + Expect(member1Mgr.Start(ctx)).To(Succeed()) + }() - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("../../../../", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - var err error - cfg, err = testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - // register types to scheme - err = placementv1beta1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - err = clusterv1beta1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - //+kubebuilder:scaffold:scheme - By("construct the k8s client") - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - - By("Starting the controller manager") - klog.InitFlags(flag.CommandLine) - mgr, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - Metrics: metricsserver.Options{ - BindAddress: "0", - }, - Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))), - }) - Expect(err).ToNot(HaveOccurred()) - - close(done) + go func() { + defer GinkgoRecover() + Expect(member2Mgr.Start(ctx)).To(Succeed()) }() - Eventually(done, 60).Should(BeClosed()) }) var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) + defer klog.Flush() + + cancel() + By("tearing down the test environments") + Expect(hubEnv.Stop()).To(Succeed()) + Expect(member1Env.Stop()).To(Succeed()) + Expect(member2Env.Stop()).To(Succeed()) })
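Since each ordered context creates an InternalMemberCluster that is never deleted, a cleanup step could be added if the suite ever grows beyond two members; the envtest environments are torn down in AfterSuite, so this is optional. A hypothetical sketch (cleanupIMC is not part of this change):

// Hypothetical cleanup step, e.g. for an AfterAll block: delete the
// InternalMemberCluster created by an ordered context, tolerating absence.
func cleanupIMC(name, ns string) {
	imc := &clusterv1beta1.InternalMemberCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
	}
	Expect(client.IgnoreNotFound(hubClient.Delete(ctx, imc))).To(Succeed())
}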