diff --git a/Makefile b/Makefile index 2eb00f4911..4c36d4c5c5 100644 --- a/Makefile +++ b/Makefile @@ -398,6 +398,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/topology" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-topology-supervisor.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/conformance" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-conformance-supervisor.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/install-on-bootstrap" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-install-on-bootstrap-supervisor.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/ownerrefs-finalizers" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-ownerrefs-finalizers-supervisor.yaml" .PHONY: generate-e2e-templates-v1.10 generate-e2e-templates-v1.10: $(KUSTOMIZE) diff --git a/packaging/flavorgen/cloudprovider/csi.go b/packaging/flavorgen/cloudprovider/csi.go index 6e07d6ce27..32229951d4 100644 --- a/packaging/flavorgen/cloudprovider/csi.go +++ b/packaging/flavorgen/cloudprovider/csi.go @@ -43,7 +43,7 @@ func CSICloudConfigSecret(data string) *corev1.Secret { APIVersion: corev1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ - Name: "vsphere-config-secret", + Name: "vsphere-config-secret", // NOTE: this name is used in E2E tests. 
Namespace: CSINamespace, }, Type: corev1.SecretTypeOpaque, diff --git a/packaging/flavorgen/flavors/crs/cpi.go b/packaging/flavorgen/flavors/crs/cpi.go index 4deb25d00f..b5fc3b5e4b 100644 --- a/packaging/flavorgen/flavors/crs/cpi.go +++ b/packaging/flavorgen/flavors/crs/cpi.go @@ -99,7 +99,7 @@ func cpiCredentials(credentials map[string]string) *corev1.Secret { }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceSystem, - Name: "cloud-provider-vsphere-credentials", + Name: "cloud-provider-vsphere-credentials", // NOTE: this name is used in E2E tests. }, Type: corev1.SecretTypeOpaque, StringData: credentials, diff --git a/packaging/flavorgen/flavors/crs/util.go b/packaging/flavorgen/flavors/crs/util.go index 10737f29d9..d57473b1f6 100644 --- a/packaging/flavorgen/flavors/crs/util.go +++ b/packaging/flavorgen/flavors/crs/util.go @@ -77,7 +77,7 @@ func appendConfigMapToCrsResource(crs *addonsv1.ClusterResourceSet, generatedCon func newCPIConfig() ([]byte, error) { config := map[string]interface{}{ "global": map[string]interface{}{ - "secretName": "cloud-provider-vsphere-credentials", + "secretName": "cloud-provider-vsphere-credentials", // NOTE: this name is used in E2E tests. 
"secretNamespace": metav1.NamespaceSystem, "thumbprint": env.VSphereThumbprint, "port": 443, diff --git a/packaging/flavorgen/flavors/flavors.go b/packaging/flavorgen/flavors/flavors.go index f9e54f2a54..684b1edf91 100644 --- a/packaging/flavorgen/flavors/flavors.go +++ b/packaging/flavorgen/flavors/flavors.go @@ -108,7 +108,6 @@ func ClusterTopologyTemplateSupervisor() ([]runtime.Object, error) { if err != nil { return nil, err } - identitySecret := newIdentitySecret() clusterResourceSet := newClusterResourceSet(cluster) crsResourcesCSI, err := crs.CreateCrsResourceObjectsCSI(&clusterResourceSet) if err != nil { @@ -117,7 +116,6 @@ func ClusterTopologyTemplateSupervisor() ([]runtime.Object, error) { crsResourcesCPI := crs.CreateCrsResourceObjectsCPI(&clusterResourceSet) MultiNodeTemplate := []runtime.Object{ &cluster, - &identitySecret, &clusterResourceSet, } MultiNodeTemplate = append(MultiNodeTemplate, crsResourcesCSI...) @@ -179,7 +177,6 @@ func MultiNodeTemplateSupervisor() ([]runtime.Object, error) { return nil, err } crsResourcesCPI := crs.CreateCrsResourceObjectsCPI(&clusterResourceSet) - identitySecret := newIdentitySecret() MultiNodeTemplate := []runtime.Object{ &cluster, @@ -190,7 +187,6 @@ func MultiNodeTemplateSupervisor() ([]runtime.Object, error) { &kubeadmJoinTemplate, &machineDeployment, &clusterResourceSet, - &identitySecret, } MultiNodeTemplate = append(MultiNodeTemplate, crsResourcesCSI...) 
diff --git a/templates/cluster-template-supervisor.yaml b/templates/cluster-template-supervisor.yaml index 62f9317888..7c01e87796 100644 --- a/templates/cluster-template-supervisor.yaml +++ b/templates/cluster-template-supervisor.yaml @@ -311,15 +311,6 @@ spec: --- apiVersion: v1 kind: Secret -metadata: - name: '${CLUSTER_NAME}' - namespace: '${NAMESPACE}' -stringData: - password: "${VSPHERE_PASSWORD}" - username: "${VSPHERE_USERNAME}" ---- -apiVersion: v1 -kind: Secret metadata: name: vsphere-config-secret namespace: '${NAMESPACE}' diff --git a/templates/cluster-template-topology-supervisor.yaml b/templates/cluster-template-topology-supervisor.yaml index a645945e74..af5723da31 100644 --- a/templates/cluster-template-topology-supervisor.yaml +++ b/templates/cluster-template-topology-supervisor.yaml @@ -96,15 +96,6 @@ spec: name: md-0 replicas: ${WORKER_MACHINE_COUNT} --- -apiVersion: v1 -kind: Secret -metadata: - name: '${CLUSTER_NAME}' - namespace: '${NAMESPACE}' -stringData: - password: "${VSPHERE_PASSWORD}" - username: "${VSPHERE_USERNAME}" ---- apiVersion: addons.cluster.x-k8s.io/v1beta1 kind: ClusterResourceSet metadata: diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 2aeb087684..e940e05bbf 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -178,6 +178,7 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-quick-start-supervisor.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-install-on-bootstrap-supervisor.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-conformance-supervisor.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-ownerrefs-finalizers-supervisor.yaml" - sourcePath: "../data/shared/capv/main/metadata.yaml" - name: "{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.10}" # supported 
release in the v1beta1 series # Use manifest from source files diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/ownerrefs-finalizers/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/ownerrefs-finalizers/kustomization.yaml new file mode 100644 index 0000000000..0c80a7a914 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/ownerrefs-finalizers/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../base +patchesStrategicMerge: + - ../commons/cluster-resource-set-label.yaml + - ../commons/cluster-network-CIDR.yaml + - ../commons/cluster-resource-set-csi-insecure.yaml diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 1e8f623682..a11b5b30b0 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -24,6 +24,7 @@ import ( "path/filepath" "strings" "testing" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -210,8 +211,9 @@ var _ = SynchronizedBeforeSuite(func() []byte { if testTarget == VCSimTestTarget { Byf("Creating a vcsim server") - err := vspherevcsim.Create(ctx, bootstrapClusterProxy.GetClient()) - Expect(err).ToNot(HaveOccurred(), "Failed to create VCenterSimulator") + Eventually(func() error { + return vspherevcsim.Create(ctx, bootstrapClusterProxy.GetClient()) + }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred(), "Failed to create VCenterSimulator") } By("Getting AddressClaim labels") diff --git a/test/e2e/ownerrefs_finalizers_test.go b/test/e2e/ownerrefs_finalizers_test.go index 3faf85b445..6ff541c327 100644 --- a/test/e2e/ownerrefs_finalizers_test.go +++ b/test/e2e/ownerrefs_finalizers_test.go @@ -28,6 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" 
"k8s.io/apimachinery/pkg/util/sets" @@ -44,26 +45,30 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) -var _ = Describe("Ensure OwnerReferences and Finalizers are resilient with FailureDomains and ClusterIdentity", func() { +var _ = Describe("Ensure OwnerReferences and Finalizers are resilient with FailureDomains and ClusterIdentity [vcsim] [supervisor]", func() { const specName = "owner-reference" Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { - // NOTE: When testing with vcsim VSPHERE_USERNAME and VSPHERE_PASSWORD are provided as a test specific variables, - // when running on CI same variables are provided as env variables. - input := testSpecificSettingsGetter() - username, ok := input.Variables["VSPHERE_USERNAME"] - if !ok { - username = os.Getenv("VSPHERE_USERNAME") - } - password, ok := input.Variables["VSPHERE_PASSWORD"] - if !ok { - password = os.Getenv("VSPHERE_PASSWORD") - } + if testMode == GovmomiTestMode { + // NOTE: When testing with vcsim VSPHERE_USERNAME and VSPHERE_PASSWORD are provided as a test specific variables, + // when running on CI same variables are provided as env variables. + input := testSpecificSettingsGetter() + username, ok := input.Variables["VSPHERE_USERNAME"] + if !ok { + username = os.Getenv("VSPHERE_USERNAME") + } + password, ok := input.Variables["VSPHERE_PASSWORD"] + if !ok { + password = os.Getenv("VSPHERE_PASSWORD") + } - // Before running the test create the secret used by the VSphereClusterIdentity to connect to the vCenter. 
- createVsphereIdentitySecret(ctx, bootstrapClusterProxy, username, password) + // Before running the test create the secret used by the VSphereClusterIdentity to connect to the vCenter. + createVsphereIdentitySecret(ctx, bootstrapClusterProxy, username, password) + } return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, @@ -74,17 +79,21 @@ var _ = Describe("Ensure OwnerReferences and Finalizers are resilient with Failu Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("ownerrefs-finalizers")), PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { - // check the cluster identity secret has expected ownerReferences and finalizers, and they are resilient - // Note: identity secret is not part of the object graph, so it requires an ad-hoc test. - checkClusterIdentitySecretOwnerRefAndFinalizer(ctx, proxy.GetClient()) - - // Set up a periodic patch to ensure the DeploymentZone anc ClusterResourceSets are reconciled. - // Note: this is required because DeploymentZone are not watching for clusters, and thus the DeploymentZone controller - // won't be triggered when we un-pause clusters after modifying objects ownerReferences & Finalizers to test resilience. - // WRT to ClusterResourceSets, we are forcing reconcile to avoid the issue described in https://github.com/kubernetes-sigs/cluster-api/pull/10656; - // we should reconsider if possible to drop force reconcile after this PR is merged. - forceCtx, forceCancelFunc := context.WithCancel(ctx) - forcePeriodicReconcile(forceCtx, proxy.GetClient(), namespace) + var forceCtx context.Context + var forceCancelFunc context.CancelFunc + if testMode == GovmomiTestMode { + // check the cluster identity secret has expected ownerReferences and finalizers, and they are resilient + // Note: identity secret is not part of the object graph, so it requires an ad-hoc test. 
+ checkClusterIdentitySecretOwnerRefAndFinalizer(ctx, proxy.GetClient()) + + // Set up a periodic patch to ensure the DeploymentZone and ClusterResourceSets are reconciled. + // Note: this is required because DeploymentZone are not watching for clusters, and thus the DeploymentZone controller + // won't be triggered when we un-pause clusters after modifying objects ownerReferences & Finalizers to test resilience. + // WRT to ClusterResourceSets, we are forcing reconcile to avoid the issue described in https://github.com/kubernetes-sigs/cluster-api/pull/10656; + // we should reconsider if possible to drop force reconcile after this PR is merged. + forceCtx, forceCancelFunc = context.WithCancel(ctx) + forcePeriodicReconcile(forceCtx, proxy.GetClient(), namespace) + } // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. By("Checking that owner references are resilient") @@ -94,7 +103,7 @@ var _ = Describe("Ensure OwnerReferences and Finalizers are resilient with Failu framework.KubeadmControlPlaneOwnerReferenceAssertions, framework.ExpOwnerReferenceAssertions, VSphereKubernetesReferenceAssertions, - VSphereReferenceAssertions, + VSphereReferenceAssertions(), ) // This check ensures that owner references are always updated to the most recent apiVersion. By("Checking that owner references are updated to the correct API version") @@ -104,39 +113,56 @@ var _ = Describe("Ensure OwnerReferences and Finalizers are resilient with Failu framework.KubeadmControlPlaneOwnerReferenceAssertions, framework.ExpOwnerReferenceAssertions, VSphereKubernetesReferenceAssertions, - VSphereReferenceAssertions, + VSphereReferenceAssertions(), ) // This check ensures that finalizers are resilient - i.e. correctly re-reconciled, when removed. 
By("Checking that finalizers are resilient") - framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName), + framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, FilterObjectsWithKindAndName(clusterName), framework.CoreFinalizersAssertionWithLegacyClusters, framework.KubeadmControlPlaneFinalizersAssertion, framework.ExpFinalizersAssertion, - vSphereFinalizers, + vSphereFinalizers(), ) - // Stop periodic patch. - forceCancelFunc() + // Stop periodic patch if any. + if forceCancelFunc != nil { + forceCancelFunc() + } // This check ensures that the resourceVersions are stable, i.e. it verifies there are no // continuous reconciles when everything should be stable. By("Checking that resourceVersions are stable") - framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName)) + framework.ValidateResourceVersionStable(ctx, proxy, namespace, FilterObjectsWithKindAndName(clusterName)) }, } }) // Delete objects created by the test which are not in the test namespace. AfterEach(func() { - cleanupVSphereObjects(ctx, bootstrapClusterProxy) + if testMode == GovmomiTestMode { + cleanupVSphereObjects(ctx, bootstrapClusterProxy) + } }) }) }) +const ( + // well-known secret names from flavorgen / templates. + csiConfigSecretName = "vsphere-config-secret" //nolint: gosec + cpiCredentialSecretName = "cloud-provider-vsphere-credentials" +) + var ( VSphereKubernetesReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{ // Need custom Kubernetes assertions for secrets. Secrets in the CAPV tests can also be owned by the vSphereCluster. 
- "Secret": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + "Secret": func(s types.NamespacedName, owners []metav1.OwnerReference) error { + // When using vcsim CRS cannot be applied (not supported by the fake API server), so ignoring all the Secrets that should be deployed by CRS. + if testTarget == VCSimTestTarget { + if s.Name == csiConfigSecretName || s.Name == cpiCredentialSecretName { + return nil + } + } + return framework.HasOneOfExactOwners(owners, // Secrets for cluster certificates must be owned by the KubeadmControlPlane. []metav1.OwnerReference{kubeadmControlPlaneController}, @@ -149,6 +175,11 @@ var ( ) }, "ConfigMap": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + // When using vcsim CRS cannot be applied (not supported by the fake API server), so ignoring all the ConfigMaps that should be deployed by CRS. + if testTarget == VCSimTestTarget { + return nil + } + // The only configMaps considered here are those owned by a ClusterResourceSet. return framework.HasExactOwners(owners, clusterResourceSetOwner) }, @@ -156,36 +187,68 @@ var ( ) var ( - VSphereReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{ - "VSphereCluster": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - return framework.HasExactOwners(owners, clusterController) - }, - "VSphereClusterTemplate": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - return framework.HasExactOwners(owners, clusterClassOwner) - }, - "VSphereMachine": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - return framework.HasExactOwners(owners, machineController) - }, - "VSphereMachineTemplate": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - // The vSphereMachineTemplate can be owned by the Cluster or the ClusterClass. 
- return framework.HasOneOfExactOwners(owners, []metav1.OwnerReference{clusterOwner}, []metav1.OwnerReference{clusterClassOwner}) - }, - "VSphereVM": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - return framework.HasExactOwners(owners, vSphereMachineOwner) - }, - // VSphereClusterIdentity does not have any owners. - "VSphereClusterIdentity": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - // The vSphereClusterIdentity does not have any owners. - return framework.HasExactOwners(owners) - }, - "VSphereDeploymentZone": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - // The vSphereDeploymentZone does not have any owners. - return framework.HasExactOwners(owners) - }, - "VSphereFailureDomain": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { - // The vSphereFailureDomain can be owned by one or more vSphereDeploymentZones. - return framework.HasOneOfExactOwners(owners, []metav1.OwnerReference{vSphereDeploymentZoneOwner}, []metav1.OwnerReference{vSphereDeploymentZoneOwner, vSphereDeploymentZoneOwner}) - }, + VSphereReferenceAssertions = func() map[string]func(types.NamespacedName, []metav1.OwnerReference) error { + if testMode == SupervisorTestMode { + return map[string]func(types.NamespacedName, []metav1.OwnerReference) error{ + "VSphereCluster": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, clusterController) + }, + "VSphereClusterTemplate": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, clusterClassOwner) + }, + "VSphereMachine": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, machineController) + }, + "VSphereMachineTemplate": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + // The vSphereMachineTemplate can be owned by the Cluster or the ClusterClass. 
+ return framework.HasOneOfExactOwners(owners, []metav1.OwnerReference{clusterOwner}, []metav1.OwnerReference{clusterClassOwner}) + }, + "VirtualMachine": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, vmwareVSphereMachineController) + }, + + // Following objects are for vm-operator (not managed by CAPV), so checking ownerReferences is not relevant. + "VirtualMachineImage": func(_ types.NamespacedName, _ []metav1.OwnerReference) error { return nil }, + "NetworkInterface": func(_ types.NamespacedName, _ []metav1.OwnerReference) error { return nil }, + "ContentSourceBinding": func(_ types.NamespacedName, _ []metav1.OwnerReference) error { return nil }, + "VirtualMachineSetResourcePolicy": func(_ types.NamespacedName, _ []metav1.OwnerReference) error { return nil }, + "VirtualMachineClassBinding": func(_ types.NamespacedName, _ []metav1.OwnerReference) error { return nil }, + "VirtualMachineClass": func(_ types.NamespacedName, _ []metav1.OwnerReference) error { return nil }, + "VMOperatorDependencies": func(_ types.NamespacedName, _ []metav1.OwnerReference) error { return nil }, + } + } + + return map[string]func(types.NamespacedName, []metav1.OwnerReference) error{ + "VSphereCluster": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, clusterController) + }, + "VSphereClusterTemplate": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, clusterClassOwner) + }, + "VSphereMachine": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, machineController) + }, + "VSphereMachineTemplate": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + // The vSphereMachineTemplate can be owned by the Cluster or the ClusterClass. 
+ return framework.HasOneOfExactOwners(owners, []metav1.OwnerReference{clusterOwner}, []metav1.OwnerReference{clusterClassOwner}) + }, + "VSphereVM": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, vSphereMachineOwner) + }, + // VSphereClusterIdentity does not have any owners. + "VSphereClusterIdentity": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + // The vSphereClusterIdentity does not have any owners. + return framework.HasExactOwners(owners) + }, + "VSphereDeploymentZone": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + // The vSphereDeploymentZone does not have any owners. + return framework.HasExactOwners(owners) + }, + "VSphereFailureDomain": func(_ types.NamespacedName, owners []metav1.OwnerReference) error { + // The vSphereFailureDomain can be owned by one or more vSphereDeploymentZones. + return framework.HasOneOfExactOwners(owners, []metav1.OwnerReference{vSphereDeploymentZoneOwner}, []metav1.OwnerReference{vSphereDeploymentZoneOwner, vSphereDeploymentZoneOwner}) + }, + } } ) @@ -196,6 +259,8 @@ var ( vSphereDeploymentZoneOwner = metav1.OwnerReference{Kind: "VSphereDeploymentZone", APIVersion: infrav1.GroupVersion.String()} vSphereClusterIdentityOwner = metav1.OwnerReference{Kind: "VSphereClusterIdentity", APIVersion: infrav1.GroupVersion.String()} + vmwareVSphereMachineController = metav1.OwnerReference{Kind: "VSphereMachine", APIVersion: vmwarev1.GroupVersion.String(), Controller: ptr.To(true)} + // CAPI owners. clusterClassOwner = metav1.OwnerReference{Kind: "ClusterClass", APIVersion: clusterv1.GroupVersion.String()} clusterOwner = metav1.OwnerReference{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()} @@ -218,13 +283,35 @@ var ( ) // vSphereFinalizers maps VSphere infrastructure resource types to their expected finalizers. 
-var vSphereFinalizers = map[string]func(types.NamespacedName) []string{ - "VSphereVM": func(_ types.NamespacedName) []string { return []string{infrav1.VMFinalizer} }, - "VSphereClusterIdentity": func(_ types.NamespacedName) []string { return []string{infrav1.VSphereClusterIdentityFinalizer} }, - "VSphereDeploymentZone": func(_ types.NamespacedName) []string { return []string{infrav1.DeploymentZoneFinalizer} }, - "VSphereMachine": func(_ types.NamespacedName) []string { return []string{infrav1.MachineFinalizer} }, - "IPAddressClaim": func(_ types.NamespacedName) []string { return []string{infrav1.IPAddressClaimFinalizer} }, - "VSphereCluster": func(_ types.NamespacedName) []string { return []string{infrav1.ClusterFinalizer} }, +var vSphereFinalizers = func() map[string]func(types.NamespacedName) []string { + if testMode == SupervisorTestMode { + return map[string]func(types.NamespacedName) []string{ + "VirtualMachine": func(_ types.NamespacedName) []string { + // When using vcsim additional finalizers are added. + if testTarget == VCSimTestTarget { + return []string{"virtualmachine.vmoperator.vmware.com", vcsimv1.VMFinalizer} + } + return []string{"virtualmachine.vmoperator.vmware.com"} + }, + "VSphereMachine": func(_ types.NamespacedName) []string { return []string{infrav1.MachineFinalizer} }, + "VSphereCluster": func(_ types.NamespacedName) []string { return []string{vmwarev1.ClusterFinalizer} }, + } + } + + return map[string]func(types.NamespacedName) []string{ + "VSphereVM": func(_ types.NamespacedName) []string { + // When using vcsim additional finalizers are added. 
+ if testTarget == VCSimTestTarget { + return []string{infrav1.VMFinalizer, vcsimv1.VMFinalizer} + } + return []string{infrav1.VMFinalizer} + }, + "VSphereClusterIdentity": func(_ types.NamespacedName) []string { return []string{infrav1.VSphereClusterIdentityFinalizer} }, + "VSphereDeploymentZone": func(_ types.NamespacedName) []string { return []string{infrav1.DeploymentZoneFinalizer} }, + "VSphereMachine": func(_ types.NamespacedName) []string { return []string{infrav1.MachineFinalizer} }, + "IPAddressClaim": func(_ types.NamespacedName) []string { return []string{infrav1.IPAddressClaimFinalizer} }, + "VSphereCluster": func(_ types.NamespacedName) []string { return []string{infrav1.ClusterFinalizer} }, + } } // cleanupVSphereObjects deletes the Secret, VSphereClusterIdentity, and VSphereDeploymentZone created for this test. @@ -354,3 +441,26 @@ func forcePeriodicReconcile(ctx context.Context, c ctrlclient.Client, namespace } }() } + +func FilterObjectsWithKindAndName(clusterName string) func(u unstructured.Unstructured) bool { + f := clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName) + + return func(u unstructured.Unstructured) bool { + // When using vcsim CRS cannot be applied (not supported by the fake API server), so ignoring. + if testTarget == VCSimTestTarget { + if u.GetKind() == "ClusterResourceSet" { + return false + } + } + + // Following objects are for vm-operator (not managed by CAPV), so checking finalizers/resourceVersion is not relevant. + // Note: we are excluding also VirtualMachines, which instead are considered for the ownerReference tests. 
+ if testMode == SupervisorTestMode { + if sets.NewString("VirtualMachineImage", "NetworkInterface", "ContentSourceBinding", "VirtualMachineSetResourcePolicy", "VirtualMachineClass", "VMOperatorDependencies", "VirtualMachine").Has(u.GetKind()) { + return false + } + } + + return f(u) + } +} diff --git a/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go index 8879f52c88..a4cfc60ac8 100644 --- a/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go +++ b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go @@ -24,6 +24,10 @@ const ( // VCenterFinalizer allows VCenterReconciler to clean up resources associated with VCenter before // removing it from the API server. VCenterFinalizer = "vcenter.vcsim.infrastructure.cluster.x-k8s.io" + + // VMFinalizer allows this reconciler to cleanup resources before removing the + // VSphereVM from the API Server. + VMFinalizer = "vcsim.fake.infrastructure.cluster.x-k8s.io" ) // VCenterSimulatorSpec defines the desired state of the VCenterSimulator. diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go index 3c805a1fda..840106b0e5 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go @@ -214,8 +214,8 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Add finalizer first if not set to avoid the race condition between init and delete. // Note: Finalizers in general can only be added when the deletionTimestamp is not set. 
- if !controllerutil.ContainsFinalizer(virtualMachine, VMFinalizer) { - controllerutil.AddFinalizer(virtualMachine, VMFinalizer) + if !controllerutil.ContainsFinalizer(virtualMachine, vcsimv1.VMFinalizer) { + controllerutil.AddFinalizer(virtualMachine, vcsimv1.VMFinalizer) return ctrl.Result{}, nil } @@ -243,7 +243,7 @@ func (r *VirtualMachineReconciler) reconcileDelete(ctx context.Context, cluster return ret, err } - controllerutil.RemoveFinalizer(virtualMachine, VMFinalizer) + controllerutil.RemoveFinalizer(virtualMachine, vcsimv1.VMFinalizer) return ctrl.Result{}, nil } diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go index f2e3c43803..4c655d2e92 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go @@ -37,6 +37,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) func Test_Reconcile_VirtualMachine(t *testing.T) { @@ -106,7 +107,7 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { }, }, Finalizers: []string{ - VMFinalizer, // Adding this to move past the first reconcile + vcsimv1.VMFinalizer, // Adding this to move past the first reconcile }, }, } @@ -235,7 +236,7 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { }, }, Finalizers: []string{ - VMFinalizer, // Adding this to move past the first reconcile + vcsimv1.VMFinalizer, // Adding this to move past the first reconcile }, }, Status: vmoprv1.VirtualMachineStatus{ diff --git a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go index b31d2e36a9..aeedcf6cbc 100644 --- 
a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go +++ b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go @@ -50,12 +50,6 @@ import ( // TODO: investigate if we can share this code with the CAPI in memory provider. -const ( - // VMFinalizer allows this reconciler to cleanup resources before removing the - // VSphereVM from the API Server. - VMFinalizer = "vcsim.fake.infrastructure.cluster.x-k8s.io" -) - const ( // VMProvisionedCondition documents the status of VM provisioning, // which includes the VM being provisioned and with a boostrap secret available. diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller.go b/test/infrastructure/vcsim/controllers/vspherevm_controller.go index bd2c542c78..afb12095df 100644 --- a/test/infrastructure/vcsim/controllers/vspherevm_controller.go +++ b/test/infrastructure/vcsim/controllers/vspherevm_controller.go @@ -216,8 +216,8 @@ func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // Add finalizer first if not set to avoid the race condition between init and delete. // Note: Finalizers in general can only be added when the deletionTimestamp is not set. 
- if !controllerutil.ContainsFinalizer(vSphereVM, VMFinalizer) { - controllerutil.AddFinalizer(vSphereVM, VMFinalizer) + if !controllerutil.ContainsFinalizer(vSphereVM, vcsimv1.VMFinalizer) { + controllerutil.AddFinalizer(vSphereVM, vcsimv1.VMFinalizer) return ctrl.Result{}, nil } @@ -245,7 +245,7 @@ func (r *VSphereVMReconciler) reconcileDelete(ctx context.Context, cluster *clus return ret, err } - controllerutil.RemoveFinalizer(vSphereVM, VMFinalizer) + controllerutil.RemoveFinalizer(vSphereVM, vcsimv1.VMFinalizer) return ctrl.Result{}, nil } diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go index 8365f64b75..456a856626 100644 --- a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go +++ b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go @@ -133,7 +133,7 @@ func Test_Reconcile_VSphereVM(t *testing.T) { }, }, Finalizers: []string{ - VMFinalizer, // Adding this to move past the first reconcile + vcsimv1.VMFinalizer, // Adding this to move past the first reconcile }, }, } @@ -262,7 +262,7 @@ func Test_Reconcile_VSphereVM(t *testing.T) { }, }, Finalizers: []string{ - VMFinalizer, // Adding this to move past the first reconcile + vcsimv1.VMFinalizer, // Adding this to move past the first reconcile }, }, Spec: infrav1.VSphereVMSpec{