diff --git a/go.sum b/go.sum
index 5e77b1a302..756c837b07 100644
--- a/go.sum
+++ b/go.sum
@@ -48,6 +48,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/dougm/pretty v0.0.0-20160325215624-add1dbc86daf h1:A2XbJkAuMMFy/9EftoubSKBUIyiOm6Z8+X5G7QpS6so=
+github.com/dougm/pretty v0.0.0-20160325215624-add1dbc86daf/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
diff --git a/hack/tools/boskosctl/main.go b/hack/tools/boskosctl/main.go
index ac6f3a43f9..61f81f7abf 100644
--- a/hack/tools/boskosctl/main.go
+++ b/hack/tools/boskosctl/main.go
@@ -364,7 +364,7 @@ func release(ctx context.Context, client *boskos.Client, resourceName, vSphereUs
 	log.Info("Cleaning up vSphere")
 	// Note: We intentionally want to skip clusterModule cleanup. If we run this too often we might hit race conditions
 	// when other tests are creating cluster modules in parallel.
-	if err := j.CleanupVSphere(ctx, []string{vSphereFolder}, []string{vSphereResourcePool}, []string{vSphereFolder}, true); err != nil {
+	if err := j.CleanupVSphere(ctx, []string{vSphereFolder}, []string{vSphereResourcePool}, []string{vSphereFolder}, resourceName, true); err != nil {
 		log.Info("Cleaning up vSphere failed")
 
 		// Try to release resource as dirty.
diff --git a/hack/tools/janitor/main.go b/hack/tools/janitor/main.go
index 946f18888b..a49be4b880 100644
--- a/hack/tools/janitor/main.go
+++ b/hack/tools/janitor/main.go
@@ -162,7 +162,7 @@ func run(ctx context.Context) error {
 		j := janitor.NewJanitor(vSphereClients, false)
 
 		log.Info("Cleaning up vSphere")
-		if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil {
+		if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, res.Name, false); err != nil {
 			log.Info("Cleaning up vSphere failed")
 
 			// Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty
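Both call sites above now thread the boskos resource name into the janitor, so CNS volume cleanup can be scoped per resource. A minimal sketch of the resulting call shape (the import path and variable names are illustrative assumptions, not part of this diff):

```go
package sketch

import (
	"context"

	// Assumed import path for the janitor package patched in this diff.
	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/janitor"
)

// cleanup mirrors the two patched call sites: the boskos resource name is the
// new fifth argument and selects which CNS volumes get deleted.
func cleanup(ctx context.Context, j *janitor.Janitor, folder, resourcePool, resourceName string) error {
	return j.CleanupVSphere(ctx,
		[]string{folder},       // folders to clean up
		[]string{resourcePool}, // resource pools to clean up
		[]string{folder},       // folders to delete VMs from
		resourceName,           // boskos resource name, used to select CNS volumes
		false,                  // skipClusterModule
	)
}
```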
diff --git a/hack/tools/pkg/janitor/janitor.go b/hack/tools/pkg/janitor/janitor.go
index 1da63842f7..d7daaeca75 100644
--- a/hack/tools/pkg/janitor/janitor.go
+++ b/hack/tools/pkg/janitor/janitor.go
@@ -25,11 +25,13 @@ import (
 	"strings"
 
 	"github.com/pkg/errors"
+	cnstypes "github.com/vmware/govmomi/cns/types"
 	"github.com/vmware/govmomi/object"
 	govmomicluster "github.com/vmware/govmomi/vapi/cluster"
 	"github.com/vmware/govmomi/vim25/mo"
 	"github.com/vmware/govmomi/vim25/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 )
@@ -52,8 +54,12 @@ type virtualMachine struct {
 	object *object.VirtualMachine
 }
 
+// boskosResourceLabel is used to identify volumes created in e2e tests.
+// The value should contain the boskos resource name.
+const boskosResourceLabel = "capv-e2e-test-boskos-resource"
+
 // CleanupVSphere cleans up vSphere VMs, folders and resource pools.
-func (s *Janitor) CleanupVSphere(ctx context.Context, folders, resourcePools, vmFolders []string, skipClusterModule bool) error {
+func (s *Janitor) CleanupVSphere(ctx context.Context, folders, resourcePools, vmFolders []string, boskosResourceName string, skipClusterModule bool) error {
 	errList := []error{}
 
 	// Delete vms to cleanup folders and resource pools.
@@ -86,6 +92,11 @@ func (s *Janitor) CleanupVSphere(ctx context.Context, folders, resourcePools, vm
 		return errors.Wrap(err, "cleaning up folders")
 	}
 
+	// Delete CNS volumes.
+	if err := s.DeleteCNSVolumes(ctx, boskosResourceName); err != nil {
+		return errors.Wrap(err, "cleaning up volumes")
+	}
+
 	if skipClusterModule {
 		return nil
 	}
@@ -197,6 +208,118 @@ func (s *Janitor) deleteVSphereVMs(ctx context.Context, folder string) error {
 	return nil
 }
 
+// DeleteCNSVolumes deletes all CNS volumes created by e2e tests which carry a
+// label matching the given boskos resource name.
+func (s *Janitor) DeleteCNSVolumes(ctx context.Context, boskosResourceName string) error {
+	log := ctrl.LoggerFrom(ctx).WithName("volumes")
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	log.Info("Deleting volumes")
+
+	type cnsVolumeToDelete struct {
+		volumeID     cnstypes.CnsVolumeId
+		pvcName      string
+		pvcNamespace string
+	}
+	volumesToDelete := []cnsVolumeToDelete{}
+
+	queryFilter := cnstypes.CnsQueryFilter{
+		Labels: []types.KeyValue{
+			{
+				Key:   boskosResourceLabel,
+				Value: boskosResourceName,
+			},
+		},
+	}
+
+	for {
+		res, err := s.vSphereClients.CNS.QueryVolume(ctx, queryFilter)
+		if err != nil {
+			return err
+		}
+
+		for _, volume := range res.Volumes {
+			var pvcMetadata *cnstypes.CnsKubernetesEntityMetadata
+			for _, meta := range volume.Metadata.EntityMetadata {
+				k8sMetadata, ok := meta.(*cnstypes.CnsKubernetesEntityMetadata)
+				if !ok {
+					continue
+				}
+				if k8sMetadata.EntityType != string(cnstypes.CnsKubernetesEntityTypePVC) {
+					continue
+				}
+				pvcMetadata = k8sMetadata
+			}
+
+			if pvcMetadata == nil {
+				// Ignore volumes which are not backing a PVC.
+				continue
+			}
+
+			var matchesBoskosResourceName bool
+			// Check again that the volume has a matching label.
+			for _, v := range pvcMetadata.Labels {
+				if v.Key != boskosResourceLabel {
+					continue
+				}
+				if v.Value != boskosResourceName {
+					continue
+				}
+				matchesBoskosResourceName = true
+			}
+
+			// Ignore volumes which don't have a matching label.
+			if !matchesBoskosResourceName {
+				continue
+			}
+
+			volumesToDelete = append(volumesToDelete, cnsVolumeToDelete{
+				volumeID:     volume.VolumeId,
+				pvcName:      pvcMetadata.EntityName,
+				pvcNamespace: pvcMetadata.Namespace,
+			})
+		}
+
+		if res.Cursor.Offset == res.Cursor.TotalRecords || len(res.Volumes) == 0 {
+			break
+		}
+
+		queryFilter.Cursor = &res.Cursor
+	}
+
+	if len(volumesToDelete) == 0 {
+		log.Info("No CNS Volumes to delete")
+		return nil
+	}
+
+	deleteTasks := []*object.Task{}
+	for _, volume := range volumesToDelete {
+		log := log.WithValues("volumeID", volume.volumeID, "PersistentVolumeClaim", klog.KRef(volume.pvcNamespace, volume.pvcName))
+
+		log.Info("Deleting CNS Volume in vSphere")
+
+		if s.dryRun {
+			// Skip the actual delete on dryRun.
+			continue
+		}
+
+		// Trigger deletion of the CNS Volume.
+		task, err := s.vSphereClients.CNS.DeleteVolume(ctx, []cnstypes.CnsVolumeId{volume.volumeID}, true)
+		if err != nil {
+			return errors.Wrap(err, "failed to create CNS Volume deletion task")
+		}
+
+		log.Info("Created CNS Volume deletion task", "task", task.Reference().Value)
+		deleteTasks = append(deleteTasks, task)
+	}
+
+	// Wait for all delete tasks to succeed.
+	if err := waitForTasksFinished(ctx, deleteTasks, false); err != nil {
+		return errors.Wrap(err, "failed to wait for CNS Volume deletion tasks to finish")
+	}
+
+	return nil
+}
+
 // deleteObjectChildren deletes all child objects in a given object in vSphere if they don't
 // contain any virtual machine.
 // An object only gets deleted if:
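DeleteCNSVolumes pages through CNS query results by feeding the returned cursor back into the filter. Stripped of the janitor specifics, the pattern looks like this (a sketch against the govmomi CNS client, using only calls the diff above already relies on; not part of the patch itself):

```go
package sketch

import (
	"context"

	"github.com/vmware/govmomi/cns"
	cnstypes "github.com/vmware/govmomi/cns/types"
	"github.com/vmware/govmomi/vim25/types"
)

// volumesByLabel collects all CNS volumes carrying the given label by paging
// through QueryVolume results, the same cursor handling DeleteCNSVolumes does.
func volumesByLabel(ctx context.Context, c *cns.Client, key, value string) ([]cnstypes.CnsVolume, error) {
	filter := cnstypes.CnsQueryFilter{
		Labels: []types.KeyValue{{Key: key, Value: value}},
	}

	volumes := []cnstypes.CnsVolume{}
	for {
		res, err := c.QueryVolume(ctx, filter)
		if err != nil {
			return nil, err
		}
		volumes = append(volumes, res.Volumes...)

		// The result set is complete once the cursor has caught up with the
		// total record count (or the server returned an empty page).
		if res.Cursor.Offset == res.Cursor.TotalRecords || len(res.Volumes) == 0 {
			break
		}
		filter.Cursor = &res.Cursor
	}
	return volumes, nil
}
```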
diff --git a/hack/tools/pkg/janitor/janitor_test.go b/hack/tools/pkg/janitor/janitor_test.go
index b6d51611a1..2bd0f105d2 100644
--- a/hack/tools/pkg/janitor/janitor_test.go
+++ b/hack/tools/pkg/janitor/janitor_test.go
@@ -24,13 +24,19 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/google/uuid"
 	"github.com/onsi/gomega"
 	"github.com/onsi/gomega/gbytes"
 	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/cns"
+	cnssimulator "github.com/vmware/govmomi/cns/simulator"
+	cnstypes "github.com/vmware/govmomi/cns/types"
 	"github.com/vmware/govmomi/find"
+	"github.com/vmware/govmomi/object"
 	"github.com/vmware/govmomi/simulator"
 	"github.com/vmware/govmomi/simulator/vpx"
 	"github.com/vmware/govmomi/view"
+	"github.com/vmware/govmomi/vim25/types"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -57,6 +63,8 @@ func setup(ctx context.Context, t *testing.T) (*VSphereClients, *vcsim.Simulator
 		panic(fmt.Sprintf("unable to create simulator %s", err))
 	}
 
+	model.Service.RegisterSDK(cnssimulator.New())
+
 	fmt.Printf(" export GOVC_URL=%s\n", vcsim.ServerURL())
 	fmt.Printf(" export GOVC_USERNAME=%s\n", vcsim.Username())
 	fmt.Printf(" export GOVC_PASSWORD=%s\n", vcsim.Password())
@@ -77,7 +85,7 @@ func setup(ctx context.Context, t *testing.T) (*VSphereClients, *vcsim.Simulator
 	return clients, vcsim
 }
 
-func setupTestCase(g *gomega.WithT, sim *vcsim.Simulator, objects []*vcsimObject) string {
+func setupTestCase(ctx context.Context, g *gomega.WithT, sim *vcsim.Simulator, clients *VSphereClients, objects []vcsimObject) string {
 	g.THelper()
 
 	relativePath := rand.String(10)
@@ -86,13 +94,13 @@
 	baseFolder := vcsimFolder("")
 	baseDatastore := vcsimDatastore("", os.TempDir())
 	// Create base objects for the test case
-	g.Expect(baseRP.Create(sim, relativePath)).To(gomega.Succeed())
-	g.Expect(baseFolder.Create(sim, relativePath)).To(gomega.Succeed())
-	g.Expect(baseDatastore.Create(sim, relativePath)).To(gomega.Succeed())
+	g.Expect(baseRP.Create(ctx, sim, clients, relativePath)).To(gomega.Succeed())
+	g.Expect(baseFolder.Create(ctx, sim, clients, relativePath)).To(gomega.Succeed())
+	g.Expect(baseDatastore.Create(ctx, sim, clients, relativePath)).To(gomega.Succeed())
 
 	// Create objects
 	for _, object := range objects {
-		g.Expect(object.Create(sim, relativePath)).To(gomega.Succeed())
+		g.Expect(object.Create(ctx, sim, clients, relativePath)).To(gomega.Succeed())
 	}
 
 	return relativePath
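The one-line RegisterSDK call above is what makes the CNS endpoint available inside vcsim; without it, CNS requests against the simulator fail. Isolated from the repo's vcsim helper, the wiring looks roughly like this (a standalone sketch under the assumption that the govmomi CNS simulator registry is used exactly as in the patched setup function):

```go
package sketch

import (
	"context"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/cns"
	cnssimulator "github.com/vmware/govmomi/cns/simulator"
	"github.com/vmware/govmomi/simulator"
)

// newSimulatedCNSClient starts a vCenter simulator, registers the CNS SDK
// endpoint on it, and returns a CNS client talking to that simulator.
func newSimulatedCNSClient(ctx context.Context) (*cns.Client, func(), error) {
	model := simulator.VPX()
	if err := model.Create(); err != nil {
		return nil, nil, err
	}

	// Same call as in setup() above: expose the CNS SDK on the simulator.
	model.Service.RegisterSDK(cnssimulator.New())

	server := model.Service.NewServer()
	cleanup := func() {
		server.Close()
		model.Remove()
	}

	client, err := govmomi.NewClient(ctx, server.URL, true)
	if err != nil {
		cleanup()
		return nil, nil, err
	}

	cnsClient, err := cns.NewClient(ctx, client.Client)
	if err != nil {
		cleanup()
		return nil, nil, err
	}
	return cnsClient, cleanup, nil
}
```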
@@ -109,16 +117,17 @@ func Test_janitor_deleteVSphereVMs(t *testing.T) {
 	// Initialize and start vcsim
 	clients, sim := setup(ctx, t)
+	defer sim.Destroy()
 
 	tests := []struct {
 		name    string
-		objects []*vcsimObject
+		objects []vcsimObject
 		wantErr bool
 		want    map[string]bool
 	}{
 		{
 			name: "delete all VMs",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimVirtualMachine("foo"),
 			},
 			wantErr: false,
 		},
 		{
 			name: "recursive vm deletion",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimResourcePool("a"),
 				vcsimFolder("a"),
 				vcsimResourcePool("a/b"),
@@ -152,7 +161,7 @@
 		t.Run(tt.name, func(t *testing.T) {
 			g := gomega.NewWithT(t)
 
-			relativePath := setupTestCase(g, sim, tt.objects)
+			relativePath := setupTestCase(ctx, g, sim, clients, tt.objects)
 
 			s := &Janitor{
 				dryRun:         false,
@@ -187,12 +196,13 @@ func Test_janitor_deleteObjectChildren(t *testing.T) {
 	// Initialize and start vcsim
 	clients, sim := setup(ctx, t)
+	defer sim.Destroy()
 
 	tests := []struct {
 		name       string
 		basePath   string
 		objectType string
-		objects    []*vcsimObject
+		objects    []vcsimObject
 		wantErr    bool
 		want       map[string]bool
 	}{
 		{
 			name:       "should preserve resource pool if it contains a vm and delete empty resource pools",
 			basePath:   resourcePoolBase,
 			objectType: "ResourcePool",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimResourcePool("a"),
 				vcsimResourcePool("b"), // this one will be deleted
 				vcsimFolder("a"),
@@ -216,7 +226,7 @@
 			name:       "should preserve folder if it contains a vm and delete empty folders",
 			basePath:   folderBase,
 			objectType: "Folder",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimResourcePool("a"),
 				vcsimFolder("a"),
 				vcsimFolder("b"), // this one will be deleted
@@ -232,13 +242,13 @@
 			name:       "no-op",
 			basePath:   resourcePoolBase,
 			objectType: "ResourcePool",
-			objects:    []*vcsimObject{},
+			objects:    []vcsimObject{},
 		},
 		{
 			name:       "single resource pool",
 			basePath:   resourcePoolBase,
 			objectType: "ResourcePool",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimResourcePool("a"),
 			},
 		},
 		{
 			name:       "multiple nested resource pools",
 			basePath:   resourcePoolBase,
 			objectType: "ResourcePool",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimResourcePool("a"),
 				vcsimResourcePool("a/b"),
 				vcsimResourcePool("a/b/c"),
@@ -259,13 +269,13 @@
 			name:       "no-op",
 			basePath:   folderBase,
 			objectType: "Folder",
-			objects:    []*vcsimObject{},
+			objects:    []vcsimObject{},
 		},
 		{
 			name:       "single folder",
 			basePath:   folderBase,
 			objectType: "Folder",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimFolder("a"),
 			},
 		},
 		{
 			name:       "multiple nested folders",
 			basePath:   folderBase,
 			objectType: "Folder",
-			objects: []*vcsimObject{
+			objects: []vcsimObject{
 				vcsimFolder("a"),
 				vcsimFolder("a/b"),
 				vcsimFolder("a/b/c"),
@@ -287,7 +297,7 @@
 		t.Run(tt.name, func(t *testing.T) {
 			g := gomega.NewWithT(t)
 
-			relativePath := setupTestCase(g, sim, tt.objects)
+			relativePath := setupTestCase(ctx, g, sim, clients, tt.objects)
 
 			inventoryPath := path.Join(tt.basePath, relativePath)
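The switch from []*vcsimObject to []vcsimObject throughout these tables works because, further down in this patch, vcsimObject becomes an interface instead of a struct, so one table can mix vSphere inventory fixtures and CNS volume fixtures. Reduced to the bare pattern (hypothetical names, not the patch's types):

```go
package sketch

import "context"

// testObject plays the role vcsimObject plays in the patch: one interface so a
// single test table can hold heterogeneous fixtures behind one Create call.
type testObject interface {
	Create(ctx context.Context, testPrefix string) error
}

type inventoryObject struct{ path string }
type cnsVolumeObject struct{ resource string }

func (o *inventoryObject) Create(_ context.Context, _ string) error { return nil }
func (o *cnsVolumeObject) Create(_ context.Context, _ string) error { return nil }

// createAll is, in spirit, what setupTestCase does with the new signature.
func createAll(ctx context.Context, prefix string, objects []testObject) error {
	for _, o := range objects {
		if err := o.Create(ctx, prefix); err != nil {
			return err
		}
	}
	return nil
}
```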
vcsimCNSVolume("this", true), + vcsimCNSVolume("this", true), + vcsimCNSVolume("this", false), + }, + wantVolumes: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := gomega.NewWithT(t) + + s := &Janitor{ + dryRun: false, + vSphereClients: clients, + } + + relativePath := setupTestCase(ctx, g, sim, clients, tt.objects) + + boskosResource := relativePath + "-this" + + // Check that all volumes exist. + cnsVolumes, err := queryTestCNSVolumes(ctx, clients.CNS, relativePath) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(cnsVolumes).To(gomega.HaveLen(len(tt.objects))) + + // Run deletion but only for the given boskosResource. + g.Expect(s.DeleteCNSVolumes(ctx, boskosResource)).To(gomega.Succeed()) + + // Check that the expected number of volumes are preserved. + cnsVolumes, err = queryTestCNSVolumes(ctx, clients.CNS, relativePath) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(cnsVolumes).To(gomega.HaveLen(tt.wantVolumes)) + }) + } +} + func Test_janitor_CleanupVSphere(t *testing.T) { ctx := context.Background() ctx = ctrl.LoggerInto(ctx, klog.Background()) // Initialize and start vcsim clients, sim := setup(ctx, t) + defer sim.Destroy() tests := []struct { - name string - dryRun bool - objects []*vcsimObject - want map[string]bool + name string + dryRun bool + objects []vcsimObject + want map[string]bool + wantVolumes int }{ { - name: "no-op", - dryRun: false, - objects: nil, - want: map[string]bool{}, + name: "no-op", + dryRun: false, + objects: nil, + want: map[string]bool{}, + wantVolumes: 0, }, { - name: "dryRun: no-op", - dryRun: true, - objects: nil, - want: map[string]bool{}, + name: "dryRun: no-op", + dryRun: true, + objects: nil, + want: map[string]bool{}, + wantVolumes: 0, }, { name: "delete everything", dryRun: false, - objects: []*vcsimObject{ + objects: []vcsimObject{ vcsimFolder("a"), vcsimResourcePool("a"), vcsimVirtualMachine("a/b"), vcsimFolder("c"), vcsimResourcePool("c"), + vcsimCNSVolume("this", true), + vcsimCNSVolume("other", true), }, - want: map[string]bool{}, + want: map[string]bool{}, + wantVolumes: 1, }, { name: "dryRun: would delete everything", dryRun: true, - objects: []*vcsimObject{ + objects: []vcsimObject{ vcsimFolder("a"), vcsimResourcePool("a"), vcsimVirtualMachine("a/b"), vcsimFolder("c"), vcsimResourcePool("c"), + vcsimCNSVolume("this", true), }, want: map[string]bool{ "Folder/a": true, @@ -365,26 +450,29 @@ func Test_janitor_CleanupVSphere(t *testing.T) { "ResourcePool/c": true, "VirtualMachine/a/b": true, }, + wantVolumes: 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := gomega.NewWithT(t) - relativePath := setupTestCase(g, sim, tt.objects) + relativePath := setupTestCase(ctx, g, sim, clients, tt.objects) s := &Janitor{ dryRun: tt.dryRun, vSphereClients: clients, } + boskosResource := relativePath + "-this" + folder := vcsimFolder("").Path(relativePath) resourcePool := vcsimResourcePool("").Path(relativePath) folders := []string{folder} resourcePools := []string{resourcePool} - g.Expect(s.CleanupVSphere(ctx, folders, resourcePools, folders, false)).To(gomega.Succeed()) + g.Expect(s.CleanupVSphere(ctx, folders, resourcePools, folders, boskosResource, false)).To(gomega.Succeed()) existingObjects, err := recursiveListFoldersAndResourcePools(ctx, relativePath, clients.Govmomi, clients.Finder, clients.ViewManager) g.Expect(err).ToNot(gomega.HaveOccurred()) g.Expect(existingObjects).To(gomega.BeEquivalentTo(tt.want)) @@ -392,10 +480,32 @@ func Test_janitor_CleanupVSphere(t 
@@ -392,10 +480,32 @@ func Test_janitor_CleanupVSphere(t *testing.T) {
 			// Ensure the parent object still exists
 			assertObjectExists(ctx, g, clients.Finder, folder)
 			assertObjectExists(ctx, g, clients.Finder, resourcePool)
+
+			cnsVolumes, err := queryTestCNSVolumes(ctx, clients.CNS, relativePath)
+			g.Expect(err).ToNot(gomega.HaveOccurred())
+			g.Expect(cnsVolumes).To(gomega.HaveLen(tt.wantVolumes))
 		})
 	}
 }
 
+func queryTestCNSVolumes(ctx context.Context, client *cns.Client, testPrefix string) ([]cnstypes.CnsVolume, error) {
+	// VCSim only implements query filters on volume IDs, so query everything
+	// and filter by the test's name prefix instead.
+	res, err := client.QueryVolume(ctx, cnstypes.CnsQueryFilter{})
+	if err != nil {
+		return nil, err
+	}
+
+	volumes := []cnstypes.CnsVolume{}
+
+	for _, volume := range res.Volumes {
+		if strings.HasPrefix(volume.Name, testPrefix) {
+			volumes = append(volumes, volume)
+		}
+	}
+
+	return volumes, nil
+}
+
 func assertObjectExists(ctx context.Context, g *gomega.WithT, finder *find.Finder, inventoryPath string) {
 	g.THelper()
@@ -427,13 +537,17 @@ func recursiveListFoldersAndResourcePools(ctx context.Context, testPrefix string
 	return objects, nil
 }
 
-type vcsimObject struct {
+type vcsimObject interface {
+	Create(ctx context.Context, sim *vcsim.Simulator, vsphereClients *VSphereClients, testPrefix string) error
+}
+
+type vcsimInventoryObject struct {
 	pathSuffix       string
 	objectType       string
 	datastoreTempDir string
 }
 
-func (o vcsimObject) Path(testPrefix string) string {
+func (o vcsimInventoryObject) Path(testPrefix string) string {
 	var pathPrefix string
 
 	switch o.objectType {
@@ -453,7 +567,7 @@
 	return path.Join(pathPrefix, testPrefix, o.pathSuffix)
 }
 
-func (o vcsimObject) Create(sim *vcsim.Simulator, testPrefix string) error {
+func (o vcsimInventoryObject) Create(_ context.Context, sim *vcsim.Simulator, _ *VSphereClients, testPrefix string) error {
 	var cmd string
 	switch o.objectType {
 	case "ResourcePool":
@@ -487,18 +601,69 @@
 	return nil
 }
 
-func vcsimResourcePool(p string) *vcsimObject {
-	return &vcsimObject{pathSuffix: p, objectType: "ResourcePool"}
+type vcsimCNSVolumeObject struct {
+	boskosResourceName string
+	hasPVCMetadata     bool
+}
+
+func (v vcsimCNSVolumeObject) Create(ctx context.Context, _ *vcsim.Simulator, vsphereClients *VSphereClients, testPrefix string) error {
+	ds, err := vsphereClients.Finder.Datastore(ctx, testPrefix)
+	if err != nil {
+		return err
+	}
+
+	spec := cnstypes.CnsVolumeCreateSpec{
+		Name:       fmt.Sprintf("%s-pvc-%s", testPrefix, uuid.New().String()),
+		VolumeType: string(cnstypes.CnsVolumeTypeBlock),
+		Datastores: []types.ManagedObjectReference{ds.Reference()},
+		Metadata: cnstypes.CnsVolumeMetadata{
+			EntityMetadata: []cnstypes.BaseCnsEntityMetadata{},
+		},
+		BackingObjectDetails: &cnstypes.CnsBlockBackingDetails{
+			CnsBackingObjectDetails: cnstypes.CnsBackingObjectDetails{
+				CapacityInMb: 5120,
+			},
+		},
+	}
+
+	if v.hasPVCMetadata {
+		spec.Metadata.EntityMetadata = append(spec.Metadata.EntityMetadata, &cnstypes.CnsKubernetesEntityMetadata{
+			EntityType: string(cnstypes.CnsKubernetesEntityTypePVC),
+			CnsEntityMetadata: cnstypes.CnsEntityMetadata{
+				Labels: []types.KeyValue{
+					{
+						Key:   boskosResourceLabel,
+						Value: testPrefix + "-" + v.boskosResourceName,
+					},
+				},
+			},
+		})
+	}
+
+	task, err := vsphereClients.CNS.CreateVolume(ctx, []cnstypes.CnsVolumeCreateSpec{spec})
+	if err != nil {
+		return err
+	}
+
+	return waitForTasksFinished(ctx, []*object.Task{task}, false)
+}
+
+func vcsimResourcePool(p string) *vcsimInventoryObject {
+	return &vcsimInventoryObject{pathSuffix: p, objectType: "ResourcePool"}
+}
+
+func vcsimFolder(p string) *vcsimInventoryObject {
+	return &vcsimInventoryObject{pathSuffix: p, objectType: "Folder"}
 }
 
-func vcsimFolder(p string) *vcsimObject {
-	return &vcsimObject{pathSuffix: p, objectType: "Folder"}
+func vcsimDatastore(p, datastoreTempDir string) *vcsimInventoryObject {
+	return &vcsimInventoryObject{pathSuffix: p, objectType: "Datastore", datastoreTempDir: datastoreTempDir}
 }
 
-func vcsimDatastore(p, datastoreTempDir string) *vcsimObject {
-	return &vcsimObject{pathSuffix: p, objectType: "Datastore", datastoreTempDir: datastoreTempDir}
+func vcsimVirtualMachine(p string) *vcsimInventoryObject {
+	return &vcsimInventoryObject{pathSuffix: p, objectType: "VirtualMachine"}
 }
 
-func vcsimVirtualMachine(p string) *vcsimObject {
-	return &vcsimObject{pathSuffix: p, objectType: "VirtualMachine"}
+func vcsimCNSVolume(boskosResourceName string, hasPVCMetadata bool) *vcsimCNSVolumeObject {
+	return &vcsimCNSVolumeObject{boskosResourceName: boskosResourceName, hasPVCMetadata: hasPVCMetadata}
 }
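waitForTasksFinished is called both by DeleteCNSVolumes and by the volume fixture above, but it is pre-existing code that this diff does not show. For orientation, a helper with that shape could look like the following; this is a sketch only, assuming plain govmomi task semantics, and the real implementation in this package may differ (in particular in how it treats the second parameter):

```go
package sketch

import (
	"context"

	"github.com/vmware/govmomi/object"
)

// waitForTasksFinished waits for every task to complete. With ignoreErrors
// set, individual task failures are swallowed instead of aborting the wait.
// Hypothetical stand-in for the unexported helper used in the patch.
func waitForTasksFinished(ctx context.Context, tasks []*object.Task, ignoreErrors bool) error {
	for _, t := range tasks {
		if err := t.Wait(ctx); err != nil && !ignoreErrors {
			return err
		}
	}
	return nil
}
```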
diff --git a/hack/tools/pkg/janitor/vsphere.go b/hack/tools/pkg/janitor/vsphere.go
index 46c51a5c09..2e3cae0db9 100644
--- a/hack/tools/pkg/janitor/vsphere.go
+++ b/hack/tools/pkg/janitor/vsphere.go
@@ -22,6 +22,7 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/cns"
 	"github.com/vmware/govmomi/find"
 	"github.com/vmware/govmomi/list"
 	"github.com/vmware/govmomi/object"
@@ -52,6 +53,7 @@ type VSphereClients struct {
 	FieldsManager *object.CustomFieldsManager
 	Finder        *find.Finder
 	ViewManager   *view.Manager
+	CNS           *cns.Client
 }
 
 // Logout logs out all clients. It logs errors if the context contains a logger.
@@ -110,6 +112,16 @@ func NewVSphereClients(ctx context.Context, input NewVSphereClientsInput) (*VSph
 	viewManager := view.NewManager(vimClient)
 	finder := find.NewFinder(vimClient, false)
 
+	dc, err := finder.Datacenter(ctx, "*")
+	if err != nil {
+		return nil, err
+	}
+	finder.SetDatacenter(dc)
+
+	cnsClient, err := cns.NewClient(ctx, vimClient)
+	if err != nil {
+		return nil, err
+	}
 
 	return &VSphereClients{
 		Vim:           vimClient,
@@ -118,6 +130,7 @@ func NewVSphereClients(ctx context.Context, input NewVSphereClientsInput) (*VSph
 		FieldsManager: fieldsManager,
 		Finder:        finder,
 		ViewManager:   viewManager,
+		CNS:           cnsClient,
 	}, nil
 }
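NewVSphereClients now resolves a datacenter up front because CNS-related finder lookups (such as the Datastore call in the test fixture) need one to be set. Note that finder.Datacenter(ctx, "*") fails if the wildcard matches more than one datacenter, so the patch implicitly assumes a single-datacenter environment. A more defensive variant, hypothetical and not what the patch does, would be:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/object"
)

// defaultDatacenter sets the finder's datacenter only when the environment
// contains exactly one, and fails with a clear error otherwise.
func defaultDatacenter(ctx context.Context, finder *find.Finder) (*object.Datacenter, error) {
	dcs, err := finder.DatacenterList(ctx, "*")
	if err != nil {
		return nil, err
	}
	if len(dcs) != 1 {
		return nil, fmt.Errorf("expected exactly one datacenter, found %d", len(dcs))
	}
	finder.SetDatacenter(dcs[0])
	return dcs[0], nil
}
```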
diff --git a/test/e2e/node_drain_test.go b/test/e2e/node_drain_test.go
index d53b4bec9a..15f85f3c63 100644
--- a/test/e2e/node_drain_test.go
+++ b/test/e2e/node_drain_test.go
@@ -19,6 +19,7 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"os"
 	"strings"
 	"time"
@@ -42,6 +43,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+const boskosResourceLabel = "capv-e2e-test-boskos-resource"
+
 var _ = Describe("When testing Node drain [supervisor]", func() {
 	const specName = "node-drain" // copied from CAPI
 	Setup(specName, func(testSpecificSettingsGetter func() testSettings) {
@@ -293,6 +296,10 @@ func generateStatefulset(input generateStatefulsetInput) *appsv1.StatefulSet {
 			MatchLabels: map[string]string{
 				"app":         "nonstop",
 				"statefulset": input.Name,
+				"e2e-test":    "node-drain",
+				// All labels get propagated down to CNS Volumes in vSphere.
+				// This label will be used by the janitor to clean up orphaned CNS volumes.
+				boskosResourceLabel: os.Getenv("BOSKOS_RESOURCE_NAME"),
 			},
 		},
 		Template: corev1.PodTemplateSpec{
@@ -300,6 +307,10 @@
 			ObjectMeta: metav1.ObjectMeta{
 				Labels: map[string]string{
 					"app":         "nonstop",
 					"statefulset": input.Name,
+					"e2e-test":    "node-drain",
+					// All labels get propagated down to CNS Volumes in vSphere.
+					// This label will be used by the janitor to clean up orphaned CNS volumes.
+					boskosResourceLabel: os.Getenv("BOSKOS_RESOURCE_NAME"),
 				},
 			},
 			Spec: corev1.PodSpec{
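The comments above rely on the vSphere CSI driver syncing Kubernetes metadata into CNS: labels on the objects backing a volume end up as CNS entity metadata, which is exactly what DeleteCNSVolumes filters on. As an illustration of the same idea applied to a claim template (a hypothetical helper, not part of the test; the actual propagation path depends on the CSI metadata sync):

```go
package sketch

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const boskosResourceLabel = "capv-e2e-test-boskos-resource"

// labeledClaimTemplate returns a volumeClaimTemplate whose labels carry the
// boskos resource name, so every PVC a StatefulSet stamps out, and in turn the
// CNS volume behind it, stays discoverable by the janitor.
func labeledClaimTemplate(name string) corev1.PersistentVolumeClaim {
	return corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				boskosResourceLabel: os.Getenv("BOSKOS_RESOURCE_NAME"),
			},
		},
	}
}
```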