🌱 e2e: properly label and cleanup created PVCs during e2e tests in prow (#3361)

* e2e: properly label and cleanup created PVCs during e2e tests in prow

* fixes

* fixup
chrischdi authored Feb 18, 2025
1 parent 7602fa1 commit 092907a
Showing 7 changed files with 364 additions and 50 deletions.
2 changes: 2 additions & 0 deletions go.sum
@@ -48,6 +48,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dougm/pretty v0.0.0-20160325215624-add1dbc86daf h1:A2XbJkAuMMFy/9EftoubSKBUIyiOm6Z8+X5G7QpS6so=
github.com/dougm/pretty v0.0.0-20160325215624-add1dbc86daf/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
2 changes: 1 addition & 1 deletion hack/tools/boskosctl/main.go
@@ -364,7 +364,7 @@ func release(ctx context.Context, client *boskos.Client, resourceName, vSphereUs
log.Info("Cleaning up vSphere")
// Note: We intentionally want to skip clusterModule cleanup. If we run this too often we might hit race conditions
// when other tests are creating cluster modules in parallel.
if err := j.CleanupVSphere(ctx, []string{vSphereFolder}, []string{vSphereResourcePool}, []string{vSphereFolder}, true); err != nil {
if err := j.CleanupVSphere(ctx, []string{vSphereFolder}, []string{vSphereResourcePool}, []string{vSphereFolder}, resourceName, true); err != nil {
log.Info("Cleaning up vSphere failed")

// Try to release resource as dirty.
2 changes: 1 addition & 1 deletion hack/tools/janitor/main.go
@@ -162,7 +162,7 @@ func run(ctx context.Context) error {
j := janitor.NewJanitor(vSphereClients, false)

log.Info("Cleaning up vSphere")
if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil {
if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, res.Name, false); err != nil {
log.Info("Cleaning up vSphere failed")

// Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty
125 changes: 124 additions & 1 deletion hack/tools/pkg/janitor/janitor.go
@@ -25,11 +25,13 @@ import (
"strings"

"github.com/pkg/errors"
cnstypes "github.com/vmware/govmomi/cns/types"
"github.com/vmware/govmomi/object"
govmomicluster "github.com/vmware/govmomi/vapi/cluster"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
)

@@ -52,8 +52,12 @@ type virtualMachine struct {
object *object.VirtualMachine
}

// boskosResourceLabel is used to identify volumes created in e2e tests.
// The value should contain the boskos resource name.
const boskosResourceLabel = "capv-e2e-test-boskos-resource"

// CleanupVSphere cleans up vSphere VMs, folders and resource pools.
func (s *Janitor) CleanupVSphere(ctx context.Context, folders, resourcePools, vmFolders []string, skipClusterModule bool) error {
func (s *Janitor) CleanupVSphere(ctx context.Context, folders, resourcePools, vmFolders []string, boskosResourceName string, skipClusterModule bool) error {
errList := []error{}

// Delete vms to cleanup folders and resource pools.
@@ -86,6 +92,11 @@ func (s *Janitor) CleanupVSphere(ctx context.Context, folders, resourcePools, vm
return errors.Wrap(err, "cleaning up folders")
}

// Delete CNS volumes.
if err := s.DeleteCNSVolumes(ctx, boskosResourceName); err != nil {
return errors.Wrap(err, "cleaning up volumes")
}

if skipClusterModule {
return nil
}
@@ -197,6 +208,118 @@ func (s *Janitor) deleteVSphereVMs(ctx context.Context, folder string) error {
return nil
}

// DeleteCNSVolumes deletes all CNS volumes that were created by e2e tests for the given boskos resource.
func (s *Janitor) DeleteCNSVolumes(ctx context.Context, boskosResourceName string) error {
log := ctrl.LoggerFrom(ctx).WithName("volumes")
ctx = ctrl.LoggerInto(ctx, log)

log.Info("Deleting volumes")

type cnsVolumeToDelete struct {
volumeID cnstypes.CnsVolumeId
pvcName string
pvcNamespace string
}
volumesToDelete := []cnsVolumeToDelete{}

queryFilter := cnstypes.CnsQueryFilter{
Labels: []types.KeyValue{
{
Key: boskosResourceLabel,
Value: boskosResourceName,
},
},
}

for {
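// Page through the QueryVolume results: CNS returns them in batches, and the
// cursor is advanced below until all records have been processed.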
res, err := s.vSphereClients.CNS.QueryVolume(ctx, queryFilter)
if err != nil {
return err
}

for _, volume := range res.Volumes {
var pvcMetadata *cnstypes.CnsKubernetesEntityMetadata
for _, meta := range volume.Metadata.EntityMetadata {
k8sMetadata, ok := meta.(*cnstypes.CnsKubernetesEntityMetadata)
if !ok {
continue
}
if k8sMetadata.EntityType != string(cnstypes.CnsKubernetesEntityTypePVC) {
continue
}
pvcMetadata = k8sMetadata
}

if pvcMetadata == nil {
// Ignoring non-PVC volumes.
continue
}

var matchesBoskosResourcename bool
// Check again that the volume has a matching label.
for _, v := range pvcMetadata.Labels {
if v.Key != boskosResourceLabel {
continue
}
if v.Value != boskosResourceName {
continue
}
matchesBoskosResourcename = true
}

// Ignore volumes that do not match.
if !matchesBoskosResourcename {
continue
}

volumesToDelete = append(volumesToDelete, cnsVolumeToDelete{
volumeID: volume.VolumeId,
pvcName: pvcMetadata.EntityName,
pvcNamespace: pvcMetadata.Namespace,
})
}

if res.Cursor.Offset == res.Cursor.TotalRecords || len(res.Volumes) == 0 {
break
}

queryFilter.Cursor = &res.Cursor
}

if len(volumesToDelete) == 0 {
log.Info("No CNS Volumes to delete")
return nil
}

deleteTasks := []*object.Task{}
for _, volume := range volumesToDelete {
log := log.WithValues("volumeID", volume.volumeID, "PersistentVolumeClaim", klog.KRef(volume.pvcNamespace, volume.pvcName))

log.Info("Deleting CNS Volume in vSphere")

if s.dryRun {
// Skipping actual delete on dryRun.
continue
}

// Trigger deletion of the CNS volume; the trailing true argument (deleteDisk) also removes the backing virtual disk.
task, err := s.vSphereClients.CNS.DeleteVolume(ctx, []cnstypes.CnsVolumeId{volume.volumeID}, true)
if err != nil {
return errors.Wrap(err, "failed to create CNS Volume deletion task")
}

log.Info("Created CNS Volume deletion task", "task", task.Reference().Value)
deleteTasks = append(deleteTasks, task)
}

// Wait for all delete tasks to succeed.
if err := waitForTasksFinished(ctx, deleteTasks, false); err != nil {
return errors.Wrap(err, "failed to wait for CNS Volume deletion tasks to finish")
}

return nil
}

// deleteObjectChildren deletes all child objects in a given object in vSphere if they don't
// contain any virtual machine.
// An object only gets deleted if:
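For context, here is a minimal sketch (not part of this commit) of how an e2e test could attach the boskos resource label to a PVC so that DeleteCNSVolumes above can find the backing CNS volume. It assumes the vSphere CSI metadata syncer propagates PVC labels into the CNS volume metadata that the query filter matches on; the createLabeledPVC helper, the namespace, and the E2E_BOSKOS_RESOURCE environment variable are hypothetical names used only for illustration.

package e2e

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createLabeledPVC creates a PVC carrying the capv-e2e-test-boskos-resource label
// so the janitor can later query and delete the backing CNS volume by label.
func createLabeledPVC(ctx context.Context, c client.Client, namespace string) error {
	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "e2e-",
			Namespace:    namespace,
			Labels: map[string]string{
				// Must match the boskosResourceLabel constant used by the janitor;
				// the env var name here is an assumption for this sketch.
				"capv-e2e-test-boskos-resource": os.Getenv("E2E_BOSKOS_RESOURCE"),
			},
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
	return c.Create(ctx, pvc)
}

Labels written this way carry the same boskos resource name that the new CleanupVSphere parameter passes to DeleteCNSVolumes, so the janitor's label query lines up with the PVCs the tests created.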