feat: enable work agent join leave (Azure#292)
* feat: implement work agent join/leave

* fix tests

* fix the e2e

* fix a corner case where a member cluster is deleted during reconcile

* disable the work-api e2e test

Co-authored-by: Ryan Zhang <[email protected]>
ryanzhang-oss and Ryan Zhang authored Sep 15, 2022
1 parent 3d09ebe commit cb1a5d6
Showing 14 changed files with 255 additions and 574 deletions.
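
At a high level, this change hands the internal member cluster reconciler (the join/leave agent) a reference to the work controller so the work agent can be asked to join or leave before the member cluster's status is reported. A minimal sketch of the new flow, condensed from the member_controller.go diff below (heartbeat updates, health checks, status retries, and event recording are trimmed here):

```go
// Sketch only: condensed from the diff below, not a verbatim excerpt.
switch imc.Spec.State {
case fleetv1alpha1.ClusterStateJoin:
	// ask every member agent (currently just the work controller) to join first
	if err := r.workController.Join(ctx); err != nil {
		return ctrl.Result{}, err // the "Joined" condition is set to Unknown on failure
	}
	// ... then update heartbeat, health, and the "Joined" condition
case fleetv1alpha1.ClusterStateLeave:
	// make the work agent stop applying works before reporting "Left"
	if err := r.workController.Leave(ctx); err != nil {
		return ctrl.Result{}, err
	}
	// ... then mark the member agent as left
}
```
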
12 changes: 7 additions & 5 deletions cmd/memberagent/main.go
@@ -5,6 +5,7 @@ Licensed under the MIT license.

package main

//goland:noinspection ALL
import (
"context"
"encoding/base64"
@@ -198,18 +199,19 @@ func Start(ctx context.Context, hubCfg, memberConfig *rest.Config, hubOpts, memb
os.Exit(1)
}

if err = workcontrollers.NewApplyWorkReconciler(
// create the work controller, so we can pass it to the internal member cluster reconciler
workController := workcontrollers.NewApplyWorkReconciler(
hubMgr.GetClient(),
spokeDynamicClient,
memberMgr.GetClient(),
restMapper,
hubMgr.GetEventRecorderFor("work_controller"),
5, true).SetupWithManager(hubMgr); err != nil {
restMapper, hubMgr.GetEventRecorderFor("work_controller"), 5, hubOpts.Namespace)

if err = workController.SetupWithManager(hubMgr); err != nil {
klog.ErrorS(err, "unable to create controller", "controller", "Work")
return err
}

if err = internalmembercluster.NewReconciler(hubMgr.GetClient(), memberMgr.GetClient()).SetupWithManager(hubMgr); err != nil {
if err = internalmembercluster.NewReconciler(hubMgr.GetClient(), memberMgr.GetClient(), workController).SetupWithManager(hubMgr); err != nil {
return errors.Wrap(err, "unable to create controller hub_member")
}
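
The work controller is now constructed up front and shared with the internal member cluster reconciler, and its constructor takes the hub namespace in place of the old trailing boolean. Judging from the call site alone, the constructor has roughly the shape sketched below; the parameter names and types are inferred, and the authoritative definition lives in Azure/k8s-work-api v0.4.3, which is not part of this diff.

```go
// Hypothetical sketch of the constructor shape implied by the call site above.
// Names and types are inferred, not taken from the work-api source.
package workcontrollers

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type ApplyWorkReconciler struct{ /* fields elided in this sketch */ }

func NewApplyWorkReconciler(
	hubClient client.Client, // client to the hub cluster
	spokeDynamicClient dynamic.Interface, // dynamic client to the member cluster
	spokeClient client.Client, // typed client to the member cluster
	restMapper meta.RESTMapper,
	recorder record.EventRecorder,
	concurrency int,
	workNamespace string, // hub namespace reserved for this member cluster
) *ApplyWorkReconciler {
	// The real construction logic lives in Azure/k8s-work-api; this stub only
	// illustrates the parameter list.
	return &ApplyWorkReconciler{}
}
```
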

2 changes: 1 addition & 1 deletion go.mod
@@ -96,5 +96,5 @@ replace (
golang.org/x/crypto => golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b

k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.2 // weird bug that the goland won't compile without this
sigs.k8s.io/work-api => github.com/Azure/k8s-work-api v0.4.2
sigs.k8s.io/work-api => github.com/Azure/k8s-work-api v0.4.3
)
4 changes: 2 additions & 2 deletions go.sum
@@ -52,8 +52,8 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/Azure/k8s-work-api v0.4.2 h1:Kwl8pmBfiykgWws12ud80TpU9gQNveyR7zlwMutGwGc=
github.com/Azure/k8s-work-api v0.4.2/go.mod h1:FOGJkJ+uxjWlvUgmqUlRcmr4Q2ijocrUO/aLJv827y8=
github.com/Azure/k8s-work-api v0.4.3 h1:fxwO/QZftM3CW9FNl/JTHRQmfbQPa83VwOxR0HadECk=
github.com/Azure/k8s-work-api v0.4.3/go.mod h1:FOGJkJ+uxjWlvUgmqUlRcmr4Q2ijocrUO/aLJv827y8=
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c=
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
114 changes: 97 additions & 17 deletions pkg/controllers/internalmembercluster/member_controller.go
@@ -21,6 +21,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
workcontrollers "sigs.k8s.io/work-api/pkg/controllers"

"go.goms.io/fleet/apis"
fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1"
@@ -31,26 +32,35 @@ import (
type Reconciler struct {
hubClient client.Client
memberClient client.Client
recorder record.EventRecorder

// the join/leave agent maintains the list of controllers running in the member cluster
// so that it can make sure all the agents on the member cluster have joined/left
// before it updates the internal member cluster CR status
workController *workcontrollers.ApplyWorkReconciler

recorder record.EventRecorder
}

const (
eventReasonInternalMemberClusterHealthy = "InternalMemberClusterHealthy"
eventReasonInternalMemberClusterUnhealthy = "InternalMemberClusterUnhealthy"
eventReasonInternalMemberClusterJoined = "InternalMemberClusterJoined"
eventReasonInternalMemberClusterLeft = "InternalMemberClusterLeft"
eventReasonInternalMemberClusterHealthy = "InternalMemberClusterHealthy"
eventReasonInternalMemberClusterUnhealthy = "InternalMemberClusterUnhealthy"
eventReasonInternalMemberClusterJoined = "InternalMemberClusterJoined"
eventReasonInternalMemberClusterFailedToJoin = "InternalMemberClusterFailedToJoin"
eventReasonInternalMemberClusterFailedToLeave = "InternalMemberClusterFailedToLeave"
eventReasonInternalMemberClusterLeft = "InternalMemberClusterLeft"
)

// NewReconciler creates a new reconciler for the internalMemberCluster CR
func NewReconciler(hubClient client.Client, memberClient client.Client) *Reconciler {
func NewReconciler(hubClient client.Client, memberClient client.Client, workController *workcontrollers.ApplyWorkReconciler) *Reconciler {
return &Reconciler{
hubClient: hubClient,
memberClient: memberClient,
hubClient: hubClient,
memberClient: memberClient,
workController: workController,
}
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
klog.V(3).InfoS("Reconcile", "InternalMemberCluster", req.NamespacedName)
klog.V(2).InfoS("Reconcile", "InternalMemberCluster", req.NamespacedName)

var imc fleetv1alpha1.InternalMemberCluster
if err := r.hubClient.Get(ctx, req.NamespacedName, &imc); err != nil {
@@ -60,6 +70,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu

switch imc.Spec.State {
case fleetv1alpha1.ClusterStateJoin:
if err := r.startAgents(ctx, &imc); err != nil {
return ctrl.Result{}, err
}
updateMemberAgentHeartBeat(&imc)
updateHealthErr := r.updateHealth(ctx, &imc)
r.markInternalMemberClusterJoined(&imc)
@@ -74,6 +87,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{RequeueAfter: time.Second * time.Duration(imc.Spec.HeartbeatPeriodSeconds)}, nil

case fleetv1alpha1.ClusterStateLeave:
if err := r.stopAgents(ctx, &imc); err != nil {
return ctrl.Result{}, err
}
r.markInternalMemberClusterLeft(&imc)
if err := r.updateInternalMemberClusterWithRetry(ctx, &imc); err != nil {
klog.ErrorS(err, "failed to update status for %s", klog.KObj(&imc))
@@ -87,9 +103,33 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
}
}

// startAgents starts all the member agents running on the member cluster
func (r *Reconciler) startAgents(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
// TODO: handle all the controllers uniformly if we have more
if err := r.workController.Join(ctx); err != nil {
r.markInternalMemberClusterJoinFailed(imc, err)
// ignore the update error since we will return an error anyway
_ = r.updateInternalMemberClusterWithRetry(ctx, imc)
return err
}
return nil
}

// stopAgents stops all the member agents running on the member cluster
func (r *Reconciler) stopAgents(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
// TODO: handle all the controllers uniformly if we have more
if err := r.workController.Leave(ctx); err != nil {
r.markInternalMemberClusterLeaveFailed(imc, err)
// ignore the update error since we will return an error anyway
_ = r.updateInternalMemberClusterWithRetry(ctx, imc)
return err
}
return nil
}

// updateHealth collects and updates member cluster resource stats and sets ConditionTypeInternalMemberClusterHealth.
func (r *Reconciler) updateHealth(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
klog.V(3).InfoS("updateHealth", "InternalMemberCluster", klog.KObj(imc))
klog.V(2).InfoS("updateHealth", "InternalMemberCluster", klog.KObj(imc))

if err := r.updateResourceStats(ctx, imc); err != nil {
r.markInternalMemberClusterUnhealthy(imc, errors.Wrapf(err, "failed to update resource stats %s", klog.KObj(imc)))
@@ -102,7 +142,7 @@ func (r *Reconciler) updateHealth(ctx context.Context, imc *fleetv1alpha1.Intern

// updateResourceStats collects and updates resource usage stats of the member cluster.
func (r *Reconciler) updateResourceStats(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
klog.V(5).InfoS("updateResourceStats", "InternalMemberCluster", klog.KObj(imc))
klog.V(4).InfoS("updateResourceStats", "InternalMemberCluster", klog.KObj(imc))
var nodes corev1.NodeList
if err := r.memberClient.List(ctx, &nodes); err != nil {
return errors.Wrapf(err, "failed to list nodes for member cluster %s", klog.KObj(imc))
@@ -132,7 +172,7 @@ func (r *Reconciler) updateResourceStats(ctx context.Context, imc *fleetv1alpha1

// updateInternalMemberClusterWithRetry updates InternalMemberCluster status.
func (r *Reconciler) updateInternalMemberClusterWithRetry(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
klog.V(5).InfoS("updateInternalMemberClusterWithRetry", "InternalMemberCluster", klog.KObj(imc))
klog.V(4).InfoS("updateInternalMemberClusterWithRetry", "InternalMemberCluster", klog.KObj(imc))
backOffPeriod := retry.DefaultBackoff
backOffPeriod.Cap = time.Second * time.Duration(imc.Spec.HeartbeatPeriodSeconds)

@@ -147,15 +187,15 @@ func (r *Reconciler) updateInternalMemberClusterWithRetry(ctx context.Context, i

// updateMemberAgentHeartBeat updates the member agent heartbeat on the InternalMemberCluster.
func updateMemberAgentHeartBeat(imc *fleetv1alpha1.InternalMemberCluster) {
klog.V(5).InfoS("update Internal member cluster heartbeat", "InternalMemberCluster", klog.KObj(imc))
klog.V(4).InfoS("update Internal member cluster heartbeat", "InternalMemberCluster", klog.KObj(imc))
desiredAgentStatus := imc.GetAgentStatus(fleetv1alpha1.MemberAgent)
if desiredAgentStatus != nil {
desiredAgentStatus.LastReceivedHeartbeat = metav1.Now()
}
}

func (r *Reconciler) markInternalMemberClusterHealthy(imc apis.ConditionedAgentObj) {
klog.V(5).InfoS("markInternalMemberClusterHealthy", "InternalMemberCluster", klog.KObj(imc))
klog.V(4).InfoS("markInternalMemberClusterHealthy", "InternalMemberCluster", klog.KObj(imc))
newCondition := metav1.Condition{
Type: string(fleetv1alpha1.AgentHealthy),
Status: metav1.ConditionTrue,
@@ -174,7 +214,7 @@ func (r *Reconciler) markInternalMemberClusterHealthy(imc apis.ConditionedAgentO
}

func (r *Reconciler) markInternalMemberClusterUnhealthy(imc apis.ConditionedAgentObj, err error) {
klog.V(5).InfoS("markInternalMemberClusterUnhealthy", "InternalMemberCluster", klog.KObj(imc))
klog.V(4).InfoS("markInternalMemberClusterUnhealthy", "InternalMemberCluster", klog.KObj(imc))
newCondition := metav1.Condition{
Type: string(fleetv1alpha1.AgentHealthy),
Status: metav1.ConditionFalse,
@@ -194,7 +234,7 @@ func (r *Reconciler) markInternalMemberClusterUnhealthy(imc apis.ConditionedAgen
}

func (r *Reconciler) markInternalMemberClusterJoined(imc apis.ConditionedAgentObj) {
klog.V(5).InfoS("markInternalMemberClusterJoined", "InternalMemberCluster", klog.KObj(imc))
klog.V(4).InfoS("markInternalMemberClusterJoined", "InternalMemberCluster", klog.KObj(imc))
newCondition := metav1.Condition{
Type: string(fleetv1alpha1.AgentJoined),
Status: metav1.ConditionTrue,
@@ -213,8 +253,28 @@ func (r *Reconciler) markInternalMemberClusterJoined(imc apis.ConditionedAgentOb
imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition)
}

func (r *Reconciler) markInternalMemberClusterJoinFailed(imc apis.ConditionedAgentObj, err error) {
klog.V(4).InfoS("markInternalMemberCluster join failed", "error", err, "InternalMemberCluster", klog.KObj(imc))
newCondition := metav1.Condition{
Type: string(fleetv1alpha1.AgentJoined),
Status: metav1.ConditionUnknown,
Reason: eventReasonInternalMemberClusterFailedToJoin,
Message: err.Error(),
ObservedGeneration: imc.GetGeneration(),
}

// Joined status changed.
existingCondition := imc.GetConditionWithType(fleetv1alpha1.MemberAgent, newCondition.Type)
if existingCondition == nil || existingCondition.ObservedGeneration != imc.GetGeneration() || existingCondition.Status != newCondition.Status {
r.recorder.Event(imc, corev1.EventTypeNormal, eventReasonInternalMemberClusterFailedToJoin, "internal member cluster failed to join")
klog.ErrorS(err, "agent join failed", "InternalMemberCluster", klog.KObj(imc))
}

imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition)
}

func (r *Reconciler) markInternalMemberClusterLeft(imc apis.ConditionedAgentObj) {
klog.V(5).InfoS("markInternalMemberClusterLeft", "InternalMemberCluster", klog.KObj(imc))
klog.V(4).InfoS("markInternalMemberClusterLeft", "InternalMemberCluster", klog.KObj(imc))
newCondition := metav1.Condition{
Type: string(fleetv1alpha1.AgentJoined),
Status: metav1.ConditionFalse,
@@ -233,6 +293,26 @@ func (r *Reconciler) markInternalMemberClusterLeft(imc apis.ConditionedAgentObj)
imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition)
}

func (r *Reconciler) markInternalMemberClusterLeaveFailed(imc apis.ConditionedAgentObj, err error) {
klog.V(4).InfoS("markInternalMemberCluster leave failed", "error", err, "InternalMemberCluster", klog.KObj(imc))
newCondition := metav1.Condition{
Type: string(fleetv1alpha1.AgentJoined),
Status: metav1.ConditionUnknown,
Reason: eventReasonInternalMemberClusterFailedToLeave,
Message: err.Error(),
ObservedGeneration: imc.GetGeneration(),
}

// Joined status changed.
existingCondition := imc.GetConditionWithType(fleetv1alpha1.MemberAgent, newCondition.Type)
if existingCondition == nil || existingCondition.ObservedGeneration != imc.GetGeneration() || existingCondition.Status != newCondition.Status {
r.recorder.Event(imc, corev1.EventTypeNormal, eventReasonInternalMemberClusterFailedToLeave, "internal member cluster failed to leave")
klog.ErrorS(err, "agent leave failed", "InternalMemberCluster", klog.KObj(imc))
}

imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition)
}

// SetupWithManager sets up the controller with the Manager.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r.recorder = mgr.GetEventRecorderFor("InternalMemberClusterController")
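
startAgents and stopAgents currently special-case the single work controller, and the TODO in those functions notes that multiple agents should eventually be handled uniformly. Purely as an illustration of where that TODO could lead (none of this exists in the commit), the loop might range over a small join/leave interface:

```go
// Illustrative only — a hypothetical memberAgent interface and agents slice;
// the commit stores a single *workcontrollers.ApplyWorkReconciler instead.
type memberAgent interface {
	Join(ctx context.Context) error
	Leave(ctx context.Context) error
}

func (r *Reconciler) startAgents(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
	for _, agent := range r.agents { // e.g. []memberAgent{r.workController}
		if err := agent.Join(ctx); err != nil {
			r.markInternalMemberClusterJoinFailed(imc, err)
			// ignore the update error since we will return an error anyway
			_ = r.updateInternalMemberClusterWithRetry(ctx, imc)
			return err
		}
	}
	return nil
}
```
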
@@ -15,6 +15,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
workcontrollers "sigs.k8s.io/work-api/pkg/controllers"

"go.goms.io/fleet/apis/v1alpha1"
"go.goms.io/fleet/pkg/utils"
@@ -57,7 +58,9 @@ var _ = Describe("Test Internal Member Cluster Controller", func() {
}

By("create the internalMemberCluster reconciler")
r = NewReconciler(k8sClient, k8sClient)
workController := workcontrollers.NewApplyWorkReconciler(
k8sClient, nil, k8sClient, nil, nil, 5, memberClusterNamespace)
r = NewReconciler(k8sClient, k8sClient, workController)
err := r.SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
})
4 changes: 4 additions & 0 deletions pkg/controllers/internalmembercluster/member_suite_test.go
@@ -20,6 +20,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"

"go.goms.io/fleet/apis/v1alpha1"
)
@@ -60,6 +61,9 @@ var _ = BeforeSuite(func() {
err = v1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())

err = workv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())

//+kubebuilder:scaffold:scheme
By("construct the k8s client")
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
@@ -63,7 +63,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
klog.ErrorS(err, "failed to convert a cluster resource placement", "memberCluster", memberClusterName, "crp", uObj.GetName())
return ctrl.Result{}, err
}
if matchPlacement(&placement, mObj.(*unstructured.Unstructured).DeepCopy()) {
if mObj == nil {
// This is a corner case where the member cluster was deleted before we handled its status change. We can't run the placement match since the cluster's labels are gone.
klog.V(3).InfoS("enqueue a placement to reconcile for a deleted member cluster", "memberCluster", memberClusterName, "placement", klog.KObj(&placement))
r.PlacementController.Enqueue(crpList[i])
} else if matchPlacement(&placement, mObj.(*unstructured.Unstructured).DeepCopy()) {
klog.V(3).InfoS("enqueue a placement to reconcile", "memberCluster", memberClusterName, "placement", klog.KObj(&placement))
r.PlacementController.Enqueue(crpList[i])
}
2 changes: 2 additions & 0 deletions test/e2e/README.md
@@ -25,6 +25,7 @@ make run-e2e
```
or test manually
```shell
kubectl --context=kind-hub-testing delete ns local-path-storage
kubectl --context=kind-hub-testing apply -f examples/fleet_v1alpha1_membercluster.yaml
kubectl --context=kind-hub-testing apply -f test/integration/manifests/resources
kubectl --context=kind-hub-testing apply -f test/integration/manifests/resources
@@ -46,4 +47,5 @@ kubectl --context=kind-member-testing -n fleet-system get pod
5. uninstall the resources
```shell
make uninstall-helm
make clean-e2e-tests
```
5 changes: 0 additions & 5 deletions test/e2e/e2e_test.go
@@ -14,7 +14,6 @@ import (
. "github.com/onsi/gomega"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
@@ -39,10 +38,6 @@ var (
// This namespace in HubCluster will store v1alpha1.Work to simulate Work-related features in Hub Cluster.
workNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName))

// Used to decode an unstructured object.
genericCodecs = serializer.NewCodecFactory(scheme)
genericCodec = genericCodecs.UniversalDeserializer()

//go:embed manifests
TestManifestFiles embed.FS
)
5 changes: 3 additions & 2 deletions test/e2e/join_leave_member_test.go
@@ -8,10 +8,11 @@ import (
"context"

. "github.com/onsi/ginkgo/v2"
"go.goms.io/fleet/apis/v1alpha1"
testutils "go.goms.io/fleet/test/e2e/utils"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"go.goms.io/fleet/apis/v1alpha1"
testutils "go.goms.io/fleet/test/e2e/utils"
)

var _ = Describe("Join/leave member cluster testing", func() {
6 changes: 3 additions & 3 deletions test/e2e/utils/helper.go
@@ -204,7 +204,7 @@ func WaitConditionClusterResourcePlacement(cluster framework.Cluster, crp *v1alp
func DeleteClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) {
ginkgo.By(fmt.Sprintf("Deleting ClusterResourcePlacement(%s)", crp.Name), func() {
err := cluster.KubeClient.Delete(context.TODO(), crp)
gomega.Expect(err).Should(gomega.Succeed())
gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}))
})
}

@@ -239,7 +239,7 @@ func DeleteNamespace(cluster framework.Cluster, ns *corev1.Namespace) {
ginkgo.By(fmt.Sprintf("Deleting Namespace(%s)", ns.Name), func() {
err := cluster.KubeClient.Delete(context.TODO(), ns)
if err != nil && !apierrors.IsNotFound(err) {
gomega.Expect(err).Should(gomega.Succeed())
gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}))
}
})
}
@@ -256,7 +256,7 @@ func CreateServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount)
func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) {
ginkgo.By(fmt.Sprintf("Delete ServiceAccount(%s)", sa.Name), func() {
err := cluster.KubeClient.Delete(context.TODO(), sa)
gomega.Expect(err).Should(gomega.Succeed())
gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}))
})
}
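
The e2e delete helpers above now tolerate objects that are already gone by accepting either success or a not-found error. A matcher like utils.NotFoundMatcher is typically a thin wrapper around apierrors.IsNotFound; a sketch is below, since the real implementation is not shown in this diff and may differ.

```go
// Sketch of a gomega matcher that accepts Kubernetes NotFound errors.
// Illustrative only; the actual utils.NotFoundMatcher is not part of this diff.
package utils

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// NotFoundMatcher matches an error that reports a missing Kubernetes object.
type NotFoundMatcher struct{}

// Match succeeds when the actual value is a NotFound API error.
func (m *NotFoundMatcher) Match(actual interface{}) (bool, error) {
	if actual == nil {
		return false, nil
	}
	err, ok := actual.(error)
	if !ok {
		return false, fmt.Errorf("expected an error, got %T", actual)
	}
	return apierrors.IsNotFound(err), nil
}

// FailureMessage is shown when the matcher was expected to succeed but did not.
func (m *NotFoundMatcher) FailureMessage(actual interface{}) string {
	return fmt.Sprintf("expected a NotFound error, got: %v", actual)
}

// NegatedFailureMessage is shown when the matcher was expected to fail but succeeded.
func (m *NotFoundMatcher) NegatedFailureMessage(actual interface{}) string {
	return fmt.Sprintf("did not expect a NotFound error, got: %v", actual)
}
```

Because it satisfies gomega's types.GomegaMatcher interface, it composes with gomega.SatisfyAny exactly as used in the helpers above.
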
