🌱 Use Go's errors package #10875

Open · wants to merge 1 commit into main
@@ -194,11 +194,6 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
// Lookup the cluster the config owner is associated with
cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
if err != nil {
if errors.Cause(err) == util.ErrNoCluster {
log.Info(fmt.Sprintf("%s does not belong to a cluster yet, waiting until it's part of a cluster", configOwner.GetKind()))
return ctrl.Result{}, nil
}

if apierrors.IsNotFound(err) {
log.Info("Cluster does not exist yet, waiting until it is created")
return ctrl.Result{}, nil
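For readers tracking the migration pattern: errors.Cause(err) == util.ErrNoCluster is the github.com/pkg/errors idiom for matching a sentinel, and its standard-library counterpart is errors.Is, which also matches a sentinel that has been wrapped with %w. A minimal, self-contained sketch of the difference (the sentinel below is a hypothetical stand-in for util.ErrNoCluster, not code from this PR):

package main

import (
	"errors"
	"fmt"
)

// errNoCluster is a stand-in for util.ErrNoCluster.
var errNoCluster = errors.New("no cluster found for the config owner")

func main() {
	// Callers often add context while wrapping the sentinel.
	err := fmt.Errorf("looking up owner cluster: %w", errNoCluster)

	fmt.Println(err == errNoCluster)          // false: direct comparison breaks once wrapped
	fmt.Println(errors.Is(err, errNoCluster)) // true: errors.Is walks the %w chain
}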
6 changes: 4 additions & 2 deletions cmd/clusterctl/client/cluster/template.go
@@ -303,10 +303,12 @@ func getGitHubClient(ctx context.Context, configVariablesClient config.Variables
return github.NewClient(authenticatingHTTPClient), nil
}

var errRateLimit = errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")

// handleGithubErr wraps error messages.
func handleGithubErr(err error, message string, args ...interface{}) error {
if _, ok := err.(*github.RateLimitError); ok {
return errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")
return errRateLimit
}
return errors.Wrapf(err, message, args...)
return fmt.Errorf("%s: %w", fmt.Sprintf(message, args...), err)
}
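The hunk above swaps errors.Wrapf for fmt.Errorf with the %w verb and lifts the rate-limit message into the package-level sentinel errRateLimit, so callers can test for the condition by identity rather than by string. A small, self-contained sketch of what the %w form preserves (the message and cause below are illustrative, not taken from clusterctl):

package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("401 Unauthorized")

	// The new handleGithubErr shape: format the message, then wrap the cause with %w.
	wrapped := fmt.Errorf("%s: %w", fmt.Sprintf("failed to fetch %q", "cluster-template.yaml"), cause)

	fmt.Println(wrapped)                         // failed to fetch "cluster-template.yaml": 401 Unauthorized
	fmt.Println(errors.Is(wrapped, cause))       // true: the cause survives the wrapping
	fmt.Println(errors.Unwrap(wrapped) == cause) // true: %w records exactly one wrapped error here
}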
38 changes: 38 additions & 0 deletions cmd/clusterctl/client/cluster/template_test.go
@@ -29,6 +29,7 @@ import (

"github.com/google/go-github/v53/github"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -517,6 +518,43 @@ func Test_templateClient_GetFromURL(t *testing.T) {
}
}

func Test_handleGithubErr(t *testing.T) {
tests := []struct {
name string
err error
message string
args []any
want error
}{
{
name: "Return error",
err: errors.New("error"),
message: "message %s and %s",
args: []any{"arg1", "arg2"},
want: fmt.Errorf("message arg1 and arg2: %w", errors.New("error")),
},
{
name: "Return RateLimitError",
err: &github.RateLimitError{
Response: &http.Response{
StatusCode: http.StatusForbidden,
},
},
message: "",
args: nil,
want: errRateLimit,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)

got := handleGithubErr(tt.err, tt.message, tt.args...)
g.Expect(got.Error()).To(Equal(tt.want.Error()))
})
}
}
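// Editor's sketch (not part of this PR): the table above compares got.Error() with
// want.Error() because the wrapped-error case rebuilds its expected error, and two
// separately constructed errors never satisfy errors.Is. For the sentinel case an
// identity-based assertion would be a possible alternative, for example:
//
// func Test_handleGithubErr_rateLimitSentinel(t *testing.T) {
// 	g := NewWithT(t)
//
// 	// handleGithubErr returns errRateLimit itself for *github.RateLimitError,
// 	// so errors.Is matches by identity.
// 	got := handleGithubErr(&github.RateLimitError{}, "")
// 	g.Expect(errors.Is(got, errRateLimit)).To(BeTrue())
// }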

func mustParseURL(rawURL string) *url.URL {
rURL, err := url.Parse(rawURL)
if err != nil {
24 changes: 13 additions & 11 deletions cmd/clusterctl/client/repository/repository_github.go
@@ -50,7 +50,8 @@ const (
)

var (
errNotFound = errors.New("404 Not Found")
errNotFound = errors.New("404 Not Found")
errRateLimit = errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")

// Caches used to limit the number of GitHub API calls.

@@ -319,7 +320,7 @@ func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) {
if listReleasesErr != nil {
retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
// Return immediately if we are rate limited.
if _, ok := listReleasesErr.(*github.RateLimitError); ok {
if errors.Is(retryError, errRateLimit) {
return false, retryError
}
return false, nil
@@ -334,7 +335,7 @@ func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) {
if listReleasesErr != nil {
retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
// Return immediately if we are rate limited.
if _, ok := listReleasesErr.(*github.RateLimitError); ok {
if errors.Is(retryError, errRateLimit) {
return false, retryError
}
return false, nil
@@ -384,7 +385,7 @@ func (g *gitHubRepository) getReleaseByTag(ctx context.Context, tag string) (*gi
return false, retryError
}
// Return immediately if we are rate limited.
if _, ok := getReleasesErr.(*github.RateLimitError); ok {
if errors.Is(retryError, errRateLimit) {
return false, retryError
}
return false, nil
@@ -466,7 +467,7 @@ func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release
if downloadReleaseError != nil {
retryError = g.handleGithubErr(downloadReleaseError, "failed to download file %q from %q release", *release.TagName, fileName)
// Return immediately if we are rate limited.
if _, ok := downloadReleaseError.(*github.RateLimitError); ok {
if errors.Is(retryError, errRateLimit) {
return false, retryError
}
return false, nil
@@ -500,12 +501,13 @@ func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release
// handleGithubErr wraps error messages.
func (g *gitHubRepository) handleGithubErr(err error, message string, args ...interface{}) error {
if _, ok := err.(*github.RateLimitError); ok {
return errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")
return errRateLimit
}
if ghErr, ok := err.(*github.ErrorResponse); ok {
if ghErr.Response.StatusCode == http.StatusNotFound {
return errNotFound
}

var ghErr *github.ErrorResponse
if errors.As(err, &ghErr) && ghErr.Response.StatusCode == http.StatusNotFound {
return errNotFound
}
return errors.Wrapf(err, message, args...)

return fmt.Errorf("%s: %w", fmt.Sprintf(message, args...), err)
}
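Two standard-library idioms carry this file's refactor: errors.Is(retryError, errRateLimit) in the retry loops, and errors.As in handleGithubErr in place of the err.(*github.ErrorResponse) type assertion. The retry-loop change is behavior-preserving because handleGithubErr returns the errRateLimit sentinel itself for rate-limit errors, so errors.Is matches it directly, and errors.As additionally sees through any %w wrapping a caller might add. A self-contained sketch of both idioms, using hypothetical stand-ins rather than the go-github types:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Stand-ins mirroring the sentinels above; illustrative only.
var (
	errRateLimit = errors.New("rate limit for github api has been reached")
	errNotFound  = errors.New("404 Not Found")
)

// apiError plays the role of *github.ErrorResponse for errors.As.
type apiError struct{ status int }

func (e *apiError) Error() string { return fmt.Sprintf("github: HTTP %d", e.status) }

func handle(err error, message string) error {
	// errors.As replaces the old type assertion and also matches a wrapped *apiError.
	var apiErr *apiError
	if errors.As(err, &apiErr) && apiErr.status == http.StatusNotFound {
		return errNotFound
	}
	return fmt.Errorf("%s: %w", message, err)
}

func main() {
	// Not-found responses collapse to the sentinel, even when wrapped upstream.
	wrapped := fmt.Errorf("listing releases: %w", &apiError{status: http.StatusNotFound})
	fmt.Println(errors.Is(handle(wrapped, "failed to get the list of releases"), errNotFound)) // true

	// Rate limits: the sentinel is returned as-is, so errors.Is on the handled error
	// is equivalent to the old type assertion on the raw error.
	fmt.Println(errors.Is(errRateLimit, errRateLimit)) // true
}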
52 changes: 52 additions & 0 deletions cmd/clusterctl/client/repository/repository_github_test.go
@@ -26,6 +26,7 @@ import (

"github.com/google/go-github/v53/github"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"k8s.io/utils/ptr"

clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
@@ -1108,3 +1109,54 @@ func Test_gitHubRepository_releaseNotFound(t *testing.T) {
})
}
}

func Test_handleGithubErr(t *testing.T) {
tests := []struct {
name string
err error
message string
args []any
want error
}{
{
name: "Return error",
err: errors.New("error"),
message: "message %s and %s",
args: []any{"arg1", "arg2"},
want: fmt.Errorf("message arg1 and arg2: %w", errors.New("error")),
},
{
name: "Return RateLimitError",
err: &github.RateLimitError{
Response: &http.Response{
StatusCode: http.StatusForbidden,
},
},
message: "",
args: nil,
want: errRateLimit,
},
{
name: "Return ErrorResponse",
err: &github.ErrorResponse{
Response: &http.Response{
StatusCode: http.StatusNotFound,
},
},
message: "",
args: nil,
want: errNotFound,
},
}

gRepo := &gitHubRepository{}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)

got := gRepo.handleGithubErr(tt.err, tt.message, tt.args...)
g.Expect(got.Error()).To(Equal(tt.want.Error()))
})
}
}
91 changes: 51 additions & 40 deletions controlplane/kubeadm/internal/controllers/controller.go
@@ -213,56 +213,67 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
}

defer func() {
// Always attempt to update status.
if err := r.updateStatus(ctx, controlPlane); err != nil {
var connFailure *internal.RemoteClusterConnectionError
if errors.As(err, &connFailure) {
log.Error(err, "Could not connect to workload cluster to fetch status")
} else {
log.Error(err, "Failed to update KubeadmControlPlane status")
reterr = kerrors.NewAggregate([]error{reterr, err})
}
}
res, reterr = r.deferPatch(ctx, kcp, controlPlane, patchHelper)
}()

if !kcp.ObjectMeta.DeletionTimestamp.IsZero() {
// Handle deletion reconciliation loop.
return r.reconcileDelete(ctx, controlPlane)
}

r.updateV1Beta2Status(ctx, controlPlane)
// Handle normal reconciliation loop.
return r.reconcile(ctx, controlPlane)
}

// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
patchOpts := []patch.Option{}
if reterr == nil {
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
}
if err := patchKubeadmControlPlane(ctx, patchHelper, kcp, patchOpts...); err != nil {
log.Error(err, "Failed to patch KubeadmControlPlane")
func (r *KubeadmControlPlaneReconciler) deferPatch(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane, patchHelper *patch.Helper) (ctrl.Result, error) {
var (
res ctrl.Result
reterr error
)
log := ctrl.LoggerFrom(ctx)
// Always attempt to update status.
if err := r.updateStatus(ctx, controlPlane); err != nil {
var connFailure *internal.RemoteClusterConnectionError
if errors.As(err, &connFailure) {
log.Error(err, "Could not connect to workload cluster to fetch status")
} else {
log.Error(err, "Failed to update KubeadmControlPlane status")
reterr = kerrors.NewAggregate([]error{reterr, err})
}
}

// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() {
// Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full
// resync (by default 10 minutes).
// The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the
// MachineSet and MachineHealthCheck controllers watch the nodes under their control.
if !kcp.Status.Ready {
res = ctrl.Result{RequeueAfter: 20 * time.Second}
}
r.updateV1Beta2Status(ctx, controlPlane)

// Make KCP requeue if ControlPlaneComponentsHealthyCondition is false so we can check for control plane component
// status without waiting for a full resync (by default 10 minutes).
// Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled.
// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive.
if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) {
res = ctrl.Result{RequeueAfter: 20 * time.Second}
}
// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
patchOpts := []patch.Option{}
if reterr == nil {
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
}
if err := patchKubeadmControlPlane(ctx, patchHelper, kcp, patchOpts...); err != nil {
log.Error(err, "Failed to patch KubeadmControlPlane")
reterr = kerrors.NewAggregate([]error{reterr, err})
}

// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() {
// Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full
// resync (by default 10 minutes).
// The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the
// MachineSet and MachineHealthCheck controllers watch the nodes under their control.
if !kcp.Status.Ready {
res = ctrl.Result{RequeueAfter: 20 * time.Second}
}
}()

if !kcp.ObjectMeta.DeletionTimestamp.IsZero() {
// Handle deletion reconciliation loop.
return r.reconcileDelete(ctx, controlPlane)
// Make KCP requeue if ControlPlaneComponentsHealthyCondition is false so we can check for control plane component
// status without waiting for a full resync (by default 10 minutes).
// Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled.
// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive.
if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) {
res = ctrl.Result{RequeueAfter: 20 * time.Second}
}
}

// Handle normal reconciliation loop.
return r.reconcile(ctx, controlPlane)
return res, reterr
}

// initControlPlaneScope initializes the control plane scope; this includes also checking for orphan machines and
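The refactor above moves the deferred status-update, patch, and requeue logic out of Reconcile into deferPatch, where patch failures are folded into the returned error with kerrors.NewAggregate. A small sketch of why that aggregation is safe even when no earlier error occurred (assuming k8s.io/apimachinery's aggregate semantics; not code from this PR):

package main

import (
	"errors"
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	patchErr := errors.New("failed to patch KubeadmControlPlane")

	// nil entries are skipped, so aggregating a nil reterr with a real patch failure
	// yields just the patch failure...
	fmt.Println(kerrors.NewAggregate([]error{nil, patchErr}))

	// ...and an all-nil slice aggregates to nil, leaving a clean reterr untouched.
	fmt.Println(kerrors.NewAggregate([]error{nil, nil}) == nil) // true
}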
39 changes: 39 additions & 0 deletions controlplane/kubeadm/internal/controllers/controller_test.go
@@ -2823,6 +2823,45 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition
}
}

func TestKubeadmControlPlaneReconciler_deferPatch(t *testing.T) {
g := NewWithT(t)

ns, err := env.CreateNamespace(ctx, "test-reconcile-update-status")
g.Expect(err).ToNot(HaveOccurred())

cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
g.Expect(env.Create(ctx, cluster)).To(Succeed())
g.Expect(env.Create(ctx, kcp)).To(Succeed())
defer func(do ...client.Object) {
g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
}(kcp, ns)

r := &KubeadmControlPlaneReconciler{
Client: env,
SecretCachingClient: secretCachingClient,
managementCluster: &fakeManagementCluster{
WorkloadErr: &internal.RemoteClusterConnectionError{
Name: util.ObjectKey(cluster).String(),
Err: errors.New("connection error"),
},
},
}

controlPlane, _, err := r.initControlPlaneScope(ctx, cluster, kcp)
g.Expect(err).ToNot(HaveOccurred())

patchHelper, err := patch.NewHelper(kcp, env)
g.Expect(err).ToNot(HaveOccurred())

result, err := r.deferPatch(ctx, kcp, controlPlane, patchHelper)
// A RemoteClusterConnectionError should only be logged, so deferPatch returns no error and requeues.
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: 20 * time.Second}))

// Clean up the cluster created for this test.
g.Expect(env.CleanupAndWait(ctx, cluster)).To(Succeed())
}

type fakeClusterCache struct {
clustercache.ClusterCache
lastProbeSuccessTime time.Time
3 changes: 1 addition & 2 deletions controlplane/kubeadm/internal/controllers/status.go
@@ -23,7 +23,6 @@ import (
"strings"
"time"

"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
@@ -94,7 +93,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro

workloadCluster, err := controlPlane.GetWorkloadCluster(ctx)
if err != nil {
return errors.Wrap(err, "failed to create remote cluster client")
return fmt.Errorf("failed to create remote cluster client: %w", err)
}
status, err := workloadCluster.ClusterStatus(ctx)
if err != nil {
2 changes: 1 addition & 1 deletion exp/internal/controllers/machinepool_controller_noderef.go
@@ -92,7 +92,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, s *scope)
// Get the Node references.
nodeRefsResult, err := r.getNodeReferences(ctx, mp.Spec.ProviderIDList, mp.Spec.MinReadySeconds, s.nodeRefMap)
if err != nil {
if err == errNoAvailableNodes {
if errors.Is(err, errNoAvailableNodes) {
log.Info("Cannot assign NodeRefs to MachinePool, no matching Nodes")
// No need to requeue here. Nodes emit an event that triggers reconciliation.
return ctrl.Result{}, nil
2 changes: 1 addition & 1 deletion exp/internal/controllers/machinepool_controller_phases.go
@@ -300,7 +300,7 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, s *
// Get and set Status.Replicas from the infrastructure provider.
err = util.UnstructuredUnmarshalField(infraConfig, &mp.Status.Replicas, "status", "replicas")
if err != nil {
if err != util.ErrUnstructuredFieldNotFound {
if !errors.Is(err, util.ErrUnstructuredFieldNotFound) {
return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve replicas from infrastructure provider for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
}
}
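The last two hunks switch direct sentinel comparisons (err == errNoAvailableNodes, err != util.ErrUnstructuredFieldNotFound) to errors.Is while keeping errors.Wrapf from github.com/pkg/errors nearby. Mixing the two is safe because pkg/errors wrappers implement Unwrap (since v0.9.0), so the standard error chain stays intact; a minimal sketch under that assumption, with a stand-in sentinel:

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

// Stand-in for util.ErrUnstructuredFieldNotFound.
var errFieldNotFound = stderrors.New("field not found")

func main() {
	// pkg/errors wrapping still participates in the standard error chain...
	err := errors.Wrapf(errFieldNotFound, "failed to retrieve replicas for MachinePool %q", "mp-0")

	// ...so errors.Is (the stdlib version or the pkg/errors pass-through) finds the sentinel.
	fmt.Println(stderrors.Is(err, errFieldNotFound)) // true
	fmt.Println(errors.Is(err, errFieldNotFound))    // true
}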