
Commit c738a83

Fix the 'printf: non-constant format string in call to fmt.Errorf (govet)' lint errors (#666)
Signed-off-by: Yuki Iwai <[email protected]>
1 parent 5caa9d5 commit c738a83
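
The govet printf analyzer flags calls such as fmt.Errorf(msg) where the format string is not a constant: msg has already been formatted once, and any '%' left in it would be re-interpreted as a formatting verb, mangling the error text. A minimal standalone sketch of the flagged pattern and of the fix this commit applies (the message and names below are illustrative, not taken from the controller):

package main

import (
    "errors"
    "fmt"
)

func main() {
    // A message that was already formatted once and happens to contain a '%'.
    msg := fmt.Sprintf("pod %q already exists (100%% sure)", "demo-worker-0")

    // Flagged as "printf: non-constant format string in call to fmt.Errorf":
    // the '%' remaining in msg is parsed as a verb again, corrupting the text.
    bad := fmt.Errorf(msg)

    // Either form keeps the text intact and satisfies the linter; this commit
    // uses errors.New, since the message is already fully formatted.
    good := errors.New(msg)
    alt := fmt.Errorf("%s", msg)

    fmt.Println(bad)
    fmt.Println(good, alt)
}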

File tree: 3 files changed (+23 -23 lines)

.github/workflows/golangci-lint.yml (-1)

@@ -20,4 +20,3 @@ jobs:
         with:
           version: v1.61.0
           args: --timeout=5m
-          only-new-issues: true
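
Note: only-new-issues: true told the golangci-lint GitHub Action to annotate only issues introduced by the pull request under review; with it removed, CI reports every issue it finds, which lines up with the Makefile change below.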

Makefile (+2 -2)

@@ -121,7 +121,7 @@ tidy:

 .PHONY: lint
 lint: bin/golangci-lint ## Run golangci-lint linter
-    $(GOLANGCI_LINT) run --new-from-rev=origin/master
+    $(GOLANGCI_LINT) run

 # Generate deploy/v2beta1/mpi-operator.yaml
 manifest: kustomize crd

@@ -146,7 +146,7 @@ bin/envtest: bin ## Download envtest-setup locally if necessary.
     @GOBIN=$(PROJECT_DIR)/bin go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest

 bin/kubectl: bin
-    curl -L -o $(PROJECT_DIR)/bin/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/$(GOOS)/$(GOARCH)/kubectl
+    curl -L -o $(PROJECT_DIR)/bin/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/$(GOOS)/$(GOARCH)/kubectl
     chmod +x $(PROJECT_DIR)/bin/kubectl

 .PHONY: kind
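
Dropping --new-from-rev=origin/master makes make lint check the whole module instead of only code changed since origin/master, so pre-existing findings (such as the fmt.Errorf ones fixed here) also show up locally. The bin/kubectl hunk appears to be a whitespace-only change; the curl command reads the same on both sides.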

pkg/controller/mpi_job_controller.go (+21 -20)

@@ -22,6 +22,7 @@ import (
     "crypto/rand"
     "crypto/x509"
     "encoding/pem"
+    "errors"
     "fmt"
     "reflect"
     "sort"

@@ -34,7 +35,7 @@ import (
     batchv1 "k8s.io/api/batch/v1"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/equality"
-    "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime/schema"

@@ -359,7 +360,7 @@ func NewMPIJobControllerWithClock(
         // Pipe to default handler first, which just logs the error
         cache.DefaultWatchErrorHandler(r, err)

-        if errors.IsUnauthorized(err) || errors.IsForbidden(err) {
+        if apierrors.IsUnauthorized(err) || apierrors.IsForbidden(err) {
             klog.Fatalf("Unable to sync cache for informer %s: %s. Requesting controller to exit.", name, err)
         }
     })

@@ -564,7 +565,7 @@ func (c *MPIJobController) syncHandler(key string) error {
     sharedJob, err := c.mpiJobLister.MPIJobs(namespace).Get(name)
     if err != nil {
         // The MPIJob may no longer exist, in which case we stop processing.
-        if errors.IsNotFound(err) {
+        if apierrors.IsNotFound(err) {
             klog.V(4).Infof("MPIJob has been deleted: %v", key)
             return nil
         }

@@ -714,7 +715,7 @@ func cleanUpWorkerPods(mpiJob *kubeflow.MPIJob, c *MPIJobController) error {
 // getLauncherJob gets the launcher Job controlled by this MPIJob.
 func (c *MPIJobController) getLauncherJob(mpiJob *kubeflow.MPIJob) (*batchv1.Job, error) {
     launcher, err := c.jobLister.Jobs(mpiJob.Namespace).Get(mpiJob.Name + launcherSuffix)
-    if errors.IsNotFound(err) {
+    if apierrors.IsNotFound(err) {
         return nil, nil
     }
     if err != nil {

@@ -729,7 +730,7 @@ func (c *MPIJobController) getLauncherJob(mpiJob *kubeflow.MPIJob) (*batchv1.Job
     if !metav1.IsControlledBy(launcher, mpiJob) {
         msg := fmt.Sprintf(MessageResourceExists, launcher.Name, launcher.Kind)
         c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-        return launcher, fmt.Errorf(msg)
+        return launcher, errors.New(msg)
     }

     return launcher, nil

@@ -740,7 +741,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
     newPodGroup := c.PodGroupCtrl.newPodGroup(mpiJob)
     podGroup, err := c.PodGroupCtrl.getPodGroup(newPodGroup.GetNamespace(), newPodGroup.GetName())
     // If the PodGroup doesn't exist, we'll create it.
-    if errors.IsNotFound(err) {
+    if apierrors.IsNotFound(err) {
         return c.PodGroupCtrl.createPodGroup(context.TODO(), newPodGroup)
     }
     // If an error occurs during Get/Create, we'll requeue the item so we

@@ -754,7 +755,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
     if !metav1.IsControlledBy(podGroup, mpiJob) {
         msg := fmt.Sprintf(MessageResourceExists, podGroup.GetName(), "PodGroup")
         c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-        return nil, fmt.Errorf(msg)
+        return nil, errors.New(msg)
     }

     if !c.PodGroupCtrl.pgSpecsAreEqual(podGroup, newPodGroup) {

@@ -767,7 +768,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
 func (c *MPIJobController) deletePodGroups(mpiJob *kubeflow.MPIJob) error {
     podGroup, err := c.PodGroupCtrl.getPodGroup(mpiJob.Namespace, mpiJob.Name)
     if err != nil {
-        if errors.IsNotFound(err) {
+        if apierrors.IsNotFound(err) {
             return nil
         }
         return err

@@ -778,7 +779,7 @@ func (c *MPIJobController) deletePodGroups(mpiJob *kubeflow.MPIJob) error {
     if !metav1.IsControlledBy(podGroup, mpiJob) {
         msg := fmt.Sprintf(MessageResourceExists, podGroup.GetName(), "PodGroup")
         c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-        return fmt.Errorf(msg)
+        return errors.New(msg)
     }

     // If the PodGroup exist, we'll delete it.

@@ -839,7 +840,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev

     cm, err := c.configMapLister.ConfigMaps(mpiJob.Namespace).Get(mpiJob.Name + configSuffix)
     // If the ConfigMap doesn't exist, we'll create it.
-    if errors.IsNotFound(err) {
+    if apierrors.IsNotFound(err) {
         return c.kubeClient.CoreV1().ConfigMaps(mpiJob.Namespace).Create(context.TODO(), newCM, metav1.CreateOptions{})
     }
     if err != nil {

@@ -851,7 +852,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev
     if !metav1.IsControlledBy(cm, mpiJob) {
         msg := fmt.Sprintf(MessageResourceExists, cm.Name, cm.Kind)
         c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-        return nil, fmt.Errorf(msg)
+        return nil, errors.New(msg)
     }

     // If the ConfigMap is changed, update it

@@ -869,7 +870,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev

 func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *corev1.Service) (*corev1.Service, error) {
     svc, err := c.serviceLister.Services(job.Namespace).Get(newSvc.Name)
-    if errors.IsNotFound(err) {
+    if apierrors.IsNotFound(err) {
         return c.kubeClient.CoreV1().Services(job.Namespace).Create(context.TODO(), newSvc, metav1.CreateOptions{})
     }
     if err != nil {

@@ -878,7 +879,7 @@ func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *core
     if !metav1.IsControlledBy(svc, job) {
         msg := fmt.Sprintf(MessageResourceExists, svc.Name, svc.Kind)
         c.recorder.Event(job, corev1.EventTypeWarning, ErrResourceExists, msg)
-        return nil, fmt.Errorf(msg)
+        return nil, errors.New(msg)
     }

     // If the Service selector is changed, update it.

@@ -895,7 +896,7 @@ func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *core
 // or create one if it doesn't exist.
 func (c *MPIJobController) getOrCreateSSHAuthSecret(job *kubeflow.MPIJob) (*corev1.Secret, error) {
     secret, err := c.secretLister.Secrets(job.Namespace).Get(job.Name + sshAuthSecretSuffix)
-    if errors.IsNotFound(err) {
+    if apierrors.IsNotFound(err) {
         secret, err := newSSHAuthSecret(job)
         if err != nil {
             return nil, err

@@ -908,7 +909,7 @@ func (c *MPIJobController) getOrCreateSSHAuthSecret(job *kubeflow.MPIJob) (*core
     if !metav1.IsControlledBy(secret, job) {
         msg := fmt.Sprintf(MessageResourceExists, secret.Name, secret.Kind)
         c.recorder.Event(job, corev1.EventTypeWarning, ErrResourceExists, msg)
-        return nil, fmt.Errorf(msg)
+        return nil, errors.New(msg)
     }
     newSecret, err := newSSHAuthSecret(job)
     if err != nil {

@@ -973,7 +974,7 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
         pod, err := c.podLister.Pods(mpiJob.Namespace).Get(workerName(mpiJob, i))

         // If the worker Pod doesn't exist, we'll create it.
-        if errors.IsNotFound(err) {
+        if apierrors.IsNotFound(err) {
             worker := c.newWorker(mpiJob, i)
             pod, err = c.kubeClient.CoreV1().Pods(mpiJob.Namespace).Create(context.TODO(), worker, metav1.CreateOptions{})
         }

@@ -989,7 +990,7 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
         if pod != nil && !metav1.IsControlledBy(pod, mpiJob) {
             msg := fmt.Sprintf(MessageResourceExists, pod.Name, pod.Kind)
             c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-            return nil, fmt.Errorf(msg)
+            return nil, errors.New(msg)
         }
         workerPods = append(workerPods, pod)
     }

@@ -1020,15 +1021,15 @@ func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
         pod, err := c.podLister.Pods(mpiJob.Namespace).Get(name)

         // If the worker Pod doesn't exist, no need to remove it.
-        if errors.IsNotFound(err) {
+        if apierrors.IsNotFound(err) {
             continue
         }
         // If the worker is not controlled by this MPIJob resource, we should log
         // a warning to the event recorder and return.
         if pod != nil && !metav1.IsControlledBy(pod, mpiJob) {
             msg := fmt.Sprintf(MessageResourceExists, pod.Name, pod.Kind)
             c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
-            return fmt.Errorf(msg)
+            return errors.New(msg)
         }
         // If the worker pod is not running and cleanupPolicy is
         // set to CleanPodPolicyRunning, keep the pod.

@@ -1039,7 +1040,7 @@ func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
             continue
         }
         err = c.kubeClient.CoreV1().Pods(mpiJob.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
-        if err != nil && !errors.IsNotFound(err) {
+        if err != nil && !apierrors.IsNotFound(err) {
             klog.Errorf("Failed to delete pod[%s/%s]: %v", mpiJob.Namespace, name, err)
             return err
         }
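
Because the controller previously imported k8s.io/apimachinery/pkg/api/errors under the name errors, switching to errors.New also meant aliasing the Kubernetes helpers to apierrors and importing the standard library errors package alongside them. A small self-contained sketch of the resulting arrangement, assuming the k8s.io/apimachinery module is available (the constant and function below are illustrative, not the controller's actual definitions):

package main

import (
    "errors"
    "fmt"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// Illustrative stand-in for the controller's MessageResourceExists constant.
const messageResourceExists = "resource %q already exists and is not managed by this MPIJob"

// ownershipError mirrors the pattern repeated throughout the diff: format the
// message once with fmt.Sprintf, then wrap the finished string with errors.New.
func ownershipError(name string) error {
    msg := fmt.Sprintf(messageResourceExists, name)
    return errors.New(msg) // fmt.Errorf(msg) here is what govet flagged
}

func main() {
    // The apierrors alias keeps IsNotFound/IsUnauthorized/IsForbidden available
    // without shadowing the standard library errors package.
    notFound := apierrors.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, "demo-launcher")
    fmt.Println("IsNotFound:", apierrors.IsNotFound(notFound))
    fmt.Println(ownershipError("demo-worker-0"))
}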
