Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make k8s plugin fields private #5441

Merged
merged 7 commits into from
Dec 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions pkg/app/pipedv1/plugin/kubernetes/deployment/annotate.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,12 +31,12 @@ func annotateConfigHash(manifests []provider.Manifest) error {
configMaps := make(map[string]provider.Manifest)
secrets := make(map[string]provider.Manifest)
for _, m := range manifests {
if m.Key.IsConfigMap() {
configMaps[m.Key.Name] = m
if m.Key().IsConfigMap() {
configMaps[m.Key().Name()] = m
continue
}
if m.Key.IsSecret() {
secrets[m.Key.Name] = m
if m.Key().IsSecret() {
secrets[m.Key().Name()] = m
}
}

Expand All @@ -47,7 +47,7 @@ func annotateConfigHash(manifests []provider.Manifest) error {
}

for _, m := range manifests {
if m.Key.IsDeployment() {
if m.Key().IsDeployment() {
if err := annotateConfigHashToWorkload(m, configMaps, secrets); err != nil {
return err
}
Expand All @@ -60,8 +60,8 @@ func annotateConfigHash(manifests []provider.Manifest) error {
}

func annotateConfigHashToWorkload(m provider.Manifest, managedConfigMaps, managedSecrets map[string]provider.Manifest) error {
configMaps := provider.FindReferencingConfigMaps(m.Body)
secrets := provider.FindReferencingSecrets(m.Body)
configMaps := provider.FindReferencingConfigMaps(m)
secrets := provider.FindReferencingSecrets(m)

// The deployment is not referencing any config resources.
if len(configMaps)+len(secrets) == 0 {
Expand Down
20 changes: 10 additions & 10 deletions pkg/app/pipedv1/plugin/kubernetes/deployment/apply.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,43 +34,43 @@ func applyManifests(ctx context.Context, applier applier, manifests []provider.M
// 1. force-sync-by-replace
// 2. sync-by-replace
// 3. others
if annotation := m.Body.GetAnnotations()[provider.LabelForceSyncReplace]; annotation == provider.UseReplaceEnabled {
if annotation := m.GetAnnotations()[provider.LabelForceSyncReplace]; annotation == provider.UseReplaceEnabled {
// Always try to replace first and create if it fails due to resource not found error.
// This is because we cannot know whether resource already exists before executing command.
err := applier.ForceReplaceManifest(ctx, m)
if errors.Is(err, provider.ErrNotFound) {
lp.Infof("Specified resource does not exist, so create the resource: %s (%w)", m.Key.ReadableString(), err)
lp.Infof("Specified resource does not exist, so create the resource: %s (%w)", m.Key().ReadableString(), err)
err = applier.CreateManifest(ctx, m)
}
if err != nil {
lp.Errorf("Failed to forcefully replace or create manifest: %s (%w)", m.Key.ReadableString(), err)
lp.Errorf("Failed to forcefully replace or create manifest: %s (%w)", m.Key().ReadableString(), err)
return err
}
lp.Successf("- forcefully replaced or created manifest: %s", m.Key.ReadableString())
lp.Successf("- forcefully replaced or created manifest: %s", m.Key().ReadableString())
continue
}

if annotation := m.Body.GetAnnotations()[provider.LabelSyncReplace]; annotation == provider.UseReplaceEnabled {
if annotation := m.GetAnnotations()[provider.LabelSyncReplace]; annotation == provider.UseReplaceEnabled {
// Always try to replace first and create if it fails due to resource not found error.
// This is because we cannot know whether resource already exists before executing command.
err := applier.ReplaceManifest(ctx, m)
if errors.Is(err, provider.ErrNotFound) {
lp.Infof("Specified resource does not exist, so create the resource: %s (%w)", m.Key.ReadableString(), err)
lp.Infof("Specified resource does not exist, so create the resource: %s (%w)", m.Key().ReadableString(), err)
err = applier.CreateManifest(ctx, m)
}
if err != nil {
lp.Errorf("Failed to replace or create manifest: %s (%w)", m.Key.ReadableString(), err)
lp.Errorf("Failed to replace or create manifest: %s (%w)", m.Key().ReadableString(), err)
return err
}
lp.Successf("- replaced or created manifest: %s", m.Key.ReadableString())
lp.Successf("- replaced or created manifest: %s", m.Key().ReadableString())
continue
}

if err := applier.ApplyManifest(ctx, m); err != nil {
lp.Errorf("Failed to apply manifest: %s (%w)", m.Key.ReadableString(), err)
lp.Errorf("Failed to apply manifest: %s (%w)", m.Key().ReadableString(), err)
return err
}
lp.Successf("- applied manifest: %s", m.Key.ReadableString())
lp.Successf("- applied manifest: %s", m.Key().ReadableString())
continue

}
Expand Down
92 changes: 13 additions & 79 deletions pkg/app/pipedv1/plugin/kubernetes/deployment/determine.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ import (
"strings"

"go.uber.org/zap"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

"github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/config"
"github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/provider"
Expand Down Expand Up @@ -51,32 +50,8 @@ func parseContainerImage(image string) (img containerImage) {
func determineVersions(manifests []provider.Manifest) ([]*model.ArtifactVersion, error) {
imageMap := map[string]struct{}{}
for _, m := range manifests {
// TODO: we should consider other fields like spec.jobTempate.spec.template.spec.containers because CronJob uses this format.
containers, ok, err := unstructured.NestedSlice(m.Body.Object, "spec", "template", "spec", "containers")
if err != nil {
// if the containers field is not an array, it will return an error.
// we define this as error because the 'containers' is plural form, so it should be an array.
return nil, err
}
if !ok {
continue
}
// Remove duplicate images on multiple manifests.
for _, c := range containers {
m, ok := c.(map[string]interface{})
if !ok {
// TODO: Add logging.
continue
}
img, ok := m["image"]
if !ok {
continue
}
imgStr, ok := img.(string)
if !ok {
return nil, fmt.Errorf("invalid image format: %T(%v)", img, img)
}
imageMap[imgStr] = struct{}{}
for _, c := range provider.FindContainerImages(m) {
imageMap[c] = struct{}{}
}
}

Expand All @@ -98,10 +73,10 @@ func determineVersions(manifests []provider.Manifest) ([]*model.ArtifactVersion,
func findManifests(kind, name string, manifests []provider.Manifest) []provider.Manifest {
out := make([]provider.Manifest, 0, len(manifests))
for _, m := range manifests {
if m.Body.GetKind() != kind {
if m.Key().Kind() != kind {
continue
}
if name != "" && m.Body.GetName() != name {
if name != "" && m.Key().Name() != name {
continue
}
out = append(out, m)
Expand Down Expand Up @@ -133,47 +108,6 @@ type workloadPair struct {
new provider.Manifest
}

// findUpdatedWorkloads pairs each workload manifest in news with the manifest
// in olds that refers to the same resource, so callers can diff old vs new.
// Matching is done on a normalized resource key; workloads that exist on only
// one side are omitted from the result.
func findUpdatedWorkloads(olds, news []provider.Manifest) []workloadPair {
	pairs := make([]workloadPair, 0)
	oldMap := make(map[provider.ResourceKey]provider.Manifest, len(olds))
	// Fixed typo: was "nomalizeKey".
	normalizeKey := func(k provider.ResourceKey) provider.ResourceKey {
		// Ignoring APIVersion because user can upgrade to the new APIVersion for the same workload.
		k.APIVersion = ""
		// Treat an explicit default namespace the same as an empty one so the
		// two spellings of the same namespace still match.
		if k.Namespace == provider.DefaultNamespace {
			k.Namespace = ""
		}
		return k
	}
	for _, m := range olds {
		oldMap[normalizeKey(m.Key)] = m
	}
	for _, n := range news {
		if o, ok := oldMap[normalizeKey(n.Key)]; ok {
			pairs = append(pairs, workloadPair{
				old: o,
				new: n,
			})
		}
	}
	return pairs
}

// findConfigsAndSecrets collects the ConfigMap and Secret manifests from the
// given list, keyed by their resource key. Other kinds are ignored.
func findConfigsAndSecrets(manifests []provider.Manifest) map[provider.ResourceKey]provider.Manifest {
	out := make(map[provider.ResourceKey]provider.Manifest)
	for _, m := range manifests {
		if m.Key.IsConfigMap() || m.Key.IsSecret() {
			out[m.Key] = m
		}
	}
	return out
}

func checkImageChange(ns diff.Nodes) (string, bool) {
const containerImageQuery = `^spec\.template\.spec\.containers\.\d+.image$`
nodes, _ := ns.Find(containerImageQuery)
Expand Down Expand Up @@ -234,32 +168,32 @@ func determineStrategy(olds, news []provider.Manifest, workloadRefs []config.K8s
return model.SyncStrategy_QUICK_SYNC, "Quick sync by applying all manifests because it was unable to find workloads in the new manifests"
}

workloads := findUpdatedWorkloads(oldWorkloads, newWorkloads)
workloads := provider.FindSameManifests(oldWorkloads, newWorkloads)
diffs := make(map[provider.ResourceKey]diff.Nodes, len(workloads))

for _, w := range workloads {
// If the workload's pod template was touched
// do progressive deployment with the specified pipeline.
diffResult, err := provider.Diff(w.old, w.new, logger)
diffResult, err := provider.Diff(w.Old, w.New, logger)
if err != nil {
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively due to an error while calculating the diff (%v)", err)
}
diffNodes := diffResult.Nodes()
diffs[w.new.Key] = diffNodes
diffs[w.New.Key()] = diffNodes

templateDiffs := diffNodes.FindByPrefix("spec.template")
if len(templateDiffs) > 0 {
if msg, changed := checkImageChange(templateDiffs); changed {
return model.SyncStrategy_PIPELINE, msg
}
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because pod template of workload %s was changed", w.new.Key.Name)
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because pod template of workload %s was changed", w.New.Key().Name())
}
}

// If the config/secret was touched, we also need to do progressive
// deployment to check run with the new config/secret content.
oldConfigs := findConfigsAndSecrets(olds)
newConfigs := findConfigsAndSecrets(news)
oldConfigs := provider.FindConfigsAndSecrets(olds)
newConfigs := provider.FindConfigsAndSecrets(news)
if len(oldConfigs) > len(newConfigs) {
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %d configmap/secret deleted", len(oldConfigs)-len(newConfigs))
}
Expand All @@ -269,22 +203,22 @@ func determineStrategy(olds, news []provider.Manifest, workloadRefs []config.K8s
for k, oc := range oldConfigs {
nc, ok := newConfigs[k]
if !ok {
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was deleted", oc.Key.Kind, oc.Key.Name)
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was deleted", oc.Key().Kind(), oc.Key().Name())
}
result, err := provider.Diff(oc, nc, logger)
if err != nil {
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively due to an error while calculating the diff (%v)", err)
}
if result.HasDiff() {
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was updated", oc.Key.Kind, oc.Key.Name)
return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was updated", oc.Key().Kind(), oc.Key().Name())
}
}

// Check if this is a scaling commit.
scales := make([]string, 0, len(diffs))
for k, d := range diffs {
if before, after, changed := checkReplicasChange(d); changed {
scales = append(scales, fmt.Sprintf("%s/%s from %s to %s", k.Kind, k.Name, before, after))
scales = append(scales, fmt.Sprintf("%s/%s from %s to %s", k.Kind(), k.Name(), before, after))
}

}
Expand Down
Loading
Loading