OCPBUGS-37982: Bug fix: Reduce Frequency of Update Requests for Copied CSVs #3497

base: master
Changes from all commits: 138da4d, b46a9e9, dacf1f0, c21af9d, 09b63e1, 74bf0f7
```diff
@@ -360,7 +360,7 @@ func (a *Operator) pruneProvidedAPIs(group *operatorsv1.OperatorGroup, groupProv
 	}
 
 	// Prune providedAPIs annotation if the cluster has fewer providedAPIs (handles CSV deletion)
-	//if intersection := groupProvidedAPIs.Intersection(providedAPIsFromCSVs); len(intersection) < len(groupProvidedAPIs) {
+	// if intersection := groupProvidedAPIs.Intersection(providedAPIsFromCSVs); len(intersection) < len(groupProvidedAPIs) {
 	if len(intersection) < len(groupProvidedAPIs) {
 		difference := groupProvidedAPIs.Difference(intersection)
 		logger := logger.WithFields(logrus.Fields{
```
```diff
@@ -790,6 +790,14 @@ func copyableCSVHash(original *v1alpha1.ClusterServiceVersion) (string, string,
 	return newHash, originalHash, nil
 }
 
+const (
+	nonStatusCopyHashAnnotation = "olm.operatorframework.io/nonStatusCopyHash"
+	statusCopyHashAnnotation    = "olm.operatorframework.io/statusCopyHash"
+	// annotations for metadata drift guard
+	observedGenerationAnnotation      = "olm.operatorframework.io/observedGeneration"
+	observedResourceVersionAnnotation = "olm.operatorframework.io/observedResourceVersion"
+)
+
 // If returned error is not nil, the returned ClusterServiceVersion
 // has only the Name, Namespace, and UID fields set.
 func (a *Operator) copyToNamespace(prototype *v1alpha1.ClusterServiceVersion, nsFrom, nsTo, nonstatus, status string) (*v1alpha1.ClusterServiceVersion, error) {
```
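These two hash annotations are what let the copy loop avoid needless writes: the sync compares the hashes stored on the existing copy against freshly computed ones and skips API requests when nothing changed. Below is a minimal, self-contained sketch of that idea; the `copiedCSV` type and `hashOf` helper are simplified stand-ins (the real hashes come from `copyableCSVHash`, which hashes the non-status and status sections separately):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

const (
	nonStatusCopyHashAnnotation = "olm.operatorframework.io/nonStatusCopyHash"
	statusCopyHashAnnotation    = "olm.operatorframework.io/statusCopyHash"
)

// copiedCSV is a simplified stand-in for the real ClusterServiceVersion type.
type copiedCSV struct {
	Annotations map[string]string
}

// hashOf is a placeholder for copyableCSVHash.
func hashOf(s string) string {
	h := fnv.New64a()
	h.Write([]byte(s))
	return fmt.Sprintf("%x", h.Sum64())
}

// needsUpdate reports which writes the copy loop can skip: when both stored
// hashes already match the prototype's, no API request is issued at all.
func needsUpdate(existing *copiedCSV, nonstatus, status string) (specChanged, statusChanged bool) {
	return existing.Annotations[nonStatusCopyHashAnnotation] != nonstatus,
		existing.Annotations[statusCopyHashAnnotation] != status
}

func main() {
	existing := &copiedCSV{Annotations: map[string]string{
		nonStatusCopyHashAnnotation: hashOf("spec-v1"),
		statusCopyHashAnnotation:    hashOf("status-v1"),
	}}
	// Only the status changed, so only the status-related writes are needed.
	specChanged, statusChanged := needsUpdate(existing, hashOf("spec-v1"), hashOf("status-v2"))
	fmt.Printf("update spec: %v, update status: %v\n", specChanged, statusChanged)
}
```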
```diff
@@ -803,6 +811,7 @@ func (a *Operator) copyToNamespace(prototype *v1alpha1.ClusterServiceVersion, ns
 
 	existing, err := a.copiedCSVLister.Namespace(nsTo).Get(prototype.GetName())
 	if apierrors.IsNotFound(err) {
+		prototype.Annotations[nonStatusCopyHashAnnotation] = nonstatus
 		created, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Create(context.TODO(), prototype, metav1.CreateOptions{})
 		if err != nil {
 			return nil, fmt.Errorf("failed to create new CSV: %w", err)
@@ -811,6 +820,10 @@ func (a *Operator) copyToNamespace(prototype *v1alpha1.ClusterServiceVersion, ns
 		if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).UpdateStatus(context.TODO(), created, metav1.UpdateOptions{}); err != nil {
 			return nil, fmt.Errorf("failed to update status on new CSV: %w", err)
 		}
+		prototype.Annotations[statusCopyHashAnnotation] = status
+		if _, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Update(context.TODO(), prototype, metav1.UpdateOptions{}); err != nil {
+			return nil, fmt.Errorf("failed to update annotations after updating status: %w", err)
+		}
 		return &v1alpha1.ClusterServiceVersion{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: created.Name,
```
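The create path issues three writes in a deliberate order. Here is a sketch of that ordering; the `csvClient` interface and `copiedCSV` type are hypothetical stand-ins for the generated clientset and the real ClusterServiceVersion, not the actual OLM API:

```go
package csvcopy

import "fmt"

// copiedCSV is a simplified stand-in for v1alpha1.ClusterServiceVersion.
type copiedCSV struct {
	Annotations map[string]string
	Status      string // placeholder for the status subresource
}

// csvClient is a hypothetical subset of the clientset calls used above.
type csvClient interface {
	Create(c *copiedCSV) (*copiedCSV, error)
	Update(c *copiedCSV) (*copiedCSV, error)
	UpdateStatus(c *copiedCSV) (*copiedCSV, error)
}

// createCopy mirrors the create path: (1) Create carries only the non-status
// hash, (2) UpdateStatus fills in the status subresource, and (3) a final
// Update records the status hash. Writing the status hash last means a crash
// between steps 2 and 3 leaves a visible hash mismatch, which forces a clean
// resync on the next pass instead of silently skipping the copy.
func createCopy(client csvClient, prototype *copiedCSV, nonstatus, status string) (*copiedCSV, error) {
	prototype.Annotations["olm.operatorframework.io/nonStatusCopyHash"] = nonstatus
	created, err := client.Create(prototype)
	if err != nil {
		return nil, fmt.Errorf("failed to create new CSV: %w", err)
	}
	created.Status = prototype.Status
	if _, err := client.UpdateStatus(created); err != nil {
		return nil, fmt.Errorf("failed to update status on new CSV: %w", err)
	}
	prototype.Annotations["olm.operatorframework.io/statusCopyHash"] = status
	if _, err := client.Update(prototype); err != nil {
		return nil, fmt.Errorf("failed to update annotations after updating status: %w", err)
	}
	return created, nil
}
```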
```diff
@@ -821,15 +834,53 @@ func (a *Operator) copyToNamespace(prototype *v1alpha1.ClusterServiceVersion, ns
 	} else if err != nil {
 		return nil, err
 	}
+	// metadata drift guard: detect manual modifications to spec or status
+	if og, orv := existing.Annotations[observedGenerationAnnotation], existing.Annotations[observedResourceVersionAnnotation]; (og != "" && og != fmt.Sprint(existing.GetGeneration())) || (orv != "" && orv != existing.ResourceVersion) {
+		// full resync for metadata drift
+		// prepare prototype for update
+		prototype.Namespace = existing.Namespace
+		prototype.ResourceVersion = existing.ResourceVersion
+		prototype.UID = existing.UID
+		// sync hash annotations
+		prototype.Annotations[nonStatusCopyHashAnnotation] = nonstatus
+		prototype.Annotations[statusCopyHashAnnotation] = status
```
> **Review comment:** should we update the status hash post status update, i.e. with the observed resource version and generation annotations (for the same reason described in line 897)?
>
> **Reply:** done in soon to be pushed commit 4b21aa6e
```diff
+		// update spec and annotations
+		updated, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Update(context.TODO(), prototype, metav1.UpdateOptions{})
+		if err != nil {
+			return nil, fmt.Errorf("failed to resync spec for metadata drift guard: %w", err)
+		}
+		// update status subresource
+		updated.Status = prototype.Status
+		if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).UpdateStatus(context.TODO(), updated, metav1.UpdateOptions{}); err != nil {
+			return nil, fmt.Errorf("failed to resync status for metadata drift guard: %w", err)
+		}
+		// record observed generation and resourceVersion
+		updated.Annotations[observedGenerationAnnotation] = fmt.Sprint(updated.GetGeneration())
+		updated.Annotations[observedResourceVersionAnnotation] = updated.ResourceVersion
+		if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Update(context.TODO(), updated, metav1.UpdateOptions{}); err != nil {
+			return nil, fmt.Errorf("failed to update metadata guard annotations: %w", err)
+		}
+		return &v1alpha1.ClusterServiceVersion{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      updated.Name,
+				Namespace: updated.Namespace,
+				UID:       updated.UID,
+			},
+		}, nil
+	}
```
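The guard condition itself is only an annotation comparison. A self-contained sketch of the predicate follows (simplified `copiedCSV` type; in the real code the live values come from `GetGeneration()` and `ResourceVersion`, where generation is assumed to advance only on spec writes while resourceVersion advances on any write):

```go
package main

import "fmt"

const (
	observedGenerationAnnotation      = "olm.operatorframework.io/observedGeneration"
	observedResourceVersionAnnotation = "olm.operatorframework.io/observedResourceVersion"
)

// copiedCSV is a stand-in for the real ClusterServiceVersion type.
type copiedCSV struct {
	Annotations     map[string]string
	Generation      int64
	ResourceVersion string
}

// driftDetected mirrors the guard in the diff: a mismatch against the
// recorded values means someone modified the copy out of band. Empty
// annotations (copies created before this change) never trigger the guard.
func driftDetected(c *copiedCSV) bool {
	og := c.Annotations[observedGenerationAnnotation]
	orv := c.Annotations[observedResourceVersionAnnotation]
	return (og != "" && og != fmt.Sprint(c.Generation)) ||
		(orv != "" && orv != c.ResourceVersion)
}

func main() {
	c := &copiedCSV{
		Annotations: map[string]string{
			observedGenerationAnnotation:      "2",
			observedResourceVersionAnnotation: "1000",
		},
		Generation:      3, // a spec edit bumped the generation
		ResourceVersion: "1042",
	}
	fmt.Println(driftDetected(c)) // true: full resync required
}
```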
```diff
 	prototype.Namespace = existing.Namespace
 	prototype.ResourceVersion = existing.ResourceVersion
 	prototype.UID = existing.UID
-	existingNonStatus := existing.Annotations["$copyhash-nonstatus"]
-	existingStatus := existing.Annotations["$copyhash-status"]
+	// Get the non-status and status hash of the existing copied CSV
+	existingNonStatus := existing.Annotations[nonStatusCopyHashAnnotation]
+	existingStatus := existing.Annotations[statusCopyHashAnnotation]
 
 	var updated *v1alpha1.ClusterServiceVersion
+	// Always set the in-memory prototype's nonstatus annotation:
+	prototype.Annotations[nonStatusCopyHashAnnotation] = nonstatus
 	if existingNonStatus != nonstatus {
+		// include updates to the non-status hash annotation if there is a mismatch
 		if updated, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Update(context.TODO(), prototype, metav1.UpdateOptions{}); err != nil {
 			return nil, fmt.Errorf("failed to update: %w", err)
 		}
```
```diff
@@ -843,6 +894,17 @@ func (a *Operator) copyToNamespace(prototype *v1alpha1.ClusterServiceVersion, ns
 		if _, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).UpdateStatus(context.TODO(), updated, metav1.UpdateOptions{}); err != nil {
 			return nil, fmt.Errorf("failed to update status: %w", err)
 		}
+		// Update the status first if the existing copied CSV status hash doesn't match what we expect
+		// to prevent a scenario where the hash annotations match but the contents do not.
+		// We also need to update the CSV itself in this case to ensure we set the status hash annotation.
+		prototype.Annotations[statusCopyHashAnnotation] = status
+		if updated, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Update(context.TODO(), prototype, metav1.UpdateOptions{}); err != nil {
+			return nil, fmt.Errorf("failed to update: %w", err)
+		}
+	} else {
+		// Even if they're the same, ensure the returned prototype is annotated.
+		prototype.Annotations[statusCopyHashAnnotation] = status
+		updated = prototype
 	}
```
> **Review comment:** From the code implemented in this PR to the current state, the main addition seems to be this else block (beyond tests). I'm not entirely sure I fully understand: are we also looking to implement what's outlined in the doc?
>
> **Reply:** This is a first pass: basically, just merge the old PR; that's the path we're taking with this PR. But the else is not the only thing done here. The main thing added is the tracking hashes, so we can tell what's in need of update.
>
> **Review comment:** We previously agreed that the old PR wasn't quite the right approach, correct? Given that, I'm not sure it makes sense to merge it as-is. If we need to do a release before we have the proper solution in place, we might include a change we don't want. That doesn't seem ideal to me. That is a case where I would request changes, since it does not provide the desired solution or fix the problem as defined in the doc (see the doc's section about that). It's fine to add it as you did, but what do you think about creating a commit on top with the solution we intend to use? Could we focus on implementing the correct fix for the bug instead? c/c @tmshort
>
> **Reply:** Please see @tmshort's comment on the doc. The idea is that merging this PR is a first step: it gives some relief, and then we'll make another pass after this settles. Settling involves seeing much less API activity, especially on clusters with many namespaces for the CSV to be copied to.
>
> **Reply:** And settling also involves seeing whether the things mentioned in the doc (primarily, OLM not correcting user-modified copied CSVs) will be a real-world problem.
>
> **Review comment:** My understanding is we have two problems:
>
> 1. the API server and log spamming caused by constantly re-issuing copied-CSV update requests, and
> 2. copied CSVs that a user modifies out of band not being reconciled back to the intended spec.
>
> The proposed approach is to separate these two problems by resolving 1 (which has a material impact on the cluster and the customer's bottom line) and later handling 2. It's my understanding that changes to copied CSVs don't carry any behavioral changes in the system anyway; they only exist to make it possible to discover which operators are available in a particular namespace with a kubectl command. Also, I'd assume that write access to CSVs will be restricted in most real-world cases to the cluster admin and the namespace admin. If these two assumptions hold, I think the blast radius of modifying the copied CSV and not having it reconcile back to the intended spec should be pretty small. So I tend to agree with the approach here: let's address the big problem of API server/log spamming, then worry about the relatively small problem of inconsistent copied CSVs.
```diff
 	return &v1alpha1.ClusterServiceVersion{
 		ObjectMeta: metav1.ObjectMeta{
```
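Putting the update path together, here is a sketch of the full decision flow, reusing the hypothetical `csvClient` and `copiedCSV` from the create-path snippet above. The shape of the logic is what reduces request frequency: a fully in-sync copy costs zero API requests, a spec-only change costs one, and a status change costs at most three:

```go
package csvcopy

import "fmt"

// syncExisting sketches the update path for an existing copy. It assumes the
// annotation keys and types defined in the create-path sketch.
func syncExisting(client csvClient, prototype, existing *copiedCSV, nonstatus, status string) (*copiedCSV, error) {
	existingNonStatus := existing.Annotations["olm.operatorframework.io/nonStatusCopyHash"]
	existingStatus := existing.Annotations["olm.operatorframework.io/statusCopyHash"]

	var updated *copiedCSV
	var err error
	// Always annotate the in-memory prototype, even when no write is needed,
	// so whatever object is returned carries the current hash.
	prototype.Annotations["olm.operatorframework.io/nonStatusCopyHash"] = nonstatus
	if existingNonStatus != nonstatus {
		// Non-status content drifted: one Update fixes spec and annotation.
		if updated, err = client.Update(prototype); err != nil {
			return nil, fmt.Errorf("failed to update: %w", err)
		}
	} else {
		updated = prototype
	}
	if existingStatus != status {
		// Status first, annotation second: if the annotation landed first and
		// the status write then failed, the hashes would claim the copy is in
		// sync while its contents are not.
		updated.Status = prototype.Status
		if _, err := client.UpdateStatus(updated); err != nil {
			return nil, fmt.Errorf("failed to update status: %w", err)
		}
		prototype.Annotations["olm.operatorframework.io/statusCopyHash"] = status
		if updated, err = client.Update(prototype); err != nil {
			return nil, fmt.Errorf("failed to update: %w", err)
		}
	} else {
		// Even if they're the same, ensure the returned prototype is annotated.
		prototype.Annotations["olm.operatorframework.io/statusCopyHash"] = status
		updated = prototype
	}
	return updated, nil
}
```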
```diff
@@ -939,7 +1001,6 @@ func namespacesChanged(clusterNamespaces []string, statusNamespaces []string) bo
 
 func (a *Operator) getOperatorGroupTargets(op *operatorsv1.OperatorGroup) (map[string]struct{}, error) {
 	selector, err := metav1.LabelSelectorAsSelector(op.Spec.Selector)
-
 	if err != nil {
 		return nil, err
 	}
```
> **Review comment:** Because `copyToNamespace` is called in a loop, `prototype`, being a pointer, is reused multiple times. Which means that these annotations may already be set. Is there any reason why these annotations simply aren't set in `ensureCSVsInNamespaces()`, where the hashes are calculated?
>
> **Reply:** Good point, possibly. Checking...
>
> **Reply:** Looking at it closer, it seems like we shouldn't change it. Here's my reasoning: keeping the annotation logic here, in `copyToNamespace()`, encapsulates the update semantics so each call handles its own CSV's state reliably. We're reusing the prototype and accounting for possibly-set annotations. If we move the logic to `ensureCSVsInNamespaces()`, we'll have to duplicate the annotation-checking logic, because the logic for handling those annotations is tightly coupled with the CSV's create/update lifecycle. In `copyToNamespace()` we need to:
>
> - Distinguish between a new creation (where the annotations don't exist yet) and an update (where the annotations might already be set but could be outdated).
> - Apply the updates in a specific order (first updating the non-status hash, then the status hash, including a status update to avoid mismatches).
> - Ensure that each target CSV reflects the current state as expected for that specific namespace.
>
> Aside from the hash handling, we'd still need to be doing the above work in `copyToNamespace()`. A toy illustration of the pointer-reuse point follows below.
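A tiny sketch of why the annotation writes have to tolerate an already-annotated prototype (toy loop with hypothetical values; in the real code the loop over target namespaces lives in `ensureCSVsInNamespaces()`):

```go
package main

import "fmt"

// copyToNamespace receives the same prototype pointer for every target
// namespace, so the hash annotations may already be set from a previous
// iteration. Plain map assignment makes the write idempotent.
func main() {
	prototype := map[string]string{} // stands in for prototype.Annotations
	for _, ns := range []string{"ns-a", "ns-b", "ns-c"} {
		// Overwriting is safe: the same hash value, computed once per sync,
		// is simply re-assigned on each iteration.
		prototype["olm.operatorframework.io/nonStatusCopyHash"] = "hash-of-nonstatus"
		fmt.Printf("copy to %s carries annotations %v\n", ns, prototype)
	}
}
```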