22 changes: 11 additions & 11 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -41,7 +41,7 @@ k8s-openapi = { version = "0.27.0", default-features = false, features = ["schem
# We use rustls instead of openssl for easier portability, e.g. so that we can build stackablectl without the need to vendor (build from source) openssl
# We use ring instead of aws-lc-rs, as this currently fails to build in "make run-dev"
# We need a few schema fixes in kube that went into main but are not released yet
-kube = { git = "https://github.com/kube-rs/kube-rs", rev = "fe69cc486ff8e62a7da61d64ec3ebbd9e64c43b5", version = "=3.0.1", default-features = false, features = ["client", "jsonpatch", "runtime", "derive", "admission", "rustls-tls", "ring"] }
+kube = { git = "https://github.com/kube-rs/kube-rs", rev = "1320643f8ce7f8189e03496ff1329d678d76224c", version = "=3.0.1", default-features = false, features = ["client", "jsonpatch", "runtime", "derive", "admission", "rustls-tls", "ring"] }
opentelemetry = "0.31.0"
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
opentelemetry-appender-tracing = "0.31.0"
5 changes: 5 additions & 0 deletions crates/stackable-operator/src/cluster_resources.rs
@@ -13,6 +13,7 @@ use k8s_openapi::{
        apps::v1::{
            DaemonSet, DaemonSetSpec, Deployment, DeploymentSpec, StatefulSet, StatefulSetSpec,
        },
+        autoscaling::v2::HorizontalPodAutoscaler,
        batch::v1::Job,
        core::v1::{
            ConfigMap, ObjectReference, PodSpec, PodTemplateSpec, Secret, Service, ServiceAccount,
@@ -221,7 +222,9 @@ impl ClusterResource for Service {}
impl ClusterResource for ServiceAccount {}
impl ClusterResource for RoleBinding {}
impl ClusterResource for PodDisruptionBudget {}
+impl ClusterResource for HorizontalPodAutoscaler {}
impl ClusterResource for listener::v1alpha1::Listener {}
+impl ClusterResource for crate::crd::scaler::v1alpha1::StackableScaler {}
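// These empty impls are pure markers: the kinds above carry no pod template, so
// the trait's default method bodies suffice. Job below overrides pod_spec instead.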

impl ClusterResource for Job {
    fn pod_spec(&self) -> Option<&PodSpec> {
@@ -681,6 +684,8 @@ impl<'a> ClusterResources<'a> {
            self.delete_orphaned_resources_of_kind::<RoleBinding>(client),
            self.delete_orphaned_resources_of_kind::<PodDisruptionBudget>(client),
            self.delete_orphaned_resources_of_kind::<listener::v1alpha1::Listener>(client),
+            self.delete_orphaned_resources_of_kind::<crate::crd::scaler::v1alpha1::StackableScaler>(client),
+            self.delete_orphaned_resources_of_kind::<HorizontalPodAutoscaler>(client),
        )?;

        Ok(())
1 change: 1 addition & 0 deletions crates/stackable-operator/src/crd/mod.rs
@@ -8,6 +8,7 @@ pub mod authentication;
pub mod git_sync;
pub mod listener;
pub mod s3;
+pub mod scaler;

/// A reference to a product cluster (for example, a `ZookeeperCluster`)
///
241 changes: 241 additions & 0 deletions crates/stackable-operator/src/crd/scaler/builder.rs
@@ -0,0 +1,241 @@
//! Builder helper for constructing [`StackableScaler`] objects with proper metadata.
//!
//! Product operators use [`build_scaler`] to create a `StackableScaler` for each
//! auto-scaled role group, ensuring that all required labels are set so that
//! [`ClusterResources::add`](crate::cluster_resources::ClusterResources::add) validation passes.
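//!
//! A rough usage sketch (the reconcile-context values `client`, `cluster_resources`,
//! and `owner_ref` are assumed to already exist; the literal arguments are illustrative):
//!
//! ```ignore
//! let scaler = build_scaler(
//!     "my-nifi", "nifi", "default", "nodes", "default", 2, &owner_ref, "nifi-operator",
//! )?;
//! cluster_resources.add(client, scaler).await?;
//! ```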

use k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference;
use snafu::{ResultExt, Snafu};

use crate::{
    builder::meta::ObjectMetaBuilder,
    kvp::{Label, LabelError, Labels, consts::K8S_APP_MANAGED_BY_KEY},
};

use super::v1alpha1::{StackableScaler, StackableScalerSpec};

/// Error returned by [`build_scaler`] and [`build_hpa_from_user_spec`](super::build_hpa_from_user_spec).
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(super)))]
pub enum BuildScalerError {
    /// A label value failed validation.
    #[snafu(display("failed to construct label for scaler"))]
    Label { source: LabelError },

    /// The metadata builder failed (e.g. missing owner reference fields).
    #[snafu(display("failed to build ObjectMeta for scaler"))]
    ObjectMeta { source: crate::builder::meta::Error },
}

/// Constructs a [`StackableScaler`] with the required labels and owner reference.
///
/// The generated scaler name follows the convention
/// `{cluster_name}-{role}-{role_group}-scaler`.
///
/// # Labels
///
/// The following `app.kubernetes.io` labels are set:
///
/// | Key | Value |
/// |-----|-------|
/// | `app.kubernetes.io/name` | `app_name` |
/// | `app.kubernetes.io/instance` | `cluster_name` |
/// | `app.kubernetes.io/managed-by` | `managed_by` |
/// | `app.kubernetes.io/component` | `role` |
/// | `app.kubernetes.io/role-group` | `role_group` |
///
/// # Errors
///
/// Returns [`BuildScalerError::Label`] if any label value is invalid.
/// Returns [`BuildScalerError::ObjectMeta`] if the owner reference cannot be set.
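///
/// # Example
///
/// A minimal sketch of the generated name (argument values are illustrative;
/// the tests below assert the full label set):
///
/// ```ignore
/// let scaler = build_scaler(
///     "my-nifi", "nifi", "default", "nodes", "default", 3, &owner_ref, "nifi-operator",
/// )?;
/// assert_eq!(
///     scaler.metadata.name.as_deref(),
///     Some("my-nifi-nodes-default-scaler")
/// );
/// ```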
// `clippy::too_many_arguments` suppressed: these parameters correspond 1:1 to the
// distinct Kubernetes metadata fields required on a StackableScaler. Grouping them
// into a struct would just push the field list one level deeper without reducing
// cognitive load, since callers already have each value as a separate variable.
#[allow(clippy::too_many_arguments)]
pub fn build_scaler(
    cluster_name: &str,
    app_name: &str,
    namespace: &str,
    role: &str,
    role_group: &str,
    initial_replicas: i32,
    owner_ref: &OwnerReference,
    managed_by: &str,
) -> Result<StackableScaler, BuildScalerError> {
    let scaler_name = format!("{cluster_name}-{role}-{role_group}-scaler");

    // Build the label set: name + instance + component + role-group + managed-by
    let mut labels = Labels::common(app_name, cluster_name).context(LabelSnafu)?;
    labels.insert(Label::component(role).context(LabelSnafu)?);
    labels.insert(Label::role_group(role_group).context(LabelSnafu)?);
    labels.insert(Label::try_from((K8S_APP_MANAGED_BY_KEY, managed_by)).context(LabelSnafu)?);

    let metadata = ObjectMetaBuilder::new()
        .name(&scaler_name)
        .namespace(namespace)
        .ownerreference(owner_ref.clone())
        .with_labels(labels)
        .build();

    Ok(StackableScaler {
        metadata,
        spec: StackableScalerSpec {
            replicas: initial_replicas,
        },
        status: None,
    })
}

#[cfg(test)]
mod tests {
    use k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference;

    use super::*;

    fn test_owner_ref() -> OwnerReference {
        OwnerReference {
            api_version: "nifi.stackable.tech/v1alpha1".to_string(),
            kind: "NifiCluster".to_string(),
            name: "my-nifi".to_string(),
            uid: "abc-123".to_string(),
            controller: Some(true),
            block_owner_deletion: Some(true),
        }
    }

    #[test]
    fn build_scaler_sets_replicas() {
        let owner_ref = test_owner_ref();
        let scaler = build_scaler(
            "my-nifi",
            "nifi",
            "default",
            "nodes",
            "default",
            3,
            &owner_ref,
            "nifi-operator",
        )
        .expect("build_scaler should succeed");

        assert_eq!(scaler.spec.replicas, 3);
    }

    #[test]
    fn build_scaler_sets_owner_reference() {
        let owner_ref = test_owner_ref();
        let scaler = build_scaler(
            "my-nifi",
            "nifi",
            "default",
            "nodes",
            "default",
            1,
            &owner_ref,
            "nifi-operator",
        )
        .expect("build_scaler should succeed");

        let refs = scaler
            .metadata
            .owner_references
            .as_ref()
            .expect("owner_references should be set");
        assert_eq!(refs.len(), 1);
        assert_eq!(refs[0].name, "my-nifi");
        assert_eq!(refs[0].kind, "NifiCluster");
        assert_eq!(refs[0].uid, "abc-123");
    }

    #[test]
    fn build_scaler_sets_required_labels() {
        let owner_ref = test_owner_ref();
        let scaler = build_scaler(
            "my-nifi",
            "nifi",
            "default",
            "nodes",
            "default",
            1,
            &owner_ref,
            "nifi-operator",
        )
        .expect("build_scaler should succeed");

        let labels = scaler
            .metadata
            .labels
            .as_ref()
            .expect("labels should be set");

        assert_eq!(
            labels.get("app.kubernetes.io/name"),
            Some(&"nifi".to_string()),
            "app.kubernetes.io/name should be the app_name"
        );
        assert_eq!(
            labels.get("app.kubernetes.io/instance"),
            Some(&"my-nifi".to_string()),
            "app.kubernetes.io/instance should be the cluster_name"
        );
        assert_eq!(
            labels.get("app.kubernetes.io/managed-by"),
            Some(&"nifi-operator".to_string()),
            "app.kubernetes.io/managed-by should be managed_by"
        );
        assert_eq!(
            labels.get("app.kubernetes.io/component"),
            Some(&"nodes".to_string()),
            "app.kubernetes.io/component should be the role"
        );
        assert_eq!(
            labels.get("app.kubernetes.io/role-group"),
            Some(&"default".to_string()),
            "app.kubernetes.io/role-group should be the role_group"
        );
    }

    #[test]
    fn build_scaler_generates_correct_name() {
        let owner_ref = test_owner_ref();
        let scaler = build_scaler(
            "my-nifi",
            "nifi",
            "production",
            "nodes",
            "workers",
            5,
            &owner_ref,
            "nifi-operator",
        )
        .expect("build_scaler should succeed");

        assert_eq!(
            scaler.metadata.name.as_deref(),
            Some("my-nifi-nodes-workers-scaler")
        );
        assert_eq!(scaler.metadata.namespace.as_deref(), Some("production"));
    }

    #[test]
    fn build_scaler_status_is_none() {
        let owner_ref = test_owner_ref();
        let scaler = build_scaler(
            "my-nifi",
            "nifi",
            "default",
            "nodes",
            "default",
            1,
            &owner_ref,
            "nifi-operator",
        )
        .expect("build_scaler should succeed");

        assert!(
            scaler.status.is_none(),
            "status should be None on a newly built scaler"
        );
    }
}