Skip to content

test(policy): Reduce duplication in outbound policy API tests #13543

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 9 commits into from
Jan 13, 2025
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 0 additions & 16 deletions policy-test/src/grpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -291,22 +291,6 @@ impl OutboundPolicyClient {
Ok(rsp.into_inner())
}

/// Starts an outbound-policy watch for the given `Service` on `port`.
///
/// Looks up the service's cluster IP from its spec and delegates to
/// [`Self::watch_ip`].
///
/// # Panics
///
/// Panics if the service has no spec or the spec has no cluster IP.
pub async fn watch(
    &mut self,
    ns: &str,
    svc: &k8s::Service,
    port: u16,
) -> Result<tonic::Streaming<outbound::OutboundPolicy>, tonic::Status> {
    let spec = svc.spec.as_ref().expect("Service must have a spec");
    let ip = spec
        .cluster_ip
        .as_ref()
        .expect("Service must have a cluster ip");
    self.watch_ip(ns, ip, port).await
}

pub async fn watch_ip(
&mut self,
ns: &str,
Expand Down
59 changes: 35 additions & 24 deletions policy-test/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ pub mod bb;
pub mod curl;
pub mod grpc;
pub mod outbound_api;
pub mod test_route;
pub mod web;

use kube::runtime::wait::Condition;
Expand All @@ -15,6 +16,7 @@ use linkerd_policy_controller_k8s_api::{
ResourceExt,
};
use maplit::{btreemap, convert_args};
use test_route::TestRoute;
use tokio::time;
use tracing::Instrument;

Expand Down Expand Up @@ -206,24 +208,25 @@ pub async fn await_pod_ip(client: &kube::Client, ns: &str, name: &str) -> std::n
.expect("pod IP must be valid")
}

// Waits until an HttpRoute with the given namespace and name has a status set
// on it, then returns the generic route status representation.
pub async fn await_route_status(
client: &kube::Client,
ns: &str,
name: &str,
) -> k8s::policy::httproute::RouteStatus {
use k8s::policy::httproute as api;
let route_status = await_condition(client, ns, name, |obj: Option<&api::HttpRoute>| -> bool {
obj.and_then(|route| route.status.as_ref()).is_some()
})
.await
.expect("must fetch route")
.status
.expect("route must contain a status representation")
.inner;
tracing::trace!(?route_status, name, ns, "got route status");
route_status
// Waits until the given route resource's status reports it as accepted.
//
// Polls the cluster until the object's status conditions satisfy
// `is_status_accepted`; an absent object or absent conditions count as
// not-yet-accepted.
pub async fn await_route_accepted<R: TestRoute>(client: &kube::Client, route: &R) {
    let ns = route.namespace().unwrap();
    let name = route.name_unchecked();
    let accepted = |obj: Option<&R>| -> bool {
        match obj {
            Some(route) => {
                // Clone the borrowed conditions into an owned Vec so they can
                // be handed to `is_status_accepted` as a slice.
                let conditions: Vec<_> = route
                    .conditions()
                    .unwrap_or_default()
                    .into_iter()
                    .cloned()
                    .collect();
                is_status_accepted(&conditions)
            }
            None => false,
        }
    };
    await_condition(client, &ns, &name, accepted).await;
}

// Waits until an HttpRoute with the given namespace and name has a status set
Expand Down Expand Up @@ -591,17 +594,25 @@ pub fn mk_egress_net(ns: &str, name: &str) -> k8s::policy::EgressNetwork {
}

#[track_caller]
pub fn assert_resource_meta(meta: &Option<grpc::meta::Metadata>, resource: &Resource, port: u16) {
pub fn assert_resource_meta(
meta: &Option<grpc::meta::Metadata>,
parent_ref: ParentReference,
port: u16,
) {
println!("meta: {:?}", meta);
tracing::debug!(?meta, ?resource, port, "Asserting service metadata");
tracing::debug!(?meta, ?parent_ref, port, "Asserting parent metadata");
let mut group = parent_ref.group.unwrap();
if group.is_empty() {
group = "core".to_string();
}
assert_eq!(
meta,
&Some(grpc::meta::Metadata {
kind: Some(grpc::meta::metadata::Kind::Resource(grpc::meta::Resource {
group: resource.group(),
kind: resource.kind(),
name: resource.name(),
namespace: resource.namespace(),
group,
kind: parent_ref.kind.unwrap(),
name: parent_ref.name,
namespace: parent_ref.namespace.unwrap(),
section: "".to_string(),
port: port.into()
})),
Expand Down
99 changes: 35 additions & 64 deletions policy-test/src/outbound_api.rs
Original file line number Diff line number Diff line change
@@ -1,27 +1,23 @@
use crate::{assert_resource_meta, grpc, Resource};
use crate::{grpc, test_route::TestRoute, Resource};
use k8s_gateway_api::ParentReference;
use kube::ResourceExt;
use std::time::Duration;
use tokio::time;

pub async fn retry_watch_outbound_policy(
client: &kube::Client,
ns: &str,
resource: &Resource,
ip: &str,
port: u16,
) -> tonic::Streaming<grpc::outbound::OutboundPolicy> {
// Port-forward to the control plane and start watching the service's
// outbound policy.
let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(client).await;
loop {
match policy_api.watch_ip(ns, &resource.ip(), port).await {
match policy_api.watch_ip(ns, ip, port).await {
Ok(rx) => return rx,
Err(error) => {
tracing::error!(
?error,
ns,
resource = resource.name(),
"failed to watch outbound policy for port 4191"
);
tracing::error!(?error, ns, ip, port, "failed to watch outbound policy");
time::sleep(Duration::from_secs(1)).await;
}
}
Expand Down Expand Up @@ -291,26 +287,23 @@ pub fn assert_backend_has_failure_filter(
}

#[track_caller]
pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, parent: &Resource, port: u16) {
let kind = route.metadata.as_ref().unwrap().kind.as_ref().unwrap();
match kind {
pub fn assert_route_is_default<R: TestRoute>(
route: &R::Route,
parent: &ParentReference,
port: u16,
) {
let rules = &R::rules_first_available(route);
let backends = assert_singleton(rules);
let backend = R::backend(*assert_singleton(backends));
assert_backend_matches_reference(backend, parent, port);

let route_meta = R::extract_meta(route);
match route_meta.kind.as_ref().unwrap() {
grpc::meta::metadata::Kind::Default(_) => {}
grpc::meta::metadata::Kind::Resource(r) => {
panic!("route expected to be default but got resource {r:?}")
}
}

let backends = route_backends_first_available(route);
let backend = assert_singleton(backends);
assert_backend_matches_parent(backend, parent, port);

let rule = assert_singleton(&route.rules);
let route_match = assert_singleton(&rule.matches);
let path_match = route_match.path.as_ref().unwrap().kind.as_ref().unwrap();
assert_eq!(
*path_match,
grpc::http_route::path_match::Kind::Prefix("/".to_string())
);
}

#[track_caller]
Expand All @@ -330,49 +323,27 @@ pub fn assert_tls_route_is_default(route: &grpc::outbound::TlsRoute, parent: &Re
}

#[track_caller]
pub fn assert_backend_matches_parent(
backend: &grpc::outbound::http_route::RouteBackend,
parent: &Resource,
pub fn assert_backend_matches_reference(
backend: &grpc::outbound::Backend,
obj_ref: &ParentReference,
port: u16,
) {
let backend = backend.backend.as_ref().unwrap();

match parent {
Resource::Service(svc) => {
let dst = match backend.kind.as_ref().unwrap() {
grpc::outbound::backend::Kind::Balancer(balance) => {
let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap();
match kind {
grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path,
}
}
grpc::outbound::backend::Kind::Forward(_) => {
panic!("service default route backend must be Balancer")
}
};
assert_eq!(
*dst,
format!(
"{}.{}.svc.{}:{}",
svc.name_unchecked(),
svc.namespace().unwrap(),
"cluster.local",
port
)
);
let mut group = obj_ref.group.as_deref();
if group == Some("") {
group = Some("core");
}
match backend.metadata.as_ref().unwrap().kind.as_ref().unwrap() {
grpc::meta::metadata::Kind::Resource(resource) => {
assert_eq!(resource.name, obj_ref.name);
assert_eq!(Some(&resource.namespace), obj_ref.namespace.as_ref());
assert_eq!(Some(resource.group.as_str()), group);
assert_eq!(Some(&resource.kind), obj_ref.kind.as_ref());
assert_eq!(resource.port, u32::from(port));
}

Resource::EgressNetwork(_) => {
match backend.kind.as_ref().unwrap() {
grpc::outbound::backend::Kind::Forward(_) => {}
grpc::outbound::backend::Kind::Balancer(_) => {
panic!("egress net default route backend must be Forward")
}
};
grpc::meta::metadata::Kind::Default(_) => {
panic!("backend expected to be resource but got default")
}
}

assert_resource_meta(&backend.metadata, parent, port)
}

#[track_caller]
Expand Down Expand Up @@ -418,7 +389,7 @@ pub fn assert_tls_backend_matches_parent(
}
}

assert_resource_meta(&backend.metadata, parent, port)
//assert_resource_meta(&backend.metadata, parent, port)
}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this be reverted?


#[track_caller]
Expand Down Expand Up @@ -464,7 +435,7 @@ pub fn assert_tcp_backend_matches_parent(
}
}

assert_resource_meta(&backend.metadata, parent, port)
//assert_resource_meta(&backend.metadata, parent, port)
}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same goes here


#[track_caller]
Expand Down
Loading
Loading