@@ -5,6 +5,7 @@
package io.strimzi.systemtest.resources.types;

import io.fabric8.kubernetes.api.model.DeletionPropagation;
import io.fabric8.kubernetes.api.model.LabelSelectorBuilder;
import io.fabric8.kubernetes.client.dsl.MixedOperation;
import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
import io.fabric8.kubernetes.client.dsl.Resource;
@@ -13,6 +14,7 @@
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.model.kafka.Kafka;
import io.strimzi.api.kafka.model.kafka.KafkaList;
import io.strimzi.api.kafka.model.nodepool.KafkaNodePool;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.systemtest.resources.CrdClients;
import io.strimzi.systemtest.resources.ResourceConditions;
@@ -21,6 +23,8 @@
import org.apache.logging.log4j.Logger;
import org.junit.platform.commons.util.Preconditions;

import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Predicate;

@@ -84,6 +88,22 @@ public void delete(Kafka kafka) {
.allMatch(result -> true);
}

// Delete KNPs attached to this Kafka cluster first, to prevent issues with hanging PVCs
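// KafkaNodePools that belong to a Kafka cluster carry the strimzi.io/cluster label (Labels.STRIMZI_CLUSTER_LABEL),
// so a selector on that label finds exactly the node pools of this cluster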
List<KafkaNodePool> nodePools = CrdClients.kafkaNodePoolClient().inNamespace(namespaceName)
.withLabelSelector(
new LabelSelectorBuilder()
.withMatchLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, kafka.getMetadata().getName()))
.build()
)
.list()
.getItems();

if (!nodePools.isEmpty()) {
List<String> nodePoolNames = nodePools.stream().map(nodePool -> nodePool.getMetadata().getName()).toList();
LOGGER.info("Deleting KafkaNodePools - {} - related to Kafka {}/{}", nodePoolNames, namespaceName, clusterName);
KubeResourceManager.get().deleteResourceWithWait(nodePools.toArray(new KafkaNodePool[0]));
}

// get current Kafka
Kafka currentKafka = client.inNamespace(namespaceName)
.withName(kafka.getMetadata().getName()).get();

@@ -16,15 +16,13 @@
import io.skodjob.annotations.TestDoc;
import io.skodjob.testframe.resources.KubeResourceManager;
import io.strimzi.api.kafka.model.kafka.KafkaResources;
import io.strimzi.api.kafka.model.nodepool.KafkaNodePool;
import io.strimzi.systemtest.AbstractST;
import io.strimzi.systemtest.Environment;
import io.strimzi.systemtest.TestConstants;
import io.strimzi.systemtest.annotations.IsolatedTest;
import io.strimzi.systemtest.docs.TestDocsLabels;
import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients;
import io.strimzi.systemtest.labels.LabelSelectors;
import io.strimzi.systemtest.resources.CrdClients;
import io.strimzi.systemtest.resources.crd.KafkaComponents;
import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder;
import io.strimzi.systemtest.resources.operator.SetupClusterOperator;
Expand All @@ -41,7 +39,6 @@
import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;

@@ -246,12 +243,4 @@ void setup() {
KubeResourceManager.get().createResourceWithWait(KafkaTemplates.kafka(Environment.TEST_SUITE_NAMESPACE, sharedClusterName, KAFKA_REPLICAS).build());
KubeResourceManager.get().createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(Environment.TEST_SUITE_NAMESPACE, sharedClusterName, KafkaResources.plainBootstrapAddress(sharedClusterName), 1).build());
}

@AfterEach
void cleanup() {
// In order to properly delete all resources, we need to delete the KafkaNodePools first (the correct deletion order is KafkaNodePools -> Kafka)
// This ensures everything is deleted cleanly
List<KafkaNodePool> nodePools = CrdClients.kafkaNodePoolClient().inNamespace(Environment.TEST_SUITE_NAMESPACE).list().getItems();
KubeResourceManager.get().deleteResourceAsyncWait(nodePools.toArray(new KafkaNodePool[0]));
}
}