Skip to content

Commit c7654f7

Browse files
TaiJuWu authored and chia7712 committed
KAFKA-18399 Remove ZooKeeper from KafkaApis (8/N): ELECT_LEADERS, ALTER_PARTITION, UPDATE_FEATURES (#18453)
Reviewers: Chia-Ping Tsai <[email protected]>
1 parent 4be1376 commit c7654f7

File tree

2 files changed

+0
-145
lines changed

2 files changed

+0
-145
lines changed

Diff for: core/src/main/scala/kafka/server/KafkaApis.scala

-118
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@ import org.apache.kafka.common.internals.{FatalExitError, Topic}
3737
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.{AddPartitionsToTxnResult, AddPartitionsToTxnResultCollection}
3838
import org.apache.kafka.common.message.AlterConfigsResponseData.AlterConfigsResourceResponse
3939
import org.apache.kafka.common.message.DeleteRecordsResponseData.{DeleteRecordsPartitionResult, DeleteRecordsTopicResult}
40-
import org.apache.kafka.common.message.ElectLeadersResponseData.{PartitionResult, ReplicaElectionResult}
4140
import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData.ClientMetricsResource
4241
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition
4342
import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse}
@@ -217,7 +216,6 @@ class KafkaApis(val requestChannel: RequestChannel,
217216
case ApiKeys.ALTER_CLIENT_QUOTAS => forwardToController(request)
218217
case ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS => handleDescribeUserScramCredentialsRequest(request)
219218
case ApiKeys.ALTER_USER_SCRAM_CREDENTIALS => forwardToController(request)
220-
case ApiKeys.ALTER_PARTITION => handleAlterPartitionRequest(request)
221219
case ApiKeys.UPDATE_FEATURES => forwardToController(request)
222220
case ApiKeys.DESCRIBE_CLUSTER => handleDescribeCluster(request)
223221
case ApiKeys.DESCRIBE_PRODUCERS => handleDescribeProducersRequest(request)
@@ -2399,77 +2397,6 @@ class KafkaApis(val requestChannel: RequestChannel,
23992397
true
24002398
}
24012399

2402-
def handleElectLeaders(request: RequestChannel.Request): Unit = {
2403-
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
2404-
val electionRequest = request.body[ElectLeadersRequest]
2405-
2406-
def sendResponseCallback(
2407-
error: ApiError
2408-
)(
2409-
results: Map[TopicPartition, ApiError]
2410-
): Unit = {
2411-
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
2412-
val adjustedResults = if (electionRequest.data.topicPartitions == null) {
2413-
/* When performing elections across all of the partitions we should only return
2414-
* partitions for which there was an election or resulted in an error. In other
2415-
* words, partitions that didn't need election because they ready have the correct
2416-
* leader are not returned to the client.
2417-
*/
2418-
results.filter { case (_, error) =>
2419-
error.error != Errors.ELECTION_NOT_NEEDED
2420-
}
2421-
} else results
2422-
2423-
val electionResults = new util.ArrayList[ReplicaElectionResult]()
2424-
adjustedResults
2425-
.groupBy { case (tp, _) => tp.topic }
2426-
.foreachEntry { (topic, ps) =>
2427-
val electionResult = new ReplicaElectionResult()
2428-
2429-
electionResult.setTopic(topic)
2430-
ps.foreachEntry { (topicPartition, error) =>
2431-
val partitionResult = new PartitionResult()
2432-
partitionResult.setPartitionId(topicPartition.partition)
2433-
partitionResult.setErrorCode(error.error.code)
2434-
partitionResult.setErrorMessage(error.message)
2435-
electionResult.partitionResult.add(partitionResult)
2436-
}
2437-
2438-
electionResults.add(electionResult)
2439-
}
2440-
2441-
new ElectLeadersResponse(
2442-
requestThrottleMs,
2443-
error.error.code,
2444-
electionResults,
2445-
electionRequest.version
2446-
)
2447-
})
2448-
}
2449-
2450-
if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) {
2451-
val error = new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, null)
2452-
val partitionErrors: Map[TopicPartition, ApiError] =
2453-
electionRequest.topicPartitions.asScala.iterator.map(partition => partition -> error).toMap
2454-
2455-
sendResponseCallback(error)(partitionErrors)
2456-
} else {
2457-
val partitions = if (electionRequest.data.topicPartitions == null) {
2458-
metadataCache.getAllTopics().flatMap(metadataCache.getTopicPartitions)
2459-
} else {
2460-
electionRequest.topicPartitions.asScala
2461-
}
2462-
2463-
replicaManager.electLeaders(
2464-
zkSupport.controller,
2465-
partitions,
2466-
electionRequest.electionType,
2467-
sendResponseCallback(ApiError.NONE),
2468-
electionRequest.data.timeoutMs
2469-
)
2470-
}
2471-
}
2472-
24732400
def handleOffsetDeleteRequest(
24742401
request: RequestChannel.Request,
24752402
requestLocal: RequestLocal
@@ -2628,51 +2555,6 @@ class KafkaApis(val requestChannel: RequestChannel,
26282555
}
26292556
}
26302557

2631-
def handleAlterPartitionRequest(request: RequestChannel.Request): Unit = {
2632-
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
2633-
val alterPartitionRequest = request.body[AlterPartitionRequest]
2634-
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
2635-
2636-
if (!zkSupport.controller.isActive)
2637-
requestHelper.sendResponseExemptThrottle(request, alterPartitionRequest.getErrorResponse(
2638-
AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.NOT_CONTROLLER.exception))
2639-
else
2640-
zkSupport.controller.alterPartitions(alterPartitionRequest.data, request.context.apiVersion, alterPartitionResp =>
2641-
requestHelper.sendResponseExemptThrottle(request, new AlterPartitionResponse(alterPartitionResp)))
2642-
}
2643-
2644-
def handleUpdateFeatures(request: RequestChannel.Request): Unit = {
2645-
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
2646-
val updateFeaturesRequest = request.body[UpdateFeaturesRequest]
2647-
2648-
def sendResponseCallback(errors: Either[ApiError, Map[String, ApiError]]): Unit = {
2649-
def createResponse(throttleTimeMs: Int): UpdateFeaturesResponse = {
2650-
errors match {
2651-
case Left(topLevelError) =>
2652-
UpdateFeaturesResponse.createWithErrors(
2653-
topLevelError,
2654-
Collections.emptySet(),
2655-
throttleTimeMs)
2656-
case Right(featureUpdateErrors) =>
2657-
// This response is not correct, but since this is ZK specific code it will be removed in 4.0
2658-
UpdateFeaturesResponse.createWithErrors(
2659-
ApiError.NONE,
2660-
featureUpdateErrors.asJava.keySet(),
2661-
throttleTimeMs)
2662-
}
2663-
}
2664-
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => createResponse(requestThrottleMs))
2665-
}
2666-
2667-
if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) {
2668-
sendResponseCallback(Left(new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED)))
2669-
} else if (!zkSupport.controller.isActive) {
2670-
sendResponseCallback(Left(new ApiError(Errors.NOT_CONTROLLER)))
2671-
} else {
2672-
zkSupport.controller.updateFeatures(updateFeaturesRequest, sendResponseCallback)
2673-
}
2674-
}
2675-
26762558
def handleDescribeCluster(request: RequestChannel.Request): Unit = {
26772559
val response = authHelper.computeDescribeClusterResponse(
26782560
request,

Diff for: core/src/test/scala/unit/kafka/server/KafkaApisTest.scala

-27
Original file line numberDiff line numberDiff line change
@@ -9932,25 +9932,12 @@ class KafkaApisTest extends Logging {
99329932
request
99339933
}
99349934

9935-
private def verifyShouldNeverHandleErrorMessage(handler: RequestChannel.Request => Unit): Unit = {
9936-
val request = createMockRequest()
9937-
val e = assertThrows(classOf[UnsupportedVersionException], () => handler(request))
9938-
assertEquals(KafkaApis.shouldNeverReceive(request).getMessage, e.getMessage)
9939-
}
9940-
99419935
private def verifyShouldAlwaysForwardErrorMessage(handler: RequestChannel.Request => Unit): Unit = {
99429936
val request = createMockRequest()
99439937
val e = assertThrows(classOf[UnsupportedVersionException], () => handler(request))
99449938
assertEquals(KafkaApis.shouldAlwaysForward(request).getMessage, e.getMessage)
99459939
}
99469940

9947-
@Test
9948-
def testRaftShouldNeverHandleAlterPartitionRequest(): Unit = {
9949-
metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0)
9950-
kafkaApis = createKafkaApis(raftSupport = true)
9951-
verifyShouldNeverHandleErrorMessage(kafkaApis.handleAlterPartitionRequest)
9952-
}
9953-
99549941
@Test
99559942
def testRaftShouldAlwaysForwardCreateAcls(): Unit = {
99569943
metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0)
@@ -10048,20 +10035,6 @@ class KafkaApisTest extends Logging {
1004810035
verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleAlterClientQuotasRequest)
1004910036
}
1005010037

10051-
@Test
10052-
def testRaftShouldAlwaysForwardUpdateFeatures(): Unit = {
10053-
metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0)
10054-
kafkaApis = createKafkaApis(raftSupport = true)
10055-
verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleUpdateFeatures)
10056-
}
10057-
10058-
@Test
10059-
def testRaftShouldAlwaysForwardElectLeaders(): Unit = {
10060-
metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0)
10061-
kafkaApis = createKafkaApis(raftSupport = true)
10062-
verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleElectLeaders)
10063-
}
10064-
1006510038
@Test
1006610039
def testConsumerGroupHeartbeatReturnsUnsupportedVersion(): Unit = {
1006710040
val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequestData().setGroupId("group")

0 commit comments

Comments (0)