@@ -28,13 +28,15 @@ import joptsimple.OptionParser
 import kafka.consumer.BaseConsumerRecord
 import kafka.metrics.KafkaMetricsGroup
 import kafka.utils.{CommandLineUtils, CoreUtils, Logging, Whitelist}
-import org.apache.kafka.clients.consumer.{CommitFailedException, Consumer, ConsumerConfig, ConsumerRebalanceListener, ConsumerRecord, KafkaConsumer, OffsetAndMetadata}
+import org.apache.kafka.clients.consumer.{CommitFailedException, Consumer, ConsumerConfig, ConsumerRebalanceListener, ConsumerRecord, KafkaConsumer, OffsetAndMetadata, PassThroughConsumerRecord}
 import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
 import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
 import org.apache.kafka.common.{KafkaException, TopicPartition}
 import org.apache.kafka.common.serialization.ByteArrayDeserializer
 import org.apache.kafka.common.utils.Utils
 import org.apache.kafka.common.errors.WakeupException
+import org.apache.kafka.common.header.Headers
+import org.apache.kafka.common.header.internals.{RecordHeader, RecordHeaders}
 import org.apache.kafka.common.record.RecordBatch

 import scala.collection.JavaConverters._
@@ -69,6 +71,22 @@ object MirrorMaker extends Logging with KafkaMetricsGroup {
   private var offsetCommitIntervalMs = 0
   private var abortOnSendFailure: Boolean = true
   @volatile private var exitingOnSendFailure: Boolean = false
+  private var passThroughEnabled: Boolean = false
+  private val PASS_THROUGH_MAGIC_VALUE = "__passThroughMagicValue"
+
+  val recordHeadersV1: Headers = {
+    val magicValueV1 = Array[Byte] {
+      RecordBatch.MAGIC_VALUE_V1
+    }
+    new RecordHeaders().add(new RecordHeader(PASS_THROUGH_MAGIC_VALUE, magicValueV1))
+  }
+
+  val recordHeadersV2: Headers = {
+    val magicValueV2 = Array[Byte] {
+      RecordBatch.MAGIC_VALUE_V2
+    }
+    new RecordHeaders().add(new RecordHeader(PASS_THROUGH_MAGIC_VALUE, magicValueV2))
+  }

   // If a message send failed after retries are exhausted. The offset of the messages will also be removed from
   // the unacked offset list to avoid offset commit being stuck on that offset. In this case, the offset of that
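The two Headers instances above are built once and attached to every mirrored record, tagging it with the magic byte of the batch it came from. A downstream consumer can read that tag back; a minimal sketch, assuming only the header key from this patch and Kafka's standard Headers API (the helper name is made up):

    import org.apache.kafka.clients.consumer.ConsumerRecord

    // Hypothetical helper: recovers the pass-through tag from a mirrored record.
    // Returns None if the record did not come through a pass-through-enabled MirrorMaker.
    def passThroughMagic(record: ConsumerRecord[Array[Byte], Array[Byte]]): Option[Byte] =
      Option(record.headers.lastHeader("__passThroughMagicValue")).map(_.value()(0))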
@@ -214,6 +232,7 @@ object MirrorMaker extends Logging with KafkaMetricsGroup {
     producerProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
     producerProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
     if (options.has(passthroughCompressionOpt)) {
+      passThroughEnabled = true
       consumerProps.setProperty("enable.shallow.iterator", "true")
       producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "passthrough")
     }
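When passthroughCompressionOpt is set, the same pair of settings could be reproduced outside MirrorMaker; a sketch assuming the LinkedIn Kafka fork, since neither "enable.shallow.iterator" nor a "passthrough" compression type exists in upstream Apache Kafka:

    import java.util.Properties
    import org.apache.kafka.clients.producer.ProducerConfig

    val consumerProps = new Properties()
    // Shallow iteration hands whole compressed batches to the consumer
    // instead of decompressing them into individual records.
    consumerProps.setProperty("enable.shallow.iterator", "true")

    val producerProps = new Properties()
    // "passthrough" forwards those batches without recompressing them on the producer side.
    producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "passthrough")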
@@ -253,7 +272,11 @@ object MirrorMaker extends Logging with KafkaMetricsGroup {
           else
             CoreUtils.createObject[MirrorMakerMessageHandler](customMessageHandlerClass)
         } else {
-          defaultMirrorMakerMessageHandler
+          if (passThroughEnabled) {
+            passThroughMirrorMakerMessageHandler
+          } else {
+            defaultMirrorMakerMessageHandler
+          }
         }
       }
     } catch {
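For context, all three branches must evaluate to the same handler type, declared elsewhere in MirrorMaker.scala (reproduced here; not part of this diff):

    trait MirrorMakerMessageHandler {
      def handle(record: BaseConsumerRecord): util.List[ProducerRecord[Array[Byte], Array[Byte]]]
    }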
@@ -362,6 +385,17 @@ object MirrorMaker extends Logging with KafkaMetricsGroup {
       record.value,
       record.headers)

+    private def toBaseConsumerRecordWithPassThrough(record: PassThroughConsumerRecord[Array[Byte], Array[Byte]]): BaseConsumerRecord =
+      BaseConsumerRecord(record.topic,
+        record.partition,
+        record.offset,
+        record.timestamp,
+        record.timestampType,
+        record.key,
+        record.value,
+        record.headers,
+        record.magic)
+
     override def run() {
       info("Starting mirror maker thread " + threadName)
       try {
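The nine-argument BaseConsumerRecord call only compiles if the case class has grown a magic field; a plausible shape, assuming a defaulted trailing parameter so the existing eight-argument call in toBaseConsumerRecord keeps working (the real definition lives in kafka.consumer and is not shown in this diff):

    import org.apache.kafka.common.header.Headers
    import org.apache.kafka.common.header.internals.RecordHeaders
    import org.apache.kafka.common.record.{RecordBatch, TimestampType}

    case class BaseConsumerRecord(topic: String,
                                  partition: Int,
                                  offset: Long,
                                  timestamp: Long = RecordBatch.NO_TIMESTAMP,
                                  timestampType: TimestampType = TimestampType.NO_TIMESTAMP_TYPE,
                                  key: Array[Byte],
                                  value: Array[Byte],
                                  headers: Headers = new RecordHeaders(),
                                  magic: Byte = RecordBatch.CURRENT_MAGIC_VALUE) // assumed addition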
@@ -377,7 +411,13 @@ object MirrorMaker extends Logging with KafkaMetricsGroup {
           } else {
             trace("Sending message with null value and offset %d.".format(data.offset))
           }
-          val records = messageHandler.handle(toBaseConsumerRecord(data))
+          val records = {
+            if (passThroughEnabled) {
+              messageHandler.handle(toBaseConsumerRecordWithPassThrough(data.asInstanceOf[PassThroughConsumerRecord[Array[Byte], Array[Byte]]]))
+            } else {
+              messageHandler.handle(toBaseConsumerRecord(data))
+            }
+          }
           records.asScala.foreach(producer.send)
           maybeFlushAndCommitOffsets()
         }
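The asInstanceOf cast is only sound because passThroughEnabled and the consumer's shallow-iterator mode are switched on together. A more defensive variant (a sketch of an alternative, not what this patch does) would dispatch on the runtime type:

    val records = data match {
      // Erasure prevents matching the full generic type, so guard on the
      // class and cast the type parameters afterwards.
      case p: PassThroughConsumerRecord[_, _] =>
        messageHandler.handle(toBaseConsumerRecordWithPassThrough(
          p.asInstanceOf[PassThroughConsumerRecord[Array[Byte], Array[Byte]]]))
      case _ =>
        messageHandler.handle(toBaseConsumerRecord(data))
    }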
@@ -587,6 +627,20 @@ object MirrorMaker extends Logging with KafkaMetricsGroup {
     }
   }

+  private[tools] object passThroughMirrorMakerMessageHandler extends MirrorMakerMessageHandler {
+    override def handle(record: BaseConsumerRecord): util.List[ProducerRecord[Array[Byte], Array[Byte]]] = {
+      val timestamp: java.lang.Long = if (record.timestamp == RecordBatch.NO_TIMESTAMP) null else record.timestamp
+      // It is assumed that we no longer have message format V0 at LinkedIn.
+      if (record.magic == RecordBatch.MAGIC_VALUE_V1) {
+        Collections.singletonList(new ProducerRecord(record.topic, null, timestamp, record.key, record.value, recordHeadersV1))
+      } else if (record.magic == RecordBatch.MAGIC_VALUE_V2) {
+        Collections.singletonList(new ProducerRecord(record.topic, null, timestamp, record.key, record.value, recordHeadersV2))
+      } else {
+        throw new IllegalArgumentException("Record batch with magic value " + record.magic + " is not supported in pass-through mode")
+      }
+    }
+  }
+
   private class NoRecordsException extends RuntimeException

 }
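Taken together, the handler's contract is: V1 and V2 batches are forwarded as single tagged records, anything else is rejected. A quick illustration, assuming the extended BaseConsumerRecord sketched above (topic name and payload are made up):

    import org.apache.kafka.common.record.{RecordBatch, TimestampType}

    val v2Record = BaseConsumerRecord("mirror-test", 0, 42L,
      1558000000000L, TimestampType.CREATE_TIME,
      "key".getBytes("UTF-8"), "value".getBytes("UTF-8"),
      new RecordHeaders(), RecordBatch.MAGIC_VALUE_V2)

    // One ProducerRecord, tagged with the V2 pass-through header; a V0 record
    // (RecordBatch.MAGIC_VALUE_V0) would throw IllegalArgumentException instead.
    val out = passThroughMirrorMakerMessageHandler.handle(v2Record)
    assert(out.size == 1)
    assert(out.get(0).headers.lastHeader("__passThroughMagicValue") != null)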