diff --git a/NOTICE b/NOTICE index 03eb67bbbaf..490e17b4b3a 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Apache BookKeeper -Copyright 2011-2023 The Apache Software Foundation +Copyright 2011-2024 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/bookkeeper-dist/src/main/resources/LICENSE-all.bin.txt b/bookkeeper-dist/src/main/resources/LICENSE-all.bin.txt index 979ab631413..830e9a7275d 100644 --- a/bookkeeper-dist/src/main/resources/LICENSE-all.bin.txt +++ b/bookkeeper-dist/src/main/resources/LICENSE-all.bin.txt @@ -217,17 +217,17 @@ Apache Software License, Version 2. - lib/commons-io-commons-io-2.7.jar [8] - lib/commons-lang-commons-lang-2.6.jar [9] - lib/commons-logging-commons-logging-1.1.1.jar [10] -- lib/io.netty-netty-buffer-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-dns-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-http-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-http2-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-socks-4.1.94.Final.jar [11] -- lib/io.netty-netty-common-4.1.94.Final.jar [11] -- lib/io.netty-netty-handler-4.1.94.Final.jar [11] -- lib/io.netty-netty-handler-proxy-4.1.94.Final.jar [11] -- lib/io.netty-netty-resolver-4.1.94.Final.jar [11] -- lib/io.netty-netty-resolver-dns-4.1.94.Final.jar [11] +- lib/io.netty-netty-buffer-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-dns-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-http-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-http2-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-socks-4.1.104.Final.jar [11] +- lib/io.netty-netty-common-4.1.104.Final.jar [11] +- lib/io.netty-netty-handler-4.1.104.Final.jar [11] +- lib/io.netty-netty-handler-proxy-4.1.104.Final.jar [11] +- lib/io.netty-netty-resolver-4.1.104.Final.jar [11] +- lib/io.netty-netty-resolver-dns-4.1.104.Final.jar [11]
- lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar [11] @@ -235,13 +235,13 @@ Apache Software License, Version 2. - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar [11] - lib/io.netty-netty-tcnative-classes-2.0.61.Final.jar [11] -- lib/io.netty-netty-transport-4.1.94.Final.jar [11] -- lib/io.netty-netty-transport-classes-epoll-4.1.94.Final.jar [11] -- lib/io.netty-netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.21.Final.jar [11] -- lib/io.netty-netty-transport-native-unix-common-4.1.94.Final.jar [11] +- lib/io.netty-netty-transport-4.1.104.Final.jar [11] +- lib/io.netty-netty-transport-classes-epoll-4.1.104.Final.jar [11] +- lib/io.netty-netty-transport-native-epoll-4.1.104.Final-linux-x86_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-x86_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-aarch_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.24.Final.jar [11] +- lib/io.netty-netty-transport-native-unix-common-4.1.104.Final.jar [11] - lib/io.prometheus-simpleclient-0.15.0.jar [12] - lib/io.prometheus-simpleclient_common-0.15.0.jar [12] - lib/io.prometheus-simpleclient_hotspot-0.15.0.jar [12] @@ -360,7 +360,7 @@ Apache Software License, Version 2. 
[8] Source available at https://github.com/apache/commons-io/tree/rel/commons-io-2.7 [9] Source available at https://github.com/apache/commons-lang/tree/LANG_2_6 [10] Source available at https://github.com/apache/commons-logging/tree/commons-logging-1.1.1 -[11] Source available at https://github.com/netty/netty/tree/netty-4.1.94.Final +[11] Source available at https://github.com/netty/netty/tree/netty-4.1.104.Final [12] Source available at https://github.com/prometheus/client_java/tree/parent-0.15.0 [13] Source available at https://github.com/vert-x3/vertx-auth/tree/4.3.2 [14] Source available at https://github.com/vert-x3/vertx-bridge-common/tree/4.3.2 @@ -404,9 +404,9 @@ Apache Software License, Version 2. [56] Source available at https://github.com/JetBrains/kotlin/releases/tag/v1.6.20 ------------------------------------------------------------------------------------ -lib/io.netty-netty-codec-4.1.94.Final.jar bundles some 3rd party dependencies +lib/io.netty-netty-codec-4.1.104.Final.jar bundles some 3rd party dependencies -lib/io.netty-netty-codec-4.1.94.Final.jar contains the extensions to Java Collections Framework which has +lib/io.netty-netty-codec-4.1.104.Final.jar contains the extensions to Java Collections Framework which has been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: * LICENSE: @@ -415,7 +415,7 @@ been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of Robert Harder's Public Domain +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of Robert Harder's Public Domain Base64 Encoder and Decoder, which can be obtained at: * LICENSE: @@ -423,7 +423,7 @@ Base64 Encoder and Decoder, which can be obtained at: * HOMEPAGE: * http://iharder.sourceforge.net/current/java/base64/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Webbit', an event based +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Webbit', an event based WebSocket and HTTP server, which can be obtained at: * LICENSE: @@ -431,7 +431,7 @@ WebSocket and HTTP server, which can be obtained at: * HOMEPAGE: * https://github.com/joewalnes/webbit -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'SLF4J', a simple logging +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'SLF4J', a simple logging facade for Java, which can be obtained at: * LICENSE: @@ -439,7 +439,7 @@ facade for Java, which can be obtained at: * HOMEPAGE: * http://www.slf4j.org/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Apache Harmony', an open source +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Apache Harmony', an open source Java SE, which can be obtained at: * NOTICE: @@ -449,7 +449,7 @@ Java SE, which can be obtained at: * HOMEPAGE: * http://archive.apache.org/dist/harmony/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'jbzip2', a Java bzip2 compression +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'jbzip2', a Java bzip2 compression and decompression library written by Matthew J. Francis. 
It can be obtained at: * LICENSE: @@ -457,7 +457,7 @@ and decompression library written by Matthew J. Francis. It can be obtained at: * HOMEPAGE: * https://code.google.com/p/jbzip2/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'libdivsufsort', a C API library to construct +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'libdivsufsort', a C API library to construct the suffix array and the Burrows-Wheeler transformed string for any input string of a constant-size alphabet written by Yuta Mori. It can be obtained at: @@ -466,7 +466,7 @@ a constant-size alphabet written by Yuta Mori. It can be obtained at: * HOMEPAGE: * https://github.com/y-256/libdivsufsort -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of Nitsan Wakart's 'JCTools', +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, which can be obtained at: * LICENSE: @@ -474,7 +474,7 @@ Java Concurrency Tools for the JVM, which can be obtained at: * HOMEPAGE: * https://github.com/JCTools/JCTools -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'JZlib', a re-implementation of zlib in +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'JZlib', a re-implementation of zlib in pure Java, which can be obtained at: * LICENSE: @@ -482,7 +482,7 @@ pure Java, which can be obtained at: * HOMEPAGE: * http://www.jcraft.com/jzlib/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Compress-LZF', a Java library for encoding and +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Compress-LZF', a Java library for encoding and decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: * LICENSE: @@ -490,7 +490,7 @@ decoding data in LZF format, written by Tatu Saloranta. 
It can be obtained at: * HOMEPAGE: * https://github.com/ning/compress -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'lz4', a LZ4 Java compression +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'lz4', a LZ4 Java compression and decompression library written by Adrien Grand. It can be obtained at: * LICENSE: @@ -498,7 +498,7 @@ and decompression library written by Adrien Grand. It can be obtained at: * HOMEPAGE: * https://github.com/jpountz/lz4-java -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'lzma-java', a LZMA Java compression +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'lzma-java', a LZMA Java compression and decompression library, which can be obtained at: * LICENSE: @@ -506,7 +506,7 @@ and decompression library, which can be obtained at: * HOMEPAGE: * https://github.com/jponge/lzma-java -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'jfastlz', a Java port of FastLZ compression +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'jfastlz', a Java port of FastLZ compression and decompression library written by William Kinney. It can be obtained at: * LICENSE: @@ -514,7 +514,7 @@ and decompression library written by William Kinney. 
It can be obtained at: * HOMEPAGE: * https://code.google.com/p/jfastlz/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of and optionally depends on 'Protocol Buffers', +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data interchange format, which can be obtained at: * LICENSE: @@ -522,7 +522,7 @@ Google's data interchange format, which can be obtained at: * HOMEPAGE: * https://github.com/google/protobuf -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Bouncy Castle Crypto APIs' to generate +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Bouncy Castle Crypto APIs' to generate a temporary self-signed X.509 certificate when the JVM does not provide the equivalent functionality. It can be obtained at: @@ -531,7 +531,7 @@ equivalent functionality. It can be obtained at: * HOMEPAGE: * http://www.bouncycastle.org/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Snappy', a compression library produced +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Snappy', a compression library produced by Google Inc, which can be obtained at: * LICENSE: @@ -539,7 +539,7 @@ by Google Inc, which can be obtained at: * HOMEPAGE: * https://github.com/google/snappy -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'JBoss Marshalling', an alternative Java +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'JBoss Marshalling', an alternative Java serialization API, which can be obtained at: * LICENSE: @@ -547,7 +547,7 @@ serialization API, which can be obtained at: * HOMEPAGE: * https://github.com/jboss-remoting/jboss-marshalling -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Caliper', Google's micro- +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Caliper', Google's micro- benchmarking framework, which can be obtained at: * LICENSE: @@ -555,7 +555,7 @@ 
benchmarking framework, which can be obtained at: * HOMEPAGE: * https://github.com/google/caliper -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Apache Commons Logging', a logging +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Apache Commons Logging', a logging framework, which can be obtained at: * LICENSE: @@ -563,7 +563,7 @@ framework, which can be obtained at: * HOMEPAGE: * http://commons.apache.org/logging/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Apache Log4J', a logging framework, which +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Apache Log4J', a logging framework, which can be obtained at: * LICENSE: @@ -571,7 +571,7 @@ can be obtained at: * HOMEPAGE: * http://logging.apache.org/log4j/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Aalto XML', an ultra-high performance +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Aalto XML', an ultra-high performance non-blocking XML processor, which can be obtained at: * LICENSE: @@ -579,7 +579,7 @@ non-blocking XML processor, which can be obtained at: * HOMEPAGE: * http://wiki.fasterxml.com/AaltoHome -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: * LICENSE: @@ -587,7 +587,7 @@ the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: * HOMEPAGE: * https://github.com/twitter/hpack -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: * LICENSE: @@ -595,7 +595,7 @@ the HTTP/2 HPACK algorithm written by Cory Benfield. 
It can be obtained at: * HOMEPAGE: * https://github.com/python-hyper/hpack/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: * LICENSE: @@ -603,7 +603,7 @@ the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at * HOMEPAGE: * https://github.com/nghttp2/nghttp2/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Apache Commons Lang', a Java library +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Apache Commons Lang', a Java library provides utilities for the java.lang API, which can be obtained at: * LICENSE: @@ -612,7 +612,7 @@ provides utilities for the java.lang API, which can be obtained at: * https://commons.apache.org/proper/commons-lang/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains the Maven wrapper scripts from 'Maven Wrapper', +lib/io.netty-netty-codec-4.1.104.Final.jar contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. * LICENSE: @@ -620,7 +620,7 @@ that provides an easy way to ensure a user has everything necessary to run the M * HOMEPAGE: * https://github.com/takari/maven-wrapper -lib/io.netty-netty-codec-4.1.94.Final.jar contains the dnsinfo.h header file, +lib/io.netty-netty-codec-4.1.104.Final.jar contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. This private header is also used by Apple's open source mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). 
diff --git a/bookkeeper-dist/src/main/resources/LICENSE-bkctl.bin.txt b/bookkeeper-dist/src/main/resources/LICENSE-bkctl.bin.txt index 66699c65bc2..18f830c4f73 100644 --- a/bookkeeper-dist/src/main/resources/LICENSE-bkctl.bin.txt +++ b/bookkeeper-dist/src/main/resources/LICENSE-bkctl.bin.txt @@ -217,15 +217,15 @@ Apache Software License, Version 2. - lib/commons-io-commons-io-2.7.jar [8] - lib/commons-lang-commons-lang-2.6.jar [9] - lib/commons-logging-commons-logging-1.1.1.jar [10] -- lib/io.netty-netty-buffer-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-http-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-http2-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-socks-4.1.94.Final.jar [11] -- lib/io.netty-netty-common-4.1.94.Final.jar [11] -- lib/io.netty-netty-handler-4.1.94.Final.jar [11] -- lib/io.netty-netty-handler-proxy-4.1.94.Final.jar [11] -- lib/io.netty-netty-resolver-4.1.94.Final.jar [11] +- lib/io.netty-netty-buffer-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-http-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-http2-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-socks-4.1.104.Final.jar [11] +- lib/io.netty-netty-common-4.1.104.Final.jar [11] +- lib/io.netty-netty-handler-4.1.104.Final.jar [11] +- lib/io.netty-netty-handler-proxy-4.1.104.Final.jar [11] +- lib/io.netty-netty-resolver-4.1.104.Final.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar [11] @@ -233,13 +233,13 @@ Apache Software License, Version 2. 
- lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar [11] - lib/io.netty-netty-tcnative-classes-2.0.61.Final.jar [11] -- lib/io.netty-netty-transport-4.1.94.Final.jar [11] -- lib/io.netty-netty-transport-classes-epoll-4.1.94.Final.jar [11] -- lib/io.netty-netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.21.Final.jar [11] -- lib/io.netty-netty-transport-native-unix-common-4.1.94.Final.jar [11] +- lib/io.netty-netty-transport-4.1.104.Final.jar [11] +- lib/io.netty-netty-transport-classes-epoll-4.1.104.Final.jar [11] +- lib/io.netty-netty-transport-native-epoll-4.1.104.Final-linux-x86_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-x86_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-aarch_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.24.Final.jar [11] +- lib/io.netty-netty-transport-native-unix-common-4.1.104.Final.jar [11] - lib/org.apache.logging.log4j-log4j-api-2.18.0.jar [16] - lib/org.apache.logging.log4j-log4j-core-2.18.0.jar [16] - lib/org.apache.logging.log4j-log4j-slf4j-impl-2.18.0.jar [16] @@ -303,7 +303,7 @@ Apache Software License, Version 2. 
[8] Source available at https://github.com/apache/commons-io/tree/rel/commons-io-2.7 [9] Source available at https://github.com/apache/commons-lang/tree/LANG_2_6 [10] Source available at https://github.com/apache/commons-logging/tree/commons-logging-1.1.1 -[11] Source available at https://github.com/netty/netty/tree/netty-4.1.94.Final +[11] Source available at https://github.com/netty/netty/tree/netty-4.1.104.Final [16] Source available at https://github.com/apache/logging-log4j2/tree/rel/2.18.0 [18] Source available at https://github.com/apache/commons-collections/tree/collections-4.1 [19] Source available at https://github.com/apache/commons-lang/tree/LANG_3_6 @@ -335,9 +335,9 @@ Apache Software License, Version 2. [52] Source available at https://github.com/carrotsearch/hppc/tree/0.9.1 ------------------------------------------------------------------------------------ -lib/io.netty-netty-codec-4.1.94.Final.jar bundles some 3rd party dependencies +lib/io.netty-netty-codec-4.1.104.Final.jar bundles some 3rd party dependencies -lib/io.netty-netty-codec-4.1.94.Final.jar contains the extensions to Java Collections Framework which has +lib/io.netty-netty-codec-4.1.104.Final.jar contains the extensions to Java Collections Framework which has been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: * LICENSE: @@ -346,7 +346,7 @@ been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of Robert Harder's Public Domain +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of Robert Harder's Public Domain Base64 Encoder and Decoder, which can be obtained at: * LICENSE: @@ -354,7 +354,7 @@ Base64 Encoder and Decoder, which can be obtained at: * HOMEPAGE: * http://iharder.sourceforge.net/current/java/base64/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Webbit', an event based +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Webbit', an event based WebSocket and HTTP server, which can be obtained at: * LICENSE: @@ -362,7 +362,7 @@ WebSocket and HTTP server, which can be obtained at: * HOMEPAGE: * https://github.com/joewalnes/webbit -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'SLF4J', a simple logging +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'SLF4J', a simple logging facade for Java, which can be obtained at: * LICENSE: @@ -370,7 +370,7 @@ facade for Java, which can be obtained at: * HOMEPAGE: * http://www.slf4j.org/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Apache Harmony', an open source +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Apache Harmony', an open source Java SE, which can be obtained at: * NOTICE: @@ -380,7 +380,7 @@ Java SE, which can be obtained at: * HOMEPAGE: * http://archive.apache.org/dist/harmony/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'jbzip2', a Java bzip2 compression +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'jbzip2', a Java bzip2 compression and decompression library written by Matthew J. Francis. 
It can be obtained at: * LICENSE: @@ -388,7 +388,7 @@ and decompression library written by Matthew J. Francis. It can be obtained at: * HOMEPAGE: * https://code.google.com/p/jbzip2/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'libdivsufsort', a C API library to construct +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'libdivsufsort', a C API library to construct the suffix array and the Burrows-Wheeler transformed string for any input string of a constant-size alphabet written by Yuta Mori. It can be obtained at: @@ -397,7 +397,7 @@ a constant-size alphabet written by Yuta Mori. It can be obtained at: * HOMEPAGE: * https://github.com/y-256/libdivsufsort -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of Nitsan Wakart's 'JCTools', +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, which can be obtained at: * LICENSE: @@ -405,7 +405,7 @@ Java Concurrency Tools for the JVM, which can be obtained at: * HOMEPAGE: * https://github.com/JCTools/JCTools -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'JZlib', a re-implementation of zlib in +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'JZlib', a re-implementation of zlib in pure Java, which can be obtained at: * LICENSE: @@ -413,7 +413,7 @@ pure Java, which can be obtained at: * HOMEPAGE: * http://www.jcraft.com/jzlib/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Compress-LZF', a Java library for encoding and +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Compress-LZF', a Java library for encoding and decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: * LICENSE: @@ -421,7 +421,7 @@ decoding data in LZF format, written by Tatu Saloranta. 
It can be obtained at: * HOMEPAGE: * https://github.com/ning/compress -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'lz4', a LZ4 Java compression +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'lz4', a LZ4 Java compression and decompression library written by Adrien Grand. It can be obtained at: * LICENSE: @@ -429,7 +429,7 @@ and decompression library written by Adrien Grand. It can be obtained at: * HOMEPAGE: * https://github.com/jpountz/lz4-java -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'lzma-java', a LZMA Java compression +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'lzma-java', a LZMA Java compression and decompression library, which can be obtained at: * LICENSE: @@ -437,7 +437,7 @@ and decompression library, which can be obtained at: * HOMEPAGE: * https://github.com/jponge/lzma-java -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'jfastlz', a Java port of FastLZ compression +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'jfastlz', a Java port of FastLZ compression and decompression library written by William Kinney. It can be obtained at: * LICENSE: @@ -445,7 +445,7 @@ and decompression library written by William Kinney. 
It can be obtained at: * HOMEPAGE: * https://code.google.com/p/jfastlz/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of and optionally depends on 'Protocol Buffers', +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data interchange format, which can be obtained at: * LICENSE: @@ -453,7 +453,7 @@ Google's data interchange format, which can be obtained at: * HOMEPAGE: * https://github.com/google/protobuf -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Bouncy Castle Crypto APIs' to generate +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Bouncy Castle Crypto APIs' to generate a temporary self-signed X.509 certificate when the JVM does not provide the equivalent functionality. It can be obtained at: @@ -462,7 +462,7 @@ equivalent functionality. It can be obtained at: * HOMEPAGE: * http://www.bouncycastle.org/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Snappy', a compression library produced +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Snappy', a compression library produced by Google Inc, which can be obtained at: * LICENSE: @@ -470,7 +470,7 @@ by Google Inc, which can be obtained at: * HOMEPAGE: * https://github.com/google/snappy -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'JBoss Marshalling', an alternative Java +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'JBoss Marshalling', an alternative Java serialization API, which can be obtained at: * LICENSE: @@ -478,7 +478,7 @@ serialization API, which can be obtained at: * HOMEPAGE: * https://github.com/jboss-remoting/jboss-marshalling -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Caliper', Google's micro- +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Caliper', Google's micro- benchmarking framework, which can be obtained at: * LICENSE: @@ -486,7 +486,7 @@ 
benchmarking framework, which can be obtained at: * HOMEPAGE: * https://github.com/google/caliper -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Apache Commons Logging', a logging +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Apache Commons Logging', a logging framework, which can be obtained at: * LICENSE: @@ -494,7 +494,7 @@ framework, which can be obtained at: * HOMEPAGE: * http://commons.apache.org/logging/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Apache Log4J', a logging framework, which +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Apache Log4J', a logging framework, which can be obtained at: * LICENSE: @@ -502,7 +502,7 @@ can be obtained at: * HOMEPAGE: * http://logging.apache.org/log4j/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Aalto XML', an ultra-high performance +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Aalto XML', an ultra-high performance non-blocking XML processor, which can be obtained at: * LICENSE: @@ -510,7 +510,7 @@ non-blocking XML processor, which can be obtained at: * HOMEPAGE: * http://wiki.fasterxml.com/AaltoHome -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: * LICENSE: @@ -518,7 +518,7 @@ the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: * HOMEPAGE: * https://github.com/twitter/hpack -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: * LICENSE: @@ -526,7 +526,7 @@ the HTTP/2 HPACK algorithm written by Cory Benfield. 
It can be obtained at: * HOMEPAGE: * https://github.com/python-hyper/hpack/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: * LICENSE: @@ -534,7 +534,7 @@ the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at * HOMEPAGE: * https://github.com/nghttp2/nghttp2/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Apache Commons Lang', a Java library +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Apache Commons Lang', a Java library provides utilities for the java.lang API, which can be obtained at: * LICENSE: @@ -543,7 +543,7 @@ provides utilities for the java.lang API, which can be obtained at: * https://commons.apache.org/proper/commons-lang/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains the Maven wrapper scripts from 'Maven Wrapper', +lib/io.netty-netty-codec-4.1.104.Final.jar contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. * LICENSE: @@ -551,7 +551,7 @@ that provides an easy way to ensure a user has everything necessary to run the M * HOMEPAGE: * https://github.com/takari/maven-wrapper -lib/io.netty-netty-codec-4.1.94.Final.jar contains the dnsinfo.h header file, +lib/io.netty-netty-codec-4.1.104.Final.jar contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. This private header is also used by Apple's open source mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). 
diff --git a/bookkeeper-dist/src/main/resources/LICENSE-server.bin.txt b/bookkeeper-dist/src/main/resources/LICENSE-server.bin.txt index c7d0a3c532c..06289813597 100644 --- a/bookkeeper-dist/src/main/resources/LICENSE-server.bin.txt +++ b/bookkeeper-dist/src/main/resources/LICENSE-server.bin.txt @@ -217,17 +217,17 @@ Apache Software License, Version 2. - lib/commons-io-commons-io-2.7.jar [8] - lib/commons-lang-commons-lang-2.6.jar [9] - lib/commons-logging-commons-logging-1.1.1.jar [10] -- lib/io.netty-netty-buffer-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-dns-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-http-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-http2-4.1.94.Final.jar [11] -- lib/io.netty-netty-codec-socks-4.1.94.Final.jar [11] -- lib/io.netty-netty-common-4.1.94.Final.jar [11] -- lib/io.netty-netty-handler-4.1.94.Final.jar [11] -- lib/io.netty-netty-handler-proxy-4.1.94.Final.jar [11] -- lib/io.netty-netty-resolver-4.1.94.Final.jar [11] -- lib/io.netty-netty-resolver-dns-4.1.94.Final.jar [11] +- lib/io.netty-netty-buffer-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-dns-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-http-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-http2-4.1.104.Final.jar [11] +- lib/io.netty-netty-codec-socks-4.1.104.Final.jar [11] +- lib/io.netty-netty-common-4.1.104.Final.jar [11] +- lib/io.netty-netty-handler-4.1.104.Final.jar [11] +- lib/io.netty-netty-handler-proxy-4.1.104.Final.jar [11] +- lib/io.netty-netty-resolver-4.1.104.Final.jar [11] +- lib/io.netty-netty-resolver-dns-4.1.104.Final.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar [11] @@ -235,13 +235,13 @@ Apache Software License, Version 2. 
- lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar [11] - lib/io.netty-netty-tcnative-classes-2.0.61.Final.jar [11] -- lib/io.netty-netty-transport-4.1.94.Final.jar [11] -- lib/io.netty-netty-transport-classes-epoll-4.1.94.Final.jar [11] -- lib/io.netty-netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar [11] -- lib/io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.21.Final.jar [11] -- lib/io.netty-netty-transport-native-unix-common-4.1.94.Final.jar [11] +- lib/io.netty-netty-transport-4.1.104.Final.jar [11] +- lib/io.netty-netty-transport-classes-epoll-4.1.104.Final.jar [11] +- lib/io.netty-netty-transport-native-epoll-4.1.104.Final-linux-x86_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-x86_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-aarch_64.jar [11] +- lib/io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.24.Final.jar [11] +- lib/io.netty-netty-transport-native-unix-common-4.1.104.Final.jar [11] - lib/io.prometheus-simpleclient-0.15.0.jar [12] - lib/io.prometheus-simpleclient_common-0.15.0.jar [12] - lib/io.prometheus-simpleclient_hotspot-0.15.0.jar [12] @@ -356,7 +356,7 @@ Apache Software License, Version 2. 
[8] Source available at https://github.com/apache/commons-io/tree/rel/commons-io-2.7 [9] Source available at https://github.com/apache/commons-lang/tree/LANG_2_6 [10] Source available at https://github.com/apache/commons-logging/tree/commons-logging-1.1.1 -[11] Source available at https://github.com/netty/netty/tree/netty-4.1.94.Final +[11] Source available at https://github.com/netty/netty/tree/netty-4.1.104.Final [12] Source available at https://github.com/prometheus/client_java/tree/parent-0.15.0 [13] Source available at https://github.com/vert-x3/vertx-auth/tree/4.3.2 [14] Source available at https://github.com/vert-x3/vertx-bridge-common/tree/4.3.2 @@ -399,9 +399,9 @@ Apache Software License, Version 2. [55] Source available at https://github.com/JetBrains/kotlin/releases/tag/v1.6.20 ------------------------------------------------------------------------------------ -lib/io.netty-netty-codec-4.1.94.Final.jar bundles some 3rd party dependencies +lib/io.netty-netty-codec-4.1.104.Final.jar bundles some 3rd party dependencies -lib/io.netty-netty-codec-4.1.94.Final.jar contains the extensions to Java Collections Framework which has +lib/io.netty-netty-codec-4.1.104.Final.jar contains the extensions to Java Collections Framework which has been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: * LICENSE: @@ -410,7 +410,7 @@ been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of Robert Harder's Public Domain +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of Robert Harder's Public Domain Base64 Encoder and Decoder, which can be obtained at: * LICENSE: @@ -418,7 +418,7 @@ Base64 Encoder and Decoder, which can be obtained at: * HOMEPAGE: * http://iharder.sourceforge.net/current/java/base64/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Webbit', an event based +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Webbit', an event based WebSocket and HTTP server, which can be obtained at: * LICENSE: @@ -426,7 +426,7 @@ WebSocket and HTTP server, which can be obtained at: * HOMEPAGE: * https://github.com/joewalnes/webbit -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'SLF4J', a simple logging +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'SLF4J', a simple logging facade for Java, which can be obtained at: * LICENSE: @@ -434,7 +434,7 @@ facade for Java, which can be obtained at: * HOMEPAGE: * http://www.slf4j.org/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Apache Harmony', an open source +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Apache Harmony', an open source Java SE, which can be obtained at: * NOTICE: @@ -444,7 +444,7 @@ Java SE, which can be obtained at: * HOMEPAGE: * http://archive.apache.org/dist/harmony/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'jbzip2', a Java bzip2 compression +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'jbzip2', a Java bzip2 compression and decompression library written by Matthew J. Francis. 
It can be obtained at: * LICENSE: @@ -452,7 +452,7 @@ and decompression library written by Matthew J. Francis. It can be obtained at: * HOMEPAGE: * https://code.google.com/p/jbzip2/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'libdivsufsort', a C API library to construct +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'libdivsufsort', a C API library to construct the suffix array and the Burrows-Wheeler transformed string for any input string of a constant-size alphabet written by Yuta Mori. It can be obtained at: @@ -461,7 +461,7 @@ a constant-size alphabet written by Yuta Mori. It can be obtained at: * HOMEPAGE: * https://github.com/y-256/libdivsufsort -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of Nitsan Wakart's 'JCTools', +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, which can be obtained at: * LICENSE: @@ -469,7 +469,7 @@ Java Concurrency Tools for the JVM, which can be obtained at: * HOMEPAGE: * https://github.com/JCTools/JCTools -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'JZlib', a re-implementation of zlib in +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'JZlib', a re-implementation of zlib in pure Java, which can be obtained at: * LICENSE: @@ -477,7 +477,7 @@ pure Java, which can be obtained at: * HOMEPAGE: * http://www.jcraft.com/jzlib/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Compress-LZF', a Java library for encoding and +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Compress-LZF', a Java library for encoding and decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: * LICENSE: @@ -485,7 +485,7 @@ decoding data in LZF format, written by Tatu Saloranta. 
It can be obtained at: * HOMEPAGE: * https://github.com/ning/compress -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'lz4', a LZ4 Java compression +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'lz4', a LZ4 Java compression and decompression library written by Adrien Grand. It can be obtained at: * LICENSE: @@ -493,7 +493,7 @@ and decompression library written by Adrien Grand. It can be obtained at: * HOMEPAGE: * https://github.com/jpountz/lz4-java -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'lzma-java', a LZMA Java compression +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'lzma-java', a LZMA Java compression and decompression library, which can be obtained at: * LICENSE: @@ -501,7 +501,7 @@ and decompression library, which can be obtained at: * HOMEPAGE: * https://github.com/jponge/lzma-java -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'jfastlz', a Java port of FastLZ compression +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'jfastlz', a Java port of FastLZ compression and decompression library written by William Kinney. It can be obtained at: * LICENSE: @@ -509,7 +509,7 @@ and decompression library written by William Kinney. 
It can be obtained at: * HOMEPAGE: * https://code.google.com/p/jfastlz/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of and optionally depends on 'Protocol Buffers', +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data interchange format, which can be obtained at: * LICENSE: @@ -517,7 +517,7 @@ Google's data interchange format, which can be obtained at: * HOMEPAGE: * https://github.com/google/protobuf -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Bouncy Castle Crypto APIs' to generate +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Bouncy Castle Crypto APIs' to generate a temporary self-signed X.509 certificate when the JVM does not provide the equivalent functionality. It can be obtained at: @@ -526,7 +526,7 @@ equivalent functionality. It can be obtained at: * HOMEPAGE: * http://www.bouncycastle.org/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Snappy', a compression library produced +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Snappy', a compression library produced by Google Inc, which can be obtained at: * LICENSE: @@ -534,7 +534,7 @@ by Google Inc, which can be obtained at: * HOMEPAGE: * https://github.com/google/snappy -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'JBoss Marshalling', an alternative Java +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'JBoss Marshalling', an alternative Java serialization API, which can be obtained at: * LICENSE: @@ -542,7 +542,7 @@ serialization API, which can be obtained at: * HOMEPAGE: * https://github.com/jboss-remoting/jboss-marshalling -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Caliper', Google's micro- +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Caliper', Google's micro- benchmarking framework, which can be obtained at: * LICENSE: @@ -550,7 +550,7 @@ 
benchmarking framework, which can be obtained at: * HOMEPAGE: * https://github.com/google/caliper -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Apache Commons Logging', a logging +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Apache Commons Logging', a logging framework, which can be obtained at: * LICENSE: @@ -558,7 +558,7 @@ framework, which can be obtained at: * HOMEPAGE: * http://commons.apache.org/logging/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Apache Log4J', a logging framework, which +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Apache Log4J', a logging framework, which can be obtained at: * LICENSE: @@ -566,7 +566,7 @@ can be obtained at: * HOMEPAGE: * http://logging.apache.org/log4j/ -lib/io.netty-netty-codec-4.1.94.Final.jar optionally depends on 'Aalto XML', an ultra-high performance +lib/io.netty-netty-codec-4.1.104.Final.jar optionally depends on 'Aalto XML', an ultra-high performance non-blocking XML processor, which can be obtained at: * LICENSE: @@ -574,7 +574,7 @@ non-blocking XML processor, which can be obtained at: * HOMEPAGE: * http://wiki.fasterxml.com/AaltoHome -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: * LICENSE: @@ -582,7 +582,7 @@ the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: * HOMEPAGE: * https://github.com/twitter/hpack -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: * LICENSE: @@ -590,7 +590,7 @@ the HTTP/2 HPACK algorithm written by Cory Benfield. 
It can be obtained at: * HOMEPAGE: * https://github.com/python-hyper/hpack/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified version of 'HPACK', a Java implementation of +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: * LICENSE: @@ -598,7 +598,7 @@ the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at * HOMEPAGE: * https://github.com/nghttp2/nghttp2/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains a modified portion of 'Apache Commons Lang', a Java library +lib/io.netty-netty-codec-4.1.104.Final.jar contains a modified portion of 'Apache Commons Lang', a Java library provides utilities for the java.lang API, which can be obtained at: * LICENSE: @@ -607,7 +607,7 @@ provides utilities for the java.lang API, which can be obtained at: * https://commons.apache.org/proper/commons-lang/ -lib/io.netty-netty-codec-4.1.94.Final.jar contains the Maven wrapper scripts from 'Maven Wrapper', +lib/io.netty-netty-codec-4.1.104.Final.jar contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. * LICENSE: @@ -615,7 +615,7 @@ that provides an easy way to ensure a user has everything necessary to run the M * HOMEPAGE: * https://github.com/takari/maven-wrapper -lib/io.netty-netty-codec-4.1.94.Final.jar contains the dnsinfo.h header file, +lib/io.netty-netty-codec-4.1.104.Final.jar contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. This private header is also used by Apple's open source mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). 
diff --git a/bookkeeper-dist/src/main/resources/NOTICE-all.bin.txt b/bookkeeper-dist/src/main/resources/NOTICE-all.bin.txt index c2c5a5dd3db..95c83adba53 100644 --- a/bookkeeper-dist/src/main/resources/NOTICE-all.bin.txt +++ b/bookkeeper-dist/src/main/resources/NOTICE-all.bin.txt @@ -23,17 +23,17 @@ LongAdder), which was released with the following comments: http://creativecommons.org/publicdomain/zero/1.0/ ------------------------------------------------------------------------------------ -- lib/io.netty-netty-buffer-4.1.94.Final.jar -- lib/io.netty-netty-codec-4.1.94.Final.jar -- lib/io.netty-netty-codec-dns-4.1.94.Final.jar -- lib/io.netty-netty-codec-http-4.1.94.Final.jar -- lib/io.netty-netty-codec-http2-4.1.94.Final.jar -- lib/io.netty-netty-codec-socks-4.1.94.Final.jar -- lib/io.netty-netty-common-4.1.94.Final.jar -- lib/io.netty-netty-handler-4.1.94.Final.jar -- lib/io.netty-netty-handler-proxy-4.1.94.Final.jar -- lib/io.netty-netty-resolver-4.1.94.Final.jar -- lib/io.netty-netty-resolver-dns-4.1.94.Final.jar +- lib/io.netty-netty-buffer-4.1.104.Final.jar +- lib/io.netty-netty-codec-4.1.104.Final.jar +- lib/io.netty-netty-codec-dns-4.1.104.Final.jar +- lib/io.netty-netty-codec-http-4.1.104.Final.jar +- lib/io.netty-netty-codec-http2-4.1.104.Final.jar +- lib/io.netty-netty-codec-socks-4.1.104.Final.jar +- lib/io.netty-netty-common-4.1.104.Final.jar +- lib/io.netty-netty-handler-4.1.104.Final.jar +- lib/io.netty-netty-handler-proxy-4.1.104.Final.jar +- lib/io.netty-netty-resolver-4.1.104.Final.jar +- lib/io.netty-netty-resolver-dns-4.1.104.Final.jar - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar [11] @@ -41,12 +41,12 @@ LongAdder), which was released with the following comments: - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar [11] - 
lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar [11] - lib/io.netty-netty-tcnative-classes-2.0.61.Final.jar -- lib/io.netty-netty-transport-4.1.94.Final.jar -- lib/io.netty-netty-transport-classes-epoll-4.1.94.Final.jar -- lib/io.netty-netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar -- lib/io.netty-netty-transport-native-unix-common-4.1.94.Final.jar +- lib/io.netty-netty-transport-4.1.104.Final.jar +- lib/io.netty-netty-transport-classes-epoll-4.1.104.Final.jar +- lib/io.netty-netty-transport-native-epoll-4.1.104.Final-linux-x86_64.jar +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-x86_64.jar +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-aarch_64.jar +- lib/io.netty-netty-transport-native-unix-common-4.1.104.Final.jar The Netty Project diff --git a/bookkeeper-dist/src/main/resources/NOTICE-bkctl.bin.txt b/bookkeeper-dist/src/main/resources/NOTICE-bkctl.bin.txt index 1f275c96ff5..f9e530e4afe 100644 --- a/bookkeeper-dist/src/main/resources/NOTICE-bkctl.bin.txt +++ b/bookkeeper-dist/src/main/resources/NOTICE-bkctl.bin.txt @@ -5,15 +5,15 @@ This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
------------------------------------------------------------------------------------ -- lib/io.netty-netty-buffer-4.1.94.Final.jar -- lib/io.netty-netty-codec-4.1.94.Final.jar -- lib/io.netty-netty-codec-http-4.1.94.Final.jar -- lib/io.netty-netty-codec-http2-4.1.94.Final.jar -- lib/io.netty-netty-codec-socks-4.1.94.Final.jar -- lib/io.netty-netty-common-4.1.94.Final.jar -- lib/io.netty-netty-handler-4.1.94.Final.jar -- lib/io.netty-netty-handler-proxy-4.1.94.Final.jar -- lib/io.netty-netty-resolver-4.1.94.Final.jar +- lib/io.netty-netty-buffer-4.1.104.Final.jar +- lib/io.netty-netty-codec-4.1.104.Final.jar +- lib/io.netty-netty-codec-http-4.1.104.Final.jar +- lib/io.netty-netty-codec-http2-4.1.104.Final.jar +- lib/io.netty-netty-codec-socks-4.1.104.Final.jar +- lib/io.netty-netty-common-4.1.104.Final.jar +- lib/io.netty-netty-handler-4.1.104.Final.jar +- lib/io.netty-netty-handler-proxy-4.1.104.Final.jar +- lib/io.netty-netty-resolver-4.1.104.Final.jar - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar [11] @@ -21,12 +21,12 @@ The Apache Software Foundation (http://www.apache.org/). 
- lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar [11] - lib/io.netty-netty-tcnative-classes-2.0.61.Final.jar -- lib/io.netty-netty-transport-4.1.94.Final.jar -- lib/io.netty-netty-transport-classes-epoll-4.1.94.Final.jar -- lib/io.netty-netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar -- lib/io.netty-netty-transport-native-unix-common-4.1.94.Final.jar +- lib/io.netty-netty-transport-4.1.104.Final.jar +- lib/io.netty-netty-transport-classes-epoll-4.1.104.Final.jar +- lib/io.netty-netty-transport-native-epoll-4.1.104.Final-linux-x86_64.jar +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-x86_64.jar +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-aarch_64.jar +- lib/io.netty-netty-transport-native-unix-common-4.1.104.Final.jar The Netty Project diff --git a/bookkeeper-dist/src/main/resources/NOTICE-server.bin.txt b/bookkeeper-dist/src/main/resources/NOTICE-server.bin.txt index 5ec576ceb60..a66ceaa43b3 100644 --- a/bookkeeper-dist/src/main/resources/NOTICE-server.bin.txt +++ b/bookkeeper-dist/src/main/resources/NOTICE-server.bin.txt @@ -5,17 +5,17 @@ This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
------------------------------------------------------------------------------------ -- lib/io.netty-netty-buffer-4.1.94.Final.jar -- lib/io.netty-netty-codec-4.1.94.Final.jar -- lib/io.netty-netty-codec-dns-4.1.94.Final.jar -- lib/io.netty-netty-codec-http-4.1.94.Final.jar -- lib/io.netty-netty-codec-http2-4.1.94.Final.jar -- lib/io.netty-netty-codec-socks-4.1.94.Final.jar -- lib/io.netty-netty-common-4.1.94.Final.jar -- lib/io.netty-netty-handler-4.1.94.Final.jar -- lib/io.netty-netty-handler-proxy-4.1.94.Final.jar -- lib/io.netty-netty-resolver-4.1.94.Final.jar -- lib/io.netty-netty-resolver-dns-4.1.94.Final.jar +- lib/io.netty-netty-buffer-4.1.104.Final.jar +- lib/io.netty-netty-codec-4.1.104.Final.jar +- lib/io.netty-netty-codec-dns-4.1.104.Final.jar +- lib/io.netty-netty-codec-http-4.1.104.Final.jar +- lib/io.netty-netty-codec-http2-4.1.104.Final.jar +- lib/io.netty-netty-codec-socks-4.1.104.Final.jar +- lib/io.netty-netty-common-4.1.104.Final.jar +- lib/io.netty-netty-handler-4.1.104.Final.jar +- lib/io.netty-netty-handler-proxy-4.1.104.Final.jar +- lib/io.netty-netty-resolver-4.1.104.Final.jar +- lib/io.netty-netty-resolver-dns-4.1.104.Final.jar - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar [11] @@ -23,12 +23,12 @@ The Apache Software Foundation (http://www.apache.org/). 
- lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar [11] - lib/io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar [11] - lib/io.netty-netty-tcnative-classes-2.0.61.Final.jar -- lib/io.netty-netty-transport-4.1.94.Final.jar -- lib/io.netty-netty-transport-classes-epoll-4.1.94.Final.jar -- lib/io.netty-netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar -- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar -- lib/io.netty-netty-transport-native-unix-common-4.1.94.Final.jar +- lib/io.netty-netty-transport-4.1.104.Final.jar +- lib/io.netty-netty-transport-classes-epoll-4.1.104.Final.jar +- lib/io.netty-netty-transport-native-epoll-4.1.104.Final-linux-x86_64.jar +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-x86_64.jar +- lib/io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.24.Final-linux-aarch_64.jar +- lib/io.netty-netty-transport-native-unix-common-4.1.104.Final.jar The Netty Project diff --git a/bookkeeper-proto/src/main/proto/BookkeeperProtocol.proto b/bookkeeper-proto/src/main/proto/BookkeeperProtocol.proto index 2bf72a47538..72df7d5e1d4 100644 --- a/bookkeeper-proto/src/main/proto/BookkeeperProtocol.proto +++ b/bookkeeper-proto/src/main/proto/BookkeeperProtocol.proto @@ -66,6 +66,7 @@ enum OperationType { START_TLS = 9; FORCE_LEDGER = 10; GET_LIST_OF_ENTRIES_OF_LEDGER = 11; + BATCH_READ_ENTRY = 12; } /** diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/DefaultEntryLogger.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/DefaultEntryLogger.java index 575a8b375e3..d02ede52fbf 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/DefaultEntryLogger.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/DefaultEntryLogger.java @@ -255,8 
+255,6 @@ private static class Header { * */ static final int LOGFILE_HEADER_SIZE = 1024; - final ByteBuf logfileHeader = Unpooled.buffer(LOGFILE_HEADER_SIZE); - static final int HEADER_VERSION_POSITION = 4; static final int LEDGERS_MAP_OFFSET_POSITION = HEADER_VERSION_POSITION + 4; @@ -328,15 +326,6 @@ public DefaultEntryLogger(ServerConfiguration conf, addListener(listener); } - // Initialize the entry log header buffer. This cannot be a static object - // since in our unit tests, we run multiple Bookies and thus EntryLoggers - // within the same JVM. All of these Bookie instances access this header - // so there can be race conditions when entry logs are rolled over and - // this header buffer is cleared before writing it into the new logChannel. - logfileHeader.writeBytes("BKLO".getBytes(UTF_8)); - logfileHeader.writeInt(HEADER_CURRENT_VERSION); - logfileHeader.writerIndex(LOGFILE_HEADER_SIZE); - // Find the largest logId long logId = INVALID_LID; for (File dir : ledgerDirsManager.getAllLedgerDirs()) { @@ -684,7 +673,7 @@ private void createNewCompactionLog() throws IOException { private void removeCurCompactionLog() { synchronized (compactionLogLock) { if (compactionLogChannel != null) { - if (!compactionLogChannel.getLogFile().delete()) { + if (compactionLogChannel.getLogFile().exists() && !compactionLogChannel.getLogFile().delete()) { LOG.warn("Could not delete compaction log file {}", compactionLogChannel.getLogFile()); } diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Journal.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Journal.java index 000a013881d..91c98857179 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Journal.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Journal.java @@ -471,6 +471,7 @@ public ForceWriteThread(Thread threadToNotifyOnEx, boolean enableGroupForceWrites, StatsLogger statsLogger) { super("ForceWriteThread"); + 
this.setPriority(Thread.MAX_PRIORITY); this.threadToNotifyOnEx = threadToNotifyOnEx; this.enableGroupForceWrites = enableGroupForceWrites; this.forceWriteThreadTime = statsLogger.getThreadScopedCounter("force-write-thread-time"); @@ -650,6 +651,7 @@ public Journal(int journalIndex, File journalDirectory, ServerConfiguration conf public Journal(int journalIndex, File journalDirectory, ServerConfiguration conf, LedgerDirsManager ledgerDirsManager, StatsLogger statsLogger, ByteBufAllocator allocator) { super(journalThreadName + "-" + conf.getBookiePort()); + this.setPriority(Thread.MAX_PRIORITY); this.allocator = allocator; StatsLogger journalStatsLogger = statsLogger.scopeLabel("journalIndex", String.valueOf(journalIndex)); diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectEntryLogger.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectEntryLogger.java index 323727217d6..e18364e61d3 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectEntryLogger.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectEntryLogger.java @@ -252,6 +252,7 @@ private ByteBuf internalReadEntry(long ledgerId, long entryId, long location, bo long thisEntryId = buf.getLong(8); if (thisLedgerId != ledgerId || thisEntryId != entryId) { + ReferenceCountUtil.release(buf); throw new IOException( exMsg("Bad location").kv("location", location) .kv("expectedLedger", ledgerId).kv("expectedEntry", entryId) diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LedgerMetadataIndex.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LedgerMetadataIndex.java index 0f615ab6752..b2fd42a6ba8 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LedgerMetadataIndex.java +++ 
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LedgerMetadataIndex.java @@ -300,6 +300,10 @@ public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException { * Flushes all pending changes. */ public void flush() throws IOException { + if (pendingLedgersUpdates.isEmpty()) { + return; + } + LongWrapper key = LongWrapper.get(); try { @@ -323,6 +327,10 @@ public void flush() throws IOException { } public void removeDeletedLedgers() throws IOException { + if (pendingDeletedLedgers.isEmpty()) { + return; + } + LongWrapper key = LongWrapper.get(); try { diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java index b9dc4c32d2a..5cc22362ac3 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java @@ -336,7 +336,7 @@ static Set splitIntoSubFragments(LedgerHandle lh, * New bookies we want to use to recover and replicate the ledger * entries that were stored on the failed bookie. 
*/ - private void recoverLedgerFragmentEntry(final Long entryId, + void recoverLedgerFragmentEntry(final Long entryId, final LedgerHandle lh, final AsyncCallback.VoidCallback ledgerFragmentEntryMcb, final Set newBookies, @@ -410,7 +410,7 @@ public void readComplete(int rc, LedgerHandle lh, lh.getLastAddConfirmed(), entry.getLength(), Unpooled.wrappedBuffer(data, 0, data.length), lh.getLedgerKey(), - 0 + BookieProtocol.FLAG_RECOVERY_ADD ); if (replicationThrottle != null) { if (toSend instanceof ByteBuf) { diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java index 945b2844373..9486b2e632c 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java @@ -1913,6 +1913,7 @@ void handleBookieFailure(final Map failedBookies) { void ensembleChangeLoop(List origEnsemble, Map failedBookies) { int ensembleChangeId = numEnsembleChanges.incrementAndGet(); + ensembleChangeCounter.inc(); String logContext = String.format("[EnsembleChange(ledger:%d, change-id:%010d)]", ledgerId, ensembleChangeId); // when the ensemble changes are too frequent, close handle diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RackawareEnsemblePlacementPolicyImpl.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RackawareEnsemblePlacementPolicyImpl.java index 7f219854ed8..3863a26a245 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RackawareEnsemblePlacementPolicyImpl.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RackawareEnsemblePlacementPolicyImpl.java @@ -507,13 +507,26 @@ public PlacementResult replaceBookie(int ensembleSize, int writeQuorum try { excludeBookies = addDefaultRackBookiesIfMinNumRacksIsEnforced(excludeBookies); excludeBookies.addAll(currentEnsemble); + + Set 
ensembleNodes = new HashSet<>(); + Set excludeNodes = new HashSet<>(); BookieNode bn = knownBookies.get(bookieToReplace); if (null == bn) { bn = createBookieNode(bookieToReplace); } - - Set ensembleNodes = convertBookiesToNodes(currentEnsemble); - Set excludeNodes = convertBookiesToNodes(excludeBookies); + for (BookieId bookieId : currentEnsemble) { + if (bookieId.equals(bookieToReplace)) { + continue; + } + ensembleNodes.add(convertBookieToNode(bookieId)); + } + for (BookieId bookieId : excludeBookies) { + if (bookieId.equals(bookieToReplace)) { + excludeNodes.add(bn); + continue; + } + excludeNodes.add(convertBookieToNode(bookieId)); + } excludeNodes.addAll(ensembleNodes); excludeNodes.add(bn); diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RegionAwareEnsemblePlacementPolicy.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RegionAwareEnsemblePlacementPolicy.java index 19729a4bde8..c742e62c04d 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RegionAwareEnsemblePlacementPolicy.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RegionAwareEnsemblePlacementPolicy.java @@ -244,7 +244,7 @@ public RegionAwareEnsemblePlacementPolicy initialize(ClientConfiguration conf, .initialize(dnsResolver, timer, this.reorderReadsRandom, this.stabilizePeriodSeconds, this.reorderThresholdPendingRequests, this.isWeighted, this.maxWeightMultiple, this.minNumRacksPerWriteQuorum, this.enforceMinNumRacksPerWriteQuorum, - this.ignoreLocalNodeInPlacementPolicy, this.ignoreLocalNodeInPlacementPolicy, + this.ignoreLocalNodeInPlacementPolicy, this.useHostnameResolveLocalNodePlacementPolicy, statsLogger, bookieAddressResolver) .withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK)); } @@ -392,9 +392,20 @@ public PlacementResult> newEnsemble(int ensembleSize, int writeQu remainingEnsembleBeforeIteration = remainingEnsemble; int regionsToAllocate = numRemainingRegions; int startRegionIndex = 
lastRegionIndex % numRegionsAvailable; + int localRegionIndex = -1; + if (myRegion != null && !UNKNOWN_REGION.equals(myRegion)) { + localRegionIndex = availableRegions.indexOf(myRegion); + } + String region = myRegion; for (int i = 0; i < numRegionsAvailable; ++i) { - String region = availableRegions.get(startRegionIndex % numRegionsAvailable); - startRegionIndex++; + // select the local region first, and for the rest region select, use round-robin selection. + if (i > 0 || localRegionIndex == -1) { + if (startRegionIndex % numRegionsAvailable == localRegionIndex) { + startRegionIndex++; + } + region = availableRegions.get(startRegionIndex % numRegionsAvailable); + startRegionIndex++; + } final Pair currentAllocation = regionsWiseAllocation.get(region); TopologyAwareEnsemblePlacementPolicy policyWithinRegion = perRegionPlacement.get(region); if (!regionsReachedMaxAllocation.contains(region)) { diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/TopologyAwareEnsemblePlacementPolicy.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/TopologyAwareEnsemblePlacementPolicy.java index 463d9599de2..4976f96e8c2 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/TopologyAwareEnsemblePlacementPolicy.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/TopologyAwareEnsemblePlacementPolicy.java @@ -824,9 +824,8 @@ protected String resolveNetworkLocation(BookieId addr) { if (null != historyBookie) { return historyBookie.getNetworkLocation(); } - - LOG.error("Cannot resolve bookieId {} to a network address, resolving as {}", addr, - NetworkTopology.DEFAULT_REGION_AND_RACK, err); + LOG.error("Cannot resolve bookieId {} to a network address, resolving as {}. 
{}", addr, + NetworkTopology.DEFAULT_REGION_AND_RACK, err.getMessage()); return NetworkTopology.DEFAULT_REGION_AND_RACK; } } diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java index 454069a04d7..5b12a8f9e43 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java @@ -344,6 +344,9 @@ public class ServerConfiguration extends AbstractConfiguration implements GenericCallback { public void operationComplete(int rc, T result) { closeLock.readLock().lock(); try { - if (!closed && null != removeCallback(cb)) { + if (!closed && removeCallback(cb)) { cb.operationComplete(rc, result); } } finally { @@ -78,8 +77,7 @@ public LedgerRange next() throws IOException { } private final LedgerManager underlying; - private final ConcurrentMap callbacks = - new ConcurrentHashMap(); + private final Set callbacks = ConcurrentHashMap.newKeySet(); private boolean closed = false; private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock(); private final Set> futures = ConcurrentHashMap.newKeySet(); @@ -94,7 +92,7 @@ public LedgerManager getUnderlying() { } private void addCallback(GenericCallback callback) { - callbacks.put(callback, callback); + callbacks.add(callback); } @Override @@ -107,7 +105,7 @@ public void unregisterLedgerMetadataListener(long ledgerId, LedgerMetadataListen underlying.unregisterLedgerMetadataListener(ledgerId, listener); } - private GenericCallback removeCallback(GenericCallback callback) { + private boolean removeCallback(GenericCallback callback) { return callbacks.remove(callback); } @@ -206,7 +204,7 @@ public void operationComplete(int rc, Void result) { underlying.asyncProcessLedgers(processor, new AsyncCallback.VoidCallback() { @Override public void processResult(int rc, String path, 
Object ctx) { - if (null != removeCallback(stub)) { + if (removeCallback(stub)) { finalCb.processResult(rc, path, ctx); } } @@ -239,14 +237,13 @@ public void close() throws IOException { return; } closed = true; - keys = new HashSet(callbacks.keySet()); + keys = new HashSet<>(callbacks); } finally { closeLock.writeLock().unlock(); } for (GenericCallback key : keys) { - GenericCallback callback = callbacks.remove(key); - if (null != callback) { - callback.operationComplete(BKException.Code.ClientClosedException, null); + if (callbacks.remove(key)) { + key.operationComplete(BKException.Code.ClientClosedException, null); } } BKException exception = new BKException.BKClientClosedException(); diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BatchedReadEntryProcessor.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BatchedReadEntryProcessor.java new file mode 100644 index 00000000000..700952042f0 --- /dev/null +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BatchedReadEntryProcessor.java @@ -0,0 +1,109 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ +package org.apache.bookkeeper.proto; + +import io.netty.buffer.ByteBuf; +import io.netty.util.Recycler; +import io.netty.util.ReferenceCounted; +import java.util.concurrent.ExecutorService; +import org.apache.bookkeeper.proto.BookieProtocol.BatchedReadRequest; +import org.apache.bookkeeper.util.ByteBufList; + +public class BatchedReadEntryProcessor extends ReadEntryProcessor { + + private long maxBatchReadSize; + + public static BatchedReadEntryProcessor create(BatchedReadRequest request, + BookieRequestHandler requestHandler, + BookieRequestProcessor requestProcessor, + ExecutorService fenceThreadPool, + boolean throttleReadResponses, + long maxBatchReadSize) { + BatchedReadEntryProcessor rep = RECYCLER.get(); + rep.init(request, requestHandler, requestProcessor); + rep.fenceThreadPool = fenceThreadPool; + rep.throttleReadResponses = throttleReadResponses; + rep.maxBatchReadSize = maxBatchReadSize; + requestProcessor.onReadRequestStart(requestHandler.ctx().channel()); + return rep; + } + + @Override + protected ReferenceCounted readData() throws Exception { + ByteBufList data = null; + BatchedReadRequest batchRequest = (BatchedReadRequest) request; + int maxCount = batchRequest.getMaxCount(); + if (maxCount <= 0) { + maxCount = Integer.MAX_VALUE; + } + long maxSize = Math.min(batchRequest.getMaxSize(), maxBatchReadSize); + //See BookieProtoEncoding.ResponseEnDeCoderPreV3#encode on BatchedReadResponse case. 
+ long frameSize = 24 + 8 + 4; + for (int i = 0; i < maxCount; i++) { + try { + ByteBuf entry = requestProcessor.getBookie().readEntry(request.getLedgerId(), request.getEntryId() + i); + frameSize += entry.readableBytes() + 4; + if (data == null) { + data = ByteBufList.get(entry); + } else { + if (frameSize > maxSize) { + entry.release(); + break; + } + data.add(entry); + } + } catch (Throwable e) { + if (data == null) { + throw e; + } + break; + } + } + return data; + } + + @Override + protected BookieProtocol.Response buildReadResponse(ReferenceCounted data) { + return ResponseBuilder.buildBatchedReadResponse((ByteBufList) data, (BatchedReadRequest) request); + } + + protected void recycle() { + request.recycle(); + super.reset(); + if (this.recyclerHandle != null) { + this.recyclerHandle.recycle(this); + } + } + + private final Recycler.Handle recyclerHandle; + + private BatchedReadEntryProcessor(Recycler.Handle recyclerHandle) { + this.recyclerHandle = recyclerHandle; + } + + private static final Recycler RECYCLER = new Recycler() { + @Override + protected BatchedReadEntryProcessor newObject(Recycler.Handle handle) { + return new BatchedReadEntryProcessor(handle); + } + }; + +} diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClient.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClient.java index 938874fac04..5d20e1b22d1 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClient.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClient.java @@ -26,6 +26,7 @@ import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.client.api.WriteFlag; import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.BatchedReadEntryCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ForceLedgerCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GetBookieInfoCallback; 
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback; @@ -180,6 +181,47 @@ void readEntry(BookieId address, long ledgerId, long entryId, ReadEntryCallback cb, Object ctx, int flags, byte[] masterKey, boolean allowFastFail); + /** + * Batch read entries with a null masterkey, disallowing failfast. + * @see #batchReadEntries(BookieId,long,long,int,long,BatchedReadEntryCallback,Object,int,byte[],boolean) + */ + default void batchReadEntries(BookieId address, long ledgerId, long startEntryId, + int maxCount, long maxSize, BatchedReadEntryCallback cb, Object ctx, + int flags) { + batchReadEntries(address, ledgerId, startEntryId, maxCount, maxSize, cb, ctx, flags, null); + } + + /** + * Batch read entries, disallowing failfast. + * @see #batchReadEntries(BookieId,long,long,int,long,BatchedReadEntryCallback,Object,int,byte[],boolean) + */ + default void batchReadEntries(BookieId address, long ledgerId, long startEntryId, + int maxCount, long maxSize, BatchedReadEntryCallback cb, Object ctx, + int flags, byte[] masterKey) { + batchReadEntries(address, ledgerId, startEntryId, maxCount, maxSize, cb, ctx, flags, masterKey, false); + } + + /** + * Batch read entries from bookie at address {@code address}. + * + * @param address address of the bookie to read from + * @param ledgerId id of the ledger the entry belongs to + * @param startEntryId id of the entry started + * @param maxCount the total entries count in this batch + * @param maxSize the total entries size in this batch + * @param cb the callback notified when the request completes + * @param ctx a context object passed to the callback on completion + * @param flags a bit mask of flags from BookieProtocol.FLAG_* + * {@link org.apache.bookkeeper.proto.BookieProtocol} + * @param masterKey the master key of the ledger being read from. This is only required + * if the FLAG_DO_FENCING is specified. 
+ * @param allowFastFail fail the read immediately if the channel is non-writable + * {@link #isWritable(BookieId,long)} + */ + void batchReadEntries(BookieId address, long ledgerId, long startEntryId, + int maxCount, long maxSize, BatchedReadEntryCallback cb, Object ctx, + int flags, byte[] masterKey, boolean allowFastFail); + /** * Send a long poll request to bookie, waiting for the last add confirmed * to be updated. The client can also request that the full entry is returned diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClientImpl.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClientImpl.java index c305a51ea42..8d7742e7848 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClientImpl.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieClientImpl.java @@ -54,6 +54,7 @@ import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.BatchedReadEntryCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ForceLedgerCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.FutureGetListOfEntriesOfLedger; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback; @@ -353,6 +354,20 @@ private void completeRead(final int rc, } } + private void completeBatchRead(final int rc, + final long ledgerId, + final long startEntryId, + final ByteBufList bufList, + final BatchedReadEntryCallback cb, + final Object ctx) { + try { + executor.executeOrdered(ledgerId, () -> cb.readEntriesComplete(rc, ledgerId, startEntryId, bufList, ctx)); + } catch (RejectedExecutionException ree) { + cb.readEntriesComplete(getRc(BKException.Code.InterruptedException), + ledgerId, startEntryId, bufList, ctx); + } + } + private static class ChannelReadyForAddEntryCallback implements 
GenericCallback { private final Handle recyclerHandle; @@ -489,6 +504,26 @@ public void readEntry(final BookieId addr, final long ledgerId, final long entry }, ledgerId); } + @Override + public void batchReadEntries(final BookieId address, final long ledgerId, final long startEntryId, + final int maxCount, final long maxSize, final BatchedReadEntryCallback cb, final Object ctx, + final int flags, final byte[] masterKey, final boolean allowFastFail) { + final PerChannelBookieClientPool client = lookupClient(address); + if (client == null) { + cb.readEntriesComplete(getRc(BKException.Code.BookieHandleNotAvailableException), + ledgerId, startEntryId, null, ctx); + return; + } + + client.obtain((rc, pcbc) -> { + if (rc != BKException.Code.OK) { + completeBatchRead(rc, ledgerId, startEntryId, null, cb, ctx); + } else { + pcbc.batchReadEntries(ledgerId, startEntryId, maxCount, maxSize, cb, ctx, flags, masterKey, + allowFastFail); + } + }, ledgerId); + } @Override public void readEntryWaitForLACUpdate(final BookieId addr, diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java index c56235dbe67..d8bfb4257a8 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java @@ -110,7 +110,30 @@ public Object encode(Object msg, ByteBufAllocator allocator) return msg; } BookieProtocol.Request r = (BookieProtocol.Request) msg; - if (r instanceof BookieProtocol.ReadRequest) { + if (r instanceof BookieProtocol.BatchedReadRequest) { + int totalHeaderSize = 4 // for request type + + 8 // for ledger id + + 8 // for entry id + + 8 // for request id + + 4 // for max count + + 8; // for max size + if (r.hasMasterKey()) { + totalHeaderSize += BookieProtocol.MASTER_KEY_LENGTH; + } + ByteBuf buf = allocator.buffer(totalHeaderSize + 4 /* 
frame size */); + buf.writeInt(totalHeaderSize); + buf.writeInt(PacketHeader.toInt(r.getProtocolVersion(), r.getOpCode(), r.getFlags())); + buf.writeLong(r.getLedgerId()); + buf.writeLong(r.getEntryId()); + buf.writeLong(((BookieProtocol.BatchedReadRequest) r).getRequestId()); + buf.writeInt(((BookieProtocol.BatchedReadRequest) r).getMaxCount()); + buf.writeLong(((BookieProtocol.BatchedReadRequest) r).getMaxSize()); + if (r.hasMasterKey()) { + buf.writeBytes(r.getMasterKey(), 0, BookieProtocol.MASTER_KEY_LENGTH); + } + r.recycle(); + return buf; + } else if (r instanceof BookieProtocol.ReadRequest) { int totalHeaderSize = 4 // for request type + 8 // for ledgerId + 8; // for entryId @@ -181,6 +204,21 @@ public Object decode(ByteBuf packet) } else { return BookieProtocol.ReadRequest.create(version, ledgerId, entryId, flags, null); } + case BookieProtocol.BATCH_READ_ENTRY: + ledgerId = packet.readLong(); + entryId = packet.readLong(); + long requestId = packet.readLong(); + int maxCount = packet.readInt(); + long maxSize = packet.readLong(); + if ((flags & BookieProtocol.FLAG_DO_FENCING) == BookieProtocol.FLAG_DO_FENCING + && version >= 2) { + byte[] masterKey = readMasterKey(packet); + return BookieProtocol.BatchedReadRequest.create(version, ledgerId, entryId, flags, masterKey, + requestId, maxCount, maxSize); + } else { + return BookieProtocol.BatchedReadRequest.create(version, ledgerId, entryId, flags, null, + requestId, maxCount, maxSize); + } case BookieProtocol.AUTH: BookkeeperProtocol.AuthMessage.Builder builder = BookkeeperProtocol.AuthMessage.newBuilder(); builder.mergeFrom(new ByteBufInputStream(packet), extensionRegistry); @@ -260,6 +298,40 @@ public Object encode(Object msg, ByteBufAllocator allocator) } else { return ByteBufList.get(buf, rr.getData()); } + } else if (msg instanceof BookieProtocol.BatchedReadResponse) { + BookieProtocol.BatchedReadResponse brr = (BookieProtocol.BatchedReadResponse) r; + int payloadSize = brr.getData().readableBytes(); + 
int delimiterSize = brr.getData().size() * 4; // The size of each entry. + boolean isSmallEntry = (payloadSize + delimiterSize) < SMALL_ENTRY_SIZE_THRESHOLD; + + int responseSize = RESPONSE_HEADERS_SIZE + 8 /* request_id */ + payloadSize + delimiterSize; + int bufferSize = 4 /* frame size */ + responseSize; + ByteBuf buf = allocator.buffer(bufferSize); + buf.writeInt(responseSize); + buf.writeInt(PacketHeader.toInt(r.getProtocolVersion(), r.getOpCode(), (short) 0)); + buf.writeInt(r.getErrorCode()); + buf.writeLong(r.getLedgerId()); + buf.writeLong(r.getEntryId()); + buf.writeLong(((BookieProtocol.BatchedReadResponse) r).getRequestId()); + if (isSmallEntry) { + for (int i = 0; i < brr.getData().size(); i++) { + ByteBuf entryData = brr.getData().getBuffer(i); + buf.writeInt(entryData.readableBytes()); + buf.writeBytes(entryData); + } + brr.release(); + return buf; + } else { + ByteBufList byteBufList = ByteBufList.get(buf); + for (int i = 0; i < brr.getData().size(); i++) { + ByteBuf entryData = brr.getData().getBuffer(i); + ByteBuf entryLengthBuf = allocator.buffer(4); + entryLengthBuf.writeInt(entryData.readableBytes()); + byteBufList.add(entryLengthBuf); + byteBufList.add(entryData); + } + return byteBufList; + } } else if (msg instanceof BookieProtocol.AddResponse) { ByteBuf buf = allocator.buffer(RESPONSE_HEADERS_SIZE + 4 /* frame size */); buf.writeInt(RESPONSE_HEADERS_SIZE); @@ -309,6 +381,25 @@ public Object decode(ByteBuf buffer) return new BookieProtocol.ReadResponse( version, rc, ledgerId, entryId, buffer.retainedSlice()); + case BookieProtocol.BATCH_READ_ENTRY: + rc = buffer.readInt(); + ledgerId = buffer.readLong(); + entryId = buffer.readLong(); + long requestId = buffer.readLong(); + ByteBufList data = null; + while (buffer.readableBytes() > 0) { + int entrySize = buffer.readInt(); + int entryPos = buffer.readerIndex(); + if (data == null) { + data = ByteBufList.get(buffer.retainedSlice(entryPos, entrySize)); + buffer.readerIndex(entryPos + 
entrySize); + } else { + data.add(buffer.retainedSlice(entryPos, entrySize)); + buffer.readerIndex(entryPos + entrySize); + } + } + return new BookieProtocol.BatchedReadResponse(version, rc, ledgerId, entryId, requestId, data == null + ? ByteBufList.get() : data.retain()); case BookieProtocol.AUTH: ByteBufInputStream bufStream = new ByteBufInputStream(buffer); BookkeeperProtocol.AuthMessage.Builder builder = BookkeeperProtocol.AuthMessage.newBuilder(); diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtocol.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtocol.java index 3a27f08a95d..6a93f8d2cc6 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtocol.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtocol.java @@ -27,6 +27,7 @@ import io.netty.util.ReferenceCountUtil; import io.netty.util.ReferenceCounted; import org.apache.bookkeeper.proto.BookkeeperProtocol.AuthMessage; +import org.apache.bookkeeper.util.ByteBufList; /** * The packets of the Bookie protocol all have a 4-byte integer indicating the @@ -132,6 +133,7 @@ public static short getFlags(int packetHeader) { byte READ_LAC = 4; byte WRITE_LAC = 5; byte GET_BOOKIE_INFO = 6; + byte BATCH_READ_ENTRY = 7; /** * The error code that indicates success. @@ -328,6 +330,10 @@ boolean isFencing() { private final Handle recyclerHandle; + protected ReadRequest() { + recyclerHandle = null; + } + private ReadRequest(Handle recyclerHandle) { this.recyclerHandle = recyclerHandle; } @@ -344,7 +350,74 @@ public void recycle() { ledgerId = -1; entryId = -1; masterKey = null; - recyclerHandle.recycle(this); + if (recyclerHandle != null) { + recyclerHandle.recycle(this); + } + } + } + + /** + * The request for reading data with batch optimization. + * The ledger_id and entry_id will be used as start_ledger_id and start_entry_id. + * And the batch read operation can only happen on one ledger. 
+ */ + class BatchedReadRequest extends ReadRequest { + + long requestId; + int maxCount; + long maxSize; + + static BatchedReadRequest create(byte protocolVersion, long ledgerId, long entryId, + short flags, byte[] masterKey, long requestId, int maxCount, long maxSize) { + BatchedReadRequest request = RECYCLER.get(); + request.protocolVersion = protocolVersion; + request.ledgerId = ledgerId; + request.entryId = entryId; + request.flags = flags; + request.masterKey = masterKey; + request.requestId = requestId; + request.maxCount = maxCount; + request.maxSize = maxSize; + request.opCode = BATCH_READ_ENTRY; + return request; + } + + int getMaxCount() { + return maxCount; + } + + long getMaxSize() { + return maxSize; + } + + long getRequestId() { + return requestId; + } + + private final Handle recyclerHandle; + + private BatchedReadRequest(Handle recyclerHandle) { + this.recyclerHandle = recyclerHandle; + } + + private static final Recycler RECYCLER = new Recycler() { + @Override + protected BatchedReadRequest newObject(Handle handle) { + return new BatchedReadRequest(handle); + } + }; + + @Override + public void recycle() { + ledgerId = -1; + entryId = -1; + masterKey = null; + maxCount = -1; + maxSize = -1; + requestId = -1; + if (recyclerHandle != null) { + recyclerHandle.recycle(this); + } } } @@ -479,6 +552,74 @@ public boolean release(int decrement) { } } + /** + * The response for batched read. + * The ledger_id and entry_id will be used as start_ledger_id and start_entry_id. + * And all the returned data is from one ledger. 
+ */ + class BatchedReadResponse extends Response implements ReferenceCounted { + + final long requestId; + final ByteBufList data; + + BatchedReadResponse(byte protocolVersion, int errorCode, long ledgerId, long entryId, long requestId) { + this(protocolVersion, errorCode, ledgerId, entryId, requestId, ByteBufList.get()); + } + + BatchedReadResponse(byte protocolVersion, int errorCode, long ledgerId, long entryId, long requestId, + ByteBufList data) { + init(protocolVersion, BATCH_READ_ENTRY, errorCode, ledgerId, entryId); + this.requestId = requestId; + this.data = data; + } + + ByteBufList getData() { + return data; + } + + long getRequestId() { + return requestId; + } + + @Override + public int refCnt() { + return data.refCnt(); + } + + @Override + public ReferenceCounted retain() { + data.retain(); + return this; + } + + @Override + public ReferenceCounted retain(int increment) { + return data.retain(increment); + } + + @Override + public ReferenceCounted touch() { + data.touch(); + return this; + } + + @Override + public ReferenceCounted touch(Object hint) { + data.touch(hint); + return this; + } + + @Override + public boolean release() { + return data.release(); + } + + @Override + public boolean release(int decrement) { + return data.release(decrement); + } + } + /** * A response that adds data. 
*/ diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieRequestProcessor.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieRequestProcessor.java index a77b3d7bb5b..1a083519625 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieRequestProcessor.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieRequestProcessor.java @@ -380,6 +380,10 @@ public void processRequest(Object msg, BookieRequestHandler requestHandler) { checkArgument(r instanceof BookieProtocol.ReadRequest); processReadRequest((BookieProtocol.ReadRequest) r, requestHandler); break; + case BookieProtocol.BATCH_READ_ENTRY: + checkArgument(r instanceof BookieProtocol.BatchedReadRequest); + processReadRequest((BookieProtocol.BatchedReadRequest) r, requestHandler); + break; case BookieProtocol.AUTH: LOG.info("Ignoring auth operation from client {}", requestHandler.ctx().channel().remoteAddress()); @@ -677,8 +681,11 @@ private void processAddRequest(final BookieProtocol.ParsedAddRequest r, final Bo private void processReadRequest(final BookieProtocol.ReadRequest r, final BookieRequestHandler requestHandler) { ExecutorService fenceThreadPool = null == highPriorityThreadPool ? null : highPriorityThreadPool.chooseThread(requestHandler.ctx()); - ReadEntryProcessor read = ReadEntryProcessor.create(r, requestHandler, - this, fenceThreadPool, throttleReadResponses); + ReadEntryProcessor read = r instanceof BookieProtocol.BatchedReadRequest + ? 
BatchedReadEntryProcessor.create((BookieProtocol.BatchedReadRequest) r, requestHandler, + this, fenceThreadPool, throttleReadResponses, serverCfg.getMaxBatchReadSize()) + : ReadEntryProcessor.create(r, requestHandler, + this, fenceThreadPool, throttleReadResponses); // If it's a high priority read (fencing or as part of recovery process), we want to make sure it // gets executed as fast as possible, so bypass the normal readThreadPool diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookkeeperInternalCallbacks.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookkeeperInternalCallbacks.java index f42f7ff13a5..a464c050187 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookkeeperInternalCallbacks.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookkeeperInternalCallbacks.java @@ -36,6 +36,7 @@ import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.stats.OpStatsLogger; import org.apache.bookkeeper.util.AvailabilityOfEntriesOfLedger; +import org.apache.bookkeeper.util.ByteBufList; import org.apache.bookkeeper.util.MathUtils; import org.apache.bookkeeper.versioning.Versioned; import org.apache.zookeeper.AsyncCallback; @@ -221,6 +222,16 @@ public interface ReadEntryCallback { void readEntryComplete(int rc, long ledgerId, long entryId, ByteBuf buffer, Object ctx); } + /** + * Declaration of a callback implementation for calls from BookieClient objects. + * Such calls are for replies of batched read operations (operations to read multi entries + * from a ledger). + * + */ + public interface BatchedReadEntryCallback { + void readEntriesComplete(int rc, long ledgerId, long startEntryId, ByteBufList bufList, Object ctx); + } + /** * Listener on entries responded. 
*/ diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java index b4cf194e247..5ebafe8ecaf 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java @@ -102,6 +102,7 @@ import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.BatchedReadEntryCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ForceLedgerCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GetBookieInfoCallback; @@ -687,7 +688,7 @@ void connectIfNeededAndDoOp(GenericCallback op) { void writeLac(final long ledgerId, final byte[] masterKey, final long lac, ByteBufList toSend, WriteLacCallback cb, Object ctx) { final long txnId = getTxnId(); - final CompletionKey completionKey = new V3CompletionKey(txnId, + final CompletionKey completionKey = new TxnCompletionKey(txnId, OperationType.WRITE_LAC); // writeLac is mostly like addEntry hence uses addEntryTimeout completionObjects.put(completionKey, @@ -729,7 +730,7 @@ void forceLedger(final long ledgerId, ForceLedgerCallback cb, Object ctx) { return; } final long txnId = getTxnId(); - final CompletionKey completionKey = new V3CompletionKey(txnId, + final CompletionKey completionKey = new TxnCompletionKey(txnId, OperationType.FORCE_LEDGER); // force is mostly like addEntry hence uses addEntryTimeout completionObjects.put(completionKey, @@ -791,7 +792,7 @@ void addEntry(final long ledgerId, byte[] masterKey, final long entryId, Referen } } else { final long txnId = getTxnId(); - completionKey = new 
V3CompletionKey(txnId, OperationType.ADD_ENTRY); + completionKey = new TxnCompletionKey(txnId, OperationType.ADD_ENTRY); // Build the request and calculate the total size to be included in the packet. BKPacketHeader.Builder headerBuilder = BKPacketHeader.newBuilder() @@ -861,7 +862,7 @@ public void readLac(final long ledgerId, ReadLacCallback cb, Object ctx) { completionKey = acquireV2Key(ledgerId, 0, OperationType.READ_LAC); } else { final long txnId = getTxnId(); - completionKey = new V3CompletionKey(txnId, OperationType.READ_LAC); + completionKey = new TxnCompletionKey(txnId, OperationType.READ_LAC); // Build the request and calculate the total size to be included in the packet. BKPacketHeader.Builder headerBuilder = BKPacketHeader.newBuilder() @@ -883,7 +884,7 @@ public void readLac(final long ledgerId, ReadLacCallback cb, Object ctx) { public void getListOfEntriesOfLedger(final long ledgerId, GetListOfEntriesOfLedgerCallback cb) { final long txnId = getTxnId(); - final CompletionKey completionKey = new V3CompletionKey(txnId, OperationType.GET_LIST_OF_ENTRIES_OF_LEDGER); + final CompletionKey completionKey = new TxnCompletionKey(txnId, OperationType.GET_LIST_OF_ENTRIES_OF_LEDGER); completionObjects.put(completionKey, new GetListOfEntriesOfLedgerCompletion(completionKey, cb, ledgerId)); // Build the request. @@ -945,7 +946,7 @@ private void readEntryInternal(final long ledgerId, completionKey = acquireV2Key(ledgerId, entryId, OperationType.READ_ENTRY); } else { final long txnId = getTxnId(); - completionKey = new V3CompletionKey(txnId, OperationType.READ_ENTRY); + completionKey = new TxnCompletionKey(txnId, OperationType.READ_ENTRY); // Build the request and calculate the total size to be included in the packet. 
BKPacketHeader.Builder headerBuilder = BKPacketHeader.newBuilder() @@ -1007,9 +1008,52 @@ private void readEntryInternal(final long ledgerId, writeAndFlush(channel, completionKey, request, allowFastFail); } + public void batchReadEntries(final long ledgerId, + final long startEntryId, + final int maxCount, + final long maxSize, + BatchedReadEntryCallback cb, + Object ctx, + int flags, + byte[] masterKey, + boolean allowFastFail) { + + batchReadEntriesInternal(ledgerId, startEntryId, maxCount, maxSize, null, null, false, + cb, ctx, (short) flags, masterKey, allowFastFail); + } + + private void batchReadEntriesInternal(final long ledgerId, + final long startEntryId, + final int maxCount, + final long maxSize, + final Long previousLAC, + final Long timeOutInMillis, + final boolean piggyBackEntry, + final BatchedReadEntryCallback cb, + final Object ctx, + int flags, + byte[] masterKey, + boolean allowFastFail) { + Object request; + CompletionKey completionKey; + final long txnId = getTxnId(); + if (useV2WireProtocol) { + request = BookieProtocol.BatchedReadRequest.create(BookieProtocol.CURRENT_PROTOCOL_VERSION, + ledgerId, startEntryId, (short) flags, masterKey, txnId, maxCount, maxSize); + completionKey = new TxnCompletionKey(txnId, OperationType.BATCH_READ_ENTRY); + } else { + throw new UnsupportedOperationException("Unsupported batch read entry operation for v3 protocol."); + } + BatchedReadCompletion readCompletion = new BatchedReadCompletion( + completionKey, cb, ctx, ledgerId, startEntryId); + putCompletionKeyValue(completionKey, readCompletion); + + writeAndFlush(channel, completionKey, request, allowFastFail); + } + public void getBookieInfo(final long requested, GetBookieInfoCallback cb, Object ctx) { final long txnId = getTxnId(); - final CompletionKey completionKey = new V3CompletionKey(txnId, OperationType.GET_BOOKIE_INFO); + final CompletionKey completionKey = new TxnCompletionKey(txnId, OperationType.GET_BOOKIE_INFO); completionObjects.put(completionKey, 
new GetBookieInfoCompletion( completionKey, cb, ctx)); @@ -1355,7 +1399,12 @@ private void readV2Response(final BookieProtocol.Response response) { OperationType operationType = getOperationType(response.getOpCode()); StatusCode status = getStatusCodeFromErrorCode(response.errorCode); - CompletionKey key = acquireV2Key(response.ledgerId, response.entryId, operationType); + CompletionKey key; + if (OperationType.BATCH_READ_ENTRY == operationType) { + key = new TxnCompletionKey(((BookieProtocol.BatchedReadResponse) response).getRequestId(), operationType); + } else { + key = acquireV2Key(response.ledgerId, response.entryId, operationType); + } CompletionValue completionValue = getCompletionValue(key); key.release(); @@ -1437,6 +1486,8 @@ private static OperationType getOperationType(byte opCode) { return OperationType.WRITE_LAC; case BookieProtocol.GET_BOOKIE_INFO: return OperationType.GET_BOOKIE_INFO; + case BookieProtocol.BATCH_READ_ENTRY: + return OperationType.BATCH_READ_ENTRY; default: throw new IllegalArgumentException("Invalid operation type " + opCode); } @@ -1968,6 +2019,83 @@ private void handleReadResponse(long ledgerId, } } + class BatchedReadCompletion extends CompletionValue { + + final BatchedReadEntryCallback cb; + + public BatchedReadCompletion(final CompletionKey key, + final BatchedReadEntryCallback originalCallback, + final Object originalCtx, + long ledgerId, final long entryId) { + super("BatchedRead", originalCtx, ledgerId, entryId, + readEntryOpLogger, readTimeoutOpLogger); + this.cb = new BatchedReadEntryCallback() { + + @Override + public void readEntriesComplete(int rc, + long ledgerId, + long startEntryId, + ByteBufList bufList, + Object ctx) { + logOpResult(rc); + originalCallback.readEntriesComplete(rc, + ledgerId, entryId, + bufList, originalCtx); + key.release(); + } + }; + } + + @Override + public void errorOut() { + errorOut(BKException.Code.BookieHandleNotAvailableException); + } + + @Override + public void errorOut(final int rc) { 
+ errorOutAndRunCallback( + () -> cb.readEntriesComplete(rc, ledgerId, + entryId, null, ctx)); + } + + @Override + public void handleV2Response(long ledgerId, + long entryId, + StatusCode status, + BookieProtocol.Response response) { + + readEntryOutstanding.dec(); + if (!(response instanceof BookieProtocol.BatchedReadResponse)) { + return; + } + BookieProtocol.BatchedReadResponse readResponse = (BookieProtocol.BatchedReadResponse) response; + handleBatchedReadResponse(ledgerId, entryId, status, readResponse.getData(), + INVALID_ENTRY_ID, -1L); + } + + @Override + public void handleV3Response(Response response) { + // V3 protocol haven't supported batched read yet. + } + + private void handleBatchedReadResponse(long ledgerId, + long entryId, + StatusCode status, + ByteBufList buffers, + long maxLAC, // max known lac piggy-back from bookies + long lacUpdateTimestamp) { // the timestamp when the lac is updated. + int rc = convertStatus(status, BKException.Code.ReadException); + + if (maxLAC > INVALID_ENTRY_ID && (ctx instanceof ReadEntryCallbackCtx)) { + ((ReadEntryCallbackCtx) ctx).setLastAddConfirmed(maxLAC); + } + if (lacUpdateTimestamp > -1L && (ctx instanceof ReadLastConfirmedAndEntryContext)) { + ((ReadLastConfirmedAndEntryContext) ctx).setLacUpdateTimestamp(lacUpdateTimestamp); + } + cb.readEntriesComplete(rc, ledgerId, entryId, buffers, ctx); + } + } + class StartTLSCompletion extends CompletionValue { final StartTLSCallback cb; @@ -2243,21 +2371,23 @@ private void handleResponse(long ledgerId, long entryId, // visable for testing CompletionKey newCompletionKey(long txnId, OperationType operationType) { - return new V3CompletionKey(txnId, operationType); + return new TxnCompletionKey(txnId, operationType); } - class V3CompletionKey extends CompletionKey { + class TxnCompletionKey extends CompletionKey { + final long txnId; - public V3CompletionKey(long txnId, OperationType operationType) { - super(txnId, operationType); + public TxnCompletionKey(long txnId, 
OperationType operationType) { + super(operationType); + this.txnId = txnId; } @Override public boolean equals(Object obj) { - if (!(obj instanceof V3CompletionKey)) { + if (!(obj instanceof TxnCompletionKey)) { return false; } - V3CompletionKey that = (V3CompletionKey) obj; + TxnCompletionKey that = (TxnCompletionKey) obj; return this.txnId == that.txnId && this.operationType == that.operationType; } @@ -2274,12 +2404,9 @@ public String toString() { } abstract class CompletionKey { - final long txnId; OperationType operationType; - CompletionKey(long txnId, - OperationType operationType) { - this.txnId = txnId; + CompletionKey(OperationType operationType) { this.operationType = operationType; } @@ -2340,28 +2467,28 @@ private long getTxnId() { return txnIdGenerator.incrementAndGet(); } - private final Recycler v2KeyRecycler = new Recycler() { + private final Recycler v2KeyRecycler = new Recycler() { @Override - protected V2CompletionKey newObject( - Recycler.Handle handle) { - return new V2CompletionKey(handle); + protected EntryCompletionKey newObject( + Recycler.Handle handle) { + return new EntryCompletionKey(handle); } }; - V2CompletionKey acquireV2Key(long ledgerId, long entryId, + EntryCompletionKey acquireV2Key(long ledgerId, long entryId, OperationType operationType) { - V2CompletionKey key = v2KeyRecycler.get(); + EntryCompletionKey key = v2KeyRecycler.get(); key.reset(ledgerId, entryId, operationType); return key; } - private class V2CompletionKey extends CompletionKey { - private final Handle recyclerHandle; + private class EntryCompletionKey extends CompletionKey { + private final Handle recyclerHandle; long ledgerId; long entryId; - private V2CompletionKey(Handle handle) { - super(-1, null); + private EntryCompletionKey(Handle handle) { + super(null); this.recyclerHandle = handle; } @@ -2373,10 +2500,10 @@ void reset(long ledgerId, long entryId, OperationType operationType) { @Override public boolean equals(Object object) { - if (!(object instanceof 
V2CompletionKey)) { + if (!(object instanceof EntryCompletionKey)) { return false; } - V2CompletionKey that = (V2CompletionKey) object; + EntryCompletionKey that = (EntryCompletionKey) object; return this.entryId == that.entryId && this.ledgerId == that.ledgerId && this.operationType == that.operationType; @@ -2564,7 +2691,7 @@ private void initiateTLS() { LOG.info("Initializing TLS to {}", channel); assert state == ConnectionState.CONNECTING; final long txnId = getTxnId(); - final CompletionKey completionKey = new V3CompletionKey(txnId, OperationType.START_TLS); + final CompletionKey completionKey = new TxnCompletionKey(txnId, OperationType.START_TLS); completionObjects.put(completionKey, new StartTLSCompletion(completionKey)); BookkeeperProtocol.Request.Builder h = withRequestContext(BookkeeperProtocol.Request.newBuilder()); diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessor.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessor.java index 04efd9634b2..d321623c54b 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessor.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessor.java @@ -20,6 +20,7 @@ import io.netty.buffer.ByteBuf; import io.netty.util.Recycler; import io.netty.util.ReferenceCountUtil; +import io.netty.util.ReferenceCounted; import java.io.IOException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -39,8 +40,8 @@ class ReadEntryProcessor extends PacketProcessorBase { private static final Logger LOG = LoggerFactory.getLogger(ReadEntryProcessor.class); - private ExecutorService fenceThreadPool; - private boolean throttleReadResponses; + protected ExecutorService fenceThreadPool; + protected boolean throttleReadResponses; public static ReadEntryProcessor create(ReadRequest request, BookieRequestHandler requestHandler, @@ -70,7 +71,7 @@ protected void 
processPacket() { } int errorCode = BookieProtocol.EOK; long startTimeNanos = MathUtils.nowInNano(); - ByteBuf data = null; + ReferenceCounted data = null; try { CompletableFuture fenceResult = null; if (request.isFencing()) { @@ -85,9 +86,9 @@ protected void processPacket() { throw BookieException.create(BookieException.Code.UnauthorizedAccessException); } } - data = requestProcessor.getBookie().readEntry(request.getLedgerId(), request.getEntryId()); + data = readData(); if (LOG.isDebugEnabled()) { - LOG.debug("##### Read entry ##### {} -- ref-count: {}", data.readableBytes(), data.refCnt()); + LOG.debug("##### Read entry ##### -- ref-count: {}", data.refCnt()); } if (fenceResult != null) { handleReadResultForFenceRead(fenceResult, data, startTimeNanos); @@ -126,13 +127,17 @@ protected void processPacket() { sendResponse(data, errorCode, startTimeNanos); } - private void sendResponse(ByteBuf data, int errorCode, long startTimeNanos) { + protected ReferenceCounted readData() throws Exception { + return requestProcessor.getBookie().readEntry(request.getLedgerId(), request.getEntryId()); + } + + private void sendResponse(ReferenceCounted data, int errorCode, long startTimeNanos) { final RequestStats stats = requestProcessor.getRequestStats(); final OpStatsLogger logger = stats.getReadEntryStats(); BookieProtocol.Response response; if (errorCode == BookieProtocol.EOK) { logger.registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS); - response = ResponseBuilder.buildReadResponse(data, request); + response = buildReadResponse(data); } else { if (data != null) { ReferenceCountUtil.release(data); @@ -145,13 +150,17 @@ private void sendResponse(ByteBuf data, int errorCode, long startTimeNanos) { recycle(); } - private void sendFenceResponse(Boolean result, ByteBuf data, long startTimeNanos) { + protected BookieProtocol.Response buildReadResponse(ReferenceCounted data) { + return ResponseBuilder.buildReadResponse((ByteBuf) data, request); + } 
+ + private void sendFenceResponse(Boolean result, ReferenceCounted data, long startTimeNanos) { final int retCode = result != null && result ? BookieProtocol.EOK : BookieProtocol.EIO; sendResponse(data, retCode, startTimeNanos); } private void handleReadResultForFenceRead(CompletableFuture fenceResult, - ByteBuf data, + ReferenceCounted data, long startTimeNanos) { if (null != fenceThreadPool) { fenceResult.whenCompleteAsync(new FutureEventListener() { @@ -192,7 +201,9 @@ public String toString() { void recycle() { request.recycle(); super.reset(); - this.recyclerHandle.recycle(this); + if (this.recyclerHandle != null) { + this.recyclerHandle.recycle(this); + } } private final Recycler.Handle recyclerHandle; @@ -201,6 +212,10 @@ private ReadEntryProcessor(Recycler.Handle recyclerHandle) { this.recyclerHandle = recyclerHandle; } + protected ReadEntryProcessor() { + this.recyclerHandle = null; + } + private static final Recycler RECYCLER = new Recycler() { @Override protected ReadEntryProcessor newObject(Recycler.Handle handle) { diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ResponseBuilder.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ResponseBuilder.java index 563c0a1352f..4faa3dbc340 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ResponseBuilder.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ResponseBuilder.java @@ -21,16 +21,20 @@ package org.apache.bookkeeper.proto; import io.netty.buffer.ByteBuf; +import org.apache.bookkeeper.util.ByteBufList; class ResponseBuilder { static BookieProtocol.Response buildErrorResponse(int errorCode, BookieProtocol.Request r) { if (r.getOpCode() == BookieProtocol.ADDENTRY) { return BookieProtocol.AddResponse.create(r.getProtocolVersion(), errorCode, r.getLedgerId(), r.getEntryId()); - } else { - assert(r.getOpCode() == BookieProtocol.READENTRY); + } else if (r.getOpCode() == BookieProtocol.READENTRY) { return new 
BookieProtocol.ReadResponse(r.getProtocolVersion(), errorCode, r.getLedgerId(), r.getEntryId()); + } else { + assert(r.getOpCode() == BookieProtocol.BATCH_READ_ENTRY); + return new BookieProtocol.BatchedReadResponse(r.getProtocolVersion(), errorCode, + r.getLedgerId(), r.getEntryId(), ((BookieProtocol.BatchedReadRequest) r).getRequestId()); } } @@ -43,4 +47,9 @@ static BookieProtocol.Response buildReadResponse(ByteBuf data, BookieProtocol.Re return new BookieProtocol.ReadResponse(r.getProtocolVersion(), BookieProtocol.EOK, r.getLedgerId(), r.getEntryId(), data); } + + static BookieProtocol.Response buildBatchedReadResponse(ByteBufList data, BookieProtocol.BatchedReadRequest r) { + return new BookieProtocol.BatchedReadResponse(r.getProtocolVersion(), BookieProtocol.EOK, + r.getLedgerId(), r.getEntryId(), r.getRequestId(), data); + } } diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/Auditor.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/Auditor.java index 93810280189..13b10cf927c 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/Auditor.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/Auditor.java @@ -618,10 +618,16 @@ public void shutdown() { if (ownBkc) { bkc.close(); } + if (ledgerManager != null) { + ledgerManager.close(); + } + if (ledgerUnderreplicationManager != null) { + ledgerUnderreplicationManager.close(); + } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); LOG.warn("Interrupted while shutting down auditor bookie", ie); - } catch (BKException bke) { + } catch (UnavailableException | IOException | BKException bke) { LOG.warn("Exception while shutting down auditor bookie", bke); } } diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/AuditorElector.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/AuditorElector.java index b478f43a0c2..f6b3a3a04f2 100644 --- 
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/AuditorElector.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/AuditorElector.java @@ -25,11 +25,14 @@ import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; @@ -147,26 +150,28 @@ public Future start() { /** * Run cleanup operations for the auditor elector. */ - private void submitShutdownTask() { - executor.submit(new Runnable() { - @Override - public void run() { - if (!running.compareAndSet(true, false)) { - return; - } - - try { - ledgerAuditorManager.close(); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - LOG.warn("InterruptedException while closing ledger auditor manager", ie); - } catch (Exception ke) { - LOG.error("Exception while closing ledger auditor manager", ke); - } - } - }); + private Future submitShutdownTask() { + return executor.submit(shutdownTask); } + Runnable shutdownTask = new Runnable() { + @Override + public void run() { + if (!running.compareAndSet(true, false)) { + return; + } + + try { + ledgerAuditorManager.close(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + LOG.warn("InterruptedException while closing ledger auditor manager", ie); + } catch (Exception ke) { + LOG.error("Exception while closing ledger auditor manager", ke); + } + } + }; + /** * Performing the auditor election using the ZooKeeper ephemeral 
sequential * znode. The bookie which has created the least sequential will be elect as @@ -238,8 +243,18 @@ public void shutdown() throws InterruptedException { return; } // close auditor manager - submitShutdownTask(); - executor.shutdown(); + try { + submitShutdownTask().get(10, TimeUnit.SECONDS); + executor.shutdown(); + } catch (ExecutionException e) { + LOG.warn("Failed to close auditor manager", e); + executor.shutdownNow(); + shutdownTask.run(); + } catch (TimeoutException e) { + LOG.warn("Failed to close auditor manager in 10 seconds", e); + executor.shutdownNow(); + shutdownTask.run(); + } } if (auditor != null) { diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java index f22231c5676..cf1f2f2f598 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java @@ -600,7 +600,7 @@ private boolean isLastSegmentOpenAndMissingBookies(LedgerHandle lh) throws BKExc /** * Gets the under replicated fragments. */ - private Set getUnderreplicatedFragments(LedgerHandle lh, Long ledgerVerificationPercentage) + Set getUnderreplicatedFragments(LedgerHandle lh, Long ledgerVerificationPercentage) throws InterruptedException { //The data loss fragments is first to repair. If a fragment is data_loss and not_adhering_placement //at the same time, we only fix data_loss in this time. 
After fix data_loss, the fragment is still diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ByteBufList.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ByteBufList.java index 3a0d7b2bdef..324588d852b 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ByteBufList.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ByteBufList.java @@ -123,7 +123,7 @@ public static ByteBufList clone(ByteBufList other) { return buf; } - private static ByteBufList get() { + public static ByteBufList get() { ByteBufList buf = RECYCLER.get(); buf.setRefCnt(1); return buf; diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/HardLink.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/HardLink.java index d3e72b8aad6..1d3a3645921 100644 --- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/HardLink.java +++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/HardLink.java @@ -22,12 +22,18 @@ import static java.nio.charset.StandardCharsets.UTF_8; +import com.google.common.annotations.VisibleForTesting; import java.io.BufferedReader; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Class for creating hardlinks. @@ -42,7 +48,7 @@ * efficient - and minimizes the impact of the extra buffer creations. */ public class HardLink { - + private static final Logger LOG = LoggerFactory.getLogger(HardLink.class); /** * OS Types. 
*/ @@ -395,12 +401,19 @@ protected static int getMaxAllowedCmdArgLength() { return getHardLinkCommand.getMaxAllowedCmdArgLength(); } + private static final AtomicBoolean CREATE_LINK_SUPPORTED = new AtomicBoolean(true); + /* * **************************************************** * Complexity is above. User-visible functionality is below * **************************************************** */ + @VisibleForTesting + static void enableJdkLinkApi(boolean enable) { + CREATE_LINK_SUPPORTED.set(enable); + } + /** * Creates a hardlink. * @param file - existing source file @@ -416,6 +429,23 @@ public static void createHardLink(File file, File linkName) throw new IOException( "invalid arguments to createHardLink: link name is null"); } + + // if createLink available try first, else fall back to shell command. + if (CREATE_LINK_SUPPORTED.get()) { + try { + Path newFile = Files.createLink(linkName.toPath(), file.toPath()); + if (newFile.toFile().exists()) { + return; + } + } catch (UnsupportedOperationException e) { + LOG.error("createLink not supported", e); + CREATE_LINK_SUPPORTED.set(false); + } catch (IOException e) { + LOG.error("error when create hard link use createLink", e); + CREATE_LINK_SUPPORTED.set(false); + } + } + // construct and execute shell command String[] hardLinkCommand = getHardLinkCommand.linkOne(file, linkName); Process process = Runtime.getRuntime().exec(hardLinkCommand); diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/TestLedgerFragmentReplicationWithMock.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/TestLedgerFragmentReplicationWithMock.java new file mode 100644 index 00000000000..c618aa4a15b --- /dev/null +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/TestLedgerFragmentReplicationWithMock.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +package org.apache.bookkeeper.client; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.Unpooled; +import java.lang.reflect.Field; +import java.util.Enumeration; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import org.apache.bookkeeper.client.impl.LedgerEntryImpl; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookieClientImpl; +import org.apache.bookkeeper.proto.BookieProtoEncoding; +import org.apache.bookkeeper.proto.BookieProtocol; +import org.apache.bookkeeper.proto.checksum.DigestManager; +import org.apache.bookkeeper.proto.checksum.DummyDigestManager; +import org.apache.commons.collections4.IteratorUtils; +import org.apache.zookeeper.AsyncCallback; +import org.junit.Test; +import 
org.mockito.Mockito; + +public class TestLedgerFragmentReplicationWithMock { + + @Test + public void testRecoverLedgerFragmentEntrySendRightRequestWithFlag() throws Exception { + CountDownLatch latch = new CountDownLatch(1); + BookieClientImpl bookieClient = Mockito.mock(BookieClientImpl.class); + doAnswer(invocationOnMock -> { + ByteBuf toSend = invocationOnMock.getArgument(4); + BookieProtoEncoding.RequestEnDeCoderPreV3 deCoderPreV3 = + new BookieProtoEncoding.RequestEnDeCoderPreV3(null); + toSend.readerIndex(4); + BookieProtocol.ParsedAddRequest request = (BookieProtocol.ParsedAddRequest) deCoderPreV3.decode(toSend); + + Field flagField = request.getClass().getSuperclass().getDeclaredField("flags"); + flagField.setAccessible(true); + short flag = flagField.getShort(request); + assertEquals(flag, BookieProtocol.FLAG_RECOVERY_ADD); + latch.countDown(); + return null; + }).when(bookieClient) + .addEntry(any(), anyLong(), any(), anyLong(), any(), any(), any(), anyInt(), anyBoolean(), any()); + + BookKeeper bkc = Mockito.mock(BookKeeper.class); + when(bkc.getBookieClient()).thenReturn(bookieClient); + + LedgerHandle lh = Mockito.mock(LedgerHandle.class); + DummyDigestManager ds = new DummyDigestManager(1L, true, ByteBufAllocator.DEFAULT); + when(lh.getDigestManager()).thenReturn(ds); + when(lh.getLedgerKey()).thenReturn(DigestManager.generateMasterKey("".getBytes())); + + ByteBuf data = Unpooled.wrappedBuffer(new byte[1024]); + LedgerEntry entry = new LedgerEntry(LedgerEntryImpl.create(1L, 1L, data.readableBytes(), data)); + List list = new LinkedList<>(); + list.add(entry); + Enumeration entries = IteratorUtils.asEnumeration(list.iterator()); + doAnswer(invocation -> { + org.apache.bookkeeper.client.AsyncCallback.ReadCallback rc = + invocation.getArgument(2, org.apache.bookkeeper.client.AsyncCallback.ReadCallback.class); + rc.readComplete(0, lh, entries, null); + return null; + }).when(lh).asyncReadEntries(anyLong(), anyLong(), any(), any()); + + ClientConfiguration 
conf = new ClientConfiguration(); + LedgerFragmentReplicator lfr = new LedgerFragmentReplicator(bkc, conf); + + Set bookies = new HashSet<>(); + bookies.add(BookieId.parse("127.0.0.1:3181")); + + AsyncCallback.VoidCallback vc = new AsyncCallback.VoidCallback() { + @Override + public void processResult(int rc, String path, Object ctx) { + } + }; + + lfr.recoverLedgerFragmentEntry(1L, lh, vc, bookies, (lid, le) -> {}); + + latch.await(); + } +} diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/TestRegionAwareEnsemblePlacementPolicy.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/TestRegionAwareEnsemblePlacementPolicy.java index 9d8e36a350d..e254b76f034 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/TestRegionAwareEnsemblePlacementPolicy.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/TestRegionAwareEnsemblePlacementPolicy.java @@ -1930,4 +1930,119 @@ public void testNotifyRackChangeWithNewRegion() throws Exception { assertEquals("region2", repp.address2Region.get(addr3.toBookieId())); assertEquals("region3", repp.address2Region.get(addr4.toBookieId())); } + + + @Test + public void testNewEnsemblePickLocalRegionBookies() + throws Exception { + repp.uninitalize(); + BookieSocketAddress addr1 = new BookieSocketAddress("127.0.0.10", 3181); + BookieSocketAddress addr2 = new BookieSocketAddress("127.0.0.2", 3181); + BookieSocketAddress addr3 = new BookieSocketAddress("127.0.0.3", 3181); + BookieSocketAddress addr4 = new BookieSocketAddress("127.0.0.4", 3181); + BookieSocketAddress addr5 = new BookieSocketAddress("127.0.0.5", 3181); + BookieSocketAddress addr6 = new BookieSocketAddress("127.0.0.6", 3181); + BookieSocketAddress addr7 = new BookieSocketAddress("127.0.0.7", 3181); + BookieSocketAddress addr8 = new BookieSocketAddress("127.0.0.8", 3181); + BookieSocketAddress addr9 = new BookieSocketAddress("127.0.0.9", 3181); + + // update dns mapping + 
StaticDNSResolver.addNodeToRack(addr1.getHostName(), "/region1/r1"); + StaticDNSResolver.addNodeToRack(addr2.getHostName(), "/region2/r2"); + StaticDNSResolver.addNodeToRack(addr3.getHostName(), "/region2/r2"); + StaticDNSResolver.addNodeToRack(addr4.getHostName(), "/region2/r2"); + StaticDNSResolver.addNodeToRack(addr5.getHostName(), "/region3/r3"); + StaticDNSResolver.addNodeToRack(addr6.getHostName(), "/region4/r4"); + StaticDNSResolver.addNodeToRack(addr7.getHostName(), "/region5/r5"); + StaticDNSResolver.addNodeToRack(addr8.getHostName(), "/region1/r2"); + StaticDNSResolver.addNodeToRack(addr9.getHostName(), "/region1/r2"); + + + updateMyRack("/region1/r2"); + repp = new RegionAwareEnsemblePlacementPolicy(); + repp.initialize(conf, Optional.empty(), timer, + DISABLE_ALL, NullStatsLogger.INSTANCE, BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + repp.withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK); + // Update cluster + Set addrs = new HashSet(); + addrs.add(addr1.toBookieId()); + addrs.add(addr2.toBookieId()); + addrs.add(addr3.toBookieId()); + addrs.add(addr4.toBookieId()); + addrs.add(addr5.toBookieId()); + addrs.add(addr6.toBookieId()); + addrs.add(addr7.toBookieId()); + addrs.add(addr8.toBookieId()); + addrs.add(addr9.toBookieId()); + repp.onClusterChanged(addrs, new HashSet()); + + int ensembleSize = 3; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + + Set excludeBookies = new HashSet<>(); + + int bookie1Count = 0; + int bookie8Count = 0; + int bookie9Count = 0; + for (int i = 0; i < 100; ++i) { + EnsemblePlacementPolicy.PlacementResult> ensembleResponse = + repp.newEnsemble(ensembleSize, writeQuorumSize, + ackQuorumSize, null, excludeBookies); + List ensemble = ensembleResponse.getResult(); + if (ensemble.contains(addr1.toBookieId())) { + bookie1Count++; + } + if (ensemble.contains(addr8.toBookieId())) { + bookie8Count++; + } + if (ensemble.contains(addr9.toBookieId())) { + bookie9Count++; + } + + if (!ensemble.contains(addr8.toBookieId()) 
&& !ensemble.contains(addr9.toBookieId())) { + fail("Failed to select bookie located on the same region and rack with bookie client"); + } + if (ensemble.contains(addr2.toBookieId()) && ensemble.contains(addr3.toBookieId())) { + fail("addr2 and addr3 is same rack."); + } + } + LOG.info("Bookie1 Count: {}, Bookie8 Count: {}, Bookie9 Count: {}", bookie1Count, bookie8Count, bookie9Count); + + //shutdown all the bookies located in the same region and rack with local node + // to test new ensemble should contain addr1 + addrs.remove(addr8.toBookieId()); + addrs.remove(addr9.toBookieId()); + repp.onClusterChanged(addrs, new HashSet()); + bookie1Count = 0; + bookie8Count = 0; + bookie9Count = 0; + for (int i = 0; i < 100; ++i) { + try { + EnsemblePlacementPolicy.PlacementResult> ensembleResponse = + repp.newEnsemble(ensembleSize, writeQuorumSize, + ackQuorumSize, null, excludeBookies); + List ensemble = ensembleResponse.getResult(); + if (ensemble.contains(addr1.toBookieId())) { + bookie1Count++; + } + if (ensemble.contains(addr8.toBookieId())) { + bookie8Count++; + } + if (ensemble.contains(addr9.toBookieId())) { + bookie9Count++; + } + if (!ensemble.contains(addr1.toBookieId())) { + fail("Failed to select bookie located on the same region with bookie client"); + } + if (ensemble.contains(addr8.toBookieId()) || ensemble.contains(addr9.toBookieId())) { + fail("Selected the shutdown bookies"); + } + } catch (BKNotEnoughBookiesException e) { + fail("Failed to select the ensemble."); + } + } + LOG.info("Bookie1 Count: {}, Bookie8 Count: {}, Bookie9 Count: {}", bookie1Count, bookie8Count, bookie9Count); + + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/BatchedReadEntryProcessorTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/BatchedReadEntryProcessorTest.java new file mode 100644 index 00000000000..3f897558384 --- /dev/null +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/BatchedReadEntryProcessorTest.java 
@@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.proto; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.DefaultChannelPromise; +import io.netty.channel.EventLoop; +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.bookkeeper.bookie.Bookie; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.common.concurrent.FutureUtils; +import 
org.apache.bookkeeper.proto.BookieProtocol.Response; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.junit.Before; +import org.junit.Test; + + + +/** + * Unit test {@link ReadEntryProcessor}. + */ +public class BatchedReadEntryProcessorTest { + + private Channel channel; + private BookieRequestHandler requestHandler; + private BookieRequestProcessor requestProcessor; + private Bookie bookie; + + @Before + public void setup() throws IOException, BookieException { + channel = mock(Channel.class); + when(channel.isOpen()).thenReturn(true); + + requestHandler = mock(BookieRequestHandler.class); + ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + when(ctx.channel()).thenReturn(channel); + when(requestHandler.ctx()).thenReturn(ctx); + + bookie = mock(Bookie.class); + requestProcessor = mock(BookieRequestProcessor.class); + when(requestProcessor.getBookie()).thenReturn(bookie); + when(requestProcessor.getWaitTimeoutOnBackpressureMillis()).thenReturn(-1L); + when(requestProcessor.getRequestStats()).thenReturn(new RequestStats(NullStatsLogger.INSTANCE)); + when(channel.voidPromise()).thenReturn(mock(ChannelPromise.class)); + when(channel.writeAndFlush(any())).thenReturn(mock(ChannelPromise.class)); + EventLoop eventLoop = mock(EventLoop.class); + when(eventLoop.inEventLoop()).thenReturn(true); + when(channel.eventLoop()).thenReturn(eventLoop); + ByteBuf buffer0 = ByteBufAllocator.DEFAULT.buffer(4); + ByteBuf buffer1 = ByteBufAllocator.DEFAULT.buffer(4); + ByteBuf buffer2 = ByteBufAllocator.DEFAULT.buffer(4); + ByteBuf buffer3 = ByteBufAllocator.DEFAULT.buffer(4); + ByteBuf buffer4 = ByteBufAllocator.DEFAULT.buffer(4); + + when(bookie.readEntry(anyLong(), anyLong())).thenReturn(buffer0).thenReturn(buffer1).thenReturn(buffer2) + .thenReturn(buffer3).thenReturn(buffer4); + } + + @Test + public void testSuccessfulAsynchronousFenceRequest() throws Exception { + testAsynchronousRequest(true, BookieProtocol.EOK); + } + + @Test + public void 
testFailedAsynchronousFenceRequest() throws Exception { + testAsynchronousRequest(false, BookieProtocol.EIO); + } + + private void testAsynchronousRequest(boolean result, int errorCode) throws Exception { + CompletableFuture fenceResult = FutureUtils.createFuture(); + when(bookie.fenceLedger(anyLong(), any())).thenReturn(fenceResult); + + ChannelPromise promise = new DefaultChannelPromise(channel); + AtomicReference writtenObject = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + doAnswer(invocationOnMock -> { + writtenObject.set(invocationOnMock.getArgument(0)); + promise.setSuccess(); + latch.countDown(); + return promise; + }).when(channel).writeAndFlush(any(Response.class)); + + long requestId = 0; + int maxCount = 5; + long maxSize = 1024; + ExecutorService service = Executors.newCachedThreadPool(); + long ledgerId = System.currentTimeMillis(); + BookieProtocol.BatchedReadRequest request = BookieProtocol.BatchedReadRequest.create( + BookieProtocol.CURRENT_PROTOCOL_VERSION, ledgerId, 1, BookieProtocol.FLAG_DO_FENCING, new byte[] {}, + requestId, maxCount, maxSize); + ReadEntryProcessor processor = BatchedReadEntryProcessor.create( + request, requestHandler, requestProcessor, service, true, 1024 * 1024 * 5); + processor.run(); + + fenceResult.complete(result); + latch.await(); + verify(channel, times(1)).writeAndFlush(any(Response.class)); + + assertTrue(writtenObject.get() instanceof Response); + Response response = (Response) writtenObject.get(); + assertEquals(1, response.getEntryId()); + assertEquals(ledgerId, response.getLedgerId()); + assertEquals(BookieProtocol.BATCH_READ_ENTRY, response.getOpCode()); + assertEquals(errorCode, response.getErrorCode()); + service.shutdown(); + } + + @Test + public void testSuccessfulSynchronousFenceRequest() throws Exception { + testSynchronousRequest(true, BookieProtocol.EOK); + } + + @Test + public void testFailedSynchronousFenceRequest() throws Exception { + testSynchronousRequest(false, 
BookieProtocol.EIO); + } + + private void testSynchronousRequest(boolean result, int errorCode) throws Exception { + CompletableFuture fenceResult = FutureUtils.createFuture(); + when(bookie.fenceLedger(anyLong(), any())).thenReturn(fenceResult); + ChannelPromise promise = new DefaultChannelPromise(channel); + AtomicReference writtenObject = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + doAnswer(invocationOnMock -> { + writtenObject.set(invocationOnMock.getArgument(0)); + promise.setSuccess(); + latch.countDown(); + return promise; + }).when(channel).writeAndFlush(any(Response.class)); + + long requestId = 0; + int maxCount = 5; + long maxSize = 1024; + ExecutorService service = Executors.newCachedThreadPool(); + long ledgerId = System.currentTimeMillis(); + BookieProtocol.BatchedReadRequest request = BookieProtocol.BatchedReadRequest.create( + BookieProtocol.CURRENT_PROTOCOL_VERSION, ledgerId, 1, BookieProtocol.FLAG_DO_FENCING, new byte[] {}, + requestId, maxCount, maxSize); + ReadEntryProcessor processor = BatchedReadEntryProcessor.create( + request, requestHandler, requestProcessor, service, true, 1024 * 1024 * 5); + fenceResult.complete(result); + processor.run(); + + latch.await(); + verify(channel, times(1)).writeAndFlush(any(Response.class)); + + assertTrue(writtenObject.get() instanceof Response); + Response response = (Response) writtenObject.get(); + assertEquals(1, response.getEntryId()); + assertEquals(ledgerId, response.getLedgerId()); + assertEquals(BookieProtocol.BATCH_READ_ENTRY, response.getOpCode()); + assertEquals(errorCode, response.getErrorCode()); + } + + @Test + public void testNonFenceRequest() throws Exception { + ChannelPromise promise = new DefaultChannelPromise(channel); + AtomicReference writtenObject = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + doAnswer(invocationOnMock -> { + writtenObject.set(invocationOnMock.getArgument(0)); + promise.setSuccess(); + latch.countDown(); + 
return promise; + }).when(channel).writeAndFlush(any(Response.class)); + + long requestId = 0; + int maxCount = 5; + long maxSize = 1024; + ExecutorService service = Executors.newCachedThreadPool(); + long ledgerId = System.currentTimeMillis(); + BookieProtocol.BatchedReadRequest request = BookieProtocol.BatchedReadRequest.create( + BookieProtocol.CURRENT_PROTOCOL_VERSION, ledgerId, 1, BookieProtocol.FLAG_DO_FENCING, new byte[] {}, + requestId, maxCount, maxSize); + ReadEntryProcessor processor = BatchedReadEntryProcessor.create( + request, requestHandler, requestProcessor, service, true, 1024 * 1024 * 5); + processor.run(); + + latch.await(); + verify(channel, times(1)).writeAndFlush(any(Response.class)); + + assertTrue(writtenObject.get() instanceof Response); + Response response = (Response) writtenObject.get(); + assertEquals(1, response.getEntryId()); + assertEquals(ledgerId, response.getLedgerId()); + assertEquals(BookieProtocol.BATCH_READ_ENTRY, response.getOpCode()); + assertEquals(BookieProtocol.EOK, response.getErrorCode()); + } +} \ No newline at end of file diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/BookieProtoEncodingTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/BookieProtoEncodingTest.java index bba26f37a23..4f719ddfc08 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/BookieProtoEncodingTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/BookieProtoEncodingTest.java @@ -19,6 +19,7 @@ package org.apache.bookkeeper.proto; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.bookkeeper.proto.BookieProtocol.FLAG_NONE; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; @@ -44,6 +45,7 @@ import org.apache.bookkeeper.proto.BookkeeperProtocol.OperationType; import org.apache.bookkeeper.proto.BookkeeperProtocol.ProtocolVersion; import 
org.apache.bookkeeper.proto.BookkeeperProtocol.StatusCode; +import org.apache.bookkeeper.util.ByteBufList; import org.junit.Before; import org.junit.Test; @@ -131,4 +133,36 @@ public void testV2RequestDecoderThrowExceptionOnUnknownRequests() throws Excepti v2ReqEncoder.decode((ByteBuf) v3ReqEncoder.encode(v3Req, UnpooledByteBufAllocator.DEFAULT)); } + @Test + public void testV2BatchReadRequest() throws Exception { + RequestEnDeCoderPreV3 v2ReqEncoder = new RequestEnDeCoderPreV3(registry); + BookieProtocol.BatchedReadRequest req = BookieProtocol.BatchedReadRequest.create( + BookieProtocol.CURRENT_PROTOCOL_VERSION, 1L, 1L, FLAG_NONE, null, 1L, 10, 1024L); + ByteBuf buf = (ByteBuf) v2ReqEncoder.encode(req, UnpooledByteBufAllocator.DEFAULT); + buf.readInt(); // Skip the frame size. + BookieProtocol.BatchedReadRequest reqDecoded = (BookieProtocol.BatchedReadRequest) v2ReqEncoder.decode(buf); + assertEquals(req.ledgerId, reqDecoded.ledgerId); + assertEquals(req.entryId, reqDecoded.entryId); + assertEquals(req.maxSize, reqDecoded.maxSize); + assertEquals(req.maxCount, reqDecoded.maxCount); + reqDecoded.recycle(); + } + + @Test + public void testV2BatchReadResponse() throws Exception { + ResponseEnDeCoderPreV3 v2ReqEncoder = new ResponseEnDeCoderPreV3(registry); + ByteBuf first = UnpooledByteBufAllocator.DEFAULT.buffer(4).writeInt(10); + ByteBuf second = UnpooledByteBufAllocator.DEFAULT.buffer(8).writeLong(10L); + ByteBufList data = ByteBufList.get(first, second); + BookieProtocol.BatchedReadResponse res = new BookieProtocol.BatchedReadResponse( + BookieProtocol.CURRENT_PROTOCOL_VERSION, 1, 1L, 1L, 1L, data); + ByteBuf buf = (ByteBuf) v2ReqEncoder.encode(res, UnpooledByteBufAllocator.DEFAULT); + buf.readInt(); // Skip the frame size. 
+ BookieProtocol.BatchedReadResponse resDecoded = (BookieProtocol.BatchedReadResponse) v2ReqEncoder.decode(buf); + assertEquals(res.ledgerId, resDecoded.ledgerId); + assertEquals(res.entryId, resDecoded.entryId); + assertEquals(res.getData().size(), resDecoded.getData().size()); + assertEquals(res.getData().readableBytes(), resDecoded.getData().readableBytes()); + } + } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookieClient.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookieClient.java index 2d8315f2f0a..37317317475 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookieClient.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookieClient.java @@ -70,10 +70,20 @@ public interface Hook { CompletableFuture runHook(BookieId bookie, long ledgerId, long entryId); } + /** + * Runs before or after an operation. Can stall the operation or error it. + */ + public interface BatchHook { + CompletableFuture runHook(BookieId bookie, long ledgerId, long startEntryId, int maxCount, long maxSize); + } + private Hook preReadHook = (bookie, ledgerId, entryId) -> FutureUtils.value(null); private Hook postReadHook = (bookie, ledgerId, entryId) -> FutureUtils.value(null); private Hook preWriteHook = (bookie, ledgerId, entryId) -> FutureUtils.value(null); private Hook postWriteHook = (bookie, ledgerId, entryId) -> FutureUtils.value(null); + private BatchHook preBatchReadHook = (bookie, ledgerId, startEntryId, maxCount, maxSize) -> FutureUtils.value(null); + private BatchHook postBatchReadHook = (bookie, ledgerId, startEntryId, maxCount, maxSize) -> FutureUtils.value( + null); public MockBookieClient(OrderedExecutor executor) { this.executor = executor; @@ -225,6 +235,41 @@ public void readEntry(BookieId addr, long ledgerId, long entryId, }, executor.chooseThread(ledgerId)); } + @Override + public void batchReadEntries(BookieId addr, long ledgerId, long startEntryId, int 
maxCount, long maxSize, + BookkeeperInternalCallbacks.BatchedReadEntryCallback cb, Object ctx, int flags, byte[] masterKey, + boolean allowFastFail) { + preBatchReadHook.runHook(addr, ledgerId, startEntryId, maxCount, maxSize) + .thenComposeAsync((res) -> { + LOG.info("[{};L{}] batch read entries startEntryId:{} maxCount:{} maxSize:{}", + addr, ledgerId, startEntryId, maxCount, maxSize); + if (isErrored(addr)) { + LOG.warn("[{};L{}] erroring batch read entries startEntryId:{} maxCount:{} maxSize:{}", + addr, ledgerId, startEntryId, maxCount, maxSize); + return FutureUtils.exception(new BKException.BKReadException()); + } + + try { + ByteBufList data = mockBookies.batchReadEntries(addr, flags, ledgerId, startEntryId, + maxCount, maxSize); + return FutureUtils.value(data); + } catch (BKException bke) { + return FutureUtils.exception(bke); + } + }, executor.chooseThread(ledgerId)) + .thenCompose((buf) -> postBatchReadHook.runHook(addr, ledgerId, startEntryId, maxCount, maxSize) + .thenApply((res) -> buf)) + .whenCompleteAsync((res, ex) -> { + if (ex != null) { + cb.readEntriesComplete(BKException.getExceptionCode(ex, BKException.Code.ReadException), + ledgerId, startEntryId, null, ctx); + } else { + cb.readEntriesComplete(BKException.Code.OK, + ledgerId, startEntryId, res, ctx); + } + }, executor.chooseThread(ledgerId)); + } + @Override public void readEntryWaitForLACUpdate(BookieId addr, long ledgerId, diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookies.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookies.java index cef77c3f99a..ac338b9757d 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookies.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/proto/MockBookies.java @@ -123,6 +123,40 @@ public ByteBuf readEntry(BookieId bookieId, int flags, long ledgerId, long entry return entry; } + public ByteBufList batchReadEntries(BookieId bookieId, int flags, long ledgerId, 
long startEntryId, + int maxCount, long maxSize) throws BKException { + MockLedgerData ledger = getBookieData(bookieId).get(ledgerId); + + if (ledger == null) { + LOG.warn("[{};L{}] ledger not found", bookieId, ledgerId); + throw new BKException.BKNoSuchLedgerExistsException(); + } + + if ((flags & BookieProtocol.FLAG_DO_FENCING) == BookieProtocol.FLAG_DO_FENCING) { + ledger.fence(); + } + //Refer: BatchedReadEntryProcessor.readData + ByteBufList data = null; + if (maxCount <= 0) { + maxCount = Integer.MAX_VALUE; + } + long frameSize = 24 + 8 + 4; + for (long i = startEntryId; i < startEntryId + maxCount; i++) { + ByteBuf entry = ledger.getEntry(i); + frameSize += entry.readableBytes() + 4; + if (data == null) { + data = ByteBufList.get(entry); + } else { + if (frameSize > maxSize) { + entry.release(); + break; + } + data.add(entry); + } + } + return data; + } + public ConcurrentHashMap getBookieData(BookieId bookieId) { return data.computeIfAbsent(bookieId, (key) -> new ConcurrentHashMap<>()); } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java index 7fc733a1113..2e3e09012fb 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java @@ -45,6 +45,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import lombok.Cleanup; import org.apache.bookkeeper.bookie.BookieImpl; @@ -409,7 +410,16 @@ public void testInnerDelayedAuditOfLostBookies() throws Exception { urLedgerMgr.setLostBookieRecoveryDelay(5); // shutdown a non auditor bookie; choosing non-auditor to avoid another election - String 
shutdownBookie = shutDownNonAuditorBookie(); + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); if (LOG.isDebugEnabled()) { LOG.debug("Waiting for ledgers to be marked as under replicated"); @@ -425,9 +435,10 @@ public void testInnerDelayedAuditOfLostBookies() throws Exception { urLedgerList.contains(ledgerId)); Map urLedgerData = getUrLedgerData(urLedgerList); String data = urLedgerData.get(ledgerId); - assertTrue("Bookie " + shutdownBookie + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + "is not listed in the ledger as missing replica :" + data, - data.contains(shutdownBookie)); + data.contains(shutdownBookieRef.get())); } /** @@ -486,7 +497,16 @@ public void testRescheduleOfDelayedAuditOfLostBookiesToStartImmediately() throws urLedgerMgr.setLostBookieRecoveryDelay(50); // shutdown a non auditor bookie; choosing non-auditor to avoid another election - String shutdownBookie = shutDownNonAuditorBookie(); + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); if (LOG.isDebugEnabled()) { LOG.debug("Waiting for ledgers to be marked as under replicated"); @@ -505,9 +525,10 @@ public void testRescheduleOfDelayedAuditOfLostBookiesToStartImmediately() throws urLedgerList.contains(ledgerId)); Map urLedgerData = getUrLedgerData(urLedgerList); String data = urLedgerData.get(ledgerId); - assertTrue("Bookie " + shutdownBookie + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + "is not listed in the 
ledger as missing replica :" + data, - data.contains(shutdownBookie)); + data.contains(shutdownBookieRef.get())); } @Test @@ -530,7 +551,16 @@ public void testRescheduleOfDelayedAuditOfLostBookiesToStartLater() throws Excep urLedgerMgr.setLostBookieRecoveryDelay(3); // shutdown a non auditor bookie; choosing non-auditor to avoid another election - String shutdownBookie = shutDownNonAuditorBookie(); + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); if (LOG.isDebugEnabled()) { LOG.debug("Waiting for ledgers to be marked as under replicated"); @@ -556,9 +586,10 @@ public void testRescheduleOfDelayedAuditOfLostBookiesToStartLater() throws Excep urLedgerList.contains(ledgerId)); Map urLedgerData = getUrLedgerData(urLedgerList); String data = urLedgerData.get(ledgerId); - assertTrue("Bookie " + shutdownBookie + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + "is not listed in the ledger as missing replica :" + data, - data.contains(shutdownBookie)); + data.contains(shutdownBookieRef.get())); } @Test @@ -647,7 +678,12 @@ public void testTriggerAuditorWithPendingAuditTask() throws Exception { urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); // shutdown a non auditor bookie; choosing non-auditor to avoid another election - String shutdownBookie = shutDownNonAuditorBookie(); + new Thread(() -> { + try { + shutDownNonAuditorBookie(); + } catch (Exception ignore) { + } + }).start(); if (LOG.isDebugEnabled()) { LOG.debug("Waiting for ledgers to be marked as under replicated"); @@ -698,7 +734,12 @@ public void testTriggerAuditorBySettingDelayToZeroWithPendingAuditTask() throws urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); // shutdown a non auditor 
bookie; choosing non-auditor to avoid another election - String shutdownBookie = shutDownNonAuditorBookie(); + new Thread(() -> { + try { + shutDownNonAuditorBookie(); + } catch (Exception ignore) { + } + }).start(); if (LOG.isDebugEnabled()) { LOG.debug("Waiting for ledgers to be marked as under replicated"); @@ -750,8 +791,17 @@ public void testDelayedAuditWithMultipleBookieFailures() throws Exception { // wait for 10 seconds before starting the recovery work when a bookie fails urLedgerMgr.setLostBookieRecoveryDelay(10); - // shutdown a non auditor bookie to avoid an election - String shutdownBookie1 = shutDownNonAuditorBookie(); + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef1 = new AtomicReference<>(); + CountDownLatch shutdownLatch1 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie1 = shutDownNonAuditorBookie(); + shutdownBookieRef1.set(shutdownBookie1); + shutdownLatch1.countDown(); + } catch (Exception ignore) { + } + }).start(); // wait for 3 seconds and there shouldn't be any under replicated ledgers // because we have delayed the start of audit by 10 seconds @@ -763,7 +813,16 @@ public void testDelayedAuditWithMultipleBookieFailures() throws Exception { // the history about having delayed recovery remains. Hence we make sure // we bring down a non auditor bookie. 
This should cause the audit to take // place immediately and not wait for the remaining 7 seconds to elapse - String shutdownBookie2 = shutDownNonAuditorBookie(); + AtomicReference shutdownBookieRef2 = new AtomicReference<>(); + CountDownLatch shutdownLatch2 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie2 = shutDownNonAuditorBookie(); + shutdownBookieRef2.set(shutdownBookie2); + shutdownLatch2.countDown(); + } catch (Exception ignore) { + } + }).start(); // 2 second grace period for the ledgers to get reported as under replicated Thread.sleep(2000); @@ -776,9 +835,11 @@ public void testDelayedAuditWithMultipleBookieFailures() throws Exception { urLedgerList.contains(ledgerId)); Map urLedgerData = getUrLedgerData(urLedgerList); String data = urLedgerData.get(ledgerId); - assertTrue("Bookie " + shutdownBookie1 + shutdownBookie2 + shutdownLatch1.await(); + shutdownLatch2.await(); + assertTrue("Bookie " + shutdownBookieRef1.get() + shutdownBookieRef2.get() + " are not listed in the ledger as missing replicas :" + data, - data.contains(shutdownBookie1) && data.contains(shutdownBookie2)); + data.contains(shutdownBookieRef1.get()) && data.contains(shutdownBookieRef2.get())); } /** @@ -808,7 +869,17 @@ public void testDelayedAuditWithRollingUpgrade() throws Exception { // shutdown a non auditor bookie to avoid an election int idx1 = getShutDownNonAuditorBookieIdx(""); ServerConfiguration conf1 = confByIndex(idx1); - String shutdownBookie1 = shutdownBookie(idx1); + + AtomicReference shutdownBookieRef1 = new AtomicReference<>(); + CountDownLatch shutdownLatch1 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie1 = shutdownBookie(idx1); + shutdownBookieRef1.set(shutdownBookie1); + shutdownLatch1.countDown(); + } catch (Exception ignore) { + } + }).start(); // wait for 2 seconds and there shouldn't be any under replicated ledgers // because we have delayed the start of audit by 5 seconds @@ -821,8 +892,17 @@ public void 
testDelayedAuditWithRollingUpgrade() throws Exception { // Now to simulate the rolling upgrade, bring down a bookie different from // the one we brought down/up above. - String shutdownBookie2 = shutDownNonAuditorBookie(shutdownBookie1); - + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef2 = new AtomicReference<>(); + CountDownLatch shutdownLatch2 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie2 = shutDownNonAuditorBookie(); + shutdownBookieRef2.set(shutdownBookie2); + shutdownLatch2.countDown(); + } catch (Exception ignore) { + } + }).start(); // since the first bookie that was brought down/up has come up, there is only // one bookie down at this time. Hence the lost bookie check shouldn't start // immediately; it will start 5 seconds after the second bookie went down @@ -839,11 +919,13 @@ public void testDelayedAuditWithRollingUpgrade() throws Exception { urLedgerList.contains(ledgerId)); Map urLedgerData = getUrLedgerData(urLedgerList); String data = urLedgerData.get(ledgerId); - assertTrue("Bookie " + shutdownBookie1 + "wrongly listed as missing the ledger: " + data, - !data.contains(shutdownBookie1)); - assertTrue("Bookie " + shutdownBookie2 + shutdownLatch1.await(); + shutdownLatch2.await(); + assertTrue("Bookie " + shutdownBookieRef1.get() + "wrongly listed as missing the ledger: " + data, + !data.contains(shutdownBookieRef1.get())); + assertTrue("Bookie " + shutdownBookieRef2.get() + " is not listed in the ledger as missing replicas :" + data, - data.contains(shutdownBookie2)); + data.contains(shutdownBookieRef2.get())); LOG.info("*****************Test Complete"); } @@ -1008,7 +1090,7 @@ private Auditor getAuditorBookiesAuditor() throws Exception { return auditorElectors.get(bookieAddr).auditor; } - private String shutDownNonAuditorBookie() throws Exception { + private String shutDownNonAuditorBookie() throws Exception { // shutdown bookie which is not an 
auditor int indexOf = indexOfServer(getAuditorBookie()); int bkIndexDownBookie; diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/test/BookieClientTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/test/BookieClientTest.java index a110e833ac3..5b96c52a0be 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/test/BookieClientTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/test/BookieClientTest.java @@ -21,13 +21,17 @@ package org.apache.bookkeeper.test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.buffer.UnpooledByteBufAllocator; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.util.ReferenceCounted; import io.netty.util.concurrent.DefaultThreadFactory; import java.io.File; import java.io.IOException; @@ -36,6 +40,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import org.apache.bookkeeper.bookie.MockUncleanShutdownDetection; import org.apache.bookkeeper.bookie.TestBookieImpl; import org.apache.bookkeeper.client.BKException; @@ -57,11 +63,14 @@ import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback; import org.apache.bookkeeper.proto.BookkeeperProtocol; +import org.apache.bookkeeper.proto.DataFormats; +import org.apache.bookkeeper.proto.checksum.DigestManager; import org.apache.bookkeeper.stats.NullStatsLogger; import org.apache.bookkeeper.test.TestStatsProvider.TestOpStatsLogger; import 
org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; import org.apache.bookkeeper.util.ByteBufList; import org.apache.bookkeeper.util.IOUtils; +import org.awaitility.Awaitility; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -85,6 +94,7 @@ public void setUp() throws Exception { // know via ZooKeeper which Bookies are available, okay, so pass in null // for the zkServers input parameter when constructing the BookieServer. ServerConfiguration conf = TestBKConfiguration.newServerConfiguration(); + conf.setGcWaitTime(1000 * 100); conf.setBookiePort(port) .setJournalDirName(tmpDir.getPath()) .setLedgerDirNames(new String[] { tmpDir.getPath() }) @@ -347,4 +357,392 @@ public void getBookieInfoComplete(int rc, BookieInfo bInfo, Object ctx) { assertEquals("BookieInfoSuccessCount", expectedBookieInfoSuccessCount, perChannelBookieClientScopeOfThisAddr.getSuccessCount()); } + + @Test + public void testBatchedRead() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setUseV2WireProtocol(true); + BookieClient bc = new BookieClientImpl(conf, eventLoopGroup, + UnpooledByteBufAllocator.DEFAULT, executor, scheduler, NullStatsLogger.INSTANCE, + BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + + BookieId addr = bs.getBookieId(); + byte[] passwd = new byte[20]; + Arrays.fill(passwd, (byte) 'a'); + DigestManager digestManager = DigestManager.instantiate(1, passwd, + DataFormats.LedgerMetadataFormat.DigestType.CRC32C, ByteBufAllocator.DEFAULT, true); + byte[] masterKey = DigestManager.generateMasterKey(passwd); + + final int entries = 10; + int length = 0; + for (int i = 0; i < entries; i++) { + ByteBuf bb = Unpooled.buffer(4); + bb.writeInt(i); + length += 4; + ReferenceCounted content = digestManager.computeDigestAndPackageForSending(i, i - 1, length, bb, + masterKey, BookieProtocol.FLAG_NONE); + ResultStruct arc = new ResultStruct(); + bc.addEntry(addr, 1, passwd, i, content, wrcb, arc, BookieProtocol.FLAG_NONE, 
false, WriteFlag.NONE); + Awaitility.await().untilAsserted(() -> { + assertEquals(0, arc.rc); + }); + } + AtomicReference result = new AtomicReference<>(); + AtomicInteger resCode = new AtomicInteger(); + + bc.batchReadEntries(addr, 1, 0, 5, 5 * 1024 * 1024, (rc, ledgerId, startEntryId, bufList, ctx) -> { + resCode.set(rc); + result.set(bufList); + }, null, BookieProtocol.FLAG_NONE); + + Awaitility.await().untilAsserted(() -> { + ByteBufList byteBufList = result.get(); + assertNotNull(byteBufList); + }); + assertEquals(Code.OK, resCode.get()); + ByteBufList byteBufList = result.get(); + assertEquals(5, byteBufList.size()); + for (int i = 0; i < byteBufList.size(); i++) { + ByteBuf buffer = byteBufList.getBuffer(i); + //ledgerId + assertEquals(1, buffer.readLong()); + //entryId + assertEquals(i, buffer.readLong()); + //lac + assertEquals(i - 1, buffer.readLong()); + //length + assertEquals((i + 1) * 4, buffer.readLong()); + //digest + int i1 = buffer.readInt(); + //data + ByteBuf byteBuf = buffer.readBytes(buffer.readableBytes()); + assertEquals(i, byteBuf.readInt()); + } + } + + @Test + public void testBatchedReadWittLostFourthEntry() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setUseV2WireProtocol(true); + BookieClient bc = new BookieClientImpl(conf, eventLoopGroup, + UnpooledByteBufAllocator.DEFAULT, executor, scheduler, NullStatsLogger.INSTANCE, + BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + + BookieId addr = bs.getBookieId(); + byte[] passwd = new byte[20]; + Arrays.fill(passwd, (byte) 'a'); + DigestManager digestManager = DigestManager.instantiate(1, passwd, + DataFormats.LedgerMetadataFormat.DigestType.CRC32C, ByteBufAllocator.DEFAULT, true); + byte[] masterKey = DigestManager.generateMasterKey(passwd); + + final int entries = 10; + int length = 0; + for (int i = 0; i < entries; i++) { + //The bookie server lost entry:3 + if (i == 3) { + continue; + } + ByteBuf bb = Unpooled.buffer(4); + bb.writeInt(i); + length += 4; 
+ ReferenceCounted content = digestManager.computeDigestAndPackageForSending(i, i - 1, length, bb, + masterKey, BookieProtocol.FLAG_NONE); + ResultStruct arc = new ResultStruct(); + bc.addEntry(addr, 1, passwd, i, content, wrcb, arc, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE); + Awaitility.await().untilAsserted(() -> { + assertEquals(0, arc.rc); + }); + } + AtomicReference result = new AtomicReference<>(); + AtomicInteger resCode = new AtomicInteger(); + + bc.batchReadEntries(addr, 1, 0, 5, 5 * 1024 * 1024, (rc, ledgerId, startEntryId, bufList, ctx) -> { + resCode.set(rc); + result.set(bufList); + }, null, BookieProtocol.FLAG_NONE); + + Awaitility.await().untilAsserted(() -> { + ByteBufList byteBufList = result.get(); + assertNotNull(byteBufList); + }); + assertEquals(Code.OK, resCode.get()); + ByteBufList byteBufList = result.get(); + assertEquals(3, byteBufList.size()); + for (int i = 0; i < byteBufList.size(); i++) { + ByteBuf buffer = byteBufList.getBuffer(i); + //ledgerId + assertEquals(1, buffer.readLong()); + //entryId + assertEquals(i, buffer.readLong()); + //lac + assertEquals(i - 1, buffer.readLong()); + //length + assertEquals((i + 1) * 4, buffer.readLong()); + //digest + int i1 = buffer.readInt(); + //data + ByteBuf byteBuf = buffer.readBytes(buffer.readableBytes()); + assertEquals(i, byteBuf.readInt()); + } + } + + @Test + public void testBatchedReadWittLostFirstEntry() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setUseV2WireProtocol(true); + BookieClient bc = new BookieClientImpl(conf, eventLoopGroup, + UnpooledByteBufAllocator.DEFAULT, executor, scheduler, NullStatsLogger.INSTANCE, + BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + + BookieId addr = bs.getBookieId(); + byte[] passwd = new byte[20]; + Arrays.fill(passwd, (byte) 'a'); + DigestManager digestManager = DigestManager.instantiate(1, passwd, + DataFormats.LedgerMetadataFormat.DigestType.CRC32C, ByteBufAllocator.DEFAULT, true); + byte[] masterKey = 
DigestManager.generateMasterKey(passwd); + + final int entries = 10; + int length = 0; + for (int i = 0; i < entries; i++) { + //The bookie server lost entry:0 + if (i == 0) { + continue; + } + ByteBuf bb = Unpooled.buffer(4); + bb.writeInt(i); + length += 4; + ReferenceCounted content = digestManager.computeDigestAndPackageForSending(i, i - 1, length, bb, + masterKey, BookieProtocol.FLAG_NONE); + ResultStruct arc = new ResultStruct(); + bc.addEntry(addr, 1, passwd, i, content, wrcb, arc, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE); + Awaitility.await().untilAsserted(() -> { + assertEquals(0, arc.rc); + }); + } + AtomicReference result = new AtomicReference<>(); + AtomicInteger resCode = new AtomicInteger(); + + bc.batchReadEntries(addr, 1, 0, 5, 5 * 1024 * 1024, (rc, ledgerId, startEntryId, bufList, ctx) -> { + resCode.set(rc); + result.set(bufList); + }, null, BookieProtocol.FLAG_NONE); + + Awaitility.await().untilAsserted(() -> { + ByteBufList byteBufList = result.get(); + assertNotNull(byteBufList); + }); + assertEquals(Code.NoSuchEntryException, resCode.get()); + ByteBufList byteBufList = result.get(); + assertEquals(0, byteBufList.size()); + } + + //This test is for the `isSmallEntry` improvement. 
+ @Test + public void testBatchedReadWittBigPayload() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setUseV2WireProtocol(true); + BookieClient bc = new BookieClientImpl(conf, eventLoopGroup, + UnpooledByteBufAllocator.DEFAULT, executor, scheduler, NullStatsLogger.INSTANCE, + BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + + BookieId addr = bs.getBookieId(); + byte[] passwd = new byte[20]; + Arrays.fill(passwd, (byte) 'a'); + DigestManager digestManager = DigestManager.instantiate(1, passwd, + DataFormats.LedgerMetadataFormat.DigestType.CRC32C, ByteBufAllocator.DEFAULT, true); + byte[] masterKey = DigestManager.generateMasterKey(passwd); + byte[] kbData = new byte[1024]; + for (int i = 0; i < 1024; i++) { + kbData[i] = (byte) i; + } + final int entries = 20; + int length = 0; + for (int i = 0; i < entries; i++) { + ByteBuf bb = Unpooled.buffer(1024); + bb.writeBytes(kbData); + length += 1024; + ReferenceCounted content = digestManager.computeDigestAndPackageForSending(i, i - 1, length, bb, + masterKey, BookieProtocol.FLAG_NONE); + ResultStruct arc = new ResultStruct(); + bc.addEntry(addr, 1, passwd, i, content, wrcb, arc, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE); + Awaitility.await().untilAsserted(() -> { + assertEquals(0, arc.rc); + }); + } + + AtomicReference result = new AtomicReference<>(); + AtomicInteger resCode = new AtomicInteger(); + + bc.batchReadEntries(addr, 1, 0, 20, 5 * 1024 * 1024, (rc, ledgerId, startEntryId, bufList, ctx) -> { + result.set(bufList); + resCode.set(rc); + }, null, BookieProtocol.FLAG_NONE); + Awaitility.await().untilAsserted(() -> { + ByteBufList byteBufList = result.get(); + assertNotNull(byteBufList); + }); + ByteBufList byteBufList = result.get(); + assertEquals(0, resCode.get()); + assertEquals(20, byteBufList.size()); + for (int i = 0; i < byteBufList.size(); i++) { + ByteBuf buffer = byteBufList.getBuffer(i); + //ledgerId + assertEquals(1, buffer.readLong()); + //entryId + 
assertEquals(i, buffer.readLong()); + //lac + assertEquals(i - 1, buffer.readLong()); + //length + assertEquals((i + 1) * 1024, buffer.readLong()); + //digest + int i1 = buffer.readInt(); + //data + ByteBuf byteBuf = buffer.readBytes(buffer.readableBytes()); + assertEquals(1024, byteBuf.readableBytes()); + byte[] bytes = ByteBufUtil.getBytes(byteBuf); + assertTrue(Arrays.equals(kbData, bytes)); + } + } + + @Test + public void testBatchedReadWithMaxSizeLimitCase1() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setUseV2WireProtocol(true); + BookieClient bc = new BookieClientImpl(conf, eventLoopGroup, + UnpooledByteBufAllocator.DEFAULT, executor, scheduler, NullStatsLogger.INSTANCE, + BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + + BookieId addr = bs.getBookieId(); + byte[] passwd = new byte[20]; + Arrays.fill(passwd, (byte) 'a'); + DigestManager digestManager = DigestManager.instantiate(1, passwd, + DataFormats.LedgerMetadataFormat.DigestType.CRC32C, ByteBufAllocator.DEFAULT, true); + byte[] masterKey = DigestManager.generateMasterKey(passwd); + byte[] kbData = new byte[1024]; + for (int i = 0; i < 1024; i++) { + kbData[i] = (byte) i; + } + final int entries = 20; + int length = 0; + for (int i = 0; i < entries; i++) { + ByteBuf bb = Unpooled.buffer(1024); + bb.writeBytes(kbData); + length += 1024; + ReferenceCounted content = digestManager.computeDigestAndPackageForSending(i, i - 1, length, bb, + masterKey, BookieProtocol.FLAG_NONE); + ResultStruct arc = new ResultStruct(); + bc.addEntry(addr, 1, passwd, i, content, wrcb, arc, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE); + Awaitility.await().untilAsserted(() -> { + assertEquals(0, arc.rc); + }); + } + + AtomicReference result = new AtomicReference<>(); + AtomicInteger resCode = new AtomicInteger(); + + // one entry size = 8(ledgerId) + 8(entryId) + 8(lac) + 8(length) + 4(digest) + payload size + int entrySize = 8 + 8 + 8 + 8 + 4 + 1024; + bc.batchReadEntries(addr, 1, 0, 
20, 5 * entrySize , (rc, ledgerId, startEntryId, bufList, ctx) -> { + result.set(bufList); + resCode.set(rc); + }, null, BookieProtocol.FLAG_NONE); + Awaitility.await().untilAsserted(() -> { + ByteBufList byteBufList = result.get(); + assertNotNull(byteBufList); + }); + ByteBufList byteBufList = result.get(); + assertEquals(0, resCode.get()); + assertEquals(4, byteBufList.size()); + for (int i = 0; i < byteBufList.size(); i++) { + ByteBuf buffer = byteBufList.getBuffer(i); + //ledgerId + assertEquals(1, buffer.readLong()); + //entryId + assertEquals(i, buffer.readLong()); + //lac + assertEquals(i - 1, buffer.readLong()); + //length + assertEquals((i + 1) * 1024, buffer.readLong()); + //digest + int i1 = buffer.readInt(); + //data + ByteBuf byteBuf = buffer.readBytes(buffer.readableBytes()); + assertEquals(1024, byteBuf.readableBytes()); + byte[] bytes = ByteBufUtil.getBytes(byteBuf); + assertTrue(Arrays.equals(kbData, bytes)); + } + } + + //consider header size rather than case1. + @Test + public void testBatchedReadWithMaxSizeLimitCase2() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setUseV2WireProtocol(true); + BookieClient bc = new BookieClientImpl(conf, eventLoopGroup, + UnpooledByteBufAllocator.DEFAULT, executor, scheduler, NullStatsLogger.INSTANCE, + BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + + BookieId addr = bs.getBookieId(); + byte[] passwd = new byte[20]; + Arrays.fill(passwd, (byte) 'a'); + DigestManager digestManager = DigestManager.instantiate(1, passwd, + DataFormats.LedgerMetadataFormat.DigestType.CRC32C, ByteBufAllocator.DEFAULT, true); + byte[] masterKey = DigestManager.generateMasterKey(passwd); + byte[] kbData = new byte[1024]; + for (int i = 0; i < 1024; i++) { + kbData[i] = (byte) i; + } + final int entries = 20; + int length = 0; + for (int i = 0; i < entries; i++) { + ByteBuf bb = Unpooled.buffer(1024); + bb.writeBytes(kbData); + length += 1024; + ReferenceCounted content = 
digestManager.computeDigestAndPackageForSending(i, i - 1, length, bb, + masterKey, BookieProtocol.FLAG_NONE); + ResultStruct arc = new ResultStruct(); + bc.addEntry(addr, 1, passwd, i, content, wrcb, arc, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE); + Awaitility.await().untilAsserted(() -> { + assertEquals(0, arc.rc); + }); + } + + AtomicReference result = new AtomicReference<>(); + AtomicInteger resCode = new AtomicInteger(); + + // one entry size = 8(ledgerId) + 8(entryId) + 8(lac) + 8(length) + 4(digest) + payload size + int entrySize = 8 + 8 + 8 + 8 + 4 + 1024; + //response header size. + int headerSize = 24 + 8 + 4; + bc.batchReadEntries(addr, 1, 0, 20, 5 * entrySize + headerSize + (5 * 4) , + (rc, ledgerId, startEntryId, bufList, ctx) -> { + result.set(bufList); + resCode.set(rc); + }, null, BookieProtocol.FLAG_NONE); + Awaitility.await().untilAsserted(() -> { + ByteBufList byteBufList = result.get(); + assertNotNull(byteBufList); + }); + ByteBufList byteBufList = result.get(); + assertEquals(0, resCode.get()); + assertEquals(5, byteBufList.size()); + for (int i = 0; i < byteBufList.size(); i++) { + ByteBuf buffer = byteBufList.getBuffer(i); + //ledgerId + assertEquals(1, buffer.readLong()); + //entryId + assertEquals(i, buffer.readLong()); + //lac + assertEquals(i - 1, buffer.readLong()); + //length + assertEquals((i + 1) * 1024, buffer.readLong()); + //digest + int i1 = buffer.readInt(); + //data + ByteBuf byteBuf = buffer.readBytes(buffer.readableBytes()); + assertEquals(1024, byteBuf.readableBytes()); + byte[] bytes = ByteBufUtil.getBytes(byteBuf); + assertTrue(Arrays.equals(kbData, bytes)); + } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/TestHardLink.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/TestHardLink.java new file mode 100644 index 00000000000..75f6cf502d8 --- /dev/null +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/TestHardLink.java @@ -0,0 +1,82 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.bookkeeper.util; + +import java.io.File; +import java.io.IOException; +import java.util.UUID; +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + + +public class TestHardLink { + + private File tempDir; + + @Before + public void setup() throws IOException { + // Create at least one file so that target disk will never be empty + tempDir = IOUtils.createTempDir("TestHardLink", "test-hardlink"); + } + + @After + public void tearDown() throws IOException { + FileUtils.deleteDirectory(tempDir); + } + + private void verifyHardLink(File origin, File linkedOrigin) throws IOException { + Assert.assertTrue(origin.exists()); + Assert.assertFalse(linkedOrigin.exists()); + + HardLink.createHardLink(origin, linkedOrigin); + + Assert.assertTrue(origin.exists()); + Assert.assertTrue(linkedOrigin.exists()); + + // when delete origin file it should be success and not exist. 
+ origin.delete(); + Assert.assertFalse(origin.exists()); + Assert.assertTrue(linkedOrigin.exists()); + } + + @Test + public void testHardLink() throws IOException { + String uuidSuffix = UUID.randomUUID().toString(); + + // prepare file + File origin = new File(tempDir, "originFile." + uuidSuffix); + File linkedOrigin = new File(tempDir, "linkedOrigin." + uuidSuffix); + origin.createNewFile(); + + // disable jdk api link first + HardLink.enableJdkLinkApi(false); + verifyHardLink(origin, linkedOrigin); + + // prepare file + File jdkorigin = new File(tempDir, "jdkoriginFile." + uuidSuffix); + File jdklinkedOrigin = new File(tempDir, "jdklinkedOrigin." + uuidSuffix); + jdkorigin.createNewFile(); + + // enable jdk api link + HardLink.enableJdkLinkApi(true); + verifyHardLink(jdkorigin, jdklinkedOrigin); + } +} diff --git a/conf/bk_server.conf b/conf/bk_server.conf index 79e10f5851d..a391c1aa056 100755 --- a/conf/bk_server.conf +++ b/conf/bk_server.conf @@ -900,7 +900,7 @@ zkEnableSecurity=false # # For configuring corresponding stats provider, see details at each section below. # -# statsProviderClass=org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider +statsProviderClass=org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider ############################################################################# ## Prometheus Metrics Provider diff --git a/conf/log4j2.shell.xml b/conf/log4j2.shell.xml index 7c15d5d6cfa..c6e6b0fb9cd 100644 --- a/conf/log4j2.shell.xml +++ b/conf/log4j2.shell.xml @@ -21,23 +21,38 @@ --> - INFO - CONSOLE + . 
+ bookkeeper-shell.log + INFO + ROLLINGFILE + + + + + + + - - + + - - - - - - + + + + + + + + + + + + diff --git a/deploy/kubernetes/gke/bookkeeper.statefulset.yml b/deploy/kubernetes/gke/bookkeeper.statefulset.yml index 49548f09d05..ca32b91137d 100644 --- a/deploy/kubernetes/gke/bookkeeper.statefulset.yml +++ b/deploy/kubernetes/gke/bookkeeper.statefulset.yml @@ -124,7 +124,6 @@ metadata: app: bookkeeper component: bookie spec: - ports: ports: - name: bookie port: 3181 diff --git a/docker/README.md b/docker/README.md index 82c635faa19..462f1ae2a16 100644 --- a/docker/README.md +++ b/docker/README.md @@ -118,7 +118,7 @@ docker run -it --rm \ --network "bk_network" \ --env BK_zkServers=test_zookeeper:2181 \ apache/bookkeeper \ - bookkeeper shell metaformat + /opt/bookkeeper/bin/bookkeeper shell metaformat ``` Now we can start our Bookkeeper ensemble (e.g. with three bookies): ``` @@ -232,8 +232,8 @@ Be careful where you put the transaction log (journal). A dedicated transaction Here is some useful and graceful command the could be used to replace the default command, once you want to delete the cookies and do auto recovery: ``` -/bookkeeper/bin/bookkeeper shell bookieformat -nonInteractive -force -deleteCookie -/bookkeeper/bin/bookkeeper autorecovery +/opt/bookkeeper/bin/bookkeeper shell bookieformat -nonInteractive -force -deleteCookie +/opt/bookkeeper/bin/bookkeeper autorecovery ``` Use them, and replace the default [CMD] when you wanted to do things other than start a bookie. diff --git a/docker/scripts/apply-config-from-env.py b/docker/scripts/apply-config-from-env.py index 105a2bacafe..7b74b503411 100755 --- a/docker/scripts/apply-config-from-env.py +++ b/docker/scripts/apply-config-from-env.py @@ -23,21 +23,28 @@ ## based on the ENV variables ## export my-key=new-value ## -## ./apply-config-from-env config_dir +## ./apply-config-from-env file ... 
## import os, sys -if len(sys.argv) != 2: - print('Usage: %s ' + 'config_dir' % (sys.argv[0])) +if len(sys.argv) < 2: + print('Usage: %s file ...' % (sys.argv[0])) sys.exit(1) -def mylistdir(dir): - return [os.path.join(dir, filename) for filename in os.listdir(dir)] +def prepare_conf_files(files): + conf_files = [] + for f in files: + if os.path.isfile(f): + if not os.path.isabs(f): + f = os.path.join(os.getcwd(), f) + conf_files.append(f) + else: + print('%s is not a readable file' % f) + sys.exit(1) + return conf_files -# Always apply env config to all the files under conf -conf_dir = sys.argv[1] -conf_files = mylistdir(conf_dir) +conf_files = prepare_conf_files(sys.argv[1:]) print('conf files: ') print(conf_files) diff --git a/docker/scripts/common.sh b/docker/scripts/common.sh index 5bbcd208072..0f745db4b45 100755 --- a/docker/scripts/common.sh +++ b/docker/scripts/common.sh @@ -71,7 +71,7 @@ echo " BK_STREAM_STORAGE_ROOT_PATH is ${BK_STREAM_STORAGE_ROOT_PATH}" echo " BK_NUM_STORAGE_CONTAINERS is ${BK_NUM_STORAGE_CONTAINERS}" echo " BOOKIE_GRPC_PORT is ${BOOKIE_GRPC_PORT}" -python scripts/apply-config-from-env.py ${BK_HOME}/conf +python scripts/apply-config-from-env.py ${BK_HOME}/conf/*.conf export BOOKIE_CONF=${BK_HOME}/conf/bk_server.conf export SERVICE_PORT=${PORT0} diff --git a/docker/scripts/init_zookeeper.sh b/docker/scripts/init_zookeeper.sh index 803ef91d786..cff981211c8 100755 --- a/docker/scripts/init_zookeeper.sh +++ b/docker/scripts/init_zookeeper.sh @@ -63,7 +63,7 @@ function create_zk_dynamic_conf() { function init_zookeeper() { # apply zookeeper envs - python scripts/apply-config-from-env.py ${BK_HOME}/conf + python scripts/apply-config-from-env.py ${BK_HOME}/conf/zookeeper.conf # create dirs if they don't exist create_zk_dirs diff --git a/pom.xml b/pom.xml index 3ee1f6a768a..038c13c4105 100644 --- a/pom.xml +++ b/pom.xml @@ -152,9 +152,9 @@ 2.18.0 1.3.0 3.12.4 - 4.1.94.Final + 4.1.104.Final 2.0.61.Final - 0.0.21.Final + 0.0.24.Final 9.1.3 2.0.9 
0.15.0 diff --git a/site3/website/docs/admin/bookies.md b/site3/website/docs/admin/bookies.md index 2971a444c44..d289022d1ad 100644 --- a/site3/website/docs/admin/bookies.md +++ b/site3/website/docs/admin/bookies.md @@ -18,8 +18,14 @@ There is no upper limit on the number of bookies that you can run in a single en ### Performance +#### Disks + To achieve optimal performance, BookKeeper requires each server to have at least two disks. It's possible to run a bookie with a single disk but performance will be significantly degraded. +#### Sticky reads + +BookKeeper sticky reads enables bookie node to read entries efficiently. The sticky reads are only available when ensemble (E) size is equal to quorum write (Qw). + ### ZooKeeper There is no constraint on the number of ZooKeeper nodes you can run with BookKeeper. A single machine running ZooKeeper in [standalone mode](https://zookeeper.apache.org/doc/current/zookeeperStarted.html#sc_InstallingSingleMode) is sufficient for BookKeeper, although for the sake of higher resilience we recommend running ZooKeeper in [quorum mode](https://zookeeper.apache.org/doc/current/zookeeperStarted.html#sc_RunningReplicatedZooKeeper) with multiple servers. 
diff --git a/site3/website/docs/admin/metrics.md b/site3/website/docs/admin/metrics.md index b52d4499ee3..5dfc1c46e42 100644 --- a/site3/website/docs/admin/metrics.md +++ b/site3/website/docs/admin/metrics.md @@ -12,9 +12,10 @@ BookKeeper has stats provider implementations for these sinks: Provider | Provider class name :--------|:------------------- [Codahale Metrics](https://mvnrepository.com/artifact/org.apache.bookkeeper.stats/codahale-metrics-provider) | `org.apache.bookkeeper.stats.CodahaleMetricsProvider` +[OpenTelemetry](https://opentelemetry.io/)|`org.apache.bookkeeper.stats.otel.OtelMetricsProvider` [Prometheus](https://prometheus.io/) | `org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider` -> The [Codahale Metrics]({{ site.github_master }}/bookkeeper-stats-providers/codahale-metrics-provider) stats provider is the default provider. +> The [Prometheus]({{ site.github_master }}/bookkeeper-stats-providers/prometheus-metrics-provider) stats provider is the default provider. ## Enabling stats providers in bookies diff --git a/site3/website/docs/reference/config.md b/site3/website/docs/reference/config.md index 11953b80b3f..a7ea81f0129 100644 --- a/site3/website/docs/reference/config.md +++ b/site3/website/docs/reference/config.md @@ -278,7 +278,7 @@ The table below lists parameters that you can set to configure bookies. All conf | --------- | ----------- | ------- | | enableStatistics | Whether statistics are enabled for the bookie. | true | | sanityCheckMetricsEnabled | Flag to enable sanity check metrics in bookie stats. | false | -| statsProviderClass | Stats provider class.
Options:
- Prometheus : org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
- Codahale : org.apache.bookkeeper.stats.codahale.CodahaleMetricsProvider
- Twitter Finagle : org.apache.bookkeeper.stats.twitter.finagle.FinagleStatsProvider
- Twitter Ostrich : org.apache.bookkeeper.stats.twitter.ostrich.OstrichProvider
- Twitter Science : org.apache.bookkeeper.stats.twitter.science.TwitterStatsProvider
| org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider | +| statsProviderClass | Stats provider class.
Options:
- Prometheus : org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
- Codahale : org.apache.bookkeeper.stats.codahale.CodahaleMetricsProvider
- OpenTelemetry : org.apache.bookkeeper.stats.otel.OtelMetricsProvider | org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider | limitStatsLogging | option to limit stats logging | true | diff --git a/site3/website/docusaurus.config.js b/site3/website/docusaurus.config.js index 0beff46af65..c001c3ef29f 100644 --- a/site3/website/docusaurus.config.js +++ b/site3/website/docusaurus.config.js @@ -7,7 +7,7 @@ const baseUrl = process.env.BASE_URL || "/" const deployUrl = process.env.DEPLOY_URL || "https://bookkeeper.apache.org"; const variables = { /** They are used in .md files*/ - latest_release: "4.16.3", + latest_release: "4.16.4", stable_release: "4.14.8", github_repo: "https://github.com/apache/bookkeeper", github_master: "https://github.com/apache/bookkeeper/tree/master", diff --git a/site3/website/src/pages/release-notes.md b/site3/website/src/pages/release-notes.md index e2fbc0c86e0..0bd6217203e 100644 --- a/site3/website/src/pages/release-notes.md +++ b/site3/website/src/pages/release-notes.md @@ -1,6 +1,127 @@ # Release notes +## 4.16.4 + +Release 4.16.4 includes multiple bug fixes and improvements, also we have a few dependency updates. + +Apache BookKeeper users are encouraged to upgrade to 4.16.4 if you are using 4.16.x. +The technical details of this release are summarized below. 
+ +### Highlights + +#### Bugs +* Fix calculate checkSum when using Java9IntHash [PR #4140](https://github.com/apache/bookkeeper/pull/4140) +* Fix the autorecovery failed replicate by add entry fenced error [PR #4163](https://github.com/apache/bookkeeper/pull/4163) +* Fixing memory leak error when using DirectEntryLogger [PR #4135](https://github.com/apache/bookkeeper/pull/4135) +* Fix bug of negative JournalQueueSize [PR #4077](https://github.com/apache/bookkeeper/pull/4077) +* Fix NoSuchElementException when rereplicate empty ledgers [PR #4039](https://github.com/apache/bookkeeper/pull/4039) +* Change the method getUnderreplicatedFragments to the package private [PR #4174](https://github.com/apache/bookkeeper/pull/4174) +* Fix auditor elector executor block problem. [PR #4165](https://github.com/apache/bookkeeper/pull/4165) +* Fix auditor thread leak problem. [PR #4162](https://github.com/apache/bookkeeper/pull/4162) +* Use Flaky flag to skip testBookieServerZKSessionExpireBehaviour test [PR #4144](https://github.com/apache/bookkeeper/pull/4144) +* Add ledgersCount.incrementAndGet in setExplicitLac function [PR #4138](https://github.com/apache/bookkeeper/pull/4138) +* Fix no known bookies after reset racks for all BKs [PR #4128](https://github.com/apache/bookkeeper/pull/4128) +* Fix a slow gc thread shutdown when compacting [PR #4127](https://github.com/apache/bookkeeper/pull/4127) +* Remove the unused logs in the CleanupLedgerManager.recordPromise [PR #4121](https://github.com/apache/bookkeeper/pull/4121) +* Fix Flaky-test: HandleFailuresTest.testHandleFailureBookieNotInWriteSet [PR #4110](https://github.com/apache/bookkeeper/pull/4110) +* Ignore the empty `perRegionPlacement` when RegionAwareEnsemblePlacementPolicy#newEnsemble [PR #4106](https://github.com/apache/bookkeeper/pull/4106) +* Fix LedgerHandle `ensembleChangeCounter` not used. [PR #4103](https://github.com/apache/bookkeeper/pull/4103) +* Tune the TestReplicationWorker test. 
[PR #4093](https://github.com/apache/bookkeeper/pull/4093) +* Make AuditorBookieTest#waitForNewAuditor stronger. [PR #4078](https://github.com/apache/bookkeeper/pull/4078) +* Print compaction progress [PR #4071](https://github.com/apache/bookkeeper/pull/4071) +* Fix readEntry parameter order [PR #4059](https://github.com/apache/bookkeeper/pull/4059) +* Skip sync the RocksDB when no changes [PR #3904](https://github.com/apache/bookkeeper/pull/3904) +* Try to use jdk api to create hardlink when rename file when compaction. [PR #3876](https://github.com/apache/bookkeeper/pull/3876) + +#### Dependency updates +* Upgrade Zookeeper to 3.8.3 to address CVE-2023-44981 [PR #4112](https://github.com/apache/bookkeeper/pull/4112) +* Update Jetty dependency [PR #4141](https://github.com/apache/bookkeeper/pull/4141) +* Upgrade bc-fips to 1.0.2.4 to fix CVE-2022-45146 [PR #3915](https://github.com/apache/bookkeeper/pull/3915) + +#### Details + +https://github.com/apache/bookkeeper/pulls?q=is%3Apr+label%3Arelease%2F4.16.4+is%3Amerged+ + +## 4.15.5 + +Release 4.15.5 includes multiple bug fixes and improvements, as well as a few dependency updates. + +Apache BookKeeper users are encouraged to upgrade to 4.15.5 if you are using 4.15.x. +The technical details of this release are summarized below. + +### Highlights + +The previous release was built on an ARM platform, which means the JNI libraries are only present for macOS; +this incurs a performance degradation (e.g. in the CRC libraries) or breaks functionality entirely (in the case of CPU affinity).
+ +The build platform now is tagged with `linux/amd64` by this [PR #4060](https://github.com/apache/bookkeeper/pull/4060) + +#### Bugs +* Fix no known bookies after reset racks for all BKs [PR #4128](https://github.com/apache/bookkeeper/pull/4128) +* Fix AutoCloseableLifecycleComponent close exception log [PR #4042](https://github.com/apache/bookkeeper/pull/4042) +* Fix NoSuchElementException when rereplicate empty ledgers [PR #4039](https://github.com/apache/bookkeeper/pull/4039) +* Fix deletedLedgers count [PR #4026](https://github.com/apache/bookkeeper/pull/4026) +* Fix read write request leak when executor throw `RejectedExecutionException` [PR #4024](https://github.com/apache/bookkeeper/pull/4024) +* Recycle LongWrapper finally to avoid memory leak [PR #4007](https://github.com/apache/bookkeeper/pull/4007) +* Fix trigger GC not work [PR #3998](https://github.com/apache/bookkeeper/pull/3998) +* Fix arbitrary file upload vulnerability with httpServerEnabled [PR #3982](https://github.com/apache/bookkeeper/pull/3982) +* Clear channel when channelInactive [PR #3966](https://github.com/apache/bookkeeper/pull/3966) +* Fix npe when iterate pendingLedgersUpdates and pendingDeletedLedgers. [PR #3955](https://github.com/apache/bookkeeper/pull/3955) +* Fix some metrics generated by prometheus client without type info [PR #3927](https://github.com/apache/bookkeeper/pull/3927) +* Fix ledger replicated failed blocks bookie decommission process [PR #3917](https://github.com/apache/bookkeeper/pull/3917) +* Recycle dropping read-write requests when various exceptions happened [PR #3912](https://github.com/apache/bookkeeper/pull/3912) +* SingleDirectoryDbLedgerStorage#flushMutex does not release lock on all exception paths [PR #3909](https://github.com/apache/bookkeeper/pull/3909) +* Fix ReclaimedSpaceViaDeletes stats incorrect problem. 
[PR #3906](https://github.com/apache/bookkeeper/pull/3906) +* Fix keys leak in EntryLocationIndex when ledgersToDelete is empty [PR #3903](https://github.com/apache/bookkeeper/pull/3903) +* Fix garbage collection blocked by runtime exception [PR #3901](https://github.com/apache/bookkeeper/pull/3901) +* Return activeLogChannel if new create [PR #3894](https://github.com/apache/bookkeeper/pull/3894) +* Modify incorrect rocksDB config level_compaction_dynamic_level_bytes to CFOptions [PR #3860](https://github.com/apache/bookkeeper/pull/3860) +* Fix ReadEntryProcessor v2 SchedulingDelayStats [PR #3758](https://github.com/apache/bookkeeper/pull/3758) +* Fix data lost when configured multiple ledger directories [PR #3329](https://github.com/apache/bookkeeper/pull/3329) + + +#### Improvements +* Issue 4126: Fix a slow gc thread shutdown when compacting [PR #4127](https://github.com/apache/bookkeeper/pull/4127) +* Remove the unused logs in the CleanupLedgerManager.recordPromise [PR #4121](https://github.com/apache/bookkeeper/pull/4121) +* Ignore the empty `perRegionPlacement` when RegionAwareEnsemblePlacementPolicy#newEnsemble [PR #4106](https://github.com/apache/bookkeeper/pull/4106) +* Print compaction progress [PR #4071](https://github.com/apache/bookkeeper/pull/4071) +* Force to use linux/amd64 to build release [PR #4060](https://github.com/apache/bookkeeper/pull/4060) +* Remove underreplicaiton callback [PR #4058](https://github.com/apache/bookkeeper/pull/4058) +* Allow to set max operation numbers in a single rocksdb batch [PR #4044](https://github.com/apache/bookkeeper/pull/4044) +* Change pendingDeletedLedgers as ConcurrentHashSet [PR #3989](https://github.com/apache/bookkeeper/pull/3989) +* Avoid compaction to trigger extra flushes DbLedgerStorage [PR #3959](https://github.com/apache/bookkeeper/pull/3959) +* Support skip invalid journal record in replying journal stage [PR #3956](https://github.com/apache/bookkeeper/pull/3956) +* Optimize getEntryLogMetadata [PR 
#3948](https://github.com/apache/bookkeeper/pull/3948) +* drop invalid entryFormat arg from shell command [PR #3938](https://github.com/apache/bookkeeper/pull/3938) +* Enable PCBC completionObjects autoShrink to reduce memory usage and gc [PR #3913](https://github.com/apache/bookkeeper/pull/3913) +* Prevent transit to writable mode when forceReadOnly mode is active [PR #3881](https://github.com/apache/bookkeeper/pull/3881) +* Make read entry request recyclable [PR #3842](https://github.com/apache/bookkeeper/pull/3842) +* Fixed the pivot selection in the group quick-sort [PR #3800](https://github.com/apache/bookkeeper/pull/3800) +* Execute clean indexes in finally [PR #3772](https://github.com/apache/bookkeeper/pull/3772) +* Add small files check in garbage collection [PR #3631](https://github.com/apache/bookkeeper/pull/3631) + + +#### Dependency updates +* Update Jetty dependency [PR #4141](https://github.com/apache/bookkeeper/pull/4141) +* Bump guava version from 31.0.1-jre to 32.0.1-jre [PR #4008](https://github.com/apache/bookkeeper/pull/4008) +* Upgrade snappy-java to address multiple CVEs [PR #3993](https://github.com/apache/bookkeeper/pull/3993) +* Upgrade grpc and protobuf to address CVE-2023-32732 [PR #3992](https://github.com/apache/bookkeeper/pull/3992) +* Downgrade grpc and protobuf to avoid introducing breaking change [PR #4001](https://github.com/apache/bookkeeper/pull/4001) +* Fix issue with binary compatibility with older grpc versions at runtime in the client [PR #3997](https://github.com/apache/bookkeeper/pull/3997) +* Upgrade jetty version to 9.4.51.v20230217 [PR #3937](https://github.com/apache/bookkeeper/pull/3937) +* Upgrade docker base image to resolve CVE-2023-0286 [PR #3916](https://github.com/apache/bookkeeper/pull/3916) +* Upgrade bc-fips to 1.0.2.4 to fix CVE-2022-45146 [PR #3915](https://github.com/apache/bookkeeper/pull/3915) +* Remove avro, hadoop-auth and jersey-json dependencies from hadoop-common to resolve CVE-2019-10202, 
CVE-2023-1370 and CVE-2022-45685 [PR #3911](https://github.com/apache/bookkeeper/pull/3911) +* Sync dependency version with source version in license [PR #3633](https://github.com/apache/bookkeeper/pull/3633) +* Bump grpc from 1.45.1 to 1.47.0, solve dependency check FP [PR #3305](https://github.com/apache/bookkeeper/pull/3305) +* Optimize log4j dependency in Bookkeeper [PR #3892](https://github.com/apache/bookkeeper/pull/3892) + + +#### Details + +https://github.com/apache/bookkeeper/pulls?q=is%3Apr+label%3Arelease%2F4.15.5+is%3Aclosed+ + ## 4.16.3 Release 4.16.3 includes multiple bug fixes and some dependencies CVE fixes. diff --git a/site3/website/versioned_docs/version-4.15.4/admin/autorecovery.md b/site3/website/versioned_docs/version-4.15.5/admin/autorecovery.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/autorecovery.md rename to site3/website/versioned_docs/version-4.15.5/admin/autorecovery.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/bookies.md b/site3/website/versioned_docs/version-4.15.5/admin/bookies.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/bookies.md rename to site3/website/versioned_docs/version-4.15.5/admin/bookies.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/decomission.md b/site3/website/versioned_docs/version-4.15.5/admin/decomission.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/decomission.md rename to site3/website/versioned_docs/version-4.15.5/admin/decomission.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/geo-replication.md b/site3/website/versioned_docs/version-4.15.5/admin/geo-replication.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/geo-replication.md rename to site3/website/versioned_docs/version-4.15.5/admin/geo-replication.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/http.md 
b/site3/website/versioned_docs/version-4.15.5/admin/http.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/http.md rename to site3/website/versioned_docs/version-4.15.5/admin/http.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/metrics.md b/site3/website/versioned_docs/version-4.15.5/admin/metrics.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/metrics.md rename to site3/website/versioned_docs/version-4.15.5/admin/metrics.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/perf.md b/site3/website/versioned_docs/version-4.15.5/admin/perf.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/perf.md rename to site3/website/versioned_docs/version-4.15.5/admin/perf.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/placement.md b/site3/website/versioned_docs/version-4.15.5/admin/placement.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/placement.md rename to site3/website/versioned_docs/version-4.15.5/admin/placement.md diff --git a/site3/website/versioned_docs/version-4.15.4/admin/upgrade.md b/site3/website/versioned_docs/version-4.15.5/admin/upgrade.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/admin/upgrade.md rename to site3/website/versioned_docs/version-4.15.5/admin/upgrade.md diff --git a/site3/website/versioned_docs/version-4.15.4/api/distributedlog-api.md b/site3/website/versioned_docs/version-4.15.5/api/distributedlog-api.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/api/distributedlog-api.md rename to site3/website/versioned_docs/version-4.15.5/api/distributedlog-api.md diff --git a/site3/website/versioned_docs/version-4.15.4/api/ledger-adv-api.md b/site3/website/versioned_docs/version-4.15.5/api/ledger-adv-api.md similarity index 100% rename from 
site3/website/versioned_docs/version-4.15.4/api/ledger-adv-api.md rename to site3/website/versioned_docs/version-4.15.5/api/ledger-adv-api.md diff --git a/site3/website/versioned_docs/version-4.15.4/api/ledger-api.md b/site3/website/versioned_docs/version-4.15.5/api/ledger-api.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/api/ledger-api.md rename to site3/website/versioned_docs/version-4.15.5/api/ledger-api.md diff --git a/site3/website/versioned_docs/version-4.15.4/api/overview.md b/site3/website/versioned_docs/version-4.15.5/api/overview.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/api/overview.md rename to site3/website/versioned_docs/version-4.15.5/api/overview.md diff --git a/site3/website/versioned_docs/version-4.15.4/deployment/kubernetes.md b/site3/website/versioned_docs/version-4.15.5/deployment/kubernetes.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/deployment/kubernetes.md rename to site3/website/versioned_docs/version-4.15.5/deployment/kubernetes.md diff --git a/site3/website/versioned_docs/version-4.15.4/deployment/manual.md b/site3/website/versioned_docs/version-4.15.5/deployment/manual.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/deployment/manual.md rename to site3/website/versioned_docs/version-4.15.5/deployment/manual.md diff --git a/site3/website/versioned_docs/version-4.15.4/development/codebase.md b/site3/website/versioned_docs/version-4.15.5/development/codebase.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/development/codebase.md rename to site3/website/versioned_docs/version-4.15.5/development/codebase.md diff --git a/site3/website/versioned_docs/version-4.15.4/development/protocol.md b/site3/website/versioned_docs/version-4.15.5/development/protocol.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/development/protocol.md rename to 
site3/website/versioned_docs/version-4.15.5/development/protocol.md diff --git a/site3/website/versioned_docs/version-4.15.4/getting-started/concepts.md b/site3/website/versioned_docs/version-4.15.5/getting-started/concepts.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/getting-started/concepts.md rename to site3/website/versioned_docs/version-4.15.5/getting-started/concepts.md diff --git a/site3/website/versioned_docs/version-4.15.4/getting-started/installation.md b/site3/website/versioned_docs/version-4.15.5/getting-started/installation.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/getting-started/installation.md rename to site3/website/versioned_docs/version-4.15.5/getting-started/installation.md diff --git a/site3/website/versioned_docs/version-4.15.4/getting-started/run-locally.md b/site3/website/versioned_docs/version-4.15.5/getting-started/run-locally.md similarity index 100% rename from site3/website/versioned_docs/version-4.15.4/getting-started/run-locally.md rename to site3/website/versioned_docs/version-4.15.5/getting-started/run-locally.md diff --git a/site3/website/versioned_docs/version-4.15.4/overview/overview.md b/site3/website/versioned_docs/version-4.15.5/overview/overview.md similarity index 99% rename from site3/website/versioned_docs/version-4.15.4/overview/overview.md rename to site3/website/versioned_docs/version-4.15.5/overview/overview.md index 4d2351546b3..6b4ac18c223 100644 --- a/site3/website/versioned_docs/version-4.15.4/overview/overview.md +++ b/site3/website/versioned_docs/version-4.15.5/overview/overview.md @@ -1,6 +1,6 @@ --- id: overview -title: Apache BookKeeper 4.15.4 +title: Apache BookKeeper 4.15.5 --- -4.16.3 +4.16.4 @@ -37,7 +37,7 @@ shaded library, which relocate classes of protobuf and guava into a different na ```xml -4.16.3 +4.16.4 @@ -53,12 +53,12 @@ If you're using [Gradle](https://gradle.org/), add this to your [`build.gradle`] ```groovy dependencies 
{ - compile group: 'org.apache.bookkeeper', name: 'bookkeeper-server', version: '4.16.3' + compile group: 'org.apache.bookkeeper', name: 'bookkeeper-server', version: '4.16.4' } // Alternatively: dependencies { - compile 'org.apache.bookkeeper:bookkeeper-server:4.16.3' + compile 'org.apache.bookkeeper:bookkeeper-server:4.16.4' } ``` diff --git a/site3/website/versioned_docs/version-4.16.3/api/overview.md b/site3/website/versioned_docs/version-4.16.4/api/overview.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/api/overview.md rename to site3/website/versioned_docs/version-4.16.4/api/overview.md diff --git a/site3/website/versioned_docs/version-4.16.3/deployment/kubernetes.md b/site3/website/versioned_docs/version-4.16.4/deployment/kubernetes.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/deployment/kubernetes.md rename to site3/website/versioned_docs/version-4.16.4/deployment/kubernetes.md diff --git a/site3/website/versioned_docs/version-4.16.3/deployment/manual.md b/site3/website/versioned_docs/version-4.16.4/deployment/manual.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/deployment/manual.md rename to site3/website/versioned_docs/version-4.16.4/deployment/manual.md diff --git a/site3/website/versioned_docs/version-4.16.3/development/codebase.md b/site3/website/versioned_docs/version-4.16.4/development/codebase.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/development/codebase.md rename to site3/website/versioned_docs/version-4.16.4/development/codebase.md diff --git a/site3/website/versioned_docs/version-4.16.3/development/protocol.md b/site3/website/versioned_docs/version-4.16.4/development/protocol.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/development/protocol.md rename to site3/website/versioned_docs/version-4.16.4/development/protocol.md diff --git 
a/site3/website/versioned_docs/version-4.16.3/getting-started/concepts.md b/site3/website/versioned_docs/version-4.16.4/getting-started/concepts.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/getting-started/concepts.md rename to site3/website/versioned_docs/version-4.16.4/getting-started/concepts.md diff --git a/site3/website/versioned_docs/version-4.16.3/getting-started/installation.md b/site3/website/versioned_docs/version-4.16.4/getting-started/installation.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/getting-started/installation.md rename to site3/website/versioned_docs/version-4.16.4/getting-started/installation.md diff --git a/site3/website/versioned_docs/version-4.16.3/getting-started/run-locally.md b/site3/website/versioned_docs/version-4.16.4/getting-started/run-locally.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/getting-started/run-locally.md rename to site3/website/versioned_docs/version-4.16.4/getting-started/run-locally.md diff --git a/site3/website/versioned_docs/version-4.16.3/overview/overview.md b/site3/website/versioned_docs/version-4.16.4/overview/overview.md similarity index 96% rename from site3/website/versioned_docs/version-4.16.3/overview/overview.md rename to site3/website/versioned_docs/version-4.16.4/overview/overview.md index 5597f25eab1..c3b1b231b96 100644 --- a/site3/website/versioned_docs/version-4.16.3/overview/overview.md +++ b/site3/website/versioned_docs/version-4.16.4/overview/overview.md @@ -1,6 +1,6 @@ --- id: overview -title: Apache BookKeeper 4.16.3 +title: Apache BookKeeper 4.16.4-SNAPSHOT --- -This documentation is for Apache BookKeeper™ version 4.16.3. +This documentation is for Apache BookKeeper™ version 4.16.4. Apache BookKeeper™ is a scalable, fault-tolerant, low-latency storage service optimized for real-time workloads. 
It offers durability, replication, and strong consistency as essentials for building reliable real-time applications. @@ -39,7 +39,7 @@ Object/[BLOB](https://en.wikipedia.org/wiki/Binary_large_object) storage | Stori Learn more about Apache BookKeeper™ and what it can do for your organization: -- [Apache BookKeeper 4.16.3 Release Notes](/release-notes#4163) +- [Apache BookKeeper 4.16.4 Release Notes](/release-notes#4164) - [Java API docs]({{ site.javadoc_base_url }}) Or start [using](../getting-started/installation) Apache BookKeeper today. diff --git a/site3/website/versioned_docs/version-4.16.3/reference/cli.md b/site3/website/versioned_docs/version-4.16.4/reference/cli.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/reference/cli.md rename to site3/website/versioned_docs/version-4.16.4/reference/cli.md diff --git a/site3/website/versioned_docs/version-4.16.3/reference/config.md b/site3/website/versioned_docs/version-4.16.4/reference/config.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/reference/config.md rename to site3/website/versioned_docs/version-4.16.4/reference/config.md diff --git a/site3/website/versioned_docs/version-4.16.3/security/overview.md b/site3/website/versioned_docs/version-4.16.4/security/overview.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/security/overview.md rename to site3/website/versioned_docs/version-4.16.4/security/overview.md diff --git a/site3/website/versioned_docs/version-4.16.3/security/sasl.md b/site3/website/versioned_docs/version-4.16.4/security/sasl.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/security/sasl.md rename to site3/website/versioned_docs/version-4.16.4/security/sasl.md diff --git a/site3/website/versioned_docs/version-4.16.3/security/tls.md b/site3/website/versioned_docs/version-4.16.4/security/tls.md similarity index 100% rename from 
site3/website/versioned_docs/version-4.16.3/security/tls.md rename to site3/website/versioned_docs/version-4.16.4/security/tls.md diff --git a/site3/website/versioned_docs/version-4.16.3/security/zookeeper.md b/site3/website/versioned_docs/version-4.16.4/security/zookeeper.md similarity index 100% rename from site3/website/versioned_docs/version-4.16.3/security/zookeeper.md rename to site3/website/versioned_docs/version-4.16.4/security/zookeeper.md diff --git a/site3/website/versioned_sidebars/version-4.15.4-sidebars.json b/site3/website/versioned_sidebars/version-4.15.5-sidebars.json similarity index 100% rename from site3/website/versioned_sidebars/version-4.15.4-sidebars.json rename to site3/website/versioned_sidebars/version-4.15.5-sidebars.json diff --git a/site3/website/versioned_sidebars/version-4.16.3-sidebars.json b/site3/website/versioned_sidebars/version-4.16.4-sidebars.json similarity index 68% rename from site3/website/versioned_sidebars/version-4.16.3-sidebars.json rename to site3/website/versioned_sidebars/version-4.16.4-sidebars.json index 970cb708173..b18efb25991 100644 --- a/site3/website/versioned_sidebars/version-4.16.3-sidebars.json +++ b/site3/website/versioned_sidebars/version-4.16.4-sidebars.json @@ -2,7 +2,7 @@ "docsSidebar": [ { "type": "doc", - "id": "version-4.16.3/overview/overview", + "id": "version-4.16.4/overview/overview", "label": "Overview" }, { @@ -12,17 +12,17 @@ "items": [ { "type": "doc", - "id": "version-4.16.3/getting-started/installation", + "id": "version-4.16.4/getting-started/installation", "label": "Installation" }, { "type": "doc", - "id": "version-4.16.3/getting-started/run-locally", + "id": "version-4.16.4/getting-started/run-locally", "label": "Run bookies locally" }, { "type": "doc", - "id": "version-4.16.3/getting-started/concepts", + "id": "version-4.16.4/getting-started/concepts", "label": "Concepts and architecture" } ] @@ -33,12 +33,12 @@ "items": [ { "type": "doc", - "id": "version-4.16.3/deployment/manual", 
+ "id": "version-4.16.4/deployment/manual", "label": "Manual deployment" }, { "type": "doc", - "id": "version-4.16.3/deployment/kubernetes", + "id": "version-4.16.4/deployment/kubernetes", "label": "BookKeeper on Kubernetes" } ] @@ -49,32 +49,32 @@ "items": [ { "type": "doc", - "id": "version-4.16.3/admin/bookies", + "id": "version-4.16.4/admin/bookies", "label": "BookKeeper administration" }, { "type": "doc", - "id": "version-4.16.3/admin/autorecovery", + "id": "version-4.16.4/admin/autorecovery", "label": "AutoRecovery" }, { "type": "doc", - "id": "version-4.16.3/admin/metrics", + "id": "version-4.16.4/admin/metrics", "label": "Metrics collection" }, { "type": "doc", - "id": "version-4.16.3/admin/upgrade", + "id": "version-4.16.4/admin/upgrade", "label": "Upgrade" }, { "type": "doc", - "id": "version-4.16.3/admin/http", + "id": "version-4.16.4/admin/http", "label": "Admin REST API" }, { "type": "doc", - "id": "version-4.16.3/admin/decomission", + "id": "version-4.16.4/admin/decomission", "label": "Decommissioning Bookies" } ] @@ -85,22 +85,22 @@ "items": [ { "type": "doc", - "id": "version-4.16.3/api/overview", + "id": "version-4.16.4/api/overview", "label": "Overview" }, { "type": "doc", - "id": "version-4.16.3/api/ledger-api", + "id": "version-4.16.4/api/ledger-api", "label": "Ledger API" }, { "type": "doc", - "id": "version-4.16.3/api/ledger-adv-api", + "id": "version-4.16.4/api/ledger-adv-api", "label": "Advanced Ledger API" }, { "type": "doc", - "id": "version-4.16.3/api/distributedlog-api", + "id": "version-4.16.4/api/distributedlog-api", "label": "DistributedLog" } ] @@ -111,22 +111,22 @@ "items": [ { "type": "doc", - "id": "version-4.16.3/security/overview", + "id": "version-4.16.4/security/overview", "label": "Overview" }, { "type": "doc", - "id": "version-4.16.3/security/tls", + "id": "version-4.16.4/security/tls", "label": "TLS Authentication" }, { "type": "doc", - "id": "version-4.16.3/security/sasl", + "id": "version-4.16.4/security/sasl", "label": 
"SASL Authentication" }, { "type": "doc", - "id": "version-4.16.3/security/zookeeper", + "id": "version-4.16.4/security/zookeeper", "label": "ZooKeeper Authentication" } ] @@ -137,7 +137,7 @@ "items": [ { "type": "doc", - "id": "version-4.16.3/development/protocol", + "id": "version-4.16.4/development/protocol", "label": "BookKeeper protocol" } ] @@ -148,12 +148,12 @@ "items": [ { "type": "doc", - "id": "version-4.16.3/reference/config", + "id": "version-4.16.4/reference/config", "label": "Configuration" }, { "type": "doc", - "id": "version-4.16.3/reference/cli", + "id": "version-4.16.4/reference/cli", "label": "Command-line tools" } ] diff --git a/site3/website/versions.json b/site3/website/versions.json index 961022b3fd2..2b8455dbdbd 100644 --- a/site3/website/versions.json +++ b/site3/website/versions.json @@ -1,6 +1,6 @@ [ - "4.16.3", - "4.15.4", + "4.16.4", + "4.15.5", "4.14.8", "4.13.0", "4.12.1", diff --git a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java index 2333df2f3fd..12e070d8816 100644 --- a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java +++ b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java @@ -477,8 +477,9 @@ private int getNow(int hash) { */ public double getRate(int seconds) { seconds = Math.min(seconds, timeWindow - 2); - int t = getNow(getHash()) - 1; // start from last completed second - int secFrom = t - seconds; + int t = getNow(getHash()); + // start from last completed second + int secFrom = t - seconds - 1; long sum = 0; for (int h = 0; h < HASH_SIZE; h++) { for (int i = t; i > secFrom; i--) { diff --git 
a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats/codahale/FastTimerTest.java b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats/codahale/FastTimerTest.java index bc95dff074f..0e900652cd9 100644 --- a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats/codahale/FastTimerTest.java +++ b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats/codahale/FastTimerTest.java @@ -17,6 +17,7 @@ package org.apache.bookkeeper.stats.codahale; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import com.codahale.metrics.Snapshot; import java.util.ArrayList; @@ -52,6 +53,17 @@ protected int getTime() { }; } + @Test + public void testMeanRate() { + FastTimer t = getMockedFastTimer(1, FastTimer.Buckets.fine); + + t.update(10, TimeUnit.NANOSECONDS); + assertTrue("should calculate mean before advancing time", t.getMeanRate() > 0); + + incSec(); + assertTrue("should calculate mean after advancing time", t.getMeanRate() > 0); + } + @Test public void testBuckets() { FastTimer t = new FastTimer(1, FastTimer.Buckets.fine); diff --git a/tests/docker-images/statestore-image/Dockerfile b/tests/docker-images/statestore-image/Dockerfile index 7605541188d..e89175be811 100644 --- a/tests/docker-images/statestore-image/Dockerfile +++ b/tests/docker-images/statestore-image/Dockerfile @@ -40,10 +40,10 @@ RUN set -x \ && apt-get install -y --no-install-recommends python3 pip \ && ln -s /usr/bin/python3 /usr/bin/python \ && apt-get install -y --no-install-recommends gpg gpg-agent wget sudo \ - && apt-get -y --purge autoremove \ - && apt-get autoclean \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + && apt-get -y --purge autoremove \ + && apt-get autoclean \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ && pip install zk-shell \ && mkdir -pv /opt \ 
&& cd /opt