chore: prefetch keys in opmget #4778

Open · wants to merge 1 commit into base: main
41 changes: 32 additions & 9 deletions src/server/string_family.cc
@@ -35,6 +35,10 @@
#include "server/transaction.h"
#include "util/fibers/future.h"

ABSL_FLAG(bool, mget_prefetch_keys, true, "If true, MGET will prefetch keys before reading them");

ABSL_FLAG(bool, mget_dedup_keys, false, "If true, MGET will deduplicate keys");

namespace dfly {

namespace {
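
(Note: both knobs are plain ABSL_FLAG definitions, so, assuming Dragonfly's usual absl flag parsing, they could presumably be toggled at server startup, e.g. dragonfly --mget_prefetch_keys=false --mget_dedup_keys=true.)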
@@ -547,23 +551,42 @@ MGetResponse OpMGet(fb2::BlockingCounter wait_bc, uint8_t fetch_mask, const Tran

absl::InlinedVector<Item, 32> items(keys.Size());

// First, fetch all iterators and count total size ahead
size_t total_size = 0;
unsigned index = 0;
static bool mget_prefetch_keys = absl::GetFlag(FLAGS_mget_prefetch_keys);
static bool mget_dedup_keys = absl::GetFlag(FLAGS_mget_dedup_keys);

// We can not make it thread-local because we may preempt during the Find loop due to
// serialization with the bumpup calls.

// TODO: consider separating BumpUps from finds because it becomes too complicated
// to reason about.
absl::flat_hash_map<string_view, unsigned> key_index;
if (mget_dedup_keys) {
key_index.reserve(keys.Size());
}

// First, fetch all iterators and count total size ahead
size_t total_size = 0;
unsigned index = 0;
key_index.reserve(keys.Size());
PrimeTable& pt = db_slice.GetDBTable(t->GetDbIndex())->prime;

constexpr unsigned kPrefetchLimit = 32;
if (mget_prefetch_keys) {
unsigned prefetched = 0;
for (string_view key : keys) {
pt.Prefetch(key);
if (++prefetched >= kPrefetchLimit) {
break;
}
}
}
Comment on lines +572 to +581
@BorysTheDev (Contributor) commented on Mar 17, 2025:
Can we move it before key_index.reserve() or even higher to have more time for memory prefetching?
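
For illustration, a minimal sketch of the reordering suggested above, as it might look inside OpMGet: issue the bounded prefetch pass first, then set up the dedup map while the prefetched buckets are still being pulled into cache. This is a hypothetical rearrangement, not part of this commit, and it reuses the surrounding variables from the patch (db_slice, t, keys and the two static flag bools):

  // Kick off the bounded prefetch pass as early as possible.
  PrimeTable& pt = db_slice.GetDBTable(t->GetDbIndex())->prime;
  constexpr unsigned kPrefetchLimit = 32;
  if (mget_prefetch_keys) {
    unsigned prefetched = 0;
    for (string_view key : keys) {
      pt.Prefetch(key);
      if (++prefetched >= kPrefetchLimit)
        break;
    }
  }

  // Only afterwards reserve the dedup map, overlapping its allocation
  // with the in-flight prefetches.
  absl::flat_hash_map<string_view, unsigned> key_index;
  if (mget_dedup_keys) {
    key_index.reserve(keys.Size());
  }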


for (string_view key : keys) {
auto [it, inserted] = key_index.try_emplace(key, index);
if (!inserted) { // duplicate -> point to the first occurrence.
items[index++].source_index = it->second;
continue;
if (mget_dedup_keys) {
auto [it, inserted] = key_index.try_emplace(key, index);
if (!inserted) { // duplicate -> point to the first occurrence.
items[index++].source_index = it->second;
continue;
}
}

auto it_res = db_slice.FindReadOnly(t->GetDbContext(), key, OBJ_STRING);
@@ -618,8 +641,8 @@
}
}
}
key_index.clear();

key_index.clear();
return response;
}

6 changes: 4 additions & 2 deletions src/server/tiered_storage_test.cc
@@ -276,7 +276,10 @@ TEST_F(TieredStorageTest, FlushAll) {
Metrics metrics;
ExpectConditionWithinTimeout([&] {
metrics = GetMetrics();
return metrics.events.hits > 2;

// Note that metrics.events.hits is not consistent with total_fetches
// and it can happen that hits is greater than total_fetches due to in-progress reads.
return metrics.tiered_stats.total_fetches > 2;
});
LOG(INFO) << FormatMetrics(metrics);

@@ -290,7 +293,6 @@ TEST_F(TieredStorageTest, FlushAll) {
LOG(INFO) << FormatMetrics(metrics);

EXPECT_EQ(metrics.db_stats.front().tiered_entries, 0u);
EXPECT_GT(metrics.tiered_stats.total_fetches, 2u);
}

TEST_F(TieredStorageTest, FlushPending) {