This repository was archived by the owner on Jan 22, 2025. It is now read-only.
Closed
18 commits
13 changes: 13 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

5 changes: 3 additions & 2 deletions Cargo.toml
@@ -40,6 +40,7 @@
 jsonrpc-pubsub = "10.0.1"
 jsonrpc-ws-server = "10.0.1"
 libc = "0.2.48"
 log = "0.4.2"
+memmap = "0.7.0"
 nix = "0.13.0"
 rand = "0.6.5"
 rand_chacha = "0.1.1"
@@ -117,5 +118,5 @@ members = [
     "upload-perf",
     "vote-signer",
     "wallet",
-]
-exclude = ["programs/bpf/rust/noop"]
+]
+exclude = ["programs/bpf/rust/noop"]
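
The new memmap dependency presumably supports the blob store's file access. As a rough sketch only (not code from this PR), reading a stored record back through a memory map with memmap 0.7 might look like the following; read_record and the fixed offset/length record layout are assumptions for illustration:

use std::fs::File;
use std::io::Result;

use memmap::Mmap;

/// Hypothetical helper: read `len` bytes at `offset` from a ledger file
/// through a memory map instead of seek + read.
fn read_record(path: &str, offset: usize, len: usize) -> Result<Vec<u8>> {
    let file = File::open(path)?;
    // Safety (memmap 0.7): the caller must ensure the file is not
    // concurrently truncated or resized while the map is alive.
    let map = unsafe { Mmap::map(&file)? };
    // Mmap derefs to [u8], so the record can be sliced out directly.
    Ok(map[offset..offset + len].to_vec())
}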
171 changes: 171 additions & 0 deletions benches/blob_store.rs
@@ -0,0 +1,171 @@
#![feature(test)]
use rand;

extern crate test;

use solana::blob_store::{get_tmp_store_path, BlobStore};
use solana::entry::{make_large_test_entries, make_tiny_test_entries, EntrySlice};
use solana::packet::Blob;

use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use test::Bencher;

// Given some blobs and a ledger at ledger_path, benchmark writing the blobs to the ledger
fn bench_write_blobs(bench: &mut Bencher, blobs: &mut [Blob], ledger_path: &str) {
    let mut store = BlobStore::open(&ledger_path).unwrap();

    bench.iter(move || {
        store.put_blobs(&blobs[..]).expect("Failed to insert blobs");
    });

    BlobStore::destroy(&ledger_path).expect("Expected successful database destruction");
}

// Insert some blobs into the ledger in preparation for read benchmarks
fn setup_read_bench(store: &mut BlobStore, num_small_blobs: u64, num_large_blobs: u64, slot: u64) {
    // Make some big and small entries
    let mut entries = make_large_test_entries(num_large_blobs as usize);
    entries.extend(make_tiny_test_entries(num_small_blobs as usize));

    // Convert the entries to blobs, write the blobs to the ledger
    let mut blobs = entries.to_blobs();
    for (index, b) in blobs.iter_mut().enumerate() {
        b.set_index(index as u64);
        b.set_slot(slot);
    }

    store
        .put_blobs(&blobs)
        .expect("Expected successful insertion of blobs into ledger");
}

// Write small blobs to the ledger
#[bench]
#[ignore]
fn bench_write_small(bench: &mut Bencher) {
    let ledger_path = get_tmp_store_path("bench_write_small").unwrap();
    let num_entries = 32 * 1024;
    let entries = make_tiny_test_entries(num_entries);
    let mut blobs = entries.to_blobs();
    for (index, b) in blobs.iter_mut().enumerate() {
        b.set_index(index as u64);
    }
    bench_write_blobs(bench, &mut blobs, &ledger_path.to_string_lossy());
}

// Write big blobs to the ledger
#[bench]
#[ignore]
fn bench_write_big(bench: &mut Bencher) {
    let ledger_path = get_tmp_store_path("bench_write_big").unwrap();
    let num_entries = 1 * 1024;
    let entries = make_large_test_entries(num_entries);
    let mut blobs = entries.to_blobs();
    for (index, b) in blobs.iter_mut().enumerate() {
        b.set_index(index as u64);
    }

    bench_write_blobs(bench, &mut blobs, &ledger_path.to_string_lossy());
}

#[bench]
#[ignore]
fn bench_read_sequential(bench: &mut Bencher) {
    let ledger_path = get_tmp_store_path("bench_read_sequential").unwrap();
    let mut store = BlobStore::open(&ledger_path).unwrap();

    // Insert some big and small blobs into the ledger
    let num_small_blobs = 32 * 1024;
    let num_large_blobs = 32 * 1024;
    let total_blobs = num_small_blobs + num_large_blobs;
    let slot = 0;
    setup_read_bench(&mut store, num_small_blobs, num_large_blobs, slot);

    let num_reads = total_blobs / 15;
    let mut rng = rand::thread_rng();
    bench.iter(move || {
        // Generate a random starting point in the range [0, total_blobs - 1],
        // then read num_reads blobs sequentially
        let start_index = rng.gen_range(0, num_small_blobs + num_large_blobs);
        for i in start_index..start_index + num_reads {
            let _ = store.get_blob_data(slot, i as u64 % total_blobs);
        }
    });

    BlobStore::destroy(&ledger_path).expect("Expected successful database destruction");
}

#[bench]
#[ignore]
fn bench_read_random(bench: &mut Bencher) {
    let ledger_path = get_tmp_store_path("bench_read_random").unwrap();
    let mut store = BlobStore::open(&ledger_path).unwrap();

    // Insert some big and small blobs into the ledger
    let num_small_blobs = 32 * 1024;
    let num_large_blobs = 32 * 1024;
    let total_blobs = num_small_blobs + num_large_blobs;
    let slot = 0;
    setup_read_bench(&mut store, num_small_blobs, num_large_blobs, slot);

    let num_reads = total_blobs / 15;

    // Generate a num_reads-sized random sample of indexes in the range [0, total_blobs - 1],
    // simulating random reads
    let mut rng = rand::thread_rng();
    let indexes: Vec<usize> = (0..num_reads)
        .map(|_| rng.gen_range(0, total_blobs) as usize)
        .collect();

Contributor:
are you intending to verify that caching helps, once implemented?

Contributor Author:
Yes. I've also experimented with parallelizing writes (partitioned by slot), but I couldn't get that to work safely without copying so much memory that it was much slower overall.

The little caching I've done so far brought this benchmark's ns/iter to roughly 70-80% of the db_ledger benchmarks, where before it was about 110-120%.

I tried several different ways of exploiting concurrency, including tokio and futures, and a separate writer thread communicating over std::sync::mpsc and crossbeam-channel channels. But I could not do it both (1) safely and (2) without so much copying that it made everything much slower.
    bench.iter(move || {
        for i in indexes.iter() {
            let _ = store.get_blob_data(slot, *i as u64);
        }
    });

    BlobStore::destroy(&ledger_path).expect("Expected successful database destruction");
}
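
For illustration only, the rejected writer-thread experiment described in the thread above might have looked roughly like this. Blob is a unit-struct stand-in for solana::packet::Blob, and spawn_writer is a hypothetical helper, not code from this PR; the sketch shows the cost the author ran into, namely that every batch must be moved (and in practice copied out of shared buffers) into the channel before the writer thread can persist it:

use std::sync::mpsc::{channel, Sender};
use std::thread;

// Stand-in for solana::packet::Blob, just to keep the sketch self-contained.
struct Blob;

fn spawn_writer() -> (Sender<Vec<Blob>>, thread::JoinHandle<()>) {
    let (tx, rx) = channel::<Vec<Blob>>();
    let handle = thread::spawn(move || {
        // Drain batches until every Sender is dropped.
        while let Ok(batch) = rx.recv() {
            // In the real experiment, something like store.put_blobs(&batch)
            // would run here, off the caller's thread.
            drop(batch);
        }
    });
    (tx, handle)
}

fn main() {
    let (tx, handle) = spawn_writer();
    // Each send moves the whole batch across the channel.
    tx.send(vec![Blob, Blob]).unwrap();
    drop(tx); // closing the channel lets the writer thread exit
    handle.join().unwrap();
}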

#[bench]
#[ignore]
fn bench_insert_data_blob_small(bench: &mut Bencher) {
    let ledger_path = get_tmp_store_path("bench_insert_data_blob_small").unwrap();
    let mut store = BlobStore::open(&ledger_path).unwrap();
    let num_entries = 32 * 1024;
    let entries = make_tiny_test_entries(num_entries);
    let mut blobs = entries.to_blobs();

    blobs.shuffle(&mut thread_rng());

    bench.iter(move || {
        for blob in blobs.iter_mut() {
            let index = blob.index();
            blob.set_index(index + num_entries as u64);
        }
        store.put_blobs(&blobs).unwrap();
    });

    BlobStore::destroy(&ledger_path).expect("Expect successful destruction");
}

#[bench]
#[ignore]
fn bench_insert_data_blob_big(bench: &mut Bencher) {
    let ledger_path = get_tmp_store_path("bench_insert_data_blob_big").unwrap();
    let mut store = BlobStore::open(&ledger_path).unwrap();
    let num_entries = 32 * 1024;
    let entries = make_large_test_entries(num_entries);
    let mut blobs = entries.to_blobs();
    blobs.shuffle(&mut thread_rng());

    bench.iter(move || {
        let mut i = 0;
        for blob in blobs.iter_mut() {
            blob.set_index(i + num_entries as u64);
            i += 1;
        }

        store.put_blobs(&blobs).expect("failed to insert blobs");
    });

    BlobStore::destroy(&ledger_path).expect("Expect successful destruction");
}
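
These benches use the unstable test crate (#![feature(test)]), so they require a nightly toolchain, and because each one is marked #[ignore], something like `cargo +nightly bench -- --ignored` would presumably be needed to actually run them.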