
Commit 7f17b4b

bench: Add a benchmark for the LinkedChunk with the EventCacheStore.
1 parent: fa3a9d8

2 files changed: +160 -0

benchmarks/Cargo.toml

Lines changed: 4 additions & 0 deletions
@@ -29,6 +29,10 @@ pprof = { version = "0.14.0", features = ["flamegraph", "criterion"] }
 name = "crypto_bench"
 harness = false
 
+[[bench]]
+name = "linked_chunk"
+harness = false
+
 [[bench]]
 name = "store_bench"
 harness = false

benchmarks/benches/linked_chunk.rs

Lines changed: 156 additions & 0 deletions
@@ -0,0 +1,156 @@
use std::{sync::Arc, time::Duration};

use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput};
use matrix_sdk::{
    linked_chunk::{LinkedChunk, Update},
    SqliteEventCacheStore,
};
use matrix_sdk_base::event_cache::{
    store::{DynEventCacheStore, IntoEventCacheStore, MemoryStore, DEFAULT_CHUNK_CAPACITY},
    Event, Gap,
};
use matrix_sdk_test::{event_factory::EventFactory, ALICE};
use ruma::{room_id, EventId};
use tempfile::tempdir;
use tokio::runtime::Builder;

#[derive(Clone, Debug)]
enum Operation {
    PushItemsBack(Vec<Event>),
    PushGapBack(Gap),
}

pub fn writing(c: &mut Criterion) {
    // Create a new asynchronous runtime.
    let runtime = Builder::new_multi_thread()
        .enable_time()
        .enable_io()
        .build()
        .expect("Failed to create an asynchronous runtime");

    let room_id = room_id!("!foo:bar.baz");
    let event_factory = EventFactory::new().room(room_id).sender(&ALICE);

    let mut group = c.benchmark_group("writing");
    group.sample_size(10).measurement_time(Duration::from_secs(30));

    for number_of_events in [10, 100, 1000, 10_000, 100_000] {
        let sqlite_temp_dir = tempdir().unwrap();

        // Declare new stores for this set of events.
        let stores: [(&str, Option<Arc<DynEventCacheStore>>); 3] = [
            ("none", None),
            ("memory store", Some(MemoryStore::default().into_event_cache_store())),
            (
                "sqlite store",
                runtime.block_on(async {
                    Some(
                        SqliteEventCacheStore::open(sqlite_temp_dir.path().join("bench"), None)
                            .await
                            .unwrap()
                            .into_event_cache_store(),
                    )
                }),
            ),
        ];

        for (store_name, store) in stores {
            // Create the operations we want to bench.
            let mut operations = Vec::new();

            {
                let mut events = (0..number_of_events)
                    .map(|nth| {
                        event_factory
                            .text_msg("foo")
                            .event_id(&EventId::parse(format!("$ev{nth}")).unwrap())
                            .into_event()
                    })
                    .peekable();

                let mut gap_nth = 0;

                while events.peek().is_some() {
                    {
                        let events_to_push_back = events.by_ref().take(80).collect::<Vec<_>>();

                        if events_to_push_back.is_empty() {
                            break;
                        }

                        operations.push(Operation::PushItemsBack(events_to_push_back));
                    }

                    {
                        operations.push(Operation::PushGapBack(Gap {
                            prev_token: format!("gap{gap_nth}"),
                        }));
                        gap_nth += 1;
                    }
                }
            }

            // Define the throughput.
            group.throughput(Throughput::Elements(number_of_events));

            // Get a bencher.
            group.bench_with_input(
                BenchmarkId::new(store_name, number_of_events),
                &operations,
                |bencher, operations| {
                    // Bench the routine.
                    bencher.to_async(&runtime).iter_batched(
                        || operations.clone(),
                        |operations| async {
                            // The routine to bench!

                            let mut linked_chunk = LinkedChunk::<DEFAULT_CHUNK_CAPACITY, Event, Gap>::new_with_update_history();

                            for operation in operations {
                                match operation {
                                    Operation::PushItemsBack(events) => linked_chunk.push_items_back(events),
                                    Operation::PushGapBack(gap) => linked_chunk.push_gap_back(gap),
                                }
                            }

                            if let Some(store) = &store {
                                let updates = linked_chunk.updates().unwrap().take();
                                store.handle_linked_chunk_updates(room_id, updates).await.unwrap();
                                // Empty the store.
                                store.handle_linked_chunk_updates(room_id, vec![Update::Clear]).await.unwrap();
                            }

                        },
                        BatchSize::SmallInput
                    )
                },
            );

            {
                let _guard = runtime.enter();
                drop(store);
            }
        }
    }

    group.finish()
}

fn criterion() -> Criterion {
    #[cfg(target_os = "linux")]
    let criterion = Criterion::default().with_profiler(pprof::criterion::PProfProfiler::new(
        100,
        pprof::criterion::Output::Flamegraph(None),
    ));
    #[cfg(not(target_os = "linux"))]
    let criterion = Criterion::default();

    criterion
}

criterion_group! {
    name = event_cache;
    config = criterion();
    targets = writing,
}
criterion_main!(event_cache);
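
Usage note: given the new [[bench]] entry with harness = false and the criterion_main!(event_cache) entry point, this benchmark should be runnable through Criterion's standard Cargo integration, for example with cargo bench --bench linked_chunk against the benchmarks crate; the exact package name is assumed from the benchmarks/ directory and is not stated in this diff.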
