Skip to content

Commit df6108c

Browse files
committed
fix(event cache): override reached_start when there's a mismatch between network and disk
It could be that we have a mismatch between network and disk, after running a back-pagination:

- the network indicates the start of the timeline, i.e. there's no previous-batch token,
- but in the persisted storage, we do have an initial empty events chunk.

Because of this, we could have weird transitions from "I've reached the start of the room" to "I haven't actually reached it", when calling the `run_backwards()` method manually.

This patch rewrites the logic for returning `reached_start`, so that it's more precise:

- when reloading an events chunk from disk, rely on the previous-chunk property to indicate whether we've reached the start of the timeline, thus avoiding unnecessary back-pagination calls.
- after resolving a gap via the network, override the result of `reached_start` with a boolean that indicates 1. there are no more gaps, and 2. there's no previous chunk (actual previous or lazily-loaded).

In the future, we should consider NOT having empty events chunks, if we can.
1 parent 337faf2 commit df6108c

File tree

4 files changed

+60
-49
lines changed

4 files changed

+60
-49
lines changed

crates/matrix-sdk-common/src/linked_chunk/mod.rs

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1260,6 +1260,12 @@ impl<const CAPACITY: usize, Item, Gap> Chunk<CAPACITY, Item, Gap> {
12601260
!self.is_gap()
12611261
}
12621262

1263+
/// Is this the definitive first chunk, even in the presence of
1264+
/// lazy-loading?
1265+
pub fn is_definitive_head(&self) -> bool {
1266+
self.previous.is_none() && self.lazy_previous.is_none()
1267+
}
1268+
12631269
/// Check whether this current chunk is the first chunk.
12641270
fn is_first_chunk(&self) -> bool {
12651271
self.previous.is_none()

crates/matrix-sdk/src/event_cache/pagination.rs

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ impl RoomPagination {
9898

9999
// Try to load one chunk backwards. If it returns events, no need to reach the
100100
// network!
101+
101102
match self.inner.state.write().await.load_more_events_backwards().await? {
102103
LoadMoreEventsBackwardsOutcome::Gap => {
103104
// continue, let's resolve this gap!
@@ -107,7 +108,11 @@ impl RoomPagination {
107108
return Ok(Some(BackPaginationOutcome { reached_start: true, events: vec![] }))
108109
}
109110

110-
LoadMoreEventsBackwardsOutcome::Events(events, sync_timeline_events_diffs) => {
111+
LoadMoreEventsBackwardsOutcome::Events {
112+
events,
113+
sync_timeline_events_diffs,
114+
reached_start,
115+
} => {
111116
if !sync_timeline_events_diffs.is_empty() {
112117
let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents {
113118
diffs: sync_timeline_events_diffs,
@@ -116,7 +121,7 @@ impl RoomPagination {
116121
}
117122

118123
return Ok(Some(BackPaginationOutcome {
119-
reached_start: false,
124+
reached_start,
120125
// This is a backwards pagination. `BackPaginationOutcome` expects events to
121126
// be in “reverse order”.
122127
events: events.into_iter().rev().collect(),
@@ -261,6 +266,20 @@ impl RoomPagination {
261266
})
262267
.await?;
263268

269+
// There could be an inconsistency between the network (which thinks we hit the
270+
// start of the timeline) and the disk (which has the initial empty
271+
// chunks), so tweak the `reached_start` value so that it reflects the disk
272+
// state in priority instead.
273+
let reached_start = {
274+
// There's no gaps.
275+
!state.events().chunks().any(|chunk| chunk.is_gap()) &&
276+
// The first chunk has no predecessor.
277+
state.events()
278+
.chunks()
279+
.next()
280+
.map_or(reached_start, |chunk| chunk.is_definitive_head())
281+
};
282+
264283
let backpagination_outcome = BackPaginationOutcome { events, reached_start };
265284

266285
if !sync_timeline_events_diffs.is_empty() {

crates/matrix-sdk/src/event_cache/room/mod.rs

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -534,7 +534,11 @@ pub(super) enum LoadMoreEventsBackwardsOutcome {
534534
StartOfTimeline,
535535

536536
/// Events have been inserted.
537-
Events(Vec<TimelineEvent>, Vec<VectorDiff<TimelineEvent>>),
537+
Events {
538+
events: Vec<TimelineEvent>,
539+
sync_timeline_events_diffs: Vec<VectorDiff<TimelineEvent>>,
540+
reached_start: bool,
541+
},
538542
}
539543

540544
// Use a private module to hide `events` to this parent module.
@@ -727,7 +731,13 @@ mod private {
727731

728732
let events = match &new_first_chunk.content {
729733
ChunkContent::Gap(_) => None,
730-
ChunkContent::Items(events) => Some(events.clone()),
734+
ChunkContent::Items(events) => {
735+
// We've reached the start on disk, if and only if, there was no chunk prior to
736+
// the one we just loaded.
737+
let reached_start = new_first_chunk.previous.is_none();
738+
739+
Some((events.clone(), reached_start))
740+
}
731741
};
732742

733743
if let Err(err) = self.events.insert_new_chunk_as_first(new_first_chunk) {
@@ -749,9 +759,11 @@ mod private {
749759

750760
Ok(match events {
751761
None => LoadMoreEventsBackwardsOutcome::Gap,
752-
Some(events) => {
753-
LoadMoreEventsBackwardsOutcome::Events(events, updates_as_vector_diffs)
754-
}
762+
Some((events, reached_start)) => LoadMoreEventsBackwardsOutcome::Events {
763+
events,
764+
sync_timeline_events_diffs: updates_as_vector_diffs,
765+
reached_start,
766+
},
755767
})
756768
}
757769

@@ -1862,8 +1874,6 @@ mod tests {
18621874
#[cfg(not(target_arch = "wasm32"))] // This uses the cross-process lock, so needs time support.
18631875
#[async_test]
18641876
async fn test_shrink_to_last_chunk() {
1865-
use std::ops::Not as _;
1866-
18671877
use eyeball_im::VectorDiff;
18681878

18691879
use crate::{assert_let_timeout, event_cache::RoomEventCacheUpdate};
@@ -1930,7 +1940,7 @@ mod tests {
19301940
let outcome = room_event_cache.pagination().run_backwards_once(20).await.unwrap();
19311941
assert_eq!(outcome.events.len(), 1);
19321942
assert_eq!(outcome.events[0].event_id().as_deref(), Some(evid1));
1933-
assert!(outcome.reached_start.not());
1943+
assert!(outcome.reached_start);
19341944

19351945
// We also get an update about the loading from the store.
19361946
assert_let_timeout!(
@@ -1974,6 +1984,6 @@ mod tests {
19741984
let outcome = room_event_cache.pagination().run_backwards_once(20).await.unwrap();
19751985
assert_eq!(outcome.events.len(), 1);
19761986
assert_eq!(outcome.events[0].event_id().as_deref(), Some(evid1));
1977-
assert!(outcome.reached_start.not());
1987+
assert!(outcome.reached_start);
19781988
}
19791989
}

crates/matrix-sdk/tests/integration/event_cache.rs

Lines changed: 14 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -1189,7 +1189,14 @@ async fn test_no_gap_stored_after_deduplicated_sync() {
11891189
// only contains the events returned by the sync.
11901190
//
11911191
// The first back-pagination will hit the network, and let us know we've reached
1192-
// the end of the room.
1192+
// the end of the room, but! we do have the default initial events chunk in
1193+
// storage.
1194+
1195+
let outcome = pagination.run_backwards_once(20).await.unwrap();
1196+
assert!(outcome.reached_start.not());
1197+
assert!(outcome.events.is_empty());
1198+
1199+
// Loading the default initial event chunk.
11931200
let outcome = pagination.run_backwards_once(20).await.unwrap();
11941201
assert!(outcome.reached_start);
11951202
assert!(outcome.events.is_empty());
@@ -1222,14 +1229,10 @@ async fn test_no_gap_stored_after_deduplicated_sync() {
12221229
//
12231230
// The sync was limited, which unloaded the linked chunk, and reloaded only the
12241231
// final events chunk.
1225-
//
1226-
// There's an empty events chunk at the start of *every* linked chunk, so the
1227-
// next pagination will return it, and since the chunk is lazily loaded, the
1228-
// pagination doesn't know *yet* it's reached the start of the linked chunk.
12291232

12301233
let outcome = pagination.run_backwards_once(20).await.unwrap();
12311234
assert!(outcome.events.is_empty());
1232-
assert!(!outcome.reached_start);
1235+
assert!(outcome.reached_start);
12331236

12341237
{
12351238
let (events, _) = room_event_cache.subscribe().await;
@@ -1238,13 +1241,6 @@ async fn test_no_gap_stored_after_deduplicated_sync() {
12381241
assert_event_matches_msg(&events[2], "sup");
12391242
assert_eq!(events.len(), 3);
12401243
}
1241-
1242-
// Now, lazy-loading notices we've reached the start of the chunk, and reports
1243-
// it as such.
1244-
1245-
let outcome = pagination.run_backwards_once(20).await.unwrap();
1246-
assert!(outcome.events.is_empty());
1247-
assert!(outcome.reached_start);
12481244
}
12491245

12501246
#[async_test]
@@ -1336,7 +1332,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() {
13361332
let pagination = room_event_cache.pagination();
13371333

13381334
let outcome = pagination.run_backwards_once(20).await.unwrap();
1339-
assert!(outcome.reached_start);
1335+
assert!(outcome.reached_start.not());
13401336
assert!(outcome.events.is_empty());
13411337
assert!(stream.is_empty());
13421338

@@ -1373,13 +1369,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() {
13731369
// we shouldn't have to, since it is useless; all events were deduplicated
13741370
// from the previous pagination.
13751371

1376-
// Instead, we're lazy-loading an empty events chunks.
1377-
let outcome = pagination.run_backwards_once(20).await.unwrap();
1378-
assert!(outcome.reached_start.not());
1379-
assert!(outcome.events.is_empty());
1380-
assert!(stream.is_empty());
1381-
1382-
// And finally hit the start of the timeline.
1372+
// Instead, we're lazy-loading the empty initial events chunks.
13831373
let outcome = pagination.run_backwards_once(20).await.unwrap();
13841374
assert!(outcome.reached_start);
13851375
assert!(outcome.events.is_empty());
@@ -1948,9 +1938,8 @@ async fn test_lazy_loading() {
19481938
assert_event_id!(outcome.events[4], "$ev0_1");
19491939
assert_event_id!(outcome.events[5], "$ev0_0");
19501940

1951-
// The start of the timeline isn't reached yet. What we know for the moment is
1952-
// that we get new events.
1953-
assert!(outcome.reached_start.not());
1941+
// This was the start of the timeline \o/
1942+
assert!(outcome.reached_start);
19541943

19551944
// Let's check the stream for the last time.
19561945
let update = updates_stream.recv().await.unwrap();
@@ -1978,20 +1967,7 @@ async fn test_lazy_loading() {
19781967
assert_event_id!(event, "$ev0_5");
19791968
});
19801969
});
1981-
1982-
assert!(updates_stream.is_empty());
19831970
}
19841971

1985-
// Final pagination? Oh yeah;
1986-
// This time, the first chunk is loaded and there is nothing else to do, no gap,
1987-
// nothing. We've reached the start of the timeline!
1988-
{
1989-
let outcome = room_event_cache.pagination().run_backwards_until(1).await.unwrap();
1990-
1991-
// No events, hmmm…
1992-
assert!(outcome.events.is_empty());
1993-
1994-
// … that's because the start of the timeline is finally reached!
1995-
assert!(outcome.reached_start);
1996-
}
1972+
assert!(updates_stream.is_empty());
19971973
}

0 commit comments

Comments (0)