@@ -212,8 +212,7 @@ impl TransactionExtForLinkedChunks for Transaction<'_> {
 
         match chunk_type {
             CHUNK_TYPE_GAP_TYPE_STRING => {
-                // It's a gap! There's at most one row for it in the database, so a
-                // call to `query_row` is sufficient.
+                // It's a gap!
                 let gap = self.load_gap_content(store, room_id, id)?;
                 Ok(RawChunk { content: ChunkContent::Gap(gap), previous, identifier: id, next })
             }
@@ -244,6 +243,8 @@ impl TransactionExtForLinkedChunks for Transaction<'_> {
         room_id: &Key,
         chunk_id: ChunkIdentifier,
     ) -> Result<Gap> {
+        // There's at most one row for it in the database, so a call to `query_row` is
+        // sufficient.
         let encoded_prev_token: Vec<u8> = self.query_row(
             "SELECT prev_token FROM gaps WHERE chunk_id = ? AND room_id = ?",
             (chunk_id.index(), &room_id),
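
Side note, not part of the patch: the moved comment leans on the difference between rusqlite's `query_row`, which expects exactly one matching row and fails with `QueryReturnedNoRows` otherwise, and the `.optional()` adapter used in the later hunk, which turns that error into `Ok(None)`. A minimal, self-contained sketch of that behaviour, using a throwaway in-memory table that only loosely mirrors the real `gaps` schema:

```rust
use rusqlite::{Connection, OptionalExtension, Result};

fn main() -> Result<()> {
    // Throwaway in-memory table; the real `gaps` schema is likely different.
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE gaps (chunk_id INTEGER PRIMARY KEY, room_id BLOB, prev_token BLOB);
         INSERT INTO gaps VALUES (42, x'00', x'01');",
    )?;

    // At most one row can match the key, so `query_row` is sufficient; it fails
    // with `QueryReturnedNoRows` if nothing matches.
    let prev_token: Vec<u8> = conn.query_row(
        "SELECT prev_token FROM gaps WHERE chunk_id = ?",
        (42,),
        |row| row.get(0),
    )?;
    assert_eq!(prev_token, vec![0x01]);

    // When the row may legitimately be absent, `.optional()` (from
    // `rusqlite::OptionalExtension`) maps the "no rows" error to `Ok(None)`.
    let missing: Option<Vec<u8>> = conn
        .query_row("SELECT prev_token FROM gaps WHERE chunk_id = ?", (7,), |row| row.get(0))
        .optional()?;
    assert!(missing.is_none());

    Ok(())
}
```
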
@@ -275,9 +276,9 @@ impl TransactionExtForLinkedChunks for Transaction<'_> {
         {
             let encoded_content = event_data?;
             let serialized_content = store.decode_value(&encoded_content)?;
-            let sync_timeline_event = serde_json::from_slice(&serialized_content)?;
+            let event = serde_json::from_slice(&serialized_content)?;
 
-            events.push(sync_timeline_event);
+            events.push(event);
         }
 
         Ok(events)
@@ -615,17 +616,122 @@ impl EventCacheStore for SqliteEventCacheStore {
 
     async fn load_last_chunk(
         &self,
-        _room_id: &RoomId,
+        room_id: &RoomId,
     ) -> Result<(Option<RawChunk<Event, Gap>>, ChunkIdentifierGenerator), Self::Error> {
-        todo!()
+        let room_id = room_id.to_owned();
+        let hashed_room_id = self.encode_key(keys::LINKED_CHUNKS, &room_id);
+
+        let this = self.clone();
+
+        self
+            .acquire()
+            .await?
+            .with_transaction(move |txn| -> Result<_> {
+                // Find the latest chunk identifier to generate a `ChunkIdentifierGenerator`.
+                let chunk_identifier_generator = match txn
+                    .prepare(
+                        "SELECT MAX(id) FROM linked_chunks WHERE room_id = ?"
+                    )?
+                    .query_row(
+                        (&hashed_room_id,),
+                        |row| {
+                            row.get::<_, u64>(0)
+                        }
+                    ).optional()?
+                {
+                    Some(last_chunk_identifier) => {
+                        ChunkIdentifierGenerator::new_from_previous_chunk_identifier(
+                            ChunkIdentifier::new(last_chunk_identifier)
+                        )
+                    },
+                    None => ChunkIdentifierGenerator::new_from_scratch(),
+                };
+
+                // Find the last chunk.
+                let Some((chunk_identifier, previous_chunk, chunk_type)) = txn
+                    .prepare(
+                        "SELECT id, previous, type FROM linked_chunks WHERE room_id = ? AND next IS NULL"
+                    )?
+                    .query_row(
+                        (&hashed_room_id,),
+                        |row| {
+                            Ok((
+                                row.get::<_, u64>(0)?,
+                                row.get::<_, Option<u64>>(1)?,
+                                row.get::<_, String>(2)?,
+                            ))
+                        }
+                    )
+                    .optional()?
+                else {
+                    // Chunk is not found.
+                    return Ok((None, chunk_identifier_generator));
+                };
+
+                // Build the chunk.
+                let last_chunk = txn.rebuild_chunk(
+                    &this,
+                    &hashed_room_id,
+                    previous_chunk,
+                    chunk_identifier,
+                    None,
+                    &chunk_type
+                )?;
+
+                Ok((Some(last_chunk), chunk_identifier_generator))
+            })
+            .await
     }
 
     async fn load_previous_chunk(
         &self,
-        _room_id: &RoomId,
-        _before_chunk_identifier: ChunkIdentifier,
+        room_id: &RoomId,
+        before_chunk_identifier: ChunkIdentifier,
     ) -> Result<Option<RawChunk<Event, Gap>>, Self::Error> {
-        todo!()
+        let room_id = room_id.to_owned();
+        let hashed_room_id = self.encode_key(keys::LINKED_CHUNKS, &room_id);
+
+        let this = self.clone();
+
+        self
+            .acquire()
+            .await?
+            .with_transaction(move |txn| -> Result<_> {
+                // Find the chunk before the chunk identified by `before_chunk_identifier`.
+                let Some((chunk_identifier, previous_chunk, next_chunk, chunk_type)) = txn
+                    .prepare(
+                        "SELECT id, previous, next, type FROM linked_chunks WHERE room_id = ? AND next = ?"
+                    )?
+                    .query_row(
+                        (&hashed_room_id, before_chunk_identifier.index()),
+                        |row| {
+                            Ok((
+                                row.get::<_, u64>(0)?,
+                                row.get::<_, Option<u64>>(1)?,
+                                row.get::<_, Option<u64>>(2)?,
+                                row.get::<_, String>(3)?,
+                            ))
+                        }
+                    )
+                    .optional()?
+                else {
+                    // Chunk is not found.
+                    return Ok(None);
+                };
+
+                // Build the chunk.
+                let last_chunk = txn.rebuild_chunk(
+                    &this,
+                    &hashed_room_id,
+                    previous_chunk,
+                    chunk_identifier,
+                    next_chunk,
+                    &chunk_type
+                )?;
+
+                Ok(Some(last_chunk))
+            })
+            .await
     }
 
     async fn clear_all_rooms_chunks(&self) -> Result<(), Self::Error> {
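
For review context only, a hypothetical sketch of how a caller might consume the two methods added above: start from the last chunk, then repeatedly load the previous one until `None` comes back. The import paths and the use of `EventCacheStore` as a generic bound are assumptions of mine and may not match the actual crates; only `load_last_chunk`, `load_previous_chunk`, and the `RawChunk` fields come from this diff.

```rust
use matrix_sdk_base::event_cache::{store::EventCacheStore, Event, Gap};
use matrix_sdk_common::linked_chunk::RawChunk;
use ruma::RoomId;

/// Collect a room's chunks from the most recent one back to the first one.
async fn walk_backwards<S: EventCacheStore>(
    store: &S,
    room_id: &RoomId,
) -> Result<Vec<RawChunk<Event, Gap>>, S::Error> {
    let mut chunks = Vec::new();

    // The last chunk of the room, if any; the identifier generator is not
    // needed for a read-only walk.
    let (mut current, _chunk_identifier_generator) = store.load_last_chunk(room_id).await?;

    while let Some(chunk) = current {
        // Ask for the chunk linked before this one; `None` means the first
        // chunk has been reached. Clone the identifier before `chunk` is
        // moved into the result.
        current = store.load_previous_chunk(room_id, chunk.identifier.clone()).await?;
        chunks.push(chunk);
    }

    Ok(chunks)
}
```
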