From f0a2585fcce49ac6897eb48ad2656f19ccfe77c7 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 14:50:27 +0100 Subject: [PATCH 01/28] chore(common) Rename `CrossProcessLockKind` to `CrossProcessLockState`. This patch renames the `CrossProcessLockKind` type to `CrossProcessLockState`. --- crates/matrix-sdk-base/src/media/store/mod.rs | 6 ++--- .../src/cross_process_lock.rs | 22 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/matrix-sdk-base/src/media/store/mod.rs b/crates/matrix-sdk-base/src/media/store/mod.rs index b861673e890..5fd1642be93 100644 --- a/crates/matrix-sdk-base/src/media/store/mod.rs +++ b/crates/matrix-sdk-base/src/media/store/mod.rs @@ -33,7 +33,7 @@ use std::{ops::Deref, sync::Arc}; use matrix_sdk_common::cross_process_lock::{ CrossProcessLock, CrossProcessLockError, CrossProcessLockGeneration, CrossProcessLockGuard, - CrossProcessLockKind, TryLock, + CrossProcessLockState, TryLock, }; use matrix_sdk_store_encryption::Error as StoreEncryptionError; pub use traits::{DynMediaStore, IntoMediaStore, MediaStore, MediaStoreInner}; @@ -135,13 +135,13 @@ impl MediaStoreLock { pub async fn lock(&self) -> Result, CrossProcessLockError> { let cross_process_lock_guard = match self.cross_process_lock.spin_lock(None).await?? { // The lock is clean: no other hold acquired it, all good! - CrossProcessLockKind::Clean(guard) => guard, + CrossProcessLockState::Clean(guard) => guard, // The lock is dirty: another holder acquired it since the last time we acquired it. // It's not a problem in the case of the `MediaStore` because this API is “stateless” at // the time of writing (2025-11-11). There is nothing that can be out-of-sync: all the // state is in the database, nothing in memory. 
- CrossProcessLockKind::Dirty(guard) => { + CrossProcessLockState::Dirty(guard) => { self.cross_process_lock.clear_dirty(); guard diff --git a/crates/matrix-sdk-common/src/cross_process_lock.rs b/crates/matrix-sdk-common/src/cross_process_lock.rs index 2dbbefc168b..4467e09f1ba 100644 --- a/crates/matrix-sdk-common/src/cross_process_lock.rs +++ b/crates/matrix-sdk-common/src/cross_process_lock.rs @@ -169,7 +169,7 @@ where /// Whether the lock has been dirtied. /// - /// See [`CrossProcessLockKind::Dirty`] to learn more about the semantics + /// See [`CrossProcessLockState::Dirty`] to learn more about the semantics /// of _dirty_. is_dirty: Arc, } @@ -231,7 +231,7 @@ where /// Determine whether the cross-process lock is dirty. /// - /// See [`CrossProcessLockKind::Dirty`] to learn more about the semantics + /// See [`CrossProcessLockState::Dirty`] to learn more about the semantics /// of _dirty_. pub fn is_dirty(&self) -> bool { self.is_dirty.load(Ordering::SeqCst) @@ -254,7 +254,7 @@ where #[instrument(skip(self), fields(?self.lock_key, ?self.lock_holder))] pub async fn try_lock_once( &self, - ) -> Result, L::LockError> { + ) -> Result, L::LockError> { // Hold onto the locking attempt mutex for the entire lifetime of this // function, to avoid multiple reentrant calls. 
let mut _attempt = self.locking_attempt.lock().await; @@ -270,7 +270,7 @@ where self.num_holders.fetch_add(1, Ordering::SeqCst); - return Ok(Ok(CrossProcessLockKind::Clean(CrossProcessLockGuard::new( + return Ok(Ok(CrossProcessLockState::Clean(CrossProcessLockGuard::new( self.num_holders.clone(), )))); } @@ -399,9 +399,9 @@ where let guard = CrossProcessLockGuard::new(self.num_holders.clone()); Ok(Ok(if self.is_dirty() { - CrossProcessLockKind::Dirty(guard) + CrossProcessLockState::Dirty(guard) } else { - CrossProcessLockKind::Clean(guard) + CrossProcessLockState::Clean(guard) })) } @@ -417,7 +417,7 @@ where pub async fn spin_lock( &self, max_backoff: Option, - ) -> Result, L::LockError> { + ) -> Result, L::LockError> { let max_backoff = max_backoff.unwrap_or(MAX_BACKOFF_MS); // Note: reads/writes to the backoff are racy across threads in theory, but the @@ -467,7 +467,7 @@ where /// Represent a successful result of a locking attempt, either by /// [`CrossProcessLock::try_lock_once`] or [`CrossProcessLock::spin_lock`]. #[derive(Debug)] -pub enum CrossProcessLockKind { +pub enum CrossProcessLockState { /// The lock has been obtained successfully, all good. Clean(CrossProcessLockGuard), @@ -487,7 +487,7 @@ pub enum CrossProcessLockKind { Dirty(CrossProcessLockGuard), } -impl CrossProcessLockKind { +impl CrossProcessLockState { /// Map this value into the inner [`CrossProcessLockGuard`]. 
pub fn into_guard(self) -> CrossProcessLockGuard { match self { @@ -544,7 +544,7 @@ mod tests { }; use super::{ - CrossProcessLock, CrossProcessLockError, CrossProcessLockGeneration, CrossProcessLockKind, + CrossProcessLock, CrossProcessLockError, CrossProcessLockGeneration, CrossProcessLockState, CrossProcessLockUnobtained, EXTEND_LEASE_EVERY_MS, TryLock, memory_store_helper::{Lease, try_take_leased_lock}, }; @@ -588,7 +588,7 @@ mod tests { } } - async fn release_lock(lock: CrossProcessLockKind) { + async fn release_lock(lock: CrossProcessLockState) { drop(lock); sleep(Duration::from_millis(EXTEND_LEASE_EVERY_MS)).await; } From 1346561ecdf21ce6ea4617537e24b2099d375cde Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 14:55:56 +0100 Subject: [PATCH 02/28] feat(common) Add `#[must_use]` on `CrossProcessLockGuard` and `*State`. This patch adds a `#[must_use]` attribute on `CrossProcessLockGuard` and `CrossProcessLockState` to avoid a misuse. --- crates/matrix-sdk-common/src/cross_process_lock.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/matrix-sdk-common/src/cross_process_lock.rs b/crates/matrix-sdk-common/src/cross_process_lock.rs index 4467e09f1ba..f9a74dd6e82 100644 --- a/crates/matrix-sdk-common/src/cross_process_lock.rs +++ b/crates/matrix-sdk-common/src/cross_process_lock.rs @@ -108,6 +108,7 @@ enum WaitingTime { /// The lock will be automatically released a short period of time after all the /// guards have dropped. #[derive(Debug)] +#[must_use = "If unused, the `CrossProcessLock` will unlock at the end of the lease"] pub struct CrossProcessLockGuard { num_holders: Arc, } @@ -467,6 +468,7 @@ where /// Represent a successful result of a locking attempt, either by /// [`CrossProcessLock::try_lock_once`] or [`CrossProcessLock::spin_lock`]. #[derive(Debug)] +#[must_use = "If unused, the `CrossProcessLock` will unlock at the end of the lease"] pub enum CrossProcessLockState { /// The lock has been obtained successfully, all good. 
Clean(CrossProcessLockGuard), From b8e6bda224b821df345ac642d2e5c3ac49fe546c Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 15:35:44 +0100 Subject: [PATCH 03/28] feat(common): Add `CrossProcessLockState::map`. This patch adds the `CrossProcessLockState::map` method along with its companion `MappedCrossProcessLockState` type. The idea is to facilitate the creation of custom `CrossProcessLockState`-like type in various usage of the cross-process lock. --- .../src/cross_process_lock.rs | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/crates/matrix-sdk-common/src/cross_process_lock.rs b/crates/matrix-sdk-common/src/cross_process_lock.rs index f9a74dd6e82..7f834c30371 100644 --- a/crates/matrix-sdk-common/src/cross_process_lock.rs +++ b/crates/matrix-sdk-common/src/cross_process_lock.rs @@ -496,6 +496,44 @@ impl CrossProcessLockState { Self::Clean(guard) | Self::Dirty(guard) => guard, } } + + /// Map this [`CrossProcessLockState`] into a + /// [`MappedCrossProcessLockState`]. + /// + /// This is helpful when one wants to create its own wrapper over + /// [`CrossProcessLockGuard`]. + pub fn map(self, mapper: F) -> MappedCrossProcessLockState + where + F: FnOnce(CrossProcessLockGuard) -> G, + { + match self { + Self::Clean(guard) => MappedCrossProcessLockState::Clean(mapper(guard)), + Self::Dirty(guard) => MappedCrossProcessLockState::Dirty(mapper(guard)), + } + } +} + +/// A mapped [`CrossProcessLockState`]. +/// +/// Created by [`CrossProcessLockState::map`]. +#[derive(Debug)] +#[must_use = "If unused, the `CrossProcessLock` will unlock at the end of the lease"] +pub enum MappedCrossProcessLockState { + /// The equivalent of [`CrossProcessLockState::Clean`]. + Clean(G), + + /// The equivalent of [`CrossProcessLockState::Dirty`]. + Dirty(G), +} + +impl MappedCrossProcessLockState { + /// Return `Some(G)` if `Self` is [`Clean`][Self::Clean]. 
+ pub fn as_clean(&self) -> Option<&G> { + match self { + Self::Clean(guard) => Some(guard), + Self::Dirty(_) => None, + } + } } /// Represent an unsuccessful result of a lock attempt, either by From cfb77524ea6884a8064fd623962dfd30e179fee8 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 15:49:35 +0100 Subject: [PATCH 04/28] feat(base) Create the `EventCacheStoreLockState` type. This patch updates `EventCacheStoreLock::lock()` to return an `EventCacheStoreLockState` instead of an `EventCacheStoreLockGuard`, so that the caller has to handle dirty locks. --- .../matrix-sdk-base/src/event_cache/store/mod.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/crates/matrix-sdk-base/src/event_cache/store/mod.rs b/crates/matrix-sdk-base/src/event_cache/store/mod.rs index 3062b3de361..5d8638ce3f5 100644 --- a/crates/matrix-sdk-base/src/event_cache/store/mod.rs +++ b/crates/matrix-sdk-base/src/event_cache/store/mod.rs @@ -29,7 +29,7 @@ mod traits; use matrix_sdk_common::cross_process_lock::{ CrossProcessLock, CrossProcessLockError, CrossProcessLockGeneration, CrossProcessLockGuard, - TryLock, + MappedCrossProcessLockState, TryLock, }; pub use matrix_sdk_store_encryption::Error as StoreEncryptionError; use ruma::{OwnedEventId, events::AnySyncTimelineEvent, serde::Raw}; @@ -83,13 +83,21 @@ impl EventCacheStoreLock { } /// Acquire a spin lock (see [`CrossProcessLock::spin_lock`]). 
- pub async fn lock(&self) -> Result, CrossProcessLockError> { - let cross_process_lock_guard = self.cross_process_lock.spin_lock(None).await??.into_guard(); + pub async fn lock(&self) -> Result, CrossProcessLockError> { + let lock_state = + self.cross_process_lock.spin_lock(None).await??.map(|cross_process_lock_guard| { + EventCacheStoreLockGuard { cross_process_lock_guard, store: self.store.deref() } + }); - Ok(EventCacheStoreLockGuard { cross_process_lock_guard, store: self.store.deref() }) + Ok(lock_state) } } +/// The equivalent of [`CrossProcessLockState`] but for the [`EventCacheStore`]. +/// +/// [`CrossProcessLockState`]: matrix_sdk_common::cross_process_lock::CrossProcessLockState +pub type EventCacheStoreLockState<'a> = MappedCrossProcessLockState>; + /// An RAII implementation of a “scoped lock” of an [`EventCacheStoreLock`]. /// When this structure is dropped (falls out of scope), the lock will be /// unlocked. From 1ae1ea52688137d1251cda6a9963f71128cd03e8 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 17:32:21 +0100 Subject: [PATCH 05/28] chore(sdk): Clean up imports. 
--- crates/matrix-sdk/src/event_cache/room/mod.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 800df158413..0c82af3ff9c 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -616,8 +616,7 @@ mod private { }, linked_chunk::{ ChunkContent, ChunkIdentifierGenerator, ChunkMetadata, LinkedChunkId, - OwnedLinkedChunkId, Position, Update, - lazy_loader::{self}, + OwnedLinkedChunkId, Position, Update, lazy_loader, }, serde_helpers::{extract_edit_target, extract_thread_root}, sync::Timeline, @@ -636,16 +635,16 @@ mod private { use tracing::{debug, error, instrument, trace, warn}; use super::{ - super::{EventCacheError, deduplicator::DeduplicationOutcome}, + super::{ + BackPaginationOutcome, EventCacheError, RoomEventCacheLinkedChunkUpdate, + RoomPaginationStatus, ThreadEventCacheUpdate, + deduplicator::{DeduplicationOutcome, filter_duplicate_events}, + room::threads::ThreadEventCache, + }, EventLocation, LoadMoreEventsBackwardsOutcome, events::EventLinkedChunk, sort_positions_descending, }; - use crate::event_cache::{ - BackPaginationOutcome, RoomEventCacheLinkedChunkUpdate, RoomPaginationStatus, - ThreadEventCacheUpdate, deduplicator::filter_duplicate_events, - room::threads::ThreadEventCache, - }; /// State for a single room's event cache. /// From 4c836551ca20b410992e44cad409e93d06a4d937 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 17:33:00 +0100 Subject: [PATCH 06/28] feat(common): Add `CrossProcessLockGuard::is_dirty` and `::clear_dirty`. This patch replicates the `is_dirty` and `clear_dirty` methods from `CrossProcessLock` to `CrossProcessLockGuard`. It allows to get an access to this API from a guard when one doesn't have the cross-process lock at hand. 
--- crates/matrix-sdk-base/src/media/store/mod.rs | 2 +- .../src/cross_process_lock.rs | 31 ++++++++++++++++--- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/crates/matrix-sdk-base/src/media/store/mod.rs b/crates/matrix-sdk-base/src/media/store/mod.rs index 5fd1642be93..b7f6b4ba02b 100644 --- a/crates/matrix-sdk-base/src/media/store/mod.rs +++ b/crates/matrix-sdk-base/src/media/store/mod.rs @@ -142,7 +142,7 @@ impl MediaStoreLock { // the time of writing (2025-11-11). There is nothing that can be out-of-sync: all the // state is in the database, nothing in memory. CrossProcessLockState::Dirty(guard) => { - self.cross_process_lock.clear_dirty(); + guard.clear_dirty(); guard } diff --git a/crates/matrix-sdk-common/src/cross_process_lock.rs b/crates/matrix-sdk-common/src/cross_process_lock.rs index 7f834c30371..48a873bbd43 100644 --- a/crates/matrix-sdk-common/src/cross_process_lock.rs +++ b/crates/matrix-sdk-common/src/cross_process_lock.rs @@ -110,12 +110,35 @@ enum WaitingTime { #[derive(Debug)] #[must_use = "If unused, the `CrossProcessLock` will unlock at the end of the lease"] pub struct CrossProcessLockGuard { + /// A clone of [`CrossProcessLock::num_holders`]. num_holders: Arc, + + /// A clone of [`CrossProcessLock::is_dirty`]. + is_dirty: Arc, } impl CrossProcessLockGuard { - fn new(num_holders: Arc) -> Self { - Self { num_holders } + fn new(num_holders: Arc, is_dirty: Arc) -> Self { + Self { num_holders, is_dirty } + } + + /// Determine whether the cross-process lock associated to this guard is + /// dirty. + /// + /// See [`CrossProcessLockState::Dirty`] to learn more about the semantics + /// of _dirty_. + pub fn is_dirty(&self) -> bool { + self.is_dirty.load(Ordering::SeqCst) + } + + /// Clear the dirty state from the cross-process lock associated to this + /// guard. + /// + /// If the cross-process lock is dirtied, it will remain dirtied until + /// this method is called. 
This allows recovering from a dirty state and + /// marking that it has recovered. + pub fn clear_dirty(&self) { + self.is_dirty.store(false, Ordering::SeqCst); } } @@ -245,7 +268,6 @@ where /// marking that it has recovered. pub fn clear_dirty(&self) { self.is_dirty.store(false, Ordering::SeqCst); - self.generation.store(NO_CROSS_PROCESS_LOCK_GENERATION, Ordering::SeqCst); } /// Try to lock once, returns whether the lock was obtained or not. @@ -273,6 +295,7 @@ where return Ok(Ok(CrossProcessLockState::Clean(CrossProcessLockGuard::new( self.num_holders.clone(), + self.is_dirty.clone(), )))); } @@ -397,7 +420,7 @@ where self.num_holders.fetch_add(1, Ordering::SeqCst); - let guard = CrossProcessLockGuard::new(self.num_holders.clone()); + let guard = CrossProcessLockGuard::new(self.num_holders.clone(), self.is_dirty.clone()); Ok(Ok(if self.is_dirty() { CrossProcessLockState::Dirty(guard) From 2426225d5d6ee1d6b6b0e8d04977a9968993c1ae Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 17:38:01 +0100 Subject: [PATCH 07/28] feat(base): Add `EventCacheStoreLockGuard::clear_dirty`. --- crates/matrix-sdk-base/src/event_cache/store/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/crates/matrix-sdk-base/src/event_cache/store/mod.rs b/crates/matrix-sdk-base/src/event_cache/store/mod.rs index 5d8638ce3f5..24727a4bfcb 100644 --- a/crates/matrix-sdk-base/src/event_cache/store/mod.rs +++ b/crates/matrix-sdk-base/src/event_cache/store/mod.rs @@ -110,6 +110,16 @@ pub struct EventCacheStoreLockGuard<'a> { store: &'a DynEventCacheStore, } +impl<'a> EventCacheStoreLockGuard<'a> { + /// Forward to [`CrossProcessLockGuard::clear_dirty`]. + /// + /// This is an associated method to avoid colliding with the [`Deref`] + /// implementation. 
+ pub fn clear_dirty(this: &Self) { + this.cross_process_lock_guard.clear_dirty(); + } +} + #[cfg(not(tarpaulin_include))] impl fmt::Debug for EventCacheStoreLockGuard<'_> { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { From dc87b1c316e5a25fa3d26d79433cae79cb9ebcaa Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 18:39:46 +0100 Subject: [PATCH 08/28] refactor(base): `EventCacheStoreLockState` owns a clone of the inner store. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch changes `EventCacheStoreLockState` to own a clone of the inner store. It helps to remove the `'a` lifetime, and so it “disconnects” from the lifetime of the store. --- .../src/event_cache/store/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/matrix-sdk-base/src/event_cache/store/mod.rs b/crates/matrix-sdk-base/src/event_cache/store/mod.rs index 24727a4bfcb..3ac1d4bc70d 100644 --- a/crates/matrix-sdk-base/src/event_cache/store/mod.rs +++ b/crates/matrix-sdk-base/src/event_cache/store/mod.rs @@ -83,10 +83,10 @@ impl EventCacheStoreLock { } /// Acquire a spin lock (see [`CrossProcessLock::spin_lock`]). - pub async fn lock(&self) -> Result, CrossProcessLockError> { + pub async fn lock(&self) -> Result { let lock_state = self.cross_process_lock.spin_lock(None).await??.map(|cross_process_lock_guard| { - EventCacheStoreLockGuard { cross_process_lock_guard, store: self.store.deref() } + EventCacheStoreLockGuard { cross_process_lock_guard, store: self.store.clone() } }); Ok(lock_state) @@ -96,21 +96,21 @@ impl EventCacheStoreLock { /// The equivalent of [`CrossProcessLockState`] but for the [`EventCacheStore`]. 
/// /// [`CrossProcessLockState`]: matrix_sdk_common::cross_process_lock::CrossProcessLockState -pub type EventCacheStoreLockState<'a> = MappedCrossProcessLockState>; +pub type EventCacheStoreLockState = MappedCrossProcessLockState; /// An RAII implementation of a “scoped lock” of an [`EventCacheStoreLock`]. /// When this structure is dropped (falls out of scope), the lock will be /// unlocked. -pub struct EventCacheStoreLockGuard<'a> { +pub struct EventCacheStoreLockGuard { /// The cross process lock guard. #[allow(unused)] cross_process_lock_guard: CrossProcessLockGuard, /// A reference to the store. - store: &'a DynEventCacheStore, + store: Arc, } -impl<'a> EventCacheStoreLockGuard<'a> { +impl EventCacheStoreLockGuard { /// Forward to [`CrossProcessLockGuard::clear_dirty`]. /// /// This is an associated method to avoid colliding with the [`Deref`] @@ -121,17 +121,17 @@ impl<'a> EventCacheStoreLockGuard<'a> { } #[cfg(not(tarpaulin_include))] -impl fmt::Debug for EventCacheStoreLockGuard<'_> { +impl fmt::Debug for EventCacheStoreLockGuard { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.debug_struct("EventCacheStoreLockGuard").finish_non_exhaustive() } } -impl Deref for EventCacheStoreLockGuard<'_> { +impl Deref for EventCacheStoreLockGuard { type Target = DynEventCacheStore; fn deref(&self) -> &Self::Target { - self.store + self.store.as_ref() } } From 81cac326db3fa5042c52900a8253f612b400c98a Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 12 Nov 2025 13:13:09 +0100 Subject: [PATCH 09/28] feat(common): `CrossProcessLockGuard` can be cloned. This patch implements `Clone` for `CrossProcessLockGuard`. 
--- crates/matrix-sdk-common/src/cross_process_lock.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk-common/src/cross_process_lock.rs b/crates/matrix-sdk-common/src/cross_process_lock.rs index 48a873bbd43..e55c37d1e5c 100644 --- a/crates/matrix-sdk-common/src/cross_process_lock.rs +++ b/crates/matrix-sdk-common/src/cross_process_lock.rs @@ -107,7 +107,7 @@ enum WaitingTime { /// /// The lock will be automatically released a short period of time after all the /// guards have dropped. -#[derive(Debug)] +#[derive(Clone, Debug)] #[must_use = "If unused, the `CrossProcessLock` will unlock at the end of the lease"] pub struct CrossProcessLockGuard { /// A clone of [`CrossProcessLock::num_holders`]. From 7d5e95d8e1bfba54d5cc17d00a3ea1fbef3f7132 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 12 Nov 2025 13:13:55 +0100 Subject: [PATCH 10/28] feat(base): `EventCacheStoreLockGuard` can be cloned. This patch implements `Clone` for `EventCacheStoreLockGuard`. --- crates/matrix-sdk-base/src/event_cache/store/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/matrix-sdk-base/src/event_cache/store/mod.rs b/crates/matrix-sdk-base/src/event_cache/store/mod.rs index 3ac1d4bc70d..c95f983e16d 100644 --- a/crates/matrix-sdk-base/src/event_cache/store/mod.rs +++ b/crates/matrix-sdk-base/src/event_cache/store/mod.rs @@ -101,6 +101,7 @@ pub type EventCacheStoreLockState = MappedCrossProcessLockState Date: Tue, 11 Nov 2025 17:36:00 +0100 Subject: [PATCH 11/28] fix(base): Use the `EventCacheStoreLockState`. 
--- crates/matrix-sdk-base/src/client.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/matrix-sdk-base/src/client.rs b/crates/matrix-sdk-base/src/client.rs index 4837483ac8e..4f273010f14 100644 --- a/crates/matrix-sdk-base/src/client.rs +++ b/crates/matrix-sdk-base/src/client.rs @@ -57,7 +57,7 @@ use crate::{ InviteAcceptanceDetails, RoomStateFilter, SessionMeta, deserialized_responses::DisplayName, error::{Error, Result}, - event_cache::store::EventCacheStoreLock, + event_cache::store::{EventCacheStoreLock, EventCacheStoreLockState}, media::store::MediaStoreLock, response_processors::{self as processors, Context}, room::{ @@ -1062,7 +1062,15 @@ impl BaseClient { self.state_store.forget_room(room_id).await?; // Remove the room in the event cache store too. - self.event_cache_store().lock().await?.remove_room(room_id).await?; + match self.event_cache_store().lock().await? { + // If the lock is clear, we can do the operation as expected. + // If the lock is dirty, we can ignore to refresh the state, we just need to remove a + // room. Also, we must not mark the lock as non-dirty because other operations may be + // critical and may need to refresh the `EventCache`' state. + EventCacheStoreLockState::Clean(guard) | EventCacheStoreLockState::Dirty(guard) => { + guard.remove_room(room_id).await? + } + } Ok(()) } From 486d7c315a69cd4a9981b9131c5507f0b3d603a1 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 11 Nov 2025 17:36:15 +0100 Subject: [PATCH 12/28] test(sdk): Update to use `EventCacheStoreLockState`. 
--- .../matrix-sdk/src/latest_events/latest_event.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/crates/matrix-sdk/src/latest_events/latest_event.rs b/crates/matrix-sdk/src/latest_events/latest_event.rs index ac173cf24a0..8ba63360ec7 100644 --- a/crates/matrix-sdk/src/latest_events/latest_event.rs +++ b/crates/matrix-sdk/src/latest_events/latest_event.rs @@ -292,7 +292,9 @@ mod tests_latest_event { .event_cache_store() .lock() .await - .unwrap() + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(&room_id), vec![ @@ -410,7 +412,9 @@ mod tests_latest_event { .event_cache_store() .lock() .await - .unwrap() + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(&room_id), vec![ @@ -1693,7 +1697,9 @@ mod tests_latest_event_value_builder { .event_cache_store() .lock() .await - .unwrap() + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -2316,7 +2322,9 @@ mod tests_latest_event_value_builder { .event_cache_store() .lock() .await - .unwrap() + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ From 7fe199cb2563b2f23a7437e36ddba7b4c71d065f Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 12 Nov 2025 16:09:32 +0100 Subject: [PATCH 13/28] refactor(sdk) Introduce `RoomEventCacheStateLock` and read/write guards. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch extracts fields from `RoomEventCacheState` and move them into `RoomEventCacheStateLock`. 
This lock provides 2 methods: `read` and `write`, respectively to acquire a read-only lock, and a write-only lock, represented by the `RoomEventCacheStateLockReadGuard` and the `RoomEventCacheStateLockWriteGuard` types. All “public” methods on `RoomEventCacheState` now are facade to the read and write guards. This refactoring makes the code to compile with the last change in `EventCacheStore::lock`, which now returns a `EventCacheStoreLockState`. The next step is to re-load `RoomEventCacheStateLock` when the lock is dirty! But before doing that, we need this new mechanism to centralise the management of the store lock. --- benchmarks/benches/event_cache.rs | 1 + benchmarks/benches/room_bench.rs | 2 + crates/matrix-sdk-ui/src/timeline/builder.rs | 60 +- .../src/timeline/controller/mod.rs | 7 +- .../src/timeline/pinned_events_loader.rs | 2 +- crates/matrix-sdk-ui/src/timeline/tasks.rs | 22 +- .../tests/integration/timeline/decryption.rs | 4 + .../tests/integration/timeline/mod.rs | 2 + .../integration/timeline/pinned_event.rs | 6 +- .../src/event_cache/deduplicator.rs | 23 +- crates/matrix-sdk/src/event_cache/mod.rs | 43 +- .../matrix-sdk/src/event_cache/pagination.rs | 20 +- .../matrix-sdk/src/event_cache/redecryptor.rs | 33 +- crates/matrix-sdk/src/event_cache/room/mod.rs | 1694 +++++++++-------- .../src/latest_events/latest_event.rs | 9 +- crates/matrix-sdk/src/latest_events/mod.rs | 4 +- crates/matrix-sdk/src/room/mod.rs | 2 +- crates/matrix-sdk/src/search_index/mod.rs | 4 +- crates/matrix-sdk/src/sliding_sync/client.rs | 2 +- .../tests/integration/event_cache/mod.rs | 76 +- .../tests/integration/event_cache/threads.rs | 24 +- .../tests/integration/room/common.rs | 8 +- .../matrix-sdk/tests/integration/room/left.rs | 36 +- labs/multiverse/src/main.rs | 16 +- .../src/widgets/room_view/details/events.rs | 2 +- 25 files changed, 1208 insertions(+), 894 deletions(-) diff --git a/benchmarks/benches/event_cache.rs b/benchmarks/benches/event_cache.rs index 
10f7627edaa..53c225e5c2f 100644 --- a/benchmarks/benches/event_cache.rs +++ b/benchmarks/benches/event_cache.rs @@ -317,6 +317,7 @@ fn find_event_relations(c: &mut Criterion) { let (target, relations) = room_event_cache .find_event_with_relations(target_event_id, filter) .await + .unwrap() .unwrap(); assert_eq!(target.event_id().as_deref().unwrap(), target_event_id); assert_eq!(relations.len(), num_related_events as usize); diff --git a/benchmarks/benches/room_bench.rs b/benchmarks/benches/room_bench.rs index fc9d7dcb54b..d9a11227e22 100644 --- a/benchmarks/benches/room_bench.rs +++ b/benchmarks/benches/room_bench.rs @@ -181,6 +181,8 @@ pub fn load_pinned_events_benchmark(c: &mut Criterion) { .lock() .await .unwrap() + .as_clean() + .unwrap() .clear_all_linked_chunks() .await .unwrap(); diff --git a/crates/matrix-sdk-ui/src/timeline/builder.rs b/crates/matrix-sdk-ui/src/timeline/builder.rs index 90e443583f0..e918cd8b685 100644 --- a/crates/matrix-sdk-ui/src/timeline/builder.rs +++ b/crates/matrix-sdk-ui/src/timeline/builder.rs @@ -164,7 +164,7 @@ impl TimelineBuilder { room.client().event_cache().subscribe()?; let (room_event_cache, event_cache_drop) = room.event_cache().await?; - let (_, event_subscriber) = room_event_cache.subscribe().await; + let (_, event_subscriber) = room_event_cache.subscribe().await.unwrap(); let is_room_encrypted = room .latest_encryption_state() @@ -209,36 +209,36 @@ impl TimelineBuilder { .instrument(span) }); - let thread_update_join_handle = if let TimelineFocus::Thread { root_event_id: root } = - &focus - { - Some({ - let span = info_span!( - parent: Span::none(), - "thread_live_update_handler", - room_id = ?room.room_id(), - focus = focus.debug_string(), - prefix = internal_id_prefix - ); - span.follows_from(Span::current()); - - // Note: must be done here *before* spawning the task, to avoid race conditions - // with event cache updates happening in the background. 
- let (_events, receiver) = room_event_cache.subscribe_to_thread(root.clone()).await; - - spawn( - thread_updates_task( - receiver, - room_event_cache.clone(), - controller.clone(), - root.clone(), + let thread_update_join_handle = + if let TimelineFocus::Thread { root_event_id: root } = &focus { + Some({ + let span = info_span!( + parent: Span::none(), + "thread_live_update_handler", + room_id = ?room.room_id(), + focus = focus.debug_string(), + prefix = internal_id_prefix + ); + span.follows_from(Span::current()); + + // Note: must be done here *before* spawning the task, to avoid race conditions + // with event cache updates happening in the background. + let (_events, receiver) = + room_event_cache.subscribe_to_thread(root.clone()).await.unwrap(); + + spawn( + thread_updates_task( + receiver, + room_event_cache.clone(), + controller.clone(), + root.clone(), + ) + .instrument(span), ) - .instrument(span), - ) - }) - } else { - None - }; + }) + } else { + None + }; let local_echo_listener_handle = { let timeline_controller = controller.clone(); diff --git a/crates/matrix-sdk-ui/src/timeline/controller/mod.rs b/crates/matrix-sdk-ui/src/timeline/controller/mod.rs index 9eafcce7f24..b4e406bd95a 100644 --- a/crates/matrix-sdk-ui/src/timeline/controller/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/controller/mod.rs @@ -439,7 +439,7 @@ impl TimelineController

{ match focus { TimelineFocus::Live { .. } => { // Retrieve the cached events, and add them to the timeline. - let events = room_event_cache.events().await; + let events = room_event_cache.events().await?; let has_events = !events.is_empty(); @@ -549,7 +549,8 @@ impl TimelineController

{ } TimelineFocus::Thread { root_event_id, .. } => { - let (events, _) = room_event_cache.subscribe_to_thread(root_event_id.clone()).await; + let (events, _) = + room_event_cache.subscribe_to_thread(root_event_id.clone()).await?; let has_events = !events.is_empty(); // For each event, we also need to find the related events, as they don't @@ -558,7 +559,7 @@ impl TimelineController

{ let mut related_events = Vector::new(); for event_id in events.iter().filter_map(|event| event.event_id()) { if let Some((_original, related)) = - room_event_cache.find_event_with_relations(&event_id, None).await + room_event_cache.find_event_with_relations(&event_id, None).await? { related_events.extend(related); } diff --git a/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs b/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs index d605d6b6c9f..d4d0e585191 100644 --- a/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs +++ b/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs @@ -173,7 +173,7 @@ impl PinnedEventsRoom for Room { Box::pin(async move { if let Ok((cache, _handles)) = self.event_cache().await && let Some(ret) = - cache.find_event_with_relations(event_id, related_event_filters).await + cache.find_event_with_relations(event_id, related_event_filters).await? { debug!("Loaded pinned event {event_id} and related events from cache"); return Ok(ret); diff --git a/crates/matrix-sdk-ui/src/timeline/tasks.rs b/crates/matrix-sdk-ui/src/timeline/tasks.rs index b40f0c9cb25..5f1455ba2f4 100644 --- a/crates/matrix-sdk-ui/src/timeline/tasks.rs +++ b/crates/matrix-sdk-ui/src/timeline/tasks.rs @@ -28,7 +28,7 @@ use matrix_sdk::{ use ruma::OwnedEventId; use tokio::sync::broadcast::{Receiver, error::RecvError}; use tokio_stream::StreamExt as _; -use tracing::{instrument, trace, warn}; +use tracing::{error, instrument, trace, warn}; use crate::timeline::{TimelineController, TimelineFocus, event_item::RemoteEventOrigin}; @@ -93,7 +93,14 @@ pub(in crate::timeline) async fn thread_updates_task( // The updates might have lagged, but the room event cache might // have events, so retrieve them and add them back again to the // timeline, after clearing it. 
- let (initial_events, _) = room_event_cache.subscribe_to_thread(root.clone()).await; + let (initial_events, _) = + match room_event_cache.subscribe_to_thread(root.clone()).await { + Ok(values) => values, + Err(err) => { + error!(?err, "Subscribing to thread failed"); + break; + } + }; timeline_controller .replace_with_initial_remote_events(initial_events, RemoteEventOrigin::Cache) @@ -145,7 +152,16 @@ pub(in crate::timeline) async fn room_event_cache_updates_task( // The updates might have lagged, but the room event cache might have // events, so retrieve them and add them back again to the timeline, // after clearing it. - let initial_events = room_event_cache.events().await; + let initial_events = match room_event_cache.events().await { + Ok(initial_events) => initial_events, + Err(err) => { + error!( + ?err, + "Failed to replace the initial remote events in the event cache" + ); + break; + } + }; timeline_controller .replace_with_initial_remote_events(initial_events, RemoteEventOrigin::Cache) diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/decryption.rs b/crates/matrix-sdk-ui/tests/integration/timeline/decryption.rs index 27794fc5cd3..1441ebaaa1d 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/decryption.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/decryption.rs @@ -73,6 +73,8 @@ async fn test_an_utd_from_the_event_cache_as_an_initial_item_is_decrypted() { // The item is an encrypted event! It has been stored before having a chance to // be decrypted. Damn. We want to see if decryption will trigger automatically. event_cache_store + .as_clean() + .unwrap() .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -212,6 +214,8 @@ async fn test_an_utd_from_the_event_cache_as_a_paginated_item_is_decrypted() { // chance to be decrypted. Damn. We want to see if decryption will trigger // automatically. 
event_cache_store + .as_clean() + .unwrap() .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs b/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs index 19d18604b0f..834bbc07226 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs @@ -827,6 +827,8 @@ async fn test_timeline_receives_a_limited_number_of_events_when_subscribing() { // The event cache contains 30 events. event_cache_store + .as_clean() + .unwrap() .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/pinned_event.rs b/crates/matrix-sdk-ui/tests/integration/timeline/pinned_event.rs index 6252914a453..1ec204380af 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/pinned_event.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/pinned_event.rs @@ -256,7 +256,7 @@ async fn test_cached_events_are_kept_for_different_room_instances() { assert!(!items.is_empty()); // We just loaded some events assert_pending!(timeline_stream); - assert!(room_cache.find_event(event_id!("$1")).await.is_some()); + assert!(room_cache.find_event(event_id!("$1")).await.unwrap().is_some()); // Drop the existing room and timeline instances drop(timeline_stream); @@ -277,7 +277,7 @@ async fn test_cached_events_are_kept_for_different_room_instances() { let (items, _) = timeline.subscribe().await; assert!(!items.is_empty()); // These events came from the cache - assert!(room_cache.find_event(event_id!("$1")).await.is_some()); + assert!(room_cache.find_event(event_id!("$1")).await.unwrap().is_some()); // Drop the existing room and timeline instances server.server().reset().await; @@ -402,7 +402,7 @@ async fn test_pinned_timeline_with_no_pinned_events_on_pagination_is_just_empty( .expect("Pagination of events should successful"); // Assert the event is loaded and added to the cache - 
assert!(event_cache.find_event(event_id).await.is_some()); + assert!(event_cache.find_event(event_id).await.unwrap().is_some()); // And it won't cause an update in the pinned events timeline since it's not // pinned diff --git a/crates/matrix-sdk/src/event_cache/deduplicator.rs b/crates/matrix-sdk/src/event_cache/deduplicator.rs index 9d9f69facf2..581afed9d93 100644 --- a/crates/matrix-sdk/src/event_cache/deduplicator.rs +++ b/crates/matrix-sdk/src/event_cache/deduplicator.rs @@ -18,7 +18,7 @@ use std::collections::BTreeSet; use matrix_sdk_base::{ - event_cache::store::EventCacheStoreLock, + event_cache::store::EventCacheStoreLockGuard, linked_chunk::{LinkedChunkId, Position}, }; use ruma::OwnedEventId; @@ -32,7 +32,7 @@ use super::{ /// information about the duplicates found in the new events, including the /// events that are not loaded in memory. pub async fn filter_duplicate_events( - store: &EventCacheStoreLock, + store_guard: &EventCacheStoreLockGuard, linked_chunk_id: LinkedChunkId<'_>, linked_chunk: &EventLinkedChunk, mut new_events: Vec, @@ -50,10 +50,8 @@ pub async fn filter_duplicate_events( }); } - let store = store.lock().await?; - // Let the store do its magic ✨ - let duplicated_event_ids = store + let duplicated_event_ids = store_guard .filter_duplicated_events( linked_chunk_id, new_events.iter().filter_map(|event| event.event_id()).collect(), @@ -148,7 +146,10 @@ pub(super) struct DeduplicationOutcome { mod tests { use std::ops::Not as _; - use matrix_sdk_base::{deserialized_responses::TimelineEvent, linked_chunk::ChunkIdentifier}; + use matrix_sdk_base::{ + deserialized_responses::TimelineEvent, event_cache::store::EventCacheStoreLock, + linked_chunk::ChunkIdentifier, + }; use matrix_sdk_test::{async_test, event_factory::EventFactory}; use ruma::{EventId, owned_event_id, serde::Raw, user_id}; @@ -222,6 +223,8 @@ mod tests { .unwrap(); let event_cache_store = EventCacheStoreLock::new(event_cache_store, "hodor".to_owned()); + let event_cache_store = 
event_cache_store.lock().await.unwrap(); + let event_cache_store_guard = event_cache_store.as_clean().unwrap(); { // When presenting with only duplicate events, some of them in the in-memory @@ -232,7 +235,7 @@ mod tests { linked_chunk.push_events([event_1.clone(), event_2.clone(), event_3.clone()]); let outcome = filter_duplicate_events( - &event_cache_store, + event_cache_store_guard, LinkedChunkId::Room(room_id), &linked_chunk, vec![event_0.clone(), event_1.clone(), event_2.clone(), event_3.clone()], @@ -247,7 +250,7 @@ mod tests { linked_chunk.push_events([event_2.clone(), event_3.clone()]); let outcome = filter_duplicate_events( - &event_cache_store, + event_cache_store_guard, LinkedChunkId::Room(room_id), &linked_chunk, vec![event_0, event_1, event_2, event_3, event_4], @@ -351,6 +354,8 @@ mod tests { // Wrap the store into its lock. let event_cache_store = EventCacheStoreLock::new(event_cache_store, "hodor".to_owned()); + let event_cache_store = event_cache_store.lock().await.unwrap(); + let event_cache_store_guard = event_cache_store.as_clean().unwrap(); let linked_chunk = EventLinkedChunk::new(); @@ -360,7 +365,7 @@ mod tests { in_store_duplicated_event_ids, non_empty_all_duplicates, } = filter_duplicate_events( - &event_cache_store, + event_cache_store_guard, LinkedChunkId::Room(room_id), &linked_chunk, vec![ev1, ev2, ev3, ev4], diff --git a/crates/matrix-sdk/src/event_cache/mod.rs b/crates/matrix-sdk/src/event_cache/mod.rs index 16d0dba5564..870cacc52a3 100644 --- a/crates/matrix-sdk/src/event_cache/mod.rs +++ b/crates/matrix-sdk/src/event_cache/mod.rs @@ -42,7 +42,7 @@ use matrix_sdk_base::{ deserialized_responses::{AmbiguityChange, TimelineEvent}, event_cache::{ Gap, - store::{EventCacheStoreError, EventCacheStoreLock}, + store::{EventCacheStoreError, EventCacheStoreLock, EventCacheStoreLockState}, }, executor::AbortOnDrop, linked_chunk::{self, OwnedLinkedChunkId, lazy_loader::LazyLoaderError}, @@ -51,7 +51,6 @@ use matrix_sdk_base::{ timer, }; use 
matrix_sdk_common::executor::{JoinHandle, spawn}; -use room::RoomEventCacheState; use ruma::{ OwnedEventId, OwnedRoomId, OwnedTransactionId, RoomId, events::AnySyncEphemeralRoomEvent, serde::Raw, @@ -69,6 +68,7 @@ use tracing::{Instrument as _, Span, debug, error, info, info_span, instrument, use crate::{ Client, client::WeakClient, + event_cache::room::RoomEventCacheStateLock, send_queue::{LocalEchoContent, RoomSendQueueUpdate, SendQueueUpdate}, }; @@ -403,7 +403,13 @@ impl EventCache { }; trace!("waiting for state lock…"); - let mut state = room.inner.state.write().await; + let mut state = match room.inner.state.write().await { + Ok(state) => state, + Err(err) => { + warn!(for_room = %room_id, "Failed to get the `RoomEventCacheStateLock`: {err}"); + continue; + } + }; match state.auto_shrink_if_no_subscribers().await { Ok(diffs) => { @@ -932,12 +938,17 @@ impl EventCacheInner { .await; // Clear the storage for all the rooms, using the storage facility. - self.store.lock().await?.clear_all_linked_chunks().await?; + let store_guard = match self.store.lock().await? { + EventCacheStoreLockState::Clean(store_guard) => store_guard, + EventCacheStoreLockState::Dirty(store_guard) => store_guard, + }; + store_guard.clear_all_linked_chunks().await?; // At this point, all the in-memory linked chunks are desynchronized from the // storage. Resynchronize them manually by calling reset(), and // propagate updates to observers. - try_join_all(room_locks.into_iter().map(|(room, mut state_guard)| async move { + try_join_all(room_locks.into_iter().map(|(room, state_guard)| async move { + let mut state_guard = state_guard?; let updates_as_vector_diffs = state_guard.reset().await?; let _ = room.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { @@ -1037,7 +1048,7 @@ impl EventCacheInner { ThreadingSupport::Enabled { .. 
} ); - let room_state = RoomEventCacheState::new( + let room_state = RoomEventCacheStateLock::new( room_id.to_owned(), room_version_rules, enabled_thread_support, @@ -1048,7 +1059,7 @@ impl EventCacheInner { .await?; let timeline_is_not_empty = - room_state.room_linked_chunk().revents().next().is_some(); + room_state.read().await?.room_linked_chunk().revents().next().is_some(); // SAFETY: we must have subscribed before reaching this code, otherwise // something is very wrong. @@ -1246,7 +1257,7 @@ mod tests { let mut generic_stream = event_cache.subscribe_to_room_generic_updates(); let (room_event_cache, _drop_handles) = event_cache.for_room(room_id).await.unwrap(); - let (events, mut stream) = room_event_cache.subscribe().await; + let (events, mut stream) = room_event_cache.subscribe().await.unwrap(); assert!(events.is_empty()); @@ -1332,15 +1343,15 @@ mod tests { let (room_event_cache, _drop_handles) = room1.event_cache().await.unwrap(); - let found1 = room_event_cache.find_event(eid1).await.unwrap(); + let found1 = room_event_cache.find_event(eid1).await.unwrap().unwrap(); assert_event_matches_msg(&found1, "hey"); - let found2 = room_event_cache.find_event(eid2).await.unwrap(); + let found2 = room_event_cache.find_event(eid2).await.unwrap().unwrap(); assert_event_matches_msg(&found2, "you"); // Retrieving the event with id3 from the room which doesn't contain it will // fail… - assert!(room_event_cache.find_event(eid3).await.is_none()); + assert!(room_event_cache.find_event(eid3).await.unwrap().is_none()); } #[async_test] @@ -1361,7 +1372,7 @@ mod tests { room_event_cache.save_events([f.text_msg("hey there").event_id(event_id).into()]).await; // Retrieving the event at the room-wide cache works. 
- assert!(room_event_cache.find_event(event_id).await.is_some()); + assert!(room_event_cache.find_event(event_id).await.unwrap().is_some()); } #[async_test] @@ -1384,7 +1395,9 @@ mod tests { .event_cache_store() .lock() .await - .unwrap() + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id_0), vec![ @@ -1449,7 +1462,9 @@ mod tests { .event_cache_store() .lock() .await - .unwrap() + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ diff --git a/crates/matrix-sdk/src/event_cache/pagination.rs b/crates/matrix-sdk/src/event_cache/pagination.rs index 1d2ec50c027..df4ee95451b 100644 --- a/crates/matrix-sdk/src/event_cache/pagination.rs +++ b/crates/matrix-sdk/src/event_cache/pagination.rs @@ -14,7 +14,10 @@ //! A sub-object for running pagination tasks on a given room. -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{Arc, atomic::Ordering}, + time::Duration, +}; use eyeball::{SharedObservable, Subscriber}; use matrix_sdk_base::timeout::timeout; @@ -182,11 +185,13 @@ impl RoomPagination { // there's no previous events chunk to load. loop { - let mut state_guard = self.inner.state.write().await; + let mut state_guard = self.inner.state.write().await?; match state_guard.load_more_events_backwards().await? { LoadMoreEventsBackwardsOutcome::Gap { prev_token } => { - if prev_token.is_none() && !state_guard.waited_for_initial_prev_token { + if prev_token.is_none() + && !state_guard.waited_for_initial_prev_token().load(Ordering::SeqCst) + { // We didn't reload a pagination token, and we haven't waited for one; wait // and start over. 
@@ -205,7 +210,12 @@ impl RoomPagination { .await; trace!("done waiting"); - self.inner.state.write().await.waited_for_initial_prev_token = true; + self.inner + .state + .write() + .await? + .waited_for_initial_prev_token() + .store(true, Ordering::SeqCst); // Retry! // @@ -296,7 +306,7 @@ impl RoomPagination { .inner .state .write() - .await + .await? .handle_backpagination(events, new_token, prev_token) .await? { diff --git a/crates/matrix-sdk/src/event_cache/redecryptor.rs b/crates/matrix-sdk/src/event_cache/redecryptor.rs index dd9fcb497b7..73d3d634563 100644 --- a/crates/matrix-sdk/src/event_cache/redecryptor.rs +++ b/crates/matrix-sdk/src/event_cache/redecryptor.rs @@ -123,6 +123,7 @@ use matrix_sdk_base::{ types::events::room::encrypted::EncryptedEvent, }, deserialized_responses::{DecryptedRoomEvent, TimelineEvent, TimelineEventKind}, + event_cache::store::EventCacheStoreLockState, locks::Mutex, timer, }; @@ -234,9 +235,14 @@ impl EventCache { room_id: &RoomId, session_id: SessionId<'_>, ) -> Result, EventCacheError> { - let events = { - let store = self.inner.store.lock().await?; - store.get_room_events(room_id, Some("m.room.encrypted"), Some(session_id)).await? + let events = match self.inner.store.lock().await? { + // If the lock is clean, no problem. + // If the lock is dirty, it doesn't really matter as we are hitting the store + // directly, there is no in-memory state to manage, so all good. Do not mark the lock as + // non-dirty. + EventCacheStoreLockState::Clean(guard) | EventCacheStoreLockState::Dirty(guard) => { + guard.get_room_events(room_id, Some("m.room.encrypted"), Some(session_id)).await? + } }; Ok(events.into_iter().filter_map(filter_timeline_event_to_utd).collect()) @@ -256,9 +262,14 @@ impl EventCache { event_id.zip(event) }; - let events = { - let store = self.inner.store.lock().await?; - store.get_room_events(room_id, None, Some(session_id)).await? + let events = match self.inner.store.lock().await? 
{ + // If the lock is clean, no problem. + // If the lock is dirty, it doesn't really matter as we are hitting the store + // directly, there is no in-memory state to manage, so all good. Do not mark the lock as + // non-dirty. + EventCacheStoreLockState::Clean(guard) | EventCacheStoreLockState::Dirty(guard) => { + guard.get_room_events(room_id, None, Some(session_id)).await? + } }; Ok(events.into_iter().filter_map(filter).collect()) @@ -291,7 +302,7 @@ impl EventCache { // Get the cache for this particular room and lock the state for the duration of // the decryption. let (room_cache, _drop_handles) = self.for_room(room_id).await?; - let mut state = room_cache.inner.state.write().await; + let mut state = room_cache.inner.state.write().await?; let event_ids: BTreeSet<_> = events.iter().cloned().map(|(event_id, _, _)| event_id).collect(); @@ -319,7 +330,7 @@ impl EventCache { // We replaced a bunch of events, reactive updates for those replacements have // been queued up. We need to send them out to our subscribers now. - let diffs = state.room_linked_chunk_mut().updates_as_vector_diffs(); + let diffs = state.room_linked_chunk().updates_as_vector_diffs(); let _ = room_cache.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { diffs, @@ -1214,7 +1225,7 @@ mod tests { .await .expect("We should be able to get to the event cache for a specific room"); - let (_, mut subscriber) = room_cache.subscribe().await; + let (_, mut subscriber) = room_cache.subscribe().await.unwrap(); // We regenerate the Olm machine to check if the room key stream is recreated to // correctly. @@ -1290,7 +1301,7 @@ mod tests { .await .expect("We should be able to get to the event cache for a specific room"); - let (_, mut subscriber) = room_cache.subscribe().await; + let (_, mut subscriber) = room_cache.subscribe().await.unwrap(); // Let us forward the event to Bob. 
matrix_mock_server @@ -1404,7 +1415,7 @@ mod tests { .await .expect("We should be able to get to the event cache for a specific room"); - let (_, mut subscriber) = room_cache.subscribe().await; + let (_, mut subscriber) = room_cache.subscribe().await.unwrap(); // Let us forward the event to Bob. matrix_mock_server diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 0c82af3ff9c..1906b90722c 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -40,7 +40,7 @@ use ruma::{ serde::Raw, }; use tokio::sync::{ - Notify, RwLock, + Notify, broadcast::{Receiver, Sender}, mpsc, }; @@ -164,7 +164,7 @@ impl RoomEventCache { /// Create a new [`RoomEventCache`] using the given room and store. pub(super) fn new( client: WeakClient, - state: RoomEventCacheState, + state: RoomEventCacheStateLock, pagination_status: SharedObservable, room_id: OwnedRoomId, auto_shrink_sender: mpsc::Sender, @@ -186,10 +186,10 @@ impl RoomEventCache { /// /// Use [`RoomEventCache::subscribe`] to get all current events, plus a /// subscriber. - pub async fn events(&self) -> Vec { - let state = self.inner.state.read().await; + pub async fn events(&self) -> Result> { + let state = self.inner.state.read().await?; - state.room_linked_chunk().events().map(|(_position, item)| item.clone()).collect() + Ok(state.room_linked_chunk().events().map(|(_position, item)| item.clone()).collect()) } /// Subscribe to this room updates, after getting the initial list of @@ -198,12 +198,13 @@ impl RoomEventCache { /// Use [`RoomEventCache::events`] to get all current events without the /// subscriber. Creating, and especially dropping, a /// [`RoomEventCacheSubscriber`] isn't free, as it triggers side-effects. 
- pub async fn subscribe(&self) -> (Vec, RoomEventCacheSubscriber) { - let state = self.inner.state.read().await; + pub async fn subscribe(&self) -> Result<(Vec, RoomEventCacheSubscriber)> { + let state = self.inner.state.read().await?; let events = state.room_linked_chunk().events().map(|(_position, item)| item.clone()).collect(); - let previous_subscriber_count = state.subscriber_count.fetch_add(1, Ordering::SeqCst); + let subscriber_count = state.subscriber_count(); + let previous_subscriber_count = subscriber_count.fetch_add(1, Ordering::SeqCst); trace!("added a room event cache subscriber; new count: {}", previous_subscriber_count + 1); let recv = self.inner.sender.subscribe(); @@ -211,10 +212,10 @@ impl RoomEventCache { recv, room_id: self.inner.room_id.clone(), auto_shrink_sender: self.inner.auto_shrink_sender.clone(), - subscriber_count: state.subscriber_count.clone(), + subscriber_count: subscriber_count.clone(), }; - (events, subscriber) + Ok((events, subscriber)) } /// Subscribe to thread for a given root event, and get a (maybe empty) @@ -222,9 +223,9 @@ impl RoomEventCache { pub async fn subscribe_to_thread( &self, thread_root: OwnedEventId, - ) -> (Vec, Receiver) { - let mut state = self.inner.state.write().await; - state.subscribe_to_thread(thread_root) + ) -> Result<(Vec, Receiver)> { + let mut state = self.inner.state.write().await?; + Ok(state.subscribe_to_thread(thread_root)) } /// Paginate backwards in a thread, given its root event ID. @@ -241,7 +242,7 @@ impl RoomEventCache { // Take the lock only for a short time here. let mut outcome = - self.inner.state.write().await.load_more_thread_events_backwards(thread_root.clone()); + self.inner.state.write().await?.load_more_thread_events_backwards(thread_root.clone()); loop { match outcome { @@ -275,7 +276,7 @@ impl RoomEventCache { None }; - let mut state = self.inner.state.write().await; + let mut state = self.inner.state.write().await?; // Save all the events (but the thread root) in the store. 
state.save_events(result.chunk.iter().cloned()).await?; @@ -321,27 +322,28 @@ impl RoomEventCache { /// /// **Warning**! It looks into the loaded events from the in-memory linked /// chunk **only**. It doesn't look inside the storage. - pub async fn rfind_map_event_in_memory_by(&self, predicate: P) -> Option + pub async fn rfind_map_event_in_memory_by(&self, predicate: P) -> Result> where P: FnMut(&Event) -> Option, { - self.inner.state.read().await.rfind_map_event_in_memory_by(predicate) + Ok(self.inner.state.read().await?.rfind_map_event_in_memory_by(predicate)) } /// Try to find an event by ID in this room. /// /// It starts by looking into loaded events before looking inside the /// storage. - pub async fn find_event(&self, event_id: &EventId) -> Option { - self.inner + pub async fn find_event(&self, event_id: &EventId) -> Result> { + Ok(self + .inner .state .read() - .await + .await? .find_event(event_id) .await .ok() .flatten() - .map(|(_loc, event)| event) + .map(|(_loc, event)| event)) } /// Try to find an event by ID in this room, along with its related events. @@ -359,16 +361,17 @@ impl RoomEventCache { &self, event_id: &EventId, filter: Option>, - ) -> Option<(Event, Vec)> { + ) -> Result)>> { // Search in all loaded or stored events. - self.inner + Ok(self + .inner .state .read() - .await + .await? .find_event_with_relations(event_id, filter.clone()) .await .ok() - .flatten() + .flatten()) } /// Clear all the storage for this [`RoomEventCache`]. @@ -377,7 +380,7 @@ impl RoomEventCache { /// storage. pub async fn clear(&self) -> Result<()> { // Clear the linked chunk and persisted storage. - let updates_as_vector_diffs = self.inner.state.write().await.reset().await?; + let updates_as_vector_diffs = self.inner.state.write().await?.reset().await?; // Notify observers about the update. 
let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { @@ -397,15 +400,23 @@ impl RoomEventCache { /// Save some events in the event cache, for further retrieval with /// [`Self::event`]. pub(crate) async fn save_events(&self, events: impl IntoIterator) { - if let Err(err) = self.inner.state.write().await.save_events(events).await { - warn!("couldn't save event in the event cache: {err}"); + match self.inner.state.write().await { + Ok(mut state_guard) => { + if let Err(err) = state_guard.save_events(events).await { + warn!("couldn't save event in the event cache: {err}"); + } + } + + Err(err) => { + warn!("couldn't save event in the event cache: {err}"); + } } } /// Return a nice debug string (a vector of lines) for the linked chunk of /// events for this room. pub async fn debug_string(&self) -> Vec { - self.inner.state.read().await.room_linked_chunk().debug_string() + self.inner.state.read().await.unwrap().room_linked_chunk().debug_string() } } @@ -420,7 +431,7 @@ pub(super) struct RoomEventCacheInner { pub sender: Sender, /// State for this room's event cache. - pub state: RwLock, + pub state: RoomEventCacheStateLock, /// A notifier that we received a new pagination token. pub pagination_batch_token_notifier: Notify, @@ -446,7 +457,7 @@ impl RoomEventCacheInner { /// to handle new timeline events. 
fn new( client: WeakClient, - state: RoomEventCacheState, + state: RoomEventCacheStateLock, pagination_status: SharedObservable, room_id: OwnedRoomId, auto_shrink_sender: mpsc::Sender, @@ -457,7 +468,7 @@ impl RoomEventCacheInner { Self { room_id: weak_room.room_id().to_owned(), weak_room, - state: RwLock::new(state), + state, sender, pagination_batch_token_notifier: Default::default(), auto_shrink_sender, @@ -545,7 +556,7 @@ impl RoomEventCacheInner { trace!("adding new events"); let (stored_prev_batch_token, timeline_event_diffs) = - self.state.write().await.handle_sync(timeline).await?; + self.state.write().await?.handle_sync(timeline).await?; // Now that all events have been added, we can trigger the // `pagination_token_notifier`. @@ -602,7 +613,10 @@ pub(super) enum LoadMoreEventsBackwardsOutcome { mod private { use std::{ collections::{BTreeMap, HashMap, HashSet}, - sync::{Arc, atomic::AtomicUsize}, + sync::{ + Arc, + atomic::{AtomicBool, AtomicUsize, Ordering}, + }, }; use eyeball::SharedObservable; @@ -612,7 +626,7 @@ mod private { deserialized_responses::{ThreadSummary, ThreadSummaryStatus, TimelineEventKind}, event_cache::{ Event, Gap, - store::{DynEventCacheStore, EventCacheStoreLock}, + store::{EventCacheStoreLock, EventCacheStoreLockGuard, EventCacheStoreLockState}, }, linked_chunk::{ ChunkContent, ChunkIdentifierGenerator, ChunkMetadata, LinkedChunkId, @@ -623,7 +637,7 @@ mod private { }; use matrix_sdk_common::executor::spawn; use ruma::{ - EventId, OwnedEventId, OwnedRoomId, + EventId, OwnedEventId, OwnedRoomId, RoomId, events::{ AnySyncMessageLikeEvent, AnySyncTimelineEvent, MessageLikeEventType, relation::RelationType, room::redaction::SyncRoomRedactionEvent, @@ -631,7 +645,10 @@ mod private { room_version_rules::RoomVersionRules, serde::Raw, }; - use tokio::sync::broadcast::{Receiver, Sender}; + use tokio::sync::{ + RwLock, RwLockReadGuard, RwLockWriteGuard, + broadcast::{Receiver, Sender}, + }; use tracing::{debug, error, instrument, trace, 
warn}; use super::{ @@ -650,16 +667,17 @@ mod private { /// /// This contains all the inner mutable states that ought to be updated at /// the same time. - pub struct RoomEventCacheState { - /// The room this state relates to. - room: OwnedRoomId, - - /// The rules for the version of this room. - room_version_rules: RoomVersionRules, + pub struct RoomEventCacheStateLock { + locked_state: RwLock, + } + struct RoomEventCacheStateLockInner { /// Whether thread support has been enabled for the event cache. enabled_thread_support: bool, + /// The room this state relates to. + room_id: OwnedRoomId, + /// Reference to the underlying backing store. store: EventCacheStoreLock, @@ -672,24 +690,27 @@ mod private { /// Keyed by the thread root event ID. threads: HashMap, - /// Have we ever waited for a previous-batch-token to come from sync, in - /// the context of pagination? We do this at most once per room, - /// the first time we try to run backward pagination. We reset - /// that upon clearing the timeline events. - pub waited_for_initial_prev_token: bool, - pagination_status: SharedObservable, /// See doc comment of /// [`super::super::EventCacheInner::linked_chunk_update_sender`]. linked_chunk_update_sender: Sender, + /// The rules for the version of this room. + room_version_rules: RoomVersionRules, + + /// Have we ever waited for a previous-batch-token to come from sync, in + /// the context of pagination? We do this at most once per room, + /// the first time we try to run backward pagination. We reset + /// that upon clearing the timeline events. + waited_for_initial_prev_token: Arc, + /// An atomic count of the current number of subscriber of the /// [`super::RoomEventCache`]. - pub(super) subscriber_count: Arc, + subscriber_count: Arc, } - impl RoomEventCacheState { + impl RoomEventCacheStateLock { /// Create a new state, or reload it from storage if it's been enabled. /// /// Not all events are going to be loaded. Only a portion of them. 
The @@ -707,7 +728,17 @@ mod private { store: EventCacheStoreLock, pagination_status: SharedObservable, ) -> Result { - let store_lock = store.lock().await?; + let store_guard = match store.lock().await? { + // + EventCacheStoreLockState::Clean(guard) => guard, + + // + EventCacheStoreLockState::Dirty(guard) => { + EventCacheStoreLockGuard::clear_dirty(&guard); + + guard + } + }; let linked_chunk_id = LinkedChunkId::Room(&room_id); @@ -716,7 +747,7 @@ mod private { // If loading the full linked chunk failed, we'll clear the event cache, as it // indicates that at some point, there's some malformed data. let full_linked_chunk_metadata = - match Self::load_linked_chunk_metadata(&*store_lock, linked_chunk_id).await { + match load_linked_chunk_metadata(&store_guard, linked_chunk_id).await { Ok(metas) => metas, Err(err) => { error!( @@ -724,7 +755,7 @@ mod private { ); // Try to clear storage for this room. - store_lock + store_guard .handle_linked_chunk_updates(linked_chunk_id, vec![Update::Clear]) .await?; @@ -733,7 +764,7 @@ mod private { } }; - let linked_chunk = match store_lock + let linked_chunk = match store_guard .load_last_chunk(linked_chunk_id) .await .map_err(EventCacheError::from) @@ -748,7 +779,7 @@ mod private { ); // Try to clear storage for this room. - store_lock + store_guard .handle_linked_chunk_updates(linked_chunk_id, vec![Update::Clear]) .await?; @@ -756,174 +787,199 @@ mod private { } }; - let room_linked_chunk = EventLinkedChunk::with_initial_linked_chunk( - linked_chunk, - full_linked_chunk_metadata, - ); - - // The threads mapping is intentionally empty at start, since we're going to - // reload threads lazily, as soon as we need to (based on external - // subscribers) or when we get new information about those (from - // sync). 
- let threads = HashMap::new(); + let waited_for_initial_prev_token = Arc::new(AtomicBool::new(false)); Ok(Self { - room: room_id, - room_version_rules, - enabled_thread_support, - store, - room_linked_chunk, - threads, - waited_for_initial_prev_token: false, - subscriber_count: Default::default(), - pagination_status, - linked_chunk_update_sender, + locked_state: RwLock::new(RoomEventCacheStateLockInner { + enabled_thread_support, + room_id, + store, + room_linked_chunk: EventLinkedChunk::with_initial_linked_chunk( + linked_chunk, + full_linked_chunk_metadata, + ), + // The threads mapping is intentionally empty at start, since we're going to + // reload threads lazily, as soon as we need to (based on external + // subscribers) or when we get new information about those (from + // sync). + threads: HashMap::new(), + pagination_status, + linked_chunk_update_sender, + room_version_rules, + waited_for_initial_prev_token, + subscriber_count: Default::default(), + }), }) } - /// Load a linked chunk's full metadata, making sure the chunks are - /// according to their their links. - /// - /// Returns `None` if there's no such linked chunk in the store, or an - /// error if the linked chunk is malformed. - async fn load_linked_chunk_metadata( - store: &DynEventCacheStore, - linked_chunk_id: LinkedChunkId<'_>, - ) -> Result>, EventCacheError> { - let mut all_chunks = store - .load_all_chunks_metadata(linked_chunk_id) - .await - .map_err(EventCacheError::from)?; - - if all_chunks.is_empty() { - // There are no chunks, so there's nothing to do. - return Ok(None); - } + pub async fn read(&self) -> Result, EventCacheError> { + let state_guard = self.locked_state.read().await; + let store_guard = match state_guard.store.lock().await? { + EventCacheStoreLockState::Clean(guard) => guard, + EventCacheStoreLockState::Dirty(_guard) => todo!("Dirty lock"), + }; - // Transform the vector into a hashmap, for quick lookup of the predecessors. 
- let chunk_map: HashMap<_, _> = - all_chunks.iter().map(|meta| (meta.identifier, meta)).collect(); + Ok(RoomEventCacheStateLockReadGuard { state: state_guard, store: store_guard }) + } - // Find a last chunk. - let mut iter = all_chunks.iter().filter(|meta| meta.next.is_none()); - let Some(last) = iter.next() else { - return Err(EventCacheError::InvalidLinkedChunkMetadata { - details: "no last chunk found".to_owned(), - }); + pub async fn write( + &self, + ) -> Result, EventCacheError> { + let state_guard = self.locked_state.write().await; + let store_guard = match state_guard.store.lock().await? { + EventCacheStoreLockState::Clean(guard) => guard, + EventCacheStoreLockState::Dirty(_guard) => todo!("Dirty lock"), }; - // There must at most one last chunk. - if let Some(other_last) = iter.next() { - return Err(EventCacheError::InvalidLinkedChunkMetadata { - details: format!( - "chunks {} and {} both claim to be last chunks", - last.identifier.index(), - other_last.identifier.index() - ), - }); - } + Ok(RoomEventCacheStateLockWriteGuard { state: state_guard, store: store_guard }) + } + } - // Rewind the chain back to the first chunk, and do some checks at the same - // time. - let mut seen = HashSet::new(); - let mut current = last; - loop { - // If we've already seen this chunk, there's a cycle somewhere. - if !seen.insert(current.identifier) { - return Err(EventCacheError::InvalidLinkedChunkMetadata { - details: format!( - "cycle detected in linked chunk at {}", - current.identifier.index() - ), - }); - } + pub struct RoomEventCacheStateLockReadGuard<'a> { + state: RwLockReadGuard<'a, RoomEventCacheStateLockInner>, + store: EventCacheStoreLockGuard, + } - let Some(prev_id) = current.previous else { - // If there's no previous chunk, we're done. 
- if seen.len() != all_chunks.len() { - return Err(EventCacheError::InvalidLinkedChunkMetadata { - details: format!( - "linked chunk likely has multiple components: {} chunks seen through the chain of predecessors, but {} expected", - seen.len(), - all_chunks.len() - ), - }); - } - break; - }; + pub struct RoomEventCacheStateLockWriteGuard<'a> { + state: RwLockWriteGuard<'a, RoomEventCacheStateLockInner>, + store: EventCacheStoreLockGuard, + } - // If the previous chunk is not in the map, then it's unknown - // and missing. - let Some(pred_meta) = chunk_map.get(&prev_id) else { - return Err(EventCacheError::InvalidLinkedChunkMetadata { - details: format!( - "missing predecessor {} chunk for {}", - prev_id.index(), - current.identifier.index() - ), - }); - }; + impl<'a> RoomEventCacheStateLockReadGuard<'a> { + /// Returns a read-only reference to the underlying room linked chunk. + pub fn room_linked_chunk(&self) -> &EventLinkedChunk { + &self.state.room_linked_chunk + } - // If the previous chunk isn't connected to the next, then the link is invalid. - if pred_meta.next != Some(current.identifier) { - return Err(EventCacheError::InvalidLinkedChunkMetadata { - details: format!( - "chunk {}'s next ({:?}) doesn't match the current chunk ({})", - pred_meta.identifier.index(), - pred_meta.next.map(|chunk_id| chunk_id.index()), - current.identifier.index() - ), - }); - } + pub fn subscriber_count(&self) -> &Arc { + &self.state.subscriber_count + } - current = *pred_meta; - } + /// Find a single event in this room. + /// + /// It starts by looking into loaded events in `EventLinkedChunk` before + /// looking inside the storage. + pub async fn find_event( + &self, + event_id: &EventId, + ) -> Result, EventCacheError> { + find_event(event_id, &self.state.room_id, &self.state.room_linked_chunk, &self.store) + .await + } - // At this point, `current` is the identifier of the first chunk. 
- // - // Reorder the resulting vector, by going through the chain of `next` links, and - // swapping items into their final position. - // - // Invariant in this loop: all items in [0..i[ are in their final, correct - // position. - let mut current = current.identifier; - for i in 0..all_chunks.len() { - // Find the target metadata. - let j = all_chunks - .iter() - .rev() - .position(|meta| meta.identifier == current) - .map(|j| all_chunks.len() - 1 - j) - .expect("the target chunk must be present in the metadata"); - if i != j { - all_chunks.swap(i, j); - } - if let Some(next) = all_chunks[i].next { - current = next; - } - } + /// Find an event and all its relations in the persisted storage. + /// + /// This goes straight to the database, as a simplification; we don't + /// expect to need to have to look up in memory events, or that + /// all the related events are actually loaded. + /// + /// The related events are sorted like this: + /// - events saved out-of-band with + /// [`super::RoomEventCache::save_events`] will be located at the + /// beginning of the array. + /// - events present in the linked chunk (be it in memory or in the + /// database) will be sorted according to their ordering in the linked + /// chunk. + pub async fn find_event_with_relations( + &self, + event_id: &EventId, + filters: Option>, + ) -> Result)>, EventCacheError> { + find_event_with_relations( + event_id, + &self.state.room_id, + filters, + &self.state.room_linked_chunk, + &self.store, + ) + .await + } + + //// Find a single event in this room, starting from the most recent event. + /// + /// **Warning**! It looks into the loaded events from the in-memory + /// linked chunk **only**. It doesn't look inside the storage, + /// contrary to [`Self::find_event`]. 
+ pub fn rfind_map_event_in_memory_by(&self, mut predicate: P) -> Option + where + P: FnMut(&Event) -> Option, + { + self.state.room_linked_chunk.revents().find_map(|(_position, event)| predicate(event)) + } + } + + impl<'a> RoomEventCacheStateLockWriteGuard<'a> { + /// Returns a write reference to the underlying room linked chunk. + pub fn room_linked_chunk(&mut self) -> &mut EventLinkedChunk { + &mut self.state.room_linked_chunk + } + + pub fn waited_for_initial_prev_token(&self) -> &Arc { + &self.state.waited_for_initial_prev_token + } + + /// Find a single event in this room. + /// + /// It starts by looking into loaded events in `EventLinkedChunk` before + /// looking inside the storage. + pub async fn find_event( + &self, + event_id: &EventId, + ) -> Result, EventCacheError> { + find_event(event_id, &self.state.room_id, &self.state.room_linked_chunk, &self.store) + .await + } - Ok(Some(all_chunks)) + /// Find an event and all its relations in the persisted storage. + /// + /// This goes straight to the database, as a simplification; we don't + /// expect to need to have to look up in memory events, or that + /// all the related events are actually loaded. + /// + /// The related events are sorted like this: + /// - events saved out-of-band with + /// [`super::RoomEventCache::save_events`] will be located at the + /// beginning of the array. + /// - events present in the linked chunk (be it in memory or in the + /// database) will be sorted according to their ordering in the linked + /// chunk. + pub async fn find_event_with_relations( + &self, + event_id: &EventId, + filters: Option>, + ) -> Result)>, EventCacheError> { + find_event_with_relations( + event_id, + &self.state.room_id, + filters, + &self.state.room_linked_chunk, + &self.store, + ) + .await } /// Load more events backwards if the last chunk is **not** a gap. 
- pub(in super::super) async fn load_more_events_backwards( + pub async fn load_more_events_backwards( &mut self, ) -> Result { // If any in-memory chunk is a gap, don't load more events, and let the caller // resolve the gap. - if let Some(prev_token) = self.room_linked_chunk.rgap().map(|gap| gap.prev_token) { + if let Some(prev_token) = self.state.room_linked_chunk.rgap().map(|gap| gap.prev_token) + { return Ok(LoadMoreEventsBackwardsOutcome::Gap { prev_token: Some(prev_token) }); } - let store = self.store.lock().await?; - - let prev_first_chunk = - self.room_linked_chunk.chunks().next().expect("a linked chunk is never empty"); + let prev_first_chunk = self + .state + .room_linked_chunk + .chunks() + .next() + .expect("a linked chunk is never empty"); // The first chunk is not a gap, we can load its previous chunk. - let linked_chunk_id = LinkedChunkId::Room(&self.room); - let new_first_chunk = match store + let linked_chunk_id = LinkedChunkId::Room(&self.state.room_id); + let new_first_chunk = match self + .store .load_previous_chunk(linked_chunk_id, prev_first_chunk.identifier()) .await { @@ -937,7 +993,7 @@ mod private { // sync for that room, because every room must have *at least* a room creation // event. Otherwise, we have reached the start of the timeline. - if self.room_linked_chunk.events().next().is_some() { + if self.state.room_linked_chunk.events().next().is_some() { // If there's at least one event, this means we've reached the start of the // timeline, since the chunk is fully loaded. trace!("chunk is fully loaded and non-empty: reached_start=true"); @@ -952,7 +1008,9 @@ mod private { error!("error when loading the previous chunk of a linked chunk: {err}"); // Clear storage for this room. - store.handle_linked_chunk_updates(linked_chunk_id, vec![Update::Clear]).await?; + self.store + .handle_linked_chunk_updates(linked_chunk_id, vec![Update::Clear]) + .await?; // Return the error. 
return Err(err.into()); @@ -968,11 +1026,18 @@ mod private { // `Items`. let reached_start = new_first_chunk.previous.is_none(); - if let Err(err) = self.room_linked_chunk.insert_new_chunk_as_first(new_first_chunk) { + if let Err(err) = + self.state.room_linked_chunk.insert_new_chunk_as_first(new_first_chunk) + { error!("error when inserting the previous chunk into its linked chunk: {err}"); // Clear storage for this room. - store.handle_linked_chunk_updates(linked_chunk_id, vec![Update::Clear]).await?; + self.store + .handle_linked_chunk_updates( + LinkedChunkId::Room(&self.state.room_id), + vec![Update::Clear], + ) + .await?; // Return the error. return Err(err.into()); @@ -980,10 +1045,10 @@ mod private { // ⚠️ Let's not propagate the updates to the store! We already have these data // in the store! Let's drain them. - let _ = self.room_linked_chunk.store_updates().take(); + let _ = self.state.room_linked_chunk.store_updates().take(); // However, we want to get updates as `VectorDiff`s. - let timeline_event_diffs = self.room_linked_chunk.updates_as_vector_diffs(); + let timeline_event_diffs = self.state.room_linked_chunk.updates_as_vector_diffs(); Ok(match chunk_content { ChunkContent::Gap(gap) => { @@ -1010,13 +1075,11 @@ mod private { /// pending diff updates with the result of this function. /// /// Otherwise, returns `None`. - pub(super) async fn shrink_to_last_chunk(&mut self) -> Result<(), EventCacheError> { - let store_lock = self.store.lock().await?; - + pub async fn shrink_to_last_chunk(&mut self) -> Result<(), EventCacheError> { // Attempt to load the last chunk. 
- let linked_chunk_id = LinkedChunkId::Room(&self.room); + let linked_chunk_id = LinkedChunkId::Room(&self.state.room_id); let (last_chunk, chunk_identifier_generator) = - match store_lock.load_last_chunk(linked_chunk_id).await { + match self.store.load_last_chunk(linked_chunk_id).await { Ok(pair) => pair, Err(err) => { @@ -1024,7 +1087,7 @@ mod private { error!("error when reloading a linked chunk from memory: {err}"); // Clear storage for this room. - store_lock + self.store .handle_linked_chunk_updates(linked_chunk_id, vec![Update::Clear]) .await?; @@ -1038,7 +1101,7 @@ mod private { // Remove all the chunks from the linked chunks, except for the last one, and // updates the chunk identifier generator. if let Err(err) = - self.room_linked_chunk.replace_with(last_chunk, chunk_identifier_generator) + self.state.room_linked_chunk.replace_with(last_chunk, chunk_identifier_generator) { error!("error when replacing the linked chunk: {err}"); return self.reset_internal().await; @@ -1047,11 +1110,13 @@ mod private { // Let pagination observers know that we may have not reached the start of the // timeline. // TODO: likely need to cancel any ongoing pagination. - self.pagination_status.set(RoomPaginationStatus::Idle { hit_timeline_start: false }); + self.state + .pagination_status + .set(RoomPaginationStatus::Idle { hit_timeline_start: false }); // Don't propagate those updates to the store; this is only for the in-memory // representation that we're doing this. Let's drain those store updates. - let _ = self.room_linked_chunk.store_updates().take(); + let _ = self.state.room_linked_chunk.store_updates().take(); Ok(()) } @@ -1059,10 +1124,10 @@ mod private { /// Automatically shrink the room if there are no more subscribers, as /// indicated by the atomic number of active subscribers. 
#[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"] - pub(crate) async fn auto_shrink_if_no_subscribers( + pub async fn auto_shrink_if_no_subscribers( &mut self, ) -> Result>>, EventCacheError> { - let subscriber_count = self.subscriber_count.load(std::sync::atomic::Ordering::SeqCst); + let subscriber_count = self.state.subscriber_count.load(Ordering::SeqCst); trace!(subscriber_count, "received request to auto-shrink"); @@ -1070,66 +1135,20 @@ mod private { // If we are the last strong reference to the auto-shrinker, we can shrink the // events data structure to its last chunk. self.shrink_to_last_chunk().await?; - Ok(Some(self.room_linked_chunk.updates_as_vector_diffs())) + + Ok(Some(self.state.room_linked_chunk.updates_as_vector_diffs())) } else { Ok(None) } } #[cfg(test)] - pub(crate) async fn force_shrink_to_last_chunk( + pub async fn force_shrink_to_last_chunk( &mut self, ) -> Result>, EventCacheError> { self.shrink_to_last_chunk().await?; - Ok(self.room_linked_chunk.updates_as_vector_diffs()) - } - - pub(crate) fn room_event_order(&self, event_pos: Position) -> Option { - self.room_linked_chunk.event_order(event_pos) - } - - /// Removes the bundled relations from an event, if they were present. - /// - /// Only replaces the present if it contained bundled relations. - fn strip_relations_if_present(event: &mut Raw) { - // We're going to get rid of the `unsigned`/`m.relations` field, if it's - // present. - // Use a closure that returns an option so we can quickly short-circuit. 
- let mut closure = || -> Option<()> { - let mut val: serde_json::Value = event.deserialize_as().ok()?; - let unsigned = val.get_mut("unsigned")?; - let unsigned_obj = unsigned.as_object_mut()?; - if unsigned_obj.remove("m.relations").is_some() { - *event = Raw::new(&val).ok()?.cast_unchecked(); - } - None - }; - let _ = closure(); - } - - fn strip_relations_from_event(ev: &mut Event) { - match &mut ev.kind { - TimelineEventKind::Decrypted(decrypted) => { - // Remove all information about encryption info for - // the bundled events. - decrypted.unsigned_encryption_info = None; - - // Remove the `unsigned`/`m.relations` field, if needs be. - Self::strip_relations_if_present(&mut decrypted.event); - } - - TimelineEventKind::UnableToDecrypt { event, .. } - | TimelineEventKind::PlainText { event } => { - Self::strip_relations_if_present(event); - } - } - } - /// Strips the bundled relations from a collection of events. - fn strip_relations_from_events(items: &mut [Event]) { - for ev in items.iter_mut() { - Self::strip_relations_from_event(ev); - } + Ok(self.state.room_linked_chunk.updates_as_vector_diffs()) } /// Remove events by their position, in `EventLinkedChunk` and in @@ -1138,7 +1157,7 @@ mod private { /// This method is purposely isolated because it must ensure that /// positions are sorted appropriately or it can be disastrous. #[instrument(skip_all)] - async fn remove_events( + pub async fn remove_events( &mut self, in_memory_events: Vec<(OwnedEventId, Position)>, in_store_events: Vec<(OwnedEventId, Position)>, @@ -1167,7 +1186,8 @@ mod private { } // `remove_events_by_position` is responsible of sorting positions. - self.room_linked_chunk + self.state + .room_linked_chunk .remove_events_by_position( in_memory_events.into_iter().map(|(_event_id, position)| position).collect(), ) @@ -1176,9 +1196,8 @@ mod private { self.propagate_changes().await } - /// Propagate changes to the underlying storage. 
async fn propagate_changes(&mut self) -> Result<(), EventCacheError> { - let updates = self.room_linked_chunk.store_updates().take(); + let updates = self.state.room_linked_chunk.store_updates().take(); self.send_updates_to_store(updates).await } @@ -1192,7 +1211,7 @@ mod private { &mut self, updates: Vec>, ) -> Result<(), EventCacheError> { - self.room_linked_chunk.order_tracker.map_updates(&updates); + self.state.room_linked_chunk.order_tracker.map_updates(&updates); self.send_updates_to_store(updates).await } @@ -1207,8 +1226,8 @@ mod private { // Strip relations from updates which insert or replace items. for update in updates.iter_mut() { match update { - Update::PushItems { items, .. } => Self::strip_relations_from_events(items), - Update::ReplaceItem { item, .. } => Self::strip_relations_from_event(item), + Update::PushItems { items, .. } => strip_relations_from_events(items), + Update::ReplaceItem { item, .. } => strip_relations_from_event(item), // Other update kinds don't involve adding new events. Update::NewItemsChunk { .. } | Update::NewGapChunk { .. } @@ -1228,12 +1247,10 @@ mod private { // storing updates happens in the expected order. let store = self.store.clone(); - let room_id = self.room.clone(); + let room_id = self.state.room_id.clone(); let cloned_updates = updates.clone(); spawn(async move { - let store = store.lock().await?; - trace!(updates = ?cloned_updates, "sending linked chunk updates to the store"); let linked_chunk_id = LinkedChunkId::Room(&room_id); store.handle_linked_chunk_updates(linked_chunk_id, cloned_updates).await?; @@ -1245,8 +1262,8 @@ mod private { .expect("joining failed")?; // Forward that the store got updated to observers. 
- let _ = self.linked_chunk_update_sender.send(RoomEventCacheLinkedChunkUpdate { - linked_chunk_id: OwnedLinkedChunkId::Room(self.room.clone()), + let _ = self.state.linked_chunk_update_sender.send(RoomEventCacheLinkedChunkUpdate { + linked_chunk_id: OwnedLinkedChunkId::Room(self.state.room_id.clone()), updates, }); @@ -1258,11 +1275,10 @@ mod private { /// Return a single diff update that is a clear of all events; as a /// result, the caller may override any pending diff updates /// with the result of this function. - #[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"] pub async fn reset(&mut self) -> Result>, EventCacheError> { self.reset_internal().await?; - let diff_updates = self.room_linked_chunk.updates_as_vector_diffs(); + let diff_updates = self.state.room_linked_chunk.updates_as_vector_diffs(); // Ensure the contract defined in the doc comment is true: debug_assert_eq!(diff_updates.len(), 1); @@ -1272,14 +1288,13 @@ mod private { } async fn reset_internal(&mut self) -> Result<(), EventCacheError> { - self.room_linked_chunk.reset(); + self.state.room_linked_chunk.reset(); // No need to update the thread summaries: the room events are - // gone because of the - // reset of `room_linked_chunk`. + // gone because of the reset of `room_linked_chunk`. // // Clear the threads. - for thread in self.threads.values_mut() { + for thread in self.state.threads.values_mut() { thread.clear(); } @@ -1288,152 +1303,263 @@ mod private { // Reset the pagination state too: pretend we never waited for the initial // prev-batch token, and indicate that we're not at the start of the // timeline, since we don't know about that anymore. 
- self.waited_for_initial_prev_token = false; + self.state.waited_for_initial_prev_token.store(false, Ordering::SeqCst); // TODO: likely must cancel any ongoing back-paginations too - self.pagination_status.set(RoomPaginationStatus::Idle { hit_timeline_start: false }); + self.state + .pagination_status + .set(RoomPaginationStatus::Idle { hit_timeline_start: false }); Ok(()) } - /// Returns a read-only reference to the underlying room linked chunk. - pub fn room_linked_chunk(&self) -> &EventLinkedChunk { - &self.room_linked_chunk - } - - /// Returns a mutable reference to the underlying room linked chunk. - #[cfg(feature = "e2e-encryption")] - pub(in super::super) fn room_linked_chunk_mut(&mut self) -> &mut EventLinkedChunk { - &mut self.room_linked_chunk - } - - //// Find a single event in this room, starting from the most recent event. + /// Handle the result of a sync. /// - /// **Warning**! It looks into the loaded events from the in-memory - /// linked chunk **only**. It doesn't look inside the storage, - /// contrary to [`Self::find_event`]. - pub fn rfind_map_event_in_memory_by(&self, mut predicate: P) -> Option - where - P: FnMut(&Event) -> Option, - { - self.room_linked_chunk.revents().find_map(|(_position, event)| predicate(event)) - } - - /// Find a single event in this room. + /// It may send room event cache updates to the given sender, if it + /// generated any of those. /// - /// It starts by looking into loaded events in `EventLinkedChunk` before - /// looking inside the storage. - pub async fn find_event( - &self, - event_id: &EventId, - ) -> Result, EventCacheError> { - // There are supposedly fewer events loaded in memory than in the store. Let's - // start by looking up in the `EventLinkedChunk`. 
- for (position, event) in self.room_linked_chunk.revents() { - if event.event_id().as_deref() == Some(event_id) { - return Ok(Some((EventLocation::Memory(position), event.clone()))); - } - } - - let store = self.store.lock().await?; + /// Returns `true` for the first part of the tuple if a new gap + /// (previous-batch token) has been inserted, `false` otherwise. + #[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"] + pub async fn handle_sync( + &mut self, + mut timeline: Timeline, + ) -> Result<(bool, Vec>), EventCacheError> { + let mut prev_batch = timeline.prev_batch.take(); - Ok(store - .find_event(&self.room, event_id) - .await? - .map(|event| (EventLocation::Store, event))) - } + let DeduplicationOutcome { + all_events: events, + in_memory_duplicated_event_ids, + in_store_duplicated_event_ids, + non_empty_all_duplicates: all_duplicates, + } = filter_duplicate_events( + &self.store, + LinkedChunkId::Room(&self.state.room_id), + &self.state.room_linked_chunk, + timeline.events, + ) + .await?; - /// Find an event and all its relations in the persisted storage. - /// - /// This goes straight to the database, as a simplification; we don't - /// expect to need to have to look up in memory events, or that - /// all the related events are actually loaded. - /// - /// The related events are sorted like this: - /// - events saved out-of-band with - /// [`super::RoomEventCache::save_events`] will be located at the - /// beginning of the array. - /// - events present in the linked chunk (be it in memory or in the - /// database) will be sorted according to their ordering in the linked - /// chunk. 
- pub async fn find_event_with_relations( - &self, - event_id: &EventId, - filters: Option>, - ) -> Result)>, EventCacheError> { - let store = self.store.lock().await?; + // If the timeline isn't limited, and we already knew about some past events, + // then this definitely knows what the timeline head is (either we know + // about all the events persisted in storage, or we have a gap + // somewhere). In this case, we can ditch the previous-batch + // token, which is an optimization to avoid unnecessary future back-pagination + // requests. + // + // We can also ditch it if we knew about all the events that came from sync, + // namely, they were all deduplicated. In this case, using the + // previous-batch token would only result in fetching other events we + // knew about. This is slightly incorrect in the presence of + // network splits, but this has shown to be Good Enough™. + if !timeline.limited && self.state.room_linked_chunk.events().next().is_some() + || all_duplicates + { + prev_batch = None; + } - // First, hit storage to get the target event and its related events. - let found = store.find_event(&self.room, event_id).await?; + if prev_batch.is_some() { + // Sad time: there's a gap, somewhere, in the timeline, and there's at least one + // non-duplicated event. We don't know which threads might have gappy, so we + // must invalidate them all :( + // TODO(bnjbvr): figure out a better catchup mechanism for threads. + let mut summaries_to_update = Vec::new(); - let Some(target) = found else { - // We haven't found the event: return early. - return Ok(None); - }; + for (thread_root, thread) in self.state.threads.iter_mut() { + // Empty the thread's linked chunk. + thread.clear(); - // Then, initialize the stack with all the related events, to find the - // transitive closure of all the related events. 
- let mut related = - store.find_event_relations(&self.room, event_id, filters.as_deref()).await?; - let mut stack = - related.iter().filter_map(|(event, _pos)| event.event_id()).collect::>(); - - // Also keep track of already seen events, in case there's a loop in the - // relation graph. - let mut already_seen = HashSet::new(); - already_seen.insert(event_id.to_owned()); - - let mut num_iters = 1; - - // Find the related event for each previously-related event. - while let Some(event_id) = stack.pop() { - if !already_seen.insert(event_id.clone()) { - // Skip events we've already seen. - continue; + summaries_to_update.push(thread_root.clone()); } - let other_related = - store.find_event_relations(&self.room, &event_id, filters.as_deref()).await?; + // Now, update the summaries to indicate that we're not sure what the latest + // thread event is. The thread count can remain as is, as it might still be + // valid, and there's no good value to reset it to, anyways. + for thread_root in summaries_to_update { + let Some((location, mut target_event)) = self.find_event(&thread_root).await? + else { + trace!(%thread_root, "thread root event is unknown, when updating thread summary after a gappy sync"); + continue; + }; - stack.extend(other_related.iter().filter_map(|(event, _pos)| event.event_id())); - related.extend(other_related); + if let Some(mut prev_summary) = target_event.thread_summary.summary().cloned() { + prev_summary.latest_reply = None; + + target_event.thread_summary = ThreadSummaryStatus::Some(prev_summary); + + self.replace_event_at(location, target_event).await?; + } + } + } - num_iters += 1; + if all_duplicates { + // No new events and no gap (per the previous check), thus no need to change the + // room state. We're done! 
+ return Ok((false, Vec::new())); } - trace!(num_related = %related.len(), num_iters, "computed transitive closure of related events"); + let has_new_gap = prev_batch.is_some(); + + // If we've never waited for an initial previous-batch token, and we've now + // inserted a gap, no need to wait for a previous-batch token later. + if !self.state.waited_for_initial_prev_token.load(Ordering::SeqCst) && has_new_gap { + self.state.waited_for_initial_prev_token.store(true, Ordering::SeqCst); + } - // Sort the results by their positions in the linked chunk, if available. + // Remove the old duplicated events. // - // If an event doesn't have a known position, it goes to the start of the array. - related.sort_by(|(_, lhs), (_, rhs)| { - use std::cmp::Ordering; - match (lhs, rhs) { - (None, None) => Ordering::Equal, - (None, Some(_)) => Ordering::Less, - (Some(_), None) => Ordering::Greater, - (Some(lhs), Some(rhs)) => { - let lhs = self.room_event_order(*lhs); - let rhs = self.room_event_order(*rhs); - - // The events should have a definite position, but in the case they don't, - // still consider that not having a position means you'll end at the start - // of the array. - match (lhs, rhs) { - (None, None) => Ordering::Equal, - (None, Some(_)) => Ordering::Less, - (Some(_), None) => Ordering::Greater, - (Some(lhs), Some(rhs)) => lhs.cmp(&rhs), - } - } + // We don't have to worry the removals can change the position of the existing + // events, because we are pushing all _new_ `events` at the back. 
+ self.remove_events(in_memory_duplicated_event_ids, in_store_duplicated_event_ids) + .await?; + + self.state + .room_linked_chunk + .push_live_events(prev_batch.map(|prev_token| Gap { prev_token }), &events); + + self.post_process_new_events(events, true).await?; + + if timeline.limited && has_new_gap { + // If there was a previous batch token for a limited timeline, unload the chunks + // so it only contains the last one; otherwise, there might be a + // valid gap in between, and observers may not render it (yet). + // + // We must do this *after* persisting these events to storage (in + // `post_process_new_events`). + self.shrink_to_last_chunk().await?; + } + + let timeline_event_diffs = self.state.room_linked_chunk.updates_as_vector_diffs(); + + Ok((has_new_gap, timeline_event_diffs)) + } + + /// Handle the result of a single back-pagination request. + /// + /// If the `prev_token` is set, then this function will check that the + /// corresponding gap is present in the in-memory linked chunk. + /// If it's not the case, `Ok(None)` will be returned, and the + /// caller may decide to do something based on that (e.g. restart a + /// pagination). + #[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"] + pub async fn handle_backpagination( + &mut self, + events: Vec, + mut new_token: Option, + prev_token: Option, + ) -> Result>)>, EventCacheError> + { + // Check that the previous token still exists; otherwise it's a sign that the + // room's timeline has been cleared. + let prev_gap_id = if let Some(token) = prev_token { + // Find the corresponding gap in the in-memory linked chunk. + let gap_chunk_id = self.state.room_linked_chunk.chunk_identifier(|chunk| { + matches!(chunk.content(), ChunkContent::Gap(Gap { prev_token }) if *prev_token == token) + }); + + if gap_chunk_id.is_none() { + // We got a previous-batch token from the linked chunk *before* running the + // request, but it is missing *after* completing the request. 
+ // + // It may be a sign the linked chunk has been reset, but it's fine, per this + // function's contract. + return Ok(None); } - }); - // Keep only the events, not their positions. - let related = related.into_iter().map(|(event, _pos)| event).collect(); + gap_chunk_id + } else { + None + }; + + let DeduplicationOutcome { + all_events: mut events, + in_memory_duplicated_event_ids, + in_store_duplicated_event_ids, + non_empty_all_duplicates: all_duplicates, + } = filter_duplicate_events( + &self.store, + LinkedChunkId::Room(&self.state.room_id), + &self.state.room_linked_chunk, + events, + ) + .await?; + + // If not all the events have been back-paginated, we need to remove the + // previous ones, otherwise we can end up with misordered events. + // + // Consider the following scenario: + // - sync returns [D, E, F] + // - then sync returns [] with a previous batch token PB1, so the internal + // linked chunk state is [D, E, F, PB1]. + // - back-paginating with PB1 may return [A, B, C, D, E, F]. + // + // Only inserting the new events when replacing PB1 would result in a timeline + // ordering of [D, E, F, A, B, C], which is incorrect. So we do have to remove + // all the events, in case this happens (see also #4746). + + if !all_duplicates { + // Let's forget all the previous events. + self.remove_events(in_memory_duplicated_event_ids, in_store_duplicated_event_ids) + .await?; + } else { + // All new events are duplicated, they can all be ignored. + events.clear(); + // The gap can be ditched too, as it won't be useful to backpaginate any + // further. + new_token = None; + } + + // `/messages` has been called with `dir=b` (backwards), so the events are in + // the inverted order; reorder them. 
+ let topo_ordered_events = events.iter().rev().cloned().collect::>(); + + let new_gap = new_token.map(|prev_token| Gap { prev_token }); + let reached_start = self.state.room_linked_chunk.finish_back_pagination( + prev_gap_id, + new_gap, + &topo_ordered_events, + ); + + // Note: this flushes updates to the store. + self.post_process_new_events(topo_ordered_events, false).await?; + + let event_diffs = self.state.room_linked_chunk.updates_as_vector_diffs(); - Ok(Some((target, related))) + Ok(Some((BackPaginationOutcome { events, reached_start }, event_diffs))) } + /// Subscribe to thread for a given root event, and get a (maybe empty) + /// initially known list of events for that thread. + pub fn subscribe_to_thread( + &mut self, + root: OwnedEventId, + ) -> (Vec, Receiver) { + self.get_or_reload_thread(root).subscribe() + } + + /// Back paginate in the given thread. + /// + /// Will always start from the end, unless we previously paginated. + pub fn finish_thread_network_pagination( + &mut self, + root: OwnedEventId, + prev_token: Option, + new_token: Option, + events: Vec, + ) -> Option { + self.get_or_reload_thread(root).finish_network_pagination(prev_token, new_token, events) + } + + pub fn load_more_thread_events_backwards( + &mut self, + root: OwnedEventId, + ) -> LoadMoreEventsBackwardsOutcome { + self.get_or_reload_thread(root).load_more_events_backwards() + } + + // -------------------------------------------- + // utility methods + // -------------------------------------------- + /// Post-process new events, after they have been added to the in-memory /// linked chunk. 
/// @@ -1451,7 +1577,7 @@ mod private { for event in events { self.maybe_apply_new_redaction(&event).await?; - if self.enabled_thread_support { + if self.state.enabled_thread_support { // Only add the event to a thread if: // - thread support is enabled, // - and if this is a sync (we can't know where to insert backpaginated events @@ -1464,7 +1590,7 @@ mod private { .push(event.clone()); } else if let Some(event_id) = event.event_id() { // If we spot the root of a thread, add it to its linked chunk. - if self.threads.contains_key(&event_id) { + if self.state.threads.contains_key(&event_id) { new_events_by_thread .entry(event_id) .or_default() @@ -1493,7 +1619,7 @@ mod private { } } - if self.enabled_thread_support { + if self.state.enabled_thread_support { self.update_threads(new_events_by_thread).await?; } @@ -1503,12 +1629,11 @@ mod private { fn get_or_reload_thread(&mut self, root_event_id: OwnedEventId) -> &mut ThreadEventCache { // TODO: when there's persistent storage, try to lazily reload from disk, if // missing from memory. - self.threads.entry(root_event_id.clone()).or_insert_with(|| { - ThreadEventCache::new( - self.room.clone(), - root_event_id, - self.linked_chunk_update_sender.clone(), - ) + let room_id = self.state.room_id.clone(); + let linked_chunk_update_sender = self.state.linked_chunk_update_sender.clone(); + + self.state.threads.entry(root_event_id.clone()).or_insert_with(|| { + ThreadEventCache::new(room_id, root_event_id, linked_chunk_update_sender) }) } @@ -1566,9 +1691,13 @@ mod private { // worry about filtering out aggregation events (like // reactions/edits/etc.). Pretty neat, huh? 
let num_replies = { - let store_guard = &*self.store.lock().await?; - let thread_replies = store_guard - .find_event_relations(&self.room, &thread_root, Some(&[RelationType::Thread])) + let thread_replies = self + .store + .find_event_relations( + &self.state.room_id, + &thread_root, + Some(&[RelationType::Thread]), + ) .await?; thread_replies.len().try_into().unwrap_or(u32::MAX) }; @@ -1603,7 +1732,8 @@ mod private { ) -> Result<(), EventCacheError> { match location { EventLocation::Memory(position) => { - self.room_linked_chunk + self.state + .room_linked_chunk .replace_event_at(position, event) .expect("should have been a valid position of an item"); // We just changed the in-memory representation; synchronize this with @@ -1645,7 +1775,7 @@ mod private { return Ok(()); }; - let Some(event_id) = redaction.redacts(&self.room_version_rules.redaction) else { + let Some(event_id) = redaction.redacts(&self.state.room_version_rules.redaction) else { warn!("missing target event id from the redaction event"); return Ok(()); }; @@ -1676,311 +1806,337 @@ mod private { // If the event is part of a thread, update the thread linked chunk and the // summary. extract_thread_root(target_event.raw()) - } else { - warn!("failed to deserialize the event to redact"); - None - }; - - if let Some(redacted_event) = apply_redaction( - target_event.raw(), - event.raw().cast_ref_unchecked::(), - &self.room_version_rules.redaction, - ) { - // It's safe to cast `redacted_event` here: - // - either the event was an `AnyTimelineEvent` cast to `AnySyncTimelineEvent` - // when calling .raw(), so it's still one under the hood. - // - or it wasn't, and it's a plain `AnySyncTimelineEvent` in this case. - target_event.replace_raw(redacted_event.cast_unchecked()); - - self.replace_event_at(location, target_event).await?; - - // If the redacted event was part of a thread, remove it in the thread linked - // chunk too, and make sure to update the thread root's summary - // as well. 
- // - // Note: there is an ordering issue here: the above `replace_event_at` must - // happen BEFORE we recompute the summary, otherwise the set of - // replies may include the to-be-redacted event. - if let Some(thread_root) = thread_root - && let Some(thread_cache) = self.threads.get_mut(&thread_root) - { - thread_cache.remove_if_present(event_id); - - // The number of replies may have changed, so update the thread summary if - // needs be. - let latest_event_id = thread_cache.latest_event_id(); - self.maybe_update_thread_summary(thread_root, latest_event_id).await?; - } - } - - Ok(()) - } - - /// Save events into the database, without notifying observers. - pub async fn save_events( - &self, - events: impl IntoIterator, - ) -> Result<(), EventCacheError> { - let store = self.store.clone(); - let room_id = self.room.clone(); - let events = events.into_iter().collect::>(); - - // Spawn a task so the save is uninterrupted by task cancellation. - spawn(async move { - let store = store.lock().await?; - for event in events { - store.save_event(&room_id, event).await?; - } - super::Result::Ok(()) - }) - .await - .expect("joining failed")?; - - Ok(()) - } - - /// Handle the result of a sync. - /// - /// It may send room event cache updates to the given sender, if it - /// generated any of those. - /// - /// Returns `true` for the first part of the tuple if a new gap - /// (previous-batch token) has been inserted, `false` otherwise. 
- #[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"] - pub async fn handle_sync( - &mut self, - mut timeline: Timeline, - ) -> Result<(bool, Vec>), EventCacheError> { - let mut prev_batch = timeline.prev_batch.take(); - - let DeduplicationOutcome { - all_events: events, - in_memory_duplicated_event_ids, - in_store_duplicated_event_ids, - non_empty_all_duplicates: all_duplicates, - } = filter_duplicate_events( - &self.store, - LinkedChunkId::Room(self.room.as_ref()), - &self.room_linked_chunk, - timeline.events, - ) - .await?; - - // If the timeline isn't limited, and we already knew about some past events, - // then this definitely knows what the timeline head is (either we know - // about all the events persisted in storage, or we have a gap - // somewhere). In this case, we can ditch the previous-batch - // token, which is an optimization to avoid unnecessary future back-pagination - // requests. - // - // We can also ditch it if we knew about all the events that came from sync, - // namely, they were all deduplicated. In this case, using the - // previous-batch token would only result in fetching other events we - // knew about. This is slightly incorrect in the presence of - // network splits, but this has shown to be Good Enough™. - if !timeline.limited && self.room_linked_chunk.events().next().is_some() - || all_duplicates - { - prev_batch = None; - } - - if prev_batch.is_some() { - // Sad time: there's a gap, somewhere, in the timeline, and there's at least one - // non-duplicated event. We don't know which threads might have gappy, so we - // must invalidate them all :( - // TODO(bnjbvr): figure out a better catchup mechanism for threads. - let mut summaries_to_update = Vec::new(); - - for (thread_root, thread) in self.threads.iter_mut() { - // Empty the thread's linked chunk. 
- thread.clear(); - - summaries_to_update.push(thread_root.clone()); - } - - // Now, update the summaries to indicate that we're not sure what the latest - // thread event is. The thread count can remain as is, as it might still be - // valid, and there's no good value to reset it to, anyways. - for thread_root in summaries_to_update { - let Some((location, mut target_event)) = self.find_event(&thread_root).await? - else { - trace!(%thread_root, "thread root event is unknown, when updating thread summary after a gappy sync"); - continue; - }; + } else { + warn!("failed to deserialize the event to redact"); + None + }; - if let Some(mut prev_summary) = target_event.thread_summary.summary().cloned() { - prev_summary.latest_reply = None; + if let Some(redacted_event) = apply_redaction( + target_event.raw(), + event.raw().cast_ref_unchecked::(), + &self.state.room_version_rules.redaction, + ) { + // It's safe to cast `redacted_event` here: + // - either the event was an `AnyTimelineEvent` cast to `AnySyncTimelineEvent` + // when calling .raw(), so it's still one under the hood. + // - or it wasn't, and it's a plain `AnySyncTimelineEvent` in this case. + target_event.replace_raw(redacted_event.cast_unchecked()); - target_event.thread_summary = ThreadSummaryStatus::Some(prev_summary); + self.replace_event_at(location, target_event).await?; - self.replace_event_at(location, target_event).await?; - } + // If the redacted event was part of a thread, remove it in the thread linked + // chunk too, and make sure to update the thread root's summary + // as well. + // + // Note: there is an ordering issue here: the above `replace_event_at` must + // happen BEFORE we recompute the summary, otherwise the set of + // replies may include the to-be-redacted event. 
+ if let Some(thread_root) = thread_root
+ && let Some(thread_cache) = self.state.threads.get_mut(&thread_root)
+ {
+ thread_cache.remove_if_present(event_id);
+
+ // The number of replies may have changed, so update the thread summary if
+ // needs be.
+ let latest_event_id = thread_cache.latest_event_id();
+ self.maybe_update_thread_summary(thread_root, latest_event_id).await?;
 }
 }
 
- if all_duplicates {
- // No new events and no gap (per the previous check), thus no need to change the
- // room state. We're done!
- return Ok((false, Vec::new()));
- }
+ Ok(())
+ }
 
- let has_new_gap = prev_batch.is_some();
+ /// Save events into the database, without notifying observers.
+ pub async fn save_events(
+ &mut self,
+ events: impl IntoIterator,
+ ) -> Result<(), EventCacheError> {
+ let store = self.store.clone();
+ let room_id = self.state.room_id.clone();
+ let events = events.into_iter().collect::>();
 
- // If we've never waited for an initial previous-batch token, and we've now
- // inserted a gap, no need to wait for a previous-batch token later.
- if !self.waited_for_initial_prev_token && has_new_gap {
- self.waited_for_initial_prev_token = true;
- }
+ // Spawn a task so the save is uninterrupted by task cancellation.
+ spawn(async move {
+ for event in events {
+ store.save_event(&room_id, event).await?;
+ }
+ super::Result::Ok(())
+ })
+ .await
+ .expect("joining failed")?;
 
- // Remove the old duplicated events.
- //
- // We don't have to worry the removals can change the position of the existing
- // events, because we are pushing all _new_ `events` at the back.
- self.remove_events(in_memory_duplicated_event_ids, in_store_duplicated_event_ids)
- .await?;
+ Ok(())
+ }
+ }
 
- self.room_linked_chunk
- .push_live_events(prev_batch.map(|prev_token| Gap { prev_token }), &events);
 
+ /// Load a linked chunk's full metadata, making sure the chunks are
+ /// ordered according to their links.
+ ///
+ /// Returns `None` if there's no such linked chunk in the store, or an
+ /// error if the linked chunk is malformed.
+ async fn load_linked_chunk_metadata(
+ store_guard: &EventCacheStoreLockGuard,
+ linked_chunk_id: LinkedChunkId<'_>,
+ ) -> Result>, EventCacheError> {
+ let mut all_chunks = store_guard
+ .load_all_chunks_metadata(linked_chunk_id)
+ .await
+ .map_err(EventCacheError::from)?;
 
- self.post_process_new_events(events, true).await?;
+ if all_chunks.is_empty() {
+ // There are no chunks, so there's nothing to do.
+ return Ok(None);
+ }
 
- if timeline.limited && has_new_gap {
- // If there was a previous batch token for a limited timeline, unload the chunks
- // so it only contains the last one; otherwise, there might be a
- // valid gap in between, and observers may not render it (yet).
- //
- // We must do this *after* persisting these events to storage (in
- // `post_process_new_events`).
- self.shrink_to_last_chunk().await?;
- }
+ // Transform the vector into a hashmap, for quick lookup of the predecessors.
+ let chunk_map: HashMap<_, _> =
+ all_chunks.iter().map(|meta| (meta.identifier, meta)).collect();
 
- let timeline_event_diffs = self.room_linked_chunk.updates_as_vector_diffs();
+ // Find a last chunk.
+ let mut iter = all_chunks.iter().filter(|meta| meta.next.is_none());
+ let Some(last) = iter.next() else {
+ return Err(EventCacheError::InvalidLinkedChunkMetadata {
+ details: "no last chunk found".to_owned(),
+ });
+ };
 
- Ok((has_new_gap, timeline_event_diffs))
+ // There must be at most one last chunk.
+ if let Some(other_last) = iter.next() {
+ return Err(EventCacheError::InvalidLinkedChunkMetadata {
+ details: format!(
+ "chunks {} and {} both claim to be last chunks",
+ last.identifier.index(),
+ other_last.identifier.index()
+ ),
+ });
 }
 
- /// Handle the result of a single back-pagination request.
- /// - /// If the `prev_token` is set, then this function will check that the - /// corresponding gap is present in the in-memory linked chunk. - /// If it's not the case, `Ok(None)` will be returned, and the - /// caller may decide to do something based on that (e.g. restart a - /// pagination). - #[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"] - pub async fn handle_backpagination( - &mut self, - events: Vec, - mut new_token: Option, - prev_token: Option, - ) -> Result>)>, EventCacheError> - { - // Check that the previous token still exists; otherwise it's a sign that the - // room's timeline has been cleared. - let prev_gap_id = if let Some(token) = prev_token { - // Find the corresponding gap in the in-memory linked chunk. - let gap_chunk_id = self.room_linked_chunk.chunk_identifier(|chunk| { - matches!(chunk.content(), ChunkContent::Gap(Gap { prev_token }) if *prev_token == token) + // Rewind the chain back to the first chunk, and do some checks at the same + // time. + let mut seen = HashSet::new(); + let mut current = last; + loop { + // If we've already seen this chunk, there's a cycle somewhere. + if !seen.insert(current.identifier) { + return Err(EventCacheError::InvalidLinkedChunkMetadata { + details: format!( + "cycle detected in linked chunk at {}", + current.identifier.index() + ), }); + } - if gap_chunk_id.is_none() { - // We got a previous-batch token from the linked chunk *before* running the - // request, but it is missing *after* completing the request. - // - // It may be a sign the linked chunk has been reset, but it's fine, per this - // function's contract. - return Ok(None); + let Some(prev_id) = current.previous else { + // If there's no previous chunk, we're done. 
+ if seen.len() != all_chunks.len() { + return Err(EventCacheError::InvalidLinkedChunkMetadata { + details: format!( + "linked chunk likely has multiple components: {} chunks seen through the chain of predecessors, but {} expected", + seen.len(), + all_chunks.len() + ), + }); } + break; + }; - gap_chunk_id - } else { - None + // If the previous chunk is not in the map, then it's unknown + // and missing. + let Some(pred_meta) = chunk_map.get(&prev_id) else { + return Err(EventCacheError::InvalidLinkedChunkMetadata { + details: format!( + "missing predecessor {} chunk for {}", + prev_id.index(), + current.identifier.index() + ), + }); }; - let DeduplicationOutcome { - all_events: mut events, - in_memory_duplicated_event_ids, - in_store_duplicated_event_ids, - non_empty_all_duplicates: all_duplicates, - } = filter_duplicate_events( - &self.store, - LinkedChunkId::Room(self.room.as_ref()), - &self.room_linked_chunk, - events, - ) - .await?; + // If the previous chunk isn't connected to the next, then the link is invalid. + if pred_meta.next != Some(current.identifier) { + return Err(EventCacheError::InvalidLinkedChunkMetadata { + details: format!( + "chunk {}'s next ({:?}) doesn't match the current chunk ({})", + pred_meta.identifier.index(), + pred_meta.next.map(|chunk_id| chunk_id.index()), + current.identifier.index() + ), + }); + } - // If not all the events have been back-paginated, we need to remove the - // previous ones, otherwise we can end up with misordered events. - // - // Consider the following scenario: - // - sync returns [D, E, F] - // - then sync returns [] with a previous batch token PB1, so the internal - // linked chunk state is [D, E, F, PB1]. - // - back-paginating with PB1 may return [A, B, C, D, E, F]. - // - // Only inserting the new events when replacing PB1 would result in a timeline - // ordering of [D, E, F, A, B, C], which is incorrect. So we do have to remove - // all the events, in case this happens (see also #4746). 
+ current = *pred_meta;
+ }
 
- if !all_duplicates {
- // Let's forget all the previous events.
- self.remove_events(in_memory_duplicated_event_ids, in_store_duplicated_event_ids)
- .await?;
- } else {
- // All new events are duplicated, they can all be ignored.
- events.clear();
- // The gap can be ditched too, as it won't be useful to backpaginate any
- // further.
- new_token = None;
+ // At this point, `current` is the identifier of the first chunk.
+ //
+ // Reorder the resulting vector, by going through the chain of `next` links, and
+ // swapping items into their final position.
+ //
+ // Invariant in this loop: all items in [0..i[ are in their final, correct
+ // position.
+ let mut current = current.identifier;
+ for i in 0..all_chunks.len() {
+ // Find the target metadata.
+ let j = all_chunks
+ .iter()
+ .rev()
+ .position(|meta| meta.identifier == current)
+ .map(|j| all_chunks.len() - 1 - j)
+ .expect("the target chunk must be present in the metadata");
+ if i != j {
+ all_chunks.swap(i, j);
 }
+ if let Some(next) = all_chunks[i].next {
+ current = next;
+ }
+ }
 
- // `/messages` has been called with `dir=b` (backwards), so the events are in
- // the inverted order; reorder them.
- let topo_ordered_events = events.iter().rev().cloned().collect::>();
+ Ok(Some(all_chunks))
+ }
 
- let new_gap = new_token.map(|prev_token| Gap { prev_token });
- let reached_start = self.room_linked_chunk.finish_back_pagination(
- prev_gap_id,
- new_gap,
- &topo_ordered_events,
- );
+ /// Removes the bundled relations from an event, if they were present.
+ ///
+ /// Only replaces the event if it contained bundled relations.
+ fn strip_relations_if_present(event: &mut Raw) {
+ // We're going to get rid of the `unsigned`/`m.relations` field, if it's
+ // present.
+ // Use a closure that returns an option so we can quickly short-circuit.
+ let mut closure = || -> Option<()> { + let mut val: serde_json::Value = event.deserialize_as().ok()?; + let unsigned = val.get_mut("unsigned")?; + let unsigned_obj = unsigned.as_object_mut()?; + if unsigned_obj.remove("m.relations").is_some() { + *event = Raw::new(&val).ok()?.cast_unchecked(); + } + None + }; + let _ = closure(); + } - // Note: this flushes updates to the store. - self.post_process_new_events(topo_ordered_events, false).await?; + fn strip_relations_from_event(ev: &mut Event) { + match &mut ev.kind { + TimelineEventKind::Decrypted(decrypted) => { + // Remove all information about encryption info for + // the bundled events. + decrypted.unsigned_encryption_info = None; - let event_diffs = self.room_linked_chunk.updates_as_vector_diffs(); + // Remove the `unsigned`/`m.relations` field, if needs be. + strip_relations_if_present(&mut decrypted.event); + } - Ok(Some((BackPaginationOutcome { events, reached_start }, event_diffs))) + TimelineEventKind::UnableToDecrypt { event, .. } + | TimelineEventKind::PlainText { event } => { + strip_relations_if_present(event); + } } + } - /// Subscribe to thread for a given root event, and get a (maybe empty) - /// initially known list of events for that thread. - pub fn subscribe_to_thread( - &mut self, - root: OwnedEventId, - ) -> (Vec, Receiver) { - self.get_or_reload_thread(root).subscribe() + /// Strips the bundled relations from a collection of events. + fn strip_relations_from_events(items: &mut [Event]) { + for ev in items.iter_mut() { + strip_relations_from_event(ev); } + } - /// Back paginate in the given thread. - /// - /// Will always start from the end, unless we previously paginated. 
- pub fn finish_thread_network_pagination( - &mut self, - root: OwnedEventId, - prev_token: Option, - new_token: Option, - events: Vec, - ) -> Option { - self.get_or_reload_thread(root).finish_network_pagination(prev_token, new_token, events) + /// Implementation of [`RoomEventCacheStateLockReadGuard::find_event`] and + /// [`RoomEventCacheStateLockWriteGuard::find_event`]. + async fn find_event( + event_id: &EventId, + room_id: &RoomId, + room_linked_chunk: &EventLinkedChunk, + store: &EventCacheStoreLockGuard, + ) -> Result, EventCacheError> { + // There are supposedly fewer events loaded in memory than in the store. Let's + // start by looking up in the `EventLinkedChunk`. + for (position, event) in room_linked_chunk.revents() { + if event.event_id().as_deref() == Some(event_id) { + return Ok(Some((EventLocation::Memory(position), event.clone()))); + } } - pub fn load_more_thread_events_backwards( - &mut self, - root: OwnedEventId, - ) -> LoadMoreEventsBackwardsOutcome { - self.get_or_reload_thread(root).load_more_events_backwards() + Ok(store.find_event(room_id, event_id).await?.map(|event| (EventLocation::Store, event))) + } + + /// Implementation of + /// [`RoomEventCacheStateLockReadGuard::find_event_with_relations`] and + /// [`RoomEventCacheStateLockWriteGuard::find_event_with_relations`]. + async fn find_event_with_relations( + event_id: &EventId, + room_id: &RoomId, + filters: Option>, + room_linked_chunk: &EventLinkedChunk, + store: &EventCacheStoreLockGuard, + ) -> Result)>, EventCacheError> { + // First, hit storage to get the target event and its related events. + let found = store.find_event(room_id, event_id).await?; + + let Some(target) = found else { + // We haven't found the event: return early. + return Ok(None); + }; + + // Then, initialize the stack with all the related events, to find the + // transitive closure of all the related events. 
+ let mut related = store.find_event_relations(room_id, event_id, filters.as_deref()).await?; + let mut stack = + related.iter().filter_map(|(event, _pos)| event.event_id()).collect::>(); + + // Also keep track of already seen events, in case there's a loop in the + // relation graph. + let mut already_seen = HashSet::new(); + already_seen.insert(event_id.to_owned()); + + let mut num_iters = 1; + + // Find the related event for each previously-related event. + while let Some(event_id) = stack.pop() { + if !already_seen.insert(event_id.clone()) { + // Skip events we've already seen. + continue; + } + + let other_related = + store.find_event_relations(room_id, &event_id, filters.as_deref()).await?; + + stack.extend(other_related.iter().filter_map(|(event, _pos)| event.event_id())); + related.extend(other_related); + + num_iters += 1; } + + trace!(num_related = %related.len(), num_iters, "computed transitive closure of related events"); + + // Sort the results by their positions in the linked chunk, if available. + // + // If an event doesn't have a known position, it goes to the start of the array. + related.sort_by(|(_, lhs), (_, rhs)| { + use std::cmp::Ordering; + + match (lhs, rhs) { + (None, None) => Ordering::Equal, + (None, Some(_)) => Ordering::Less, + (Some(_), None) => Ordering::Greater, + (Some(lhs), Some(rhs)) => { + let lhs = room_linked_chunk.event_order(*lhs); + let rhs = room_linked_chunk.event_order(*rhs); + + // The events should have a definite position, but in the case they don't, + // still consider that not having a position means you'll end at the start + // of the array. + match (lhs, rhs) { + (None, None) => Ordering::Equal, + (None, Some(_)) => Ordering::Less, + (Some(_), None) => Ordering::Greater, + (Some(lhs), Some(rhs)) => lhs.cmp(&rhs), + } + } + } + }); + + // Keep only the events, not their positions. 
+ let related = related.into_iter().map(|(event, _pos)| event).collect(); + + Ok(Some((target, related))) } } @@ -1993,7 +2149,7 @@ pub(super) enum EventLocation { Store, } -pub(super) use private::RoomEventCacheState; +pub(super) use private::RoomEventCacheStateLock; #[cfg(test)] mod tests { @@ -2134,8 +2290,11 @@ mod tests { room_event_cache.save_events([associated_related_event]).await; let filter = Some(vec![RelationType::Replacement]); - let (event, related_events) = - room_event_cache.find_event_with_relations(original_id, filter).await.unwrap(); + let (event, related_events) = room_event_cache + .find_event_with_relations(original_id, filter) + .await + .expect("Failed to find the event with relations") + .expect("Event has no relation"); // Fetched event is the right one. let cached_event_id = event.event_id().unwrap(); assert_eq!(cached_event_id, original_id); @@ -2148,8 +2307,12 @@ mod tests { // Now we'll filter threads instead, there should be no related events let filter = Some(vec![RelationType::Thread]); - let (event, related_events) = - room_event_cache.find_event_with_relations(original_id, filter).await.unwrap(); + let (event, related_events) = room_event_cache + .find_event_with_relations(original_id, filter) + .await + .expect("Failed to find the event with relations") + .expect("Event has no relation"); + // Fetched event is the right one. let cached_event_id = event.event_id().unwrap(); assert_eq!(cached_event_id, original_id); @@ -2193,8 +2356,11 @@ mod tests { // Save the associated related event, which redacts the related event. room_event_cache.save_events([associated_related_event]).await; - let (event, related_events) = - room_event_cache.find_event_with_relations(original_id, None).await.unwrap(); + let (event, related_events) = room_event_cache + .find_event_with_relations(original_id, None) + .await + .expect("Failed to find the event with relations") + .expect("Event has no relation"); // Fetched event is the right one. 
let cached_event_id = event.event_id().unwrap(); assert_eq!(cached_event_id, original_id); @@ -2241,8 +2407,11 @@ mod tests { let related_id = related_event.event_id().unwrap(); room_event_cache.save_events([related_event]).await; - let (event, related_events) = - room_event_cache.find_event_with_relations(&original_event_id, None).await.unwrap(); + let (event, related_events) = room_event_cache + .find_event_with_relations(&original_event_id, None) + .await + .expect("Failed to find the event with relations") + .expect("Event has no relation"); // Fetched event is the right one. let cached_event_id = event.event_id().unwrap(); assert_eq!(cached_event_id, original_event_id); @@ -2418,7 +2587,7 @@ mod timed_tests { // The in-memory linked chunk keeps the bundled relation. { - let events = room_event_cache.events().await; + let events = room_event_cache.events().await.unwrap(); assert_eq!(events.len(), 1); @@ -2534,13 +2703,13 @@ mod timed_tests { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (items, mut stream) = room_event_cache.subscribe().await; + let (items, mut stream) = room_event_cache.subscribe().await.unwrap(); let mut generic_stream = event_cache.subscribe_to_room_generic_updates(); // The rooms knows about all cached events. { - assert!(room_event_cache.find_event(event_id1).await.is_some()); - assert!(room_event_cache.find_event(event_id2).await.is_some()); + assert!(room_event_cache.find_event(event_id1).await.unwrap().is_some()); + assert!(room_event_cache.find_event(event_id2).await.unwrap().is_some()); } // But only part of events are loaded from the store @@ -2586,10 +2755,10 @@ mod timed_tests { // Events individually are not forgotten by the event cache, after clearing a // room. - assert!(room_event_cache.find_event(event_id1).await.is_some()); + assert!(room_event_cache.find_event(event_id1).await.unwrap().is_some()); // But their presence in a linked chunk is forgotten. 
- let items = room_event_cache.events().await; + let items = room_event_cache.events().await.unwrap(); assert!(items.is_empty()); // The event cache store too. @@ -2694,7 +2863,7 @@ mod timed_tests { } ); - let (items, mut stream) = room_event_cache.subscribe().await; + let (items, mut stream) = room_event_cache.subscribe().await.unwrap(); // The initial items contain one event because only the last chunk is loaded by // default. @@ -2703,8 +2872,8 @@ mod timed_tests { assert!(stream.is_empty()); // The event cache knows only all events though, even if they aren't loaded. - assert!(room_event_cache.find_event(event_id1).await.is_some()); - assert!(room_event_cache.find_event(event_id2).await.is_some()); + assert!(room_event_cache.find_event(event_id1).await.unwrap().is_some()); + assert!(room_event_cache.find_event(event_id2).await.unwrap().is_some()); // Let's paginate to load more events. room_event_cache.pagination().run_backwards_once(20).await.unwrap(); @@ -2744,7 +2913,7 @@ mod timed_tests { // when subscribing, to check that the items correspond to their new // positions. The duplicated item is removed (so it's not the first // element anymore), and it's added to the back of the list. - let items = room_event_cache.events().await; + let items = room_event_cache.events().await.unwrap(); assert_eq!(items.len(), 2); assert_eq!(items[0].event_id().unwrap(), event_id1); assert_eq!(items[1].event_id().unwrap(), event_id2); @@ -2806,7 +2975,7 @@ mod timed_tests { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let items = room_event_cache.events().await; + let items = room_event_cache.events().await.unwrap(); // Because the persisted content was invalid, the room store is reset: there are // no events in the cache. 
@@ -2859,7 +3028,7 @@ mod timed_tests { ); { - let mut state = room_event_cache.inner.state.write().await; + let mut state = room_event_cache.inner.state.write().await.unwrap(); let mut num_gaps = 0; let mut num_events = 0; @@ -2920,7 +3089,7 @@ mod timed_tests { ); { - let state = room_event_cache.inner.state.read().await; + let state = room_event_cache.inner.state.read().await.unwrap(); let mut num_gaps = 0; let mut num_events = 0; @@ -2957,9 +3126,13 @@ mod timed_tests { // Fill the event cache store with an initial linked chunk with 2 events chunks. { - let store = client.event_cache_store(); - let store = store.lock().await.unwrap(); - store + client + .event_cache_store() + .lock() + .await + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -2995,7 +3168,7 @@ mod timed_tests { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); // Sanity check: lazily loaded, so only includes one item at start. - let (events, mut stream) = room_event_cache.subscribe().await; + let (events, mut stream) = room_event_cache.subscribe().await.unwrap(); assert_eq!(events.len(), 1); assert_eq!(events[0].event_id().as_deref(), Some(evid2)); assert!(stream.is_empty()); @@ -3031,6 +3204,7 @@ mod timed_tests { .state .write() .await + .unwrap() .force_shrink_to_last_chunk() .await .expect("shrinking should succeed"); @@ -3049,7 +3223,7 @@ mod timed_tests { assert!(generic_stream.is_empty()); // When reading the events, we do get only the last one. - let events = room_event_cache.events().await; + let events = room_event_cache.events().await.unwrap(); assert_eq!(events.len(), 1); assert_eq!(events[0].event_id().as_deref(), Some(evid2)); @@ -3079,9 +3253,13 @@ mod timed_tests { // Fill the event cache store with an initial linked chunk with 2 events chunks. 
{ - let store = client.event_cache_store(); - let store = store.lock().await.unwrap(); - store + client + .event_cache_store() + .lock() + .await + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -3119,20 +3297,27 @@ mod timed_tests { // Initially, the linked chunk only contains the last chunk, so only ev3 is // loaded. { - let state = room_event_cache.inner.state.read().await; + let state = room_event_cache.inner.state.read().await.unwrap(); + let room_linked_chunk = state.room_linked_chunk(); // But we can get the order of ev1. - assert_eq!(state.room_event_order(Position::new(ChunkIdentifier::new(0), 0)), Some(0)); + assert_eq!( + room_linked_chunk.event_order(Position::new(ChunkIdentifier::new(0), 0)), + Some(0) + ); // And that of ev2 as well. - assert_eq!(state.room_event_order(Position::new(ChunkIdentifier::new(0), 1)), Some(1)); + assert_eq!( + room_linked_chunk.event_order(Position::new(ChunkIdentifier::new(0), 1)), + Some(1) + ); // ev3, which is loaded, also has a known ordering. - let mut events = state.room_linked_chunk().events(); + let mut events = room_linked_chunk.events(); let (pos, ev) = events.next().unwrap(); assert_eq!(pos, Position::new(ChunkIdentifier::new(1), 0)); assert_eq!(ev.event_id().as_deref(), Some(evid3)); - assert_eq!(state.room_event_order(pos), Some(2)); + assert_eq!(room_linked_chunk.event_order(pos), Some(2)); // No other loaded events. assert!(events.next().is_none()); @@ -3145,9 +3330,11 @@ mod timed_tests { // All events are now loaded, so their order is precisely their enumerated index // in a linear iteration. 
{ - let state = room_event_cache.inner.state.read().await; - for (i, (pos, _)) in state.room_linked_chunk().events().enumerate() { - assert_eq!(state.room_event_order(pos), Some(i)); + let state = room_event_cache.inner.state.read().await.unwrap(); + let room_linked_chunk = state.room_linked_chunk(); + + for (i, (pos, _)) in room_linked_chunk.events().enumerate() { + assert_eq!(room_linked_chunk.event_order(pos), Some(i)); } } @@ -3170,29 +3357,39 @@ mod timed_tests { .unwrap(); { - let state = room_event_cache.inner.state.read().await; + let state = room_event_cache.inner.state.read().await.unwrap(); + let room_linked_chunk = state.room_linked_chunk(); // After the shrink, only evid3 and evid4 are loaded. - let mut events = state.room_linked_chunk().events(); + let mut events = room_linked_chunk.events(); let (pos, ev) = events.next().unwrap(); assert_eq!(ev.event_id().as_deref(), Some(evid3)); - assert_eq!(state.room_event_order(pos), Some(2)); + assert_eq!(room_linked_chunk.event_order(pos), Some(2)); let (pos, ev) = events.next().unwrap(); assert_eq!(ev.event_id().as_deref(), Some(evid4)); - assert_eq!(state.room_event_order(pos), Some(3)); + assert_eq!(room_linked_chunk.event_order(pos), Some(3)); // No other loaded events. assert!(events.next().is_none()); // But we can still get the order of previous events. - assert_eq!(state.room_event_order(Position::new(ChunkIdentifier::new(0), 0)), Some(0)); - assert_eq!(state.room_event_order(Position::new(ChunkIdentifier::new(0), 1)), Some(1)); + assert_eq!( + room_linked_chunk.event_order(Position::new(ChunkIdentifier::new(0), 0)), + Some(0) + ); + assert_eq!( + room_linked_chunk.event_order(Position::new(ChunkIdentifier::new(0), 1)), + Some(1) + ); // ev3 doesn't have an order with its previous position, since it's been // deduplicated. 
- assert_eq!(state.room_event_order(Position::new(ChunkIdentifier::new(1), 0)), None); + assert_eq!( + room_linked_chunk.event_order(Position::new(ChunkIdentifier::new(1), 0)), + None + ); } } @@ -3212,9 +3409,13 @@ mod timed_tests { // Fill the event cache store with an initial linked chunk with 2 events chunks. { - let store = client.event_cache_store(); - let store = store.lock().await.unwrap(); - store + client + .event_cache_store() + .lock() + .await + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -3250,7 +3451,7 @@ mod timed_tests { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); // Sanity check: lazily loaded, so only includes one item at start. - let (events1, mut stream1) = room_event_cache.subscribe().await; + let (events1, mut stream1) = room_event_cache.subscribe().await.unwrap(); assert_eq!(events1.len(), 1); assert_eq!(events1[0].event_id().as_deref(), Some(evid2)); assert!(stream1.is_empty()); @@ -3276,7 +3477,7 @@ mod timed_tests { // Have another subscriber. // Since it's not the first one, and the previous one loaded some more events, // the second subscribers sees them all. - let (events2, stream2) = room_event_cache.subscribe().await; + let (events2, stream2) = room_event_cache.subscribe().await.unwrap(); assert_eq!(events2.len(), 2); assert_eq!(events2[0].event_id().as_deref(), Some(evid1)); assert_eq!(events2[1].event_id().as_deref(), Some(evid2)); @@ -3297,12 +3498,12 @@ mod timed_tests { { // Check the inner state: there's no more shared auto-shrinker. 
- let state = room_event_cache.inner.state.read().await; - assert_eq!(state.subscriber_count.load(std::sync::atomic::Ordering::SeqCst), 0); + let state = room_event_cache.inner.state.read().await.unwrap(); + assert_eq!(state.subscriber_count().load(std::sync::atomic::Ordering::SeqCst), 0); } // Getting the events will only give us the latest chunk. - let events3 = room_event_cache.events().await; + let events3 = room_event_cache.events().await.unwrap(); assert_eq!(events3.len(), 1); assert_eq!(events3[0].event_id().as_deref(), Some(evid2)); } @@ -3331,9 +3532,13 @@ mod timed_tests { // Fill the event cache store with an initial linked chunk of 2 chunks, and 4 // events. { - let store = client.event_cache_store(); - let store = store.lock().await.unwrap(); - store + client + .event_cache_store() + .lock() + .await + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -3375,7 +3580,7 @@ mod timed_tests { (event.raw().get_field::("sender").unwrap().as_deref() == Some(*BOB)).then(|| event.event_id()) }) .await, - Some(event_id) => { + Ok(Some(event_id)) => { assert_eq!(event_id.as_deref(), Some(event_id_0)); } ); @@ -3388,7 +3593,7 @@ mod timed_tests { (event.raw().get_field::("sender").unwrap().as_deref() == Some(*ALICE)).then(|| event.event_id()) }) .await, - Some(event_id) => { + Ok(Some(event_id)) => { assert_eq!(event_id.as_deref(), Some(event_id_2)); } ); @@ -3402,10 +3607,13 @@ mod timed_tests { .then(|| event.event_id()) }) .await + .unwrap() .is_none() ); // Look for an event that doesn't exist. 
- assert!(room_event_cache.rfind_map_event_in_memory_by(|_| None::<()>).await.is_none()); + assert!( + room_event_cache.rfind_map_event_in_memory_by(|_| None::<()>).await.unwrap().is_none() + ); } } diff --git a/crates/matrix-sdk/src/latest_events/latest_event.rs b/crates/matrix-sdk/src/latest_events/latest_event.rs index 8ba63360ec7..6edd284bece 100644 --- a/crates/matrix-sdk/src/latest_events/latest_event.rs +++ b/crates/matrix-sdk/src/latest_events/latest_event.rs @@ -519,13 +519,16 @@ impl LatestEventValueBuilder { own_user_id: Option<&UserId>, power_levels: Option<&RoomPowerLevels>, ) -> LatestEventValue { - room_event_cache + if let Ok(Some(event)) = room_event_cache .rfind_map_event_in_memory_by(|event| { filter_timeline_event(event, own_user_id, power_levels).then(|| event.clone()) }) .await - .map(LatestEventValue::Remote) - .unwrap_or_default() + { + LatestEventValue::Remote(event) + } else { + LatestEventValue::default() + } } /// Create a new [`LatestEventValue::LocalIsSending`] or diff --git a/crates/matrix-sdk/src/latest_events/mod.rs b/crates/matrix-sdk/src/latest_events/mod.rs index 5443bbff7e6..3d292429064 100644 --- a/crates/matrix-sdk/src/latest_events/mod.rs +++ b/crates/matrix-sdk/src/latest_events/mod.rs @@ -1154,7 +1154,9 @@ mod tests { .event_cache_store() .lock() .await - .unwrap() + .expect("Could not acquire the event cache lock") + .as_clean() + .expect("Could not acquire a clean event cache lock") .handle_linked_chunk_updates( LinkedChunkId::Room(&room_id), vec![ diff --git a/crates/matrix-sdk/src/room/mod.rs b/crates/matrix-sdk/src/room/mod.rs index 98435f5b9c3..5040a1934ed 100644 --- a/crates/matrix-sdk/src/room/mod.rs +++ b/crates/matrix-sdk/src/room/mod.rs @@ -816,7 +816,7 @@ impl Room { ) -> Result { match self.event_cache().await { Ok((event_cache, _drop_handles)) => { - if let Some(event) = event_cache.find_event(event_id).await { + if let Some(event) = event_cache.find_event(event_id).await? 
{ return Ok(event); } // Fallthrough: try with a request. diff --git a/crates/matrix-sdk/src/search_index/mod.rs b/crates/matrix-sdk/src/search_index/mod.rs index 2493afa14bd..6f696e42671 100644 --- a/crates/matrix-sdk/src/search_index/mod.rs +++ b/crates/matrix-sdk/src/search_index/mod.rs @@ -218,7 +218,7 @@ async fn get_most_recent_edit( ) -> Option { use ruma::events::{AnySyncTimelineEvent, relation::RelationType}; - let Some((original_ev, related)) = + let Ok(Some((original_ev, related))) = cache.find_event_with_relations(original, Some(vec![RelationType::Replacement])).await else { debug!("Couldn't find relations for {}", original); @@ -275,7 +275,7 @@ async fn handle_room_redaction( rules: &RedactionRules, ) -> Option { if let Some(redacted_event_id) = event.redacts(rules) - && let Some(redacted_event) = cache.find_event(redacted_event_id).await + && let Ok(Some(redacted_event)) = cache.find_event(redacted_event_id).await && let Ok(AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomMessage( redacted_event, ))) = redacted_event.raw().deserialize() diff --git a/crates/matrix-sdk/src/sliding_sync/client.rs b/crates/matrix-sdk/src/sliding_sync/client.rs index 7f96fbef764..ab689b76899 100644 --- a/crates/matrix-sdk/src/sliding_sync/client.rs +++ b/crates/matrix-sdk/src/sliding_sync/client.rs @@ -355,7 +355,7 @@ async fn handle_receipts_extension( return Ok::<_, crate::Error>(None); }; - let previous_events = room_event_cache.events().await; + let previous_events = room_event_cache.events().await?; let receipt_event = client .base_client() diff --git a/crates/matrix-sdk/tests/integration/event_cache/mod.rs b/crates/matrix-sdk/tests/integration/event_cache/mod.rs index 6e36881350e..9fd44fdeb11 100644 --- a/crates/matrix-sdk/tests/integration/event_cache/mod.rs +++ b/crates/matrix-sdk/tests/integration/event_cache/mod.rs @@ -74,7 +74,7 @@ async fn test_event_cache_receives_events() { // If I create a room event subscriber, let (room_event_cache, 
_drop_handles) = room.event_cache().await.unwrap(); - let (events, mut subscriber) = room_event_cache.subscribe().await; + let (events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); // Then at first it's empty, and the subscriber doesn't yield anything. assert!(events.is_empty()); @@ -148,7 +148,7 @@ async fn test_ignored_unignored() { // And subscribe to the room, let room = client.get_room(room_id).unwrap(); let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut room_stream) = room_event_cache.subscribe().await; + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); // Then at first it contains the two initial events. assert_eq!(events.len(), 2); @@ -199,7 +199,7 @@ async fn test_ignored_unignored() { { let room = client.get_room(other_room_id).unwrap(); let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let events = room_event_cache.events().await; + let events = room_event_cache.events().await.unwrap(); assert!(events.is_empty()); } @@ -254,7 +254,7 @@ async fn test_backpaginate_once() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut room_stream) = room_event_cache.subscribe().await; + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); // This is racy: either the initial message has been processed by the event // cache (and no room updates will happen in this case), or it hasn't, and @@ -343,7 +343,7 @@ async fn test_backpaginate_many_times_with_many_iterations() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut room_stream) = room_event_cache.subscribe().await; + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); // This is racy: either the initial message has been processed by the event // cache (and no room updates will happen in this case), or it hasn't, and @@ -424,7 +424,7 @@ async fn 
test_backpaginate_many_times_with_many_iterations() { assert!(room_stream.is_empty()); // And next time I'll open the room, I'll get the events in the right order. - let (events, room_stream) = room_event_cache.subscribe().await; + let (events, room_stream) = room_event_cache.subscribe().await.unwrap(); assert_event_matches_msg(&events[0], "oh well"); assert_event_matches_msg(&events[1], "hello"); @@ -465,7 +465,7 @@ async fn test_backpaginate_many_times_with_one_iteration() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut room_stream) = room_event_cache.subscribe().await; + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); // This is racy: either the initial message has been processed by the event // cache (and no room updates will happen in this case), or it hasn't, and @@ -543,7 +543,7 @@ async fn test_backpaginate_many_times_with_one_iteration() { }); // And next time I'll open the room, I'll get the events in the right order. 
- let (events, room_stream) = room_event_cache.subscribe().await; + let (events, room_stream) = room_event_cache.subscribe().await.unwrap(); assert_event_matches_msg(&events[0], "oh well"); assert_event_matches_msg(&events[1], "hello"); @@ -585,7 +585,7 @@ async fn test_reset_while_backpaginating() { let (room_event_cache, _drop_handles) = client.get_room(room_id).unwrap().event_cache().await.unwrap(); - let (events, mut room_stream) = room_event_cache.subscribe().await; + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); wait_for_initial_events(events, &mut room_stream).await; @@ -698,7 +698,7 @@ async fn test_backpaginating_without_token() { let room = server.sync_joined_room(&client, room_id).await; let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut room_stream) = room_event_cache.subscribe().await; + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); assert!(events.is_empty()); assert!(room_stream.is_empty()); @@ -755,7 +755,7 @@ async fn test_limited_timeline_resets_pagination() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut room_stream) = room_event_cache.subscribe().await; + let (events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); assert!(events.is_empty()); assert!(room_stream.is_empty()); @@ -849,7 +849,7 @@ async fn test_limited_timeline_with_storage() { ) .await; - let (initial_events, mut subscriber) = room_event_cache.subscribe().await; + let (initial_events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); // This is racy: either the sync has been handled, or it hasn't yet. if initial_events.is_empty() { @@ -980,7 +980,7 @@ async fn test_backpaginate_with_no_initial_events() { pagination.run_backwards_once(20).await.unwrap(); // The linked chunk should contain the events in the correct order. 
- let events = room_event_cache.events().await; + let events = room_event_cache.events().await.unwrap(); assert_eq!(events.len(), 3, "{events:?}"); assert_event_matches_msg(&events[0], "oh well"); @@ -1015,7 +1015,7 @@ async fn test_backpaginate_replace_empty_gap() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut stream) = room_event_cache.subscribe().await; + let (events, mut stream) = room_event_cache.subscribe().await.unwrap(); wait_for_initial_events(events, &mut stream).await; // The first back-pagination will return a previous-batch token, but no events. @@ -1043,7 +1043,7 @@ async fn test_backpaginate_replace_empty_gap() { pagination.run_backwards_once(20).await.unwrap(); // The linked chunk should contain the events in the correct order. - let events = room_event_cache.events().await; + let events = room_event_cache.events().await.unwrap(); assert_event_matches_msg(&events[0], "hello"); assert_event_matches_msg(&events[1], "world"); @@ -1083,7 +1083,7 @@ async fn test_no_gap_stored_after_deduplicated_sync() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut stream) = room_event_cache.subscribe().await; + let (events, mut stream) = room_event_cache.subscribe().await.unwrap(); if events.is_empty() { assert_let_timeout!(Ok(RoomEventCacheUpdate::UpdateTimelineEvents { .. 
}) = stream.recv()); @@ -1126,7 +1126,7 @@ async fn test_no_gap_stored_after_deduplicated_sync() { assert!(stream.is_empty()); { - let events = room_event_cache.events().await; + let events = room_event_cache.events().await.unwrap(); assert_event_matches_msg(&events[0], "hello"); assert_event_matches_msg(&events[1], "world"); assert_event_matches_msg(&events[2], "sup"); @@ -1145,7 +1145,7 @@ async fn test_no_gap_stored_after_deduplicated_sync() { assert!(outcome.reached_start); { - let events = room_event_cache.events().await; + let events = room_event_cache.events().await.unwrap(); assert_event_matches_msg(&events[0], "hello"); assert_event_matches_msg(&events[1], "world"); assert_event_matches_msg(&events[2], "sup"); @@ -1180,7 +1180,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut stream) = room_event_cache.subscribe().await; + let (events, mut stream) = room_event_cache.subscribe().await.unwrap(); if events.is_empty() { assert_let_timeout!(Ok(RoomEventCacheUpdate::UpdateTimelineEvents { .. }) = stream.recv()); @@ -1268,7 +1268,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() { // we shouldn't have to, since it is useless; all events were deduplicated // from the previous pagination. - let (events, stream) = room_event_cache.subscribe().await; + let (events, stream) = room_event_cache.subscribe().await.unwrap(); assert_event_matches_msg(&events[0], "hello"); assert_event_matches_msg(&events[1], "world"); assert_event_matches_msg(&events[2], "sup"); @@ -1304,7 +1304,7 @@ async fn test_dont_delete_gap_that_wasnt_inserted() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (events, mut stream) = room_event_cache.subscribe().await; + let (events, mut stream) = room_event_cache.subscribe().await.unwrap(); if events.is_empty() { assert_let_timeout!(Ok(RoomEventCacheUpdate::UpdateTimelineEvents { .. 
}) = stream.recv()); } @@ -1378,7 +1378,7 @@ async fn test_apply_redaction_when_redaction_comes_later() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); // Wait for the event. - let (events, mut subscriber) = room_event_cache.subscribe().await; + let (events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); if events.is_empty() { assert_let_timeout!( Ok(RoomEventCacheUpdate::UpdateTimelineEvents { .. }) = subscriber.recv() @@ -1438,7 +1438,7 @@ async fn test_apply_redaction_when_redaction_comes_later() { let room = client.get_room(room_id).unwrap(); let (cache, _drop_handles) = room.event_cache().await.unwrap(); - let events = cache.events().await; + let events = cache.events().await.unwrap(); // We have two events: assert_eq!(events.len(), 2); @@ -1470,6 +1470,8 @@ async fn test_apply_redaction_on_an_in_store_event() { // 1. a chunk of 1 item, the one we are going to redact! // 2. a chunk of 1 item, the chunk that is going to be loaded. event_cache_store + .as_clean() + .unwrap() .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -1512,7 +1514,7 @@ async fn test_apply_redaction_on_an_in_store_event() { let room = mock_server.sync_joined_room(&client, room_id).await; let (room_event_cache, _room_event_cache_drop_handle) = room.event_cache().await.unwrap(); - let (initial_updates, mut updates_stream) = room_event_cache.subscribe().await; + let (initial_updates, mut updates_stream) = room_event_cache.subscribe().await.unwrap(); // Initial events! 
// @@ -1603,7 +1605,7 @@ async fn test_apply_redaction_when_redacted_and_redaction_are_in_same_sync() { let room_id = room_id!("!omelette:fromage.fr"); let room = server.sync_joined_room(&client, room_id).await; let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (_events, mut subscriber) = room_event_cache.subscribe().await; + let (_events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); let f = EventFactory::new().room(room_id).sender(user_id!("@a:b.c")); @@ -1676,6 +1678,8 @@ async fn test_lazy_loading() { // 2. a chunk of a gap // 1. a chunk of 6 items event_cache_store + .as_clean() + .unwrap() .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -1753,7 +1757,7 @@ async fn test_lazy_loading() { let room = mock_server.sync_joined_room(&client, room_id).await; let (room_event_cache, _room_event_cache_drop_handle) = room.event_cache().await.unwrap(); - let (initial_updates, mut updates_stream) = room_event_cache.subscribe().await; + let (initial_updates, mut updates_stream) = room_event_cache.subscribe().await.unwrap(); // Initial events! // @@ -2040,6 +2044,8 @@ async fn test_deduplication() { // 2. a chunk of 4 items // 1. a chunk of 3 items event_cache_store + .as_clean() + .unwrap() .handle_linked_chunk_updates( LinkedChunkId::Room(room_id), vec![ @@ -2092,7 +2098,7 @@ async fn test_deduplication() { let room = mock_server.sync_joined_room(&client, room_id).await; let (room_event_cache, _room_event_cache_drop_handle) = room.event_cache().await.unwrap(); - let (initial_updates, mut updates_stream) = room_event_cache.subscribe().await; + let (initial_updates, mut updates_stream) = room_event_cache.subscribe().await.unwrap(); // One chunk has been loaded, so there are 3 events in memory. 
{ @@ -2221,7 +2227,7 @@ async fn test_timeline_then_empty_timeline_then_deduplication_with_storage() { ]; let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (initial_events, mut subscriber) = room_event_cache.subscribe().await; + let (initial_events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); assert!(initial_events.is_empty()); // Receive a sync with only the latest events. @@ -2383,7 +2389,7 @@ async fn test_clear_all_rooms() { .await; let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (initial, mut room_updates) = room_event_cache.subscribe().await; + let (initial, mut room_updates) = room_event_cache.subscribe().await.unwrap(); let mut initial = Vector::from(initial); // Wait for the ev1 event. @@ -2466,7 +2472,7 @@ async fn test_sync_while_back_paginate() { client.event_cache().subscribe().unwrap(); let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (initial_events, mut subscriber) = room_event_cache.subscribe().await; + let (initial_events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); assert!(initial_events.is_empty()); // Mock /messages in case we use the prev_batch token from sync. @@ -2577,13 +2583,13 @@ async fn test_relations_ordering() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (initial_events, mut listener) = room_event_cache.subscribe().await; + let (initial_events, mut listener) = room_event_cache.subscribe().await.unwrap(); assert_eq!(initial_events.len(), 1); assert!(listener.recv().now_or_never().is_none()); // Sanity check: there are no relations for the target event yet. 
let (_, relations) = - room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap(); + room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap().unwrap(); assert!(relations.is_empty()); let edit2 = event_id!("$edit2"); @@ -2631,7 +2637,7 @@ async fn test_relations_ordering() { // At this point, relations are known for the target event. let (_, relations) = - room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap(); + room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap().unwrap(); assert_eq!(relations.len(), 2); // And the edit events are correctly ordered according to their position in the // linked chunk. @@ -2662,7 +2668,7 @@ async fn test_relations_ordering() { // Relations are returned accordingly. let (_, relations) = - room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap(); + room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap().unwrap(); assert_eq!(relations.len(), 3); assert_eq!(relations[0].event_id().unwrap(), edit2); assert_eq!(relations[1].event_id().unwrap(), edit3); @@ -2683,7 +2689,7 @@ async fn test_relations_ordering() { room.event(edit5, None).await.unwrap(); let (_, relations) = - room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap(); + room_event_cache.find_event_with_relations(target_event_id, None).await.unwrap().unwrap(); assert_eq!(relations.len(), 4); assert_eq!(relations[0].event_id().unwrap(), edit5); assert_eq!(relations[1].event_id().unwrap(), edit2); diff --git a/crates/matrix-sdk/tests/integration/event_cache/threads.rs b/crates/matrix-sdk/tests/integration/event_cache/threads.rs index e15b7d24036..6039265f874 100644 --- a/crates/matrix-sdk/tests/integration/event_cache/threads.rs +++ b/crates/matrix-sdk/tests/integration/event_cache/threads.rs @@ -86,7 +86,7 @@ async fn test_thread_can_paginate_even_if_seen_sync_event() { let (room_event_cache, _drop_handles) = 
room.event_cache().await.unwrap(); let (thread_events, mut thread_stream) = - room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await.unwrap(); // Sanity check: the sync event is added to the thread. let mut thread_events = wait_for_initial_events(thread_events, &mut thread_stream).await; @@ -162,7 +162,7 @@ async fn test_ignored_user_empties_threads() { // And we subscribe to the thread, let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); let (events, mut thread_stream) = - room_event_cache.subscribe_to_thread(thread_root.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root.to_owned()).await.unwrap(); // Then, at first, the thread contains the two initial events. let events = wait_for_initial_events(events, &mut thread_stream).await; @@ -238,19 +238,19 @@ async fn test_gappy_sync_empties_all_threads() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); let (thread1_events, mut thread1_stream) = - room_event_cache.subscribe_to_thread(thread_root1.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root1.to_owned()).await.unwrap(); assert!(thread1_events.is_empty()); assert!(thread1_stream.is_empty()); let (thread2_events, mut thread2_stream) = - room_event_cache.subscribe_to_thread(thread_root2.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root2.to_owned()).await.unwrap(); assert!(thread2_events.is_empty()); assert!(thread2_stream.is_empty()); // Also subscribe to the room. - let (room_events, mut room_stream) = room_event_cache.subscribe().await; + let (room_events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); assert!(room_events.is_empty()); assert!(room_stream.is_empty()); @@ -352,7 +352,7 @@ async fn test_gappy_sync_empties_all_threads() { // - but the latest reply is now `None`, as we're not sure that the latest reply // is still the latest one. 
- let reloaded_thread1 = room_event_cache.find_event(thread_root1).await.unwrap(); + let reloaded_thread1 = room_event_cache.find_event(thread_root1).await.unwrap().unwrap(); assert_let!(ThreadSummaryStatus::Some(summary) = reloaded_thread1.thread_summary); assert_eq!(summary.num_replies, 2); assert!(summary.latest_reply.is_none()); @@ -399,7 +399,7 @@ async fn test_deduplication() { // And we subscribe to the thread, let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); let (events, mut thread_stream) = - room_event_cache.subscribe_to_thread(thread_root.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root.to_owned()).await.unwrap(); // Then, at first, the thread contains the two initial events. let events = wait_for_initial_events(events, &mut thread_stream).await; @@ -487,7 +487,7 @@ async fn thread_subscription_test_setup() -> ThreadSubscriptionTestSetup { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - let (initial_events, mut subscriber) = room_event_cache.subscribe().await; + let (initial_events, mut subscriber) = room_event_cache.subscribe().await.unwrap(); assert!(initial_events.is_empty()); assert!(subscriber.is_empty()); @@ -647,7 +647,7 @@ async fn test_auto_subscribe_on_thread_paginate() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); let (thread_events, mut thread_stream) = - room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await.unwrap(); // Sanity check: the sync event is added to the thread. 
let mut thread_events = wait_for_initial_events(thread_events, &mut thread_stream).await; @@ -729,7 +729,7 @@ async fn test_auto_subscribe_on_thread_paginate_root_event() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); let (thread_events, mut thread_stream) = - room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await.unwrap(); // Sanity check: the sync event is added to the thread. let mut thread_events = wait_for_initial_events(thread_events, &mut thread_stream).await; @@ -802,7 +802,7 @@ async fn test_redact_touches_threads() { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); let (thread_events, mut thread_stream) = - room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await; + room_event_cache.subscribe_to_thread(thread_root_id.to_owned()).await.unwrap(); // Receive a thread root, and a threaded reply. s.server @@ -823,7 +823,7 @@ async fn test_redact_touches_threads() { assert_eq!(thread_events.remove(0).event_id().as_ref(), Some(&thread_resp1)); assert_eq!(thread_events.remove(0).event_id().as_ref(), Some(&thread_resp2)); - let (room_events, mut room_stream) = room_event_cache.subscribe().await; + let (room_events, mut room_stream) = room_event_cache.subscribe().await.unwrap(); assert_eq!(room_events.len(), 3); { diff --git a/crates/matrix-sdk/tests/integration/room/common.rs b/crates/matrix-sdk/tests/integration/room/common.rs index 42d13f6bb49..985927d17c0 100644 --- a/crates/matrix-sdk/tests/integration/room/common.rs +++ b/crates/matrix-sdk/tests/integration/room/common.rs @@ -657,7 +657,7 @@ async fn test_event() { // Requested event was saved to the cache let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - assert!(room_event_cache.find_event(event_id).await.is_some()); + assert!(room_event_cache.find_event(event_id).await.unwrap().is_some()); // So we can reload it without 
hitting the network. let timeline_event = room.load_or_fetch_event(event_id, None).await.unwrap(); @@ -730,9 +730,9 @@ async fn test_event_with_context() { // Requested event and their context ones were saved to the cache let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - assert!(room_event_cache.find_event(event_id).await.is_some()); - assert!(room_event_cache.find_event(prev_event_id).await.is_some()); - assert!(room_event_cache.find_event(next_event_id).await.is_some()); + assert!(room_event_cache.find_event(event_id).await.unwrap().is_some()); + assert!(room_event_cache.find_event(prev_event_id).await.unwrap().is_some()); + assert!(room_event_cache.find_event(next_event_id).await.unwrap().is_some()); } #[async_test] diff --git a/crates/matrix-sdk/tests/integration/room/left.rs b/crates/matrix-sdk/tests/integration/room/left.rs index 8b2e403d7d5..02faa287d8c 100644 --- a/crates/matrix-sdk/tests/integration/room/left.rs +++ b/crates/matrix-sdk/tests/integration/room/left.rs @@ -57,8 +57,13 @@ async fn test_forget_non_direct_room() { { // There is some data in the cache store. - let event_cache_store = client.event_cache_store().lock().await.unwrap(); - let room_data = event_cache_store + let room_data = client + .event_cache_store() + .lock() + .await + .unwrap() + .as_clean() + .unwrap() .load_all_chunks(LinkedChunkId::Room(&DEFAULT_TEST_ROOM_ID)) .await .unwrap(); @@ -74,8 +79,13 @@ async fn test_forget_non_direct_room() { { // Data in the event cache store has been removed. - let event_cache_store = client.event_cache_store().lock().await.unwrap(); - let room_data = event_cache_store + let room_data = client + .event_cache_store() + .lock() + .await + .unwrap() + .as_clean() + .unwrap() .load_all_chunks(LinkedChunkId::Room(&DEFAULT_TEST_ROOM_ID)) .await .unwrap(); @@ -119,8 +129,13 @@ async fn test_forget_banned_room() { { // There is some data in the cache store. 
- let event_cache_store = client.event_cache_store().lock().await.unwrap(); - let room_data = event_cache_store + let room_data = client + .event_cache_store() + .lock() + .await + .unwrap() + .as_clean() + .unwrap() .load_all_chunks(LinkedChunkId::Room(&DEFAULT_TEST_ROOM_ID)) .await .unwrap(); @@ -140,8 +155,13 @@ async fn test_forget_banned_room() { { // Data in the event cache store has been removed. - let event_cache_store = client.event_cache_store().lock().await.unwrap(); - let room_data = event_cache_store + let room_data = client + .event_cache_store() + .lock() + .await + .unwrap() + .as_clean() + .unwrap() .load_all_chunks(LinkedChunkId::Room(&DEFAULT_TEST_ROOM_ID)) .await .unwrap(); diff --git a/labs/multiverse/src/main.rs b/labs/multiverse/src/main.rs index c569ca07a0d..226ab353758 100644 --- a/labs/multiverse/src/main.rs +++ b/labs/multiverse/src/main.rs @@ -379,7 +379,7 @@ impl App { async fn index_event_cache( client: &Client, update_sender: &Sender<(bool, IndexingMessage)>, - store: EventCacheStoreLockGuard<'_>, + store: &EventCacheStoreLockGuard, search_index_guard: &mut SearchIndexGuard<'_>, mut count: usize, ) -> Result { @@ -525,9 +525,14 @@ impl App { debug!("Start indexing from the event cache."); // First index everything in the cache - let Ok(count) = - App::index_event_cache(&client, &update_sender, store, &mut search_index_guard, count) - .await + let Ok(count) = App::index_event_cache( + &client, + &update_sender, + store.as_clean().expect("Only one process should access the event cache store"), + &mut search_index_guard, + count, + ) + .await else { debug!("Quitting index task."); return; @@ -948,6 +953,9 @@ async fn get_events_from_event_ids( event_ids: Vec, ) -> Vec { if let Ok(cache_lock) = client.event_cache_store().lock().await { + let cache_lock = + cache_lock.as_clean().expect("Only one process must access the event cache store"); + futures_util::future::join_all(event_ids.iter().map(|event_id| async { let event_id = 
event_id.clone(); match cache_lock.find_event(room.room_id(), &event_id).await { diff --git a/labs/multiverse/src/widgets/room_view/details/events.rs b/labs/multiverse/src/widgets/room_view/details/events.rs index bdfa5de33ce..9ea9acab3e7 100644 --- a/labs/multiverse/src/widgets/room_view/details/events.rs +++ b/labs/multiverse/src/widgets/room_view/details/events.rs @@ -28,7 +28,7 @@ impl Widget for &mut EventsView<'_> { let events = tokio::task::block_in_place(|| { Handle::current().block_on(async { let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap(); - room_event_cache.events().await + room_event_cache.events().await.unwrap() }) }); From 12dfa6ecdea84f50a6f49342b7ddebc0a4fe0ebc Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 18 Nov 2025 15:21:49 +0100 Subject: [PATCH 14/28] feat(sdk): Reset `RoomEventCacheState` when the cross-process lock is dirty. This patch updates the `RoomEventCacheStateLock::read` and `write` methods to reset the state when the cross-process lock is dirty. --- crates/matrix-sdk/src/event_cache/room/mod.rs | 55 +++++++++++++++---- 1 file changed, 44 insertions(+), 11 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 1906b90722c..91efe1117dc 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -813,25 +813,58 @@ mod private { } pub async fn read(&self) -> Result, EventCacheError> { - let state_guard = self.locked_state.read().await; - let store_guard = match state_guard.store.lock().await? { - EventCacheStoreLockState::Clean(guard) => guard, - EventCacheStoreLockState::Dirty(_guard) => todo!("Dirty lock"), - }; + // Take a write-lock in case the lock is dirty and we need to reset the state. + let state_guard = self.locked_state.write().await; - Ok(RoomEventCacheStateLockReadGuard { state: state_guard, store: store_guard }) + match state_guard.store.lock().await? 
{ + EventCacheStoreLockState::Clean(store_guard) => { + Ok(RoomEventCacheStateLockReadGuard { + // Downgrade to a read-lock. + state: state_guard.downgrade(), + store: store_guard, + }) + } + EventCacheStoreLockState::Dirty(store_guard) => { + let mut guard = RoomEventCacheStateLockWriteGuard { + state: state_guard, + store: store_guard, + }; + guard.shrink_to_last_chunk().await?; + + // All good now, mark the cross-process lock as non-dirty. + EventCacheStoreLockGuard::clear_dirty(&guard.store); + + Ok(RoomEventCacheStateLockReadGuard { + // Downgrade to a read-lock. + state: guard.state.downgrade(), + store: guard.store, + }) + } + } } pub async fn write( &self, ) -> Result, EventCacheError> { let state_guard = self.locked_state.write().await; - let store_guard = match state_guard.store.lock().await? { - EventCacheStoreLockState::Clean(guard) => guard, - EventCacheStoreLockState::Dirty(_guard) => todo!("Dirty lock"), - }; - Ok(RoomEventCacheStateLockWriteGuard { state: state_guard, store: store_guard }) + match state_guard.store.lock().await? { + EventCacheStoreLockState::Clean(store_guard) => { + Ok(RoomEventCacheStateLockWriteGuard { state: state_guard, store: store_guard }) + } + EventCacheStoreLockState::Dirty(store_guard) => { + let mut guard = RoomEventCacheStateLockWriteGuard { + state: state_guard, + store: store_guard, + }; + guard.shrink_to_last_chunk().await?; + + // All good now, mark the cross-process lock as non-dirty. + EventCacheStoreLockGuard::clear_dirty(&guard.store); + + Ok(guard) + } + } } } From ab5099a1e7ad63818391b88ab64bfc39894b97c3 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 18 Nov 2025 15:39:45 +0100 Subject: [PATCH 15/28] doc(sdk): Add missing or fix documentation. 
--- crates/matrix-sdk/src/event_cache/room/mod.rs | 38 +++++++++++++++++-- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 91efe1117dc..d385d6a6044 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -717,9 +717,10 @@ mod private { /// [`EventLinkedChunk`] relies on a [`LinkedChunk`] to store all /// events. Only the last chunk will be loaded. It means the /// events are loaded from the most recent to the oldest. To - /// load more events, see [`Self::load_more_events_backwards`]. + /// load more events, see [`RoomPagination`]. /// /// [`LinkedChunk`]: matrix_sdk_common::linked_chunk::LinkedChunk + /// [`RoomPagination`]: super::RoomPagination pub async fn new( room_id: OwnedRoomId, room_version_rules: RoomVersionRules, @@ -729,10 +730,11 @@ mod private { pagination_status: SharedObservable, ) -> Result { let store_guard = match store.lock().await? { - // + // Lock is clean: all good! EventCacheStoreLockState::Clean(guard) => guard, - // + // Lock is dirty, not a problem, it's the first time we are creating this state, no + // need to refresh. EventCacheStoreLockState::Dirty(guard) => { EventCacheStoreLockGuard::clear_dirty(&guard); @@ -812,6 +814,15 @@ mod private { }) } + /// Lock this [`RoomEventCacheStateLock`] with per-thread shared access. + /// + /// This method locks the per-thread lock over the state, and then locks + /// the cross-process lock over the store. It returns an RAII guard + /// which will drop the read access to the state and to the store when + /// dropped. + /// + /// If the cross-process lock over the store is dirty (see + /// [`EventCacheStoreLockState`]), the state is reset to the last chunk. pub async fn read(&self) -> Result, EventCacheError> { // Take a write-lock in case the lock is dirty and we need to reset the state. 
let state_guard = self.locked_state.write().await; @@ -843,6 +854,16 @@ mod private { } } + /// Lock this [`RoomEventCacheStateLock`] with exclusive per-thread + /// write access. + /// + /// This method locks the per-thread lock over the state, and then locks + /// the cross-process lock over the store. It returns an RAII guard + /// which will drop the write access to the state and to the store when + /// dropped. + /// + /// If the cross-process lock over the store is dirty (see + /// [`EventCacheStoreLockState`]), the state is reset to the last chunk. pub async fn write( &self, ) -> Result, EventCacheError> { @@ -868,13 +889,23 @@ mod private { } } + /// The read lock guard returned by [`RoomEventCacheStateLock::read`]. pub struct RoomEventCacheStateLockReadGuard<'a> { + /// The per-thread read lock guard over the + /// [`RoomEventCacheStateLockInner`]. state: RwLockReadGuard<'a, RoomEventCacheStateLockInner>, + + /// The cross-process lock guard over the store. store: EventCacheStoreLockGuard, } + /// The write lock guard returned by [`RoomEventCacheStateLock::write`]. pub struct RoomEventCacheStateLockWriteGuard<'a> { + /// The per-thread write lock guard over the + /// [`RoomEventCacheStateLockInner`]. state: RwLockWriteGuard<'a, RoomEventCacheStateLockInner>, + + /// The cross-process lock guard over the store. store: EventCacheStoreLockGuard, } @@ -947,6 +978,7 @@ mod private { &mut self.state.room_linked_chunk } + /// Get a reference to the `waited_for_initial_prev_token` atomic bool. pub fn waited_for_initial_prev_token(&self) -> &Arc { &self.state.waited_for_initial_prev_token } From deb4dd740903ed4c9ed9dfbfbcac009270fbd819 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 18 Nov 2025 15:42:02 +0100 Subject: [PATCH 16/28] fix(sdk): Remove a warning for `wasm32`. 
--- crates/matrix-sdk/src/event_cache/room/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index d385d6a6044..1830e8231b2 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -974,6 +974,7 @@ mod private { impl<'a> RoomEventCacheStateLockWriteGuard<'a> { /// Returns a write reference to the underlying room linked chunk. + #[cfg(any(feature = "e2e-encryption", test))] pub fn room_linked_chunk(&mut self) -> &mut EventLinkedChunk { &mut self.state.room_linked_chunk } From a085814b7441d0089c85572bdd6bb2709bcdac2e Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 18 Nov 2025 18:58:43 +0100 Subject: [PATCH 17/28] test(common): Test dirtiness of the cross-process lock. --- .../src/cross_process_lock.rs | 129 ++++++++++++++++-- 1 file changed, 117 insertions(+), 12 deletions(-) diff --git a/crates/matrix-sdk-common/src/cross_process_lock.rs b/crates/matrix-sdk-common/src/cross_process_lock.rs index e55c37d1e5c..88fa1e41b14 100644 --- a/crates/matrix-sdk-common/src/cross_process_lock.rs +++ b/crates/matrix-sdk-common/src/cross_process_lock.rs @@ -596,6 +596,7 @@ pub enum CrossProcessLockError { mod tests { use std::{ collections::HashMap, + ops::Not, sync::{Arc, RwLock, atomic}, }; @@ -603,6 +604,7 @@ mod tests { use matrix_sdk_test_macros::async_test; use tokio::{ spawn, + task::yield_now, time::{Duration, sleep}, }; @@ -665,17 +667,23 @@ mod tests { // The lock plain works when used with a single holder. let guard = lock.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Clean(_)); + assert!(lock.is_dirty().not()); assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 1); // Releasing works. 
release_lock(guard).await; + assert!(lock.is_dirty().not()); assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 0); // Spin locking on the same lock always works, assuming no concurrent access. let guard = lock.spin_lock(None).await?.expect("spin lock must be obtained successfully"); + assert!(lock.is_dirty().not()); + assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 1); // Releasing still works. release_lock(guard).await; + assert!(lock.is_dirty().not()); assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 0); Ok(()) @@ -686,19 +694,23 @@ mod tests { let store = TestStore::default(); let lock = CrossProcessLock::new(store.clone(), "key".to_owned(), "first".to_owned()); - // When a lock is obtained... - let _guard = lock.try_lock_once().await?.expect("lock must be obtained successfully"); + // When a lock is obtained… + let guard = lock.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Clean(_)); + assert!(lock.is_dirty().not()); assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 1); - // But then forgotten... (note: no need to release the guard) + // But then forgotten… (note: no need to release the guard) drop(lock); - // And when rematerializing the lock with the same key/value... + // And when rematerializing the lock with the same key/value… let lock = CrossProcessLock::new(store.clone(), "key".to_owned(), "first".to_owned()); // We still got it. 
- let _guard = + let guard = lock.try_lock_once().await?.expect("lock (again) must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Clean(_)); + assert!(lock.is_dirty().not()); assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 1); Ok(()) @@ -711,7 +723,10 @@ mod tests { // Taking the lock twice… let guard1 = lock.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard1, CrossProcessLockState::Clean(_)); let guard2 = lock.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard2, CrossProcessLockState::Clean(_)); + assert!(lock.is_dirty().not()); assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 2); @@ -722,6 +737,8 @@ mod tests { release_lock(guard2).await; assert_eq!(lock.num_holders.load(atomic::Ordering::SeqCst), 0); + assert!(lock.is_dirty().not()); + Ok(()) } @@ -731,27 +748,36 @@ mod tests { let lock1 = CrossProcessLock::new(store.clone(), "key".to_owned(), "first".to_owned()); let lock2 = CrossProcessLock::new(store, "key".to_owned(), "second".to_owned()); - // When the first process takes the lock... + // `lock1` acquires the lock. let guard1 = lock1.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard1, CrossProcessLockState::Clean(_)); + assert!(lock1.is_dirty().not()); - // The second can't take it immediately. - let _err2 = lock2.try_lock_once().await?.expect_err("lock must NOT be obtained"); + // `lock2` cannot acquire the lock. + let err = lock2.try_lock_once().await?.expect_err("lock must NOT be obtained"); + assert_matches!(err, CrossProcessLockUnobtained::Busy); + // `lock2` is waiting in a task. 
let lock2_clone = lock2.clone(); - let handle = spawn(async move { lock2_clone.spin_lock(Some(1000)).await }); + let task = spawn(async move { lock2_clone.spin_lock(Some(500)).await }); sleep(Duration::from_millis(100)).await; drop(guard1); - // lock2 in the background manages to get the lock at some point. - let _guard2 = handle + // Once `lock1` is released, `lock2` managed to obtain it. + let guard2 = task .await .expect("join handle is properly awaited") .expect("lock is successfully attempted") .expect("lock must be obtained successfully"); + assert_matches!(guard2, CrossProcessLockState::Clean(_)); + + // `lock1` and `lock2` are both clean! + assert!(lock1.is_dirty().not()); + assert!(lock2.is_dirty().not()); - // Now if lock1 tries to get the lock with a small timeout, it will fail. + // Now if `lock1` tries to obtain the lock with a small timeout, it will fail. assert_matches!( lock1.spin_lock(Some(200)).await, Ok(Err(CrossProcessLockUnobtained::TimedOut)) @@ -759,6 +785,85 @@ mod tests { Ok(()) } + + #[async_test] + async fn test_multiple_processes_up_to_dirty() -> TestResult { + let store = TestStore::default(); + let lock1 = CrossProcessLock::new(store.clone(), "key".to_owned(), "first".to_owned()); + let lock2 = CrossProcessLock::new(store, "key".to_owned(), "second".to_owned()); + + // Obtain `lock1` once. + { + let guard = lock1.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Clean(_)); + assert!(lock1.is_dirty().not()); + drop(guard); + + yield_now().await; + } + + // Obtain `lock2` once. + { + let guard = lock2.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Clean(_)); + assert!(lock1.is_dirty().not()); + drop(guard); + + yield_now().await; + } + + for _ in 0..3 { + // Obtain `lock1` once more. Now it's dirty because `lock2` has acquired the + // lock meanwhile. 
+ { + let guard = + lock1.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Dirty(_)); + assert!(lock1.is_dirty()); + + drop(guard); + yield_now().await; + } + + // Obtain `lock1` once more! It's still dirty because it has not been marked as + // non-dirty. + { + let guard = + lock1.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Dirty(_)); + assert!(lock1.is_dirty()); + lock1.clear_dirty(); + + drop(guard); + yield_now().await; + } + + // Obtain `lock1` once more. Now it's clean! + { + let guard = + lock1.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Clean(_)); + assert!(lock1.is_dirty().not()); + + drop(guard); + yield_now().await; + } + + // Same dance with `lock2`! + { + let guard = + lock2.try_lock_once().await?.expect("lock must be obtained successfully"); + assert_matches!(guard, CrossProcessLockState::Dirty(_)); + assert!(lock2.is_dirty()); + lock2.clear_dirty(); + + drop(guard); + yield_now().await; + } + } + + Ok(()) + } } /// Some code that is shared by almost all `MemoryStore` implementations out From 82c8ab22a60c8e67d22f54f67e18c1bd67561245 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 19 Nov 2025 11:22:46 +0100 Subject: [PATCH 18/28] test(common): Make tests run faster. This patch replaces `sleep` by `yield_now`, which has the same effect in this case, and makes the tests run faster. 
--- crates/matrix-sdk-common/src/cross_process_lock.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/matrix-sdk-common/src/cross_process_lock.rs b/crates/matrix-sdk-common/src/cross_process_lock.rs index 88fa1e41b14..0eac49f859b 100644 --- a/crates/matrix-sdk-common/src/cross_process_lock.rs +++ b/crates/matrix-sdk-common/src/cross_process_lock.rs @@ -602,15 +602,11 @@ mod tests { use assert_matches::assert_matches; use matrix_sdk_test_macros::async_test; - use tokio::{ - spawn, - task::yield_now, - time::{Duration, sleep}, - }; + use tokio::{spawn, task::yield_now}; use super::{ CrossProcessLock, CrossProcessLockError, CrossProcessLockGeneration, CrossProcessLockState, - CrossProcessLockUnobtained, EXTEND_LEASE_EVERY_MS, TryLock, + CrossProcessLockUnobtained, TryLock, memory_store_helper::{Lease, try_take_leased_lock}, }; @@ -655,7 +651,7 @@ mod tests { async fn release_lock(lock: CrossProcessLockState) { drop(lock); - sleep(Duration::from_millis(EXTEND_LEASE_EVERY_MS)).await; + yield_now().await; } type TestResult = Result<(), CrossProcessLockError>; @@ -761,7 +757,7 @@ mod tests { let lock2_clone = lock2.clone(); let task = spawn(async move { lock2_clone.spin_lock(Some(500)).await }); - sleep(Duration::from_millis(100)).await; + yield_now().await; drop(guard1); From 24afefcad4c97d4edfdb04b0c6c48fd3aebe76ce Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 19 Nov 2025 15:04:18 +0100 Subject: [PATCH 19/28] feat(sdk): Implement `RoomEventCacheStateLockWriteGuard::downgrade`. This patch implements `RoomEventCacheStateLockWriteGuard::downgrade` to simplify the code a little bit. 
--- crates/matrix-sdk/src/event_cache/room/mod.rs | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 1830e8231b2..8df05139021 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -830,7 +830,6 @@ mod private { match state_guard.store.lock().await? { EventCacheStoreLockState::Clean(store_guard) => { Ok(RoomEventCacheStateLockReadGuard { - // Downgrade to a read-lock. state: state_guard.downgrade(), store: store_guard, }) @@ -845,11 +844,7 @@ mod private { // All good now, mark the cross-process lock as non-dirty. EventCacheStoreLockGuard::clear_dirty(&guard.store); - Ok(RoomEventCacheStateLockReadGuard { - // Downgrade to a read-lock. - state: guard.state.downgrade(), - store: guard.store, - }) + Ok(guard.downgrade()) } } } @@ -909,6 +904,19 @@ mod private { store: EventCacheStoreLockGuard, } + impl<'a> RoomEventCacheStateLockWriteGuard<'a> { + /// Synchronously downgrades a write lock into a read lock. + /// + /// The per-thread/state lock is downgraded atomically, without allowing + /// any writers to take exclusive access of the lock in the meantime. + /// + /// It returns an RAII guard which will drop the write access to the + /// state and to the store when dropped. + fn downgrade(self) -> RoomEventCacheStateLockReadGuard<'a> { + RoomEventCacheStateLockReadGuard { state: self.state.downgrade(), store: self.store } + } + } + impl<'a> RoomEventCacheStateLockReadGuard<'a> { /// Returns a read-only reference to the underlying room linked chunk. pub fn room_linked_chunk(&self) -> &EventLinkedChunk { From d994cd64db2135d0790b736674b2cc43e0359f34 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 19 Nov 2025 16:48:07 +0100 Subject: [PATCH 20/28] feat(sdk): Allow shared access on `RoomEventCacheStateLock::read`. 
This patch fixes a problem found in a test (not committed yet) where it was impossible to do multiple calls to `read` if the first guard was still alive. See the comments to learn more. --- crates/matrix-sdk/src/event_cache/room/mod.rs | 81 +++++++++++++++++-- 1 file changed, 74 insertions(+), 7 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 8df05139021..1533caa490a 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -646,7 +646,7 @@ mod private { serde::Raw, }; use tokio::sync::{ - RwLock, RwLockReadGuard, RwLockWriteGuard, + RwLock, RwLockReadGuard, RwLockWriteGuard, Semaphore, broadcast::{Receiver, Sender}, }; use tracing::{debug, error, instrument, trace, warn}; @@ -668,7 +668,12 @@ mod private { /// This contains all the inner mutable states that ought to be updated at /// the same time. pub struct RoomEventCacheStateLock { + /// The per-thread lock around the real state. locked_state: RwLock, + + /// Please see inline comment of [`Self::read`] to understand why it + /// exists. + read_lock_acquisition: Semaphore, } struct RoomEventCacheStateLockInner { @@ -811,6 +816,7 @@ mod private { waited_for_initial_prev_token, subscriber_count: Default::default(), }), + read_lock_acquisition: Semaphore::new(1), }) } @@ -824,17 +830,78 @@ mod private { /// If the cross-process lock over the store is dirty (see /// [`EventCacheStoreLockState`]), the state is reset to the last chunk. pub async fn read(&self) -> Result, EventCacheError> { - // Take a write-lock in case the lock is dirty and we need to reset the state. - let state_guard = self.locked_state.write().await; + // Only one call at a time to `read` is allowed. + // + // Why? Because in case the cross-process lock over the store is dirty, we need + // to upgrade the read lock over the state to a write lock. 
+ // + // ## Upgradable read lock + // + // One may argue that this upgrade can be done with an _upgradable read lock_ + // [^1] [^2]. We don't want to use this solution because an upgradable read lock + // is basically a mutex because we are losing the shared access property: having + // multiple read locks at the same time. This is an important property to hold + // for performance concerns. + // + // ## Downgradable write lock + // + // One may also argue we could first obtain a write lock over the state from the + // beginning, thus removing the need to upgrade the read lock to a write lock. + // The write lock is then downgraded to a read lock once the dirty is cleaned + // up. It can potentially create a deadlock in the following situation: + // + // - `read` is called once, it takes a write lock, then downgrades it to a read + // lock: the guard is kept alive somewhere, + // - `read` is called again, and waits to obtain the write lock, which is + // impossible as long as the guard from the previous call is not dropped. + // + // ## “Atomic” read and write + // + // One may finally argue to first obtain a read lock over the state, then drop + // it if the cross-process lock over the store is dirty, and immediately obtain + // a write lock (which can later be downgraded to a read lock). The problem is + // that this write lock is async: anything can happen between the drop and the + // new lock acquisition, and it's not possible to pause the runtime in the + // meantime. + // + // ## Semaphore + // + // The chosen idea is to allow only one execution at a time of this method. That + // way we are free to “upgrade” the read lock by dropping it and obtaining a new + // write lock. All callers to this method are waiting, so nothing can happen in + // the meantime. + // + // Note that it doesn't conflict with the `write` method because the latter
+ // + // [^1]: https://docs.rs/lock_api/0.4.14/lock_api/struct.RwLock.html#method.upgradable_read + // [^2]: https://docs.rs/async-lock/3.4.1/async_lock/struct.RwLock.html#method.upgradable_read + let _permit = self + .read_lock_acquisition + .acquire() + .await + .expect("The `lock_acquisition` semaphore must never be closed"); + + // Just a check in case the code is modified without knowing how it works. + debug_assert_eq!( + self.read_lock_acquisition.available_permits(), + 0, + "The semaphore must have only one permit" + ); + + // Obtain a read lock. + let state_guard = self.locked_state.read().await; match state_guard.store.lock().await? { EventCacheStoreLockState::Clean(store_guard) => { - Ok(RoomEventCacheStateLockReadGuard { - state: state_guard.downgrade(), - store: store_guard, - }) + Ok(RoomEventCacheStateLockReadGuard { state: state_guard, store: store_guard }) } EventCacheStoreLockState::Dirty(store_guard) => { + // Drop the read lock, and take a write lock to modify the state. + // This is safe because only one semaphore permit exists. + drop(state_guard); + let state_guard = self.locked_state.write().await; + let mut guard = RoomEventCacheStateLockWriteGuard { state: state_guard, store: store_guard, From 0da872ec3bd9f2fc86271418655960c86dc81a4e Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 19 Nov 2025 16:56:30 +0100 Subject: [PATCH 21/28] test(sdk): Add the `test_reset_when_dirty` test. This patch adds the new `test_reset_when_dirty` test, which ensures the state is correctly reset when the cross-process lock over the store becomes dirty. 
--- crates/matrix-sdk/src/event_cache/room/mod.rs | 271 +++++++++++++++++- 1 file changed, 268 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 1533caa490a..6c943c28775 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -2565,7 +2565,7 @@ mod tests { #[cfg(all(test, not(target_family = "wasm")))] // This uses the cross-process lock, so needs time support. mod timed_tests { - use std::sync::Arc; + use std::{ops::Not, sync::Arc}; use assert_matches::assert_matches; use assert_matches2::assert_let; @@ -2585,7 +2585,7 @@ mod timed_tests { }; use matrix_sdk_test::{ALICE, BOB, async_test, event_factory::EventFactory}; use ruma::{ - OwnedUserId, event_id, + EventId, OwnedUserId, event_id, events::{AnySyncMessageLikeEvent, AnySyncTimelineEvent}, room_id, user_id, }; @@ -2594,7 +2594,7 @@ mod timed_tests { use super::RoomEventCacheGenericUpdate; use crate::{ assert_let_timeout, - event_cache::{RoomEventCacheUpdate, room::LoadMoreEventsBackwardsOutcome}, + event_cache::{RoomEventCache, RoomEventCacheUpdate, room::LoadMoreEventsBackwardsOutcome}, test_utils::client::MockClientBuilder, }; @@ -3757,4 +3757,269 @@ mod timed_tests { room_event_cache.rfind_map_event_in_memory_by(|_| None::<()>).await.unwrap().is_none() ); } + + #[async_test] + async fn test_reset_when_dirty() { + let user_id = user_id!("@mnt_io:matrix.org"); + let room_id = room_id!("!raclette:patate.ch"); + + // The storage shared by the two clients. + let event_cache_store = MemoryStore::new(); + + // Client for the process 0. + let client_p0 = MockClientBuilder::new(None) + .on_builder(|builder| { + builder.store_config( + StoreConfig::new("process #0".to_owned()) + .event_cache_store(event_cache_store.clone()), + ) + }) + .build() + .await; + + // Client for the process 1. 
+ let client_p1 = MockClientBuilder::new(None) + .on_builder(|builder| { + builder.store_config( + StoreConfig::new("process #1".to_owned()).event_cache_store(event_cache_store), + ) + }) + .build() + .await; + + let event_factory = EventFactory::new().room(room_id).sender(user_id); + + let ev_id_0 = event_id!("$ev_0"); + let ev_id_1 = event_id!("$ev_1"); + + let ev_0 = event_factory.text_msg("comté").event_id(ev_id_0).into_event(); + let ev_1 = event_factory.text_msg("morbier").event_id(ev_id_1).into_event(); + + // Add events to the storage (shared by the two clients!). + client_p0 + .event_cache_store() + .lock() + .await + .expect("[p0] Could not acquire the event cache lock") + .as_clean() + .expect("[p0] Could not acquire a clean event cache lock") + .handle_linked_chunk_updates( + LinkedChunkId::Room(room_id), + vec![ + Update::NewItemsChunk { + previous: None, + new: ChunkIdentifier::new(0), + next: None, + }, + Update::PushItems { + at: Position::new(ChunkIdentifier::new(0), 0), + items: vec![ev_0], + }, + Update::NewItemsChunk { + previous: Some(ChunkIdentifier::new(0)), + new: ChunkIdentifier::new(1), + next: None, + }, + Update::PushItems { + at: Position::new(ChunkIdentifier::new(1), 0), + items: vec![ev_1], + }, + ], + ) + .await + .unwrap(); + + // Subscribe the event caches, and create the room. 
+ let (room_event_cache_p0, room_event_cache_p1) = { + let event_cache_p0 = client_p0.event_cache(); + event_cache_p0.subscribe().unwrap(); + + let event_cache_p1 = client_p1.event_cache(); + event_cache_p1.subscribe().unwrap(); + + client_p0.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined); + client_p1.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined); + + let (room_event_cache_p0, _drop_handles) = + client_p0.get_room(room_id).unwrap().event_cache().await.unwrap(); + let (room_event_cache_p1, _drop_handles) = + client_p1.get_room(room_id).unwrap().event_cache().await.unwrap(); + + (room_event_cache_p0, room_event_cache_p1) + }; + + // Okay. We are ready for the test! + // + // First off, let's check `room_event_cache_p0` has access to the first event + // loaded in-memory, then do a pagination, and see more events. + { + let room_event_cache = &room_event_cache_p0; + + // `ev_id_1` must be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_1).await); + + // `ev_id_0` must NOT be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + + // Load one more event with a backpagination. + room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + + // `ev_id_0` must now be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_0).await); + } + + // Second, let's check `room_event_cache_p1` has the same accesses. + { + let room_event_cache = &room_event_cache_p1; + + // `ev_id_1` must be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_1).await); + + // `ev_id_0` must NOT be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + + // Load one more event with a backpagination. + room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + + // `ev_id_0` must now be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_0).await); + } + + // Do this a couple times, for the fun. 
+ for _ in 0..3 { + // Third, because `room_event_cache_p1` has locked the store, the lock + // is dirty for `room_event_cache_p0`, so it will shrink to its last + // chunk! + { + let room_event_cache = &room_event_cache_p0; + + // `ev_id_1` must be loaded in memory, just like before. + assert!(event_loaded(room_event_cache, ev_id_1).await); + + // However, `ev_id_0` must NOT be loaded in memory. It WAS loaded, but the + // state has shrunk to its last chunk. + assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + + // Load one more event with a backpagination. + room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + + // `ev_id_0` must now be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_0).await); + } + + // Fourth, because `room_event_cache_p0` has locked the store again, the lock + // is dirty for `room_event_cache_p1` too!, so it will shrink to its last + // chunk! + { + let room_event_cache = &room_event_cache_p1; + + // `ev_id_1` must be loaded in memory, just like before. + assert!(event_loaded(room_event_cache, ev_id_1).await); + + // However, `ev_id_0` must NOT be loaded in memory. It WAS loaded, but the + // state has shrunk to its last chunk. + assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + + // Load one more event with a backpagination. + room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + + // `ev_id_0` must now be loaded in memory. + assert!(event_loaded(room_event_cache, ev_id_0).await); + } + } + + // Repeat that with an explicit read lock (so that we don't rely on + // `event_loaded` to trigger the dirty detection). + for _ in 0..3 { + { + let room_event_cache = &room_event_cache_p0; + + let guard = room_event_cache.inner.state.read().await.unwrap(); + + // Guard is kept alive, to ensure we can have multiple read guards alive with a + // shared access. + // See `RoomEventCacheStateLock::read` to learn more. 
+
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                // Ensure `guard` is alive up to this point (in case this test is refactored, I
+                // want to make this super explicit).
+                //
+                // We need to drop it before the pagination because the pagination needs to
+                // obtain a write lock.
+                drop(guard);
+
+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+
+            {
+                let room_event_cache = &room_event_cache_p1;
+
+                let guard = room_event_cache.inner.state.read().await.unwrap();
+
+                // Guard is kept alive, to ensure we can have multiple read guards alive with a
+                // shared access.
+
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                // Ensure `guard` is alive up to this point (in case this test is refactored, I
+                // want to make this super explicit).
+                //
+                // We need to drop it before the pagination because the pagination needs to
+                // obtain a write lock.
+                drop(guard);
+
+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+        }
+
+        // Repeat that with an explicit write lock.
+        for _ in 0..3 {
+            {
+                let room_event_cache = &room_event_cache_p0;
+
+                let guard = room_event_cache.inner.state.write().await.unwrap();
+
+                // Guard isn't kept alive, otherwise `event_loaded` couldn't run because it
+                // needs to obtain a read lock.
+ drop(guard); + + assert!(event_loaded(room_event_cache, ev_id_1).await); + assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + + room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + assert!(event_loaded(room_event_cache, ev_id_0).await); + } + + { + let room_event_cache = &room_event_cache_p1; + + let guard = room_event_cache.inner.state.write().await.unwrap(); + + // Guard isn't kept alive, otherwise `event_loaded` couldn't run because it + // needs to obtain a read lock. + drop(guard); + + assert!(event_loaded(room_event_cache, ev_id_1).await); + assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + + room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + assert!(event_loaded(room_event_cache, ev_id_0).await); + } + } + } + + async fn event_loaded(room_event_cache: &RoomEventCache, event_id: &EventId) -> bool { + room_event_cache + .rfind_map_event_in_memory_by(|event| { + (event.event_id().as_deref() == Some(event_id)).then_some(()) + }) + .await + .unwrap() + .is_some() + } } From 955d001ff58f1512be56c6905b6da2157f1288a2 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Fri, 21 Nov 2025 10:50:50 +0100 Subject: [PATCH 22/28] refactor(sdk): Rename `RoomEventCacheInner::sender` to `update_sender`. This patch renames the `sender` field of `RoomEventCacheInner` to `update_sender` to clarify what the sender is about. 
--- crates/matrix-sdk/src/event_cache/mod.rs | 4 ++-- .../matrix-sdk/src/event_cache/pagination.rs | 9 +++++---- .../matrix-sdk/src/event_cache/redecryptor.rs | 2 +- crates/matrix-sdk/src/event_cache/room/mod.rs | 20 ++++++++++--------- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/mod.rs b/crates/matrix-sdk/src/event_cache/mod.rs index 870cacc52a3..eafd0cd6b50 100644 --- a/crates/matrix-sdk/src/event_cache/mod.rs +++ b/crates/matrix-sdk/src/event_cache/mod.rs @@ -421,7 +421,7 @@ impl EventCache { // However, better safe than sorry, and it's cheap to send an update here, // so let's do it! if !diffs.is_empty() { - let _ = room.inner.sender.send( + let _ = room.inner.update_sender.send( RoomEventCacheUpdate::UpdateTimelineEvents { diffs, origin: EventsOrigin::Cache, @@ -951,7 +951,7 @@ impl EventCacheInner { let mut state_guard = state_guard?; let updates_as_vector_diffs = state_guard.reset().await?; - let _ = room.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { + let _ = room.inner.update_sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { diffs: updates_as_vector_diffs, origin: EventsOrigin::Cache, }); diff --git a/crates/matrix-sdk/src/event_cache/pagination.rs b/crates/matrix-sdk/src/event_cache/pagination.rs index df4ee95451b..1c59861bc81 100644 --- a/crates/matrix-sdk/src/event_cache/pagination.rs +++ b/crates/matrix-sdk/src/event_cache/pagination.rs @@ -243,11 +243,12 @@ impl RoomPagination { reached_start, } => { if !timeline_event_diffs.is_empty() { - let _ = - self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { + let _ = self.inner.update_sender.send( + RoomEventCacheUpdate::UpdateTimelineEvents { diffs: timeline_event_diffs, origin: EventsOrigin::Cache, - }); + }, + ); // Send a room event cache generic update. let _ = @@ -311,7 +312,7 @@ impl RoomPagination { .await? 
{ if !timeline_event_diffs.is_empty() { - let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { + let _ = self.inner.update_sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { diffs: timeline_event_diffs, origin: EventsOrigin::Pagination, }); diff --git a/crates/matrix-sdk/src/event_cache/redecryptor.rs b/crates/matrix-sdk/src/event_cache/redecryptor.rs index 73d3d634563..3a469223d14 100644 --- a/crates/matrix-sdk/src/event_cache/redecryptor.rs +++ b/crates/matrix-sdk/src/event_cache/redecryptor.rs @@ -332,7 +332,7 @@ impl EventCache { // been queued up. We need to send them out to our subscribers now. let diffs = state.room_linked_chunk().updates_as_vector_diffs(); - let _ = room_cache.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { + let _ = room_cache.inner.update_sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { diffs, origin: EventsOrigin::Cache, }); diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 6c943c28775..67db4c4bc75 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -207,7 +207,7 @@ impl RoomEventCache { let previous_subscriber_count = subscriber_count.fetch_add(1, Ordering::SeqCst); trace!("added a room event cache subscriber; new count: {}", previous_subscriber_count + 1); - let recv = self.inner.sender.subscribe(); + let recv = self.inner.update_sender.subscribe(); let subscriber = RoomEventCacheSubscriber { recv, room_id: self.inner.room_id.clone(), @@ -383,7 +383,7 @@ impl RoomEventCache { let updates_as_vector_diffs = self.inner.state.write().await?.reset().await?; // Notify observers about the update. 
- let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { + let _ = self.inner.update_sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { diffs: updates_as_vector_diffs, origin: EventsOrigin::Cache, }); @@ -428,7 +428,7 @@ pub(super) struct RoomEventCacheInner { pub weak_room: WeakRoom, /// Sender part for subscribers to this room. - pub sender: Sender, + pub update_sender: Sender, /// State for this room's event cache. pub state: RoomEventCacheStateLock, @@ -463,13 +463,14 @@ impl RoomEventCacheInner { auto_shrink_sender: mpsc::Sender, generic_update_sender: Sender, ) -> Self { - let sender = Sender::new(32); + let update_sender = Sender::new(32); let weak_room = WeakRoom::new(client, room_id); + Self { room_id: weak_room.room_id().to_owned(), weak_room, state, - sender, + update_sender, pagination_batch_token_notifier: Default::default(), auto_shrink_sender, pagination_status, @@ -498,7 +499,7 @@ impl RoomEventCacheInner { handled_read_marker = true; // Propagate to observers. (We ignore the error if there aren't any.) - let _ = self.sender.send(RoomEventCacheUpdate::MoveReadMarkerTo { + let _ = self.update_sender.send(RoomEventCacheUpdate::MoveReadMarkerTo { event_id: ev.content.event_id, }); } @@ -567,7 +568,7 @@ impl RoomEventCacheInner { // The order matters here: first send the timeline event diffs, then only the // related events (read receipts, etc.). 
if !timeline_event_diffs.is_empty() { - let _ = self.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { + let _ = self.update_sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { diffs: timeline_event_diffs, origin: EventsOrigin::Sync, }); @@ -579,12 +580,13 @@ impl RoomEventCacheInner { if !ephemeral_events.is_empty() { let _ = self - .sender + .update_sender .send(RoomEventCacheUpdate::AddEphemeralEvents { events: ephemeral_events }); } if !ambiguity_changes.is_empty() { - let _ = self.sender.send(RoomEventCacheUpdate::UpdateMembers { ambiguity_changes }); + let _ = + self.update_sender.send(RoomEventCacheUpdate::UpdateMembers { ambiguity_changes }); } Ok(()) From 95a12eec49f4589ae130742ce72a1b09431994c0 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Fri, 21 Nov 2025 12:03:38 +0100 Subject: [PATCH 23/28] feat(sdk): Send updates when `RoomEventCacheStateLock` is reloaded. This patch updates the reloading of `RoomEventCacheStateLock` when the cross-process lock over the store is dirty to broadcast `RoomEventCacheUpdate` and `RoomEventCacheGenericUpdate`. That way the `Timeline` and other components can react to this reload. --- crates/matrix-sdk/src/event_cache/mod.rs | 5 + crates/matrix-sdk/src/event_cache/room/mod.rs | 327 +++++++++++++++++- 2 files changed, 316 insertions(+), 16 deletions(-) diff --git a/crates/matrix-sdk/src/event_cache/mod.rs b/crates/matrix-sdk/src/event_cache/mod.rs index eafd0cd6b50..6c928c92722 100644 --- a/crates/matrix-sdk/src/event_cache/mod.rs +++ b/crates/matrix-sdk/src/event_cache/mod.rs @@ -1048,10 +1048,14 @@ impl EventCacheInner { ThreadingSupport::Enabled { .. 
} ); + let update_sender = Sender::new(32); + let room_state = RoomEventCacheStateLock::new( room_id.to_owned(), room_version_rules, enabled_thread_support, + update_sender.clone(), + self.generic_update_sender.clone(), self.linked_chunk_update_sender.clone(), self.store.clone(), pagination_status.clone(), @@ -1074,6 +1078,7 @@ impl EventCacheInner { pagination_status, room_id.to_owned(), auto_shrink_sender, + update_sender, self.generic_update_sender.clone(), ); diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 67db4c4bc75..62e5e7c75f4 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -168,6 +168,7 @@ impl RoomEventCache { pagination_status: SharedObservable, room_id: OwnedRoomId, auto_shrink_sender: mpsc::Sender, + update_sender: Sender, generic_update_sender: Sender, ) -> Self { Self { @@ -177,6 +178,7 @@ impl RoomEventCache { pagination_status, room_id, auto_shrink_sender, + update_sender, generic_update_sender, )), } @@ -427,9 +429,6 @@ pub(super) struct RoomEventCacheInner { pub weak_room: WeakRoom, - /// Sender part for subscribers to this room. - pub update_sender: Sender, - /// State for this room's event cache. pub state: RoomEventCacheStateLock, @@ -444,6 +443,9 @@ pub(super) struct RoomEventCacheInner { /// more details. auto_shrink_sender: mpsc::Sender, + /// Sender part for update subscribers to this room. + pub update_sender: Sender, + /// A clone of [`EventCacheInner::generic_update_sender`]. 
/// /// Whilst `EventCacheInner` handles the generic updates from the sync, or @@ -461,9 +463,9 @@ impl RoomEventCacheInner { pagination_status: SharedObservable, room_id: OwnedRoomId, auto_shrink_sender: mpsc::Sender, + update_sender: Sender, generic_update_sender: Sender, ) -> Self { - let update_sender = Sender::new(32); let weak_room = WeakRoom::new(client, room_id); Self { @@ -660,7 +662,8 @@ mod private { deduplicator::{DeduplicationOutcome, filter_duplicate_events}, room::threads::ThreadEventCache, }, - EventLocation, LoadMoreEventsBackwardsOutcome, + EventLocation, EventsOrigin, LoadMoreEventsBackwardsOutcome, RoomEventCacheGenericUpdate, + RoomEventCacheUpdate, events::EventLinkedChunk, sort_positions_descending, }; @@ -699,7 +702,19 @@ mod private { pagination_status: SharedObservable, - /// See doc comment of + /// A clone of [`super::RoomEventCacheInner::update_sender`]. + /// + /// This is used only by the [`RoomEventCacheStateLock::read`] and + /// [`RoomEventCacheStateLock::write`] when the state must be reset. + update_sender: Sender, + + /// A clone of [`super::super::EventCacheInner::generic_update_sender`]. + /// + /// This is used only by the [`RoomEventCacheStateLock::read`] and + /// [`RoomEventCacheStateLock::write`] when the state must be reset. + generic_update_sender: Sender, + + /// A clone of /// [`super::super::EventCacheInner::linked_chunk_update_sender`]. linked_chunk_update_sender: Sender, @@ -728,10 +743,13 @@ mod private { /// /// [`LinkedChunk`]: matrix_sdk_common::linked_chunk::LinkedChunk /// [`RoomPagination`]: super::RoomPagination + #[allow(clippy::too_many_arguments)] pub async fn new( room_id: OwnedRoomId, room_version_rules: RoomVersionRules, enabled_thread_support: bool, + update_sender: Sender, + generic_update_sender: Sender, linked_chunk_update_sender: Sender, store: EventCacheStoreLock, pagination_status: SharedObservable, @@ -813,6 +831,8 @@ mod private { // sync). 
threads: HashMap::new(), pagination_status, + update_sender, + generic_update_sender, linked_chunk_update_sender, room_version_rules, waited_for_initial_prev_token, @@ -908,12 +928,34 @@ mod private { state: state_guard, store: store_guard, }; - guard.shrink_to_last_chunk().await?; + + // Force to reload by shrinking to the last chunk. + let updates_as_vector_diffs = guard.force_shrink_to_last_chunk().await?; // All good now, mark the cross-process lock as non-dirty. EventCacheStoreLockGuard::clear_dirty(&guard.store); - Ok(guard.downgrade()) + // Downgrade the guard as soon as possible. + let guard = guard.downgrade(); + + // Now let the world know about the reload. + if !updates_as_vector_diffs.is_empty() { + // Notify observers about the update. + let _ = guard.state.update_sender.send( + RoomEventCacheUpdate::UpdateTimelineEvents { + diffs: updates_as_vector_diffs, + origin: EventsOrigin::Cache, + }, + ); + + // Notify observers about the generic update. + let _ = + guard.state.generic_update_sender.send(RoomEventCacheGenericUpdate { + room_id: guard.state.room_id.clone(), + }); + } + + Ok(guard) } } } @@ -942,11 +984,30 @@ mod private { state: state_guard, store: store_guard, }; - guard.shrink_to_last_chunk().await?; + + // Force to reload by shrinking to the last chunk. + let updates_as_vector_diffs = guard.force_shrink_to_last_chunk().await?; // All good now, mark the cross-process lock as non-dirty. EventCacheStoreLockGuard::clear_dirty(&guard.store); + // Now let the world know about the reload. + if !updates_as_vector_diffs.is_empty() { + // Notify observers about the update. + let _ = guard.state.update_sender.send( + RoomEventCacheUpdate::UpdateTimelineEvents { + diffs: updates_as_vector_diffs, + origin: EventsOrigin::Cache, + }, + ); + + // Notify observers about the generic update. 
+ let _ = + guard.state.generic_update_sender.send(RoomEventCacheGenericUpdate { + room_id: guard.state.room_id.clone(), + }); + } + Ok(guard) } } @@ -1285,7 +1346,8 @@ mod private { } } - #[cfg(test)] + /// Force to shrink the room, whenever there is subscribers or not. + #[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"] pub async fn force_shrink_to_last_chunk( &mut self, ) -> Result>, EventCacheError> { @@ -3761,7 +3823,7 @@ mod timed_tests { } #[async_test] - async fn test_reset_when_dirty() { + async fn test_reload_when_dirty() { let user_id = user_id!("@mnt_io:matrix.org"); let room_id = room_id!("!raclette:patate.ch"); @@ -3854,9 +3916,17 @@ mod timed_tests { // // First off, let's check `room_event_cache_p0` has access to the first event // loaded in-memory, then do a pagination, and see more events. - { + let mut updates_stream_p0 = { let room_event_cache = &room_event_cache_p0; + let (initial_updates, mut updates_stream) = + room_event_cache_p0.subscribe().await.unwrap(); + + // Initial updates contain `ev_id_1` only. + assert_eq!(initial_updates.len(), 1); + assert_eq!(initial_updates[0].event_id().as_deref(), Some(ev_id_1)); + assert!(updates_stream.is_empty()); + // `ev_id_1` must be loaded in memory. assert!(event_loaded(room_event_cache, ev_id_1).await); @@ -3866,13 +3936,36 @@ mod timed_tests { // Load one more event with a backpagination. room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + // A new update for `ev_id_0` must be present. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); + // `ev_id_0` must now be loaded in memory. 
assert!(event_loaded(room_event_cache, ev_id_0).await); - } + + updates_stream + }; // Second, let's check `room_event_cache_p1` has the same accesses. - { + let mut updates_stream_p1 = { let room_event_cache = &room_event_cache_p1; + let (initial_updates, mut updates_stream) = + room_event_cache_p1.subscribe().await.unwrap(); + + // Initial updates contain `ev_id_1` only. + assert_eq!(initial_updates.len(), 1); + assert_eq!(initial_updates[0].event_id().as_deref(), Some(ev_id_1)); + assert!(updates_stream.is_empty()); // `ev_id_1` must be loaded in memory. assert!(event_loaded(room_event_cache, ev_id_1).await); @@ -3883,9 +3976,25 @@ mod timed_tests { // Load one more event with a backpagination. room_event_cache.pagination().run_backwards_once(1).await.unwrap(); + // A new update for `ev_id_0` must be present. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); + // `ev_id_0` must now be loaded in memory. assert!(event_loaded(room_event_cache, ev_id_0).await); - } + + updates_stream + }; // Do this a couple times, for the fun. for _ in 0..3 { @@ -3894,19 +4003,50 @@ mod timed_tests { // chunk! { let room_event_cache = &room_event_cache_p0; + let updates_stream = &mut updates_stream_p0; // `ev_id_1` must be loaded in memory, just like before. assert!(event_loaded(room_event_cache, ev_id_1).await); // However, `ev_id_0` must NOT be loaded in memory. It WAS loaded, but the - // state has shrunk to its last chunk. + // state has been reloaded to its last chunk. assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + // The reload can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. 
} => { + assert_eq!(diffs.len(), 2, "{diffs:#?}"); + assert_matches!(&diffs[0], VectorDiff::Clear); + assert_matches!( + &diffs[1], + VectorDiff::Append { values: events } => { + assert_eq!(events.len(), 1); + assert_eq!(events[0].event_id().as_deref(), Some(ev_id_1)); + } + ); + } + ); + // Load one more event with a backpagination. room_event_cache.pagination().run_backwards_once(1).await.unwrap(); // `ev_id_0` must now be loaded in memory. assert!(event_loaded(room_event_cache, ev_id_0).await); + + // The pagination can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); } // Fourth, because `room_event_cache_p0` has locked the store again, the lock @@ -3914,6 +4054,7 @@ mod timed_tests { // chunk! { let room_event_cache = &room_event_cache_p1; + let updates_stream = &mut updates_stream_p1; // `ev_id_1` must be loaded in memory, just like before. assert!(event_loaded(room_event_cache, ev_id_1).await); @@ -3922,11 +4063,41 @@ mod timed_tests { // state has shrunk to its last chunk. assert!(event_loaded(room_event_cache, ev_id_0).await.not()); + // The reload can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 2, "{diffs:#?}"); + assert_matches!(&diffs[0], VectorDiff::Clear); + assert_matches!( + &diffs[1], + VectorDiff::Append { values: events } => { + assert_eq!(events.len(), 1); + assert_eq!(events[0].event_id().as_deref(), Some(ev_id_1)); + } + ); + } + ); + // Load one more event with a backpagination. room_event_cache.pagination().run_backwards_once(1).await.unwrap(); // `ev_id_0` must now be loaded in memory. 
assert!(event_loaded(room_event_cache, ev_id_0).await); + + // The pagination can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); } } @@ -3935,6 +4106,7 @@ mod timed_tests { for _ in 0..3 { { let room_event_cache = &room_event_cache_p0; + let updates_stream = &mut updates_stream_p0; let guard = room_event_cache.inner.state.read().await.unwrap(); @@ -3942,6 +4114,22 @@ mod timed_tests { // shared access. // See `RoomEventCacheStateLock::read` to learn more. + // The reload can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 2, "{diffs:#?}"); + assert_matches!(&diffs[0], VectorDiff::Clear); + assert_matches!( + &diffs[1], + VectorDiff::Append { values: events } => { + assert_eq!(events.len(), 1); + assert_eq!(events[0].event_id().as_deref(), Some(ev_id_1)); + } + ); + } + ); + assert!(event_loaded(room_event_cache, ev_id_1).await); assert!(event_loaded(room_event_cache, ev_id_0).await.not()); @@ -3954,16 +4142,47 @@ mod timed_tests { room_event_cache.pagination().run_backwards_once(1).await.unwrap(); assert!(event_loaded(room_event_cache, ev_id_0).await); + + // The pagination can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. 
} => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); } { let room_event_cache = &room_event_cache_p1; + let updates_stream = &mut updates_stream_p1; let guard = room_event_cache.inner.state.read().await.unwrap(); // Guard is kept alive, to ensure we can have multiple read guards alive with a // shared access. + // The reload can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 2, "{diffs:#?}"); + assert_matches!(&diffs[0], VectorDiff::Clear); + assert_matches!( + &diffs[1], + VectorDiff::Append { values: events } => { + assert_eq!(events.len(), 1); + assert_eq!(events[0].event_id().as_deref(), Some(ev_id_1)); + } + ); + } + ); + assert!(event_loaded(room_event_cache, ev_id_1).await); assert!(event_loaded(room_event_cache, ev_id_0).await.not()); @@ -3976,6 +4195,20 @@ mod timed_tests { room_event_cache.pagination().run_backwards_once(1).await.unwrap(); assert!(event_loaded(room_event_cache, ev_id_0).await); + + // The pagination can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); } } @@ -3983,9 +4216,26 @@ mod timed_tests { for _ in 0..3 { { let room_event_cache = &room_event_cache_p0; + let updates_stream = &mut updates_stream_p0; let guard = room_event_cache.inner.state.write().await.unwrap(); + // The reload can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. 
} => { + assert_eq!(diffs.len(), 2, "{diffs:#?}"); + assert_matches!(&diffs[0], VectorDiff::Clear); + assert_matches!( + &diffs[1], + VectorDiff::Append { values: events } => { + assert_eq!(events.len(), 1); + assert_eq!(events[0].event_id().as_deref(), Some(ev_id_1)); + } + ); + } + ); + // Guard isn't kept alive, otherwise `event_loaded` couldn't run because it // needs to obtain a read lock. drop(guard); @@ -3995,13 +4245,44 @@ mod timed_tests { room_event_cache.pagination().run_backwards_once(1).await.unwrap(); assert!(event_loaded(room_event_cache, ev_id_0).await); + + // The pagination can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); } { let room_event_cache = &room_event_cache_p1; + let updates_stream = &mut updates_stream_p1; let guard = room_event_cache.inner.state.write().await.unwrap(); + // The reload can be observed via the updates too. + assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 2, "{diffs:#?}"); + assert_matches!(&diffs[0], VectorDiff::Clear); + assert_matches!( + &diffs[1], + VectorDiff::Append { values: events } => { + assert_eq!(events.len(), 1); + assert_eq!(events[0].event_id().as_deref(), Some(ev_id_1)); + } + ); + } + ); + // Guard isn't kept alive, otherwise `event_loaded` couldn't run because it // needs to obtain a read lock. drop(guard); @@ -4011,6 +4292,20 @@ mod timed_tests { room_event_cache.pagination().run_backwards_once(1).await.unwrap(); assert!(event_loaded(room_event_cache, ev_id_0).await); + + // The pagination can be observed via the updates too. 
+ assert_matches!( + updates_stream.recv().await.unwrap(), + RoomEventCacheUpdate::UpdateTimelineEvents { diffs, .. } => { + assert_eq!(diffs.len(), 1, "{diffs:#?}"); + assert_matches!( + &diffs[0], + VectorDiff::Insert { index: 0, value: event } => { + assert_eq!(event.event_id().as_deref(), Some(ev_id_0)); + } + ); + } + ); } } } From e4ff5d800c49883959d4646f14575caad95b9ca8 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Fri, 21 Nov 2025 12:15:16 +0100 Subject: [PATCH 24/28] test(sdk): Ensure `EventCacheStoreLockGuard::clear_dirty` is called! This patch ensures that the `EventCacheStoreLockGuard::clear_dirty` method is correctly called. --- .../src/event_cache/store/mod.rs | 5 +++++ crates/matrix-sdk/src/event_cache/room/mod.rs | 22 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/crates/matrix-sdk-base/src/event_cache/store/mod.rs b/crates/matrix-sdk-base/src/event_cache/store/mod.rs index c95f983e16d..d6d9e932fcd 100644 --- a/crates/matrix-sdk-base/src/event_cache/store/mod.rs +++ b/crates/matrix-sdk-base/src/event_cache/store/mod.rs @@ -119,6 +119,11 @@ impl EventCacheStoreLockGuard { pub fn clear_dirty(this: &Self) { this.cross_process_lock_guard.clear_dirty(); } + + /// Force to [`CrossProcessLockGuard::is_dirty`]. 
+ pub fn is_dirty(this: &Self) -> bool { + this.cross_process_lock_guard.is_dirty() + } } #[cfg(not(tarpaulin_include))] diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 62e5e7c75f4..4fe4692442f 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -1108,6 +1108,11 @@ mod private { { self.state.room_linked_chunk.revents().find_map(|(_position, event)| predicate(event)) } + + #[cfg(test)] + pub fn is_dirty(&self) -> bool { + EventCacheStoreLockGuard::is_dirty(&self.store) + } } impl<'a> RoomEventCacheStateLockWriteGuard<'a> { @@ -2072,6 +2077,11 @@ mod private { Ok(()) } + + #[cfg(test)] + pub fn is_dirty(&self) -> bool { + EventCacheStoreLockGuard::is_dirty(&self.store) + } } /// Load a linked chunk's full metadata, making sure the chunks are @@ -4114,6 +4124,9 @@ mod timed_tests { // shared access. // See `RoomEventCacheStateLock::read` to learn more. + // The lock is no longer marked as dirty, it's been cleaned. + assert!(guard.is_dirty().not()); + // The reload can be observed via the updates too. assert_matches!( updates_stream.recv().await.unwrap(), @@ -4167,6 +4180,9 @@ mod timed_tests { // Guard is kept alive, to ensure we can have multiple read guards alive with a // shared access. + // The lock is no longer marked as dirty, it's been cleaned. + assert!(guard.is_dirty().not()); + // The reload can be observed via the updates too. assert_matches!( updates_stream.recv().await.unwrap(), @@ -4220,6 +4236,9 @@ mod timed_tests { let guard = room_event_cache.inner.state.write().await.unwrap(); + // The lock is no longer marked as dirty, it's been cleaned. + assert!(guard.is_dirty().not()); + // The reload can be observed via the updates too. 
assert_matches!( updates_stream.recv().await.unwrap(), @@ -4267,6 +4286,9 @@ mod timed_tests { let guard = room_event_cache.inner.state.write().await.unwrap(); + // The lock is no longer marked as dirty, it's been cleaned. + assert!(guard.is_dirty().not()); + // The reload can be observed via the updates too. assert_matches!( updates_stream.recv().await.unwrap(), From d206cf56004fdef4074f655d253fdc3c335745e6 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Fri, 21 Nov 2025 14:06:03 +0100 Subject: [PATCH 25/28] test(sdk): Add a test for dirtiness handling in `RoomEventCacheStateLock::new`. --- crates/matrix-sdk/src/event_cache/room/mod.rs | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 4fe4692442f..bc5625cfb62 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -4332,6 +4332,87 @@ mod timed_tests { } } + #[async_test] + async fn test_load_when_dirty() { + let room_id_0 = room_id!("!raclette:patate.ch"); + let room_id_1 = room_id!("!morbiflette:patate.ch"); + + // The storage shared by the two clients. + let event_cache_store = MemoryStore::new(); + + // Client for the process 0. + let client_p0 = MockClientBuilder::new(None) + .on_builder(|builder| { + builder.store_config( + StoreConfig::new("process #0".to_owned()) + .event_cache_store(event_cache_store.clone()), + ) + }) + .build() + .await; + + // Client for the process 1. + let client_p1 = MockClientBuilder::new(None) + .on_builder(|builder| { + builder.store_config( + StoreConfig::new("process #1".to_owned()).event_cache_store(event_cache_store), + ) + }) + .build() + .await; + + // Subscribe the event caches, and create the room. 
+ let (room_event_cache_0_p0, room_event_cache_0_p1) = {
+ let event_cache_p0 = client_p0.event_cache();
+ event_cache_p0.subscribe().unwrap();
+
+ let event_cache_p1 = client_p1.event_cache();
+ event_cache_p1.subscribe().unwrap();
+
+ client_p0
+ .base_client()
+ .get_or_create_room(room_id_0, matrix_sdk_base::RoomState::Joined);
+ client_p0
+ .base_client()
+ .get_or_create_room(room_id_1, matrix_sdk_base::RoomState::Joined);
+
+ client_p1
+ .base_client()
+ .get_or_create_room(room_id_0, matrix_sdk_base::RoomState::Joined);
+ client_p1
+ .base_client()
+ .get_or_create_room(room_id_1, matrix_sdk_base::RoomState::Joined);
+
+ let (room_event_cache_0_p0, _drop_handles) =
+ client_p0.get_room(room_id_0).unwrap().event_cache().await.unwrap();
+ let (room_event_cache_0_p1, _drop_handles) =
+ client_p1.get_room(room_id_0).unwrap().event_cache().await.unwrap();
+
+ (room_event_cache_0_p0, room_event_cache_0_p1)
+ };
+
+ // Let's make the cross-process lock over the store dirty.
+ {
+ drop(room_event_cache_0_p0.inner.state.read().await.unwrap());
+ drop(room_event_cache_0_p1.inner.state.read().await.unwrap());
+ }
+
+ // Create the `RoomEventCache` for `room_id_1`. During its creation, the
+ // cross-process lock over the store MUST be dirty, which makes no difference
+ // compared to a clean one: the state is just loaded, not reloaded.
+ let (room_event_cache_1_p0, _) =
+ client_p0.get_room(room_id_1).unwrap().event_cache().await.unwrap();
+
+ // Check the lock isn't dirty because it's been cleared.
+ {
+ let guard = room_event_cache_1_p0.inner.state.read().await.unwrap();
+ assert!(guard.is_dirty().not());
+ }
+
+ // The only way to test this behaviour is to see that the dirty block in
+ // `RoomEventCacheStateLock` is covered by this test.
+ } + async fn event_loaded(room_event_cache: &RoomEventCache, event_id: &EventId) -> bool { room_event_cache .rfind_map_event_in_memory_by(|event| { From 6ec3441342b46115cf27ff4958bc42299dff39a4 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 25 Nov 2025 14:30:56 +0100 Subject: [PATCH 26/28] chore: Replace `unwrap` by `expect`. --- crates/matrix-sdk-ui/src/timeline/builder.rs | 4 ++-- labs/multiverse/src/widgets/room_view/details/events.rs | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk-ui/src/timeline/builder.rs b/crates/matrix-sdk-ui/src/timeline/builder.rs index e918cd8b685..f139b3097ea 100644 --- a/crates/matrix-sdk-ui/src/timeline/builder.rs +++ b/crates/matrix-sdk-ui/src/timeline/builder.rs @@ -164,7 +164,7 @@ impl TimelineBuilder { room.client().event_cache().subscribe()?; let (room_event_cache, event_cache_drop) = room.event_cache().await?; - let (_, event_subscriber) = room_event_cache.subscribe().await.unwrap(); + let (_, event_subscriber) = room_event_cache.subscribe().await?; let is_room_encrypted = room .latest_encryption_state() @@ -224,7 +224,7 @@ impl TimelineBuilder { // Note: must be done here *before* spawning the task, to avoid race conditions // with event cache updates happening in the background. 
let (_events, receiver) =
- room_event_cache.subscribe_to_thread(root.clone()).await.unwrap();
+ room_event_cache.subscribe_to_thread(root.clone()).await?;

 spawn(
 thread_updates_task(
diff --git a/labs/multiverse/src/widgets/room_view/details/events.rs b/labs/multiverse/src/widgets/room_view/details/events.rs
index 9ea9acab3e7..de8fa07f954 100644
--- a/labs/multiverse/src/widgets/room_view/details/events.rs
+++ b/labs/multiverse/src/widgets/room_view/details/events.rs
@@ -28,7 +28,10 @@ impl Widget for &mut EventsView<'_> {
 let events = tokio::task::block_in_place(|| {
 Handle::current().block_on(async {
 let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap();
- room_event_cache.events().await.unwrap()
+ room_event_cache
+ .events()
+ .await
+ .expect("Failed to fetch the events because of a store error")
 })
 });

From 913e293042171c468da3649eaeee7ea6bf0bfad6 Mon Sep 17 00:00:00 2001
From: Ivan Enderlin
Date: Tue, 25 Nov 2025 14:38:14 +0100
Subject: [PATCH 27/28] refactor(sdk): Change a `Semaphore(permit=1)` for a `Mutex`.

This patch replaces the `Semaphore(permit=1)` with a `Mutex`: the semantics are strictly equivalent, but it removes the need to guarantee there is a single permit.
---
 crates/matrix-sdk/src/event_cache/room/mod.rs | 40 +++++++------------
 1 file changed, 15 insertions(+), 25 deletions(-)

diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs
index bc5625cfb62..d509c445d3e 100644
--- a/crates/matrix-sdk/src/event_cache/room/mod.rs
+++ b/crates/matrix-sdk/src/event_cache/room/mod.rs
@@ -650,7 +650,7 @@ mod private {
 serde::Raw,
 };
 use tokio::sync::{
- RwLock, RwLockReadGuard, RwLockWriteGuard, Semaphore,
+ Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard,
 broadcast::{Receiver, Sender},
 };
 use tracing::{debug, error, instrument, trace, warn};
@@ -678,7 +678,7 @@ mod private {

 /// Please see inline comment of [`Self::read`] to understand why it
 /// exists.
- read_lock_acquisition: Semaphore, + read_lock_acquisition: Mutex<()>, } struct RoomEventCacheStateLockInner { @@ -838,7 +838,7 @@ mod private { waited_for_initial_prev_token, subscriber_count: Default::default(), }), - read_lock_acquisition: Semaphore::new(1), + read_lock_acquisition: Mutex::new(()), }) } @@ -860,10 +860,10 @@ mod private { // ## Upgradable read lock // // One may argue that this upgrades can be done with an _upgradable read lock_ - // [^1] [^2]. We don't want to use this solution because an upgradable read lock - // is basically a mutex because we are losing the shared access property: having - // multiple read locks at the same time. This is an important property to hold - // for performance concerns. + // [^1] [^2]. We don't want to use this solution: an upgradable read lock is + // basically a mutex because we are losing the shared access property, i.e. + // having multiple read locks at the same time. This is an important property to + // hold for performance concerns. // // ## Downgradable write lock // @@ -886,30 +886,19 @@ mod private { // new lock acquisition, and it's not possible to pause the runtime in the // meantime. // - // ## Semaphore + // ## Semaphore with 1 permit, aka a Mutex // - // The chosen idea is to allow only one execution at a time of this method. That - // way we are free to “upgrade” the read lock by dropping it and obtaining a new - // write lock. All callers to this method are waiting, so nothing can happen in - // the meantime. + // The chosen idea is to allow only one execution at a time of this method: it + // becomes a critical section. That way we are free to “upgrade” the read lock + // by dropping it and obtaining a new write lock. All callers to this method are + // waiting, so nothing can happen in the meantime. // // Note that it doesn't conflict with the `write` method because this later // immediately obtains a write lock, which avoids any conflict with this method. 
// // [^1]: https://docs.rs/lock_api/0.4.14/lock_api/struct.RwLock.html#method.upgradable_read // [^2]: https://docs.rs/async-lock/3.4.1/async_lock/struct.RwLock.html#method.upgradable_read - let _permit = self - .read_lock_acquisition - .acquire() - .await - .expect("The `lock_acquisition` semaphore must never be closed"); - - // Just a check in case the code is modified without knowing how it works. - debug_assert_eq!( - self.read_lock_acquisition.available_permits(), - 0, - "The semaphore must have only one permit" - ); + let _one_reader_guard = self.read_lock_acquisition.lock().await; // Obtain a read lock. let state_guard = self.locked_state.read().await; @@ -920,7 +909,8 @@ mod private { } EventCacheStoreLockState::Dirty(store_guard) => { // Drop the read lock, and take a write lock to modify the state. - // This is safe because only one semaphore permit exists. + // This is safe because only one reader at a time (see + // `Self::read_lock_acquisition`) is allowed. drop(state_guard); let state_guard = self.locked_state.write().await; From 1b8980e1ac1783179aa2a69532a27fed729e4674 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Tue, 25 Nov 2025 14:41:38 +0100 Subject: [PATCH 28/28] chore(sdk): Remove an `unwrap` in `debug_string`. --- crates/matrix-sdk/src/event_cache/room/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index d509c445d3e..d06848326ed 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -418,7 +418,14 @@ impl RoomEventCache { /// Return a nice debug string (a vector of lines) for the linked chunk of /// events for this room. 
pub async fn debug_string(&self) -> Vec { - self.inner.state.read().await.unwrap().room_linked_chunk().debug_string() + match self.inner.state.read().await { + Ok(read_guard) => read_guard.room_linked_chunk().debug_string(), + Err(err) => { + warn!(?err, "Failed to obtain the read guard for the `RoomEventCache`"); + + vec![] + } + } } }