matrix_sdk_crypto/store/memorystore.rs

// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    collections::{BTreeMap, HashMap, HashSet},
    convert::Infallible,
    sync::Arc,
};

use async_trait::async_trait;
use matrix_sdk_common::{
    cross_process_lock::{
        memory_store_helper::{try_take_leased_lock, Lease},
        CrossProcessLockGeneration,
    },
    locks::RwLock as StdRwLock,
};
use ruma::{
    events::secret::request::SecretName, DeviceId, OwnedDeviceId, OwnedRoomId, OwnedTransactionId,
    OwnedUserId, RoomId, TransactionId, UserId,
};
use tokio::sync::{Mutex, RwLock};
use tracing::warn;
use vodozemac::Curve25519PublicKey;

use super::{
    caches::DeviceStore,
    types::{
        BackupKeys, Changes, DehydratedDeviceKey, PendingChanges, RoomKeyCounts, RoomSettings,
        StoredRoomKeyBundleData, TrackedUser,
    },
    Account, CryptoStore, InboundGroupSession, Session,
};
use crate::{
    gossiping::{GossipRequest, GossippedSecret, SecretInfo},
    identities::{DeviceData, UserIdentityData},
    olm::{
        OutboundGroupSession, PickledAccount, PickledInboundGroupSession, PickledSession,
        PrivateCrossSigningIdentity, SenderDataType, StaticAccountData,
    },
    store::types::RoomKeyWithheldEntry,
};

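/// Encode a [`SecretInfo`] as a `String`, for use as a map key.
///
/// Key requests are flattened to `"{room_id}{algorithm}{session_id}"`;
/// secret requests use the secret name itself. For example, with the
/// illustrative values `!room:example.org`, `m.megolm.v1.aes-sha2` and
/// `session1`, a key request encodes as
/// `"!room:example.orgm.megolm.v1.aes-sha2session1"`.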
fn encode_key_info(info: &SecretInfo) -> String {
    match info {
        SecretInfo::KeyRequest(info) => {
            format!("{}{}{}", info.room_id(), info.algorithm(), info.session_id())
        }
        SecretInfo::SecretRequest(i) => i.as_ref().to_owned(),
    }
}

type SessionId = String;

/// The "version" of a backup - newtype wrapper around a String.
#[derive(Clone, Debug, PartialEq)]
struct BackupVersion(String);

impl BackupVersion {
    fn from(s: &str) -> Self {
        Self(s.to_owned())
    }

    fn as_str(&self) -> &str {
        &self.0
    }
}

/// An in-memory-only store that will forget all the E2EE keys once it's
/// dropped.
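///
/// A minimal usage sketch (illustrative only; in practice the store is
/// driven through the [`CryptoStore`] trait implemented below):
///
/// ```ignore
/// let store = MemoryStore::new();
/// // A fresh store has no account saved yet.
/// assert!(store.load_account().await.unwrap().is_none());
/// ```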
#[derive(Default, Debug)]
pub struct MemoryStore {
    static_account: Arc<StdRwLock<Option<StaticAccountData>>>,

    account: StdRwLock<Option<String>>,
    // Map of sender_key to map of session_id to serialized pickle
    sessions: StdRwLock<BTreeMap<String, BTreeMap<String, String>>>,
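    // Map of room_id to map of session_id to serialized pickle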
    inbound_group_sessions: StdRwLock<BTreeMap<OwnedRoomId, HashMap<String, String>>>,

    /// Map of room id -> session id -> the version of the latest backup in
    /// which this session is stored. Equivalent to `backed_up_to` in
    /// [`IndexedDbCryptoStore`].
    inbound_group_sessions_backed_up_to:
        StdRwLock<HashMap<OwnedRoomId, HashMap<SessionId, BackupVersion>>>,

    outbound_group_sessions: StdRwLock<BTreeMap<OwnedRoomId, OutboundGroupSession>>,
    private_identity: StdRwLock<Option<PrivateCrossSigningIdentity>>,
    tracked_users: StdRwLock<HashMap<OwnedUserId, TrackedUser>>,
    olm_hashes: StdRwLock<HashMap<String, HashSet<String>>>,
    devices: DeviceStore,
    identities: StdRwLock<HashMap<OwnedUserId, String>>,
    outgoing_key_requests: StdRwLock<HashMap<OwnedTransactionId, GossipRequest>>,
    key_requests_by_info: StdRwLock<HashMap<String, OwnedTransactionId>>,
    direct_withheld_info: StdRwLock<HashMap<OwnedRoomId, HashMap<String, RoomKeyWithheldEntry>>>,
    custom_values: StdRwLock<HashMap<String, Vec<u8>>>,
    leases: StdRwLock<HashMap<String, Lease>>,
    secret_inbox: StdRwLock<HashMap<String, Vec<GossippedSecret>>>,
    backup_keys: RwLock<BackupKeys>,
    dehydrated_device_pickle_key: RwLock<Option<DehydratedDeviceKey>>,
    next_batch_token: RwLock<Option<String>>,
    room_settings: StdRwLock<HashMap<OwnedRoomId, RoomSettings>>,
    room_key_bundles:
        StdRwLock<HashMap<OwnedRoomId, HashMap<OwnedUserId, StoredRoomKeyBundleData>>>,

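    /// Lock ensuring we only run one save operation at a time, shared between
    /// `save_changes` and `save_pending_changes`.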
    save_changes_lock: Arc<Mutex<()>>,
}

impl MemoryStore {
    /// Create a new empty `MemoryStore`.
    pub fn new() -> Self {
        Self::default()
    }

    fn get_static_account(&self) -> Option<StaticAccountData> {
        self.static_account.read().clone()
    }

    pub(crate) fn save_devices(&self, devices: Vec<DeviceData>) {
        for device in devices {
            let _ = self.devices.add(device);
        }
    }

    fn delete_devices(&self, devices: Vec<DeviceData>) {
        for device in devices {
            let _ = self.devices.remove(device.user_id(), device.device_id());
        }
    }

    fn save_sessions(&self, sessions: Vec<(String, PickledSession)>) {
        let mut session_store = self.sessions.write();

        for (session_id, pickle) in sessions {
            let entry = session_store.entry(pickle.sender_key.to_base64()).or_default();

            // Insert the session, replacing any existing one with the same ID.
            entry.insert(
                session_id,
                serde_json::to_string(&pickle).expect("Failed to serialize olm session"),
            );
        }
    }

    fn save_outbound_group_sessions(&self, sessions: Vec<OutboundGroupSession>) {
        self.outbound_group_sessions
            .write()
            .extend(sessions.into_iter().map(|s| (s.room_id().to_owned(), s)));
    }

    fn save_private_identity(&self, private_identity: Option<PrivateCrossSigningIdentity>) {
        *self.private_identity.write() = private_identity;
    }

    /// Return all the [`InboundGroupSession`]s we have, paired with the
    /// `backed_up_to` value for each one (or `None` where the session has
    /// not been backed up).
    async fn get_inbound_group_sessions_and_backed_up_to(
        &self,
    ) -> Result<Vec<(InboundGroupSession, Option<BackupVersion>)>> {
        let lookup = |s: &InboundGroupSession| {
            self.inbound_group_sessions_backed_up_to
                .read()
                .get(&s.room_id)?
                .get(s.session_id())
                .cloned()
        };

        Ok(self
            .get_inbound_group_sessions()
            .await?
            .into_iter()
            .map(|s| {
                let v = lookup(&s);
                (s, v)
            })
            .collect())
    }
}

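/// The in-memory store cannot fail, so it uses [`Infallible`] as its error
/// type.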
type Result<T> = std::result::Result<T, Infallible>;

#[cfg_attr(target_family = "wasm", async_trait(?Send))]
#[cfg_attr(not(target_family = "wasm"), async_trait)]
impl CryptoStore for MemoryStore {
    type Error = Infallible;

    async fn load_account(&self) -> Result<Option<Account>> {
        let pickled_account: Option<PickledAccount> = self.account.read().as_ref().map(|acc| {
            serde_json::from_str(acc)
                .expect("Deserialization failed: invalid pickled account JSON format")
        });

        if let Some(pickle) = pickled_account {
            let account =
                Account::from_pickle(pickle).expect("From pickle failed: invalid pickle format");

            *self.static_account.write() = Some(account.static_data().clone());

            Ok(Some(account))
        } else {
            Ok(None)
        }
    }

    async fn load_identity(&self) -> Result<Option<PrivateCrossSigningIdentity>> {
        Ok(self.private_identity.read().clone())
    }

    async fn next_batch_token(&self) -> Result<Option<String>> {
        Ok(self.next_batch_token.read().await.clone())
    }

    async fn save_pending_changes(&self, changes: PendingChanges) -> Result<()> {
        let _guard = self.save_changes_lock.lock().await;

        let pickled_account = if let Some(account) = changes.account {
            *self.static_account.write() = Some(account.static_data().clone());
            Some(account.pickle())
        } else {
            None
        };

        *self.account.write() = pickled_account.map(|pickle| {
            serde_json::to_string(&pickle)
                .expect("Serialization failed: invalid pickled account JSON format")
        });

        Ok(())
    }

    async fn save_changes(&self, changes: Changes) -> Result<()> {
        let _guard = self.save_changes_lock.lock().await;

        let mut pickled_session: Vec<(String, PickledSession)> = Vec::new();
        for session in changes.sessions {
            let session_id = session.session_id().to_owned();
            let pickle = session.pickle().await;
            pickled_session.push((session_id.clone(), pickle));
        }
        self.save_sessions(pickled_session);

        self.save_inbound_group_sessions(changes.inbound_group_sessions, None).await?;
        self.save_outbound_group_sessions(changes.outbound_group_sessions);
        self.save_private_identity(changes.private_identity);

        self.save_devices(changes.devices.new);
        self.save_devices(changes.devices.changed);
        self.delete_devices(changes.devices.deleted);

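        // Serialize and store any new or changed user identities.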
        {
            let mut identities = self.identities.write();
            for identity in changes.identities.new.into_iter().chain(changes.identities.changed) {
                identities.insert(
                    identity.user_id().to_owned(),
                    serde_json::to_string(&identity)
                        .expect("UserIdentityData should always serialize to json"),
                );
            }
        }

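        // Remember the hashes of already-seen Olm messages so that replays
        // can be detected later via `is_message_known`.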
        {
            let mut olm_hashes = self.olm_hashes.write();
            for hash in changes.message_hashes {
                olm_hashes.entry(hash.sender_key.to_owned()).or_default().insert(hash.hash.clone());
            }
        }

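        // Index outgoing key requests both by their request ID and by their
        // encoded request info, so they can be looked up either way.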
        {
            let mut outgoing_key_requests = self.outgoing_key_requests.write();
            let mut key_requests_by_info = self.key_requests_by_info.write();

            for key_request in changes.key_requests {
                let id = key_request.request_id.clone();
                let info_string = encode_key_info(&key_request.info);

                outgoing_key_requests.insert(id.clone(), key_request);
                key_requests_by_info.insert(info_string, id);
            }
        }

        if let Some(key) = changes.backup_decryption_key {
            self.backup_keys.write().await.decryption_key = Some(key);
        }

        if let Some(version) = changes.backup_version {
            self.backup_keys.write().await.backup_version = Some(version);
        }

        if let Some(pickle_key) = changes.dehydrated_device_pickle_key {
            let mut lock = self.dehydrated_device_pickle_key.write().await;
            *lock = Some(pickle_key);
        }

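        // File any received gossiped secrets into the inbox, keyed by secret
        // name.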
        {
            let mut secret_inbox = self.secret_inbox.write();
            for secret in changes.secrets {
                secret_inbox.entry(secret.secret_name.to_string()).or_default().push(secret);
            }
        }

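        // Record the withheld-key notices we received, keyed by room ID and
        // session ID.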
        {
            let mut direct_withheld_info = self.direct_withheld_info.write();
            for (room_id, data) in changes.withheld_session_info {
                for (session_id, event) in data {
                    direct_withheld_info
                        .entry(room_id.to_owned())
                        .or_default()
                        .insert(session_id, event);
                }
            }
        }

        if let Some(next_batch_token) = changes.next_batch_token {
            *self.next_batch_token.write().await = Some(next_batch_token);
        }

        if !changes.room_settings.is_empty() {
            let mut settings = self.room_settings.write();
            settings.extend(changes.room_settings);
        }

        if !changes.received_room_key_bundles.is_empty() {
            let mut room_key_bundles = self.room_key_bundles.write();
            for bundle in changes.received_room_key_bundles {
                room_key_bundles
                    .entry(bundle.bundle_data.room_id.clone())
                    .or_default()
                    .insert(bundle.sender_user.clone(), bundle);
            }
        }

        Ok(())
    }

    async fn save_inbound_group_sessions(
        &self,
        sessions: Vec<InboundGroupSession>,
        backed_up_to_version: Option<&str>,
    ) -> Result<()> {
        for session in sessions {
            let room_id = session.room_id();
            let session_id = session.session_id();

            // Sanity-check that the session's backed-up flag is consistent
            // with the supplied `backed_up_to_version`.
            let backed_up = session.backed_up();
            if backed_up != backed_up_to_version.is_some() {
                warn!(
                    backed_up,
                    backed_up_to_version,
                    "Session backed-up flag does not correspond to backup version setting",
                );
            }

            if let Some(backup_version) = backed_up_to_version {
                self.inbound_group_sessions_backed_up_to
                    .write()
                    .entry(room_id.to_owned())
                    .or_default()
                    .insert(session_id.to_owned(), BackupVersion::from(backup_version));
            }

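            // Serialize the session and store it under its room ID and
            // session ID.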
            let pickle = session.pickle().await;
            self.inbound_group_sessions
                .write()
                .entry(session.room_id().to_owned())
                .or_default()
                .insert(
                    session.session_id().to_owned(),
                    serde_json::to_string(&pickle)
                        .expect("Pickled session data should serialize to JSON"),
                );
        }
        Ok(())
    }

    async fn get_sessions(&self, sender_key: &str) -> Result<Option<Vec<Session>>> {
        let device_keys = self.get_own_device().await?.as_device_keys().clone();

        if let Some(pickles) = self.sessions.read().get(sender_key) {
            let mut sessions: Vec<Session> = Vec::new();
            for serialized_pickle in pickles.values() {
                let pickle: PickledSession = serde_json::from_str(serialized_pickle.as_str())
                    .expect("Pickled session deserialization should work");
                let session = Session::from_pickle(device_keys.clone(), pickle)
                    .expect("Restoring a session from a pickle should always work");
                sessions.push(session);
            }
            Ok(Some(sessions))
        } else {
            Ok(None)
        }
    }

    async fn get_inbound_group_session(
        &self,
        room_id: &RoomId,
        session_id: &str,
    ) -> Result<Option<InboundGroupSession>> {
        let pickle: Option<PickledInboundGroupSession> = self
            .inbound_group_sessions
            .read()
            .get(room_id)
            .and_then(|m| m.get(session_id))
            .and_then(|ser| {
                serde_json::from_str(ser).expect("Pickled session deserialization should work")
            });

        Ok(pickle.map(|p| {
            InboundGroupSession::from_pickle(p)
                .expect("Restoring a session from a pickle should always work")
        }))
    }

    async fn get_withheld_info(
        &self,
        room_id: &RoomId,
        session_id: &str,
    ) -> Result<Option<RoomKeyWithheldEntry>> {
        Ok(self
            .direct_withheld_info
            .read()
            .get(room_id)
            .and_then(|e| e.get(session_id).cloned()))
    }

    async fn get_withheld_sessions_by_room_id(
        &self,
        room_id: &RoomId,
    ) -> crate::store::Result<Vec<RoomKeyWithheldEntry>, Self::Error> {
        Ok(self
            .direct_withheld_info
            .read()
            .get(room_id)
            .map(|e| e.values().cloned().collect())
            .unwrap_or_default())
    }

    async fn get_inbound_group_sessions(&self) -> Result<Vec<InboundGroupSession>> {
        let inbounds = self
            .inbound_group_sessions
            .read()
            .values()
            .flat_map(HashMap::values)
            .map(|ser| {
                let pickle: PickledInboundGroupSession =
                    serde_json::from_str(ser).expect("Pickle deserialization should work");
                InboundGroupSession::from_pickle(pickle)
                    .expect("Restoring a session from a pickle should always work")
            })
            .collect();
        Ok(inbounds)
    }

    async fn inbound_group_session_counts(
        &self,
        backup_version: Option<&str>,
    ) -> Result<RoomKeyCounts> {
        let backed_up = if let Some(backup_version) = backup_version {
            self.get_inbound_group_sessions_and_backed_up_to()
                .await?
                .into_iter()
                // Count the sessions backed up in the required backup
                .filter(|(_, o)| o.as_ref().is_some_and(|o| o.as_str() == backup_version))
                .count()
        } else {
            // No backup version was supplied, so there is no backup that any
            // session could be counted as stored in: report zero.
            0
        };

        let total = self.inbound_group_sessions.read().values().map(HashMap::len).sum();
        Ok(RoomKeyCounts { total, backed_up })
    }

    async fn get_inbound_group_sessions_by_room_id(
        &self,
        room_id: &RoomId,
    ) -> Result<Vec<InboundGroupSession>> {
        let inbounds = match self.inbound_group_sessions.read().get(room_id) {
            None => Vec::new(),
            Some(v) => v
                .values()
                .map(|ser| {
                    let pickle: PickledInboundGroupSession =
                        serde_json::from_str(ser).expect("Pickle deserialization should work");
                    InboundGroupSession::from_pickle(pickle)
                        .expect("Restoring a session from a pickle should always work")
                })
                .collect(),
        };
        Ok(inbounds)
    }

    async fn get_inbound_group_sessions_for_device_batch(
        &self,
        sender_key: Curve25519PublicKey,
        sender_data_type: SenderDataType,
        after_session_id: Option<String>,
        limit: usize,
    ) -> Result<Vec<InboundGroupSession>> {
        // First, find all InboundGroupSessions, filtering for those that match the
        // device and sender_data type.
        let mut sessions: Vec<_> = self
            .get_inbound_group_sessions()
            .await?
            .into_iter()
            .filter(|session: &InboundGroupSession| {
                session.creator_info.curve25519_key == sender_key
                    && session.sender_data.to_type() == sender_data_type
            })
            .collect();

        // Then, sort the sessions in order of ascending session ID...
        sessions.sort_by_key(|s| s.session_id().to_owned());

        // Figure out where in the array to start returning results from
        let start_index = {
            match after_session_id {
                None => 0,
                Some(id) => {
                    // We're looking for the first session with a session ID strictly after `id`; if
                    // there are none, the end of the array.
                    sessions
                        .iter()
                        .position(|session| session.session_id() > id.as_str())
                        .unwrap_or(sessions.len())
                }
            }
        };

        // Return up to `limit` items from the array, starting from `start_index`
        Ok(sessions.drain(start_index..).take(limit).collect())
    }

    async fn inbound_group_sessions_for_backup(
        &self,
        backup_version: &str,
        limit: usize,
    ) -> Result<Vec<InboundGroupSession>> {
        Ok(self
            .get_inbound_group_sessions_and_backed_up_to()
            .await?
            .into_iter()
            .filter_map(|(session, backed_up_to)| {
                if let Some(ref existing_version) = backed_up_to {
                    if existing_version.as_str() == backup_version {
                        // This session is already backed up in the required backup
                        return None;
                    }
                }
                // It's not backed up, or it's backed up in a different backup
                Some(session)
            })
            .take(limit)
            .collect())
    }

    async fn mark_inbound_group_sessions_as_backed_up(
        &self,
        backup_version: &str,
        room_and_session_ids: &[(&RoomId, &str)],
    ) -> Result<()> {
        for &(room_id, session_id) in room_and_session_ids {
            let session = self.get_inbound_group_session(room_id, session_id).await?;

            if let Some(session) = session {
                session.mark_as_backed_up();

                self.inbound_group_sessions_backed_up_to
                    .write()
                    .entry(room_id.to_owned())
                    .or_default()
                    .insert(session_id.to_owned(), BackupVersion::from(backup_version));

                // Save it back
                let updated_pickle = session.pickle().await;

                self.inbound_group_sessions.write().entry(room_id.to_owned()).or_default().insert(
                    session_id.to_owned(),
                    serde_json::to_string(&updated_pickle)
                        .expect("Pickle serialization should work"),
                );
            }
        }

        Ok(())
    }

    async fn reset_backup_state(&self) -> Result<()> {
        // Nothing to do here. We remember which backup version each session
        // was backed up to in `mark_inbound_group_sessions_as_backed_up`, and
        // `inbound_group_sessions_for_backup` receives the required version,
        // so we can simply compare against the stored version rather than
        // resetting any state.

        Ok(())
    }

    async fn load_backup_keys(&self) -> Result<BackupKeys> {
        Ok(self.backup_keys.read().await.to_owned())
    }

    async fn load_dehydrated_device_pickle_key(&self) -> Result<Option<DehydratedDeviceKey>> {
        Ok(self.dehydrated_device_pickle_key.read().await.to_owned())
    }

    async fn delete_dehydrated_device_pickle_key(&self) -> Result<()> {
        let mut lock = self.dehydrated_device_pickle_key.write().await;
        *lock = None;
        Ok(())
    }

    async fn get_outbound_group_session(
        &self,
        room_id: &RoomId,
    ) -> Result<Option<OutboundGroupSession>> {
        Ok(self.outbound_group_sessions.read().get(room_id).cloned())
    }

    async fn load_tracked_users(&self) -> Result<Vec<TrackedUser>> {
        Ok(self.tracked_users.read().values().cloned().collect())
    }

    async fn save_tracked_users(&self, tracked_users: &[(&UserId, bool)]) -> Result<()> {
        self.tracked_users.write().extend(tracked_users.iter().map(|(user_id, dirty)| {
            let user_id: OwnedUserId = user_id.to_owned().into();
            (user_id.clone(), TrackedUser { user_id, dirty: *dirty })
        }));
        Ok(())
    }

    async fn get_device(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
    ) -> Result<Option<DeviceData>> {
        Ok(self.devices.get(user_id, device_id))
    }

    async fn get_user_devices(
        &self,
        user_id: &UserId,
    ) -> Result<HashMap<OwnedDeviceId, DeviceData>> {
        Ok(self.devices.user_devices(user_id))
    }

    async fn get_own_device(&self) -> Result<DeviceData> {
        let account =
            self.get_static_account().expect("Expect account to exist when getting own device");

        Ok(self
            .devices
            .get(&account.user_id, &account.device_id)
            .expect("Invalid state: Should always have our own device"))
    }

    async fn get_user_identity(&self, user_id: &UserId) -> Result<Option<UserIdentityData>> {
        let serialized = self.identities.read().get(user_id).cloned();
        match serialized {
            None => Ok(None),
            Some(serialized) => {
                let id: UserIdentityData = serde_json::from_str(serialized.as_str())
                    .expect("Only valid serialized identities are saved");
                Ok(Some(id))
            }
        }
    }

    async fn is_message_known(&self, message_hash: &crate::olm::OlmMessageHash) -> Result<bool> {
        Ok(self
            .olm_hashes
            .write()
            .entry(message_hash.sender_key.to_owned())
            .or_default()
            .contains(&message_hash.hash))
    }

    async fn get_outgoing_secret_requests(
        &self,
        request_id: &TransactionId,
    ) -> Result<Option<GossipRequest>> {
        Ok(self.outgoing_key_requests.read().get(request_id).cloned())
    }

    async fn get_secret_request_by_info(
        &self,
        key_info: &SecretInfo,
    ) -> Result<Option<GossipRequest>> {
        let key_info_string = encode_key_info(key_info);

        Ok(self
            .key_requests_by_info
            .read()
            .get(&key_info_string)
            .and_then(|i| self.outgoing_key_requests.read().get(i).cloned()))
    }

    async fn get_unsent_secret_requests(&self) -> Result<Vec<GossipRequest>> {
        Ok(self
            .outgoing_key_requests
            .read()
            .values()
            .filter(|req| !req.sent_out)
            .cloned()
            .collect())
    }

    async fn delete_outgoing_secret_requests(&self, request_id: &TransactionId) -> Result<()> {
        let req = self.outgoing_key_requests.write().remove(request_id);
        if let Some(i) = req {
            let key_info_string = encode_key_info(&i.info);
            self.key_requests_by_info.write().remove(&key_info_string);
        }

        Ok(())
    }

    async fn get_secrets_from_inbox(
        &self,
        secret_name: &SecretName,
    ) -> Result<Vec<GossippedSecret>> {
        Ok(self.secret_inbox.write().entry(secret_name.to_string()).or_default().to_owned())
    }

    async fn delete_secrets_from_inbox(&self, secret_name: &SecretName) -> Result<()> {
        self.secret_inbox.write().remove(secret_name.as_str());

        Ok(())
    }

    async fn get_room_settings(&self, room_id: &RoomId) -> Result<Option<RoomSettings>> {
        Ok(self.room_settings.read().get(room_id).cloned())
    }

    async fn get_received_room_key_bundle_data(
        &self,
        room_id: &RoomId,
        user_id: &UserId,
    ) -> Result<Option<StoredRoomKeyBundleData>> {
        let guard = self.room_key_bundles.read();

        let result = guard.get(room_id).and_then(|bundles| bundles.get(user_id).cloned());

        Ok(result)
    }

    async fn get_custom_value(&self, key: &str) -> Result<Option<Vec<u8>>> {
        Ok(self.custom_values.read().get(key).cloned())
    }

    async fn set_custom_value(&self, key: &str, value: Vec<u8>) -> Result<()> {
        self.custom_values.write().insert(key.to_owned(), value);
        Ok(())
    }

    async fn remove_custom_value(&self, key: &str) -> Result<()> {
        self.custom_values.write().remove(key);
        Ok(())
    }

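    /// Try to acquire (or renew) the cross-process lock for `key` on behalf
    /// of `holder`, delegating to the shared in-memory lease helper.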
    async fn try_take_leased_lock(
        &self,
        lease_duration_ms: u32,
        key: &str,
        holder: &str,
    ) -> Result<Option<CrossProcessLockGeneration>> {
        Ok(try_take_leased_lock(&mut self.leases.write(), lease_duration_ms, key, holder))
    }

    async fn get_size(&self) -> Result<Option<usize>> {
        Ok(None)
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use matrix_sdk_test::async_test;
    use ruma::{room_id, user_id, RoomId};
    use vodozemac::{Curve25519PublicKey, Ed25519PublicKey};

    use super::SessionId;
    use crate::{
        identities::device::testing::get_device,
        olm::{
            tests::get_account_and_session_test_helper, Account, InboundGroupSession,
            OlmMessageHash, PrivateCrossSigningIdentity, SenderData,
        },
        store::{
            memorystore::MemoryStore,
            types::{Changes, DeviceChanges, PendingChanges},
            CryptoStore,
        },
        DeviceData,
    };

    #[async_test]
    async fn test_session_store() {
        let (account, session) = get_account_and_session_test_helper();
        let own_device = DeviceData::from_account(&account);
        let store = MemoryStore::new();

        assert!(store.load_account().await.unwrap().is_none());

        store
            .save_changes(Changes {
                devices: DeviceChanges { new: vec![own_device], ..Default::default() },
                ..Default::default()
            })
            .await
            .unwrap();
        store.save_pending_changes(PendingChanges { account: Some(account) }).await.unwrap();

        store
            .save_changes(Changes { sessions: vec![session.clone()], ..Default::default() })
            .await
            .unwrap();

        let sessions = store.get_sessions(&session.sender_key.to_base64()).await.unwrap().unwrap();

        let loaded_session = &sessions[0];

        assert_eq!(&session, loaded_session);
    }

    #[async_test]
    async fn test_inbound_group_session_store() {
        let (account, _) = get_account_and_session_test_helper();
        let room_id = room_id!("!test:localhost");
        let curve_key = "Nn0L2hkcCMFKqynTjyGsJbth7QrVmX3lbrksMkrGOAw";

        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;
        let inbound = InboundGroupSession::new(
            Curve25519PublicKey::from_base64(curve_key).unwrap(),
            Ed25519PublicKey::from_base64("ee3Ek+J2LkkPmjGPGLhMxiKnhiX//xcqaVL4RP6EypE").unwrap(),
            room_id,
            &outbound.session_key().await,
            SenderData::unknown(),
            outbound.settings().algorithm.to_owned(),
            None,
            false,
        )
        .unwrap();

        let store = MemoryStore::new();
        store.save_inbound_group_sessions(vec![inbound.clone()], None).await.unwrap();

        let loaded_session =
            store.get_inbound_group_session(room_id, outbound.session_id()).await.unwrap().unwrap();
        assert_eq!(inbound, loaded_session);
    }

    #[async_test]
    async fn test_backing_up_marks_sessions_as_backed_up() {
        // Given there are 2 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(2, room_id).await;

        // When I mark them as backed up
        mark_backed_up(&store, room_id, "bkp1", &sessions).await;

        // Then their backed_up_to field is set
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp1");
    }

    #[async_test]
    async fn test_backing_up_a_second_set_of_sessions_updates_their_backup_order() {
        // Given there are 3 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(3, room_id).await;

        // When I mark 0 and 1 as backed up in bkp1
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // And 1 and 2 as backed up in bkp2
        mark_backed_up(&store, room_id, "bkp2", &sessions[1..]).await;

        // Then 0 is backed up in bkp1, and 1 and 2 are backed up in bkp2
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp2");
        assert_eq!(but[sessions[2].session_id()], "bkp2");
    }

    #[async_test]
    async fn test_backing_up_again_to_the_same_version_has_no_effect() {
        // Given there are 3 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(3, room_id).await;

        // When I mark the first two as backed up in the first backup
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // And the last 2 as backed up in the same backup version
        mark_backed_up(&store, room_id, "bkp1", &sessions[1..]).await;

        // Then they all get the same backed_up_to value
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp1");
        assert_eq!(but[sessions[2].session_id()], "bkp1");
    }

    #[async_test]
    async fn test_backing_up_to_an_old_backup_version_can_increase_backed_up_to() {
        // Given we have backed up some sessions to 2 backup versions, an older and a
        // newer
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "older_bkp", &sessions[..2]).await;
        mark_backed_up(&store, room_id, "newer_bkp", &sessions[1..2]).await;

        // When I ask to back up the un-backed-up ones to the older backup
        mark_backed_up(&store, room_id, "older_bkp", &sessions[2..]).await;

        // Then each session lists the backup it was most recently included in
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "older_bkp");
        assert_eq!(but[sessions[1].session_id()], "newer_bkp");
        assert_eq!(but[sessions[2].session_id()], "older_bkp");
        assert_eq!(but[sessions[3].session_id()], "older_bkp");
    }

    #[async_test]
    async fn test_backing_up_to_an_old_backup_version_overwrites_a_newer_one() {
        // Given we have backed up to 2 backup versions, an older and a newer
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "older_bkp", &sessions).await;
        // Sanity: they are backed up in older_bkp
        assert_eq!(backed_up_tos(&store).await[sessions[0].session_id()], "older_bkp");
        mark_backed_up(&store, room_id, "newer_bkp", &sessions).await;
        // Sanity: they are now backed up in newer_bkp
        assert_eq!(backed_up_tos(&store).await[sessions[0].session_id()], "newer_bkp");

        // When I ask to back up some to the older version
        mark_backed_up(&store, room_id, "older_bkp", &sessions[..2]).await;

        // Then the older backup overwrites: we don't consider which backup is newer
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "older_bkp");
        assert_eq!(but[sessions[1].session_id()], "older_bkp");
        assert_eq!(but[sessions[2].session_id()], "newer_bkp");
        assert_eq!(but[sessions[3].session_id()], "newer_bkp");
    }

    #[async_test]
    async fn test_not_backed_up_sessions_are_eligible_for_backup() {
        // Given there are 4 sessions, 2 of which are already backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // When I ask which to back up
        let mut to_backup = store
            .inbound_group_sessions_for_backup("bkp1", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told the last 2 only
        assert_eq!(to_backup, &[sessions[2].clone(), sessions[3].clone()]);
    }

    #[async_test]
    async fn test_all_sessions_are_eligible_for_backup_if_version_is_unknown() {
        // Given there are 4 sessions, 2 of which are already backed up in bkp1
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // When I ask which to back up in an unknown version
        let mut to_backup = store
            .inbound_group_sessions_for_backup("unknown_bkp", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told to back up all of them
        assert_eq!(
            to_backup,
            &[sessions[0].clone(), sessions[1].clone(), sessions[2].clone(), sessions[3].clone()]
        );
    }

    #[async_test]
    async fn test_sessions_backed_up_to_a_later_version_are_eligible_for_backup() {
        // Given there are 4 sessions, some backed up to three different versions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp0", &sessions[..1]).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[1..2]).await;
        mark_backed_up(&store, room_id, "bkp2", &sessions[2..3]).await;

        // When I ask which to back up in the middle version
        let mut to_backup = store
            .inbound_group_sessions_for_backup("bkp1", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told to back up everything not in the version I asked about
        assert_eq!(
            to_backup,
            &[
                sessions[0].clone(), // Backed up in bkp0
                // sessions[1] is backed up in bkp1 already, which we asked about
                sessions[2].clone(), // Backed up in bkp2
                sessions[3].clone(), // Not backed up
            ]
        );
    }

    #[async_test]
    async fn test_outbound_group_session_store() {
        // Given an outbound session
        let (account, _) = get_account_and_session_test_helper();
        let room_id = room_id!("!test:localhost");
        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;

        // When we save it to the store
        let store = MemoryStore::new();
        store.save_outbound_group_sessions(vec![outbound.clone()]);

        // Then we can get it out again
        let loaded_session = store.get_outbound_group_session(room_id).await.unwrap().unwrap();
        assert_eq!(
            serde_json::to_string(&outbound.pickle().await).unwrap(),
            serde_json::to_string(&loaded_session.pickle().await).unwrap()
        );
    }

    #[async_test]
    async fn test_tracked_users_are_stored_once_per_user_id() {
        // Given a store containing 2 tracked users, both dirty
        let user1 = user_id!("@user1:s");
        let user2 = user_id!("@user2:s");
        let user3 = user_id!("@user3:s");
        let store = MemoryStore::new();
        store.save_tracked_users(&[(user1, true), (user2, true)]).await.unwrap();

        // When we mark one as clean and add another
        store.save_tracked_users(&[(user2, false), (user3, false)]).await.unwrap();

        // Then we can get them out again and their dirty flags are correct
        let loaded_tracked_users =
            store.load_tracked_users().await.expect("failed to load tracked users");

        let tracked_contains = |user_id, dirty| {
            loaded_tracked_users.iter().any(|u| u.user_id == user_id && u.dirty == dirty)
        };

        assert!(tracked_contains(user1, true));
        assert!(tracked_contains(user2, false));
        assert!(tracked_contains(user3, false));
        assert_eq!(loaded_tracked_users.len(), 3);
    }

    #[async_test]
    async fn test_private_identity_store() {
        // Given a private identity
        let private_identity = PrivateCrossSigningIdentity::empty(user_id!("@u:s"));

        // When we save it to the store
        let store = MemoryStore::new();
        store.save_private_identity(Some(private_identity.clone()));

        // Then we can get it out again
        let loaded_identity =
            store.load_identity().await.expect("failed to load private identity").unwrap();

        assert_eq!(loaded_identity.user_id(), user_id!("@u:s"));
    }

    #[async_test]
    async fn test_device_store() {
        let device = get_device();
        let store = MemoryStore::new();

        store.save_devices(vec![device.clone()]);

        let loaded_device =
            store.get_device(device.user_id(), device.device_id()).await.unwrap().unwrap();

        assert_eq!(device, loaded_device);

        let user_devices = store.get_user_devices(device.user_id()).await.unwrap();

        assert_eq!(&**user_devices.keys().next().unwrap(), device.device_id());
        assert_eq!(user_devices.values().next().unwrap(), &device);

        let loaded_device = user_devices.get(device.device_id()).unwrap();

        assert_eq!(&device, loaded_device);

        store.delete_devices(vec![device.clone()]);
        assert!(store.get_device(device.user_id(), device.device_id()).await.unwrap().is_none());
    }

    #[async_test]
    async fn test_message_hash() {
        let store = MemoryStore::new();

        let hash =
            OlmMessageHash { sender_key: "test_sender".to_owned(), hash: "test_hash".to_owned() };

        let mut changes = Changes::default();
        changes.message_hashes.push(hash.clone());

        assert!(!store.is_message_known(&hash).await.unwrap());
        store.save_changes(changes).await.unwrap();
        assert!(store.is_message_known(&hash).await.unwrap());
    }

    #[async_test]
    async fn test_key_counts_of_empty_store_are_zero() {
        // Given an empty store
        let store = MemoryStore::new();

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("")).await.unwrap();

        // Then the answer is zero
        assert_eq!(key_counts.total, 0);
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_sessions_reports_the_number_of_sessions() {
        // Given a store with sessions
        let room_id = room_id!("!test:localhost");
        let (store, _) = store_with_sessions(4, room_id).await;

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("bkp")).await.unwrap();

        // Then the answer equals the number of sessions we created
        assert_eq!(key_counts.total, 4);
        // And none are backed up
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_reports_the_number_backed_up_in_this_backup() {
        // Given a store with sessions, some backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(5, room_id).await;
        mark_backed_up(&store, room_id, "bkp", &sessions[..2]).await;

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("bkp")).await.unwrap();

        // Then the answer equals the number of sessions we created
        assert_eq!(key_counts.total, 5);
        // And the backed_up count matches how many were backed up
        assert_eq!(key_counts.backed_up, 2);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_for_null_backup_reports_zero() {
        // Given a store with sessions, some backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp", &sessions[..2]).await;

        // When we count keys, providing None as the backup version
        let key_counts = store.inbound_group_session_counts(None).await.unwrap();

        // Then we ignore everything and just say zero
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_only_reports_sessions_in_the_version_specified() {
        // Given a store with sessions, backed up in several versions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;
        mark_backed_up(&store, room_id, "bkp2", &sessions[3..]).await;

        // When we count keys for bkp2
        let key_counts = store.inbound_group_session_counts(Some("bkp2")).await.unwrap();

        // Then the backed_up count reflects how many were backed up in bkp2 only
        assert_eq!(key_counts.backed_up, 1);
    }

    /// Mark the supplied sessions as backed up in the supplied backup version
    async fn mark_backed_up(
        store: &MemoryStore,
        room_id: &RoomId,
        backup_version: &str,
        sessions: &[InboundGroupSession],
    ) {
        let rooms_and_ids: Vec<_> = sessions.iter().map(|s| (room_id, s.session_id())).collect();

        store
            .mark_inbound_group_sessions_as_backed_up(backup_version, &rooms_and_ids)
            .await
            .expect("Failed to mark sessions as backed up");
    }

    // Create a MemoryStore containing the supplied number of sessions.
    //
    // Sessions are returned in alphabetical order of session id.
    async fn store_with_sessions(
        num_sessions: usize,
        room_id: &RoomId,
    ) -> (MemoryStore, Vec<InboundGroupSession>) {
        let (account, _) = get_account_and_session_test_helper();

        let mut sessions = Vec::with_capacity(num_sessions);
        for _ in 0..num_sessions {
            sessions.push(new_session(&account, room_id).await);
        }
        sessions.sort_by_key(|s| s.session_id().to_owned());

        let store = MemoryStore::new();
        store.save_inbound_group_sessions(sessions.clone(), None).await.unwrap();

        (store, sessions)
    }

    // Create a new InboundGroupSession
    async fn new_session(account: &Account, room_id: &RoomId) -> InboundGroupSession {
        let curve_key = "Nn0L2hkcCMFKqynTjyGsJbth7QrVmX3lbrksMkrGOAw";
        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;

        InboundGroupSession::new(
            Curve25519PublicKey::from_base64(curve_key).unwrap(),
            Ed25519PublicKey::from_base64("ee3Ek+J2LkkPmjGPGLhMxiKnhiX//xcqaVL4RP6EypE").unwrap(),
            room_id,
            &outbound.session_key().await,
            SenderData::unknown(),
            outbound.settings().algorithm.to_owned(),
            None,
            false,
        )
        .unwrap()
    }

    /// Find the session_id and backed_up_to value for each of the sessions in
    /// the store.
    async fn backed_up_tos(store: &MemoryStore) -> HashMap<SessionId, String> {
        store
            .get_inbound_group_sessions_and_backed_up_to()
            .await
            .expect("Unable to get inbound group sessions and backup order")
            .iter()
            .map(|(s, o)| {
                (
                    s.session_id().to_owned(),
                    o.as_ref().map(|v| v.as_str().to_owned()).unwrap_or("".to_owned()),
                )
            })
            .collect()
    }
}

#[cfg(test)]
mod integration_tests {
    use std::{
        collections::HashMap,
        sync::{Arc, Mutex, OnceLock},
    };

    use async_trait::async_trait;
    use matrix_sdk_common::cross_process_lock::CrossProcessLockGeneration;
    use ruma::{
        events::secret::request::SecretName, DeviceId, OwnedDeviceId, RoomId, TransactionId, UserId,
    };
    use vodozemac::Curve25519PublicKey;

    use super::MemoryStore;
    use crate::{
        cryptostore_integration_tests, cryptostore_integration_tests_time,
        olm::{
            InboundGroupSession, OlmMessageHash, OutboundGroupSession, PrivateCrossSigningIdentity,
            SenderDataType, StaticAccountData,
        },
        store::{
            types::{
                BackupKeys, Changes, DehydratedDeviceKey, PendingChanges, RoomKeyCounts,
                RoomKeyWithheldEntry, RoomSettings, StoredRoomKeyBundleData, TrackedUser,
            },
            CryptoStore,
        },
        Account, DeviceData, GossipRequest, GossippedSecret, SecretInfo, Session, UserIdentityData,
    };

    /// Holds on to a [`MemoryStore`] during a test. Because it is a
    /// reference-counted handle, the underlying store lives on in `STORES`
    /// even after the test drops its copy.
    #[derive(Clone, Debug)]
    struct PersistentMemoryStore(Arc<MemoryStore>);

    impl PersistentMemoryStore {
        fn new() -> Self {
            Self(Arc::new(MemoryStore::new()))
        }

        fn get_static_account(&self) -> Option<StaticAccountData> {
            self.0.get_static_account()
        }
    }

    /// Return a clone of the store for the test with the supplied name. Note:
    /// dropping this store won't destroy its data, since
    /// [PersistentMemoryStore] is a reference-counted smart pointer
    /// to an underlying [MemoryStore].
    async fn get_store(
        name: &str,
        _passphrase: Option<&str>,
        clear_data: bool,
    ) -> PersistentMemoryStore {
        // Holds on to one [PersistentMemoryStore] per test, so even if the test drops
        // the store, we keep its data alive. This simulates the behaviour of
        // the other stores, which keep their data in a real DB, allowing us to
        // test MemoryStore using the same code.
        static STORES: OnceLock<Mutex<HashMap<String, PersistentMemoryStore>>> = OnceLock::new();
        let stores = STORES.get_or_init(|| Mutex::new(HashMap::new()));

        let mut stores = stores.lock().unwrap();

        if clear_data {
            // Create a new PersistentMemoryStore
            let new_store = PersistentMemoryStore::new();
            stores.insert(name.to_owned(), new_store.clone());
            new_store
        } else {
            stores.entry(name.to_owned()).or_insert_with(PersistentMemoryStore::new).clone()
        }
    }

    /// Forwards all methods to the underlying [MemoryStore].
    #[cfg_attr(target_family = "wasm", async_trait(?Send))]
    #[cfg_attr(not(target_family = "wasm"), async_trait)]
    impl CryptoStore for PersistentMemoryStore {
        type Error = <MemoryStore as CryptoStore>::Error;

        async fn load_account(&self) -> Result<Option<Account>, Self::Error> {
            self.0.load_account().await
        }

        async fn load_identity(&self) -> Result<Option<PrivateCrossSigningIdentity>, Self::Error> {
            self.0.load_identity().await
        }

        async fn save_changes(&self, changes: Changes) -> Result<(), Self::Error> {
            self.0.save_changes(changes).await
        }

        async fn save_pending_changes(&self, changes: PendingChanges) -> Result<(), Self::Error> {
            self.0.save_pending_changes(changes).await
        }

        async fn save_inbound_group_sessions(
            &self,
            sessions: Vec<InboundGroupSession>,
            backed_up_to_version: Option<&str>,
        ) -> Result<(), Self::Error> {
            self.0.save_inbound_group_sessions(sessions, backed_up_to_version).await
        }

        async fn get_sessions(
            &self,
            sender_key: &str,
        ) -> Result<Option<Vec<Session>>, Self::Error> {
            self.0.get_sessions(sender_key).await
        }

        async fn get_inbound_group_session(
            &self,
            room_id: &RoomId,
            session_id: &str,
        ) -> Result<Option<InboundGroupSession>, Self::Error> {
            self.0.get_inbound_group_session(room_id, session_id).await
        }

        async fn get_withheld_info(
            &self,
            room_id: &RoomId,
            session_id: &str,
        ) -> Result<Option<RoomKeyWithheldEntry>, Self::Error> {
            self.0.get_withheld_info(room_id, session_id).await
        }

        async fn get_withheld_sessions_by_room_id(
            &self,
            room_id: &RoomId,
        ) -> Result<Vec<RoomKeyWithheldEntry>, Self::Error> {
            self.0.get_withheld_sessions_by_room_id(room_id).await
        }

        async fn get_inbound_group_sessions(
            &self,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0.get_inbound_group_sessions().await
        }

        async fn inbound_group_session_counts(
            &self,
            backup_version: Option<&str>,
        ) -> Result<RoomKeyCounts, Self::Error> {
            self.0.inbound_group_session_counts(backup_version).await
        }

        async fn get_inbound_group_sessions_by_room_id(
            &self,
            room_id: &RoomId,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0.get_inbound_group_sessions_by_room_id(room_id).await
        }

        async fn get_inbound_group_sessions_for_device_batch(
            &self,
            sender_key: Curve25519PublicKey,
            sender_data_type: SenderDataType,
            after_session_id: Option<String>,
            limit: usize,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0
                .get_inbound_group_sessions_for_device_batch(
                    sender_key,
                    sender_data_type,
                    after_session_id,
                    limit,
                )
                .await
        }

        async fn inbound_group_sessions_for_backup(
            &self,
            backup_version: &str,
            limit: usize,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0.inbound_group_sessions_for_backup(backup_version, limit).await
        }

        async fn mark_inbound_group_sessions_as_backed_up(
            &self,
            backup_version: &str,
            room_and_session_ids: &[(&RoomId, &str)],
        ) -> Result<(), Self::Error> {
            self.0
                .mark_inbound_group_sessions_as_backed_up(backup_version, room_and_session_ids)
                .await
        }

        async fn reset_backup_state(&self) -> Result<(), Self::Error> {
            self.0.reset_backup_state().await
        }

        async fn load_backup_keys(&self) -> Result<BackupKeys, Self::Error> {
            self.0.load_backup_keys().await
        }

        async fn load_dehydrated_device_pickle_key(
            &self,
        ) -> Result<Option<DehydratedDeviceKey>, Self::Error> {
            self.0.load_dehydrated_device_pickle_key().await
        }

        async fn delete_dehydrated_device_pickle_key(&self) -> Result<(), Self::Error> {
            self.0.delete_dehydrated_device_pickle_key().await
        }

        async fn get_outbound_group_session(
            &self,
            room_id: &RoomId,
        ) -> Result<Option<OutboundGroupSession>, Self::Error> {
            self.0.get_outbound_group_session(room_id).await
        }

        async fn load_tracked_users(&self) -> Result<Vec<TrackedUser>, Self::Error> {
            self.0.load_tracked_users().await
        }

        async fn save_tracked_users(&self, users: &[(&UserId, bool)]) -> Result<(), Self::Error> {
            self.0.save_tracked_users(users).await
        }

        async fn get_device(
            &self,
            user_id: &UserId,
            device_id: &DeviceId,
        ) -> Result<Option<DeviceData>, Self::Error> {
            self.0.get_device(user_id, device_id).await
        }

        async fn get_user_devices(
            &self,
            user_id: &UserId,
        ) -> Result<HashMap<OwnedDeviceId, DeviceData>, Self::Error> {
            self.0.get_user_devices(user_id).await
        }

        async fn get_own_device(&self) -> Result<DeviceData, Self::Error> {
            self.0.get_own_device().await
        }

        async fn get_user_identity(
1514            &self,
1515            user_id: &UserId,
1516        ) -> Result<Option<UserIdentityData>, Self::Error> {
1517            self.0.get_user_identity(user_id).await
1518        }
1519
1520        async fn is_message_known(
1521            &self,
1522            message_hash: &OlmMessageHash,
1523        ) -> Result<bool, Self::Error> {
1524            self.0.is_message_known(message_hash).await
1525        }
1526
1527        async fn get_outgoing_secret_requests(
1528            &self,
1529            request_id: &TransactionId,
1530        ) -> Result<Option<GossipRequest>, Self::Error> {
1531            self.0.get_outgoing_secret_requests(request_id).await
1532        }
1533
1534        async fn get_secret_request_by_info(
1535            &self,
1536            secret_info: &SecretInfo,
1537        ) -> Result<Option<GossipRequest>, Self::Error> {
1538            self.0.get_secret_request_by_info(secret_info).await
1539        }
1540
1541        async fn get_unsent_secret_requests(&self) -> Result<Vec<GossipRequest>, Self::Error> {
1542            self.0.get_unsent_secret_requests().await
1543        }
1544
1545        async fn delete_outgoing_secret_requests(
1546            &self,
1547            request_id: &TransactionId,
1548        ) -> Result<(), Self::Error> {
1549            self.0.delete_outgoing_secret_requests(request_id).await
1550        }
1551
1552        async fn get_secrets_from_inbox(
1553            &self,
1554            secret_name: &SecretName,
1555        ) -> Result<Vec<GossippedSecret>, Self::Error> {
1556            self.0.get_secrets_from_inbox(secret_name).await
1557        }
1558
1559        async fn delete_secrets_from_inbox(
1560            &self,
1561            secret_name: &SecretName,
1562        ) -> Result<(), Self::Error> {
1563            self.0.delete_secrets_from_inbox(secret_name).await
1564        }
1565
1566        async fn get_room_settings(
1567            &self,
1568            room_id: &RoomId,
1569        ) -> Result<Option<RoomSettings>, Self::Error> {
1570            self.0.get_room_settings(room_id).await
1571        }
1572
1573        async fn get_received_room_key_bundle_data(
1574            &self,
1575            room_id: &RoomId,
1576            user_id: &UserId,
1577        ) -> Result<Option<StoredRoomKeyBundleData>, Self::Error> {
1578            self.0.get_received_room_key_bundle_data(room_id, user_id).await
1579        }
1580
1581        async fn get_custom_value(&self, key: &str) -> Result<Option<Vec<u8>>, Self::Error> {
1582            self.0.get_custom_value(key).await
1583        }
1584
1585        async fn set_custom_value(&self, key: &str, value: Vec<u8>) -> Result<(), Self::Error> {
1586            self.0.set_custom_value(key, value).await
1587        }
1588
1589        async fn remove_custom_value(&self, key: &str) -> Result<(), Self::Error> {
1590            self.0.remove_custom_value(key).await
1591        }
1592
1593        async fn try_take_leased_lock(
1594            &self,
1595            lease_duration_ms: u32,
1596            key: &str,
1597            holder: &str,
1598        ) -> Result<Option<CrossProcessLockGeneration>, Self::Error> {
1599            self.0.try_take_leased_lock(lease_duration_ms, key, holder).await
1600        }
1601
1602        async fn next_batch_token(&self) -> Result<Option<String>, Self::Error> {
1603            self.0.next_batch_token().await
1604        }
1605
1606        async fn get_size(&self) -> Result<Option<usize>, Self::Error> {
1607            self.0.get_size().await
1608        }
1609    }
1610
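    // These macros are shared test helpers defined elsewhere in this crate's store test
    // support; they expand into the common `CryptoStore` integration test suite (and its
    // time-based companion) and run it against `PersistentMemoryStore` through the
    // forwarding impl above.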
1611    cryptostore_integration_tests!();
1612    cryptostore_integration_tests_time!();
1613}