pub struct ClientDb<LocalDb: ClientSideDb> { /* private fields */ }
Implementations
impl<LocalDb: ClientSideDb> ClientDb<LocalDb>
pub async fn connect<C, RRL, EH, EHF, VS>( config: C, db: LocalDb, cache_watermark: usize, require_relogin: RRL, error_handler: EH, vacuum_schedule: ClientVacuumSchedule<VS>, ) -> Result<(Arc<ClientDb<LocalDb>>, impl Future<Output = usize>)>
pub fn make_ulid(self: &Arc<Self>) -> Ulid
pub fn listen_for_all_updates(self: &Arc<Self>) -> Receiver<ObjectId>
pub fn listen_for_updates_on( self: &Arc<Self>, q: QueryId, ) -> Option<Receiver<ObjectId>>
pub fn on_connection_event( self: &Arc<Self>, cb: impl 'static + Send + Sync + Fn(ConnectionEvent), )
pub async fn login( self: &Arc<Self>, url: Arc<String>, user: User, token: SessionToken, ) -> Result<()>
Note: the fact that token is actually a token for the user passed at creation of this ClientDb is not checked, and is assumed to be true. Providing the wrong user may lead to object creations or event submissions being spuriously rejected locally, but it will not allow them to succeed remotely anyway.
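For illustration, a minimal, hedged sketch of logging in. It only uses the signatures shown above; how `User` and `SessionToken` values are obtained is up to the application, and crate items are assumed to be importable from the crate root along with `std::sync::Arc`:

```rust
// Sketch only: obtaining `user` and `token` (e.g. from your own authentication
// endpoint) is outside the scope of this API.
async fn login_example<LocalDb: ClientSideDb>(
    client: &Arc<ClientDb<LocalDb>>,
    url: Arc<String>, // the server URL, in whatever scheme the crate expects
    user: User,
    token: SessionToken,
) -> Result<()> {
    // Nothing checks locally that `token` really belongs to `user`; a mismatched
    // pair may be spuriously rejected locally and will not succeed remotely.
    client.login(url, user, token).await?;
    Ok(())
}
```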
pub async fn logout(&self) -> Result<()>
pub fn user(&self) -> Option<User>
pub fn watch_upload_queue(&self) -> Receiver<Vec<UploadId>>
pub async fn list_uploads(&self) -> Result<Vec<UploadId>>
pub async fn get_upload( self: &Arc<Self>, upload_id: UploadId, ) -> Result<Option<Upload>>
pub fn rename_session(self: &Arc<Self>, name: String) -> Receiver<Result<()>>
pub async fn current_session(&self) -> Result<Session>
pub async fn list_sessions(&self) -> Result<Vec<Session>>
pub fn disconnect_session( self: &Arc<Self>, session_ref: SessionRef, ) -> Receiver<Result<()>>
pub async fn pause_vacuum(&self) -> RwLockReadGuard<'_, ()>
Pauses the vacuum until the returned lock guard is dropped
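For instance, a minimal sketch of scoping the pause so that it ends as soon as the guard goes out of scope (crate items are assumed to be importable from the crate root):

```rust
// Sketch only: keep the vacuum paused for the duration of a critical section.
async fn with_vacuum_paused<LocalDb: ClientSideDb>(client: &Arc<ClientDb<LocalDb>>) {
    let _guard = client.pause_vacuum().await;
    // ... work that must complete before the vacuum runs again ...
    // `_guard` is dropped at the end of this scope, letting the vacuum resume.
}
```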
pub async fn set_importance<T: Object>( self: &Arc<Self>, ptr: DbPtr<T>, new_importance: Importance, ) -> Result<()>
pub fn list_saved_queries(self: &Arc<Self>) -> HashMap<QueryId, SavedQuery>
pub async fn set_query_importance<T: Object>( self: &Arc<Self>, query_id: QueryId, new_importance: Importance, ) -> Result<()>
pub async fn create<T: Object>( self: &Arc<Self>, importance: Importance, object: Arc<T>, ) -> Result<(Obj<T, LocalDb>, impl Future<Output = Result<()>>)>
pub async fn create_with<T: Object>( self: &Arc<Self>, importance: Importance, object_id: ObjectId, created_at: EventId, object: Arc<T>, ) -> Result<impl Future<Output = Result<()>>>
pub async fn submit<T: Object>( self: &Arc<Self>, ptr: DbPtr<T>, event: T::Event, ) -> Result<impl Future<Output = Result<()>>>
pub async fn submit_with<T: Object>( self: &Arc<Self>, importance: Importance, object_id: ObjectId, event_id: EventId, event: Arc<T::Event>, ) -> Result<impl Future<Output = Result<()>>>
Note: this will fail if the object is not subscribed upon yet; submitting an event to an object one is not subscribed upon would not make sense anyway.
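As a sketch of the intended flow, the following helper submits an event to an already-subscribed object using the simpler submit signature above. Treating the returned future as the remote confirmation is an assumption based on the signature, and crate items are assumed to be importable from the crate root:

```rust
// Sketch only: `ptr` must point to an object the client is already subscribed upon.
async fn submit_event<T: Object, LocalDb: ClientSideDb>(
    client: &Arc<ClientDb<LocalDb>>,
    ptr: DbPtr<T>,
    event: T::Event,
) -> Result<()> {
    // The returned future is awaited here to also wait for the remote side;
    // that interpretation is an assumption based on the signature above.
    let remote = client.submit::<T>(ptr, event).await?;
    remote.await?;
    Ok(())
}
```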
pub async fn get<T: Object>( self: &Arc<Self>, importance: Importance, ptr: DbPtr<T>, ) -> Result<Obj<T, LocalDb>>
pub async fn get_local<T: Object>( self: &Arc<Self>, importance: Importance, ptr: DbPtr<T>, ) -> Result<Option<Obj<T, LocalDb>>>
pub async fn query_local<'a, T: Object>( self: &'a Arc<Self>, query: Arc<Query>, ) -> Result<impl 'a + Stream<Item = Result<Obj<T, LocalDb>>>>
pub async fn query_remote<'a, T: Object>( self: &'a Arc<Self>, importance: Importance, query_id: QueryId, query: Arc<Query>, ) -> Result<impl 'a + Stream<Item = Result<Obj<T, LocalDb>>>>
Note that it is assumed here that the same QueryId will always be associated with the same Query. In particular, this means that when bumping an Object’s snapshot_version and adjusting the queries accordingly, you should change the QueryId, as well as unsubscribe/resubscribe on startup so that the database gets updated.
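A hedged sketch of driving a remote query to completion: building the QueryId and Query values is left to the caller, crate items are assumed to be importable from the crate root, and the stream is consumed with `futures::StreamExt` (not part of this crate):

```rust
use futures::StreamExt; // for `.next()` on the returned stream

// Sketch only: building the `QueryId` and `Query` values is left to the caller.
async fn collect_remote<T: Object, LocalDb: ClientSideDb>(
    client: &Arc<ClientDb<LocalDb>>,
    importance: Importance,
    query_id: QueryId, // keep this stable for a given `query`, per the note above
    query: Arc<Query>,
) -> Result<Vec<Obj<T, LocalDb>>> {
    let stream = client
        .query_remote::<T>(importance, query_id, query)
        .await?;
    // Pin the stream so that `.next()` (which requires `Unpin`) can be used.
    let mut stream = std::pin::pin!(stream);
    let mut results = Vec::new();
    while let Some(obj) = stream.next().await {
        results.push(obj?);
    }
    Ok(results)
}
```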
pub async fn create_binary(self: &Arc<Self>, data: Arc<[u8]>) -> Result<()>
Note: when creating a binary, it can be vacuumed away at any time until an object or event that requires it is added. As such, you probably want to use pause_vacuum to make sure the created binary is not vacuumed away before the object or event has had enough time to get created.
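A sketch of the pattern described above: the vacuum-pause guard is held across both the binary creation and the creation of an object that references it. How the object records its dependency on the binary is up to your Object implementation and is not shown; crate items are assumed to be importable from the crate root:

```rust
// Sketch only: `object` is assumed to be an object whose contents reference `data`.
async fn create_with_binary<T: Object, LocalDb: ClientSideDb>(
    client: &Arc<ClientDb<LocalDb>>,
    importance: Importance,
    data: Arc<[u8]>,
    object: Arc<T>,
) -> Result<()> {
    // Hold the vacuum-pause guard until the object referencing the binary exists.
    let _vacuum_guard = client.pause_vacuum().await;
    client.create_binary(data).await?;
    let (_obj, _remote_completion) = client.create::<T>(importance, object).await?;
    // `_vacuum_guard` is dropped here, letting the vacuum resume.
    Ok(())
}
```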