pub struct NodeHandle { /* private fields */ }

Implementations

impl NodeHandle
pub async fn dht_upsert_peer( &self, local_target: NodeId, node_id: NodeId, addr: PeerAddr, ) -> Result<()>
pub async fn dht_find_node(&self, req: FindNode) -> Result<Vec<PeerAddr>>
pub async fn dht_store(&self, req: WireStore) -> Result<()>
pub async fn dht_find_value(&self, key: [u8; 32]) -> Result<Option<DhtValue>>
pub async fn dht_store_replicated<T: RequestTransport + ?Sized>( &self, transport: &T, req: WireStore, seed_peers: &[PeerAddr], ) -> Result<usize>
pub async fn dht_find_node_iterative<T: RequestTransport + ?Sized>( &self, transport: &T, target_node_id: [u8; 20], seed_peers: &[PeerAddr], ) -> Result<Vec<PeerAddr>>
pub async fn dht_find_value_iterative<T: RequestTransport + ?Sized>( &self, transport: &T, key: [u8; 32], seed_peers: &[PeerAddr], ) -> Result<Option<DhtValue>>
pub async fn dht_find_value_from_network<T: RequestTransport + ?Sized>(
    &self,
    transport: &T,
    key: [u8; 32],
    seed_peers: &[PeerAddr],
) -> Result<Option<DhtValue>>
Like Self::dht_find_value_iterative, but always queries the
network even when a local copy exists. The remote result is
stored locally before being returned.
Use this for values where the relay’s copy is the authoritative
merged superset (e.g. CommunityMembers).
pub async fn dht_republish_once<T: RequestTransport + ?Sized>( &self, transport: &T, seed_peers: &[PeerAddr], ) -> Result<usize>
pub fn start_dht_republish_loop( self, transport: Arc<dyn RequestTransport>, seed_peers: Vec<PeerAddr>, interval: Duration, ) -> JoinHandle<()>
pub fn start_subscription_sync_loop( self, transport: Arc<dyn RequestTransport>, seed_peers: Vec<PeerAddr>, interval: Duration, ) -> JoinHandle<()>
pub fn start_tls_dht_service(
    self,
    bind_addr: SocketAddr,
    local_signing_key: SigningKey,
    capabilities: Capabilities,
    tls_server: Arc<TlsServerHandle>,
) -> JoinHandle<Result<()>>
Start a TLS-over-TCP listener that accepts incoming sessions.
The returned task listens on bind_addr, wraps every accepted
TCP stream in a TLS session using the provided server handle,
then runs the SCP2P handshake and dispatches messages.
pub fn start_quic_dht_service(
    self,
    quic_server: QuicServerHandle,
    local_signing_key: SigningKey,
    capabilities: Capabilities,
) -> JoinHandle<Result<()>>
Start a QUIC/UDP listener that accepts incoming sessions.
The returned task accepts bidirectional QUIC streams on the given server endpoint, runs the SCP2P handshake, and dispatches messages.
pub async fn upsert_community_member(
    &self,
    community_share_id: ShareId,
    self_addr: PeerAddr,
) -> Result<()>
Insert or update the local DHT entry for a community the node has
joined so that other peers can discover this node as a community
member via community_info_key(share_id).
pub async fn reannounce_community_memberships(
    &self,
    self_addr: PeerAddr,
) -> Result<usize>
Re-announce DHT community member entries for all joined communities.
Called during dht_republish_once to keep community member
announcements fresh and ensure they survive app restarts.
impl NodeHandle
pub async fn sync_subscriptions(&self) -> Result<()>
pub async fn sync_subscriptions_over_dht<T: RequestTransport + ?Sized>( &self, transport: &T, seed_peers: &[PeerAddr], ) -> Result<()>
pub async fn apply_blocklist_updates_from_subscriptions<T: RequestTransport + ?Sized>(
    &self,
    transport: &T,
    seed_peers: &[PeerAddr],
) -> Result<usize>
Fetch and apply blocklist rules for all enabled blocklist shares that
publish a manifest item named "blocklist" (§4.11).
For each enabled share whose cached manifest contains an item with
name == "blocklist", this method downloads the content bytes from
seed_peers, decodes them as [BlocklistRules], and calls
NodeHandle::set_blocklist_rules to apply the update automatically.
Returns the number of shares whose rules were successfully refreshed.
pub fn start_blocklist_auto_sync_loop(
    self,
    transport: Arc<dyn RequestTransport>,
    seed_peers: Vec<PeerAddr>,
    interval: Duration,
) -> JoinHandle<()>
Spawn a background task that periodically syncs subscriptions over the DHT and then refreshes blocklist rules from any newly updated blocklist share manifests (§4.11).
The loop runs sync_subscriptions_over_dht followed by
apply_blocklist_updates_from_subscriptions on each tick.
pub async fn search(&self, query: SearchQuery) -> Result<Vec<SearchResult>>
pub async fn search_with_trust_filter( &self, query: SearchQuery, trust_filter: SearchTrustFilter, ) -> Result<Vec<SearchResult>>
pub async fn search_page(&self, query: SearchPageQuery) -> Result<SearchPage>
pub async fn search_page_with_trust_filter( &self, query: SearchPageQuery, trust_filter: SearchTrustFilter, ) -> Result<SearchPage>
pub async fn begin_partial_download( &self, content_id: [u8; 32], target_path: String, total_chunks: u32, ) -> Result<()>
pub async fn mark_partial_chunk_complete( &self, content_id: [u8; 32], chunk_index: u32, ) -> Result<()>
pub async fn clear_partial_download(&self, content_id: [u8; 32]) -> Result<()>
pub async fn set_encrypted_node_key( &self, key_material: &[u8], passphrase: &str, ) -> Result<()>
pub async fn decrypt_node_key( &self, passphrase: &str, ) -> Result<Option<Vec<u8>>>
pub async fn encrypt_publisher_identities(&self, passphrase: &str) -> Result<()>
Encrypt all in-memory publisher identity secrets with passphrase.
After this call the plaintext secrets remain in memory (for runtime use) but are persisted in encrypted form only.
pub async fn unlock_publisher_identities(
    &self,
    passphrase: &str,
) -> Result<usize>
Decrypt persisted publisher identity secrets that were encrypted
with [encrypt_publisher_identities]. After this call the
plaintext secrets are available for use by
[ensure_publisher_identity] and related APIs.
pub async fn fetch_manifest_from_peers<C: PeerConnector>( &self, connector: &C, peers: &[PeerAddr], manifest_id: [u8; 32], policy: &FetchPolicy, ) -> Result<ManifestV1>
pub async fn download_from_peers<C: PeerConnector>(
    &self,
    connector: &C,
    peers: &[PeerAddr],
    content_id: [u8; 32],
    target_path: &str,
    policy: &FetchPolicy,
    self_addr: Option<PeerAddr>,
    on_progress: Option<&ProgressCallback>,
) -> Result<()>
Download content from the network using all available peers.
Before fetching, any additional seeders recorded in the DHT via
content_provider_key are merged into the peer list. After a
successful download the content is stored locally and the
downloading node registers itself as a new seeder so future
peers can pull from it (forming a swarm).
pub async fn reannounce_seeded_content(
    &self,
    self_addr: PeerAddr,
) -> Result<usize>
Re-announce all locally seeded content in the DHT.
Call this periodically (e.g. every 10–15 minutes) to keep the
provider records alive past their TTL. For each content ID in
the local blob store the node ensures its own PeerAddr appears
in the Providers list under content_provider_key.
Re-announce share heads for public subscribed shares in the local DHT.
Subscribers cache signed ShareHead records received during sync.
For public shares we re-store them so that dht_republish_once
propagates them to the network — keeping the share discoverable
even after the original publisher goes offline.
Private shares are never re-announced: they die when the publisher stops refreshing.
Re-populate the in-memory DHT with share heads and manifests for
shares we have published. reannounce_subscribed_share_heads only
covers the subscriber side (iterates subscriptions); this covers the
publisher side so that data survives an app restart where the ephemeral
DHT is empty.
impl NodeHandle
pub async fn register_content_by_path(
    &self,
    peer: PeerAddr,
    content_bytes: &[u8],
    path: PathBuf,
) -> Result<[u8; 32]>
Register a file at path as a locally-seedable content item.
Chunks are served directly from path via seek-based reads, so no
separate blob copy is made.
pub async fn register_content_precomputed(
    &self,
    peer: PeerAddr,
    desc: ChunkedContent,
    path: PathBuf,
) -> Result<[u8; 32]>
Register a file as seedable content when the [ChunkedContent]
descriptor has already been computed (e.g. via streaming hashing).
This avoids re-reading and re-hashing the file.
pub async fn register_content_from_bytes(
    &self,
    peer: PeerAddr,
    content_bytes: &[u8],
    data_dir: &Path,
) -> Result<[u8; 32]>
Register in-memory bytes as seedable content.
Writes content_bytes to {data_dir}/{hex_content_id}.dat then
delegates to Self::register_content_by_path. Use this for small
payloads (e.g. text publishing) that are not already on disk.
pub async fn publish_files(
    &self,
    files: &[PathBuf],
    base_dir: Option<&Path>,
    title: &str,
    description: Option<&str>,
    visibility: ShareVisibility,
    communities: &[[u8; 32]],
    provider: PeerAddr,
    publisher: &ShareKeypair,
) -> Result<[u8; 32]>
Publish one or more files from disk as a single share.
Each file becomes an ItemV1 in the manifest. If base_dir is
Some, then ItemV1.path is set to the path of the file relative to
base_dir; otherwise path is None and name is the plain
filename.
pub async fn publish_folder(
    &self,
    folder: &Path,
    title: &str,
    description: Option<&str>,
    visibility: ShareVisibility,
    communities: &[[u8; 32]],
    provider: PeerAddr,
    publisher: &ShareKeypair,
) -> Result<[u8; 32]>
Publish an entire folder tree as a new share revision.
Every file under folder is recursively collected, hashed, and
registered as a local provider. Each item carries a path
relative to folder.
List all items in a share manifest.
pub async fn reannounce_content_providers(
    &self,
    self_addr: PeerAddr,
) -> Result<usize>
Re-announce DHT provider entries for all content in content_paths.
Called after relay tunnel registration so that provider entries contain the relayed address, enabling NAT-traversed downloads.
impl NodeHandle
pub async fn relay_register( &self, peer_addr: PeerAddr, ) -> Result<RelayRegistered>
pub async fn relay_register_with_slot( &self, peer_addr: PeerAddr, relay_slot_id: Option<u64>, ) -> Result<RelayRegistered>
pub async fn relay_connect( &self, peer_addr: PeerAddr, req: RelayConnect, ) -> Result<()>
pub async fn relay_stream( &self, peer_addr: PeerAddr, req: RelayStream, ) -> Result<RelayStream>
pub async fn set_relay_limits(&self, limits: RelayLimits) -> Result<()>
pub async fn set_abuse_limits(&self, limits: AbuseLimits) -> Result<()>
pub async fn note_relay_result( &self, peer: &PeerAddr, success: bool, ) -> Result<()>
pub async fn select_relay_peer(&self) -> Result<Option<PeerAddr>>
pub async fn select_relay_peers( &self, max_peers: usize, ) -> Result<Vec<PeerAddr>>
pub async fn fetch_relay_list_from_peer<T: RequestTransport + ?Sized>(
    &self,
    transport: &T,
    peer: &PeerAddr,
    max_count: u16,
) -> Result<Vec<RelayAnnouncement>>
Ask a single peer for its cached relay announcements (Relay-PEX).
Returns the raw announcement list as sent by the peer; callers
should pass results through ingest_relay_announcements for
validation and local caching.
pub async fn ingest_relay_announcements(
    &self,
    announcements: Vec<RelayAnnouncement>,
) -> Result<usize>
Validate and ingest a batch of relay announcements into the local cache.
Each announcement is independently validated (structure + signature + freshness). Invalid or expired entries are silently skipped. Returns the number of successfully ingested announcements.
pub async fn discover_relays_via_peers<T: RequestTransport + ?Sized>(
    &self,
    transport: &T,
    seed_peers: &[PeerAddr],
    max_per_peer: u16,
) -> Result<usize>
Discover relay nodes by querying a set of seed peers via Relay-PEX.
Contacts the given seed peers, collects up to max_per_peer relay announcements from each,
and ingests all valid announcements into the local cache.
Returns the total number of newly ingested relay announcements.
pub async fn publish_relay_announcement(
    &self,
    signing_key: &SigningKey,
    self_addrs: Vec<PeerAddr>,
    capacity: RelayCapacity,
    ttl_secs: u64,
) -> Result<RelayAnnouncement>
Build and sign a relay announcement for this node, then ingest it
into the local cache so it is returned by RelayListRequest handlers.
Call this on startup and periodically when capabilities.relay = true.
pub async fn publish_relay_announcement_to_dht<T: RequestTransport + ?Sized>(
    &self,
    transport: &T,
    ann: &RelayAnnouncement,
    seed_peers: &[PeerAddr],
) -> Result<usize>
Publish a relay announcement to the DHT rendezvous keys (§4.9).
The relay’s two assigned rendezvous slots are derived from its
pubkey and the current time bucket. The announcement is encoded
as a DHT value and replicated to the K closest nodes for each
slot key.
Returns the total number of successful DHT store operations.
pub async fn discover_relays_from_dht<T: RequestTransport + ?Sized>(
    &self,
    transport: &T,
    seed_peers: &[PeerAddr],
) -> Result<usize>
Discover relay nodes by looking up all rendezvous slots in the DHT for the current time bucket (§4.9).
Iterates over all RELAY_RENDEZVOUS_N slots, performs an iterative
DHT find-value lookup for each, decodes any found values as
RelayAnnouncement, and ingests valid entries into the local cache.
Returns the number of newly ingested announcements.
pub async fn register_relay_tunnel<C: PeerConnector + 'static>(
    &self,
    connector: &C,
    relay_addr: &PeerAddr,
) -> Result<ActiveRelaySlot>
Register a relay tunnel on a remote relay node.
Connects to relay_addr using the provided connector, sends
RelayRegister { tunnel: true }, stores the slot info, and
spawns a background task that keeps the connection alive and
serves forwarded requests via serve_wire_stream.
Returns the assigned ActiveRelaySlot.
pub async fn active_relay_slot(&self) -> Option<ActiveRelaySlot>
Return the first active relay slot, if any (backward-compat).
pub async fn active_relay_slots(&self) -> Vec<ActiveRelaySlot>
Return all active relay slots.
pub async fn relayed_self_addr(&self, self_addr: PeerAddr) -> PeerAddr
Build a PeerAddr that includes relay_via routing for this node.
If this node has an active relay slot, returns a PeerAddr whose
relay_via field points to the relay, allowing remote peers to
reach this firewalled node through the tunnel.
Uses the first active relay slot.
pub async fn all_relayed_self_addrs(&self, self_addr: PeerAddr) -> Vec<PeerAddr>
Build multiple PeerAddr variants, one for each active relay.
For provider announcements, publishing all relay routes lets downloaders try routes in parallel with fast failover.
impl NodeHandle
pub async fn runtime_config(&self) -> NodeConfig
pub async fn configured_bootstrap_peers(&self) -> Result<Vec<PeerAddr>>
pub async fn pin_bootstrap_key(
    &self,
    peer_addr_str: &str,
    observed_pubkey: [u8; 32],
) -> Result<()>
Record a TOFU-pinned public key for a bootstrap peer.
If the peer address already has a pinned key, verifies that the new key matches; returns an error on mismatch (identity change). If no key is pinned yet, stores it (first-seen trust).
pub async fn pinned_bootstrap_key(
    &self,
    peer_addr_str: &str,
) -> Option<[u8; 32]>
Return the TOFU-pinned public key for a bootstrap peer, if any.
pub async fn peer_records(&self) -> Vec<PeerRecord>
pub async fn note_peer_outcome(
    &self,
    addr: &PeerAddr,
    success: bool,
) -> Result<()>
Record the outcome of a transfer interaction with a peer.
Calls PeerDb::note_outcome to update the persistent reputation
score: +1 for a successful interaction, -2 for a failure.
The score is clamped to [-10, 10] and survives node restarts.
This method is a no-op if the peer is not yet in the database.
pub async fn subscriptions(&self) -> Vec<PersistedSubscription>
pub async fn cached_manifest_meta(
    &self,
    manifest_id: &[u8; 32],
) -> (Option<String>, Option<String>)
Return the cached manifest title and description for a given manifest ID.
pub async fn communities(&self) -> Vec<PersistedCommunity>
pub async fn ensure_publisher_identity( &self, label: &str, ) -> Result<ShareKeypair>
pub async fn auto_protect_publisher_identities(&self) -> Result<()>
Encrypt all existing plaintext publisher identities with the node key.
Safe to call repeatedly; already-encrypted identities are skipped.
No-ops if auto_protect_publisher_keys is disabled or no node key is set.
pub async fn ensure_node_identity(&self) -> Result<SigningKey>
Return a stable Ed25519 node identity keypair.
On first call the key is generated, persisted to the store, and returned. Subsequent calls return the same key. This avoids the previous pattern of generating a fresh ephemeral keypair on every node start, which made the node un-addressable by pubkey across restarts.
Return all publisher identities that have a current published share head, together with the signing secret so the caller can display the share keys.
Remove the published share head (and its manifest) for the given share_id.
The publisher identity key is retained so the share can be re-published later.
Re-publish the share with a new visibility setting, bumping the sequence number.
pub async fn fetch_community_status_from_peer<T: RequestTransport + ?Sized>( &self, transport: &T, peer: &PeerAddr, share_id: ShareId, share_pubkey: [u8; 32], ) -> Result<(bool, Option<String>)>
pub async fn connect(&self, peer_addr: PeerAddr) -> Result<()>
pub async fn record_peer_seen(&self, peer_addr: PeerAddr) -> Result<()>
pub async fn record_peer_seen_with_capabilities(
    &self,
    peer_addr: PeerAddr,
    capabilities: Capabilities,
) -> Result<()>
Record that a peer was seen with specific capabilities.
Call after a successful handshake to persist the remote peer’s capabilities for future relay selection and capability-aware decisions.
pub async fn apply_pex_offer(&self, offer: PexOffer) -> Result<usize>
pub async fn build_pex_offer(&self, req: PexRequest) -> Result<PexOffer>
pub async fn subscribe(&self, share_id: ShareId) -> Result<()>
pub async fn join_community( &self, share_id: ShareId, share_pubkey: [u8; 32], ) -> Result<()>
pub async fn join_community_named(
    &self,
    share_id: ShareId,
    share_pubkey: [u8; 32],
    name: &str,
) -> Result<()>
Join a community with a human-readable name.
pub async fn join_community_with_token(
    &self,
    share_id: ShareId,
    share_pubkey: [u8; 32],
    token: Option<CommunityMembershipToken>,
) -> Result<()>
Join a community with an optional membership token.
When a CommunityMembershipToken is provided, it is verified
against share_pubkey before being stored. In v0.1, tokens
are optional; community membership without a token is
self-asserted.
pub async fn join_community_with_options(
    &self,
    share_id: ShareId,
    share_pubkey: [u8; 32],
    token: Option<CommunityMembershipToken>,
    name: Option<String>,
) -> Result<()>
Join a community with optional token and optional name.
pub async fn leave_community(&self, share_id: ShareId) -> Result<()>
pub async fn update_community_name(
    &self,
    share_id: ShareId,
    name: &str,
) -> Result<()>
Update the locally stored name for a community.
Called when a remote peer reports the community name during browse.
pub async fn subscribe_with_pubkey( &self, share_id: ShareId, share_pubkey: Option<[u8; 32]>, ) -> Result<()>
pub async fn subscribe_with_trust( &self, share_id: ShareId, share_pubkey: Option<[u8; 32]>, trust_level: SubscriptionTrustLevel, ) -> Result<()>
pub async fn set_subscription_trust_level( &self, share_id: ShareId, trust_level: SubscriptionTrustLevel, ) -> Result<()>
pub async fn set_blocklist_rules( &self, blocklist_share_id: ShareId, rules: BlocklistRules, ) -> Result<()>
pub async fn clear_blocklist_rules( &self, blocklist_share_id: ShareId, ) -> Result<()>
pub async fn unsubscribe(&self, share_id: ShareId) -> Result<()>
Trait Implementations

impl Clone for NodeHandle

fn clone(&self) -> NodeHandle

fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more