1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
use crate::crypto::{PublicKey, SecretKey};
use std::net::SocketAddr;
use std::sync::Arc;
use anyhow::{anyhow, Result};
use iroh::{Endpoint, NodeId};
use uuid::Uuid;
pub use super::blobs_store::BlobsStore;
use crate::bucket_log::BucketLogProvider;
use crate::linked_data::Link;
use crate::mount::{Mount, MountError};
use super::sync::{PingPeerJob, SyncJob, SyncProvider};
/// Overview of a peer's state, generic over a bucket log provider.
/// Provides everything that a peer needs in order to
/// load data, interact with peers, and manage buckets.
#[derive(Debug)]
pub struct Peer<L: BucketLogProvider> {
    /// Backend that stores and serves the per-bucket append-only logs.
    log_provider: L,
    /// Socket address associated with this peer (exposed via `socket()`).
    socket_address: SocketAddr,
    /// Content-addressed blob store holding manifests and bucket data.
    blobs_store: BlobsStore,
    /// This peer's secret key; its public half identifies us in bucket shares.
    secret_key: SecretKey,
    /// Iroh networking endpoint used to reach other peers (also yields our `NodeId`).
    endpoint: Endpoint,
    /// Strategy object deciding how/when sync jobs execute.
    /// Held as `Arc<dyn …>` so clones of the peer share one provider instance.
    sync_provider: Arc<dyn SyncProvider<L>>,
}
impl<L: BucketLogProvider> Clone for Peer<L>
where
L: Clone,
{
fn clone(&self) -> Self {
Self {
log_provider: self.log_provider.clone(),
socket_address: self.socket_address,
blobs_store: self.blobs_store.clone(),
secret_key: self.secret_key.clone(),
endpoint: self.endpoint.clone(),
sync_provider: self.sync_provider.clone(),
}
}
}
impl<L: BucketLogProvider> Peer<L> {
    /// Assemble a peer from its already-constructed parts.
    ///
    /// Crate-internal: callers are expected to go through the module's
    /// builder/setup path rather than constructing a `Peer` directly.
    pub(super) fn new(
        log_provider: L,
        socket_address: SocketAddr,
        blobs_store: BlobsStore,
        secret_key: SecretKey,
        endpoint: Endpoint,
        sync_provider: Arc<dyn SyncProvider<L>>,
    ) -> Peer<L> {
        Self {
            log_provider,
            socket_address,
            blobs_store,
            secret_key,
            endpoint,
            sync_provider,
        }
    }

    /// The bucket log provider.
    ///
    /// NOTE(review): alias of [`Self::log_provider`]; both are kept so
    /// existing callers of either name keep compiling.
    pub fn logs(&self) -> &L {
        &self.log_provider
    }

    /// The content-addressed blob store.
    pub fn blobs(&self) -> &BlobsStore {
        &self.blobs_store
    }

    /// The iroh networking endpoint.
    pub fn endpoint(&self) -> &Endpoint {
        &self.endpoint
    }

    /// The bucket log provider (see also [`Self::logs`]).
    pub fn log_provider(&self) -> &L {
        &self.log_provider
    }

    /// This peer's secret key.
    pub fn secret(&self) -> &SecretKey {
        &self.secret_key
    }

    /// The socket address associated with this peer.
    pub fn socket(&self) -> &SocketAddr {
        &self.socket_address
    }

    /// This peer's node id, derived from the endpoint.
    pub fn id(&self) -> NodeId {
        self.endpoint.node_id()
    }

    // ========================================
    // Sync Operations (dispatch to backend)
    // ========================================

    /// Dispatch a sync job to the sync provider
    ///
    /// The provider decides when/where this executes (immediately, queued, etc.)
    pub async fn dispatch(&self, job: SyncJob) -> Result<()>
    where
        L::Error: std::error::Error + Send + Sync + 'static,
    {
        self.sync_provider.execute(self, job).await
    }

    /// Ping all peers in a bucket's shares
    ///
    /// Dispatches ping jobs to all peers listed in the bucket's current
    /// manifest shares (except ourselves). A failed dispatch for one peer is
    /// logged and skipped; only head-lookup, manifest-load, or key-decode
    /// failures abort the whole operation.
    pub async fn ping(&self, bucket_id: Uuid) -> Result<()>
    where
        L::Error: std::error::Error + Send + Sync + 'static,
    {
        // Get current head link
        let (head_link, _) = self
            .logs()
            .head(bucket_id, None)
            .await
            .map_err(|e| anyhow!("Failed to get head for bucket {}: {}", bucket_id, e))?;

        // Load manifest from blobs store
        let manifest: crate::mount::Manifest = self
            .blobs()
            .get_cbor(&head_link.hash())
            .await
            .map_err(|e| anyhow!("Failed to load manifest: {}", e))?;

        // Extract our own key (hex-encoded, matching the shares map keys)
        // so we can skip ourselves.
        let our_key = crate::crypto::PublicKey::from(*self.secret().public()).to_hex();

        // For each peer in shares, dispatch a ping job
        for peer_key_hex in manifest.shares().keys() {
            if peer_key_hex == &our_key {
                continue; // Skip ourselves
            }

            let peer_id = crate::crypto::PublicKey::from_hex(peer_key_hex)
                .map_err(|e| anyhow!("Invalid peer key in shares: {}", e))?;

            // Dispatch ping job; keep going for remaining peers on failure.
            if let Err(e) = self
                .dispatch(SyncJob::PingPeer(PingPeerJob { bucket_id, peer_id }))
                .await
            {
                tracing::warn!(
                    "Failed to dispatch ping to peer {} for bucket {}: {}",
                    peer_key_hex,
                    bucket_id,
                    e
                );
            }
        }

        Ok(())
    }

    /// Ping all peers for a bucket and collect their responses
    ///
    /// Returns a map of peer public key hex to their ping reply status.
    /// This waits for all pings to complete before returning. Peers whose
    /// key fails to parse or whose ping fails are logged and omitted from
    /// the result map rather than failing the call.
    ///
    /// # Arguments
    ///
    /// * `bucket_id` - The bucket to ping peers for
    /// * `timeout` - Optional timeout duration for the entire operation
    pub async fn ping_and_collect(
        &self,
        bucket_id: Uuid,
        timeout: Option<std::time::Duration>,
    ) -> Result<std::collections::HashMap<String, crate::peer::protocol::PingReplyStatus>>
    where
        L::Error: std::error::Error + Send + Sync + 'static,
    {
        use crate::peer::protocol::bidirectional::BidirectionalHandler;
        use crate::peer::protocol::{Ping, PingMessage};

        // Get current head link (and height, sent along in the ping)
        let (head_link, head_height) = self
            .logs()
            .head(bucket_id, None)
            .await
            .map_err(|e| anyhow!("Failed to get head for bucket {}: {}", bucket_id, e))?;

        // Load manifest from blobs store
        let manifest: crate::mount::Manifest = self
            .blobs()
            .get_cbor(&head_link.hash())
            .await
            .map_err(|e| anyhow!("Failed to load manifest: {}", e))?;

        // Extract our own key to skip ourselves
        let our_key = crate::crypto::PublicKey::from(*self.secret().public()).to_hex();

        // Collect all peer keys
        let peer_keys: Vec<_> = manifest
            .shares()
            .keys()
            .filter(|key| *key != &our_key)
            .cloned()
            .collect();

        // Ping all peers concurrently, one spawned task per peer.
        let mut tasks = Vec::new();
        for peer_key_hex in peer_keys {
            let peer_id = match crate::crypto::PublicKey::from_hex(&peer_key_hex) {
                Ok(id) => id,
                Err(e) => {
                    tracing::warn!("Invalid peer key {}: {}", peer_key_hex, e);
                    continue;
                }
            };
            let ping = PingMessage {
                bucket_id,
                link: head_link.clone(),
                height: head_height,
            };
            let peer = self.clone();
            let key = peer_key_hex.clone();
            tasks.push(tokio::spawn(async move {
                let result = Ping::send::<L>(&peer, &peer_id, ping).await;
                (key, result)
            }));
        }

        // Collect results with optional timeout
        let collect_future = async {
            let mut results: std::collections::HashMap<
                String,
                crate::peer::protocol::PingReplyStatus,
            > = std::collections::HashMap::new();
            for task in tasks {
                match task.await {
                    Ok((key, Ok(reply))) => {
                        results.insert(key, reply.status);
                    }
                    Ok((key, Err(e))) => {
                        tracing::warn!("Failed to ping peer {}: {}", key, e);
                    }
                    Err(e) => {
                        tracing::warn!("Task panicked: {}", e);
                    }
                }
            }
            Ok(results)
        };

        // Apply timeout if specified.
        // NOTE(review): on timeout, `collect_future` (which owns the
        // JoinHandles) is dropped, so the in-flight ping tasks are detached
        // and keep running in the background until they finish on their own.
        if let Some(timeout_duration) = timeout {
            match tokio::time::timeout(timeout_duration, collect_future).await {
                Ok(result) => result,
                Err(_) => Err(anyhow!(
                    "Ping collection timed out after {:?}",
                    timeout_duration
                )),
            }
        } else {
            collect_future.await
        }
    }

    /// Load mount at the current head of a bucket
    ///
    /// # Arguments
    ///
    /// * `bucket_id` - The UUID of the bucket to load
    ///
    /// # Returns
    ///
    /// The Mount at the current head of the bucket's log
    ///
    /// # Errors
    ///
    /// Returns error if:
    /// - Bucket not found in log
    /// - Failed to load mount from blobs
    pub async fn mount(&self, bucket_id: Uuid) -> Result<Mount, MountError> {
        // Get current head link from log
        let (link, _height) = self
            .log_provider
            .head(bucket_id, None)
            .await
            .map_err(|e| MountError::Default(anyhow!("Failed to get current head: {}", e)))?;

        // Load mount at that link (height is read from manifest)
        Mount::load(&link, &self.secret_key, &self.blobs_store).await
    }

    /// Load mount for reading based on the peer's role in the bucket.
    ///
    /// This method determines the appropriate version to load based on the peer's role:
    /// - **Owners** see HEAD (latest state, including unpublished changes)
    /// - **Mirrors** (or unknown roles) see the latest_published version
    ///
    /// This ensures that mirrors only see content that has been explicitly published
    /// to them, while owners always see the most recent state.
    ///
    /// # Arguments
    ///
    /// * `bucket_id` - The UUID of the bucket to load
    ///
    /// # Returns
    ///
    /// The Mount at the appropriate version for this peer's role
    ///
    /// # Errors
    ///
    /// Returns error if:
    /// - Bucket not found in log
    /// - No published version available (for mirrors)
    /// - Failed to load mount from blobs
    pub async fn mount_for_read(&self, bucket_id: Uuid) -> Result<Mount, MountError> {
        use crate::mount::PrincipalRole;

        // Get current head link from log
        let (head_link, _) = self
            .log_provider
            .head(bucket_id, None)
            .await
            .map_err(|e| MountError::Default(anyhow!("Failed to get current head: {}", e)))?;

        // Check our role from the HEAD manifest. A manifest load failure or
        // a missing share for our key both fall through to the mirror path.
        let our_role = Mount::load_manifest(&head_link, &self.blobs_store)
            .await
            .ok()
            .and_then(|m| {
                m.get_share(&self.secret_key.public())
                    .map(|s| s.role().clone())
            });

        match our_role {
            Some(PrincipalRole::Owner) => {
                // Owners see HEAD (latest state)
                self.mount(bucket_id).await
            }
            _ => {
                // Mirrors (or unknown role) see latest_published
                let (link, _) = self
                    .log_provider
                    .latest_published(bucket_id)
                    .await
                    .map_err(|e| {
                        MountError::Default(anyhow!("Failed to get latest published: {}", e))
                    })?
                    .ok_or_else(|| {
                        MountError::Default(anyhow!("No published version available"))
                    })?;
                Mount::load(&link, &self.secret_key, &self.blobs_store).await
            }
        }
    }

    /// Save a mount and append it to the bucket's log.
    ///
    /// The `publish` parameter controls publish state:
    /// - `None` — preserve current state
    /// - `Some(true)` — publish (expose secret so mirrors can decrypt)
    /// - `Some(false)` — unpublish (revoke mirror access)
    ///
    /// After appending, a `PingPeer` job is dispatched for every peer in the
    /// saved manifest's shares (except ourselves) to notify them of the new
    /// version. Dispatch failures are logged, not propagated.
    pub async fn save_mount(&self, mount: &Mount, publish: Option<bool>) -> Result<Link, MountError>
    where
        L::Error: std::error::Error + Send + Sync + 'static,
    {
        // Get our own public key to exclude from notifications
        let our_public_key = self.secret_key.public();
        tracing::info!("SAVE_MOUNT: Our public key: {}", our_public_key.to_hex());

        // Snapshot the bucket id/name before saving.
        let inner_mount = mount.inner().await;
        let manifest = inner_mount.manifest();
        let bucket_id = *manifest.id();
        let name = manifest.name().to_string();

        // Persist the mount; this may update the manifest (e.g. publish state).
        let (link, previous_link, height) = mount.save(self.blobs(), publish).await?;

        // Re-read the manifest after save so the shares and publish flag below
        // reflect the version that was actually written.
        let inner = mount.inner().await;
        let manifest = inner.manifest();
        let shares = manifest.shares();
        let is_published = manifest.is_published();
        tracing::info!("SAVE_MOUNT: Found {} shares in manifest", shares.len());

        // Append to log
        self.log_provider
            .append(
                bucket_id,
                name,
                link.clone(),
                Some(previous_link),
                height,
                is_published,
            )
            .await
            .map_err(|e| MountError::Default(anyhow!("Failed to append to log: {}", e)))?;

        // Dispatch ping jobs for each peer (except ourselves)
        let mut notified_count = 0;
        for (peer_key_hex, _share) in shares.iter() {
            tracing::info!("SAVE_MOUNT: Checking share for peer: {}", peer_key_hex);

            // Parse the peer's public key
            if let Ok(peer_public_key) = PublicKey::from_hex(peer_key_hex) {
                // Skip ourselves
                if peer_public_key == our_public_key {
                    tracing::info!("SAVE_MOUNT: Skipping ourselves: {}", peer_key_hex);
                    continue;
                }
                tracing::info!(
                    "SAVE_MOUNT: Dispatching PingPeer job for bucket {} to peer {}",
                    bucket_id,
                    peer_key_hex
                );
                match self
                    .dispatch(SyncJob::PingPeer(PingPeerJob {
                        bucket_id,
                        peer_id: peer_public_key,
                    }))
                    .await
                {
                    // Bug fix: only count successful dispatches so the summary
                    // log below reports an accurate number (previously the
                    // counter also incremented on dispatch failure).
                    Ok(()) => notified_count += 1,
                    Err(e) => {
                        tracing::warn!("Failed to dispatch ping: {}", e);
                    }
                }
            } else {
                tracing::warn!(
                    "SAVE_MOUNT: Failed to parse peer public key: {}",
                    peer_key_hex
                );
            }
        }
        tracing::info!(
            "dispatched {} PingPeer jobs for bucket {}",
            notified_count,
            bucket_id
        );

        Ok(link)
    }
}