freenet 0.2.48

Freenet core software
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
use std::{convert::Infallible, sync::Arc, time::Duration};

use futures::{FutureExt, future::BoxFuture};
use tokio::task::JoinHandle;
use tracing::Instrument;

use super::{
    NetEventRegister, PeerId,
    network_bridge::{
        EventLoopNotificationsReceiver, event_loop_notification_channel, p2p_protoc::P2pConnManager,
    },
};
use crate::{
    client_events::client_event_handling,
    ring::{ConnectionManager, Location},
};
use crate::{
    client_events::{BoxedClient, combinator::ClientEventsCombinator},
    config::GlobalExecutor,
    contract::{
        self, ContractHandler, ContractHandlerChannel, ExecutorToEventLoopChannel,
        NetworkEventListenerHalve, WaitingResolution, mediator_channels, run_op_request_mediator,
    },
    message::NodeEvent,
    node::NodeConfig,
    operations::connect,
};

use super::{OpManager, background_task_monitor::BackgroundTaskMonitor};

/// A running P2P node: shared operation state, the network connection
/// manager, and handles to every long-running task spawned in [`NodeP2P::build`].
/// Consumed by [`NodeP2P::run_node`], which races the tasks and returns the
/// first fatal error.
pub(crate) struct NodeP2P {
    /// Shared operation state (ring, event notification sender, result router).
    pub(crate) op_manager: Arc<OpManager>,
    /// Network connection manager; drives the event loop via `run_event_listener`.
    pub(super) conn_manager: P2pConnManager,
    /// Set to `None` in `build` (PeerId was replaced by PeerKeyLocation).
    pub(super) peer_id: Option<PeerId>,
    /// Whether this node was configured as a gateway.
    pub(super) is_gateway: bool,
    /// used for testing with deterministic location
    pub(super) location: Option<Location>,
    /// Receiver half of the event-loop notification channel; handed to the
    /// event listener in `run_node`.
    notification_channel: EventLoopNotificationsReceiver,
    /// Channel on which clients wait for transaction resolution; handed to the
    /// event listener in `run_node`.
    client_wait_for_transaction: ContractHandlerChannel<WaitingResolution>,
    /// Event-loop half of the executor <-> event-loop mediator channels.
    executor_listener: ExecutorToEventLoopChannel<NetworkEventListenerHalve>,
    /// Receives `NodeEvent`s (e.g. shutdown); the paired sender is returned
    /// from `build` as the external shutdown trigger.
    node_controller: tokio::sync::mpsc::Receiver<NodeEvent>,
    /// From `config.should_connect`; gates the initial join procedure and the
    /// aggressive connection phase in `run_node`.
    should_try_connect: bool,
    /// Resolves to an error when the client-events task exits (it is never
    /// expected to finish successfully).
    client_events_task: BoxFuture<'static, anyhow::Error>,
    /// Resolves to an error when the contract-executor task exits.
    contract_executor_task: BoxFuture<'static, anyhow::Error>,
    /// Handle for the initial join procedure; aborted on shutdown.
    initial_join_task: Option<JoinHandle<()>>,
    /// Session actor task handle; monitored and aborted in `run_node`.
    session_actor_task: JoinHandle<()>,
    /// Result router task handle; monitored and aborted in `run_node`.
    result_router_task: JoinHandle<()>,
    /// Op-request mediator task handle; monitored and aborted in `run_node`.
    op_mediator_task: JoinHandle<()>,
    /// Monitor for background tasks spawned during node construction (Ring, OpManager, etc.)
    background_task_monitor: BackgroundTaskMonitor,
}

impl NodeP2P {
    /// Aggressively wait for connections during startup to avoid on-demand delays.
    /// This is an associated function that can be spawned as a task to run concurrently
    /// with the event listener. Without the event listener running, connection
    /// handshakes won't be processed.
    async fn aggressive_initial_connections_impl(
        op_manager: &Arc<OpManager>,
        min_connections: usize,
    ) {
        tracing::info!(
            "Starting aggressive connection acquisition phase (target: {} connections)",
            min_connections
        );

        // Small networks need fast mutual discovery to avoid 10+ second
        // delays on the first GET operation; poll until the target is met
        // or the deadline passes.
        let deadline = Duration::from_secs(10);
        let started_at = tokio::time::Instant::now();
        let mut previous_count = 0;

        loop {
            if started_at.elapsed() >= deadline {
                break;
            }

            // Cooperative yield: the event listener needs CPU time to process
            // handshakes, especially in CI environments with few cores.
            tokio::task::yield_now().await;

            let connected = op_manager.ring.open_connections();

            // Target reached — nothing more to wait for.
            if connected >= min_connections {
                tracing::info!(
                    "Reached minimum connections target: {}/{}",
                    connected,
                    min_connections
                );
                break;
            }

            // Log at info level only when the count actually moved.
            if connected == previous_count {
                tracing::debug!(
                    "Current connections: {}/{}, waiting for more peers (elapsed: {}s)",
                    connected,
                    min_connections,
                    started_at.elapsed().as_secs()
                );
            } else {
                tracing::info!(
                    "Connection progress: {}/{} (elapsed: {}s)",
                    connected,
                    min_connections,
                    started_at.elapsed().as_secs()
                );
                previous_count = connected;
            }

            // Poll faster during the first few seconds to catch quick connections.
            let poll_interval = if started_at.elapsed() < Duration::from_secs(3) {
                Duration::from_millis(250)
            } else {
                Duration::from_millis(500)
            };
            tokio::time::sleep(poll_interval).await;
        }

        let final_connections = op_manager.ring.open_connections();
        tracing::info!(
            "Aggressive connection phase complete. Final connections: {}/{} (took {}s)",
            final_connections,
            min_connections,
            started_at.elapsed().as_secs()
        );
    }

    /// Drive the node until a fatal error occurs.
    ///
    /// Starts the initial join procedure (when configured), then races the
    /// network event listener against the client-events task, the
    /// contract-executor task, an infrastructure monitor (session actor,
    /// result router, op mediator) and the background-task monitor. The first
    /// to finish yields the returned error; all remaining tasks are aborted
    /// and a peer-shutdown event is emitted before returning.
    ///
    /// The `Ok` type is [`Infallible`]: this function only ever returns `Err`.
    pub(super) async fn run_node(mut self) -> anyhow::Result<Infallible> {
        // Record the start time for uptime tracking in shutdown event
        let start_time = tokio::time::Instant::now();

        // Initialize network status tracking for the connecting page diagnostics
        let gateway_addrs: std::collections::HashSet<std::net::SocketAddr> = self
            .conn_manager
            .gateways
            .iter()
            .filter_map(|g| g.socket_addr())
            .collect();
        super::network_status::init(
            self.conn_manager.listening_port(),
            gateway_addrs,
            crate::config::PCK_VERSION.to_string(),
        );

        // Emit peer startup event
        if let Some(event) = crate::tracing::NetEventLog::peer_startup(
            &self.op_manager.ring,
            crate::config::PCK_VERSION.to_string(),
            None, // git_commit - not available in library, only in binary
            None, // git_dirty - not available in library, only in binary
        ) {
            use either::Either;
            self.op_manager
                .ring
                .register_events(Either::Left(event))
                .await;
            tracing::info!(
                version = crate::config::PCK_VERSION,
                is_gateway = self.op_manager.ring.is_gateway(),
                "Peer startup event emitted"
            );
        }

        if self.should_try_connect {
            let join_handle = connect::initial_join_procedure(
                self.op_manager.clone(),
                &self.conn_manager.gateways,
            )
            .await?;
            self.initial_join_task = Some(join_handle);

            // Note: We don't run aggressive_initial_connections here because
            // the event listener hasn't started yet. The connect requests from
            // initial_join_procedure are queued but won't be processed until
            // the event listener runs. Instead, we'll run the aggressive
            // connection phase concurrently with the event listener below.
        }

        // Spawn aggressive connection task to run concurrently with event listener.
        // This is needed because connection handshakes are processed by the event
        // listener, so we can't block waiting for connections before it starts.
        let aggressive_conn_task = if self.should_try_connect {
            let op_manager = self.op_manager.clone();
            let min_connections = op_manager.ring.connection_manager.min_connections;
            Some(GlobalExecutor::spawn(async move {
                Self::aggressive_initial_connections_impl(&op_manager, min_connections).await;
            }))
        } else {
            None
        };

        // The event listener future itself; polled inside the select below
        // (not spawned), so it is dropped when another branch wins.
        let f = self.conn_manager.run_event_listener(
            self.op_manager.clone(),
            self.client_wait_for_transaction,
            self.notification_channel,
            self.executor_listener,
            self.node_controller,
        );

        // Monitor spawned infrastructure tasks (session actor, result router, op mediator).
        // If any of these panics or exits unexpectedly, the node runs degraded with no
        // logs or detection. Combine into a single future that produces an error.
        // Keep AbortHandles for cleanup since the JoinHandles are moved into the future.
        let session_abort = self.session_actor_task.abort_handle();
        let router_abort = self.result_router_task.abort_handle();
        let mediator_abort = self.op_mediator_task.abort_handle();
        let infra_monitor = {
            let mut session_handle = self.session_actor_task;
            let mut router_handle = self.result_router_task;
            let mut mediator_handle = self.op_mediator_task;
            async move {
                // Normalize any join outcome (panic, cancellation, clean
                // return) into an error: none of these tasks is expected
                // to finish while the node is running.
                fn join_result_to_error(
                    name: &str,
                    r: Result<(), tokio::task::JoinError>,
                ) -> anyhow::Error {
                    match r {
                        Err(e) if e.is_panic() => anyhow::anyhow!("{name} panicked: {e}"),
                        Err(e) => anyhow::anyhow!("{name} task failed: {e}"),
                        Ok(()) => anyhow::anyhow!("{name} exited unexpectedly"),
                    }
                }
                let e: anyhow::Error = tokio::select! {
                    biased;
                    r = &mut session_handle => join_result_to_error("Session actor", r),
                    r = &mut router_handle => join_result_to_error("Result router", r),
                    r = &mut mediator_handle => join_result_to_error("Op mediator", r),
                };
                e
            }
        };

        // Monitor background tasks registered during node construction
        // (Ring maintenance, garbage cleanup, etc.)
        let background_monitor = self.background_task_monitor.wait_for_any_exit();

        let join_task = self.initial_join_task.take();
        let result = crate::deterministic_select! {
            r = f => {
               // Irrefutable pattern: the listener's success type is
               // uninhabited, so completion always carries an error.
               let Err(e) = r;
               eprintln!("CRITICAL: Network event listener exited: {e}");
               tracing::error!("Network event listener exited: {}", e);
               Err(e)
            },
            e = self.client_events_task => {
                eprintln!("CRITICAL: Client events task exited: {e}");
                tracing::error!("Client events task exited: {:?}", e);
                Err(e)
            },
            e = self.contract_executor_task => {
                eprintln!("CRITICAL: Contract executor task exited: {e}");
                tracing::error!("Contract executor task exited: {:?}", e);
                Err(e)
            },
            e = infra_monitor => {
                eprintln!("CRITICAL: Infrastructure task exited: {e}");
                tracing::error!("Infrastructure task exited: {:?}", e);
                Err(e)
            },
            e = background_monitor => {
                eprintln!("CRITICAL: Background task exited: {e}");
                tracing::error!("Background task exited: {:?}", e);
                Err(e)
            },
        };

        // Cancel whatever is still running before emitting the shutdown event.
        if let Some(handle) = join_task {
            handle.abort();
        }
        if let Some(handle) = aggressive_conn_task {
            handle.abort();
        }
        session_abort.abort();
        router_abort.abort();
        mediator_abort.abort();

        // Emit peer shutdown event
        let (graceful, reason) = match &result {
            Ok(_) => (true, None),
            Err(e) => (false, Some(e.to_string())),
        };
        if let Some(event) = crate::tracing::NetEventLog::peer_shutdown(
            &self.op_manager.ring,
            graceful,
            reason.clone(),
            start_time,
        ) {
            use either::Either;
            self.op_manager
                .ring
                .register_events(Either::Left(event))
                .await;
            tracing::info!(
                graceful,
                reason = reason.as_deref().unwrap_or("clean exit"),
                uptime_secs = start_time.elapsed().as_secs(),
                "Peer shutdown event emitted"
            );
        }

        result
    }

    /// Build a new node and return it along with a shutdown sender.
    ///
    /// The shutdown sender can be used to trigger graceful shutdown by sending
    /// `NodeEvent::Disconnect`.
    ///
    /// Type parameters:
    /// - `CH`: contract handler implementation, built from `ch_builder`.
    /// - `CLIENTS`: number of client event sources combined into one stream.
    /// - `ER`: network event register used for event tracing.
    ///
    /// # Errors
    /// Fails if the `OpManager`, contract handler, or `P2pConnManager`
    /// cannot be constructed.
    pub(crate) async fn build<CH, const CLIENTS: usize, ER>(
        config: NodeConfig,
        clients: [BoxedClient; CLIENTS],
        event_register: ER,
        ch_builder: CH::Builder,
    ) -> anyhow::Result<(Self, tokio::sync::mpsc::Sender<NodeEvent>)>
    where
        CH: ContractHandler + Send + 'static,
        ER: NetEventRegister + Clone,
    {
        // Channels used by the event loop, contract handler, and client responses.
        let (notification_channel, notification_tx) = event_loop_notification_channel();
        let (mut ch_outbound, ch_inbound, wait_for_event) = contract::contract_handler_channel();
        let (client_responses, cli_response_sender) = contract::client_responses_channel();

        // Prepare session adapter channel for actor-based client management
        let (session_tx, session_rx) = tokio::sync::mpsc::channel(1000);

        // Install session adapter in contract handler
        ch_outbound.with_session_adapter(session_tx.clone());

        // Create result router channel for dual-path result delivery
        let (result_router_tx, result_router_rx) = tokio::sync::mpsc::channel(1000);

        // Spawn Session Actor
        use crate::client_events::session_actor::SessionActor;
        let session_actor = SessionActor::new(session_rx, cli_response_sender.clone());
        let session_actor_task = GlobalExecutor::spawn(async move {
            tracing::info!("Session actor starting");
            session_actor.run().await;
            tracing::warn!("Session actor stopped");
        });

        // Spawn ResultRouter task
        use crate::client_events::result_router::ResultRouter;
        let router = ResultRouter::new(result_router_rx, session_tx.clone());
        let result_router_task = GlobalExecutor::spawn(async move {
            tracing::info!("Result router starting");
            router.run().await;
            tracing::warn!("Result router stopped");
        });

        tracing::info!("Actor-based client management infrastructure installed with result router");

        // Core operation state; background tasks it spawns register with the monitor.
        let background_task_monitor = BackgroundTaskMonitor::new();
        let connection_manager = ConnectionManager::new(&config);
        let op_manager = Arc::new(OpManager::new(
            notification_tx,
            ch_outbound,
            &config,
            event_register.clone(),
            connection_manager,
            result_router_tx,
            &background_task_monitor,
        )?);
        op_manager.ring.attach_op_manager(&op_manager);

        // Create channels for the mediator pattern:
        // - op_request_channel: executors send (Transaction, oneshot::Sender) to mediator
        // - mediator_channels: mediator forwards Transaction to event loop and routes responses back
        let (op_request_receiver, op_sender) = contract::op_request_channel();
        let (executor_listener, to_event_loop_tx, from_event_loop_rx) =
            mediator_channels(op_manager.clone());

        // Spawn the mediator task that bridges between the pooled executors and the event loop
        let op_mediator_task = GlobalExecutor::spawn({
            let mediator_task =
                run_op_request_mediator(op_request_receiver, to_event_loop_tx, from_event_loop_rx);
            mediator_task.instrument(tracing::info_span!("op_request_mediator"))
        });

        let contract_handler = CH::build(ch_inbound, op_sender, op_manager.clone(), ch_builder)
            .await
            .map_err(|e| anyhow::anyhow!(e))?;

        let conn_manager =
            P2pConnManager::build(&config, op_manager.clone(), event_register).await?;

        // Spawn the contract-executor task; the mapped future resolves to an
        // error on any exit, since this task is expected to run forever.
        let parent_span = tracing::Span::current();
        let contract_executor_task = GlobalExecutor::spawn({
            let task = async move {
                tracing::info!("Contract executor task starting");
                let result = contract::contract_handling(
                    contract_handler,
                    crate::contract::user_input::DashboardPrompter::new(
                        crate::contract::user_input::pending_prompts(),
                    ),
                )
                .await;
                match &result {
                    Ok(_) => tracing::warn!("Contract executor task exiting normally (unexpected)"),
                    Err(e) => tracing::error!("Contract executor task exiting with error: {e}"),
                }
                result
            };
            task.instrument(tracing::info_span!(parent: parent_span.clone(), "contract_handling"))
        })
        .map(|r| match r {
            Ok(Err(e)) => anyhow::anyhow!("Error in contract handling task: {e}"),
            Ok(Ok(_)) => anyhow::anyhow!("Contract handling task exited unexpectedly"),
            Err(e) => anyhow::anyhow!(e),
        })
        .boxed();
        // Slot 0 = HTTP client API, Slot 1 = WebSocket proxy (from serve_client_api).
        let clients = ClientEventsCombinator::new(clients).with_slot_names(&["http", "websocket"]);
        // Create node controller channel with capacity for shutdown signal
        // We clone the sender to return it for external shutdown triggering
        let (node_controller_tx, node_controller_rx) = tokio::sync::mpsc::channel(1);
        let shutdown_tx = node_controller_tx.clone();
        // Spawn the client-events task; like the executor task, any exit is an error.
        let client_events_task = GlobalExecutor::spawn({
            let op_manager_clone = op_manager.clone();
            let task = async move {
                tracing::info!("Client events task starting");
                let result = client_event_handling(
                    op_manager_clone,
                    clients,
                    client_responses,
                    node_controller_tx,
                )
                .await;
                tracing::warn!("Client events task exiting (unexpected)");
                result
            };
            task.instrument(tracing::info_span!(parent: parent_span, "client_event_handling"))
        })
        .map(|r| match r {
            Ok(_) => anyhow::anyhow!("Client event handling task exited unexpectedly"),
            Err(e) => anyhow::anyhow!(e),
        })
        .boxed();

        Ok((
            NodeP2P {
                conn_manager,
                notification_channel,
                client_wait_for_transaction: wait_for_event,
                op_manager,
                executor_listener,
                node_controller: node_controller_rx,
                should_try_connect: config.should_connect,
                peer_id: None, // PeerId removed - using PeerKeyLocation instead
                is_gateway: config.is_gateway,
                location: config.location,
                client_events_task,
                contract_executor_task,
                initial_join_task: None,
                session_actor_task,
                result_router_task,
                op_mediator_task,
                background_task_monitor,
            },
            shutdown_tx,
        ))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Map a join outcome to the error message the monitor would report;
    /// mirrors `join_result_to_error` used inside `run_node`.
    fn describe_exit(name: &str, r: Result<(), tokio::task::JoinError>) -> anyhow::Error {
        match r {
            Err(e) if e.is_panic() => anyhow::anyhow!("{name} panicked: {e}"),
            Err(e) => anyhow::anyhow!("{name} failed: {e}"),
            Ok(()) => anyhow::anyhow!("{name} exited"),
        }
    }

    /// Verify that a spawned task that panics is detected via JoinHandle.
    #[tokio::test]
    async fn test_join_handle_detects_panic() {
        let outcome = tokio::spawn(async {
            panic!("intentional test panic");
        })
        .await;
        assert!(outcome.is_err());
        let join_err = outcome.unwrap_err();
        assert!(
            join_err.is_panic(),
            "JoinError should indicate a panic, got: {join_err}"
        );
    }

    /// Verify that a spawned task that returns cleanly produces Ok.
    #[tokio::test]
    async fn test_join_handle_detects_clean_exit() {
        let outcome: Result<(), _> = tokio::spawn(async {}).await;
        assert!(outcome.is_ok(), "Clean task exit should produce Ok");
    }

    /// Three tasks: 2 sleeping, 1 panics after short delay. Verify tokio::select!
    /// triggers on the panicked task and returns a panic error.
    #[tokio::test]
    async fn test_select_catches_first_panicked_task() {
        async fn sleep_forever() {
            tokio::time::sleep(Duration::from_secs(60)).await;
        }
        let mut sleeper_a: JoinHandle<()> = tokio::spawn(sleep_forever());
        let mut sleeper_b: JoinHandle<()> = tokio::spawn(sleep_forever());
        let mut doomed: JoinHandle<()> = tokio::spawn(async {
            tokio::time::sleep(Duration::from_millis(10)).await;
            panic!("task 3 panicked");
        });

        let outcome: anyhow::Result<()> = tokio::select! {
            biased;
            r = &mut sleeper_a => Err(describe_exit("task 1", r)),
            r = &mut sleeper_b => Err(describe_exit("task 2", r)),
            r = &mut doomed => Err(describe_exit("task 3", r)),
        };

        assert!(outcome.is_err());
        let err_msg = outcome.unwrap_err().to_string();
        assert!(
            err_msg.contains("task 3 panicked"),
            "Should catch the panicking task, got: {err_msg}"
        );

        // Clean up the sleeping tasks
        sleeper_a.abort();
        sleeper_b.abort();
    }
}