//! Connection retry logic for auto-connect peers.
//!
//! When an outbound handshake fails (timeout or send error), the node can
//! automatically retry with exponential backoff. Retry state lives on Node
//! (not PeerConnection) because each retry creates a fresh connection.
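//!
//! Rough lifecycle (a sketch pieced together from the methods below, not a
//! normative spec): a failed outbound handshake or send lands in
//! `schedule_retry`, a removal after MMP dead timeout lands in
//! `schedule_reconnect`, and the periodic maintenance tick drives
//! `process_pending_retries`, which re-initiates the connection. A successful
//! handshake clears the entry in `promote_connection`; exhausting
//! `max_retries` clears it in `schedule_retry`.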
use super::{Node, NodeError};
use crate::PeerIdentity;
use crate::config::PeerConfig;
use crate::identity::NodeAddr;
use tracing::{debug, info, warn};
// The maximum backoff is derived from config: node.retry.max_backoff_secs * 1000.
const MAX_RETRY_CONNECTIONS_PER_TICK: usize = 16;
/// Tracks retry state for a peer across connection attempts.
pub struct RetryState {
/// The peer config to use for initiating retries.
pub peer_config: PeerConfig,
/// Number of retries attempted so far.
pub retry_count: u32,
/// Timestamp (Unix ms) when the next retry should be attempted.
pub retry_after_ms: u64,
/// Whether this is an auto-reconnect (unlimited retries, ignores max_retries).
pub reconnect: bool,
/// Optional absolute expiry for this retry entry (Unix ms).
///
/// When set, retries are dropped after this point even if reconnect logic
/// would otherwise continue.
pub expires_at_ms: Option<u64>,
}
impl RetryState {
/// Create a new retry state for a peer.
pub fn new(peer_config: PeerConfig) -> Self {
Self {
peer_config,
retry_count: 0,
retry_after_ms: 0,
reconnect: false,
expires_at_ms: None,
}
}
/// Calculate the backoff delay in milliseconds for the current retry count.
///
/// Uses exponential backoff: `base_interval_ms * 2^retry_count`,
/// capped at `max_backoff_ms` (derived from `node.retry.max_backoff_secs`).
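///
/// For example, with the values used in the tests below (5000 ms base,
/// 300 000 ms cap), successive retries wait 5 s, 10 s, 20 s, 40 s, ... until
/// the cap is reached.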
pub fn backoff_ms(&self, base_interval_ms: u64, max_backoff_ms: u64) -> u64 {
let multiplier = 1u64.checked_shl(self.retry_count).unwrap_or(u64::MAX);
base_interval_ms
.saturating_mul(multiplier)
.min(max_backoff_ms)
}
}
impl Node {
/// Schedule a retry for a failed outbound connection, if applicable.
///
/// Only schedules if the peer is an auto-connect peer and max retries
/// have not been exhausted (unless `reconnect` is true, which retries
/// indefinitely). Does nothing if the peer is already connected or has
/// a connection in progress.
pub(super) fn schedule_retry(&mut self, node_addr: NodeAddr, now_ms: u64) {
let retry_cfg = &self.config.node.retry;
let max_retries = retry_cfg.max_retries;
// A max_retries of 0 disables retry scheduling for failed outbound attempts.
if max_retries == 0 {
return;
}
// Don't retry if peer is already connected
if self.peers.contains_key(&node_addr) {
return;
}
let base_interval_ms = retry_cfg.base_interval_secs * 1000;
let max_backoff_ms = retry_cfg.max_backoff_secs * 1000;
let peer_name = self.peer_display_name(&node_addr);
if let Some(state) = self.retry_pending.get_mut(&node_addr) {
// Already tracking — increment
state.retry_count += 1;
if !state.reconnect && state.retry_count > max_retries {
info!(
peer = %peer_name,
attempts = state.retry_count,
"Max retries exhausted, giving up on peer"
);
self.retry_pending.remove(&node_addr);
return;
}
let delay = state.backoff_ms(base_interval_ms, max_backoff_ms);
state.retry_after_ms = now_ms + delay;
debug!(
peer = %peer_name,
retry = state.retry_count,
reconnect = state.reconnect,
delay_secs = delay / 1000,
"Scheduling connection retry"
);
} else {
// First failure — find the matching PeerConfig
let peer_config = self
.config
.auto_connect_peers()
.find(|pc| {
PeerIdentity::from_npub(&pc.npub)
.map(|id| *id.node_addr() == node_addr)
.unwrap_or(false)
})
.cloned();
if let Some(pc) = peer_config {
let mut state = RetryState::new(pc);
state.retry_count = 1;
state.reconnect = true;
let delay = state.backoff_ms(base_interval_ms, max_backoff_ms);
state.retry_after_ms = now_ms + delay;
debug!(
peer = %self.peer_display_name(&node_addr),
delay_secs = delay / 1000,
"First connection attempt failed, scheduling retry"
);
self.retry_pending.insert(node_addr, state);
}
// If not found in auto_connect_peers, no retry (one-shot connection)
}
}
/// Schedule auto-reconnect for a peer removed by MMP dead timeout.
///
/// Looks up the peer in auto-connect config and checks `auto_reconnect`.
/// If enabled, feeds the peer into the retry system with unlimited retries.
///
/// If a retry entry already exists (e.g. from a previous failed handshake
/// attempt during an earlier reconnect cycle), the existing retry count is
/// preserved and incremented rather than reset to zero. This ensures
/// exponential backoff accumulates across repeated link-dead events instead
/// of resetting to the base interval on every peer removal.
pub(super) fn schedule_reconnect(&mut self, node_addr: NodeAddr, now_ms: u64) {
// Find peer in auto-connect config
let peer_config = self
.config
.auto_connect_peers()
.find(|pc| {
PeerIdentity::from_npub(&pc.npub)
.map(|id| *id.node_addr() == node_addr)
.unwrap_or(false)
})
.cloned();
let Some(pc) = peer_config else {
return; // Not an auto-connect peer, no reconnect
};
if !pc.auto_reconnect {
debug!(
peer = %self.peer_display_name(&node_addr),
"Auto-reconnect disabled for peer, skipping"
);
return;
}
let base_interval_ms = self.config.node.retry.base_interval_secs * 1000;
let max_backoff_ms = self.config.node.retry.max_backoff_secs * 1000;
let peer_name = self.peer_display_name(&node_addr);
// If we already have accumulated backoff from previous failed attempts,
// preserve and bump it rather than resetting to zero. This prevents the
// exponential backoff from being discarded on each link-dead cycle.
if let Some(state) = self.retry_pending.get_mut(&node_addr) {
state.reconnect = true;
state.retry_count += 1;
let delay = state.backoff_ms(base_interval_ms, max_backoff_ms);
state.retry_after_ms = now_ms + delay;
debug!(
peer = %peer_name,
retry = state.retry_count,
delay_secs = delay / 1000,
"Scheduling auto-reconnect after link-dead removal (backoff preserved)"
);
return;
}
let mut state = RetryState::new(pc);
state.reconnect = true;
let delay = state.backoff_ms(base_interval_ms, max_backoff_ms);
state.retry_after_ms = now_ms + delay;
debug!(
peer = %peer_name,
delay_secs = delay / 1000,
"Scheduling auto-reconnect after link-dead removal"
);
self.retry_pending.insert(node_addr, state);
}
/// Process pending retries whose time has arrived.
///
/// For each due retry, initiates a fresh connection attempt. The retry
/// entry stays in `retry_pending` until the connection succeeds (cleared
/// in `promote_connection`) or max retries are exhausted (cleared in
/// `schedule_retry`).
pub(super) async fn process_pending_retries(&mut self, now_ms: u64) {
if self.retry_pending.is_empty() {
return;
}
let expired: Vec<NodeAddr> = self
.retry_pending
.iter()
.filter_map(|(addr, state)| {
state
.expires_at_ms
.filter(|expires_at_ms| now_ms >= *expires_at_ms)
.map(|_| *addr)
})
.collect();
for node_addr in expired {
self.retry_pending.remove(&node_addr);
info!(
peer = %self.peer_display_name(&node_addr),
"Retry window expired, dropping pending retry state"
);
}
if self.retry_pending.is_empty() {
return;
}
// Collect retries that are due
let due: Vec<NodeAddr> = self
.retry_pending
.iter()
.filter(|(_, state)| now_ms >= state.retry_after_ms)
.map(|(addr, _)| *addr)
.collect();
let deferred = due.len().saturating_sub(MAX_RETRY_CONNECTIONS_PER_TICK);
if deferred > 0 {
debug!(
due = due.len(),
processing = MAX_RETRY_CONNECTIONS_PER_TICK,
deferred,
"Retry processing budget exhausted; deferring remaining peers"
);
}
for node_addr in due.into_iter().take(MAX_RETRY_CONNECTIONS_PER_TICK) {
// Peer may have connected inbound while we waited
if self.peers.contains_key(&node_addr) {
self.retry_pending.remove(&node_addr);
continue;
}
let state = match self.retry_pending.get(&node_addr) {
Some(s) => s,
None => continue,
};
debug!(
peer = %self.peer_display_name(&node_addr),
retry = state.retry_count,
"Attempting connection retry"
);
let peer_config = state.peer_config.clone();
// Ask the Nostr runtime to refresh stale overlay adverts without
// blocking this maintenance tick; retry cadence can otherwise stall
// daemon status/control traffic behind relay fetch timeouts.
if let Some(bootstrap) = self.nostr_discovery.clone() {
bootstrap
.request_advert_stale_check(peer_config.npub.clone())
.await;
}
match self.initiate_peer_retry_connection(&peer_config).await {
Ok(()) => {
// Push retry_after_ms past the handshake timeout window so
// we don't re-fire on the next tick. If the handshake
// succeeds, promote_connection() clears retry_pending. If
// it times out, check_timeouts() calls schedule_retry()
// which bumps the counter and applies proper backoff.
let hs_timeout_ms = self.config.node.rate_limit.handshake_timeout_secs * 1000;
if let Some(state) = self.retry_pending.get_mut(&node_addr) {
state.retry_after_ms = now_ms + hs_timeout_ms;
}
debug!(
peer = %self.peer_display_name(&node_addr),
"Retry connection initiated, suppressing re-fire for {}s",
self.config.node.rate_limit.handshake_timeout_secs,
);
}
Err(e) => {
warn!(
peer = %self.peer_display_name(&node_addr),
error = %e,
"Retry connection initiation failed"
);
// No-transport failures usually mean the cached overlay
// advert is stale (peer rebound NAT, switched relay, etc.).
// The advert cache is read-only inside fetch_advert, so
// every retry returns the same dead address until the
// entry expires. Force a re-fetch so the next retry tick
// picks up fresh endpoints.
if matches!(e, NodeError::NoTransportForType(_))
&& let Some(bootstrap) = self.nostr_discovery.clone()
{
bootstrap
.request_advert_stale_check(peer_config.npub.clone())
.await;
}
// Immediate failure counts as an attempt — schedule next retry
// (reconnect flag is preserved on existing retry_pending entry)
self.schedule_retry(node_addr, now_ms);
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::PeerConfig;
const TEST_MAX_BACKOFF_MS: u64 = 300_000;
#[test]
fn test_backoff_exponential() {
let state = RetryState {
peer_config: PeerConfig::default(),
retry_count: 0,
retry_after_ms: 0,
reconnect: false,
expires_at_ms: None,
};
// base = 5000ms
assert_eq!(state.backoff_ms(5000, TEST_MAX_BACKOFF_MS), 5000); // 5s * 2^0
let state = RetryState {
retry_count: 1,
..state
};
assert_eq!(state.backoff_ms(5000, TEST_MAX_BACKOFF_MS), 10_000); // 5s * 2^1
let state = RetryState {
retry_count: 2,
..state
};
assert_eq!(state.backoff_ms(5000, TEST_MAX_BACKOFF_MS), 20_000); // 5s * 2^2
let state = RetryState {
retry_count: 3,
..state
};
assert_eq!(state.backoff_ms(5000, TEST_MAX_BACKOFF_MS), 40_000); // 5s * 2^3
let state = RetryState {
retry_count: 4,
..state
};
assert_eq!(state.backoff_ms(5000, TEST_MAX_BACKOFF_MS), 80_000); // 5s * 2^4
}
#[test]
fn test_backoff_cap() {
let state = RetryState {
peer_config: PeerConfig::default(),
retry_count: 20, // 2^20 * 5000 would be huge
retry_after_ms: 0,
reconnect: false,
expires_at_ms: None,
};
assert_eq!(
state.backoff_ms(5000, TEST_MAX_BACKOFF_MS),
TEST_MAX_BACKOFF_MS
);
}
#[test]
fn test_backoff_zero_base() {
let state = RetryState {
peer_config: PeerConfig::default(),
retry_count: 3,
retry_after_ms: 0,
reconnect: false,
expires_at_ms: None,
};
assert_eq!(state.backoff_ms(0, TEST_MAX_BACKOFF_MS), 0);
}
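// A minimal added sketch: retry counts of 64 or more would overflow the shift
// inside backoff_ms; checked_shl returns None there, the multiplier saturates,
// and the result still clamps to the cap instead of wrapping or panicking.
#[test]
fn test_backoff_shift_overflow_saturates() {
let state = RetryState {
peer_config: PeerConfig::default(),
retry_count: 70, // shift amount >= 64 bits
retry_after_ms: 0,
reconnect: false,
expires_at_ms: None,
};
assert_eq!(
state.backoff_ms(5000, TEST_MAX_BACKOFF_MS),
TEST_MAX_BACKOFF_MS
);
}
// Also a sketch of the constructor's documented defaults: no retries attempted
// yet, no reconnect flag, and no expiry.
#[test]
fn test_new_state_defaults() {
let state = RetryState::new(PeerConfig::default());
assert_eq!(state.retry_count, 0);
assert_eq!(state.retry_after_ms, 0);
assert!(!state.reconnect);
assert!(state.expires_at_ms.is_none());
}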
}