// bee_tui/stamp_preview.rs
//! Pure math + formatting for the four `:*-preview` command-bar
//! verbs (`:topup-preview`, `:dilute-preview`, `:extend-preview`,
//! `:buy-preview`).
//!
//! All functions here are **read-only** — they compute what *would*
//! happen if the operator ran a write op against Bee, without
//! actually issuing one. This honours bee-tui's "Truth over API,
//! read-mostly" stance while still giving operators the predictive
//! answers they normally have to leave the cockpit for.
//!
//! ## Formulas
//!
//! Every formula here is the canonical one used across swarm-cli
//! (`stamp/buy.ts:97-103`), beekeeper-stamper
//! (`pkg/stamper/node.go:33-43,69-115`), gateway-proxy
//! (`stamps.ts:198-234`), and bee-scripts (`calculate_bzz.sh`):
//!
//! - `cost_bzz   = amount × 2^depth / 1e16`
//! - `ttl_blocks = amount / current_price`
//! - `ttl_secs   = ttl_blocks × blocktime_seconds`
//! - `capacity   = 2^depth × 4096` bytes
//! - `dilute(d → d+k)`: `new_amount = old_amount / 2^k`,
//!   `new_ttl = old_ttl / 2^k`, `new_capacity = capacity × 2^k`,
//!   cost = 0 (no new BZZ paid; the existing balance is
//!   redistributed across more chunks)
//!
//! ## Why a separate module
//!
//! The drill-pane economics in [`crate::components::stamps`] use the
//! same formulas but only for an *existing* batch. Previews extend
//! that to hypothetical batches (`:buy-preview`) and hypothetical
//! changes (`:topup-preview`, `:dilute-preview`,
//! `:extend-preview`). Splitting them out keeps the per-screen file
//! focused on its render path.

use bee::debug::ChainState;
use bee::postage::PostageBatch;
use num_bigint::BigInt;

use crate::components::stamps::{format_bytes, format_ttl_seconds};

/// Block time in seconds for Gnosis Chain (where Bee's stamp
/// contract lives). Hard-coded across the ecosystem (swarm-cli,
/// beekeeper, gateway-proxy all assume 5s); pinning it here matches.
/// Exposed as a constant so tests can reuse it.
pub const GNOSIS_BLOCK_TIME_SECS: i64 = 5;

/// 1 BZZ in PLUR (BZZ's smallest unit), i.e. BZZ has 16 decimals.
/// Kept as `f64` because it is only used for display-grade cost
/// conversion, never for on-chain arithmetic.
pub const PLUR_PER_BZZ: f64 = 1e16;
/// Outcome of `:topup-preview`. Pure values; the verb formats them
/// into the single-line `CommandStatus` summary.
#[derive(Debug, Clone, PartialEq)]
pub struct TopupPreview {
    // First 8 hex chars of the batch id (see `short_batch_id`).
    pub batch_id_short: String,
    pub current_depth: u8,
    // Raw TTL as reported by Bee at preview time (may be negative
    // if Bee reports the batch as already expired).
    pub current_ttl_seconds: i64,
    /// Additional per-chunk PLUR the operator wants to add.
    pub delta_amount: BigInt,
    /// Extra TTL the topup would buy, in seconds.
    pub extra_ttl_seconds: i64,
    /// New TTL = current + extra (clamped at 0 if Bee already
    /// reports the batch as expired).
    pub new_ttl_seconds: i64,
    /// Cost of the topup in BZZ (= delta × 2^depth / 1e16).
    pub cost_bzz: f64,
}
69impl TopupPreview {
70    /// One-line summary for the command-bar.
71    pub fn summary(&self) -> String {
72        format!(
73            "topup-preview {}: +{:.4} BZZ (delta {} PLUR/chunk), TTL {} → {}",
74            self.batch_id_short,
75            self.cost_bzz,
76            self.delta_amount,
77            format_ttl_seconds(self.current_ttl_seconds),
78            format_ttl_seconds(self.new_ttl_seconds),
79        )
80    }
81}
82
/// Outcome of `:dilute-preview`. Dilute redistributes the existing
/// PLUR balance across more chunks, so there is no cost field here:
/// only the depth, capacity, and TTL deltas change.
#[derive(Debug, Clone, PartialEq)]
pub struct DilutePreview {
    // First 8 hex chars of the batch id (see `short_batch_id`).
    pub batch_id_short: String,
    pub old_depth: u8,
    pub new_depth: u8,
    // Theoretical capacities (2^depth × 4 KiB), before bucket skew.
    pub old_capacity_bytes: u128,
    pub new_capacity_bytes: u128,
    // TTLs clamped at 0; dilute halves TTL per +1 depth.
    pub old_ttl_seconds: i64,
    pub new_ttl_seconds: i64,
}
94impl DilutePreview {
95    pub fn summary(&self) -> String {
96        format!(
97            "dilute-preview {}: depth {}→{}, capacity {}→{}, TTL {}→{}, cost 0 BZZ",
98            self.batch_id_short,
99            self.old_depth,
100            self.new_depth,
101            format_bytes(self.old_capacity_bytes),
102            format_bytes(self.new_capacity_bytes),
103            format_ttl_seconds(self.old_ttl_seconds),
104            format_ttl_seconds(self.new_ttl_seconds),
105        )
106    }
107}
108
/// Outcome of `:extend-preview`: what it would take to push an
/// existing batch's TTL out by a requested duration.
#[derive(Debug, Clone, PartialEq)]
pub struct ExtendPreview {
    // First 8 hex chars of the batch id (see `short_batch_id`).
    pub batch_id_short: String,
    pub depth: u8,
    // Raw TTL as reported by Bee at preview time.
    pub current_ttl_seconds: i64,
    // Requested extension, as parsed from the operator's input.
    pub extension_seconds: i64,
    /// Per-chunk PLUR the operator would need to add to gain
    /// `extension_seconds` of TTL at the current price.
    pub needed_amount_plur: BigInt,
    // BZZ cost of that topup (= needed_amount × 2^depth / 1e16).
    pub cost_bzz: f64,
    // current (clamped at 0) + extension.
    pub new_ttl_seconds: i64,
}
122impl ExtendPreview {
123    pub fn summary(&self) -> String {
124        format!(
125            "extend-preview {} +{}: cost {:.4} BZZ ({} PLUR/chunk), TTL {} → {}",
126            self.batch_id_short,
127            format_ttl_seconds(self.extension_seconds),
128            self.cost_bzz,
129            self.needed_amount_plur,
130            format_ttl_seconds(self.current_ttl_seconds),
131            format_ttl_seconds(self.new_ttl_seconds),
132        )
133    }
134}
135
/// Outcome of `:buy-preview` for a hypothetical fresh batch: the
/// operator supplies (depth, per-chunk amount) and gets back the
/// resulting capacity, TTL, and BZZ cost.
#[derive(Debug, Clone, PartialEq)]
pub struct BuyPreview {
    pub depth: u8,
    // Per-chunk PLUR amount as supplied by the operator.
    pub amount_plur: BigInt,
    // Theoretical capacity (2^depth × 4 KiB).
    pub capacity_bytes: u128,
    pub ttl_seconds: i64,
    pub cost_bzz: f64,
}
145impl BuyPreview {
146    pub fn summary(&self) -> String {
147        format!(
148            "buy-preview depth={} amount={} PLUR/chunk: capacity {}, TTL {}, cost {:.4} BZZ",
149            self.depth,
150            self.amount_plur,
151            format_bytes(self.capacity_bytes),
152            format_ttl_seconds(self.ttl_seconds),
153            self.cost_bzz,
154        )
155    }
156}
157
/// Output of `:plan-batch` — the unified topup+dilute decision the
/// beekeeper-stamper module makes per-batch. Given thresholds and a
/// batch, returns whether topup and/or dilute is needed and the BZZ
/// cost of each leg. Each leg is independent; the cockpit shows them
/// together so the operator sees the full picture at once.
///
/// Mirrors `pkg/stamper/node.go:Set` in beekeeper, except read-only
/// (no chain writes). Default thresholds match the cross-ecosystem
/// convention (swarm-gateway): usage 0.85, TTL 24h, dilute by +2.
#[derive(Debug, Clone, PartialEq)]
pub struct PlanPreview {
    // First 8 hex chars of the batch id (see `short_batch_id`).
    pub batch_id_short: String,
    /// Snapshot of the batch's state at plan time.
    pub current_depth: u8,
    pub current_usage_pct: f64,
    pub current_ttl_seconds: i64,
    /// Threshold inputs the plan was computed against.
    pub usage_threshold_pct: f64,
    pub ttl_threshold_seconds: i64,
    pub extra_depth: u8,
    /// Action recommended: `None` when nothing to do, otherwise zero
    /// or more legs that together restore the batch to thresholds.
    pub action: PlanAction,
    /// Total BZZ cost across both legs. Dilute is free; topup pays.
    pub total_cost_bzz: f64,
    /// Reason rendered alongside the plan ("usage above 85%", "TTL
    /// below 24h after dilute", "immutable batch can't dilute", etc.)
    pub reason: String,
}
/// The recommendation computed by [`plan_batch`]. Variants carry the
/// numbers needed to render the plan without recomputing anything.
#[derive(Debug, Clone, PartialEq)]
pub enum PlanAction {
    /// Batch is healthy against both thresholds — no action needed.
    None,
    /// Just topup; usage is still under threshold.
    Topup {
        delta_amount_plur: BigInt,
        new_ttl_seconds: i64,
        cost_bzz: f64,
    },
    /// Just dilute; TTL is still above threshold post-dilute.
    Dilute {
        new_depth: u8,
        post_dilute_ttl_seconds: i64,
    },
    /// Both. Topup happens first (preserves the per-chunk amount),
    /// then dilute drops the post-topup TTL by `2^extra_depth`.
    TopupThenDilute {
        topup_delta_amount_plur: BigInt,
        topup_cost_bzz: f64,
        new_depth: u8,
        post_dilute_ttl_seconds: i64,
    },
}
213impl PlanPreview {
214    pub fn summary(&self) -> String {
215        let action_line = match &self.action {
216            PlanAction::None => "no action needed".to_string(),
217            PlanAction::Topup {
218                delta_amount_plur,
219                cost_bzz,
220                new_ttl_seconds,
221                ..
222            } => format!(
223                "topup +{} PLUR/chunk → TTL {} (cost {cost_bzz:.4} BZZ)",
224                delta_amount_plur,
225                format_ttl_seconds(*new_ttl_seconds),
226            ),
227            PlanAction::Dilute {
228                new_depth,
229                post_dilute_ttl_seconds,
230            } => format!(
231                "dilute → depth {new_depth} (TTL {} after, no BZZ)",
232                format_ttl_seconds(*post_dilute_ttl_seconds),
233            ),
234            PlanAction::TopupThenDilute {
235                topup_delta_amount_plur,
236                topup_cost_bzz,
237                new_depth,
238                post_dilute_ttl_seconds,
239            } => format!(
240                "topup +{topup_delta_amount_plur} PLUR/chunk + dilute → depth {new_depth} (TTL {} after, cost {topup_cost_bzz:.4} BZZ)",
241                format_ttl_seconds(*post_dilute_ttl_seconds),
242            ),
243        };
244        format!(
245            "plan-batch {}: usage {:.1}% (thr {:.0}%), TTL {} (thr {}); {action_line}; total {:.4} BZZ — {}",
246            self.batch_id_short,
247            self.current_usage_pct * 100.0,
248            self.usage_threshold_pct * 100.0,
249            format_ttl_seconds(self.current_ttl_seconds),
250            format_ttl_seconds(self.ttl_threshold_seconds),
251            self.total_cost_bzz,
252            self.reason,
253        )
254    }
255}
256
/// Default thresholds, sourced from gateway-proxy and swarm-gateway:
/// trigger dilute when usage exceeds 85%, top-up when remaining TTL
/// drops below 24 hours, dilute by +2 depth (4× capacity).
pub const DEFAULT_USAGE_THRESHOLD: f64 = 0.85;
pub const DEFAULT_TTL_THRESHOLD_SECONDS: i64 = 24 * 60 * 60;
pub const DEFAULT_EXTRA_DEPTH: u8 = 2;
264/// Run beekeeper-stamper's `Set` algorithm read-only on a single
265/// batch. Decides whether to topup, dilute, both, or skip.
266///
267/// * `usage_threshold` — fraction in `[0, 1]`. Above this, dilute.
268/// * `ttl_threshold_seconds` — if remaining TTL (after any dilute)
269///   is below this, topup to bring it back above.
270/// * `extra_depth` — how many depth levels to dilute by when needed.
271pub fn plan_batch(
272    batch: &PostageBatch,
273    chain_state: &ChainState,
274    usage_threshold: f64,
275    ttl_threshold_seconds: i64,
276    extra_depth: u8,
277) -> Result<PlanPreview, String> {
278    if !(0.0..=1.0).contains(&usage_threshold) {
279        return Err(format!(
280            "usage_threshold {usage_threshold} out of range [0, 1]"
281        ));
282    }
283    if ttl_threshold_seconds <= 0 {
284        return Err("ttl_threshold must be a positive duration".into());
285    }
286    if chain_state.current_price <= BigInt::from(0) {
287        return Err("chain price not loaded yet — try again in a moment".into());
288    }
289    let bucket_depth = batch.bucket_depth.max(16);
290    let usage_pct = stamp_usage(batch.utilization, batch.depth, bucket_depth);
291    let current_ttl = batch.batch_ttl.max(0);
292
293    let new_depth = batch.depth.saturating_add(extra_depth);
294    if new_depth > 41 {
295        return Err(format!(
296            "current depth {} + extra_depth {extra_depth} exceeds Bee's depth ceiling 41",
297            batch.depth
298        ));
299    }
300
301    // Dilute leg: when usage exceeds threshold + the batch is
302    // mutable. Each +1 depth halves the remaining TTL.
303    let needs_dilute = usage_pct >= usage_threshold;
304    let dilute_factor = 1i64 << extra_depth;
305    let post_dilute_ttl = current_ttl / dilute_factor.max(1);
306
307    if batch.immutable && needs_dilute {
308        // Immutable batches can't dilute — flag and only consider topup.
309        if current_ttl >= ttl_threshold_seconds {
310            return Ok(PlanPreview {
311                batch_id_short: short_batch_id(batch),
312                current_depth: batch.depth,
313                current_usage_pct: usage_pct,
314                current_ttl_seconds: current_ttl,
315                usage_threshold_pct: usage_threshold,
316                ttl_threshold_seconds,
317                extra_depth,
318                action: PlanAction::None,
319                total_cost_bzz: 0.0,
320                reason: format!(
321                    "immutable batch above usage threshold ({:.1}%) — can't dilute, but TTL still above threshold",
322                    usage_pct * 100.0
323                ),
324            });
325        }
326        let needed = ttl_threshold_seconds.saturating_sub(current_ttl).max(1);
327        let amount = amount_for_ttl_extension(
328            needed,
329            &chain_state.current_price,
330            GNOSIS_BLOCK_TIME_SECS,
331        );
332        let cost = cost_bzz(&amount, batch.depth);
333        return Ok(PlanPreview {
334            batch_id_short: short_batch_id(batch),
335            current_depth: batch.depth,
336            current_usage_pct: usage_pct,
337            current_ttl_seconds: current_ttl,
338            usage_threshold_pct: usage_threshold,
339            ttl_threshold_seconds,
340            extra_depth,
341            action: PlanAction::Topup {
342                delta_amount_plur: amount,
343                new_ttl_seconds: current_ttl + needed,
344                cost_bzz: cost,
345            },
346            total_cost_bzz: cost,
347            reason: "immutable batch above usage threshold + TTL below threshold — topup only"
348                .to_string(),
349        });
350    }
351
352    let effective_ttl_after = if needs_dilute {
353        post_dilute_ttl
354    } else {
355        current_ttl
356    };
357    let needs_topup = effective_ttl_after < ttl_threshold_seconds;
358
359    match (needs_topup, needs_dilute) {
360        (false, false) => Ok(PlanPreview {
361            batch_id_short: short_batch_id(batch),
362            current_depth: batch.depth,
363            current_usage_pct: usage_pct,
364            current_ttl_seconds: current_ttl,
365            usage_threshold_pct: usage_threshold,
366            ttl_threshold_seconds,
367            extra_depth,
368            action: PlanAction::None,
369            total_cost_bzz: 0.0,
370            reason: "batch is healthy against both thresholds".into(),
371        }),
372        (true, false) => {
373            let needed = ttl_threshold_seconds.saturating_sub(current_ttl).max(1);
374            let amount = amount_for_ttl_extension(
375                needed,
376                &chain_state.current_price,
377                GNOSIS_BLOCK_TIME_SECS,
378            );
379            let cost = cost_bzz(&amount, batch.depth);
380            Ok(PlanPreview {
381                batch_id_short: short_batch_id(batch),
382                current_depth: batch.depth,
383                current_usage_pct: usage_pct,
384                current_ttl_seconds: current_ttl,
385                usage_threshold_pct: usage_threshold,
386                ttl_threshold_seconds,
387                extra_depth,
388                action: PlanAction::Topup {
389                    delta_amount_plur: amount,
390                    new_ttl_seconds: current_ttl + needed,
391                    cost_bzz: cost,
392                },
393                total_cost_bzz: cost,
394                reason: format!(
395                    "TTL below threshold ({}) — topup",
396                    format_ttl_seconds(ttl_threshold_seconds)
397                ),
398            })
399        }
400        (false, true) => Ok(PlanPreview {
401            batch_id_short: short_batch_id(batch),
402            current_depth: batch.depth,
403            current_usage_pct: usage_pct,
404            current_ttl_seconds: current_ttl,
405            usage_threshold_pct: usage_threshold,
406            ttl_threshold_seconds,
407            extra_depth,
408            action: PlanAction::Dilute {
409                new_depth,
410                post_dilute_ttl_seconds: post_dilute_ttl,
411            },
412            total_cost_bzz: 0.0,
413            reason: format!(
414                "usage above threshold ({:.0}%) — dilute",
415                usage_threshold * 100.0
416            ),
417        }),
418        (true, true) => {
419            // Topup first to a TTL high enough that post-dilute we
420            // still clear the threshold. Required pre-dilute TTL is
421            // `ttl_threshold × dilute_factor`. Topup buys the gap.
422            let target_pre_dilute_ttl =
423                ttl_threshold_seconds.saturating_mul(dilute_factor.max(1));
424            let needed = target_pre_dilute_ttl.saturating_sub(current_ttl).max(1);
425            let amount = amount_for_ttl_extension(
426                needed,
427                &chain_state.current_price,
428                GNOSIS_BLOCK_TIME_SECS,
429            );
430            let cost = cost_bzz(&amount, batch.depth);
431            let post_dilute_ttl = (current_ttl + needed) / dilute_factor.max(1);
432            Ok(PlanPreview {
433                batch_id_short: short_batch_id(batch),
434                current_depth: batch.depth,
435                current_usage_pct: usage_pct,
436                current_ttl_seconds: current_ttl,
437                usage_threshold_pct: usage_threshold,
438                ttl_threshold_seconds,
439                extra_depth,
440                action: PlanAction::TopupThenDilute {
441                    topup_delta_amount_plur: amount,
442                    topup_cost_bzz: cost,
443                    new_depth,
444                    post_dilute_ttl_seconds: post_dilute_ttl,
445                },
446                total_cost_bzz: cost,
447                reason: "usage above threshold + post-dilute TTL would fall below — topup then dilute"
448                    .to_string(),
449            })
450        }
451    }
452}
453
/// Fractional bucket usage on `[0, 1]`. Bee's `utilization` is the
/// max-bucket count, so the meaningful denominator is
/// `2^(depth - bucket_depth)` (the per-bucket capacity). Degenerate
/// configurations (`depth <= bucket_depth`) report 0.
fn stamp_usage(utilization: u32, depth: u8, bucket_depth: u8) -> f64 {
    match depth.checked_sub(bucket_depth) {
        // No headroom above the bucket depth — treat as unused.
        None | Some(0) => 0.0,
        Some(levels) => {
            let per_bucket = (1u64 << levels) as f64;
            f64::from(utilization) / per_bucket
        }
    }
}
/// Output of `:buy-suggest` — the inverse of `:buy-preview`.
/// Operator supplies a *target* (size + duration); we return the
/// minimum (depth, amount) that meets it. Capacity rounds *up* to
/// the next power-of-two depth (Bee batches are sized in
/// `2^depth × 4 KiB` increments) so the headroom is operator-
/// visible. Amount is the per-chunk PLUR that buys at least the
/// requested duration at the current chain price.
#[derive(Debug, Clone, PartialEq)]
pub struct BuySuggestion {
    // The operator's requested size and duration, echoed back.
    pub target_bytes: u128,
    pub target_seconds: i64,
    // The minimal (depth, amount) pair that covers the target.
    pub depth: u8,
    pub amount_plur: BigInt,
    /// Actual capacity at the chosen depth (≥ `target_bytes`).
    pub capacity_bytes: u128,
    /// Actual TTL at the chosen amount (≥ `target_seconds`).
    pub ttl_seconds: i64,
    pub cost_bzz: f64,
}
485impl BuySuggestion {
486    pub fn summary(&self) -> String {
487        format!(
488            "buy-suggest {} / {}: depth={} amount={} PLUR/chunk → capacity {}, TTL {}, cost {:.4} BZZ",
489            format_bytes(self.target_bytes),
490            format_ttl_seconds(self.target_seconds),
491            self.depth,
492            self.amount_plur,
493            format_bytes(self.capacity_bytes),
494            format_ttl_seconds(self.ttl_seconds),
495            self.cost_bzz,
496        )
497    }
498}
499
/// Theoretical capacity in bytes for a depth, before bucket skew:
/// `2^depth` chunks of 4 KiB each. Callers clamp depth to Bee's
/// [17, 41] range, well inside u128.
pub fn theoretical_capacity_bytes(depth: u8) -> u128 {
    let chunks = 1u128 << depth;
    chunks * 4096
}
506/// Convert a per-chunk PLUR amount into total BZZ paid for a batch
507/// of the given depth. Same `amount × 2^depth / 1e16` formula used
508/// across the ecosystem.
509pub fn cost_bzz(amount_per_chunk: &BigInt, depth: u8) -> f64 {
510    let total_plur: BigInt = amount_per_chunk * (BigInt::from(1u32) << depth as usize);
511    total_plur.to_string().parse::<f64>().unwrap_or(0.0) / PLUR_PER_BZZ
512}
513
514/// TTL in seconds for `amount_per_chunk` PLUR at the current chain
515/// price. Returns 0 if `current_price` is zero (chain hasn't loaded
516/// yet — caller should fall back to "n/a").
517pub fn ttl_seconds(amount_per_chunk: &BigInt, current_price: &BigInt, blocktime: i64) -> i64 {
518    if current_price <= &BigInt::from(0) {
519        return 0;
520    }
521    let ttl_blocks: BigInt = amount_per_chunk / current_price;
522    let secs: BigInt = &ttl_blocks * BigInt::from(blocktime);
523    secs.to_string().parse::<i64>().unwrap_or(i64::MAX)
524}
525
526/// Inverse of [`ttl_seconds`]: how much per-chunk PLUR the operator
527/// must add to gain `extra_seconds` of TTL at the current price.
528pub fn amount_for_ttl_extension(
529    extra_seconds: i64,
530    current_price: &BigInt,
531    blocktime: i64,
532) -> BigInt {
533    if extra_seconds <= 0 || blocktime <= 0 {
534        return BigInt::from(0);
535    }
536    let extra_blocks = BigInt::from(extra_seconds / blocktime);
537    extra_blocks * current_price
538}
539
540/// Compute a topup preview against an existing batch. Reads the
541/// chain price from `chain_state`; returns an `Err` summary string
542/// if the chain isn't loaded yet (so the caller can surface a
543/// useful command-bar message rather than silent 0s).
544pub fn topup_preview(
545    batch: &PostageBatch,
546    delta_amount: BigInt,
547    chain_state: &ChainState,
548) -> Result<TopupPreview, String> {
549    if chain_state.current_price <= BigInt::from(0) {
550        return Err("chain price not loaded yet — try again in a moment".into());
551    }
552    if delta_amount <= BigInt::from(0) {
553        return Err("topup amount must be a positive PLUR value".into());
554    }
555    let extra_ttl_seconds = ttl_seconds(
556        &delta_amount,
557        &chain_state.current_price,
558        GNOSIS_BLOCK_TIME_SECS,
559    );
560    let new_ttl_seconds = batch.batch_ttl.max(0).saturating_add(extra_ttl_seconds);
561    let cost = cost_bzz(&delta_amount, batch.depth);
562    Ok(TopupPreview {
563        batch_id_short: short_batch_id(batch),
564        current_depth: batch.depth,
565        current_ttl_seconds: batch.batch_ttl,
566        delta_amount,
567        extra_ttl_seconds,
568        new_ttl_seconds,
569        cost_bzz: cost,
570    })
571}
572
573/// Compute a dilute preview. Bee's dilute keeps the existing PLUR
574/// balance but redistributes it across more chunks: new depth must
575/// be strictly greater than the current depth, and per-chunk amount
576/// halves with every +1 in depth.
577pub fn dilute_preview(batch: &PostageBatch, new_depth: u8) -> Result<DilutePreview, String> {
578    if new_depth <= batch.depth {
579        return Err(format!(
580            "new depth {} must be greater than current depth {} (dilute can only raise depth)",
581            new_depth, batch.depth
582        ));
583    }
584    if new_depth > 41 {
585        return Err(format!(
586            "depth {new_depth} exceeds Bee's depth ceiling (41) — refusing to preview"
587        ));
588    }
589    let delta = (new_depth - batch.depth) as u32;
590    let factor = 1u128 << delta;
591    let old_capacity = theoretical_capacity_bytes(batch.depth);
592    let new_capacity = theoretical_capacity_bytes(new_depth);
593    let old_ttl = batch.batch_ttl.max(0);
594    let new_ttl = old_ttl / (factor.min(i64::MAX as u128) as i64).max(1);
595    Ok(DilutePreview {
596        batch_id_short: short_batch_id(batch),
597        old_depth: batch.depth,
598        new_depth,
599        old_capacity_bytes: old_capacity,
600        new_capacity_bytes: new_capacity,
601        old_ttl_seconds: old_ttl,
602        new_ttl_seconds: new_ttl,
603    })
604}
605
606/// Compute an extend preview. Given a target TTL extension (in
607/// seconds), figures out the per-chunk PLUR needed and the BZZ cost.
608pub fn extend_preview(
609    batch: &PostageBatch,
610    extension_seconds: i64,
611    chain_state: &ChainState,
612) -> Result<ExtendPreview, String> {
613    if extension_seconds <= 0 {
614        return Err("extension must be a positive duration".into());
615    }
616    if chain_state.current_price <= BigInt::from(0) {
617        return Err("chain price not loaded yet — try again in a moment".into());
618    }
619    let needed_amount = amount_for_ttl_extension(
620        extension_seconds,
621        &chain_state.current_price,
622        GNOSIS_BLOCK_TIME_SECS,
623    );
624    let cost = cost_bzz(&needed_amount, batch.depth);
625    let new_ttl_seconds = batch.batch_ttl.max(0).saturating_add(extension_seconds);
626    Ok(ExtendPreview {
627        batch_id_short: short_batch_id(batch),
628        depth: batch.depth,
629        current_ttl_seconds: batch.batch_ttl,
630        extension_seconds,
631        needed_amount_plur: needed_amount,
632        cost_bzz: cost,
633        new_ttl_seconds,
634    })
635}
636
637/// Compute a buy preview for a hypothetical fresh batch. No batch
638/// lookup needed; the operator supplies depth + per-chunk PLUR.
639pub fn buy_preview(
640    depth: u8,
641    amount_plur: BigInt,
642    chain_state: &ChainState,
643) -> Result<BuyPreview, String> {
644    if depth < 17 {
645        return Err(format!(
646            "depth {depth} is below Bee's minimum (17) — refusing to preview"
647        ));
648    }
649    if depth > 41 {
650        return Err(format!(
651            "depth {depth} exceeds Bee's depth ceiling (41) — refusing to preview"
652        ));
653    }
654    if amount_plur <= BigInt::from(0) {
655        return Err("amount must be a positive PLUR value".into());
656    }
657    if chain_state.current_price <= BigInt::from(0) {
658        return Err("chain price not loaded yet — try again in a moment".into());
659    }
660    let capacity_bytes = theoretical_capacity_bytes(depth);
661    let ttl = ttl_seconds(
662        &amount_plur,
663        &chain_state.current_price,
664        GNOSIS_BLOCK_TIME_SECS,
665    );
666    let cost = cost_bzz(&amount_plur, depth);
667    Ok(BuyPreview {
668        depth,
669        amount_plur,
670        capacity_bytes,
671        ttl_seconds: ttl,
672        cost_bzz: cost,
673    })
674}
675
676/// Inverse of [`buy_preview`]: operator says "I want X bytes for Y
677/// seconds", we return the minimum `(depth, amount)` pair that
678/// covers it.
679///
680/// Depth rounds *up* to the next power of two so the actual
681/// capacity is ≥ target_bytes (the alternative — exactly fit —
682/// would silently truncate the operator's stated need). Amount
683/// rounds *up* in blocks so the actual TTL is ≥ target_seconds.
684///
685/// Errors if the chain price isn't loaded yet or if the target
686/// exceeds Bee's depth ceiling (41).
687pub fn buy_suggest(
688    target_bytes: u128,
689    target_seconds: i64,
690    chain_state: &ChainState,
691) -> Result<BuySuggestion, String> {
692    if target_bytes == 0 {
693        return Err("target size must be positive".into());
694    }
695    if target_seconds <= 0 {
696        return Err("target duration must be positive".into());
697    }
698    if chain_state.current_price <= BigInt::from(0) {
699        return Err("chain price not loaded yet — try again in a moment".into());
700    }
701
702    // chunks_needed = ceil(target_bytes / 4096)
703    let chunks_needed = target_bytes.div_ceil(4096);
704    // depth = ceil(log2(chunks_needed)), clamped to Bee's [17, 41]
705    // bounds. depth=17 is Bee's minimum useful batch size; depth>41
706    // exceeds the contract's enforced ceiling.
707    let raw_depth = if chunks_needed <= 1 {
708        0
709    } else {
710        // ceil(log2(n)) — using leading_zeros for a portable answer.
711        128 - (chunks_needed - 1).leading_zeros()
712    };
713    if raw_depth > 41 {
714        return Err(format!(
715            "target {} exceeds Bee's max batch capacity (depth 41 ≈ 8 PiB)",
716            format_bytes(target_bytes)
717        ));
718    }
719    let depth: u8 = raw_depth.max(17) as u8;
720    let capacity_bytes = theoretical_capacity_bytes(depth);
721
722    // ttl_blocks_needed = ceil(target_seconds / blocktime)
723    let target_blocks =
724        target_seconds.saturating_add(GNOSIS_BLOCK_TIME_SECS - 1) / GNOSIS_BLOCK_TIME_SECS;
725    let amount = BigInt::from(target_blocks) * &chain_state.current_price;
726
727    // Actual TTL the chosen amount yields, given ceil rounding.
728    let ttl_seconds = ttl_seconds(&amount, &chain_state.current_price, GNOSIS_BLOCK_TIME_SECS);
729    let cost = cost_bzz(&amount, depth);
730
731    Ok(BuySuggestion {
732        target_bytes,
733        target_seconds,
734        depth,
735        amount_plur: amount,
736        capacity_bytes,
737        ttl_seconds,
738        cost_bzz: cost,
739    })
740}
741
742fn short_batch_id(batch: &PostageBatch) -> String {
743    let hex = batch.batch_id.to_hex();
744    if hex.len() > 8 {
745        format!("{}…", &hex[..8])
746    } else {
747        hex
748    }
749}
750
751/// Parse a human-readable size into bytes. Accepts plain integers
752/// (`4096` = bytes), binary suffixes (`5GiB`, `2TiB`, `512MiB`),
753/// and decimal suffixes (`5GB`, `1TB`, `500MB`). Single-letter
754/// shorthands (`5G`, `2T`, `100M`, `4K`) default to **binary**
755/// because operators reasoning about Bee's depth=2^N chunk counts
756/// always think in powers of two. Suffix matching is
757/// case-insensitive.
758///
759/// Used by `:buy-suggest` so operators can type sizes the way they
760/// do in chat ("5 GiB for 30d") rather than hand-converting to
761/// raw bytes.
762pub fn parse_size_bytes(s: &str) -> Result<u128, String> {
763    let s = s.trim();
764    if s.is_empty() {
765        return Err("size cannot be empty".into());
766    }
767    // Strip any internal whitespace between the number and the unit
768    // ("5 GiB" → "5GiB") so we don't reject a perfectly clear input.
769    let compact: String = s.chars().filter(|c| !c.is_whitespace()).collect();
770    let (num_part, mul) = split_size(&compact)
771        .ok_or_else(|| format!("invalid size {s:?} (try 5GiB, 2TiB, 500MiB, 4096)"))?;
772    let n: u128 = num_part
773        .parse()
774        .map_err(|_| format!("invalid size {s:?} (numeric part {num_part:?} unparseable)"))?;
775    if n == 0 {
776        return Err("size must be positive".into());
777    }
778    n.checked_mul(mul).ok_or_else(|| {
779        format!("size {s:?} overflowed u128 — that's larger than any plausible Bee batch")
780    })
781}
782
/// Split a compact size string into (digits, multiplier). Returns
/// `None` on an unrecognised suffix. The numeric part may be empty
/// (the caller's integer parse rejects that case with its own
/// message).
fn split_size(s: &str) -> Option<(&str, u128)> {
    // Everything up to the first non-digit is the number; the rest,
    // lowercased, is the unit.
    let digit_end = s
        .char_indices()
        .find_map(|(i, c)| (!c.is_ascii_digit()).then_some(i))
        .unwrap_or(s.len());
    let (num, suffix) = s.split_at(digit_end);
    const KI: u128 = 1024;
    let mul: u128 = match suffix.to_ascii_lowercase().as_str() {
        "" | "b" => 1,
        "k" | "kib" => KI,
        "kb" => 1_000,
        "m" | "mib" => KI * KI,
        "mb" => 1_000_000,
        "g" | "gib" => KI * KI * KI,
        "gb" => 1_000_000_000,
        "t" | "tib" => KI.pow(4),
        "tb" => 1_000u128.pow(4),
        "p" | "pib" => KI.pow(5),
        "pb" => 1_000u128.pow(5),
        _ => return None,
    };
    Some((num, mul))
}
/// Parse a duration written like `30d` / `12h` / `90m` / `45s` /
/// plain seconds (`5000`). Used by `:extend-preview` so operators
/// don't have to convert days to seconds in their head. Internal
/// whitespace is ignored (`"30 d"` parses like `"30d"`), matching
/// [`parse_size_bytes`]. Rejects negative or zero values; returns an
/// actionable error otherwise.
pub fn parse_duration_seconds(s: &str) -> Result<i64, String> {
    let s = s.trim();
    if s.is_empty() {
        return Err("duration cannot be empty".into());
    }
    // Strip internal whitespace ("30 d" → "30d") so durations are as
    // forgiving as sizes — parse_size_bytes already accepts "5 GiB".
    let compact: String = s.chars().filter(|c| !c.is_whitespace()).collect();
    // A trailing s/m/h/d (either case) names the unit; otherwise the
    // whole string is plain seconds.
    let (num_part, unit) = match compact.chars().last() {
        // The guard admits ASCII-only unit letters, so slicing one
        // byte off the end is always on a char boundary.
        Some(c) if "smhdSMHD".contains(c) => {
            (&compact[..compact.len() - 1], Some(c.to_ascii_lowercase()))
        }
        _ => (compact.as_str(), None),
    };
    let n: i64 = num_part
        .parse()
        .map_err(|_| format!("invalid duration {s:?} (try 30d / 12h / 90m / 45s / 5000)"))?;
    if n <= 0 {
        return Err(format!("duration must be positive, got {n}"));
    }
    // saturating_mul: a huge-but-parseable count must clamp rather
    // than wrap negative.
    let secs = match unit {
        Some('s') | None => n,
        Some('m') => n.saturating_mul(60),
        Some('h') => n.saturating_mul(3_600),
        Some('d') => n.saturating_mul(86_400),
        _ => unreachable!("unit guard above"),
    };
    Ok(secs)
}
840
841/// Parse a per-chunk PLUR amount. Plain-integer only for now —
842/// scientific notation (`1e14`) is harder to read back than to write
843/// and operators copy-paste these from chain explorers anyway.
844pub fn parse_plur_amount(s: &str) -> Result<BigInt, String> {
845    let s = s.trim();
846    if s.is_empty() {
847        return Err("amount cannot be empty".into());
848    }
849    s.parse::<BigInt>()
850        .map_err(|_| format!("invalid PLUR amount {s:?} (digits only, e.g. 100000000000)"))
851}
852
853/// Resolve a user-typed batch prefix (typically the 8 hex chars
854/// shown in the S2 table) to the matching `PostageBatch`. Errors on
855/// no-match or ambiguous-match so the operator doesn't preview the
856/// wrong batch by accident.
857pub fn match_batch_prefix<'a>(
858    batches: &'a [PostageBatch],
859    prefix: &str,
860) -> Result<&'a PostageBatch, String> {
861    let prefix = prefix.trim().trim_end_matches('…').to_ascii_lowercase();
862    if prefix.is_empty() {
863        return Err("batch id prefix cannot be empty".into());
864    }
865    let matches: Vec<&PostageBatch> = batches
866        .iter()
867        .filter(|b| {
868            b.batch_id
869                .to_hex()
870                .to_ascii_lowercase()
871                .starts_with(&prefix)
872        })
873        .collect();
874    match matches.as_slice() {
875        [] => Err(format!(
876            "no batch matches prefix {prefix:?} (try the 8-char hex shown in S2)"
877        )),
878        [single] => Ok(single),
879        many => Err(format!(
880            "{} batches match prefix {prefix:?}: {} — type a longer prefix",
881            many.len(),
882            many.iter()
883                .map(|b| short_batch_id(b))
884                .collect::<Vec<_>>()
885                .join(", ")
886        )),
887    }
888}
889
#[cfg(test)]
mod tests {
    use super::*;

    // Fixture: an immutable batch with a fixed 0xab…ab id. Callers
    // vary amount/depth/TTL; the remaining fields are inert defaults.
    fn make_batch(amount: Option<BigInt>, depth: u8, batch_ttl: i64) -> PostageBatch {
        PostageBatch {
            batch_id: bee::swarm::BatchId::new(&[0xab; 32]).unwrap(),
            amount,
            start: 0,
            owner: String::new(),
            depth,
            bucket_depth: depth.saturating_sub(6),
            immutable: true,
            batch_ttl,
            utilization: 0,
            usable: true,
            exists: true,
            label: "test".into(),
            block_number: 0,
        }
    }

    // Fixture: minimal ChainState. Only current_price (PLUR/block)
    // matters to the preview math; block numbers are arbitrary.
    fn chain(current_price_plur: u64) -> ChainState {
        ChainState {
            block: 100,
            chain_tip: 100,
            current_price: BigInt::from(current_price_plur),
            total_amount: BigInt::from(0),
        }
    }

    #[test]
    fn capacity_at_depth_22_is_16_gib() {
        // 2^22 × 4096 = 16 GiB exactly.
        assert_eq!(theoretical_capacity_bytes(22), 16 * 1024 * 1024 * 1024);
    }

    #[test]
    fn cost_bzz_matches_canonical_formula() {
        // amount=1e14 PLUR/chunk × 2^22 chunks / 1e16 PLUR/BZZ
        //   = 1e14 × 4_194_304 / 1e16 = 41943.04 BZZ.
        let amount = BigInt::from(100_000_000_000_000u64);
        let bzz = cost_bzz(&amount, 22);
        assert!(
            (bzz - 41943.04).abs() < 0.0001,
            "expected ~41943.04 BZZ, got {bzz}"
        );
    }

    #[test]
    fn ttl_seconds_basic() {
        // amount=1_000_000 PLUR/chunk, current_price=1 PLUR/block
        //   → ttl_blocks = 1_000_000, ttl_secs = 5_000_000.
        let secs = ttl_seconds(
            &BigInt::from(1_000_000u64),
            &BigInt::from(1u64),
            GNOSIS_BLOCK_TIME_SECS,
        );
        assert_eq!(secs, 5_000_000);
    }

    #[test]
    fn ttl_seconds_zero_price_returns_zero() {
        let secs = ttl_seconds(
            &BigInt::from(1_000_000u64),
            &BigInt::from(0u64),
            GNOSIS_BLOCK_TIME_SECS,
        );
        assert_eq!(secs, 0);
    }

    #[test]
    fn amount_for_extension_is_inverse_of_ttl() {
        // Extending by 5_000_000 seconds at price=1 PLUR/block gives
        // back 1_000_000 PLUR/chunk.
        let amt = amount_for_ttl_extension(5_000_000, &BigInt::from(1u64), GNOSIS_BLOCK_TIME_SECS);
        assert_eq!(amt, BigInt::from(1_000_000u64));
    }

    #[test]
    fn topup_preview_typical_case() {
        // depth=22, amount=delta=1e10 PLUR/chunk, price=1 PLUR/block.
        // extra_ttl = 1e10 × 5 = 5e10 seconds.
        // cost = 1e10 × 2^22 / 1e16 = 4.194 BZZ.
        let batch = make_batch(Some(BigInt::from(0)), 22, 86_400);
        let preview = topup_preview(&batch, BigInt::from(10_000_000_000u64), &chain(1)).unwrap();
        assert_eq!(preview.current_depth, 22);
        assert_eq!(preview.extra_ttl_seconds, 50_000_000_000);
        assert!((preview.cost_bzz - 4.194304).abs() < 0.0001);
        assert_eq!(preview.new_ttl_seconds, 86_400 + 50_000_000_000);
    }

    #[test]
    fn topup_preview_rejects_zero_price() {
        let batch = make_batch(None, 22, 86_400);
        let err = topup_preview(&batch, BigInt::from(1_000), &chain(0)).unwrap_err();
        assert!(err.contains("chain price"));
    }

    #[test]
    fn topup_preview_rejects_zero_delta() {
        let batch = make_batch(None, 22, 86_400);
        let err = topup_preview(&batch, BigInt::from(0), &chain(1)).unwrap_err();
        assert!(err.contains("positive PLUR"));
    }

    #[test]
    fn dilute_preview_doubles_capacity_halves_ttl() {
        // Going from depth 22 → 23 doubles capacity, halves TTL.
        let batch = make_batch(None, 22, 100_000);
        let preview = dilute_preview(&batch, 23).unwrap();
        assert_eq!(preview.old_capacity_bytes * 2, preview.new_capacity_bytes);
        assert_eq!(preview.old_ttl_seconds / 2, preview.new_ttl_seconds);
        assert!(preview.summary().contains("cost 0 BZZ"));
    }

    #[test]
    fn dilute_preview_rejects_lower_or_equal_depth() {
        let batch = make_batch(None, 22, 100_000);
        assert!(dilute_preview(&batch, 22).is_err());
        assert!(dilute_preview(&batch, 21).is_err());
    }

    #[test]
    fn dilute_preview_rejects_above_depth_ceiling() {
        let batch = make_batch(None, 22, 100_000);
        assert!(dilute_preview(&batch, 42).is_err());
    }

    #[test]
    fn extend_preview_typical_case() {
        // Extend by 5_000_000s (~58 days) at price=1, blocktime=5:
        // needed_amount = 5_000_000 / 5 × 1 = 1_000_000 PLUR/chunk.
        // depth=22 → cost = 1e6 × 2^22 / 1e16 = 4.194304e-4 BZZ.
        let batch = make_batch(None, 22, 86_400);
        let preview = extend_preview(&batch, 5_000_000, &chain(1)).unwrap();
        assert_eq!(preview.needed_amount_plur, BigInt::from(1_000_000u64));
        assert!((preview.cost_bzz - 4.194304e-4).abs() < 1e-9);
        assert_eq!(preview.new_ttl_seconds, 86_400 + 5_000_000);
    }

    #[test]
    fn extend_preview_rejects_zero_extension() {
        let batch = make_batch(None, 22, 86_400);
        assert!(extend_preview(&batch, 0, &chain(1)).is_err());
        assert!(extend_preview(&batch, -10, &chain(1)).is_err());
    }

    #[test]
    fn buy_preview_typical_case() {
        // depth=22, amount=1e14 PLUR/chunk, price=1 PLUR/block.
        // capacity = 16 GiB, ttl = 1e14 × 5 = 5e14 secs, cost = 41943.04 BZZ.
        let preview = buy_preview(22, BigInt::from(100_000_000_000_000u64), &chain(1)).unwrap();
        assert_eq!(preview.capacity_bytes, 16 * 1024 * 1024 * 1024);
        assert_eq!(preview.ttl_seconds, 500_000_000_000_000);
        assert!((preview.cost_bzz - 41943.04).abs() < 0.0001);
    }

    #[test]
    fn buy_preview_rejects_below_minimum_depth() {
        assert!(buy_preview(16, BigInt::from(100), &chain(1)).is_err());
    }

    #[test]
    fn buy_preview_rejects_above_ceiling() {
        assert!(buy_preview(42, BigInt::from(100), &chain(1)).is_err());
    }

    #[test]
    fn buy_preview_rejects_zero_amount() {
        assert!(buy_preview(22, BigInt::from(0), &chain(1)).is_err());
    }

    #[test]
    fn parse_size_plain_integer_is_bytes() {
        assert_eq!(parse_size_bytes("4096").unwrap(), 4096);
        assert!(parse_size_bytes("0").is_err());
        assert!(parse_size_bytes("").is_err());
    }

    #[test]
    fn parse_size_binary_suffixes() {
        assert_eq!(parse_size_bytes("1KiB").unwrap(), 1024);
        assert_eq!(parse_size_bytes("1MiB").unwrap(), 1024u128.pow(2));
        assert_eq!(parse_size_bytes("1GiB").unwrap(), 1024u128.pow(3));
        assert_eq!(parse_size_bytes("1TiB").unwrap(), 1024u128.pow(4));
        // Single-letter shorthand defaults to binary (operator-friendly
        // for power-of-two batch reasoning).
        assert_eq!(parse_size_bytes("1G").unwrap(), 1024u128.pow(3));
        assert_eq!(parse_size_bytes("4K").unwrap(), 4096);
    }

    #[test]
    fn parse_size_decimal_suffixes() {
        assert_eq!(parse_size_bytes("1KB").unwrap(), 1_000);
        assert_eq!(parse_size_bytes("1MB").unwrap(), 1_000_000);
        assert_eq!(parse_size_bytes("1GB").unwrap(), 1_000_000_000);
    }

    #[test]
    fn parse_size_handles_whitespace_and_case() {
        assert_eq!(parse_size_bytes(" 5 GiB ").unwrap(), 5 * 1024u128.pow(3));
        assert_eq!(parse_size_bytes("5gib").unwrap(), 5 * 1024u128.pow(3));
        assert_eq!(parse_size_bytes("2 TIB").unwrap(), 2 * 1024u128.pow(4));
    }

    #[test]
    fn parse_size_rejects_unknown_unit() {
        assert!(parse_size_bytes("5xyz").is_err());
        assert!(parse_size_bytes("abc").is_err());
    }

    #[test]
    fn buy_suggest_typical_5gib_30d() {
        // 5 GiB needs ceil(log2(5*256K)) = ceil(20.32) = 21 → 8 GiB.
        // 30d at 5s blocktime = 30*86400/5 = 518_400 blocks.
        // amount = 518_400 * 1 = 518_400 PLUR/chunk (price=1).
        // TTL at amount=518_400, price=1, blocktime=5 → 2_592_000s = 30d.
        let s = buy_suggest(5 * 1024u128.pow(3), 30 * 86_400, &chain(1)).unwrap();
        assert_eq!(s.depth, 21);
        assert_eq!(s.capacity_bytes, 8 * 1024u128.pow(3));
        assert_eq!(s.amount_plur, BigInt::from(518_400u32));
        assert_eq!(s.ttl_seconds, 30 * 86_400);
    }

    #[test]
    fn buy_suggest_4gib_exact_uses_depth_20() {
        // 4 GiB exactly = 2^20 chunks * 4096 → depth 20 fits exactly.
        let s = buy_suggest(4 * 1024u128.pow(3), 86_400, &chain(1)).unwrap();
        assert_eq!(s.depth, 20);
        assert_eq!(s.capacity_bytes, 4 * 1024u128.pow(3));
    }

    #[test]
    fn buy_suggest_tiny_target_clamps_to_min_depth_17() {
        // 1 chunk's worth → ceil(log2(1)) = 0 → clamp to 17.
        let s = buy_suggest(4096, 86_400, &chain(1)).unwrap();
        assert_eq!(s.depth, 17);
        assert!(s.capacity_bytes >= 4096);
    }

    #[test]
    fn buy_suggest_rejects_above_max_depth() {
        // depth 42 ≈ 16 PiB; explicitly refused.
        let huge = 16 * 1024u128.pow(5); // 16 PiB
        assert!(buy_suggest(huge, 86_400, &chain(1)).is_err());
    }

    #[test]
    fn buy_suggest_rounds_duration_up_in_blocks() {
        // 7 seconds at 5s blocktime → 2 blocks (ceil), not 1.
        // amount = 2 * 1 = 2; TTL = 2 * 5 = 10s ≥ 7.
        let s = buy_suggest(4096, 7, &chain(1)).unwrap();
        assert_eq!(s.amount_plur, BigInt::from(2u32));
        assert_eq!(s.ttl_seconds, 10);
    }

    #[test]
    fn buy_suggest_rejects_zero_or_negative_inputs() {
        assert!(buy_suggest(0, 86_400, &chain(1)).is_err());
        assert!(buy_suggest(4096, 0, &chain(1)).is_err());
        assert!(buy_suggest(4096, -5, &chain(1)).is_err());
    }

    #[test]
    fn buy_suggest_rejects_zero_chain_price() {
        assert!(buy_suggest(4096, 86_400, &chain(0)).is_err());
    }

    #[test]
    fn buy_suggest_summary_is_compact() {
        let s = buy_suggest(5 * 1024u128.pow(3), 30 * 86_400, &chain(1)).unwrap();
        let line = s.summary();
        assert!(line.starts_with("buy-suggest"));
        assert!(line.contains("5.0 GiB"));
        assert!(line.contains("30d  0h"));
        assert!(line.contains("depth=21"));
        assert!(!line.contains('\n'));
    }

    #[test]
    fn parse_duration_handles_units() {
        assert_eq!(parse_duration_seconds("5000").unwrap(), 5_000);
        assert_eq!(parse_duration_seconds("45s").unwrap(), 45);
        assert_eq!(parse_duration_seconds("90m").unwrap(), 5_400);
        assert_eq!(parse_duration_seconds("12h").unwrap(), 43_200);
        assert_eq!(parse_duration_seconds("30d").unwrap(), 2_592_000);
        // Trailing whitespace + uppercase unit.
        assert_eq!(parse_duration_seconds(" 7D ").unwrap(), 604_800);
    }

    #[test]
    fn parse_duration_rejects_invalid() {
        assert!(parse_duration_seconds("").is_err());
        assert!(parse_duration_seconds("abc").is_err());
        assert!(parse_duration_seconds("0d").is_err());
        assert!(parse_duration_seconds("-5h").is_err());
    }

    #[test]
    fn parse_plur_handles_large_amounts() {
        let amt = parse_plur_amount("100000000000000").unwrap();
        assert_eq!(amt, BigInt::from(100_000_000_000_000u64));
    }

    #[test]
    fn parse_plur_rejects_garbage() {
        assert!(parse_plur_amount("").is_err());
        assert!(parse_plur_amount("1e14").is_err()); // scientific not supported
        assert!(parse_plur_amount("123abc").is_err());
    }

    #[test]
    fn match_batch_prefix_unique_returns_single() {
        let b1 = make_batch_with_id([0xab; 32]);
        let b2 = make_batch_with_id([0xcd; 32]);
        let batches = vec![b1.clone(), b2.clone()];
        let m = match_batch_prefix(&batches, "abab").unwrap();
        assert_eq!(m.batch_id, b1.batch_id);
    }

    #[test]
    fn match_batch_prefix_handles_trailing_ellipsis() {
        // The S2 table renders "abababab…" — operators may copy that
        // shape verbatim. Strip the trailing ellipsis transparently.
        let b1 = make_batch_with_id([0xab; 32]);
        let batches = vec![b1.clone()];
        let m = match_batch_prefix(&batches, "abababab…").unwrap();
        assert_eq!(m.batch_id, b1.batch_id);
    }

    #[test]
    fn match_batch_prefix_ambiguous_errors_with_listing() {
        let b1 = make_batch_with_id([0xab; 32]);
        let b2 = make_batch_with_id([0xab; 32]); // identical prefix
        let batches = vec![b1, b2];
        let err = match_batch_prefix(&batches, "ab").unwrap_err();
        assert!(err.contains("match prefix"));
    }

    #[test]
    fn match_batch_prefix_no_match_errors() {
        let b1 = make_batch_with_id([0xab; 32]);
        let batches = vec![b1];
        let err = match_batch_prefix(&batches, "ff").unwrap_err();
        assert!(err.contains("no batch matches"));
    }

    // Fixture: like make_batch, but with a caller-chosen 32-byte id,
    // for the prefix-matching tests above.
    fn make_batch_with_id(bytes: [u8; 32]) -> PostageBatch {
        PostageBatch {
            batch_id: bee::swarm::BatchId::new(&bytes).unwrap(),
            amount: None,
            start: 0,
            owner: String::new(),
            depth: 22,
            bucket_depth: 16,
            immutable: true,
            batch_ttl: 86_400,
            utilization: 0,
            usable: true,
            exists: true,
            label: "test".into(),
            block_number: 0,
        }
    }

    #[test]
    fn summary_strings_are_compact_and_human_readable() {
        // Smoke test that summary() produces reasonable single-line
        // output (no embedded newlines, includes the verb name).
        let batch = make_batch(None, 22, 86_400);
        let p = topup_preview(&batch, BigInt::from(10u64), &chain(1)).unwrap();
        let s = p.summary();
        assert!(s.starts_with("topup-preview"));
        assert!(!s.contains('\n'));

        let p = dilute_preview(&batch, 23).unwrap();
        let s = p.summary();
        assert!(s.starts_with("dilute-preview"));
        assert!(!s.contains('\n'));

        let p = extend_preview(&batch, 86_400, &chain(1)).unwrap();
        let s = p.summary();
        assert!(s.starts_with("extend-preview"));
        assert!(!s.contains('\n'));

        let p = buy_preview(22, BigInt::from(10_000), &chain(1)).unwrap();
        let s = p.summary();
        assert!(s.starts_with("buy-preview"));
        assert!(!s.contains('\n'));
    }

    // Fixture: a mutable batch (dilute-eligible) with a concrete
    // amount and utilization, for the plan_batch tests below.
    fn mutable_batch(amount: u64, depth: u8, batch_ttl: i64, utilization: u32) -> PostageBatch {
        let mut b = make_batch(Some(BigInt::from(amount)), depth, batch_ttl);
        b.immutable = false;
        b.utilization = utilization;
        b
    }

    #[test]
    fn plan_batch_healthy_returns_no_action() {
        // depth=22, util=0%, TTL=30 days, threshold 24h: nothing to do.
        let batch = mutable_batch(1_000_000, 22, 30 * 86_400, 0);
        let plan = plan_batch(
            &batch,
            &chain(1),
            DEFAULT_USAGE_THRESHOLD,
            DEFAULT_TTL_THRESHOLD_SECONDS,
            DEFAULT_EXTRA_DEPTH,
        )
        .unwrap();
        assert_eq!(plan.action, PlanAction::None);
        assert_eq!(plan.total_cost_bzz, 0.0);
        assert!(plan.reason.contains("healthy"));
    }

    #[test]
    fn plan_batch_low_ttl_only_topup() {
        // depth=22, util=0%, TTL=1h. Below 24h threshold, but usage
        // is 0 so no dilute needed.
        let batch = mutable_batch(1_000_000, 22, 3600, 0);
        let plan = plan_batch(
            &batch,
            &chain(1),
            DEFAULT_USAGE_THRESHOLD,
            DEFAULT_TTL_THRESHOLD_SECONDS,
            DEFAULT_EXTRA_DEPTH,
        )
        .unwrap();
        match plan.action {
            PlanAction::Topup {
                ref delta_amount_plur,
                ..
            } => {
                assert!(*delta_amount_plur > BigInt::from(0));
            }
            other => panic!("expected Topup, got {other:?}"),
        }
        assert!(plan.total_cost_bzz > 0.0);
    }

    #[test]
    fn plan_batch_high_usage_only_dilute() {
        // util at 100%, but TTL very high so post-dilute (TTL/4) is
        // still way above threshold. Pure dilute.
        // bucket_depth = depth - 6 = 16; depth=22; max bucket count
        // = 2^(22-16) = 64. utilization=64 → 100% usage.
        let batch = mutable_batch(1_000_000, 22, 365 * 86_400, 64);
        let plan = plan_batch(
            &batch,
            &chain(1),
            DEFAULT_USAGE_THRESHOLD,
            DEFAULT_TTL_THRESHOLD_SECONDS,
            DEFAULT_EXTRA_DEPTH,
        )
        .unwrap();
        match plan.action {
            PlanAction::Dilute { new_depth, .. } => {
                assert_eq!(new_depth, 24);
            }
            other => panic!("expected Dilute, got {other:?}"),
        }
        assert_eq!(plan.total_cost_bzz, 0.0);
    }

    #[test]
    fn plan_batch_high_usage_low_ttl_topup_then_dilute() {
        // util=100% AND TTL barely above threshold — post-dilute TTL
        // would fall below, so plan is topup-then-dilute.
        let batch = mutable_batch(1_000_000, 22, 2 * 24 * 3600, 64);
        let plan = plan_batch(
            &batch,
            &chain(1),
            DEFAULT_USAGE_THRESHOLD,
            DEFAULT_TTL_THRESHOLD_SECONDS,
            DEFAULT_EXTRA_DEPTH,
        )
        .unwrap();
        match plan.action {
            PlanAction::TopupThenDilute {
                ref topup_delta_amount_plur,
                new_depth,
                ..
            } => {
                assert!(*topup_delta_amount_plur > BigInt::from(0));
                assert_eq!(new_depth, 24);
            }
            other => panic!("expected TopupThenDilute, got {other:?}"),
        }
        assert!(plan.total_cost_bzz > 0.0);
    }

    #[test]
    fn plan_batch_immutable_high_usage_skips_dilute() {
        let mut batch = mutable_batch(1_000_000, 22, 30 * 86_400, 64);
        batch.immutable = true;
        let plan = plan_batch(
            &batch,
            &chain(1),
            DEFAULT_USAGE_THRESHOLD,
            DEFAULT_TTL_THRESHOLD_SECONDS,
            DEFAULT_EXTRA_DEPTH,
        )
        .unwrap();
        // Immutable + healthy TTL → None with a reason explaining
        // why we can't act on the high usage.
        assert_eq!(plan.action, PlanAction::None);
        assert!(plan.reason.contains("immutable"));
    }

    #[test]
    fn plan_batch_rejects_out_of_range_threshold() {
        let batch = mutable_batch(1_000_000, 22, 30 * 86_400, 0);
        assert!(
            plan_batch(&batch, &chain(1), 1.5, DEFAULT_TTL_THRESHOLD_SECONDS, 2).is_err()
        );
        assert!(plan_batch(&batch, &chain(1), -0.1, 86400, 2).is_err());
    }

    #[test]
    fn plan_batch_summary_is_one_line() {
        let batch = mutable_batch(1_000_000, 22, 3600, 64);
        let plan = plan_batch(
            &batch,
            &chain(1),
            DEFAULT_USAGE_THRESHOLD,
            DEFAULT_TTL_THRESHOLD_SECONDS,
            DEFAULT_EXTRA_DEPTH,
        )
        .unwrap();
        let s = plan.summary();
        assert!(s.starts_with("plan-batch"));
        assert!(!s.contains('\n'), "summary must be a single line: {s}");
    }
}