// canic_core/ops/pool.rs

//! Pool lifecycle helpers.
//!
//! The root canister maintains a pool of empty or decommissioned canisters
//! that can be quickly reassigned when scaling.
//!
//! INVARIANTS:
//! - Pool canisters are NOT part of topology
//! - Pool canisters have NO parent
//! - Root is always a controller (pool canisters accept no controllers beyond
//!   root and those listed in config)
//! - Importing a canister is destructive (code uninstalled, controllers
//!   replaced)
//! - Registry metadata is informational only while in pool
//! - Ready entries have no code installed (reset_into_pool uninstalls before Ready)
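//!
//! Typical flow (illustrative sketch; assumes the root-canister context and
//! uses the `PoolOps` / `PoolAdminCommand` items defined below):
//!
//! ```ignore
//! // Queue decommissioned canisters; the reset worker wipes them in batches.
//! PoolOps::admin(PoolAdminCommand::ImportQueued { pids: vec![pid] }).await?;
//!
//! // Later, hand out a ready (empty, root-controlled) canister when scaling.
//! if let Some((pid, entry)) = PoolOps::pop_ready() {
//!     // install code, assign a parent, register in topology...
//! }
//! ```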
//
// LOCAL INVARIANT:
// On local replicas, only canisters that are routable in the current replica
// may enter or remain in the pool. Mainnet canister IDs are never importable.

pub use crate::ops::storage::pool::{CanisterPoolEntry, CanisterPoolStatus, CanisterPoolView};

use crate::{
    Error, ThisError,
    cdk::{
        api::canister_self,
        futures::spawn,
        mgmt::{CanisterSettings, UpdateSettingsArgs},
        types::Principal,
    },
    config::Config,
    log::Topic,
    ops::{
        OPS_POOL_CHECK_INTERVAL, OPS_POOL_INIT_DELAY, OpsError,
        config::ConfigOps,
        ic::{
            Network, build_network, canister_status, get_cycles,
            mgmt::{create_canister, uninstall_code},
            timer::{TimerId, TimerOps},
            update_settings,
        },
        prelude::*,
        storage::{pool::CanisterPoolStorageOps, topology::SubnetCanisterRegistryOps},
    },
    types::{Cycles, TC},
};
use candid::CandidType;
use serde::Deserialize;
use std::{cell::RefCell, time::Duration};

/// Internal reset worker and scheduling logic.
/// Isolated to keep pool lifecycle logic linear and readable.
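///
/// A single zero-delay timer drains `PendingReset` entries in batches; a run
/// that finds another batch already in flight sets a reschedule flag instead
/// of racing it.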
mod reset_scheduler {
    use super::*;

    thread_local! {
        static RESET_IN_PROGRESS: RefCell<bool> = const { RefCell::new(false) };
        static RESET_RESCHEDULE: RefCell<bool> = const { RefCell::new(false) };
        static RESET_TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
    }

    pub fn schedule() {
        RESET_TIMER.with_borrow_mut(|slot| {
            if slot.is_some() {
                return;
            }

            let id = TimerOps::set(Duration::ZERO, "pool:pending", async {
                RESET_TIMER.with_borrow_mut(|slot| *slot = None);
                let _ = run_worker(super::POOL_RESET_BATCH_SIZE).await;
            });

            *slot = Some(id);
        });
    }

    fn maybe_reschedule() {
        let reschedule = RESET_RESCHEDULE.with_borrow_mut(|f| {
            let v = *f;
            *f = false;
            v
        });

        if reschedule || has_pending_reset() {
            schedule();
        }
    }

    async fn run_worker(limit: usize) -> Result<(), Error> {
        if limit == 0 {
            return Ok(());
        }

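        // Claim the worker slot; if a batch is already running, flag a
        // reschedule and bail out instead of running concurrently.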
        let should_run = RESET_IN_PROGRESS.with_borrow_mut(|flag| {
            if *flag {
                RESET_RESCHEDULE.with_borrow_mut(|r| *r = true);
                false
            } else {
                *flag = true;
                true
            }
        });

        if !should_run {
            return Ok(());
        }

        let result = run_batch(limit).await;

        RESET_IN_PROGRESS.with_borrow_mut(|f| *f = false);
        maybe_reschedule();

        result
    }

    async fn run_batch(limit: usize) -> Result<(), Error> {
        let mut pending: Vec<_> = CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_pending_reset())
            .collect();

        if pending.is_empty() {
            return Ok(());
        }

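        // Oldest first, so long-waiting canisters are reset before newer ones.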
        pending.sort_by_key(|(_, e)| e.created_at);

        for (pid, mut entry) in pending.into_iter().take(limit) {
            if !super::can_enter_pool(pid).await {
                let _ = CanisterPoolStorageOps::take(&pid);
                continue;
            }

            match super::reset_into_pool(pid).await {
                Ok(cycles) => {
                    entry.cycles = cycles;
                    entry.status = CanisterPoolStatus::Ready;
                }
                Err(err) => {
                    entry.status = CanisterPoolStatus::Failed {
                        reason: err.to_string(),
                    };
                    log!(
                        Topic::CanisterPool,
                        Warn,
                        "pool reset failed for {pid}: {err}"
                    );
                }
            }

            if !CanisterPoolStorageOps::update(pid, entry) {
                log!(
                    Topic::CanisterPool,
                    Warn,
                    "pool entry missing while storing reset result for {pid}"
                );
            }
        }

        Ok(())
    }

    fn has_pending_reset() -> bool {
        CanisterPoolStorageOps::export()
            .into_iter()
            .any(|(_, e)| e.status.is_pending_reset())
    }

    // ---------- test hook ----------
    #[cfg(test)]
    thread_local! {
        static RESET_SCHEDULED: RefCell<bool> = const { RefCell::new(false) };
    }

    #[cfg(test)]
    pub fn mark_scheduled_for_test() {
        RESET_SCHEDULED.with_borrow_mut(|f| *f = true);
    }

    #[cfg(test)]
    pub fn take_scheduled_for_test() -> bool {
        RESET_SCHEDULED.with_borrow_mut(|flag| {
            let value = *flag;
            *flag = false;
            value
        })
    }
}

#[cfg(test)]
thread_local! {
    static TEST_IMPORTABLE_OVERRIDE: RefCell<Option<bool>> = const { RefCell::new(None) };
}

//
// TIMER STATE
//

thread_local! {
    static TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
}

/// Default cycles allocated to freshly created pool canisters.
const POOL_CANISTER_CYCLES: u128 = 5 * TC;

/// Default batch size for resetting pending pool entries.
const POOL_RESET_BATCH_SIZE: usize = 10;

///
/// PoolOpsError
///

#[derive(Debug, ThisError)]
pub enum PoolOpsError {
    #[error("pool entry missing for {pid}")]
    PoolEntryMissing { pid: Principal },

    #[error("missing module hash for pool entry {pid}")]
    MissingModuleHash { pid: Principal },

    #[error("missing type for pool entry {pid}")]
    MissingType { pid: Principal },

    #[error("pool entry {pid} is not ready")]
    PoolEntryNotReady { pid: Principal },
}

impl From<PoolOpsError> for Error {
    fn from(err: PoolOpsError) -> Self {
        OpsError::from(err).into()
    }
}

///
/// PoolAdminCommand
///

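/// Admin surface for the pool: `CreateEmpty` provisions a fresh canister,
/// `Recycle` pulls a registry canister back into the pool, `ImportImmediate`
/// resets a canister inline, `ImportQueued` defers resets to the worker, and
/// `RequeueFailed` retries entries stuck in `Failed`.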
#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminCommand {
    CreateEmpty,
    Recycle { pid: Principal },
    ImportImmediate { pid: Principal },
    ImportQueued { pids: Vec<Principal> },
    RequeueFailed { pids: Option<Vec<Principal>> },
}

///
/// PoolAdminResponse
///

#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminResponse {
    Created {
        pid: Principal,
    },
    Recycled,
    Imported,
    QueuedImported {
        added: u64,
        requeued: u64,
        skipped: u64,
        total: u64,
    },
    FailedRequeued {
        requeued: u64,
        skipped: u64,
        total: u64,
    },
}

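/// Controllers applied to pool canisters: the configured controllers plus
/// root (added if missing).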
fn pool_controllers() -> Vec<Principal> {
    let mut controllers = Config::get().controllers.clone();

    let root = canister_self();
    if !controllers.contains(&root) {
        controllers.push(root);
    }

    controllers
}

fn is_local_build() -> bool {
    build_network() == Some(Network::Local)
}

/// On local builds, returns true iff the canister is routable in the current
/// replica; on all other builds, returns true unconditionally.
///
/// Local-only precondition check: it must stay cheap, non-destructive, and
/// side-effect free.
async fn is_importable_on_local(pid: Principal) -> bool {
    #[cfg(test)]
    if let Some(override_value) = TEST_IMPORTABLE_OVERRIDE.with(|slot| *slot.borrow()) {
        return override_value;
    }

    if !is_local_build() {
        return true;
    }

    match canister_status(pid).await {
        Ok(_) => true,
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import skipped for {pid} (local non-importable): {err}"
            );
            false
        }
    }
}

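/// Gate for pool membership: on local builds the canister must be routable in
/// the current replica; on other builds this always passes.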
async fn can_enter_pool(pid: Principal) -> bool {
    if !is_local_build() {
        return true;
    }

    is_importable_on_local(pid).await
}

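/// Destructive reset: replaces the controllers with the pool set, uninstalls
/// any code, and returns the canister's remaining cycles.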
async fn reset_into_pool(pid: Principal) -> Result<Cycles, Error> {
    update_settings(&UpdateSettingsArgs {
        canister_id: pid,
        settings: CanisterSettings {
            controllers: Some(pool_controllers()),
            ..Default::default()
        },
    })
    .await?;

    uninstall_code(pid).await?;

    get_cycles(pid).await
}

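/// Updates an existing pool entry, keeping stored metadata for any argument
/// passed as `None`; registers a fresh entry otherwise.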
fn register_or_update_preserving_metadata(
    pid: Principal,
    cycles: Cycles,
    status: CanisterPoolStatus,
    role: Option<CanisterRole>,
    parent: Option<Principal>,
    module_hash: Option<Vec<u8>>,
) {
    if let Some(mut entry) = CanisterPoolStorageOps::get(pid) {
        entry.cycles = cycles;
        entry.status = status;
        entry.role = role.or(entry.role);
        entry.parent = parent.or(entry.parent);
        entry.module_hash = module_hash.or(entry.module_hash);
        let _ = CanisterPoolStorageOps::update(pid, entry);
    } else {
        CanisterPoolStorageOps::register(pid, cycles, status, role, parent, module_hash);
    }
}

fn mark_pending_reset(pid: Principal) {
    register_or_update_preserving_metadata(
        pid,
        Cycles::default(),
        CanisterPoolStatus::PendingReset,
        None,
        None,
        None,
    );
}

fn mark_ready(pid: Principal, cycles: Cycles) {
    register_or_update_preserving_metadata(
        pid,
        cycles,
        CanisterPoolStatus::Ready,
        None,
        None,
        None,
    );
}

fn mark_failed(pid: Principal, err: &Error) {
    register_or_update_preserving_metadata(
        pid,
        Cycles::default(),
        CanisterPoolStatus::Failed {
            reason: err.to_string(),
        },
        None,
        None,
        None,
    );
}

///
/// PoolOps
///

pub struct PoolOps;

impl PoolOps {
    // ---------------------------------------------------------------------
    // Lifecycle
    // ---------------------------------------------------------------------

    pub fn start() {
        TIMER.with_borrow_mut(|slot| {
            if slot.is_some() {
                return;
            }

            let id = TimerOps::set(OPS_POOL_INIT_DELAY, "pool:init", async {
                let _ = Self::check();

                let interval =
                    TimerOps::set_interval(OPS_POOL_CHECK_INTERVAL, "pool:interval", || async {
                        let _ = Self::check();
                    });

                TIMER.with_borrow_mut(|slot| *slot = Some(interval));
            });

            *slot = Some(id);
        });
    }

    pub fn stop() {
        TIMER.with_borrow_mut(|slot| {
            if let Some(id) = slot.take() {
                TimerOps::clear(id);
            }
        });
    }

    // ---------------------------------------------------------------------
    // Public API
    // ---------------------------------------------------------------------

    #[must_use]
    pub fn check() -> u64 {
        reset_scheduler::schedule();

        let subnet_cfg = ConfigOps::current_subnet();
        let min_size: u64 = subnet_cfg.pool.minimum_size.into();
        let ready_size = Self::ready_len();

        if ready_size >= min_size {
            return 0;
        }

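        // Top up toward the configured minimum, creating at most 10 canisters
        // per check to bound the burst of management-canister calls.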
        let missing = (min_size - ready_size).min(10);
        log!(
            Topic::CanisterPool,
            Ok,
            "pool low: {ready_size}/{min_size}, creating {missing}"
        );

        spawn(async move {
            for i in 0..missing {
                match pool_create_canister().await {
                    Ok(_) => log!(
                        Topic::CanisterPool,
                        Ok,
                        "created pool canister {}/{}",
                        i + 1,
                        missing
                    ),
                    Err(e) => log!(Topic::CanisterPool, Warn, "pool creation failed: {e:?}"),
                }
            }
        });

        missing
    }

    #[must_use]
    pub fn pop_ready() -> Option<(Principal, CanisterPoolEntry)> {
        CanisterPoolStorageOps::pop_ready()
    }

    #[must_use]
    pub fn contains(pid: &Principal) -> bool {
        CanisterPoolStorageOps::contains(pid)
    }

    #[must_use]
    pub fn export() -> CanisterPoolView {
        CanisterPoolStorageOps::export()
    }

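    /// Dispatches a pool admin command; the individual handlers enforce
    /// root-only access where required.
    ///
    /// Illustrative call (assumes an async root-canister context):
    /// ```ignore
    /// let resp = PoolOps::admin(PoolAdminCommand::CreateEmpty).await?;
    /// ```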
    pub async fn admin(cmd: PoolAdminCommand) -> Result<PoolAdminResponse, Error> {
        match cmd {
            PoolAdminCommand::CreateEmpty => {
                let pid = pool_create_canister().await?;
                Ok(PoolAdminResponse::Created { pid })
            }
            PoolAdminCommand::Recycle { pid } => {
                pool_recycle_canister(pid).await?;
                Ok(PoolAdminResponse::Recycled)
            }
            PoolAdminCommand::ImportImmediate { pid } => {
                pool_import_canister(pid).await?;
                Ok(PoolAdminResponse::Imported)
            }
            PoolAdminCommand::ImportQueued { pids } => {
                let (a, r, s, t) = if is_local_build() {
                    pool_import_queued_canisters_local(pids).await?
                } else {
                    pool_import_queued_canisters(pids)?
                };
                Ok(PoolAdminResponse::QueuedImported {
                    added: a,
                    requeued: r,
                    skipped: s,
                    total: t,
                })
            }
            PoolAdminCommand::RequeueFailed { pids } => {
                let (requeued, skipped, total) = pool_requeue_failed(pids)?;
                Ok(PoolAdminResponse::FailedRequeued {
                    requeued,
                    skipped,
                    total,
                })
            }
        }
    }

    // ---------------------------------------------------------------------
    // Internal helpers
    // ---------------------------------------------------------------------

    fn ready_len() -> u64 {
        CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_ready())
            .count() as u64
    }
}


//
// CREATE / IMPORT / RECYCLE / EXPORT
//

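/// Creates an empty canister funded with `POOL_CANISTER_CYCLES` and records
/// it in the pool as `Ready`.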
pub async fn pool_create_canister() -> Result<Principal, Error> {
    OpsError::require_root()?;

    let cycles = Cycles::new(POOL_CANISTER_CYCLES);
    let pid = create_canister(pool_controllers(), cycles.clone()).await?;

    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, None, None, None);
    Ok(pid)
}

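/// Immediately imports `pid` into the pool: marks it `PendingReset`, wipes it
/// via `reset_into_pool`, drops it from the topology registry, and marks it
/// `Ready`. On failure the entry is marked `Failed` and the error returned.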
pub async fn pool_import_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    if !can_enter_pool(pid).await {
        let _ = CanisterPoolStorageOps::take(&pid);
        return Ok(());
    }

    mark_pending_reset(pid);

    match reset_into_pool(pid).await {
        Ok(cycles) => {
            let _ = SubnetCanisterRegistryOps::remove(&pid);
            mark_ready(pid, cycles);
        }
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import failed for {pid}: {err}"
            );
            mark_failed(pid, &err);
            return Err(err);
        }
    }

    Ok(())
}

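/// Local-build variant of queued import: checks routability per canister
/// before queueing, and evicts `Failed` entries that are no longer routable.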
async fn pool_import_queued_canisters_local(
    pids: Vec<Principal>,
) -> Result<(u64, u64, u64, u64), Error> {
    let total = pids.len() as u64;
    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;

    for pid in pids {
        if SubnetCanisterRegistryOps::get(pid).is_some() {
            skipped += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(pid) {
            if entry.status.is_failed() {
                if can_enter_pool(pid).await {
                    mark_pending_reset(pid);
                    requeued += 1;
                } else {
                    let _ = CanisterPoolStorageOps::take(&pid);
                    skipped += 1;
                }
            } else {
                skipped += 1;
            }
            continue;
        }

        if !can_enter_pool(pid).await {
            skipped += 1;
            continue;
        }

        mark_pending_reset(pid);
        added += 1;
    }

    if added > 0 || requeued > 0 {
        maybe_schedule_reset_worker();
    }

    Ok((added, requeued, skipped, total))
}

fn pool_import_queued_canisters(pids: Vec<Principal>) -> Result<(u64, u64, u64, u64), Error> {
    pool_import_queued_canisters_inner(pids, true)
}

fn pool_import_queued_canisters_inner(
    pids: Vec<Principal>,
    enforce_root: bool,
) -> Result<(u64, u64, u64, u64), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;

    for pid in &pids {
        if SubnetCanisterRegistryOps::get(*pid).is_some() {
            skipped += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(*pid) {
            if entry.status.is_failed() {
                mark_pending_reset(*pid);
                requeued += 1;
            } else {
                skipped += 1;
            }
            continue;
        }

        mark_pending_reset(*pid);
        added += 1;
    }

    maybe_schedule_reset_worker();

    Ok((added, requeued, skipped, pids.len() as u64))
}

#[cfg(not(test))]
fn maybe_schedule_reset_worker() {
    reset_scheduler::schedule();
}

#[cfg(test)]
fn maybe_schedule_reset_worker() {
    reset_scheduler::mark_scheduled_for_test();
}

#[cfg(test)]
fn take_reset_scheduled() -> bool {
    reset_scheduler::take_scheduled_for_test()
}

#[cfg(test)]
fn set_test_importable_override(value: Option<bool>) {
    TEST_IMPORTABLE_OVERRIDE.with_borrow_mut(|slot| *slot = value);
}

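/// Re-queues `Failed` pool entries for reset; with `None`, scans the entire
/// pool.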
fn pool_requeue_failed(pids: Option<Vec<Principal>>) -> Result<(u64, u64, u64), Error> {
    pool_requeue_failed_inner(pids, true)
}

fn pool_requeue_failed_inner(
    pids: Option<Vec<Principal>>,
    enforce_root: bool,
) -> Result<(u64, u64, u64), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut requeued = 0;
    let mut skipped = 0;
    let total;

    if let Some(pids) = pids {
        total = pids.len() as u64;
        for pid in pids {
            if let Some(entry) = CanisterPoolStorageOps::get(pid) {
                if entry.status.is_failed() {
                    mark_pending_reset(pid);
                    requeued += 1;
                } else {
                    skipped += 1;
                }
            } else {
                skipped += 1;
            }
        }
    } else {
        let entries = CanisterPoolStorageOps::export();
        total = entries.len() as u64;
        for (pid, entry) in entries {
            if entry.status.is_failed() {
                mark_pending_reset(pid);
                requeued += 1;
            } else {
                skipped += 1;
            }
        }
    }

    if requeued > 0 {
        maybe_schedule_reset_worker();
    }

    Ok((requeued, skipped, total))
}

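/// Recycles a topology canister into the pool, preserving its role and module
/// hash so the entry can later be exported for the same role.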
pub async fn pool_recycle_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    let entry =
        SubnetCanisterRegistryOps::get(pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;

    let role = Some(entry.role.clone());
    let hash = entry.module_hash.clone();

    let cycles = reset_into_pool(pid).await?;
    let _ = SubnetCanisterRegistryOps::remove(&pid);
    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, role, None, hash);

    Ok(())
}

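/// Removes a `Ready` entry from the pool and returns its recorded role and
/// module hash. Note: the entry is taken out of the pool up front, so it is
/// consumed even when the subsequent validation fails.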
pub async fn pool_export_canister(pid: Principal) -> Result<(CanisterRole, Vec<u8>), Error> {
    OpsError::require_root()?;

    let entry = CanisterPoolStorageOps::take(&pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;

    if !entry.status.is_ready() {
        return Err(PoolOpsError::PoolEntryNotReady { pid }.into());
    }

    let role = entry.role.ok_or(PoolOpsError::MissingType { pid })?;
    let hash = entry
        .module_hash
        .ok_or(PoolOpsError::MissingModuleHash { pid })?;

    Ok((role, hash))
}

//
// ORCHESTRATION HOOK
//

pub async fn recycle_via_orchestrator(pid: Principal) -> Result<(), Error> {
    use crate::ops::orchestration::orchestrator::{CanisterLifecycleOrchestrator, LifecycleEvent};

    CanisterLifecycleOrchestrator::apply(LifecycleEvent::RecycleToPool { pid })
        .await
        .map(|_| ())
}

//
// TESTS
//

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        ids::CanisterRole,
        model::memory::{CanisterEntry, pool::CanisterPool, topology::SubnetCanisterRegistry},
    };
    use candid::Principal;

    fn p(id: u8) -> Principal {
        Principal::from_slice(&[id; 29])
    }

    fn reset_state() {
        CanisterPool::clear();
        SubnetCanisterRegistry::clear_for_tests();
        let _ = take_reset_scheduled();
    }

    #[test]
    fn import_queued_registers_pending_entries() {
        reset_state();

        let p1 = p(1);
        let p2 = p(2);

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1, p2], false).unwrap();
        assert_eq!(added, 2);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 0);
        assert_eq!(total, 2);

        let e1 = CanisterPoolStorageOps::get(p1).unwrap();
        let e2 = CanisterPoolStorageOps::get(p2).unwrap();
        assert!(e1.status.is_pending_reset());
        assert!(e2.status.is_pending_reset());
        assert_eq!(e1.cycles, Cycles::default());
        assert_eq!(e2.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_requeues_failed_entries() {
        reset_state();

        let p1 = p(3);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(10),
            CanisterPoolStatus::Failed {
                reason: "nope".to_string(),
            },
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 0);
        assert_eq!(total, 1);
        assert!(take_reset_scheduled());

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_skips_ready_entries() {
        reset_state();

        let p1 = p(4);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(42),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_ready());
        assert_eq!(entry.cycles, Cycles::new(42));
    }

    #[test]
    fn import_queued_skips_registry_canisters() {
        reset_state();

        let pid = p(5);
        SubnetCanisterRegistry::insert_for_tests(CanisterEntry {
            pid,
            role: CanisterRole::new("alpha"),
            parent_pid: None,
            module_hash: None,
            created_at: 0,
        });

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![pid], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());
    }

    #[test]
    fn import_queued_local_skips_non_importable() {
        reset_state();
        set_test_importable_override(Some(false));

        let pid = p(9);
        let (added, requeued, skipped, total) =
            futures::executor::block_on(pool_import_queued_canisters_local(vec![pid])).unwrap();

        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());

        set_test_importable_override(None);
    }

    #[test]
    fn register_or_update_preserves_metadata() {
        reset_state();

        let pid = p(6);
        let role = CanisterRole::new("alpha");
        let parent = p(9);
        let hash = vec![1, 2, 3];

        CanisterPoolStorageOps::register(
            pid,
            Cycles::new(7),
            CanisterPoolStatus::Failed {
                reason: "oops".to_string(),
            },
            Some(role.clone()),
            Some(parent),
            Some(hash.clone()),
        );

        mark_pending_reset(pid);

        let entry = CanisterPoolStorageOps::get(pid).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
        assert_eq!(entry.role, Some(role));
        assert_eq!(entry.parent, Some(parent));
        assert_eq!(entry.module_hash, Some(hash));
    }

    #[test]
    fn requeue_failed_scans_pool_and_schedules() {
        reset_state();

        let failed_pid = p(7);
        let ready_pid = p(8);

        CanisterPoolStorageOps::register(
            failed_pid,
            Cycles::new(11),
            CanisterPoolStatus::Failed {
                reason: "bad".to_string(),
            },
            None,
            None,
            None,
        );
        CanisterPoolStorageOps::register(
            ready_pid,
            Cycles::new(22),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (requeued, skipped, total) = pool_requeue_failed_inner(None, false).unwrap();
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 1);
        assert_eq!(total, 2);
        assert!(take_reset_scheduled());

        let failed_entry = CanisterPoolStorageOps::get(failed_pid).unwrap();
        let ready_entry = CanisterPoolStorageOps::get(ready_pid).unwrap();
        assert!(failed_entry.status.is_pending_reset());
        assert_eq!(failed_entry.cycles, Cycles::default());
        assert!(ready_entry.status.is_ready());
        assert_eq!(ready_entry.cycles, Cycles::new(22));
    }
}