canic_core/ops/

//! Pool lifecycle helpers.
//!
//! The root canister maintains a pool of empty or decommissioned canisters
//! that can be quickly reassigned when scaling.
//!
//! INVARIANTS:
//! - Pool canisters are NOT part of topology
//! - Pool canisters have NO parent
//! - Root is always a controller (alongside any configured controllers)
//! - Importing a canister is destructive (controllers reset, code uninstalled)
//! - Registry metadata is informational only while in pool
//! - Ready entries have no code installed (reset_into_pool uninstalls before Ready)
//
// LOCAL INVARIANT:
// On local replicas, only canisters that are routable in the current replica
// may enter or remain in the pool. IC/mainnet IDs are skipped on local.

pub use crate::ops::storage::pool::{CanisterPoolEntry, CanisterPoolStatus, CanisterPoolView};

use crate::{
    Error, ThisError,
    cdk::{
        api::canister_self,
        futures::spawn,
        mgmt::{CanisterSettings, UpdateSettingsArgs},
        types::Principal,
    },
    config::Config,
    log::Topic,
    ops::{
        OPS_POOL_CHECK_INTERVAL, OPS_POOL_INIT_DELAY, OpsError,
        config::ConfigOps,
        ic::{
            Network, build_network, canister_status, get_cycles,
            mgmt::{create_canister, uninstall_code},
            timer::{TimerId, TimerOps},
            update_settings,
        },
        prelude::*,
        storage::{pool::CanisterPoolStorageOps, topology::SubnetCanisterRegistryOps},
    },
    types::{Cycles, TC},
};
use candid::CandidType;
use serde::Deserialize;
use std::{cell::RefCell, time::Duration};

/// Internal reset worker and scheduling logic.
/// Isolated to keep pool lifecycle logic linear and readable.
mod reset_scheduler {
    use super::*;

    thread_local! {
        static RESET_IN_PROGRESS: RefCell<bool> = const { RefCell::new(false) };
        static RESET_RESCHEDULE: RefCell<bool> = const { RefCell::new(false) };
        static RESET_TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
    }

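    /// Schedule the reset worker to run as soon as possible (guarded, zero-delay timer).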
    pub fn schedule() {
        let _ = TimerOps::set_guarded(&RESET_TIMER, Duration::ZERO, "pool:pending", async {
            RESET_TIMER.with_borrow_mut(|slot| *slot = None);
            let _ = run_worker(super::POOL_RESET_BATCH_SIZE).await;
        });
    }

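    /// Re-arm the worker if a reschedule was requested mid-run or pending entries remain.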
    fn maybe_reschedule() {
        let reschedule = RESET_RESCHEDULE.with_borrow_mut(|f| {
            let v = *f;
            *f = false;
            v
        });

        if reschedule || has_pending_reset() {
            schedule();
        }
    }

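    /// Run one reset batch, guarding against concurrent runs. If a run is already
    /// in progress, request a reschedule instead of entering the batch.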
    async fn run_worker(limit: usize) -> Result<(), Error> {
        if limit == 0 {
            return Ok(());
        }

        let should_run = RESET_IN_PROGRESS.with_borrow_mut(|flag| {
            if *flag {
                RESET_RESCHEDULE.with_borrow_mut(|r| *r = true);
                false
            } else {
                *flag = true;
                true
            }
        });

        if !should_run {
            return Ok(());
        }

        let result = run_batch(limit).await;

        RESET_IN_PROGRESS.with_borrow_mut(|f| *f = false);
        maybe_reschedule();

        result
    }

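    /// Reset up to `limit` pending entries, oldest first. Entries that can no
    /// longer enter the pool are dropped; failures are recorded on the entry.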
    async fn run_batch(limit: usize) -> Result<(), Error> {
        let mut pending: Vec<_> = CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_pending_reset())
            .collect();

        if pending.is_empty() {
            return Ok(());
        }

        pending.sort_by_key(|(_, e)| e.created_at);

        for (pid, mut entry) in pending.into_iter().take(limit) {
            if !super::can_enter_pool(pid).await {
                let _ = CanisterPoolStorageOps::take(&pid);
                continue;
            }

            match super::reset_into_pool(pid).await {
                Ok(cycles) => {
                    entry.cycles = cycles;
                    entry.status = CanisterPoolStatus::Ready;
                }
                Err(err) => {
                    entry.status = CanisterPoolStatus::Failed {
                        reason: err.to_string(),
                    };
                    log!(
                        Topic::CanisterPool,
                        Warn,
                        "pool reset failed for {pid}: {err}"
                    );
                }
            }

            if !CanisterPoolStorageOps::update(pid, entry) {
                log!(
                    Topic::CanisterPool,
                    Warn,
                    "pool reset update missing for {pid}"
                );
            }
        }

        Ok(())
    }

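    /// Whether any pool entry is still awaiting a reset.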
    fn has_pending_reset() -> bool {
        CanisterPoolStorageOps::export()
            .into_iter()
            .any(|(_, e)| e.status.is_pending_reset())
    }

    // ---------- test hook ----------
    #[cfg(test)]
    thread_local! {
        static RESET_SCHEDULED: RefCell<bool> = const { RefCell::new(false) };
    }

    #[cfg(test)]
    pub fn mark_scheduled_for_test() {
        RESET_SCHEDULED.with_borrow_mut(|f| *f = true);
    }

    #[cfg(test)]
    pub fn take_scheduled_for_test() -> bool {
        RESET_SCHEDULED.with_borrow_mut(|flag| {
            let value = *flag;
            *flag = false;
            value
        })
    }
}

#[cfg(test)]
thread_local! {
    static TEST_IMPORTABLE_OVERRIDE: RefCell<Option<bool>> = const { RefCell::new(None) };
}

//
// TIMER STATE
//

thread_local! {
    static TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
}

/// Default cycles allocated to freshly created pool canisters.
const POOL_CANISTER_CYCLES: u128 = 5 * TC;

/// Default batch size for resetting pending pool entries.
const POOL_RESET_BATCH_SIZE: usize = 10;

///
/// PoolOpsError
///

#[derive(Debug, ThisError)]
pub enum PoolOpsError {
    #[error("pool entry missing for {pid}")]
    PoolEntryMissing { pid: Principal },

    #[error("pool import blocked for {pid}: canister is still registered in subnet registry")]
    ImportBlockedRegistered { pid: Principal },

    #[error("missing module hash for pool entry {pid}")]
    MissingModuleHash { pid: Principal },

    #[error("missing type for pool entry {pid}")]
    MissingType { pid: Principal },

    #[error("pool entry {pid} is not ready")]
    PoolEntryNotReady { pid: Principal },
}

impl From<PoolOpsError> for Error {
    fn from(err: PoolOpsError) -> Self {
        OpsError::from(err).into()
    }
}

///
/// PoolAdminCommand
///

#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminCommand {
    CreateEmpty,
    Recycle { pid: Principal },
    ImportImmediate { pid: Principal },
    ImportQueued { pids: Vec<Principal> },
    RequeueFailed { pids: Option<Vec<Principal>> },
}

///
/// PoolStatusCounts
/// Summary of pool entries by status.
///

#[derive(CandidType, Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq)]
pub struct PoolStatusCounts {
    pub ready: u64,
    pub pending_reset: u64,
    pub failed: u64,
    pub total: u64,
}

///
/// PoolImportSummary
/// Diagnostics for queued imports.
///

#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub struct PoolImportSummary {
    pub status_counts: PoolStatusCounts,
    pub skipped_in_registry: u64,
    pub skipped_already_ready: u64,
    pub skipped_already_pending_reset: u64,
    pub skipped_already_failed: u64,
    pub skipped_non_importable: u64,
}

///
/// PoolAdminResponse
///

#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminResponse {
    Created {
        pid: Principal,
    },
    Recycled,
    Imported,
    QueuedImported {
        added: u64,
        requeued: u64,
        skipped: u64,
        total: u64,
        summary: PoolImportSummary,
    },
    FailedRequeued {
        requeued: u64,
        skipped: u64,
        total: u64,
    },
}

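/// Controllers for pool canisters: the configured controllers plus root itself.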
fn pool_controllers() -> Vec<Principal> {
    let mut controllers = Config::get().controllers.clone();

    let root = canister_self();
    if !controllers.contains(&root) {
        controllers.push(root);
    }

    controllers
}

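/// Whether this build targets a local replica rather than the IC.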
fn is_local_build() -> bool {
    build_network() == Some(Network::Local)
}

///
/// Returns true iff the canister is routable in the current local replica.
///
/// Local-only precondition check.
/// Must be cheap, non-destructive, and side-effect free.
///
async fn is_importable_on_local(pid: Principal) -> bool {
    check_importable_on_local(pid).await.is_ok()
}

async fn check_importable_on_local(pid: Principal) -> Result<(), String> {
    #[cfg(test)]
    if let Some(override_value) = TEST_IMPORTABLE_OVERRIDE.with(|slot| *slot.borrow()) {
        if override_value {
            return Ok(());
        }
        return Err("test override: non-importable".to_string());
    }

    if !is_local_build() {
        return Ok(());
    }

    match canister_status(pid).await {
        Ok(_) => Ok(()),
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import skipped for {pid} (local non-importable): {err}"
            );
            Err(err.to_string())
        }
    }
}

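/// Gate for pool membership: always true on IC builds, otherwise require that
/// the canister is routable on the local replica (see LOCAL INVARIANT above).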
async fn can_enter_pool(pid: Principal) -> bool {
    if !is_local_build() {
        return true;
    }

    is_importable_on_local(pid).await
}

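/// Tally pool entries by status.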
fn pool_status_counts() -> PoolStatusCounts {
    let mut counts = PoolStatusCounts::default();

    for (_, entry) in CanisterPoolStorageOps::export() {
        match entry.status {
            CanisterPoolStatus::Ready => counts.ready += 1,
            CanisterPoolStatus::PendingReset => counts.pending_reset += 1,
            CanisterPoolStatus::Failed { .. } => counts.failed += 1,
        }
    }

    counts.total = counts.ready + counts.pending_reset + counts.failed;
    counts
}

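/// Destructively reset a canister for pool use: take over its controllers,
/// uninstall its code, and report its remaining cycles.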
async fn reset_into_pool(pid: Principal) -> Result<Cycles, Error> {
    update_settings(&UpdateSettingsArgs {
        canister_id: pid,
        settings: CanisterSettings {
            controllers: Some(pool_controllers()),
            ..Default::default()
        },
    })
    .await?;

    uninstall_code(pid).await?;

    get_cycles(pid).await
}

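/// Upsert a pool entry, overwriting cycles and status but keeping any existing
/// role, parent, and module hash unless new values are provided.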
fn register_or_update_preserving_metadata(
    pid: Principal,
    cycles: Cycles,
    status: CanisterPoolStatus,
    role: Option<CanisterRole>,
    parent: Option<Principal>,
    module_hash: Option<Vec<u8>>,
) {
    if let Some(mut entry) = CanisterPoolStorageOps::get(pid) {
        entry.cycles = cycles;
        entry.status = status;
        entry.role = role.or(entry.role);
        entry.parent = parent.or(entry.parent);
        entry.module_hash = module_hash.or(entry.module_hash);
        let _ = CanisterPoolStorageOps::update(pid, entry);
    } else {
        CanisterPoolStorageOps::register(pid, cycles, status, role, parent, module_hash);
    }
}

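// Status-transition helpers. Each upserts via register_or_update_preserving_metadata,
// so existing role/parent/module_hash metadata is kept.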
fn mark_pending_reset(pid: Principal) {
    register_or_update_preserving_metadata(
        pid,
        Cycles::default(),
        CanisterPoolStatus::PendingReset,
        None,
        None,
        None,
    );
}

fn mark_ready(pid: Principal, cycles: Cycles) {
    register_or_update_preserving_metadata(
        pid,
        cycles,
        CanisterPoolStatus::Ready,
        None,
        None,
        None,
    );
}

fn mark_failed(pid: Principal, err: &Error) {
    register_or_update_preserving_metadata(
        pid,
        Cycles::default(),
        CanisterPoolStatus::Failed {
            reason: err.to_string(),
        },
        None,
        None,
        None,
    );
}

///
/// PoolOps
///

pub struct PoolOps;

impl PoolOps {
    // ---------------------------------------------------------------------
    // Lifecycle
    // ---------------------------------------------------------------------

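    /// Start the pool check timers: an initial run after OPS_POOL_INIT_DELAY,
    /// then recurring checks every OPS_POOL_CHECK_INTERVAL.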
    pub fn start() {
        let _ = TimerOps::set_guarded_interval(
            &TIMER,
            OPS_POOL_INIT_DELAY,
            "pool:init",
            || async {
                let _ = Self::check();
            },
            OPS_POOL_CHECK_INTERVAL,
            "pool:interval",
            || async {
                let _ = Self::check();
            },
        );
    }

    pub fn stop() {
        let _ = TimerOps::clear_guarded(&TIMER);
    }

    // ---------------------------------------------------------------------
    // Public API
    // ---------------------------------------------------------------------

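    /// Schedule any pending resets, then top the pool up toward the configured
    /// minimum size (at most 10 new canisters per call). Returns how many
    /// creations were kicked off.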
    #[must_use]
    pub fn check() -> u64 {
        reset_scheduler::schedule();

        let subnet_cfg = ConfigOps::current_subnet();
        let min_size: u64 = subnet_cfg.pool.minimum_size.into();
        let ready_size = Self::ready_len();

        if ready_size >= min_size {
            return 0;
        }

        let missing = (min_size - ready_size).min(10);
        log!(
            Topic::CanisterPool,
            Ok,
            "pool low: {ready_size}/{min_size}, creating {missing}"
        );

        spawn(async move {
            for i in 0..missing {
                match pool_create_canister().await {
                    Ok(_) => log!(
                        Topic::CanisterPool,
                        Ok,
                        "created pool canister {}/{}",
                        i + 1,
                        missing
                    ),
                    Err(e) => log!(Topic::CanisterPool, Warn, "pool creation failed: {e:?}"),
                }
            }
        });

        missing
    }

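    /// Remove and return a Ready entry, if one is available.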
    #[must_use]
    pub fn pop_ready() -> Option<(Principal, CanisterPoolEntry)> {
        CanisterPoolStorageOps::pop_ready()
    }

    #[must_use]
    pub fn contains(pid: &Principal) -> bool {
        CanisterPoolStorageOps::contains(pid)
    }

    #[must_use]
    pub fn export() -> CanisterPoolView {
        CanisterPoolStorageOps::export()
    }

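    /// Dispatch a pool admin command. Queued imports take the local-aware path
    /// on local replicas and the direct path on the IC.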
    pub async fn admin(cmd: PoolAdminCommand) -> Result<PoolAdminResponse, Error> {
        match cmd {
            PoolAdminCommand::CreateEmpty => {
                let pid = pool_create_canister().await?;
                Ok(PoolAdminResponse::Created { pid })
            }
            PoolAdminCommand::Recycle { pid } => {
                pool_recycle_canister(pid).await?;
                Ok(PoolAdminResponse::Recycled)
            }
            PoolAdminCommand::ImportImmediate { pid } => {
                pool_import_canister(pid).await?;
                Ok(PoolAdminResponse::Imported)
            }
            PoolAdminCommand::ImportQueued { pids } => {
                let (a, r, s, t, summary) = if is_local_build() {
                    pool_import_queued_canisters_local(pids).await?
                } else {
                    pool_import_queued_canisters(pids)?
                };
                Ok(PoolAdminResponse::QueuedImported {
                    added: a,
                    requeued: r,
                    skipped: s,
                    total: t,
                    summary,
                })
            }
            PoolAdminCommand::RequeueFailed { pids } => {
                let (requeued, skipped, total) = pool_requeue_failed(pids)?;
                Ok(PoolAdminResponse::FailedRequeued {
                    requeued,
                    skipped,
                    total,
                })
            }
        }
    }

    // ---------------------------------------------------------------------
    // Internal helpers
    // ---------------------------------------------------------------------

    fn ready_len() -> u64 {
        CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_ready())
            .count() as u64
    }
}

//
// CREATE / IMPORT / RECYCLE / EXPORT
//

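/// Create a brand-new empty canister, funded with POOL_CANISTER_CYCLES, and
/// register it in the pool as Ready. Root-only.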
pub async fn pool_create_canister() -> Result<Principal, Error> {
    OpsError::require_root()?;

    let cycles = Cycles::new(POOL_CANISTER_CYCLES);
    let pid = create_canister(pool_controllers(), cycles.clone()).await?;

    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, None, None, None);
    Ok(pid)
}

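/// Import an existing, unregistered canister into the pool right away:
/// destructive reset, then mark Ready. Root-only. Canisters still present in
/// the subnet registry are rejected; locally non-routable IDs are dropped
/// without error.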
pub async fn pool_import_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    if SubnetCanisterRegistryOps::get(pid).is_some() {
        return Err(PoolOpsError::ImportBlockedRegistered { pid }.into());
    }

    if !can_enter_pool(pid).await {
        let _ = CanisterPoolStorageOps::take(&pid);
        return Ok(());
    }

    mark_pending_reset(pid);

    match reset_into_pool(pid).await {
        Ok(cycles) => {
            let _ = SubnetCanisterRegistryOps::remove(&pid);
            mark_ready(pid, cycles);
        }
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import failed for {pid}: {err}"
            );
            mark_failed(pid, &err);
            return Err(err);
        }
    }

    Ok(())
}

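/// Local-replica variant of the queued import: same bookkeeping as the direct
/// path, but each candidate is first checked for routability and non-importable
/// IDs are skipped (and evicted if already present).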
async fn pool_import_queued_canisters_local(
    pids: Vec<Principal>,
) -> Result<(u64, u64, u64, u64, PoolImportSummary), Error> {
    let total = pids.len() as u64;
    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;
    let mut summary = PoolImportSummary {
        status_counts: PoolStatusCounts::default(),
        skipped_in_registry: 0,
        skipped_already_ready: 0,
        skipped_already_pending_reset: 0,
        skipped_already_failed: 0,
        skipped_non_importable: 0,
    };

    for pid in pids {
        if SubnetCanisterRegistryOps::get(pid).is_some() {
            skipped += 1;
            summary.skipped_in_registry += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(pid) {
            if entry.status.is_failed() {
                if is_importable_on_local(pid).await {
                    mark_pending_reset(pid);
                    requeued += 1;
                } else {
                    let _ = CanisterPoolStorageOps::take(&pid);
                    skipped += 1;
                    summary.skipped_non_importable += 1;
                }
            } else {
                skipped += 1;
                match entry.status {
                    CanisterPoolStatus::Ready => summary.skipped_already_ready += 1,
                    CanisterPoolStatus::PendingReset => summary.skipped_already_pending_reset += 1,
                    CanisterPoolStatus::Failed { .. } => summary.skipped_already_failed += 1,
                }
            }
            continue;
        }

        if is_importable_on_local(pid).await {
            mark_pending_reset(pid);
            added += 1;
        } else {
            skipped += 1;
            summary.skipped_non_importable += 1;
        }
    }

    if added > 0 || requeued > 0 {
        maybe_schedule_reset_worker();
    }

    summary.status_counts = pool_status_counts();

    Ok((added, requeued, skipped, total, summary))
}

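/// Queue existing canisters for the reset worker (IC path): new IDs become
/// PendingReset, Failed entries are requeued, everything else is skipped.
/// Root-only; returns (added, requeued, skipped, total, summary).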
fn pool_import_queued_canisters(
    pids: Vec<Principal>,
) -> Result<(u64, u64, u64, u64, PoolImportSummary), Error> {
    pool_import_queued_canisters_inner(pids, true)
}

fn pool_import_queued_canisters_inner(
    pids: Vec<Principal>,
    enforce_root: bool,
) -> Result<(u64, u64, u64, u64, PoolImportSummary), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;
    let mut summary = PoolImportSummary {
        status_counts: PoolStatusCounts::default(),
        skipped_in_registry: 0,
        skipped_already_ready: 0,
        skipped_already_pending_reset: 0,
        skipped_already_failed: 0,
        skipped_non_importable: 0,
    };

    for pid in &pids {
        if SubnetCanisterRegistryOps::get(*pid).is_some() {
            skipped += 1;
            summary.skipped_in_registry += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(*pid) {
            if entry.status.is_failed() {
                mark_pending_reset(*pid);
                requeued += 1;
            } else {
                skipped += 1;
                match entry.status {
                    CanisterPoolStatus::Ready => summary.skipped_already_ready += 1,
                    CanisterPoolStatus::PendingReset => summary.skipped_already_pending_reset += 1,
                    CanisterPoolStatus::Failed { .. } => summary.skipped_already_failed += 1,
                }
            }
            continue;
        }

        mark_pending_reset(*pid);
        added += 1;
    }

    maybe_schedule_reset_worker();

    summary.status_counts = pool_status_counts();

    Ok((added, requeued, skipped, pids.len() as u64, summary))
}

#[cfg(not(test))]
fn maybe_schedule_reset_worker() {
    reset_scheduler::schedule();
}

#[cfg(test)]
fn maybe_schedule_reset_worker() {
    reset_scheduler::mark_scheduled_for_test();
}

#[cfg(test)]
fn take_reset_scheduled() -> bool {
    reset_scheduler::take_scheduled_for_test()
}

#[cfg(test)]
fn set_test_importable_override(value: Option<bool>) {
    TEST_IMPORTABLE_OVERRIDE.with_borrow_mut(|slot| *slot = value);
}

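/// Requeue Failed entries for the reset worker. With an explicit `pids` list
/// only those entries are considered; otherwise the whole pool is scanned.
/// Root-only.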
fn pool_requeue_failed(pids: Option<Vec<Principal>>) -> Result<(u64, u64, u64), Error> {
    pool_requeue_failed_inner(pids, true)
}

fn pool_requeue_failed_inner(
    pids: Option<Vec<Principal>>,
    enforce_root: bool,
) -> Result<(u64, u64, u64), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut requeued = 0;
    let mut skipped = 0;
    let total;

    if let Some(pids) = pids {
        total = pids.len() as u64;
        for pid in pids {
            if let Some(entry) = CanisterPoolStorageOps::get(pid) {
                if entry.status.is_failed() {
                    mark_pending_reset(pid);
                    requeued += 1;
                } else {
                    skipped += 1;
                }
            } else {
                skipped += 1;
            }
        }
    } else {
        let entries = CanisterPoolStorageOps::export();
        total = entries.len() as u64;
        for (pid, entry) in entries {
            if entry.status.is_failed() {
                mark_pending_reset(pid);
                requeued += 1;
            } else {
                skipped += 1;
            }
        }
    }

    if requeued > 0 {
        maybe_schedule_reset_worker();
    }

    Ok((requeued, skipped, total))
}

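/// Move a registered canister back into the pool: reset it, drop it from the
/// subnet registry, and register it as Ready while keeping its role and module
/// hash for diagnostics. Root-only.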
pub async fn pool_recycle_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    let entry =
        SubnetCanisterRegistryOps::get(pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;

    let role = Some(entry.role.clone());
    let hash = entry.module_hash.clone();

    let cycles = reset_into_pool(pid).await?;
    let _ = SubnetCanisterRegistryOps::remove(&pid);
    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, role, None, hash);

    Ok(())
}

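/// Take an entry out of the pool and return its recorded role and module hash;
/// errors if the entry is missing, not Ready, or lacks that metadata. Root-only.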
pub async fn pool_export_canister(pid: Principal) -> Result<(CanisterRole, Vec<u8>), Error> {
    OpsError::require_root()?;

    let entry = CanisterPoolStorageOps::take(&pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;

    if !entry.status.is_ready() {
        return Err(PoolOpsError::PoolEntryNotReady { pid }.into());
    }

    let role = entry.role.ok_or(PoolOpsError::MissingType { pid })?;
    let hash = entry
        .module_hash
        .ok_or(PoolOpsError::MissingModuleHash { pid })?;

    Ok((role, hash))
}

//
// ORCHESTRATION HOOK
//

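/// Route a recycle through the lifecycle orchestrator (emits
/// LifecycleEvent::RecycleToPool) instead of calling pool_recycle_canister directly.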
pub async fn recycle_via_orchestrator(pid: Principal) -> Result<(), Error> {
    use crate::ops::orchestration::orchestrator::{CanisterLifecycleOrchestrator, LifecycleEvent};

    CanisterLifecycleOrchestrator::apply(LifecycleEvent::RecycleToPool { pid })
        .await
        .map(|_| ())
}

//
// TESTS
//

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        ids::CanisterRole,
        model::memory::{CanisterEntry, pool::CanisterPool, topology::SubnetCanisterRegistry},
    };
    use candid::Principal;

    fn p(id: u8) -> Principal {
        Principal::from_slice(&[id; 29])
    }

    fn reset_state() {
        CanisterPool::clear();
        SubnetCanisterRegistry::clear_for_tests();
        let _ = take_reset_scheduled();
    }

    #[test]
    fn import_queued_registers_pending_entries() {
        reset_state();

        let p1 = p(1);
        let p2 = p(2);

        let (added, requeued, skipped, total, _) =
            pool_import_queued_canisters_inner(vec![p1, p2], false).unwrap();
        assert_eq!(added, 2);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 0);
        assert_eq!(total, 2);

        let e1 = CanisterPoolStorageOps::get(p1).unwrap();
        let e2 = CanisterPoolStorageOps::get(p2).unwrap();
        assert!(e1.status.is_pending_reset());
        assert!(e2.status.is_pending_reset());
        assert_eq!(e1.cycles, Cycles::default());
        assert_eq!(e2.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_requeues_failed_entries() {
        reset_state();

        let p1 = p(3);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(10),
            CanisterPoolStatus::Failed {
                reason: "nope".to_string(),
            },
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total, _) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 0);
        assert_eq!(total, 1);
        assert!(take_reset_scheduled());

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_skips_ready_entries() {
        reset_state();

        let p1 = p(4);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(42),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total, _) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_ready());
        assert_eq!(entry.cycles, Cycles::new(42));
    }

    #[test]
    fn import_queued_skips_registry_canisters() {
        reset_state();

        let pid = p(5);
        SubnetCanisterRegistry::insert_for_tests(CanisterEntry {
            pid,
            role: CanisterRole::new("alpha"),
            parent_pid: None,
            module_hash: None,
            created_at: 0,
        });

        let (added, requeued, skipped, total, _) =
            pool_import_queued_canisters_inner(vec![pid], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());
    }

    #[test]
    fn import_queued_local_skips_non_importable() {
        reset_state();
        set_test_importable_override(Some(false));

        let pid = p(9);
        let (added, requeued, skipped, total, _) =
            futures::executor::block_on(pool_import_queued_canisters_local(vec![pid])).unwrap();

        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());

        set_test_importable_override(None);
    }

    #[test]
    fn register_or_update_preserves_metadata() {
        reset_state();

        let pid = p(6);
        let role = CanisterRole::new("alpha");
        let parent = p(9);
        let hash = vec![1, 2, 3];

        CanisterPoolStorageOps::register(
            pid,
            Cycles::new(7),
            CanisterPoolStatus::Failed {
                reason: "oops".to_string(),
            },
            Some(role.clone()),
            Some(parent),
            Some(hash.clone()),
        );

        mark_pending_reset(pid);

        let entry = CanisterPoolStorageOps::get(pid).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
        assert_eq!(entry.role, Some(role));
        assert_eq!(entry.parent, Some(parent));
        assert_eq!(entry.module_hash, Some(hash));
    }

    #[test]
    fn requeue_failed_scans_pool_and_schedules() {
        reset_state();

        let failed_pid = p(7);
        let ready_pid = p(8);

        CanisterPoolStorageOps::register(
            failed_pid,
            Cycles::new(11),
            CanisterPoolStatus::Failed {
                reason: "bad".to_string(),
            },
            None,
            None,
            None,
        );
        CanisterPoolStorageOps::register(
            ready_pid,
            Cycles::new(22),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (requeued, skipped, total) = pool_requeue_failed_inner(None, false).unwrap();
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 1);
        assert_eq!(total, 2);
        assert!(take_reset_scheduled());

        let failed_entry = CanisterPoolStorageOps::get(failed_pid).unwrap();
        let ready_entry = CanisterPoolStorageOps::get(ready_pid).unwrap();
        assert!(failed_entry.status.is_pending_reset());
        assert_eq!(failed_entry.cycles, Cycles::default());
        assert!(ready_entry.status.is_ready());
        assert_eq!(ready_entry.cycles, Cycles::new(22));
    }
}