canic_core/ops/

//! Pool lifecycle helpers.
//!
//! The root canister maintains a pool of empty or decommissioned canisters
//! that can be quickly reassigned when scaling.
//!
//! INVARIANTS:
//! - Pool canisters are NOT part of topology
//! - Pool canisters have NO parent
//! - Root is always a controller (pool_controllers adds it if missing)
//! - Importing a canister is destructive (code uninstalled, controllers reset)
//! - Registry metadata is informational only while in pool
//! - Ready entries have no code installed (reset_into_pool uninstalls before Ready)
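//!
//! Sketch of the admin surface (illustrative, not a doctest; assumes this
//! module's re-export path and an async root-canister call context):
//!
//! ```ignore
//! use canic_core::ops::pool::{PoolAdminCommand, PoolOps};
//!
//! // Create one empty canister directly into the pool as Ready.
//! let created = PoolOps::admin(PoolAdminCommand::CreateEmpty).await?;
//!
//! // Requeue every Failed entry for another reset pass.
//! let requeued = PoolOps::admin(PoolAdminCommand::RequeueFailed { pids: None }).await?;
//! ```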
//
// LOCAL INVARIANT:
// On local replicas, only canisters that are routable in the current replica
// may enter or remain in the pool. Mainnet canister IDs are never importable.

pub use crate::ops::storage::pool::{CanisterPoolEntry, CanisterPoolStatus, CanisterPoolView};

use crate::{
    Error, ThisError,
    cdk::{
        api::canister_self,
        futures::spawn,
        mgmt::{CanisterSettings, UpdateSettingsArgs},
        types::Principal,
    },
    config::Config,
    log::Topic,
    ops::{
        OPS_POOL_CHECK_INTERVAL, OPS_POOL_INIT_DELAY, OpsError,
        config::ConfigOps,
        ic::{
            Network, build_network, canister_status, get_cycles,
            mgmt::{create_canister, uninstall_code},
            timer::{TimerId, TimerOps},
            update_settings,
        },
        prelude::*,
        storage::{pool::CanisterPoolStorageOps, topology::SubnetCanisterRegistryOps},
    },
    types::{Cycles, TC},
};
use candid::CandidType;
use serde::Deserialize;
use std::{cell::RefCell, time::Duration};

#[cfg(test)]
thread_local! {
    static TEST_IMPORTABLE_OVERRIDE: RefCell<Option<bool>> = const { RefCell::new(None) };
}

//
// TIMER STATE
//

thread_local! {
    /// Active maintenance timer: first the init one-shot, then the recurring check.
    static TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
    /// Guards against overlapping reset-worker runs.
    static RESET_IN_PROGRESS: RefCell<bool> = const { RefCell::new(false) };
    /// Set when a worker run is requested while one is already in flight.
    static RESET_RESCHEDULE: RefCell<bool> = const { RefCell::new(false) };
    /// Zero-delay timer armed for the next reset-worker run.
    static RESET_TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
}

/// Default cycles allocated to freshly created pool canisters.
const POOL_CANISTER_CYCLES: u128 = 5 * TC;

/// Default batch size for resetting pending pool entries.
const POOL_RESET_BATCH_SIZE: usize = 10;

///
/// PoolOpsError
///

#[derive(Debug, ThisError)]
pub enum PoolOpsError {
    #[error("pool entry missing for {pid}")]
    PoolEntryMissing { pid: Principal },

    #[error("missing module hash for pool entry {pid}")]
    MissingModuleHash { pid: Principal },

    #[error("missing role for pool entry {pid}")]
    MissingType { pid: Principal },

    #[error("pool entry {pid} is not ready")]
    PoolEntryNotReady { pid: Principal },
}

impl From<PoolOpsError> for Error {
    fn from(err: PoolOpsError) -> Self {
        OpsError::from(err).into()
    }
}

///
/// PoolAdminCommand
///

#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminCommand {
    /// Create a fresh empty canister directly into the pool.
    CreateEmpty,
    /// Wipe a registry canister and return it to the pool.
    Recycle { pid: Principal },
    /// Import a single canister and reset it immediately.
    ImportImmediate { pid: Principal },
    /// Queue canisters for import; the reset worker processes them in batches.
    ImportQueued { pids: Vec<Principal> },
    /// Requeue failed entries; `None` rescans the whole pool.
    RequeueFailed { pids: Option<Vec<Principal>> },
}

///
/// PoolAdminResponse
///

#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminResponse {
    Created {
        pid: Principal,
    },
    Recycled,
    Imported,
    QueuedImported {
        /// Entries newly queued as PendingReset.
        added: u64,
        /// Failed entries returned to the queue.
        requeued: u64,
        /// Entries skipped (already pooled, Ready, or in the registry).
        skipped: u64,
        /// Total principals submitted.
        total: u64,
    },
    FailedRequeued {
        /// Failed entries returned to the queue.
        requeued: u64,
        /// Entries that were not in the Failed state (or not found).
        skipped: u64,
        /// Total entries considered.
        total: u64,
    },
}

//
// INTERNAL HELPERS
//

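/// Controllers applied to pool canisters: the configured controllers plus root itself.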
fn pool_controllers() -> Vec<Principal> {
    let mut controllers = Config::get().controllers.clone();

    let root = canister_self();
    if !controllers.contains(&root) {
        controllers.push(root);
    }

    controllers
}

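/// True when this build targets a local replica network.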
fn is_local_build() -> bool {
    build_network() == Some(Network::Local)
}

///
/// Returns whether `pid` may be imported on this build.
///
/// On non-local builds this always returns `true`. On local replicas it
/// returns `true` iff the canister is routable in the current replica.
///
/// Local-only precondition check.
/// Must be cheap, non-destructive, and side-effect free.
///
async fn is_importable_on_local(pid: Principal) -> bool {
    #[cfg(test)]
    if let Some(override_value) = TEST_IMPORTABLE_OVERRIDE.with(|slot| *slot.borrow()) {
        return override_value;
    }

    if !is_local_build() {
        return true;
    }

    match canister_status(pid).await {
        Ok(_) => true,
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import skipped for {pid} (local non-importable): {err}"
            );
            false
        }
    }
}

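/// Uninstalls the canister's code, resets its controllers to the pool set,
/// and returns its remaining cycle balance.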
async fn reset_into_pool(pid: Principal) -> Result<Cycles, Error> {
    uninstall_code(pid).await?;

    update_settings(&UpdateSettingsArgs {
        canister_id: pid,
        settings: CanisterSettings {
            controllers: Some(pool_controllers()),
            ..Default::default()
        },
    })
    .await?;

    get_cycles(pid).await
}

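/// Updates an existing pool entry in place, keeping previously recorded
/// role/parent/module-hash metadata unless new values are supplied; registers
/// a fresh entry if none exists.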
fn register_or_update_preserving_metadata(
    pid: Principal,
    cycles: Cycles,
    status: CanisterPoolStatus,
    role: Option<CanisterRole>,
    parent: Option<Principal>,
    module_hash: Option<Vec<u8>>,
) {
    if let Some(mut entry) = CanisterPoolStorageOps::get(pid) {
        entry.cycles = cycles;
        entry.status = status;
        entry.role = role.or(entry.role);
        entry.parent = parent.or(entry.parent);
        entry.module_hash = module_hash.or(entry.module_hash);
        let _ = CanisterPoolStorageOps::update(pid, entry);
    } else {
        CanisterPoolStorageOps::register(pid, cycles, status, role, parent, module_hash);
    }
}

///
/// PoolOps
///

pub struct PoolOps;

impl PoolOps {
    // ---------------------------------------------------------------------
    // Lifecycle
    // ---------------------------------------------------------------------

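    /// Starts pool maintenance: a one-shot init check, then a recurring
    /// interval check. Idempotent while a timer is already armed.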
    pub fn start() {
        TIMER.with_borrow_mut(|slot| {
            if slot.is_some() {
                return;
            }

            let id = TimerOps::set(OPS_POOL_INIT_DELAY, "pool:init", async {
                let _ = Self::check();

                let interval =
                    TimerOps::set_interval(OPS_POOL_CHECK_INTERVAL, "pool:interval", || async {
                        let _ = Self::check();
                    });

                TIMER.with_borrow_mut(|slot| *slot = Some(interval));
            });

            *slot = Some(id);
        });
    }

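    /// Cancels the active pool maintenance timer, if any.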
    pub fn stop() {
        TIMER.with_borrow_mut(|slot| {
            if let Some(id) = slot.take() {
                TimerOps::clear(id);
            }
        });
    }

    // ---------------------------------------------------------------------
    // Public API
    // ---------------------------------------------------------------------

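    /// Schedules the reset worker, then tops the pool up towards the
    /// configured minimum size (at most 10 creations per call). Returns the
    /// number of canisters scheduled for creation.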
    #[must_use]
    pub fn check() -> u64 {
        Self::schedule_reset_worker();

        let subnet_cfg = ConfigOps::current_subnet();
        let min_size: u64 = subnet_cfg.pool.minimum_size.into();
        let ready_size = Self::ready_len();

        if ready_size >= min_size {
            return 0;
        }

        let missing = (min_size - ready_size).min(10);
        log!(
            Topic::CanisterPool,
            Ok,
            "pool low: {ready_size}/{min_size}, creating {missing}"
        );

        spawn(async move {
            for i in 0..missing {
                match pool_create_canister().await {
                    Ok(_) => log!(
                        Topic::CanisterPool,
                        Ok,
                        "created pool canister {}/{}",
                        i + 1,
                        missing
                    ),
                    Err(e) => log!(Topic::CanisterPool, Warn, "pool creation failed: {e:?}"),
                }
            }
        });

        missing
    }

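    /// Removes and returns a Ready entry, if one is available.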
    #[must_use]
    pub fn pop_ready() -> Option<(Principal, CanisterPoolEntry)> {
        CanisterPoolStorageOps::pop_ready()
    }

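    /// True if the canister is tracked by the pool, in any status.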
    #[must_use]
    pub fn contains(pid: &Principal) -> bool {
        CanisterPoolStorageOps::contains(pid)
    }

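    /// Snapshot of the current pool contents.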
    #[must_use]
    pub fn export() -> CanisterPoolView {
        CanisterPoolStorageOps::export()
    }

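    /// Dispatches a pool admin command to the matching pool operation.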
    pub async fn admin(cmd: PoolAdminCommand) -> Result<PoolAdminResponse, Error> {
        match cmd {
            PoolAdminCommand::CreateEmpty => {
                let pid = pool_create_canister().await?;
                Ok(PoolAdminResponse::Created { pid })
            }
            PoolAdminCommand::Recycle { pid } => {
                pool_recycle_canister(pid).await?;
                Ok(PoolAdminResponse::Recycled)
            }
            PoolAdminCommand::ImportImmediate { pid } => {
                pool_import_canister(pid).await?;
                Ok(PoolAdminResponse::Imported)
            }
            PoolAdminCommand::ImportQueued { pids } => {
                // Enforce root here: the local variant does not perform its
                // own root check (the non-local path re-checks internally).
                OpsError::require_root()?;
                let (a, r, s, t) = if is_local_build() {
                    pool_import_queued_canisters_local(pids).await?
                } else {
                    pool_import_queued_canisters(pids)?
                };
                Ok(PoolAdminResponse::QueuedImported {
                    added: a,
                    requeued: r,
                    skipped: s,
                    total: t,
                })
            }
            PoolAdminCommand::RequeueFailed { pids } => {
                let (requeued, skipped, total) = pool_requeue_failed(pids)?;
                Ok(PoolAdminResponse::FailedRequeued {
                    requeued,
                    skipped,
                    total,
                })
            }
        }
    }

    // ---------------------------------------------------------------------
    // Scheduler + worker
    // ---------------------------------------------------------------------

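    /// Number of entries currently in the Ready state.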
    fn ready_len() -> u64 {
        CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_ready())
            .count() as u64
    }

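    /// True if any entry is awaiting reset.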
    fn has_pending_reset() -> bool {
        CanisterPoolStorageOps::export()
            .into_iter()
            .any(|(_, e)| e.status.is_pending_reset())
    }

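    /// Re-arms the reset worker if a run was requested while one was in
    /// flight, or if pending-reset entries remain.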
    fn maybe_reschedule() {
        let reschedule = RESET_RESCHEDULE.with_borrow_mut(|f| {
            let v = *f;
            *f = false;
            v
        });

        if reschedule || Self::has_pending_reset() {
            Self::schedule_reset_worker();
        }
    }

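    /// Arms a zero-delay timer for the reset worker, unless one is already
    /// scheduled.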
    fn schedule_reset_worker() {
        RESET_TIMER.with_borrow_mut(|slot| {
            if slot.is_some() {
                return;
            }

            let id = TimerOps::set(Duration::ZERO, "pool:pending", async {
                RESET_TIMER.with_borrow_mut(|slot| *slot = None);
                let _ = Self::run_reset_worker(POOL_RESET_BATCH_SIZE).await;
            });

            *slot = Some(id);
        });
    }

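    /// Runs one reset batch, guarding against concurrent runs; an overlapping
    /// request sets the reschedule flag instead of running twice.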
    async fn run_reset_worker(limit: usize) -> Result<(), Error> {
        if limit == 0 {
            return Ok(());
        }

        let should_run = RESET_IN_PROGRESS.with_borrow_mut(|flag| {
            if *flag {
                RESET_RESCHEDULE.with_borrow_mut(|r| *r = true);
                false
            } else {
                *flag = true;
                true
            }
        });

        if !should_run {
            return Ok(());
        }

        let result = Self::run_reset_batch(limit).await;

        RESET_IN_PROGRESS.with_borrow_mut(|f| *f = false);
        Self::maybe_reschedule();

        result
    }

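    /// Resets up to `limit` pending entries, oldest first. Successful resets
    /// become Ready; failures are recorded with their reason. On local
    /// builds, non-routable canisters are dropped from the pool instead.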
    async fn run_reset_batch(limit: usize) -> Result<(), Error> {
        let mut pending: Vec<_> = CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_pending_reset())
            .collect();

        if pending.is_empty() {
            return Ok(());
        }

        pending.sort_by_key(|(_, e)| e.created_at);

        for (pid, mut entry) in pending.into_iter().take(limit) {
            if is_local_build() && !is_importable_on_local(pid).await {
                let _ = CanisterPoolStorageOps::take(&pid);
                continue;
            }

            match reset_into_pool(pid).await {
                Ok(cycles) => {
                    entry.cycles = cycles;
                    entry.status = CanisterPoolStatus::Ready;
                }
                Err(err) => {
                    entry.status = CanisterPoolStatus::Failed {
                        reason: err.to_string(),
                    };
                    log!(
                        Topic::CanisterPool,
                        Warn,
                        "pool reset failed for {pid}: {err}"
                    );
                }
            }

            if !CanisterPoolStorageOps::update(pid, entry) {
                log!(
                    Topic::CanisterPool,
                    Warn,
                    "pool reset update missing for {pid}"
                );
            }
        }

        Ok(())
    }
}

//
// CREATE / IMPORT / RECYCLE / EXPORT
//

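/// Creates an empty canister with the default cycle allocation and registers
/// it in the pool as Ready. Root only.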
pub async fn pool_create_canister() -> Result<Principal, Error> {
    OpsError::require_root()?;

    let cycles = Cycles::new(POOL_CANISTER_CYCLES);
    let pid = create_canister(pool_controllers(), cycles.clone()).await?;

    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, None, None, None);
    Ok(pid)
}

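/// Destructively imports an existing canister into the pool: removes it from
/// the registry, wipes its code and controllers, and marks it Ready (or
/// Failed, with the error recorded). Root only.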
pub async fn pool_import_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    if is_local_build() && !is_importable_on_local(pid).await {
        // Local-only cleanup: remove non-importable IDs so they never persist as failures.
        let _ = CanisterPoolStorageOps::take(&pid);
        return Ok(());
    }

    register_or_update_preserving_metadata(
        pid,
        Cycles::default(),
        CanisterPoolStatus::PendingReset,
        None,
        None,
        None,
    );
    let _ = SubnetCanisterRegistryOps::remove(&pid);
    match reset_into_pool(pid).await {
        Ok(cycles) => {
            register_or_update_preserving_metadata(
                pid,
                cycles,
                CanisterPoolStatus::Ready,
                None,
                None,
                None,
            );
        }
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import failed for {pid}: {err}"
            );
            register_or_update_preserving_metadata(
                pid,
                Cycles::default(),
                CanisterPoolStatus::Failed {
                    reason: err.to_string(),
                },
                None,
                None,
                None,
            );
        }
    }
    Ok(())
}

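/// Local-build variant of the queued import: probes each canister for
/// routability before queueing it, dropping IDs that are not importable on
/// the current replica. Returns `(added, requeued, skipped, total)`.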
async fn pool_import_queued_canisters_local(
    pids: Vec<Principal>,
) -> Result<(u64, u64, u64, u64), Error> {
    let total = pids.len() as u64;
    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;

    for pid in pids {
        if SubnetCanisterRegistryOps::get(pid).is_some() {
            skipped += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(pid) {
            if entry.status.is_failed() {
                if !is_importable_on_local(pid).await {
                    let _ = CanisterPoolStorageOps::take(&pid);
                    skipped += 1;
                } else {
                    register_or_update_preserving_metadata(
                        pid,
                        Cycles::default(),
                        CanisterPoolStatus::PendingReset,
                        None,
                        None,
                        None,
                    );
                    requeued += 1;
                }
            } else {
                skipped += 1;
            }
            continue;
        }

        if !is_importable_on_local(pid).await {
            skipped += 1;
            continue;
        }

        register_or_update_preserving_metadata(
            pid,
            Cycles::default(),
            CanisterPoolStatus::PendingReset,
            None,
            None,
            None,
        );
        added += 1;
    }

    if added > 0 || requeued > 0 {
        maybe_schedule_reset_worker();
    }

    Ok((added, requeued, skipped, total))
}

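/// Queues canisters for import as PendingReset, requeueing Failed entries and
/// skipping anything already Ready or present in the registry. Root only.
/// Returns `(added, requeued, skipped, total)`.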
fn pool_import_queued_canisters(pids: Vec<Principal>) -> Result<(u64, u64, u64, u64), Error> {
    pool_import_queued_canisters_inner(pids, true)
}

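/// Shared implementation; `enforce_root` is disabled only from unit tests.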
fn pool_import_queued_canisters_inner(
    pids: Vec<Principal>,
    enforce_root: bool,
) -> Result<(u64, u64, u64, u64), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;

    for pid in &pids {
        if SubnetCanisterRegistryOps::get(*pid).is_some() {
            skipped += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(*pid) {
            if entry.status.is_failed() {
                register_or_update_preserving_metadata(
                    *pid,
                    Cycles::default(),
                    CanisterPoolStatus::PendingReset,
                    None,
                    None,
                    None,
                );
                requeued += 1;
            } else {
                skipped += 1;
            }
            continue;
        }

        register_or_update_preserving_metadata(
            *pid,
            Cycles::default(),
            CanisterPoolStatus::PendingReset,
            None,
            None,
            None,
        );
        added += 1;
    }

    maybe_schedule_reset_worker();

    Ok((added, requeued, skipped, pids.len() as u64))
}

#[cfg(not(test))]
fn maybe_schedule_reset_worker() {
    PoolOps::schedule_reset_worker();
}

#[cfg(test)]
fn maybe_schedule_reset_worker() {
    RESET_SCHEDULED.with_borrow_mut(|flag| *flag = true);
}

#[cfg(test)]
thread_local! {
    static RESET_SCHEDULED: RefCell<bool> = const { RefCell::new(false) };
}

#[cfg(test)]
fn take_reset_scheduled() -> bool {
    RESET_SCHEDULED.with_borrow_mut(|flag| {
        let value = *flag;
        *flag = false;
        value
    })
}

#[cfg(test)]
fn set_test_importable_override(value: Option<bool>) {
    TEST_IMPORTABLE_OVERRIDE.with_borrow_mut(|slot| *slot = value);
}

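/// Requeues Failed pool entries as PendingReset; with `pids: None` the whole
/// pool is scanned. Root only. Returns `(requeued, skipped, total)`.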
fn pool_requeue_failed(pids: Option<Vec<Principal>>) -> Result<(u64, u64, u64), Error> {
    pool_requeue_failed_inner(pids, true)
}

fn pool_requeue_failed_inner(
    pids: Option<Vec<Principal>>,
    enforce_root: bool,
) -> Result<(u64, u64, u64), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut requeued = 0;
    let mut skipped = 0;
    let total;

    if let Some(pids) = pids {
        total = pids.len() as u64;
        for pid in pids {
            if let Some(entry) = CanisterPoolStorageOps::get(pid) {
                if entry.status.is_failed() {
                    register_or_update_preserving_metadata(
                        pid,
                        Cycles::default(),
                        CanisterPoolStatus::PendingReset,
                        None,
                        None,
                        None,
                    );
                    requeued += 1;
                } else {
                    skipped += 1;
                }
            } else {
                skipped += 1;
            }
        }
    } else {
        let entries = CanisterPoolStorageOps::export();
        total = entries.len() as u64;
        for (pid, entry) in entries {
            if entry.status.is_failed() {
                register_or_update_preserving_metadata(
                    pid,
                    Cycles::default(),
                    CanisterPoolStatus::PendingReset,
                    None,
                    None,
                    None,
                );
                requeued += 1;
            } else {
                skipped += 1;
            }
        }
    }

    if requeued > 0 {
        maybe_schedule_reset_worker();
    }

    Ok((requeued, skipped, total))
}

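/// Moves a registry canister back into the pool: captures its role and module
/// hash, removes it from the registry, wipes it, and registers it as Ready.
/// Root only.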
pub async fn pool_recycle_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    let entry =
        SubnetCanisterRegistryOps::get(pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;

    let role = Some(entry.role.clone());
    let hash = entry.module_hash.clone();

    let _ = SubnetCanisterRegistryOps::remove(&pid);

    let cycles = reset_into_pool(pid).await?;
    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, role, None, hash);

    Ok(())
}

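/// Removes a Ready entry from the pool for reassignment, returning its
/// recorded role and module hash. Root only.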
pub async fn pool_export_canister(pid: Principal) -> Result<(CanisterRole, Vec<u8>), Error> {
    OpsError::require_root()?;

    let entry = CanisterPoolStorageOps::take(&pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;

    if !entry.status.is_ready() {
        return Err(PoolOpsError::PoolEntryNotReady { pid }.into());
    }

    let role = entry.role.ok_or(PoolOpsError::MissingType { pid })?;
    let hash = entry
        .module_hash
        .ok_or(PoolOpsError::MissingModuleHash { pid })?;

    Ok((role, hash))
}

//
// ORCHESTRATION HOOK
//

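/// Routes a recycle request through the lifecycle orchestrator instead of
/// calling `pool_recycle_canister` directly.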
pub async fn recycle_via_orchestrator(pid: Principal) -> Result<(), Error> {
    use crate::ops::orchestration::orchestrator::{CanisterLifecycleOrchestrator, LifecycleEvent};

    CanisterLifecycleOrchestrator::apply(LifecycleEvent::RecycleToPool { pid })
        .await
        .map(|_| ())
}

//
// TESTS
//

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        ids::CanisterRole,
        model::memory::{CanisterEntry, pool::CanisterPool, topology::SubnetCanisterRegistry},
    };
    use candid::Principal;

    fn p(id: u8) -> Principal {
        Principal::from_slice(&[id; 29])
    }

    fn reset_state() {
        CanisterPool::clear();
        SubnetCanisterRegistry::clear_for_tests();
        let _ = take_reset_scheduled();
    }

    #[test]
    fn import_queued_registers_pending_entries() {
        reset_state();

        let p1 = p(1);
        let p2 = p(2);

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1, p2], false).unwrap();
        assert_eq!(added, 2);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 0);
        assert_eq!(total, 2);

        let e1 = CanisterPoolStorageOps::get(p1).unwrap();
        let e2 = CanisterPoolStorageOps::get(p2).unwrap();
        assert!(e1.status.is_pending_reset());
        assert!(e2.status.is_pending_reset());
        assert_eq!(e1.cycles, Cycles::default());
        assert_eq!(e2.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_requeues_failed_entries() {
        reset_state();

        let p1 = p(3);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(10),
            CanisterPoolStatus::Failed {
                reason: "nope".to_string(),
            },
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 0);
        assert_eq!(total, 1);
        assert!(take_reset_scheduled());

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_skips_ready_entries() {
        reset_state();

        let p1 = p(4);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(42),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_ready());
        assert_eq!(entry.cycles, Cycles::new(42));
    }

    #[test]
    fn import_queued_skips_registry_canisters() {
        reset_state();

        let pid = p(5);
        SubnetCanisterRegistry::insert_for_tests(CanisterEntry {
            pid,
            role: CanisterRole::new("alpha"),
            parent_pid: None,
            module_hash: None,
            created_at: 0,
        });

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![pid], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());
    }

    #[test]
    fn import_queued_local_skips_non_importable() {
        reset_state();
        set_test_importable_override(Some(false));

        let pid = p(9);
        let (added, requeued, skipped, total) =
            futures::executor::block_on(pool_import_queued_canisters_local(vec![pid])).unwrap();

        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());

        set_test_importable_override(None);
    }

    #[test]
    fn register_or_update_preserves_metadata() {
        reset_state();

        let pid = p(6);
        let role = CanisterRole::new("alpha");
        let parent = p(9);
        let hash = vec![1, 2, 3];

        CanisterPoolStorageOps::register(
            pid,
            Cycles::new(7),
            CanisterPoolStatus::Failed {
                reason: "oops".to_string(),
            },
            Some(role.clone()),
            Some(parent),
            Some(hash.clone()),
        );

        register_or_update_preserving_metadata(
            pid,
            Cycles::default(),
            CanisterPoolStatus::PendingReset,
            None,
            None,
            None,
        );

        let entry = CanisterPoolStorageOps::get(pid).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
        assert_eq!(entry.role, Some(role));
        assert_eq!(entry.parent, Some(parent));
        assert_eq!(entry.module_hash, Some(hash));
    }

    #[test]
    fn requeue_failed_scans_pool_and_schedules() {
        reset_state();

        let failed_pid = p(7);
        let ready_pid = p(8);

        CanisterPoolStorageOps::register(
            failed_pid,
            Cycles::new(11),
            CanisterPoolStatus::Failed {
                reason: "bad".to_string(),
            },
            None,
            None,
            None,
        );
        CanisterPoolStorageOps::register(
            ready_pid,
            Cycles::new(22),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (requeued, skipped, total) = pool_requeue_failed_inner(None, false).unwrap();
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 1);
        assert_eq!(total, 2);
        assert!(take_reset_scheduled());

        let failed_entry = CanisterPoolStorageOps::get(failed_pid).unwrap();
        let ready_entry = CanisterPoolStorageOps::get(ready_pid).unwrap();
        assert!(failed_entry.status.is_pending_reset());
        assert_eq!(failed_entry.cycles, Cycles::default());
        assert!(ready_entry.status.is_ready());
        assert_eq!(ready_entry.cycles, Cycles::new(22));
    }
}