pub use crate::ops::storage::pool::{CanisterPoolEntry, CanisterPoolStatus, CanisterPoolView};

use crate::{
    Error, ThisError,
    cdk::{
        api::canister_self,
        futures::spawn,
        mgmt::{CanisterSettings, UpdateSettingsArgs},
        types::Principal,
    },
    config::Config,
    log::Topic,
    ops::{
        OPS_POOL_CHECK_INTERVAL, OPS_POOL_INIT_DELAY, OpsError,
        config::ConfigOps,
        ic::{
            Network, build_network, canister_status, get_cycles,
            mgmt::{create_canister, uninstall_code},
            timer::{TimerId, TimerOps},
            update_settings,
        },
        prelude::*,
        storage::{pool::CanisterPoolStorageOps, topology::SubnetCanisterRegistryOps},
    },
    types::{Cycles, TC},
};
use candid::CandidType;
use serde::Deserialize;
use std::{cell::RefCell, time::Duration};

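/// Serializes pool-reset work: a zero-delay guarded timer drains entries in
/// the `PendingReset` state in batches, re-arming itself while work remains
/// so only one worker runs at a time.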
mod reset_scheduler {
    use super::*;

    thread_local! {
        static RESET_IN_PROGRESS: RefCell<bool> = const { RefCell::new(false) };
        static RESET_RESCHEDULE: RefCell<bool> = const { RefCell::new(false) };
        static RESET_TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
    }

    pub fn schedule() {
        let _ = TimerOps::set_guarded(&RESET_TIMER, Duration::ZERO, "pool:pending", async {
            RESET_TIMER.with_borrow_mut(|slot| *slot = None);
            let _ = run_worker(super::POOL_RESET_BATCH_SIZE).await;
        });
    }

    fn maybe_reschedule() {
        let reschedule = RESET_RESCHEDULE.with_borrow_mut(|f| {
            let v = *f;
            *f = false;
            v
        });

        if reschedule || has_pending_reset() {
            schedule();
        }
    }

    async fn run_worker(limit: usize) -> Result<(), Error> {
        if limit == 0 {
            return Ok(());
        }

        let should_run = RESET_IN_PROGRESS.with_borrow_mut(|flag| {
            if *flag {
                RESET_RESCHEDULE.with_borrow_mut(|r| *r = true);
                false
            } else {
                *flag = true;
                true
            }
        });

        if !should_run {
            return Ok(());
        }

        let result = run_batch(limit).await;

        RESET_IN_PROGRESS.with_borrow_mut(|f| *f = false);
        maybe_reschedule();

        result
    }

    async fn run_batch(limit: usize) -> Result<(), Error> {
        let mut pending: Vec<_> = CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_pending_reset())
            .collect();

        if pending.is_empty() {
            return Ok(());
        }

        pending.sort_by_key(|(_, e)| e.created_at);

        for (pid, mut entry) in pending.into_iter().take(limit) {
            if !super::can_enter_pool(pid).await {
                let _ = CanisterPoolStorageOps::take(&pid);
                continue;
            }

            match super::reset_into_pool(pid).await {
                Ok(cycles) => {
                    entry.cycles = cycles;
                    entry.status = CanisterPoolStatus::Ready;
                }
                Err(err) => {
                    entry.status = CanisterPoolStatus::Failed {
                        reason: err.to_string(),
                    };
                    log!(
                        Topic::CanisterPool,
                        Warn,
                        "pool reset failed for {pid}: {err}"
                    );
                }
            }

            if !CanisterPoolStorageOps::update(pid, entry) {
                log!(
                    Topic::CanisterPool,
                    Warn,
                    "pool reset update missing for {pid}"
                );
            }
        }

        Ok(())
    }

    fn has_pending_reset() -> bool {
        CanisterPoolStorageOps::export()
            .into_iter()
            .any(|(_, e)| e.status.is_pending_reset())
    }

    #[cfg(test)]
    thread_local! {
        static RESET_SCHEDULED: RefCell<bool> = const { RefCell::new(false) };
    }

    #[cfg(test)]
    pub fn mark_scheduled_for_test() {
        RESET_SCHEDULED.with_borrow_mut(|f| *f = true);
    }

    #[cfg(test)]
    pub fn take_scheduled_for_test() -> bool {
        RESET_SCHEDULED.with_borrow_mut(|flag| {
            let value = *flag;
            *flag = false;
            value
        })
    }
}

#[cfg(test)]
thread_local! {
    static TEST_IMPORTABLE_OVERRIDE: RefCell<Option<bool>> = const { RefCell::new(None) };
}

thread_local! {
    static TIMER: RefCell<Option<TimerId>> = const { RefCell::new(None) };
}

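/// Cycles attached to each freshly created pool canister.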
const POOL_CANISTER_CYCLES: u128 = 5 * TC;

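/// Maximum number of pending-reset entries processed per worker run.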
const POOL_RESET_BATCH_SIZE: usize = 10;

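/// Errors surfaced by pool operations.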
#[derive(Debug, ThisError)]
pub enum PoolOpsError {
    #[error("pool entry missing for {pid}")]
    PoolEntryMissing { pid: Principal },

    #[error("pool import blocked for {pid}: canister is still registered in subnet registry")]
    ImportBlockedRegistered { pid: Principal },

    #[error("missing module hash for pool entry {pid}")]
    MissingModuleHash { pid: Principal },

    #[error("missing role for pool entry {pid}")]
    MissingType { pid: Principal },

    #[error("pool entry {pid} is not ready")]
    PoolEntryNotReady { pid: Principal },
}

impl From<PoolOpsError> for Error {
    fn from(err: PoolOpsError) -> Self {
        OpsError::from(err).into()
    }
}

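/// Administrative commands accepted by [`PoolOps::admin`].
///
/// A sketch of queueing canisters for import (`pid_a` and `pid_b` are
/// placeholder principals):
///
/// ```ignore
/// let resp = PoolOps::admin(PoolAdminCommand::ImportQueued {
///     pids: vec![pid_a, pid_b],
/// })
/// .await?;
/// ```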
#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminCommand {
    CreateEmpty,
    Recycle { pid: Principal },
    ImportImmediate { pid: Principal },
    ImportQueued { pids: Vec<Principal> },
    RequeueFailed { pids: Option<Vec<Principal>> },
}

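/// Responses returned by [`PoolOps::admin`], one per command.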
#[derive(CandidType, Clone, Debug, Deserialize, Eq, PartialEq)]
pub enum PoolAdminResponse {
    Created {
        pid: Principal,
    },
    Recycled,
    Imported,
    QueuedImported {
        added: u64,
        requeued: u64,
        skipped: u64,
        total: u64,
    },
    FailedRequeued {
        requeued: u64,
        skipped: u64,
        total: u64,
    },
}

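/// Controllers applied to pooled canisters: the configured controllers plus
/// this (root) canister itself.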
fn pool_controllers() -> Vec<Principal> {
    let mut controllers = Config::get().controllers.clone();

    let root = canister_self();
    if !controllers.contains(&root) {
        controllers.push(root);
    }

    controllers
}

fn is_local_build() -> bool {
    build_network() == Some(Network::Local)
}

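/// On local builds, probes the canister via `canister_status` to confirm it
/// exists on the local replica; non-local builds always report importable.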
async fn is_importable_on_local(pid: Principal) -> bool {
    // Test hook: unit tests can force the importability decision.
    #[cfg(test)]
    if let Some(override_value) = TEST_IMPORTABLE_OVERRIDE.with(|slot| *slot.borrow()) {
        return override_value;
    }

    if !is_local_build() {
        return true;
    }

    // A failing status probe on a local replica usually means the canister
    // does not exist there, so importing it would only fail later.
    match canister_status(pid).await {
        Ok(_) => true,
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import skipped for {pid} (local non-importable): {err}"
            );
            false
        }
    }
}

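/// Gate applied before a canister may enter the pool. Delegates to
/// [`is_importable_on_local`], which is permissive on non-local builds.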
async fn can_enter_pool(pid: Principal) -> bool {
    // `is_importable_on_local` already returns true on non-local builds, so
    // delegate directly; this also keeps the test override authoritative.
    is_importable_on_local(pid).await
}

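/// Reclaims a canister for the pool: takes over its controllers, uninstalls
/// its code, and returns the remaining cycle balance.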
async fn reset_into_pool(pid: Principal) -> Result<Cycles, Error> {
    update_settings(&UpdateSettingsArgs {
        canister_id: pid,
        settings: CanisterSettings {
            controllers: Some(pool_controllers()),
            ..Default::default()
        },
    })
    .await?;

    uninstall_code(pid).await?;

    get_cycles(pid).await
}

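/// Updates an existing pool entry in place, keeping any previously recorded
/// role, parent, and module hash; registers a fresh entry otherwise.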
fn register_or_update_preserving_metadata(
    pid: Principal,
    cycles: Cycles,
    status: CanisterPoolStatus,
    role: Option<CanisterRole>,
    parent: Option<Principal>,
    module_hash: Option<Vec<u8>>,
) {
    if let Some(mut entry) = CanisterPoolStorageOps::get(pid) {
        entry.cycles = cycles;
        entry.status = status;
        entry.role = role.or(entry.role);
        entry.parent = parent.or(entry.parent);
        entry.module_hash = module_hash.or(entry.module_hash);
        let _ = CanisterPoolStorageOps::update(pid, entry);
    } else {
        CanisterPoolStorageOps::register(pid, cycles, status, role, parent, module_hash);
    }
}

fn mark_pending_reset(pid: Principal) {
    register_or_update_preserving_metadata(
        pid,
        Cycles::default(),
        CanisterPoolStatus::PendingReset,
        None,
        None,
        None,
    );
}

fn mark_ready(pid: Principal, cycles: Cycles) {
    register_or_update_preserving_metadata(
        pid,
        cycles,
        CanisterPoolStatus::Ready,
        None,
        None,
        None,
    );
}

fn mark_failed(pid: Principal, err: &Error) {
    register_or_update_preserving_metadata(
        pid,
        Cycles::default(),
        CanisterPoolStatus::Failed {
            reason: err.to_string(),
        },
        None,
        None,
        None,
    );
}
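
/// Entry points for canister-pool maintenance and administration.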
pub struct PoolOps;

impl PoolOps {
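    /// Arms the guarded init/interval timers that keep the pool topped up.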
    pub fn start() {
        let _ = TimerOps::set_guarded_interval(
            &TIMER,
            OPS_POOL_INIT_DELAY,
            "pool:init",
            || async {
                let _ = Self::check();
            },
            OPS_POOL_CHECK_INTERVAL,
            "pool:interval",
            || async {
                let _ = Self::check();
            },
        );
    }

    pub fn stop() {
        let _ = TimerOps::clear_guarded(&TIMER);
    }

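    /// Schedules pending resets, then tops the pool up toward the configured
    /// minimum size. Returns how many creations were kicked off, capped at
    /// 10 per check.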
    #[must_use]
    pub fn check() -> u64 {
        reset_scheduler::schedule();

        let subnet_cfg = ConfigOps::current_subnet();
        let min_size: u64 = subnet_cfg.pool.minimum_size.into();
        let ready_size = Self::ready_len();

        if ready_size >= min_size {
            return 0;
        }

        let missing = (min_size - ready_size).min(10);
        log!(
            Topic::CanisterPool,
            Ok,
            "pool low: {ready_size}/{min_size}, creating {missing}"
        );

        spawn(async move {
            for i in 0..missing {
                match pool_create_canister().await {
                    Ok(_) => log!(
                        Topic::CanisterPool,
                        Ok,
                        "created pool canister {}/{}",
                        i + 1,
                        missing
                    ),
                    Err(e) => log!(Topic::CanisterPool, Warn, "pool creation failed: {e:?}"),
                }
            }
        });

        missing
    }

    #[must_use]
    pub fn pop_ready() -> Option<(Principal, CanisterPoolEntry)> {
        CanisterPoolStorageOps::pop_ready()
    }

    #[must_use]
    pub fn contains(pid: &Principal) -> bool {
        CanisterPoolStorageOps::contains(pid)
    }

    #[must_use]
    pub fn export() -> CanisterPoolView {
        CanisterPoolStorageOps::export()
    }

    pub async fn admin(cmd: PoolAdminCommand) -> Result<PoolAdminResponse, Error> {
        match cmd {
            PoolAdminCommand::CreateEmpty => {
                let pid = pool_create_canister().await?;
                Ok(PoolAdminResponse::Created { pid })
            }
            PoolAdminCommand::Recycle { pid } => {
                pool_recycle_canister(pid).await?;
                Ok(PoolAdminResponse::Recycled)
            }
            PoolAdminCommand::ImportImmediate { pid } => {
                pool_import_canister(pid).await?;
                Ok(PoolAdminResponse::Imported)
            }
            PoolAdminCommand::ImportQueued { pids } => {
                let (a, r, s, t) = if is_local_build() {
                    pool_import_queued_canisters_local(pids).await?
                } else {
                    pool_import_queued_canisters(pids)?
                };
                Ok(PoolAdminResponse::QueuedImported {
                    added: a,
                    requeued: r,
                    skipped: s,
                    total: t,
                })
            }
            PoolAdminCommand::RequeueFailed { pids } => {
                let (requeued, skipped, total) = pool_requeue_failed(pids)?;
                Ok(PoolAdminResponse::FailedRequeued {
                    requeued,
                    skipped,
                    total,
                })
            }
        }
    }

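    /// Number of pool entries currently in the `Ready` state.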
    fn ready_len() -> u64 {
        CanisterPoolStorageOps::export()
            .into_iter()
            .filter(|(_, e)| e.status.is_ready())
            .count() as u64
    }
}

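/// Creates a fresh canister with the standard pool cycle allowance and
/// registers it as `Ready`. Root-only.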
pub async fn pool_create_canister() -> Result<Principal, Error> {
    OpsError::require_root()?;

    let cycles = Cycles::new(POOL_CANISTER_CYCLES);
    let pid = create_canister(pool_controllers(), cycles.clone()).await?;

    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, None, None, None);
    Ok(pid)
}

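/// Immediately imports an existing canister into the pool, resetting it in
/// the same call. Refuses canisters still present in the subnet registry.
/// Root-only.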
pub async fn pool_import_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    if SubnetCanisterRegistryOps::get(pid).is_some() {
        return Err(PoolOpsError::ImportBlockedRegistered { pid }.into());
    }

    if !can_enter_pool(pid).await {
        let _ = CanisterPoolStorageOps::take(&pid);
        return Ok(());
    }

    mark_pending_reset(pid);

    match reset_into_pool(pid).await {
        Ok(cycles) => {
            let _ = SubnetCanisterRegistryOps::remove(&pid);
            mark_ready(pid, cycles);
        }
        Err(err) => {
            log!(
                Topic::CanisterPool,
                Warn,
                "pool import failed for {pid}: {err}"
            );
            mark_failed(pid, &err);
            return Err(err);
        }
    }

    Ok(())
}

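/// Local-build variant of queued import: awaits an importability probe per
/// canister before queuing it for reset. Returns `(added, requeued, skipped,
/// total)`.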
async fn pool_import_queued_canisters_local(
    pids: Vec<Principal>,
) -> Result<(u64, u64, u64, u64), Error> {
    let total = pids.len() as u64;
    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;

    for pid in pids {
        if SubnetCanisterRegistryOps::get(pid).is_some() {
            skipped += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(pid) {
            if entry.status.is_failed() {
                if can_enter_pool(pid).await {
                    mark_pending_reset(pid);
                    requeued += 1;
                } else {
                    let _ = CanisterPoolStorageOps::take(&pid);
                    skipped += 1;
                }
            } else {
                skipped += 1;
            }
            continue;
        }

        if !can_enter_pool(pid).await {
            skipped += 1;
            continue;
        }

        mark_pending_reset(pid);
        added += 1;
    }

    if added > 0 || requeued > 0 {
        maybe_schedule_reset_worker();
    }

    Ok((added, requeued, skipped, total))
}

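/// Queues canisters for import without probing: entries become `PendingReset`
/// and the reset worker picks them up. Returns `(added, requeued, skipped,
/// total)`. Root-only.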
fn pool_import_queued_canisters(pids: Vec<Principal>) -> Result<(u64, u64, u64, u64), Error> {
    pool_import_queued_canisters_inner(pids, true)
}

fn pool_import_queued_canisters_inner(
    pids: Vec<Principal>,
    enforce_root: bool,
) -> Result<(u64, u64, u64, u64), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut added = 0;
    let mut requeued = 0;
    let mut skipped = 0;

    for pid in &pids {
        if SubnetCanisterRegistryOps::get(*pid).is_some() {
            skipped += 1;
            continue;
        }

        if let Some(entry) = CanisterPoolStorageOps::get(*pid) {
            if entry.status.is_failed() {
                mark_pending_reset(*pid);
                requeued += 1;
            } else {
                skipped += 1;
            }
            continue;
        }

        mark_pending_reset(*pid);
        added += 1;
    }

    maybe_schedule_reset_worker();

    Ok((added, requeued, skipped, pids.len() as u64))
}

#[cfg(not(test))]
fn maybe_schedule_reset_worker() {
    reset_scheduler::schedule();
}

#[cfg(test)]
fn maybe_schedule_reset_worker() {
    reset_scheduler::mark_scheduled_for_test();
}

#[cfg(test)]
fn take_reset_scheduled() -> bool {
    reset_scheduler::take_scheduled_for_test()
}

#[cfg(test)]
fn set_test_importable_override(value: Option<bool>) {
    TEST_IMPORTABLE_OVERRIDE.with_borrow_mut(|slot| *slot = value);
}

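/// Requeues `Failed` entries for another reset attempt; with `None`, scans
/// the whole pool. Returns `(requeued, skipped, total)`. Root-only.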
fn pool_requeue_failed(pids: Option<Vec<Principal>>) -> Result<(u64, u64, u64), Error> {
    pool_requeue_failed_inner(pids, true)
}

fn pool_requeue_failed_inner(
    pids: Option<Vec<Principal>>,
    enforce_root: bool,
) -> Result<(u64, u64, u64), Error> {
    if enforce_root {
        OpsError::require_root()?;
    }

    let mut requeued = 0;
    let mut skipped = 0;
    let total;

    if let Some(pids) = pids {
        total = pids.len() as u64;
        for pid in pids {
            if let Some(entry) = CanisterPoolStorageOps::get(pid) {
                if entry.status.is_failed() {
                    mark_pending_reset(pid);
                    requeued += 1;
                } else {
                    skipped += 1;
                }
            } else {
                skipped += 1;
            }
        }
    } else {
        let entries = CanisterPoolStorageOps::export();
        total = entries.len() as u64;
        for (pid, entry) in entries {
            if entry.status.is_failed() {
                mark_pending_reset(pid);
                requeued += 1;
            } else {
                skipped += 1;
            }
        }
    }

    if requeued > 0 {
        maybe_schedule_reset_worker();
    }

    Ok((requeued, skipped, total))
}

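/// Moves a registered canister back into the pool, preserving its role and
/// module hash for later reuse. Root-only.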
pub async fn pool_recycle_canister(pid: Principal) -> Result<(), Error> {
    OpsError::require_root()?;

    let entry =
        SubnetCanisterRegistryOps::get(pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;

    let role = Some(entry.role.clone());
    let hash = entry.module_hash.clone();

    let cycles = reset_into_pool(pid).await?;
    let _ = SubnetCanisterRegistryOps::remove(&pid);
    CanisterPoolStorageOps::register(pid, cycles, CanisterPoolStatus::Ready, role, None, hash);

    Ok(())
}

739
740pub async fn pool_export_canister(pid: Principal) -> Result<(CanisterRole, Vec<u8>), Error> {
741 OpsError::require_root()?;
742
743 let entry = CanisterPoolStorageOps::take(&pid).ok_or(PoolOpsError::PoolEntryMissing { pid })?;
744
745 if !entry.status.is_ready() {
746 return Err(PoolOpsError::PoolEntryNotReady { pid }.into());
747 }
748
749 let role = entry.role.ok_or(PoolOpsError::MissingType { pid })?;
750 let hash = entry
751 .module_hash
752 .ok_or(PoolOpsError::MissingModuleHash { pid })?;
753
754 Ok((role, hash))
755}
756
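/// Recycles a canister through the lifecycle orchestrator so the transition
/// stays consistent with other lifecycle events.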
pub async fn recycle_via_orchestrator(pid: Principal) -> Result<(), Error> {
    use crate::ops::orchestration::orchestrator::{CanisterLifecycleOrchestrator, LifecycleEvent};

    CanisterLifecycleOrchestrator::apply(LifecycleEvent::RecycleToPool { pid })
        .await
        .map(|_| ())
}

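// Tests exercise the `_inner` variants with root enforcement disabled so the
// queueing/requeueing bookkeeping can run without IC management calls.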
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        ids::CanisterRole,
        model::memory::{CanisterEntry, pool::CanisterPool, topology::SubnetCanisterRegistry},
    };
    use candid::Principal;

    fn p(id: u8) -> Principal {
        Principal::from_slice(&[id; 29])
    }

    fn reset_state() {
        CanisterPool::clear();
        SubnetCanisterRegistry::clear_for_tests();
        let _ = take_reset_scheduled();
    }

    #[test]
    fn import_queued_registers_pending_entries() {
        reset_state();

        let p1 = p(1);
        let p2 = p(2);

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1, p2], false).unwrap();
        assert_eq!(added, 2);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 0);
        assert_eq!(total, 2);

        let e1 = CanisterPoolStorageOps::get(p1).unwrap();
        let e2 = CanisterPoolStorageOps::get(p2).unwrap();
        assert!(e1.status.is_pending_reset());
        assert!(e2.status.is_pending_reset());
        assert_eq!(e1.cycles, Cycles::default());
        assert_eq!(e2.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_requeues_failed_entries() {
        reset_state();

        let p1 = p(3);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(10),
            CanisterPoolStatus::Failed {
                reason: "nope".to_string(),
            },
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 0);
        assert_eq!(total, 1);
        assert!(take_reset_scheduled());

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
    }

    #[test]
    fn import_queued_skips_ready_entries() {
        reset_state();

        let p1 = p(4);
        CanisterPoolStorageOps::register(
            p1,
            Cycles::new(42),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![p1], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);

        let entry = CanisterPoolStorageOps::get(p1).unwrap();
        assert!(entry.status.is_ready());
        assert_eq!(entry.cycles, Cycles::new(42));
    }

    #[test]
    fn import_queued_skips_registry_canisters() {
        reset_state();

        let pid = p(5);
        SubnetCanisterRegistry::insert_for_tests(CanisterEntry {
            pid,
            role: CanisterRole::new("alpha"),
            parent_pid: None,
            module_hash: None,
            created_at: 0,
        });

        let (added, requeued, skipped, total) =
            pool_import_queued_canisters_inner(vec![pid], false).unwrap();
        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());
    }

    #[test]
    fn import_queued_local_skips_non_importable() {
        reset_state();
        set_test_importable_override(Some(false));

        let pid = p(9);
        let (added, requeued, skipped, total) =
            futures::executor::block_on(pool_import_queued_canisters_local(vec![pid])).unwrap();

        assert_eq!(added, 0);
        assert_eq!(requeued, 0);
        assert_eq!(skipped, 1);
        assert_eq!(total, 1);
        assert!(CanisterPoolStorageOps::get(pid).is_none());

        set_test_importable_override(None);
    }

    #[test]
    fn register_or_update_preserves_metadata() {
        reset_state();

        let pid = p(6);
        let role = CanisterRole::new("alpha");
        let parent = p(9);
        let hash = vec![1, 2, 3];

        CanisterPoolStorageOps::register(
            pid,
            Cycles::new(7),
            CanisterPoolStatus::Failed {
                reason: "oops".to_string(),
            },
            Some(role.clone()),
            Some(parent),
            Some(hash.clone()),
        );

        mark_pending_reset(pid);

        let entry = CanisterPoolStorageOps::get(pid).unwrap();
        assert!(entry.status.is_pending_reset());
        assert_eq!(entry.cycles, Cycles::default());
        assert_eq!(entry.role, Some(role));
        assert_eq!(entry.parent, Some(parent));
        assert_eq!(entry.module_hash, Some(hash));
    }

    #[test]
    fn requeue_failed_scans_pool_and_schedules() {
        reset_state();

        let failed_pid = p(7);
        let ready_pid = p(8);

        CanisterPoolStorageOps::register(
            failed_pid,
            Cycles::new(11),
            CanisterPoolStatus::Failed {
                reason: "bad".to_string(),
            },
            None,
            None,
            None,
        );
        CanisterPoolStorageOps::register(
            ready_pid,
            Cycles::new(22),
            CanisterPoolStatus::Ready,
            None,
            None,
            None,
        );

        let (requeued, skipped, total) = pool_requeue_failed_inner(None, false).unwrap();
        assert_eq!(requeued, 1);
        assert_eq!(skipped, 1);
        assert_eq!(total, 2);
        assert!(take_reset_scheduled());

        let failed_entry = CanisterPoolStorageOps::get(failed_pid).unwrap();
        let ready_entry = CanisterPoolStorageOps::get(ready_pid).unwrap();
        assert!(failed_entry.status.is_pending_reset());
        assert_eq!(failed_entry.cycles, Cycles::default());
        assert!(ready_entry.status.is_ready());
        assert_eq!(ready_entry.cycles, Cycles::new(22));
    }
}