canic_core/ops/mgmt.rs

//! Provisioning helpers for creating, installing, and tearing down canisters.
//!
//! These routines bundle the multi-phase orchestration that root performs when
//! scaling out the topology: reserving cycles, recording registry state,
//! installing WASM modules, and cascading state updates to descendants.

use crate::{
    Error,
    cdk::{api::canister_self, mgmt::CanisterInstallMode},
    config::Config,
    interface::{
        ic::{
            delete_canister as mgmt_delete_canister, deposit_cycles, get_cycles, install_code,
            uninstall_code,
        },
        prelude::*,
    },
    log::Topic,
    ops::{
        CanisterInitPayload, OpsError,
        config::ConfigOps,
        model::memory::{
            EnvOps,
            directory::{AppDirectoryOps, SubnetDirectoryOps},
            env::EnvData,
            reserve::CanisterReserveOps,
            topology::SubnetCanisterRegistryOps,
        },
        sync::state::StateBundle,
        wasm::WasmOps,
    },
};
use candid::Principal;
use canic_types::Cycles;
use thiserror::Error as ThisError;

#[derive(Debug, ThisError)]
pub enum ProvisioningError {
    #[error(transparent)]
    Other(#[from] Error),

    #[error("install failed for {pid}: {source}")]
    InstallFailed { pid: Principal, source: Error },
}

//
// ===========================================================================
// DIRECTORY SYNC
// ===========================================================================
//

/// Rebuild AppDirectory and SubnetDirectory from the registry,
/// import them directly, and return the resulting state bundle.
/// When `updated_ty` is provided, only include the sections that list that type.
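///
/// A minimal call-site sketch (not compiled here; `role` is a placeholder
/// `CanisterRole` obtained elsewhere):
///
/// ```ignore
/// // Rebuild only the directories whose config lists the updated type.
/// let bundle = rebuild_directories_from_registry(Some(&role)).await?;
/// // Passing None rebuilds and imports both directories unconditionally.
/// let full = rebuild_directories_from_registry(None).await?;
/// ```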
pub(crate) async fn rebuild_directories_from_registry(
    updated_ty: Option<&CanisterRole>,
) -> Result<StateBundle, Error> {
    let mut bundle = StateBundle::default();
    let cfg = Config::get();

    // Which directories reference the updated type?
    let include_app = updated_ty.is_none_or(|ty| cfg.app_directory.contains(ty));
    let include_subnet = updated_ty.is_none_or(|ty| {
        ConfigOps::current_subnet()
            .map(|c| c.subnet_directory.contains(ty))
            // default to true if config is unavailable to avoid skipping a needed rebuild
            .unwrap_or(true)
    });

    if include_app {
        let app_view = AppDirectoryOps::root_build_view();
        AppDirectoryOps::import(app_view.clone());
        bundle.app_directory = Some(app_view);
    }

    if include_subnet {
        let subnet_view = SubnetDirectoryOps::root_build_view();
        SubnetDirectoryOps::import(subnet_view.clone());
        bundle.subnet_directory = Some(subnet_view);
    }

    Ok(bundle)
}

//
// ===========================================================================
// HIGH-LEVEL FLOW
// ===========================================================================
//

/// Create and install a new canister of the requested type beneath `parent_pid`.
///
/// PHASES:
/// 1. Allocate a canister ID and cycles (preferring the reserve pool)
/// 2. Install WASM + bootstrap initial state
/// 3. Register canister in SubnetCanisterRegistry
/// 4. Cascade topology + sync directories
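///
/// A hedged call-site sketch (not compiled here; `role` is a placeholder and
/// the parent is assumed to be the calling root canister):
///
/// ```ignore
/// let pid = create_and_install_canister(&role, canister_self(), None).await?;
/// // `pid` is now registered in the subnet registry and running the WASM
/// // configured for `role`.
/// ```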
pub async fn create_and_install_canister(
    ty: &CanisterRole,
    parent_pid: Principal,
    extra_arg: Option<Vec<u8>>,
) -> Result<Principal, ProvisioningError> {
    // Fail fast if no WASM module is registered for this type
    WasmOps::try_get(ty)?;

    // Phase 1: allocation
    let pid = allocate_canister(ty).await?;

    // Phase 2: installation
    if let Err(err) = install_canister(pid, ty, parent_pid, extra_arg).await {
        return Err(ProvisioningError::InstallFailed { pid, source: err });
    }

    Ok(pid)
}

//
// ===========================================================================
// DELETION
// ===========================================================================
//

/// Delete an existing canister.
///
/// PHASES:
/// 0. Uninstall code
/// 1. Delete via management canister
/// 2. Remove from SubnetCanisterRegistry
/// 3. Cascade topology
/// 4. Sync directories
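///
/// Call-site sketch (root-only; `pid` is a placeholder principal):
///
/// ```ignore
/// let (removed_ty, parent_pid) = delete_canister(pid).await?;
/// // The returned type and parent give the caller what it needs to cascade
/// // topology updates and resync directories.
/// ```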
pub async fn delete_canister(
    pid: Principal,
) -> Result<(Option<CanisterRole>, Option<Principal>), Error> {
    OpsError::require_root()?;
    let parent_pid = SubnetCanisterRegistryOps::get_parent(pid);

    // Phase 0: uninstall code
    uninstall_code(pid).await?;

    // Phase 1: delete the canister
    mgmt_delete_canister(pid).await?;

    // Phase 2: remove registry record
    let removed_entry = SubnetCanisterRegistryOps::remove(&pid);
    match &removed_entry {
        Some(c) => log!(
            Topic::CanisterLifecycle,
            Ok,
            "🗑️ delete_canister: {} ({})",
            pid,
            c.ty
        ),
        None => log!(
            Topic::CanisterLifecycle,
            Warn,
            "🗑️ delete_canister: {pid} not in registry"
        ),
    }

    Ok((removed_entry.map(|e| e.ty), parent_pid))
}

/// Uninstall code from a canister without deleting it.
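///
/// Call-site sketch (not compiled here; `pid` is a placeholder):
///
/// ```ignore
/// // Clears code and state but keeps the canister ID and its cycles.
/// uninstall_canister(pid).await?;
/// ```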
pub async fn uninstall_canister(pid: Principal) -> Result<(), Error> {
    uninstall_code(pid).await?;

    log!(Topic::CanisterLifecycle, Ok, "🗑️ uninstall_canister: {pid}");

    Ok(())
}

//
// ===========================================================================
// PHASE 1 — ALLOCATION (Reserve → Create)
// ===========================================================================
//

/// Allocate a canister ID and ensure it meets the initial cycle target.
///
/// Reuses a canister from the reserve if available; otherwise creates a new one.
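///
/// Call-site sketch (not compiled here; `role` is a placeholder):
///
/// ```ignore
/// // Pops from the reserve pool and tops the canister up to the configured
/// // `initial_cycles` target, or creates a fresh canister if the pool is empty.
/// let pid = allocate_canister(&role).await?;
/// ```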
pub async fn allocate_canister(ty: &CanisterRole) -> Result<Principal, Error> {
    // use ConfigOps for a clean, ops-layer config lookup
    let cfg = ConfigOps::current_subnet_canister(ty)?;

    let target = cfg.initial_cycles;

    // Reuse from reserve
    if let Some((pid, _)) = CanisterReserveOps::pop_first() {
        let mut current = get_cycles(pid).await?;

        if current < target {
            // current < target, so the shortfall is always non-zero
            let missing = target.to_u128().saturating_sub(current.to_u128());
            deposit_cycles(pid, missing).await?;
            current = Cycles::new(current.to_u128() + missing);

            log!(
                Topic::CanisterReserve,
                Ok,
                "⚡ allocate_canister: topped up {pid} by {} to meet target {}",
                Cycles::from(missing),
                target
            );
        }

        log!(
            Topic::CanisterReserve,
            Ok,
            "⚡ allocate_canister: reusing {pid} from pool (current {current})"
        );

        return Ok(pid);
    }

    // Pool empty: create a new canister funded with the target cycles
    let pid = create_canister(target).await?;
    log!(
        Topic::CanisterReserve,
        Info,
        "⚡ allocate_canister: pool empty, created {pid}"
    );

    Ok(pid)
}

/// Create a fresh canister on the IC with the configured controllers.
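///
/// Crate-internal sketch (the cycle amount is illustrative only):
///
/// ```ignore
/// let pid = create_canister(Cycles::new(1_000_000_000_000)).await?;
/// // Controllers are taken from Config, plus root itself.
/// ```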
pub(crate) async fn create_canister(cycles: Cycles) -> Result<Principal, Error> {
    let mut controllers = Config::get().controllers.clone();
    controllers.push(canister_self()); // root always controls

    let pid = crate::interface::ic::canister::create_canister(controllers, cycles.clone()).await?;

    log!(
        Topic::CanisterLifecycle,
        Ok,
        "⚡ create_canister: {pid} ({cycles})"
    );

    Ok(pid)
}

//
// ===========================================================================
// PHASE 2 — INSTALLATION
// ===========================================================================
//

/// Install WASM and initial state into a new canister.
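///
/// Internal flow sketch (not compiled here; `pid`, `role`, and `parent_pid`
/// are placeholders):
///
/// ```ignore
/// // Registers `pid` first so init hooks can observe the registry entry,
/// // then installs the WASM; the entry is rolled back if install fails.
/// install_canister(pid, &role, parent_pid, None).await?;
/// ```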
#[allow(clippy::cast_precision_loss)]
async fn install_canister(
    pid: Principal,
    ty: &CanisterRole,
    parent_pid: Principal,
    extra_arg: Option<Vec<u8>>,
) -> Result<(), Error> {
    // Fetch the registered WASM module for this type
    let wasm = WasmOps::try_get(ty)?;

    // Construct init payload
    let env = EnvData {
        prime_root_pid: Some(EnvOps::try_get_prime_root_pid()?),
        subnet_type: Some(EnvOps::try_get_subnet_type()?),
        subnet_pid: Some(EnvOps::try_get_subnet_pid()?),
        root_pid: Some(EnvOps::try_get_root_pid()?),
        canister_type: Some(ty.clone()),
        parent_pid: Some(parent_pid),
    };

    let payload = CanisterInitPayload {
        env,
        app_directory: AppDirectoryOps::export(),
        subnet_directory: SubnetDirectoryOps::export(),
    };

    let module_hash = wasm.module_hash();

    // Register before install so init hooks can observe the registry entry;
    // without it, an init() that creates a canister via root would panic.
    // Rolled back below if the install fails.
    SubnetCanisterRegistryOps::register(pid, ty, parent_pid, module_hash.clone());

    if let Err(err) = install_code(
        CanisterInstallMode::Install,
        pid,
        wasm.bytes(),
        (payload, extra_arg),
    )
    .await
    {
        let removed = SubnetCanisterRegistryOps::remove(&pid);
        if removed.is_none() {
            log!(
                Topic::CanisterLifecycle,
                Warn,
                "⚠️ install_canister rollback: {pid} missing from registry after failed install"
            );
        }

        return Err(err);
    }

    log!(
        Topic::CanisterLifecycle,
        Ok,
        "⚡ install_canister: {pid} ({ty}, {:.2} KiB)",
        wasm.len() as f64 / 1_024.0,
    );

    Ok(())
}

//
// ===========================================================================