//! Root-baseline helpers for the canic PocketIC testkit (`canic_testkit/pic/root.rs`).
use super::{CachedPicBaseline, Pic, pic};
use crate::artifacts::{WasmBuildProfile, build_dfx_all_with_env, dfx_artifact_ready_for_build};
use canic::{
    Error,
    cdk::types::Principal,
    dto::{
        page::{Page, PageRequest},
        topology::DirectoryEntryResponse,
    },
    ids::CanisterRole,
    protocol,
};
use canic_control_plane::{
    dto::template::{
        TemplateChunkInput, TemplateChunkSetInfoResponse, TemplateChunkSetPrepareInput,
        TemplateManifestInput,
    },
    ids::{
        TemplateChunkingMode, TemplateId, TemplateManifestState, TemplateVersion, WasmStoreBinding,
    },
};
use std::{collections::HashMap, fs, io, io::Write, path::PathBuf, time::Instant};
23
24///
25/// RootBaselineSpec
26///
27
28#[derive(Clone)]
29pub struct RootBaselineSpec<'a> {
30    pub progress_prefix: &'a str,
31    pub workspace_root: PathBuf,
32    pub root_wasm_relative: &'a str,
33    pub root_wasm_artifact_relative: &'a str,
34    pub root_release_artifacts_relative: &'a str,
35    pub artifact_watch_paths: &'a [&'a str],
36    pub release_roles: &'a [&'a str],
37    pub dfx_build_lock_relative: &'a str,
38    pub build_network: &'a str,
39    pub build_profile: WasmBuildProfile,
40    pub build_extra_env: &'a [(&'a str, &'a str)],
41    pub bootstrap_tick_limit: usize,
42    pub root_setup_max_attempts: usize,
43    pub pocket_ic_wasm_chunk_store_limit_bytes: usize,
44    pub root_release_chunk_bytes: usize,
45    pub package_version: &'a str,
46}
47
48///
49/// RootBaselineMetadata
50///
51
52pub struct RootBaselineMetadata {
53    pub root_id: Principal,
54    pub subnet_directory: HashMap<CanisterRole, Principal>,
55}
56
57// Print one progress line for a root-test setup phase and flush immediately.
58fn progress(spec: &RootBaselineSpec<'_>, phase: &str) {
59    eprintln!("[{}] {phase}", spec.progress_prefix);
60    let _ = std::io::stderr().flush();
61}
62
63// Print one completed phase with wall-clock timing.
64fn progress_elapsed(spec: &RootBaselineSpec<'_>, phase: &str, started_at: Instant) {
65    progress(
66        spec,
67        &format!("{phase} in {:.2}s", started_at.elapsed().as_secs_f32()),
68    );
69}
70
71/// Build the local `.dfx` root artifacts once unless all required outputs are already fresh.
72pub fn ensure_root_release_artifacts_built(spec: &RootBaselineSpec<'_>) {
73    if root_release_artifacts_ready(spec) {
74        progress(spec, "reusing existing root release artifacts");
75        return;
76    }
77
78    progress(spec, "building local DFX artifacts for root baseline");
79    let started_at = Instant::now();
80    build_dfx_all_with_env(
81        &spec.workspace_root,
82        spec.dfx_build_lock_relative,
83        spec.build_network,
84        spec.build_profile,
85        spec.build_extra_env,
86    );
87    progress_elapsed(spec, "finished local DFX artifact build", started_at);
88}
89
90/// Load the built `root.wasm.gz` artifact used for PocketIC root installs.
91#[must_use]
92pub fn load_root_wasm(spec: &RootBaselineSpec<'_>) -> Option<Vec<u8>> {
93    let path = spec.workspace_root.join(spec.root_wasm_relative);
94    match fs::read(&path) {
95        Ok(bytes) => {
96            assert!(
97                bytes.len() < spec.pocket_ic_wasm_chunk_store_limit_bytes,
98                "root wasm artifact is too large for PocketIC chunked install: {} bytes at {}. \
99Use a compressed `.wasm.gz` artifact and/or build canister wasm with `RUSTFLAGS=\"-C debuginfo=0\"`.",
100                bytes.len(),
101                path.display()
102            );
103            Some(bytes)
104        }
105        Err(err) if err.kind() == io::ErrorKind::NotFound => None,
106        Err(err) => panic!("failed to read root wasm at {}: {}", path.display(), err),
107    }
108}
109
110/// Build one fresh root topology and capture immutable controller snapshots for cache reuse.
111#[must_use]
112pub fn build_root_cached_baseline(
113    spec: &RootBaselineSpec<'_>,
114    root_wasm: Vec<u8>,
115) -> CachedPicBaseline<RootBaselineMetadata> {
116    let initialized = setup_root_topology(spec, root_wasm);
117    let controller_ids = std::iter::once(initialized.metadata.root_id)
118        .chain(initialized.metadata.subnet_directory.values().copied())
119        .collect::<Vec<_>>();
120
121    progress(spec, "capturing cached root snapshots");
122    let started_at = Instant::now();
123    let baseline = CachedPicBaseline::capture(
124        initialized.pic,
125        initialized.metadata.root_id,
126        controller_ids,
127        initialized.metadata,
128    )
129    .expect("cached root snapshots must be available");
130    progress_elapsed(spec, "captured cached root snapshots", started_at);
131    baseline
132}
133
134/// Restore one cached root topology and wait until root plus children are ready again.
135pub fn restore_root_cached_baseline(
136    spec: &RootBaselineSpec<'_>,
137    baseline: &CachedPicBaseline<RootBaselineMetadata>,
138) {
139    progress(spec, "restoring cached root snapshots");
140    let restore_started_at = Instant::now();
141    baseline.restore(baseline.metadata.root_id);
142    progress_elapsed(spec, "restored cached root snapshots", restore_started_at);
143
144    progress(spec, "waiting for restored root bootstrap");
145    let root_wait_started_at = Instant::now();
146    wait_for_bootstrap(spec, &baseline.pic, baseline.metadata.root_id);
147    progress_elapsed(spec, "restored root bootstrap ready", root_wait_started_at);
148
149    progress(spec, "waiting for restored child canisters ready");
150    let child_wait_started_at = Instant::now();
151    wait_for_children_ready(spec, &baseline.pic, &baseline.metadata.subnet_directory);
152    progress_elapsed(
153        spec,
154        "restored child canisters ready",
155        child_wait_started_at,
156    );
157}
158
/// Install root, stage one ordinary release profile, resume bootstrap, and fetch the subnet map.
///
/// Retries the full setup up to `spec.root_setup_max_attempts` times. Each attempt is wrapped
/// in `catch_unwind` so a panicking attempt (e.g. a flaky PocketIC start) triggers a retry
/// instead of failing the run; the final failed attempt re-raises its panic payload.
#[must_use]
pub fn setup_root_topology(
    spec: &RootBaselineSpec<'_>,
    root_wasm: Vec<u8>,
) -> InitializedRootTopology {
    for attempt in 1..=spec.root_setup_max_attempts {
        progress(
            spec,
            &format!(
                "initialize root setup attempt {attempt}/{}",
                spec.root_setup_max_attempts
            ),
        );
        // Clone per attempt: the install call below consumes the wasm bytes.
        let wasm = root_wasm.clone();
        // AssertUnwindSafe: the closure captures only `spec`, the per-attempt wasm clone,
        // and fresh locals, so no cross-attempt state can be observed torn after a panic.
        let attempt_result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
            progress(spec, "starting PocketIC instance");
            let pic_started_at = Instant::now();
            let pic = pic();
            progress_elapsed(spec, "PocketIC instance ready", pic_started_at);

            progress(spec, "installing root canister");
            let root_install_started_at = Instant::now();
            let root_id = pic
                .create_and_install_root_canister(wasm)
                .expect("install root canister");
            progress_elapsed(spec, "root canister installed", root_install_started_at);

            // Releases must be staged BEFORE bootstrap resumes so root can install children.
            progress(spec, "staging managed release set");
            let stage_started_at = Instant::now();
            stage_managed_release_set(spec, &pic, root_id);
            progress_elapsed(spec, "staged managed release set", stage_started_at);

            progress(spec, "resuming root bootstrap");
            let resume_started_at = Instant::now();
            resume_root_bootstrap(&pic, root_id);
            progress_elapsed(spec, "resumed root bootstrap", resume_started_at);

            progress(spec, "waiting for root bootstrap");
            let root_wait_started_at = Instant::now();
            wait_for_bootstrap(spec, &pic, root_id);
            progress_elapsed(spec, "root bootstrap ready", root_wait_started_at);

            progress(spec, "fetching subnet directory");
            let directory_started_at = Instant::now();
            let subnet_directory = fetch_subnet_directory(&pic, root_id);
            progress_elapsed(spec, "fetched subnet directory", directory_started_at);

            progress(spec, "waiting for child canisters ready");
            let child_wait_started_at = Instant::now();
            wait_for_children_ready(spec, &pic, &subnet_directory);
            progress_elapsed(spec, "child canisters ready", child_wait_started_at);

            InitializedRootTopology {
                pic,
                metadata: RootBaselineMetadata {
                    root_id,
                    subnet_directory,
                },
            }
        }));

        match attempt_result {
            Ok(state) => return state,
            Err(err) if attempt < spec.root_setup_max_attempts => {
                eprintln!(
                    "setup_root attempt {attempt}/{} failed; retrying",
                    spec.root_setup_max_attempts
                );
                // Discard the panic payload; the next iteration retries from scratch.
                drop(err);
            }
            // Out of attempts: propagate the original panic to the caller.
            Err(err) => std::panic::resume_unwind(err),
        }
    }

    // The loop either returns an initialized topology or resumes the final panic.
    unreachable!("setup_root must return or panic")
}
236
237///
238/// InitializedRootTopology
239///
240
241pub struct InitializedRootTopology {
242    pub pic: Pic,
243    pub metadata: RootBaselineMetadata,
244}
245
// Stage the configured ordinary release set into root before bootstrap resumes.
//
// For each configured role, in order: stage an approved manifest, prepare a chunk set
// whose per-chunk hashes match the split payload, then publish every chunk sequentially.
fn stage_managed_release_set(spec: &RootBaselineSpec<'_>, pic: &Pic, root_id: Principal) {
    // Use replica time so staged manifests carry replica timestamps, not host wall time.
    let now_secs = root_time_secs(pic, root_id);
    let version = TemplateVersion::owned(spec.package_version.to_string());
    let roles = configured_release_roles(spec);
    let total = roles.len();

    for (index, role) in roles.into_iter().enumerate() {
        let role_name = role.as_str().to_string();
        progress(
            spec,
            &format!("staging release {}/{}: {role_name}", index + 1, total),
        );
        let wasm_module = load_release_wasm_gz(spec, &role_name);
        let template_id = TemplateId::owned(format!("embedded:{role}"));
        let payload_hash = canic::cdk::utils::wasm::get_wasm_hash(&wasm_module);
        let payload_size_bytes = wasm_module.len() as u64;
        // Split the payload into fixed-size chunks; the last chunk may be shorter.
        let chunks = wasm_module
            .chunks(spec.root_release_chunk_bytes)
            .map(<[u8]>::to_vec)
            .collect::<Vec<_>>();

        let manifest = TemplateManifestInput {
            template_id: template_id.clone(),
            role: role.clone(),
            version: version.clone(),
            payload_hash: payload_hash.clone(),
            payload_size_bytes,
            store_binding: WasmStoreBinding::new("bootstrap"),
            chunking_mode: TemplateChunkingMode::Chunked,
            // Pre-approved so bootstrap can consume the release without a manual step.
            manifest_state: TemplateManifestState::Approved,
            approved_at: Some(now_secs),
            created_at: now_secs,
        };
        stage_manifest(pic, root_id, manifest);

        let prepare = TemplateChunkSetPrepareInput {
            template_id: template_id.clone(),
            version: version.clone(),
            payload_hash: payload_hash.clone(),
            payload_size_bytes,
            // One hash per chunk, in chunk order, so root can verify each upload.
            chunk_hashes: chunks
                .iter()
                .map(|chunk| canic::cdk::utils::wasm::get_wasm_hash(chunk))
                .collect(),
        };
        prepare_chunk_set(pic, root_id, prepare);

        // Publish chunks in index order to match the prepared chunk-hash list.
        for (chunk_index, bytes) in chunks.into_iter().enumerate() {
            publish_chunk(
                pic,
                root_id,
                TemplateChunkInput {
                    template_id: template_id.clone(),
                    version: version.clone(),
                    chunk_index: u32::try_from(chunk_index)
                        .expect("release chunk index must fit into nat32"),
                    bytes,
                },
            );
        }
    }
}
309
310// Load one built `.wasm.gz` artifact for a configured release role.
311fn load_release_wasm_gz(spec: &RootBaselineSpec<'_>, role_name: &str) -> Vec<u8> {
312    let artifact_path = spec
313        .workspace_root
314        .join(spec.root_release_artifacts_relative)
315        .join(role_name)
316        .join(format!("{role_name}.wasm.gz"));
317    let bytes = fs::read(&artifact_path)
318        .unwrap_or_else(|err| panic!("read {} failed: {err}", artifact_path.display()));
319    assert!(
320        !bytes.is_empty(),
321        "release artifact must not be empty: {}",
322        artifact_path.display()
323    );
324    bytes
325}
326
327// Confirm the root bootstrap artifact and every managed ordinary release artifact are fresh.
328fn root_release_artifacts_ready(spec: &RootBaselineSpec<'_>) -> bool {
329    if !dfx_artifact_ready_for_build(
330        &spec.workspace_root,
331        spec.root_wasm_artifact_relative,
332        spec.artifact_watch_paths,
333        spec.build_network,
334        spec.build_profile,
335        spec.build_extra_env,
336    ) {
337        return false;
338    }
339
340    configured_release_roles(spec).into_iter().all(|role| {
341        let role_name = role.as_str().to_string();
342        let artifact_relative_path = format!(
343            "{}/{role_name}/{role_name}.wasm.gz",
344            spec.root_release_artifacts_relative
345        );
346        dfx_artifact_ready_for_build(
347            &spec.workspace_root,
348            &artifact_relative_path,
349            spec.artifact_watch_paths,
350            spec.build_network,
351            spec.build_profile,
352            spec.build_extra_env,
353        )
354    })
355}
356
357// Map the configured ordinary role names into stable `CanisterRole` values.
358fn configured_release_roles(spec: &RootBaselineSpec<'_>) -> Vec<CanisterRole> {
359    spec.release_roles
360        .iter()
361        .copied()
362        .map(|role| CanisterRole::owned(role.to_string()))
363        .collect()
364}
365
366// Stage one manifest through the root admin surface.
367fn stage_manifest(pic: &Pic, root_id: Principal, manifest: TemplateManifestInput) {
368    let staged: Result<(), Error> = pic
369        .update_call(
370            root_id,
371            protocol::CANIC_TEMPLATE_STAGE_MANIFEST_ADMIN,
372            (manifest,),
373        )
374        .expect("stage release manifest transport");
375
376    staged.expect("stage release manifest application");
377}
378
379// Prepare one staged chunk set through the root admin surface.
380fn prepare_chunk_set(pic: &Pic, root_id: Principal, prepare: TemplateChunkSetPrepareInput) {
381    let prepared: Result<TemplateChunkSetInfoResponse, Error> = pic
382        .update_call(root_id, protocol::CANIC_TEMPLATE_PREPARE_ADMIN, (prepare,))
383        .expect("prepare release chunk set transport");
384
385    let _ = prepared.expect("prepare release chunk set application");
386}
387
388// Publish one staged release chunk through the root admin surface.
389fn publish_chunk(pic: &Pic, root_id: Principal, chunk: TemplateChunkInput) {
390    let published: Result<(), Error> = pic
391        .update_call(
392            root_id,
393            protocol::CANIC_TEMPLATE_PUBLISH_CHUNK_ADMIN,
394            (chunk,),
395        )
396        .expect("publish release chunk transport");
397
398    published.expect("publish release chunk application");
399}
400
401// Resume the root bootstrap flow once the ordinary release set is staged.
402fn resume_root_bootstrap(pic: &Pic, root_id: Principal) {
403    let resumed: Result<(), Error> = pic
404        .update_call(
405            root_id,
406            protocol::CANIC_WASM_STORE_BOOTSTRAP_RESUME_ROOT_ADMIN,
407            (),
408        )
409        .expect("resume root bootstrap transport");
410
411    resumed.expect("resume root bootstrap application");
412}
413
414// Read the current replica time from root so staged manifests use replica timestamps.
415fn root_time_secs(pic: &Pic, root_id: Principal) -> u64 {
416    let now_secs: Result<u64, Error> = pic
417        .query_call(root_id, protocol::CANIC_TIME, ())
418        .expect("query root time transport");
419
420    now_secs.expect("query root time application")
421}
422
423// Wait until root reports `canic_ready`.
424fn wait_for_bootstrap(spec: &RootBaselineSpec<'_>, pic: &Pic, root_id: Principal) {
425    pic.wait_for_ready(root_id, spec.bootstrap_tick_limit, "root bootstrap");
426}
427
428// Wait until every child canister reports `canic_ready`.
429fn wait_for_children_ready(
430    spec: &RootBaselineSpec<'_>,
431    pic: &Pic,
432    subnet_directory: &HashMap<CanisterRole, Principal>,
433) {
434    pic.wait_for_all_ready(
435        subnet_directory
436            .iter()
437            .filter(|(role, _)| !role.is_root())
438            .map(|(_, pid)| *pid),
439        spec.bootstrap_tick_limit,
440        "root children bootstrap",
441    );
442}
443
444// Fetch the subnet directory from root as a role → principal map.
445fn fetch_subnet_directory(pic: &Pic, root_id: Principal) -> HashMap<CanisterRole, Principal> {
446    let page: Result<Page<DirectoryEntryResponse>, canic::Error> = pic
447        .query_call(
448            root_id,
449            protocol::CANIC_SUBNET_DIRECTORY,
450            (PageRequest {
451                limit: 100,
452                offset: 0,
453            },),
454        )
455        .expect("query subnet directory transport");
456
457    let page = page.expect("query subnet directory application");
458
459    page.entries
460        .into_iter()
461        .map(|entry| (entry.role, entry.pid))
462        .collect()
463}