//! Root baseline helpers for the `canic_testkit` PocketIC harness.
//! (File: canic_testkit/pic/root.rs — web-export residue removed.)

1use super::{CachedPicBaseline, Pic, pic};
2use crate::artifacts::{WasmBuildProfile, build_dfx_all, dfx_artifact_ready};
3use canic::{
4    Error,
5    cdk::types::Principal,
6    dto::{
7        page::{Page, PageRequest},
8        topology::DirectoryEntryResponse,
9    },
10    ids::CanisterRole,
11    protocol,
12};
13use canic_control_plane::{
14    dto::template::{
15        TemplateChunkInput, TemplateChunkSetInfoResponse, TemplateChunkSetPrepareInput,
16        TemplateManifestInput,
17    },
18    ids::{
19        TemplateChunkingMode, TemplateId, TemplateManifestState, TemplateVersion, WasmStoreBinding,
20    },
21};
22use std::{collections::HashMap, fs, io, io::Write, path::PathBuf, time::Instant};
23
24///
25/// RootBaselineSpec
26///
27
#[derive(Clone)]
pub struct RootBaselineSpec<'a> {
    /// Prefix printed inside `[...]` on every progress line.
    pub progress_prefix: &'a str,
    /// Workspace root that all of the relative paths below are joined against.
    pub workspace_root: PathBuf,
    /// Relative path of the built `root.wasm.gz` used for the PocketIC root install.
    pub root_wasm_relative: &'a str,
    /// Relative path of the root artifact checked for freshness before rebuilding.
    pub root_wasm_artifact_relative: &'a str,
    /// Relative directory holding one `<role>/<role>.wasm.gz` artifact per release role.
    pub root_release_artifacts_relative: &'a str,
    /// Paths fed to `dfx_artifact_ready` as freshness inputs for the artifacts.
    pub artifact_watch_paths: &'a [&'a str],
    /// Ordinary role names whose release artifacts are staged into root.
    pub release_roles: &'a [&'a str],
    /// Relative path of the lock file passed to `build_dfx_all`.
    pub dfx_build_lock_relative: &'a str,
    /// Network name passed to `build_dfx_all`.
    pub build_network: &'a str,
    /// Build profile passed to `build_dfx_all`.
    pub build_profile: WasmBuildProfile,
    /// Tick budget used when waiting for root and child canisters to become ready.
    pub bootstrap_tick_limit: usize,
    /// Maximum number of full `setup_root_topology` attempts before giving up.
    pub root_setup_max_attempts: usize,
    /// Upper bound (exclusive) on the root wasm size accepted for chunked install.
    pub pocket_ic_wasm_chunk_store_limit_bytes: usize,
    /// Size of each chunk when splitting release wasm for chunked upload.
    pub root_release_chunk_bytes: usize,
    /// Version string used for every staged template manifest.
    pub package_version: &'a str,
}
46
47///
48/// RootBaselineMetadata
49///
50
pub struct RootBaselineMetadata {
    /// Principal of the installed root canister.
    pub root_id: Principal,
    /// Role → principal map fetched from root's subnet directory endpoint.
    pub subnet_directory: HashMap<CanisterRole, Principal>,
}
55
56// Print one progress line for a root-test setup phase and flush immediately.
57fn progress(spec: &RootBaselineSpec<'_>, phase: &str) {
58    eprintln!("[{}] {phase}", spec.progress_prefix);
59    let _ = std::io::stderr().flush();
60}
61
62// Print one completed phase with wall-clock timing.
63fn progress_elapsed(spec: &RootBaselineSpec<'_>, phase: &str, started_at: Instant) {
64    progress(
65        spec,
66        &format!("{phase} in {:.2}s", started_at.elapsed().as_secs_f32()),
67    );
68}
69
70/// Build the local `.dfx` root artifacts once unless all required outputs are already fresh.
71pub fn ensure_root_release_artifacts_built(spec: &RootBaselineSpec<'_>) {
72    if root_release_artifacts_ready(spec) {
73        progress(spec, "reusing existing root release artifacts");
74        return;
75    }
76
77    progress(spec, "building local DFX artifacts for root baseline");
78    let started_at = Instant::now();
79    build_dfx_all(
80        &spec.workspace_root,
81        spec.dfx_build_lock_relative,
82        spec.build_network,
83        spec.build_profile,
84    );
85    progress_elapsed(spec, "finished local DFX artifact build", started_at);
86}
87
88/// Load the built `root.wasm.gz` artifact used for PocketIC root installs.
89#[must_use]
90pub fn load_root_wasm(spec: &RootBaselineSpec<'_>) -> Option<Vec<u8>> {
91    let path = spec.workspace_root.join(spec.root_wasm_relative);
92    match fs::read(&path) {
93        Ok(bytes) => {
94            assert!(
95                bytes.len() < spec.pocket_ic_wasm_chunk_store_limit_bytes,
96                "root wasm artifact is too large for PocketIC chunked install: {} bytes at {}. \
97Use a compressed `.wasm.gz` artifact and/or build canister wasm with `RUSTFLAGS=\"-C debuginfo=0\"`.",
98                bytes.len(),
99                path.display()
100            );
101            Some(bytes)
102        }
103        Err(err) if err.kind() == io::ErrorKind::NotFound => None,
104        Err(err) => panic!("failed to read root wasm at {}: {}", path.display(), err),
105    }
106}
107
108/// Build one fresh root topology and capture immutable controller snapshots for cache reuse.
109#[must_use]
110pub fn build_root_cached_baseline(
111    spec: &RootBaselineSpec<'_>,
112    root_wasm: Vec<u8>,
113) -> CachedPicBaseline<RootBaselineMetadata> {
114    let initialized = setup_root_topology(spec, root_wasm);
115    let controller_ids = std::iter::once(initialized.metadata.root_id)
116        .chain(initialized.metadata.subnet_directory.values().copied())
117        .collect::<Vec<_>>();
118
119    progress(spec, "capturing cached root snapshots");
120    let started_at = Instant::now();
121    let baseline = CachedPicBaseline::capture(
122        initialized.pic,
123        initialized.metadata.root_id,
124        controller_ids,
125        initialized.metadata,
126    )
127    .expect("cached root snapshots must be available");
128    progress_elapsed(spec, "captured cached root snapshots", started_at);
129    baseline
130}
131
132/// Restore one cached root topology and wait until root plus children are ready again.
133pub fn restore_root_cached_baseline(
134    spec: &RootBaselineSpec<'_>,
135    baseline: &CachedPicBaseline<RootBaselineMetadata>,
136) {
137    progress(spec, "restoring cached root snapshots");
138    let restore_started_at = Instant::now();
139    baseline.restore(baseline.metadata.root_id);
140    progress_elapsed(spec, "restored cached root snapshots", restore_started_at);
141
142    progress(spec, "waiting for restored root bootstrap");
143    let root_wait_started_at = Instant::now();
144    wait_for_bootstrap(spec, &baseline.pic, baseline.metadata.root_id);
145    progress_elapsed(spec, "restored root bootstrap ready", root_wait_started_at);
146
147    progress(spec, "waiting for restored child canisters ready");
148    let child_wait_started_at = Instant::now();
149    wait_for_children_ready(spec, &baseline.pic, &baseline.metadata.subnet_directory);
150    progress_elapsed(
151        spec,
152        "restored child canisters ready",
153        child_wait_started_at,
154    );
155}
156
157/// Install root, stage one ordinary release profile, resume bootstrap, and fetch the subnet map.
158#[must_use]
159pub fn setup_root_topology(
160    spec: &RootBaselineSpec<'_>,
161    root_wasm: Vec<u8>,
162) -> InitializedRootTopology {
163    for attempt in 1..=spec.root_setup_max_attempts {
164        progress(
165            spec,
166            &format!(
167                "initialize root setup attempt {attempt}/{}",
168                spec.root_setup_max_attempts
169            ),
170        );
171        let wasm = root_wasm.clone();
172        let attempt_result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
173            progress(spec, "starting PocketIC instance");
174            let pic_started_at = Instant::now();
175            let pic = pic();
176            progress_elapsed(spec, "PocketIC instance ready", pic_started_at);
177
178            progress(spec, "installing root canister");
179            let root_install_started_at = Instant::now();
180            let root_id = pic
181                .create_and_install_root_canister(wasm)
182                .expect("install root canister");
183            progress_elapsed(spec, "root canister installed", root_install_started_at);
184
185            progress(spec, "staging managed release set");
186            let stage_started_at = Instant::now();
187            stage_managed_release_set(spec, &pic, root_id);
188            progress_elapsed(spec, "staged managed release set", stage_started_at);
189
190            progress(spec, "resuming root bootstrap");
191            let resume_started_at = Instant::now();
192            resume_root_bootstrap(&pic, root_id);
193            progress_elapsed(spec, "resumed root bootstrap", resume_started_at);
194
195            progress(spec, "waiting for root bootstrap");
196            let root_wait_started_at = Instant::now();
197            wait_for_bootstrap(spec, &pic, root_id);
198            progress_elapsed(spec, "root bootstrap ready", root_wait_started_at);
199
200            progress(spec, "fetching subnet directory");
201            let directory_started_at = Instant::now();
202            let subnet_directory = fetch_subnet_directory(&pic, root_id);
203            progress_elapsed(spec, "fetched subnet directory", directory_started_at);
204
205            progress(spec, "waiting for child canisters ready");
206            let child_wait_started_at = Instant::now();
207            wait_for_children_ready(spec, &pic, &subnet_directory);
208            progress_elapsed(spec, "child canisters ready", child_wait_started_at);
209
210            InitializedRootTopology {
211                pic,
212                metadata: RootBaselineMetadata {
213                    root_id,
214                    subnet_directory,
215                },
216            }
217        }));
218
219        match attempt_result {
220            Ok(state) => return state,
221            Err(err) if attempt < spec.root_setup_max_attempts => {
222                eprintln!(
223                    "setup_root attempt {attempt}/{} failed; retrying",
224                    spec.root_setup_max_attempts
225                );
226                drop(err);
227            }
228            Err(err) => std::panic::resume_unwind(err),
229        }
230    }
231
232    unreachable!("setup_root must return or panic")
233}
234
235///
236/// InitializedRootTopology
237///
238
pub struct InitializedRootTopology {
    /// Live PocketIC instance hosting the installed root topology.
    pub pic: Pic,
    /// Root principal plus the subnet directory fetched during setup.
    pub metadata: RootBaselineMetadata,
}
243
// Stage the configured ordinary release set into root before bootstrap resumes.
//
// For each configured role this performs, strictly in order:
//   1. read the built `.wasm.gz` artifact from disk,
//   2. stage an approved manifest describing the payload,
//   3. prepare a chunk set carrying one hash per chunk, and
//   4. publish every chunk in ascending index order.
fn stage_managed_release_set(spec: &RootBaselineSpec<'_>, pic: &Pic, root_id: Principal) {
    // Use the replica clock (not host time) for manifest timestamps.
    let now_secs = root_time_secs(pic, root_id);
    let version = TemplateVersion::owned(spec.package_version.to_string());
    let roles = configured_release_roles(spec);
    let total = roles.len();

    for (index, role) in roles.into_iter().enumerate() {
        let role_name = role.as_str().to_string();
        progress(
            spec,
            &format!("staging release {}/{}: {role_name}", index + 1, total),
        );
        let wasm_module = load_release_wasm_gz(spec, &role_name);
        let template_id = TemplateId::owned(format!("embedded:{role}"));
        let payload_hash = canic::cdk::utils::wasm::get_wasm_hash(&wasm_module);
        let payload_size_bytes = wasm_module.len() as u64;
        // Split the artifact into fixed-size chunks for the chunked upload path.
        let chunks = wasm_module
            .chunks(spec.root_release_chunk_bytes)
            .map(<[u8]>::to_vec)
            .collect::<Vec<_>>();

        let manifest = TemplateManifestInput {
            template_id: template_id.clone(),
            role: role.clone(),
            version: version.clone(),
            payload_hash: payload_hash.clone(),
            payload_size_bytes,
            store_binding: WasmStoreBinding::new("bootstrap"),
            chunking_mode: TemplateChunkingMode::Chunked,
            // Staged directly as approved (with `approved_at` set) so no separate
            // approval step is required before bootstrap consumes the manifest.
            manifest_state: TemplateManifestState::Approved,
            approved_at: Some(now_secs),
            created_at: now_secs,
        };
        stage_manifest(pic, root_id, manifest);

        let prepare = TemplateChunkSetPrepareInput {
            template_id: template_id.clone(),
            version: version.clone(),
            payload_hash: payload_hash.clone(),
            payload_size_bytes,
            // One hash per chunk, in chunk order, so root can verify each upload.
            chunk_hashes: chunks
                .iter()
                .map(|chunk| canic::cdk::utils::wasm::get_wasm_hash(chunk))
                .collect(),
        };
        prepare_chunk_set(pic, root_id, prepare);

        // Publish in index order; the index identifies the slot in the chunk set.
        for (chunk_index, bytes) in chunks.into_iter().enumerate() {
            publish_chunk(
                pic,
                root_id,
                TemplateChunkInput {
                    template_id: template_id.clone(),
                    version: version.clone(),
                    chunk_index: u32::try_from(chunk_index)
                        .expect("release chunk index must fit into nat32"),
                    bytes,
                },
            );
        }
    }
}
307
308// Load one built `.wasm.gz` artifact for a configured release role.
309fn load_release_wasm_gz(spec: &RootBaselineSpec<'_>, role_name: &str) -> Vec<u8> {
310    let artifact_path = spec
311        .workspace_root
312        .join(spec.root_release_artifacts_relative)
313        .join(role_name)
314        .join(format!("{role_name}.wasm.gz"));
315    let bytes = fs::read(&artifact_path)
316        .unwrap_or_else(|err| panic!("read {} failed: {err}", artifact_path.display()));
317    assert!(
318        !bytes.is_empty(),
319        "release artifact must not be empty: {}",
320        artifact_path.display()
321    );
322    bytes
323}
324
325// Confirm the root bootstrap artifact and every managed ordinary release artifact are fresh.
326fn root_release_artifacts_ready(spec: &RootBaselineSpec<'_>) -> bool {
327    if !dfx_artifact_ready(
328        &spec.workspace_root,
329        spec.root_wasm_artifact_relative,
330        spec.artifact_watch_paths,
331    ) {
332        return false;
333    }
334
335    configured_release_roles(spec).into_iter().all(|role| {
336        let role_name = role.as_str().to_string();
337        let artifact_relative_path = format!(
338            "{}/{role_name}/{role_name}.wasm.gz",
339            spec.root_release_artifacts_relative
340        );
341        dfx_artifact_ready(
342            &spec.workspace_root,
343            &artifact_relative_path,
344            spec.artifact_watch_paths,
345        )
346    })
347}
348
349// Map the configured ordinary role names into stable `CanisterRole` values.
350fn configured_release_roles(spec: &RootBaselineSpec<'_>) -> Vec<CanisterRole> {
351    spec.release_roles
352        .iter()
353        .copied()
354        .map(|role| CanisterRole::owned(role.to_string()))
355        .collect()
356}
357
358// Stage one manifest through the root admin surface.
359fn stage_manifest(pic: &Pic, root_id: Principal, manifest: TemplateManifestInput) {
360    let staged: Result<(), Error> = pic
361        .update_call(
362            root_id,
363            protocol::CANIC_TEMPLATE_STAGE_MANIFEST_ADMIN,
364            (manifest,),
365        )
366        .expect("stage release manifest transport");
367
368    staged.expect("stage release manifest application");
369}
370
371// Prepare one staged chunk set through the root admin surface.
372fn prepare_chunk_set(pic: &Pic, root_id: Principal, prepare: TemplateChunkSetPrepareInput) {
373    let prepared: Result<TemplateChunkSetInfoResponse, Error> = pic
374        .update_call(root_id, protocol::CANIC_TEMPLATE_PREPARE_ADMIN, (prepare,))
375        .expect("prepare release chunk set transport");
376
377    let _ = prepared.expect("prepare release chunk set application");
378}
379
380// Publish one staged release chunk through the root admin surface.
381fn publish_chunk(pic: &Pic, root_id: Principal, chunk: TemplateChunkInput) {
382    let published: Result<(), Error> = pic
383        .update_call(
384            root_id,
385            protocol::CANIC_TEMPLATE_PUBLISH_CHUNK_ADMIN,
386            (chunk,),
387        )
388        .expect("publish release chunk transport");
389
390    published.expect("publish release chunk application");
391}
392
393// Resume the root bootstrap flow once the ordinary release set is staged.
394fn resume_root_bootstrap(pic: &Pic, root_id: Principal) {
395    let resumed: Result<(), Error> = pic
396        .update_call(
397            root_id,
398            protocol::CANIC_WASM_STORE_BOOTSTRAP_RESUME_ROOT_ADMIN,
399            (),
400        )
401        .expect("resume root bootstrap transport");
402
403    resumed.expect("resume root bootstrap application");
404}
405
406// Read the current replica time from root so staged manifests use replica timestamps.
407fn root_time_secs(pic: &Pic, root_id: Principal) -> u64 {
408    let now_secs: Result<u64, Error> = pic
409        .query_call(root_id, protocol::CANIC_TIME, ())
410        .expect("query root time transport");
411
412    now_secs.expect("query root time application")
413}
414
415// Wait until root reports `canic_ready`.
416fn wait_for_bootstrap(spec: &RootBaselineSpec<'_>, pic: &Pic, root_id: Principal) {
417    pic.wait_for_ready(root_id, spec.bootstrap_tick_limit, "root bootstrap");
418}
419
420// Wait until every child canister reports `canic_ready`.
421fn wait_for_children_ready(
422    spec: &RootBaselineSpec<'_>,
423    pic: &Pic,
424    subnet_directory: &HashMap<CanisterRole, Principal>,
425) {
426    pic.wait_for_all_ready(
427        subnet_directory
428            .iter()
429            .filter(|(role, _)| !role.is_root())
430            .map(|(_, pid)| *pid),
431        spec.bootstrap_tick_limit,
432        "root children bootstrap",
433    );
434}
435
436// Fetch the subnet directory from root as a role → principal map.
437fn fetch_subnet_directory(pic: &Pic, root_id: Principal) -> HashMap<CanisterRole, Principal> {
438    let page: Result<Page<DirectoryEntryResponse>, canic::Error> = pic
439        .query_call(
440            root_id,
441            protocol::CANIC_SUBNET_DIRECTORY,
442            (PageRequest {
443                limit: 100,
444                offset: 0,
445            },),
446        )
447        .expect("query subnet directory transport");
448
449    let page = page.expect("query subnet directory application");
450
451    page.entries
452        .into_iter()
453        .map(|entry| (entry.role, entry.pid))
454        .collect()
455}