#[cfg(target_arch = "wasm32")]
use jsmpi as mpi;
#[cfg(not(target_arch = "wasm32"))]
use mpi::collective::SystemOperation;
#[cfg(not(target_arch = "wasm32"))]
use mpi::traits::*;
#[cfg(target_arch = "wasm32")]
use std::time::Duration;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_futures::spawn_local;
#[cfg(not(target_arch = "wasm32"))]
/// Native path: sum `rank + 1` across all ranks onto rank 0 via `MPI_Reduce`,
/// log the result at the root, then synchronize with a barrier.
///
/// Note: rsmpi's `Root` trait documents that `reduce_into_root` is to be
/// called by the root process only, while every non-root process calls
/// `reduce_into` (the receive buffer is only meaningful at the root).
/// The previous version called `reduce_into_root` on every rank.
fn run() {
    // `universe` finalizes the MPI runtime when dropped.
    let universe = mpi::initialize().expect("failed to initialize MPI runtime");
    let world = universe.world();
    let rank = world.rank();
    let root = 0;
    // Each rank contributes rank + 1, so the root sees size*(size+1)/2.
    let local = rank + 1;
    let root_process = world.process_at_rank(root);
    if rank == root {
        let mut reduced = 0_i32;
        root_process.reduce_into_root(&local, &mut reduced, SystemOperation::sum());
        log::info!("[reduce] root reduced sum={reduced}");
    } else {
        // Non-root ranks contribute their value; no receive buffer needed.
        root_process.reduce_into(&local, SystemOperation::sum());
    }
    world.barrier();
}
#[cfg(target_arch = "wasm32")]
/// Wasm path: emulate MPI_Reduce(sum) onto rank 0 using point-to-point
/// messages, then synchronize with an async barrier.
async fn run_async() {
    // Attach to the jsmpi runtime provided by the hosting page/worker.
    let runtime = mpi::runtime::Runtime::detect().expect("failed to initialize MPI runtime");
    let my_rank = runtime.rank();
    let world_size = runtime.size();
    let root = 0;
    let tag = 23;
    // Each rank contributes rank + 1, so the root sees size*(size+1)/2.
    let contribution = my_rank + 1;
    if my_rank != root {
        // Non-root ranks simply ship their contribution to the root.
        runtime
            .send(my_rank, root, tag, &contribution)
            .expect("reduce send failed");
    } else {
        // The root folds in one message per peer rank, bounding each
        // receive with a timeout so a stuck peer cannot hang the demo.
        let mut reduced = contribution;
        for peer in 1..world_size {
            let (part, _status) = runtime
                .receive_with_timeout_async::<i32>(Some(peer), Some(tag), Duration::from_secs(15))
                .await
                .expect("reduce receive failed");
            reduced += part;
        }
        log::info!("[reduce] root reduced sum={reduced}");
    }
    runtime.barrier_async().await.expect("barrier failed");
}
/// Native entry point. On wasm32 this compiles to an empty `main`;
/// the exported `jsmpi_main` is the real entry there.
fn main() {
    #[cfg(not(target_arch = "wasm32"))]
    {
        env_logger::init();
        run();
    }
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
/// Wasm entry point exported to JavaScript: set up console logging,
/// run the async reduce demo, and mark this worker as finished.
pub fn jsmpi_main() {
    // Ignore the error from a repeated logger init on re-entry.
    let _ = console_log::init();
    let task = async {
        run_async().await;
        mpi::runtime::mark_finished().expect("failed to mark worker as finished");
    };
    spawn_local(task);
}