// examples/load_and_predict/load_and_predict.rs
//! Load a CoreML model and run prediction.
//!
//! Usage: cargo run --example load_and_predict -- <path/to/model.mlmodelc>

use coreml_native::{BorrowedTensor, ComputeUnits, Model};

7fn main() {
8    let model_path = std::env::args()
9        .nth(1)
10        .unwrap_or_else(|| "tests/fixtures/test_linear.mlmodelc".to_string());
11
12    println!("Loading model: {model_path}");
13    let model = Model::load(&model_path, ComputeUnits::All).expect("Failed to load model");
14
15    // Create a simple input tensor
16    let input_data = vec![1.0f32, 2.0, 3.0, 4.0];
17    let tensor =
18        BorrowedTensor::from_f32(&input_data, &[1, 4]).expect("Failed to create tensor");
19
20    println!("Running prediction...");
21    let prediction = model.predict(&[("input", &tensor)]).expect("Prediction failed");
22
23    let (output, shape) = prediction.get_f32("output").expect("Failed to get output");
24    println!("Output shape: {shape:?}");
25    println!("Output data: {output:?}");
26}