pub struct Script { /* private fields */ }

Implementations§

impl Script
pub fn new(name: String) -> Script
pub fn with_ir(ir: ScriptIR) -> Script
pub fn get_name(&self) -> Option<&str>
pub fn set_name(&mut self, name: String)
pub fn get_ir(&self) -> Option<&ScriptIR>
pub fn get_ir_mut(&mut self) -> Option<&mut ScriptIR>
pub fn get_backend(&self) -> Option<Backend>
pub fn set_backend(&mut self, backend: Backend)
pub fn get_device(&self) -> Option<Device>
pub fn set_device(&mut self, device: Device)
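A minimal configuration sketch using the accessors above (the Backend::XLA variant and the xla feature gate are taken from the repository example further down; other Backend and Device variants depend on your build):

// construct an empty script and give it a backend
let mut script = Script::new("my_script".to_string());
assert_eq!(script.get_name(), Some("my_script"));
#[cfg(feature = "xla")]
script.set_backend(Backend::XLA);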
pub fn add_input(&mut self, name: &str, tensor: Tensor)
Add an input tensor for script execution
§Arguments
name - Name of the input (must match the script's input names)
tensor - Input tensor
§Example
script.add_input("x", tensor_x);
script.add_input("y", tensor_y);
let result = script.run()?;

Examples found in repository
examples/simple-linear-train-script.rs (line 57)
4 fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let input_data: Vec<Vec<f32>> = (0..10000)
6 .map(|i| {
7 vec![
8 (i % 100) as f32 / 100.0,
9 ((i % 100) + 1) as f32 / 100.0,
10 ((i % 100) + 2) as f32 / 100.0,
11 ]
12 })
13 .collect();
14 let target_data: Vec<Vec<f32>> = (0..10000).map(|i| vec![((i % 100) * 10) as f32 / 1000.0]).collect();
15
16 let input_tensor = Tensor::new(input_data)?;
17 let target_tensor = Tensor::new(target_data)?;
18
19 // Build script
20 let builder = Builder::new("linear_training".to_string());
21 builder.start()?;
22
23 let mut linear = Linear::new(3, 1, true, DType::F32)?;
24 let mse_loss = MSE::new();
25 let mut optimizer = SGD::new(0.01);
26
27 let input = Tensor::input("input", &[10000, 3])?;
28 input.requires_grad()?;
29 let target = Tensor::input("target", &[10000, 1])?;
30
31 let epochs = 1000;
32 let mut final_loss = Tensor::full(&[], 0.0)?;
33
34 for _ in 0..epochs {
35 let pred = linear.forward(&input)?;
36 let loss = mse_loss.forward((&pred, &target))?;
37
38 loss.backward()?;
39
40 optimizer.step(&mut linear.parameters())?;
41 optimizer.zero_grad(&mut linear.parameters())?;
42
43 final_loss = loss;
44 }
45
46 let params = linear.parameters();
47 builder.add_output("loss", final_loss)?;
48 builder.add_output("weight", *params[0])?;
49 builder.add_output("bias", *params[1])?;
50
51 builder.end()?;
52
53 let mut script = builder.build()?;
54 #[cfg(feature = "xla")]
55 script.set_backend(Backend::XLA);
56
57 script.add_input("input", input_tensor);
58 script.add_input("target", target_tensor);
59
60 println!("Compiling script...");
61 let compile_start = Instant::now();
62 script.compile()?;
63 let compile_elapsed = compile_start.elapsed();
64 println!("Compilation time: {:?}", compile_elapsed);
65
66 println!("Running script...");
67 let run_start = Instant::now();
68 let output = script.run()?;
69 let run_elapsed = run_start.elapsed();
70
71 println!("Loss: {}", output["loss"]);
72 println!("Weight: {}", output["weight"]);
73 println!("Bias: {}", output["bias"]);
74 println!("Execution time: {:?}", run_elapsed);
75 println!("Total time: {:?}", compile_elapsed + run_elapsed);
76
77 Ok(())
78 }

pub fn with_input(self, name: &str, tensor: Tensor) -> Script
Builder-style method to add an input (consumes and returns self so calls can be chained)
§Example
let result = script
.with_input("x", tensor_x)
.with_input("y", tensor_y)
    .run()?;

pub fn clear_inputs(&mut self)
Clear all runtime inputs
pub fn get_inputs(&self) -> &HashMap<String, Tensor>
Get current runtime inputs
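A short sketch combining get_inputs and clear_inputs (the input name "x" and tensor_x are illustrative, and the script is assumed to start with no inputs):

script.add_input("x", tensor_x);
assert_eq!(script.get_inputs().len(), 1);
script.clear_inputs();
assert!(script.get_inputs().is_empty());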
pub fn compile(&mut self) -> Result<(), HoduError>
Compile the script for execution
This method compiles the script IR into an optimized executable form.
The compiled result is cached, so subsequent calls to run() will reuse it.
§Example
script.compile()?; // Explicit compilation (optional)
script.run()?;     // Uses cached compilation

Examples found in repository
examples/simple-linear-train-script.rs (line 62): the same listing shown under add_input above.

pub fn run(&mut self) -> Result<HashMap<String, Tensor>, HoduError>
Execute the script with previously added inputs
If the script hasn’t been compiled yet, it will be compiled automatically. Subsequent calls will reuse the cached compilation.
§Returns
HoduResult<ExecutionOutputs> - Output tensors (a HashMap<String, Tensor>) keyed by output name
§Example
script.add_input("x", tensor_x);
script.add_input("y", tensor_y);
let outputs = script.run()?;
let result = &outputs["result"];

Examples found in repository
examples/simple-linear-train-script.rs (line 68): the same listing shown under add_input above.

pub fn to_bytes(&self) -> Result<Vec<u8>, HoduError>
Serialize the script to compressed bytes (also works in no_std)
pub fn from_bytes(data: &[u8]) -> Result<Script, HoduError>
Deserialize a script from bytes (also works in no_std)
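A serialization round-trip sketch; the ? operator assumes the surrounding function returns a Result whose error type can absorb HoduError:

let bytes = script.to_bytes()?;
let restored = Script::from_bytes(&bytes)?;
// restored can now be configured and run like the original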
Trait Implementations§
Auto Trait Implementations§
impl Freeze for Script
impl RefUnwindSafe for Script
impl Send for Script
impl Sync for Script
impl Unpin for Script
impl UnwindSafe for Script
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.