pub mod bet;
use crate::trit::Trit;
use crate::vm::bet::{unpack_trits, BetFault};
use std::fmt;
use std::sync::Arc;
/// Transport used for cross-node agent messaging.
///
/// `Send + Sync` so one transport can be shared across threads behind an
/// `Arc` (see `BetVm::set_remote`). Trits travel on the wire as raw `i8`.
pub trait RemoteTransport: Send + Sync {
/// Delivers `trit` (as an `i8`) to agent `agent_id` on node `node_addr`.
fn remote_send(&self, node_addr: &str, agent_id: usize, trit: i8) -> std::io::Result<()>;
/// Waits for agent `agent_id` on node `node_addr` to produce a trit (as `i8`).
fn remote_await(&self, node_addr: &str, agent_id: usize) -> std::io::Result<i8>;
}
/// Maximum call-stack depth; exceeding it raises `VmError::CallStackOverflow`.
const MAX_CALL_DEPTH: usize = 4096;
/// Every way BET VM execution can stop abnormally (plus the clean `Halt`).
/// Error-code prefixes in `Display`: BET-xxx (VM core), IO-xxx (file I/O),
/// ASSERT-xxx (assertions).
#[derive(Debug, PartialEq, Eq)]
pub enum VmError {
/// Popped from an empty operand stack.
StackUnderflow,
/// Invalid BET trit encoding encountered while unpacking bytecode.
BetFault(BetFault),
/// Clean termination — not a failure (see the BET-003 message).
Halt,
/// Opcode byte with no handler; payload is the raw opcode.
InvalidOpcode(u8),
/// Register index outside the architectural range.
InvalidRegister(u8),
/// Program counter left the code buffer; payload is the offending PC.
PcOutOfBounds(usize),
/// Operand types did not match what the instruction required.
TypeMismatch { expected: String, found: String },
/// Element access past the end of an allocated tensor.
TensorIndexOutOfBounds { tensor_id: usize, index: usize, size: usize },
/// A TensorRef pointed at a slot that was never allocated.
TensorNotAllocated(usize),
/// SPAWN referenced an agent type_id never registered via `register_agent_type`.
AgentTypeNotRegistered(u16),
/// TSEND/TAWAIT referenced an agent ID with no live agent.
AgentIdInvalid(usize),
/// Catch-all runtime failure (e.g. division by zero) with a message.
RuntimeError(String),
/// Call depth exceeded `MAX_CALL_DEPTH`.
CallStackOverflow,
/// File could not be opened; payload is the OS error text.
FileOpenError(String),
/// Read from an open file failed; payload is the OS error text.
FileReadError(String),
/// Write to an open file failed; payload is the OS error text.
FileWriteError(String),
/// File handle was never opened or already closed.
FileNotOpen(usize),
/// An assert opcode saw a non-Affirm condition.
AssertionFailed,
}
/// Human-facing error messages. Each message carries a stable error code
/// ([BET-xxx]/[IO-xxx]/[ASSERT-xxx]) plus, for the BET codes, a pointer to
/// the stdlib error-detail file and the matching `ternlang errors` command.
/// NOTE(review): the message strings are part of the user-visible contract;
/// keep codes and wording stable.
impl fmt::Display for VmError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
VmError::StackUnderflow =>
write!(f, "[BET-001] Stack underflow — you tried to pop a truth that wasn't there.\n → details: stdlib/errors/BET-001.tern | ternlang errors BET-001"),
VmError::BetFault(fault) =>
write!(f, "[BET-002] BET encoding fault: {fault:?}. The 0b00 state is forbidden — only 01/10/11 are valid trit bits.\n → details: stdlib/errors/BET-002.tern | ternlang errors BET-002"),
VmError::Halt =>
write!(f, "[BET-003] VM halted cleanly. Execution reached the end. This is not an error — this is peace.\n → details: stdlib/errors/BET-003.tern | ternlang errors BET-003"),
VmError::InvalidOpcode(op) =>
write!(f, "[BET-004] Unknown opcode 0x{op:02x} — the machine has never seen this instruction. Delete cached .ternbc files and recompile.\n → details: stdlib/errors/BET-004.tern | ternlang errors BET-004"),
VmError::InvalidRegister(reg) =>
write!(f, "[BET-005] Register {reg} is out of range. The BET has exactly 27 registers (0–26). That's 3³. No more.\n → details: stdlib/errors/BET-005.tern | ternlang errors BET-005"),
VmError::PcOutOfBounds(pc) =>
write!(f, "[BET-006] PC {pc} is out of bounds — you jumped outside the known universe. Recompile from source.\n → details: stdlib/errors/BET-006.tern | ternlang errors BET-006"),
VmError::TypeMismatch { expected, found } =>
write!(f, "[BET-007] Runtime type mismatch — expected {expected} but found {found}. Square peg, round hole.\n → details: stdlib/errors/BET-007.tern | ternlang errors BET-007"),
VmError::TensorIndexOutOfBounds { tensor_id, index, size } =>
write!(f, "[BET-008] Tensor[{tensor_id}]: index {index} is out of bounds — tensor only has {size} element(s). Trittensors don't grow on access.\n → details: stdlib/errors/BET-008.tern | ternlang errors BET-008"),
VmError::TensorNotAllocated(idx) =>
write!(f, "[BET-009] TensorRef({idx}) doesn't exist — you never allocated it. TALLOC first, then TIDX.\n → details: stdlib/errors/BET-009.tern | ternlang errors BET-009"),
VmError::AgentTypeNotRegistered(type_id) =>
write!(f, "[BET-010] Agent type_id 0x{type_id:04x} was never registered. You can't spawn what was never declared.\n → details: stdlib/errors/BET-010.tern | ternlang errors BET-010"),
VmError::AgentIdInvalid(id) =>
write!(f, "[BET-011] Agent #{id} doesn't exist — no agent was spawned at this ID. TSEND and TAWAIT require a live agent.\n → details: stdlib/errors/BET-011.tern | ternlang errors BET-011"),
VmError::RuntimeError(msg) =>
write!(f, "[BET-012] Runtime error: {msg}"),
VmError::CallStackOverflow =>
write!(f, "[BET-013] Call stack overflow — max depth ({MAX_CALL_DEPTH}) exceeded. Infinite recursion or unbounded cross-module mutual calls detected.\n → details: stdlib/errors/BET-013.tern | ternlang errors BET-013"),
VmError::FileOpenError(e) =>
write!(f, "[IO-001] File open error: {e}"),
VmError::FileReadError(e) =>
write!(f, "[IO-002] File read error: {e}"),
VmError::FileWriteError(e) =>
write!(f, "[IO-003] File write error: {e}"),
VmError::FileNotOpen(id) =>
write!(f, "[IO-004] File handle {id} is not open or was closed."),
VmError::AssertionFailed =>
write!(f, "[ASSERT-001] Assertion failed: an assert() condition evaluated to reject or tend."),
}
}
}
/// A single runtime value on the VM's operand stack or in a register.
#[derive(Debug, Clone, PartialEq)]
pub enum Value {
/// Balanced ternary digit.
Trit(Trit),
/// 64-bit signed integer.
Int(i64),
/// 64-bit float.
Float(f64),
/// Owned UTF-8 string.
String(String),
/// Index into `BetVm::tensors`.
TensorRef(usize),
/// Strided window into an allocated tensor (see `get_pos` usage in `run`).
TensorView {
tensor_id: usize,
offset: usize,
length: usize,
stride: usize,
},
/// Agent handle: index into `BetVm::agents`; `None` means a local agent
/// (TSEND/TAWAIT require the local form). The `Some` variant presumably
/// carries a remote node address — confirm against the remote opcodes.
AgentRef(usize, Option<String>),
/// Named-field aggregate.
Struct(std::collections::HashMap<String, Value>),
}
/// The zero value of the VM is the neutral trit `Tend` — used to fill fresh
/// registers and as the fallback when reading an unset slot.
impl Default for Value {
fn default() -> Self {
Value::Trit(Trit::Tend)
}
}
/// Backing storage for a tensor, one variant per element type.
enum TensorData {
/// One `Trit` per element, unpacked.
Trit(Vec<Trit>),
/// Bit-packed trits (5 trits per byte — see the `pos / 5` indexing in the
/// TIDX/TSET handlers) plus the logical element count.
PackedTrit(Vec<u8>, usize),
/// 64-bit float elements.
Float(Vec<f64>),
/// 64-bit integer elements.
Int(Vec<i64>),
}
impl TensorData {
fn len(&self) -> usize {
match self {
TensorData::Trit(v) => v.len(),
TensorData::PackedTrit(_, len) => *len,
TensorData::Float(v) => v.len(),
TensorData::Int(v) => v.len(),
}
}
}
/// An allocated 2-D tensor: element storage plus its row/column shape.
struct TensorInstance {
// Element storage; variant fixes the element type.
data: TensorData,
// Number of rows.
rows: usize,
// Number of columns; element count is rows * cols.
cols: usize,
}
/// A spawned agent: where its message handler lives in the code buffer,
/// plus a FIFO mailbox of pending messages.
struct AgentInstance {
// Bytecode address the VM jumps to when the agent processes a message.
handler_addr: usize,
// Pending messages, consumed front-first by TAWAIT.
mailbox: std::collections::VecDeque<Value>,
}
/// The BET virtual machine: a stack machine with 27 architectural registers
/// (grown on demand by STORE/LOAD), tensors, agents, and optional remote
/// transport for cross-node messaging.
pub struct BetVm {
// General-purpose registers; starts at 27 slots, resized if an opcode
// addresses a higher index.
registers: Vec<Value>,
// Saved register files, pushed on CALL and restored on RET.
register_stack: Vec<Vec<Value>>,
// Carry trit produced by trit addition (opcode 0x02).
carry_reg: Trit,
// Operand stack.
stack: Vec<Value>,
// Return addresses; depth capped at MAX_CALL_DEPTH.
call_stack: Vec<usize>,
// Allocated tensors, addressed by Value::TensorRef index.
tensors: Vec<TensorInstance>,
// Spawned agents, addressed by Value::AgentRef index.
agents: Vec<AgentInstance>,
// Registered agent types: type_id -> handler bytecode address.
agent_types: std::collections::HashMap<u16, usize>,
// Program counter into `code`.
pc: usize,
// Bytecode being executed.
code: Vec<u8>,
// This node's address, pushed by opcode 0x36; defaults to "127.0.0.1".
node_id: String,
// Public flag; its writer is not in this view — presumably set by a
// sparse-tensor path. TODO confirm.
pub sparse_dropped: bool,
// Optional transport for remote agent messaging.
remote: Option<Arc<dyn RemoteTransport>>,
// Open file handles; `None` marks a closed slot so handles stay stable.
open_files: Vec<Option<std::fs::File>>,
// Register-index overrides consulted by LOAD (0x09) and cleared by STORE
// (0x08) before the register itself is written.
bindings: std::collections::HashMap<usize, Value>,
// Instruction counter; no increment is visible in this view — confirm use.
_instructions_count: u64,
// Everything PRINT (0x20) emitted, also mirrored to stdout.
pub print_log: Vec<String>,
}
impl BetVm {
pub fn new(code: Vec<u8>) -> Self {
Self {
registers: vec![Value::default(); 27],
register_stack: Vec::new(),
carry_reg: Trit::Tend,
stack: Vec::new(),
call_stack: Vec::new(),
tensors: Vec::new(),
agents: Vec::new(),
agent_types: std::collections::HashMap::new(),
pc: 0,
code,
node_id: "127.0.0.1".into(),
sparse_dropped: false,
remote: None,
open_files: Vec::new(),
bindings: std::collections::HashMap::new(),
_instructions_count: 0,
print_log: Vec::new(),
}
}
/// Drains the print log, returning every line emitted so far and leaving
/// the VM's log empty for subsequent output.
pub fn take_output(&mut self) -> Vec<String> {
    let mut drained = Vec::new();
    std::mem::swap(&mut drained, &mut self.print_log);
    drained
}
/// Overrides this node's address (reported by opcode 0x36 and `node_id()`).
pub fn set_node_id(&mut self, node_id: String) {
self.node_id = node_id;
}
/// Attaches a transport for remote agent messaging; replaces any previous one.
pub fn set_remote(&mut self, transport: Arc<dyn RemoteTransport>) {
self.remote = Some(transport);
}
/// Maps an agent `type_id` to its handler's bytecode address, so that the
/// SPAWN opcode (0x30) can create agents of this type. Re-registering a
/// type_id replaces the previous handler address.
pub fn register_agent_type(&mut self, type_id: u16, handler_addr: usize) {
self.agent_types.insert(type_id, handler_addr);
}
/// Returns a clone of the top-of-stack value without popping it, or `None`
/// when the operand stack is empty.
pub fn peek_stack(&self) -> Option<Value> {
    self.stack.last().map(Clone::clone)
}
/// Returns an owned snapshot of the entire register file.
pub fn get_registers(&self) -> Vec<Value> {
    self.registers.to_vec()
}
pub fn get_register(&self, reg: u8) -> Value {
self.registers.get(reg as usize).cloned().unwrap_or_default()
}
/// This node's address as a borrowed string slice.
pub fn node_id(&self) -> &str {
&self.node_id
}
pub fn run(&mut self) -> Result<(), VmError> {
loop {
if self.pc >= self.code.len() { break; }
let opcode = self.code[self.pc];
self.pc += 1;
match opcode {
0x01 => { let packed = self.read_u8()?;
let trits = unpack_trits(&[packed], 1).map_err(VmError::BetFault)?;
self.stack.push(Value::Trit(trits[0]));
}
0x02 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match (a.clone(), b.clone()) {
(Value::Trit(av), Value::Trit(bv)) => {
let (sum, carry) = av + bv;
self.stack.push(Value::Trit(sum));
self.carry_reg = carry;
}
(Value::Int(av), Value::Int(bv)) => self.stack.push(Value::Int(av + bv)),
(Value::Float(av), Value::Float(bv)) => self.stack.push(Value::Float(av + bv)),
(Value::Int(av), Value::Trit(bv)) => self.stack.push(Value::Int(av + bv as i64)),
(Value::Trit(av), Value::Int(bv)) => self.stack.push(Value::Int(av as i64 + bv)),
(Value::Float(av), Value::Trit(bv)) => self.stack.push(Value::Float(av + (bv as i8 as f64))),
(Value::Trit(av), Value::Float(bv)) => self.stack.push(Value::Float((av as i8 as f64) + bv)),
(Value::Float(av), Value::Int(bv)) => self.stack.push(Value::Float(av + (bv as f64))),
(Value::Int(av), Value::Float(bv)) => self.stack.push(Value::Float((av as f64) + bv)),
(Value::String(av), Value::String(bv)) => self.stack.push(Value::String(av + &bv)),
_ => return Err(VmError::TypeMismatch { expected: "Numeric".into(), found: format!("{:?}", (a, b)) }),
}
}
0x03 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match (a.clone(), b.clone()) {
(Value::Trit(av), Value::Trit(bv)) => self.stack.push(Value::Trit(av * bv)),
(Value::Int(av), Value::Int(bv)) => self.stack.push(Value::Int(av * bv)),
(Value::Float(av), Value::Float(bv)) => self.stack.push(Value::Float(av * bv)),
(Value::Int(av), Value::Trit(bv)) => self.stack.push(Value::Int(av * bv as i64)),
(Value::Trit(av), Value::Int(bv)) => self.stack.push(Value::Int(av as i64 * bv)),
(Value::Float(av), Value::Trit(bv)) => self.stack.push(Value::Float(av * (bv as i8 as f64))),
(Value::Trit(av), Value::Float(bv)) => self.stack.push(Value::Float((av as i8 as f64) * bv)),
(Value::Float(av), Value::Int(bv)) => self.stack.push(Value::Float(av * (bv as f64))),
(Value::Int(av), Value::Float(bv)) => self.stack.push(Value::Float((av as f64) * bv)),
_ => return Err(VmError::TypeMismatch { expected: "Numeric".into(), found: format!("{:?}", (a, b)) }),
}
}
0x04 => { let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match a.clone() {
Value::Trit(av) => self.stack.push(Value::Trit(-av)),
Value::Int(av) => self.stack.push(Value::Int(-av)),
Value::Float(av) => self.stack.push(Value::Float(-av)),
_ => return Err(VmError::TypeMismatch { expected: "Numeric".into(), found: format!("{:?}", a) }),
}
}
0x05 => { let addr = self.read_u16()?;
let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
let is_pos = match val {
Value::Trit(Trit::Affirm) => true,
Value::Int(v) => *v > 0,
Value::Float(f) => *f > 0.0,
_ => false,
};
if is_pos { self.pc = addr as usize; }
}
0x06 => { let addr = self.read_u16()?;
let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
let is_zero = match val {
Value::Trit(Trit::Tend) => true,
Value::Int(v) => *v == 0,
Value::Float(f) => *f == 0.0,
_ => false,
};
if is_zero { self.pc = addr as usize; }
}
0x07 => { let addr = self.read_u16()?;
let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
let is_neg = match val {
Value::Trit(Trit::Reject) => true,
Value::Int(v) => *v < 0,
Value::Float(f) => *f < 0.0,
_ => false,
};
if is_neg { self.pc = addr as usize; }
}
0x08 => { let reg = self.read_u8()? as usize;
let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
self.bindings.remove(®);
if reg >= self.registers.len() { self.registers.resize(reg + 1, Value::default()); }
self.registers[reg] = val;
}
0x09 => { let reg = self.read_u8()? as usize;
let val = self.bindings.get(®).cloned().unwrap_or_else(|| {
if reg >= self.registers.len() { self.registers.resize(reg + 1, Value::default()); }
self.registers[reg].clone()
});
self.stack.push(val);
}
0x0a => { let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
self.stack.push(val.clone());
}
0x0b => { let addr = self.read_u16()?;
self.pc = addr as usize;
}
0x0c => { self.stack.pop().ok_or(VmError::StackUnderflow)?;
}
0x0e => { let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = match a_val {
Value::Trit(t) => t,
Value::Int(v) if v == 1 => Trit::Affirm,
Value::Int(v) if v == 0 => Trit::Tend,
Value::Int(v) if v == -1 => Trit::Reject,
_ => return Err(VmError::TypeMismatch { expected: "Trit or Int(-1..1)".into(), found: format!("{:?}", a_val) }),
};
let b = match b_val {
Value::Trit(t) => t,
Value::Int(v) if v == 1 => Trit::Affirm,
Value::Int(v) if v == 0 => Trit::Tend,
Value::Int(v) if v == -1 => Trit::Reject,
_ => return Err(VmError::TypeMismatch { expected: "Trit or Int(-1..1)".into(), found: format!("{:?}", b_val) }),
};
let result = match (a, b) {
(Trit::Affirm, Trit::Affirm) => Trit::Affirm,
(Trit::Reject, Trit::Reject) => Trit::Reject,
(Trit::Tend, x) => x,
(x, Trit::Tend) => x,
_ => Trit::Tend,
};
self.stack.push(Value::Trit(result));
}
0x0f => { let rows = self.read_u32()? as usize;
let cols = self.read_u32()? as usize;
let size = rows * cols;
let idx = self.tensors.len();
self.tensors.push(TensorInstance {
data: TensorData::Trit(vec![Trit::Tend; size]),
rows,
cols,
});
self.stack.push(Value::TensorRef(idx));
}
0x3c => { let rows = self.read_u32()? as usize;
let cols = self.read_u32()? as usize;
let size = rows * cols;
let idx = self.tensors.len();
self.tensors.push(TensorInstance {
data: TensorData::Int(vec![0i64; size]),
rows,
cols,
});
self.stack.push(Value::TensorRef(idx));
}
0x3d => { let rows = self.read_u32()? as usize;
let cols = self.read_u32()? as usize;
let size = rows * cols;
let idx = self.tensors.len();
self.tensors.push(TensorInstance {
data: TensorData::Float(vec![0.0f64; size]),
rows,
cols,
});
self.stack.push(Value::TensorRef(idx));
}
0x10 => { if self.call_stack.len() >= MAX_CALL_DEPTH {
return Err(VmError::CallStackOverflow);
}
let addr = self.read_u16()? as usize;
self.register_stack.push(self.registers.clone());
self.call_stack.push(self.pc);
self.pc = addr;
}
0x11 => { if let Some(prev) = self.register_stack.pop() {
self.registers = prev;
}
match self.call_stack.pop() {
Some(ret) => self.pc = ret,
None => return Ok(()),
}
}
0x14 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match (a.clone(), b.clone()) {
(Value::Int(x), Value::Int(y)) => {
let r = if x < y { Trit::Affirm } else if x == y { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Float(x), Value::Float(y)) => {
let r = if x < y { Trit::Affirm } else if (x - y).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Int(x), Value::Trit(y)) => {
let bv = y as i64;
let r = if x < bv { Trit::Affirm } else if x == bv { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Trit(x), Value::Int(y)) => {
let av = x as i64;
let r = if av < y { Trit::Affirm } else if av == y { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Int(av), Value::Float(bv)) => {
let a_val = av as f64;
let r = if a_val < bv { Trit::Affirm } else if (a_val - bv).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Float(av), Value::Int(bv)) => {
let b_val = bv as f64;
let r = if av < b_val { Trit::Affirm } else if (av - b_val).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Trit(av), Value::Float(bv)) => {
let a_val = av as i8 as f64;
let r = if a_val < bv { Trit::Affirm } else if (a_val - bv).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Float(av), Value::Trit(bv)) => {
let b_val = bv as i8 as f64;
let r = if av < b_val { Trit::Affirm } else if (av - b_val).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Trit(x), Value::Trit(y)) => {
let av = x as i64;
let bv = y as i64;
let r = if av < bv { Trit::Affirm } else if av == bv { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
_ => return Err(VmError::TypeMismatch { expected: "Numeric".into(), found: format!("{:?}", (a, b)) }),
}
}
0x15 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match (a.clone(), b.clone()) {
(Value::Int(x), Value::Int(y)) => {
let r = if x > y { Trit::Affirm } else if x == y { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Float(x), Value::Float(y)) => {
let r = if x > y { Trit::Affirm } else if (x - y).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Int(x), Value::Trit(y)) => {
let bv = y as i64;
let r = if x > bv { Trit::Affirm } else if x == bv { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Trit(x), Value::Int(y)) => {
let av = x as i64;
let r = if av > y { Trit::Affirm } else if av == y { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Int(av), Value::Float(bv)) => {
let a_val = av as f64;
let r = if a_val > bv { Trit::Affirm } else if (a_val - bv).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Float(av), Value::Int(bv)) => {
let b_val = bv as f64;
let r = if av > b_val { Trit::Affirm } else if (av - b_val).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Trit(av), Value::Float(bv)) => {
let a_val = av as i8 as f64;
let r = if a_val > bv { Trit::Affirm } else if (a_val - bv).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Float(av), Value::Trit(bv)) => {
let b_val = bv as i8 as f64;
let r = if av > b_val { Trit::Affirm } else if (av - b_val).abs() < f64::EPSILON { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
(Value::Trit(x), Value::Trit(y)) => {
let av = x as i64;
let bv = y as i64;
let r = if av > bv { Trit::Affirm } else if av == bv { Trit::Tend } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
_ => return Err(VmError::TypeMismatch { expected: "Numeric".into(), found: format!("{:?}", (a, b)) }),
}
}
0x16 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let is_eq = match (a.clone(), b.clone()) {
(Value::Int(av), Value::Trit(bv)) => av == bv as i64,
(Value::Trit(av), Value::Int(bv)) => av as i64 == bv,
(Value::Float(av), Value::Float(bv)) => (av - bv).abs() < f64::EPSILON,
(Value::Float(av), Value::Trit(bv)) => (av - (bv as i8 as f64)).abs() < f64::EPSILON,
(Value::Trit(av), Value::Float(bv)) => ((av as i8 as f64) - bv).abs() < f64::EPSILON,
(Value::Float(av), Value::Int(bv)) => (av - (bv as f64)).abs() < f64::EPSILON,
(Value::Int(av), Value::Float(bv)) => ((av as f64) - bv).abs() < f64::EPSILON,
_ => a == b,
};
let r = if is_eq { Trit::Affirm } else { Trit::Reject };
self.stack.push(Value::Trit(r));
}
0x17 => { let mut b = [0u8; 8];
for i in 0..8 { b[i] = self.read_u8()?; }
self.stack.push(Value::Int(i64::from_le_bytes(b)));
}
0x18 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match (a.clone(), b.clone()) {
(Value::Int(x), Value::Int(y)) => self.stack.push(Value::Int(x + y)),
_ => return Err(VmError::TypeMismatch { expected: "Int".into(), found: format!("{:?}", (a, b)) }),
}
}
0x19 => { let mut b = [0u8; 8];
for i in 0..8 { b[i] = self.read_u8()?; }
self.stack.push(Value::Float(f64::from_le_bytes(b)));
}
0x1e => { let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match (a_val.clone(), b_val.clone()) {
(Value::Int(av), Value::Int(bv)) => {
if bv == 0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Int(av / bv));
}
(Value::Float(av), Value::Float(bv)) => {
if bv == 0.0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Float(av / bv));
}
(Value::Int(av), Value::Trit(bv)) => {
let b = bv as i64;
if b == 0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Int(av / b));
}
(Value::Trit(av), Value::Int(bv)) => {
if bv == 0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Int(av as i64 / bv));
}
(Value::Float(av), Value::Trit(bv)) => {
let b = bv as i8 as f64;
if b == 0.0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Float(av / b));
}
(Value::Trit(av), Value::Float(bv)) => {
if bv == 0.0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Float(av as i8 as f64 / bv));
}
(Value::Float(av), Value::Int(bv)) => {
let b = bv as f64;
if b == 0.0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Float(av / b));
}
(Value::Int(av), Value::Float(bv)) => {
if bv == 0.0 { return Err(VmError::RuntimeError("Division by zero".into())); }
self.stack.push(Value::Float(av as f64 / bv));
}
_ => return Err(VmError::TypeMismatch { expected: "Numeric".into(), found: format!("{:?}", (a_val, b_val)) }),
}
}
0x1f => { let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match (a_val.clone(), b_val.clone()) {
(Value::Int(av), Value::Int(bv)) => {
if bv == 0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Int(av % bv));
}
(Value::Int(av), Value::Trit(bv)) => {
let b = bv as i64;
if b == 0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Int(av % b));
}
(Value::Trit(av), Value::Int(bv)) => {
if bv == 0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Int(av as i64 % bv));
}
(Value::Float(av), Value::Float(bv)) => {
if bv == 0.0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Float(av % bv));
}
(Value::Float(av), Value::Trit(bv)) => {
let b = bv as i8 as f64;
if b == 0.0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Float(av % b));
}
(Value::Trit(av), Value::Float(bv)) => {
if bv == 0.0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Float(av as i8 as f64 % bv));
}
(Value::Float(av), Value::Int(bv)) => {
let b = bv as f64;
if b == 0.0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Float(av % b));
}
(Value::Int(av), Value::Float(bv)) => {
if bv == 0.0 { return Err(VmError::RuntimeError("Modulo by zero".into())); }
self.stack.push(Value::Float(av as f64 % bv));
}
_ => return Err(VmError::TypeMismatch { expected: "Int or Trit".into(), found: format!("{:?}", (a_val, b_val)) }),
}
}
0x20 => { let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let line = match &val {
Value::Trit(t) => format!("{:?}", t),
Value::Int(i) => format!("{}", i),
Value::Float(f) => format!("{}", f),
Value::String(s) => s.clone(),
Value::TensorRef(idx) => format!("TensorRef({})", idx),
Value::TensorView { tensor_id, offset, length, .. } => format!("TensorView({}[{}..{}])", tensor_id, offset, offset + length),
Value::AgentRef(idx, addr) => format!("AgentRef({}, {:?})", idx, addr),
Value::Struct(fields) => format!("Struct({:?})", fields),
};
println!("{}", line);
self.print_log.push(line);
}
0x21 => { let len = self.read_u16()? as usize;
let mut bytes = vec![0u8; len];
for i in 0..len { bytes[i] = self.read_u8()?; }
let s = String::from_utf8(bytes).map_err(|_| VmError::RuntimeError("Invalid UTF-8 string".into()))?;
self.stack.push(Value::String(s));
}
0x22 => { let col = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let row = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let rf = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let r = match row { Value::Int(v) => v, Value::Trit(t) => t as i64, _ => return Err(VmError::TypeMismatch { expected: "Int or Trit".into(), found: format!("{:?}", row) }) };
let c = match col { Value::Int(v) => v, Value::Trit(t) => t as i64, _ => return Err(VmError::TypeMismatch { expected: "Int or Trit".into(), found: format!("{:?}", col) }) };
let (idx, pos) = self.get_pos(&rf, r, c)?;
let tensor = &self.tensors[idx];
let data_len = tensor.data.len();
if pos >= data_len {
return Err(VmError::TensorIndexOutOfBounds { tensor_id: idx, index: pos, size: data_len });
}
let pushed = match &tensor.data {
TensorData::Trit(v) => Value::Trit(v[pos]),
TensorData::PackedTrit(v, _) => {
let byte_idx = pos / 5;
let trit_idx = pos % 5;
let trits = crate::trit::unpack_5_trits(v[byte_idx]);
Value::Trit(trits[trit_idx])
}
TensorData::Float(v) => Value::Float(v[pos]),
TensorData::Int(v) => Value::Int(v[pos]),
};
self.stack.push(pushed);
}
0x23 => { let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let col = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let row = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let rf = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let r = match row { Value::Int(v) => v, Value::Trit(t) => t as i64, _ => return Err(VmError::TypeMismatch { expected: "Int or Trit".into(), found: format!("{:?}", row) }) };
let c = match col { Value::Int(v) => v, Value::Trit(t) => t as i64, _ => return Err(VmError::TypeMismatch { expected: "Int or Trit".into(), found: format!("{:?}", col) }) };
let (idx, pos) = self.get_pos(&rf, r, c)?;
let tensor = &mut self.tensors[idx];
let data_len = tensor.data.len();
if pos >= data_len { return Err(VmError::TensorIndexOutOfBounds { tensor_id: idx, index: pos, size: data_len }); }
match (&mut tensor.data, val.clone()) {
(TensorData::Trit(v), Value::Trit(t)) => v[pos] = t,
(TensorData::Trit(v), Value::Int(i)) => v[pos] = if i > 0 { Trit::Affirm } else if i < 0 { Trit::Reject } else { Trit::Tend },
(TensorData::PackedTrit(v, _), val_v) => {
let byte_idx = pos / 5;
let trit_idx = pos % 5;
let mut trits = crate::trit::unpack_5_trits(v[byte_idx]);
trits[trit_idx] = match val_v {
Value::Trit(t) => t,
Value::Int(i) => if i > 0 { Trit::Affirm } else if i < 0 { Trit::Reject } else { Trit::Tend },
_ => return Err(VmError::TypeMismatch { expected: "Trit or Int".into(), found: format!("{:?}", val_v) }),
};
v[byte_idx] = crate::trit::pack_5_trits(trits);
}
(TensorData::Float(v), Value::Float(f)) => v[pos] = f,
(TensorData::Float(v), Value::Int(i)) => v[pos] = i as f64,
(TensorData::Int(v), Value::Int(i)) => v[pos] = i,
(TensorData::Int(v), Value::Float(f)) => v[pos] = f as i64,
(TensorData::Int(v), Value::Trit(t)) => v[pos] = t as i64,
_ => return Err(VmError::TypeMismatch { expected: "compatible value for tensor type".into(), found: format!("{:?}", val) }),
}
}
0x24 => { let rf = self.stack.pop().ok_or(VmError::StackUnderflow)?;
match rf {
Value::TensorRef(idx) => {
if idx >= self.tensors.len() { return Err(VmError::TensorNotAllocated(idx)); }
let tensor = &self.tensors[idx];
self.stack.push(Value::Int(tensor.rows as i64));
self.stack.push(Value::Int(tensor.cols as i64));
}
Value::TensorView { length, .. } => {
self.stack.push(Value::Int(length as i64));
self.stack.push(Value::Int(1));
}
Value::String(s) => {
let n = s.chars().count() as i64;
self.stack.push(Value::Int(n));
self.stack.push(Value::Int(1));
}
_ => return Err(VmError::TypeMismatch { expected: "TensorRef, TensorView, or String".into(), found: format!("{:?}", rf) }),
}
}
0x30 => { let type_id = self.read_u16()?;
if let Some(&handler_addr) = self.agent_types.get(&type_id) {
let id = self.agents.len();
self.agents.push(AgentInstance { handler_addr, mailbox: Default::default() });
self.stack.push(Value::AgentRef(id, None));
} else {
return Err(VmError::AgentTypeNotRegistered(type_id));
}
}
0x31 => { let msg = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let target = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let Value::AgentRef(id, None) = target {
if id < self.agents.len() {
self.agents[id].mailbox.push_back(msg);
} else {
return Err(VmError::AgentIdInvalid(id));
}
} else {
return Err(VmError::TypeMismatch { expected: "Local AgentRef".into(), found: format!("{:?}", target) });
}
}
0x32 => { let target = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let Value::AgentRef(id, None) = target {
if id < self.agents.len() {
if self.call_stack.len() >= MAX_CALL_DEPTH {
return Err(VmError::CallStackOverflow);
}
let handler_addr = self.agents[id].handler_addr;
let msg = self.agents[id].mailbox.pop_front().unwrap_or(Value::default());
self.register_stack.push(self.registers.clone());
self.call_stack.push(self.pc);
self.pc = handler_addr;
self.stack.push(msg);
} else {
return Err(VmError::AgentIdInvalid(id));
}
} else {
return Err(VmError::TypeMismatch { expected: "Local AgentRef".into(), found: format!("{:?}", target) });
}
}
0x25 => { let mut b = [0u8; 8];
for i in 0..8 { b[i] = self.read_u8()?; }
let target_val = i64::from_le_bytes(b);
let addr = self.read_u16()?;
let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
let is_eq = match val {
Value::Int(v) => *v == target_val,
Value::Trit(t) => (*t as i8) as i64 == target_val,
_ => false,
};
if is_eq { self.pc = addr as usize; }
}
0x26 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let is_le = match (a.clone(), b.clone()) {
(Value::Int(x), Value::Int(y)) => x <= y,
(Value::Float(x), Value::Float(y)) => x <= y || (x - y).abs() < f64::EPSILON,
(Value::Int(x), Value::Trit(y)) => x <= y as i64,
(Value::Trit(x), Value::Int(y)) => (x as i64) <= y,
(Value::Trit(x), Value::Trit(y)) => (x as i64) <= (y as i64),
_ => false,
};
self.stack.push(Value::Trit(if is_le { Trit::Affirm } else { Trit::Reject }));
}
0x27 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let is_ge = match (a.clone(), b.clone()) {
(Value::Int(x), Value::Int(y)) => x >= y,
(Value::Float(x), Value::Float(y)) => x >= y || (x - y).abs() < f64::EPSILON,
(Value::Int(x), Value::Trit(y)) => x >= y as i64,
(Value::Trit(x), Value::Int(y)) => (x as i64) >= y,
(Value::Trit(x), Value::Trit(y)) => (x as i64) >= (y as i64),
_ => false,
};
self.stack.push(Value::Trit(if is_ge { Trit::Affirm } else { Trit::Reject }));
}
0x28 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let to_trit = |v: Value| -> Result<Trit, VmError> {
match v {
Value::Trit(t) => Ok(t),
Value::Int(n) if n > 0 => Ok(Trit::Affirm),
Value::Int(0) => Ok(Trit::Tend),
Value::Int(_) => Ok(Trit::Reject),
other => Err(VmError::TypeMismatch { expected: "Trit or Int".into(), found: format!("{:?}", other) }),
}
};
let ta = to_trit(a)?;
let tb = to_trit(b)?;
let result = if (ta as i8) <= (tb as i8) { ta } else { tb };
self.stack.push(Value::Trit(result));
}
0x29 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let to_trit = |v: Value| -> Result<Trit, VmError> {
match v {
Value::Trit(t) => Ok(t),
Value::Int(n) if n > 0 => Ok(Trit::Affirm),
Value::Int(0) => Ok(Trit::Tend),
Value::Int(_) => Ok(Trit::Reject),
other => Err(VmError::TypeMismatch { expected: "Trit or Int".into(), found: format!("{:?}", other) }),
}
};
let ta = to_trit(a)?;
let tb = to_trit(b)?;
let result = if (ta as i8) >= (tb as i8) { ta } else { tb };
self.stack.push(Value::Trit(result));
}
0x2a => { let mut fb = [0u8; 8];
for i in 0..8 { fb[i] = self.read_u8()?; }
let target_f = f64::from_le_bytes(fb);
let addr = self.read_u16()?;
let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
if let Value::Float(f) = val {
if (f - target_f).abs() < 1e-9 {
self.pc = addr as usize;
}
}
}
0x33 => { let mode = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let path = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let (Value::String(p), Value::Int(m)) = (path, mode) {
use std::fs::OpenOptions;
let mut options = OpenOptions::new();
match m {
0 => { options.read(true); } 1 => { options.write(true).create(true).truncate(true); } 2 => { options.append(true).create(true); } _ => return Err(VmError::RuntimeError(format!("Invalid file mode: {m}"))),
}
let file = options.open(&p).map_err(|e| VmError::FileOpenError(e.to_string()))?;
let handle = self.open_files.len();
self.open_files.push(Some(file));
self.stack.push(Value::Int(handle as i64));
} else {
return Err(VmError::TypeMismatch { expected: "String, Int".into(), found: "Unknown".into() });
}
}
0x34 => { let handle_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let Value::Int(h) = handle_val {
let h = h as usize;
if h >= self.open_files.len() || self.open_files[h].is_none() {
return Err(VmError::FileNotOpen(h));
}
let file = self.open_files[h].as_mut().unwrap();
let mut buf = [0u8; 1];
use std::io::Read;
match file.read_exact(&mut buf) {
Ok(_) => {
let t = match buf[0] {
b'+' | b'1' => Trit::Affirm,
b'-' => Trit::Reject,
_ => Trit::Tend,
};
self.stack.push(Value::Trit(t));
}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
self.stack.push(Value::Trit(Trit::Tend)); }
Err(e) => return Err(VmError::FileReadError(e.to_string())),
}
} else {
return Err(VmError::TypeMismatch { expected: "Int".into(), found: format!("{:?}", handle_val) });
}
}
0x35 => { let t_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let h_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let (Value::Int(h), Value::Trit(t)) = (h_val, t_val) {
let h = h as usize;
if h >= self.open_files.len() || self.open_files[h].is_none() {
return Err(VmError::FileNotOpen(h));
}
let file = self.open_files[h].as_mut().unwrap();
let out = match t {
Trit::Affirm => b'+',
Trit::Reject => b'-',
Trit::Tend => b'0',
};
use std::io::Write;
file.write_all(&[out]).map_err(|e| VmError::FileWriteError(e.to_string()))?;
} else {
return Err(VmError::TypeMismatch { expected: "Int, Trit".into(), found: "Unknown".into() });
}
}
0x36 => { self.stack.push(Value::String(self.node_id.clone()));
}
0x37 => { let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let is_affirm = match val {
Value::Trit(Trit::Affirm) => true,
Value::Int(1) => true,
_ => false,
};
if !is_affirm {
return Err(VmError::AssertionFailed);
}
}
0x38 => { let a_rows = self.read_u8()? as usize;
let a_cols = self.read_u8()? as usize;
let b_cols = self.read_u8()? as usize;
let b_ref = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a_ref = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) = (a_ref, b_ref) {
let (a_data, b_data) = {
let a = self.tensors.get(a_idx).ok_or(VmError::TensorNotAllocated(a_idx))?;
let b = self.tensors.get(b_idx).ok_or(VmError::TensorNotAllocated(b_idx))?;
let a_data = match &a.data {
TensorData::Trit(v) => v,
_ => return Err(VmError::TypeMismatch { expected: "TritTensor".into(), found: "Other".into() }),
};
let b_data = match &b.data {
TensorData::Trit(v) => v,
_ => return Err(VmError::TypeMismatch { expected: "TritTensor".into(), found: "Other".into() }),
};
(a_data.clone(), b_data.clone())
};
let mut result = vec![Trit::Tend; a_rows * b_cols];
let mut skipped = false;
for i in 0..a_rows {
for k in 0..a_cols {
let a_val = a_data[i * a_cols + k];
if a_val == Trit::Tend {
skipped = true;
continue;
}
for j in 0..b_cols {
let b_val = b_data[k * b_cols + j];
if b_val == Trit::Tend { continue; }
let prod = a_val * b_val;
let (sum, _) = result[i * b_cols + j] + prod;
result[i * b_cols + j] = sum;
}
}
}
if skipped { self.sparse_dropped = true; }
let res_idx = self.tensors.len();
self.tensors.push(TensorInstance {
data: TensorData::Trit(result),
rows: a_rows,
cols: b_cols,
});
self.stack.push(Value::TensorRef(res_idx));
} else {
return Err(VmError::TypeMismatch { expected: "TensorRef, TensorRef".into(), found: "Unknown".into() });
}
}
0x40 => { let num_fields = self.read_u8()? as usize;
let mut fields = std::collections::HashMap::new();
for _ in 0..num_fields {
let name_len = self.read_u8()? as usize;
let mut name_bytes = vec![0u8; name_len];
for i in 0..name_len { name_bytes[i] = self.read_u8()?; }
let name = String::from_utf8(name_bytes).unwrap();
let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
fields.insert(name, val);
}
self.stack.push(Value::Struct(fields));
}
0x41 => { let name_len = self.read_u8()? as usize;
let mut name_bytes = vec![0u8; name_len];
for i in 0..name_len { name_bytes[i] = self.read_u8()?; }
let name = String::from_utf8(name_bytes).unwrap();
let obj = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let Value::Struct(fields) = obj {
let val = fields.get(&name).cloned().unwrap_or_default();
self.stack.push(val);
} else {
return Err(VmError::TypeMismatch { expected: "Struct".into(), found: format!("{:?}", obj) });
}
}
0x42 => { let reg = self.read_u8()? as usize;
let view_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let Value::TensorView { .. } = view_val {
self.bindings.insert(reg, view_val);
} else {
return Err(VmError::TypeMismatch { expected: "TensorView".into(), found: format!("{:?}", view_val) });
}
}
0x55 => { let stride = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let length = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let offset = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let rf = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let (Value::Int(o), Value::Int(l), Value::Int(s)) = (&offset, &length, &stride) {
match rf {
Value::TensorRef(id) => {
self.stack.push(Value::TensorView {
tensor_id: id,
offset: *o as usize,
length: *l as usize,
stride: *s as usize,
});
}
Value::TensorView { tensor_id, offset: v_off, stride: v_stride, .. } => {
self.stack.push(Value::TensorView {
tensor_id,
offset: v_off + (*o as usize * v_stride),
length: *l as usize,
stride: v_stride * (*s as usize),
});
}
_ => return Err(VmError::TypeMismatch { expected: "TensorRef or TensorView".into(), found: format!("{:?}", rf) }),
}
} else {
return Err(VmError::TypeMismatch { expected: "Int, Int, Int".into(), found: format!("{:?}, {:?}, {:?}", offset, length, stride) });
}
}
0x50 => { let mut trits = [Trit::Tend; 5];
for i in (0..5).rev() {
let t = self.stack.pop().ok_or(VmError::StackUnderflow)?;
trits[i] = match t {
Value::Trit(tv) => tv,
_ => return Err(VmError::TypeMismatch { expected: "Trit".into(), found: format!("{:?}", t) }),
};
}
let packed = crate::trit::pack_5_trits(trits);
self.stack.push(Value::Int(packed as i64));
}
0x51 => { let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let Value::Int(packed) = val {
let trits = crate::trit::unpack_5_trits(packed as u8);
for t in trits {
self.stack.push(Value::Trit(t));
}
} else {
return Err(VmError::TypeMismatch { expected: "Int (packed byte)".into(), found: format!("{:?}", val) });
}
}
0x56 => { let rows = self.read_u32()? as usize;
let cols = self.read_u32()? as usize;
let size = rows * cols;
let num_bytes = (size + 4) / 5;
let idx = self.tensors.len();
self.tensors.push(TensorInstance {
data: TensorData::PackedTrit(vec![0x00; num_bytes], size), rows,
cols,
});
self.stack.push(Value::TensorRef(idx));
}
0x52 => { let b_ref = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a_ref = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) = (a_ref, b_ref) {
let res_idx = self.tensors.len();
let (rows, cols, data) = {
let a = self.tensors.get(a_idx).ok_or(VmError::TensorNotAllocated(a_idx))?;
let b = self.tensors.get(b_idx).ok_or(VmError::TensorNotAllocated(b_idx))?;
if a.rows != b.rows || a.cols != b.cols {
return Err(VmError::RuntimeError("Tensor dimension mismatch in TV_ADD".into()));
}
match (&a.data, &b.data) {
(TensorData::PackedTrit(av, alen), TensorData::PackedTrit(bv, _)) => {
let mut res_v = vec![0u8; av.len()];
for i in 0..av.len() {
res_v[i] = crate::trit::packed_add(av[i], bv[i]);
}
(a.rows, a.cols, TensorData::PackedTrit(res_v, *alen))
}
_ => return Err(VmError::TypeMismatch { expected: "PackedTrit tensors".into(), found: "Other".into() }),
}
};
self.tensors.push(TensorInstance { data, rows, cols });
self.stack.push(Value::TensorRef(res_idx));
} else {
return Err(VmError::TypeMismatch { expected: "TensorRef, TensorRef".into(), found: "Unknown".into() });
}
}
0x53 => { let a_ref = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let Value::TensorRef(idx) = a_ref {
let res_idx = self.tensors.len();
let (rows, cols, data) = {
let a = self.tensors.get(idx).ok_or(VmError::TensorNotAllocated(idx))?;
match &a.data {
TensorData::PackedTrit(v, len) => {
let mut res_v = vec![0u8; v.len()];
for i in 0..v.len() {
res_v[i] = crate::trit::packed_neg(v[i]);
}
(a.rows, a.cols, TensorData::PackedTrit(res_v, *len))
}
_ => return Err(VmError::TypeMismatch { expected: "PackedTrit tensor".into(), found: "Other".into() }),
}
};
self.tensors.push(TensorInstance { data, rows, cols });
self.stack.push(Value::TensorRef(res_idx));
} else {
return Err(VmError::TypeMismatch { expected: "TensorRef".into(), found: format!("{:?}", a_ref) });
}
}
0x54 => { let b_ref = self.stack.pop().ok_or(VmError::StackUnderflow)?;
let a_ref = self.stack.pop().ok_or(VmError::StackUnderflow)?;
if let (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) = (a_ref, b_ref) {
let res_idx = self.tensors.len();
let (rows, cols, data) = {
let a = self.tensors.get(a_idx).ok_or(VmError::TensorNotAllocated(a_idx))?;
let b = self.tensors.get(b_idx).ok_or(VmError::TensorNotAllocated(b_idx))?;
if a.rows != b.rows || a.cols != b.cols {
return Err(VmError::RuntimeError("Tensor dimension mismatch in TV_CON".into()));
}
match (&a.data, &b.data) {
(TensorData::PackedTrit(av, alen), TensorData::PackedTrit(bv, _)) => {
let mut res_v = vec![0u8; av.len()];
for i in 0..av.len() {
res_v[i] = crate::trit::packed_consensus(av[i], bv[i]);
}
(a.rows, a.cols, TensorData::PackedTrit(res_v, *alen))
}
_ => return Err(VmError::TypeMismatch { expected: "PackedTrit tensors".into(), found: "Other".into() }),
}
};
self.tensors.push(TensorInstance { data, rows, cols });
self.stack.push(Value::TensorRef(res_idx));
} else {
return Err(VmError::TypeMismatch { expected: "TensorRef, TensorRef".into(), found: "Unknown".into() });
}
}
0x00 => return Ok(()),
_ => return Err(VmError::InvalidOpcode(opcode)),
}
}
Ok(())
}
/// Fetch the single byte at the program counter and advance `pc` past it.
///
/// Returns `VmError::PcOutOfBounds` when `pc` has run off the end of the
/// loaded bytecode.
fn read_u8(&mut self) -> Result<u8, VmError> {
    let byte = *self.code.get(self.pc).ok_or(VmError::PcOutOfBounds(self.pc))?;
    self.pc += 1;
    Ok(byte)
}
/// Decode a little-endian `u16` immediate at the program counter and
/// advance `pc` by two bytes.
///
/// Returns `VmError::PcOutOfBounds` (reporting the starting `pc`) when
/// fewer than two bytes remain.
fn read_u16(&mut self) -> Result<u16, VmError> {
    let end = self.pc + 2;
    let bytes = self
        .code
        .get(self.pc..end)
        .ok_or(VmError::PcOutOfBounds(self.pc))?;
    let value = u16::from_le_bytes([bytes[0], bytes[1]]);
    self.pc = end;
    Ok(value)
}
/// Decode a little-endian `u32` immediate at the program counter and
/// advance `pc` by four bytes.
///
/// Returns `VmError::PcOutOfBounds` (reporting the starting `pc`) when
/// fewer than four bytes remain.
fn read_u32(&mut self) -> Result<u32, VmError> {
    let end = self.pc + 4;
    let bytes: [u8; 4] = self
        .code
        .get(self.pc..end)
        // The slice is exactly 4 bytes long here, so `try_into` cannot fail.
        .and_then(|s| s.try_into().ok())
        .ok_or(VmError::PcOutOfBounds(self.pc))?;
    self.pc = end;
    Ok(u32::from_le_bytes(bytes))
}
/// Resolve a `(row, col)` coordinate against a tensor reference or view,
/// yielding the backing tensor's id and the flat element position.
///
/// For a `TensorRef`, 2-D addressing (`row * cols + col`) is used only when
/// the tensor is genuinely 2-D (`cols > 1`) AND a non-negative column was
/// supplied; a negative `col` deliberately falls through to flat 1-D
/// addressing by `row` alone. For a `TensorView`, the position is computed
/// through the view's `offset` and `stride`; the view's `length` is not
/// checked here — bounds are the caller's responsibility.
///
/// Errors:
/// - `RuntimeError` if `row` is negative (previously `row as usize` silently
///   wrapped a negative `i64` into a huge index — that was a bug).
/// - `TensorNotAllocated` if a `TensorRef` points past the tensor table.
/// - `TypeMismatch` if `rf` is neither a `TensorRef` nor a `TensorView`.
fn get_pos(&self, rf: &Value, row: i64, col: i64) -> Result<(usize, usize), VmError> {
    // `row as usize` on a negative i64 wraps to an enormous value; fail cleanly instead.
    let row = usize::try_from(row)
        .map_err(|_| VmError::RuntimeError(format!("Negative tensor row index: {row}")))?;
    match rf {
        Value::TensorRef(idx) => {
            let tensor = self.tensors.get(*idx).ok_or(VmError::TensorNotAllocated(*idx))?;
            let pos = if tensor.cols > 1 && col >= 0 {
                row * tensor.cols + col as usize
            } else {
                row
            };
            Ok((*idx, pos))
        }
        Value::TensorView { tensor_id, offset, stride, .. } => {
            Ok((*tensor_id, *offset + row * *stride))
        }
        _ => Err(VmError::TypeMismatch { expected: "TensorRef or TensorView".into(), found: format!("{:?}", rf) }),
    }
}
}