
ternlang_core/vm/mod.rs

pub mod bet;

use crate::trit::Trit;
use crate::vm::bet::{unpack_trits, BetFault};

use std::fmt;
use std::sync::Arc;

// ─── Remote transport trait ───────────────────────────────────────────────────

/// Abstracts the TCP layer so `ternlang-core` doesn't depend on `ternlang-runtime`.
/// Implement this trait on `TernNode` in `ternlang-runtime`, then inject via
/// `BetVm::set_remote(Arc<dyn RemoteTransport>)`.
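///
/// A minimal sketch of an implementation (hypothetical `LoopbackTransport`, shown
/// only for illustration; the real transport lives on `TernNode` in
/// `ternlang-runtime`): it drops every send and always reports 0 (hold).
///
/// ```ignore
/// struct LoopbackTransport;
///
/// impl RemoteTransport for LoopbackTransport {
///     fn remote_send(&self, _node_addr: &str, _agent_id: usize, _trit: i8) -> std::io::Result<()> {
///         Ok(()) // fire-and-forget: nothing actually crosses the wire
///     }
///     fn remote_await(&self, _node_addr: &str, _agent_id: usize) -> std::io::Result<i8> {
///         Ok(0) // 0 = hold
///     }
/// }
///
/// vm.set_remote(std::sync::Arc::new(LoopbackTransport));
/// ```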
pub trait RemoteTransport: Send + Sync {
    /// Send a trit (-1/0/+1) to the specified remote agent's mailbox (fire-and-forget).
    fn remote_send(&self, node_addr: &str, agent_id: usize, trit: i8) -> std::io::Result<()>;
    /// Request the remote agent to process its mailbox and return the result trit.
    fn remote_await(&self, node_addr: &str, agent_id: usize) -> std::io::Result<i8>;
}

#[derive(Debug, PartialEq, Eq)]
pub enum VmError {
    StackUnderflow,
    BetFault(BetFault),
    Halt,
    InvalidOpcode(u8),
    InvalidRegister(u8),
    PcOutOfBounds(usize),
    TypeMismatch,
}

impl fmt::Display for VmError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            VmError::StackUnderflow =>
                write!(f, "[BET-001] Stack underflow — you tried to pop a truth that wasn't there."),
            VmError::BetFault(fault) =>
                write!(f, "[BET-002] BET encoding fault: {fault:?}. The 0b00 state is invalid — only -1, 0, +1 exist."),
            VmError::Halt =>
                write!(f, "[BET-003] VM halted cleanly. Execution reached the end."),
            VmError::InvalidOpcode(op) =>
                write!(f, "[BET-004] Unknown opcode 0x{op:02x} — the machine doesn't know this instruction. Conflict state."),
            VmError::InvalidRegister(reg) =>
                write!(f, "[BET-005] Register {reg} is out of range. The BET has 27 registers (0–26)."),
            VmError::PcOutOfBounds(pc) =>
                write!(f, "[BET-006] PC {pc} is out of bounds — you jumped outside the known universe. Recompile."),
            VmError::TypeMismatch =>
                write!(f, "[BET-007] Runtime type mismatch — a trit was expected but something else arrived."),
        }
    }
}

#[repr(u8)]
pub enum Opcode {
    Tpush(Trit) = 0x01,
    Tadd = 0x02,
    Tmul = 0x03,
    Tneg = 0x04,
    TjmpPos(u16) = 0x05,
    TjmpZero(u16) = 0x06,
    TjmpNeg(u16) = 0x07,
    Tstore(u8) = 0x08,
    Tload(u8) = 0x09,
    Tdup = 0x0a,
    Tjmp(u16) = 0x0b,
    Tpop = 0x0c,
    TloadCarry = 0x0d,
    Tcons = 0x0e,
    Talloc(u16) = 0x0f,
    Tcall(u16) = 0x10,  // Call function at address, push return addr to call stack
    Tret = 0x11,         // Return: pop call stack, jump back
    TnodeId = 0x12,      // Phase 5.1: Push current node address to stack
    TpushStr = 0x13,     // Phase 5.1: Push string literal
    Tless    = 0x14,     // Integer less-than: pop b, pop a → push trit(a < b)
    Tgreater = 0x15,     // Integer greater-than: pop b, pop a → push trit(a > b)
    Teq      = 0x16,     // Equality: pop b, pop a → push trit(a == b)
    Thalt = 0x00,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Value {
    Trit(Trit),
    Int(i64),
    String(String),
    TensorRef(usize),
    /// AgentRef { instance_id, node_addr }
    /// node_addr: None = local, Some("host:port") = remote
    AgentRef(usize, Option<String>),
}

impl Default for Value {
    fn default() -> Self {
        Value::Trit(Trit::Tend)
    }
}

/// A running agent instance.
/// v0.1: synchronous local actors — handler_addr is the bytecode address of the `handle` fn.
struct AgentInstance {
    handler_addr: usize,
    mailbox: std::collections::VecDeque<Value>,
}

pub struct BetVm {
    registers: [Value; 27],
    carry_reg: Trit,
    stack: Vec<Value>,
    call_stack: Vec<usize>,  // Return addresses for TCALL/TRET
    tensors: Vec<Vec<Trit>>, // Simple heap for now
    agents: Vec<AgentInstance>,
    /// agent_types[type_id] = handler_addr — registered at spawn time via TSPAWN
    agent_types: std::collections::HashMap<u16, usize>,
    pc: usize,
    code: Vec<u8>,
    /// Phase 5.1: The local node's address (returned by TNODEID)
    node_id: String,
    /// Phase 5.1: Optional remote transport for cross-node TSEND/TAWAIT
    remote: Option<Arc<dyn RemoteTransport>>,
    instructions_count: u64,
}

impl BetVm {
    pub fn new(code: Vec<u8>) -> Self {
        Self {
            registers: std::array::from_fn(|_| Value::default()),
            carry_reg: Trit::Tend,
            stack: Vec::new(),
            call_stack: Vec::new(),
            tensors: Vec::new(),
            agents: Vec::new(),
            agent_types: std::collections::HashMap::new(),
            pc: 0,
            code,
            node_id: "127.0.0.1:7373".to_string(), // Default
            remote: None,
            instructions_count: 0,
        }
    }

    pub fn set_node_id(&mut self, node_id: String) {
        self.node_id = node_id;
    }

    /// Inject a remote transport so TSEND/TAWAIT can cross node boundaries.
    pub fn set_remote(&mut self, transport: Arc<dyn RemoteTransport>) {
        self.remote = Some(transport);
    }

    /// Register an agent type (handler_addr) under a type_id.
    /// Called by the codegen runtime before spawning instances.
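    ///
    /// A minimal sketch (assuming `code` contains a `TSPAWN 0` and `handler_addr`
    /// points at a `TRET`-terminated handler inside that bytecode):
    ///
    /// ```ignore
    /// let mut vm = BetVm::new(code);
    /// vm.register_agent_type(0, handler_addr);
    /// vm.run()?;
    /// ```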
    pub fn register_agent_type(&mut self, type_id: u16, handler_addr: usize) {
        self.agent_types.insert(type_id, handler_addr);
    }

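    /// Fetch-decode-execute loop: runs until THALT, a top-level TRET, or the end
    /// of the code buffer. A minimal usage sketch (push +1, halt; the module paths
    /// are assumed here):
    ///
    /// ```ignore
    /// use ternlang_core::trit::Trit;
    /// use ternlang_core::vm::{BetVm, Value};
    /// use ternlang_core::vm::bet::pack_trits;
    ///
    /// let mut code = vec![0x01];                 // TPUSH
    /// code.extend(pack_trits(&[Trit::Affirm]));  // +1 operand
    /// code.push(0x00);                           // THALT
    ///
    /// let mut vm = BetVm::new(code);
    /// vm.run().unwrap();
    /// assert_eq!(vm.get_stack_top(), Some(Value::Trit(Trit::Affirm)));
    /// ```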
    pub fn run(&mut self) -> Result<(), VmError> {
        loop {
            self.instructions_count += 1;

            if self.pc >= self.code.len() {
                break;
            }

            let opcode = self.code[self.pc];
            self.pc += 1;

            match opcode {
                0x01 => { // Tpush
                    if self.pc >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let packed = self.code[self.pc];
                    self.pc += 1;
                    let trits = unpack_trits(&[packed], 1).map_err(VmError::BetFault)?;
                    self.stack.push(Value::Trit(trits[0]));
                }
                0x02 => { // Tadd
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a, b) {
                        (Value::Trit(av), Value::Trit(bv)) => {
                            let (sum, carry) = av + bv;
                            self.stack.push(Value::Trit(sum));
                            self.carry_reg = carry;
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x03 => { // Tmul
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a, b) {
                        (Value::Trit(av), Value::Trit(bv)) => {
                            self.stack.push(Value::Trit(av * bv));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x04 => { // Tneg
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match a {
                        Value::Trit(av) => self.stack.push(Value::Trit(-av)),
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x05 => { // TjmpPos
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if let Value::Trit(Trit::Affirm) = val {
                        self.pc = addr;
                    }
                }
                0x06 => { // TjmpZero
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if let Value::Trit(Trit::Tend) = val {
                        self.pc = addr;
                    }
                }
                0x07 => { // TjmpNeg
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if let Value::Trit(Trit::Reject) = val {
                        self.pc = addr;
                    }
                }
                0x08 => { // Tstore
                    if self.pc >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let reg = self.code[self.pc];
                    self.pc += 1;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if (reg as usize) >= self.registers.len() {
                        return Err(VmError::InvalidRegister(reg));
                    }
                    self.registers[reg as usize] = val;
                }
                0x09 => { // Tload
                    if self.pc >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let reg = self.code[self.pc];
                    self.pc += 1;
                    if (reg as usize) >= self.registers.len() {
                        return Err(VmError::InvalidRegister(reg));
                    }
                    self.stack.push(self.registers[reg as usize].clone());
                }
                0x0a => { // Tdup
                    let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
                    self.stack.push(val.clone());
                }
                0x0b => { // Tjmp
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc = addr;
                }
                0x0c => { // Tpop
                    self.stack.pop().ok_or(VmError::StackUnderflow)?;
                }
                0x0d => { // TloadCarry
                    self.stack.push(Value::Trit(self.carry_reg));
                }
                0x0e => { // Tcons (Consensus Addition)
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a, b) {
                        (Value::Trit(av), Value::Trit(bv)) => {
                            let (sum, _carry) = av + bv;
                            self.stack.push(Value::Trit(sum));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x0f => { // Talloc
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let size = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let idx = self.tensors.len();
                    self.tensors.push(vec![Trit::Tend; size]);
                    self.stack.push(Value::TensorRef(idx));
                }
                0x20 => { // TMATMUL — (tensor_ref_a, tensor_ref_b) → tensor_ref_result
                    let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a_val, b_val) {
                        (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) => {
                            // tensors stored flat row-major; infer square dims from len
                            let a_len = self.tensors[a_idx].len();
                            let b_len = self.tensors[b_idx].len();
                            let a_dim = (a_len as f64).sqrt() as usize;
                            let b_dim = (b_len as f64).sqrt() as usize;
                            if a_dim * a_dim != a_len || b_dim * b_dim != b_len || a_dim != b_dim {
                                return Err(VmError::TypeMismatch);
                            }
                            let n = a_dim;
                            let mut result = vec![Trit::Tend; n * n];
                            for row in 0..n {
                                for col in 0..n {
                                    let mut acc = Trit::Tend;
                                    for k in 0..n {
                                        let prod = self.tensors[a_idx][row * n + k]
                                            * self.tensors[b_idx][k * n + col];
                                        let (sum, _) = acc + prod;
                                        acc = sum;
                                    }
                                    result[row * n + col] = acc;
                                }
                            }
                            let out_idx = self.tensors.len();
                            self.tensors.push(result);
                            self.stack.push(Value::TensorRef(out_idx));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x21 => { // TSPARSE_MATMUL — matmul skipping zero-state weights (flagship)
                    let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a_val, b_val) {
                        (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) => {
                            let a_len = self.tensors[a_idx].len();
                            let n = (a_len as f64).sqrt() as usize;
                            let mut result = vec![Trit::Tend; n * n];
                            let mut skipped: usize = 0;
                            for row in 0..n {
                                for col in 0..n {
                                    let mut acc = Trit::Tend;
                                    for k in 0..n {
                                        let weight = self.tensors[b_idx][k * n + col];
                                        // SPARSE SKIP: zero weights contribute nothing — skip entirely
                                        if weight == Trit::Tend {
                                            skipped += 1;
                                            continue;
                                        }
                                        let prod = self.tensors[a_idx][row * n + k] * weight;
                                        let (sum, _) = acc + prod;
                                        acc = sum;
                                    }
                                    result[row * n + col] = acc;
                                }
                            }
                            let out_idx = self.tensors.len();
                            self.tensors.push(result);
                            // Push result ref and skipped count for observability
                            self.stack.push(Value::TensorRef(out_idx));
                            self.stack.push(Value::Int(skipped as i64));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x22 => { // TIDX — (tensor_ref, row, col) → trit
                    let col_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let row_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (ref_val, row_val, col_val) {
                        (Value::TensorRef(idx), Value::Int(row), Value::Int(col)) => {
                            let n = (self.tensors[idx].len() as f64).sqrt() as usize;
                            let pos = row as usize * n + col as usize;
                            self.stack.push(Value::Trit(self.tensors[idx][pos]));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x23 => { // TSET — (tensor_ref, row, col, trit_val) → stores in place
                    let trit_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let col_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let row_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (ref_val, row_val, col_val, trit_val) {
                        (Value::TensorRef(idx), Value::Int(row), Value::Int(col), Value::Trit(t)) => {
                            let n = (self.tensors[idx].len() as f64).sqrt() as usize;
                            let pos = row as usize * n + col as usize;
                            self.tensors[idx][pos] = t;
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x24 => { // TSHAPE — tensor_ref → pushes (Int(rows), Int(cols)) onto stack
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let len = self.tensors[idx].len();
                            let n = (len as f64).sqrt() as usize;
                            self.stack.push(Value::Int(n as i64)); // rows
                            self.stack.push(Value::Int(n as i64)); // cols
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x25 => { // TSPARSITY — tensor_ref → Int(zero_count) on stack
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let zeros = self.tensors[idx].iter()
                                .filter(|&&t| t == Trit::Tend)
                                .count();
                            self.stack.push(Value::Int(zeros as i64));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x10 => { // Tcall
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    self.call_stack.push(self.pc); // push return address
                    self.pc = addr;
                }
                0x11 => { // Tret
                    match self.call_stack.pop() {
                        Some(return_addr) => self.pc = return_addr,
                        None => return Ok(()), // top-level return = halt
                    }
                }
                // ── Tensor compression opcodes ───────────────────────────────────
                0x26 => { // TCOMPRESS — TensorRef → TensorRef (run-length compressed)
                    // Run-length encoding of a sparse trit tensor.
                    // Format: sentinel header plus (value, count) triples; see `rle_compress`.
                    // Compressed tensor is stored as a new entry in self.tensors.
                    // Stack: tensor_ref → compressed_ref
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let src = self.tensors[idx].clone();
                            let compressed = rle_compress(&src);
                            let new_idx = self.tensors.len();
                            self.tensors.push(compressed);
                            self.stack.push(Value::TensorRef(new_idx));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x27 => { // TUNPACK — compressed TensorRef → TensorRef (restored)
                    // Decodes a run-length compressed tensor back to dense form.
                    // Stack: compressed_ref → restored_ref
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let src = self.tensors[idx].clone();
                            let unpacked = rle_decompress(&src);
                            let new_idx = self.tensors.len();
                            self.tensors.push(unpacked);
                            self.stack.push(Value::TensorRef(new_idx));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // ── Actor opcodes ────────────────────────────────────────────────
                0x30 => { // TSPAWN type_id:u16 — create local agent instance, push AgentRef
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let type_id = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]);
                    self.pc += 2;
                    let handler_addr = *self.agent_types.get(&type_id)
                        .ok_or(VmError::InvalidOpcode(0x30))?;
                    let instance_id = self.agents.len();
                    self.agents.push(AgentInstance {
                        handler_addr,
                        mailbox: std::collections::VecDeque::new(),
                    });
                    self.stack.push(Value::AgentRef(instance_id, None));
                }
                0x33 => { // TREMOTE_SPAWN (addr:String, type_id:u16) -> AgentRef(id, Some(addr))
                    let addr_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
                    let type_id = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]);
                    self.pc += 2;
                    if let Value::String(addr) = addr_val {
                        // For v0.1: we don't actually trigger network spawn here.
                        // We just push the remote AgentRef. The runtime (TernNode)
                        // will handle the real network call when TSEND/TAWAIT is used.
                        self.stack.push(Value::AgentRef(type_id as usize, Some(addr)));
                    } else {
                        return Err(VmError::TypeMismatch);
                    }
                }
                0x31 => { // TSEND — (AgentRef, message) → push to mailbox
                    let message = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let agent_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match agent_val {
                        Value::AgentRef(id, None) => {
                            self.agents[id].mailbox.push_back(message);
                        }
                        Value::AgentRef(id, Some(addr)) => {
                            // Phase 5.1: Remote TSEND via injected RemoteTransport.
                            if let Some(rt) = &self.remote {
                                let trit_i8 = match message {
                                    Value::Trit(Trit::Affirm) =>  1i8,
                                    Value::Trit(Trit::Reject) => -1i8,
                                    _                         =>  0i8,
                                };
                                rt.remote_send(&addr, id, trit_i8)
                                    .map_err(|_| VmError::TypeMismatch)?;
                            }
                            // If no transport configured: silent no-op (local-only mode).
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x32 => { // TAWAIT — AgentRef → pop mailbox, call handler, push result
                    let agent_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match agent_val {
                        Value::AgentRef(id, None) => {
                            let message = self.agents[id].mailbox.pop_front()
                                .unwrap_or(Value::Trit(Trit::Tend)); // empty mailbox → hold
                            let handler_addr = self.agents[id].handler_addr;
                            // Push message as argument, then TCALL the handler.
                            self.stack.push(message);
                            self.call_stack.push(self.pc); // return to after TAWAIT
                            self.pc = handler_addr;
                        }
                        Value::AgentRef(id, Some(addr)) => {
                            // Phase 5.1: Remote TAWAIT via injected RemoteTransport.
                            let result = if let Some(rt) = &self.remote {
                                rt.remote_await(&addr, id)
                                    .map(|v| match v {
                                        1  => Trit::Affirm,
                                        -1 => Trit::Reject,
                                        _  => Trit::Tend,
                                    })
                                    .unwrap_or(Trit::Tend)
                            } else {
                                Trit::Tend // hold: no transport configured
                            };
                            self.stack.push(Value::Trit(result));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                0x12 => { // TNODEID — push local node address
                    self.stack.push(Value::String(self.node_id.clone()));
                }
                0x14 => { // TLESS — integer less-than: pop b, pop a → push trit(a < b)
                    let b = self.stack.pop().unwrap_or(Value::Int(0));
                    let a = self.stack.pop().unwrap_or(Value::Int(0));
                    let result = match (a, b) {
                        (Value::Int(x), Value::Int(y)) => {
                            if x < y { Trit::Affirm } else if x == y { Trit::Tend } else { Trit::Reject }
                        }
                        _ => Trit::Tend,
                    };
                    self.stack.push(Value::Trit(result));
                }
                0x15 => { // TGREATER — integer greater-than: pop b, pop a → push trit(a > b)
                    let b = self.stack.pop().unwrap_or(Value::Int(0));
                    let a = self.stack.pop().unwrap_or(Value::Int(0));
                    let result = match (a, b) {
                        (Value::Int(x), Value::Int(y)) => {
                            if x > y { Trit::Affirm } else if x == y { Trit::Tend } else { Trit::Reject }
                        }
                        _ => Trit::Tend,
                    };
                    self.stack.push(Value::Trit(result));
                }
                0x16 => { // TEQ — equality: pop b, pop a → push trit(a == b)
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let result = if a == b { Trit::Affirm } else { Trit::Reject };
                    self.stack.push(Value::Trit(result));
                }
                // ─────────────────────────────────────────────────────────────────

                0x00 => return Ok(()), // Thalt
                _ => return Err(VmError::InvalidOpcode(opcode)),
            }
        }
        Ok(())
    }

    pub fn get_register(&self, reg: usize) -> Value {
        self.registers[reg].clone()
    }

    pub fn get_stack_top(&self) -> Option<Value> {
        self.stack.last().cloned()
    }

    pub fn get_tensor(&self, idx: usize) -> Option<&Vec<Trit>> {
        self.tensors.get(idx)
    }

    pub fn peek_stack(&self) -> Option<Value> {
        self.stack.last().cloned()
    }
}

// ─── Run-length compression helpers (used by TCOMPRESS / TUNPACK) ────────────

/// Run-length encode a trit slice.
///
/// The compressed form is stored in the tensor heap as a flat `Vec<Trit>`:
///   * a header sentinel (`Trit::Reject`) marking the tensor as compressed,
///   * followed by `(value, count_high, count_low)` triples, where the run length
///     is `count_high * 3 + count_low` in base 3 (digit encoding: `Tend` = 0,
///     `Affirm` = 1, `Reject` = 2), so one triple covers a run of 1 to 8 trits.
///
/// Runs longer than 8 are split across multiple triples (runs are capped at 255
/// while scanning, then chunked). `rle_decompress` reverses the encoding.
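///
/// Worked example (derived from the scheme above): the slice
/// `[Affirm, Affirm, Affirm, Tend, Tend, Reject]` compresses to
///
/// ```text
/// [Reject,                  // header sentinel
///  Affirm, Affirm, Tend,    // run of 3 × Affirm  (3 = 1*3 + 0)
///  Tend,   Tend,   Reject,  // run of 2 × Tend    (2 = 0*3 + 2)
///  Reject, Tend,   Affirm]  // run of 1 × Reject  (1 = 0*3 + 1)
/// ```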
pub fn rle_compress(src: &[Trit]) -> Vec<Trit> {
    if src.is_empty() { return vec![]; }
    let mut out = Vec::new();
    // Header: Reject (-1) sentinel marks this as a compressed tensor
    out.push(Trit::Reject);

    let mut i = 0;
    while i < src.len() {
        let val = src[i];
        let mut run = 1usize;
        while i + run < src.len() && src[i + run] == val && run < 255 {
            run += 1;
        }
        // Encode the run as (value, count_high, count_low) triples, count in base 3.
        // A single triple covers at most 8; longer runs emit additional triples.
        let mut remaining = run;
        while remaining > 0 {
            let chunk = remaining.min(8); // max 8 per triple (2×3+2, the largest 2-digit base-3 value)
            out.push(val);
            out.push(int_to_trit((chunk / 3) as i8)); // high trit
            out.push(int_to_trit((chunk % 3) as i8)); // low trit
            remaining -= chunk;
        }
        i += run;
    }
    out
}

/// Decode a run-length encoded trit slice back to dense form.
pub fn rle_decompress(src: &[Trit]) -> Vec<Trit> {
    if src.is_empty() { return vec![]; }
    // Check header sentinel
    if src[0] != Trit::Reject { return src.to_vec(); } // not compressed
    let mut out = Vec::new();
    let mut i = 1;
    while i + 2 < src.len() {
        let val   = src[i];
        let hi    = trit_to_int(src[i + 1]) as usize;
        let lo    = trit_to_int(src[i + 2]) as usize;
        let count = hi * 3 + lo;
        for _ in 0..count.max(1) { out.push(val); }
        i += 3;
    }
    out
}

fn int_to_trit(v: i8) -> Trit {
    match v {
        0 => Trit::Tend,
        1 => Trit::Affirm,
        _ => Trit::Reject,
    }
}

fn trit_to_int(t: Trit) -> i8 {
    match t {
        Trit::Tend   => 0,
        Trit::Affirm => 1,
        Trit::Reject => 2, // used as digit '2' in base-3 run-length
    }
}

#[cfg(test)]
mod tensor_tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    fn push_trit(code: &mut Vec<u8>, t: Trit) {
        code.push(0x01);
        code.extend(pack_trits(&[t]));
    }

    fn talloc(code: &mut Vec<u8>, size: u16) {
        code.push(0x0f);
        code.extend_from_slice(&size.to_le_bytes());
    }

    #[test]
    fn test_tsparsity() {
        // Allocate a 2x2 tensor (4 trits), all Zero → sparsity = 4
        let mut code = Vec::new();
        talloc(&mut code, 4); // TALLOC 4 → TensorRef(0) on stack
        code.push(0x08); code.push(0x00); // TSTORE reg0
        code.push(0x09); code.push(0x00); // TLOAD reg0
        code.push(0x25); // TSPARSITY → Int(4)
        code.push(0x08); code.push(0x01); // TSTORE reg1
        code.push(0x00); // THALT

        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(1), Value::Int(4));
    }

    #[test]
    fn test_tshape() {
        // Allocate a 4-element (2x2) tensor, check shape returns 2, 2
        let mut code = Vec::new();
        talloc(&mut code, 4);
        code.push(0x08); code.push(0x00); // TSTORE reg0
        code.push(0x09); code.push(0x00); // TLOAD reg0
        code.push(0x24); // TSHAPE → Int(rows), Int(cols)
        code.push(0x08); code.push(0x02); // TSTORE cols → reg2
        code.push(0x08); code.push(0x01); // TSTORE rows → reg1
        code.push(0x00); // THALT

        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(1), Value::Int(2)); // rows
        assert_eq!(vm.get_register(2), Value::Int(2)); // cols
    }

    #[test]
    fn test_tsparse_matmul_skips_zeros() {
        // Two 1x1 all-zero tensors: TSPARSE_MATMUL should skip the one multiply
        // and produce a zero result with skipped_count = 1
        let mut code = Vec::new();
        talloc(&mut code, 1); // TensorRef(0) = A, all zeros
        code.push(0x08); code.push(0x00); // TSTORE reg0
        talloc(&mut code, 1); // TensorRef(1) = W, all zeros
        code.push(0x08); code.push(0x01); // TSTORE reg1

        code.push(0x09); code.push(0x00); // TLOAD A ref → stack
        code.push(0x09); code.push(0x01); // TLOAD W ref → stack
        code.push(0x21); // TSPARSE_MATMUL → pushes TensorRef(result), then Int(skipped)
        code.push(0x08); code.push(0x03); // TSTORE skipped_count → reg3
        code.push(0x08); code.push(0x02); // TSTORE result_ref → reg2
        code.push(0x00); // THALT

        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        // W is all-zero, so the single multiply was skipped
        assert_eq!(vm.get_register(3), Value::Int(1));
        // Result tensor should be zero
        let result_ref = match vm.get_register(2) {
            Value::TensorRef(i) => i,
            _ => panic!("expected TensorRef"),
        };
        assert_eq!(vm.get_tensor(result_ref).unwrap()[0], Trit::Tend);
    }
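
    // A small companion to the sparse test above: a sketch that only exercises the
    // trivially safe all-zero case of dense TMATMUL (0x20).
    #[test]
    fn test_tmatmul_zero_tensors() {
        // Multiplying two all-zero 2x2 tensors should yield an all-zero 2x2 result.
        let mut code = Vec::new();
        talloc(&mut code, 4); // TensorRef(0) = A, all zeros
        code.push(0x08); code.push(0x00); // TSTORE reg0
        talloc(&mut code, 4); // TensorRef(1) = B, all zeros
        code.push(0x08); code.push(0x01); // TSTORE reg1

        code.push(0x09); code.push(0x00); // TLOAD A ref → stack
        code.push(0x09); code.push(0x01); // TLOAD B ref → stack
        code.push(0x20); // TMATMUL → TensorRef(result)
        code.push(0x08); code.push(0x02); // TSTORE result_ref → reg2
        code.push(0x00); // THALT

        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        let result_ref = match vm.get_register(2) {
            Value::TensorRef(i) => i,
            _ => panic!("expected TensorRef"),
        };
        let result = vm.get_tensor(result_ref).unwrap();
        assert_eq!(result.len(), 4);
        assert!(result.iter().all(|&t| t == Trit::Tend));
    }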
}

#[cfg(test)]
mod actor_tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    /// Integration test: spawn an agent, send a trit message, await the reply.
    /// The handler is an identity function: handle(msg: trit) → msg.
    ///
    /// Bytecode layout:
    ///   [0x00] TJMP → entry_point          (skip over handler body)
    ///   [handler_addr]: TRET               (the message argument is already on the
    ///                                       stack when TAWAIT calls the handler)
    ///   [entry_point]: TSPAWN type_id=0    → AgentRef(0)
    ///                  TSTORE reg0
    ///                  TLOAD  reg0         → AgentRef(0)
    ///                  TPUSH +1            → message
    ///                  TSEND               → sends +1 to the agent's mailbox
    ///                  TLOAD  reg0         → AgentRef(0)
    ///                  TAWAIT              → pops mailbox, calls handler, pushes result
    ///                  TSTORE reg1         → result (+1) in reg1
    ///                  THALT
    #[test]
    fn test_actor_spawn_send_await() {
        let mut code = Vec::new();

        // [0] TJMP over handler (3 bytes total, patch after)
        let jmp_patch = code.len() + 1;
        code.push(0x0b); // TJMP
        code.extend_from_slice(&[0u8, 0u8]);

        // [3] Handler: identity — the message is already on the stack when TAWAIT calls us.
        //     Just TRET — leaves the message as the return value on the stack.
        let handler_addr = code.len();
        code.push(0x11); // TRET

        // Patch the TJMP to land here
        let entry = code.len() as u16;
        let bytes = entry.to_le_bytes();
        code[jmp_patch] = bytes[0];
        code[jmp_patch + 1] = bytes[1];

        // [entry] TSPAWN type_id=0 → AgentRef(0) on stack
        code.push(0x30); code.extend_from_slice(&0u16.to_le_bytes());
        code.push(0x08); code.push(0x00); // TSTORE reg0

        // TLOAD reg0 → AgentRef, TPUSH +1 → message, TSEND
        code.push(0x09); code.push(0x00); // TLOAD reg0
        code.push(0x01); code.extend(pack_trits(&[Trit::Affirm])); // TPUSH +1
        code.push(0x31); // TSEND

        // TLOAD reg0 → AgentRef, TAWAIT
        code.push(0x09); code.push(0x00); // TLOAD reg0
        code.push(0x32); // TAWAIT → calls handler with message on stack
        code.push(0x08); code.push(0x01); // TSTORE reg1
        code.push(0x00); // THALT

        let mut vm = BetVm::new(code);
        vm.register_agent_type(0, handler_addr);
        vm.run().unwrap();

        // The agent echoed +1 back → reg1 = Affirm
        assert_eq!(vm.get_register(1), Value::Trit(Trit::Affirm));
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    #[test]
    fn test_vm_addition() {
        // Tpush 1, Tpush 1, Tadd, Tstore 0, TloadCarry, Tstore 1, Thalt
        let mut code = vec![0x01];
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x01);
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x02); // Tadd
        code.push(0x08); // Tstore 0
        code.push(0x00);
        code.push(0x0d); // TloadCarry
        code.push(0x08); // Tstore 1
        code.push(0x01);
        code.push(0x00); // Thalt

        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(0), Value::Trit(Trit::Reject)); // Sum
        assert_eq!(vm.get_register(1), Value::Trit(Trit::Affirm)); // Carry
    }
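
    #[test]
    fn test_tless_via_sparsity_counts() {
        // Sketch of a check for TLESS (0x14). No opcode here pushes a raw Int, so
        // the simplest way to get Ints on the stack is TSPARSITY: compare the
        // zero-counts of a 4-element and a 9-element all-zero tensor (4 < 9 → Affirm).
        let mut code = Vec::new();
        code.push(0x0f); code.extend_from_slice(&4u16.to_le_bytes()); // TALLOC 4 → TensorRef(0)
        code.push(0x25);                                              // TSPARSITY → Int(4)
        code.push(0x0f); code.extend_from_slice(&9u16.to_le_bytes()); // TALLOC 9 → TensorRef(1)
        code.push(0x25);                                              // TSPARSITY → Int(9)
        code.push(0x14);                                              // TLESS: 4 < 9 → Affirm
        code.push(0x08); code.push(0x00);                             // TSTORE reg0
        code.push(0x00);                                              // THALT

        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(0), Value::Trit(Trit::Affirm));
    }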
}

#[cfg(test)]
mod compress_tests {
    use super::*;
    use crate::trit::Trit;

    #[test]
    fn test_rle_compress_all_zeros() {
        let src = vec![Trit::Tend; 9];
        let c = rle_compress(&src);
        // Must start with sentinel and be shorter than raw
        assert_eq!(c[0], Trit::Reject);
        assert!(c.len() < src.len(), "compressed should be shorter than 9 zeros");
    }

    #[test]
    fn test_rle_roundtrip_uniform() {
        let src = vec![Trit::Affirm; 6];
        let compressed = rle_compress(&src);
        let restored   = rle_decompress(&compressed);
        assert_eq!(restored, src, "roundtrip must be lossless");
    }

    #[test]
    fn test_rle_roundtrip_mixed() {
        let src = vec![
            Trit::Affirm, Trit::Affirm, Trit::Affirm,
            Trit::Tend,   Trit::Tend,
            Trit::Reject,
            Trit::Tend,   Trit::Tend,   Trit::Tend,
        ];
        let compressed = rle_compress(&src);
        let restored   = rle_decompress(&compressed);
        assert_eq!(restored, src, "roundtrip must be lossless for mixed tensor");
    }

    #[test]
    fn test_rle_compress_single_element() {
        let src = vec![Trit::Reject];
        let c = rle_compress(&src);
        let r = rle_decompress(&c);
        assert_eq!(r, src);
    }

    #[test]
    fn test_tcompress_tunpack_opcodes() {
        // Test TCOMPRESS (0x26) and TUNPACK (0x27) via VM bytecode.
        // Strategy: TALLOC a sparse tensor, compress it, unpack it,
        // check TSPARSITY is preserved. Use a pre-filled tensor (all zeros = maximum sparsity).
        let mut code = Vec::new();

        // TALLOC 9 elements (all zero by default)
        code.push(0x0f);
        code.extend_from_slice(&9u16.to_le_bytes());
        code.push(0x08); code.push(0x00); // TSTORE r0

        // TCOMPRESS r0 → compressed ref in r1
        code.push(0x09); code.push(0x00); // TLOAD r0
        code.push(0x26);                   // TCOMPRESS
        code.push(0x08); code.push(0x01); // TSTORE r1

        // TUNPACK r1 → restored ref in r2
        code.push(0x09); code.push(0x01); // TLOAD r1
        code.push(0x27);                   // TUNPACK
        code.push(0x08); code.push(0x02); // TSTORE r2

        // TSPARSITY on restored tensor → should be 9 (all zeros)
        code.push(0x09); code.push(0x02); // TLOAD r2
        code.push(0x25);                   // TSPARSITY
        code.push(0x08); code.push(0x03); // TSTORE r3

        code.push(0x00); // THALT

        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        // All 9 elements should still be zero after compress→unpack
        let sparsity = vm.get_register(3);
        assert!(matches!(sparsity, Value::Int(n) if n >= 9),
            "restored tensor should have 9 zero elements, got {:?}", sparsity);
    }
923}