pub mod bet;

use crate::trit::Trit;
use crate::vm::bet::{unpack_trits, BetFault};

use std::fmt;
use std::sync::Arc;

/// Transport used by the VM to reach agents running on other nodes.
pub trait RemoteTransport: Send + Sync {
    /// Deliver a trit message to agent `agent_id` on the node at `node_addr`.
    fn remote_send(&self, node_addr: &str, agent_id: usize, trit: i8) -> std::io::Result<()>;
    /// Request a trit reply from agent `agent_id` on the node at `node_addr`.
    fn remote_await(&self, node_addr: &str, agent_id: usize) -> std::io::Result<i8>;
}

#[derive(Debug, PartialEq, Eq)]
pub enum VmError {
    StackUnderflow,
    BetFault(BetFault),
    Halt,
    InvalidOpcode(u8),
    InvalidRegister(u8),
    PcOutOfBounds(usize),
    TypeMismatch,
}

impl fmt::Display for VmError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            VmError::StackUnderflow =>
                write!(f, "[BET-001] Stack underflow — you tried to pop a truth that wasn't there."),
            VmError::BetFault(fault) =>
                write!(f, "[BET-002] BET encoding fault: {fault:?}. The 0b00 state is invalid — only -1, 0, +1 exist."),
            VmError::Halt =>
                write!(f, "[BET-003] VM halted cleanly. Execution reached the end."),
            VmError::InvalidOpcode(op) =>
                write!(f, "[BET-004] Unknown opcode 0x{op:02x} — the machine doesn't know this instruction. Conflict state."),
            VmError::InvalidRegister(reg) =>
                write!(f, "[BET-005] Register {reg} is out of range. The BET has 27 registers (0–26)."),
            VmError::PcOutOfBounds(pc) =>
                write!(f, "[BET-006] PC {pc} is out of bounds — you jumped outside the known universe. Recompile."),
            VmError::TypeMismatch =>
                write!(f, "[BET-007] Runtime type mismatch — a trit was expected but something else arrived."),
        }
    }
}

#[repr(u8)]
pub enum Opcode {
    Tpush(Trit) = 0x01,
    Tadd = 0x02,
    Tmul = 0x03,
    Tneg = 0x04,
    TjmpPos(u16) = 0x05,
    TjmpZero(u16) = 0x06,
    TjmpNeg(u16) = 0x07,
    Tstore(u8) = 0x08,
    Tload(u8) = 0x09,
    Tdup = 0x0a,
    Tjmp(u16) = 0x0b,
    Tpop = 0x0c,
    TloadCarry = 0x0d,
    Tcons = 0x0e,
    Talloc(u16) = 0x0f,
    Tcall(u16) = 0x10,
    Tret = 0x11,
    TnodeId = 0x12,
    TpushStr = 0x13,
    Tless = 0x14,
    Tgreater = 0x15,
    Teq = 0x16,
    Thalt = 0x00,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Value {
    Trit(Trit),
    Int(i64),
    String(String),
    TensorRef(usize),
    // Local instance id with no address, or type id plus a remote node address.
    AgentRef(usize, Option<String>),
}

impl Default for Value {
    fn default() -> Self {
        Value::Trit(Trit::Tend)
    }
}

struct AgentInstance {
    handler_addr: usize,
    mailbox: std::collections::VecDeque<Value>,
}

pub struct BetVm {
    registers: [Value; 27],
    carry_reg: Trit, // carry latched by the last TADD
    stack: Vec<Value>,
    call_stack: Vec<usize>,
    tensors: Vec<Vec<Trit>>,
    agents: Vec<AgentInstance>,
    agent_types: std::collections::HashMap<u16, usize>, // agent type id -> handler address
    pc: usize,
    code: Vec<u8>,
    node_id: String, // this node's address, as pushed by the TnodeId opcode
    remote: Option<Arc<dyn RemoteTransport>>,
    instructions_count: u64,
}

impl BetVm {
    pub fn new(code: Vec<u8>) -> Self {
        Self {
            registers: std::array::from_fn(|_| Value::default()),
            carry_reg: Trit::Tend,
            stack: Vec::new(),
            call_stack: Vec::new(),
            tensors: Vec::new(),
            agents: Vec::new(),
            agent_types: std::collections::HashMap::new(),
            pc: 0,
            code,
            node_id: "127.0.0.1:7373".to_string(),
            remote: None,
            instructions_count: 0,
        }
    }

    pub fn set_node_id(&mut self, node_id: String) {
        self.node_id = node_id;
    }

    pub fn set_remote(&mut self, transport: Arc<dyn RemoteTransport>) {
        self.remote = Some(transport);
    }

    pub fn register_agent_type(&mut self, type_id: u16, handler_addr: usize) {
        self.agent_types.insert(type_id, handler_addr);
    }

    pub fn run(&mut self) -> Result<(), VmError> {
        loop {
            self.instructions_count += 1;

            if self.pc >= self.code.len() {
                break;
            }

            let opcode = self.code[self.pc];
            self.pc += 1;

            match opcode {
                // TPUSH: decode one packed trit from the byte stream and push it.
                0x01 => {
                    if self.pc >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let packed = self.code[self.pc];
                    self.pc += 1;
                    let trits = unpack_trits(&[packed], 1).map_err(VmError::BetFault)?;
                    self.stack.push(Value::Trit(trits[0]));
                }
                // TADD: pop two trits, push the balanced-ternary sum, latch the carry.
                0x02 => {
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a, b) {
                        (Value::Trit(av), Value::Trit(bv)) => {
                            let (sum, carry) = av + bv;
                            self.stack.push(Value::Trit(sum));
                            self.carry_reg = carry;
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TMUL: pop two trits, push their product.
                0x03 => {
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a, b) {
                        (Value::Trit(av), Value::Trit(bv)) => {
                            self.stack.push(Value::Trit(av * bv));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TNEG: negate the top trit.
                0x04 => {
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match a {
                        Value::Trit(av) => self.stack.push(Value::Trit(-av)),
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TJMP_POS: jump to the operand address if the popped trit is Affirm.
                0x05 => {
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if let Value::Trit(Trit::Affirm) = val {
                        self.pc = addr;
                    }
                }
                // TJMP_ZERO: jump if the popped trit is Tend.
                0x06 => {
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if let Value::Trit(Trit::Tend) = val {
                        self.pc = addr;
                    }
                }
                // TJMP_NEG: jump if the popped trit is Reject.
                0x07 => {
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if let Value::Trit(Trit::Reject) = val {
                        self.pc = addr;
                    }
                }
                // TSTORE: pop a value into the register named by the operand byte.
                0x08 => {
                    if self.pc >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let reg = self.code[self.pc];
                    self.pc += 1;
                    let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if (reg as usize) >= self.registers.len() {
                        return Err(VmError::InvalidRegister(reg));
                    }
                    self.registers[reg as usize] = val;
                }
                // TLOAD: push a copy of the named register.
                0x09 => {
                    if self.pc >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let reg = self.code[self.pc];
                    self.pc += 1;
                    if (reg as usize) >= self.registers.len() {
                        return Err(VmError::InvalidRegister(reg));
                    }
                    self.stack.push(self.registers[reg as usize].clone());
                }
                // TDUP: duplicate the top of the stack.
                0x0a => {
                    let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
                    self.stack.push(val.clone());
                }
                // TJMP: unconditional jump.
                0x0b => {
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc = addr;
                }
                // TPOP: discard the top of the stack.
                0x0c => {
                    self.stack.pop().ok_or(VmError::StackUnderflow)?;
                }
                // TLOAD_CARRY: push the carry latched by the last TADD.
                0x0d => {
                    self.stack.push(Value::Trit(self.carry_reg));
                }
                // TCONS: pop two trits and push their sum, discarding the carry.
                0x0e => {
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a, b) {
                        (Value::Trit(av), Value::Trit(bv)) => {
                            let (sum, _carry) = av + bv;
                            self.stack.push(Value::Trit(sum));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TALLOC: allocate a zeroed tensor of the operand size and push its reference.
                0x0f => {
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let size = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    let idx = self.tensors.len();
                    self.tensors.push(vec![Trit::Tend; size]);
                    self.stack.push(Value::TensorRef(idx));
                }
                // TMATMUL (dense): pop two square TensorRefs and push the product tensor.
                0x20 => {
                    let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a_val, b_val) {
                        (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) => {
                            let a_len = self.tensors[a_idx].len();
                            let b_len = self.tensors[b_idx].len();
                            let a_dim = (a_len as f64).sqrt() as usize;
                            let b_dim = (b_len as f64).sqrt() as usize;
                            // Both operands must be square matrices of the same dimension.
                            if a_dim * a_dim != a_len || b_dim * b_dim != b_len || a_dim != b_dim {
                                return Err(VmError::TypeMismatch);
                            }
                            let n = a_dim;
                            let mut result = vec![Trit::Tend; n * n];
                            for row in 0..n {
                                for col in 0..n {
                                    let mut acc = Trit::Tend;
                                    for k in 0..n {
                                        // Accumulate a[row][k] * b[k][col], discarding carries,
                                        // mirroring the sparse variant (0x21) below.
                                        let prod = self.tensors[a_idx][row * n + k]
                                            * self.tensors[b_idx][k * n + col];
                                        let (sum, _) = acc + prod;
                                        acc = sum;
                                    }
                                    result[row * n + col] = acc;
                                }
                            }
                            let out_idx = self.tensors.len();
                            self.tensors.push(result);
                            self.stack.push(Value::TensorRef(out_idx));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TSPARSE_MATMUL: like TMATMUL, but skips zero weights and also
                // pushes the number of multiplications skipped.
                0x21 => {
                    let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (a_val, b_val) {
                        (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) => {
                            let a_len = self.tensors[a_idx].len();
                            let n = (a_len as f64).sqrt() as usize;
                            let mut result = vec![Trit::Tend; n * n];
                            let mut skipped: usize = 0;
                            for row in 0..n {
                                for col in 0..n {
                                    let mut acc = Trit::Tend;
                                    for k in 0..n {
                                        let weight = self.tensors[b_idx][k * n + col];
                                        if weight == Trit::Tend {
                                            skipped += 1;
                                            continue;
                                        }
                                        let prod = self.tensors[a_idx][row * n + k] * weight;
                                        let (sum, _) = acc + prod;
                                        acc = sum;
                                    }
                                    result[row * n + col] = acc;
                                }
                            }
                            let out_idx = self.tensors.len();
                            self.tensors.push(result);
                            self.stack.push(Value::TensorRef(out_idx));
                            self.stack.push(Value::Int(skipped as i64));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TGET: pop col, row, and a tensor ref; push the trit at (row, col).
                0x22 => {
                    let col_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let row_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (ref_val, row_val, col_val) {
                        (Value::TensorRef(idx), Value::Int(row), Value::Int(col)) => {
                            let n = (self.tensors[idx].len() as f64).sqrt() as usize;
                            let pos = row as usize * n + col as usize;
                            self.stack.push(Value::Trit(self.tensors[idx][pos]));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TSET: pop a trit, col, row, and a tensor ref; write the trit at (row, col).
                0x23 => {
                    let trit_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let col_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let row_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match (ref_val, row_val, col_val, trit_val) {
                        (Value::TensorRef(idx), Value::Int(row), Value::Int(col), Value::Trit(t)) => {
                            let n = (self.tensors[idx].len() as f64).sqrt() as usize;
                            let pos = row as usize * n + col as usize;
                            self.tensors[idx][pos] = t;
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TSHAPE: pop a tensor ref; push its square dimension twice (rows, cols).
                0x24 => {
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let len = self.tensors[idx].len();
                            let n = (len as f64).sqrt() as usize;
                            self.stack.push(Value::Int(n as i64));
                            self.stack.push(Value::Int(n as i64));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TSPARSITY: pop a tensor ref; push the number of zero (Tend) elements.
                0x25 => {
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let zeros = self.tensors[idx].iter()
                                .filter(|&&t| t == Trit::Tend)
                                .count();
                            self.stack.push(Value::Int(zeros as i64));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TCALL: push the return address and jump to the operand address.
                0x10 => {
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
                    self.pc += 2;
                    self.call_stack.push(self.pc);
                    self.pc = addr;
                }
                // TRET: return to the caller; with an empty call stack, stop the VM.
                0x11 => {
                    match self.call_stack.pop() {
                        Some(return_addr) => self.pc = return_addr,
                        None => return Ok(()),
                    }
                }
                // TCOMPRESS: run-length encode a tensor into a new tensor.
                0x26 => {
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let src = self.tensors[idx].clone();
                            let compressed = rle_compress(&src);
                            let new_idx = self.tensors.len();
                            self.tensors.push(compressed);
                            self.stack.push(Value::TensorRef(new_idx));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TUNPACK: decode a run-length encoded tensor into a new tensor.
                0x27 => {
                    let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match ref_val {
                        Value::TensorRef(idx) => {
                            let src = self.tensors[idx].clone();
                            let unpacked = rle_decompress(&src);
                            let new_idx = self.tensors.len();
                            self.tensors.push(unpacked);
                            self.stack.push(Value::TensorRef(new_idx));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // SPAWN: create a local agent of the given type and push its reference.
                0x30 => {
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let type_id = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]);
                    self.pc += 2;
                    let handler_addr = *self.agent_types.get(&type_id)
                        .ok_or(VmError::InvalidOpcode(0x30))?;
                    let instance_id = self.agents.len();
                    self.agents.push(AgentInstance {
                        handler_addr,
                        mailbox: std::collections::VecDeque::new(),
                    });
                    self.stack.push(Value::AgentRef(instance_id, None));
                }
                // REMOTE REF: pop a node address string and push a reference to an agent
                // of the given type on that node.
                0x33 => {
                    let addr_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    if self.pc + 1 >= self.code.len() {
                        return Err(VmError::PcOutOfBounds(self.pc));
                    }
                    let type_id = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]);
                    self.pc += 2;
                    if let Value::String(addr) = addr_val {
                        self.stack.push(Value::AgentRef(type_id as usize, Some(addr)));
                    } else {
                        return Err(VmError::TypeMismatch);
                    }
                }
                // SEND: pop a message and an agent reference; enqueue locally or forward
                // over the remote transport.
                0x31 => {
                    let message = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let agent_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match agent_val {
                        Value::AgentRef(id, None) => {
                            self.agents[id].mailbox.push_back(message);
                        }
                        Value::AgentRef(id, Some(addr)) => {
                            if let Some(rt) = &self.remote {
                                let trit_i8 = match message {
                                    Value::Trit(Trit::Affirm) => 1i8,
                                    Value::Trit(Trit::Reject) => -1i8,
                                    _ => 0i8,
                                };
                                rt.remote_send(&addr, id, trit_i8)
                                    .map_err(|_| VmError::TypeMismatch)?;
                            }
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // AWAIT: for a local agent, pop a message from its mailbox and run its
                // handler; for a remote agent, ask the transport for a trit.
                0x32 => {
                    let agent_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    match agent_val {
                        Value::AgentRef(id, None) => {
                            let message = self.agents[id].mailbox.pop_front()
                                .unwrap_or(Value::Trit(Trit::Tend));
                            let handler_addr = self.agents[id].handler_addr;
                            self.stack.push(message);
                            self.call_stack.push(self.pc);
                            self.pc = handler_addr;
                        }
                        Value::AgentRef(id, Some(addr)) => {
                            let result = if let Some(rt) = &self.remote {
                                rt.remote_await(&addr, id)
                                    .map(|v| match v {
                                        1 => Trit::Affirm,
                                        -1 => Trit::Reject,
                                        _ => Trit::Tend,
                                    })
                                    .unwrap_or(Trit::Tend)
                            } else {
                                Trit::Tend
                            };
                            self.stack.push(Value::Trit(result));
                        }
                        _ => return Err(VmError::TypeMismatch),
                    }
                }
                // TNODE_ID: push this VM's node address as a string.
                0x12 => {
                    self.stack.push(Value::String(self.node_id.clone()));
                }
                // TLESS: compare two ints; Affirm if a < b, Tend if equal, Reject otherwise.
                0x14 => {
                    let b = self.stack.pop().unwrap_or(Value::Int(0));
                    let a = self.stack.pop().unwrap_or(Value::Int(0));
                    let result = match (a, b) {
                        (Value::Int(x), Value::Int(y)) => {
                            if x < y { Trit::Affirm } else if x == y { Trit::Tend } else { Trit::Reject }
                        }
                        _ => Trit::Tend,
                    };
                    self.stack.push(Value::Trit(result));
                }
                // TGREATER: compare two ints; Affirm if a > b, Tend if equal, Reject otherwise.
                0x15 => {
                    let b = self.stack.pop().unwrap_or(Value::Int(0));
                    let a = self.stack.pop().unwrap_or(Value::Int(0));
                    let result = match (a, b) {
                        (Value::Int(x), Value::Int(y)) => {
                            if x > y { Trit::Affirm } else if x == y { Trit::Tend } else { Trit::Reject }
                        }
                        _ => Trit::Tend,
                    };
                    self.stack.push(Value::Trit(result));
                }
                // TEQ: pop two values; Affirm if they are equal, Reject otherwise.
                0x16 => {
                    let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
                    let result = if a == b { Trit::Affirm } else { Trit::Reject };
                    self.stack.push(Value::Trit(result));
                }
                // THALT: stop cleanly.
                0x00 => return Ok(()),
                _ => return Err(VmError::InvalidOpcode(opcode)),
            }
        }
        Ok(())
    }

    pub fn get_register(&self, reg: usize) -> Value {
        self.registers[reg].clone()
    }

    pub fn get_stack_top(&self) -> Option<Value> {
        self.stack.last().cloned()
    }

    pub fn get_tensor(&self, idx: usize) -> Option<&Vec<Trit>> {
        self.tensors.get(idx)
    }

    pub fn peek_stack(&self) -> Option<Value> {
        self.stack.last().cloned()
    }
}

/// Run-length encode a trit tensor. The output starts with a Reject marker,
/// followed by (value, hi, lo) triples where hi*3 + lo is the run length;
/// runs are capped at 8 per chunk.
pub fn rle_compress(src: &[Trit]) -> Vec<Trit> {
    if src.is_empty() { return vec![]; }
    let mut out = Vec::new();
    out.push(Trit::Reject); // marker: this tensor is compressed

    let mut i = 0;
    while i < src.len() {
        let val = src[i];
        let mut run = 1usize;
        while i + run < src.len() && src[i + run] == val && run < 255 {
            run += 1;
        }
        let mut remaining = run;
        while remaining > 0 {
            // Each chunk encodes at most 8 repetitions as two base-3 digits.
            let chunk = remaining.min(8);
            out.push(val);
            out.push(int_to_trit((chunk / 3) as i8)); // high digit
            out.push(int_to_trit((chunk % 3) as i8)); // low digit
            remaining -= chunk;
        }
        i += run;
    }
    out
}
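
// Worked example of the run-length layout produced above (illustrative only):
// nine zeros [Tend; 9] compress to
//   [Reject,                  // marker: "this tensor is RLE-compressed"
//    Tend, Reject, Reject,    // run of Tend, length 2*3 + 2 = 8
//    Tend, Tend,   Affirm]    // run of Tend, length 0*3 + 1 = 1
// i.e. each run is (value, hi digit, lo digit) with the length in base 3
// using Tend=0, Affirm=1, Reject=2, and at most 8 repetitions per chunk.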

/// Inverse of `rle_compress`. Input without the Reject marker is returned unchanged.
pub fn rle_decompress(src: &[Trit]) -> Vec<Trit> {
    if src.is_empty() { return vec![]; }
    if src[0] != Trit::Reject { return src.to_vec(); }

    let mut out = Vec::new();
    let mut i = 1;
    while i + 2 < src.len() {
        let val = src[i];
        let hi = trit_to_int(src[i + 1]) as usize;
        let lo = trit_to_int(src[i + 2]) as usize;
        let count = hi * 3 + lo;
        for _ in 0..count.max(1) { out.push(val); }
        i += 3;
    }
    out
}

// Helpers for the base-3 run-length digits above. These are counts, not signed
// trit values: Tend = 0, Affirm = 1, Reject = 2.
fn int_to_trit(v: i8) -> Trit {
    match v {
        0 => Trit::Tend,
        1 => Trit::Affirm,
        _ => Trit::Reject,
    }
}

fn trit_to_int(t: Trit) -> i8 {
    match t {
        Trit::Tend => 0,
        Trit::Affirm => 1,
        Trit::Reject => 2,
    }
}

#[cfg(test)]
mod tensor_tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    fn push_trit(code: &mut Vec<u8>, t: Trit) {
        code.push(0x01);
        code.extend(pack_trits(&[t]));
    }

    fn talloc(code: &mut Vec<u8>, size: u16) {
        code.push(0x0f);
        code.extend_from_slice(&size.to_le_bytes());
    }

    #[test]
    fn test_tsparsity() {
        let mut code = Vec::new();
        talloc(&mut code, 4);             // 2x2 tensor, all zeros
        code.push(0x08); code.push(0x00); // TSTORE r0
        code.push(0x09); code.push(0x00); // TLOAD r0
        code.push(0x25);                  // TSPARSITY
        code.push(0x08); code.push(0x01); // TSTORE r1
        code.push(0x00);                  // THALT
        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(1), Value::Int(4));
    }

    #[test]
    fn test_tshape() {
        let mut code = Vec::new();
        talloc(&mut code, 4);
        code.push(0x08); code.push(0x00); // TSTORE r0
        code.push(0x09); code.push(0x00); // TLOAD r0
        code.push(0x24);                  // TSHAPE pushes the dimension twice
        code.push(0x08); code.push(0x02); // TSTORE r2
        code.push(0x08); code.push(0x01); // TSTORE r1
        code.push(0x00);                  // THALT
        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(1), Value::Int(2));
        assert_eq!(vm.get_register(2), Value::Int(2));
    }

    #[test]
    fn test_tsparse_matmul_skips_zeros() {
        let mut code = Vec::new();
        talloc(&mut code, 1);             // 1x1 zero tensor A
        code.push(0x08); code.push(0x00); // TSTORE r0
        talloc(&mut code, 1);             // 1x1 zero tensor B
        code.push(0x08); code.push(0x01); // TSTORE r1
        code.push(0x09); code.push(0x00); // TLOAD r0
        code.push(0x09); code.push(0x01); // TLOAD r1
        code.push(0x21);                  // TSPARSE_MATMUL
        code.push(0x08); code.push(0x03); // TSTORE r3 (skip count)
        code.push(0x08); code.push(0x02); // TSTORE r2 (result ref)
        code.push(0x00);                  // THALT
        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        assert_eq!(vm.get_register(3), Value::Int(1));

        let result_ref = match vm.get_register(2) {
            Value::TensorRef(i) => i,
            _ => panic!("expected TensorRef"),
        };
        assert_eq!(vm.get_tensor(result_ref).unwrap()[0], Trit::Tend);
    }
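
    // Added sketch: exercises the dense matmul opcode (0x20) with two freshly
    // allocated (all-zero) 2x2 tensors. The expected result assumes only that
    // Tend behaves as zero for trit addition and multiplication, which the
    // tests above already rely on.
    #[test]
    fn test_tmatmul_zero_tensors() {
        let mut code = Vec::new();
        talloc(&mut code, 4);             // 2x2 zero tensor A
        code.push(0x08); code.push(0x00); // TSTORE r0
        talloc(&mut code, 4);             // 2x2 zero tensor B
        code.push(0x08); code.push(0x01); // TSTORE r1
        code.push(0x09); code.push(0x00); // TLOAD r0
        code.push(0x09); code.push(0x01); // TLOAD r1
        code.push(0x20);                  // TMATMUL (dense)
        code.push(0x08); code.push(0x02); // TSTORE r2 (result ref)
        code.push(0x00);                  // THALT
        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        let result_ref = match vm.get_register(2) {
            Value::TensorRef(i) => i,
            _ => panic!("expected TensorRef"),
        };
        let result = vm.get_tensor(result_ref).unwrap();
        assert_eq!(result.len(), 4);
        assert!(result.iter().all(|&t| t == Trit::Tend));
    }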
}

#[cfg(test)]
mod actor_tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    #[test]
    fn test_actor_spawn_send_await() {
        let mut code = Vec::new();

        // Jump over the handler body; the target is patched in below.
        let jmp_patch = code.len() + 1;
        code.push(0x0b);                              // TJMP
        code.extend_from_slice(&[0u8, 0u8]);          // placeholder address

        // Handler: immediately return the message left on the stack.
        let handler_addr = code.len();
        code.push(0x11);                              // TRET

        // Patch the jump so execution starts after the handler.
        let entry = code.len() as u16;
        let bytes = entry.to_le_bytes();
        code[jmp_patch] = bytes[0];
        code[jmp_patch + 1] = bytes[1];

        code.push(0x30);                              // SPAWN agent of type 0
        code.extend_from_slice(&0u16.to_le_bytes());
        code.push(0x08); code.push(0x00);             // TSTORE r0 (agent ref)
        code.push(0x09); code.push(0x00);             // TLOAD r0
        code.push(0x01);                              // TPUSH Affirm (the message)
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x31);                              // SEND
        code.push(0x09); code.push(0x00);             // TLOAD r0
        code.push(0x32);                              // AWAIT: runs the handler
        code.push(0x08); code.push(0x01);             // TSTORE r1 (handler result)
        code.push(0x00);                              // THALT
        let mut vm = BetVm::new(code);
        vm.register_agent_type(0, handler_addr);
        vm.run().unwrap();

        assert_eq!(vm.get_register(1), Value::Trit(Trit::Affirm));
    }
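
    // Added sketch: an in-process loopback implementation of RemoteTransport,
    // used to exercise the remote AgentRef path (0x33 / 0x31 / 0x32) without a
    // network. The TNODE_ID opcode supplies the address string. Illustrative
    // only; the real transport lives outside this module.
    struct LoopbackTransport {
        last: std::sync::Mutex<Option<i8>>,
    }

    impl RemoteTransport for LoopbackTransport {
        fn remote_send(&self, _node_addr: &str, _agent_id: usize, trit: i8) -> std::io::Result<()> {
            *self.last.lock().unwrap() = Some(trit);
            Ok(())
        }

        fn remote_await(&self, _node_addr: &str, _agent_id: usize) -> std::io::Result<i8> {
            Ok(self.last.lock().unwrap().unwrap_or(0))
        }
    }

    #[test]
    fn test_remote_agent_loopback() {
        let mut code = Vec::new();
        code.push(0x12);                              // TNODE_ID: push this node's address
        code.push(0x33);                              // remote ref to agent type 0 at that address
        code.extend_from_slice(&0u16.to_le_bytes());
        code.push(0x01);                              // TPUSH Affirm (the message)
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x31);                              // SEND -> remote_send on the transport
        code.push(0x12);                              // TNODE_ID again
        code.push(0x33);                              // another ref to the same remote agent
        code.extend_from_slice(&0u16.to_le_bytes());
        code.push(0x32);                              // AWAIT -> remote_await on the transport
        code.push(0x08); code.push(0x00);             // TSTORE r0
        code.push(0x00);                              // THALT

        let mut vm = BetVm::new(code);
        vm.set_node_id("127.0.0.1:7474".to_string());
        vm.set_remote(Arc::new(LoopbackTransport {
            last: std::sync::Mutex::new(None),
        }));
        vm.run().unwrap();

        assert_eq!(vm.get_register(0), Value::Trit(Trit::Affirm));
    }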
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    #[test]
    fn test_vm_addition() {
        let mut code = vec![0x01];                // TPUSH Affirm
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x01);                          // TPUSH Affirm
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x02);                          // TADD
        code.push(0x08); code.push(0x00);         // TSTORE r0 (sum digit)
        code.push(0x0d);                          // TLOAD_CARRY
        code.push(0x08); code.push(0x01);         // TSTORE r1 (carry)
        code.push(0x00);                          // THALT
        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        // 1 + 1 in balanced ternary: digit -1 with carry +1.
        assert_eq!(vm.get_register(0), Value::Trit(Trit::Reject));
        assert_eq!(vm.get_register(1), Value::Trit(Trit::Affirm));
    }
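
    // Added sketch: exercises the TEQ comparison opcode (0x16); per the
    // implementation above, equal operands push Affirm.
    #[test]
    fn test_vm_equality() {
        let mut code = vec![0x01];                // TPUSH Affirm
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x01);                          // TPUSH Affirm
        code.extend(pack_trits(&[Trit::Affirm]));
        code.push(0x16);                          // TEQ
        code.push(0x08); code.push(0x00);         // TSTORE r0
        code.push(0x00);                          // THALT
        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(0), Value::Trit(Trit::Affirm));
    }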
}

#[cfg(test)]
mod compress_tests {
    use super::*;
    use crate::trit::Trit;

    #[test]
    fn test_rle_compress_all_zeros() {
        let src = vec![Trit::Tend; 9];
        let c = rle_compress(&src);
        assert_eq!(c[0], Trit::Reject);
        assert!(c.len() < src.len(), "compressed should be shorter than 9 zeros");
    }

    #[test]
    fn test_rle_roundtrip_uniform() {
        let src = vec![Trit::Affirm; 6];
        let compressed = rle_compress(&src);
        let restored = rle_decompress(&compressed);
        assert_eq!(restored, src, "roundtrip must be lossless");
    }

    #[test]
    fn test_rle_roundtrip_mixed() {
        let src = vec![
            Trit::Affirm, Trit::Affirm, Trit::Affirm,
            Trit::Tend, Trit::Tend,
            Trit::Reject,
            Trit::Tend, Trit::Tend, Trit::Tend,
        ];
        let compressed = rle_compress(&src);
        let restored = rle_decompress(&compressed);
        assert_eq!(restored, src, "roundtrip must be lossless for mixed tensor");
    }

    #[test]
    fn test_rle_compress_single_element() {
        let src = vec![Trit::Reject];
        let c = rle_compress(&src);
        let r = rle_decompress(&c);
        assert_eq!(r, src);
    }
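
    // Added sketch: a run longer than one 8-element chunk, to exercise the
    // chunking loop in rle_compress (20 = 8 + 8 + 4).
    #[test]
    fn test_rle_roundtrip_long_run() {
        let src = vec![Trit::Affirm; 20];
        let compressed = rle_compress(&src);
        let restored = rle_decompress(&compressed);
        assert_eq!(restored, src, "roundtrip must be lossless across chunk boundaries");
        assert!(compressed.len() < src.len());
    }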

    #[test]
    fn test_tcompress_tunpack_opcodes() {
        let mut code = Vec::new();

        code.push(0x0f);                  // TALLOC 9 (3x3 tensor, all zeros)
        code.extend_from_slice(&9u16.to_le_bytes());
        code.push(0x08); code.push(0x00); // TSTORE r0
        code.push(0x09); code.push(0x00); // TLOAD r0
        code.push(0x26);                  // TCOMPRESS
        code.push(0x08); code.push(0x01); // TSTORE r1 (compressed ref)
        code.push(0x09); code.push(0x01); // TLOAD r1
        code.push(0x27);                  // TUNPACK
        code.push(0x08); code.push(0x02); // TSTORE r2 (restored ref)
        code.push(0x09); code.push(0x02); // TLOAD r2
        code.push(0x25);                  // TSPARSITY
        code.push(0x08); code.push(0x03); // TSTORE r3
        code.push(0x00);                  // THALT
        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        let sparsity = vm.get_register(3);
        assert!(matches!(sparsity, Value::Int(n) if n >= 9),
            "restored tensor should have 9 zero elements, got {:?}", sparsity);
    }
}