1pub mod bet;
2
3use crate::trit::Trit;
4use crate::vm::bet::{unpack_trits, BetFault};
5
6use std::fmt;
7use std::sync::Arc;
8
/// Network hook used by the VM for cross-node agent messaging: opcode 0x31
/// (send to a remote `AgentRef`) and 0x32 (await a reply). The VM stores the
/// transport behind an `Arc`, so implementations must be `Send + Sync`.
pub trait RemoteTransport: Send + Sync {
    /// Delivers a single trit, encoded as -1/0/1, to agent `agent_id` on the
    /// node at `node_addr`.
    fn remote_send(&self, node_addr: &str, agent_id: usize, trit: i8) -> std::io::Result<()>;
    /// Obtains a trit reply (encoded -1/0/1) from agent `agent_id` on
    /// `node_addr`. Presumably blocking — confirm against implementations.
    fn remote_await(&self, node_addr: &str, agent_id: usize) -> std::io::Result<i8>;
}
20
/// Runtime faults raised by the bytecode interpreter ([`BetVm::run`]).
#[derive(Debug, PartialEq, Eq)]
pub enum VmError {
    /// An opcode popped more operands than the stack held.
    StackUnderflow,
    /// Trit packing/unpacking failed (see `crate::vm::bet`).
    BetFault(BetFault),
    /// NOTE(review): not produced by `run()` in this file — normal
    /// termination is reported as `Ok(())`; confirm whether callers use it.
    Halt,
    /// Unknown opcode byte. Also reused by 0x30 (spawn) for an unregistered
    /// agent type.
    InvalidOpcode(u8),
    /// Register operand outside the 27-register file.
    InvalidRegister(u8),
    /// An operand read would run past the end of the code buffer; carries the
    /// `pc` at which decoding failed.
    PcOutOfBounds(usize),
    /// Operand(s) had the wrong `Value` variant for the opcode.
    TypeMismatch,
}
31
32impl fmt::Display for VmError {
33 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
34 match self {
35 VmError::StackUnderflow => write!(f, "Stack underflow"),
36 VmError::BetFault(fault) => write!(f, "BET Fault: {:?}", fault),
37 VmError::Halt => write!(f, "VM Halted"),
38 VmError::InvalidOpcode(op) => write!(f, "Invalid opcode: 0x{:02x}", op),
39 VmError::InvalidRegister(reg) => write!(f, "Invalid register: {}", reg),
40 VmError::PcOutOfBounds(pc) => write!(f, "PC out of bounds: {}", pc),
41 VmError::TypeMismatch => write!(f, "Type mismatch"),
42 }
43 }
44}
45
/// Symbolic instruction set with its wire discriminants.
///
/// NOTE(review): `BetVm::run` decodes raw bytes directly rather than through
/// this enum — keep the discriminants in sync by hand. The interpreter also
/// implements tensor opcodes 0x20-0x27 and agent opcodes 0x30-0x33 that have
/// no variants here, while `TpushStr` (0x13) has no interpreter case.
/// Explicit discriminants on variants with fields require Rust 1.66+.
#[repr(u8)]
pub enum Opcode {
    /// Push an immediate trit (operand: one packed-trit byte).
    Tpush(Trit) = 0x01,
    /// Pop two trits, push their sum digit; carry goes to the carry register.
    Tadd = 0x02,
    /// Pop two trits, push their product.
    Tmul = 0x03,
    /// Pop a trit, push its negation.
    Tneg = 0x04,
    /// Jump to the u16 address if the popped trit is +1.
    TjmpPos(u16) = 0x05,
    /// Jump to the u16 address if the popped trit is 0.
    TjmpZero(u16) = 0x06,
    /// Jump to the u16 address if the popped trit is -1.
    TjmpNeg(u16) = 0x07,
    /// Pop a value into register `u8`.
    Tstore(u8) = 0x08,
    /// Push a clone of register `u8`.
    Tload(u8) = 0x09,
    /// Duplicate the top of stack.
    Tdup = 0x0a,
    /// Unconditional jump.
    Tjmp(u16) = 0x0b,
    /// Discard the top of stack.
    Tpop = 0x0c,
    /// Push the carry register.
    TloadCarry = 0x0d,
    /// Pop two trits, push their sum digit, discarding the carry.
    Tcons = 0x0e,
    /// Allocate a zeroed tensor of `u16` elements; push its ref.
    Talloc(u16) = 0x0f,
    /// Call the u16 address, pushing the return address.
    Tcall(u16) = 0x10,
    /// Return; with an empty call stack this ends the program.
    Tret = 0x11,
    /// Push this VM's node id string.
    TnodeId = 0x12,
    /// NOTE(review): no corresponding case in `run()` — executing 0x13
    /// currently yields `InvalidOpcode`.
    TpushStr = 0x13,
    /// Stop execution.
    Thalt = 0x00,
}
69
/// Runtime values held in registers and on the operand stack.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Value {
    /// A single balanced-ternary digit.
    Trit(Trit),
    /// Integer scalar (tensor shapes, element indices, counters).
    Int(i64),
    /// UTF-8 string (node addresses; pushed by 0x12 TnodeId).
    String(String),
    /// Index into `BetVm::tensors`.
    TensorRef(usize),
    /// Agent handle: with `None` the id indexes `BetVm::agents` (local);
    /// with `Some(addr)` it refers to an agent on the remote node `addr`.
    AgentRef(usize, Option<String>),
}
80
81impl Default for Value {
82 fn default() -> Self {
83 Value::Trit(Trit::Zero)
84 }
85}
86
/// A locally spawned actor: where its message handler lives and the messages
/// queued for it.
struct AgentInstance {
    // Bytecode address that Tawait (0x32) calls to process a message.
    handler_addr: usize,
    // FIFO of values delivered by Tsend (0x31) and not yet consumed.
    mailbox: std::collections::VecDeque<Value>,
}
93
/// Stack-based interpreter for balanced-ternary ("BET") bytecode.
pub struct BetVm {
    // 27 general-purpose value registers (presumably 3^3 for the ternary
    // theme — confirm), addressed by Tstore/Tload operands 0..=26.
    registers: [Value; 27],
    // Carry trit from the most recent Tadd (0x02); readable via 0x0d.
    carry_reg: Trit,
    // Operand stack.
    stack: Vec<Value>,
    // Return addresses for Tcall/Tret and for agent handler dispatch (0x32).
    call_stack: Vec<usize>,
    // Tensor pool; `Value::TensorRef` indexes into it. Slots are never freed.
    tensors: Vec<Vec<Trit>>,
    // Locally spawned agents; `Value::AgentRef(id, None)` indexes into it.
    agents: Vec<AgentInstance>,
    // Agent type id -> handler bytecode address (see `register_agent_type`).
    agent_types: std::collections::HashMap<u16, usize>,
    // Program counter: index of the next byte to decode in `code`.
    pc: usize,
    // The bytecode being executed.
    code: Vec<u8>,
    // This node's address, pushed by 0x12 (TnodeId).
    node_id: String,
    // Optional transport for remote agent messaging; `None` means remote
    // sends are dropped and remote awaits yield Zero.
    remote: Option<Arc<dyn RemoteTransport>>,
}
110
111impl BetVm {
112 pub fn new(code: Vec<u8>) -> Self {
113 Self {
114 registers: std::array::from_fn(|_| Value::default()),
115 carry_reg: Trit::Zero,
116 stack: Vec::new(),
117 call_stack: Vec::new(),
118 tensors: Vec::new(),
119 agents: Vec::new(),
120 agent_types: std::collections::HashMap::new(),
121 pc: 0,
122 code,
123 node_id: "127.0.0.1:7373".to_string(), remote: None,
125 }
126 }
127
128 pub fn set_node_id(&mut self, node_id: String) {
129 self.node_id = node_id;
130 }
131
132 pub fn set_remote(&mut self, transport: Arc<dyn RemoteTransport>) {
134 self.remote = Some(transport);
135 }
136
137 pub fn register_agent_type(&mut self, type_id: u16, handler_addr: usize) {
140 self.agent_types.insert(type_id, handler_addr);
141 }
142
143 pub fn run(&mut self) -> Result<(), VmError> {
144 loop {
145 if self.pc >= self.code.len() {
146 break;
147 }
148
149 let opcode = self.code[self.pc];
150 self.pc += 1;
151
152 match opcode {
153 0x01 => { if self.pc >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
155 let packed = self.code[self.pc];
156 self.pc += 1;
157 let trits = unpack_trits(&[packed], 1).map_err(VmError::BetFault)?;
158 self.stack.push(Value::Trit(trits[0]));
159 }
160 0x02 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
162 let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
163 match (a, b) {
164 (Value::Trit(av), Value::Trit(bv)) => {
165 let (sum, carry) = av + bv;
166 self.stack.push(Value::Trit(sum));
167 self.carry_reg = carry;
168 }
169 _ => return Err(VmError::TypeMismatch),
170 }
171 }
172 0x03 => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
174 let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
175 match (a, b) {
176 (Value::Trit(av), Value::Trit(bv)) => {
177 self.stack.push(Value::Trit(av * bv));
178 }
179 _ => return Err(VmError::TypeMismatch),
180 }
181 }
182 0x04 => { let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
184 match a {
185 Value::Trit(av) => self.stack.push(Value::Trit(-av)),
186 _ => return Err(VmError::TypeMismatch),
187 }
188 }
189 0x05 => { if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
191 let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
192 self.pc += 2;
193 let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
194 if let Value::Trit(Trit::PosOne) = val {
195 self.pc = addr;
196 }
197 }
198 0x06 => { if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
200 let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
201 self.pc += 2;
202 let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
203 if let Value::Trit(Trit::Zero) = val {
204 self.pc = addr;
205 }
206 }
207 0x07 => { if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
209 let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
210 self.pc += 2;
211 let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
212 if let Value::Trit(Trit::NegOne) = val {
213 self.pc = addr;
214 }
215 }
216 0x08 => { if self.pc >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
218 let reg = self.code[self.pc];
219 self.pc += 1;
220 let val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
221 if (reg as usize) >= self.registers.len() {
222 return Err(VmError::InvalidRegister(reg));
223 }
224 self.registers[reg as usize] = val;
225 }
226 0x09 => { if self.pc >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
228 let reg = self.code[self.pc];
229 self.pc += 1;
230 if (reg as usize) >= self.registers.len() {
231 return Err(VmError::InvalidRegister(reg));
232 }
233 self.stack.push(self.registers[reg as usize].clone());
234 }
235 0x0a => { let val = self.stack.last().ok_or(VmError::StackUnderflow)?;
237 self.stack.push(val.clone());
238 }
239 0x0b => { if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
241 let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
242 self.pc = addr;
243 }
244 0x0c => { self.stack.pop().ok_or(VmError::StackUnderflow)?;
246 }
247 0x0d => { self.stack.push(Value::Trit(self.carry_reg));
249 }
250 0x0e => { let b = self.stack.pop().ok_or(VmError::StackUnderflow)?;
252 let a = self.stack.pop().ok_or(VmError::StackUnderflow)?;
253 match (a, b) {
254 (Value::Trit(av), Value::Trit(bv)) => {
255 let (sum, _carry) = av + bv;
256 self.stack.push(Value::Trit(sum));
257 }
258 _ => return Err(VmError::TypeMismatch),
259 }
260 }
261 0x0f => { if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
263 let size = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
264 self.pc += 2;
265 let idx = self.tensors.len();
266 self.tensors.push(vec![Trit::Zero; size]);
267 self.stack.push(Value::TensorRef(idx));
268 }
269 0x20 => { let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
271 let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
272 match (a_val, b_val) {
273 (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) => {
274 let a_len = self.tensors[a_idx].len();
276 let b_len = self.tensors[b_idx].len();
277 let a_dim = (a_len as f64).sqrt() as usize;
278 let b_dim = (b_len as f64).sqrt() as usize;
279 if a_dim * a_dim != a_len || b_dim * b_dim != b_len || a_dim != b_dim {
280 return Err(VmError::TypeMismatch);
281 }
282 let n = a_dim;
283 let mut result = vec![Trit::Zero; n * n];
284 for row in 0..n {
285 for col in 0..n {
286 let mut acc = Trit::Zero;
287 for k in 0..n {
288 let (prod, _) = self.tensors[a_idx][row * n + k]
289 + (self.tensors[a_idx][row * n + k]
290 * self.tensors[b_idx][k * n + col]);
291 let (sum, _) = acc + prod;
292 acc = sum;
293 }
294 result[row * n + col] = acc;
295 }
296 }
297 let out_idx = self.tensors.len();
298 self.tensors.push(result);
299 self.stack.push(Value::TensorRef(out_idx));
300 }
301 _ => return Err(VmError::TypeMismatch),
302 }
303 }
304 0x21 => { let b_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
306 let a_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
307 match (a_val, b_val) {
308 (Value::TensorRef(a_idx), Value::TensorRef(b_idx)) => {
309 let a_len = self.tensors[a_idx].len();
310 let n = (a_len as f64).sqrt() as usize;
311 let mut result = vec![Trit::Zero; n * n];
312 let mut skipped: usize = 0;
313 for row in 0..n {
314 for col in 0..n {
315 let mut acc = Trit::Zero;
316 for k in 0..n {
317 let weight = self.tensors[b_idx][k * n + col];
318 if weight == Trit::Zero {
320 skipped += 1;
321 continue;
322 }
323 let prod = self.tensors[a_idx][row * n + k] * weight;
324 let (sum, _) = acc + prod;
325 acc = sum;
326 }
327 result[row * n + col] = acc;
328 }
329 }
330 let out_idx = self.tensors.len();
331 self.tensors.push(result);
332 self.stack.push(Value::TensorRef(out_idx));
334 self.stack.push(Value::Int(skipped as i64));
335 }
336 _ => return Err(VmError::TypeMismatch),
337 }
338 }
339 0x22 => { let col_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
341 let row_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
342 let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
343 match (ref_val, row_val, col_val) {
344 (Value::TensorRef(idx), Value::Int(row), Value::Int(col)) => {
345 let n = (self.tensors[idx].len() as f64).sqrt() as usize;
346 let pos = row as usize * n + col as usize;
347 self.stack.push(Value::Trit(self.tensors[idx][pos]));
348 }
349 _ => return Err(VmError::TypeMismatch),
350 }
351 }
352 0x23 => { let trit_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
354 let col_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
355 let row_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
356 let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
357 match (ref_val, row_val, col_val, trit_val) {
358 (Value::TensorRef(idx), Value::Int(row), Value::Int(col), Value::Trit(t)) => {
359 let n = (self.tensors[idx].len() as f64).sqrt() as usize;
360 let pos = row as usize * n + col as usize;
361 self.tensors[idx][pos] = t;
362 }
363 _ => return Err(VmError::TypeMismatch),
364 }
365 }
366 0x24 => { let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
368 match ref_val {
369 Value::TensorRef(idx) => {
370 let len = self.tensors[idx].len();
371 let n = (len as f64).sqrt() as usize;
372 self.stack.push(Value::Int(n as i64)); self.stack.push(Value::Int(n as i64)); }
375 _ => return Err(VmError::TypeMismatch),
376 }
377 }
378 0x25 => { let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
380 match ref_val {
381 Value::TensorRef(idx) => {
382 let zeros = self.tensors[idx].iter()
383 .filter(|&&t| t == Trit::Zero)
384 .count();
385 self.stack.push(Value::Int(zeros as i64));
386 }
387 _ => return Err(VmError::TypeMismatch),
388 }
389 }
390 0x10 => { if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
392 let addr = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]) as usize;
393 self.pc += 2;
394 self.call_stack.push(self.pc); self.pc = addr;
396 }
397 0x11 => { match self.call_stack.pop() {
399 Some(return_addr) => self.pc = return_addr,
400 None => return Ok(()), }
402 }
403 0x26 => { let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
410 match ref_val {
411 Value::TensorRef(idx) => {
412 let src = &self.tensors[idx].clone();
413 let compressed = rle_compress(src);
414 let new_idx = self.tensors.len();
415 self.tensors.push(compressed);
416 self.stack.push(Value::TensorRef(new_idx));
417 }
418 _ => return Err(VmError::TypeMismatch),
419 }
420 }
421 0x27 => { let ref_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
425 match ref_val {
426 Value::TensorRef(idx) => {
427 let src = &self.tensors[idx].clone();
428 let unpacked = rle_decompress(src);
429 let new_idx = self.tensors.len();
430 self.tensors.push(unpacked);
431 self.stack.push(Value::TensorRef(new_idx));
432 }
433 _ => return Err(VmError::TypeMismatch),
434 }
435 }
436 0x30 => { if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
439 let type_id = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]);
440 self.pc += 2;
441 let handler_addr = *self.agent_types.get(&type_id)
442 .ok_or(VmError::InvalidOpcode(0x30))?;
443 let instance_id = self.agents.len();
444 self.agents.push(AgentInstance {
445 handler_addr,
446 mailbox: std::collections::VecDeque::new(),
447 });
448 self.stack.push(Value::AgentRef(instance_id, None));
449 }
450 0x33 => { let addr_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
452 if self.pc + 1 >= self.code.len() { return Err(VmError::PcOutOfBounds(self.pc)); }
453 let type_id = u16::from_le_bytes([self.code[self.pc], self.code[self.pc + 1]]);
454 self.pc += 2;
455 if let Value::String(addr) = addr_val {
456 self.stack.push(Value::AgentRef(type_id as usize, Some(addr)));
460 } else {
461 return Err(VmError::TypeMismatch);
462 }
463 }
464 0x31 => { let message = self.stack.pop().ok_or(VmError::StackUnderflow)?;
466 let agent_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
467 match agent_val {
468 Value::AgentRef(id, None) => {
469 self.agents[id].mailbox.push_back(message);
470 }
471 Value::AgentRef(id, Some(addr)) => {
472 if let Some(rt) = &self.remote {
474 let trit_i8 = match message {
475 Value::Trit(Trit::PosOne) => 1i8,
476 Value::Trit(Trit::NegOne) => -1i8,
477 _ => 0i8,
478 };
479 rt.remote_send(&addr, id, trit_i8)
480 .map_err(|_| VmError::TypeMismatch)?;
481 }
482 }
484 _ => return Err(VmError::TypeMismatch),
485 }
486 }
487 0x32 => { let agent_val = self.stack.pop().ok_or(VmError::StackUnderflow)?;
489 match agent_val {
490 Value::AgentRef(id, None) => {
491 let message = self.agents[id].mailbox.pop_front()
492 .unwrap_or(Value::Trit(Trit::Zero)); let handler_addr = self.agents[id].handler_addr;
494 self.stack.push(message);
496 self.call_stack.push(self.pc); self.pc = handler_addr;
498 }
499 Value::AgentRef(id, Some(addr)) => {
500 let result = if let Some(rt) = &self.remote {
502 rt.remote_await(&addr, id)
503 .map(|v| match v {
504 1 => Trit::PosOne,
505 -1 => Trit::NegOne,
506 _ => Trit::Zero,
507 })
508 .unwrap_or(Trit::Zero)
509 } else {
510 Trit::Zero };
512 self.stack.push(Value::Trit(result));
513 }
514 _ => return Err(VmError::TypeMismatch),
515 }
516 }
517 0x12 => { self.stack.push(Value::String(self.node_id.clone()));
519 }
520 0x00 => return Ok(()), _ => return Err(VmError::InvalidOpcode(opcode)),
524 }
525 }
526 Ok(())
527 }
528
529 pub fn get_register(&self, reg: usize) -> Value {
530 self.registers[reg].clone()
531 }
532
533 pub fn get_tensor(&self, idx: usize) -> Option<&Vec<Trit>> {
534 self.tensors.get(idx)
535 }
536
537 pub fn peek_stack(&self) -> Option<Value> {
538 self.stack.last().cloned()
539 }
540}
541
542pub fn rle_compress(src: &[Trit]) -> Vec<Trit> {
560 if src.is_empty() { return vec![]; }
561 let mut out = Vec::new();
562 out.push(Trit::NegOne);
564
565 let mut i = 0;
566 while i < src.len() {
567 let val = src[i];
568 let mut run = 1usize;
569 while i + run < src.len() && src[i + run] == val && run < 255 {
570 run += 1;
571 }
572 let mut remaining = run;
579 while remaining > 0 {
580 let chunk = remaining.min(8); out.push(val);
582 out.push(int_to_trit((chunk / 3) as i8)); out.push(int_to_trit((chunk % 3) as i8)); remaining -= chunk;
585 }
586 i += run;
587 }
588 out
589}
590
591pub fn rle_decompress(src: &[Trit]) -> Vec<Trit> {
593 if src.is_empty() { return vec![]; }
594 if src[0] != Trit::NegOne { return src.to_vec(); } let mut out = Vec::new();
597 let mut i = 1;
598 while i + 2 < src.len() {
599 let val = src[i];
600 let hi = trit_to_int(src[i + 1]) as usize;
601 let lo = trit_to_int(src[i + 2]) as usize;
602 let count = hi * 3 + lo;
603 for _ in 0..count.max(1) { out.push(val); }
604 i += 3;
605 }
606 out
607}
608
609fn int_to_trit(v: i8) -> Trit {
610 match v {
611 0 => Trit::Zero,
612 1 => Trit::PosOne,
613 _ => Trit::NegOne,
614 }
615}
616
617fn trit_to_int(t: Trit) -> i8 {
618 match t {
619 Trit::Zero => 0,
620 Trit::PosOne => 1,
621 Trit::NegOne => 2, }
623}
624
#[cfg(test)]
mod tensor_tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    // Emits Tpush (0x01) with `t` packed into one operand byte.
    // NOTE(review): currently unused in this module.
    fn push_trit(code: &mut Vec<u8>, t: Trit) {
        code.push(0x01);
        code.extend(pack_trits(&[t]));
    }

    // Emits Talloc (0x0f) with a little-endian u16 size operand.
    fn talloc(code: &mut Vec<u8>, size: u16) {
        code.push(0x0f);
        code.extend_from_slice(&size.to_le_bytes());
    }

    #[test]
    fn test_tsparsity() {
        let mut code = Vec::new();
        // A freshly allocated 4-element tensor is all zeros.
        talloc(&mut code, 4);
        code.push(0x08); code.push(0x00); // Tstore r0 (tensor ref)
        code.push(0x09); code.push(0x00); // Tload r0
        code.push(0x25); // Tsparsity: count zero trits
        code.push(0x08); code.push(0x01); // Tstore r1 (the count)
        code.push(0x00); // Thalt
        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(1), Value::Int(4));
    }

    #[test]
    fn test_tshape() {
        let mut code = Vec::new();
        // 4 elements => a 2x2 square tensor.
        talloc(&mut code, 4);
        code.push(0x08); code.push(0x00); // Tstore r0 (tensor ref)
        code.push(0x09); code.push(0x00); // Tload r0
        code.push(0x24); // Tshape: pushes side twice (rows, cols)
        code.push(0x08); code.push(0x02); // Tstore r2 (top of stack)
        code.push(0x08); code.push(0x01); // Tstore r1
        code.push(0x00); // Thalt
        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(1), Value::Int(2)); // side
        assert_eq!(vm.get_register(2), Value::Int(2)); // side
    }

    #[test]
    fn test_tsparse_matmul_skips_zeros() {
        let mut code = Vec::new();
        // Two 1x1 zero tensors: the single weight is zero, so the sparse
        // matmul (0x21) should skip exactly one multiply.
        talloc(&mut code, 1); code.push(0x08); code.push(0x00); // a -> r0
        talloc(&mut code, 1); code.push(0x08); code.push(0x01); // b -> r1
        code.push(0x09); code.push(0x00); // Tload r0
        code.push(0x09); code.push(0x01); // Tload r1
        code.push(0x21); // sparse matmul: pushes result ref, then skip count
        code.push(0x08); code.push(0x03); // Tstore r3 (skip count, on top)
        code.push(0x08); code.push(0x02); // Tstore r2 (result ref)
        code.push(0x00); // Thalt
        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        // One zero weight encountered => one skipped multiply.
        assert_eq!(vm.get_register(3), Value::Int(1));
        let result_ref = match vm.get_register(2) {
            Value::TensorRef(i) => i,
            _ => panic!("expected TensorRef"),
        };
        // 0 * 0 accumulates to zero.
        assert_eq!(vm.get_tensor(result_ref).unwrap()[0], Trit::Zero);
    }
}
704
#[cfg(test)]
mod actor_tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    // Spawns a local agent, sends it +1, awaits, and checks that the
    // handler (a bare Tret) leaves the dequeued message on the stack.
    #[test]
    fn test_actor_spawn_send_await() {
        let mut code = Vec::new();

        // Unconditional jump over the handler body; the target operand is
        // back-patched once the entry point address is known.
        let jmp_patch = code.len() + 1;
        code.push(0x0b); code.extend_from_slice(&[0u8, 0u8]); // Tjmp <patched>

        // Handler: Tawait has already pushed the message, so returning
        // immediately hands it back to the caller.
        let handler_addr = code.len();
        code.push(0x11); // Tret

        // Patch the jump target to the real entry point.
        let entry = code.len() as u16;
        let bytes = entry.to_le_bytes();
        code[jmp_patch] = bytes[0];
        code[jmp_patch + 1] = bytes[1];

        code.push(0x30); code.extend_from_slice(&0u16.to_le_bytes()); // Tspawn type 0
        code.push(0x08); code.push(0x00); // Tstore r0 (agent ref)
        code.push(0x09); code.push(0x00); // Tload r0
        code.push(0x01); code.extend(pack_trits(&[Trit::PosOne])); // Tpush +1
        code.push(0x31); // Tsend: +1 into the agent's mailbox
        code.push(0x09); code.push(0x00); // Tload r0
        code.push(0x32); // Tawait: dequeues +1, calls the handler
        code.push(0x08); code.push(0x01); // Tstore r1 (message left by handler)
        code.push(0x00); // Thalt
        let mut vm = BetVm::new(code);
        vm.register_agent_type(0, handler_addr);
        vm.run().unwrap();

        assert_eq!(vm.get_register(1), Value::Trit(Trit::PosOne));
    }
}
769
#[cfg(test)]
mod tests {
    use super::*;
    use crate::vm::bet::pack_trits;

    // In balanced ternary, 1 + 1 = 2 = 3 - 1, i.e. digit -1 with carry +1.
    #[test]
    fn test_vm_addition() {
        let mut code = vec![0x01]; // Tpush +1
        code.extend(pack_trits(&[Trit::PosOne]));
        code.push(0x01); // Tpush +1
        code.extend(pack_trits(&[Trit::PosOne]));
        code.push(0x02); code.push(0x08); code.push(0x00); // Tadd; Tstore r0 (sum digit)
        code.push(0x0d); code.push(0x08); code.push(0x01); // TloadCarry; Tstore r1
        code.push(0x00); // Thalt
        let mut vm = BetVm::new(code);
        vm.run().unwrap();
        assert_eq!(vm.get_register(0), Value::Trit(Trit::NegOne)); // sum digit
        assert_eq!(vm.get_register(1), Value::Trit(Trit::PosOne)); // carry
    }
}
796
#[cfg(test)]
mod compress_tests {
    use super::*;
    use crate::trit::Trit;

    #[test]
    fn test_rle_compress_all_zeros() {
        let src = vec![Trit::Zero; 9];
        let c = rle_compress(&src);
        // Leading NegOne marks the stream as compressed.
        assert_eq!(c[0], Trit::NegOne);
        // 9 zeros collapse to a marker plus two run triples.
        assert!(c.len() < src.len(), "compressed should be shorter than 9 zeros");
    }

    #[test]
    fn test_rle_roundtrip_uniform() {
        let src = vec![Trit::PosOne; 6];
        let compressed = rle_compress(&src);
        let restored = rle_decompress(&compressed);
        assert_eq!(restored, src, "roundtrip must be lossless");
    }

    #[test]
    fn test_rle_roundtrip_mixed() {
        // Runs of varying lengths and values, including a run of one.
        let src = vec![
            Trit::PosOne, Trit::PosOne, Trit::PosOne,
            Trit::Zero, Trit::Zero,
            Trit::NegOne,
            Trit::Zero, Trit::Zero, Trit::Zero,
        ];
        let compressed = rle_compress(&src);
        let restored = rle_decompress(&compressed);
        assert_eq!(restored, src, "roundtrip must be lossless for mixed tensor");
    }

    #[test]
    fn test_rle_compress_single_element() {
        let src = vec![Trit::NegOne];
        let c = rle_compress(&src);
        let r = rle_decompress(&c);
        assert_eq!(r, src);
    }

    // Exercises the VM opcodes Tcompress (0x26) / Tunpack (0x27) end to end:
    // compress an all-zero tensor, unpack it, and verify the restored
    // tensor's zero count via Tsparsity (0x25).
    #[test]
    fn test_tcompress_tunpack_opcodes() {
        let mut code = Vec::new();

        // Talloc 9: a 9-element all-zero tensor.
        code.push(0x0f);
        code.extend_from_slice(&9u16.to_le_bytes());
        code.push(0x08); code.push(0x00); // Tstore r0 (source ref)
        code.push(0x09); code.push(0x00); // Tload r0
        code.push(0x26); // Tcompress
        code.push(0x08); code.push(0x01); // Tstore r1 (compressed ref)
        code.push(0x09); code.push(0x01); // Tload r1
        code.push(0x27); // Tunpack
        code.push(0x08); code.push(0x02); // Tstore r2 (restored ref)
        code.push(0x09); code.push(0x02); // Tload r2
        code.push(0x25); // Tsparsity of the restored tensor
        code.push(0x08); code.push(0x03); // Tstore r3
        code.push(0x00); // Thalt
        let mut vm = BetVm::new(code);
        vm.run().unwrap();

        let sparsity = vm.get_register(3);
        assert!(matches!(sparsity, Value::Int(n) if n >= 9),
            "restored tensor should have 9 zero elements, got {:?}", sparsity);
    }
}
877}