1use std::collections::{HashMap, VecDeque};
2use std::sync::{Arc, Mutex};
3
/// S7 memory-area codes as they appear on the wire.
///
/// The names mirror the classic S7 address spaces; the numeric values are
/// the protocol's area identifiers.
pub mod area {
    /// Process input image (PE).
    pub const PROCESS_INPUTS: u8 = 0x81;
    /// Process output image (PA).
    pub const PROCESS_OUTPUTS: u8 = 0x82;
    /// Flag / marker memory (M).
    pub const MARKERS: u8 = 0x83;
    /// Data blocks (DB).
    pub const DATA_BLOCK: u8 = 0x84;
    /// Instance data blocks (DI).
    pub const INSTANCE_DB: u8 = 0x85;
    /// Local data (L).
    pub const LOCAL_DATA: u8 = 0x86;
    /// Timers (T).
    pub const TIMER: u8 = 0x1D;
    /// Counters (C).
    pub const COUNTER: u8 = 0x1C;
}
15
/// Operating state of the emulated CPU.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CpuState {
    /// CPU is executing.
    Run,
    /// CPU is halted.
    Stop,
}
22
23impl Default for CpuState {
24 fn default() -> Self {
25 CpuState::Stop
26 }
27}
28
/// Metadata describing a single access, passed to read/write callbacks.
#[derive(Debug, Clone)]
pub struct EventInfo {
    /// Event tag, e.g. `"read"` or `"write"`.
    pub event: &'static str,
    /// Memory-area code (see [`area`]).
    pub area: u8,
    /// DB number (meaningful for data-block areas).
    pub db_number: u16,
    /// Starting byte offset of the access.
    pub start: u32,
    /// Number of bytes accessed.
    pub length: u32,
}
38
/// Thread-safe, cheaply clonable handle to the shared server state.
///
/// Cloning a `DataStore` clones only the `Arc`; every clone observes and
/// mutates the same underlying `StoreInner`.
#[derive(Clone)]
pub struct DataStore {
    inner: Arc<Mutex<StoreInner>>,
}
54
55impl Default for DataStore {
56 fn default() -> Self {
57 Self {
58 inner: Arc::new(Mutex::new(StoreInner {
59 data: HashMap::new(),
60 cpu_state: CpuState::Stop,
61 clock: [0u8; 8],
62 registered_areas: HashMap::new(),
63 locked_areas: HashMap::new(),
64 events: VecDeque::new(),
65 event_mask: 0xFFFF_FFFF,
66 client_count: 0,
67 read_callbacks: Vec::new(),
68 write_callbacks: Vec::new(),
69 event_callbacks: Vec::new(),
70 })),
71 }
72 }
73}
74
/// Point-in-time snapshot of the server's externally visible status.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ServerStatus {
    // Always `true` as produced by `DataStore::get_status`.
    pub server_running: bool,
    // CPU state at the moment the snapshot was taken.
    pub cpu_state: CpuState,
    // Number of clients connected at snapshot time.
    pub clients_count: usize,
}
83
84struct StoreInner {
85 data: HashMap<(u8, u16, u32), u8>,
87 cpu_state: CpuState,
88 clock: [u8; 8],
90 registered_areas: HashMap<u8, usize>,
92 locked_areas: HashMap<u8, bool>,
94 events: VecDeque<EventInfo>,
96 event_mask: u32,
98 pub(crate) client_count: usize,
100 read_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
101 write_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
102 event_callbacks: Vec<Box<dyn Fn(&str) + Send>>,
103}
104
105impl DataStore {
106 pub fn new() -> Self {
108 Self::default()
109 }
110
111 pub fn register_area(&self, area_code: u8, size: usize) {
116 let mut inner = self.inner.lock().unwrap();
117 inner.registered_areas.insert(area_code, size);
118 }
119
120 pub fn unregister_area(&self, area_code: u8) {
122 let mut inner = self.inner.lock().unwrap();
123 inner.registered_areas.remove(&area_code);
124 inner.data.retain(|k, _| k.0 != area_code);
126 }
127
128 pub fn is_area_registered(&self, area_code: u8) -> bool {
130 self.inner.lock().unwrap().registered_areas.contains_key(&area_code)
131 }
132
133 pub fn registered_areas(&self) -> Vec<u8> {
135 self.inner.lock().unwrap().registered_areas.keys().copied().collect()
136 }
137
138 pub fn lock_area(&self, area_code: u8) {
143 self.inner.lock().unwrap().locked_areas.insert(area_code, true);
144 }
145
146 pub fn unlock_area(&self, area_code: u8) {
148 self.inner.lock().unwrap().locked_areas.remove(&area_code);
149 }
150
151 pub fn is_area_locked(&self, area_code: u8) -> bool {
153 self.inner.lock().unwrap().locked_areas.contains_key(&area_code)
154 }
155
156 pub fn get_status(&self) -> ServerStatus {
160 let inner = self.inner.lock().unwrap();
161 ServerStatus {
162 server_running: true,
163 cpu_state: inner.cpu_state,
164 clients_count: inner.client_count,
165 }
166 }
167
168 pub(crate) fn client_connected(&self) {
170 self.inner.lock().unwrap().client_count += 1;
171 }
172
173 pub(crate) fn client_disconnected(&self) {
175 let mut inner = self.inner.lock().unwrap();
176 inner.client_count = inner.client_count.saturating_sub(1);
177 }
178
179 pub fn get_mask(&self) -> u32 {
183 self.inner.lock().unwrap().event_mask
184 }
185
186 pub fn set_mask(&self, mask: u32) {
188 self.inner.lock().unwrap().event_mask = mask;
189 }
190
191 pub fn clear_events(&self) {
193 self.inner.lock().unwrap().events.clear();
194 }
195
196 pub fn pick_event(&self) -> Option<EventInfo> {
198 self.inner.lock().unwrap().events.pop_front()
199 }
200
201 #[allow(dead_code)]
203 pub(crate) fn enqueue_event(&self, info: EventInfo, kind_bit: u32) {
204 let mut inner = self.inner.lock().unwrap();
205 if inner.event_mask & kind_bit == 0 {
206 return;
207 }
208 if inner.events.len() >= 1024 {
209 inner.events.pop_front(); }
211 inner.events.push_back(info);
212 }
213
214 pub fn cpu_state(&self) -> CpuState {
218 self.inner.lock().unwrap().cpu_state
219 }
220
221 pub fn set_cpu_state(&self, state: CpuState) {
223 let mut inner = self.inner.lock().unwrap();
224 inner.cpu_state = state;
225 drop(inner); let event = match state {
228 CpuState::Run => "cpu_start",
229 CpuState::Stop => "cpu_stop",
230 };
231 self.fire_event(event);
232 }
233
234 pub fn get_clock(&self) -> [u8; 8] {
238 self.inner.lock().unwrap().clock
239 }
240
241 pub fn set_clock(&self, bytes: [u8; 8]) {
243 self.inner.lock().unwrap().clock = bytes;
244 }
245
246 pub fn read_bytes(&self, db: u16, start: u32, count: u32) -> Vec<u8> {
250 let inner = self.inner.lock().unwrap();
251 let end = start.saturating_add(count);
252 (start..end)
253 .map(|offset| *inner.data.get(&(0x84, db, offset)).unwrap_or(&0))
254 .collect()
255 }
256
257 pub fn read_area(&self, area: u8, db: u16, start: u32, count: u32) -> Vec<u8> {
259 let inner = self.inner.lock().unwrap();
260 let end = start.saturating_add(count);
261 let data: Vec<u8> = (start..end)
262 .map(|offset| *inner.data.get(&(area, db, offset)).unwrap_or(&0))
263 .collect();
264
265 drop(inner);
267 self.fire_read(&EventInfo {
268 event: "read",
269 area,
270 db_number: db,
271 start,
272 length: count,
273 });
274 data
275 }
276
277 pub fn write_area(&self, area: u8, db: u16, start: u32, data: &[u8]) {
281 let mut inner = self.inner.lock().unwrap();
282 if inner.locked_areas.contains_key(&area) {
283 return;
284 }
285 for (i, &byte) in data.iter().enumerate() {
286 if let Some(offset) = start.checked_add(i as u32) {
287 inner.data.insert((area, db, offset), byte);
288 }
289 }
290 drop(inner);
291
292 self.fire_write(&EventInfo {
293 event: "write",
294 area,
295 db_number: db,
296 start,
297 length: data.len() as u32,
298 });
299 }
300
301 pub fn write_bytes(&self, db: u16, start: u32, data: &[u8]) {
303 self.write_area(area::DATA_BLOCK, db, start, data);
304 }
305
306 pub fn on_read<F>(&self, cb: F)
310 where
311 F: Fn(&EventInfo) + Send + 'static,
312 {
313 self.inner.lock().unwrap().read_callbacks.push(Box::new(cb));
314 }
315
316 pub fn on_write<F>(&self, cb: F)
318 where
319 F: Fn(&EventInfo) + Send + 'static,
320 {
321 self.inner.lock().unwrap().write_callbacks.push(Box::new(cb));
322 }
323
324 pub fn on_event<F>(&self, cb: F)
326 where
327 F: Fn(&str) + Send + 'static,
328 {
329 self.inner.lock().unwrap().event_callbacks.push(Box::new(cb));
330 }
331
332 fn fire_read(&self, info: &EventInfo) {
335 let callbacks = {
338 let mut inner = self.inner.lock().unwrap();
339 std::mem::take(&mut inner.read_callbacks)
340 };
341 for cb in &callbacks {
342 cb(info);
343 }
344 self.inner.lock().unwrap().read_callbacks = callbacks;
346 }
347
348 fn fire_write(&self, info: &EventInfo) {
349 let callbacks = {
350 let mut inner = self.inner.lock().unwrap();
351 std::mem::take(&mut inner.write_callbacks)
352 };
353 for cb in &callbacks {
354 cb(info);
355 }
356 self.inner.lock().unwrap().write_callbacks = callbacks;
357 }
358
359 fn fire_event(&self, event: &str) {
360 let callbacks = {
361 let mut inner = self.inner.lock().unwrap();
362 std::mem::take(&mut inner.event_callbacks)
363 };
364 for cb in &callbacks {
365 cb(event);
366 }
367 self.inner.lock().unwrap().event_callbacks = callbacks;
368 }
369}
370
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn read_unset_returns_zeros() {
        let store = DataStore::new();
        assert_eq!(store.read_bytes(1, 0, 4), vec![0, 0, 0, 0]);
    }

    #[test]
    fn write_then_read_roundtrip() {
        let store = DataStore::new();
        let payload = [0xDE, 0xAD, 0xBE, 0xEF];
        store.write_bytes(1, 0, &payload);
        assert_eq!(store.read_bytes(1, 0, 4), payload.to_vec());
    }

    #[test]
    fn write_to_different_dbs_isolated() {
        let store = DataStore::new();
        store.write_bytes(1, 0, &[0xAA]);
        store.write_bytes(2, 0, &[0xBB]);
        assert_eq!(store.read_bytes(1, 0, 1), vec![0xAA]);
        assert_eq!(store.read_bytes(2, 0, 1), vec![0xBB]);
    }

    #[test]
    fn read_area_uses_area_code() {
        let store = DataStore::new();
        store.write_area(area::MARKERS, 0, 10, &[0x99]);
        // The same offset in a different area must remain untouched.
        assert_eq!(store.read_area(area::PROCESS_OUTPUTS, 0, 10, 1), vec![0x00]);
        assert_eq!(store.read_area(area::MARKERS, 0, 10, 1), vec![0x99]);
    }

    #[test]
    fn register_area_roundtrip() {
        let store = DataStore::new();
        assert!(!store.is_area_registered(0x81));
        store.register_area(0x81, 1024);
        assert!(store.is_area_registered(0x81));
        store.unregister_area(0x81);
        assert!(!store.is_area_registered(0x81));
    }

    #[test]
    fn cpu_state_defaults_to_stop() {
        assert_eq!(DataStore::new().cpu_state(), CpuState::Stop);
    }

    #[test]
    fn cpu_state_transitions() {
        let store = DataStore::new();
        store.set_cpu_state(CpuState::Run);
        assert_eq!(store.cpu_state(), CpuState::Run);
        store.set_cpu_state(CpuState::Stop);
        assert_eq!(store.cpu_state(), CpuState::Stop);
    }

    #[test]
    fn write_callback_invoked() {
        use std::sync::atomic::{AtomicBool, Ordering};
        let store = DataStore::new();
        let fired = Arc::new(AtomicBool::new(false));
        let flag = Arc::clone(&fired);
        store.on_write(move |_| flag.store(true, Ordering::SeqCst));
        store.write_bytes(1, 0, &[0x01]);
        assert!(fired.load(Ordering::SeqCst));
    }

    #[test]
    fn event_callback_invoked() {
        use std::sync::atomic::{AtomicBool, Ordering};
        let store = DataStore::new();
        let fired = Arc::new(AtomicBool::new(false));
        let flag = Arc::clone(&fired);
        store.on_event(move |name| {
            if name == "cpu_start" {
                flag.store(true, Ordering::SeqCst);
            }
        });
        store.set_cpu_state(CpuState::Run);
        assert!(fired.load(Ordering::SeqCst));
    }
}