1use std::collections::HashMap;
2use std::sync::{Arc, Mutex};
3
/// Memory-area codes used as the first component of the storage key.
///
/// NOTE(review): the values look like S7 communication area identifiers
/// (0x81 inputs, 0x84 data block, 0x1D timers, ...) — confirm against the
/// protocol layer that produces them.
pub mod area {
    /// Process-image inputs.
    pub const PROCESS_INPUTS: u8 = 0x81;
    /// Process-image outputs.
    pub const PROCESS_OUTPUTS: u8 = 0x82;
    /// Flag/marker memory.
    pub const MARKERS: u8 = 0x83;
    /// Data blocks — the area used by `read_bytes`/`write_bytes`.
    pub const DATA_BLOCK: u8 = 0x84;
    /// Instance data blocks.
    pub const INSTANCE_DB: u8 = 0x85;
    /// Local data.
    pub const LOCAL_DATA: u8 = 0x86;
    /// Timers.
    pub const TIMER: u8 = 0x1D;
    /// Counters.
    pub const COUNTER: u8 = 0x1C;
}
15
/// Operating state of the emulated CPU.
///
/// The manual `impl Default` returning `Stop` is replaced by the derived
/// form with `#[default]` (clippy `derivable_impls`); behavior is identical.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum CpuState {
    /// CPU is executing.
    Run,
    /// CPU is halted; this is the initial state.
    #[default]
    Stop,
}
28
/// Describes a single read or write access, passed to notification callbacks.
#[derive(Debug, Clone)]
pub struct EventInfo {
    /// Event label, e.g. `"read"` or `"write"`.
    pub event: &'static str,
    /// Memory-area code of the access (see the `area` module).
    pub area: u8,
    /// Data-block number of the access.
    pub db_number: u16,
    /// First byte offset touched.
    pub start: u32,
    /// Number of bytes touched.
    pub length: u32,
}
38
/// Thread-safe byte store; cloning yields another handle to the same
/// underlying state (the `Arc` is shared, not the data copied).
#[derive(Clone)]
pub struct DataStore {
    // All state sits behind a single mutex so each operation is atomic
    // with respect to other handles.
    inner: Arc<Mutex<StoreInner>>,
}
54
55impl Default for DataStore {
56 fn default() -> Self {
57 Self {
58 inner: Arc::new(Mutex::new(StoreInner {
59 data: HashMap::new(),
60 cpu_state: CpuState::Stop,
61 registered_areas: HashMap::new(),
62 read_callbacks: Vec::new(),
63 write_callbacks: Vec::new(),
64 event_callbacks: Vec::new(),
65 })),
66 }
67 }
68}
69
70struct StoreInner {
71 data: HashMap<(u8, u16, u32), u8>,
73 cpu_state: CpuState,
74 registered_areas: HashMap<u8, usize>, read_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
77 write_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
78 event_callbacks: Vec<Box<dyn Fn(&str) + Send>>,
79}
80
81impl DataStore {
82 pub fn new() -> Self {
84 Self::default()
85 }
86
87 pub fn register_area(&self, area_code: u8, size: usize) {
92 let mut inner = self.inner.lock().unwrap();
93 inner.registered_areas.insert(area_code, size);
94 }
95
96 pub fn unregister_area(&self, area_code: u8) {
98 let mut inner = self.inner.lock().unwrap();
99 inner.registered_areas.remove(&area_code);
100 inner.data.retain(|k, _| k.0 != area_code);
102 }
103
104 pub fn is_area_registered(&self, area_code: u8) -> bool {
106 self.inner.lock().unwrap().registered_areas.contains_key(&area_code)
107 }
108
109 pub fn registered_areas(&self) -> Vec<u8> {
111 self.inner.lock().unwrap().registered_areas.keys().copied().collect()
112 }
113
114 pub fn cpu_state(&self) -> CpuState {
118 self.inner.lock().unwrap().cpu_state
119 }
120
121 pub fn set_cpu_state(&self, state: CpuState) {
123 let mut inner = self.inner.lock().unwrap();
124 inner.cpu_state = state;
125 drop(inner); let event = match state {
128 CpuState::Run => "cpu_start",
129 CpuState::Stop => "cpu_stop",
130 };
131 self.fire_event(event);
132 }
133
134 pub fn read_bytes(&self, db: u16, start: u32, count: u32) -> Vec<u8> {
138 let inner = self.inner.lock().unwrap();
139 let end = start.saturating_add(count);
140 (start..end)
141 .map(|offset| *inner.data.get(&(0x84, db, offset)).unwrap_or(&0))
142 .collect()
143 }
144
145 pub fn read_area(&self, area: u8, db: u16, start: u32, count: u32) -> Vec<u8> {
147 let inner = self.inner.lock().unwrap();
148 let end = start.saturating_add(count);
149 let data: Vec<u8> = (start..end)
150 .map(|offset| *inner.data.get(&(area, db, offset)).unwrap_or(&0))
151 .collect();
152
153 drop(inner);
155 self.fire_read(&EventInfo {
156 event: "read",
157 area,
158 db_number: db,
159 start,
160 length: count,
161 });
162 data
163 }
164
165 pub fn write_area(&self, area: u8, db: u16, start: u32, data: &[u8]) {
167 let mut inner = self.inner.lock().unwrap();
168 for (i, &byte) in data.iter().enumerate() {
169 if let Some(offset) = start.checked_add(i as u32) {
170 inner.data.insert((area, db, offset), byte);
171 }
172 }
173 drop(inner);
174
175 self.fire_write(&EventInfo {
176 event: "write",
177 area,
178 db_number: db,
179 start,
180 length: data.len() as u32,
181 });
182 }
183
184 pub fn write_bytes(&self, db: u16, start: u32, data: &[u8]) {
186 self.write_area(area::DATA_BLOCK, db, start, data);
187 }
188
189 pub fn on_read<F>(&self, cb: F)
193 where
194 F: Fn(&EventInfo) + Send + 'static,
195 {
196 self.inner.lock().unwrap().read_callbacks.push(Box::new(cb));
197 }
198
199 pub fn on_write<F>(&self, cb: F)
201 where
202 F: Fn(&EventInfo) + Send + 'static,
203 {
204 self.inner.lock().unwrap().write_callbacks.push(Box::new(cb));
205 }
206
207 pub fn on_event<F>(&self, cb: F)
209 where
210 F: Fn(&str) + Send + 'static,
211 {
212 self.inner.lock().unwrap().event_callbacks.push(Box::new(cb));
213 }
214
215 fn fire_read(&self, info: &EventInfo) {
218 let callbacks = {
221 let mut inner = self.inner.lock().unwrap();
222 std::mem::take(&mut inner.read_callbacks)
223 };
224 for cb in &callbacks {
225 cb(info);
226 }
227 self.inner.lock().unwrap().read_callbacks = callbacks;
229 }
230
231 fn fire_write(&self, info: &EventInfo) {
232 let callbacks = {
233 let mut inner = self.inner.lock().unwrap();
234 std::mem::take(&mut inner.write_callbacks)
235 };
236 for cb in &callbacks {
237 cb(info);
238 }
239 self.inner.lock().unwrap().write_callbacks = callbacks;
240 }
241
242 fn fire_event(&self, event: &str) {
243 let callbacks = {
244 let mut inner = self.inner.lock().unwrap();
245 std::mem::take(&mut inner.event_callbacks)
246 };
247 for cb in &callbacks {
248 cb(event);
249 }
250 self.inner.lock().unwrap().event_callbacks = callbacks;
251 }
252}
253
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicBool, Ordering};

    // A fresh store reports zeros for bytes never written.
    #[test]
    fn read_unset_returns_zeros() {
        let ds = DataStore::new();
        assert_eq!(ds.read_bytes(1, 0, 4), vec![0; 4]);
    }

    #[test]
    fn write_then_read_roundtrip() {
        let ds = DataStore::new();
        let payload = [0xDE, 0xAD, 0xBE, 0xEF];
        ds.write_bytes(1, 0, &payload);
        assert_eq!(ds.read_bytes(1, 0, 4), payload.to_vec());
    }

    #[test]
    fn write_to_different_dbs_isolated() {
        let ds = DataStore::new();
        ds.write_bytes(1, 0, &[0xAA]);
        ds.write_bytes(2, 0, &[0xBB]);
        assert_eq!(ds.read_bytes(1, 0, 1), vec![0xAA]);
        assert_eq!(ds.read_bytes(2, 0, 1), vec![0xBB]);
    }

    // The same (db, offset) address in a different area must stay untouched.
    #[test]
    fn read_area_uses_area_code() {
        let ds = DataStore::new();
        ds.write_area(area::MARKERS, 0, 10, &[0x99]);
        assert_eq!(ds.read_area(area::PROCESS_OUTPUTS, 0, 10, 1), vec![0x00]);
        assert_eq!(ds.read_area(area::MARKERS, 0, 10, 1), vec![0x99]);
    }

    #[test]
    fn register_area_roundtrip() {
        let ds = DataStore::new();
        assert!(!ds.is_area_registered(0x81));
        ds.register_area(0x81, 1024);
        assert!(ds.is_area_registered(0x81));
        ds.unregister_area(0x81);
        assert!(!ds.is_area_registered(0x81));
    }

    #[test]
    fn cpu_state_defaults_to_stop() {
        assert_eq!(DataStore::new().cpu_state(), CpuState::Stop);
    }

    #[test]
    fn cpu_state_transitions() {
        let ds = DataStore::new();
        for &state in &[CpuState::Run, CpuState::Stop] {
            ds.set_cpu_state(state);
            assert_eq!(ds.cpu_state(), state);
        }
    }

    #[test]
    fn write_callback_invoked() {
        let ds = DataStore::new();
        let fired = Arc::new(AtomicBool::new(false));
        let flag = Arc::clone(&fired);
        ds.on_write(move |_| flag.store(true, Ordering::SeqCst));
        ds.write_bytes(1, 0, &[0x01]);
        assert!(fired.load(Ordering::SeqCst));
    }

    #[test]
    fn event_callback_invoked() {
        let ds = DataStore::new();
        let fired = Arc::new(AtomicBool::new(false));
        let flag = Arc::clone(&fired);
        ds.on_event(move |name| {
            if name == "cpu_start" {
                flag.store(true, Ordering::SeqCst);
            }
        });
        ds.set_cpu_state(CpuState::Run);
        assert!(fired.load(Ordering::SeqCst));
    }
}