//! snap7_server/store.rs — in-memory data store for the simulated snap7 PLC server.
1use std::collections::HashMap;
2use std::sync::{Arc, Mutex};
3
/// Area codes recognised by the simulated PLC.
///
/// Values mirror the S7 protocol area identifiers used by snap7.
pub mod area {
    /// Counters (0x1C).
    pub const COUNTER: u8 = 0x1C;
    /// Timers (0x1D).
    pub const TIMER: u8 = 0x1D;
    /// Process inputs (0x81).
    pub const PROCESS_INPUTS: u8 = 0x81;
    /// Process outputs (0x82).
    pub const PROCESS_OUTPUTS: u8 = 0x82;
    /// Flag/marker memory (0x83).
    pub const MARKERS: u8 = 0x83;
    /// Data blocks (0x84).
    pub const DATA_BLOCK: u8 = 0x84;
    /// Instance data blocks (0x85).
    pub const INSTANCE_DB: u8 = 0x85;
    /// Local data (0x86).
    pub const LOCAL_DATA: u8 = 0x86;
}
15
/// CPU run-state for the simulated PLC.
///
/// Defaults to [`CpuState::Stop`] — a freshly constructed store models a
/// PLC that has not been started yet.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum CpuState {
    Run,
    /// The default state for a new store.
    #[default]
    Stop,
}
28
/// Information about a data-access event passed to callbacks.
///
/// An instance is built for every `read_area` / `write_area` call and
/// handed by reference to each registered callback.
#[derive(Debug, Clone)]
pub struct EventInfo {
    /// Event kind: `"read"` | `"write"` | `"cpu_stop"` | `"cpu_start"`.
    pub event: &'static str,
    /// Area code of the access (see the [`area`] constants).
    pub area: u8,
    /// DB number for data-block accesses; 0 for non-DB areas.
    pub db_number: u16,
    /// Byte offset at which the access started.
    pub start: u32,
    /// Number of bytes read or written.
    pub length: u32,
}
38
39// ---------------------------------------------------------------------------
40// DataStore – multi-area, CPU state, callbacks
41// ---------------------------------------------------------------------------
42
/// A unified data store that maps `(area, db_number, offset) -> u8`.
///
/// Supports:
/// - Arbitrary area codes (PI / PA / MK / DB / TI / CT / …)
/// - Per-area registration (`register_area` / `unregister_area`)
/// - CPU run-state (`cpu_state` / `set_cpu_state`)
/// - Read / write event callbacks
///
/// `Clone` is cheap: clones share the same underlying state through the
/// `Arc`, so all handles observe the same data, CPU state, and callbacks.
#[derive(Clone)]
pub struct DataStore {
    // Shared, lock-protected state; every public method locks this mutex.
    inner: Arc<Mutex<StoreInner>>,
}
54
55impl Default for DataStore {
56    fn default() -> Self {
57        Self {
58            inner: Arc::new(Mutex::new(StoreInner {
59                data: HashMap::new(),
60                cpu_state: CpuState::Stop,
61                registered_areas: HashMap::new(),
62                read_callbacks: Vec::new(),
63                write_callbacks: Vec::new(),
64                event_callbacks: Vec::new(),
65            })),
66        }
67    }
68}
69
// The state behind `DataStore`'s mutex. Private: all access goes through
// the `DataStore` methods, which manage locking and callback dispatch.
struct StoreInner {
    /// Sparse byte storage: `(area_code, db_number, offset) -> byte`.
    /// Unwritten offsets are simply absent and read back as zero.
    data: HashMap<(u8, u16, u32), u8>,
    /// Simulated CPU run-state.
    cpu_state: CpuState,
    /// Set of registered area codes (just the `area_code` portion).
    /// The value is only a size hint; it does not bound reads or writes.
    registered_areas: HashMap<u8, usize>, // area_code -> size hint
    /// Callbacks fired on data reads (see `fire_read`).
    read_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
    /// Callbacks fired on data writes (see `fire_write`).
    write_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
    /// Callbacks fired on CPU state changes and other server events.
    event_callbacks: Vec<Box<dyn Fn(&str) + Send>>,
}
80
81impl DataStore {
82    /// Create a new empty store.
83    pub fn new() -> Self {
84        Self::default()
85    }
86
87    // -- Area registration ---------------------------------------------------
88
89    /// Register a memory area.  `size` is a hint; reads beyond written bytes
90    /// return zeros.
91    pub fn register_area(&self, area_code: u8, size: usize) {
92        let mut inner = self.inner.lock().unwrap();
93        inner.registered_areas.insert(area_code, size);
94    }
95
96    /// Unregister a previously registered area.
97    pub fn unregister_area(&self, area_code: u8) {
98        let mut inner = self.inner.lock().unwrap();
99        inner.registered_areas.remove(&area_code);
100        // Also purge stored bytes for this area.
101        inner.data.retain(|k, _| k.0 != area_code);
102    }
103
104    /// Check whether an area is registered.
105    pub fn is_area_registered(&self, area_code: u8) -> bool {
106        self.inner.lock().unwrap().registered_areas.contains_key(&area_code)
107    }
108
109    /// Return the set of registered area codes.
110    pub fn registered_areas(&self) -> Vec<u8> {
111        self.inner.lock().unwrap().registered_areas.keys().copied().collect()
112    }
113
114    // -- CPU state -----------------------------------------------------------
115
116    /// Current simulated CPU state.
117    pub fn cpu_state(&self) -> CpuState {
118        self.inner.lock().unwrap().cpu_state
119    }
120
121    /// Set the simulated CPU state and fire `event_callbacks`.
122    pub fn set_cpu_state(&self, state: CpuState) {
123        let mut inner = self.inner.lock().unwrap();
124        inner.cpu_state = state;
125        drop(inner); // release lock before calling callbacks
126
127        let event = match state {
128            CpuState::Run => "cpu_start",
129            CpuState::Stop => "cpu_stop",
130        };
131        self.fire_event(event);
132    }
133
134    // -- Data access (compatible with dispatch) ------------------------------
135
136    /// Read a contiguous range of bytes.
137    pub fn read_bytes(&self, db: u16, start: u32, count: u32) -> Vec<u8> {
138        let inner = self.inner.lock().unwrap();
139        let end = start.saturating_add(count);
140        (start..end)
141            .map(|offset| *inner.data.get(&(0x84, db, offset)).unwrap_or(&0))
142            .collect()
143    }
144
145    /// Read from an arbitrary area.
146    pub fn read_area(&self, area: u8, db: u16, start: u32, count: u32) -> Vec<u8> {
147        let inner = self.inner.lock().unwrap();
148        let end = start.saturating_add(count);
149        let data: Vec<u8> = (start..end)
150            .map(|offset| *inner.data.get(&(area, db, offset)).unwrap_or(&0))
151            .collect();
152
153        // Fire read callbacks after releasing the lock
154        drop(inner);
155        self.fire_read(&EventInfo {
156            event: "read",
157            area,
158            db_number: db,
159            start,
160            length: count,
161        });
162        data
163    }
164
165    /// Write to an arbitrary area.
166    pub fn write_area(&self, area: u8, db: u16, start: u32, data: &[u8]) {
167        let mut inner = self.inner.lock().unwrap();
168        for (i, &byte) in data.iter().enumerate() {
169            if let Some(offset) = start.checked_add(i as u32) {
170                inner.data.insert((area, db, offset), byte);
171            }
172        }
173        drop(inner);
174
175        self.fire_write(&EventInfo {
176            event: "write",
177            area,
178            db_number: db,
179            start,
180            length: data.len() as u32,
181        });
182    }
183
184    /// Write to DB area (convenience, retained for backward compat).
185    pub fn write_bytes(&self, db: u16, start: u32, data: &[u8]) {
186        self.write_area(area::DATA_BLOCK, db, start, data);
187    }
188
189    // -- Callbacks -----------------------------------------------------------
190
191    /// Register a callback fired on every data read.
192    pub fn on_read<F>(&self, cb: F)
193    where
194        F: Fn(&EventInfo) + Send + 'static,
195    {
196        self.inner.lock().unwrap().read_callbacks.push(Box::new(cb));
197    }
198
199    /// Register a callback fired on every data write.
200    pub fn on_write<F>(&self, cb: F)
201    where
202        F: Fn(&EventInfo) + Send + 'static,
203    {
204        self.inner.lock().unwrap().write_callbacks.push(Box::new(cb));
205    }
206
207    /// Register a callback fired on CPU state changes and other server events.
208    pub fn on_event<F>(&self, cb: F)
209    where
210        F: Fn(&str) + Send + 'static,
211    {
212        self.inner.lock().unwrap().event_callbacks.push(Box::new(cb));
213    }
214
215    // -- Internal helpers ----------------------------------------------------
216
217    fn fire_read(&self, info: &EventInfo) {
218        // Take the callback list so we can invoke callbacks without
219        // holding the lock.
220        let callbacks = {
221            let mut inner = self.inner.lock().unwrap();
222            std::mem::take(&mut inner.read_callbacks)
223        };
224        for cb in &callbacks {
225            cb(info);
226        }
227        // Restore callbacks
228        self.inner.lock().unwrap().read_callbacks = callbacks;
229    }
230
231    fn fire_write(&self, info: &EventInfo) {
232        let callbacks = {
233            let mut inner = self.inner.lock().unwrap();
234            std::mem::take(&mut inner.write_callbacks)
235        };
236        for cb in &callbacks {
237            cb(info);
238        }
239        self.inner.lock().unwrap().write_callbacks = callbacks;
240    }
241
242    fn fire_event(&self, event: &str) {
243        let callbacks = {
244            let mut inner = self.inner.lock().unwrap();
245            std::mem::take(&mut inner.event_callbacks)
246        };
247        for cb in &callbacks {
248            cb(event);
249        }
250        self.inner.lock().unwrap().event_callbacks = callbacks;
251    }
252}
253
254// ---------------------------------------------------------------------------
255// Tests
256// ---------------------------------------------------------------------------
257
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn read_unset_returns_zeros() {
        let store = DataStore::new();
        let data = store.read_bytes(1, 0, 4);
        assert_eq!(data, vec![0, 0, 0, 0]);
    }

    #[test]
    fn write_then_read_roundtrip() {
        let store = DataStore::new();
        store.write_bytes(1, 0, &[0xDE, 0xAD, 0xBE, 0xEF]);
        let data = store.read_bytes(1, 0, 4);
        assert_eq!(data, vec![0xDE, 0xAD, 0xBE, 0xEF]);
    }

    #[test]
    fn write_to_different_dbs_isolated() {
        let store = DataStore::new();
        store.write_bytes(1, 0, &[0xAA]);
        store.write_bytes(2, 0, &[0xBB]);
        assert_eq!(store.read_bytes(1, 0, 1), vec![0xAA]);
        assert_eq!(store.read_bytes(2, 0, 1), vec![0xBB]);
    }

    #[test]
    fn read_area_uses_area_code() {
        let store = DataStore::new();
        store.write_area(area::MARKERS, 0, 10, &[0x99]);
        let pa = store.read_area(area::PROCESS_OUTPUTS, 0, 10, 1);
        assert_eq!(pa, vec![0x00]); // different area → no data
        let mk = store.read_area(area::MARKERS, 0, 10, 1);
        assert_eq!(mk, vec![0x99]);
    }

    #[test]
    fn register_area_roundtrip() {
        let store = DataStore::new();
        assert!(!store.is_area_registered(0x81));
        store.register_area(0x81, 1024);
        assert!(store.is_area_registered(0x81));
        store.unregister_area(0x81);
        assert!(!store.is_area_registered(0x81));
    }

    #[test]
    fn unregister_area_purges_data() {
        let store = DataStore::new();
        store.register_area(area::MARKERS, 16);
        store.write_area(area::MARKERS, 0, 0, &[0x55]);
        store.unregister_area(area::MARKERS);
        // Stored bytes are dropped with the area registration.
        assert_eq!(store.read_area(area::MARKERS, 0, 0, 1), vec![0x00]);
    }

    #[test]
    fn cpu_state_defaults_to_stop() {
        let store = DataStore::new();
        assert_eq!(store.cpu_state(), CpuState::Stop);
    }

    #[test]
    fn cpu_state_transitions() {
        let store = DataStore::new();
        store.set_cpu_state(CpuState::Run);
        assert_eq!(store.cpu_state(), CpuState::Run);
        store.set_cpu_state(CpuState::Stop);
        assert_eq!(store.cpu_state(), CpuState::Stop);
    }

    #[test]
    fn read_callback_invoked() {
        use std::sync::atomic::{AtomicBool, Ordering};
        let store = DataStore::new();
        let fired = Arc::new(AtomicBool::new(false));
        let f = fired.clone();
        store.on_read(move |_| {
            f.store(true, Ordering::SeqCst);
        });
        store.read_area(area::DATA_BLOCK, 1, 0, 1);
        assert!(fired.load(Ordering::SeqCst));
    }

    #[test]
    fn write_callback_invoked() {
        use std::sync::atomic::{AtomicBool, Ordering};
        let store = DataStore::new();
        let fired = Arc::new(AtomicBool::new(false));
        let f = fired.clone();
        store.on_write(move |_| {
            f.store(true, Ordering::SeqCst);
        });
        store.write_bytes(1, 0, &[0x01]);
        assert!(fired.load(Ordering::SeqCst));
    }

    #[test]
    fn event_callback_invoked() {
        use std::sync::atomic::{AtomicBool, Ordering};
        let store = DataStore::new();
        let fired = Arc::new(AtomicBool::new(false));
        let f = fired.clone();
        store.on_event(move |e| {
            if e == "cpu_start" {
                f.store(true, Ordering::SeqCst);
            }
        });
        store.set_cpu_state(CpuState::Run);
        assert!(fired.load(Ordering::SeqCst));
    }
}