//! snap7_server/store.rs — data store for the simulated snap7 PLC server.
1use std::collections::{HashMap, VecDeque};
2use std::sync::{Arc, Mutex};
3
/// Area codes recognised by the simulated PLC.
///
/// Values appear to mirror the classic snap7 area identifiers
/// (e.g. `S7AreaDB` = 0x84) — confirm against the snap7 headers if extending.
pub mod area {
    /// Process-image inputs (PI).
    pub const PROCESS_INPUTS: u8 = 0x81;
    /// Process-image outputs (PA).
    pub const PROCESS_OUTPUTS: u8 = 0x82;
    /// Flag / marker memory (M).
    pub const MARKERS: u8 = 0x83;
    /// Data blocks (DB).
    pub const DATA_BLOCK: u8 = 0x84;
    /// Instance data blocks (DI).
    pub const INSTANCE_DB: u8 = 0x85;
    /// Local data (L).
    pub const LOCAL_DATA: u8 = 0x86;
    /// Timers (T).
    pub const TIMER: u8 = 0x1D;
    /// Counters (C).
    pub const COUNTER: u8 = 0x1C;
}
15
/// CPU run-state for the simulated PLC.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CpuState {
    /// CPU is executing (RUN).
    Run,
    /// CPU is halted (STOP); this is also the `Default` state.
    Stop,
}
22
23impl Default for CpuState {
24    fn default() -> Self {
25        CpuState::Stop
26    }
27}
28
/// Information about a data-access event passed to callbacks.
#[derive(Debug, Clone)]
pub struct EventInfo {
    /// Event kind: one of `"read"`, `"write"`, `"cpu_stop"`, `"cpu_start"`.
    pub event: &'static str,
    /// Area code the access targeted (see the [`area`] module).
    pub area: u8,
    /// Data-block number (meaningful for DB accesses; presumably 0 for
    /// other areas — confirm against callers).
    pub db_number: u16,
    /// Byte offset of the first accessed byte.
    pub start: u32,
    /// Number of bytes accessed.
    pub length: u32,
}
38
39// ---------------------------------------------------------------------------
40// DataStore – multi-area, CPU state, callbacks
41// ---------------------------------------------------------------------------
42
/// A unified data store that maps `(area, db_number, offset) -> u8`.
///
/// Supports:
/// - Arbitrary area codes (PI / PA / MK / DB / TI / CT / …)
/// - Per-area registration (`register_area` / `unregister_area`)
/// - CPU run-state (`cpu_state` / `set_cpu_state`)
/// - Read / write event callbacks
///
/// Cloning is cheap: every clone shares the same `Arc<Mutex<_>>`-protected
/// state, so the same store can be handed to multiple threads.
#[derive(Clone)]
pub struct DataStore {
    // Single lock guards all state; methods lock, act, and release quickly.
    inner: Arc<Mutex<StoreInner>>,
}
54
55impl Default for DataStore {
56    fn default() -> Self {
57        Self {
58            inner: Arc::new(Mutex::new(StoreInner {
59                data: HashMap::new(),
60                cpu_state: CpuState::Stop,
61                clock: [0u8; 8],
62                registered_areas: HashMap::new(),
63                locked_areas: HashMap::new(),
64                events: VecDeque::new(),
65                event_mask: 0xFFFF_FFFF,
66                client_count: 0,
67                read_callbacks: Vec::new(),
68                write_callbacks: Vec::new(),
69                event_callbacks: Vec::new(),
70            })),
71        }
72    }
73}
74
/// Server and CPU status returned by [`DataStore::get_status`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ServerStatus {
    /// Whether the server is running.  `get_status` always reports `true`:
    /// the store itself has no notion of a stopped server.
    pub server_running: bool,
    /// Current simulated CPU run-state.
    pub cpu_state: CpuState,
    /// Number of currently connected clients.
    pub clients_count: usize,
}
83
/// Shared mutable state behind [`DataStore`]'s `Arc<Mutex<_>>`.
struct StoreInner {
    /// Sparse byte storage: `(area_code, db_number, offset) -> byte`.
    /// Unwritten offsets have no entry and read back as zero.
    data: HashMap<(u8, u16, u32), u8>,
    /// Current simulated CPU run-state.
    cpu_state: CpuState,
    /// Simulated RTC: 8 BCD bytes in S7 DATE_AND_TIME order.
    clock: [u8; 8],
    /// Set of registered area codes (area_code -> size hint).
    registered_areas: HashMap<u8, usize>,
    /// Locked areas — writes to locked areas are rejected.
    /// (The bool value is always `true`; only key presence matters.)
    locked_areas: HashMap<u8, bool>,
    /// Event log queue (capped at 1024).
    events: VecDeque<EventInfo>,
    /// Bitmask filter: only events whose kind matches are enqueued.
    event_mask: u32,
    /// Connected client count (incremented/decremented by dispatch).
    pub(crate) client_count: usize,
    /// Callbacks fired after each `read_area`.
    read_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
    /// Callbacks fired after each `write_area` / `write_bytes`.
    write_callbacks: Vec<Box<dyn Fn(&EventInfo) + Send>>,
    /// Callbacks fired on CPU state changes and other server events.
    event_callbacks: Vec<Box<dyn Fn(&str) + Send>>,
}
104
105impl DataStore {
106    /// Create a new empty store.
107    pub fn new() -> Self {
108        Self::default()
109    }
110
111    // -- Area registration ---------------------------------------------------
112
113    /// Register a memory area.  `size` is a hint; reads beyond written bytes
114    /// return zeros.
115    pub fn register_area(&self, area_code: u8, size: usize) {
116        let mut inner = self.inner.lock().unwrap();
117        inner.registered_areas.insert(area_code, size);
118    }
119
120    /// Unregister a previously registered area.
121    pub fn unregister_area(&self, area_code: u8) {
122        let mut inner = self.inner.lock().unwrap();
123        inner.registered_areas.remove(&area_code);
124        // Also purge stored bytes for this area.
125        inner.data.retain(|k, _| k.0 != area_code);
126    }
127
128    /// Check whether an area is registered.
129    pub fn is_area_registered(&self, area_code: u8) -> bool {
130        self.inner.lock().unwrap().registered_areas.contains_key(&area_code)
131    }
132
133    /// Return the set of registered area codes.
134    pub fn registered_areas(&self) -> Vec<u8> {
135        self.inner.lock().unwrap().registered_areas.keys().copied().collect()
136    }
137
138    // -- Area lock / unlock --------------------------------------------------
139
140    /// Lock an area: subsequent write_area calls to this area return without
141    /// modifying data (silently skipped, matching C snap7 Srv_LockArea behaviour).
142    pub fn lock_area(&self, area_code: u8) {
143        self.inner.lock().unwrap().locked_areas.insert(area_code, true);
144    }
145
146    /// Unlock an area previously locked with [`lock_area`](Self::lock_area).
147    pub fn unlock_area(&self, area_code: u8) {
148        self.inner.lock().unwrap().locked_areas.remove(&area_code);
149    }
150
151    /// Return whether an area is currently locked.
152    pub fn is_area_locked(&self, area_code: u8) -> bool {
153        self.inner.lock().unwrap().locked_areas.contains_key(&area_code)
154    }
155
156    // -- Server status -------------------------------------------------------
157
158    /// Return current server/CPU status and connected client count.
159    pub fn get_status(&self) -> ServerStatus {
160        let inner = self.inner.lock().unwrap();
161        ServerStatus {
162            server_running: true,
163            cpu_state: inner.cpu_state,
164            clients_count: inner.client_count,
165        }
166    }
167
168    /// Increment the internal client counter (called by dispatch on connect).
169    pub(crate) fn client_connected(&self) {
170        self.inner.lock().unwrap().client_count += 1;
171    }
172
173    /// Decrement the internal client counter (called by dispatch on disconnect).
174    pub(crate) fn client_disconnected(&self) {
175        let mut inner = self.inner.lock().unwrap();
176        inner.client_count = inner.client_count.saturating_sub(1);
177    }
178
179    // -- Event queue ---------------------------------------------------------
180
181    /// Return the current event filter mask.
182    pub fn get_mask(&self) -> u32 {
183        self.inner.lock().unwrap().event_mask
184    }
185
186    /// Set the event filter mask. Only events whose kind-bit is set are enqueued.
187    pub fn set_mask(&self, mask: u32) {
188        self.inner.lock().unwrap().event_mask = mask;
189    }
190
191    /// Drain the event queue.
192    pub fn clear_events(&self) {
193        self.inner.lock().unwrap().events.clear();
194    }
195
196    /// Pop the oldest event from the queue. Returns `None` when empty.
197    pub fn pick_event(&self) -> Option<EventInfo> {
198        self.inner.lock().unwrap().events.pop_front()
199    }
200
201    /// Push an event into the queue (respects mask; queue capped at 1024).
202    #[allow(dead_code)]
203    pub(crate) fn enqueue_event(&self, info: EventInfo, kind_bit: u32) {
204        let mut inner = self.inner.lock().unwrap();
205        if inner.event_mask & kind_bit == 0 {
206            return;
207        }
208        if inner.events.len() >= 1024 {
209            inner.events.pop_front(); // drop oldest when full
210        }
211        inner.events.push_back(info);
212    }
213
214    // -- CPU state -----------------------------------------------------------
215
216    /// Current simulated CPU state.
217    pub fn cpu_state(&self) -> CpuState {
218        self.inner.lock().unwrap().cpu_state
219    }
220
221    /// Set the simulated CPU state and fire `event_callbacks`.
222    pub fn set_cpu_state(&self, state: CpuState) {
223        let mut inner = self.inner.lock().unwrap();
224        inner.cpu_state = state;
225        drop(inner); // release lock before calling callbacks
226
227        let event = match state {
228            CpuState::Run => "cpu_start",
229            CpuState::Stop => "cpu_stop",
230        };
231        self.fire_event(event);
232    }
233
234    // -- Clock ---------------------------------------------------------------
235
236    /// Return the simulated RTC as 8 BCD bytes (S7 DATE_AND_TIME format).
237    pub fn get_clock(&self) -> [u8; 8] {
238        self.inner.lock().unwrap().clock
239    }
240
241    /// Set the simulated RTC from 8 BCD bytes (S7 DATE_AND_TIME format).
242    pub fn set_clock(&self, bytes: [u8; 8]) {
243        self.inner.lock().unwrap().clock = bytes;
244    }
245
246    // -- Data access (compatible with dispatch) ------------------------------
247
248    /// Read a contiguous range of bytes.
249    pub fn read_bytes(&self, db: u16, start: u32, count: u32) -> Vec<u8> {
250        let inner = self.inner.lock().unwrap();
251        let end = start.saturating_add(count);
252        (start..end)
253            .map(|offset| *inner.data.get(&(0x84, db, offset)).unwrap_or(&0))
254            .collect()
255    }
256
257    /// Read from an arbitrary area.
258    pub fn read_area(&self, area: u8, db: u16, start: u32, count: u32) -> Vec<u8> {
259        let inner = self.inner.lock().unwrap();
260        let end = start.saturating_add(count);
261        let data: Vec<u8> = (start..end)
262            .map(|offset| *inner.data.get(&(area, db, offset)).unwrap_or(&0))
263            .collect();
264
265        // Fire read callbacks after releasing the lock
266        drop(inner);
267        self.fire_read(&EventInfo {
268            event: "read",
269            area,
270            db_number: db,
271            start,
272            length: count,
273        });
274        data
275    }
276
277    /// Write to an arbitrary area.
278    ///
279    /// Silently no-ops if the area is currently locked via [`lock_area`](Self::lock_area).
280    pub fn write_area(&self, area: u8, db: u16, start: u32, data: &[u8]) {
281        let mut inner = self.inner.lock().unwrap();
282        if inner.locked_areas.contains_key(&area) {
283            return;
284        }
285        for (i, &byte) in data.iter().enumerate() {
286            if let Some(offset) = start.checked_add(i as u32) {
287                inner.data.insert((area, db, offset), byte);
288            }
289        }
290        drop(inner);
291
292        self.fire_write(&EventInfo {
293            event: "write",
294            area,
295            db_number: db,
296            start,
297            length: data.len() as u32,
298        });
299    }
300
301    /// Write to DB area (convenience, retained for backward compat).
302    pub fn write_bytes(&self, db: u16, start: u32, data: &[u8]) {
303        self.write_area(area::DATA_BLOCK, db, start, data);
304    }
305
306    // -- Callbacks -----------------------------------------------------------
307
308    /// Register a callback fired on every data read.
309    pub fn on_read<F>(&self, cb: F)
310    where
311        F: Fn(&EventInfo) + Send + 'static,
312    {
313        self.inner.lock().unwrap().read_callbacks.push(Box::new(cb));
314    }
315
316    /// Register a callback fired on every data write.
317    pub fn on_write<F>(&self, cb: F)
318    where
319        F: Fn(&EventInfo) + Send + 'static,
320    {
321        self.inner.lock().unwrap().write_callbacks.push(Box::new(cb));
322    }
323
324    /// Register a callback fired on CPU state changes and other server events.
325    pub fn on_event<F>(&self, cb: F)
326    where
327        F: Fn(&str) + Send + 'static,
328    {
329        self.inner.lock().unwrap().event_callbacks.push(Box::new(cb));
330    }
331
332    // -- Internal helpers ----------------------------------------------------
333
334    fn fire_read(&self, info: &EventInfo) {
335        // Take the callback list so we can invoke callbacks without
336        // holding the lock.
337        let callbacks = {
338            let mut inner = self.inner.lock().unwrap();
339            std::mem::take(&mut inner.read_callbacks)
340        };
341        for cb in &callbacks {
342            cb(info);
343        }
344        // Restore callbacks
345        self.inner.lock().unwrap().read_callbacks = callbacks;
346    }
347
348    fn fire_write(&self, info: &EventInfo) {
349        let callbacks = {
350            let mut inner = self.inner.lock().unwrap();
351            std::mem::take(&mut inner.write_callbacks)
352        };
353        for cb in &callbacks {
354            cb(info);
355        }
356        self.inner.lock().unwrap().write_callbacks = callbacks;
357    }
358
359    fn fire_event(&self, event: &str) {
360        let callbacks = {
361            let mut inner = self.inner.lock().unwrap();
362            std::mem::take(&mut inner.event_callbacks)
363        };
364        for cb in &callbacks {
365            cb(event);
366        }
367        self.inner.lock().unwrap().event_callbacks = callbacks;
368    }
369}
370
371// ---------------------------------------------------------------------------
372// Tests
373// ---------------------------------------------------------------------------
374
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn read_unset_returns_zeros() {
        let ds = DataStore::new();
        // Nothing written yet: every byte reads back as zero.
        assert_eq!(ds.read_bytes(1, 0, 4), [0u8, 0, 0, 0]);
    }

    #[test]
    fn write_then_read_roundtrip() {
        let ds = DataStore::new();
        let payload = [0xDE_u8, 0xAD, 0xBE, 0xEF];
        ds.write_bytes(1, 0, &payload);
        assert_eq!(ds.read_bytes(1, 0, payload.len() as u32), payload);
    }

    #[test]
    fn write_to_different_dbs_isolated() {
        let ds = DataStore::new();
        ds.write_bytes(1, 0, &[0xAA]);
        ds.write_bytes(2, 0, &[0xBB]);
        // Each DB number keeps its own byte at offset 0.
        assert_eq!(ds.read_bytes(1, 0, 1), [0xAA]);
        assert_eq!(ds.read_bytes(2, 0, 1), [0xBB]);
    }

    #[test]
    fn read_area_uses_area_code() {
        let ds = DataStore::new();
        ds.write_area(area::MARKERS, 0, 10, &[0x99]);
        // Same offset, different area: must not see the marker byte.
        assert_eq!(ds.read_area(area::PROCESS_OUTPUTS, 0, 10, 1), [0x00]);
        assert_eq!(ds.read_area(area::MARKERS, 0, 10, 1), [0x99]);
    }

    #[test]
    fn register_area_roundtrip() {
        let ds = DataStore::new();
        let pi = area::PROCESS_INPUTS;
        assert!(!ds.is_area_registered(pi));
        ds.register_area(pi, 1024);
        assert!(ds.is_area_registered(pi));
        ds.unregister_area(pi);
        assert!(!ds.is_area_registered(pi));
    }

    #[test]
    fn cpu_state_defaults_to_stop() {
        assert_eq!(DataStore::new().cpu_state(), CpuState::Stop);
    }

    #[test]
    fn cpu_state_transitions() {
        let ds = DataStore::new();
        ds.set_cpu_state(CpuState::Run);
        assert_eq!(ds.cpu_state(), CpuState::Run);
        ds.set_cpu_state(CpuState::Stop);
        assert_eq!(ds.cpu_state(), CpuState::Stop);
    }

    #[test]
    fn write_callback_invoked() {
        use std::sync::atomic::{AtomicUsize, Ordering};
        let ds = DataStore::new();
        let hits = Arc::new(AtomicUsize::new(0));
        let counter = Arc::clone(&hits);
        ds.on_write(move |_| {
            counter.fetch_add(1, Ordering::SeqCst);
        });
        ds.write_bytes(1, 0, &[0x01]);
        assert_eq!(hits.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn event_callback_invoked() {
        use std::sync::atomic::{AtomicBool, Ordering};
        let ds = DataStore::new();
        let saw_start = Arc::new(AtomicBool::new(false));
        let flag = Arc::clone(&saw_start);
        ds.on_event(move |evt| {
            if evt == "cpu_start" {
                flag.store(true, Ordering::SeqCst);
            }
        });
        ds.set_cpu_state(CpuState::Run);
        assert!(saw_start.load(Ordering::SeqCst));
    }
}
465}