prusst/
lib.rs

1//! A convenient Rust interface to the UIO kernel module for TI Programmable Real-time Unit
2//! coprocessors, with roughly the same functionality as the
3//! [C prussdrv library](https://github.com/beagleboard/am335x_pru_package)
4//! but with a safer, rustic API that attempts to mitigate risks related to uninitialized or
5//! invalid register states, use of freed memory, memory allocations conflicts etc.
6//! 
7//! 
8//! # Design rationale
9//! 
10//! The design of the library exploits the Rust type system to reduce the risk of shooting oneself
11//! in the foot. Its architecture is meant to offer improved ergonomics compared to its C relative,
12//! while operating at a similarly low level of abstraction and providing equivalent functionality.
13//! 
14//! Data-race safety is warranted by checking that only one `Pruss` instance (a view of the PRU
15//! subsystem) is running at a time. The magic of the Rust borrowing rules will then _statically_
16//! ensure, inter alia:
17//! 
18//! * the absence of memory aliasing for local and shared PRU RAM, meaning that a previously allocated
19//! RAM segment may not be re-used before the data it contains is released,
20//! 
21//! * the impossibility to request code execution on a PRU core before the code has actually been
22//! loaded,
23//! 
24//! * the impossibility to overwrite PRU code that is already loaded and still in use,
25//! 
26//! * the impossibility to concurrently modify the interrupt mapping.
27//! 
28//! Type safety also avoids many pitfalls associated with interrupt management. Unlike the C prussdrv
29//! library, system events, host interrupt, events out and channels are all distinct types: they cannot
30//! be misused or inadvertently switched in function calls. A related benefit is that the interrupt
31//! management API is very self-explanatory.
32//! 
33//! Event handling is one of the few places where prusst requires the user to be more explicit
34//! than the C prussdrv library. Indeed, the `prussdrv_pru_clear_event` function of the C driver
35//! automatically re-enables an event out after clearing the triggering system event, which may wrongly
36//! suggest that the combined clear-enable operation is thread-safe (it isn't). In contrast, prusst
37//! mandates that both `Intc::clear_sysevt` and `Intc::enable_host` be called if the event out needs to
38//! be caught again. This behavior is probably less surprising and is arguably more consistent with the
39//! atomicity of other interrupt management functions.
40//!
41//!
42//! # Hello world
43//!
44//! ```
45//! extern crate prusst;
46//! 
47//! use prusst::{Pruss, IntcConfig, Sysevt, Evtout};
48//! use std::fs::File;
49//! 
50//! fn main() {
51//!     // Configure and get a view of the PRU subsystem.
52//!     let mut pruss = Pruss::new(&IntcConfig::new_populated()).unwrap();
53//!     
54//!     // Get a handle to an event out before it is triggered.
55//!     let irq = pruss.intc.register_irq(Evtout::E0);
56//! 
57//!     // Open, load and run a PRU binary.
58//!     let mut file = File::open("hello.bin").unwrap();
59//!     unsafe { pruss.pru0.load_code(&mut file).unwrap().run(); }
60//!     
61//!     // Wait for the PRU code from hello.bin to trigger an event out.
62//!     irq.wait();
63//!     
64//!     // Clear the triggering interrupt.
65//!     pruss.intc.clear_sysevt(Sysevt::S19);
66//! 
67//!     // Do nothing: the `pruss` destructor will stop any running code and release resources.
68//!     println!("We are done...");
69//! }
70//! ```
71
72extern crate libc;
73
74mod def;
75mod error;
76mod pubdef;
77pub mod util;
78
79use def::*;
80pub use error::Error;
81pub use pubdef::*;
82
83use std::cmp::Eq;
84use std::ffi::CString;
85use std::fs::File;
86use std::io::{self, Read};
87use std::marker::PhantomData;
88use std::mem;
89use std::ops::{BitOrAssign, Shl};
90use std::ptr;
91use std::result;
92use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, compiler_fence};
93
94
95
// A flag making sure that only one instance of the PRU subsystem is instantiated at a time.
// It is set with an Acquire `swap` in `Pruss::new` and cleared with a Release `store` in
// the `Drop` implementation of `Pruss`.
static PRUSS_IS_INSTANTIATED: AtomicBool = ATOMIC_BOOL_INIT;
98
99
100
/// Result type for the PRU subsystem.
///
/// The error type is the crate's own `Error` type (re-exported from the `error` module).
pub type Result<T> = result::Result<T, Error>;
103
104
105
/// Main interface to the PRU subsystem.
pub struct Pruss<'a> {
    // Keep the memory mappings alive for the whole lifetime of the subsystem view;
    // the raw pointers held by the public members below point into these mappings.
    _prumap: MemMap,
    _hostmap: MemMap,

    /// PRU interrupt controller
    pub intc: Intc,
    /// Program loader for PRU0
    pub pru0: PruLoader,
    /// Program loader for PRU1
    pub pru1: PruLoader,
    /// Data RAM for PRU0
    pub dram0: MemSegment<'a>,
    /// Data RAM for PRU1
    pub dram1: MemSegment<'a>,
    /// Shared data RAM
    pub dram2: MemSegment<'a>,
    /// Host memory
    pub hostram: MemSegment<'a>,
}
126
127impl<'a> Pruss<'a> {
128    /// Creates a PRU subsystem context, mapping all necessary PRU registers and memory.
129    ///
130    /// The interrupt controller is initialized with the provided mapping.
131    pub fn new(intc_config: &IntcConfig) -> Result<Pruss<'a>> {
132        // Enforce singleton instantiation.
133        if PRUSS_IS_INSTANTIATED.swap(true, Ordering::Acquire) {
134            return Err(Error::AlreadyInstantiated);
135        }
136
137        // Handy function to read the size of system devices.
138        fn memsize(path: &str) -> io::Result<usize> {
139            let mut f = try!(File::open(path));
140            let mut buffer = String::new();
141            try!(f.read_to_string(&mut buffer));
142            Ok(usize::from_str_radix(&buffer[2..].trim(), 16).unwrap())
143        };
144
145        // Create memory mapped devices.
146        let file = try!(SyncFile::new(PRUSS_DEVICE_PATH));
147        let prumem_size = try!(memsize(UIO_PRUMEM_SIZE_PATH));
148        let hostmem_size = try!(memsize(UIO_HOSTMEM_SIZE_PATH));
149        let prumap = try!(MemMap::new(file.fd, prumem_size, 0));
150        let hostmap = try!(MemMap::new(file.fd, hostmem_size, 1));
151
152        // Create and initialize the interrupt controller.
153        let mut intc = Intc::new(unsafe { prumap.base.offset(INTC_OFFSET as isize) as *mut u32 });
154        intc.map_interrupts(intc_config);
155
156        // Create the PRU code loaders.
157        let pru0 =
158            PruLoader::new(unsafe { prumap.base.offset(PRU0CTRL_OFFSET as isize) as *mut u32 },
159                           unsafe { prumap.base.offset(IRAM0_OFFSET as isize) },
160                           IRAM0_SIZE);
161        let pru1 =
162            PruLoader::new(unsafe { prumap.base.offset(PRU1CTRL_OFFSET as isize) as *mut u32 },
163                           unsafe { prumap.base.offset(IRAM1_OFFSET as isize) },
164                           IRAM1_SIZE);
165
166        // Create memory views.
167        let dram0 = MemSegment::new(prumap.base, DRAM0_OFFSET, DRAM0_OFFSET + DRAM0_SIZE);
168        let dram1 = MemSegment::new(prumap.base, DRAM1_OFFSET, DRAM1_OFFSET + DRAM1_SIZE);
169        let dram2 = MemSegment::new(prumap.base, DRAM2_OFFSET, DRAM2_OFFSET + DRAM2_SIZE);
170        let hostram = MemSegment::new(hostmap.base, 0, hostmem_size);
171
172        // Voila.
173        Ok(Pruss {
174            _prumap: prumap,
175            _hostmap: hostmap,
176            intc: intc,
177            pru0: pru0,
178            pru1: pru1,
179            dram0: dram0,
180            dram1: dram1,
181            dram2: dram2,
182            hostram: hostram,
183        })
184    }
185}
186
impl<'a> Drop for Pruss<'a> {
    fn drop(&mut self) {
        // Stop instruction executions in both PRUs by soft-resetting them, so
        // that no PRU code keeps running after the mappings are unmapped.
        self.pru0.reset();
        self.pru1.reset();

        // Allow another PRU subsystem context to be instantiated.
        // The Release store pairs with the Acquire swap in `Pruss::new`.
        PRUSS_IS_INSTANTIATED.store(false, Ordering::Release);
    }
}
197
// SAFETY(review): the raw pointers held by `Pruss` point into memory mappings
// owned by the struct itself, and access to them goes through the public
// members, which are governed by Rust borrowing rules. Presumably sound —
// confirm that concurrent access to the underlying UIO mappings is acceptable.
unsafe impl<'a> Send for Pruss<'a> {}

unsafe impl<'a> Sync for Pruss<'a> {}
201
202
203
/// The PRU interrupt controller.
pub struct Intc {
    // Base address of the memory-mapped INTC register block.
    intc_reg: *mut u32,
}
208
impl Intc {
    /// Creates an interrupt controller handle from the base address of the
    /// memory-mapped INTC register block. No register is touched here; the
    /// mapping is applied later through `map_interrupts`.
    fn new(intc_reg: *mut u32) -> Self {
        let intc = Intc { intc_reg: intc_reg };

        intc
    }

    /// Maps PRU interrupts according to the provided configuration.
    ///
    /// The register write sequence mirrors the INTC initialization performed by
    /// the C prussdrv library: polarity, event-to-channel map, channel-to-host
    /// map, interrupt type, event enable/clear, and finally host and global
    /// interrupt enables. The order is hardware-mandated; do not reorder.
    pub fn map_interrupts(&mut self, interrupts: &IntcConfig) {
        unsafe {
            // Set the polarity of system interrupts to high.
            ptr::write_volatile(self.intc_reg.offset(SIPR1_REG), 0xffffffff);
            ptr::write_volatile(self.intc_reg.offset(SIPR2_REG), 0xffffffff);

            // Clear all channel map registers and assign system events to channels.
            // Each CMR register packs the channel assignment of 4 system events
            // (one byte per event), hence the divide-by-4 index and 8-bit shifts.
            for cmrx in 0..NUM_CMRX {
                ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx), 0);
            }
            for m in &interrupts.sysevt_to_channel_map {
                let cmrx = (m.sysevt >> 2) as isize;
                debug_assert!(cmrx < NUM_CMRX);
                // Read-modify-write: preserve assignments of the 3 sibling events.
                let val = ptr::read_volatile(self.intc_reg.offset(CMR_REG + cmrx));
                ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx),
                                    val | (m.channel as u32) << ((m.sysevt as u32 & 0b11) * 8));
            }

            // Clear all host map registers and assign channels to hosts.
            // Same 4-entries-per-register layout as the channel map registers.
            for hmrx in 0..NUM_HMRX {
                ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx), 0);
            }
            for m in &interrupts.channel_to_host_map {
                let hmrx = (m.channel >> 2) as isize;
                debug_assert!(hmrx < NUM_HMRX);
                let val = ptr::read_volatile(self.intc_reg.offset(HMR_REG + hmrx));
                ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx),
                                    val | (m.host as u32) << ((m.channel as u32 & 0b11) * 8));
            }

            // Set the type of system interrupts to pulse.
            ptr::write_volatile(self.intc_reg.offset(SITR1_REG), 0x0);
            ptr::write_volatile(self.intc_reg.offset(SITR2_REG), 0x0);

            // Enable and clear system events.
            // Build the two 32-bit masks covering system events 0-31 and 32-63.
            let (mut mask1, mut mask2) = (0u32, 0u32);
            for se in &interrupts.sysevt_enable {
                match *se {
                    0...31 => mask1 |= 1u32 << se,
                    32...63 => mask2 |= 1u32 << (se - 32),
                    _ => unreachable!(),
                };
            }
            ptr::write_volatile(self.intc_reg.offset(ESR1_REG), mask1);
            ptr::write_volatile(self.intc_reg.offset(SECR1_REG), mask1);
            ptr::write_volatile(self.intc_reg.offset(ESR2_REG), mask2);
            ptr::write_volatile(self.intc_reg.offset(SECR2_REG), mask2);

            // Enable host interrupts: the HIEISR register takes one host
            // interrupt index per write.
            for h in &interrupts.host_enable {
                ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), *h as u32);
            }
            // Finally, set the global interrupt enable bit.
            ptr::write_volatile(self.intc_reg.offset(GER_REG), 0x1);
        }
    }
    
    /// Triggers a system event.
    ///
    /// Sets the corresponding bit in the raw status set register (SRSR1/SRSR2),
    /// as if the event had been raised by hardware.
    pub fn send_sysevt(&self, sysevt: Sysevt) {
        unsafe {
            match sysevt as u8 {
                se @ 0...31 => ptr::write_volatile(self.intc_reg.offset(SRSR1_REG),
                                                   1u32 << se),
                se @ 32...63 => ptr::write_volatile(self.intc_reg.offset(SRSR2_REG),
                                                    1u32 << (se - 32)),
                _ => unreachable!(),
            };
        }
    }

    /// Clears a system event.
    ///
    /// Unlike the raw-mask registers above, SICR takes the event index directly.
    pub fn clear_sysevt(&self, sysevt: Sysevt) {
        unsafe {
            ptr::write_volatile(self.intc_reg.offset(SICR_REG), sysevt as u32);
        }
    }

    /// Enables a system event.
    pub fn enable_sysevt(&self, sysevt: Sysevt) {
        unsafe {
            ptr::write_volatile(self.intc_reg.offset(EISR_REG), sysevt as u32 );
        }
    }

    /// Disables a system event.
    pub fn disable_sysevt(&self, sysevt: Sysevt) {
        unsafe {
            ptr::write_volatile(self.intc_reg.offset(EICR_REG), sysevt as u32 );
        }
    }

    /// Enables or re-enables a host interrupt.
    ///
    /// Beware: calling this function before the triggering system event was cleared will trigger
    /// the host interrupt again.
    pub fn enable_host<T: Into<Host>>(&self, host: T) {
        let host: Host = host.into();
        unsafe {
            ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), host as u32 );
        }
    }

    /// Disables a host interrupt.
    pub fn disable_host<T: Into<Host>>(&self, host: T) {
        let host: Host = host.into();
        unsafe {
            ptr::write_volatile(self.intc_reg.offset(HIDISR_REG), host as u32 );
        }
    }

    /// Returns a synchronization primitive for event out host interrupts.
    ///
    /// Important: this function should be called before any corresponding event out is triggered.
    ///
    /// # Panics
    ///
    /// This function should not panic provided that the uio_pruss kernel module is loaded, which
    /// is theoretically guaranteed at this point since `Pruss` could not have been created
    /// otherwise.
    pub fn register_irq(&self, e: Evtout) -> EvtoutIrq {
        EvtoutIrq::new(e)
    }
}
340
341
342
/// PRU instruction code loader.
pub struct PruLoader {
    // Pointer to the memory-mapped control register of this PRU core.
    pructrl_reg: *mut u32,
    // Base address of this core's instruction RAM window.
    iram_base: *mut u8,
    // Capacity of the instruction RAM, in bytes.
    iram_size: usize,
}
349
350impl PruLoader {
351    fn new(pructrl_reg: *mut u32, iram_base: *mut u8, iram_size: usize) -> PruLoader {
352
353        PruLoader {
354            pructrl_reg: pructrl_reg,
355            iram_base: iram_base,
356            iram_size: iram_size,
357        }
358    }
359
360    /// Loads a binary of opcodes to the PRU without executing it.
361    ///
362    /// This function proceeds as follows:
363    ///
364    /// * a soft PRU reset is forced,
365    /// * the code is written to the PRU instruction RAM.
366    ///
367    /// The code can be subsequently started and stopped using the returned `PruCode` handle.
368    ///
369    /// # Errors
370    ///
371    /// IO errors that may occur while reading the buffer are forwarded.
372    /// If the buffer cannot be read entirely because the code does not fit into the instruction
373    /// RAM, an error of the kind `ErrorKind::InvalidInput` is returned.
374    pub fn load_code<R: Read>(&mut self, code: &mut R) -> io::Result<PruCode> {
375        // Invoke a soft reset of the PRU to make sure no code is currently running.
376        self.reset();
377        // Write the code to the instruction RAM.
378        let n: usize = try!(code.read( unsafe {
379            std::slice::from_raw_parts_mut(self.iram_base, self.iram_size)
380        }));
381        // Make sure the whole buffer was read, otherwise return an InvalidInput error kind.
382        match n {
383            0 => {
384                Err(io::Error::new(io::ErrorKind::InvalidInput,
385                                   "size of PRU code exceeding instruction RAM capacity"))
386            }
387            _ => {
388                // Introduce a fence to ensure that IRAM writes are not reordered past the
389                // call to PruCode::run().
390                // Does it actually work? Who knows, we did what we could.
391                compiler_fence(Ordering::Release);
392                Ok(PruCode::new(self.pructrl_reg))
393            }
394        }
395    }
396
397    /// Resets the PRU.
398    ///
399    /// Invokes a soft reset by clearing the PRU control register.
400    fn reset(&mut self) {
401        unsafe {
402            ptr::write_volatile(self.pructrl_reg, 0);
403        }
404    }
405}
406
407
408
/// View of a contiguous memory segment.
///
/// The design of MemSegment is meant to allow allocation at arbitrary addresses while preventing
/// memory aliasing. This is achieved by allowing segments to be recursively split and by
/// borrowing segments upon object allocation, thus preventing further splitting and allocation
/// until the allocated object goes out of scope. For this reason, segments are neither copyable
/// nor clonable.
pub struct MemSegment<'a> {
    // It is necessary to keep the `from` index rather than offset the `base` pointer because
    // alignment must be checked when allocating memory for arbitrary types.
    base: *mut u8,
    from: usize,
    to: usize,
    // Ties the segment to the lifetime of the underlying memory mapping.
    _memory_marker: PhantomData<&'a [u8]>,
}

impl<'a> MemSegment<'a> {
    // Creates a segment covering bytes `from..to` of the mapping rooted at `base`.
    fn new<'b>(base: *mut u8, from: usize, to: usize) -> MemSegment<'b> {
        MemSegment {
            base: base,
            from: from,
            to: to,
            _memory_marker: PhantomData,
        }
    }

    // Checks alignment and capacity for a `T` placed at the start of the
    // segment and returns a raw pointer to that location.
    //
    // Panics if the segment start is misaligned for `T` or if `T` does not fit.
    fn start_ptr<T>(&self) -> *mut T {
        // Make sure the beginning of the memory region is properly aligned for type T.
        assert!(self.from % mem::align_of::<T>() == 0);
        // Make sure the region is large enough to hold type T.
        assert!(self.to - self.from >= mem::size_of::<T>());

        unsafe { self.base.offset(self.from as isize) as *mut T }
    }

    /// Allocates an object at the beginning of the segment.
    ///
    /// # Panics
    ///
    /// This function will panic if the beginning of the segment is not properly aligned
    /// for type T or if the size of T exceeds its capacity.
    #[inline]
    pub fn alloc<T: Copy>(&mut self, source: T) -> &mut T {
        let target = self.start_ptr::<T>();
        unsafe {
            // Initialize through the raw pointer before forming a reference:
            // the previous implementation created a `&mut T` to uninitialized
            // memory and assigned through it, which is undefined behavior since
            // references must always point to valid values.
            ptr::write(target, source);
            &mut *target
        }
    }

    /// Allocates an object at the beginning of the segment without initializing it.
    ///
    /// This can save some unnecessary initialization if the PRU is anyway going to initialize
    /// memory before it will be read by the host. In some cases, it can also be used to avoid
    /// trashing the stack with a large temporary initialization object if for some reason the
    /// compiler cannot inline the call to `alloc`.
    ///
    /// # Undefined Behavior
    ///
    /// Reading an uninitialized object is undefined behavior (even for Copy types).
    ///
    /// # Panics
    ///
    /// This function will panic if the beginning of the segment is not properly aligned
    /// for type T or if the size of T exceeds its capacity.
    pub unsafe fn alloc_uninitialized<T: Copy>(&mut self) -> &mut T {
        &mut *self.start_ptr::<T>()
    }

    /// Position at which the segment starts (in bytes).
    pub fn begin(&self) -> usize {
        self.from
    }

    /// Position at which the segment ends (in bytes).
    pub fn end(&self) -> usize {
        self.to
    }

    /// Splits the memory segment into two at the given byte position.
    ///
    /// Note that positions (addresses) are absolute and remain valid after the splitting
    /// operation. If for instance a segment is split at 0x00001000, the `begin` method of
    /// the second segment hence created will return 0x00001000 and not 0x00000000.
    pub fn split_at(&mut self, position: usize) -> (MemSegment, MemSegment) {
        // The split point must lie within the segment.
        assert!(position >= self.from && position <= self.to);
        (MemSegment {
            base: self.base,
            from: self.from,
            to: position,
            _memory_marker: PhantomData,
        },
         MemSegment {
            base: self.base,
            from: position,
            to: self.to,
            _memory_marker: PhantomData,
        })
    }
}
504
// SAFETY(review): a `MemSegment` only exposes its memory through `&self`/`&mut self`
// methods, so aliasing is controlled by the borrow checker, and the raw `base`
// pointer is never handed out directly. Presumably sound — confirm that the
// underlying mapping tolerates cross-thread access.
unsafe impl<'a> Send for MemSegment<'a> {}

unsafe impl<'a> Sync for MemSegment<'a> {}
508
509
510
/// PRU interrupt controller configuration.
///
/// A call to the `new_populated` method automatically initializes the data with the same defaults
/// as the PRUSS_INTC_INITDATA macro of the C prussdrv library. Alternatively, a blank-state
/// initialization data structure can be created with `new_empty` and then populated with the
/// dedicated methods.
#[derive(Clone)]
pub struct IntcConfig {
    // System event -> channel assignments (at most one channel per event).
    sysevt_to_channel_map: Vec<SysevtToChannel>,
    // Channel -> host interrupt assignments (at most one host per channel).
    channel_to_host_map: Vec<ChannelToHost>,
    // Numeric identifiers of the system events to enable.
    sysevt_enable: Vec<u8>,
    // Numeric identifiers of the host interrupts to enable.
    host_enable: Vec<u8>,
}
524
525impl IntcConfig {
526    /// Constructs an empty PRU interrupt controller configuration.
527    pub fn new_empty() -> IntcConfig {
528        IntcConfig {
529            sysevt_to_channel_map: Vec::new(),
530            channel_to_host_map: Vec::new(),
531            sysevt_enable: Vec::new(),
532            host_enable: Vec::new(),
533        }
534    }
535
536    /// Constructs a PRU interrupt controller configuration with a default mapping.
537    ///
538    /// The mapping reflects the one defined in the `PRUSS_INTC_INITDATA` C macro of the C
539    /// prussdrv library, namely:
540    ///
541    /// * it maps:
542    ///     - `Sysevt::S17` to `Channel::C1`,
543    ///     - `Sysevt::S18` to `Channel::C0`,
544    ///     - `Sysevt::S19` to `Channel::C2`,
545    ///     - `Sysevt::S20` to `Channel::C3`,
546    ///     - `Sysevt::S21` to `Channel::C0`,
547    ///     - `Sysevt::S22` to `Channel::C1`,
548    ///
549    /// * it maps:
550    ///     - `Channel::C0` to `Host::Pru0`,
551    ///     - `Channel::C1` to `Host::Pru1`,
552    ///     - `Channel::C2` to `Host::Evtout0`,
553    ///     - `Channel::C3` to `Host::Evtout1`,
554    ///
555    /// * it enables:
556    ///     - `Sysevt::S17`,
557    ///     - `Sysevt::S18`,
558    ///     - `Sysevt::S19`,
559    ///     - `Sysevt::S20`,
560    ///     - `Sysevt::S21`,
561    ///     - `Sysevt::S22`,
562    ///
563    /// * it enables:
564    ///     - `Host::Pru0`,
565    ///     - `Host::Pru1`,
566    ///     - `Host::Evtout0`,
567    ///     - `Host::Evtout1`
568    ///
569    pub fn new_populated() -> IntcConfig {
570        let mut config_data = Self::new_empty();
571        config_data.map_sysevts_to_channels(&[(Sysevt::S17, Channel::C1),
572                                            (Sysevt::S18, Channel::C0),
573                                            (Sysevt::S19, Channel::C2),
574                                            (Sysevt::S20, Channel::C3),
575                                            (Sysevt::S21, Channel::C0),
576                                            (Sysevt::S22, Channel::C1)]);
577        config_data.map_channels_to_hosts(&[(Channel::C0, Host::Pru0),
578                                          (Channel::C1, Host::Pru1),
579                                          (Channel::C2, Host::Evtout0),
580                                          (Channel::C3, Host::Evtout1)]);
581        config_data.auto_enable_sysevts();
582        config_data.auto_enable_hosts();
583
584        config_data
585    }
586
587    /// Enables the specified system events.
588    ///
589    /// # Panics
590    ///
591    /// This will panic if a system event is enabled several times.
592    pub fn enable_sysevts(&mut self, sysevts: &[Sysevt]) {
593        let mut bitfield = BitField64::new(NUM_SYSEVTS);
594        self.sysevt_enable = sysevts.iter()
595            .map(|&sysevt| {
596                assert!(bitfield.try_set(sysevt as u8));
597                sysevt as u8
598            })
599            .collect();
600    }
601
602    /// Enables the specified host interrupts.
603    ///
604    /// # Panics
605    ///
606    /// This will panic if a host interrupt is enabled several times.
607    pub fn enable_hosts(&mut self, hosts: &[Host]) {
608        let mut bitfield = BitField32::new(NUM_HOSTS);
609        self.host_enable = hosts.iter()
610            .map(|&host| {
611                assert!(bitfield.try_set(host as u8));
612                host as u8
613            })
614            .collect()
615    }
616
617    /// Automatically enables system events that are already assigned to a channel.
618    pub fn auto_enable_sysevts(&mut self) {
619        self.sysevt_enable = self.sysevt_to_channel_map
620            .iter()
621            .map(|sysevt_to_channel| sysevt_to_channel.sysevt)
622            .collect();
623    }
624
625    /// Automatically enables host interrupts that are already mapped to a channel.
626    pub fn auto_enable_hosts(&mut self) {
627        self.host_enable = self.channel_to_host_map
628            .iter()
629            .map(|channel_to_host| channel_to_host.host)
630            .collect()
631    }
632
633    /// Assigns system events to channels.
634    ///
635    /// A channel can be targeted by several events but an event can be mapped to only one channel.
636    ///
637    /// # Panics
638    ///
639    /// This will panic if a system event is mapped to several channels simultaneously.
640    pub fn map_sysevts_to_channels(&mut self, scmap: &[(Sysevt, Channel)]) {
641        let mut bitfield = BitField64::new(NUM_SYSEVTS);
642        self.sysevt_to_channel_map = scmap.iter()
643            .map(|&(s, c)| {
644                assert!(bitfield.try_set(s as u8));
645                SysevtToChannel {
646                    sysevt: s as u8,
647                    channel: c as u8,
648                }
649            })
650            .collect();
651    }
652
653    /// Assigns channel numbers to host interrupts.
654    ///
655    /// A host interrupt can be targeted by several channels but a channel can be mapped to only
656    /// one host.
657    ///
658    /// # Panics
659    ///
660    /// This will panic if a channel is mapped to several hosts.
661    pub fn map_channels_to_hosts(&mut self, chmap: &[(Channel, Host)]) {
662        let mut bitfield = BitField32::new(NUM_CHANNELS);
663        self.channel_to_host_map = chmap.iter()
664            .map(|&(c, h)| {
665                assert!(bitfield.try_set(c as u8));
666                ChannelToHost {
667                    channel: c as u8,
668                    host: h as u8,
669                }
670            })
671            .collect();
672    }
673}
674
675
676
/// Synchronization primitive that can be used to wait for an event out.
pub struct EvtoutIrq {
    // UIO device file for this event out; a blocking read on it returns when
    // the interrupt fires.
    file: File,
    // The event out this primitive is bound to.
    event: Evtout,
}
682
683impl EvtoutIrq {
684    // This function should not panic as long as the UIO module is loaded.
685    fn new(e: Evtout) -> EvtoutIrq {
686        EvtoutIrq {
687            file: File::open(format!("{}{}", EVTOUT_DEVICE_ROOT_PATH, e as usize)).unwrap(),
688            event: e,
689        }
690    }
691
692    /// Waits until the associated event out is triggered.
693    ///
694    /// # Panics
695    ///
696    /// This function should not panic as long as the UIO module is loaded, which is theoretically
697    /// guaranteed at this point since `Pruss` could not have been created otherwise.
698    pub fn wait(&self) -> u32 {
699        let mut buffer = [0u8; 4];
700        (&mut &(self.file)).read_exact(&mut buffer).unwrap();
701        unsafe { mem::transmute::<[u8; 4], u32>(buffer) }
702    }
703
704    /// Returns the associated event out.
705    pub fn get_evtout(&self) -> Evtout {
706        self.event
707    }
708}
709
710
711
/// Handle to a binary code loaded in the PRU.
pub struct PruCode<'a> {
    // Pointer to the memory-mapped control register of the target PRU core.
    pructrl_reg: *mut u32,
    // Borrows the control register for lifetime 'a so that the loader cannot
    // overwrite the code while this handle is alive.
    _pructrl_marker: PhantomData<&'a mut u32>,
}
717
impl<'a> PruCode<'a> {
    // Creates a handle from a pointer to the PRU control register.
    fn new<'b>(pructrl_reg: *mut u32) -> PruCode<'b> {
        PruCode {
            pructrl_reg: pructrl_reg,
            _pructrl_marker: PhantomData,
        }
    }

    /// Executes the code loaded in the PRU.
    ///
    /// This function writes 1 to the enable bit of the PRU control register, which allows
    /// the loaded code to be started or, if it had been stopped, to resume its execution.
    ///
    /// # Safety
    ///
    /// This runs a binary code that has unrestricted access to pretty much all the processor memory
    /// and peripherals. What could possibly go wrong?
    pub unsafe fn run(&mut self) {
        // Write 2: sets the enable bit while leaving the soft-reset bit
        // deasserted (`reset` below clears the whole register to reset).
        ptr::write_volatile(self.pructrl_reg, 2);
    }

    /// Halts the execution of code running in the PRU.
    ///
    /// This function simply writes 0 to the enable bit of the PRU Control Register. If code was
    /// currently running, it will be stopped. Execution of the code can be resumed with a
    /// subsequent call to `run`.
    pub fn halt(&mut self) {
        // Clear the enable bit of the PRU control register to stop code execution
        // without resetting the PRU (the written value 1 keeps the soft-reset bit
        // deasserted). The original comment here wrongly said "start or resume".
        unsafe {
            ptr::write_volatile(self.pructrl_reg, 1);
        }
    }

    /// Resets the PRU.
    ///
    /// Invokes a soft reset by clearing the PRU control register.
    pub fn reset(&mut self) {
        unsafe {
            ptr::write_volatile(self.pructrl_reg, 0);
        }
    }
}
762
// SAFETY(review): `PruCode` only accesses the single word-sized control
// register, and only through volatile writes. Presumably sound to share across
// threads — confirm against the PRU-ICSS register access rules.
unsafe impl<'a> Send for PruCode<'a> {}

unsafe impl<'a> Sync for PruCode<'a> {}
766
767
768
/// Connection from system event to channel
#[derive(Copy, Clone)]
struct SysevtToChannel {
    // System event number (0-63).
    sysevt: u8,
    // Target channel number.
    channel: u8,
}
775
776
777
/// Connection from channel to host
#[derive(Copy, Clone)]
struct ChannelToHost {
    // Source channel number.
    channel: u8,
    // Target host interrupt number.
    host: u8,
}
784
785
786
/// A read-write file with synchronized I/O.
struct SyncFile {
    // Raw file descriptor returned by `libc::open`; closed on drop.
    fd: libc::c_int,
}
791
792impl SyncFile {
793    fn new(path: &str) -> io::Result<SyncFile> {
794        let fd = unsafe {
795            libc::open(CString::new(path).unwrap().as_ptr(),
796                       libc::O_RDWR | libc::O_SYNC)
797        };
798        match fd {
799            err if err < 0 => Err(io::Error::from_raw_os_error(err as i32)),
800            _ => Ok(SyncFile { fd: fd }),
801        }
802    }
803}
804
impl Drop for SyncFile {
    fn drop(&mut self) {
        // Close the descriptor; any error returned by `close` is ignored since
        // there is no reasonable way to report it from a destructor.
        unsafe {
            libc::close(self.fd);
        }
    }
}
812
813
814
/// Memory-mapped file.
struct MemMap {
    // Start address of the mapping.
    base: *mut u8,
    // Length of the mapping, in bytes.
    size: usize,
}
820
821impl MemMap {
822    fn new(fd: libc::c_int, size: usize, page: isize) -> io::Result<MemMap> {
823        unsafe {
824            let base = libc::mmap(ptr::null_mut(),
825                                  size as libc::size_t,
826                                  libc::PROT_READ | libc::PROT_WRITE,
827                                  libc::MAP_SHARED,
828                                  fd,
829                                  (PAGE_SIZE * page) as libc::off_t);
830            if base == libc::MAP_FAILED {
831                Err(io::Error::last_os_error())
832            } else {
833                Ok(MemMap {
834                    base: base as *mut u8,
835                    size: size,
836                })
837            }
838        }
839    }
840}
841
impl Drop for MemMap {
    fn drop(&mut self) {
        // Release the mapping; the return value is ignored since a failed
        // munmap cannot be meaningfully handled from a destructor.
        unsafe {
            libc::munmap(self.base as *mut libc::c_void, self.size as libc::size_t);
        }
    }
}
849
850
851
/// A small bit field stored in an unsigned integer type; the usable width is
/// bounded by the number of bits of the backing type `T`.
#[derive(Copy, Clone)]
struct BitField<T> {
    bits: T,
    width: u8,
}

impl<T: Eq + BitOrAssign + From<u8> + Copy + Shl<u8, Output = T>> BitField<T> {
    /// Constructs a new bit field with the specified width.
    ///
    /// # Panics
    ///
    /// This will panic if the width does not fit within the underlying type.
    fn new(width: u8) -> Self {
        assert!(width as usize <= mem::size_of::<T>() * 8);
        BitField {
            bits: T::from(0u8),
            width: width,
        }
    }

    /// Attempts to set the bit and returns true if successful, i.e. if the bit was not
    /// already set.
    ///
    /// # Panics
    ///
    /// This will panic if the addressed bit is not within the field width.
    fn try_set(&mut self, bit: u8) -> bool {
        assert!(bit < self.width);
        let mask = T::from(1u8) << bit;
        let before = self.bits;
        self.bits |= mask;
        self.bits != before
    }
}

type BitField32 = BitField<u32>;

type BitField64 = BitField<u64>;