#![no_std]

#[macro_use]
extern crate bitflags;

use core::{cmp, mem, slice};

pub use self::io::Io;
mod io;

pub use self::mapper::{PhysicalAddress, VirtualAddress, Mapper};
mod mapper;

pub use self::mmio::Mmio;
mod mmio;

/// PCI (vendor, device) IDs of supported Intel SPI controllers.
pub static PCI_IDS: &[(u16, u16)] = &[
    (0x8086, 0x02A4),
    (0x8086, 0x06A4),
    (0x8086, 0x43A4),
    (0x8086, 0x51A4),
    (0x8086, 0x7723),
    (0x8086, 0x7A24),
    (0x8086, 0x7E23),
    (0x8086, 0x9DA4),
    (0x8086, 0xA0A4),
    (0x8086, 0xA324),
];

/// Errors returned by SPI flash operations.
#[derive(Debug)]
pub enum SpiError {
    /// The flash could not be accessed.
    Access,
    /// A hardware flash cycle failed.
    Cycle,
    /// A register contained an unexpected value.
    Register,
}

#[allow(clippy::len_without_is_empty)]
pub trait Spi {
    /// Size of the flash component in bytes.
    fn len(&mut self) -> Result<usize, SpiError>;

    /// Read from `address` into `buf`, returning the number of bytes read.
    fn read(&mut self, address: usize, buf: &mut [u8]) -> Result<usize, SpiError>;

    /// Erase the block containing `address`.
    fn erase(&mut self, address: usize) -> Result<(), SpiError>;

    /// Write `buf` at `address`, returning the number of bytes written.
    fn write(&mut self, address: usize, buf: &[u8]) -> Result<usize, SpiError>;
}

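// A minimal usage sketch (not part of the original API): any `Spi`
// implementation can be driven generically through the trait above.
// `read_checked` is a hypothetical helper, shown only to illustrate the
// intended call pattern; it bounds-checks a read against the reported
// flash size before issuing it.
#[allow(dead_code)]
fn read_checked<S: Spi>(spi: &mut S, address: usize, buf: &mut [u8]) -> Result<usize, SpiError> {
    // `len` reports the flash component size in bytes.
    let len = spi.len()?;
    if address.saturating_add(buf.len()) > len {
        return Err(SpiError::Access);
    }
    // `read` returns how many bytes the controller transferred.
    spi.read(address, buf)
}
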
pub struct SpiDev<'m, M: Mapper> {
    mapper: &'m mut M,
    pub regs: &'m mut SpiRegs,
}

impl<'m, M: Mapper> SpiDev<'m, M> {
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn new(mcfg: &[u8], mapper: &'m mut M) -> Result<Self, &'static str> {
        // Base address of the PCIe ECAM window, read as a little-endian
        // u64 from offset 0x2C of the MCFG table.
        let pcie_base =
            (mcfg[0x2c] as usize) |
            (mcfg[0x2d] as usize) << 8 |
            (mcfg[0x2e] as usize) << 16 |
            (mcfg[0x2f] as usize) << 24 |
            (mcfg[0x30] as usize) << 32 |
            (mcfg[0x31] as usize) << 40 |
            (mcfg[0x32] as usize) << 48 |
            (mcfg[0x33] as usize) << 56;

        let mut phys_opt = None;
        {
            // The SPI controller lives at PCI address 00:1f.5.
            let (pcie_bus, pcie_dev, pcie_func) = (0x00, 0x1F, 0x05);
            let pcie_size = 4096;

            let pcie_phys = PhysicalAddress(
                pcie_base |
                (pcie_bus << 20) |
                (pcie_dev << 15) |
                (pcie_func << 12)
            );
            let pcie_virt = mapper.map(pcie_phys, pcie_size)?;
            {
                let pcie_space = slice::from_raw_parts_mut(pcie_virt.0 as *mut u8, pcie_size);

                let vendor_id =
                    (pcie_space[0x00] as u16) |
                    (pcie_space[0x01] as u16) << 8;
                let product_id =
                    (pcie_space[0x02] as u16) |
                    (pcie_space[0x03] as u16) << 8;
                for known_id in PCI_IDS.iter() {
                    if known_id.0 == vendor_id && known_id.1 == product_id {
                        // BAR0 holds the physical address of the SPI registers.
                        let bar0 =
                            (pcie_space[0x10] as u32) |
                            (pcie_space[0x11] as u32) << 8 |
                            (pcie_space[0x12] as u32) << 16 |
                            (pcie_space[0x13] as u32) << 24;
                        phys_opt = Some(PhysicalAddress(bar0 as usize));
                        break;
                    }
                }
            }
            mapper.unmap(pcie_virt, pcie_size)?;
        }

        let phys = match phys_opt {
            Some(some) => some,
            None => return Err("no supported SPI device found"),
        };
        let virt = mapper.map(phys, mem::size_of::<SpiRegs>())?;
        let regs = &mut *(virt.0 as *mut SpiRegs);

        Ok(Self {
            mapper,
            regs,
        })
    }
}
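
// Illustrative sketch (not part of the original crate): `new` above locates
// the controller's PCIe configuration space in the ECAM window using the
// standard `base | (bus << 20) | (device << 15) | (function << 12)` layout.
// The hypothetical helper below spells out that computation on its own.
#[allow(dead_code)]
fn ecam_address(base: usize, bus: usize, device: usize, function: usize) -> usize {
    // Each function gets a 4 KiB configuration space, hence the 12-bit
    // shift for the function number.
    base | (bus << 20) | (device << 15) | (function << 12)
}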

impl<'m, M: Mapper> Spi for SpiDev<'m, M> {
    fn len(&mut self) -> Result<usize, SpiError> {
        self.regs.len()
    }

    fn read(&mut self, address: usize, buf: &mut [u8]) -> Result<usize, SpiError> {
        self.regs.read(address, buf)
    }

    fn erase(&mut self, address: usize) -> Result<(), SpiError> {
        self.regs.erase(address)
    }

    fn write(&mut self, address: usize, buf: &[u8]) -> Result<usize, SpiError> {
        self.regs.write(address, buf)
    }
}

impl<'m, M: Mapper> Drop for SpiDev<'m, M> {
    fn drop(&mut self) {
        let virt = VirtualAddress(self.regs as *mut SpiRegs as usize);
        let _ = unsafe { self.mapper.unmap(virt, mem::size_of::<SpiRegs>()) };
    }
}

bitflags! {
    /// Hardware sequencing flash status and control (HSFSTS_CTL).
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct HsfStsCtl: u32 {
        /// Flash cycle done
        const FDONE = 1 << 0;
        /// Flash cycle error
        const FCERR = 1 << 1;
        /// Hardware sequencing access error log
        const H_AEL = 1 << 2;

        /// SPI cycle in progress
        const H_SCIP = 1 << 5;

        /// Write status disable
        const WRSDIS = 1 << 11;
        /// PRR3/PRR4 lock-down
        const PRR34_LOCKDN = 1 << 12;
        /// Flash descriptor override pin-strap status
        const FDOPSS = 1 << 13;
        /// Flash descriptor valid
        const FDV = 1 << 14;
        /// Flash configuration lock-down
        const FLOCKDN = 1 << 15;
        /// Flash cycle go
        const FGO = 1 << 16;

        /// Flash cycle type (see `HsfStsCtlCycle`)
        const FCYCLE = 0b1111 << 17;
        const FCYCLE_0 = 1 << 17;
        const FCYCLE_1 = 1 << 18;
        const FCYCLE_2 = 1 << 19;
        const FCYCLE_3 = 1 << 20;

        /// Write enable type
        const WET = 1 << 21;

        /// Flash data byte count, stored as `count - 1`
        const FDBC = 0b111111 << 24;
        const FDBC_0 = 1 << 24;
        const FDBC_1 = 1 << 25;
        const FDBC_2 = 1 << 26;
        const FDBC_3 = 1 << 27;
        const FDBC_4 = 1 << 28;
        const FDBC_5 = 1 << 29;

        /// Flash SPI SMI# enable
        const FSMIE = 1 << 31;
    }
}

#[allow(dead_code)]
impl HsfStsCtl {
    /// Clear the control fields (FGO, FCYCLE, WET, FDBC, FSMIE) so the value
    /// can be safely written back. The write-1-to-clear status bits (FDONE,
    /// FCERR, H_AEL) are left in place, so writing the sanitized value also
    /// clears any stale status.
    fn sanitize(&mut self) {
        self.remove(Self::FGO);

        self.remove(Self::FCYCLE);

        self.remove(Self::WET);

        self.remove(Self::FDBC);

        self.remove(Self::FSMIE);
    }

    fn cycle(&self) -> HsfStsCtlCycle {
        // The FCYCLE field sits at bits 17..21, matching the discriminants
        // of `HsfStsCtlCycle`.
        unsafe { mem::transmute((*self & Self::FCYCLE).bits()) }
    }

    fn set_cycle(&mut self, value: HsfStsCtlCycle) {
        *self = (*self & !Self::FCYCLE) | (
            Self::from_bits_truncate(value as u32)
        );
    }

    /// Number of data bytes for the next cycle; FDBC stores `count - 1`.
    fn count(&self) -> u8 {
        (((*self & Self::FDBC).bits() >> 24) + 1) as u8
    }

    /// Set the data byte count, clamped to the 64-byte hardware maximum.
    fn set_count(&mut self, value: u8) {
        *self = (*self & !Self::FDBC) | (
            Self::from_bits_truncate(
                (cmp::min(value, 64).saturating_sub(1) as u32) << 24
            )
        );
    }
}

/// Flash cycle types for the FCYCLE field, pre-shifted to bits 17..21.
#[repr(u32)]
pub enum HsfStsCtlCycle {
    Read = 0x0 << 17,
    Rsvd = 0x1 << 17,
    Write = 0x2 << 17,
    BlockErase = 0x3 << 17,
    SectorErase = 0x4 << 17,
    ReadSfdp = 0x5 << 17,
    ReadJedec = 0x6 << 17,
    WriteStatus = 0x7 << 17,
    ReadStatus = 0x8 << 17,
    RpmcOp1 = 0x9 << 17,
    RpmcOp2 = 0xA << 17,
}

/// Flash descriptor sections addressable through FDOC/FDOD.
#[repr(u32)]
pub enum FdoSection {
    Map = 0b000 << 12,
    Component = 0b001 << 12,
    Region = 0b010 << 12,
    Master = 0b011 << 12,
}

/// Memory-mapped SPI controller registers (BAR0 of device 00:1f.5).
#[allow(dead_code)]
#[repr(C)]
pub struct SpiRegs {
    /// 0x00: BIOS flash primary region
    bfpreg: Mmio<u32>,
    /// 0x04: hardware sequencing flash status and control
    hsfsts_ctl: Mmio<u32>,
    /// 0x08: flash address
    faddr: Mmio<u32>,
    /// 0x0C: discrete lock bits
    dlock: Mmio<u32>,
    /// 0x10: flash data
    fdata: [Mmio<u32>; 16],
    /// 0x50: flash region access permissions
    fracc: Mmio<u32>,
    /// 0x54: flash regions
    freg: [Mmio<u32>; 6],
    _reserved1: [Mmio<u32>; 6],
    /// 0x84: flash protected ranges
    fpr: [Mmio<u32>; 5],
    /// 0x98: global protected range
    gpr: Mmio<u32>,
    _reserved2: [Mmio<u32>; 5],
    /// 0xB0: secondary flash region access permissions
    sfracc: Mmio<u32>,
    /// 0xB4: flash descriptor observability control
    fdoc: Mmio<u32>,
    /// 0xB8: flash descriptor observability data
    fdod: Mmio<u32>,
    _reserved3: Mmio<u32>,
    /// 0xC0: additional flash control
    afc: Mmio<u32>,
    /// 0xC4: vendor specific component capabilities 0
    vscc0: Mmio<u32>,
    /// 0xC8: vendor specific component capabilities 1
    vscc1: Mmio<u32>,
    /// 0xCC: parameter table index
    ptinx: Mmio<u32>,
    /// 0xD0: parameter table data
    ptdata: Mmio<u32>,
    /// 0xD4
    sbrs: Mmio<u32>,
}

impl SpiRegs {
    pub fn hsfsts_ctl(&self) -> HsfStsCtl {
        HsfStsCtl::from_bits_truncate(self.hsfsts_ctl.read())
    }

    pub fn set_hsfsts_ctl(&mut self, value: HsfStsCtl) {
        self.hsfsts_ctl.write(value.bits());
    }

    /// Read a dword from the flash descriptor: select `section` and dword
    /// `index` through FDOC, then read the result from FDOD.
    pub fn fdo(&mut self, section: FdoSection, index: u16) -> u32 {
        self.fdoc.write(
            (section as u32) |
            (((index & 0b1111111111) as u32) << 2)
        );
        self.fdod.read()
    }
}

impl Spi for SpiRegs {
    fn len(&mut self) -> Result<usize, SpiError> {
        let kib = 1024;
        let mib = 1024 * kib;

        // Component density is encoded in the low three bits of the flash
        // descriptor's component section.
        let component = self.fdo(FdoSection::Component, 0);
        Ok(match component & 0b111 {
            0b000 => 512 * kib,
            0b001 => mib,
            0b010 => 2 * mib,
            0b011 => 4 * mib,
            0b100 => 8 * mib,
            0b101 => 16 * mib,
            0b110 => 32 * mib,
            0b111 => 64 * mib,
            _ => return Err(SpiError::Register),
        })
    }

    fn read(&mut self, address: usize, buf: &mut [u8]) -> Result<usize, SpiError> {
        let mut count = 0;
        for chunk in buf.chunks_mut(64) {
            let mut hsfsts_ctl;

            // Wait for any SPI cycle already in progress to finish.
            loop {
                hsfsts_ctl = self.hsfsts_ctl();
                if ! hsfsts_ctl.contains(HsfStsCtl::H_SCIP) {
                    break;
                }
            }

            // Clear stale status and control bits.
            hsfsts_ctl.sanitize();
            self.set_hsfsts_ctl(hsfsts_ctl);

            hsfsts_ctl.set_cycle(HsfStsCtlCycle::Read);
            hsfsts_ctl.set_count(chunk.len() as u8);
            hsfsts_ctl.insert(HsfStsCtl::FGO);

            // Program the flash address, then start the cycle.
            self.faddr.write((address + count) as u32);
            self.set_hsfsts_ctl(hsfsts_ctl);

            // Wait for the cycle to complete.
            loop {
                hsfsts_ctl = self.hsfsts_ctl();

                if hsfsts_ctl.contains(HsfStsCtl::FCERR) {
                    hsfsts_ctl.sanitize();
                    self.set_hsfsts_ctl(hsfsts_ctl);

                    return Err(SpiError::Cycle);
                }

                if hsfsts_ctl.contains(HsfStsCtl::FDONE) {
                    break;
                }
            }

            // Copy the data out of the FDATA registers.
            for (i, dword) in chunk.chunks_mut(4).enumerate() {
                let data = self.fdata[i].read();
                for (j, byte) in dword.iter_mut().enumerate() {
                    *byte = (data >> (j * 8)) as u8;
                }
            }

            hsfsts_ctl.sanitize();
            self.set_hsfsts_ctl(hsfsts_ctl);

            count += chunk.len();
        }
        Ok(count)
    }

    fn erase(&mut self, address: usize) -> Result<(), SpiError> {
        let mut hsfsts_ctl;

        // Wait for any SPI cycle already in progress to finish.
        loop {
            hsfsts_ctl = self.hsfsts_ctl();
            if ! hsfsts_ctl.contains(HsfStsCtl::H_SCIP) {
                break;
            }
        }

        // Clear stale status and control bits.
        hsfsts_ctl.sanitize();
        self.set_hsfsts_ctl(hsfsts_ctl);

        hsfsts_ctl.set_cycle(HsfStsCtlCycle::BlockErase);
        hsfsts_ctl.insert(HsfStsCtl::FGO);

        // Program the flash address, then start the cycle.
        self.faddr.write(address as u32);
        self.set_hsfsts_ctl(hsfsts_ctl);

        // Wait for the cycle to complete.
        loop {
            hsfsts_ctl = self.hsfsts_ctl();

            if hsfsts_ctl.contains(HsfStsCtl::FCERR) {
                hsfsts_ctl.sanitize();
                self.set_hsfsts_ctl(hsfsts_ctl);

                return Err(SpiError::Cycle);
            }

            if hsfsts_ctl.contains(HsfStsCtl::FDONE) {
                break;
            }
        }

        hsfsts_ctl.sanitize();
        self.set_hsfsts_ctl(hsfsts_ctl);

        Ok(())
    }

    fn write(&mut self, address: usize, buf: &[u8]) -> Result<usize, SpiError> {
        let mut count = 0;
        for chunk in buf.chunks(64) {
            let mut hsfsts_ctl;

            // Wait for any SPI cycle already in progress to finish.
            loop {
                hsfsts_ctl = self.hsfsts_ctl();
                if ! hsfsts_ctl.contains(HsfStsCtl::H_SCIP) {
                    break;
                }
            }

            // Clear stale status and control bits.
            hsfsts_ctl.sanitize();
            self.set_hsfsts_ctl(hsfsts_ctl);

            hsfsts_ctl.set_cycle(HsfStsCtlCycle::Write);
            hsfsts_ctl.set_count(chunk.len() as u8);
            hsfsts_ctl.insert(HsfStsCtl::FGO);

            // Load the data into the FDATA registers.
            for (i, dword) in chunk.chunks(4).enumerate() {
                let mut data = 0;
                for (j, byte) in dword.iter().enumerate() {
                    data |= (*byte as u32) << (j * 8);
                }
                self.fdata[i].write(data);
            }

            // Program the flash address, then start the cycle.
            self.faddr.write((address + count) as u32);
            self.set_hsfsts_ctl(hsfsts_ctl);

            // Wait for the cycle to complete.
            loop {
                hsfsts_ctl = self.hsfsts_ctl();

                if hsfsts_ctl.contains(HsfStsCtl::FCERR) {
                    hsfsts_ctl.sanitize();
                    self.set_hsfsts_ctl(hsfsts_ctl);

                    return Err(SpiError::Cycle);
                }

                if hsfsts_ctl.contains(HsfStsCtl::FDONE) {
                    break;
                }
            }

            hsfsts_ctl.sanitize();
            self.set_hsfsts_ctl(hsfsts_ctl);

            count += chunk.len();
        }
        Ok(count)
    }
}

#[cfg(test)]
mod tests {
    use super::SpiRegs;

    #[test]
    fn offsets() {
        unsafe {
            let spi: &SpiRegs = &*(0 as *const SpiRegs);

            assert_eq!(&spi.bfpreg as *const _ as usize, 0x00);

            assert_eq!(&spi.freg as *const _ as usize, 0x54);
            assert_eq!(&spi.fpr as *const _ as usize, 0x84);

            assert_eq!(&spi.gpr as *const _ as usize, 0x98);
            assert_eq!(&spi.sfracc as *const _ as usize, 0xb0);

            assert_eq!(&spi.fdod as *const _ as usize, 0xb8);
            assert_eq!(&spi.afc as *const _ as usize, 0xc0);

            assert_eq!(&spi.sbrs as *const _ as usize, 0xd4);
        }
    }
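
    // Additional sketch test (not in the original sources): exercises the
    // HsfStsCtl byte-count helpers. FDBC stores `count - 1` in bits 24..30,
    // so `set_count` followed by `count` should round-trip any value in
    // 1..=64.
    #[test]
    fn fdbc_round_trip() {
        use super::HsfStsCtl;

        let mut hsfsts_ctl = HsfStsCtl::empty();
        for count in 1..=64u8 {
            hsfsts_ctl.set_count(count);
            assert_eq!(hsfsts_ctl.count(), count);
        }
    }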
}