1#![no_std]
4
5#[macro_use]
6extern crate bitflags;
7
8use core::{cmp, mem, slice};
9
10pub use self::io::Io;
11mod io;
12
13pub use self::mapper::{PhysicalAddress, VirtualAddress, Mapper};
14mod mapper;
15
16pub use self::mmio::Mmio;
17mod mmio;
18
/// PCI (vendor ID, device ID) pairs of supported Intel SPI controllers.
///
/// All entries use the Intel vendor ID (0x8086); the device IDs cover the
/// PCH SPI controller on several chipset generations.
pub static PCI_IDS: &[(u16, u16)] = &[
    (0x8086, 0x02A4),
    (0x8086, 0x06A4),
    (0x8086, 0x43A4),
    (0x8086, 0x51A4),
    (0x8086, 0x7A24),
    (0x8086, 0x7E23),
    (0x8086, 0x9DA4),
    (0x8086, 0xA0A4),
    (0x8086, 0xA324), // was listed twice; duplicate removed
];
31
/// Errors returned by [`Spi`] operations.
#[derive(Debug)]
pub enum SpiError {
    /// Flash access error. NOTE(review): not constructed anywhere in this
    /// file — presumably produced by other components; confirm usage.
    Access,
    /// A flash cycle failed: the controller reported FCERR.
    Cycle,
    /// A register held an unexpected or unsupported value.
    Register,
}
41
/// Common interface for reading, erasing and writing SPI flash.
///
/// Addresses are byte offsets from the start of the flash device.
#[allow(clippy::len_without_is_empty)]
pub trait Spi {
    /// Returns the total size of the flash component, in bytes.
    fn len(&mut self) -> Result<usize, SpiError>;

    /// Reads bytes starting at `address` into `buf`, returning the number
    /// of bytes read.
    fn read(&mut self, address: usize, buf: &mut [u8]) -> Result<usize, SpiError>;

    /// Erases the flash block containing `address`.
    fn erase(&mut self, address: usize) -> Result<(), SpiError>;

    /// Writes `buf` starting at `address`, returning the number of bytes
    /// written.
    fn write(&mut self, address: usize, buf: &[u8]) -> Result<usize, SpiError>;
}
52
/// An SPI controller discovered through PCIe configuration space and
/// mapped into virtual memory via `M`. The register mapping is released
/// in `Drop`.
pub struct SpiDev<'m, M: Mapper> {
    // Kept so `Drop` can unmap the registers mapped in `new`.
    mapper: &'m mut M,
    /// The mapped controller registers.
    pub regs: &'m mut SpiRegs,
}
57
58impl<'m, M: Mapper> SpiDev<'m, M> {
59 #[allow(clippy::missing_safety_doc)]
60 pub unsafe fn new(mcfg: &[u8], mapper: &'m mut M) -> Result<Self, &'static str> {
61 let pcie_base =
62 (mcfg[0x2c] as usize) |
63 (mcfg[0x2d] as usize) << 8 |
64 (mcfg[0x2e] as usize) << 16 |
65 (mcfg[0x2f] as usize) << 24 |
66 (mcfg[0x30] as usize) << 32 |
67 (mcfg[0x31] as usize) << 40 |
68 (mcfg[0x32] as usize) << 48 |
69 (mcfg[0x33] as usize) << 56;
70
71 let mut phys_opt = None;
72 {
73 let (pcie_bus, pcie_dev, pcie_func) = (0x00, 0x1F, 0x05);
74 let pcie_size = 4096;
75
76 let pcie_phys = PhysicalAddress(
77 pcie_base |
78 (pcie_bus << 20) |
79 (pcie_dev << 15) |
80 (pcie_func << 12)
81 );
82 let pcie_virt = mapper.map(pcie_phys, pcie_size)?;
83 {
84 let pcie_space = slice::from_raw_parts_mut(pcie_virt.0 as *mut u8, pcie_size);
85
86 let vendor_id =
87 (pcie_space[0x00] as u16) |
88 (pcie_space[0x01] as u16) << 8;
89 let product_id =
90 (pcie_space[0x02] as u16) |
91 (pcie_space[0x03] as u16) << 8;
92 for known_id in PCI_IDS.iter() {
93 if known_id.0 == vendor_id && known_id.1 == product_id {
94 let bar0 =
95 (pcie_space[0x10] as u32) |
96 (pcie_space[0x11] as u32) << 8 |
97 (pcie_space[0x12] as u32) << 16 |
98 (pcie_space[0x13] as u32) << 24;
99 phys_opt = Some(PhysicalAddress(bar0 as usize));
100 break;
101 }
102 }
103 }
104 mapper.unmap(pcie_virt, pcie_size)?;
105 }
106
107 let phys = match phys_opt {
108 Some(some) => some,
109 None => return Err("no supported SPI device found"),
110 };
111 let virt = mapper.map(phys, mem::size_of::<SpiRegs>())?;
112 let regs = &mut *(virt.0 as *mut SpiRegs);
113
114 Ok(Self {
115 mapper,
116 regs,
117 })
118 }
119}
120
/// [`Spi`] implementation that forwards every operation to the mapped
/// register block.
impl<'m, M: Mapper> Spi for SpiDev<'m, M> {
    fn len(&mut self) -> Result<usize, SpiError> {
        self.regs.len()
    }

    fn read(&mut self, address: usize, buf: &mut [u8]) -> Result<usize, SpiError> {
        self.regs.read(address, buf)
    }

    fn erase(&mut self, address: usize) -> Result<(), SpiError> {
        self.regs.erase(address)
    }

    fn write(&mut self, address: usize, buf: &[u8]) -> Result<usize, SpiError> {
        self.regs.write(address, buf)
    }
}
138
impl<'m, M: Mapper> Drop for SpiDev<'m, M> {
    /// Unmaps the register block mapped in `new`. The unmap result is
    /// ignored because `drop` cannot propagate errors.
    fn drop(&mut self) {
        let virt = VirtualAddress(self.regs as *mut SpiRegs as usize);
        // SAFETY: `virt` is exactly the mapping created in `new`, and
        // `self.regs` is never used again after drop.
        let _ = unsafe { self.mapper.unmap(virt, mem::size_of::<SpiRegs>()) };
    }
}
145
bitflags! {
    /// Hardware Sequencing Flash Status and Control register (HSFSTS_CTL).
    ///
    /// Status bits live in the low word, control bits in the high word.
    /// Bit meanings follow the register field names; see the Intel PCH
    /// datasheet's SPI programming interface for authoritative semantics.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct HsfStsCtl: u32 {
        /// Flash cycle done — polled after starting a cycle.
        const FDONE = 1 << 0;
        /// Flash cycle error — checked while waiting for completion.
        const FCERR = 1 << 1;
        /// Access error log (per datasheet; unused in this file).
        const H_AEL = 1 << 2;

        /// SPI cycle in progress — polled before starting a new cycle.
        const H_SCIP = 1 << 5;

        /// Write status disable (per datasheet).
        const WRSDIS = 1 << 11;
        /// PRR3/PRR4 lock-down (per datasheet).
        const PRR34_LOCKDN = 1 << 12;
        /// Flash descriptor override pin-strap status (per datasheet).
        const FDOPSS = 1 << 13;
        /// Flash descriptor valid (per datasheet).
        const FDV = 1 << 14;
        /// Flash configuration lock-down (per datasheet).
        const FLOCKDN = 1 << 15;
        /// Flash cycle go — set to start the programmed cycle.
        const FGO = 1 << 16;

        /// Flash cycle type field (4 bits); values in [`HsfStsCtlCycle`].
        const FCYCLE = 0b1111 << 17;
        const FCYCLE_0 = 1 << 17;
        const FCYCLE_1 = 1 << 18;
        const FCYCLE_2 = 1 << 19;
        const FCYCLE_3 = 1 << 20;

        /// Write enable type (per datasheet).
        const WET = 1 << 21;

        /// Flash data byte count field (6 bits), stored as count - 1.
        const FDBC = 0b111111 << 24;
        const FDBC_0 = 1 << 24;
        const FDBC_1 = 1 << 25;
        const FDBC_2 = 1 << 26;
        const FDBC_3 = 1 << 27;
        const FDBC_4 = 1 << 28;
        const FDBC_5 = 1 << 29;

        /// Flash SMI enable — raise SMI on cycle done (per datasheet).
        const FSMIE = 1 << 31;
    }
}
203
204#[allow(dead_code)]
205impl HsfStsCtl {
206 fn sanitize(&mut self) {
207 self.remove(Self::FGO);
214
215 self.remove(Self::FCYCLE);
217
218 self.remove(Self::WET);
220
221 self.remove(Self::FDBC);
223
224 self.remove(Self::FSMIE);
226 }
227
228 fn cycle(&self) -> HsfStsCtlCycle {
229 unsafe { mem::transmute((*self & Self::FCYCLE).bits()) }
230 }
231
232 fn set_cycle(&mut self, value: HsfStsCtlCycle) {
233 *self = (*self & !Self::FCYCLE) | (
234 Self::from_bits_truncate(value as u32)
235 );
236 }
237
238 fn count(&self) -> u8 {
239 (((*self & Self::FDBC).bits() >> 24) + 1) as u8
240 }
241
242 fn set_count(&mut self, value: u8) {
243 *self = (*self & !Self::FDBC) | (
244 Self::from_bits_truncate(
245 (cmp::max(value, 64).saturating_sub(1) as u32) << 24
246 )
247 );
248 }
249}
250
/// Flash cycle types for the FCYCLE field of HSFSTS_CTL, pre-shifted
/// into bits 17..=20 so values can be OR'd into the register directly.
#[repr(u32)]
pub enum HsfStsCtlCycle {
    /// Read flash.
    Read = 0x0 << 17,
    /// Reserved encoding.
    Rsvd = 0x1 << 17,
    /// Write flash.
    Write = 0x2 << 17,
    /// Block erase (used by `SpiRegs::erase`).
    BlockErase = 0x3 << 17,
    /// Sector erase.
    SectorErase = 0x4 << 17,
    /// Read SFDP (Serial Flash Discoverable Parameters).
    ReadSfdp = 0x5 << 17,
    /// Read JEDEC ID.
    ReadJedec = 0x6 << 17,
    /// Write flash status register.
    WriteStatus = 0x7 << 17,
    /// Read flash status register.
    ReadStatus = 0x8 << 17,
    /// RPMC operation 1 (per datasheet).
    RpmcOp1 = 0x9 << 17,
    /// RPMC operation 2 (per datasheet).
    RpmcOp2 = 0xA << 17,
}
276
/// Flash descriptor sections selectable through the FDOC register,
/// pre-shifted into the section-select bits (12..).
#[repr(u32)]
pub enum FdoSection {
    /// Descriptor map.
    Map = 0b000 << 12,
    /// Component section (flash density is read from here in `len`).
    Component = 0b001 << 12,
    /// Region section.
    Region = 0b010 << 12,
    /// Master section.
    Master = 0b011 << 12
}
284
/// Memory-mapped SPI controller registers (the SPIBAR layout).
///
/// `#[repr(packed)]` pins the field layout so offsets match the hardware;
/// the offsets below are verified by the `offsets` unit test. Register
/// name expansions follow the Intel PCH datasheet.
#[allow(dead_code)]
#[repr(packed)]
pub struct SpiRegs {
    bfpreg: Mmio<u32>,          // 0x00: BIOS flash primary region
    hsfsts_ctl: Mmio<u32>,      // 0x04: HW sequencing flash status/control
    faddr: Mmio<u32>,           // 0x08: flash address
    dlock: Mmio<u32>,           // 0x0C: discrete lock bits
    fdata: [Mmio<u32>; 16],     // 0x10..=0x4F: flash data (64 bytes)
    fracc: Mmio<u32>,           // 0x50: flash region access permissions
    freg: [Mmio<u32>; 6],       // 0x54..=0x6B: flash regions
    _reserved1: [Mmio<u32>; 6], // 0x6C..=0x83
    fpr: [Mmio<u32>; 5],        // 0x84..=0x97: flash protected ranges
    gpr: Mmio<u32>,             // 0x98: global protected range
    _reserved2: [Mmio<u32>; 5], // 0x9C..=0xAF
    sfracc: Mmio<u32>,          // 0xB0: secondary flash region access
    fdoc: Mmio<u32>,            // 0xB4: flash descriptor observability control
    fdod: Mmio<u32>,            // 0xB8: flash descriptor observability data
    _reserved3: Mmio<u32>,      // 0xBC
    afc: Mmio<u32>,             // 0xC0: additional flash control
    vscc0: Mmio<u32>,           // 0xC4: vendor specific component capabilities 0
    vscc1: Mmio<u32>,           // 0xC8: vendor specific component capabilities 1
    ptinx: Mmio<u32>,           // 0xCC: parameter table index
    ptdata: Mmio<u32>,          // 0xD0: parameter table data
    sbrs: Mmio<u32>,            // 0xD4: SPI bus requester status
}
328
329impl SpiRegs {
330 pub fn hsfsts_ctl(&self) -> HsfStsCtl {
331 HsfStsCtl::from_bits_truncate(self.hsfsts_ctl.read())
332 }
333
334 pub fn set_hsfsts_ctl(&mut self, value: HsfStsCtl) {
335 self.hsfsts_ctl.write(value.bits());
336 }
337
338 pub fn fdo(&mut self, section: FdoSection, index: u16) -> u32 {
339 self.fdoc.write(
340 (section as u32) |
341 (((index & 0b1111111111) as u32) << 2)
342 );
343 self.fdod.read()
344 }
345}
346
347impl Spi for SpiRegs {
348 fn len(&mut self) -> Result<usize, SpiError> {
349 let kib = 1024;
350 let mib = 1024 * kib;
351
352 let component = self.fdo(FdoSection::Component, 0);
353 Ok(match component & 0b111 {
354 0b000 => 512 * kib,
355 0b001 => mib,
356 0b010 => 2 * mib,
357 0b011 => 4 * mib,
358 0b100 => 8 * mib,
359 0b101 => 16 * mib,
360 0b110 => 32 * mib,
361 0b111 => 64 * mib,
362 _ => return Err(SpiError::Register)
363 })
364 }
365
366 fn read(&mut self, address: usize, buf: &mut [u8]) -> Result<usize, SpiError> {
367 let mut count = 0;
368 for chunk in buf.chunks_mut(64) {
369 let mut hsfsts_ctl;
370
371 loop {
373 hsfsts_ctl = self.hsfsts_ctl();
374 if ! hsfsts_ctl.contains(HsfStsCtl::H_SCIP) {
375 break;
376 }
377 }
378
379 hsfsts_ctl.sanitize();
380 self.set_hsfsts_ctl(hsfsts_ctl);
381
382 hsfsts_ctl.set_cycle(HsfStsCtlCycle::Read);
383 hsfsts_ctl.set_count(chunk.len() as u8);
384 hsfsts_ctl.insert(HsfStsCtl::FGO);
385
386 self.faddr.write((address + count) as u32);
388 self.set_hsfsts_ctl(hsfsts_ctl);
389
390 loop {
392 hsfsts_ctl = self.hsfsts_ctl();
393
394 if hsfsts_ctl.contains(HsfStsCtl::FCERR) {
395 hsfsts_ctl.sanitize();
396 self.set_hsfsts_ctl(hsfsts_ctl);
397
398 return Err(SpiError::Cycle);
399 }
400
401 if hsfsts_ctl.contains(HsfStsCtl::FDONE) {
402 break;
403 }
404 }
405
406 for (i, dword) in chunk.chunks_mut(4).enumerate() {
407 let data = self.fdata[i].read();
408 for (j, byte) in dword.iter_mut().enumerate() {
409 *byte = (data >> (j * 8)) as u8;
410 }
411 }
412
413 hsfsts_ctl.sanitize();
414 self.set_hsfsts_ctl(hsfsts_ctl);
415
416 count += chunk.len()
417 }
418 Ok(count)
419 }
420
421 fn erase(&mut self, address: usize) -> Result<(), SpiError> {
422 let mut hsfsts_ctl;
423
424 loop {
426 hsfsts_ctl = self.hsfsts_ctl();
427 if ! hsfsts_ctl.contains(HsfStsCtl::H_SCIP) {
428 break;
429 }
430 }
431
432 hsfsts_ctl.sanitize();
433 self.set_hsfsts_ctl(hsfsts_ctl);
434
435 hsfsts_ctl.set_cycle(HsfStsCtlCycle::BlockErase);
436 hsfsts_ctl.insert(HsfStsCtl::FGO);
437
438 self.faddr.write(address as u32);
440 self.set_hsfsts_ctl(hsfsts_ctl);
441
442 loop {
444 hsfsts_ctl = self.hsfsts_ctl();
445
446 if hsfsts_ctl.contains(HsfStsCtl::FCERR) {
447 hsfsts_ctl.sanitize();
448 self.set_hsfsts_ctl(hsfsts_ctl);
449
450 return Err(SpiError::Cycle);
451 }
452
453 if hsfsts_ctl.contains(HsfStsCtl::FDONE) {
454 break;
455 }
456 }
457
458 hsfsts_ctl.sanitize();
459 self.set_hsfsts_ctl(hsfsts_ctl);
460
461 Ok(())
462 }
463
464 fn write(&mut self, address: usize, buf: &[u8]) -> Result<usize, SpiError> {
465 let mut count = 0;
466 for chunk in buf.chunks(64) {
467 let mut hsfsts_ctl;
468
469 loop {
471 hsfsts_ctl = self.hsfsts_ctl();
472 if ! hsfsts_ctl.contains(HsfStsCtl::H_SCIP) {
473 break;
474 }
475 }
476
477 hsfsts_ctl.sanitize();
478 self.set_hsfsts_ctl(hsfsts_ctl);
479
480 hsfsts_ctl.set_cycle(HsfStsCtlCycle::Write);
481 hsfsts_ctl.set_count(chunk.len() as u8);
482 hsfsts_ctl.insert(HsfStsCtl::FGO);
483
484 for (i, dword) in chunk.chunks(4).enumerate() {
486 let mut data = 0;
487 for (j, byte) in dword.iter().enumerate() {
488 data |= (*byte as u32) << (j * 8);
489 }
490 self.fdata[i].write(data);
491 }
492
493 self.faddr.write((address + count) as u32);
495 self.set_hsfsts_ctl(hsfsts_ctl);
496
497 loop {
499 hsfsts_ctl = self.hsfsts_ctl();
500
501 if hsfsts_ctl.contains(HsfStsCtl::FCERR) {
502 hsfsts_ctl.sanitize();
503 self.set_hsfsts_ctl(hsfsts_ctl);
504
505 return Err(SpiError::Cycle);
506 }
507
508 if hsfsts_ctl.contains(HsfStsCtl::FDONE) {
509 break;
510 }
511 }
512
513 hsfsts_ctl.sanitize();
514 self.set_hsfsts_ctl(hsfsts_ctl);
515
516 count += chunk.len()
517 }
518 Ok(count)
519 }
520}
521
#[cfg(test)]
mod tests {
    use super::SpiRegs;

    /// Verifies that the `#[repr(packed)]` register layout matches the
    /// expected hardware offsets.
    #[test]
    fn offsets() {
        use core::mem::offset_of;

        // `offset_of!` replaces the previous approach of dereferencing a
        // null pointer and taking `&` references to packed fields — both
        // of which are undefined behavior (the latter is a hard error in
        // current rustc).
        assert_eq!(offset_of!(SpiRegs, bfpreg), 0x00);

        assert_eq!(offset_of!(SpiRegs, freg), 0x54);
        assert_eq!(offset_of!(SpiRegs, fpr), 0x84);

        assert_eq!(offset_of!(SpiRegs, gpr), 0x98);
        assert_eq!(offset_of!(SpiRegs, sfracc), 0xb0);

        assert_eq!(offset_of!(SpiRegs, fdod), 0xb8);
        assert_eq!(offset_of!(SpiRegs, afc), 0xc0);

        assert_eq!(offset_of!(SpiRegs, sbrs), 0xd4);
    }
}