#![macro_use]
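
//! I2C-compatible Two Wire Interface in slave mode (TWIS) driver.
//!
//! A minimal usage sketch follows. The peripheral (`TWISPI0`), pins (`P0_03`,
//! `P0_04`) and the interrupt name in `bind_interrupts!` are placeholders; use
//! the names your chip and board actually expose. The loop runs inside an
//! async context.
//!
//! ```rust,ignore
//! use embassy_nrf::twis::{self, Command, Twis};
//! use embassy_nrf::{bind_interrupts, peripherals};
//!
//! bind_interrupts!(struct Irqs {
//!     TWISPI0 => twis::InterruptHandler<peripherals::TWISPI0>;
//! });
//!
//! let p = embassy_nrf::init(Default::default());
//!
//! let mut config = twis::Config::default();
//! config.address0 = 0x55;
//! // Arguments: peripheral, interrupt binding, SDA pin, SCL pin, config.
//! let mut i2c = Twis::new(p.TWISPI0, Irqs, p.P0_03, p.P0_04, config);
//!
//! let mut rx_buf = [0u8; 16];
//! loop {
//!     match i2c.listen(&mut rx_buf).await {
//!         Ok(Command::Read) => {
//!             // The master wants to read: send it some data.
//!             i2c.respond_to_read(&[0x01, 0x02, 0x03]).await.ok();
//!         }
//!         Ok(Command::Write(n)) => {
//!             // The master wrote `n` bytes into rx_buf.
//!         }
//!         Ok(Command::WriteRead(n)) => {
//!             // The master wrote `n` bytes, then issued a repeated-start read.
//!             i2c.respond_to_read(&[0xAA]).await.ok();
//!         }
//!         Err(_e) => {}
//!     }
//! }
//! ```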

use core::future::{poll_fn, Future};
use core::marker::PhantomData;
use core::sync::atomic::compiler_fence;
use core::sync::atomic::Ordering::SeqCst;
use core::task::Poll;

use embassy_hal_internal::{Peri, PeripheralType};
use embassy_sync::waitqueue::AtomicWaker;
#[cfg(feature = "time")]
use embassy_time::{Duration, Instant};

use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
use crate::gpio::Pin as GpioPin;
use crate::interrupt::typelevel::Interrupt;
use crate::pac::gpio::vals as gpiovals;
use crate::pac::twis::vals;
use crate::util::slice_in_ram_or;
use crate::{gpio, interrupt, pac};

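/// TWIS driver configuration.
///
/// A sketch of typical use (the field values here are arbitrary):
///
/// ```rust,ignore
/// let mut config = embassy_nrf::twis::Config::default();
/// config.address0 = 0x42;
/// config.sda_pullup = true;
/// config.scl_pullup = true;
/// ```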
#[non_exhaustive]
pub struct Config {
    /// First slave address.
    pub address0: u8,

    /// Second slave address, optional.
    pub address1: Option<u8>,

    /// Over-read character.
    ///
    /// This byte is clocked out if the master reads more bytes than the
    /// provided TX buffer contains.
    pub orc: u8,

    /// Enable high drive for the SDA line.
    pub sda_high_drive: bool,

    /// Enable the internal pullup on the SDA line.
    ///
    /// External pullups are generally recommended for I2C, and most boards
    /// already have them.
    pub sda_pullup: bool,

    /// Enable high drive for the SCL line.
    pub scl_high_drive: bool,

    /// Enable the internal pullup on the SCL line.
    ///
    /// External pullups are generally recommended for I2C, and most boards
    /// already have them.
    pub scl_pullup: bool,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            address0: 0x55,
            address1: None,
            orc: 0x00,
            scl_high_drive: false,
            sda_pullup: false,
            sda_high_drive: false,
            scl_pullup: false,
        }
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
enum Status {
    Read,
    Write,
}

/// TWIS error.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// TX buffer is longer than EasyDMA allows.
    TxBufferTooLong,
    /// RX buffer is longer than EasyDMA allows.
    RxBufferTooLong,
    /// A data byte was NACKed.
    DataNack,
    /// Bus error.
    Bus,
    /// The buffer is not in data RAM. It is most likely in flash, which EasyDMA cannot access.
    BufferNotInRAM,
    /// The master wrote more data than fits in the RX buffer.
    Overflow,
    /// The master read more data than the TX buffer provided.
    OverRead,
    /// Operation timed out.
    Timeout,
}

/// Command received from an I2C master.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Command {
    /// The master wants to read data.
    Read,
    /// The master wrote the given number of bytes, then issued a repeated-start read.
    WriteRead(usize),
    /// The master wrote the given number of bytes.
    Write(usize),
}

/// Interrupt handler.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}

impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // An event fired: wake the waiting task and mask the corresponding
        // interrupt; the driver re-enables it the next time it needs it.
        if r.events_read().read() != 0 || r.events_write().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| {
                w.set_read(true);
                w.set_write(true);
            });
        }
        if r.events_stopped().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_stopped(true));
        }
        if r.events_error().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_error(true));
        }
    }
}

/// TWIS driver.
pub struct Twis<'d, T: Instance> {
    _p: Peri<'d, T>,
}

impl<'d, T: Instance> Twis<'d, T> {
    /// Create a new TWIS driver.
    pub fn new(
        twis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        sda: Peri<'d, impl GpioPin>,
        scl: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        // Configure SDA and SCL pins.
        sda.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            w.set_drive(match config.sda_high_drive {
                true => gpiovals::Drive::H0D1,
                false => gpiovals::Drive::S0D1,
            });
            if config.sda_pullup {
                w.set_pull(gpiovals::Pull::PULLUP);
            }
        });
        scl.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            w.set_drive(match config.scl_high_drive {
                true => gpiovals::Drive::H0D1,
                false => gpiovals::Drive::S0D1,
            });
            if config.scl_pullup {
                w.set_pull(gpiovals::Pull::PULLUP);
            }
        });

        // Select pins.
        r.psel().sda().write_value(sda.psel_bits());
        r.psel().scl().write_value(scl.psel_bits());

        // Enable the TWIS instance.
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        // Disable all event interrupts.
        r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

        // Set addresses.
        r.address(0).write(|w| w.set_address(config.address0));
        r.config().write(|w| w.set_address0(true));
        if let Some(address1) = config.address1 {
            r.address(1).write(|w| w.set_address(address1));
            r.config().modify(|w| w.set_address1(true));
        }

        // Set the over-read character.
        r.orc().write(|w| w.set_orc(config.orc));

        // Generate suspend on read event.
        r.shorts().write(|w| w.set_read_suspend(true));

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };

        Self { _p: twis }
    }

    /// Set the TX buffer, checking that it is in RAM and has a suitable length.
    unsafe fn set_tx_buffer(&mut self, buffer: &[u8]) -> Result<(), Error> {
        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;

        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::TxBufferTooLong);
        }

        let r = T::regs();

        // The length has been checked against EASY_DMA_SIZE above, so the
        // MAXCNT write cannot exceed the hardware limit.
        r.txd().ptr().write_value(buffer.as_ptr() as u32);
        r.txd().maxcnt().write(|w| w.set_maxcnt(buffer.len() as _));

        Ok(())
    }

    /// Set the RX buffer, checking that it has a suitable length.
    unsafe fn set_rx_buffer(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        // NOTE: a RAM check is not needed here, as a mutable slice can only
        // point to writable (RAM) memory.
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::RxBufferTooLong);
        }

        let r = T::regs();

        r.rxd().ptr().write_value(buffer.as_mut_ptr() as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(buffer.len() as _));

        Ok(())
    }

    fn clear_errorsrc(&mut self) {
        let r = T::regs();
        r.errorsrc().write(|w| {
            w.set_overflow(true);
            w.set_overread(true);
            w.set_dnack(true);
        });
    }

    /// Returns the address that was matched by the latest command.
    pub fn address_match(&self) -> u8 {
        let r = T::regs();
        r.address(r.match_().read().0 as usize).read().address()
    }

    /// Returns the index of the address that was matched by the latest command.
    pub fn address_match_index(&self) -> usize {
        T::regs().match_().read().0 as _
    }

    /// Wait for a read, write, stop or error event (blocking).
    fn blocking_listen_wait(&mut self) -> Result<Status, Error> {
        let r = T::regs();
        loop {
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                while r.events_stopped().read() == 0 {}
                return Err(Error::Overflow);
            }
            if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                return Err(Error::Bus);
            }
            if r.events_read().read() != 0 {
                r.events_read().write_value(0);
                return Ok(Status::Read);
            }
            if r.events_write().read() != 0 {
                r.events_write().write_value(0);
                return Ok(Status::Write);
            }
        }
    }

    /// Wait for a stop or repeated-start event after a write (blocking).
    fn blocking_listen_wait_end(&mut self, status: Status) -> Result<Command, Error> {
        let r = T::regs();
        loop {
            // Stop if an error occurred.
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                return Err(Error::Overflow);
            } else if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                return match status {
                    Status::Read => Ok(Command::Read),
                    Status::Write => {
                        let n = r.rxd().amount().read().0 as usize;
                        Ok(Command::Write(n))
                    }
                };
            } else if r.events_read().read() != 0 {
                r.events_read().write_value(0);
                let n = r.rxd().amount().read().0 as usize;
                return Ok(Command::WriteRead(n));
            }
        }
    }

    /// Wait for the transmission to complete or fail (blocking).
    fn blocking_wait(&mut self) -> Result<usize, Error> {
        let r = T::regs();
        loop {
            // Stop if an error occurred.
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                let errorsrc = r.errorsrc().read();
                if errorsrc.overread() {
                    return Err(Error::OverRead);
                } else if errorsrc.dnack() {
                    return Err(Error::DataNack);
                } else {
                    return Err(Error::Bus);
                }
            } else if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                let n = r.txd().amount().read().0 as usize;
                return Ok(n);
            }
        }
    }

    /// Wait for the transmission to complete or fail, with a timeout (blocking).
    #[cfg(feature = "time")]
    fn blocking_wait_timeout(&mut self, timeout: Duration) -> Result<usize, Error> {
        let r = T::regs();
        let deadline = Instant::now() + timeout;
        loop {
            // Stop if an error occurred.
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                let errorsrc = r.errorsrc().read();
                if errorsrc.overread() {
                    return Err(Error::OverRead);
                } else if errorsrc.dnack() {
                    return Err(Error::DataNack);
                } else {
                    return Err(Error::Bus);
                }
            } else if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                let n = r.txd().amount().read().0 as usize;
                return Ok(n);
            } else if Instant::now() > deadline {
                r.tasks_stop().write_value(1);
                return Err(Error::Timeout);
            }
        }
    }

    /// Wait for a read, write, stop or error event, with a timeout (blocking).
    #[cfg(feature = "time")]
    fn blocking_listen_wait_timeout(&mut self, timeout: Duration) -> Result<Status, Error> {
        let r = T::regs();
        let deadline = Instant::now() + timeout;
        loop {
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                while r.events_stopped().read() == 0 {}
                return Err(Error::Overflow);
            }
            if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                return Err(Error::Bus);
            }
            if r.events_read().read() != 0 {
                r.events_read().write_value(0);
                return Ok(Status::Read);
            }
            if r.events_write().read() != 0 {
                r.events_write().write_value(0);
                return Ok(Status::Write);
            }
            if Instant::now() > deadline {
                r.tasks_stop().write_value(1);
                return Err(Error::Timeout);
            }
        }
    }

    /// Wait for a stop or repeated-start event after a write, with a timeout (blocking).
    #[cfg(feature = "time")]
    fn blocking_listen_wait_end_timeout(&mut self, status: Status, timeout: Duration) -> Result<Command, Error> {
        let r = T::regs();
        let deadline = Instant::now() + timeout;
        loop {
            // Stop if an error occurred.
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                return Err(Error::Overflow);
            } else if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                return match status {
                    Status::Read => Ok(Command::Read),
                    Status::Write => {
                        let n = r.rxd().amount().read().0 as usize;
                        Ok(Command::Write(n))
                    }
                };
            } else if r.events_read().read() != 0 {
                r.events_read().write_value(0);
                let n = r.rxd().amount().read().0 as usize;
                return Ok(Command::WriteRead(n));
            } else if Instant::now() > deadline {
                r.tasks_stop().write_value(1);
                return Err(Error::Timeout);
            }
        }
    }

    /// Wait for the transmission to complete or fail (async).
    fn async_wait(&mut self) -> impl Future<Output = Result<usize, Error>> {
        poll_fn(move |cx| {
            let r = T::regs();
            let s = T::state();

            s.waker.register(cx.waker());

            // Stop if an error occurred.
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                let errorsrc = r.errorsrc().read();
                if errorsrc.overread() {
                    return Poll::Ready(Err(Error::OverRead));
                } else if errorsrc.dnack() {
                    return Poll::Ready(Err(Error::DataNack));
                } else {
                    return Poll::Ready(Err(Error::Bus));
                }
            } else if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                let n = r.txd().amount().read().0 as usize;
                return Poll::Ready(Ok(n));
            }

            Poll::Pending
        })
    }

    /// Wait for a read, write, stop or error event (async).
    fn async_listen_wait(&mut self) -> impl Future<Output = Result<Status, Error>> {
        poll_fn(move |cx| {
            let r = T::regs();
            let s = T::state();

            s.waker.register(cx.waker());

            // Stop if an error occurred.
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                return Poll::Ready(Err(Error::Overflow));
            } else if r.events_read().read() != 0 {
                r.events_read().write_value(0);
                return Poll::Ready(Ok(Status::Read));
            } else if r.events_write().read() != 0 {
                r.events_write().write_value(0);
                return Poll::Ready(Ok(Status::Write));
            } else if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                return Poll::Ready(Err(Error::Bus));
            }
            Poll::Pending
        })
    }

    /// Wait for a stop or repeated-start event after a write (async).
    fn async_listen_wait_end(&mut self, status: Status) -> impl Future<Output = Result<Command, Error>> {
        poll_fn(move |cx| {
            let r = T::regs();
            let s = T::state();

            s.waker.register(cx.waker());

            // Stop if an error occurred.
            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                r.tasks_stop().write_value(1);
                return Poll::Ready(Err(Error::Overflow));
            } else if r.events_stopped().read() != 0 {
                r.events_stopped().write_value(0);
                return match status {
                    Status::Read => Poll::Ready(Ok(Command::Read)),
                    Status::Write => {
                        let n = r.rxd().amount().read().0 as usize;
                        Poll::Ready(Ok(Command::Write(n)))
                    }
                };
            } else if r.events_read().read() != 0 {
                r.events_read().write_value(0);
                let n = r.rxd().amount().read().0 as usize;
                return Poll::Ready(Ok(Command::WriteRead(n)));
            }
            Poll::Pending
        })
    }

    fn setup_respond_from_ram(&mut self, buffer: &[u8], inten: bool) -> Result<(), Error> {
        let r = T::regs();

        compiler_fence(SeqCst);

        // Set up the DMA write.
        unsafe { self.set_tx_buffer(buffer)? };

        // Clear events.
        r.events_stopped().write_value(0);
        r.events_error().write_value(0);
        self.clear_errorsrc();

        if inten {
            r.intenset().write(|w| {
                w.set_stopped(true);
                w.set_error(true);
            });
        } else {
            r.intenclr().write(|w| {
                w.set_stopped(true);
                w.set_error(true);
            });
        }

        // Start the write operation.
        r.tasks_preparetx().write_value(1);
        r.tasks_resume().write_value(1);
        Ok(())
    }

    fn setup_respond(&mut self, wr_buffer: &[u8], inten: bool) -> Result<(), Error> {
        match self.setup_respond_from_ram(wr_buffer, inten) {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying TWIS tx buffer into RAM for DMA");
                let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..wr_buffer.len()];
                tx_ram_buf.copy_from_slice(wr_buffer);
                self.setup_respond_from_ram(tx_ram_buf, inten)
            }
            Err(error) => Err(error),
        }
    }

    fn setup_listen(&mut self, buffer: &mut [u8], inten: bool) -> Result<(), Error> {
        let r = T::regs();
        compiler_fence(SeqCst);

        // Set up the DMA read.
        unsafe { self.set_rx_buffer(buffer)? };

        // Clear events.
        r.events_read().write_value(0);
        r.events_write().write_value(0);
        r.events_stopped().write_value(0);
        r.events_error().write_value(0);
        self.clear_errorsrc();

        if inten {
            r.intenset().write(|w| {
                w.set_stopped(true);
                w.set_error(true);
                w.set_read(true);
                w.set_write(true);
            });
        } else {
            r.intenclr().write(|w| {
                w.set_stopped(true);
                w.set_error(true);
                w.set_read(true);
                w.set_write(true);
            });
        }

        // Start the read operation.
        r.tasks_preparerx().write_value(1);

        Ok(())
    }

    fn setup_listen_end(&mut self, inten: bool) -> Result<(), Error> {
        let r = T::regs();
        compiler_fence(SeqCst);

        // Clear events.
        r.events_read().write_value(0);
        r.events_write().write_value(0);
        r.events_stopped().write_value(0);
        r.events_error().write_value(0);
        self.clear_errorsrc();

        if inten {
            r.intenset().write(|w| {
                w.set_stopped(true);
                w.set_error(true);
                w.set_read(true);
            });
        } else {
            r.intenclr().write(|w| {
                w.set_stopped(true);
                w.set_error(true);
                w.set_read(true);
            });
        }

        Ok(())
    }

    /// Wait for commands from an I2C master (blocking).
    ///
    /// Returns [`Command::Read`] if the master wants to read, [`Command::Write`]
    /// with the number of received bytes if it wrote, or [`Command::WriteRead`]
    /// if a write was followed by a repeated-start read.
    pub fn blocking_listen(&mut self, buffer: &mut [u8]) -> Result<Command, Error> {
        self.setup_listen(buffer, false)?;
        let status = self.blocking_listen_wait()?;
        if status == Status::Write {
            self.setup_listen_end(false)?;
            let command = self.blocking_listen_wait_end(status)?;
            return Ok(command);
        }
        Ok(Command::Read)
    }

    /// Respond to an I2C master READ command (blocking).
    ///
    /// Returns the number of bytes transmitted.
    pub fn blocking_respond_to_read(&mut self, buffer: &[u8]) -> Result<usize, Error> {
        self.setup_respond(buffer, false)?;
        self.blocking_wait()
    }

    /// Same as [`blocking_respond_to_read`](Twis::blocking_respond_to_read), but will fail
    /// instead of copying data into RAM.
    pub fn blocking_respond_to_read_from_ram(&mut self, buffer: &[u8]) -> Result<usize, Error> {
        self.setup_respond_from_ram(buffer, false)?;
        self.blocking_wait()
    }

    /// Wait for commands from an I2C master, with a timeout (blocking).
    ///
    /// See [`blocking_listen`](Twis::blocking_listen).
    #[cfg(feature = "time")]
    pub fn blocking_listen_timeout(&mut self, buffer: &mut [u8], timeout: Duration) -> Result<Command, Error> {
        self.setup_listen(buffer, false)?;
        let status = self.blocking_listen_wait_timeout(timeout)?;
        if status == Status::Write {
            self.setup_listen_end(false)?;
            let command = self.blocking_listen_wait_end_timeout(status, timeout)?;
            return Ok(command);
        }
        Ok(Command::Read)
    }

    /// Respond to an I2C master READ command, with a timeout (blocking).
    ///
    /// See [`blocking_respond_to_read`](Twis::blocking_respond_to_read).
    #[cfg(feature = "time")]
    pub fn blocking_respond_to_read_timeout(&mut self, buffer: &[u8], timeout: Duration) -> Result<usize, Error> {
        self.setup_respond(buffer, false)?;
        self.blocking_wait_timeout(timeout)
    }

    /// Same as [`blocking_respond_to_read_timeout`](Twis::blocking_respond_to_read_timeout), but will
    /// fail instead of copying data into RAM.
    #[cfg(feature = "time")]
    pub fn blocking_respond_to_read_from_ram_timeout(
        &mut self,
        buffer: &[u8],
        timeout: Duration,
    ) -> Result<usize, Error> {
        self.setup_respond_from_ram(buffer, false)?;
        self.blocking_wait_timeout(timeout)
    }

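    /// Wait asynchronously for commands from an I2C master.
    ///
    /// The returned [`Command`] tells you whether the master read, wrote `n` bytes
    /// into `buffer`, or wrote and then issued a repeated-start read.
    ///
    /// A short sketch (`i2c` is a [`Twis`] driver constructed elsewhere):
    ///
    /// ```rust,ignore
    /// let mut buf = [0u8; 16];
    /// match i2c.listen(&mut buf).await? {
    ///     Command::Read => { /* master wants data: call respond_to_read */ }
    ///     Command::Write(n) => { /* buf[..n] holds the received bytes */ }
    ///     Command::WriteRead(n) => { /* handle buf[..n], then respond_to_read */ }
    /// }
    /// ```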
    pub async fn listen(&mut self, buffer: &mut [u8]) -> Result<Command, Error> {
        self.setup_listen(buffer, true)?;
        let status = self.async_listen_wait().await?;
        if status == Status::Write {
            self.setup_listen_end(true)?;
            let command = self.async_listen_wait_end(status).await?;
            return Ok(command);
        }
        Ok(Command::Read)
    }

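    /// Respond asynchronously to an I2C master READ command.
    ///
    /// Returns the number of bytes transmitted. If `buffer` is not in RAM (e.g. it
    /// lives in flash), it is first copied into a temporary RAM buffer of at most
    /// `FORCE_COPY_BUFFER_SIZE` bytes; use
    /// [`respond_to_read_from_ram`](Twis::respond_to_read_from_ram) to fail instead
    /// of copying. For example (the data here is arbitrary):
    ///
    /// ```rust,ignore
    /// let sent = i2c.respond_to_read(&[0xDE, 0xAD, 0xBE, 0xEF]).await?;
    /// ```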
    pub async fn respond_to_read(&mut self, buffer: &[u8]) -> Result<usize, Error> {
        self.setup_respond(buffer, true)?;
        self.async_wait().await
    }

    /// Same as [`respond_to_read`](Twis::respond_to_read), but will fail instead of
    /// copying data into RAM.
    pub async fn respond_to_read_from_ram(&mut self, buffer: &[u8]) -> Result<usize, Error> {
        self.setup_respond_from_ram(buffer, true)?;
        self.async_wait().await
    }
}

impl<'a, T: Instance> Drop for Twis<'a, T> {
    fn drop(&mut self) {
        trace!("twis drop");

        // Disable the peripheral and deconfigure the pins.
        let r = T::regs();
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        gpio::deconfigure_pin(r.psel().sda().read());
        gpio::deconfigure_pin(r.psel().scl().read());

        trace!("twis drop: done");
    }
}

pub(crate) struct State {
    waker: AtomicWaker,
}

impl State {
    pub(crate) const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
        }
    }
}

pub(crate) trait SealedInstance {
    fn regs() -> pac::twis::Twis;
    fn state() -> &'static State;
}

/// TWIS peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}

macro_rules! impl_twis {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::twis::SealedInstance for peripherals::$type {
            fn regs() -> pac::twis::Twis {
                pac::$pac_type
            }
            fn state() -> &'static crate::twis::State {
                static STATE: crate::twis::State = crate::twis::State::new();
                &STATE
            }
        }
        impl crate::twis::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}