use core::cmp::min;
use core::ptr::{null_mut, null};
use core::mem::size_of;
use core::slice::from_raw_parts_mut;
use alloc::boxed::Box;
use alloc::vec::Vec;
use crate::fxmac_const::*;
use crate::fxmac_phy::*;
use crate::fxmac::*;
use crate::utils::*;
/// Size in bytes of the RX buffer-descriptor (BD) region: one 64-byte slot per RX pbuf.
pub const FXMAX_RX_BDSPACE_LENGTH: usize = FXMAX_RX_PBUFS_LENGTH * 64;
/// Size in bytes of the TX buffer-descriptor (BD) region: one 64-byte slot per TX pbuf.
/// (Previously derived from the RX pbuf count; tied to the TX count so the two
/// ring sizes can diverge safely. The value is unchanged: 128 * 64.)
pub const FXMAX_TX_BDSPACE_LENGTH: usize = FXMAX_TX_PBUFS_LENGTH * 64;
/// Number of RX packet buffers (and RX BDs) in the receive ring.
pub const FXMAX_RX_PBUFS_LENGTH: usize = 128;
/// Number of TX packet buffers (and TX BDs) in the transmit ring.
pub const FXMAX_TX_PBUFS_LENGTH: usize = 128;
/// Number of bytes in a MAC (hardware) address.
pub const FXMAX_MAX_HARDWARE_ADDRESS_LENGTH: usize =6;
/// Port feature flag: jumbo frames enabled.
pub const FXMAC_LWIP_PORT_CONFIG_JUMBO: u32 = BIT(0);
/// Port feature flag: multicast address filtering.
pub const FXMAC_LWIP_PORT_CONFIG_MULTICAST_ADDRESS_FILITER: u32 = BIT(1);
/// Port feature flag: promiscuous mode (copy all frames).
pub const FXMAC_LWIP_PORT_CONFIG_COPY_ALL_FRAMES: u32 =BIT(2);
/// Port feature flag: disable FCS checking on receive.
pub const FXMAC_LWIP_PORT_CONFIG_CLOSE_FCS_CHECK: u32 =BIT(3);
/// Port feature flag: unicast address filtering.
pub const FXMAC_LWIP_PORT_CONFIG_UNICAST_ADDRESS_FILITER: u32 =BIT(5);
// PHY speeds in Mbit/s.
pub const FXMAC_PHY_SPEED_10M: u32 = 10;
pub const FXMAC_PHY_SPEED_100M: u32 = 100;
pub const FXMAC_PHY_SPEED_1000M: u32 = 1000;
pub const FXMAC_PHY_SPEED_10G: u32 = 10000;
// PHY duplex settings.
pub const FXMAC_PHY_HALF_DUPLEX: u32 = 0;
pub const FXMAC_PHY_FULL_DUPLEX: u32 = 1;
/// Maximum packets drained per receive pass.
pub const FXMAC_RECV_MAX_COUNT: u32 =10;
pub const PQ_QUEUE_SIZE: u32 =4096;
// Byte offsets of the 32-bit words within one buffer descriptor:
// word 0 = buffer address (low), word 1 = control/status, word 2 = address high.
pub const FXMAC_BD_ADDR_OFFSET: u32 = 0;
pub const FXMAC_BD_STAT_OFFSET: u32 = 4;
pub const FXMAC_BD_ADDR_HI_OFFSET: u32 = 1 << 3;
// RX BD word-0 bits: NEW (software ownership), WRAP (last BD), address field.
pub const FXMAC_RXBUF_NEW_MASK: u32 = 1<<0;
pub const FXMAC_RXBUF_WRAP_MASK: u32 = 1<<1;
pub const FXMAC_RXBUF_ADD_MASK: u32 = GENMASK(31, 2);
// TX BD word-1 bits: USED (software ownership), WRAP (last BD).
pub const FXMAC_TXBUF_USED_MASK: u32 = 1<<31;
pub const FXMAC_TXBUF_WRAP_MASK: u32 = 1<<30;
/// BD alignment required when creating rings (two 64-byte minimum units).
pub const BD_ALIGNMENT: u64 = FXMAC_DMABD_MINIMUM_ALIGNMENT*2; pub const FXMAC_DMABD_MINIMUM_ALIGNMENT: u64 = 64;
/// A descriptor is four 32-bit words (addr, ctrl/status, addr-high, reserved).
pub const FXMAC_BD_NUM_WORDS: usize = 4;
/// In-memory layout of one MACB/GEM-style DMA buffer descriptor; a typed
/// view over the same four 32-bit words as `FXmacBd`.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct macb_dma_desc {
    /// Word 0: buffer address low bits plus, for RX, the NEW/WRAP flag bits.
    pub addr: u32,
    /// Word 1: control/status (length field, USED/LAST/WRAP flags, ...).
    pub ctrl: u32,
    /// Word 2: upper 32 bits of the buffer address.
    pub addrh: u32,
    /// Word 3: reserved/unused.
    pub resvd: u32,
}
/// Raw buffer descriptor: four 32-bit words (see `macb_dma_desc` for the layout).
pub type FXmacBd = [u32; FXMAC_BD_NUM_WORDS];
/// C-style pointer-sized unsigned integer used for addresses in this driver.
pub type uintptr = u64;
/// Ring direction selector: transmit.
pub const FXMAC_SEND: u32 = 1;
/// Ring direction selector: receive.
pub const FXMAC_RECV: u32 = 2;
/// Software bookkeeping for one buffer-descriptor ring shared with the DMA engine.
///
/// Descriptors cycle through stages, each tracked by a cursor + counter:
/// free -> pre (allocated, being filled) -> hw (owned by hardware) ->
/// post (completed, awaiting cleanup) -> free.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct FXmacBdRing {
    /// Physical base address of the ring, programmed into the controller.
    pub phys_base_addr: uintptr,
    /// Virtual address of the first BD.
    pub base_bd_addr: uintptr,
    /// Virtual address of the last BD.
    pub high_bd_addr: uintptr,
    /// Total ring size in bytes (bd_count * separation).
    pub length: u32,
    /// Run state (e.g. FXMAC_DMA_SG_IS_STOPED).
    pub run_state: u32,
    /// Byte distance between consecutive BDs.
    pub separation: u32,
    /// Next BD to hand out from the free pool (FXmacBdRingAlloc).
    pub free_head: *mut FXmacBd,
    /// First BD that is allocated but not yet committed to hardware.
    pub pre_head: *mut FXmacBd,
    /// Oldest BD currently owned by hardware.
    pub hw_head: *mut FXmacBd,
    /// BD just past the newest one committed to hardware.
    pub hw_tail: *mut FXmacBd,
    /// First completed BD awaiting FXmacBdRingFree.
    pub post_head: *mut FXmacBd,
    /// Ring restart pointer; holds the *physical* base address cast to a
    /// pointer (see FXmacBdRingCreate) — not dereferenceable as a vaddr.
    pub bda_restart: *mut FXmacBd,
    /// Number of BDs owned by hardware.
    pub hw_cnt: u32,
    /// Number of BDs allocated but not yet committed.
    pub pre_cnt: u32,
    /// Number of BDs available for allocation.
    pub free_cnt: u32,
    /// Number of completed BDs awaiting free.
    pub post_cnt: u32,
    /// Total BDs in the ring.
    pub all_cnt: u32,
}
impl Default for FXmacBdRing {
fn default() -> Self {
Self {
phys_base_addr: 0,
base_bd_addr: 0,
high_bd_addr: 0,
length: 0,
run_state: 0,
separation: 0,
free_head: null_mut(),
pre_head: null_mut(),
hw_head: null_mut(),
hw_tail: null_mut(),
post_head: null_mut(),
bda_restart: null_mut(),
hw_cnt: 0,
pre_cnt: 0,
free_cnt: 0,
post_cnt: 0,
all_cnt: 0,
}
}
}
/// DMA buffer bookkeeping for one port: the BD ring memory plus the virtual
/// addresses of the packet buffer attached to each descriptor.
pub struct FXmacNetifBuffer
{
    // Virtual base addresses of the RX / TX BD ring regions.
    pub rx_bdspace: usize, pub tx_bdspace: usize,
    /// Virtual address of the RX packet buffer bound to each RX BD (by index).
    pub rx_pbufs_storage: [uintptr; FXMAX_RX_PBUFS_LENGTH],
    /// Virtual address of the TX packet buffer bound to each TX BD (by index).
    pub tx_pbufs_storage: [uintptr; FXMAX_TX_PBUFS_LENGTH],
}
impl Default for FXmacNetifBuffer {
    /// Allocates DMA-coherent memory for the RX and TX BD rings and starts
    /// with empty pbuf tables.
    ///
    /// Only the virtual addresses are retained; the device-visible addresses
    /// are re-derived later via `virt_to_phys`, so the DMA handles returned
    /// by the allocator are deliberately ignored here.
    fn default() -> Self {
        // Round the RX BD-space size up to whole pages for the allocator.
        let alloc_pages = (FXMAX_RX_BDSPACE_LENGTH + (PAGE_SIZE - 1)) / PAGE_SIZE;
        let (rx_vaddr, _rx_dma) = crate_interface::call_interface!(crate::KernelFunc::dma_alloc_coherent(alloc_pages));
        // Same for the TX BD space.
        let alloc_pages = (FXMAX_TX_BDSPACE_LENGTH + (PAGE_SIZE - 1)) / PAGE_SIZE;
        let (tx_vaddr, _tx_dma) = crate_interface::call_interface!(crate::KernelFunc::dma_alloc_coherent(alloc_pages));
        Self {
            rx_bdspace: rx_vaddr,
            tx_bdspace: tx_vaddr,
            rx_pbufs_storage: [0; FXMAX_RX_PBUFS_LENGTH],
            tx_pbufs_storage: [0; FXMAX_TX_PBUFS_LENGTH],
        }
    }
}
/// Per-port lwIP glue state: DMA buffers, feature flags, MAC address, and
/// the pending-receive event counter.
pub struct FXmacLwipPort
{
    /// DMA buffer bookkeeping (BD space + pbuf tables).
    pub buffer: FXmacNetifBuffer,
    /// Bitmask of FXMAC_LWIP_PORT_CONFIG_* feature flags.
    pub feature: u32,
    /// Station MAC address.
    pub hwaddr: [u8; FXMAX_MAX_HARDWARE_ADDRESS_LENGTH],
    /// Count of latched RX interrupt events awaiting bottom-half processing.
    pub recv_flg: u32,
}
/// Reads the 32-bit descriptor word at `bd_ptr + offset`.
///
/// NOTE(review): the access translates `bd_ptr` with `virt_to_phys` and then
/// dereferences the resulting *physical* address — this assumes a linear /
/// identity mapping where that address is itself dereferenceable; confirm
/// against the kernel's memory layout.
pub fn fxmac_bd_read(bd_ptr: u64, offset: u32) -> u32 {
    trace!("fxmac_bd_read at {:#x}", bd_ptr + offset as u64);
    read_reg((crate_interface::call_interface!(crate::KernelFunc::virt_to_phys(bd_ptr as usize)) + offset as usize) as *const u32)
}
/// Writes `data` into the 32-bit descriptor word at `bd_ptr + offset`.
///
/// NOTE(review): like `fxmac_bd_read`, this dereferences the physical address
/// produced by `virt_to_phys` — assumes a linear/identity mapping; confirm.
pub fn fxmac_bd_write(bd_ptr: u64, offset: u32, data: u32)
{
    debug!("fxmac_bd_write {:#x} to {:#x}", data, bd_ptr + offset as u64);
    write_reg((crate_interface::call_interface!(crate::KernelFunc::virt_to_phys(bd_ptr as usize)) + offset as usize) as *mut u32, data);
}
/// Marks the RX BD at `bdptr` as the last descriptor of the ring by setting
/// the WRAP bit in its address word (word 0); the controller then loops back
/// to the ring base after consuming it.
fn FXmacBdSetRxWrap(mut bdptr: u64) {
    // RX keeps its WRAP flag in the address word (offset 0).
    bdptr += FXMAC_BD_ADDR_OFFSET as u64;
    let temp_ptr = bdptr as *mut u32;
    if !temp_ptr.is_null() {
        // Volatile read to pair with the volatile write below: the BD is
        // shared with the DMA engine, so the compiler must not cache or
        // elide this access (the original used a plain, non-volatile read).
        // SAFETY: bdptr points at a BD inside the ring owned by this driver.
        let mut data_value_rx: u32 = unsafe { temp_ptr.read_volatile() };
        info!("RX WRAP of BD @ {:#x} set {:#x} | FXMAC_RXBUF_WRAP_MASK", bdptr, data_value_rx);
        data_value_rx |= FXMAC_RXBUF_WRAP_MASK;
        // SAFETY: same pointer as above; publishing the updated flag word.
        unsafe {
            temp_ptr.write_volatile(data_value_rx);
        }
    }
}
/// Marks the TX BD at `bdptr` as the last descriptor of the ring by setting
/// the WRAP bit in its status word (word 1).
fn FXmacBdSetTxWrap(mut bdptr: u64) {
    // TX keeps its WRAP flag in the control/status word (offset 4).
    bdptr += FXMAC_BD_STAT_OFFSET as u64;
    let temp_ptr = bdptr as *mut u32;
    if !temp_ptr.is_null() {
        // Volatile read to pair with the volatile write below: the BD is
        // shared with the DMA engine (the original used a plain read).
        // SAFETY: bdptr points at a BD inside the ring owned by this driver.
        let mut data_value_tx: u32 = unsafe { temp_ptr.read_volatile() };
        info!("TX WRAP of BD @ {:#x} set {:#x} | TXBUF_WRAP", bdptr, data_value_tx);
        data_value_tx |= FXMAC_TXBUF_WRAP_MASK;
        // SAFETY: same pointer as above; publishing the updated flag word.
        unsafe {
            temp_ptr.write_volatile(data_value_tx);
        }
    }
}
/// Rewinds every software cursor of a BD ring back to the first descriptor;
/// used when the DMA engine is reset and the ring restarts from its base.
fn FXmacBdringPtrReset(ring_ptr: &mut FXmacBdRing, virtaddrloc: *mut FXmacBd)
{
    // Disjoint field borrows: each cursor points back at the ring base.
    for cursor in [
        &mut ring_ptr.free_head,
        &mut ring_ptr.pre_head,
        &mut ring_ptr.hw_head,
        &mut ring_ptr.hw_tail,
        &mut ring_ptr.post_head,
    ] {
        *cursor = virtaddrloc;
    }
}
/// Programs the 64-bit DMA buffer address into an RX descriptor.
///
/// The low word is merged rather than overwritten so the NEW and WRAP flag
/// bits in word 0 survive: only the address field (bits 31:2) is replaced.
/// The buffer address must therefore have its low flag bits clear.
fn fxmac_bd_set_address_rx(bd_ptr: u64, addr: u64) {
    fxmac_bd_write((bd_ptr), FXMAC_BD_ADDR_OFFSET,
        ((fxmac_bd_read(bd_ptr, FXMAC_BD_ADDR_OFFSET) & !FXMAC_RXBUF_ADD_MASK) |
        (addr & ULONG64_LO_MASK) as u32));
    // Upper half of the address goes into word 2.
    fxmac_bd_write(bd_ptr, FXMAC_BD_ADDR_HI_OFFSET, ((addr & ULONG64_HI_MASK) >> 32) as u32);
}
/// Programs the 64-bit DMA buffer address into a TX descriptor, split across
/// the low address word (word 0) and the high address word (word 2).
fn fxmac_bd_set_address_tx(bd_ptr: u64, addr: u64) {
    let lo = (addr & ULONG64_LO_MASK) as u32;
    let hi = ((addr & ULONG64_HI_MASK) >> 32) as u32;
    fxmac_bd_write(bd_ptr, FXMAC_BD_ADDR_OFFSET, lo);
    fxmac_bd_write(bd_ptr, FXMAC_BD_ADDR_HI_OFFSET, hi);
}
/// Advances `*bdptr` forward by `num_bd` descriptors, wrapping back by the
/// ring's total byte length when the result runs past the last BD (or when
/// the 64-bit addition wrapped around).
fn FXMAC_RING_SEEKAHEAD(ring_ptr: &mut FXmacBdRing, bdptr: &mut (*mut FXmacBd), num_bd: u32)
{
    trace!("FXMAC_RING_SEEKAHEAD, bdptr={:#x}", *bdptr as u64);
    let mut addr: u64 = *bdptr as u64;
    addr += (ring_ptr.separation * num_bd) as u64;
    // Wrap condition: past the last BD, or the u64 addition overflowed.
    if (addr > ring_ptr.high_bd_addr) || (*bdptr as u64 > addr)
    {
        addr -= ring_ptr.length as u64;
    }
    *bdptr = addr as *mut FXmacBd;
    trace!("FXMAC_RING_SEEKAHEAD, bdptr: {:#x}, addr: {:#x}", *bdptr as u64, addr);
}
/// Allocates one DMA-coherent packet buffer per RX BD and per TX BD and
/// programs each buffer's device address into its descriptor.
///
/// RX BDs are drawn from the ring one at a time and handed straight to
/// hardware; TX BDs are addressed directly by index. Returns 0 on success,
/// or the BD-allocation error code.
pub fn FXmacAllocDmaPbufs(instance_p: &mut FXmac) -> u32 {
    let mut status: u32 = 0;
    let rxringptr: &mut FXmacBdRing = &mut instance_p.rx_bd_queue.bdring;
    let txringptr: &mut FXmacBdRing = &mut instance_p.tx_bd_queue.bdring;
    info!("Allocate RX descriptors, 1 RxBD at a time.");
    for i in 0..FXMAX_RX_PBUFS_LENGTH
    {
        // Jumbo-capable ports need larger per-packet buffers.
        let max_frame_size = if (instance_p.lwipport.feature & FXMAC_LWIP_PORT_CONFIG_JUMBO) != 0
        { info!("FXMAC_LWIP_PORT_CONFIG_JUMBO"); FXMAC_MAX_FRAME_SIZE_JUMBO } else { info!("NO CONFIG_JUMBO"); FXMAC_MAX_FRAME_SIZE };
        let alloc_rx_buffer_pages = (max_frame_size as usize + (PAGE_SIZE - 1)) / PAGE_SIZE;
        let (mut rx_mbufs_vaddr, mut rx_mbufs_dma) =
            crate_interface::call_interface!(crate::KernelFunc::dma_alloc_coherent(alloc_rx_buffer_pages));
        // Re-borrow the RX ring inside the loop (shadows the outer borrow).
        let rxringptr: &mut FXmacBdRing = &mut instance_p.rx_bd_queue.bdring;
        let mut rxbd: *mut FXmacBd = null_mut();
        status = FXmacBdRingAlloc(rxringptr, 1, &mut rxbd);
        assert!(!rxbd.is_null());
        if (status != 0)
        {
            error!("FXmacInitDma: Error allocating RxBD");
            return status;
        }
        // Commit the BD to the hardware stage immediately.
        status = FXmacBdRingToHw(rxringptr, 1, rxbd);
        let bdindex = FXMAC_BD_TO_INDEX(rxringptr, rxbd as u64);
        let mut temp = rxbd as *mut u32;
        let mut v = 0;
        // The final RX BD carries the WRAP flag so DMA loops back to the base.
        if bdindex == (FXMAX_RX_PBUFS_LENGTH - 1) as u32 {
            v |= FXMAC_RXBUF_WRAP_MASK;
        }
        // Clear word 0 (address/flags) and word 1 (status) of the descriptor.
        unsafe{
            temp.write_volatile(v);
            temp.add(1).write_volatile(0);
        }
        crate::utils::DSB();
        crate::utils::FCacheDCacheInvalidateRange(rx_mbufs_vaddr as u64, max_frame_size as u64);
        // Give the device the buffer's DMA address; remember the vaddr for RX copy-out.
        fxmac_bd_set_address_rx(rxbd as u64, rx_mbufs_dma as u64);
        instance_p.lwipport.buffer.rx_pbufs_storage[bdindex as usize] = rx_mbufs_vaddr as u64;
    }
    for index in 0..FXMAX_TX_PBUFS_LENGTH {
        let max_fr_size = if (instance_p.lwipport.feature & FXMAC_LWIP_PORT_CONFIG_JUMBO) != 0
        {
            FXMAC_MAX_FRAME_SIZE_JUMBO
        } else {
            FXMAC_MAX_FRAME_SIZE
        };
        let alloc_pages = (max_fr_size as usize + (PAGE_SIZE - 1)) / PAGE_SIZE;
        let (mut tx_mbufs_vaddr, mut tx_mbufs_dma) =
            crate_interface::call_interface!(crate::KernelFunc::dma_alloc_coherent(alloc_pages));
        instance_p.lwipport.buffer.tx_pbufs_storage[index as usize] = tx_mbufs_vaddr as u64;
        // TX BDs are contiguous: index straight into the ring memory.
        let txbd = (txringptr.base_bd_addr + (index as u64 * txringptr.separation as u64)) as *mut FXmacBd;
        fxmac_bd_set_address_tx(txbd as u64, tx_mbufs_dma as u64);
        crate::utils::DSB();
    }
    0
}
/// Builds the RX and TX BD rings over the pre-allocated BD space, attaches
/// DMA packet buffers, and programs the controller's queue base pointers.
/// Returns 0.
pub fn FXmacInitDma(instance_p: &mut FXmac) -> u32
{
    let rxringptr: &mut FXmacBdRing = &mut instance_p.rx_bd_queue.bdring;
    let txringptr: &mut FXmacBdRing = &mut instance_p.tx_bd_queue.bdring;
    info!("FXmacInitDma, rxringptr: {:p}", rxringptr);
    info!("FXmacInitDma, txringptr: {:p}", txringptr);
    info!("FXmacInitDma, rx_bdspace: {:#x}", &instance_p.lwipport.buffer.rx_bdspace);
    info!("FXmacInitDma, tx_bdspace: {:#x}", &instance_p.lwipport.buffer.tx_bdspace);
    // Template descriptor: all-zero for RX; for TX the USED bit is set below
    // so the controller initially sees an empty ring.
    let mut bdtemplate: FXmacBd = [0; FXMAC_BD_NUM_WORDS];
    let mut status: u32 = FXmacBdRingCreate(rxringptr, instance_p.lwipport.buffer.rx_bdspace as u64, instance_p.lwipport.buffer.rx_bdspace as u64, BD_ALIGNMENT, FXMAX_RX_PBUFS_LENGTH as u32);
    status = FXmacBdRingClone(rxringptr, &bdtemplate, FXMAC_RECV);
    bdtemplate.fill(0);
    // NOTE(review): fxmac_bd_write pushes the template's *stack* address
    // through virt_to_phys before writing — confirm that mapping is valid
    // for stack memory on this platform.
    fxmac_bd_write((&bdtemplate as *const _ as u64), FXMAC_BD_STAT_OFFSET,
        fxmac_bd_read((&mut bdtemplate as *mut _ as u64), FXMAC_BD_STAT_OFFSET) | (FXMAC_TXBUF_USED_MASK));
    status = FXmacBdRingCreate(txringptr, instance_p.lwipport.buffer.tx_bdspace as u64, instance_p.lwipport.buffer.tx_bdspace as u64, BD_ALIGNMENT, FXMAX_TX_PBUFS_LENGTH as u32);
    status = FXmacBdRingClone(txringptr, &bdtemplate, FXMAC_SEND);
    // Attach DMA packet buffers to every descriptor.
    FXmacAllocDmaPbufs(instance_p);
    // Point the hardware queues at the ring bases.
    FXmacSetQueuePtr(instance_p.rx_bd_queue.bdring.phys_base_addr, 0, FXMAC_RECV);
    FXmacSetQueuePtr(instance_p.tx_bd_queue.bdring.phys_base_addr, 0, FXMAC_SEND);
    // Tail-pointer register offset for queue `queue`.
    let FXMAC_TAIL_QUEUE = |queue: u64| 0x0e80 + (queue << 2);
    if (instance_p.config.caps & FXMAC_CAPS_TAILPTR) != 0
    {
        write_reg((instance_p.config.base_address + FXMAC_TAIL_QUEUE(0)) as *mut u32, (1<<31) | 0);
    }
    0
}
/// Converts a descriptor's virtual address into its zero-based index
/// within the ring.
fn FXMAC_BD_TO_INDEX (ringptr: &mut FXmacBdRing, bdptr: u64) -> u32 {
    let byte_offset = bdptr - ringptr.base_bd_addr as u64;
    (byte_offset / ringptr.separation as u64) as u32
}
/// Advances one descriptor, wrapping to the ring base once the cursor is at
/// (or beyond) the last BD.
fn FXMAC_BD_RING_NEXT(ring_ptr: &mut FXmacBdRing, bd_ptr: *mut FXmacBd) -> *mut FXmacBd {
    let cur = bd_ptr as u64;
    let next = if cur >= ring_ptr.high_bd_addr {
        ring_ptr.base_bd_addr
    } else {
        cur + ring_ptr.separation as u64
    };
    next as *mut FXmacBd
}
/// Initializes a BD ring over the `bd_count` descriptors at `virt_addr`
/// (device-visible base `phys_addr`), zeroing every descriptor and resetting
/// all cursors and counters. `alignment` is validated but not stored.
/// Returns 0.
pub fn FXmacBdRingCreate(ring_ptr: &mut FXmacBdRing, phys_addr: u64, virt_addr: u64, alignment: u64, bd_count: u32) -> u32
{
    let virt_addr_loc: u64 = virt_addr;
    ring_ptr.all_cnt = 0;
    ring_ptr.free_cnt = 0;
    ring_ptr.hw_cnt = 0;
    ring_ptr.pre_cnt = 0;
    ring_ptr.post_cnt = 0;
    assert!((virt_addr_loc % alignment) == 0);
    assert!(bd_count > 0);
    // Descriptors are packed back to back, one `FXmacBd` apart.
    ring_ptr.separation = size_of::<FXmacBd>() as u32;
    // SAFETY: the caller supplies a region holding at least `bd_count` BDs.
    let rxringptr = unsafe { from_raw_parts_mut(virt_addr_loc as *mut FXmacBd, bd_count as usize) };
    rxringptr.fill([0; FXMAC_BD_NUM_WORDS]);
    // Address of the last BD, computed directly (the original walked an O(n)
    // loop that added `separation` bd_count-1 times to the same effect).
    let bd_virt_addr = virt_addr_loc + (bd_count as u64 - 1) * ring_ptr.separation as u64;
    info!("FXmacBdRingCreate BDs count={}, separation={}, {:#x}~{:#x}", bd_count, ring_ptr.separation, virt_addr, bd_virt_addr);
    ring_ptr.base_bd_addr = virt_addr_loc;
    ring_ptr.high_bd_addr = bd_virt_addr;
    ring_ptr.length = (ring_ptr.high_bd_addr - ring_ptr.base_bd_addr) as u32 + ring_ptr.separation;
    // All cursors start at the ring base; the whole ring is free.
    ring_ptr.free_head = virt_addr_loc as *mut FXmacBd;
    ring_ptr.pre_head = virt_addr_loc as *mut FXmacBd;
    ring_ptr.free_cnt = bd_count;
    ring_ptr.all_cnt = bd_count;
    ring_ptr.run_state = FXMAC_DMA_SG_IS_STOPED as u32;
    ring_ptr.phys_base_addr = phys_addr;
    ring_ptr.hw_head = virt_addr_loc as *mut FXmacBd;
    ring_ptr.hw_tail = virt_addr_loc as *mut FXmacBd;
    ring_ptr.post_head = virt_addr_loc as *mut FXmacBd;
    // Restart pointer holds the physical base (not dereferenceable as vaddr).
    ring_ptr.bda_restart = phys_addr as *mut FXmacBd;
    0
}
/// Copies the template descriptor into every BD of the ring, then sets the
/// WRAP flag on the final descriptor (RX: address word; TX: status word).
/// Requires the ring to be fully free. Returns 0.
pub fn FXmacBdRingClone(ring_ptr: &mut FXmacBdRing, src_bd_ptr: & FXmacBd, direction: u32) -> u32
{
    assert!(ring_ptr.free_cnt == ring_ptr.all_cnt);
    let mut cur_bd = ring_ptr.base_bd_addr;
    // `_` replaces the original unused loop variable.
    for _ in 0..ring_ptr.all_cnt {
        trace!("FXmacBdRingClone, copy current bd @ {:#x}", cur_bd);
        // SAFETY: cur_bd stays within the ring set up by FXmacBdRingCreate.
        let cur_bd_slice = unsafe { from_raw_parts_mut(cur_bd as *mut FXmacBd, 1) };
        cur_bd_slice[0].copy_from_slice(src_bd_ptr);
        crate::utils::DSB();
        cur_bd += ring_ptr.separation as u64;
    }
    // Step back onto the last BD and mark it as the wrap point.
    cur_bd -= ring_ptr.separation as u64;
    if direction == FXMAC_RECV {
        FXmacBdSetRxWrap(cur_bd);
    } else {
        FXmacBdSetTxWrap(cur_bd);
    }
    0
}
/// Reserves `num_bd` descriptors from the free pool for the caller to fill,
/// advancing the free-head cursor past them.
///
/// Returns 0 on success with `*bd_set_ptr` pointing at the first reserved
/// BD, or 4 when not enough free descriptors remain.
pub fn FXmacBdRingAlloc(ring_ptr: &mut FXmacBdRing, num_bd: u32, bd_set_ptr: &mut(*mut FXmacBd)) -> u32 {
    if ring_ptr.free_cnt < num_bd {
        error!("No Enough free BDs available for the request: {}", num_bd);
        4
    } else {
        *bd_set_ptr = ring_ptr.free_head as *mut FXmacBd;
        let b = ring_ptr.free_head;
        let mut free_head_t = ring_ptr.free_head;
        FXMAC_RING_SEEKAHEAD(ring_ptr, &mut free_head_t, num_bd);
        ring_ptr.free_head = free_head_t;
        debug!("free_head {:#x} seekahead to {:#x}", b as usize, ring_ptr.free_head as usize);
        // Sanity check: the cursor must have moved.
        // NOTE(review): this would fire if num_bd equals the whole ring
        // (full wrap-around lands back on the same BD) — confirm callers
        // never allocate the entire ring in one request.
        assert!(b as usize != ring_ptr.free_head as usize);
        ring_ptr.free_cnt -= num_bd;
        ring_ptr.pre_cnt += num_bd;
        0
    }
}
/// Commits `num_bd` previously allocated descriptors (starting at
/// `bd_set_ptr`) to the hardware stage: advances the pre-head cursor and
/// records the BD just past the set as the new hardware tail. Returns 0.
pub fn FXmacBdRingToHw(ring_ptr: &mut FXmacBdRing, num_bd: u32, bd_set_ptr: *mut FXmacBd) -> u32 {
    // Walk past the committed set; `_` replaces the original unused index.
    let mut cur_bd_ptr: *mut FXmacBd = bd_set_ptr;
    for _ in 0..num_bd {
        cur_bd_ptr = FXMAC_BD_RING_NEXT(ring_ptr, cur_bd_ptr);
    }
    let mut pre_head_t = ring_ptr.pre_head;
    FXMAC_RING_SEEKAHEAD(ring_ptr, &mut pre_head_t, num_bd);
    ring_ptr.pre_head = pre_head_t;
    ring_ptr.pre_cnt -= num_bd;
    ring_ptr.hw_tail = cur_bd_ptr;
    ring_ptr.hw_cnt += num_bd;
    0
}
/// Harvests completed RX descriptors from the hardware stage.
///
/// Scans forward from `hw_head`, counting BDs whose NEW bit is set (i.e.
/// hardware has filled them and returned ownership), then drops a trailing
/// run that has no end-of-frame yet (a partial frame stays with hardware).
/// Returns the number of harvested BDs with `*bd_set_ptr` at the first one,
/// or 0 when nothing is ready.
pub fn FXmacBdRingFromHwRx(ring_ptr: &mut FXmacBdRing, bd_limit: usize, bd_set_ptr: &mut (*mut FXmacBd)) -> u32 {
    let mut cur_bd_ptr: *mut FXmacBd = ring_ptr.hw_head;
    let mut status: u32 = 0;
    let mut bd_str: u32 = 0;
    let mut bd_count: u32 = 0;
    let mut bd_partial_count: u32 = 0;
    if ring_ptr.hw_cnt == 0 {
        warn!("No BDs in RX work group, there's nothing to search");
        *bd_set_ptr = null_mut();
        status = 0;
    }else{
        while (bd_count as usize) < bd_limit {
            bd_str = fxmac_bd_read(cur_bd_ptr as u64, FXMAC_BD_STAT_OFFSET);
            // NEW bit set => hardware has filled this BD and released it.
            let bd_rx_new = fxmac_bd_read(cur_bd_ptr as u64, FXMAC_BD_ADDR_OFFSET) & FXMAC_RXBUF_NEW_MASK;
            if bd_rx_new == 0 {
                break;
            }
            bd_count += 1;
            // Track BDs since the last end-of-frame: they form a partial packet.
            if (bd_str & FXMAC_RXBUF_EOF_MASK) != 0 {
                bd_partial_count = 0;
            }
            else
            {
                bd_partial_count+=1;
            }
            cur_bd_ptr = FXMAC_BD_RING_NEXT(ring_ptr, cur_bd_ptr);
        }
        // Leave any incomplete frame in the hardware stage for the next pass.
        bd_count -= bd_partial_count;
        if bd_count > 0 {
            *bd_set_ptr = ring_ptr.hw_head;
            ring_ptr.hw_cnt -= bd_count;
            ring_ptr.post_cnt += bd_count;
            let mut hw_head_t = ring_ptr.hw_head;
            FXMAC_RING_SEEKAHEAD(ring_ptr, &mut hw_head_t, bd_count);
            ring_ptr.hw_head = hw_head_t;
            info!("FXmacBdRingFromHwRx, Found BD={}", bd_count);
            status = bd_count;
        } else {
            *bd_set_ptr = null_mut();
            status = 0;
        }
    }
    status
}
/// Harvests completed TX descriptors from the hardware stage.
///
/// Counts BDs whose USED bit hardware has set back (transmission done),
/// dropping a trailing run with no LAST flag (partial packet). Returns the
/// number of harvested BDs with `*bd_set_ptr` at the first, or 0 when none
/// have completed.
pub fn FXmacBdRingFromHwTx(ring_ptr: &mut FXmacBdRing, bd_limit: usize, bd_set_ptr: &mut(*mut FXmacBd)) -> u32 {
    let mut bd_str: u32 = 0;
    let mut bd_count: u32 = 0;
    let mut bd_partial_count: u32 = 0;
    let mut status: u32 = 0;
    let mut bd_limitLoc: u32 = bd_limit as u32;
    let mut cur_bd_ptr: *mut FXmacBd = ring_ptr.hw_head;
    if ring_ptr.hw_cnt == 0
    {
        debug!("No BDs in TX work group, then there's nothing to search");
        *bd_set_ptr = null_mut();
        status = 0;
    } else {
        // Never scan more BDs than are actually in the hardware stage.
        if bd_limitLoc > ring_ptr.hw_cnt
        {
            bd_limitLoc = ring_ptr.hw_cnt;
        }
        while bd_count < bd_limitLoc {
            bd_str = fxmac_bd_read(cur_bd_ptr as u64, FXMAC_BD_STAT_OFFSET);
            // USED set => the controller has finished with this BD's buffer.
            if (bd_str & FXMAC_TXBUF_USED_MASK) != 0
            {
                info!("FXmacBdRingFromHwTx, found a hardware USED TXBUF");
                bd_count += 1;
                bd_partial_count += 1;
            }
            // LAST marks a packet boundary: the run so far is a whole packet.
            if (bd_str & FXMAC_TXBUF_LAST_MASK) != 0
            {
                bd_partial_count = 0;
            }
            cur_bd_ptr = FXMAC_BD_RING_NEXT(ring_ptr, cur_bd_ptr);
        }
        info!("FXmacBdRingFromHwTx, Subtract off any partial packet BDs found");
        bd_count -= bd_partial_count;
        if bd_count > 0 {
            *bd_set_ptr = ring_ptr.hw_head;
            ring_ptr.hw_cnt -= bd_count;
            ring_ptr.post_cnt += bd_count;
            let mut hw_head_t = ring_ptr.hw_head;
            FXMAC_RING_SEEKAHEAD(ring_ptr, &mut hw_head_t, bd_count);
            ring_ptr.hw_head = hw_head_t;
            info!("FXmacBdRingFromHwTx, Found BD={}", bd_count);
            status = bd_count;
        } else {
            *bd_set_ptr = null_mut();
            status = 0;
        }
    }
    status
}
pub fn FXmacSgsend(instance_p: &mut FXmac, p: Vec<Vec<u8>>) -> u32 {
let mut status: u32 = 0;
let mut bdindex: u32 = 0;
let mut max_fr_size: u32 = 0;
let mut send_len: u32 = 0;
let mut last_txbd: *mut FXmacBd = null_mut();
let mut txbdset: *mut FXmacBd = null_mut();
let txring: &mut FXmacBdRing = &mut instance_p.tx_bd_queue.bdring;
let n_pbufs: u32 = p.len() as u32;
debug!("Sending packets: {}", n_pbufs);
status = FXmacBdRingAlloc(txring, n_pbufs, &mut txbdset);
assert!(!txbdset.is_null());
let mut txbd: *mut FXmacBd = txbdset;
for q in &p {
bdindex = FXMAC_BD_TO_INDEX(txring, txbd as u64);
if (instance_p.lwipport.feature & FXMAC_LWIP_PORT_CONFIG_JUMBO) != 0
{
max_fr_size = FXMAC_MAX_FRAME_SIZE_JUMBO;
}
else
{
max_fr_size = FXMAC_MAX_FRAME_SIZE;
}
let pbufs_len = min(q.len(), max_fr_size as usize);
let pbufs_virt = instance_p.lwipport.buffer.tx_pbufs_storage[bdindex as usize];
let pbuf = unsafe { from_raw_parts_mut(pbufs_virt as *mut u8, pbufs_len) };
pbuf.copy_from_slice(q);
crate::utils::FCacheDCacheFlushRange(pbufs_virt, pbufs_len as u64);
warn!(">>>>>>>>> TX PKT {} @{:#x} - {}", pbufs_len, pbufs_virt, bdindex);
debug!(">>>>>>>>> {:x?}", pbuf);
send_len += pbufs_len as u32;
if q.len() > max_fr_size as usize
{
warn!("The packet: {} to be send is TOO LARGE", q.len());
fxmac_bd_write(txbd as u64, FXMAC_BD_STAT_OFFSET,
((fxmac_bd_read(txbd as u64, FXMAC_BD_STAT_OFFSET) & !FXMAC_TXBUF_LEN_MASK) | (max_fr_size & 0x3FFF)));
} else {
fxmac_bd_write(txbd as u64, FXMAC_BD_STAT_OFFSET,
((fxmac_bd_read(txbd as u64, FXMAC_BD_STAT_OFFSET) & !FXMAC_TXBUF_LEN_MASK) | (q.len() as u32 & 0x3FFF)));
}
last_txbd = txbd;
let t_txbd = txbd as u64;
fxmac_bd_write(t_txbd, FXMAC_BD_STAT_OFFSET,
fxmac_bd_read(t_txbd, FXMAC_BD_STAT_OFFSET) & !FXMAC_TXBUF_LAST_MASK );
txbd = FXMAC_BD_RING_NEXT(txring, txbd);
}
let t_txbd = last_txbd as u64;
fxmac_bd_write(t_txbd, FXMAC_BD_STAT_OFFSET,
fxmac_bd_read(t_txbd, FXMAC_BD_STAT_OFFSET) | FXMAC_TXBUF_LAST_MASK );
if (instance_p.config.caps & FXMAC_CAPS_TAILPTR) != 0
{
bdindex = FXMAC_BD_TO_INDEX(txring, txbd as u64);
}
let mut txbd = txbdset;
let FXMAC_BD_CLEAR_TX_USED = |bd_ptr: u64|
fxmac_bd_write(bd_ptr, FXMAC_BD_STAT_OFFSET, fxmac_bd_read(bd_ptr, FXMAC_BD_STAT_OFFSET) & (!FXMAC_TXBUF_USED_MASK));
for q in 1..p.len() {
txbd = FXMAC_BD_RING_NEXT(txring, txbd);
FXMAC_BD_CLEAR_TX_USED(txbd as u64);
crate::utils::DSB();
}
FXMAC_BD_CLEAR_TX_USED(txbdset as u64); crate::utils::DSB();
status = FXmacBdRingToHw(txring, n_pbufs, txbdset);
let FXMAC_TAIL_QUEUE = |queue: u64| 0x0e80 + (queue << 2);
if (instance_p.config.caps & FXMAC_CAPS_TAILPTR) != 0
{
write_reg((instance_p.config.base_address + FXMAC_TAIL_QUEUE(0)) as *mut u32, (1 << 31) | bdindex);
}
debug!("TX DMA DESC: {:#010x?}", unsafe{*(txbdset as *const macb_dma_desc)});
let value = read_reg((instance_p.config.base_address + FXMAC_NWCTRL_OFFSET) as *const u32) | FXMAC_NWCTRL_STARTTX_MASK;
write_reg((instance_p.config.base_address + FXMAC_NWCTRL_OFFSET) as *mut u32, value);
send_len
}
/// Drains all completed RX frames from the BD ring.
///
/// Acknowledges the RX status register, copies each received frame out of
/// its DMA buffer into an owned Vec, then frees the descriptors and re-arms
/// them via SetupRxBds. Returns the packets, or None when nothing arrived.
pub fn FXmacRecvHandler(instance_p: &mut FXmac) -> Option<Vec<Vec<u8>>> {
    trace!("RX receive packets");
    let mut recv_packets = Vec::new();
    let mut rxbdset: *mut FXmacBd = null_mut();
    // Acknowledge RX status: writing back the value just read clears the
    // latched bits.
    let regval: u32 = read_reg((instance_p.config.base_address + FXMAC_RXSR_OFFSET) as *const u32);
    write_reg((instance_p.config.base_address + FXMAC_RXSR_OFFSET) as *mut u32, regval);
    loop {
        let bd_processed: u32 = FXmacBdRingFromHwRx(&mut instance_p.rx_bd_queue.bdring, FXMAX_RX_PBUFS_LENGTH, &mut rxbdset);
        if bd_processed == 0
        {
            break;
        }
        assert!(!rxbdset.is_null());
        let mut curbdptr: *mut FXmacBd = rxbdset;
        for k in 0..bd_processed {
            let rxring: &mut FXmacBdRing = &mut instance_p.rx_bd_queue.bdring;
            // Extract the received frame length from the BD status word
            // (a 14-bit literal mask is used in jumbo mode).
            let rx_bytes: u32 =
                if (instance_p.lwipport.feature & FXMAC_LWIP_PORT_CONFIG_JUMBO) != 0
                {
                    fxmac_bd_read(curbdptr as u64, FXMAC_BD_STAT_OFFSET) & 0x00003FFF
                } else {
                    debug!("FXMAC_RXBUF_LEN_MASK={:#x}", FXMAC_RXBUF_LEN_MASK);
                    fxmac_bd_read(curbdptr as u64, FXMAC_BD_STAT_OFFSET) & FXMAC_RXBUF_LEN_MASK
                };
            let bdindex: u32 = FXMAC_BD_TO_INDEX(rxring, curbdptr as u64);
            let pbufs_virt = instance_p.lwipport.buffer.rx_pbufs_storage[bdindex as usize];
            debug!("RX PKT {} @{:#x} <<<<<<<<< - {}", rx_bytes, pbufs_virt, bdindex);
            let mbuf = unsafe { from_raw_parts_mut(pbufs_virt as *mut u8, rx_bytes as usize) };
            debug!("pbuf: {:x?}", mbuf);
            // Copy the frame out; the DMA buffer itself is recycled below.
            recv_packets.push(mbuf.to_vec());
            let mut hash_match: u32 = (fxmac_bd_read(curbdptr as u64, FXMAC_BD_STAT_OFFSET) & FXMAC_RXBUF_HASH_MASK) >> 29;
            debug!("hash_match is {:#x}", hash_match);
            // NOTE(review): the invalidate and the zeroing of the first
            // <=64 bytes happen *after* the copy-out — looks like stale-data
            // hygiene / a debugging aid; confirm intent.
            crate::utils::FCacheDCacheInvalidateRange(instance_p.lwipport.buffer.rx_pbufs_storage[bdindex as usize], rx_bytes as u64);
            mbuf[..min(64, rx_bytes as usize)].fill(0);
            curbdptr = FXMAC_BD_RING_NEXT(rxring, curbdptr);
        }
        // Return the BDs to the free pool and immediately re-arm them for RX.
        FXmacBdRingFree(&mut instance_p.rx_bd_queue.bdring, bd_processed);
        SetupRxBds(instance_p);
    }
    if recv_packets.len() > 0 {
        Some(recv_packets)
    } else {
        None
    }
}
/// Re-arms every free RX descriptor after harvested frames were copied out:
/// re-allocates each BD, hands it back to hardware, and clears its ownership
/// flag so DMA can refill it (preserving the buffer address and re-asserting
/// WRAP on the final descriptor).
pub fn SetupRxBds(instance_p: &mut FXmac) {
    let rxring: &mut FXmacBdRing = &mut instance_p.rx_bd_queue.bdring;
    let mut status: u32 = 0;
    let mut rxbd: *mut FXmacBd = null_mut();
    let mut freebds: u32 = rxring.free_cnt;
    while freebds > 0 {
        freebds -= 1;
        // (The original recomputed an unused jumbo-aware page count here
        // every iteration; that dead code has been removed.)
        status = FXmacBdRingAlloc(rxring, 1, &mut rxbd);
        assert!(!rxbd.is_null());
        status = FXmacBdRingToHw(rxring, 1, rxbd);
        let bdindex: u32 = FXMAC_BD_TO_INDEX(rxring, rxbd as u64);
        let rx_macb_dma_desc = unsafe{(rxbd as *const macb_dma_desc).read_volatile()};
        trace!("SetupRxBds - {}: {:#010x?}", bdindex, rx_macb_dma_desc);
        // Keep the buffer address, clear the low flag bits (incl. NEW so
        // hardware owns the BD again), and re-assert WRAP on the last BD.
        let mut v = rx_macb_dma_desc.addr & (!0x7f);
        if bdindex == (FXMAX_RX_PBUFS_LENGTH - 1) as u32 {
            v |= FXMAC_RXBUF_WRAP_MASK;
        }
        let mut temp = rxbd as *mut u32;
        // SAFETY: rxbd is a valid descriptor inside the RX ring.
        unsafe {
            // Clear the status word first, then publish the address word.
            temp.add(1).write_volatile(0);
            temp.write_volatile(v);
        }
        crate::utils::DSB();
        // The DMA buffer attached at init time must still be present.
        assert!(instance_p.lwipport.buffer.rx_pbufs_storage[bdindex as usize] != 0);
    }
}
/// Returns `num_bd` processed descriptors from the post-work stage back to
/// the free pool, advancing the post-head cursor past them. Always returns 0.
pub fn FXmacBdRingFree(ring_ptr: &mut FXmacBdRing, num_bd: u32) -> u32 {
    // Nothing to do for an empty request.
    if num_bd == 0 {
        return 0;
    }
    ring_ptr.free_cnt += num_bd;
    ring_ptr.post_cnt -= num_bd;
    let mut post_head_t = ring_ptr.post_head;
    FXMAC_RING_SEEKAHEAD(ring_ptr, &mut post_head_t, num_bd);
    ring_ptr.post_head = post_head_t;
    0
}
/// Rewinds both BD rings' software cursors to their bases and re-programs
/// the controller's queue pointers; used after a DMA reset. The descriptor
/// contents themselves are not modified here.
pub fn ResetDma(instance_p: &mut FXmac)
{
    info!("Resetting DMA");
    let txringptr: &mut FXmacBdRing = &mut instance_p.tx_bd_queue.bdring;
    let rxringptr: &mut FXmacBdRing = &mut instance_p.rx_bd_queue.bdring;
    FXmacBdringPtrReset(txringptr, instance_p.lwipport.buffer.tx_bdspace as *mut FXmacBd);
    FXmacBdringPtrReset(rxringptr, instance_p.lwipport.buffer.rx_bdspace as *mut FXmacBd);
    FXmacSetQueuePtr(instance_p.tx_bd_queue.bdring.phys_base_addr, 0, FXMAC_SEND);
    FXmacSetQueuePtr(instance_p.rx_bd_queue.bdring.phys_base_addr, 0, FXMAC_RECV);
}
/// DMA TX error interrupt handler. Recovery is not implemented; the error
/// is treated as fatal and panics.
pub fn FXmacHandleDmaTxError(instance_p: &mut FXmac)
{
    panic!("Failed to handle DMA interrupt error");
}
/// Recovers from TX errors: disables the transmitter, frees the TX packet
/// buffers, rebuilds the TX descriptor ring, then re-enables transmission.
pub fn FXmacHandleTxErrors(instance_p: &mut FXmac)
{
    // Stop the transmitter before touching the ring.
    let mut netctrlreg: u32 = read_reg((instance_p.config.base_address + FXMAC_NWCTRL_OFFSET) as *const u32);
    netctrlreg = netctrlreg & !FXMAC_NWCTRL_TXEN_MASK;
    write_reg((instance_p.config.base_address + FXMAC_NWCTRL_OFFSET) as *mut u32, netctrlreg);
    FreeOnlyTxPbufs(instance_p);
    CleanDmaTxdescs(instance_p);
    // Re-enable the transmitter.
    netctrlreg = read_reg((instance_p.config.base_address + FXMAC_NWCTRL_OFFSET) as *const u32);
    netctrlreg = netctrlreg | FXMAC_NWCTRL_TXEN_MASK;
    write_reg((instance_p.config.base_address + FXMAC_NWCTRL_OFFSET) as *mut u32, netctrlreg);
}
fn CleanDmaTxdescs(instance_p: &mut FXmac)
{
warn!("Clean DMA TX DESCs");
let txringptr: &mut FXmacBdRing = &mut instance_p.tx_bd_queue.bdring;
let mut bdtemplate: FXmacBd = [0; FXMAC_BD_NUM_WORDS];
fxmac_bd_write((&mut bdtemplate as *mut _ as u64), FXMAC_BD_STAT_OFFSET,
fxmac_bd_read((&mut bdtemplate as *mut _ as u64), FXMAC_BD_STAT_OFFSET) | (FXMAC_TXBUF_USED_MASK));
let tx_bdspace_ptr = instance_p.lwipport.buffer.tx_bdspace as u64;
FXmacBdRingCreate(txringptr, tx_bdspace_ptr, tx_bdspace_ptr, BD_ALIGNMENT, FXMAX_TX_BDSPACE_LENGTH as u32);
FXmacBdRingClone(txringptr, &bdtemplate, FXMAC_SEND);
}
fn FreeOnlyTxPbufs(instance_p: &mut FXmac)
{
warn!("Free all TX DMA pbuf");
for index in 0..FXMAX_TX_PBUFS_LENGTH
{
if (instance_p.lwipport.buffer.tx_pbufs_storage[index] != 0)
{
let pbuf = instance_p.lwipport.buffer.tx_pbufs_storage[index];
let pages = (FXMAC_MAX_FRAME_SIZE as usize + (PAGE_SIZE - 1)) / PAGE_SIZE;
crate_interface::call_interface!(crate::KernelFunc::dma_free_coherent(pbuf as usize, pages));
instance_p.lwipport.buffer.tx_pbufs_storage[index] = 0;
}
}
}
/// Reclaims TX descriptors the hardware has finished with.
///
/// Repeatedly harvests completed BDs, rewrites each one back to the idle
/// state (USED set, plus WRAP on the ring's last BD, length cleared), and
/// returns them to the free pool. Loops until no completed BDs remain.
pub fn FXmacProcessSentBds(instance_p: &mut FXmac)
{
    let txring: &mut FXmacBdRing = &mut (instance_p.tx_bd_queue.bdring);
    let mut txbdset: *mut FXmacBd = null_mut();
    loop {
        let n_bds: u32 = FXmacBdRingFromHwTx(txring, FXMAX_TX_PBUFS_LENGTH, &mut txbdset);
        if n_bds == 0 {
            info!("FXmacProcessSentBds have not found BD");
            return;
        }
        let mut n_pbufs_freed: u32 = n_bds;
        let mut curbdpntr: *mut FXmacBd = txbdset;
        while n_pbufs_freed > 0 {
            let bdindex = FXMAC_BD_TO_INDEX(txring, curbdpntr as u64) as usize;
            trace!("FXmacProcessSentBds - {}: {:#010x?}", bdindex, unsafe{*(curbdpntr as *const macb_dma_desc)});
            // Rewrite the ctrl word: USED (bit 31) marks the BD idle; the
            // ring's final BD additionally keeps WRAP (bit 30).
            let mut v = 0;
            if bdindex == (FXMAX_TX_PBUFS_LENGTH - 1) {
                v = 0xC0000000;
            } else {
                v = 0x80000000;
            }
            let mut temp = curbdpntr as *mut u32;
            unsafe{
                temp.add(1).write_volatile(v); }
            crate::utils::DSB();
            let b = curbdpntr;
            curbdpntr = FXMAC_BD_RING_NEXT(txring, curbdpntr);
            // The cursor must advance each step.
            assert!(curbdpntr as usize != b as usize);
            n_pbufs_freed -= 1;
            crate::utils::DSB();
        }
        FXmacBdRingFree(txring, n_bds);
    }
}
/// TX-complete interrupt handler: acknowledges the transmit status register
/// (writing back the value just read clears the latched bits) and reclaims
/// finished TX descriptors.
pub fn FXmacSendHandler(instance: &mut FXmac)
{
    debug!("-> FXmacSendHandler");
    let txsr_addr = instance.config.base_address + FXMAC_TXSR_OFFSET;
    let regval: u32 = read_reg(txsr_addr as *const u32);
    write_reg(txsr_addr as *mut u32, regval);
    FXmacProcessSentBds(instance);
}
/// PCS link-change interrupt handler (SGMII interfaces only): reads the
/// link-partner status bit and updates `instance.link_status`, requesting
/// renegotiation on a down-to-up transition.
pub fn FXmacLinkChange(instance: &mut FXmac)
{
    debug!("-> FXmacLinkChange");
    if instance.config.interface == FXmacPhyInterface::FXMAC_PHY_INTERFACE_MODE_SGMII
    {
        let mut link_status: u32 = 0;
        let ctrl: u32 = read_reg((instance.config.base_address + FXMAC_PCS_AN_LP_OFFSET) as *const u32);
        // Extract the link-partner page-status bit. (The dead `let mut link = 0`
        // shadow binding from the original has been removed.)
        let link: u32 = (ctrl & FXMAC_PCS_LINK_PARTNER_NEXT_PAGE_STATUS) >> FXMAC_PCS_LINK_PARTNER_NEXT_PAGE_OFFSET;
        match link {
            0 => {
                info!("link status is down");
                link_status = FXMAC_LINKDOWN;
            }
            1 => {
                info!("link status is up");
                link_status = FXMAC_LINKUP;
            }
            _ => {
                error!("link status is error {:#x}", link);
            }
        }
        if link_status == FXMAC_LINKUP
        {
            // Only renegotiate on a transition, not while already up.
            if link_status != instance.link_status
            {
                instance.link_status = FXMAC_NEGOTIATING;
                info!("need NEGOTIATING");
            }
        }
        else
        {
            instance.link_status = FXMAC_LINKDOWN;
        }
    }
}
/// Polls the PHY status register and reports the raw link state:
/// 1 = link up, 0 = link down.
pub fn phy_link_detect(xmac_p: &mut FXmac, phy_addr: u32) -> u32
{
    let mut status: u16 = 0;
    // The MDIO read result is intentionally ignored: on failure `status`
    // stays 0 and the function reports link-down. (Replaces the original's
    // unused `ret` binding.)
    let _ = FXmacPhyRead(xmac_p, phy_addr, PHY_STATUS_REG_OFFSET, &mut status);
    if status & PHY_STAT_LINK_STATUS != 0 {
        return 1;
    }
    0
}
/// Polls the PHY status register and reports whether auto-negotiation has
/// completed: 1 = complete, 0 = not complete (or the read left status at 0).
pub fn phy_autoneg_status(xmac_p: &mut FXmac, phy_addr: u32) -> u32
{
    let mut status: u16 = 0;
    FXmacPhyRead(xmac_p, phy_addr, PHY_STATUS_REG_OFFSET, &mut status);
    let complete = (status & PHY_STATUS_AUTONEGOTIATE_COMPLETE) != 0;
    if complete { 1 } else { 0 }
}
/// Queues `pbuf`'s packet fragments for transmission, first reclaiming
/// completed descriptors when the free pool runs low. Returns the number of
/// bytes queued, or -3 when no TX descriptors are available.
pub fn FXmacLwipPortTx(instance: &mut FXmac, pbuf: Vec<Vec<u8>>) -> i32 {
    info!("TX transmit packets");
    let freecnt = (instance.tx_bd_queue.bdring).free_cnt;
    // Low-water mark: try to reclaim finished BDs before giving up.
    if freecnt <= 4 {
        info!("TX freecnt={}, let's process sent BDs", freecnt);
        FXmacProcessSentBds(instance);
    }
    // Guard clause: still no room after reclaiming -> drop the packet.
    if (instance.tx_bd_queue.bdring).free_cnt == 0 {
        error!(" TX packets dropped, no space");
        return -3;
    }
    FXmacSgsend(instance, pbuf) as i32
}
pub fn ethernetif_input_to_recv_packets(instance_p: &mut FXmac)
{
if(instance_p.lwipport.recv_flg > 0)
{
info!("ethernetif_input_to_recv_packets, fxmac_port->recv_flg={}", instance_p.lwipport.recv_flg);
instance_p.lwipport.recv_flg -= 1;
write_reg((instance_p.config.base_address + FXMAC_IER_OFFSET) as *mut u32, instance_p.mask);
}
{
}
}