/**
 * Drain all frames currently available in the rx DMA fifo while the
 * device operates in plain ethernet mode.
 *
 * Every frame the hardware has marked as received is handed either to
 * the EtherCAT master (when one has claimed this device) or to the
 * regular network stack, after which its descriptor is returned to the
 * fifo and the fifo position advances.
 *
 * TODO omit possible deadlock in situations with heavy traffic
 *
 * @priv: device private data holding the rx fifo and dispatch targets
 */
static void poll_rx(struct ccat_eth_priv *const priv)
{
	/*
	 * NOTE(review): the -4 presumably strips a trailing 4-byte field
	 * (checksum?) from the descriptor header length -- confirm against
	 * the CCAT frame layout.  Also assumes length >= overhead; a
	 * shorter reported length would underflow payload -- verify.
	 */
	static const size_t overhead = CCAT_ETH_FRAME_HEAD_LEN - 4;
	struct ccat_eth_dma_fifo *const fifo = &priv->rx_fifo;

	while (ccat_eth_frame_received(fifo->next)) {
		const size_t payload =
		    le16_to_cpu(fifo->next->length) - overhead;

		if (priv->ecdev)
			ecdev_receive(priv->ecdev, fifo->next->data, payload);
		else
			ccat_eth_receive(priv->netdev, fifo->next->data,
					 payload);

		/* recycle the descriptor, then move to the next slot */
		ccat_eth_rx_fifo_add(fifo, fifo->next);
		ccat_eth_fifo_inc(fifo);
	}
}
/**
 * Hand one received frame from the eim rx fifo to the EtherCAT master.
 *
 * @priv: device private data; priv->ecdev is expected to be valid here
 * @len:  number of bytes of frame data to forward
 */
static void ecdev_receive_eim(struct ccat_eth_priv *const priv, size_t len)
{
	void *const data = priv->rx_fifo.eim.next->data;

	ecdev_receive(priv->ecdev, data, len);
}