static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
	struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];

	while (ec_bhf_pkt_received(desc)) {
		/* Payload length: descriptor length minus the RX header
		 * and the 4-byte frame checksum.
		 */
		int pkt_size = (le16_to_cpu(desc->header.len) &
				RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
		u8 *data = desc->data;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
		if (skb) {
			memcpy(skb_put(skb, pkt_size), data, pkt_size);
			skb->protocol = eth_type_trans(skb, priv->net_dev);
			priv->stat_rx_bytes += pkt_size;

			netif_rx(skb);
		} else {
			dev_err_ratelimited(PRIV_TO_DEV(priv),
					    "Couldn't allocate a skb_buff for a packet of size %u\n",
					    pkt_size);
		}

		/* Hand the descriptor back to the hardware and advance to the
		 * next slot in the RX ring.
		 */
		desc->header.recv = 0;
		ec_bhf_add_rx_desc(priv, desc);

		priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
		desc = &priv->rx_descs[priv->rx_dnext];
	}
}
static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
	struct device *dev = PRIV_TO_DEV(priv);
	unsigned int block_count, i;
	void __iomem *ec_info;

	/* Scan the info blocks for the EtherCAT master with DMA. */
	block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
	for (i = 0; i < block_count; i++) {
		u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
				    INFO_BLOCK_TYPE);
		if (type == ETHERCAT_MASTER_ID)
			break;
	}
	if (i == block_count) {
		dev_err(dev, "EtherCAT master with DMA block not found\n");
		return -ENODEV;
	}

	ec_info = priv->io + i * INFO_BLOCK_SIZE;

	priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
	priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);

	priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
	priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
	priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
	priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);

	return 0;
}
static int ec_bhf_stop(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);

	hrtimer_cancel(&priv->hrtimer);

	ec_bhf_reset(priv);

	netif_tx_disable(net_dev);

	dma_free_coherent(dev, priv->tx_buf.alloc_len,
			  priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
	dma_free_coherent(dev, priv->rx_buf.alloc_len,
			  priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

	return 0;
}
static int ec_bhf_open(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);
	int err = 0;

	ec_bhf_reset(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
				   FIFO_SIZE * sizeof(struct rx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate rx buffer\n");
		goto out;
	}
	ec_bhf_setup_rx_descs(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
				   FIFO_SIZE * sizeof(struct tx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate tx buffer\n");
		goto error_rx_free;
	}
	iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
	ec_bhf_setup_tx_descs(priv);
	netif_start_queue(net_dev);

	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->hrtimer.function = ec_bhf_timer_fun;
	hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
		      HRTIMER_MODE_REL);

	return 0;

error_rx_free:
	dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
			  priv->rx_buf.alloc_phys);
out:
	return err;
}
static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
				struct bhf_dma *buf,
				int channel,
				int size)
{
	int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
	struct device *dev = PRIV_TO_DEV(priv);
	u32 mask;

	/* Write all ones and read back to discover the DMA window size mask. */
	iowrite32(0xffffffff, priv->dma_io + offset);

	mask = ioread32(priv->dma_io + offset);
	mask &= DMA_WINDOW_SIZE_MASK;

	/* We want to allocate a chunk of memory that is:
	 * - aligned to the mask we just read
	 * - at most 2^mask bytes in size
	 * To guarantee the alignment, we allocate a buffer of
	 * 2 * 2^mask bytes and use a suitably aligned window inside it.
	 */
	buf->len = min_t(int, ~mask + 1, size);
	buf->alloc_len = 2 * buf->len;

	buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
					GFP_KERNEL);
	if (buf->alloc == NULL) {
		dev_err(dev, "Failed to allocate buffer\n");
		return -ENOMEM;
	}

	buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
	buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);

	iowrite32(0, priv->dma_io + offset + 4);
	iowrite32(buf->buf_phys, priv->dma_io + offset);

	return 0;
}