/* nps_enet_read_rx_fifo - copy one received frame out of the HW RX FIFO.
 * @ndev:   network device.
 * @dst:    destination buffer (regular kernel memory, e.g. skb->data).
 * @length: frame length in bytes.
 *
 * The FIFO is drained one 32-bit word at a time via NPS_ENET_REG_RX_BUF.
 * Every read pops a word, so the number of reads must exactly match the
 * frame size rounded up to word granularity.
 */
static void nps_enet_read_rx_fifo(struct net_device *ndev,
				  unsigned char *dst, u32 length)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 i, last = length & (sizeof(u32) - 1);
	u32 *reg = (u32 *)dst, len = length / sizeof(u32);
	bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));

	/* In case dst is not aligned we need an intermediate buffer */
	if (dst_is_aligned) {
		for (i = 0; i < len; i++, reg++)
			*reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
	} else { /* !dst_is_aligned */
		for (i = 0; i < len; i++, reg++) {
			u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);

			/* "dst" is ordinary kernel memory, not an __iomem
			 * mapping, so memcpy_toio() was the wrong primitive
			 * here; a plain memcpy() handles the word-unaligned
			 * store correctly and keeps the bytes identical to
			 * the aligned path above.
			 */
			memcpy((u8 *)reg, &buf, sizeof(buf));
		}
	}

	/* copy last bytes (if any) */
	if (last) {
		u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);

		memcpy((u8 *)reg, &buf, last);
	}
}
/* nps_enet_read_rx_fifo - drain one received frame from the HW RX FIFO.
 * @ndev:   network device.
 * @dst:    destination buffer (e.g. skb->data); may be word-unaligned.
 * @length: frame length in bytes.
 *
 * Each access to NPS_ENET_REG_RX_BUF pops one 32-bit word from the FIFO,
 * so the frame is consumed word by word, with any trailing partial word
 * staged through a local temporary.
 */
static void nps_enet_read_rx_fifo(struct net_device *ndev,
				  unsigned char *dst, u32 length)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 *word_ptr = (u32 *)dst;
	u32 n_words = length / sizeof(u32);
	s32 tail = length & (sizeof(u32) - 1);
	s32 idx;

	if (IS_ALIGNED((unsigned long)dst, sizeof(u32))) {
		/* Aligned destination: bulk-read straight into the buffer */
		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF,
			     word_ptr, n_words);
		word_ptr += n_words;
	} else {
		/* Unaligned destination: store each word byte-wise */
		for (idx = 0; idx < n_words; idx++, word_ptr++)
			put_unaligned_be32(nps_enet_reg_get(priv,
							    NPS_ENET_REG_RX_BUF),
					   word_ptr);
	}

	/* Trailing 1-3 bytes, if the frame is not a word multiple */
	if (tail) {
		u32 tmp;

		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &tmp, 1);
		memcpy((u8 *)word_ptr, &tmp, tail);
	}
}
static void nps_enet_tx_handler(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT; u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; /* Check if we got TX */ if (!priv->tx_skb || tx_ctrl_ct) return; /* Ack Tx ctrl register */ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0); /* Check Tx transmit error */ if (unlikely(tx_ctrl_et)) { ndev->stats.tx_errors++; } else { ndev->stats.tx_packets++; ndev->stats.tx_bytes += tx_ctrl_nt; } dev_kfree_skb(priv->tx_skb); priv->tx_skb = NULL; if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); }
static void nps_enet_tx_handler(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); struct nps_enet_tx_ctl tx_ctrl; tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); /* Check if we got TX */ if (!priv->tx_packet_sent || tx_ctrl.ct) return; /* Ack Tx ctrl register */ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0); /* Check Tx transmit error */ if (unlikely(tx_ctrl.et)) { ndev->stats.tx_errors++; } else { ndev->stats.tx_packets++; ndev->stats.tx_bytes += tx_ctrl.nt; } dev_kfree_skb(priv->tx_skb); priv->tx_packet_sent = false; if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); }
/* nps_enet_is_tx_pending - check whether a queued TX frame awaits reaping.
 * @priv: driver private data.
 *
 * Returns true when an skb is in flight and the HW's TX_CTL "ct" bit is
 * clear, i.e. the completion still needs to be handled.
 */
static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv)
{
	u32 ctl = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);

	return priv->tx_skb && !((ctl & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT);
}
/* nps_enet_clean_rx_fifo - discard a frame left in the HW RX FIFO.
 * @ndev:      network device.
 * @frame_len: length in bytes of the frame to drop.
 *
 * Pops (and throws away) enough 32-bit words to cover the whole frame,
 * leaving the FIFO ready for the next one.
 */
static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 words_left = DIV_ROUND_UP(frame_len, sizeof(u32));

	/* Each read pops one word; values are intentionally discarded */
	while (words_left--)
		nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
}
/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has 2 interrupt causes, and depending on bits raised in
 * CTRL registers we may tell what is a reason for interrupt to fire up.
 * We got one for RX and the other for TX (completion).
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	struct nps_enet_rx_ctl rx_ctrl;
	struct nps_enet_tx_ctl tx_ctrl;
	bool tx_done;

	rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);

	/* TX needs service when a frame was sent and HW finished it */
	tx_done = priv->tx_packet_sent && !tx_ctrl.ct;

	if (tx_done || rx_ctrl.cr) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* Mask both causes until the poll loop re-enables */
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has 2 interrupt causes, and depending on bits raised in
 * CTRL registers we may tell what is a reason for interrupt to fire up.
 * We got one for RX and the other for TX (completion).
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 rx_ctl = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	bool rx_ready = (rx_ctl & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;

	/* Kick NAPI when either a frame arrived or a TX completed */
	if (rx_ready || nps_enet_is_tx_pending(priv)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* Mask both causes until the poll loop re-enables */
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
/** * nps_enet_poll - NAPI poll handler. * @napi: Pointer to napi_struct structure. * @budget: How many frames to process on one call. * * returns: Number of processed frames */ static int nps_enet_poll(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; struct nps_enet_priv *priv = netdev_priv(ndev); u32 work_done; nps_enet_tx_handler(ndev); work_done = nps_enet_rx_handler(ndev); if (work_done < budget) { u32 buf_int_enable_value = 0; u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; napi_complete(napi); /* set tx_done and rx_rdy bits */ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, buf_int_enable_value); /* in case we will get a tx interrupt while interrupts * are masked, we will lose it since the tx is edge interrupt. * specifically, while executing the code section above, * between nps_enet_tx_handler and the interrupts enable, all * tx requests will be stuck until we will get an rx interrupt. * the two code lines below will solve this situation by * re-adding ourselves to the poll list. */ if (priv->tx_skb && !tx_ctrl_ct) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); napi_reschedule(napi); } }
/* nps_enet_rx_handler - receive at most one frame from the HW RX FIFO.
 * @ndev: network device.
 *
 * Returns the number of frames processed (0 or 1), for NAPI accounting.
 *
 * The whole RX status (ready flag, error flags and frame length) comes
 * from a single read of NPS_ENET_REG_RX_CTL. On any error the frame is
 * drained from the FIFO (nps_enet_clean_rx_fifo) so the HW can make
 * progress; in every case the control register is acked by writing 0.
 */
static u32 nps_enet_rx_handler(struct net_device *ndev)
{
	u32 frame_len, err = 0;
	u32 work_done = 0;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
	u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT;
	u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT;

	frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT;

	/* Check if we got RX */
	if (!rx_ctrl_cr)
		return work_done;

	/* If we got here there is a work for us */
	work_done++;

	/* Check Rx error */
	if (rx_ctrl_er) {
		ndev->stats.rx_errors++;
		err = 1;
	}

	/* Check Rx CRC error */
	if (rx_ctrl_crc) {
		ndev->stats.rx_crc_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	/* Check Frame length Min 64b */
	if (unlikely(frame_len < ETH_ZLEN)) {
		ndev->stats.rx_length_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	/* Any error: drop the frame but still drain it from the FIFO */
	if (err)
		goto rx_irq_clean;

	/* Skb allocation */
	skb = netdev_alloc_skb_ip_align(ndev, frame_len);
	if (unlikely(!skb)) {
		/* Allocation failure is accounted as a dropped-with-error */
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
		goto rx_irq_clean;
	}

	/* Copy frame from Rx fifo into the skb */
	nps_enet_read_rx_fifo(ndev, skb->data, frame_len);

	skb_put(skb, frame_len);
	skb->protocol = eth_type_trans(skb, ndev);
	/* NOTE(review): checksum is marked as HW-verified unconditionally;
	 * presumably the MAC validates it - confirm against HW spec.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += frame_len;
	netif_receive_skb(skb);

	goto rx_irq_frame_done;

rx_irq_clean:
	/* Clean Rx fifo */
	nps_enet_clean_rx_fifo(ndev, frame_len);

rx_irq_frame_done:
	/* Ack Rx ctrl register */
	nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);

	return work_done;
}