Example #1
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			napi_ok = napi_schedule_prep(&fep->napi);

			(*fep->ops->napi_disable_rx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

			/* NOTE: it is possible for FCCs in NAPI mode    */
			/* to submit a spurious interrupt while in poll  */
			if (napi_ok)
				__napi_schedule(&fep->napi);
		}

		if (int_events & fep->ev_tx) {
			napi_ok = napi_schedule_prep(&fep->napi_tx);

			(*fep->ops->napi_disable_tx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);

			/* NOTE: it is possible for FCCs in NAPI mode    */
			/* to submit a spurious interrupt while in poll  */
			if (napi_ok)
				__napi_schedule(&fep->napi_tx);
		}
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
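All of the handlers in this listing share one idiom: read the hardware
status, and if RX work is pending, claim the NAPI instance atomically
with napi_schedule_prep(); only when that claim succeeds do they mask
the device's RX interrupt and hand the instance to the softirq with
__napi_schedule(). A distilled sketch of the pattern follows; the
mydev_* register names and types are hypothetical, and only the NAPI
and IRQ calls are real kernel API.

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

#define MYDEV_STATUS	0x00	/* hypothetical status register offset */
#define MYDEV_IMASK	0x04	/* hypothetical interrupt mask register */
#define MYDEV_RX_INT	BIT(0)	/* hypothetical RX event bit */

struct mydev_priv {
	struct napi_struct napi;
	void __iomem *base;
};

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct mydev_priv *priv = netdev_priv(ndev);
	u32 status = readl(priv->base + MYDEV_STATUS);

	if (!status)
		return IRQ_NONE;	/* shared line: not our interrupt */

	/* Ack the events we are about to handle. */
	writel(status, priv->base + MYDEV_STATUS);

	if (status & MYDEV_RX_INT && napi_schedule_prep(&priv->napi)) {
		/* Mask RX; the poll routine re-enables it when done. */
		writel(0, priv->base + MYDEV_IMASK);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}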
Example #2
static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
{
	u32 status;

	status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
	if (status == 0)
		return IRQ_NONE;

	/*
	 * Any of the eight receive units signaled RX?
	 */
	if (status & 0x00ff) {
		struct net_device *dev = nds[0];
		struct ixpdev_priv *ip = netdev_priv(dev);

		ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
		if (likely(napi_schedule_prep(&ip->napi))) {
			__napi_schedule(&ip->napi);
		} else {
			printk(KERN_CRIT "ixp2000: irq while polling!!\n");
		}
	}

	/*
	 * Any of the eight transmit units signaled TXdone?
	 */
	if (status & 0xff00) {
		ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
		ixpdev_tx_complete();
	}

	return IRQ_HANDLED;
}
Example #3
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
    struct net_device *ndev = (struct net_device *)dev_id;
    struct hip04_priv *priv = netdev_priv(ndev);
    struct net_device_stats *stats = &ndev->stats;
    u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

    if (!ists)
        return IRQ_NONE;

    writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

    if (unlikely(ists & DEF_INT_ERR)) {
        if (ists & (RCV_NOBUF | RCV_DROP)) {
            stats->rx_errors++;
            stats->rx_dropped++;
            netdev_err(ndev, "rx drop\n");
        }
        if (ists & TX_DROP) {
            stats->tx_dropped++;
            netdev_err(ndev, "tx drop\n");
        }
    }

    if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
        /* disable rx interrupt */
        priv->reg_inten &= ~(RCV_INT);
        writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
        hrtimer_cancel(&priv->tx_coalesce_timer);
        __napi_schedule(&priv->napi);
    }

    return IRQ_HANDLED;
}
Example #4
static irqreturn_t ubi32_eth_interrupt(int irq, void *dev_id)
{
	struct ubi32_eth_private *priv;

	struct net_device *dev = (struct net_device *)dev_id;
	BUG_ON(irq != dev->irq);

	priv = netdev_priv(dev);
	if (unlikely(!(priv->regs->int_status & priv->regs->int_mask))) {
		return IRQ_NONE;
	}

	/*
	 * Disable port interrupt
	 */
#ifdef UBICOM32_USE_NAPI
	if (napi_schedule_prep(&priv->napi)) {
		priv->regs->int_mask = 0;
		__napi_schedule(&priv->napi);
	}
#else
	priv->regs->int_mask = 0;
	tasklet_schedule(&priv->tsk);
#endif
	return IRQ_HANDLED;
}
Example #5
void rmnet_mhi_rx_cb(mhi_cb_info *cb_info)
{
	struct net_device *dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr;

	if (NULL == cb_info || NULL == cb_info->result)
		return;

	dev = (struct net_device *)cb_info->result->user_data;
	rmnet_mhi_ptr = netdev_priv(dev);

	switch (cb_info->cb_reason) {
	case MHI_CB_XFER_SUCCESS:
	case MHI_CB_MHI_ENABLED:
		break;
	case MHI_CB_MHI_DISABLED:
		return;
	default:
		pr_err("%s(): Received bad return code %d from core\n",
		       __func__, cb_info->cb_reason);
		break;
	}
	rx_interrupts_count[rmnet_mhi_ptr->dev_index]++;

	/* Disable interrupts */
	mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
	atomic_inc(&rmnet_mhi_ptr->irq_masked);

	/* We need to start a watchdog here, not sure how to do that yet */

	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi)))
		__napi_schedule(&(rmnet_mhi_ptr->napi));
	else
		rx_interrupts_in_masked_irq[rmnet_mhi_ptr->dev_index]++;
}
Example #6
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
			ret = IRQ_HANDLED;

			/* Disable the RX interrupt */
			if (napi_schedule_prep(&fep->napi)) {
				writel(FEC_RX_DISABLED_IMASK,
					fep->hwp + FEC_IMASK);
				__napi_schedule(&fep->napi);
			}
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
Example #7
/* The RDC interrupt handler. */
static irqreturn_t pci_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pci_eth_private *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	u16 misr, status;

	/* TODO: Is it our interrupt? */
	/* TODO: Read status register and clear */
	status = ioread16(ioaddr + MISR);

	/* RX interrupt request */
	if (status & RX_INTS) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&priv->napi))) {
			/* TODO: Mask off (disable) RX interrupt */
			__napi_schedule(&priv->napi);
		}
	}

	/* TX interrupt request */
	if (status & TX_INTS)
		pci_eth_tx(dev);

	return IRQ_HANDLED;
}
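The TODOs above mark the three steps a finished handler performs:
check that the device actually raised the line, read and clear the
status register, and mask RX while NAPI polls. The skeleton is clearly
derived from the r6040 driver (compare Example #21 below), so a
possible completion modeled on that handler might look as follows;
whether pci_eth hardware really has an r6040-style MIER mask register
is an assumption.

static irqreturn_t pci_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pci_eth_private *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	u16 misr, status;

	/* Save the enable mask, then mask off all MAC interrupts
	 * while we inspect the status (assumed, as in r6040). */
	misr = ioread16(ioaddr + MIER);
	iowrite16(MSK_INT, ioaddr + MIER);

	/* Read the status register; reading also clears it. */
	status = ioread16(ioaddr + MISR);
	if (status == 0x0000 || status == 0xffff) {
		/* Shared line: not our interrupt. */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX interrupt request */
	if (status & RX_INTS) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&priv->napi))) {
			/* Keep RX masked; the poll routine unmasks it. */
			misr &= ~RX_INTS;
			__napi_schedule(&priv->napi);
		}
	}

	/* TX interrupt request */
	if (status & TX_INTS)
		pci_eth_tx(dev);

	/* Restore the (possibly reduced) interrupt mask. */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}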
Example #8
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
		       status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;	/* minimal completion; original was truncated here */
}
Example #9
static int rmnet_mhi_open(struct net_device *dev)
{
	struct rmnet_mhi_private *rmnet_mhi_ptr =
			*(struct rmnet_mhi_private **)netdev_priv(dev);

	rmnet_log(MSG_INFO,
			"Opened net dev interface for MHI chans %d and %d\n",
			rmnet_mhi_ptr->tx_channel,
			rmnet_mhi_ptr->rx_channel);
	netif_start_queue(dev);
	napi_enable(&(rmnet_mhi_ptr->napi));

	/* Poll to check if any buffers are accumulated in the
	 * transport buffers
	 */
	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) {
		mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
		atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr);
		__napi_schedule(&(rmnet_mhi_ptr->napi));
	} else {
		rx_interrupts_in_masked_irq[rmnet_mhi_ptr->dev_index]++;
	}
	return 0;
}
Example #10
static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct hip04_priv *priv = netdev_priv(ndev);
    struct net_device_stats *stats = &ndev->stats;
    unsigned int tx_head = priv->tx_head, count;
    struct tx_desc *desc = &priv->tx_desc[tx_head];
    dma_addr_t phys;

    smp_rmb();
    count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
    if (count == (TX_DESC_NUM - 1)) {
        netif_stop_queue(ndev);
        return NETDEV_TX_BUSY;
    }

    phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
    if (dma_mapping_error(&ndev->dev, phys)) {
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
    }

    priv->tx_skb[tx_head] = skb;
    priv->tx_phys[tx_head] = phys;
    desc->send_addr = cpu_to_be32(phys);
    desc->send_size = cpu_to_be32(skb->len);
    desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
    phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
    desc->wb_addr = cpu_to_be32(phys);
    skb_tx_timestamp(skb);

    hip04_set_xmit_desc(priv, phys);
    priv->tx_head = TX_NEXT(tx_head);
    count++;
    netdev_sent_queue(ndev, skb->len);

    stats->tx_bytes += skb->len;
    stats->tx_packets++;

    /* Ensure tx_head update visible to tx reclaim */
    smp_wmb();

    /* queue is getting full, better start cleaning up now */
    if (count >= priv->tx_coalesce_frames) {
        if (napi_schedule_prep(&priv->napi)) {
            /* disable rx interrupt and timer */
            priv->reg_inten &= ~(RCV_INT);
            writel_relaxed(DEF_INT_MASK & ~RCV_INT,
                           priv->base + PPE_INTEN);
            hrtimer_cancel(&priv->tx_coalesce_timer);
            __napi_schedule(&priv->napi);
        }
    } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
        /* cleanup not pending yet, start a new timer */
        hip04_start_tx_timer(priv);
    }

    return NETDEV_TX_OK;
}
Example #11
static void timer_handler(unsigned long data)
{
	struct asf_qdisc *sch = (struct asf_qdisc *)data;

	if (napi_schedule_prep(&(sch->qos_napi))) {
		sch->state = SCH_BUSY;
		__napi_schedule(&(sch->qos_napi));
	}
}
Example #12
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
    struct xgene_enet_desc_ring *rx_ring = data;

    if (napi_schedule_prep(&rx_ring->napi)) {
        disable_irq_nosync(irq);
        __napi_schedule(&rx_ring->napi);
    }

    return IRQ_HANDLED;
}
Example #13
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}
Example #14
void qos_dequeue(struct  asf_qdisc *sch)
{
	if (sch->state != SCH_BUSY) {
		if (sch->state == SCH_TIMER_PENDING)
			del_timer(&sch->timer);

		if (napi_schedule_prep(&(sch->qos_napi))) {
			sch->state = SCH_BUSY;
			__napi_schedule(&(sch->qos_napi));
		}
	}
}
Example #15
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_ptp_private *fpp = fep->ptp_priv;
	uint int_events;
	ulong flags;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			spin_lock_irqsave(&fep->hw_lock, flags);

			if (fep->use_napi) {
				/* Disable the RX interrupt */
				if (napi_schedule_prep(&fep->napi)) {
					fec_rx_int_is_enabled(ndev, false);
					__napi_schedule(&fep->napi);
				}
			} else
				fec_enet_rx(ndev);

			spin_unlock_irqrestore(&fep->hw_lock, flags);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_TS_TIMER) {
			ret = IRQ_HANDLED;
			if (fep->ptimer_present && fpp)
				fpp->prtc++;
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
Example #16
static irqreturn_t Eth_interruption(int irq, void *dev_id) {
    int statusword;
    struct eth_priv *priv;
    struct eth_packet *pkt = NULL;
 
    /*
     * As usual, check the "device" pointer to be sure it is
     * really interrupting.
     * Then assign "struct net_device *dev".
     */
    struct net_device *dev = (struct net_device *)dev_id;
    /* ... and check with hw if it's really ours */

    /* paranoid */
    if (!dev)
        return IRQ_HANDLED;
 
    /* Lock the device */
    priv = netdev_priv(dev);
    spin_lock(&priv->lock);
 
    /* retrieve statusword: real netdevices use I/O instructions */
    statusword = priv->status;
    priv->status = 0;

    if (statusword & ETH_RX_INTR) {
        /* Disable any further "packet available" interrupts and tell
         * the networking subsystem to poll the driver shortly to pick
         * up all available packets.
         */
        Eth_rx_ints(dev, 0);
        if (napi_schedule_prep(&priv->napi))
            __napi_schedule(&priv->napi);
    }
    if (statusword & ETH_TX_INTR) {
        /* a transmission is over: free the skb */
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += priv->tx_packetlen;
        dev_kfree_skb(priv->skb);
    }
 
    /* Unlock the device and we are done */
    spin_unlock(&priv->lock);
    if (pkt)
        eth_release_buffer(pkt); /* Do this outside the lock! */

    return IRQ_HANDLED;
}
Example #17
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
Example #18
/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq:		irq number.
 * @dev_instance:	device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * ARC EMAC has only one interrupt line; from the bits raised in the
 * STATUS register we can tell why the interrupt fired.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
			priv->rx_missed_errors += 0x100;
			napi_schedule(&priv->napi);
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}
Example #19
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
    struct hip04_priv *priv;

    priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

    if (napi_schedule_prep(&priv->napi)) {
        /* disable rx interrupt */
        priv->reg_inten &= ~(RCV_INT);
        writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
        __napi_schedule(&priv->napi);
    }

    return HRTIMER_NORESTART;
}
Example #20
/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq:                irq number.
 * @dev_instance:       device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has two interrupt causes, one for RX and one for TX
 * (completion). From the bits raised in the CTRL registers we can
 * tell why the interrupt fired.
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;

	if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr)
		if (likely(napi_schedule_prep(&priv->napi))) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}

	return IRQ_HANDLED;
}
Example #21
/* The RDC interrupt handler. */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 misr, status;

	/* Save MIER */
	misr = ioread16(ioaddr + MIER);
	/* Mask off RDC MAC interrupt */
	iowrite16(MSK_INT, ioaddr + MIER);
	/* Read MISR status and clear */
	status = ioread16(ioaddr + MISR);

	if (status == 0x0000 || status == 0xffff) {
		/* Restore RDC MAC interrupt */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX interrupt request */
	if (status & RX_INTS) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&lp->napi))) {
			/* Mask off RX interrupt */
			misr &= ~RX_INTS;
			__napi_schedule(&lp->napi);
		}
	}

	/* TX interrupt request */
	if (status & TX_INTS)
		r6040_tx(dev);

	/* Restore RDC MAC interrupt */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}
Example #22
/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq:                irq number.
 * @dev_instance:       device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has two interrupt causes, one for RX and one for TX
 * (completion). From the bits raised in the CTRL registers we can
 * tell why the interrupt fired.
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	struct nps_enet_rx_ctl rx_ctrl;
	struct nps_enet_tx_ctl tx_ctrl;

	rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);

	if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
		if (likely(napi_schedule_prep(&priv->napi))) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}

	return IRQ_HANDLED;
}
Example #23
static void rmnet_mhi_rx_cb(struct mhi_result *result)
{
	struct net_device *dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr;
	rmnet_mhi_ptr = result->user_data;
	dev = rmnet_mhi_ptr->dev;

	rmnet_log(MSG_VERBOSE, "Entered\n");
	rx_interrupts_count[rmnet_mhi_ptr->dev_index]++;

	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) {
		mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
		atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr);
		__napi_schedule(&(rmnet_mhi_ptr->napi));
	} else {
		rx_interrupts_in_masked_irq[rmnet_mhi_ptr->dev_index]++;
	}
	rmnet_log(MSG_VERBOSE, "Exited\n");
}
Example #24
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
Example #25
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;

			/* Disable the RX interrupt */
			if (napi_schedule_prep(&fep->napi)) {
				writel(FEC_RX_DISABLED_IMASK,
					fep->hwp + FEC_IMASK);
				__napi_schedule(&fep->napi);
			}
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
Example #26
static void xlgmac_tx_timer(struct timer_list *t)
{
	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}
Example #27
/**
@brief		receive all @b IPC message frames in all RXQs

In a for loop,\n
1) Checks whether any REQ_ACK was received.\n
2) Receives all IPC link frames in every RXQ.\n
3) Sends RES_ACK if there was a REQ_ACK from CP.\n
4) Checks whether any RES_ACK was received.\n

@param mld	the pointer to a mem_link_device instance
*/
void recv_sbd_ipc_frames(struct mem_link_device *mld)
{
	struct sbd_link_device *sl = &mld->sbd_link_dev;
	int i;

	for (i = 0; i < sl->num_channels; i++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, RX);

		if (unlikely(rb_empty(rb)))
			continue;

		if (likely(sipc_ps_ch(rb->ch))) {
#ifdef CONFIG_LINK_DEVICE_NAPI
			//mld->link_dev.disable_irq(&mld->link_dev);
			if (napi_schedule_prep(&rb->iod->napi))
				__napi_schedule(&rb->iod->napi);
#else
			rx_net_frames_from_rb(rb, 0);
#endif
		} else {
			rx_ipc_frames_from_rb(rb);
		}
	}
}
Example #28
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
Example #29
/* Get packets from the host vring */
static int cfv_rx_poll(struct napi_struct *napi, int quota)
{
	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
	int rxcnt = 0;
	int err = 0;
	void *buf;
	struct sk_buff *skb;
	struct vringh_kiov *riov = &cfv->ctx.riov;
	unsigned int skb_len;

	do {
		skb = NULL;

		/* Put the previous iovec back on the used ring and
		 * fetch a new iovec if we have processed all elements.
		 */
		if (riov->i == riov->used) {
			if (cfv->ctx.head != USHRT_MAX) {
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
				cfv->ctx.head = USHRT_MAX;
			}

			err = vringh_getdesc_kern(
				cfv->vr_rx,
				riov,
				NULL,
				&cfv->ctx.head,
				GFP_ATOMIC);

			if (err <= 0)
				goto exit;
		}

		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
		/* TODO: Add check on valid buffer address */

		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
		if (unlikely(err))
			goto exit;

		/* Push received packet up the stack. */
		skb_len = skb->len;
		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfv->ndev;
		err = netif_receive_skb(skb);
		if (unlikely(err)) {
			++cfv->ndev->stats.rx_dropped;
		} else {
			++cfv->ndev->stats.rx_packets;
			cfv->ndev->stats.rx_bytes += skb_len;
		}

		++riov->i;
		++rxcnt;
	} while (rxcnt < quota);

	++cfv->stats.rx_napi_resched;
	goto out;

exit:
	switch (err) {
	case 0:
		++cfv->stats.rx_napi_complete;

		/* Really out of packets? (stolen from virtio_net) */
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
		    napi_schedule_prep(napi)) {
			vringh_notify_disable_kern(cfv->vr_rx);
			__napi_schedule(napi);
		}
		break;

	case -ENOMEM:
		++cfv->stats.rx_nomem;
		dev_kfree_skb(skb);
		/* Stop NAPI poll on OOM, we hope to be polled later */
		napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
		break;

	default:
		/* We're doomed, any modem fault is fatal */
		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
		napi_complete(napi);
		vringh_notify_disable_kern(cfv->vr_rx);
		netif_carrier_off(cfv->ndev);
		break;
	}
out:
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
	return rxcnt;
}
Example #30
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;
	struct hfi1_vnic_vport_info *vinfo = NULL;
	struct hfi1_vnic_rx_queue *rxq;
	struct sk_buff *skb;
	int l4_type, vesw_id = -1;
	u8 q_idx;

	l4_type = HFI1_GET_L4_TYPE(packet->ebuf);
	if (likely(l4_type == OPA_VNIC_L4_ETHR)) {
		vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
		vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);

		/*
		 * In case of invalid vesw id, count the error on
		 * the first available vport.
		 */
		if (unlikely(!vinfo)) {
			struct hfi1_vnic_vport_info *vinfo_tmp;
			int id_tmp = 0;

			vinfo_tmp =  idr_get_next(&dd->vnic.vesw_idr, &id_tmp);
			if (vinfo_tmp) {
				spin_lock(&vport_cntr_lock);
				vinfo_tmp->stats[0].netstats.rx_nohandler++;
				spin_unlock(&vport_cntr_lock);
			}
		}
	}

	if (unlikely(!vinfo)) {
		dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
			    l4_type, vesw_id, packet->rcd->ctxt);
		return;
	}

	q_idx = packet->rcd->vnic_q_idx;
	rxq = &vinfo->rxq[q_idx];
	if (unlikely(!netif_oper_up(vinfo->netdev))) {
		vinfo->stats[q_idx].rx_drop_state++;
		skb_queue_purge(&rxq->skbq);
		return;
	}

	if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
	if (unlikely(!skb)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	memcpy(skb->data, packet->ebuf, packet->tlen);
	skb_put(skb, packet->tlen);
	skb_queue_tail(&rxq->skbq, skb);

	if (napi_schedule_prep(&rxq->napi)) {
		v_dbg("napi %d scheduling\n", q_idx);
		__napi_schedule(&rxq->napi);
	}
}
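Every handler above defers the actual packet processing to a NAPI poll
routine, yet only Example #29 shows the poll side. For completeness,
here is a minimal poll counterpart to the mydev sketch after Example
#1; mydev_rx() is a hypothetical stand-in for the driver's receive
loop, and the MYDEV_* names are the same assumed registers as before.

static int mydev_rx(struct mydev_priv *priv, int budget);

static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev_priv *priv = container_of(napi, struct mydev_priv, napi);
	int work_done;

	/* Process up to `budget` received packets. */
	work_done = mydev_rx(priv, budget);

	/* If the budget was not exhausted there is no more work;
	 * napi_complete_done() returns true once polling really stops,
	 * and only then is it safe to unmask the RX interrupt again.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		writel(MYDEV_RX_INT, priv->base + MYDEV_IMASK);

	return work_done;
}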