Code Example #1
File: fs_enet-main.c Project: 3null/linux
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			napi_ok = napi_schedule_prep(&fep->napi);

			(*fep->ops->napi_disable_rx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

			/* NOTE: it is possible for FCCs in NAPI mode    */
			/* to submit a spurious interrupt while in poll  */
			if (napi_ok)
				__napi_schedule(&fep->napi);
		}

		if (int_events & fep->ev_tx) {
			napi_ok = napi_schedule_prep(&fep->napi_tx);

			(*fep->ops->napi_disable_tx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);

			/* NOTE: it is possible for FCCs in NAPI mode    */
			/* to submit a spurious interrupt while in poll  */
			if (napi_ok)
				__napi_schedule(&fep->napi_tx);
		}
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
Code Example #2
static int rmnet_mhi_open(struct net_device *dev)
{
	struct rmnet_mhi_private *rmnet_mhi_ptr =
			*(struct rmnet_mhi_private **)netdev_priv(dev);

	rmnet_log(MSG_INFO,
			"Opened net dev interface for MHI chans %d and %d\n",
			rmnet_mhi_ptr->tx_channel,
			rmnet_mhi_ptr->rx_channel);
	netif_start_queue(dev);
	napi_enable(&(rmnet_mhi_ptr->napi));

	/* Poll to check if any buffers are accumulated in the
	 * transport buffers
	 */
	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) {
		mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
		atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr);
		__napi_schedule(&(rmnet_mhi_ptr->napi));
	} else {
		rx_interrupts_in_masked_irq[rmnet_mhi_ptr->dev_index]++;
	}
	return 0;

}
Code Example #3
File: hip04_eth.c Project: mhei/linux
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
    struct net_device *ndev = (struct net_device *)dev_id;
    struct hip04_priv *priv = netdev_priv(ndev);
    struct net_device_stats *stats = &ndev->stats;
    u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

    if (!ists)
        return IRQ_NONE;

    writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

    if (unlikely(ists & DEF_INT_ERR)) {
        if (ists & (RCV_NOBUF | RCV_DROP)) {
            stats->rx_errors++;
            stats->rx_dropped++;
            netdev_err(ndev, "rx drop\n");
        }
        if (ists & TX_DROP) {
            stats->tx_dropped++;
            netdev_err(ndev, "tx drop\n");
        }
    }

    if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
        /* disable rx interrupt */
        priv->reg_inten &= ~(RCV_INT);
        writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
        hrtimer_cancel(&priv->tx_coalesce_timer);
        __napi_schedule(&priv->napi);
    }

    return IRQ_HANDLED;
}
Code Example #4
File: virtio_net.c Project: Tigrouzen/k1099
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__netif_rx_schedule(vi->dev, napi);
			goto again;
		}
	}

	return received;
}
Code Example #5
void rmnet_mhi_rx_cb(mhi_cb_info *cb_info)
{
	struct net_device *dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr;

	if (NULL == cb_info || NULL == cb_info->result)
		return;

	dev = (struct net_device *)cb_info->result->user_data;
	rmnet_mhi_ptr = netdev_priv(dev);

	switch (cb_info->cb_reason) {
	case MHI_CB_XFER_SUCCESS:
	case MHI_CB_MHI_ENABLED:
		break;
	case MHI_CB_MHI_DISABLED:
		return;
	default:
		pr_err("%s(): Received bad return code %d from core", __func__,
				cb_info->cb_reason);
		break;
	}
	rx_interrupts_count[rmnet_mhi_ptr->dev_index]++;

	/* Disable interrupts */
	mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
	atomic_inc(&rmnet_mhi_ptr->irq_masked);

	/* We need to start a watchdog here, not sure how to do that yet */

	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi)))
		__napi_schedule(&(rmnet_mhi_ptr->napi));
	else
		rx_interrupts_in_masked_irq[rmnet_mhi_ptr->dev_index]++;
}
Code Example #6
File: ubi32-eth.c Project: 7LK/McWRT
static irqreturn_t ubi32_eth_interrupt(int irq, void *dev_id)
{
	struct ubi32_eth_private *priv;

	struct net_device *dev = (struct net_device *)dev_id;
	BUG_ON(irq != dev->irq);

	priv = netdev_priv(dev);
	if (unlikely(!(priv->regs->int_status & priv->regs->int_mask))) {
		return IRQ_NONE;
	}

	/*
	 * Disable port interrupt
	 */
#ifdef UBICOM32_USE_NAPI
	if (napi_schedule_prep(&priv->napi)) {
		priv->regs->int_mask = 0;
		__napi_schedule(&priv->napi);
	}
#else
	priv->regs->int_mask = 0;
	tasklet_schedule(&priv->tsk);
#endif
	return IRQ_HANDLED;
}
Code Example #7
File: cpmac.c Project: AdrianHuang/uclinux-robutest
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
		       status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
Code Example #8
File: pci_eth.c Project: agamemnon886/mod
/* The RDC interrupt handler. */
static irqreturn_t pci_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pci_eth_private *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	u16 misr, status;

	/* TODO: Is it our interrupt? */
	/* TODO: Read status register and clear */
	status = ioread16(ioaddr + MISR);

	/* RX interrupt request */
	if (status & RX_INTS) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&priv->napi))) {
			/* TODO: Mask off (disable) RX interrupt */
			__napi_schedule(&priv->napi);
		}
	}

	/* TX interrupt request */
	if (status & TX_INTS)
		pci_eth_tx(dev);

	return IRQ_HANDLED;
}
Code Example #9
File: fec.c Project: CoerWatt/linux
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
			ret = IRQ_HANDLED;

			/* Disable the RX interrupt */
			if (napi_schedule_prep(&fep->napi)) {
				writel(FEC_RX_DISABLED_IMASK,
					fep->hwp + FEC_IMASK);
				__napi_schedule(&fep->napi);
			}
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
Code Example #10
static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
{
	u32 status;

	status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
	if (status == 0)
		return IRQ_NONE;

	/*
	 * Any of the eight receive units signaled RX?
	 */
	if (status & 0x00ff) {
		struct net_device *dev = nds[0];
		struct ixpdev_priv *ip = netdev_priv(dev);

		ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
		if (likely(napi_schedule_prep(&ip->napi))) {
			__netif_rx_schedule(&ip->napi);
		} else {
			printk(KERN_CRIT "ixp2000: irq while polling!!\n");
		}
	}

	/*
	 * Any of the eight transmit units signaled TXdone?
	 */
	if (status & 0xff00) {
		ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
		ixpdev_tx_complete();
	}

	return IRQ_HANDLED;
}
Code Example #11
File: hip04_eth.c Project: mhei/linux
static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct hip04_priv *priv = netdev_priv(ndev);
    struct net_device_stats *stats = &ndev->stats;
    unsigned int tx_head = priv->tx_head, count;
    struct tx_desc *desc = &priv->tx_desc[tx_head];
    dma_addr_t phys;

    smp_rmb();
    count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
    if (count == (TX_DESC_NUM - 1)) {
        netif_stop_queue(ndev);
        return NETDEV_TX_BUSY;
    }

    phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
    if (dma_mapping_error(&ndev->dev, phys)) {
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
    }

    priv->tx_skb[tx_head] = skb;
    priv->tx_phys[tx_head] = phys;
    desc->send_addr = cpu_to_be32(phys);
    desc->send_size = cpu_to_be32(skb->len);
    desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
    phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
    desc->wb_addr = cpu_to_be32(phys);
    skb_tx_timestamp(skb);

    hip04_set_xmit_desc(priv, phys);
    priv->tx_head = TX_NEXT(tx_head);
    count++;
    netdev_sent_queue(ndev, skb->len);

    stats->tx_bytes += skb->len;
    stats->tx_packets++;

    /* Ensure tx_head update visible to tx reclaim */
    smp_wmb();

    /* queue is getting full, better start cleaning up now */
    if (count >= priv->tx_coalesce_frames) {
        if (napi_schedule_prep(&priv->napi)) {
            /* disable rx interrupt and timer */
            priv->reg_inten &= ~(RCV_INT);
            writel_relaxed(DEF_INT_MASK & ~RCV_INT,
                           priv->base + PPE_INTEN);
            hrtimer_cancel(&priv->tx_coalesce_timer);
            __napi_schedule(&priv->napi);
        }
    } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
        /* cleanup not pending yet, start a new timer */
        hip04_start_tx_timer(priv);
    }

    return NETDEV_TX_OK;
}
Code Example #12
File: asfqosapi.c Project: fsl-dpacc/g_ipsec_la
static void timer_handler(unsigned long data)
{
	struct asf_qdisc *sch = (struct asf_qdisc *)data;

	if (napi_schedule_prep(&(sch->qos_napi))) {
		sch->state = SCH_BUSY;
		__napi_schedule(&(sch->qos_napi));
	}
}
Code Example #13
File: xgene_enet_main.c Project: Nukil/linux-2.6
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
    struct xgene_enet_desc_ring *rx_ring = data;

    if (napi_schedule_prep(&rx_ring->napi)) {
        disable_irq_nosync(irq);
        __napi_schedule(&rx_ring->napi);
    }

    return IRQ_HANDLED;
}
Code Example #14
File: main.c Project: Lyude/linux
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}
Code Example #15
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_ptp_private *fpp = fep->ptp_priv;
	uint int_events;
	ulong flags;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			spin_lock_irqsave(&fep->hw_lock, flags);

			if (fep->use_napi) {
				/* Disable the RX interrupt */
				if (napi_schedule_prep(&fep->napi)) {
					fec_rx_int_is_enabled(ndev, false);
					__napi_schedule(&fep->napi);
				}
			} else
				fec_enet_rx(ndev);

			spin_unlock_irqrestore(&fep->hw_lock, flags);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_TS_TIMER) {
			ret = IRQ_HANDLED;
			if (fep->ptimer_present && fpp)
				fpp->prtc++;
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
Code Example #16
File: asfqosapi.c Project: fsl-dpacc/g_ipsec_la
void qos_dequeue(struct  asf_qdisc *sch)
{
	if (sch->state != SCH_BUSY) {
		if (sch->state == SCH_TIMER_PENDING)
			del_timer(&sch->timer);

		if (napi_schedule_prep(&(sch->qos_napi))) {
			sch->state = SCH_BUSY;
			__napi_schedule(&(sch->qos_napi));
		}
	}
}
Code Example #17
static irqreturn_t Eth_interruption(int irq, void *dev_id, struct pt_regs *regs) {
    int statusword;
    struct eth_priv *priv;
    struct eth_packet *pkt = NULL;
 
    /*
     * As usual, check the "device" pointer to be sure it is
     * really interrupting.
     * Then assign "struct net_device *dev".
     */
     struct net_device *dev = (struct net_device *)dev_id;
     /* ... and check with hw if it's really ours */
 
     /* paranoid */
     if(!dev)
        return IRQ_HANDLED;
 
    /* Lock the device */
    priv = netdev_priv(dev);
    spin_lock(&priv->lock);
 
    /* retrieve statusword: real netdevices use I/O instructions */
    statusword = priv->status;
    priv->status = 0;

    if (statusword & ETH_RX_INTR) {
        /* This will disable any further "packet available"
         * interrupts and tell the networking subsystem to poll
         * the driver shortly to pick up all available packets.
         */
        Eth_rx_ints(dev, 0);
        if (napi_schedule_prep(&priv->napi)) {
            /* Reception interrupts are already off; hand the work to NAPI */
            __napi_schedule(&priv->napi);
        }
    }
    if (statusword & ETH_TX_INTR) {
        /* a transmission is over: free the skb */
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += priv->tx_packetlen;
        dev_kfree_skb(priv->skb);
    }
 
    /* Unlock the device and we are done */
    spin_unlock(&priv->lock);
    if (pkt)
        eth_release_buffer(pkt); /* Do this outside the lock! */
    return IRQ_HANDLED;
}
Code Example #18
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
Code Example #19
File: emac_main.c Project: Anjali05/linux
/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq:		irq number.
 * @dev_instance:	device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * ARC EMAC has only 1 interrupt line, and depending on bits raised in
 * STATUS register we may tell what is a reason for interrupt to fire.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
			priv->rx_missed_errors += 0x100;
			napi_schedule(&priv->napi);
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}
Code Example #20
File: nps_enet.c Project: forgivemyheart/linux
/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq:                irq number.
 * @dev_instance:       device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has 2 interrupt causes, and depending on bits raised in
 * CTRL registers we may tell what is a reason for interrupt to fire up.
 * We got one for RX and the other for TX (completion).
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;

	if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr)
		if (likely(napi_schedule_prep(&priv->napi))) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}

	return IRQ_HANDLED;
}
Code Example #21
File: hip04_eth.c Project: mhei/linux
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
    struct hip04_priv *priv;

    priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

    if (napi_schedule_prep(&priv->napi)) {
        /* disable rx interrupt */
        priv->reg_inten &= ~(RCV_INT);
        writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
        __napi_schedule(&priv->napi);
    }

    return HRTIMER_NORESTART;
}
Code Example #22
/* The RDC interrupt handler. */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 misr, status;

	/* Save MIER */
	misr = ioread16(ioaddr + MIER);
	/* Mask off RDC MAC interrupt */
	iowrite16(MSK_INT, ioaddr + MIER);
	/* Read MISR status and clear */
	status = ioread16(ioaddr + MISR);

	if (status == 0x0000 || status == 0xffff) {
		/* Restore RDC MAC interrupt */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX interrupt request */
	if (status & RX_INTS) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&lp->napi))) {
			/* Mask off RX interrupt */
			misr &= ~RX_INTS;
			__napi_schedule(&lp->napi);
		}
	}

	/* TX interrupt request */
	if (status & TX_INTS)
		r6040_tx(dev);

	/* Restore RDC MAC interrupt */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}
Code Example #23
File: dwc-xlgmac-net.c Project: AlexShiLucky/linux
static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}
Code Example #24
File: nps_enet.c Project: DenisLug/mptcp
/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq:                irq number.
 * @dev_instance:       device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has 2 interrupt causes, and depending on bits raised in
 * CTRL registers we may tell what is a reason for interrupt to fire up.
 * We got one for RX and the other for TX (completion).
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	struct nps_enet_rx_ctl rx_ctrl;
	struct nps_enet_tx_ctl tx_ctrl;

	rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);

	if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
		if (likely(napi_schedule_prep(&priv->napi))) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}

	return IRQ_HANDLED;
}
Code Example #25
static void rmnet_mhi_rx_cb(struct mhi_result *result)
{
	struct net_device *dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr;
	rmnet_mhi_ptr = result->user_data;
	dev = rmnet_mhi_ptr->dev;

	rmnet_log(MSG_VERBOSE, "Entered\n");
	rx_interrupts_count[rmnet_mhi_ptr->dev_index]++;

	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) {
		mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
		atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr);
		__napi_schedule(&(rmnet_mhi_ptr->napi));
	} else {
		rx_interrupts_in_masked_irq[rmnet_mhi_ptr->dev_index]++;
	}
	rmnet_log(MSG_VERBOSE, "Exited\n");
}
Code Example #26
File: lpc_eth.c Project: guanhe0/kernel
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
Code Example #27
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;

			/* Disable the RX interrupt */
			if (napi_schedule_prep(&fep->napi)) {
				writel(FEC_RX_DISABLED_IMASK,
					fep->hwp + FEC_IMASK);
				__napi_schedule(&fep->napi);
			}
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
Code Example #28
File: dwc-xlgmac-net.c Project: AlexShiLucky/linux
static void xlgmac_tx_timer(struct timer_list *t)
{
	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}
Code Example #29
/**
@brief		receive all @b IPC message frames in all RXQs

In a for loop,\n
1) Checks whether any REQ_ACK was received.\n
2) Receives all IPC link frames in every RXQ.\n
3) Sends a RES_ACK if there was a REQ_ACK from CP.\n
4) Checks whether any RES_ACK was received.\n

@param mld	the pointer to a mem_link_device instance
*/
void recv_sbd_ipc_frames(struct mem_link_device *mld)
{
	struct sbd_link_device *sl = &mld->sbd_link_dev;
	int i;

	for (i = 0; i < sl->num_channels; i++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, RX);

		if (unlikely(rb_empty(rb)))
			continue;

		if (likely(sipc_ps_ch(rb->ch))) {
#ifdef CONFIG_LINK_DEVICE_NAPI
			//mld->link_dev.disable_irq(&mld->link_dev);
			if (napi_schedule_prep(&rb->iod->napi))
				__napi_schedule(&rb->iod->napi);
#else
			rx_net_frames_from_rb(rb, 0);
#endif
		} else {
			rx_ipc_frames_from_rb(rb);
		}
	}
}
Code Example #30
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
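
Every example above follows the same idiom: the interrupt (or timer) context calls napi_schedule_prep() to claim the NAPI instance, masks the device's RX/TX interrupt source only if that claim succeeded, and then hands processing to softirq context with __napi_schedule(); the poll routine re-enables the interrupt once it has drained the ring. The sketch below is a minimal distillation of that pattern, not code from any driver shown here: the names my_priv, my_isr and my_poll, the placeholder RX-drain step, and the use of plain disable_irq_nosync()/enable_irq() for masking are illustrative assumptions, and it assumes a kernel recent enough that napi_complete_done() reports whether polling really completed.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	int irq;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct my_priv *priv = netdev_priv(ndev);

	/* napi_schedule_prep() returns true only if the NAPI instance is not
	 * already scheduled or running, so the mask + schedule pair executes
	 * at most once per poll cycle.
	 */
	if (napi_schedule_prep(&priv->napi)) {
		disable_irq_nosync(irq);	/* keep the line quiet while polling */
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = 0;

	/* ... drain the RX ring here, incrementing work_done up to budget ... */

	/* Unmask only when all pending work is done and NAPI agrees to stop. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		enable_irq(priv->irq);

	return work_done;
}

Masking before __napi_schedule() and unmasking only after completion is what guards against the "spurious interrupt while in poll" case that the fs_enet comments in Code Example #1 warn about and that ixpdev in Code Example #10 logs as "irq while polling!!".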