Example #1
/**
 * nps_enet_poll - NAPI poll handler.
 * @napi:       Pointer to napi_struct structure.
 * @budget:     How many frames to process on one call.
 *
 * returns:     Number of processed frames
 */
static int nps_enet_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 work_done;

	nps_enet_tx_handler(ndev);
	work_done = nps_enet_rx_handler(ndev);
	if (work_done < budget) {
		u32 buf_int_enable_value = 0;

		napi_complete(napi);

		/* set tx_done and rx_rdy bits */
		buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
		buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;

		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
				 buf_int_enable_value);

		/* If a Tx interrupt fires while interrupts are masked we
		 * lose it, because Tx interrupts are edge-triggered.
		 * Specifically, any Tx request issued between
		 * nps_enet_tx_handler() above and the interrupt re-enable
		 * would be stuck until the next Rx interrupt. The check
		 * below closes that window by re-adding ourselves to the
		 * poll list.
		 */
		if (nps_enet_is_tx_pending(priv)) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			napi_reschedule(napi);
		}
	}

	return work_done;
}
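Every handler in this collection implements the same NAPI contract: consume at most budget frames, return budget to stay on the poll list, and otherwise complete NAPI, re-arm the device interrupt, and re-check for events that raced with the re-arm (the race the comment in Example #1 describes). A minimal sketch of that contract follows; struct my_priv and the my_hw_*() helpers are placeholders, not a real driver API.

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done;

	/* Consume at most 'budget' frames from the RX ring. */
	work_done = my_hw_rx(priv, budget);

	/* Returning 'budget' keeps this context on the poll list. */
	if (work_done == budget)
		return budget;

	/* Ring drained: leave polling mode, then re-arm the interrupt. */
	napi_complete_done(napi, work_done);
	my_hw_irq_enable(priv);

	/* Close the race with an event that fired before the re-arm. */
	if (my_hw_pending(priv)) {
		my_hw_irq_disable(priv);
		napi_reschedule(napi);
	}

	return work_done;
}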
Example #2
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; pretend there is nothing to do for it so
	 * NAPI deschedules it. The interface itself will be turned off
	 * in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
Example #3
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size).
	 */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;
}
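cpmac_poll's main loop is a textbook descriptor-ring handshake: the NIC owns a descriptor while CPMAC_OWN is set and clears the bit once the frame is written, so software may only consume descriptors whose OWN bit is clear. Reduced to its core (my_rx_one() is a hypothetical stand-in for cpmac_rx_one(), not the driver's real API), the consume loop looks like this:

	/* The NIC owns a descriptor until it clears the OWN bit. */
	while (!(desc->dataflags & CPMAC_OWN) && received < budget) {
		struct sk_buff *skb = my_rx_one(priv, desc);

		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;	/* descriptors form a circular list */
	}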
Example #4
static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
						   napi_tx);
	struct net_device *dev = fep->ndev;
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;
	int has_tx_work = 0;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	/* Clear TX status bits for NAPI */
	(*fep->ops->napi_clear_tx_event)(dev);

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
		has_tx_work = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	if (!has_tx_work) {
		napi_complete(napi);
		(*fep->ops->napi_enable_tx)(dev);
	}

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);

	if (has_tx_work)
		return budget;
	return 0;
}
Example #5
/* NAPI poll handler: receive packets */
static int fec_rx_poll(struct napi_struct *napi, int budget)
{
	struct  fec_enet_private *fep =
		container_of(napi, struct fec_enet_private, napi);
	struct net_device *ndev = napi->dev;
	struct  fec_ptp_private *fpp = fep->ptp_priv;
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int pkt_received = 0;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;

	if (fep->use_napi)
		WARN_ON(!budget);

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			dev_err(&ndev->dev, "FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8 *)__va(bdp->cbd_bufaddr);

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&ndev->dev, bdp->cbd_bufaddr,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			dev_err(&ndev->dev,
			"%s: Memory squeeze, dropping packet.\n", ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			/* handle IEEE 1588 message timestamp */
			if (fep->ptimer_present)
				fec_ptp_store_rxstamp(fpp, skb, bdp);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
		}

		bdp->cbd_bufaddr = dma_map_single(&ndev->dev, data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;
#ifdef CONFIG_ENHANCED_BD
		bdp->cbd_esc = BD_ENET_RX_INT;
		bdp->cbd_prot = 0;
		bdp->cbd_bdu = 0;
#endif

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	if (pkt_received < budget) {
		napi_complete(napi);
		fec_rx_int_is_enabled(ndev, true);
	}

	return pkt_received;
}
Example #6
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
			    struct mlx4_en_tx_ring *ring,
			    int index, u64 timestamp,
			    int napi_mode)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_rx_alloc frame = {
		.page = tx_info->page,
		.dma = tx_info->map0_dma,
	};

	if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
		dma_unmap_page(priv->ddev, tx_info->map0_dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(tx_info->page);
	}

	return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
		 ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						0, 0 /* Non-NAPI caller */);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (ring->tx_queue)
		netdev_tx_reset_queue(ring->tx_queue);

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

bool mlx4_en_process_tx_cq(struct net_device *dev,
			   struct mlx4_en_cq *cq, int napi_budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
	struct mlx4_cqe *cqe;
	u16 index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 packets = 0;
	u32 bytes = 0;
	int factor = priv->cqe_factor;
	int done = 0;
	int budget = priv->tx_work_limit;
	u32 last_nr_txbb;
	u32 ring_cons;

	if (unlikely(!priv->port_up))
		return true;

	netdev_txq_bql_complete_prefetchw(ring->tx_queue);

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
	ring_cons = READ_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size) && (done < budget)) {
		u16 new_index;

		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		dma_rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;

			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       cqe_err->vendor_err_syndrome,
			       cqe_err->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			u64 timestamp = 0;

			txbbs_skipped += last_nr_txbb;
			ring_index = (ring_index + last_nr_txbb) & size_mask;

			if (unlikely(ring->tx_info[ring_index].ts_requested))
				timestamp = mlx4_en_get_cqe_ts(cqe);

			/* free next descriptor */
			last_nr_txbb = ring->free_tx_desc(
					priv, ring, ring_index,
					timestamp, napi_budget);

			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring_cons + txbbs_stamp) &
						ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while ((++done < budget) && (ring_index != new_index));

		++cons_index;
		index = cons_index & size_mask;
		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();

	/* we want to dirty this cache line once */
	WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

	if (cq->type == TX_XDP)
		return done < budget;

	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/* Wake up the Tx queue if it was stopped and the ring is not full. */
	if (netif_tx_queue_stopped(ring->tx_queue) &&
	    !mlx4_en_is_tx_ring_full(ring)) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
	}

	return done < budget;
}
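mlx4_en_process_tx_cq() above reports completed work to Byte Queue Limits with netdev_tx_completed_queue(), and wakes the queue only when it was stopped and the ring has room. BQL only works if the transmit path reports queued bytes symmetrically; a minimal sketch of that xmit-side half, with my_ring_full() as a hypothetical stand-in for mlx4_en_is_tx_ring_full():

	/* Transmit side of BQL accounting (sketch): report every queued
	 * skb so netdev_tx_completed_queue() above has bytes to match.
	 */
	netdev_tx_sent_queue(ring->tx_queue, skb->len);
	if (my_ring_full(ring))
		netif_tx_stop_queue(ring->tx_queue);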

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool clean_complete;

	clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
	if (!clean_complete)
		return budget;

	napi_complete(napi);
	mlx4_en_arm_cq(priv, cq);

	return 0;
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + (index << LOG_TXBB_SIZE);
}
Example #7
static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_pearl_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ps))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ps->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (!(descw & QTN_TXDONE_MASK)) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				qtnf_update_rx_stats(ndev, skb);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(napi, skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace the processed buffer with a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = pearl_skb2rbd_attach(ps, w_idx);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(ps);
	}

	return processed;
}
Example #8
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (budget <= 0)
		return received;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* Clear RX status bits for NAPI */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_fifo_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}
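The rx_copybreak branch above embodies a common small-packet optimization: frames at or below the copybreak threshold are copied into a freshly allocated small skb so the full-size buffer can stay in the RX ring, while larger frames hand the original buffer up and a new full-size buffer takes its place. Sketched in isolation, assuming the same skb/skbn roles as the driver above (the kernel's swap() macro replaces the three-variable swap):

	if (pkt_len <= fpi->rx_copybreak) {
		/* +2 so the IP header lands on a 4-byte boundary */
		skbn = netdev_alloc_skb(dev, pkt_len + 2);
		if (skbn) {
			skb_reserve(skbn, 2);
			skb_copy_from_linear_data(skb, skbn->data, pkt_len);
			/* deliver the copy; the original stays in the ring */
			swap(skb, skbn);
		}
	}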
Example #9
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " napi_reschedule successed\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received;		/* not all work done */
}
Example #10
static int ftmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
	struct net_device *netdev = priv->netdev;
	unsigned int status;
	bool completed = true;
	int rx = 0;

	status = ioread32(priv->base + FTMAC100_OFFSET_ISR);

	if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
		/*
		 * FTMAC100_INT_RPKT_FINISH:
		 *	RX DMA has received packets into RX buffer successfully
		 *
		 * FTMAC100_INT_NORXBUF:
		 *	RX buffer unavailable
		 */
		bool retry;

		do {
			retry = ftmac100_rx_packet(priv, &rx);
		} while (retry && rx < budget);

		if (retry && rx == budget)
			completed = false;
	}

	if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
		/*
		 * FTMAC100_INT_XPKT_OK:
		 *	packet transmitted to ethernet successfully
		 *
		 * FTMAC100_INT_XPKT_LOST:
		 *	packet transmitted to ethernet lost due to late
		 *	collision or excessive collision
		 */
		ftmac100_tx_complete(priv);
	}

	if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
		      FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
		if (net_ratelimit())
			netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
				    status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
				    status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
				    status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
				    status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");

		if (status & FTMAC100_INT_NORXBUF) {
			/* RX buffer unavailable */
			netdev->stats.rx_over_errors++;
		}

		if (status & FTMAC100_INT_RPKT_LOST) {
			/* received packet lost due to RX FIFO full */
			netdev->stats.rx_fifo_errors++;
		}

		if (status & FTMAC100_INT_PHYSTS_CHG) {
			/* PHY link status change */
			mii_check_link(&priv->mii);
		}
	}

	if (completed) {
		/* stop polling */
		napi_complete(napi);
		ftmac100_enable_all_int(priv);
	}

	return rx;
}
Example #11
/* Get packets from the host vring */
static int cfv_rx_poll(struct napi_struct *napi, int quota)
{
	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
	int rxcnt = 0;
	int err = 0;
	void *buf;
	struct sk_buff *skb;
	struct vringh_kiov *riov = &cfv->ctx.riov;
	unsigned int skb_len;

	do {
		skb = NULL;

		/* Put the previous iovec back on the used ring and
		 * fetch a new iovec if we have processed all elements.
		 */
		if (riov->i == riov->used) {
			if (cfv->ctx.head != USHRT_MAX) {
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
				cfv->ctx.head = USHRT_MAX;
			}

			err = vringh_getdesc_kern(
				cfv->vr_rx,
				riov,
				NULL,
				&cfv->ctx.head,
				GFP_ATOMIC);

			if (err <= 0)
				goto exit;
		}

		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
		/* TODO: Add check on valid buffer address */

		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
		if (unlikely(err))
			goto exit;

		/* Push received packet up the stack. */
		skb_len = skb->len;
		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfv->ndev;
		err = netif_receive_skb(skb);
		if (unlikely(err)) {
			++cfv->ndev->stats.rx_dropped;
		} else {
			++cfv->ndev->stats.rx_packets;
			cfv->ndev->stats.rx_bytes += skb_len;
		}

		++riov->i;
		++rxcnt;
	} while (rxcnt < quota);

	++cfv->stats.rx_napi_resched;
	goto out;

exit:
	switch (err) {
	case 0:
		++cfv->stats.rx_napi_complete;

		/* Really out of packets? (stolen from virtio_net) */
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
		    napi_schedule_prep(napi)) {
			vringh_notify_disable_kern(cfv->vr_rx);
			__napi_schedule(napi);
		}
		break;

	case -ENOMEM:
		++cfv->stats.rx_nomem;
		dev_kfree_skb(skb);
		/* Stop NAPI poll on OOM, we hope to be polled later */
		napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
		break;

	default:
		/* We're doomed, any modem fault is fatal */
		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
		napi_complete(napi);
		vringh_notify_disable_kern(cfv->vr_rx);
		netif_carrier_off(cfv->ndev);
		break;
	}
out:
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
	return rxcnt;
}
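cfv_rx_poll re-arms with the idiom its own comment credits to virtio_net: complete NAPI, re-enable the notification source, and if the enable reports more pending work, atomically re-enter polling with napi_schedule_prep()/__napi_schedule() so no wakeup is lost. In generic form, with my_notify_enable()/my_notify_disable() standing in for vringh_notify_enable_kern()/vringh_notify_disable_kern():

	napi_complete(napi);
	/* my_notify_enable() returns false if work arrived before
	 * notifications were re-armed; napi_schedule_prep() then wins
	 * the NAPI ownership race so we can keep polling.
	 */
	if (unlikely(!my_notify_enable(priv)) && napi_schedule_prep(napi)) {
		my_notify_disable(priv);
		__napi_schedule(napi);
	}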
Example #12
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu(desc->pkt_len);
		err = be32_to_cpu(desc->pkt_err);

		if (len == 0) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(&ndev->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;

		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete(napi);
done:
	/* clean up tx descriptors and start a new timer if necessary */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}
Example #13
/* This code is for batched host mode */
static int sn_poll(struct napi_struct *napi, int budget)
{
	struct sn_queue *rx_queue = container_of(napi, struct sn_queue, napi);

	int poll_cnt = 0;
	int ret;
	int cnt;
	int i;

	/* each dequeued element is a (cookie, paddr, len) triplet */
	void *objs[MAX_RX_BATCH * 3];

do_more:
	/* bound the burst by remaining budget and by objs[] capacity */
	ret = llring_dequeue_burst(rx_queue->sn_to_drv, objs,
			min(budget - poll_cnt, MAX_RX_BATCH) * 3);
	if (unlikely(ret % 3)) {
		log_err("not a multiple of 3 (%d)!!!\n", ret);
		return poll_cnt;
	}

	cnt = ret / 3;

	if (cnt == 0) {
		napi_complete(napi);
		return poll_cnt;
	}

	for (i = 0; i < cnt; i++) {
		struct sk_buff *skb;

		void *cookie;
		void *vaddr;
		int packet_len;

		cookie = objs[i * 3];
		vaddr = phys_to_virt((phys_addr_t) objs[i * 3 + 1]);
		packet_len = (uint64_t) objs[i * 3 + 2];

		objs[i] = cookie;

		skb = netdev_alloc_skb(napi->dev, packet_len);
		if (unlikely(!skb)) {
			log_err("netdev_alloc_skb() failed\n");
			continue;
		}

		memcpy(skb_put(skb, packet_len), vaddr, packet_len);

		skb_record_rx_queue(skb, rx_queue->queue_id);
		skb->protocol = eth_type_trans(skb, napi->dev);
		skb->ip_summed = CHECKSUM_NONE;

		rx_queue->stats.packets++;
		rx_queue->stats.bytes += skb->len;

		netif_receive_skb(skb);
	}

	ret = llring_enqueue_burst(rx_queue->drv_to_sn, objs, cnt);
	if (unlikely(ret != cnt)) {
		/* It should never happen but if it does, I am f****d up */
		log_err("free buffer queue overflow!\n");
	}

	poll_cnt += cnt;

	if (poll_cnt >= budget)
		return poll_cnt;

	goto do_more;
}
Example #14
static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
{
	int received_packets = 0;
	struct net_device *dev = napi->dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr =
			*(struct rmnet_mhi_private **)netdev_priv(dev);
	enum MHI_STATUS res = MHI_STATUS_reserved;
	bool should_reschedule = true;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	uintptr_t *cb_ptr;

	rmnet_log(MSG_VERBOSE, "Entered\n");
	while (received_packets < budget) {
		struct mhi_result *result =
		      mhi_poll(rmnet_mhi_ptr->rx_client_handle);
		if (result->transaction_status == MHI_STATUS_DEVICE_NOT_READY) {
			continue;
		} else if (result->transaction_status != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				  "mhi_poll failed, error is %d\n",
				  result->transaction_status);
			break;
		}

		/* Nothing more to read, or out of buffers in MHI layer */
		if (unlikely(!result->payload_buf ||
						!result->bytes_xferd)) {
			should_reschedule = false;
			break;
		}

		skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "No RX buffers to match");
			break;
		}

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = (dma_addr_t)(uintptr_t)(*cb_ptr);

		/* Sanity check, ensuring that this is actually the buffer */
		if (unlikely(dma_addr != result->payload_buf)) {
			rmnet_log(MSG_CRITICAL,
				  "Buf mismatch, expected 0x%lx, got 0x%lx",
					(uintptr_t)dma_addr,
					(uintptr_t)result->payload_buf);
			break;
		}

		dma_unmap_single(&(dev->dev), dma_addr,
				(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
				 DMA_FROM_DEVICE);
		skb_put(skb, result->bytes_xferd);

		skb->dev = dev;
		skb->protocol = rmnet_mhi_ip_type_trans(skb);

		netif_receive_skb(skb);

		/* Statistics */
		received_packets++;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += result->bytes_xferd;

		/* Need to allocate a new buffer instead of this one */
		skb = alloc_skb(rmnet_mhi_ptr->mru, GFP_ATOMIC);

		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "Can't allocate a new RX buffer for MHI");
			break;
		}

		skb_reserve(skb, MHI_RX_HEADROOM);

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = dma_map_single(&(dev->dev), skb->data,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
					DMA_FROM_DEVICE);
		*cb_ptr = (uintptr_t)dma_addr;

		if (unlikely(dma_mapping_error(&(dev->dev), dma_addr))) {
			rmnet_log(MSG_CRITICAL,
				  "DMA mapping error in polling function");
			dev_kfree_skb_irq(skb);
			break;
		}

		res = mhi_queue_xfer(
			rmnet_mhi_ptr->rx_client_handle,
			(uintptr_t)dma_addr, rmnet_mhi_ptr->mru, MHI_EOT);

		if (unlikely(MHI_STATUS_SUCCESS != res)) {
			rmnet_log(MSG_CRITICAL,
				"mhi_queue_xfer failed, error %d", res);
			dma_unmap_single(&(dev->dev), dma_addr,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
					DMA_FROM_DEVICE);

			dev_kfree_skb_irq(skb);
			break;
		}

		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);

	} /* while (received_packets < budget) or any other error */

	napi_complete(napi);

	/* We got a NULL descriptor back */
	if (should_reschedule == false) {
		if (rmnet_mhi_ptr->irq_masked_cntr) {
			mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
			--rmnet_mhi_ptr->irq_masked_cntr;
		}
	} else {
		if (received_packets == budget)
			rx_napi_budget_overflow[rmnet_mhi_ptr->dev_index]++;
		napi_reschedule(napi);
	}

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] =
		min((unsigned long)received_packets,
		    rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index]);

	rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index] =
		max((unsigned long)received_packets,
		    rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index]);

	rmnet_log(MSG_VERBOSE, "Exited, polled %d pkts\n", received_packets);
	return received_packets;
}
Example #15
/* TODO: No error handling yet */
static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
{
	int received_packets = 0;
	struct net_device *dev = napi->dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr = netdev_priv(dev);
	MHI_STATUS res = MHI_STATUS_reserved;
	bool should_reschedule = true;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	uintptr_t *cb_ptr;

	/* Reset the watchdog? */

	while (received_packets < budget) {
		mhi_result *result =
		      mhi_poll(rmnet_mhi_ptr->rx_client_handle);

		if (result->transaction_status == MHI_STATUS_DEVICE_NOT_READY) {
			continue;
		} else if (result->transaction_status != MHI_STATUS_SUCCESS) {
			/* TODO: Handle error */
			pr_err("%s: mhi_poll failed, error is %d",
			       __func__, result->transaction_status);
			break;
		}

		/* Nothing more to read, or out of buffers in MHI layer */
		if (unlikely(0 == result->payload_buf || 0 == result->bytes_xferd)) {
			should_reschedule = false;
			break;
		}

		/* Assumption: the returned buffer is guaranteed to be the
		 * first buffer that was allocated for RX, so we just
		 * dequeue the head.
		 */

		/* Take the first one */
		skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		if (unlikely(0 == skb)) {
			/* TODO: This shouldn't happen, we had a guard above */
			pr_err("%s: No RX buffers to match", __func__);
			break;
		}

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = (dma_addr_t)(uintptr_t)(*cb_ptr);

		/* Sanity check, ensuring that this is actually the buffer */
		if (unlikely((uintptr_t)dma_addr != (uintptr_t)result->payload_buf)) {
			/* TODO: Handle error */
			pr_err("%s: Unexpected physical address mismatch, expected 0x%lx, got 0x%lx",
			       __func__, (uintptr_t)dma_addr, (uintptr_t)result->payload_buf);
			break;
		}

		dma_unmap_single(&(dev->dev), dma_addr,
				(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
				 DMA_FROM_DEVICE);
		skb_put(skb, result->bytes_xferd);

		skb->dev = dev;
		skb->protocol = rmnet_mhi_ip_type_trans(skb);

		netif_receive_skb(skb);

		/* Statistics */
		received_packets++;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += result->bytes_xferd;

		/* Need to allocate a new buffer instead of this one
		   (TODO: Maybe we can do it @ the end?)
		 */
		skb = alloc_skb(rmnet_mhi_ptr->mru, GFP_ATOMIC);

		if (unlikely(0 == skb)) {
			/* TODO: Handle error */
			pr_err("%s: Can't allocate a new RX buffer for MHI",
			       __func__);
			break;
		}

		skb_reserve(skb, MHI_RX_HEADROOM);

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = dma_map_single(&(dev->dev), skb->data,
					 rmnet_mhi_ptr->mru - MHI_RX_HEADROOM,
					  DMA_FROM_DEVICE);
		*cb_ptr = (uintptr_t)dma_addr;

		if (unlikely(dma_mapping_error(&(dev->dev), dma_addr))) {
			pr_err("%s: DMA mapping error in polling function",
			       __func__);
			/* TODO: Handle error */
			dev_kfree_skb_irq(skb);
			break;
		}

		/* TODO: What do we do in such a scenario in
			which we can't allocate a RX buffer? */
		if (unlikely(DMA_RANGE_CHECK(dma_addr,
				    rmnet_mhi_ptr->mru,
				    MHI_DMA_MASK))) {
			pr_err("%s: RX buffer is out of MHI DMA address range",
			       __func__);
			dma_unmap_single(&(dev->dev), dma_addr,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
							 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(skb);
			break;
		}

		res = mhi_queue_xfer(
			rmnet_mhi_ptr->rx_client_handle,
			(uintptr_t)dma_addr, rmnet_mhi_ptr->mru, 0, 0);

		if (unlikely(MHI_STATUS_SUCCESS != res)) {
			/* TODO: Handle error */
			pr_err("%s: mhi_queue_xfer failed, error %d",
			       __func__, res);
			dma_unmap_single(&(dev->dev), dma_addr,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
							 DMA_FROM_DEVICE);

			dev_kfree_skb_irq(skb);
			break;
		}

		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);

	} /* while (received_packets < budget) or any other error */

	napi_complete(napi);

	/* We got a NULL descriptor back */
	if (false == should_reschedule) {
		if (atomic_read(&rmnet_mhi_ptr->irq_masked)) {
			atomic_dec(&rmnet_mhi_ptr->irq_masked);
			mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
		}
	} else {
		if (received_packets == budget)
			rx_napi_budget_overflow[rmnet_mhi_ptr->dev_index]++;
		napi_reschedule(napi);
	}

	/* Start a watchdog? */

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] =
		min((unsigned long)received_packets,
		    rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index]);

	rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index] =
		max((unsigned long)received_packets,
		    rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index]);

	return received_packets;
}
Example #16
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example #17
/******************************************************************************
 * struct napi_struct functions
 *****************************************************************************/
static int ftmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
	struct net_device *netdev = priv->netdev;
	unsigned int status;
	bool completed = true;
	int rx = 0;

	status = ioread32(priv->base + FTMAC100_OFFSET_ISR);

	if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
		/*
		 * FTMAC100_INT_RPKT_FINISH:
		 *	RX DMA has received packets into RX buffer successfully
		 *
		 * FTMAC100_INT_NORXBUF:
		 *	RX buffer unavailable
		 */
		bool retry;

		do {
			retry = ftmac100_rx_packet(priv, &rx);
		} while (retry && rx < budget);

		if (retry && rx == budget)
			completed = false;
	}

	if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
		/*
		 * FTMAC100_INT_XPKT_OK:
		 *	packet transmitted to ethernet successfully
		 *
		 * FTMAC100_INT_XPKT_LOST:
		 *	packet transmitted to ethernet lost due to late
		 *	collision or excessive collision
		 */
		ftmac100_tx_complete(priv);
	}

	if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
		      FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
		if (net_ratelimit())
			netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
				    status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
				    status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
				    status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
				    status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");

		if (status & FTMAC100_INT_NORXBUF) {
			/* RX buffer unavailable */
			netdev->stats.rx_over_errors++;
		}

		if (status & FTMAC100_INT_RPKT_LOST) {
			/* received packet lost due to RX FIFO full */
			netdev->stats.rx_fifo_errors++;
		}

		if (status & FTMAC100_INT_PHYSTS_CHG) {
			/* PHY link status change */
			mii_check_link(&priv->mii);
		}
	}

	if (completed) {
		/* stop polling */
		napi_complete(napi);
		ftmac100_enable_all_int(priv);
	}

	return rx;
}
Example #18
void ieee80211_napi_complete(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	napi_complete(&local->napi);
}
Example #19
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				memcpy(skb_put(skb, frame_len),
				       frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}
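fjes_poll does not unmask its interrupt the moment the queue runs dry; it keeps rescheduling itself until no frame has arrived for three jiffies, amortizing interrupt cost across bursts. Its open-coded jiffies subtraction can be expressed with the standard wraparound-safe helper; a sketch, with my_irq_unmask() as a hypothetical stand-in for the fjes_hw_set_irqmask() call:

	/* Stay in polling mode until the link has been idle for 3 jiffies. */
	if (time_before(jiffies, adapter->rx_last_jiffies + 3))
		napi_reschedule(napi);
	else
		my_irq_unmask(adapter);

time_before() (from linux/jiffies.h) handles jiffies wraparound, which is why it is preferred over open-coded signed subtraction.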
Example #20
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	rx_ring = &ag->rx_ring;
	if (rx_ring->buf[rx_ring->dirty % AG71XX_RX_RING_SIZE].skb == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
			dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

 more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
			dev->name, rx_done, tx_done, limit);
	return rx_done;

 oom:
	if (netif_msg_rx_err(ag))
		printk(KERN_DEBUG "%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
Example #21
static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
{
	int received_packets = 0;
	struct net_device *dev = napi->dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr =
			*(struct rmnet_mhi_private **)netdev_priv(dev);
	enum MHI_STATUS res = MHI_STATUS_reserved;
	bool should_reschedule = true;
	struct sk_buff *skb;
	struct mhi_skb_priv *skb_priv;
	int r, cur_mru;

	rmnet_log(MSG_VERBOSE, "Entered\n");
	rmnet_mhi_ptr->mru = mru;
	while (received_packets < budget) {
		struct mhi_result *result =
		      mhi_poll(rmnet_mhi_ptr->rx_client_handle);
		if (result->transaction_status == MHI_STATUS_DEVICE_NOT_READY) {
			rmnet_log(MSG_INFO,
				  "Transaction status not ready, continuing\n");
			break;
		} else if (result->transaction_status != MHI_STATUS_SUCCESS &&
			   result->transaction_status != MHI_STATUS_OVERFLOW) {
			rmnet_log(MSG_CRITICAL,
				  "mhi_poll failed, error %d\n",
				  result->transaction_status);
			break;
		}

		/* Nothing more to read, or out of buffers in MHI layer */
		if (unlikely(!result->buf_addr || !result->bytes_xferd)) {
			rmnet_log(MSG_CRITICAL,
				  "Not valid buff not rescheduling\n");
			should_reschedule = false;
			break;
		}

		skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "No RX buffers to match");
			break;
		}

		skb_priv = (struct mhi_skb_priv *)(skb->cb);

		/* Setup the tail to the end of data */
		skb_put(skb, result->bytes_xferd);

		skb->dev = dev;
		skb->protocol = rmnet_mhi_ip_type_trans(skb);

		if (result->transaction_status == MHI_STATUS_OVERFLOW)
			r = rmnet_mhi_process_fragment(rmnet_mhi_ptr, skb, 1);
		else
			r = rmnet_mhi_process_fragment(rmnet_mhi_ptr, skb, 0);
		if (r) {
			rmnet_log(MSG_CRITICAL,
				  "Failed to process fragmented packet ret %d",
				   r);
			BUG();
		}

		/* Statistics */
		received_packets++;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += result->bytes_xferd;

		/* Need to allocate a new buffer instead of this one */
		cur_mru = rmnet_mhi_ptr->mru;
		skb = alloc_skb(cur_mru, GFP_ATOMIC);
		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "Can't allocate a new RX buffer for MHI");
			break;
		}
		skb_priv = (struct mhi_skb_priv *)(skb->cb);
		skb_priv->dma_size = cur_mru;

		rmnet_log(MSG_VERBOSE,
		  "Allocated SKB of MRU 0x%x, SKB_DATA 0%p SKB_LEN 0x%x\n",
				rmnet_mhi_ptr->mru, skb->data, skb->len);
		/* Reserve headroom, tail == data */
		skb_reserve(skb, MHI_RX_HEADROOM);
		skb_priv->dma_size -= MHI_RX_HEADROOM;
		skb_priv->dma_addr = 0;

		rmnet_log(MSG_VERBOSE,
			 "Mapped SKB %p to DMA Addr 0x%lx, DMA_SIZE: 0x%lx\n",
			  skb->data,
			  (uintptr_t)skb->data,
			  (uintptr_t)skb_priv->dma_size);

		res = mhi_queue_xfer(
			rmnet_mhi_ptr->rx_client_handle,
			skb->data, skb_priv->dma_size, MHI_EOT);

		if (unlikely(MHI_STATUS_SUCCESS != res)) {
			rmnet_log(MSG_CRITICAL,
				"mhi_queue_xfer failed, error %d", res);
			dev_kfree_skb_irq(skb);
			break;
		}
		skb_queue_tail(&rmnet_mhi_ptr->rx_buffers, skb);
	} /* while (received_packets < budget) or any other error */

	napi_complete(napi);

	/* We got a NULL descriptor back */
	if (should_reschedule == false) {
		if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) {
			atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr);
			mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
		}
	} else {
		if (received_packets == budget)
			rx_napi_budget_overflow[rmnet_mhi_ptr->dev_index]++;
		napi_reschedule(napi);
	}

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] =
		min((unsigned long)received_packets,
		    rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index]);

	rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index] =
		max((unsigned long)received_packets,
		    rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index]);

	rmnet_log(MSG_VERBOSE, "Exited, polled %d pkts\n", received_packets);
	return received_packets;
}
Example #22
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:
			netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
				   desc->status, desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on the RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on the RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}
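
hss_hdlc_poll above closes the classic completion race by re-enabling the queue IRQ after napi_complete() and then re-checking the queue: a frame that arrived while the IRQ was still off is picked up by napi_reschedule() instead of being stranded. The same idiom reduced to its skeleton; my_rx_process(), my_rx_pending() and the IRQ helpers are hypothetical driver hooks:

/* Sketch only: the generic "complete, re-enable IRQ, recheck" exit. */
static int my_poll(struct napi_struct *napi, int budget)
{
	int done = my_rx_process(napi, budget);	/* hypothetical RX loop */

	if (done < budget) {
		napi_complete(napi);
		my_irq_enable();		/* hypothetical */
		/* A frame may have landed between the last descriptor
		 * check and the IRQ enable; reschedule to catch it.
		 */
		if (my_rx_pending() && napi_reschedule(napi))
			my_irq_disable();	/* hypothetical */
	}
	return done;
}
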
Example #23
static int sprdwl_rx_handler(struct napi_struct *napi, int budget)
{
	struct sprdwl_priv *priv = container_of(napi, struct sprdwl_priv, napi);
	struct sblock blk;
	struct sk_buff *skb;
	int ret, work_done;
	u16 decryp_data_len = 0;
	struct wlan_sblock_recv_data *data;
	uint32_t length = 0;
#ifdef CONFIG_SPRDWL_FW_ZEROCOPY
	u8 offset = 0;
#endif
	for (work_done = 0; work_done < budget; work_done++) {
		ret = sblock_receive(WLAN_CP_ID, WLAN_SBLOCK_CH, &blk, 0);
		if (ret) {
			dev_dbg(&priv->ndev->dev, "no more sblock to read\n");
			break;
		}
#ifdef CONFIG_SPRDWL_FW_ZEROCOPY
		offset = *(u8 *)blk.addr;
		length = blk.length - 2 - offset;
#else
		length = blk.length;
#endif
		/* reserve NET_IP_ALIGN so the IP header ends up aligned */
		skb = dev_alloc_skb(length + NET_IP_ALIGN);
		if (!skb) {
			dev_err(&priv->ndev->dev,
				"Failed to allocate skbuff!\n");
			priv->ndev->stats.rx_dropped++;
			goto rx_failed;
		}
#ifdef CONFIG_SPRDWL_FW_ZEROCOPY
		data = (struct wlan_sblock_recv_data *)(blk.addr + 2 + offset);
#else
		data = (struct wlan_sblock_recv_data *)blk.addr;
#endif

		if (data->is_encrypted == 1) {
			if (priv->connect_status == SPRDWL_CONNECTED &&
			    priv->cipher_type == SPRDWL_CIPHER_WAPI &&
			    priv->key_len[GROUP][priv->key_index[GROUP]] != 0 &&
			    priv->key_len[PAIRWISE][priv->key_index[PAIRWISE]] != 0) {
				u8 snap_header[6] = { 0xaa, 0xaa, 0x03,
						      0x00, 0x00, 0x00 };
				skb_reserve(skb, NET_IP_ALIGN);
				decryp_data_len = wlan_rx_wapi_decryption(priv,
						   (u8 *)&data->u2.encrypt,
						   data->u1.encrypt.header_len,
						   (length -
						   sizeof(data->is_encrypted) -
						   sizeof(data->u1) -
						   data->u1.encrypt.header_len),
						   (skb->data + 12));
				if (decryp_data_len == 0) {
					dev_err(&priv->ndev->dev,
						"Failed to decrypt WAPI data!\n");
					priv->ndev->stats.rx_dropped++;
					dev_kfree_skb(skb);
					goto rx_failed;
				}
				if (memcmp((skb->data + 12), snap_header,
					   sizeof(snap_header)) == 0) {
					skb_reserve(skb, 6);
					/* copy the eth addresses from the
					 * eth header, but not the eth type
					 */
					memcpy(skb->data,
					       data->u2.encrypt.mac_header.addr1,
					       6);
					memcpy(skb->data + 6,
					       data->u2.encrypt.mac_header.addr2,
					       6);
					skb_put(skb, decryp_data_len + 6);
				} else {
					/* copy the eth header */
					memcpy(skb->data,
					       data->u2.encrypt.mac_header.addr3,
					       6);
					memcpy(skb->data + 6,
					       data->u2.encrypt.mac_header.addr2,
					       6);
					skb_put(skb, decryp_data_len + 12);
				}
			} else {
				dev_err(&priv->ndev->dev,
					"wrong encryption data!\n");
				priv->ndev->stats.rx_dropped++;
				dev_kfree_skb(skb);
				goto rx_failed;
			}
		} else if (data->is_encrypted == 0) {
			skb_reserve(skb, NET_IP_ALIGN);
			/* plain frame: copy the payload, excluding the
			 * is_encrypted flag and the u1 header
			 */
			memcpy(skb->data, (u8 *)&data->u2,
			       (length - sizeof(data->is_encrypted) -
				sizeof(data->u1)));
			skb_put(skb,
				(length - sizeof(data->is_encrypted) -
				 sizeof(data->u1)));
		} else {
			dev_err(&priv->ndev->dev,
				"wrong data fromat recieved!\n");
			priv->ndev->stats.rx_dropped++;
			dev_kfree_skb(skb);
			goto rx_failed;
		}

#ifdef DUMP_RECEIVE_PACKET
		print_hex_dump(KERN_DEBUG, "receive packet: ",
			       DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len,
			       0);
#endif
		skb->dev = priv->ndev;
		skb->protocol = eth_type_trans(skb, priv->ndev);
		/* CHECKSUM_UNNECESSARY not supported by our hardware */
		/* skb->ip_summed = CHECKSUM_UNNECESSARY; */

		priv->ndev->stats.rx_packets++;
		priv->ndev->stats.rx_bytes += skb->len;

		napi_gro_receive(napi, skb);

rx_failed:
		ret = sblock_release(WLAN_CP_ID, WLAN_SBLOCK_CH, &blk);
		if (ret)
			dev_err(&priv->ndev->dev,
				"Failed to release sblock (%d)!\n", ret);
	}
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}
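
sprdwl_rx_handler copies each frame out of the shared sblock into a fresh skb, reserving NET_IP_ALIGN so the IP header ends up aligned once the 14-byte Ethernet header is stripped. On a recent kernel (v4.13+ for skb_put_data()) the same copy-receive path can be written with two helpers; a sketch under that assumption, with buf/len standing in for the sblock payload:

/* Sketch only: copy-receive into an IP-aligned skb. */
static int my_rx_one(struct net_device *ndev, const void *buf,
		     unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, len);

	if (!skb) {
		ndev->stats.rx_dropped++;
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);	/* copy payload, advance tail */
	skb->protocol = eth_type_trans(skb, ndev);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += len;
	return netif_receive_skb(skb);
}
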
Example #24
static int c_can_poll(struct napi_struct *napi, int quota)
{
	u16 irqstatus;
	int lec_type = 0;
	int work_done = 0;
	struct net_device *dev = napi->dev;
	struct c_can_priv *priv = netdev_priv(dev);

	irqstatus = priv->irqstatus;
	if (!irqstatus)
		goto end;

	/* status events have the highest priority */
	if (irqstatus == STATUS_INTERRUPT) {
		priv->current_status = priv->read_reg(priv,
					C_CAN_STS_REG);

		/* handle Tx/Rx events */
		if (priv->current_status & STATUS_TXOK)
			priv->write_reg(priv, C_CAN_STS_REG,
					priv->current_status & ~STATUS_TXOK);

		if (priv->current_status & STATUS_RXOK)
			priv->write_reg(priv, C_CAN_STS_REG,
					priv->current_status & ~STATUS_RXOK);

		/* handle state changes */
		if ((priv->current_status & STATUS_EWARN) &&
				(!(priv->last_status & STATUS_EWARN))) {
			netdev_dbg(dev, "entered error warning state\n");
			work_done += c_can_handle_state_change(dev,
						C_CAN_ERROR_WARNING);
		}
		if ((priv->current_status & STATUS_EPASS) &&
				(!(priv->last_status & STATUS_EPASS))) {
			netdev_dbg(dev, "entered error passive state\n");
			work_done += c_can_handle_state_change(dev,
						C_CAN_ERROR_PASSIVE);
		}
		if ((priv->current_status & STATUS_BOFF) &&
				(!(priv->last_status & STATUS_BOFF))) {
			netdev_dbg(dev, "entered bus off state\n");
			work_done += c_can_handle_state_change(dev,
						C_CAN_BUS_OFF);
		}

		/* handle bus recovery events */
		if ((!(priv->current_status & STATUS_BOFF)) &&
				(priv->last_status & STATUS_BOFF)) {
			netdev_dbg(dev, "left bus off state\n");
			priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
		if ((!(priv->current_status & STATUS_EPASS)) &&
				(priv->last_status & STATUS_EPASS)) {
			netdev_dbg(dev, "left error passive state\n");
			priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}

		priv->last_status = priv->current_status;

		/* handle lec errors on the bus */
		lec_type = c_can_has_and_handle_berr(priv);
		if (lec_type)
			work_done += c_can_handle_bus_err(dev, lec_type);
	} else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
			(irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
		/* handle events corresponding to receive message objects */
		work_done += c_can_do_rx_poll(dev, (quota - work_done));
	} else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
			(irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
		/* handle events corresponding to transmit message objects */
		c_can_do_tx(dev);
	}

end:
	if (work_done < quota) {
		napi_complete(napi);
		/* enable all IRQs */
		c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
	}

	return work_done;
}
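
c_can_poll re-arms every controller interrupt as soon as work_done < quota. The newer napi_complete_done() variant reports the work count to the core (feeding its IRQ-deferral heuristics) and returns false when polling should continue, in which case interrupts are deliberately left disabled. A sketch of the equivalent exit, reusing the helpers from the example:

/* Sketch only: the same exit written with napi_complete_done(). */
if (work_done < quota) {
	if (napi_complete_done(napi, work_done))
		c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
}
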
Example #25
static int mv_eth_poll_l2fw(struct napi_struct *napi, int budget)
{
    int rx_done = 0;
    MV_U32 causeRxTx;
    struct eth_port *pp = MV_ETH_PRIV(napi->dev);
    read_lock(&pp->rwlock);

    STAT_INFO(pp->stats.poll[smp_processor_id()]++);

    /* Read cause register */
    causeRxTx = MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)) &
                (MV_ETH_MISC_SUM_INTR_MASK | MV_ETH_TXDONE_INTR_MASK |
                 MV_ETH_RX_INTR_MASK);

    if (causeRxTx & MV_ETH_MISC_SUM_INTR_MASK) {
        MV_U32 causeMisc;

        /* Process MISC events (e.g. link change) */
        causeRxTx &= ~MV_ETH_MISC_SUM_INTR_MASK;
        causeMisc = MV_REG_READ(NETA_INTR_MISC_CAUSE_REG(pp->port));

        if (causeMisc & NETA_CAUSE_LINK_CHANGE_MASK)
            mv_eth_link_event(pp, 1);
        MV_REG_WRITE(NETA_INTR_MISC_CAUSE_REG(pp->port), 0);
    }

    causeRxTx |= pp->causeRxTx[smp_processor_id()];
#ifdef CONFIG_MV_ETH_TXDONE_ISR
    if (causeRxTx & MV_ETH_TXDONE_INTR_MASK) {
        /* TX_DONE process */

        mv_eth_tx_done_gbe(pp,
                           (causeRxTx & MV_ETH_TXDONE_INTR_MASK));

        causeRxTx &= ~MV_ETH_TXDONE_INTR_MASK;
    }
#endif /* CONFIG_MV_ETH_TXDONE_ISR */

#if (CONFIG_MV_ETH_RXQ > 1)
    while ((causeRxTx != 0) && (budget > 0)) {
        int count, rx_queue;

        rx_queue = mv_eth_rx_policy(causeRxTx);
        if (rx_queue == -1)
            break;

        count = mv_eth_l2fw_rx(pp, budget, rx_queue);
        rx_done += count;
        budget -= count;
        if (budget > 0)
            causeRxTx &=
                ~((1 << rx_queue) << NETA_CAUSE_RXQ_OCCUP_DESC_OFFS);
    }
#else
    rx_done = mv_eth_l2fw_rx(pp, budget, CONFIG_MV_ETH_RXQ_DEF);
    budget -= rx_done;
#endif /* (CONFIG_MV_ETH_RXQ > 1) */

    if (budget > 0) {
        unsigned long flags;
        causeRxTx = 0;

        napi_complete(napi);
        STAT_INFO(pp->stats.poll_exit[smp_processor_id()]++);

        local_irq_save(flags);
        MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port),
                     (MV_ETH_MISC_SUM_INTR_MASK | MV_ETH_TXDONE_INTR_MASK |
                      MV_ETH_RX_INTR_MASK));

        local_irq_restore(flags);
    }
    pp->causeRxTx[smp_processor_id()] = causeRxTx;

    read_unlock(&pp->rwlock);

    return rx_done;
}
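
Two details of mv_eth_poll_l2fw are worth isolating: leftover cause bits are parked in pp->causeRxTx[smp_processor_id()] so the next poll on this CPU resumes exactly where this one stopped, and the unmask write happens under local_irq_save() so it cannot interleave with the ISR. A stripped-down sketch of that save/restore pattern; all my_* names are hypothetical stand-ins for the Marvell register helpers:

/* Sketch only: carry unprocessed IRQ-cause bits across polls, per CPU. */
u32 cause = my_read_cause(pp) | pp->cause_leftover[smp_processor_id()];
int done = my_rx(pp, budget, &cause);	/* hypothetical; clears drained bits */

if (done < budget) {
	unsigned long flags;

	cause = 0;			/* nothing left to resume */
	napi_complete(napi);
	local_irq_save(flags);
	my_unmask_irq(pp);		/* hypothetical MMIO write */
	local_irq_restore(flags);
}
pp->cause_leftover[smp_processor_id()] = cause;
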
Example #26
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
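
Note the ordering in this older ibmveth_poll: the VIO interrupt is re-enabled before napi_complete(), and a failed hcall trips the driver-local ibmveth_assert(). A gentler check is sketched below; h_vio_signal() and H_SUCCESS are the real pSeries interfaces, but the WARN_ON-based handling is illustrative rather than what the driver does:

/* Sketch only: report an unexpected hypervisor return code
 * without halting the machine.
 */
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
if (WARN_ON(lpar_rc != H_SUCCESS))
	netdev_err(netdev, "h_vio_signal enable failed: %lu\n", lpar_rc);
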
Example #27
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;
	u16 mss = 0;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);
			int lrg_pkt = ibmveth_rxq_large_packet(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			/* if the large packet bit is set in the rx queue
			 * descriptor, the mss will be written by PHYP eight
			 * bytes from the start of the rx buffer, which is
			 * skb->data at this stage
			 */
			if (lrg_pkt) {
				__be64 *rxmss = (__be64 *)(skb->data + 8);

				mss = (u16)be64_to_cpu(*rxmss);
			}

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded
					 * and the packet is a large-send (TSO)
					 * packet, the checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
					}
				}
			}

			if (length > netdev->mtu + ETH_HLEN) {
				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
				adapter->rx_large_packets++;
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
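
Across all of these drivers the contract is the same: drain up to budget, and only when the device ran dry complete NAPI and re-arm the interrupt, rechecking for late arrivals wherever the hardware will not re-raise a missed event. A minimal modern template of that shape; my_clean_rx() and my_irq_enable() are hypothetical driver hooks:

/* Sketch only: the canonical NAPI poll skeleton these examples share. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget);	/* hypothetical */

	/* Returning budget keeps us on the poll list; completing
	 * with less re-arms the device interrupt.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_irq_enable(priv);			/* hypothetical */

	return work_done;
}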