Example no. 1
0
/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	if (netif_msg_rx_status(dev))
		devdbg(dev, "paused rx queue enabled");
}
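
Example 1 only sets an atomic flag; the queueing of packets while paused happens in usbnet_skb_return (Example 6) and the drain in usbnet_resume_rx (Example 5). Below is a minimal userspace sketch of the same pause/queue/resume pattern, using hypothetical names and C11 atomics in place of the kernel's set_bit/clear_bit:

/* Sketch of the pause/queue/resume RX pattern (hypothetical names;
 * a C11 atomic_bool stands in for the EVENT_RX_PAUSED flag bit). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int id; struct pkt *next; };

static atomic_bool rx_paused;                /* EVENT_RX_PAUSED analogue */
static struct pkt *pause_q_head, *pause_q_tail;

static void deliver(struct pkt *p)           /* usbnet_skb_return analogue */
{
	if (atomic_load(&rx_paused)) {       /* queue while paused */
		p->next = NULL;
		if (pause_q_tail)
			pause_q_tail->next = p;
		else
			pause_q_head = p;
		pause_q_tail = p;
		return;
	}
	printf("delivered pkt %d\n", p->id);
	free(p);
}

static void pause_rx(void)
{
	atomic_store(&rx_paused, true);
}

static void resume_rx(void)                  /* clear the flag, then drain */
{
	atomic_store(&rx_paused, false);
	while (pause_q_head) {
		struct pkt *p = pause_q_head;
		pause_q_head = p->next;
		deliver(p);
	}
	pause_q_tail = NULL;
}

int main(void)
{
	pause_rx();
	for (int i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));
		p->id = i;
		deliver(p);                  /* queued, not delivered */
	}
	resume_rx();                         /* flushes the three packets */
	return 0;
}
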
Example no. 2
0
int mpodp_clean_rx(struct mpodp_if_priv *priv, struct mpodp_rxq *rxq,
		   int budget)
{
	struct net_device *netdev = priv->netdev;
	struct mpodp_rx *rx;
	int worked = 0;
	ktime_t now = ktime_get_real();
	/* RX: 2nd step: give packet to kernel and update RX head */
	while (budget-- && rxq->used != rxq->avail) {
		if (!mpodp_rx_is_done(priv, rxq, rxq->used)) {
			/* DMA transfer not completed */
			break;
		}

		if (netif_msg_rx_status(priv))
			netdev_info(netdev, "rxq[%d] rx[%d]: transfer done\n",
				   rxq->id, rxq->used);

		/* get rx slot */
		rx = &(rxq->ring[rxq->used]);

		if (rx->len == 0) {
			/* error packet, skip it */
			goto pkt_skip;
		}

		dma_unmap_sg(&priv->pdev->dev, rx->sg,
			     rx->dma_len, DMA_FROM_DEVICE);

		/* fill skb field */
		skb_put(rx->skb, rx->len);
		skb_record_rx_queue(rx->skb, rxq->id);
		rx->skb->tstamp = now;

		rx->skb->protocol = eth_type_trans(rx->skb, netdev);
		netif_receive_skb(rx->skb);

		/* update stats */
		netdev->stats.rx_bytes += rx->len;
		netdev->stats.rx_packets++;

pkt_skip:
		rxq->used = (rxq->used + 1) % rxq->size;

		worked++;
	}
	/* write new RX head */
	if (worked) {
		writel(rxq->used, rxq->head_addr);
	}

	return worked;
}
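
The loop in Example 2 advances the software consumer index (used) toward the producer index (avail) modulo the ring size, and publishes the new head to the device with a single register write only if anything was consumed. A self-contained sketch of that index arithmetic, with hypothetical names and the writel() doorbell simulated by a printf:

/* Ring-consumer sketch: advance 'used' toward 'avail' modulo the
 * ring size, then publish the new head once, as mpodp_clean_rx does. */
#include <stdio.h>

#define RING_SIZE 8

struct ring {
	int used;   /* consumer index (software head) */
	int avail;  /* producer index (device tail)   */
	int data[RING_SIZE];
};

static int clean_ring(struct ring *r, int budget)
{
	int worked = 0;

	while (budget-- && r->used != r->avail) {
		printf("consumed slot %d -> %d\n", r->used, r->data[r->used]);
		r->used = (r->used + 1) % RING_SIZE;    /* wrap around */
		worked++;
	}
	if (worked)
		printf("doorbell: new head = %d\n", r->used); /* writel() analogue */
	return worked;
}

int main(void)
{
	struct ring r = { .used = 6, .avail = 2,        /* wrapped producer */
			  .data = { 0, 1, 2, 3, 4, 5, 6, 7 } };
	clean_ring(&r, 16);                             /* consumes slots 6, 7, 0, 1 */
	return 0;
}
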
Example no. 3
0
static int mpodp_flush_rx_trans(struct mpodp_if_priv *priv, struct mpodp_rxq *rxq,
				struct mpodp_rx *rx, uint32_t first_slot)
{
	struct net_device *netdev = priv->netdev;
	struct dma_async_tx_descriptor *dma_txd;

	rx->dma_len =
		dma_map_sg(&priv->pdev->dev, rx->sg, rx->sg_len,
			   DMA_FROM_DEVICE);
	if (rx->dma_len == 0)
		return -1;

	/* configure channel */
	priv->rx_config.cfg.src_addr =
		rxq->mppa_entries[first_slot].pkt_addr;
	if (dmaengine_slave_config(priv->rx_chan, &priv->rx_config.cfg)) {
		/* board has reset, wait for reset of netdev */
		netif_carrier_off(netdev);
		if (netif_msg_rx_err(priv))
			netdev_err(netdev,
				   "rxq[%d] rx[%d]: cannot configure channel\n",
				   rxq->id, first_slot);
		goto dma_failed;
	}

	/* get transfer descriptor */
	dma_txd = dmaengine_prep_slave_sg(priv->rx_chan,
					  rx->sg, rx->dma_len,
					  DMA_DEV_TO_MEM, 0);
	if (dma_txd == NULL) {
		if (netif_msg_rx_err(priv))
			netdev_err(netdev,
				   "rxq[%d] rx[%d]: cannot get dma descriptor",
				   rxq->id, first_slot);
		goto dma_failed;
	}

	if (netif_msg_rx_status(priv))
		netdev_info(netdev, "rxq[%d] rx[%d]: transfer start (%d)\n",
			   rxq->id, rxq->avail, rx->sg_len);

	/* submit and issue descriptor */
	rx->cookie = dmaengine_submit(dma_txd);

	return 0;
 dma_failed:
	dma_unmap_sg(&priv->pdev->dev, rx->sg, rx->sg_len, DMA_FROM_DEVICE);
	return -1;
}
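
Example 3 uses the standard kernel unwind idiom: every failure after the scatterlist has been mapped jumps to one label (dma_failed) that releases it, so there is exactly one cleanup path. A minimal sketch of the goto-based unwind, with invented resource names:

/* Goto-based error unwind sketch: one acquisition up front, one
 * label that undoes it (cf. the dma_failed label in Example 3). */
#include <stdio.h>
#include <stdlib.h>

static int setup_transfer(int fail_step)
{
	char *mapping = malloc(64);       /* dma_map_sg() analogue */
	if (!mapping)
		return -1;

	if (fail_step == 1) {             /* dmaengine_slave_config() failing */
		fprintf(stderr, "cannot configure channel\n");
		goto unwind;
	}
	if (fail_step == 2) {             /* dmaengine_prep_slave_sg() failing */
		fprintf(stderr, "cannot get descriptor\n");
		goto unwind;
	}

	printf("transfer submitted\n");
	return 0;

unwind:
	free(mapping);                    /* dma_unmap_sg() analogue */
	return -1;
}

int main(void)
{
	setup_transfer(0);                /* success path */
	setup_transfer(2);                /* unwound path */
	return 0;
}
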
Example no. 4
0
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
}
Example no. 5
0
void usbnet_resume_rx(struct usbnet *dev)
{
	struct sk_buff *skb;
	int num = 0;

	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
		usbnet_skb_return(dev, skb);
		num++;
	}

	tasklet_schedule(&dev->bh);

	if (netif_msg_rx_status(dev))
		devdbg(dev, "paused rx queue disabled, %d skbs requeued", num);
}
Example no. 6
0
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
}
Example no. 7
0
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	 /* ra_sw_nat_hook_rx return 1 --> continue
	  * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
	  */
	FOE_MAGIC_TAG(skb) = FOE_MAGIC_EXTIF;
	FOE_AI_UNHIT(skb);
	if(ra_sw_nat_hook_rx != NULL)
	{
		if(ra_sw_nat_hook_rx(skb)) {
			status = netif_rx (skb);
			if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
				devdbg (dev, "netif_rx status %d", status);
		}
	} else  {
		status = netif_rx (skb);
		if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
			devdbg (dev, "netif_rx status %d", status);
	}

#else
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
#endif
}
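
The vendor patch in Example 7 routes packets through an optional hardware-NAT hook: if the hook symbol is non-NULL, its return value decides whether the normal netif_rx path still runs. A sketch of that optional-hook-pointer pattern, with hypothetical names:

/* Optional hook pattern: a function pointer a module may fill in;
 * callers test it for NULL and let its return value decide whether
 * the default delivery path runs (all names invented here). */
#include <stdio.h>

/* 1 -> continue with default delivery, 0 -> hook consumed the packet */
static int (*rx_hook)(const char *pkt);

static void default_deliver(const char *pkt)
{
	printf("netif_rx-style delivery: %s\n", pkt);
}

static void receive(const char *pkt)
{
	if (rx_hook != NULL) {
		if (rx_hook(pkt))
			default_deliver(pkt);   /* hook said: keep going */
	} else {
		default_deliver(pkt);           /* no hook registered */
	}
}

static int consume_every_other(const char *pkt)
{
	static int n;
	(void)pkt;
	return n++ & 1;     /* 0, 1, 0, ... : consume, pass, consume... */
}

int main(void)
{
	receive("pkt0");    /* no hook registered: delivered normally */
	rx_hook = consume_every_other;
	receive("pkt1");    /* hook returns 0: consumed, no delivery */
	receive("pkt2");    /* hook returns 1: delivered normally */
	return 0;
}
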
Example no. 8
0
/* Receive a packet and pass it to the upper layer.
 */
static void emac_rx(struct net_device *dev)
{
	struct emac_board_info *db = netdev_priv(dev);
	struct sk_buff *skb;
	u8 *rdptr;
	bool good_packet;
	static int rxlen_last;
	unsigned int reg_val;
	u32 rxhdr, rxstatus, rxcount, rxlen;

	/* Check packet ready or not */
	while (1) {
		/* race warning: the first packet might arrive with
		 * the interrupts disabled, but the second will fix
		 * it
		 */
		rxcount = readl(db->membase + EMAC_RX_FBC_REG);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RXCount: %x\n", rxcount);

		if ((db->skb_last != NULL) && (rxlen_last > 0)) {
			dev->stats.rx_bytes += rxlen_last;

			/* Pass to upper layer */
			db->skb_last->protocol = eth_type_trans(db->skb_last,
								dev);
			netif_rx(db->skb_last);
			dev->stats.rx_packets++;
			db->skb_last = NULL;
			rxlen_last = 0;

			reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			reg_val &= ~EMAC_RX_CTL_DMA_EN;
			writel(reg_val, db->membase + EMAC_RX_CTL_REG);
		}

		if (!rxcount) {
			db->emacrx_completed_flag = 1;
			reg_val = readl(db->membase + EMAC_INT_CTL_REG);
			reg_val |= (0xf << 0) | (0x01 << 8);
			writel(reg_val, db->membase + EMAC_INT_CTL_REG);

			/* had one stuck? */
			rxcount = readl(db->membase + EMAC_RX_FBC_REG);
			if (!rxcount)
				return;
		}

		reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG);
		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "receive header: %x\n", reg_val);
		if (reg_val != EMAC_UNDOCUMENTED_MAGIC) {
			/* disable RX */
			reg_val = readl(db->membase + EMAC_CTL_REG);
			writel(reg_val & ~EMAC_CTL_RX_EN,
			       db->membase + EMAC_CTL_REG);

			/* Flush RX FIFO */
			reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			writel(reg_val | (1 << 3),
			       db->membase + EMAC_RX_CTL_REG);

			do {
				reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			} while (reg_val & (1 << 3));

			/* enable RX */
			reg_val = readl(db->membase + EMAC_CTL_REG);
			writel(reg_val | EMAC_CTL_RX_EN,
			       db->membase + EMAC_CTL_REG);
			reg_val = readl(db->membase + EMAC_INT_CTL_REG);
			reg_val |= (0xf << 0) | (0x01 << 8);
			writel(reg_val, db->membase + EMAC_INT_CTL_REG);

			db->emacrx_completed_flag = 1;

			return;
		}

		/* A packet is ready now; get status/length */
		good_packet = true;

		rxhdr = readl(db->membase + EMAC_RX_IO_DATA_REG);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr)));

		rxlen = EMAC_RX_IO_DATA_LEN(rxhdr);
		rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxstatus, rxlen);

		/* Packet Status check */
		if (rxlen < 0x40) {
			good_packet = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) {
			good_packet = false;

			if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}

			if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from EMAC */
		if (good_packet) {
			skb = netdev_alloc_skb(dev, rxlen + 4);
			if (!skb)
				continue;
			skb_reserve(skb, 2);
			rdptr = skb_put(skb, rxlen - 4);

			/* Read received packet from RX SRAM */
			if (netif_msg_rx_status(db))
				dev_dbg(db->dev, "RxLen %x\n", rxlen);

			emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
					rdptr, rxlen);
			dev->stats.rx_bytes += rxlen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
		}
	}
}
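
Example 8 screens each frame twice: a runt check against a 0x40-byte minimum and per-bit error flags (CRC, length) that bump separate counters. A compact sketch of that status-word screening follows; the bit layout here is invented, not the EMAC's:

/* Frame-screening sketch: runt check plus per-bit error accounting,
 * mirroring the good_packet logic in emac_rx (bit layout invented). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_OK       (1u << 0)
#define STATUS_CRC_ERR  (1u << 1)
#define STATUS_LEN_ERR  (1u << 2)
#define MIN_FRAME_LEN   0x40

static unsigned crc_errors, length_errors;

static bool frame_ok(uint32_t status, uint32_t len)
{
	bool good = true;

	if (len < MIN_FRAME_LEN)            /* runt frame */
		good = false;

	if (!(status & STATUS_OK)) {
		good = false;
		if (status & STATUS_CRC_ERR)
			crc_errors++;
		if (status & STATUS_LEN_ERR)
			length_errors++;
	}
	return good;
}

int main(void)
{
	printf("%d\n", frame_ok(STATUS_OK, 0x200));      /* 1: good */
	printf("%d\n", frame_ok(STATUS_CRC_ERR, 0x200)); /* 0: CRC  */
	printf("%d\n", frame_ok(STATUS_OK, 0x10));       /* 0: runt */
	printf("crc=%u len=%u\n", crc_errors, length_errors);
	return 0;
}
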
Example no. 9
0
static void read_bulk_callback(struct urb *urb)
{
	struct sk_buff *skb = NULL;
	BOOLEAN bHeaderSupressionEnabled = FALSE;
	int QueueIndex = NO_OF_QUEUES + 1;
	UINT uiIndex=0;
	int process_done = 1;
	
	PUSB_RCB pRcb = (PUSB_RCB)urb->context;
	PS_INTERFACE_ADAPTER psIntfAdapter = pRcb->psIntfAdapter;
	PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter;
	PLEADER pLeader = urb->transfer_buffer;

	if (unlikely(netif_msg_rx_status(Adapter)))
		pr_info(PFX "%s: rx urb status %d length %d\n",
			Adapter->dev->name, urb->status, urb->actual_length);

	if((Adapter->device_removed == TRUE)  ||
		(TRUE == Adapter->bEndPointHalted) ||
		(0 == urb->actual_length)
		)
	{
		pRcb->bUsed = FALSE;
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}

	if(urb->status != STATUS_SUCCESS)
	{
		if(urb->status == -EPIPE)
		{
			Adapter->bEndPointHalted = TRUE;
			wake_up(&Adapter->tx_packet_wait_queue);
		}
		else
		{
			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"Rx URB has got cancelled. status :%d", urb->status);
		}
		pRcb->bUsed = FALSE;
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		urb->status = STATUS_SUCCESS;
		return;
	}

	if(Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode))
	{
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "device is going into low power mode while the PMU option is selected, hence rx packets should not be processed");
		return;
	}

	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength);
	if(!pLeader->PLength)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0");
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}
	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status,pLeader->PLength,pLeader->Vcid);
	if(MAX_CNTL_PKT_SIZE < pLeader->PLength)
	{
		if (netif_msg_rx_err(Adapter))
			pr_info(PFX "%s: corrupted leader length...%d\n",
				Adapter->dev->name, pLeader->PLength);
		++Adapter->dev->stats.rx_dropped;
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}

	QueueIndex = SearchVcid(Adapter, pLeader->Vcid);
	if(QueueIndex < NO_OF_QUEUES)
	{
		bHeaderSupressionEnabled =
			Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
		bHeaderSupressionEnabled =
			bHeaderSupressionEnabled & Adapter->bPHSEnabled;
	}

	skb = dev_alloc_skb (pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER);
	if(!skb)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! Dropping the Packet");
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}
    
	if((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
	    (!(pLeader->Status >= 0x20  &&  pLeader->Status <= 0x3F)))
	{
		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt...");
		*(PUSHORT)skb->data = pLeader->Status;
		memcpy(skb->data + sizeof(USHORT), urb->transfer_buffer +
			(sizeof(LEADER)), pLeader->PLength);
		skb->len = pLeader->PLength + sizeof(USHORT);

		spin_lock(&Adapter->control_queue_lock);
		ENQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail,skb);
		spin_unlock(&Adapter->control_queue_lock);

		atomic_inc(&Adapter->cntrlpktCnt);
		wake_up(&Adapter->process_rx_cntrlpkt);
	}
	else
	{
        BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt...");
		skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES);
		memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer + sizeof(LEADER), pLeader->PLength);
		skb->dev = Adapter->dev;

		
		skb_put (skb, pLeader->PLength + ETH_HLEN);
		Adapter->PackInfo[QueueIndex].uiTotalRxBytes+=pLeader->PLength;
		Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes+= pLeader->PLength;
        BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength);

		if(netif_running(Adapter->dev))
		{
			
			skb_pull(skb, ETH_HLEN);
			PHSReceive(Adapter, pLeader->Vcid, skb, &skb->len,
					NULL,bHeaderSupressionEnabled);

			if(!Adapter->PackInfo[QueueIndex].bEthCSSupport)
			{
				skb_push(skb, ETH_HLEN);

				memcpy(skb->data, skb->dev->dev_addr, 6);
				memcpy(skb->data+6, skb->dev->dev_addr, 6);
				(*(skb->data+11))++;
				*(skb->data+12) = 0x08;
				*(skb->data+13) = 0x00;
				pLeader->PLength+=ETH_HLEN;
			}

			skb->protocol = eth_type_trans(skb, Adapter->dev);
			process_done = netif_rx(skb);
		}
		else
		{
		    BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB...");
			dev_kfree_skb(skb);
		}

		++Adapter->dev->stats.rx_packets;
		Adapter->dev->stats.rx_bytes += pLeader->PLength;

		for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
		{
			if((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1))
				&& (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
				Adapter->aRxPktSizeHist[uiIndex]++;
		}
	}
	Adapter->PrevNumRecvDescs++;
	pRcb->bUsed = FALSE;
	atomic_dec(&psIntfAdapter->uNumRcbUsed);
}
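
The loop at the end of Example 9 scans every histogram bucket to find the one a packet length falls into; because the buckets are uniform multiples of MIBS_PKTSIZEHIST_RANGE, the index can also be computed directly. A sketch of both forms, with invented constants standing in for the driver's:

/* Packet-size histogram sketch: the linear bucket scan from Example 9
 * versus an equivalent direct index computation (constants invented). */
#include <stdio.h>

#define HIST_ENTRIES 8
#define HIST_RANGE   128   /* bucket width in bytes */

static unsigned hist_scan[HIST_ENTRIES], hist_direct[HIST_ENTRIES];

static void account_scan(unsigned len)       /* Example 9's loop */
{
	for (unsigned i = 0; i < HIST_ENTRIES; i++)
		if (len <= HIST_RANGE * (i + 1) && len > HIST_RANGE * i)
			hist_scan[i]++;
}

static void account_direct(unsigned len)     /* equivalent O(1) form */
{
	if (len == 0)
		return;                      /* the loop also skips len == 0 */
	unsigned i = (len - 1) / HIST_RANGE; /* bucket of (i*R, (i+1)*R] */
	if (i < HIST_ENTRIES)
		hist_direct[i]++;
}

int main(void)
{
	unsigned lens[] = { 64, 128, 129, 1024, 9000 };
	for (unsigned k = 0; k < 5; k++) {
		account_scan(lens[k]);
		account_direct(lens[k]);
	}
	for (unsigned i = 0; i < HIST_ENTRIES; i++)
		printf("bucket %u: scan=%u direct=%u\n",
		       i, hist_scan[i], hist_direct[i]);
	return 0;
}
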
Example no. 10
0
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		netif_rx_complete(priv->dev, napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size). */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		netif_rx_complete(priv->dev, napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	netif_rx_complete(priv->dev, napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;
}
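
Example 10 follows the NAPI contract of its era: consume at most budget packets, return non-zero while work may remain, and fall back to interrupt mode only after a poll that found nothing. A reduced sketch of that contract against a simulated device (all names hypothetical):

/* NAPI-style poll sketch: process up to 'budget' items, re-enable
 * the (simulated) interrupt only when a poll finds no work, and
 * report whether the poller should be called again. */
#include <stdbool.h>
#include <stdio.h>

static int pending;          /* packets the fake device has queued */
static bool irq_enabled;

static int poll(int budget)
{
	int received = 0;

	while (pending > 0 && received < budget) {
		pending--;               /* netif_receive_skb() analogue */
		received++;
	}
	printf("poll: received %d\n", received);

	if (received == 0) {             /* ran dry: back to interrupt mode */
		irq_enabled = true;
		return 0;                /* done, leave the poll list */
	}
	return 1;                        /* more work may remain, poll again */
}

int main(void)
{
	pending = 5;
	irq_enabled = false;             /* interrupt fired, then was masked */
	while (poll(4))                  /* rounds of 4, 1, then the empty poll */
		;
	printf("irq re-enabled: %d\n", irq_enabled);
	return 0;
}
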
Example no. 11
0
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail].skb;
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = cp->rx_skb[rx_tail].mapping;

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       cp->dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);
		new_skb->dev = cp->dev;

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping =
		cp->rx_skb[rx_tail].mapping =
			pci_map_single(cp->pdev, new_skb->tail,
				       buflen, PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		local_irq_disable();
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();

		return 0;	/* done */
	}

	return 1;		/* not done */
}
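
Example 11 closes the classic poll/interrupt race: after draining within quota, it re-reads the interrupt status and goes around rx_status_loop again if new packets arrived, re-arming interrupts only once the device is quiet. A sketch of that check-again-before-re-arming pattern with a simulated device (hypothetical names):

/* Race-avoidance sketch: after draining, re-check device status and
 * loop again if fresh work appeared; only then re-arm the interrupt,
 * as cp_rx_poll's rx_status_loop does (device state is simulated). */
#include <stdio.h>

static int hw_pending;       /* packets the simulated NIC holds */
static int irq_armed;
static int poll_round;

static int status_has_rx(void)
{
	return hw_pending > 0;
}

static void poll_until_quiet(void)
{
again:
	while (hw_pending > 0)
		hw_pending--;            /* consume one packet per step */

	if (poll_round++ == 0)
		hw_pending = 2;          /* packets land in the race window */

	/* re-check status before re-arming: fresh work -> go around */
	if (status_has_rx())
		goto again;

	irq_armed = 1;                   /* cpw16_f(IntrMask, ...) analogue */
	printf("irq re-armed (%d) after %d poll rounds\n",
	       irq_armed, poll_round);
}

int main(void)
{
	hw_pending = 3;
	poll_until_quiet();              /* drains 3, then the late 2 */
	return 0;
}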