Example #1
static int
e1000_run_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);

	for(i = 0; i < 64; i++) {
		e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
		pci_dma_sync_single(pdev, txdr->buffer_info[i].dma,
				    txdr->buffer_info[i].length,
				    PCI_DMA_TODEVICE);
	}
	E1000_WRITE_REG(&adapter->hw, TDT, i);

	msec_delay(200);

	pci_dma_sync_single(pdev, rxdr->buffer_info[0].dma,
			    rxdr->buffer_info[0].length, PCI_DMA_FROMDEVICE);

	return e1000_check_lbtest_frame(rxdr->buffer_info[0].skb, 1024);
}
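
The loopback test above uses the legacy pci_dma_sync_single() in both directions: once per transmit buffer after the CPU has filled it, and once on the receive buffer before the CPU inspects it. As a hedged sketch only (the pdev/txdr/rxdr names are reused from the example, and the exact conversion depends on the kernel version), the same two sync points expressed with the generic DMA API, which splits the single call into explicit for-device and for-CPU variants, might look like:

	/* Sketch: generic DMA API equivalents of the two sync points above.
	 * &pdev->dev is the struct device behind the PCI device. */
	dma_sync_single_for_device(&pdev->dev, txdr->buffer_info[i].dma,
				   txdr->buffer_info[i].length, DMA_TO_DEVICE);
	/* ... after the device has written the receive buffer ... */
	dma_sync_single_for_cpu(&pdev->dev, rxdr->buffer_info[0].dma,
				rxdr->buffer_info[0].length, DMA_FROM_DEVICE);
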
Example #2
static int vino_waitfor(struct vino_device *v, int frame)
{
	wait_queue_t wait;
	int i, err = 0;

	if (frame != 0)
		return -EINVAL;

	spin_lock_irq(&v->state_lock);
	switch (v->buffer_state) {
	case VINO_BUF_GRABBING:
		init_waitqueue_entry(&wait, current);
		/* add ourselves into wait queue */
		add_wait_queue(&v->dma_wait, &wait);
		/* and set current state */
		set_current_state(TASK_INTERRUPTIBLE);
		/* before releasing spinlock */
		spin_unlock_irq(&v->state_lock);
		/* to ensure that schedule_timeout will return immediately
		 * if a VINO interrupt was triggered meanwhile */
		schedule_timeout(HZ / 10);
		if (signal_pending(current))
			err = -EINTR;
		spin_lock_irq(&v->state_lock);
		remove_wait_queue(&v->dma_wait, &wait);
		/* don't rely on schedule_timeout return value and check what
		 * really happened */
		if (!err && v->buffer_state == VINO_BUF_GRABBING)
			err = -EIO;
		/* fall through */
	case VINO_BUF_DONE:
		for (i = 0; i < v->page_count; i++)
			pci_dma_sync_single(NULL, v->dma_desc.cpu[PAGE_RATIO*i],
					    PAGE_SIZE, PCI_DMA_FROMDEVICE);
		v->buffer_state = VINO_BUF_UNUSED;
		break;
	default:
		err = -EINVAL;
	}
	spin_unlock_irq(&v->state_lock);

	if (err && err != -EINVAL) {
		DEBUG("VINO: waiting for frame failed\n");
		spin_lock_irq(&Vino->vino_lock);
		dma_stop(v);
		clear_eod(v);
		spin_unlock_irq(&Vino->vino_lock);
	}

	return err;
}
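
The VINO_BUF_GRABBING branch above open-codes a wait: add the task to dma_wait, set TASK_INTERRUPTIBLE, drop the spinlock, schedule_timeout(), then re-check the buffer state rather than trusting the return value. As a rough sketch only (it ignores the state_lock ordering that the original handles by hand, so it is not a drop-in replacement), the same wait can be written with the wait_event helper, which performs the prepare/check/schedule/cleanup steps itself:

	/* Sketch, not from the driver: wait up to HZ/10 for the interrupt
	 * handler to move the buffer out of the GRABBING state. */
	long left = wait_event_interruptible_timeout(v->dma_wait,
			v->buffer_state != VINO_BUF_GRABBING, HZ / 10);
	if (left == -ERESTARTSYS)	/* interrupted by a signal */
		err = -EINTR;
	else if (v->buffer_state == VINO_BUF_GRABBING)
		err = -EIO;		/* timed out, frame never completed */
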
Example #3
static void ehci_urb_complete (
	struct ehci_hcd		*ehci,
	dma_addr_t		addr,
	struct urb		*urb
) {
	if (urb->transfer_buffer_length && usb_pipein (urb->pipe))
		pci_dma_sync_single (ehci->hcd.pdev, addr,
			urb->transfer_buffer_length,
			PCI_DMA_FROMDEVICE);

	/* cleanse status if we saw no error */
	if (likely (urb->status == -EINPROGRESS)) {
		if (urb->actual_length != urb->transfer_buffer_length
				&& (urb->transfer_flags & USB_DISABLE_SPD))
			urb->status = -EREMOTEIO;
		else
			urb->status = 0;
	}

	/* only report unlinks once */
	if (likely (urb->status != -ENOENT && urb->status != -ENOTCONN))
		urb->complete (urb);
}
Example #4
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   rtdev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   rtdev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   rtdev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct /*RTnet*/rtskb *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   rtdev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#if 0 /*RTnet*/
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
				skb->rtdev = rtdev;
				/*RTnet*/rtskb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				//eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
				//		 pkt_len, 0);
				memcpy(rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#else
				memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
			{
				char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
					       rtdev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       temp);/*RTnet*/
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
			skb->time_stamp = *time_stamp;
			/*RTnet*/rtnetif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
int tulip_interrupt(rtdm_irq_t *irq_handle)
{
	nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/
	struct rtnet_device *rtdev =
	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	long ioaddr = rtdev->base_addr;
	unsigned int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
		return 0;
	}

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(rtdev, &time_stamp);
			tulip_refill_rx(rtdev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			rtdm_lock_get(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   rtdev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
				rtnetif_tx(rtdev);
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   rtdev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/*RTnet*/rtnetif_wake_queue(rtdev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			rtdm_lock_put(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
#if 0 /*RTnet*/
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(rtdev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				/*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					rtdev->name, tp->nir, error);
			}
#endif /*RTnet*/
			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
			/* Clear all error sources, including undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
Example #5
/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
#endif

	/* Only once per interrupt, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * frames come in faster than we can process them. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		/* I have no idea (and no documentation) if flags != 0
		 * is possible.  Drop the frame, reuse the buffer. */
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		/* The device only returns the size of the header(s) here. */
		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

		/*
		 * We appear to have no way to tell the device the
		 * size of a receive buffer.  Thus, if this check
		 * triggers, we likely have kernel heap corruption. */
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
				"%s: Bogus packet size of %d (%#x).\n",
				ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}

		/* Ensure the results of device DMA are visible to the CPU. */
		pci_dma_sync_single(priv->pdev, buf->pci_addr,
				    buf->size, PCI_DMA_FROMDEVICE);

		/* Perform endianness conversion for the PIMFOR header in-place. */
		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		/* The device ID from the PIMFOR packet received from
		 * the MVC is always 0.  We forward a sensible device_id.
		 * Not that anyone upstream would care... */
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);

		/* display the buffer contents for debugging */
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		/* nobody sends these */
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

		/* Determine frame size, skipping OID_INL_TUNNEL headers. */
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame) {
			printk(KERN_WARNING
			       "%s: Out of memory, cannot handle oid 0x%08x\n",
			       ndev->name, header->oid);
			continue;
		}
		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif

		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			printk(KERN_DEBUG
			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			       header->oid, header->device_id, header->flags,
			       header->length);
#endif

			/* Create work to handle trap out of interrupt
			 * context. */
			INIT_WORK(&frame->ws, prism54_process_trap, frame);
			schedule_work(&frame->ws);

		} else {
			/* Signal the one waiting process that a response
			 * has been received. */
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}

	}

	return 0;
}
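
For response frames the code above publishes the new frame through xchg() on the single mgmt_received slot and then wakes mgmt_wqueue; the thread that issued the request via islpci_mgt_transmit() is expected to sleep on that queue and claim the slot. A hypothetical consumer-side sketch (the driver's actual wait loop is not part of this excerpt) could look like:

	/* Hypothetical waiter: sleep until a response is published, then take
	 * ownership of the slot with the same xchg() handshake used above. */
	struct islpci_mgmtframe *resp;

	wait_event_interruptible_timeout(priv->mgmt_wqueue,
					 priv->mgmt_received != NULL, HZ);
	resp = xchg(&priv->mgmt_received, NULL);
	if (resp) {
		/* ... examine resp->header / resp->data ... */
		kfree(resp);
	}
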
Example #6
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status;
		int pkt_len;

		if (!(desc->status & DescOwn))
			break;
		frame_status = le32_to_cpu(desc->status);
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (--boguscnt < 0)
			break;
		pci_dma_sync_single(np->pci_dev, desc->frag[0].addr,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev, 
					desc->frag[0].addr,
					np->rx_buf_sz, 
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->tail, 
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}

	/* No need to restart Rx engine, it will poll. */
	return 0;
}
Example #7
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = le32_to_cpu(desc->status);

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
						   "multiple buffers, entry %#x status %4.4x!\n",
						   dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(np->pdev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				pci_unmap_single(np->pdev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_addr[entry] = pci_map_single(np->pdev,
							skb->tail,
							skb->len, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = cpu_to_le32(np->rx_addr[entry]);
		}
		wmb();
		np->rx_ring[entry].status = cpu_to_le32(DescOwn);
	}

	return 0;
}
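
Both copybreak examples (#6 and #7) sync the receive buffer before the CPU reads it and, in the copy path, leave the original buffer mapped so the NIC can reuse it. With the split generic DMA API that reuse needs an explicit sync back to the device once the CPU is done peeking at the data. A hedged sketch of that copy path, reusing names from example #7 (np->pdev, rx_addr, rx_skbuff, with np->rx_buf_sz assumed to be the mapped buffer length):

	/* Sketch: copybreak copy path with explicit for-CPU / for-device syncs.
	 * The buffer stays mapped and is handed back to the NIC afterwards. */
	dma_sync_single_for_cpu(&np->pdev->dev, np->rx_addr[entry],
				np->rx_buf_sz, DMA_FROM_DEVICE);
	memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->data, pkt_len);
	dma_sync_single_for_device(&np->pdev->dev, np->rx_addr[entry],
				   np->rx_buf_sz, DMA_FROM_DEVICE);
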
Example #8
File: interrupt.c  Project: xricson/knoppix
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

#ifdef CONFIG_NET_HW_FLOWCONTROL
        int drop = 0, mit_sel = 0;

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        drop = atomic_read(&netdev_dropping);
                        if (drop)
                                goto throttle;
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_HW_FLOWCONTROL
                        mit_sel =
#endif
			netif_rx(skb);

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        switch (mit_sel) {
                        case NET_RX_SUCCESS:
                        case NET_RX_CN_LOW:
                        case NET_RX_CN_MOD:
                                break;

                        case NET_RX_CN_HIGH:
                                rx_work_limit -= NET_RX_CN_HIGH; /* additional*/
                                break;
                        case NET_RX_DROP:
                                rx_work_limit = -1;
                                break;
                        default:
                                printk("unknown feedback return code %d\n", mit_sel);
                                break;
                        }

                        drop = atomic_read(&netdev_dropping);
                        if (drop) {
throttle:
                                rx_work_limit = -1;
                                mit_sel = NET_RX_DROP;

                                if (tp->fc_bit) {
                                        long ioaddr = dev->base_addr;

                                        /* disable Rx & RxNoBuf ints. */
                                        outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
                                        set_bit(tp->fc_bit, &netdev_fc_xoff);
                                }
                        }
#endif
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
#ifdef CONFIG_NET_HW_FLOWCONTROL

        /* We use this simplistic scheme for IM. It's proven by
           real-life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM for devices that are not contributing
           to backlog congestion, at the cost of unnecessary latency.

           We monitor the device RX ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note: we only use the min and max (0, 15) settings from mit_table */


        if( tp->flags &  HAS_INTR_MITIGATION) {
                if((received > 1 || mit_sel == NET_RX_DROP)
                   && tp->mit_sel != 15 ) {
                        tp->mit_sel = 15;
                        tp->mit_change = 1; /* Force IM change */
                }
                if((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0 ) {
                        tp->mit_sel = 0;
                        tp->mit_change = 1; /* Force IM change */
                }
        }

        return RX_RING_SIZE+1; /* maxrx+1 */
#else
	return received;
#endif
}