static int
e1000_run_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);

	for(i = 0; i < 64; i++) {
		e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
		pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
					    txdr->buffer_info[i].length,
					    PCI_DMA_TODEVICE);
	}
	E1000_WRITE_REG(&adapter->hw, TDT, i);

	msec_delay(200);

	i = 0;
	do {
		pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
					    rxdr->buffer_info[i].length,
					    PCI_DMA_FROMDEVICE);

		if (!e1000_check_lbtest_frame(rxdr->buffer_info[i++].skb, 1024))
			return 0;
	} while (i < 64);

	return 13;
}
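A brief note on the pattern the snippets on this page share: a streaming DMA buffer is synced for the device before the hardware reads or writes it, and synced back for the CPU before the driver touches the data. The sketch below is not taken from any of the drivers shown here; it is a minimal illustration of that pairing, using placeholder names (example_peek_rx_buffer, cpu_buf) invented for this note.

#include <linux/pci.h>

/*
 * Minimal sketch of the sync pairing used throughout these examples:
 * reclaim a streaming mapping for the CPU before reading device-written
 * data, then hand it back to the device for reuse.  All names here are
 * placeholders for illustration only.
 */
static void example_peek_rx_buffer(struct pci_dev *pdev, dma_addr_t dma,
				   const void *cpu_buf, size_t len)
{
	/* Make the device's DMA writes visible to the CPU. */
	pci_dma_sync_single_for_cpu(pdev, dma, len, PCI_DMA_FROMDEVICE);

	/* ... inspect or copy out of cpu_buf here ... */

	/* Return ownership to the device before it DMAs into the buffer again. */
	pci_dma_sync_single_for_device(pdev, dma, len, PCI_DMA_FROMDEVICE);
}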
Example #2
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;
	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");

			len = priv->common.rx_mtu;
		}
		dma_addr = le32_to_cpu(desc->host_addr);
		pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
			priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
		skb_put(skb, len);

		if (p54_rx(dev, skb)) {
			pci_unmap_single(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			skb_trim(skb, 0);
			pci_dma_sync_single_for_device(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
Example #3
static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
					      dma_addr_t dma_handle,
					      size_t size,
					      int direction)
{
	pci_dma_sync_single_for_cpu(ep->pdev,
				    dma_handle,
				    size,
				    xilly_pci_direction(direction));
}
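The Xillybus wrapper above simply translates a generic DMA direction into the legacy PCI_DMA_* constant before delegating to pci_dma_sync_single_for_cpu(). A plausible sketch of such a direction-mapping helper follows; it is illustrative only, and the real xilly_pci_direction() may be implemented differently.

#include <linux/dma-direction.h>
#include <linux/pci.h>

/* Hypothetical direction-mapping helper, for illustration only; the real
 * xilly_pci_direction() in the Xillybus driver may differ. */
static int xilly_pci_direction_sketch(int direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		return PCI_DMA_TODEVICE;
	case DMA_FROM_DEVICE:
		return PCI_DMA_FROMDEVICE;
	default:
		return PCI_DMA_BIDIRECTIONAL;
	}
}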
ssize_t mic_psmi_read(struct file * filp, char __user *buf,
			size_t count, loff_t *pos)
{
	ssize_t total_bytes = 0;
	unsigned int pg_no, pg_off, bytes;
	mic_ctx_t *mic_ctx = ((filp)->private_data);
	struct mic_psmi_ctx *psmi_ctx = &mic_ctx->bi_psmi;
	loff_t mem_size;

	if (!psmi_ctx->enabled)
		return -EINVAL;
	if (FAMILY_ABR == mic_ctx->bi_family &&
			USAGE_MODE_NORMAL != usagemode_param)
		mem_size = MIC_APERTURE_SIZE;
	else
		mem_size = psmi_ctx->dma_mem_size;
	if (*pos >= mem_size || count <= 0)
		return 0;
	if (*pos + count > mem_size)
		count = mem_size - *pos;
	/* read aperture memory */
	if (USAGE_MODE_NORMAL != usagemode_param) {
		if (copy_to_user(buf,
			mic_ctx->aper.va + *pos, count))
			return -EFAULT;
		goto read_exit;
	}
	/* read host memory allocated for psmi handler */
	pg_no = *pos / MIC_PSMI_PAGE_SIZE;
	pg_off = *pos % MIC_PSMI_PAGE_SIZE;
	while (total_bytes < count) {
		pci_dma_sync_single_for_cpu(mic_ctx->bi_pdev,
			psmi_ctx->dma_tbl[pg_no + 1].pa,
				MIC_PSMI_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		bytes = MIC_PSMI_PAGE_SIZE - pg_off;
		if (total_bytes + bytes > count)
			bytes = count - total_bytes;
		if (copy_to_user(buf,
			(void *)psmi_ctx->va_tbl[pg_no].pa + pg_off, bytes))
			return -EFAULT;
		total_bytes += bytes;
		buf += bytes;
		pg_no++;
		/* Only the first page needs an offset */
		pg_off = 0;
	}
read_exit:
	*pos += count;
	return count;
}
Example #5
static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (np->pdev->pci_rev_id >= 0x0c &&
				!(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
Example #6
/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif

	/* Only once per interrupt, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * frames come in faster than we can process them. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		/* I have no idea (and no documentation) if flags != 0
		 * is possible.  Drop the frame, reuse the buffer. */
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		/* The device only returns the size of the header(s) here. */
		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

		/*
		 * We appear to have no way to tell the device the
		 * size of a receive buffer.  Thus, if this check
		 * triggers, we likely have kernel heap corruption. */
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
				"%s: Bogus packet size of %d (%#x).\n",
				ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}

		/* Ensure the results of device DMA are visible to the CPU. */
		pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
					    buf->size, PCI_DMA_FROMDEVICE);

		/* Perform endianness conversion for PIMFOR header in-place. */
		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		/* The device ID from the PIMFOR packet received from
		 * the MVC is always 0.  We forward a sensible device_id.
		 * Not that anyone upstream would care... */
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);

		/* display the buffer contents for debugging */
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		/* nobody sends these */
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

		/* Determine frame size, skipping OID_INL_TUNNEL headers. */
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame) {
			printk(KERN_WARNING
			       "%s: Out of memory, cannot handle oid 0x%08x\n",
			       ndev->name, header->oid);
			continue;
		}
		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif

		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			printk(KERN_DEBUG
			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			       header->oid, header->device_id, header->flags,
			       header->length);
#endif

			/* Create work to handle trap out of interrupt
			 * context. */
			INIT_WORK(&frame->ws, prism54_process_trap);
			schedule_work(&frame->ws);

		} else {
			/* Signal the one waiting process that a response
			 * has been received. */
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}

	}

	return 0;
}
Example #7
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
Example #8
int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >= RX_RING_SIZE) rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);

       do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
               /* Acknowledge current RX interrupt sources. */
               iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
 
 
               /* If we own the next entry, it is a new packet. Send it up. */
               while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                       s32 status = le32_to_cpu(tp->rx_ring[entry].status);
 
 
                       if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                               break;
 
                       if (tulip_debug > 5)
                               printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                      dev->name, entry, status);
                       if (--rx_work_limit < 0)
                               goto not_done;
 
                       if ((status & 0x38008300) != 0x0300) {
                               if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                       if ((status & 0xffff) != 0x7fff) {
                                               if (tulip_debug > 1)
                                                       printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                              "spanned multiple buffers, status %8.8x!\n",
                                                              dev->name, status);
                                               tp->stats.rx_length_errors++;
                                       }
                               } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                       if (tulip_debug > 2)
                                               printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                      dev->name, status);
                                       tp->stats.rx_errors++; /* end of a packet.*/
                                       if (status & 0x0890) tp->stats.rx_length_errors++;
                                       if (status & 0x0004) tp->stats.rx_frame_errors++;
                                       if (status & 0x0002) tp->stats.rx_crc_errors++;
                                       if (status & 0x0001) tp->stats.rx_fifo_errors++;
                               }
                       } else {
                               /* Omit the four octet CRC from the length. */
                               short pkt_len = ((status >> 16) & 0x7ff) - 4;
                               struct sk_buff *skb;
  
#ifndef final_version
                               if (pkt_len > 1518) {
                                       printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                              dev->name, pkt_len, pkt_len);
                                       pkt_len = 1518;
                                       tp->stats.rx_length_errors++;
                               }
#endif
                               /* Check if the packet is long enough to accept without copying
                                  to a minimally-sized skbuff. */
                               if (pkt_len < tulip_rx_copybreak
                                   && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                       skb->dev = dev;
                                       skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                       pci_dma_sync_single_for_cpu(tp->pdev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len, 0);
                                       skb_put(skb, pkt_len);
#else
                                       memcpy(skb_put(skb, pkt_len),
                                              tp->rx_buffers[entry].skb->data,
                                              pkt_len);
#endif
                                       pci_dma_sync_single_for_device(tp->pdev,
								      tp->rx_buffers[entry].mapping,
								      pkt_len, PCI_DMA_FROMDEVICE);
                               } else {        /* Pass up the skb already on the Rx ring. */
                                       char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                            pkt_len);
  
#ifndef final_version
                                       if (tp->rx_buffers[entry].mapping !=
                                           le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                               printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                      "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                      dev->name,
                                                      le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                      (unsigned long long)tp->rx_buffers[entry].mapping,
                                                      skb->head, temp);
                                       }
#endif
  
                                       pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
  
                                       tp->rx_buffers[entry].skb = NULL;
                                       tp->rx_buffers[entry].mapping = 0;
                               }
                               skb->protocol = eth_type_trans(skb, dev);
  
                               netif_receive_skb(skb);
 
                               dev->last_rx = jiffies;
                               tp->stats.rx_packets++;
                               tp->stats.rx_bytes += pkt_len;
                       }
                       received++;

                       entry = (++tp->cur_rx) % RX_RING_SIZE;
                       if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                               tulip_refill_rx(dev);
 
                }
 
               /* New ack strategy... the irq handler no longer acks Rx;
                  hopefully this helps. */

               /* Really bad things can happen here... If a new packet arrives
                * and an irq arrives (tx, or just due to an occasionally unset
                * mask), it will be acked by the irq handler, but a new poll
                * is not scheduled. It is a major hole in the design.
                * No idea how to fix this if "playing with fire" fails
                * tomorrow (night 011029). If it does not fail, we have finally
                * won: the amount of IO did not increase at all. */
       } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
 
done:
 
 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
  
          /* We use this simplistic scheme for IM. It's proven by
             real-life installations. We could keep IM enabled
             continuously, but that would cause unnecessary latency.
             Unfortunately we can't use all the NET_RX_* feedback here;
             that would turn on IM for devices that are not contributing
             to backlog congestion, at the cost of unnecessary latency.

             We monitor the device RX ring and have:

             HW Interrupt Mitigation either ON or OFF.

             ON:  more than 1 pkt received (per intr.) OR we are dropping
             OFF: only 1 pkt received

             Note: we only use the min and max (0, 15) settings from mit_table */
  
  
          if( tp->flags &  HAS_INTR_MITIGATION) {
                 if( received > 1 ) {
                         if( ! tp->mit_on ) {
                                 tp->mit_on = 1;
                                 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                         }
                  }
                 else {
                         if( tp->mit_on ) {
                                 tp->mit_on = 0;
                                 iowrite32(0, tp->base_addr + CSR11);
                         }
                  }
          }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
 
         dev->quota -= received;
         *budget -= received;
 
         tulip_refill_rx(dev);
         
         /* If RX ring is not full we are out of memory. */
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
 
         /* Remove us from polling list and enable RX intr. */
 
         netif_rx_complete(dev);
         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
 
         /* The last op happens after poll completion, which means the following:
          * 1. it can race with disabling irqs in the irq handler
          * 2. it can race with disabling/enabling irqs in other poll threads
          * 3. if an irq is raised after the loop begins, it will be immediately
          *    triggered here.
          *
          * Summarizing: the logic results in some redundant irqs, both
          * due to races in masking and due to too-late acking of already
          * processed irqs. But it must not result in losing events.
          */
 
         return 0;
 
 not_done:
         if (!received) {

                 received = dev->quota; /* Not to happen */
         }
         dev->quota -= received;
         *budget -= received;
 
         if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
             tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                 tulip_refill_rx(dev);
 
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
 
         return 1;
 
 
 oom:    /* Executed with RX ints disabled */
 
         
         /* Start timer, stop polling, but do not enable rx interrupts. */
         mod_timer(&tp->oom_timer, jiffies+1);
       
         /* Think: timer_pending() was an explicit signature of a bug.
          * The timer can be pending now, but it may have fired and completed
          * before we did netif_rx_complete(). See? We would lose it. */
 
         /* remove ourselves from the polling list */
         netif_rx_complete(dev);
 
         return 0;
}
Example #9
int
islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
#endif

	
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		
		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

		
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
				"%s: Bogus packet size of %d (%#x).\n",
				ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}

		
		pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
					    buf->size, PCI_DMA_FROMDEVICE);

		
		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);

		
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

		
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame) {
			printk(KERN_WARNING
			       "%s: Out of memory, cannot handle oid 0x%08x\n",
			       ndev->name, header->oid);
			continue;
		}
		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif

		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			printk(KERN_DEBUG
			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			       header->oid, header->device_id, header->flags,
			       header->length);
#endif

			
			INIT_WORK(&frame->ws, prism54_process_trap);
			schedule_work(&frame->ws);

		} else {
			
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}

	}

	return 0;
}
Example #10
static int
e1000_run_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i, j, k, l, lc, good_cnt, ret_val=0;
	unsigned long time;

	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);

	/* Calculate the loop count based on the largest descriptor ring.
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop.
	 */

	if(rxdr->count <= txdr->count)
		lc = ((txdr->count / 64) * 2) + 1;
	else
		lc = ((rxdr->count / 64) * 2) + 1;

	k = l = 0;
	for(j = 0; j <= lc; j++) { /* loop count loop */
		for(i = 0; i < 64; i++) { /* send the packets */
			e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 
					1024);
			pci_dma_sync_single_for_device(pdev, 
					txdr->buffer_info[k].dma,
				    	txdr->buffer_info[k].length,
				    	PCI_DMA_TODEVICE);
			if(unlikely(++k == txdr->count)) k = 0;
		}
		E1000_WRITE_REG(&adapter->hw, TDT, k);
		msec_delay(200);
		time = jiffies; /* set the start time for the receive */
		good_cnt = 0;
		do { /* receive the sent packets */
			pci_dma_sync_single_for_cpu(pdev, 
					rxdr->buffer_info[l].dma,
				    	rxdr->buffer_info[l].length,
				    	PCI_DMA_FROMDEVICE);
	
			ret_val = e1000_check_lbtest_frame(
					rxdr->buffer_info[l].skb,
				   	1024);
			if(!ret_val)
				good_cnt++;
			if(unlikely(++l == rxdr->count)) l = 0;
			/* time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives; if it's
			 * exceeded, break out and flag an error
			 */
		} while (good_cnt < 64 && jiffies < (time + 20));
		if(good_cnt != 64) {
			ret_val = 13; /* ret_val is the same as mis-compare */
			break; 
		}
		if(jiffies >= (time + 2)) {
			ret_val = 14; /* error code for time out error */
			break;
		}
	} /* end loop count loop */
	return ret_val;
}
Example #11
static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int entry = priv->cur_rx % priv->rx_ring_size;
	u32 status;
	unsigned int pktlen;
	struct sk_buff *skb, *newskb;
	unsigned int limit = priv->rx_ring_size;
	u8 rssi, rate;

	while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) {
		if (!limit--)
			break;

		status = le32_to_cpu(priv->rx_ring[entry].status);
		rate = (status & RDES0_STATUS_RXDR) >> 12;
		rssi = le32_to_cpu(priv->rx_ring[entry].length) &
			RDES1_STATUS_RSSI;

		pktlen = status & RDES0_STATUS_FL;
		if (pktlen > RX_PKT_SIZE) {
			if (net_ratelimit())
				wiphy_debug(dev->wiphy, "frame too long (%d)\n",
					    pktlen);
			pktlen = RX_PKT_SIZE;
		}

		if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) {
			skb = NULL; /* old buffer will be reused */
			/* TODO: update RX error stats */
			/* TODO: check RDES0_STATUS_CRC*E */
		} else if (pktlen < RX_COPY_BREAK) {
			skb = dev_alloc_skb(pktlen);
			if (skb) {
				pci_dma_sync_single_for_cpu(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					pktlen, PCI_DMA_FROMDEVICE);
				memcpy(skb_put(skb, pktlen),
				       skb_tail_pointer(priv->rx_buffers[entry].skb),
				       pktlen);
				pci_dma_sync_single_for_device(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
			}
		} else {
			newskb = dev_alloc_skb(RX_PKT_SIZE);
			if (newskb) {
				skb = priv->rx_buffers[entry].skb;
				skb_put(skb, pktlen);
				pci_unmap_single(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
				priv->rx_buffers[entry].skb = newskb;
				priv->rx_buffers[entry].mapping =
					pci_map_single(priv->pdev,
						       skb_tail_pointer(newskb),
						       RX_PKT_SIZE,
						       PCI_DMA_FROMDEVICE);
			} else {
				skb = NULL;
				/* TODO: update rx dropped stats */
			}

			priv->rx_ring[entry].buffer1 =
				cpu_to_le32(priv->rx_buffers[entry].mapping);
		}

		priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN |
							  RDES0_STATUS_SQL);
		priv->rx_ring[entry].length =
			cpu_to_le32(RX_PKT_SIZE |
				    (entry == priv->rx_ring_size - 1 ?
				     RDES1_CONTROL_RER : 0));

		if (skb) {
			struct ieee80211_rx_status rx_status = {0};

			if (priv->pdev->revision < ADM8211_REV_CA)
				rx_status.signal = rssi;
			else
				rx_status.signal = 100 - rssi;

			rx_status.rate_idx = rate;

			rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
			rx_status.band = IEEE80211_BAND_2GHZ;

			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(dev, skb);
		}

		entry = (++priv->cur_rx) % priv->rx_ring_size;
	}

	/* TODO: check LPC and update stats? */
}
Example #12
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = cpu_to_le16(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = cpu_to_le16(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data+bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
Example #13
static void
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
                     void *ioaddr)
{
    unsigned long cur_rx, rx_left;
    int delta;

    assert(dev != NULL);
    assert(tp != NULL);
    assert(ioaddr != NULL);

    cur_rx = tp->cur_rx;
    rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;

    while (rx_left > 0) {
        int entry = cur_rx % NUM_RX_DESC;
        u32 status;

        rmb();
        status = le32_to_cpu(tp->RxDescArray[entry].status);

        if (status & OWNbit)
            break;
        if (status & RxRES) {
            printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
            tp->stats.rx_errors++;
            if (status & (RxRWT | RxRUNT))
                tp->stats.rx_length_errors++;
            if (status & RxCRC)
                tp->stats.rx_crc_errors++;
        } else {
            struct RxDesc *desc = tp->RxDescArray + entry;
            struct sk_buff *skb = tp->Rx_skbuff[entry];
            int pkt_size = (status & 0x00001FFF) - 4;
            void (*pci_action)(struct pci_dev *, dma_addr_t,
                               size_t, int) = pci_dma_sync_single_for_device;


            pci_dma_sync_single_for_cpu(tp->pci_dev,
                                        le64_to_cpu(desc->addr), RX_BUF_SIZE,
                                        PCI_DMA_FROMDEVICE);

            if (rtl8169_try_rx_copy(&skb, pkt_size, desc, dev)) {
                pci_action = pci_unmap_single;
                tp->Rx_skbuff[entry] = NULL;
            }

            pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
                       RX_BUF_SIZE, PCI_DMA_FROMDEVICE);

            skb_put(skb, pkt_size);
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);

            dev->last_rx = jiffies;
            tp->stats.rx_bytes += pkt_size;
            tp->stats.rx_packets++;
        }

        cur_rx++;
        rx_left--;
    }

    tp->cur_rx = cur_rx;

    delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
    if (delta > 0)
        tp->dirty_rx += delta;
    else if (delta < 0)
        printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);

    /*
     * FIXME: until there is periodic timer to try and refill the ring,
     * a temporary shortage may definitely kill the Rx process.
     * - disable the asic to try and avoid an overflow and kick it again
     *   after refill ?
     * - how do other drivers handle this condition (Uh oh...).
     */
    if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
        printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
}