Example #1
static void tulip_down (/*RTnet*/struct rtnet_device *rtdev)
{
	long ioaddr = rtdev->base_addr;
	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;

	rtdm_irq_disable(&tp->irq_handle);
	rtdm_lock_get(&tp->lock); /* sync with IRQ handler on other cpu -JK- */

	/* Disable interrupts by clearing the interrupt mask. */
	outl (0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(rtdev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	/* 21040 -- Leave the card in 10baseT state. */
	if (tp->chip_id == DC21040)
		outl (0x00000004, ioaddr + CSR13);

	if (inl (ioaddr + CSR6) != 0xffffffff)
		tp->stats.rx_missed_errors += inl (ioaddr + CSR8) & 0xffff;

	rtdm_lock_put(&tp->lock);
	rtdm_irq_enable(&tp->irq_handle);

	rtdev->if_port = tp->saved_if_port;

	/* Leave the driver in snooze, not sleep, mode. */
	tulip_set_power_state (tp, 0, 1);
}
Example #2
static int demo_interrupt(rtdm_irq_t *irq_context)
{
    struct demodrv_context *ctx;
    int           dev_id;
//    timestamp, if needed, can be obtained like this:
//    u64           timestamp = rtdm_clock_read();
    int           ret = RTDM_IRQ_HANDLED; // usual return value

    ctx = rtdm_irq_get_arg(irq_context, struct demodrv_context);
    dev_id    = ctx->dev_id;

    rtdm_lock_get(&ctx->lock);

    // do stuff
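    // ('events' below is not declared in this excerpt -- presumably a
    //  file-scope counter defined elsewhere in the driver.)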
#ifdef TIMERINT
    if (events > EVENT_SIGNAL_COUNT) {
        rtdm_event_signal(&ctx->irq_event);
        events = 0;
    }
#else
    rtdm_event_signal(&ctx->irq_event);
#endif
    events++;
        
    rtdm_lock_put(&ctx->lock);
    // these return values were dropped from the RTDM API:
    // ret = RTDM_IRQ_ENABLE | RTDM_IRQ_PROPAGATE;

#ifdef TIMERINT
    // Only propagate the timer interrupt so that Linux still sees it.
    // Forwarding interrupts to the non-realtime domain is not a common
    //     use case for realtime device drivers, so usually DON'T DO THIS.
    // But here we grab the important timer interrupt, so we must propagate it.
    return XN_ISR_PROPAGATE;
#else
    // signal that the interrupt was handled; don't propagate it to Linux
    return RTDM_IRQ_HANDLED;
#endif
}
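
For reference, here is a minimal sketch of the setup and task side that pairs
with demo_interrupt above: a real-time task blocks on irq_event until the
handler signals it. The IRQ number, the "demodrv" name string, and the
irq_handle field in demodrv_context are illustrative assumptions; only
rtdm_lock_init(), rtdm_event_init(), rtdm_irq_request(), and rtdm_event_wait()
are the actual RTDM calls.

static int demo_open(struct demodrv_context *ctx, unsigned int irq_no)
{
    rtdm_lock_init(&ctx->lock);
    rtdm_event_init(&ctx->irq_event, 0);    /* no event pending yet */

    /* register demo_interrupt(); ctx is retrieved in the handler
       via rtdm_irq_get_arg() */
    return rtdm_irq_request(&ctx->irq_handle, irq_no, demo_interrupt,
                            0, "demodrv", ctx);
}

static void demo_task(struct demodrv_context *ctx)
{
    /* block until demo_interrupt() calls rtdm_event_signal() */
    while (rtdm_event_wait(&ctx->irq_event) == 0) {
        /* consume whatever the IRQ handler produced */
    }
}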
Example #3
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
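	/* (A set DescOwned bit means the descriptor still belongs to the chip;
	   once the chip clears it, the driver owns that entry again.) */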
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   rtdev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   rtdev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   rtdev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct /*RTnet*/rtskb *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   rtdev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#if 0 /*RTnet*/
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
				skb->rtdev = rtdev;
				/*RTnet*/rtskb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				//eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
				//		 pkt_len, 0);
				memcpy(rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#else
				memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
			{
				char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
					       rtdev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       temp);/*RTnet*/
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
			skb->time_stamp = *time_stamp;
			/*RTnet*/rtnetif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
int tulip_interrupt(rtdm_irq_t *irq_handle)
{
	nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/
	struct rtnet_device *rtdev =
	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	long ioaddr = rtdev->base_addr;
	unsigned int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
		return 0;
	}

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
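		/* (CSR5 status bits are write-one-to-clear, so writing the bits
		   we just read acknowledges exactly those sources.) */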
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(rtdev, &time_stamp);
			tulip_refill_rx(rtdev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			rtdm_lock_get(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   rtdev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
				rtnetif_tx(rtdev);
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   rtdev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/*RTnet*/rtnetif_wake_queue(rtdev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			rtdm_lock_put(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
#if 0 /*RTnet*/
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(rtdev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				/*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occured (%d)\n",
					rtdev->name, tp->nir, error);
			}
#endif /*RTnet*/
			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
			/* Clear all error sources, including undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
Example #4
/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
static int scc_enet_interrupt(rtdm_irq_t *irq_handle)
{
	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
	int packets = 0;
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	ushort	int_events;
	int	must_restart;
	nanosecs_abs_t time_stamp = rtdm_clock_read();


	cep = (struct scc_enet_private *)rtdev->priv;

	/* Get the interrupt events that caused us to be here.
	*/
	int_events = cep->sccp->scc_scce;
	cep->sccp->scc_scce = int_events;
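	/* (The SCCE event register is write-one-to-clear: writing the events
	   back acknowledges them.) */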
	must_restart = 0;

	/* Handle receive event in its own function.
	*/
	if (int_events & SCCE_ENET_RXF) {
		scc_enet_rx(rtdev, &packets, &time_stamp);
	}

	/* Check for a transmit error.  The manual is a little unclear
	 * about this, so the debug code stays in until I get it figured out.  It
	 * appears that if TXE is set, then TXB is not set.  However,
	 * if carrier sense is lost during frame transmission, the TXE
	 * bit is set, "and continues the buffer transmission normally."
	 * I don't know if "normally" implies TXB is set when the buffer
	 * descriptor is closed.....trial and error :-).
	 */

	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
	*/
	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
	    rtdm_lock_get(&cep->lock);
	    bdp = cep->dirty_tx;
	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
		RT_DEBUG(__FUNCTION__": Tx ok\n");
		if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
		    break;

		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
			cep->stats.tx_heartbeat_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
			cep->stats.tx_window_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
			cep->stats.tx_aborted_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
			cep->stats.tx_fifo_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
			cep->stats.tx_carrier_errors++;


		/* A missing heartbeat or a lost carrier is not really a bad error.
		 * The others require a restart transmit command.
		 */
		if (bdp->cbd_sc &
		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
			must_restart = 1;
			cep->stats.tx_errors++;
		}

		cep->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_DEF)
			cep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		*/
		dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]);
		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		*/
		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = cep->tx_bd_base;
		else
			bdp++;

		/* I don't know if we can be held off from processing these
		 * interrupts for more than one frame time.  I really hope
		 * not.  In such a case, we would now want to check the
		 * currently available BD (cur_tx) and determine if any
		 * buffers between the dirty_tx and cur_tx have also been
		 * sent.  We would want to process anything in between that
		 * does not have BD_ENET_TX_READY set.
		 */

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (cep->tx_full) {
			cep->tx_full = 0;
			if (rtnetif_queue_stopped(rtdev))
				rtnetif_wake_queue(rtdev);
		}

		cep->dirty_tx = (cbd_t *)bdp;
	    }

	    if (must_restart) {
		volatile cpm8xx_t *cp;

		/* Some transmit errors cause the transmitter to shut
		 * down.  We now issue a restart transmit.  Since the
		 * errors close the BD and update the pointers, the restart
		 * _should_ pick up without having to reset any of our
		 * pointers either.
		 */
		cp = cpmp;
		cp->cp_cpcr =
		    mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG;
		while (cp->cp_cpcr & CPM_CR_FLG);
	    }
	    rtdm_lock_put(&cep->lock);
	}

	/* Check for receive busy, i.e. packets coming but no place to
	 * put them.  This "can't happen" because the receive interrupt
	 * is tossing previous frames.
	 */
	if (int_events & SCCE_ENET_BSY) {
		cep->stats.rx_dropped++;
		rtdm_printk("CPM ENET: BSY can't happen.\n");
	}

	if (packets > 0)
		rt_mark_stack_mgr(rtdev);
	return RTDM_IRQ_HANDLED;
}
Example #5
static int
scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
	volatile cbd_t	*bdp;
	rtdm_lockctx_t context;


	RT_DEBUG(__FUNCTION__": ...\n");

	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since cep->tx_busy should be set.
		 */
		rtdm_printk("%s: tx queue full!.\n", rtdev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	*/
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save skb pointer.
	*/
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
	
	/* Prevent interrupts from changing the Tx ring from underneath us. */
	// *** RTnet ***
#if 0
	rtdm_irq_disable(&cep->irq_handle);
	rtdm_lock_get(&cep->lock);
#else
	rtdm_lock_get_irqsave(&cep->lock, context);
#endif

	/* Get and patch time stamp just before the transmission */
	if (skb->xmit_stamp)
		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
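	/* (skb->xmit_stamp initially holds a sender-chosen offset; the line
	 *  above overwrites it with offset + current time, in big-endian.) */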

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)(skb->data),
			   (unsigned long)(skb->data + skb->len));


	/* Send it on its way.  Tell CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
#if 0
	dev->trans_start = jiffies;
#endif

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
	        rtnetif_stop_queue(rtdev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	// *** RTnet ***
#if 0
	rtdm_lock_put(&cep->lock);
	rtdm_irq_enable(&cep->irq_handle);
#else
	rtdm_lock_put_irqrestore(&cep->lock, context);
#endif

	return 0;
}
Example #6
File: rt_fec.c  Project: hiddeate2m/rtnet
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct rtnet_device *ndev, int *packets, nanosecs_abs_t *time_stamp)
{
	struct fec_enet_private *fep = rtnetdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	rtskb	*skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	rtdm_lock_get(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				fep->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				fep->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				fep->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				fep->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			fep->stats.rx_errors++;
			fep->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		fep->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		fep->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_rtskb(pkt_len - 4 + NET_IP_ALIGN,
				      &fep->skb_pool); /* RTnet */

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			fep->stats.rx_dropped++;
		} else {
			rtskb_reserve(skb, NET_IP_ALIGN);
			rtskb_put(skb, pkt_len - 4);	/* Make room */
			memcpy(skb->data, data, pkt_len - 4);
			skb->protocol = rt_eth_type_trans(skb, ndev);
			skb->rtdev = ndev;
			skb->time_stamp = *time_stamp;
			rtnetif_rx(skb);
			(*packets)++; /* RTnet */
		}

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	rtdm_lock_put(&fep->hw_lock);
}
Example #7
File: rt_fec.c  Project: hiddeate2m/rtnet
static void
fec_enet_tx(struct rtnet_device *ndev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	rtskb	*skb;

	fep = rtnetdev_priv(ndev);
	rtdm_lock_get(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			fep->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				fep->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				fep->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				fep->stats.tx_carrier_errors++;
		} else {
			fep->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_rtskb(skb); /* RTnet */
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (rtnetif_queue_stopped(ndev))
				rtnetif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	rtdm_lock_put(&fep->hw_lock);
}