Example #1
0
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @skb: packet to be sent
 *  @rtdev: loopback device the packet is "sent" through
 *
 *  Loops the outgoing frame straight back into the local stack: the
 *  Ethernet header is parsed and the matching layer-3 handler (looked
 *  up in the rt_packets hash table) is invoked directly.  Frames with
 *  no registered handler are dropped.  Always returns 0.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short          hash;
    struct rtpacket_type    *pt_entry;
    unsigned long           flags;
    rtos_time_t             time;


    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the loopback device for this service... */
    if (skb->xmit_stamp) {
        rtos_get_time(&time);
        *skb->xmit_stamp =
            cpu_to_be64(rtos_time_to_nanosecs(&time) + *skb->xmit_stamp);
    }

    /* make sure that critical fields are re-initialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw   = skb->data;

    /* hold a device reference while the packet travels up the stack;
       released at "out" below on every path */
    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & RTPACKET_HASH_KEY_MASK;

    rtos_spin_lock_irqsave(&rt_packets_lock, flags);

    list_for_each_entry(pt_entry, &rt_packets[hash], list_entry)
        if (pt_entry->type == skb->protocol) {
            /* pin the entry so it cannot be unregistered while we drop
               the lock around the (potentially lengthy) handler call */
            pt_entry->refcount++;
            rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

            pt_entry->handler(skb, pt_entry);

            rtos_spin_lock_irqsave(&rt_packets_lock, flags);
            pt_entry->refcount--;
            rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

            goto out;
        }

    rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

    /* don't warn if running in promiscuous mode (RTcap...?) */
    if ((rtdev->flags & IFF_PROMISC) == 0)
        rtos_print("RTnet: unknown layer-3 protocol\n");

    kfree_rtskb(skb);

  out:
    rtdev_dereference(rtdev);
    return 0;
}
Example #2
0
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @skb: packet to be sent
 *  @rtdev: loopback device the packet is "sent" through
 *
 *  Loops the outgoing frame back into the local stack.  The layer-3
 *  handler is looked up in a direct-mapped table (one entry per hash
 *  slot); on a slot miss or type mismatch the frame is dropped.
 *  Always returns 0.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short          hash;
    struct rtpacket_type    *pt;
    unsigned long           flags;
    rtos_time_t             time;


    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the loopback device for this service... */
    if (skb->xmit_stamp) {
        rtos_get_time(&time);
        *skb->xmit_stamp =
            cpu_to_be64(rtos_time_to_nanosecs(&time) + *skb->xmit_stamp);
    }

    /* make sure that critical fields are re-initialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw   = skb->data;

    /* hold a device reference until the frame has been delivered */
    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    /* MAX_RT_PROTOCOLS is assumed to be a power of two here */
    hash = ntohs(skb->protocol) & (MAX_RT_PROTOCOLS-1);

    rtos_spin_lock_irqsave(&rt_packets_lock, flags);

    pt = rt_packets[hash];

    if ((pt != NULL) && (pt->type == skb->protocol)) {
        /* pin the entry so it cannot be unregistered while we drop the
           lock around the handler call */
        pt->refcount++;
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

        pt->handler(skb, pt);

        rtos_spin_lock_irqsave(&rt_packets_lock, flags);
        pt->refcount--;
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);
    } else {
        rtos_spin_unlock_irqrestore(&rt_packets_lock, flags);

        rtos_print("RTnet: unknown layer-3 protocol\n");

        kfree_rtskb(skb);
    }

    rtdev_dereference(rtdev);

    return 0;
}
Example #3
0
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @skb: packet to be sent
 *  @rtdev: loopback device the packet is "sent" through
 *
 *  Re-queues the outgoing frame as an incoming one and wakes the stack
 *  manager to process it.  Always returns 0.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    /* make sure that critical fields are re-initialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);

#ifdef DEBUG_LOOPBACK_DRIVER
    /* Dump MAC/IP header fields and payload.  The buf_start offsets
       assume a 2-byte alignment gap before the Ethernet header -
       NOTE(review): confirm against the rtskb layout of the caller. */
    {
        int i, cuantos;
        rt_printk("\n\nPACKET:");
        rt_printk("\nskb->protocol = %d", skb->protocol);
        rt_printk("\nskb->pkt_type = %d", skb->pkt_type);
        rt_printk("\nskb->csum = %d", skb->csum);
        rt_printk("\nskb->len = %d", skb->len);

        rt_printk("\n\nETHERNET HEADER:");
        rt_printk("\nMAC dest: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", skb->buf_start[i+2]); }
        rt_printk("\nMAC orig: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", skb->buf_start[i+8]); }
        rt_printk("\nPROTOCOL: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", skb->buf_start[i+14]); }

        rt_printk("\n\nIP HEADER:");
        rt_printk("\nVERSIZE : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", skb->buf_start[i+16]); }
        rt_printk("\nPRIORITY: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", skb->buf_start[i+17]); }
        rt_printk("\nLENGTH  : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", skb->buf_start[i+18]); }
        rt_printk("\nIDENT   : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", skb->buf_start[i+20]); }
        rt_printk("\nFRAGMENT: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", skb->buf_start[i+22]); }
        rt_printk("\nTTL     : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", skb->buf_start[i+24]); }
        rt_printk("\nPROTOCOL: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", skb->buf_start[i+25]); }
        rt_printk("\nCHECKSUM: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", skb->buf_start[i+26]); }
        rt_printk("\nIP ORIGE: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", skb->buf_start[i+28]); }
        rt_printk("\nIP DESTI: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", skb->buf_start[i+32]); }

        cuantos = (int)(*(unsigned short *)(skb->buf_start+18)) - 20;
        rt_printk("\n\nDATA (%d):", cuantos);
        rt_printk("\n:"); for(i=0;i<cuantos;i++){ rt_printk("0x%02X ", skb->buf_start[i+36]); }
    }
#endif

    /* queue the frame for reception and wake the stack manager task */
    rtnetif_rx(skb);
    rt_mark_stack_mgr(rtdev);

    return 0;
}
Example #4
0
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @rtskb: packet to be sent
 *  @rtdev: loopback device the packet is "sent" through
 *
 *  Feeds the outgoing frame straight back into the local protocol
 *  stack, optionally filling in a transmission timestamp first.
 *  Always returns 0.
 */
static int rt_loopback_xmit(struct rtskb *rtskb, struct rtnet_device *rtdev)
{
    /* Fill in the transmission timestamp if some protocol requested
     * one - unlikely on the loopback device, but be correct. */
    if (rtskb->xmit_stamp) {
	*rtskb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
					 *rtskb->xmit_stamp);
    }

    /* Reset fields that must not carry stale values into the RX path. */
    rtskb->chain_end = rtskb;

    /* Classify the frame from its Ethernet header. */
    rtskb->protocol = rt_eth_type_trans(rtskb, rtdev);

    /* Hand the frame over to the receive side of the stack. */
    rt_stack_deliver(rtskb);

    return 0;
}
Example #5
0
/***
 *  rtwlan_rx - dispatch a received 802.11 frame according to the
 *  device's operating mode
 *  @rtskb: received frame, data pointing at the 802.11 header
 *  @rtnet_dev: receiving RTnet device
 *
 *  RAW/ACK mode: strip the 802.11 header and classify the payload as
 *  an Ethernet frame.  Monitor mode: hand the untouched frame to the
 *  capture subsystem.  Any other mode leaves the frame unmodified.
 *  Always returns 0.
 */
int rtwlan_rx(struct rtskb * rtskb, struct rtnet_device * rtnet_dev)
{
    struct rtwlan_device * rtwlan = rtnetdev_priv(rtnet_dev);
    struct ieee80211_hdr  * hdr = (struct ieee80211_hdr *)rtskb->data;
    u16 fc = le16_to_cpu(hdr->frame_ctl);

    if ((rtwlan->mode == RTWLAN_MODE_RAW) ||
        (rtwlan->mode == RTWLAN_MODE_ACK)) {
        /* drop the 802.11 header, then parse as Ethernet */
        rtskb_pull(rtskb, ieee80211_get_hdrlen(fc));
        rtskb->protocol = rt_eth_type_trans (rtskb, rtnet_dev);
    } else if (rtwlan->mode == RTWLAN_MODE_MON) {
        /* monitor mode: record the raw frame for capturing */
        rtskb->mac.raw = rtskb->data;
        rtcap_mark_incoming(rtskb);
    }

    return 0;
}
Example #6
0
/* tulip_rx - drain the Rx descriptor ring.
 * @rtdev: receiving RTnet device
 * @time_stamp: IRQ-entry timestamp copied into each delivered rtskb
 *
 * Walks the ring from tp->cur_rx while descriptors are owned by the
 * host, accounting errors and delivering good frames via rtnetif_rx().
 * Returns the number of descriptors processed.
 */
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   rtdev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		/* status bits: 0x0300 = first+last descriptor of one frame;
		   anything in 0x38008300 beyond that flags an error */
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ingore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   rtdev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   rtdev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct /*RTnet*/rtskb *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   rtdev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* NOTE: the copybreak path below was disabled for RTnet;
			   the braces only balance together with the #if 0 block. */
#if 0 /*RTnet*/
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
				skb->rtdev = rtdev;
				/*RTnet*/rtskb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				//eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
				//		 pkt_len, 0);
				memcpy(rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#else
				memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
			{
				char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
					       rtdev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       temp);/*RTnet*/
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				/* ownership of the rtskb passes to the stack */
				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
			skb->time_stamp = *time_stamp;
			/*RTnet*/rtnetif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
int tulip_interrupt(rtdm_irq_t *irq_handle)
{
	nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/
	struct rtnet_device *rtdev =
	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	long ioaddr = rtdev->base_addr;
	unsigned int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
		return 0;
	}

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(rtdev, &time_stamp);
			tulip_refill_rx(rtdev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			rtdm_lock_get(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was an major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   rtdev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
				rtnetif_tx(rtdev);
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   rtdev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/*RTnet*/rtnetif_wake_queue(rtdev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			rtdm_lock_put(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
#if 0 /*RTnet*/
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(rtdev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				/*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occured (%d)\n",
					rtdev->name, tp->nir, error);
			}
#endif /*RTnet*/
			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
			/* Clear all error sources, included undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
Example #7
0
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 *
 * @rtdev: receiving RTnet device
 * @packets: incremented once per frame delivered upstream
 * @time_stamp: IRQ-entry timestamp copied into each delivered rtskb
 * Always returns 0.
 */
static int
scc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp)
{
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	ushort	pkt_len;
	struct	rtskb *skb;

	RT_DEBUG(__FUNCTION__": ...\n");

	cep = (struct scc_enet_private *)rtdev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = cep->cur_rx;

    for (;;) {

	/* stop at the first descriptor still owned by the controller */
	if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
		break;

#ifndef final_version
	/* Since we have allocated space to hold a complete frame, both
	 * the first and last indicators should be set.
	 */
	if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
		(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
			rtdm_printk("CPM ENET: rcv is not first+last\n");
#endif

	/* Frame too long or too short.
	*/
	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
		cep->stats.rx_length_errors++;
	if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
		cep->stats.rx_frame_errors++;
	if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
		cep->stats.rx_crc_errors++;
	if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
		cep->stats.rx_crc_errors++;

	/* Report late collisions as a frame error.
	 * On this error, the BD is closed, but we don't know what we
	 * have in the buffer.  So, just drop this frame on the floor.
	 */
	if (bdp->cbd_sc & BD_ENET_RX_CL) {
		cep->stats.rx_frame_errors++;
	}
	else {

		/* Process the incoming frame.
		*/
		cep->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		cep->stats.rx_bytes += pkt_len;

		/* This does 16 byte alignment, much more than we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_rtskb(pkt_len-4, &cep->skb_pool);
		if (skb == NULL) {
			rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name);
			cep->stats.rx_dropped++;
		}
		else {
			skb->rtdev = rtdev;
			rtskb_put(skb,pkt_len-4); /* Make room */
			memcpy(skb->data,
			       cep->rx_vaddr[bdp - cep->rx_bd_base],
			       pkt_len-4);
			skb->protocol=rt_eth_type_trans(skb,rtdev);
			skb->time_stamp = *time_stamp;
			rtnetif_rx(skb);
			(*packets)++;
		}
	}

	/* Clear the status flags for this buffer.
	*/
	bdp->cbd_sc &= ~BD_ENET_RX_STATS;

	/* Mark the buffer empty.
	*/
	bdp->cbd_sc |= BD_ENET_RX_EMPTY;

	/* Update BD pointer to next entry.
	*/
	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
		bdp = cep->rx_bd_base;
	else
		bdp++;

    }
	cep->cur_rx = (cbd_t *)bdp;

	return 0;
}
Example #8
0
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 *
 * @ndev: receiving RTnet device
 * @packets: incremented once per frame delivered upstream
 * @time_stamp: IRQ-entry timestamp copied into each delivered rtskb
 *
 * Runs entirely under fep->hw_lock.
 */
static void
fec_enet_rx(struct rtnet_device *ndev, int *packets, nanosecs_abs_t *time_stamp)
{
	struct fec_enet_private *fep = rtnetdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	rtskb	*skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	rtdm_lock_get(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		/* interface closed: just recycle the descriptor */
		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				fep->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				fep->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				fep->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				fep->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			fep->stats.rx_errors++;
			fep->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		fep->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		fep->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		/* some FEC revisions deliver the frame byte-swapped */
		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_rtskb(pkt_len - 4 + NET_IP_ALIGN,
				      &fep->skb_pool); /* RTnet */

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			fep->stats.rx_dropped++;
		} else {
			rtskb_reserve(skb, NET_IP_ALIGN);
			rtskb_put(skb, pkt_len - 4);	/* Make room */
			memcpy(skb->data, data, pkt_len - 4);
			skb->protocol = rt_eth_type_trans(skb, ndev);
			skb->rtdev = ndev;
			skb->time_stamp = *time_stamp;
			rtnetif_rx(skb);
			(*packets)++; /* RTnet */
		}

		/* re-arm the buffer for DMA before handing it back */
		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	rtdm_lock_put(&fep->hw_lock);
}
Example #9
0
/***
 * rt_loopback_xmit - begin packet transmission
 * @skb: packet to be sent (always consumed, on success and failure)
 * @rtdev: loopback device the packet is "sent" through
 *
 * Clones the outgoing frame into a freshly allocated rtskb and feeds
 * the copy back into the receive path via rtnetif_rx(), then wakes the
 * stack manager.
 *
 * Returns 0 on success, -ENOMEM if no receive buffer could be
 * allocated.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	int err=0;
	struct rtskb *new_skb;

	if ( (new_skb=dev_alloc_rtskb(skb->len + 2))==NULL )
	{
		rt_printk("RTnet %s: couldn't allocate a rtskb of size %d.\n", rtdev->name, skb->len);
		err = -ENOMEM;
		goto rt_loopback_xmit_end;
	}
	else
	{
		new_skb->rx = rt_get_time();
		new_skb->rtdev = rtdev;
		rtskb_reserve(new_skb,2);
		rtskb_put(new_skb, skb->len);
		/* Copy only the valid frame data.  The previous code copied a
		 * fixed SKB_DATA_ALIGN(ETH_FRAME_LEN) chunk from buf_start to
		 * buf_start although new_skb was allocated for skb->len + 2
		 * bytes only, over-reading the source and potentially writing
		 * past the destination for short frames.  Copying skb->len
		 * bytes at the data pointers is equivalent for well-formed
		 * frames (data points at the Ethernet header on both sides -
		 * NOTE(review): confirm callers reserve the same 2-byte gap).
		 */
		memcpy(new_skb->data, skb->data, skb->len);
		new_skb->protocol = rt_eth_type_trans(new_skb, rtdev);

#ifdef DEBUG_LOOPBACK_DRIVER
		{
			int i, cuantos;
			rt_printk("\n\nPACKET:");
			rt_printk("\nskb->protocol = %d", 		skb->protocol);
			rt_printk("\nskb->pkt_type = %d", 		skb->pkt_type);
			rt_printk("\nskb->users = %d", 			skb->users);
			rt_printk("\nskb->cloned = %d", 		skb->cloned);
			rt_printk("\nskb->csum = %d",	 		skb->csum);
			rt_printk("\nskb->len = %d", 			skb->len);

			rt_printk("\nnew_skb->protocol = %d", 	new_skb->protocol);
			rt_printk("\nnew_skb->pkt_type = %d", 	new_skb->pkt_type);
			rt_printk("\nnew_skb->users = %d", 		new_skb->users);
			rt_printk("\nnew_skb->cloned = %d", 	new_skb->cloned);
			rt_printk("\nnew_skb->csum = %d",	 	new_skb->csum);
			rt_printk("\nnew_skb->len = %d", 		new_skb->len);

			rt_printk("\n\nETHERNET HEADER:");
			rt_printk("\nMAC dest: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+2]); }
			rt_printk("\nMAC orig: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+8]); }
			rt_printk("\nPROTOCOL: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+14]); }

			rt_printk("\n\nIP HEADER:");
			rt_printk("\nVERSIZE : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+16]); }
			rt_printk("\nPRIORITY: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+17]); }
			rt_printk("\nLENGTH  : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+18]); }
			rt_printk("\nIDENT   : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+20]); }
			rt_printk("\nFRAGMENT: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+22]); }
			rt_printk("\nTTL     : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+24]); }
			rt_printk("\nPROTOCOL: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+25]); }
			rt_printk("\nCHECKSUM: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+26]); }
			rt_printk("\nIP ORIGE: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+28]); }
			rt_printk("\nIP DESTI: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+32]); }

			cuantos = (int)(*(unsigned short *)(new_skb->buf_start+18)) - 20;
			rt_printk("\n\nDATA (%d):", cuantos);
			rt_printk("\n:");  		   for(i=0;i<cuantos;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+36]); }
		}
#endif

		/* queue the copy for reception and wake the stack manager */
		rtnetif_rx(new_skb);
		rt_mark_stack_mgr(rtdev);
	}

rt_loopback_xmit_end:
	/* the original skb is consumed on all paths */
	kfree_rtskb(skb);
	return err;
}