Example #1
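tulip_close() from the RTnet port of the DEC Tulip Ethernet driver: it stops the transmit queue, brings the interface down, frees the RTDM interrupt handle, unmaps and frees every rtskb still held in the Rx and Tx rings, and disconnects the device from the RTnet stack.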
static int tulip_close (/*RTnet*/struct rtnet_device *rtdev)
{
	long ioaddr = rtdev->base_addr;
	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;
	int i;

	rtnetif_stop_queue (rtdev);

	tulip_down (rtdev);

	if (tulip_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			rtdev->name, inl (ioaddr + CSR5));

	rtdm_irq_free(&tp->irq_handle);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct /*RTnet*/rtskb *skb = tp->rx_buffers[i].skb;
		dma_addr_t mapping = tp->rx_buffers[i].mapping;

		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;

		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
		tp->rx_ring[i].length = 0;
		tp->rx_ring[i].buffer1 = 0xBADF00D0;	/* An invalid address. */
		if (skb) {
			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
			/*RTnet*/dev_kfree_rtskb (skb);
		}
	}
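	/* Free all the skbuffs in the Tx queue. */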
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct /*RTnet*/rtskb *skb = tp->tx_buffers[i].skb;

		if (skb != NULL) {
			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			/*RTnet*/dev_kfree_rtskb (skb);
		}
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
	}

	rt_stack_disconnect(rtdev);

	return 0;
}
Example #2
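rt2x00_close() from the RTnet rt2x00 wireless driver: it clears the DEVICE_ENABLED flag (returning -EBUSY if the device was already disabled), switches the radio off, stops the transmit queue, and disconnects from the RTnet stack.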
/***
 *  rt2x00_close
 *  @rtnet_dev: RTnet device to close
 */
static int rt2x00_close (struct rtnet_device *rtnet_dev) {

    struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
    struct _rt2x00_core  *core       = rtwlan_priv(rtwlan_dev);

    DEBUG("Start.\n");

    if (!test_and_clear_bit(DEVICE_ENABLED, &core->flags)) {
        ERROR("device already disabled.\n");
        return -EBUSY;
    }

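    /* Power the radio down before detaching from the stack. */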
    rt2x00_radio_off(core);

    rtnetif_stop_queue(rtnet_dev);
    rt_stack_disconnect(rtnet_dev);

    RTNET_MOD_DEC_USE_COUNT;

    return 0;
}
Example #3
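scc_enet_start_xmit(), the transmit handler of the RTnet SCC Ethernet (CPM) driver: under an RTDM spinlock it fills the next Tx buffer descriptor, patches the transmission time stamp if one was requested, flushes the data cache, and hands the buffer to the CPM, stopping the queue when the ring fills up.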
static int
scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
	volatile cbd_t	*bdp;
	rtdm_lockctx_t context;


	RT_DEBUG("%s: ...\n", __FUNCTION__);

	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since cep->tx_busy should be set.
		 */
		rtdm_printk("%s: tx queue full!\n", rtdev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	*/
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save skb pointer.
	*/
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
	
	/* Prevent interrupts from changing the Tx ring from underneath us. */
	// *** RTnet ***
#if 0
	rtdm_irq_disable(&cep->irq_handle);
	rtdm_lock_get(&cep->lock);
#else
	rtdm_lock_get_irqsave(&cep->lock, context);
#endif

	/* Get and patch time stamp just before the transmission */
	if (skb->xmit_stamp)
		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)(skb->data),
			   (unsigned long)(skb->data + skb->len));


	/* Send it on its way.  Tell the CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
#if 0
	dev->trans_start = jiffies;
#endif

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		rtnetif_stop_queue(rtdev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	// *** RTnet ***
#if 0
	rtdm_lock_put(&cep->lock);
	rtdm_irq_enable(&cep->irq_handle);
#else
	rtdm_lock_put_irqrestore(&cep->lock, context);
#endif

	return 0;
}
Example #4
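tulip_start_xmit() from the RTnet Tulip driver: it maps the rtskb for DMA, builds the next Tx descriptor with interrupt-mitigation flags, patches the transmission time stamp if one was requested, and triggers an immediate transmit demand through CSR1, all under an RTDM spinlock.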
static int
tulip_start_xmit(struct /*RTnet*/rtskb *skb, /*RTnet*/struct rtnet_device *rtdev)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry;
	u32 flag;
	dma_addr_t mapping;
	/*RTnet*/
	rtdm_lockctx_t context;


	rtdm_lock_get_irqsave(&tp->lock, context);

	/* TODO: move to rtdev_xmit, use queue */
	if (rtnetif_queue_stopped(rtdev)) {
		dev_kfree_rtskb(skb);
		tp->stats.tx_dropped++;

		rtdm_lock_put_irqrestore(&tp->lock, context);
		return 0;
	}
	/*RTnet*/

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = pci_map_single(tp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

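	/* Interrupt mitigation: ask for a Tx-done interrupt only on every
	 * (TX_RING_SIZE/2)-th packet, or when the ring is about to fill up. */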
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {	/* Typical path */
		flag = 0x60000000; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000; /* No Tx-done intr. */
	} else {		/* Leave room for set_rx_mode() to fill entries. */
		flag = 0xe0000000; /* Tx-done intr. */
		rtnetif_stop_queue(rtdev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
	/* if we were using Transmit Automatic Polling, we would need a
	 * wmb() here. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);

	/*RTnet*/
	/* get and patch time stamp just before the transmission */
	if (skb->xmit_stamp)
		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
	/*RTnet*/

	wmb();

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	outl(0, rtdev->base_addr + CSR1);

	/*RTnet*/
	rtdm_lock_put_irqrestore(&tp->lock, context);
	/*RTnet*/

	return 0;
}
Example #5
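fec_enet_start_xmit() from the RTnet port of the Freescale FEC driver: in addition to the usual descriptor setup it copies unaligned buffers through bounce buffers, byte-swaps frames on controllers with the FEC_QUIRK_SWAP_FRAME quirk, maps the buffer for DMA, and stops the queue once the ring wraps around to dirty_tx.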
static int
fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *ndev)
{
	struct fec_enet_private *fep = rtnetdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short	status;
	rtdm_lockctx_t context;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		printk("%s: tx link down!.\n", ndev->name);
		rtnetif_stop_queue(ndev);
		return 1;	/* RTnet: will call kfree_rtskb() */
	}

	rtdm_lock_get_irqsave(&fep->hw_lock, context);

	/* RTnet */
	if (skb->xmit_stamp)
		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
					       *skb->xmit_stamp);

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since ndev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", ndev->name);
		rtdm_lock_put_irqrestore(&fep->hw_lock, context);
		return 1;	/* RTnet: will call kfree_rtskb() */
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/*
	 * Some designs made an incorrect assumption about the endianness
	 * of the system they run on. As a result, the driver has to swap
	 * every frame going to and coming from the controller.
	 */
	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
		swap_buffer(bufaddr, skb->len);

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	fep->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Map the buffer for DMA so the FEC does not read stale
	 * cache data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		rtnetif_stop_queue(ndev);
	}

	fep->cur_tx = bdp;

	rtdm_lock_put_irqrestore(&fep->hw_lock, context);

	return NETDEV_TX_OK;
}