Example #1
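This tasklet from an IDT RC32434 (MIPS) Ethernet driver reaps completed transmit descriptors. Holding the device spinlock, it walks the TX ring from tx_next_done: it wakes the queue if transmission had stalled on a full ring, classifies each descriptor's devcs status word into the net_device statistics, frees the transmitted skb, resets the descriptor for reuse, and finally acknowledges the DMA status bits and unmasks the Finished/Error interrupts.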
static void rc32434_tx_tasklet(unsigned long tx_data_dev)
{
	struct net_device *dev = (struct net_device *)tx_data_dev;
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	volatile DMAD_t td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	unsigned long flags;
	volatile u32 dmas;
	
	spin_lock_irqsave(&lp->lock, flags);
	
	/* process every descriptor the DMA engine has finished with */
	while (IS_DMA_FINISHED(td->control)) {
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}
		
		devcs = lp->td_ring[lp->tx_next_done].devcs;    
		if ((devcs & (ETHTX_fd_m | ETHTX_ld_m)) != (ETHTX_fd_m | ETHTX_ld_m)) {
			lp->stats.tx_errors++;
			lp->stats.tx_dropped++;				
			
			/* should never happen */
			DBG(1, "%s: split tx ignored\n", __func__);
		}
		else if (IS_TX_TOK(devcs)) {
			/* transmit OK */
			lp->stats.tx_packets++;
		}
		else {
			lp->stats.tx_errors++;
			lp->stats.tx_dropped++;				
			
			/* underflow */
			if (IS_TX_UND_ERR(devcs)) 
				lp->stats.tx_fifo_errors++;
			
			/* oversized frame */
			if (IS_TX_OF_ERR(devcs))
				lp->stats.tx_aborted_errors++;
			
			/* excessive deferrals */
			if (IS_TX_ED_ERR(devcs))
				lp->stats.tx_carrier_errors++;
			
			/* collisions: medium busy */
			if (IS_TX_EC_ERR(devcs))
				lp->stats.collisions++;
			
			/* late collision */
			if (IS_TX_LC_ERR(devcs))
				lp->stats.tx_window_errors++;
			
		}
		
		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done] != NULL) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}
		
		/* reinitialize the descriptor for reuse */
		lp->td_ring[lp->tx_next_done].control = DMAD_iof_m;
		lp->td_ring[lp->tx_next_done].devcs = ETHTX_fd_m | ETHTX_ld_m;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;
		
		/* go on to next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & RC32434_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];
		
	}
	
	/* acknowledge (clear) the DMA status bits that were set */
	dmas = __raw_readl(&lp->tx_dma_regs->dmas);
	__raw_writel(~dmas, &lp->tx_dma_regs->dmas);
	
	/* re-enable the Finished and Error interrupts by clearing their mask bits */
	__raw_writel(__raw_readl(&lp->tx_dma_regs->dmasm) & ~(DMASM_f_m | DMASM_e_m),
		     &lp->tx_dma_regs->dmasm);
	spin_unlock_irqrestore(&lp->lock, flags);
	
}
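The listing does not show how the tasklet is registered. A minimal sketch, assuming a pre-5.9 kernel (where tasklet_init() still takes an unsigned long cookie) and a hypothetical tx_tasklet field in struct rc32434_local:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Sketch only: the tx_tasklet field name is an assumption for
 * illustration; the original driver may name or place it differently. */
static void rc32434_setup_tx_tasklet(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;

	/* bind the handler, passing the net_device as the unsigned long
	 * cookie that rc32434_tx_tasklet() casts back on entry */
	tasklet_init(&lp->tx_tasklet, rc32434_tx_tasklet, (unsigned long)dev);
}

The TX DMA interrupt handler would then mask the Finished/Error interrupts and call tasklet_schedule(&lp->tx_tasklet); the tasklet unmasks them again on its way out, as seen at the end of the function above.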
Example #2
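Example #2 is an older variant of the same descriptor-reaping logic, from the "acacia" driver, structured as a plain function rather than a tasklet. It snapshots each used descriptor before clearing it, adds a ring-consistency check that resets the interface when the ring indices and count disagree, and frees skbs with dev_kfree_skb_irq(), which suggests it is called directly from the TX interrupt handler.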
static void
acacia_tx(struct net_device *dev)
{
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t td = &lp->td_ring[lp->tx_next_out];
    struct DMAD_s ltd;

    /* see what has happened to each transmission in the backlog */
    while (lp->tx_count && IS_DMA_USED(td->control)) {

        /* snapshot the descriptor, then clear it for reuse */
        ltd = *td;
        td->devcs = 0;
        td->ca = 0;
        td->link = 0;

        if (acacia_check_tx_consistent(lp)) {
            err("%s: tx queue inconsistent, i/f reset\n",
                __func__);
            err("%s:    nI=%d, nO=%d, cnt=%d\n", __func__,
                lp->tx_next_in, lp->tx_next_out, lp->tx_count);
            lp->stats.tx_errors++;
            acacia_restart(dev);
            break;
        }

        if ((ltd.devcs & 0xffffffc0) == 0) {
            /* last transmission not complete? */
            dbg(0, "no devcs status\n");
            dbg(0, "    dptr=%08x, td=%08x\n",
                readl(&lp->tx_dma_regs->dmadptr),
                (u32)kseg1_to_phys(td));
            dbg(0, "    control=%08x, ca=%08x, "
                "devcs=%08x, link=%08x\n",
                ltd.control, ltd.ca, ltd.devcs, ltd.link);
            dbg(0, "    nI=%d, nO=%d, cnt=%d\n",
                lp->tx_next_in, lp->tx_next_out, lp->tx_count);
            lp->stats.tx_errors++;
        } else if ((ltd.devcs & (ETHTX_fd_m | ETHTX_ld_m)) !=
                   (ETHTX_fd_m | ETHTX_ld_m)) {
            lp->stats.tx_errors++;
            /* should never happen */
            dbg(1, "split tx ignored\n");
        } else if (IS_TX_TOK(ltd.devcs)) {
            /* transmit OK */
            lp->stats.tx_packets++;
        } else {
            dbg(0, "error, devcs=0x%08x\n",
                ltd.devcs);

            lp->stats.tx_errors++;

            /* underflow */
            if (IS_TX_UND_ERR(ltd.devcs))
                lp->stats.tx_fifo_errors++;

            /* oversized frame */
            if (IS_TX_OF_ERR(ltd.devcs))
                lp->stats.tx_aborted_errors++;

            /* excessive deferrals */
            if (IS_TX_ED_ERR(ltd.devcs))
                lp->stats.tx_carrier_errors++;

            /* collisions: medium busy */
            if (IS_TX_EC_ERR(ltd.devcs))
                lp->stats.collisions++;

            /* late collision */
            if (IS_TX_LC_ERR(ltd.devcs))
                lp->stats.tx_window_errors++;
        }

        /* wake the queue if the ring was full */
        if (lp->tx_full) {
            lp->tx_full = 0;
            netif_wake_queue(dev);
            err("%s: Tx ring was full, queue woken\n", __func__);
        }

        /* We must always free the original skb */
        if (lp->tx_skb[lp->tx_next_out] != NULL) {
            dev_kfree_skb_irq(lp->tx_skb[lp->tx_next_out]);
            lp->tx_skb[lp->tx_next_out] = NULL;
        }

        /* decrement the tx ring buffer count */
        lp->tx_count--;
        /* go on to next transmission */
        lp->tx_next_out = (lp->tx_next_out + 1) & ACACIA_TDS_MASK;
        td = &lp->td_ring[lp->tx_next_out];
    }
}
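The two versions differ mainly in execution context: the rc32434 tasklet takes lp->lock with spin_lock_irqsave() and frees skbs with dev_kfree_skb_any(), which is safe in any context, whereas acacia_tx() does no locking of its own and uses dev_kfree_skb_irq(), which is only valid in atomic context. A minimal sketch of how it might be driven from a 2.6-era interrupt handler (the handler name and wiring are assumptions for illustration; the original caller is not shown):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t
acacia_tx_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_id;

    /* reap finished descriptors; a real handler would also acknowledge
     * the TX DMA status register before returning */
    acacia_tx(dev);

    return IRQ_HANDLED;
}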