Code example #1
int buf_pop (circbuf_t * buf, char *dest, unsigned int len)
{
    unsigned int i;
    char *p = buf->top;
    char *end = buf->end;
    char *data = buf->data;
    char *q = dest;
    //u32 dma_con = 0;

    if (len == 0)
        return 0;

    /* Cap to number of bytes in buffer */
    if (len > buf->size)
        len = buf->size;

#if 0
    /* dma setting */
    __raw_writel (p, DMA_SRC (USB_FULL_DMA1_BASE));     /* SOURCE */
    __raw_writel (dest, DMA_DST (USB_FULL_DMA1_BASE));  /* DESTINATION */
    __raw_writel (end - p, DMA_WPPT (USB_FULL_DMA1_BASE));      /* wrapping point */
    __raw_writel (data, DMA_WPTO (USB_FULL_DMA1_BASE)); /* wrapping destination */

    __raw_writel (len, DMA_COUNT (USB_FULL_DMA1_BASE));
    dma_con = DMA_CON_WPEN | DMA_CON_BURST_16BEAT | DMA_CON_SINC
        | DMA_CON_DINC | DMA_CON_SIZE_BYTE;
    __raw_writel (dma_con, DMA_CON (USB_FULL_DMA1_BASE));

    __raw_writel (DMA_START_BIT, DMA_START (USB_FULL_DMA1_BASE));
    //printf("USB DMA Start!\n");
    while (__raw_readl (DMA_GLBSTA_L) & DMA_GLBSTA_RUN (1));
    //printf("USB DMA Complete\n");
    __raw_writel (DMA_STOP_BIT, DMA_START (USB_FULL_DMA1_BASE));
    __raw_writel (DMA_ACKINT_BIT, DMA_ACKINT (USB_FULL_DMA1_BASE));

    p += len;
    if (p >= end)
        p = data + (p - end);
#else
    for (i = 0; i < len; i++)
    {
        *q = *p;
        p++;
        if (p == end)
            p = data;
        q++;
    }
#endif

    /* Update 'top' pointer */
    buf->top = p;
    buf->size -= len;

    return len;
}
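
A note on the buffer type: neither this example nor the next shows the circbuf_t definition. A minimal reconstruction, inferred from the fields buf_pop() touches above (and buf_push() touches in the next example), might look like the sketch below; the field set is deduced from usage, and the totalsize member is an assumption, not taken from the original header.

typedef struct circbuf {
    unsigned int size;      /* current number of bytes stored */
    unsigned int totalsize; /* capacity of the backing storage (assumed) */
    char *top;              /* read pointer: oldest byte, advanced by buf_pop() */
    char *tail;             /* write pointer: next free byte, advanced by buf_push() */
    char *data;             /* start of the backing storage */
    char *end;              /* one past the end of the backing storage */
} circbuf_t;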
Code example #2
int buf_push (circbuf_t * buf, const char *src, unsigned int len)
{
    /* NOTE:  this function allows push to overwrite old data. */
    unsigned int i;
    //u32 dma_con = 0;
    char *p = buf->tail;
    char *end = buf->end;
    char *data = buf->data;
    const char *q = src;
#if 0
    /* dma setting */
    __raw_writel (src, DMA_SRC (USB_FULL_DMA0_BASE));   /* source */
    __raw_writel (p, DMA_DST (USB_FULL_DMA0_BASE));     /* destination */
    __raw_writel (end - p, DMA_WPPT (USB_FULL_DMA0_BASE));      /* wrapping point */
    __raw_writel (data, DMA_WPTO (USB_FULL_DMA0_BASE)); /* wrapping destination */


    __raw_writel (len, DMA_COUNT (USB_FULL_DMA0_BASE));
    dma_con =
        DMA_CON_WPEN | DMA_CON_WPSD | DMA_CON_BURST_16BEAT | DMA_CON_SINC |
        DMA_CON_DINC | DMA_CON_SIZE_BYTE;

    __raw_writel (dma_con, DMA_CON (USB_FULL_DMA0_BASE));

    __raw_writel (DMA_START_BIT, DMA_START (USB_FULL_DMA0_BASE));
    //printf("USB DMA Start!\n");
    while (__raw_readl (DMA_GLBSTA_L) & DMA_GLBSTA_RUN (0));
    //printf("USB DMA Complete\n");
    __raw_writel (DMA_STOP_BIT, DMA_START (USB_FULL_DMA0_BASE));
    __raw_writel (DMA_ACKINT_BIT, DMA_ACKINT (USB_FULL_DMA0_BASE));

    p += len;
    if (p >= end)
    {
        p = data + (p - end);
    }
#else
    for (i = 0; i < len; i++)
    {
        *p = *q;
        p++;
        if (p == end)
            p = data;
        q++;
    }
#endif

    buf->size += len;

    /* Update 'tail' pointer */
    buf->tail = p;

    return len;
}
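
To see how the two halves cooperate across a wrap of the storage, here is a small, self-contained usage sketch under the layout assumed above. buf_init() is hypothetical glue written for this sketch; it is not part of the original source.

#include <stdio.h>
#include <stdlib.h>

static int buf_init (circbuf_t * buf, unsigned int size)
{
    buf->size = 0;
    buf->totalsize = size;
    buf->data = malloc (size);
    if (!buf->data)
        return 0;
    buf->top = buf->tail = buf->data;
    buf->end = buf->data + size;
    return 1;
}

int main (void)
{
    circbuf_t cb;
    char out[8];

    if (!buf_init (&cb, 8))
        return 1;

    buf_push (&cb, "abcdef", 6);
    buf_pop (&cb, out, 4);      /* out = "abcd", 2 bytes remain   */
    buf_push (&cb, "ghij", 4);  /* write pointer wraps past 'end' */
    buf_pop (&cb, out, 6);      /* reads "efghij" across the wrap */
    printf ("%.6s\n", out);

    free (cb.data);
    return 0;
}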
Code example #3
File: korina.c  Project: anchowee/linino
#ifdef CONFIG_IDT_USE_NAPI
static int rc32434_poll(struct net_device *rx_data_dev, int *budget)
#else
static void rc32434_rx_tasklet(unsigned long rx_data_dev)
#endif
{
	struct net_device *dev = (struct net_device *)rx_data_dev;	
	struct rc32434_local* lp = netdev_priv(dev);
	volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8* pkt_buf;
	u32 devcs, count, pkt_len, pktuncrc_len;
	volatile u32 dmas;
#ifdef CONFIG_IDT_USE_NAPI
	u32 received = 0;
	int rx_work_limit = min(*budget,dev->quota);
#else
	unsigned long 	flags;
	spin_lock_irqsave(&lp->lock, flags);
#endif

	dma_cache_inv((u32)rd, sizeof(*rd));
	while ( (count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
#ifdef CONFIG_IDT_USE_NAPI
		if (--rx_work_limit < 0)
			break;
#endif
		/* initialize the variables used in the operations later in this loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if (count < 64) {
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;			
		}
		else if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if ( (devcs & ETHRX_rok_m)  ) {
			
			{
				/* must be the (first and) last descriptor then */
				pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
				
				pktuncrc_len = pkt_len - 4;
				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
				
				/* Malloc up new buffer. */					  
				skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);					             	
				
				if (skb_new != NULL){
					/* Make room */
					skb_put(skb, pktuncrc_len);		    
					
					skb->protocol = eth_type_trans(skb, dev);
					
					/* pass the packet to upper layers */
#ifdef CONFIG_IDT_USE_NAPI
					netif_receive_skb(skb);
					received++;
#else
					netif_rx(skb);
#endif
					
					dev->last_rx = jiffies;
					lp->stats.rx_packets++;
					lp->stats.rx_bytes += pktuncrc_len;
					
					if (IS_RCV_MP(devcs))
						lp->stats.multicast++;
					
					/* 16 bit align */						  
					skb_reserve(skb_new, 2);	
					
					skb_new->dev = dev;
					lp->rx_skb[lp->rx_next_done] = skb_new;
				}
				else {
					ERR("no memory, dropping rx packet.\n");
					lp->stats.rx_errors++;		
					lp->stats.rx_dropped++;					
				}
			}
			
		}			
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if(skb_new)
			rd->ca = CPHYSADDR(skb_new->data); 
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &=  ~(DMAD_cod_m); 	
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}	
#ifdef CONFIG_IDT_USE_NAPI
        dev->quota -= received;
        *budget -= received;
        if(rx_work_limit < 0)
                goto not_done;
#endif
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if(dmas & DMAS_h_m) {
		__raw_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp,rd);
	}
	
#ifdef CONFIG_IDT_USE_NAPI
	netif_rx_complete(dev);
#endif
	/* Enable D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m |DMASM_e_m), &lp->rx_dma_regs->dmasm); 
#ifdef CONFIG_IDT_USE_NAPI
	return 0;
 not_done:
	return 1;
#else
	spin_unlock_irqrestore(&lp->lock, flags);
	return;
#endif

	
}	
Code example #4
File: korina.c  Project: anchowee/linino
/* transmit packet */
static int rc32434_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct rc32434_local		*lp = (struct rc32434_local *)dev->priv;
	unsigned long 			flags;
	u32					length;
	DMAD_t				td;
	
	
	spin_lock_irqsave(&lp->lock, flags);
	
	td = &lp->td_ring[lp->tx_chain_tail];
	
	/* stop queue when full, drop pkts if queue already full */
	if(lp->tx_count >= (RC32434_NUM_TDS - 2)) {
		lp->tx_full = 1;
		
		if(lp->tx_count == (RC32434_NUM_TDS - 2)) {
			netif_stop_queue(dev);
		}
		else {
			lp->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);
			return 1;
		}	   
	}	 
	
	lp->tx_count ++;
	
	lp->tx_skb[lp->tx_chain_tail] = skb;
	
	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);
	
	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	
	if(__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if( lp->tx_chain_status == empty ) {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /*  Update tail      */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /*   Move tail       */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR    */
			lp->tx_chain_head = lp->tx_chain_tail;                                                  /* Move head to tail */
		}
		else {
			td->control = DMA_COUNT(length) |DMAD_cof_m|DMAD_iof_m;                                 /* Update tail */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &=  ~(DMAD_cof_m);          /* Link to prev */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link =  CPHYSADDR(td);              /* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR */
			lp->tx_chain_head = lp->tx_chain_tail;                                                  /* Move head to tail */
			lp->tx_chain_status = empty;
		}
	}
	else {
		if( lp->tx_chain_status == empty ) {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /* Update tail */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
			lp->tx_chain_status = filled;
		}
		else {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /* Update tail */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &=  ~(DMAD_cof_m);          /* Link to prev */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link =  CPHYSADDR(td);              /* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));
	
	dev->trans_start = jiffies;				
	
	spin_unlock_irqrestore(&lp->lock, flags);
	
	return 0;
}
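
All of the index arithmetic in this function, such as (lp->tx_chain_tail + 1) & RC32434_TDS_MASK and (lp->tx_chain_tail - 1) & RC32434_TDS_MASK, relies on the ring size being a power of two, so that a bitwise AND acts as a wrap-around modulo, including the minus-one case thanks to unsigned arithmetic. A standalone sketch of the idiom (the sizes here are stand-in values, not the driver's configuration):

#include <stdio.h>

#define NUM_TDS  8               /* must be a power of two */
#define TDS_MASK (NUM_TDS - 1)

int main(void)
{
	unsigned int tail = 0;
	int i;

	/* advancing ten times from 0 wraps 7 -> 0 and lands on 2 */
	for (i = 0; i < NUM_TDS + 2; i++)
		tail = (tail + 1) & TDS_MASK;
	printf("tail = %u\n", tail);                 /* 2 */

	/* stepping back from 0 wraps to 7: (0u - 1) underflows to all-ones */
	printf("prev = %u\n", (0u - 1) & TDS_MASK);  /* 7 */
	return 0;
}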
Code example #5
File: korina.c  Project: anchowee/linino
/*
 * Initialize the RC32434 ethernet controller.
 */
static int rc32434_init(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	int i, j;
	
	/* Disable DMA */       
	rc32434_abort_tx(dev);
	rc32434_abort_rx(dev); 
	
	/* reset ethernet logic */ 
	__raw_writel(0, &lp->eth_regs->ethintfc);
	while((__raw_readl(&lp->eth_regs->ethintfc) & ETHINTFC_rip_m))
		dev->trans_start = jiffies;	
	
	/* Enable Ethernet Interface */ 
	__raw_writel(ETHINTFC_en_m, &lp->eth_regs->ethintfc); 
	
#ifndef CONFIG_IDT_USE_NAPI
	tasklet_disable(lp->rx_tasklet);
#endif
	tasklet_disable(lp->tx_tasklet);
	
	/* Initialize the transmit Descriptors */
	for (i = 0; i < RC32434_NUM_TDS; i++) {
		lp->td_ring[i].control = DMAD_iof_m;
		lp->td_ring[i].devcs = ETHTX_fd_m | ETHTX_ld_m;
		lp->td_ring[i].ca = 0;
		lp->td_ring[i].link = 0;
		if (lp->tx_skb[i] != NULL) {
			dev_kfree_skb_any(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail = lp->tx_full = lp->tx_count = 0;
	lp->tx_chain_status = empty;
	
	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, ie. let the last
	 * descriptor point to the first again.
	 */
	for (i=0; i<RC32434_NUM_RDS; i++) {
		struct sk_buff *skb = lp->rx_skb[i];
		
		if (lp->rx_skb[i] == NULL) {
			skb = dev_alloc_skb(RC32434_RBSIZE + 2);
			if (skb == NULL) {
				ERR("No memory in the system\n");
				for (j = 0; j < RC32434_NUM_RDS; j ++)
					if (lp->rx_skb[j] != NULL) 
						dev_kfree_skb_any(lp->rx_skb[j]);
				
				return 1;
			}
			else {
				skb->dev = dev;
				skb_reserve(skb, 2);
				lp->rx_skb[i] = skb;
				lp->rd_ring[i].ca = CPHYSADDR(skb->data); 
				
			}
		}
		lp->rd_ring[i].control =	DMAD_iod_m | DMA_COUNT(RC32434_RBSIZE);
		lp->rd_ring[i].devcs = 0;
		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
		
	}
	/* loop back */
	lp->rd_ring[RC32434_NUM_RDS-1].link = CPHYSADDR(&lp->rd_ring[0]);
	lp->rx_next_done   = 0;
	
	lp->rd_ring[RC32434_NUM_RDS-1].control |= DMAD_cod_m;
	lp->rx_chain_head = 0;
	lp->rx_chain_tail = 0;
	lp->rx_chain_status = empty;
	
	__raw_writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	rc32434_start_rx(lp, &lp->rd_ring[0]);
	
	/* Enable F E bit in Tx DMA */
	__raw_writel(__raw_readl(&lp->tx_dma_regs->dmasm) & ~(DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm); 
	/* Enable D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm); 
	
	/* Accept only packets destined for this Ethernet device address */
	__raw_writel(ETHARC_ab_m, &lp->eth_regs->etharc); 
	
	/* Set all Ether station address registers to their initial values */ 
	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0); 
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
	
	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1); 
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
	
	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2); 
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
	
	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3); 
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3); 
	
	
	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */ 
	__raw_writel(ETHMAC2_pe_m | ETHMAC2_cen_m | ETHMAC2_fd_m, &lp->eth_regs->ethmac2);  
	//ETHMAC2_flc_m		ETHMAC2_fd_m	lp->duplex_mode
	
	/* Back to back inter-packet-gap */ 
	__raw_writel(0x15, &lp->eth_regs->ethipgt); 
	/* Non - Back to back inter-packet-gap */ 
	__raw_writel(0x12, &lp->eth_regs->ethipgr); 
	
	/* Management Clock Prescaler Divisor */
	/* Clock independent setting */
	__raw_writel(((idt_cpu_freq)/MII_CLOCK+1) & ~1,
		       &lp->eth_regs->ethmcp);
	
	/* don't transmit until fifo contains 48b */
	__raw_writel(48, &lp->eth_regs->ethfifott);
	
	__raw_writel(ETHMAC1_re_m, &lp->eth_regs->ethmac1);
	
#ifndef CONFIG_IDT_USE_NAPI
	tasklet_enable(lp->rx_tasklet);
#endif
	tasklet_enable(lp->tx_tasklet);
	
	netif_start_queue(dev);
	
	return 0; 
}
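
The management-clock write near the end computes the smallest even divisor that is no smaller than idt_cpu_freq / MII_CLOCK: the +1 followed by & ~1 rounds an odd quotient up to the next even number and leaves an even quotient alone (53 -> 54, 54 -> 54). A worked sketch with illustrative numbers; the real idt_cpu_freq and MII_CLOCK values come from the board configuration, not from this example:

#include <stdio.h>

int main(void)
{
	unsigned long idt_cpu_freq = 133000000; /* example only: 133 MHz CPU */
	unsigned long mii_clock    = 2500000;   /* example only: 2.5 MHz MDC */

	/* 133000000 / 2500000 = 53 (odd); +1 = 54; & ~1 keeps 54 (even) */
	unsigned long div = (idt_cpu_freq / mii_clock + 1) & ~1UL;

	printf("ethmcp divisor = %lu\n", div);  /* 54 */
	return 0;
}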
Code example #6
File: korina.c  Project: 325116067/semc-qsd8x50
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_over_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);

			if (!skb_new)
				break;
			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			/* 16 bit align */
			skb_reserve(skb_new, 2);

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		rd->devcs = 0;

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}
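
The loop's exit test works because a freshly armed descriptor is written with DMA_COUNT(KORINA_RBSIZE) and the DMA engine writes the remaining byte count back into the control word, so a remaining count equal to the full buffer size means nothing has been received yet. A sketch of that bookkeeping; the 18-bit count mask is an assumption for illustration, and the real DMA_COUNT macro lives in the driver headers:

#include <stdio.h>

#define KORINA_RBSIZE 1536
#define DMA_COUNT(x)  ((x) & 0x0003ffff)  /* assumed count bitfield */

int main(void)
{
	/* control word as armed, and as written back after a 60-byte frame */
	unsigned int armed = DMA_COUNT(KORINA_RBSIZE);
	unsigned int after = DMA_COUNT(KORINA_RBSIZE - 60);

	printf("received: %u\n", KORINA_RBSIZE - DMA_COUNT(armed)); /* 0  */
	printf("received: %u\n", KORINA_RBSIZE - DMA_COUNT(after)); /* 60 */
	return 0;
}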
Code example #7
File: korina.c  Project: 325116067/semc-qsd8x50
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
Code example #8
/*
 * We have a good packet(s), get it/them out of the buffers.
 *
 * cgg - this driver works by creating (once) a circular list of receiver
 *       DMA descriptors that will be used serially by the Banyan.
 *       Because the descriptors are never unlinked from the list _they
 *       are always live_.  We are counting on Linux (and the chosen number
 *	 of buffers) to keep ahead of the hardware otherwise the same
 *	 descriptor might be used for more than one reception.
 */
static void
acacia_rx(struct net_device *dev)
{
    struct acacia_local* lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_out];
    struct sk_buff *skb;
    u8* pkt_buf;
    u32 devcs;
    u32 count, pkt_len;

    /* cgg - keep going while we have received into more descriptors */

    while (IS_DMA_USED(rd->control)) {

        devcs = rd->devcs;

        pkt_len = RCVPKT_LENGTH(devcs);

        pkt_buf = &lp->rba[lp->rx_next_out * ACACIA_RBSIZE];

        /*
         * cgg - RESET the address pointer later - if we get a second
         * reception it will occur in the remains of the current
         * area of memory - protected by the diminished DMA count.
         */

        /*
         * Due to a bug in banyan processor, the packet length
         * given by devcs field and count field sometimes differ.
         * If that is the case, report Error.
         */
        count = ACACIA_RBSIZE - (u32)DMA_COUNT(rd->control);
        if( count != pkt_len) {
            lp->stats.rx_errors++;
        } else if (count < 64) {
            lp->stats.rx_errors++;
        } else if ((devcs & (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) !=
                   (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) {
            /* cgg - check that this is a whole packet */
            /* WARNING: DMA_FD bit incorrectly set in Acacia
               (errata ref #077) */
            lp->stats.rx_errors++;
            lp->stats.rx_over_errors++;
        } else if (devcs & ETHRX_rok_m) {
            /* must be the (first and) last descriptor then */

            /* Malloc up new buffer. */
            skb = dev_alloc_skb(pkt_len+2);
            if (skb == NULL) {
                err("no memory, dropping rx packet.\n");
                lp->stats.rx_dropped++;
            } else {
                /* else added by cgg - used to fall through! */
                /* invalidate the cache before copying
                   the buffer */
                dma_cache_inv((unsigned long)pkt_buf, pkt_len);

                skb->dev = dev;
                skb_reserve(skb, 2);	/* 16 bit align */
                skb_put(skb, pkt_len);	/* Make room */
                eth_copy_and_sum(skb, pkt_buf, pkt_len, 0);
                skb->protocol = eth_type_trans(skb, dev);
                /* pass the packet to upper layers */
                netif_rx(skb);
                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += pkt_len;

                if (IS_RCV_MP(devcs))
                    lp->stats.multicast++;
            }

        } else {
            /* This should only happen if we enable
               accepting broken packets */
            lp->stats.rx_errors++;

            /* cgg - (re-)added statistics counters */
            if (IS_RCV_CRC_ERR(devcs)) {
                dbg(2, "RX CRC error\n");
                lp->stats.rx_crc_errors++;
            } else {
                if (IS_RCV_LOR_ERR(devcs)) {
                    dbg(2, "RX LOR error\n");
                    lp->stats.rx_length_errors++;
                }

                if (IS_RCV_LE_ERR(devcs)) {
                    dbg(2, "RX LE error\n");
                    lp->stats.rx_length_errors++;
                }
            }

            if (IS_RCV_OVR_ERR(devcs)) {
                /*
                 * The overflow errors are handled through
                 * an interrupt handler.
                 */
                lp->stats.rx_over_errors++;
            }
            /* code violation */
            if (IS_RCV_CV_ERR(devcs)) {
                dbg(2, "RX CV error\n");
                lp->stats.rx_frame_errors++;
            }

            if (IS_RCV_CES_ERR(devcs)) {
                dbg(2, "RX Preamble error\n");
            }
        }


        /* reset descriptor's curr_addr */
        rd->ca = virt_to_phys(pkt_buf);

        /*
         * cgg - clear the bits that let us see whether this
         * descriptor has been used or not & reset reception
         * length.
         */
        rd->control = DMAD_iod_m | DMA_COUNT(ACACIA_RBSIZE);
        rd->devcs = 0;
        lp->rx_next_out = (lp->rx_next_out + 1) & ACACIA_RDS_MASK;
        rd = &lp->rd_ring[lp->rx_next_out];

        /*
         * we'll deal with all possible interrupts up to the last
         * used descriptor - so cancel any interrupts that may have
         * arrisen while we've been processing.
         */
        writel(0, &lp->rx_dma_regs->dmas);
    }

    /*
     * If any worth-while packets have been received, dev_rint()
     * has done a mark_bh(NET_BH) for us and will work on them
     * when we get to the bottom-half routine.
     */
}
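
IS_DMA_USED() carries the whole drain loop above: because the receive descriptors stay permanently linked in a circle (see the comment above acacia_rx), the only way to tell a consumed descriptor from a live one is a "used" marker in its control word, which the loop clears again when it re-arms the descriptor. A minimal model of that contract; the bit position and macro body are assumptions for illustration, not the driver's definitions:

#include <stdio.h>

#define DMAD_USED_BIT    (1u << 31)  /* assumed completion marker */
#define IS_DMA_USED(ctl) (!!((ctl) & DMAD_USED_BIT))

int main(void)
{
	unsigned int control = 0;  /* armed: still owned by the hardware */

	printf("used? %d\n", IS_DMA_USED(control)); /* 0: rx loop idles    */
	control |= DMAD_USED_BIT;  /* hardware completed a reception       */
	printf("used? %d\n", IS_DMA_USED(control)); /* 1: process & re-arm */
	control = 0;               /* re-armed for the next pass           */
	printf("used? %d\n", IS_DMA_USED(control)); /* 0 again             */
	return 0;
}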
Code example #9
/* transmit packet */
static int acacia_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t td;
    struct DMAD_s local_td;
    unsigned long flags;
    int tx_next_in, empty_index;
    u32 laddr, length;

    spin_lock_irqsave(&lp->lock, flags);

    if (lp->tx_count >= ACACIA_NUM_TDS) {
        err("Tx ring full, packet dropped\n");
        lp->tx_full = 1;
        lp->stats.tx_dropped++;
        spin_unlock_irqrestore(&lp->lock, flags);
        return 1;
    }

    tx_next_in = lp->tx_next_in;
    td = &lp->td_ring[tx_next_in];
    empty_index = (tx_next_in + 1) & ACACIA_TDS_MASK;

    if (!IS_DMA_USED(td->control)) {
        err("%s: device owns descriptor, i/f reset\n", __func__);
        lp->stats.tx_errors++;
        lp->stats.tx_dropped++;
        acacia_restart(dev);     /* Restart interface */
        spin_unlock_irqrestore(&lp->lock, flags);
        return 1;
    }

    laddr = virt_to_phys(skb->data);
    /* make sure payload gets written to memory before the DMA reads it */
    dma_cache_wback((unsigned long)skb->data, skb->len);

    if (lp->tx_skb[tx_next_in] != NULL)
        dev_kfree_skb_any(lp->tx_skb[tx_next_in]);
    lp->tx_skb[tx_next_in] = skb;

    length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

    /*
     * Setup the transmit descriptor.
     */
    local_td.devcs = ETHTX_fd_m | ETHTX_ld_m;
    local_td.ca = laddr;
    local_td.control = DMAD_iod_m | DMAD_iof_m | DMA_COUNT(length);
    local_td.link = kseg1_to_phys(&lp->td_ring[empty_index]);

    if (!(readl(&lp->tx_dma_regs->dmac) & DMAC_run_m)) {
        /*
         * DMA is halted, just update the td and go. Note that
         * the dptr will *always* be stopped at this td, so
         * there won't be a linked list left (this has been
         * verified too).
         */
        *td = local_td;
        acacia_start_tx(lp, td);
#ifdef ACACIA_PROC_DEBUG
        lp->dma_halt_cnt++;
        lp->halt_tx_count += lp->tx_count;
#endif
    } else if (readl(&lp->tx_dma_regs->dmadptr) != kseg1_to_phys(td)) {
        /*
         * DMA is running but not on this td presently. There
         * is a race condition right here. The DMA may
         * have moved to this td just after the above 'if'
         * statement, and reads the td from memory just before
         * we update it on the next line. So check if DMA
         * has since moved to this td while we updated it.
         */
        *td = local_td;
        if (readl(&lp->tx_dma_regs->dmadptr) == kseg1_to_phys(td)) {
            dbg(2, "DMA race detected\n");
            acacia_halt_tx(dev);
            *td = local_td;
            acacia_start_tx(lp, td);
#ifdef ACACIA_PROC_DEBUG
            lp->dma_race_cnt++;
            lp->race_tx_count += lp->tx_count;
        } else {
            lp->dma_run_cnt++;
            lp->run_tx_count += lp->tx_count;
#endif
        }
    } else {
        /*
         * DMA is running (or was running) and is presently
         * processing this td, so stop the DMA from what
         * it's doing, update the td and start again.
         */
        acacia_halt_tx(dev);
        *td = local_td;
        acacia_start_tx(lp, td);
#ifdef ACACIA_PROC_DEBUG
        lp->dma_collide_cnt++;
        lp->collide_tx_count += lp->tx_count;
#endif
    }

    dev->trans_start = jiffies;

    /* increment nextIn index */
    lp->tx_next_in = empty_index;
    // increment count and stop queue if full
    if (++lp->tx_count == ACACIA_NUM_TDS) {
        lp->tx_full = 1;
        netif_stop_queue(dev);
        err("Tx Ring now full, queue stopped.\n");
    }

    lp->stats.tx_bytes += length;

    spin_unlock_irqrestore(&lp->lock, flags);

    return 0;
}
Code example #10
/*
 * Initialize the BANYAN ethernet controller.
 */
static int acacia_init(struct net_device *dev)
{
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    int i;

    /* Disable DMA */
    acacia_halt_tx(dev);
    acacia_halt_rx(dev);

    /* reset ethernet logic */
    writel(0, &lp->eth_regs->ethintfc);

    i = readl(&lp->eth_regs->ethintfc);
    for(i = 0xfffff; i>0 ; i--) {
        if (!(readl(&lp->eth_regs->ethintfc) & ETHINTFC_rip_m))
            break;
    }
    /* Enable Ethernet Interface */
    writel(ETHINTFC_en_m, &lp->eth_regs->ethintfc);
    /* Fifo Tx Threshold level */

    /* Accept only packets destined for this Ethernet device address */
    /* cgg - and broadcasts */
    writel(ETHARC_ab_m, &lp->eth_regs->etharc);

    /* Set all Ether station address registers to their initial values */
    writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
    writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

    writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
    writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

    writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
    writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

    writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
    writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);

    /* Input Ready threshold = 16, output Ready threshold = 16 */
#if 0
    writel((0x10 << 16) + 16, &lp->eth_regs->ethfifost);
#endif
    /* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
    writel(ETHMAC2_flc_m | ETHMAC2_pe_m | ETHMAC2_cen_m | ETHMAC2_fd_m,
           &lp->eth_regs->ethmac2);

    /* Back to back inter-packet-gap */
    writel(0x15, &lp->eth_regs->ethipgt);
    /* Non - Back to back inter-packet-gap */
    writel(0x12, &lp->eth_regs->ethipgr);

    /* Management Clock Prescaler Divisor */
    /* cgg - changed from clock independent setting:
       writel(40, &lp->eth_regs->ethmcp); */

    writel(((IDT_BUS_FREQ * 1000 * 1000)/MII_CLOCK+1) & ~1,
           &lp->eth_regs->ethmcp);

    /* Clear Stat. Registers by reading them */
#if 0
    tmp = readl(&lp->eth_regs->ethrbc);
    tmp = readl(&lp->eth_regs->ethrpc);
    tmp = readl(&lp->eth_regs->ethrupc);
    tmp = readl(&lp->eth_regs->ethrfc);
    tmp = readl(&lp->eth_regs->ethtbc);
#endif

    /* don't transmit until fifo contains 48b */
    writel(48, &lp->eth_regs->ethfifott);

    writel(ETHMAC1_re_m, &lp->eth_regs->ethmac1);

    /* Initialize the transmit Descriptors */
    for (i = 0; i < ACACIA_NUM_TDS; i++) {
        lp->td_ring[i].control = DMAD_f_m;
        lp->td_ring[i].devcs = 0;
        lp->td_ring[i].ca = 0;
        lp->td_ring[i].link = 0;
        if (lp->tx_skb[i] != NULL) {
            /* free dangling skb */
            dev_kfree_skb_any(lp->tx_skb[i]);
            lp->tx_skb[i] = NULL;
        }
    }

    lp->tx_next_in = lp->tx_next_out = lp->tx_count = 0;

    /*
     * Initialize the receive descriptors so that they
     * become a circular linked list, ie. let the last
     * descriptor point to the first again.
     */
    for (i=0; i<ACACIA_NUM_RDS; i++) {
        lp->rd_ring[i].control =
            DMAD_iod_m | DMA_COUNT(ACACIA_RBSIZE);
        lp->rd_ring[i].devcs = 0;
        lp->rd_ring[i].ca =
            virt_to_phys(&lp->rba[i * ACACIA_RBSIZE]);
        lp->rd_ring[i].link = kseg1_to_phys(&lp->rd_ring[i+1]);
    }
    /* loop back */
    lp->rd_ring[ACACIA_NUM_RDS-1].link = kseg1_to_phys(&lp->rd_ring[0]);

    lp->rx_next_out = 0;
    writel(0, &lp->rx_dma_regs->dmas);

    /* Start Rx DMA */
    acacia_start_rx(lp, &lp->rd_ring[0]);

    writel(readl(&lp->tx_dma_regs->dmasm) & ~(DMAS_f_m | DMAS_e_m),
           &lp->tx_dma_regs->dmasm);

    writel(readl(&lp->rx_dma_regs->dmasm) & ~(DMAS_d_m | DMAS_h_m | DMAS_e_m),
           &lp->rx_dma_regs->dmasm);

    netif_start_queue(dev);

    return 0;
}
Code example #11
File: korina.c  Project: aircross/ray
static int rc32434_rx(struct net_device *dev, int limit)
{
	struct rc32434_local *lp = netdev_priv(dev);
	volatile DMAD_t rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	u32 pktuncrc_len;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));
	for (count = 0; count < limit; count++) {
		/* initialize the variables used in the operations later in this loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if ( (devcs & ETHRX_rok_m)  ) {
			
				/* must be the (first and) last descriptor then */
				pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
				
				pktuncrc_len = pkt_len - 4;
				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
				
				/* Malloc up new buffer. */					  
				skb_new = netdev_alloc_skb(dev, RC32434_RBSIZE + 2);					             	
				
				if (skb_new != NULL){
					/* Make room */
					skb_put(skb, pktuncrc_len);		    
					
					skb->protocol = eth_type_trans(skb, dev);
					
					/* pass the packet to upper layers */
					netif_receive_skb(skb);
					
					dev->last_rx = jiffies;
					lp->stats.rx_packets++;
					lp->stats.rx_bytes += pktuncrc_len;
					
					if (IS_RCV_MP(devcs))
						lp->stats.multicast++;
					
					/* 16 bit align */						  
					skb_reserve(skb_new, 2);	
					
					skb_new->dev = dev;
					lp->rx_skb[lp->rx_next_done] = skb_new;
				}
				else {
					ERR("no memory, dropping rx packet.\n");
					lp->stats.rx_errors++;		
					lp->stats.rx_dropped++;					
				}
		}			
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if(skb_new) {
			rd->ca = CPHYSADDR(skb_new->data);
		}
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &=  ~(DMAD_cod_m); 	
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}	
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if(dmas & DMAS_h_m) {
		/* Mask off halt and error bits */
		__raw_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp,rd);
	}
	
	return count;
}