Example #1
/* Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 */
u32
_au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;

	/* I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/* We may have multiple callers for a particular channel, but
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed here.
	 */
	dp = ctp->put_ptr;

	/* If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count */

	/* Check flags  */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	dp->dscr_dest0 = virt_to_phys(buf);
	dp->dscr_cmd1 = nbytes;
#if 0
	printk("cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
			dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
			dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1 );
#endif
	/*
	 * There is an erratum on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'd.  It has to do with the snoop logic on the
	 * dcache eviction buffer.  NONCOHERENT_IO is on by default for these
	 * parts.  If it is fixed in the future, these dma_cache_inv calls will
	 * be nothing more than empty macros.  See io.h.
	 */
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	/* Flush the whole descriptor, not just the size of the pointer. */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
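The return convention above (zero when the current descriptor is still owned by the DMA engine, nbytes otherwise) lends itself to a simple refill loop. The sketch below is illustrative only: the channel id is assumed to come from the dbdma channel-allocation API (not shown), and buffer bookkeeping is omitted.

/* Illustrative caller: keep handing empty buffers to a FIFO-to-memory
 * channel until the descriptor ring is full.  'chanid' is assumed to have
 * been obtained from the dbdma channel-allocation API (not shown). */
static int refill_dest_ring(u32 chanid, int bufsize, int count)
{
	int queued = 0;

	while (queued < count) {
		void *buf = kmalloc(bufsize, GFP_ATOMIC);

		if (!buf)
			break;
		/* Request an interrupt when this buffer completes. */
		if (_au1xxx_dbdma_put_dest(chanid, buf, bufsize,
					   DDMA_FLAGS_IE) == 0) {
			/* Ring full: the descriptor is still valid. */
			kfree(buf);
			break;
		}
		queued++;
	}
	return queued;
}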
Example #2
static void
powertecscsi_invalidate(char *addr, long len, fasdmadir_t direction)
{
	if (direction == DMA_OUT)
		dma_cache_wback((unsigned long)addr, (unsigned long)len);
	else
		dma_cache_inv((unsigned long)addr, (unsigned long)len);
}
Example #3
void ocelot_copy_from_cache(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	if (cacheflush) {
		dma_cache_inv(map->map_priv_2, map->size);
		cacheflush = 0;
	}
	memcpy_fromio(to, map->map_priv_1 + from, len);
}
Example #4
File: hwcs.c Project: jhbsz/102
inline unsigned short
ath_hwcs_get_csum_from_desc(ath_hwcs_desc_t *d)
{
#if 0
	dma_cache_inv((unsigned long)ath_hwcs_tx_desc, sizeof(ath_hwcs_desc_t));
	return (unsigned short)((ath_hwcs_tx_desc->info.control.pktSize) & 0xffff);
#else
	return (unsigned short)((uncached_cksum_desc->info.control.pktSize) & 0xffff);
#endif
}
Example #5
static int pcu_dma_tasklet_read(uint32_t virtual_addr_buffer,  L_OFF_T external_physical_device_address, uint32_t pcu_dma_len)
{
    uint32_t  phys_mem;
    int ret = 0;

    unsigned long flags;

    if (KSEGX(virtual_addr_buffer) == KSEG0) {
        dma_cache_inv(virtual_addr_buffer, pcu_dma_len);
        phys_mem = virt_to_phys((void *)virtual_addr_buffer);
    }
    else {
        dma_cache_inv((unsigned long)pcu_dma_buf, pcu_dma_len);
        phys_mem = virt_to_phys((void *)pcu_dma_buf);
    }

    spin_lock_irqsave(&gPcuDmaIsrData.lock, flags);

    gPcuDmaIsrData.flashAddr = __ll_low(external_physical_device_address);
    gPcuDmaIsrData.dramAddr = phys_mem;

    /*
     * Enable L2 Interrupt
     */
    gPcuDmaIsrData.cmd = PCU_DMA_READ;
    gPcuDmaIsrData.opComplete = 0;
    gPcuDmaIsrData.status = 0;

    gPcuDmaIsrData.mask = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK; 
    gPcuDmaIsrData.expect = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;
    gPcuDmaIsrData.error = 0;   /* no DMA error is reported here; check the NAND controller status while processing */
    gPcuDmaIsrData.intr =  PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK; /* write back 1 to clear */

    spin_unlock_irqrestore(&gPcuDmaIsrData.lock, flags);

    PCU_DMA_CLRI(); 
    ISR_enable_irq();
    
    // Issue the command and return: interrupts will be handled from the tasklet
    pcu_dma_issue_command(phys_mem, external_physical_device_address, PCU_DMA_READ, pcu_dma_len);
        
    return ret;
}
Example #6
int rtl_cipher_crypt(struct crypto_cipher *cipher, u8 bEncrypt,
	struct rtl_cipher_ctx *ctx, u8 *src, unsigned int nbytes, u8 *iv, u8 *dst)
{
	unsigned int bsize = crypto_cipher_blocksize(cipher);
	u8 *key = bEncrypt ? ctx->key : ((ctx->mode & 0x20) ? ctx->aes_dekey : ctx->key);
	rtl_ipsecScatter_t scatter[1];
	u32 flag_encrypt = bEncrypt ? 4 : 0;
	int err;

#ifdef CONFIG_RTK_VOIP_DBG
	printk("%s: src=%p, len=%d, blk=%d, key=%p, iv=%p, dst=%p\n", __FUNCTION__,
		src, nbytes, bsize, key, iv, dst);

	rtl_crypto_hexdump((void *) src, nbytes);
	rtl_crypto_hexdump((void *) key, ctx->key_length);
	rtl_crypto_hexdump((void *) iv, bsize);
#endif

	dma_cache_wback((u32) src, nbytes);
	dma_cache_wback((u32) key, ctx->key_length);
	dma_cache_wback((u32) iv, bsize);

	scatter[0].len = (nbytes / bsize) * bsize;
	scatter[0].ptr = (void *) CKSEG1ADDR(src);

	/*
		int32 rtl_ipsecEngine(uint32 modeCrypto, uint32 modeAuth, 
			uint32 cntScatter, rtl_ipsecScatter_t *scatter, void *pCryptResult,
			uint32 lenCryptoKey, void* pCryptoKey, 
			uint32 lenAuthKey, void* pAuthKey, 
			void* pIv, void* pPad, void* pDigest,
			uint32 a2eo, uint32 enl)
	*/
	err = rtl_ipsecEngine(ctx->mode | flag_encrypt,
		-1, 1, scatter,
		(void *) CKSEG1ADDR(dst),
		ctx->key_length, (void *) CKSEG1ADDR(key),
		0, NULL,
		(void *) CKSEG1ADDR(iv), NULL, NULL,
		0, scatter[0].len);

	if (unlikely(err))
		printk("%s: rtl_ipsecEngine failed\n", __FUNCTION__);

	dma_cache_inv((u32) dst, nbytes);
#ifdef CONFIG_RTK_VOIP_DBG
	printk("result:\n");
	rtl_crypto_hexdump(dst, nbytes);
#endif

	// Return the number of unprocessed bytes, even on error (for blkcipher_walk).
	return nbytes - scatter[0].len;
}
Example #7
static int dma_setup(Scsi_Cmnd *cmd, int datainp)
{
	struct WD33C93_hostdata *hdata = (struct WD33C93_hostdata *)cmd->host->hostdata;
	struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) cmd->host->base;
	struct hpc_chunk *hcp = (struct hpc_chunk *) hdata->dma_bounce_buffer;

#ifdef DEBUG_DMA
	printk("dma_setup: datainp<%d> hcp<%p> ",
	       datainp, hcp);
#endif

	hdata->dma_dir = datainp;

	/*
	 * wd33c93 shouldn't pass us bogus dma_setups, but
	 * it does:-( The other wd33c93 drivers deal with
	 * it the same way (which isn't that obvious).
	 * IMHO a better fix would be, not to do these
	 * dma setups in the first place
	 */
	if (cmd->SCp.ptr == NULL)
		return 1;

	fill_hpc_entries (&hcp, cmd->SCp.ptr,cmd->SCp.this_residual);

	/* To make sure, if we trip an HPC bug, that we transfer
	 * every single byte, we tag on an extra zero length dma
	 * descriptor at the end of the chain.
	 */
	hcp->desc.pbuf = 0;
	hcp->desc.cntinfo = (HPCDMA_EOX);

#ifdef DEBUG_DMA
	printk(" HPCGO\n");
#endif

	/* Start up the HPC. */
	hregs->ndptr = PHYSADDR(hdata->dma_bounce_buffer);
	if(datainp) {
		dma_cache_inv((unsigned long) cmd->SCp.ptr, cmd->SCp.this_residual);
		hregs->ctrl = (HPC3_SCTRL_ACTIVE);
	} else {
		dma_cache_wback_inv((unsigned long) cmd->SCp.ptr, cmd->SCp.this_residual);
		hregs->ctrl = (HPC3_SCTRL_ACTIVE | HPC3_SCTRL_DIR);
	}

	return 0;
}
Example #8
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
Example #9
/*
 * streaming DMA Mapping API...
 * CPU accesses page via normal paddr, thus needs to explicitly made
 * consistent before each use
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}
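Each direction thus maps onto exactly one cache operation: write-back before the device reads, invalidate before the CPU reads, and both for bidirectional buffers. A minimal sketch of a streaming-map helper built on _dma_cache_sync() follows; the helper name and signature are illustrative, not part of the code above.

/* Illustrative only: make a kernel-virtual buffer consistent for the
 * device and hand back its bus address.  Built on _dma_cache_sync()
 * from the example above; the name is hypothetical. */
static dma_addr_t example_map_single(void *cpu_addr, size_t size,
				     enum dma_data_direction dir)
{
	phys_addr_t paddr = virt_to_phys(cpu_addr);

	_dma_cache_sync(paddr, size, dir);
	return (dma_addr_t)paddr;
}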
Example #10
static int ar2313_allocate_descriptors(struct net_device *dev)
{
    struct ar2313_private *sp = dev->priv;
    int size;
    int j;
    ar2313_descr_t *space;

    if (sp->rx_ring != NULL) {
        printk("%s: already done.\n", __FUNCTION__);
        return 0;
    }

    size =
        (sizeof(ar2313_descr_t) * (AR2313_DESCR_ENTRIES * AR2313_QUEUES));
    space = kmalloc(size, GFP_KERNEL);
    if (space == NULL)
        return 1;

    /* invalidate caches */
    dma_cache_inv((unsigned int) space, size);

    /* now convert pointer to KSEG1 */
    space = (ar2313_descr_t *) KSEG1ADDR(space);

    memset((void *) space, 0, size);

    sp->rx_ring = space;
    space += AR2313_DESCR_ENTRIES;

    sp->tx_ring = space;
    space += AR2313_DESCR_ENTRIES;

    /* Initialize the transmit Descriptors */
    for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
        ar2313_descr_t *td = &sp->tx_ring[j];
        td->status = 0;
        td->devcs = DMA_TX1_CHAINED;
        td->addr = 0;
        td->descr =
            virt_to_phys(&sp->tx_ring[(j + 1) & (AR2313_DESCR_ENTRIES - 1)]);
    }

    return 0;
}
Example #11
/*
  DMA memory allocation, derived from pci_alloc_consistent.
*/
static void *dmaalloc(size_t size, dma_addr_t * dma_handle)
{
	void *ret;

	ret =
	    (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA,
				      get_order(size));

	if (ret != NULL) {
		dma_cache_inv((unsigned long) ret, size);
		if (dma_handle != NULL)
			*dma_handle = virt_to_phys(ret);

		/* bump virtual address up to non-cached area */
		ret = (void *) KSEG1ADDR(ret);
	}

	return ret;
}
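Because dmaalloc() returns a KSEG1 (uncached) alias, a matching free routine has to convert back to the cached address that __get_free_pages() originally returned before releasing the pages. A sketch of such a counterpart is shown below; the actual driver may name or structure it differently.

/* Sketch of the matching free routine (the driver's real counterpart,
 * if any, may differ): undo the KSEG1 aliasing done by dmaalloc() and
 * release the pages. */
static void dmafree(void *vaddr, size_t size)
{
	if (vaddr != NULL)
		free_pages((unsigned long) KSEG0ADDR(vaddr), get_order(size));
}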
Example #12
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}
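Together with arch_sync_dma_for_cpu() from example #8, this hook forms the two halves of a non-coherent streaming mapping; in mainline these are called by the generic DMA mapping code rather than by drivers directly. The sketch below only illustrates the ordering around a device-to-memory transfer.

/* Ordering sketch only: the mapping core normally performs these calls
 * on behalf of dma_map_single()/dma_unmap_single(). */
void example_rx_transfer(struct device *dev, phys_addr_t paddr, size_t len)
{
	/* Before the device writes the buffer: drop stale cache lines. */
	arch_sync_dma_for_device(dev, paddr, len, DMA_FROM_DEVICE);

	/* ... program the controller and wait for completion here ... */

	/* Before the CPU reads the DMA'd data: invalidate again in case
	 * of speculative prefetches while the transfer was in flight. */
	arch_sync_dma_for_cpu(dev, paddr, len, DMA_FROM_DEVICE);
}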
Example #13
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
Example #14
static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /*  allocate memory including trailer and padding   */
    skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /*  must be burst length alignment and reserve two more bytes for MAC address alignment  */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /*  put the skb pointer in the reserved area "skb->data - 4"    */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /*  write back and invalidate cache    */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /*  invalidate cache    */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}
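The interesting trick here is that the sk_buff pointer is stored in the word just below skb->data and written back to RAM, so the RX-completion path can recover the sk_buff from nothing more than the buffer address carried in the descriptor. A hedged sketch of that reverse lookup follows; descriptor handling and validation are omitted.

/* Illustrative counterpart on the RX-completion side: recover the
 * sk_buff pointer that alloc_skb_rx() stored just below skb->data.
 * 'buf_phys' is the buffer address taken from the RX descriptor. */
static struct sk_buff *skb_from_rx_buffer(unsigned long buf_phys)
{
    unsigned char *data = phys_to_virt(buf_phys);

    return *((struct sk_buff **) data - 1);
}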
Example #15
int
dma_device_read (struct dma_device_info *dma_dev, u8 ** dataptr, void **opt)
{
	u8 *buf;
	int len;
	int byte_offset = 0;
	void *p = NULL;

	_dma_channel_info *pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];

	struct rx_desc *rx_desc_p;

	/*get the rx data first */
	rx_desc_p = (struct rx_desc *) pCh->desc_base + pCh->curr_desc;
	if (!(rx_desc_p->status.field.OWN == CPU_OWN
	      && rx_desc_p->status.field.C)) {
		return 0;
	}
	buf = (u8 *) __va (rx_desc_p->Data_Pointer);
	*(u32 *) dataptr = (u32) buf;
	len = rx_desc_p->status.field.data_length;
#ifndef CONFIG_MIPS_UNCACHED
	/* 20/12/2006 tc.chen: moved to the buffer_alloc function
	dma_cache_inv ((unsigned long) buf, len);
	*/
#endif //CONFIG_MIPS_UNCACHED
	if (opt) {
		*(int *) opt = (int) pCh->opt[pCh->curr_desc];
	}

	/*replace with a new allocated buffer */
	buf = dma_dev->buffer_alloc (pCh->packet_size, &byte_offset, &p);
	if (buf) {
#ifndef CONFIG_MIPS_UNCACHED
    		/* tc.chen: invalidate cache    */
    		dma_cache_inv ((unsigned long) buf,
               			pCh->packet_size);
#endif

		pCh->opt[pCh->curr_desc] = p;

		wmb ();
		rx_desc_p->Data_Pointer = (u32) CPHYSADDR ((u32) buf);
#if 0
		wmb ();
		if ((rx_desc_p->Data_Pointer & 0x1F) == 0)
			printk ("wild dma: rx_desc_p = %08X, rx_desc_p->Data_Pointer = %08X, buf = %08X\n", (u32) rx_desc_p, (u32) rx_desc_p->Data_Pointer, (u32) CPHYSADDR ((u32) buf));
#endif
		rx_desc_p->status.word = (DMA_OWN << 31)
			| ((byte_offset) << 23)
			| pCh->packet_size;

		wmb ();
	}
	else {
		*(u32 *) dataptr = 0;
		if (opt)
			*(int *) opt = 0;
		len = 0;
	}

	/*increase the curr_desc pointer */
	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;
	/*return the length of the received packet */
	return len;
}
Example #16
int
dma_device_register (_dma_device_info * dev)
{
	int result = IFX_SUCCESS;
	int i, j;
	int chan_no = 0;
	u8 *buffer;
	int byte_offset;
	int flag;
	_dma_device_info *pDev;
	_dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;
#if 0
	if (strcmp (dev->device_name, "MCTRL0") == 0 || strcmp (dev->device_name, "MCTRL1") == 0) {	/*select the port */
		*DANUBE_DMA_PS = 4;
		/*set port parameters */
		*DANUBE_DMA_PCTRL |= 1 << 16;	/*flush memcopy */
	}
#endif
	for (i = 0; i < dev->max_tx_chan_num; i++) {
		pCh = dev->tx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON) {
			chan_no = (int) (pCh - dma_chan);
			for (j = 0; j < pCh->desc_len; j++) {
				tx_desc_p =
					(struct tx_desc *) pCh->desc_base + j;
				memset (tx_desc_p, 0,
					sizeof (struct tx_desc));
			}
			local_irq_save (flag);
			*DANUBE_DMA_CS = chan_no;
#if defined(ENABLE_DANUBE_ETHERNET_D2) && ENABLE_DANUBE_ETHERNET_D2
			/*check if the descriptor base is changed */
			if (*DANUBE_DMA_CDBA !=
			    (u32) CPHYSADDR (pCh->desc_base))
				*DANUBE_DMA_CDBA =
					(u32) CPHYSADDR (pCh->desc_base);
#endif
			/*check if the descriptor length is changed */
			if (*DANUBE_DMA_CDLEN != pCh->desc_len)
				*DANUBE_DMA_CDLEN = pCh->desc_len;

			*DANUBE_DMA_CCTRL &= ~1;
			*DANUBE_DMA_CCTRL |= 2;
			while (*DANUBE_DMA_CCTRL & 2) {
			};
			//disable_danube_irq(pCh->irq);
			//*DANUBE_DMA_CIE=0x0a;
			*DANUBE_DMA_IRNEN |= 1 << chan_no;
			*DANUBE_DMA_CCTRL = 0x30100;	/*reset and enable channel,enable channel later */
			local_irq_restore (flag);
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++) {
		pCh = dev->rx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON) {
			chan_no = (int) (pCh - dma_chan);

			for (j = 0; j < pCh->desc_len; j++) {
				rx_desc_p =
					(struct rx_desc *) pCh->desc_base + j;
				pDev = (_dma_device_info *) (pCh->dma_dev);
				buffer = pDev->buffer_alloc (pCh->packet_size,
							     &byte_offset,
							     (void *) &pCh->opt[j]);
				if (!buffer)
					break;
#ifndef CONFIG_MIPS_UNCACHED
    				/* tc.chen: invalidate cache    */
    				dma_cache_inv ((unsigned long) buffer,
                  			pCh->packet_size);
#endif

				rx_desc_p->Data_Pointer =
					(u32) CPHYSADDR ((u32) buffer);
				rx_desc_p->status.word = 0;
				rx_desc_p->status.field.byte_offset =
					byte_offset;
				rx_desc_p->status.field.OWN = DMA_OWN;
				rx_desc_p->status.field.data_length =
					pCh->packet_size;
			}

			local_irq_save (flag);
			*DANUBE_DMA_CS = chan_no;
#if defined(ENABLE_DANUBE_ETHERNET_D2) && ENABLE_DANUBE_ETHERNET_D2
			/*check if the descriptor base is changed */
			if (*DANUBE_DMA_CDBA !=
			    (u32) CPHYSADDR (pCh->desc_base))
				*DANUBE_DMA_CDBA =
					(u32) CPHYSADDR (pCh->desc_base);
#endif
			/*check if the descriptor length is changed */
			if (*DANUBE_DMA_CDLEN != pCh->desc_len)
				*DANUBE_DMA_CDLEN = pCh->desc_len;
			*DANUBE_DMA_CCTRL &= ~1;
			*DANUBE_DMA_CCTRL |= 2;
			while (*DANUBE_DMA_CCTRL & 2) {
			};
			*DANUBE_DMA_CIE = 0x0A;	/*fix me, should enable all the interrupts here? */
			*DANUBE_DMA_IRNEN |= 1 << chan_no;
			*DANUBE_DMA_CCTRL = 0x30000;
			local_irq_restore (flag);
			enable_danube_irq (dma_chan[chan_no].irq);
		}
	}
	return result;
}
Example #17
int
danube_dma_init (void)
{
	int result = 0;
	int i;
	//pliu: 2007021201
#ifdef TWEAK_DMA_BUFFER_RING_SIZE
	int cnt=0;
	void ** p = NULL;
#endif
	result = register_chrdev (DMA_MAJOR, "dma-core", &dma_fops);
	if (result) {
		DANUBE_DMA_EMSG ("cannot register device dma-core!\n");
		return result;
	}
	danube_dma_sem =
		(struct semaphore *) kmalloc (sizeof (struct semaphore),
					      GFP_KERNEL);
	init_MUTEX (danube_dma_sem);
	dma_chip_init ();
	map_dma_chan (default_dma_map);
	//pliu: 2007021201
#ifdef TWEAK_DMA_BUFFER_RING_SIZE
	//customize descriptor length per channels
	for(i=0;i<MAX_DMA_CHANNEL_NUM;i++) {
		dma_chan[i].desc_len= DANUBE_DMA_DESCRIPTOR_OFFSET;
		if (  ( i== 6) || (i == 7)  ) {
			//RX/TX channels for Eth0/Eth1
			dma_chan[i].desc_len= DMA_ETH_NUM_DESCRS;
		}
		cnt += dma_chan[i].desc_len;
		p = (void **) kmalloc(dma_chan[i].desc_len * sizeof(void *), GFP_DMA);
		if (p == NULL){
			DANUBE_DMA_EMSG("no memory for desriptor opt\n");
			goto dma_init_no_memory_err_exit;
		}
		dma_chan[i].opt = p;
	}
	g_desc_list=(u64*)kmalloc(cnt * sizeof(u64), GFP_DMA);
	if (g_desc_list == NULL){
		DANUBE_DMA_EMSG("no memory for desriptor\n");
		goto dma_init_no_memory_err_exit;
	}
	g_desc_list_backup = g_desc_list;
	g_desc_list = (u64*)((u32)g_desc_list | 0xA0000000);
	dma_cache_inv(g_desc_list_backup, cnt * sizeof(u64));
	memset(g_desc_list, 0, cnt * sizeof(u64));
	cnt=0;
	for(i=0;i<MAX_DMA_CHANNEL_NUM;i++)
	{
		dma_chan[i].desc_base=(u32)g_desc_list+cnt*sizeof(u64);
		dma_chan[i].curr_desc=0;
		cnt+=dma_chan[i].desc_len;
		select_chan(i);
		*DANUBE_DMA_CDBA=(u32)CPHYSADDR(dma_chan[i].desc_base);
		*DANUBE_DMA_CDLEN=dma_chan[i].desc_len;
	}
#else
//	g_desc_list = (u64 *) (__get_free_page (GFP_DMA));
    g_desc_list=(u64*)kmalloc(DANUBE_DMA_DESCRIPTOR_OFFSET * MAX_DMA_CHANNEL_NUM * sizeof(u64), GFP_DMA);
	if (g_desc_list == NULL) {
		DANUBE_DMA_EMSG ("no memory for desriptor\n");
		return -ENOMEM;
	}
//	dma_cache_inv(g_desc_list, PAGE_SIZE);
    dma_cache_inv(g_desc_list, DANUBE_DMA_DESCRIPTOR_OFFSET * MAX_DMA_CHANNEL_NUM * sizeof(u64));
    g_desc_list_backup = g_desc_list;
//	g_desc_list = KSEG1ADDR(g_desc_list);
    g_desc_list = (u64*)((u32)g_desc_list | 0xA0000000);
//	memset (g_desc_list, 0, PAGE_SIZE);
    memset(g_desc_list, 0, DANUBE_DMA_DESCRIPTOR_OFFSET * MAX_DMA_CHANNEL_NUM * sizeof(u64));
	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) {
		dma_chan[i].desc_base =
			(u32) g_desc_list +
			i * DANUBE_DMA_DESCRIPTOR_OFFSET * 8;
		dma_chan[i].curr_desc = 0;
		dma_chan[i].desc_len = DANUBE_DMA_DESCRIPTOR_OFFSET;
		select_chan (i);
		*DANUBE_DMA_CDBA = (u32) CPHYSADDR (dma_chan[i].desc_base);
		*DANUBE_DMA_CDLEN = dma_chan[i].desc_len;
	}
#endif

	g_danube_dma_dir = proc_mkdir ("danube_dma", NULL);

	create_proc_read_entry ("dma_register",
				0,
				g_danube_dma_dir,
				dma_register_proc_read, NULL);

	create_proc_read_entry ("g_desc_list",
				0,
				g_danube_dma_dir, desc_list_proc_read, NULL);

	create_proc_read_entry ("channel_weight",
				0,
				g_danube_dma_dir,
				channel_weight_proc_read, NULL);
    proc_file_create();
	return 0;
	//pliu: 2007021201
#ifdef TWEAK_DMA_BUFFER_RING_SIZE
dma_init_no_memory_err_exit:
	for (i=0;i<MAX_DMA_CHANNEL_NUM;i++){
		if (dma_chan[i].opt){
			kfree(dma_chan[i].opt);
		}
	}
	return -ENOMEM;
#endif
}
Example #18
#ifdef CONFIG_IDT_USE_NAPI
static int rc32434_poll(struct net_device *rx_data_dev, int *budget)
#else
static void rc32434_rx_tasklet(unsigned long rx_data_dev)
#endif
{
	struct net_device *dev = (struct net_device *)rx_data_dev;	
	struct rc32434_local* lp = netdev_priv(dev);
	volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8* pkt_buf;
	u32 devcs, count, pkt_len, pktuncrc_len;
	volatile u32 dmas;
#ifdef CONFIG_IDT_USE_NAPI
	u32 received = 0;
	int rx_work_limit = min(*budget,dev->quota);
#else
	unsigned long 	flags;
	spin_lock_irqsave(&lp->lock, flags);
#endif

	dma_cache_inv((u32)rd, sizeof(*rd));
	while ( (count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
#ifdef CONFIG_IDT_USE_NAPI
		if (--rx_work_limit < 0)
			break;
#endif
		/* init the var. used for the later operations within the while loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if (count < 64) {
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;			
		}
		else if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if ( (devcs & ETHRX_rok_m)  ) {
			
			{
				/* must be the (first and) last descriptor then */
				pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
				
				pktuncrc_len = pkt_len - 4;
				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
				
				/* Malloc up new buffer. */					  
				skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);					             	
				
				if (skb_new != NULL){
					/* Make room */
					skb_put(skb, pktuncrc_len);		    
					
					skb->protocol = eth_type_trans(skb, dev);
					
					/* pass the packet to upper layers */
#ifdef CONFIG_IDT_USE_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
					
					dev->last_rx = jiffies;
					lp->stats.rx_packets++;
					lp->stats.rx_bytes += pktuncrc_len;
					
					if (IS_RCV_MP(devcs))
						lp->stats.multicast++;
					
					/* 16 bit align */						  
					skb_reserve(skb_new, 2);	
					
					skb_new->dev = dev;
					lp->rx_skb[lp->rx_next_done] = skb_new;
				}
				else {
					ERR("no memory, dropping rx packet.\n");
					lp->stats.rx_errors++;		
					lp->stats.rx_dropped++;					
				}
			}
			
		}			
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if(skb_new)
			rd->ca = CPHYSADDR(skb_new->data); 
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &=  ~(DMAD_cod_m); 	
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}	
#ifdef CONFIG_IDT_USE_NAPI
        dev->quota -= received;
        *budget -= received;
        if(rx_work_limit < 0)
                goto not_done;
#endif
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if(dmas & DMAS_h_m) {
		__raw_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp,rd);
	}
	
#ifdef CONFIG_IDT_USE_NAPI
	netif_rx_complete(dev);
#endif
	/* Enable D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m |DMASM_e_m), &lp->rx_dma_regs->dmasm); 
#ifdef CONFIG_IDT_USE_NAPI
	return 0;
 not_done:
	return 1;
#else
	spin_unlock_irqrestore(&lp->lock, flags);
	return;
#endif

	
}	
Example #19
/* transmit packet */
static int rc32434_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct rc32434_local		*lp = (struct rc32434_local *)dev->priv;
	unsigned long 			flags;
	u32					length;
	DMAD_t				td;
	
	
	spin_lock_irqsave(&lp->lock, flags);
	
	td = &lp->td_ring[lp->tx_chain_tail];
	
	/* stop queue when full, drop pkts if queue already full */
	if(lp->tx_count >= (RC32434_NUM_TDS - 2)) {
		lp->tx_full = 1;
		
		if(lp->tx_count == (RC32434_NUM_TDS - 2)) {
			netif_stop_queue(dev);
		}
		else {
			lp->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);
			return 1;
		}	   
	}	 
	
	lp->tx_count ++;
	
	lp->tx_skb[lp->tx_chain_tail] = skb;
	
	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);
	
	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	
	if(__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if( lp->tx_chain_status == empty ) {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /*  Update tail      */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /*   Move tail       */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR    */
			lp->tx_chain_head = lp->tx_chain_tail;                                                  /* Move head to tail */
		}
		else {
			td->control = DMA_COUNT(length) |DMAD_cof_m|DMAD_iof_m;                                 /* Update tail */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &=  ~(DMAD_cof_m);          /* Link to prev */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link =  CPHYSADDR(td);              /* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR */
			lp->tx_chain_head = lp->tx_chain_tail;                                                  /* Move head to tail */
			lp->tx_chain_status = empty;
		}
	}
	else {
		if( lp->tx_chain_status == empty ) {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /* Update tail */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
			lp->tx_chain_status = filled;
		}
		else {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /* Update tail */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &=  ~(DMAD_cof_m);          /* Link to prev */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link =  CPHYSADDR(td);              /* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));
	
	dev->trans_start = jiffies;				
	
	spin_unlock_irqrestore(&lp->lock, flags);
	
	return 0;
}
Example #20
static int rc32434_probe(struct platform_device *pdev)
{
	struct korina_device *bif = (struct korina_device *) pdev->dev.platform_data;
	struct rc32434_local *lp = NULL;
	struct net_device *dev = NULL;
	struct resource *r;
	int i, retval,err;
	
	dev = alloc_etherdev(sizeof(struct rc32434_local));
	if(!dev) {
		ERR("Korina_eth: alloc_etherdev failed\n");
		return -1;
	}

	platform_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	bif->dev = dev;
	
	memcpy(dev->dev_addr, bif->mac, 6);

	/* Initialize the device structure. */
	if (dev->priv == NULL) {
		lp = (struct rc32434_local *)kmalloc(sizeof(*lp), GFP_KERNEL);
		memset(lp, 0, sizeof(struct rc32434_local));
	} 
	else {
		lp = (struct rc32434_local *)dev->priv;
	}
	
	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
	dev->base_addr = r->start;
	lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
	if (!lp->eth_regs) {
		ERR("Can't remap eth registers\n");
		retval = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
	lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
	if (!lp->rx_dma_regs) {
		ERR("Can't remap Rx DMA registers\n");
		retval = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
	lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
	if (!lp->tx_dma_regs) {
		ERR("Can't remap Tx DMA registers\n");
		retval = -ENXIO;
		goto probe_err_out;
	}
	
#ifdef RC32434_PROC_DEBUG
	lp->ps = create_proc_read_entry (bif->name, 0, proc_net,
					 rc32434_read_proc, dev);
#endif
	
	lp->td_ring =	(DMAD_t)kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
	if (!lp->td_ring) {
		ERR("Can't allocate descriptors\n");
		retval = -ENOMEM;
		goto probe_err_out;
	}
	
	dma_cache_inv((unsigned long)(lp->td_ring), TD_RING_SIZE + RD_RING_SIZE);
	
	/* now convert TD_RING pointer to KSEG1 */
	lp->td_ring = (DMAD_t )KSEG1ADDR(lp->td_ring);
	lp->rd_ring = &lp->td_ring[RC32434_NUM_TDS];
	
	
	spin_lock_init(&lp->lock);
	
	/* just use the rx dma irq */
	dev->irq = lp->rx_irq;
	
	dev->priv = lp;
	
	dev->open = rc32434_open;
	dev->stop = rc32434_close;
	dev->hard_start_xmit = rc32434_send_packet;
	dev->get_stats	= rc32434_get_stats;
	dev->set_multicast_list = &rc32434_multicast_list;
	dev->tx_timeout = rc32434_tx_timeout;
	dev->watchdog_timeo = RC32434_TX_TIMEOUT;

#ifdef CONFIG_IDT_USE_NAPI
	dev->poll = rc32434_poll;
	dev->weight = 64;
	printk("Using NAPI with weight %d\n",dev->weight);
#else
	lp->rx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
	tasklet_init(lp->rx_tasklet, rc32434_rx_tasklet, (unsigned long)dev);
#endif
	lp->tx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
	tasklet_init(lp->tx_tasklet, rc32434_tx_tasklet, (unsigned long)dev);
	
	if ((err = register_netdev(dev))) {
		printk(KERN_ERR "rc32434 ethernet. Cannot register net device %d\n", err);
		free_netdev(dev);
		retval = -EINVAL;
		goto probe_err_out;
	}
	
	INFO("Rx IRQ %d, Tx IRQ %d, ", lp->rx_irq, lp->tx_irq);
	for (i = 0; i < 6; i++) {
		printk("%2.2x", dev->dev_addr[i]);
		if (i<5)
			printk(":");
	}
	printk("\n");
	
	return 0;
	
 probe_err_out:
	rc32434_cleanup_module();
	ERR(" failed.  Returns %d\n", retval);
	return retval;
	
}
Example #21
/* transmit packet */
static int acacia_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t td;
    struct DMAD_s local_td;
    unsigned long flags;
    int tx_next_in, empty_index;
    u32 laddr, length;

    spin_lock_irqsave(&lp->lock, flags);

    if (lp->tx_count >= ACACIA_NUM_TDS) {
        err("Tx ring full, packet dropped\n");
        lp->tx_full = 1;
        lp->stats.tx_dropped++;
        spin_unlock_irqrestore(&lp->lock, flags);
        return 1;
    }

    tx_next_in = lp->tx_next_in;
    td = &lp->td_ring[tx_next_in];
    empty_index = (tx_next_in + 1) & ACACIA_TDS_MASK;

    if (!IS_DMA_USED(td->control)) {
        err("%s: device owns descriptor, i/f reset\n", __func__);
        lp->stats.tx_errors++;
        lp->stats.tx_dropped++;
        acacia_restart(dev);     /* Restart interface */
        spin_unlock_irqrestore(&lp->lock, flags);
        return 1;
    }

    laddr = virt_to_phys(skb->data);
    /* make sure the payload gets written back to memory before the device reads it */
    dma_cache_wback((unsigned long)skb->data, skb->len);

    if (lp->tx_skb[tx_next_in] != NULL)
        dev_kfree_skb_any(lp->tx_skb[tx_next_in]);
    lp->tx_skb[tx_next_in] = skb;

    length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

    /*
     * Setup the transmit descriptor.
     */
    local_td.devcs = ETHTX_fd_m | ETHTX_ld_m;
    local_td.ca = laddr;
    local_td.control = DMAD_iod_m | DMAD_iof_m | DMA_COUNT(length);
    local_td.link = kseg1_to_phys(&lp->td_ring[empty_index]);

    if (!(readl(&lp->tx_dma_regs->dmac) & DMAC_run_m)) {
        /*
         * DMA is halted, just update the td and go. Note that
         * the dptr will *always* be stopped at this td, so
         * there won't be a linked list left (this has been
         * verified too).
         */
        *td = local_td;
        acacia_start_tx(lp, td);
#ifdef ACACIA_PROC_DEBUG
        lp->dma_halt_cnt++;
        lp->halt_tx_count += lp->tx_count;
#endif
    } else if (readl(&lp->tx_dma_regs->dmadptr) != kseg1_to_phys(td)) {
        /*
         * DMA is running but not on this td presently. There
         * is a race condition right here. The DMA may
         * have moved to this td just after the above 'if'
         * statement, and reads the td from memory just before
         * we update it on the next line. So check if DMA
         * has since moved to this td while we updated it.
         */
        *td = local_td;
        if (readl(&lp->tx_dma_regs->dmadptr) == kseg1_to_phys(td)) {
            dbg(2, "DMA race detected\n");
            acacia_halt_tx(dev);
            *td = local_td;
            acacia_start_tx(lp, td);
#ifdef ACACIA_PROC_DEBUG
            lp->dma_race_cnt++;
            lp->race_tx_count += lp->tx_count;
        } else {
            lp->dma_run_cnt++;
            lp->run_tx_count += lp->tx_count;
#endif
        }
    } else {
        /*
         * DMA is running (or was running) and is presently
         * processing this td, so stop the DMA from what
         * it's doing, update the td and start again.
         */
        acacia_halt_tx(dev);
        *td = local_td;
        acacia_start_tx(lp, td);
#ifdef ACACIA_PROC_DEBUG
        lp->dma_collide_cnt++;
        lp->collide_tx_count += lp->tx_count;
#endif
    }

    dev->trans_start = jiffies;

    /* increment nextIn index */
    lp->tx_next_in = empty_index;
    // increment count and stop queue if full
    if (++lp->tx_count == ACACIA_NUM_TDS) {
        lp->tx_full = 1;
        netif_stop_queue(dev);
        err("Tx Ring now full, queue stopped.\n");
    }

    lp->stats.tx_bytes += length;

    spin_unlock_irqrestore(&lp->lock, flags);

    return 0;
}
Example #22
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_over_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);

			if (!skb_new)
				break;
			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			/* 16 bit align */
			skb_reserve(skb_new, 2);

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		rd->devcs = 0;

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}
Example #23
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
Example #24
/*
 * dma ring allocation is done here
 */
static int enet_dma_init(struct tangox_enet_priv *priv)
{
	unsigned int size;
	int i, rx_order, tx_order;
	
	/*
	 * allocate rx descriptor list & rx buffers
	 */
	size = RX_DESC_COUNT * sizeof (struct enet_desc);
	for (rx_order = 0; (PAGE_SIZE << rx_order) < size; rx_order++);

	if (!(priv->rx_descs_cached = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, rx_order)))
		return -ENOMEM;
	dma_cache_wback_inv((unsigned long)priv->rx_descs_cached, size);
	priv->rx_descs = (volatile struct enet_desc *)
		CACHE_TO_NONCACHE((unsigned long)priv->rx_descs_cached);

	/*
	 * initialize all rx descs
	 */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		volatile struct enet_desc *rx;
		struct sk_buff *skb;

		rx = &priv->rx_descs[i];
		rx->config = RX_BUF_SIZE | DESC_BTS(2) | DESC_EOF/* | DESC_ID*/;

		skb = dev_alloc_skb(RX_BUF_SIZE + SKB_RESERVE_SIZE);
		if (!skb)
			return -ENOMEM;
		
		skb_reserve(skb, SKB_RESERVE_SIZE);
		*((volatile unsigned long *)KSEG1ADDR(&(priv->rx_report[i]))) = 0; 
		rx->s_addr = PHYSADDR((void *)skb->data);
		rx->r_addr = PHYSADDR((void *)&priv->rx_report[i]);
		rx->n_addr = PHYSADDR((void *)&priv->rx_descs[i+1]);
		if (i == (RX_DESC_COUNT - 1)) {
			rx->n_addr = PHYSADDR((void *)&priv->rx_descs[0]);
			rx->config |= DESC_EOC ;
			priv->rx_eoc = i;
		}
#ifdef ETH_DEBUG
		DBG("rx[%d]=0x%08x\n", i, (unsigned int)rx);
		DBG("  s_addr=0x%08x\n", (unsigned int)rx->s_addr);
		DBG("  n_addr=0x%08x\n", (unsigned int)rx->n_addr);
		DBG("  r_addr=0x%08x\n", (unsigned int)rx->r_addr);
		DBG("  config=0x%08x\n", (unsigned int)rx->config);
#endif
		dma_cache_inv((unsigned long)skb->data, RX_BUF_SIZE);
		priv->rx_skbs[i] = skb;
	}
	priv->last_rx_desc = 0;

	/*
	 * allocate tx descriptor list
	 *
	 * We allocate  only the descriptor list and  prepare them for
	 * further use. When tx is needed, we will set the right flags
	 * and kick the dma.
	 */
	size = TX_DESC_COUNT * sizeof (struct enet_desc);
	for (tx_order = 0; (PAGE_SIZE << tx_order) < size; tx_order++);

	if (!(priv->tx_descs_cached = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, tx_order))) {
		free_pages((u32)priv->rx_descs_cached, rx_order);
		return -ENOMEM;
	}
	dma_cache_wback_inv((unsigned long)priv->tx_descs_cached, size);
	priv->tx_descs = (volatile struct enet_desc *)
		CACHE_TO_NONCACHE((unsigned long)priv->tx_descs_cached);

	/*
	 * initialize tx descs
	 */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		volatile struct enet_desc *tx;

		priv->tx_bufs[i] = (unsigned char *)__get_free_page(GFP_KERNEL | GFP_DMA);
		dma_cache_wback_inv((unsigned long)priv->tx_bufs[i], PAGE_SIZE);

		tx = &priv->tx_descs[i];
		*((volatile unsigned long *)KSEG1ADDR(&(priv->tx_report[i]))) = 0; 
		tx->r_addr = PHYSADDR((void *)&priv->tx_report[i]);
		tx->s_addr = 0;
		tx->config = DESC_EOF;
		if (i == (TX_DESC_COUNT - 1)) {
			tx->config |= DESC_EOC;
			tx->n_addr = PHYSADDR((void *)&priv->tx_descs[0]);
			priv->tx_eoc = i;
		}
		//DBG("tx[%d]=0x%08x\n", i, (unsigned int)tx);
	}
	priv->dirty_tx_desc = priv->next_tx_desc = 0;
	priv->pending_tx = -1;
	priv->pending_tx_cnt  = 0;
	priv->reclaim_limit  = -1;
	priv->free_tx_desc_count = TX_DESC_COUNT;

	/*
	 * write rx desc list & tx desc list addresses in registers
	 */
	enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->tx_descs[0]));
	enet_writel(ENET_RX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->rx_descs[0]));
	return 0;
}
Example #25
/*
 * rx poll func, called by network core
 */
static int enet_poll(struct net_device *dev, int *budget)
{
	struct tangox_enet_priv *priv;
	volatile struct enet_desc *rx, *rx1;
	int limit, received;
	unsigned int rx_eoc;

	priv = netdev_priv(dev);
	rx_eoc = priv->rx_eoc;

	/* calculate how many rx packet we are allowed to fetch */
	limit = *budget;
	if (*budget > dev->quota)
		limit = dev->quota;
	received = 0;

	/* process no more than "limit" done rx */
	while (limit > 0) {
		struct sk_buff *skb;
		volatile u32 *r_addr;
		u32 report_cache;
		unsigned int len = 0;
		int pkt_dropped = 0;

		rx = &priv->rx_descs[priv->last_rx_desc];

		/* we need multiple read on this volatile, avoid
		 * memory access at each time */
		r_addr = (volatile u32 *)KSEG1ADDR((u32)&(priv->rx_report[priv->last_rx_desc]));
		report_cache = __raw_readl(r_addr);

#ifdef ETH_DEBUG
		if (rx->config & DESC_EOC) {
			/* should not happen */
			printk("%s i=0x%x rx=0x%x report=0x%x config=0x%x limit=0x%x\n", 
				__FUNCTION__, priv->last_rx_desc, rx, report_cache, rx->config, limit);
		}
#endif
		if (report_cache == 0){ 
			uint32_t *next_r_addr;
			uint32_t next_report_cache;
			next_r_addr = (uint32_t *)KSEG1ADDR((u32)&(priv->rx_report[(priv->last_rx_desc+1)%RX_DESC_COUNT]));
			next_report_cache = __raw_readl(next_r_addr);

			/*check see if next one on error*/
			if(!enet_rx_error(next_report_cache))
				break;
		}

		--limit;

		if (likely((skb = priv->rx_skbs[priv->last_rx_desc]) != NULL)) {

			len = RX_BYTES_TRANSFERRED(report_cache);
			if((report_cache ==0) ||enet_rx_error(report_cache)){

#ifndef ENABLE_MULTICAST
				if (report_cache & RX_MULTICAST_PKT){ 
					DBG("%s RX_MULTICAST_PKT report=0x%x\n", __FUNCTION__, report_cache);				
					priv->stats.rx_length_errors++;
				}
#endif
				if (report_cache & RX_FCS_ERR) {
					DBG("%s RX_FCS_ERR report=0x%x\n", __FUNCTION__, report_cache);				
					priv->stats.rx_crc_errors++;
				}

				if (report_cache & RX_LATE_COLLISION){ 
					DBG("%s RX_LATE_COLLSION report=0x%x\n", __FUNCTION__, report_cache);				
				}

				if (report_cache &  RX_FIFO_OVERRUN ){ 
					DBG("%s RX_FIFO_OVERRUN report=0x%x\n", __FUNCTION__, report_cache);				
				}

				if (report_cache & RX_RUNT_PKT) {
					DBG("%s RX_RUNT_PKT report=0x%x\n", __FUNCTION__, report_cache);				
				}

				if (report_cache & (RX_FRAME_LEN_ERROR | RX_LENGTH_ERR) ||
				     len > RX_BUF_SIZE) {
					priv->stats.rx_length_errors++;
				}

				priv->stats.rx_errors++;
				pkt_dropped = 1;
				goto done_checking;

			} else {

				/* ok, seems  valid, adjust skb  proto and len
				 * and give it to kernel */
				skb->dev = dev;
				skb_put(skb, len);
				skb->protocol = eth_type_trans(skb, dev);
				netif_receive_skb(skb);
#ifdef ETH_DEBUG
				if(len > 0){
					int i;
					DBG("-----received data------\n");
					for (i=0; i< len; i++){
						if(i%16==0 && i>0)
							DBG("\n");
						DBG("%02x ", skb->data[i]);					
					}
					DBG("\n--------------------------\n");
				}
#endif
			}
done_checking:
			rx_eoc = priv->last_rx_desc;

			if (pkt_dropped)
				goto rearm;

			priv->stats.rx_packets++;
			priv->stats.rx_bytes += len;
			dev->last_rx = jiffies;
			priv->rx_skbs[priv->last_rx_desc] = NULL;
			/* we will re-alloc an skb for this slot */
		}

		if (unlikely((skb = dev_alloc_skb(RX_BUF_SIZE + SKB_RESERVE_SIZE)) == NULL)) {
			printk("%s: failed to allocation sk_buff.\n", priv->name);
			rx->config = DESC_BTS(2) | DESC_EOF/* | DESC_ID*/;
			mb();
			break;
		}

		rx->config = RX_BUF_SIZE | DESC_BTS(2) | DESC_EOF/* | DESC_ID*/;

		skb_reserve(skb, SKB_RESERVE_SIZE);
		rx->s_addr = PHYSADDR((void*)(skb->data));
		dma_cache_inv((unsigned long)skb->data, RX_BUF_SIZE);
		priv->rx_skbs[priv->last_rx_desc] = skb;

rearm:
		/* rearm descriptor */
		__raw_writel(0, r_addr);
		priv->last_rx_desc++;
		priv->last_rx_desc %= RX_DESC_COUNT;
		received++;

	}

	if (received != 0) {
		rx = &priv->rx_descs[rx_eoc];
		rx->config |= DESC_EOC;
		mb();
		rx1 = &priv->rx_descs[priv->rx_eoc];
		rx1->config &= ~DESC_EOC; 
		mb();
		priv->rx_eoc = rx_eoc;
	
		dev->quota -= received;
		*budget -= received;
	}

	enet_start_rx(priv);

	if (limit <= 0) {
		/* stopped early, but there is still work to do */
		return 1;
	}

	netif_rx_complete(dev);
	return 0;
}
Example #26
int acacia_probe(int port_num)
{
    struct acacia_local *lp = NULL;
    struct acacia_if_t *bif = NULL;
    struct net_device *dev = NULL;
    int i, retval;
    bif = &acacia_iflist[port_num];
    if (port_num == 0) {

        request_region(bif->iobase, 0x24C, "ACACIA0");
    }
    else if (port_num == 1)
    {
        request_region(bif->iobase, 0x24C, "ACACIA1");
    }
    /* Allocate a new 'dev' if needed */
    dev = init_etherdev(0, sizeof(struct acacia_local));
    bif->dev = dev;

    if (port_num == 0) {
        info("RC32438 ethernet0 found at 0x%08x\n", bif->iobase);
    }
    else if (port_num == 1)
        info("RC32438 ethernet1 found at 0x%08x\n", bif->iobase);

    /* Fill in the 'dev' fields. */
    dev->base_addr = bif->iobase;
    dev->irq = bif->rx_dma_irq; /* just use the rx dma irq */

    if ((retval = parse_mac_addr(dev, bif->mac_str))) {
        err("%s: MAC address parse failed\n", __func__);
        retval = -EINVAL;
        goto probe1_err_out;
    }

    info("HW Address ");
    for (i = 0; i < 6; i++) {
        printk("%2.2x", dev->dev_addr[i]);
        if (i<5)
            printk(":");
    }
    printk("\n");

    info("Rx IRQ %d, Tx IRQ %d\n", bif->rx_dma_irq, bif->tx_dma_irq);

    /* Initialize the device structure. */
    if (dev->priv == NULL) {
        lp = (struct acacia_local *)kmalloc(sizeof(*lp), GFP_KERNEL);
        memset(lp, 0, sizeof(struct acacia_local));
    } else {
        lp = (struct acacia_local *)dev->priv;
    }

    dev->priv = lp;

    lp->rx_irq = bif->rx_dma_irq;
    lp->tx_irq = bif->tx_dma_irq;
    lp->ovr_irq = bif->rx_ovr_irq;

    lp->eth_regs = ioremap_nocache(bif->iobase,
                                   sizeof(*lp->eth_regs));
    if (!lp->eth_regs) {
        err("Can't remap eth registers\n");
        retval = -ENXIO;
        goto probe1_err_out;
    }
    if (port_num == 0) {
        lp->rx_dma_regs =
            ioremap_nocache(DMA0_PhysicalAddress + 2*DMA_CHAN_OFFSET,
                            sizeof(struct DMA_Chan_s));
        if (!lp->rx_dma_regs) {
            err("Can't remap Rx DMA registers\n");
            retval = -ENXIO;
            goto probe1_err_out;
        }

        lp->tx_dma_regs =
            ioremap_nocache(DMA0_PhysicalAddress  + 3*DMA_CHAN_OFFSET,
                            sizeof(struct DMA_Chan_s));
        if (!lp->tx_dma_regs) {
            err("Can't remap Tx DMA registers\n");
            retval = -ENXIO;
            goto probe1_err_out;
        }
    }
    else if (port_num == 1) {
        lp->rx_dma_regs =
            ioremap_nocache(DMA0_PhysicalAddress  + 4*DMA_CHAN_OFFSET,
                            sizeof(struct DMA_Chan_s));
        if (!lp->rx_dma_regs) {
            err("Can't remap Rx DMA registers\n");
            retval = -ENXIO;
            goto probe1_err_out;
        }

        lp->tx_dma_regs =
            ioremap_nocache(DMA0_PhysicalAddress  + 5*DMA_CHAN_OFFSET,
                            sizeof(struct DMA_Chan_s));
        if (!lp->tx_dma_regs) {
            err("Can't remap Tx DMA registers\n");
            retval = -ENXIO;
            goto probe1_err_out;
        }

    }
    lp->td_ring =
        (DMAD_t)kmalloc(TD_RING_SIZE + RD_RING_SIZE,
                        GFP_KERNEL);
    if (!lp->td_ring) {
        err("Can't allocate descriptors\n");
        retval = -ENOMEM;
        goto probe1_err_out;
    }

    dma_cache_inv((unsigned long)(lp->td_ring),
                  TD_RING_SIZE + RD_RING_SIZE);

    /* now convert TD_RING pointer to KSEG1 */
    lp->td_ring = (DMAD_t )KSEG1ADDR(lp->td_ring);
    lp->rd_ring = &lp->td_ring[ACACIA_NUM_TDS];

    /* allocate receive buffer area */
    /* FIXME, maybe we should use skbs */
    if ((lp->rba = (u8*)kmalloc(ACACIA_NUM_RDS * ACACIA_RBSIZE,
                                GFP_KERNEL)) == NULL) {
        err("couldn't allocate receive buffers\n");
        retval = -ENOMEM;
        goto probe1_err_out;
    } 	 	/* get virtual dma address */

    dma_cache_inv((unsigned long)(lp->rba),
                  ACACIA_NUM_RDS * ACACIA_RBSIZE);

    spin_lock_init(&lp->lock);

    dev->open = acacia_open;
    dev->stop = acacia_close;
    dev->hard_start_xmit = acacia_send_packet;
    dev->get_stats	= acacia_get_stats;
    dev->set_multicast_list = &acacia_multicast_list;
    dev->tx_timeout = acacia_tx_timeout;
    dev->watchdog_timeo = ACACIA_TX_TIMEOUT;

#ifdef ACACIA_PROC_DEBUG
    lp->ps = create_proc_read_entry ("net/rc32438", 0, NULL,
                                     acacia_read_proc, dev);
#endif
    /*
     * clear tally counter
     */

    /* Fill in the fields of the device structure with ethernet values. */
    ether_setup(dev);
    return 0;

probe1_err_out:
    acacia_cleanup_module();
    err("%s failed.  Returns %d\n", __func__, retval);
    return retval;
}
Example #27
static int rc32434_rx(struct net_device *dev, int limit)
{
        struct rc32434_local *lp = netdev_priv(dev);
	volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_done];
        struct sk_buff *skb, *skb_new;
        u8 *pkt_buf;
        u32 devcs, pkt_len, dmas, rx_free_desc;
	u32 pktuncrc_len;
        int count;

	dma_cache_inv((u32)rd, sizeof(*rd));
	for (count = 0; count < limit; count++) {
		/* init the var. used for the later operations within the while loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if ( (devcs & ETHRX_rok_m)  ) {
			
				/* must be the (first and) last descriptor then */
				pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
				
				pktuncrc_len = pkt_len - 4;
				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
				
				/* Malloc up new buffer. */					  
				skb_new = netdev_alloc_skb(dev, RC32434_RBSIZE + 2);					             	
				
				if (skb_new != NULL){
					/* Make room */
					skb_put(skb, pktuncrc_len);		    
					
					skb->protocol = eth_type_trans(skb, dev);
					
					/* pass the packet to upper layers */
					netif_receive_skb(skb);
					
					dev->last_rx = jiffies;
					lp->stats.rx_packets++;
					lp->stats.rx_bytes += pktuncrc_len;
					
					if (IS_RCV_MP(devcs))
						lp->stats.multicast++;
					
					/* 16 bit align */						  
					skb_reserve(skb_new, 2);	
					
					skb_new->dev = dev;
					lp->rx_skb[lp->rx_next_done] = skb_new;
				}
				else {
					ERR("no memory, dropping rx packet.\n");
					lp->stats.rx_errors++;		
					lp->stats.rx_dropped++;					
				}
		}			
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if(skb_new) {
			rd->ca = CPHYSADDR(skb_new->data);
		}
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &=  ~(DMAD_cod_m); 	
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}	
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if(dmas & DMAS_h_m) {
		/* Mask off halt and error bits */
		__raw_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp,rd);
	}
	
	return count;
}
Example #28
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_inv(paddr, size);
}
Example #29
void bcm_cache_inv(uint32 start, uint32 size)
{
#if !(defined(CONFIG_BCM93383) && defined(CONFIG_BCM_LOT1))
    dma_cache_inv(start, size);
#endif
}
Example #30
/*
 * We have a good packet(s), get it/them out of the buffers.
 *
 * cgg - this driver works by creating (once) a circular list of receiver
 *       DMA descriptors that will be used serially by the Banyan.
 *       Because the descriptors are never unlinked from the list _they
 *       are always live_.  We are counting on Linux (and the chosen number
 *	 of buffers) to keep ahead of the hardware; otherwise the same
 *	 descriptor might be used for more than one reception.
 */
static void
acacia_rx(struct net_device *dev)
{
    struct acacia_local* lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_out];
    struct sk_buff *skb;
    u8* pkt_buf;
    u32 devcs;
    u32 count, pkt_len;

    /* cgg - keep going while we have received into more descriptors */

    while (IS_DMA_USED(rd->control)) {

        devcs = rd->devcs;

        pkt_len = RCVPKT_LENGTH(devcs);

        pkt_buf = &lp->rba[lp->rx_next_out * ACACIA_RBSIZE];

        /*
         * cgg - RESET the address pointer later - if we get a second
         * reception it will occur in the remains of the current
         * area of memory - protected by the diminished DMA count.
         */

        /*
         * Due to a bug in banyan processor, the packet length
         * given by devcs field and count field sometimes differ.
         * If that is the case, report Error.
         */
        count = ACACIA_RBSIZE - (u32)DMA_COUNT(rd->control);
        if( count != pkt_len) {
            lp->stats.rx_errors++;
        } else if (count < 64) {
            lp->stats.rx_errors++;
        } else if ((devcs & (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) !=
                   (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) {
            /* cgg - check that this is a whole packet */
            /* WARNING: DMA_FD bit incorrectly set in Acacia
               (errata ref #077) */
            lp->stats.rx_errors++;
            lp->stats.rx_over_errors++;
        } else if (devcs & ETHRX_rok_m) {
            /* must be the (first and) last descriptor then */

            /* Malloc up new buffer. */
            skb = dev_alloc_skb(pkt_len+2);
            if (skb == NULL) {
                err("no memory, dropping rx packet.\n");
                lp->stats.rx_dropped++;
            } else {
                /* else added by cgg - used to fall through! */
                /* invalidate the cache before copying
                   the buffer */
                dma_cache_inv((unsigned long)pkt_buf, pkt_len);

                skb->dev = dev;
                skb_reserve(skb, 2);	/* 16 bit align */
                skb_put(skb, pkt_len);	/* Make room */
                eth_copy_and_sum(skb, pkt_buf, pkt_len, 0);
                skb->protocol = eth_type_trans(skb, dev);
                /* pass the packet to upper layers */
                netif_rx(skb);
                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += pkt_len;

                if (IS_RCV_MP(devcs))
                    lp->stats.multicast++;
            }

        } else {
            /* This should only happen if we enable
               accepting broken packets */
            lp->stats.rx_errors++;

            /* cgg - (re-)added statistics counters */
            if (IS_RCV_CRC_ERR(devcs)) {
                dbg(2, "RX CRC error\n");
                lp->stats.rx_crc_errors++;
            } else {
                if (IS_RCV_LOR_ERR(devcs)) {
                    dbg(2, "RX LOR error\n");
                    lp->stats.rx_length_errors++;
                }

                if (IS_RCV_LE_ERR(devcs)) {
                    dbg(2, "RX LE error\n");
                    lp->stats.rx_length_errors++;
                }
            }

            if (IS_RCV_OVR_ERR(devcs)) {
                /*
                 * The overflow errors are handled through
                 * an interrupt handler.
                 */
                lp->stats.rx_over_errors++;
            }
            /* code violation */
            if (IS_RCV_CV_ERR(devcs)) {
                dbg(2, "RX CV error\n");
                lp->stats.rx_frame_errors++;
            }

            if (IS_RCV_CES_ERR(devcs)) {
                dbg(2, "RX Preamble error\n");
            }
        }


        /* reset descriptor's curr_addr */
        rd->ca = virt_to_phys(pkt_buf);

        /*
         * cgg - clear the bits that let us see whether this
         * descriptor has been used or not & reset reception
         * length.
         */
        rd->control = DMAD_iod_m | DMA_COUNT(ACACIA_RBSIZE);
        rd->devcs = 0;
        lp->rx_next_out = (lp->rx_next_out + 1) & ACACIA_RDS_MASK;
        rd = &lp->rd_ring[lp->rx_next_out];

        /*
         * we'll deal with all possible interrupts up to the last
         * used descriptor - so cancel any interrupts that may have
         * arisen while we've been processing.
         */
        writel(0, &lp->rx_dma_regs->dmas);
    }

    /*
     * If any worth-while packets have been received, dev_rint()
     * has done a mark_bh(NET_BH) for us and will work on them
     * when we get to the bottom-half routine.
     */
}