Example #1
void consistent_sync_page(struct page *page, unsigned long offset,
			  size_t size, int direction)
{
	void *start;

	start = page_address(page) + offset;
	consistent_sync(start, size, direction);
}
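
consistent_sync_page() simply resolves the page's kernel virtual address and delegates to consistent_sync(). A minimal caller sketch, assuming the 2.6-era struct scatterlist layout (page/offset/length fields) and lowmem pages only, so page_address() is valid; the helper name sync_sg_for_device() is illustrative, not a kernel API:

#include <linux/scatterlist.h>

/* Hedged sketch: sync each segment of a lowmem scatterlist before DMA.
 * Assumes the 2.6-era scatterlist layout; sync_sg_for_device() is an
 * illustrative name. */
static void sync_sg_for_device(struct scatterlist *sg, int nents, int direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++)
		consistent_sync_page(sg->page, sg->offset, sg->length, direction);
}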
Example #2
static void setup_blist_entry(struct sk_buff *skb, struct rx_blist_ent *blist_ent_ptr)
{
	/* Invalidate the CPU cache for the buffer, as the MAC is going to
	 * write directly into it. */
	blist_ent_ptr->fd.FDSystem = (unsigned int)skb;
	blist_ent_ptr->bd.BuffData = (char *)__pa(skb->data);
	consistent_sync(skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
	/* Align the IP header on a 16-byte boundary (DMA_CTL is set to skip
	 * 2 bytes). */
	skb_reserve(skb, 2);
	blist_ent_ptr->bd.BuffLength = PKT_BUF_SZ - 2;
	blist_ent_ptr->fd.FDLength = 1;
	blist_ent_ptr->fd.FDCtl = FDCTL_COWNSFD_MSK;
	blist_ent_ptr->bd.BDCtl = BDCTL_COWNSBD_MSK;
}
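
Allocation of the skb happens at the call site; a hedged refill sketch, where the rx_blist array and loop bound are illustrative and only dev_alloc_skb() and setup_blist_entry() are taken as given:

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Hedged sketch: refill an RX buffer list with freshly allocated skbs.
 * The array and bound are illustrative; the driver's real ring handling
 * may differ. */
static int refill_rx_blist(struct rx_blist_ent *rx_blist, int entries)
{
	int i;

	for (i = 0; i < entries; i++) {
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);

		if (!skb)
			return -ENOMEM;	/* caller unwinds the entries queued so far */
		setup_blist_entry(skb, &rx_blist[i]);
	}
	return 0;
}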
Example #3
static void ide_itdm320_outsw(unsigned long port, void *addr, u32 count)
{
	volatile u16 *d_ptr;
	unsigned long dest_addr;

	/* Small transfers are cheaper by PIO than by setting up a DMA. */
	if (count <= SECTOR_WORDS) {
		writesw(port, addr, count);
		return;
	}

	d_ptr = (volatile u16 *)addr;
	dest_addr = virt_to_phys((void *)d_ptr);

	/* If the buffer is only half-word aligned, transfer one word by PIO
	 * so the DMA source starts on a 32-bit boundary. */
	if (((u32)dest_addr) & 0x2) {
		writesw(port, addr, 1);
		dest_addr += 2;
		count -= 2;
		if (!count)
			return;
	}

	if ((count & 15) != 0)
		printk("Warning: word count=%u\n", count);

	/* Flush write data to DRAM before the DMA engine reads it. */
	/* arm926_dma_flush_range((unsigned long)addr, (unsigned long)addr + count); */
	consistent_sync(addr, count, DMA_TO_DEVICE);

	/* Wait for any previous DMA to complete. */
	while (inw(IO_EMIF_DMACTL) & 0x0001)
		;

	outw(0x0830, IO_SDRAM_SDDMASEL);	/* MTC 1-burst DMA */
	outw(0x0003, IO_EMIF_DMAMTCSEL);	/* DMA MTC select: CS3 */
	outw(((port & 0x0FFF0000) >> 16) |
	     (1 << 15), IO_EMIF_MTCADDH);
	outw(port & 0x0000FFFF, IO_EMIF_MTCADDL);
	outw((dest_addr & 0x7FFF0000) >> 16, IO_EMIF_AHBADDH);
	outw(dest_addr & 0x0000FFFF, IO_EMIF_AHBADDL);
	SET_DMA_SIZE(count);

	/* Start the DMA transfer */
	outw(0x0000, IO_EMIF_DMACTL);
	outw(inw(IO_EMIF_DMACTL) | 1, IO_EMIF_DMACTL);
}
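
Two details above are worth calling out: transfers of up to SECTOR_WORDS fall back to PIO because programming the MTC DMA costs more than it saves, and a source that is half-word but not word aligned gets one u16 pushed out by PIO first. The arithmetic of that fixup in isolation (a standalone demonstration, not driver code):

#include <stdio.h>

/* Standalone demonstration of the alignment fixup: if bit 1 of the
 * physical address is set, one 16-bit word goes out by PIO and the DMA
 * start address advances to the next 32-bit boundary. */
int main(void)
{
	unsigned long dest_addr = 0x8000f002UL;	/* sample physical address */

	if (dest_addr & 0x2)
		dest_addr += 2;			/* after the single PIO word */

	printf("DMA source now at %#lx (word aligned)\n", dest_addr);
	return 0;
}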
Example #4
static void ide_itdm320_insw(unsigned long port, void *addr, u32 count)
{
	volatile u16 *d_ptr;
	unsigned long dest_addr;

	/* Small transfers are cheaper by PIO than by setting up a DMA. */
	if (count <= SECTOR_WORDS) {
		readsw(port, addr, count);
		return;
	}

	d_ptr = (volatile u16 *)addr;
	dest_addr = virt_to_phys((void *)d_ptr);

	/* If the buffer is only half-word aligned, transfer one word by PIO
	 * so the DMA destination starts on a 32-bit boundary. */
	if (((u32)dest_addr) & 0x2) {
		readsw(port, addr, 1);
		dest_addr += 2;
		count -= 2;
		if (!count)
			return;
	}

	/* Wait for a pending outsw() DMA, if any. */
	while (inw(IO_EMIF_DMACTL) & 0x0001)
		;

	outw(0x0830, IO_SDRAM_SDDMASEL);	/* MTC 1-burst DMA */
	outw(0x0003, IO_EMIF_DMAMTCSEL);	/* DMA MTC select: CS3 */
	outw(((port & 0x0FFF0000) >> 16) |
	     (1 << 15), IO_EMIF_MTCADDH);
	outw(port & 0x0000FFFF, IO_EMIF_MTCADDL);
	outw((dest_addr & 0x7FFF0000) >> 16, IO_EMIF_AHBADDH);
	outw(dest_addr & 0x0000FFFF, IO_EMIF_AHBADDL);
	SET_DMA_SIZE(count);

	/* Start the DMA transfer (device to memory). */
	outw(0x0002, IO_EMIF_DMACTL);
	outw(inw(IO_EMIF_DMACTL) | 1, IO_EMIF_DMACTL);

	/* Invalidate the cache so the CPU reads the new data from DRAM. */
	/* arm926_dma_inv_range((unsigned long)addr, (unsigned long)addr + count); */
	consistent_sync(addr, count, DMA_FROM_DEVICE);
}
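
The outsw/insw pair uses the two streaming directions of consistent_sync() in the classic way: clean the cache before the device reads memory, invalidate it before the CPU reads what the device wrote. A hedged wrapper sketch making that pairing explicit; the dm320_* names are illustrative:

/* Hedged sketch: the two sync directions used by the pair above. */
static inline void dm320_dma_prep_write(void *buf, size_t len)
{
	/* CPU filled buf: clean (write back) its cache lines so the DMA
	 * engine reads current data from DRAM */
	consistent_sync(buf, len, DMA_TO_DEVICE);
}

static inline void dm320_dma_finish_read(void *buf, size_t len)
{
	/* device filled DRAM: invalidate the cache lines so the CPU does
	 * not read stale data */
	consistent_sync(buf, len, DMA_FROM_DEVICE);
}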
Example #5
/*----------------------------------------------------------------------
* hash_write_entry
*----------------------------------------------------------------------*/
int hash_write_entry(HASH_ENTRY_T *entry, unsigned char *key)
{
	int		i;
	u32		*srcep, *destp, *destp2;

	srcep = (u32 *)key;
	destp2 = destp = (u32 *)&hash_tables[entry->index][0];

	/* copy the key */
	for (i = 0; i < entry->total_dwords; i++, srcep++, destp++)
		*destp = *srcep;

	/* copy the action word */
	srcep = (u32 *)&entry->action;
	*destp++ = *srcep;

	/* copy the parameter block */
	srcep = (u32 *)&entry->param;
	for (i = 0; i < sizeof(ENTRY_PARAM_T) / sizeof(*destp); i++, srcep++, destp++)
		*destp = *srcep;

	/* zero the rest of the entry */
	memset(destp, 0, (HASH_MAX_DWORDS - entry->total_dwords - HASH_ACTION_DWORDS) * sizeof(u32));

	/* write the entry out to DRAM for the hardware to fetch */
	consistent_sync(destp2, (entry->total_dwords + HASH_ACTION_DWORDS) * 4, PCI_DMA_TODEVICE);
	return 0;
}
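
The final consistent_sync() covers entry->total_dwords key words plus HASH_ACTION_DWORDS words; for that to reach the action word and the whole parameter block written just above, HASH_ACTION_DWORDS must equal 1 + sizeof(ENTRY_PARAM_T)/4. A compile-time check of that inferred invariant (an assumption read out of the code, not confirmed by the source):

#include <linux/kernel.h>

/* Hedged sketch: assert the dword accounting hash_write_entry() relies
 * on; if HASH_ACTION_DWORDS were smaller, the sync would miss part of
 * the action/param block just written. */
static inline void hash_entry_layout_check(void)
{
	BUILD_BUG_ON(HASH_ACTION_DWORDS !=
		     1 + sizeof(ENTRY_PARAM_T) / sizeof(u32));
}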
Example #6
static void lubbock_map_inval_cache(struct map_info *map, unsigned long from, ssize_t len)
{
	consistent_sync((char *)map->cached + from, len, DMA_FROM_DEVICE);
}
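
This is an MTD map_info hook: when the flash window also has a cached mapping, reads go through map->cached for speed, and inval_cache is called to discard stale lines after the array contents change. A hedged wiring sketch in the style of the Lubbock map driver; the initialization details are illustrative:

#include <linux/mtd/map.h>

/* Hedged sketch: install the invalidation hook on a cached mapping.
 * Field values and ioremap flavor are illustrative. */
static struct map_info lubbock_map;	/* .phys/.size set up elsewhere */

static void lubbock_map_setup_cache(void)
{
	lubbock_map.cached = ioremap_cached(lubbock_map.phys, lubbock_map.size);
	if (lubbock_map.cached)
		lubbock_map.inval_cache = lubbock_map_inval_cache;
}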
Example #7
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		/*
		 * No need to sync the safe buffer - it was allocated
		 * via the coherent allocators.
		 */
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}
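
In the historical ARM dmabounce code this helper backs the streaming sync entry points: if the address resolves to a bounce buffer, the sync degenerates into a memcpy between the unsafe and safe copies; otherwise it is a plain cache operation via consistent_sync(). A sketch of the wrappers in the dmabounce.c pattern (signatures per the 2.6-era API):

/* Hedged sketch: the 2.6-era dmabounce wrappers around sync_single(). */
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
			     size_t size, enum dma_data_direction dir)
{
	sync_single(dev, dma_addr, size, dir);
}

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction dir)
{
	sync_single(dev, dma_addr, size, dir);
}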
Example #8
static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (!buf) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}
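
The mask arithmetic deserves a worked example. For a 24-bit DMA mask, limit = (mask + 1) & ~mask recovers the power-of-two window size (and overflows to 0 for a full 32-bit mask, disabling the size check), while needs_bounce is nonzero as soon as any byte of [dma_addr, dma_addr + size) has address bits outside the mask. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x00ffffff;		/* 24-bit DMA mask: 16 MiB window */
	unsigned long limit = (mask + 1) & ~mask;	/* 0x01000000 */
	unsigned long dma_addr = 0x00fff000UL, size = 0x2000UL;
	/* last byte is at 0x01000fff: bits above the mask are set */
	int needs_bounce = ((dma_addr | (dma_addr + size - 1)) & ~mask) != 0;

	printf("limit=%#lx needs_bounce=%d\n", limit, needs_bounce);	/* 0x1000000, 1 */
	return 0;
}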
Example #9
File: xor.c Project: cilynx/dd-wrt
int
raid_memchk(unsigned int *p1, unsigned int pattern, unsigned int bytes)
{
	int status=0;
	RAID_DMA_STATUS_T 	dma_status;

	if(bytes > (1<<(SRAM_PAR_SIZE+11))){
		printk("XOR: out of SRAM partition!![0x%x]\n",(unsigned int)bytes);
	}

	/* plant the pattern at a word offset derived from the pattern itself */
	status = ((pattern & 0xFFFF) % bytes) / 4;
	p1[status] = pattern;

	while(tp.status != COMPLETE){
		DPRINTK("XOR yield\n");
		//schedule();
		yield();
	}
	tp.status = RUNNING;

	// flush the cache to memory before H/W XOR touches them
	consistent_sync(p1, bytes, DMA_BIDIRECTIONAL);

	tp.tx_desc = tp.tx_first_desc;
	if((tp.tx_desc->func_ctrl.bits.own == CPU)/*&&(tp.rx_desc->func_ctrl.bits.own == DMA)*/){
		// prepare the tx descriptor
		raid_write_reg(RAID_FCHDMA_CURR_DESC,(unsigned int)tp.tx_desc-tx_desc_virtual_base,0xFFFFFFFF);
		tp.tx_desc->buf_addr = (unsigned int)__pa(p1);		// physical address
		tp.tx_desc->func_ctrl.bits.raid_ctrl_status = 0;
		tp.tx_desc->func_ctrl.bits.buffer_size = bytes;		/* total frame byte count */
		tp.tx_desc->flg_status.bits32 = CMD_CHK;		/* memory CHECK command */
		tp.tx_desc->next_desc_addr.bits.sof_eof = 0x03;		/* only one descriptor */
		tp.tx_desc->func_ctrl.bits.own = DMA;			/* set owner bit */
		tp.tx_desc->next_desc_addr.bits32 = 0x0000000b;
//    	tp.tx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.tx_desc->next_desc_addr.bits32 & 0xFFFFFFF0)+tx_desc_virtual_base);

	} else {
		/* no free tx descriptor */
		printk("XOR: no free tx descriptor\n");
		return -1;
	}

	// change status
	//tp.status = RUNNING;
	status = tp.busy = 1;

	// start tx DMA
	txdma_ctrl.bits.td_start = 1;

	raid_write_reg(RAID_FCHDMA_CTRL, txdma_ctrl.bits32,0x80000000);
//	raid_write_reg(RAID_STRDMA_CTRL, rxdma_ctrl.bits32,0x80000000);

#ifdef SPIN_WAIT
	gemini_xor_isr(2);
#else
	xor_queue_descriptor();
#endif

//	dma_status.bits32 = raid_read_reg(RAID_DMA_STATUS);
//	if (dma_status.bits32 & (1<<15))	{

	if (tp.tx_first_desc->func_ctrl.bits.raid_ctrl_status & 0x2) {
		status = 1;
//		raid_write_reg(RAID_DMA_STATUS,0x00008000,0x00080000);
	} else {
		status = 0;
	}

	tp.tx_desc->next_desc_addr.bits32 = ((unsigned long)tp.tx_first_desc - tx_desc_virtual_base + sizeof(RAID_DESCRIPTOR_T)*1) ;
	tp.status = COMPLETE;
//	tp.rx_desc->func_ctrl.bits.own = DMA;
	return status ;
}
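
The function plants the pattern at one word, hands the buffer to the engine's CHECK command, and reports the engine's verdict from raid_ctrl_status bit 1. A hedged self-test sketch; the return-value reading (nonzero meaning the engine flagged the buffer) is inferred from the status handling above:

/* Hedged sketch: fill a buffer with a pattern, then let the XOR engine
 * check it.  The buffer must satisfy the SRAM-partition size limit
 * tested at the top of raid_memchk(). */
static int raid_selftest(unsigned int *buf, unsigned int bytes, unsigned int pattern)
{
	unsigned int i;

	for (i = 0; i < bytes / sizeof(*buf); i++)
		buf[i] = pattern;

	return raid_memchk(buf, pattern, bytes);
}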
Example #10
File: xor.c Project: cilynx/dd-wrt
void
raid_memcpy(unsigned int *to, unsigned int *from, unsigned int bytes)
{
	int status=0,i;

	if(bytes > (1<<(SRAM_PAR_SIZE+11))){
		printk("XOR: out of SRAM partition!![0x%x]\n",(unsigned int)bytes);
	}

	// flush the cache to memory before H/W XOR touches them
	consistent_sync(to, bytes, DMA_BIDIRECTIONAL);
	consistent_sync(from, bytes, DMA_TO_DEVICE);

	while(tp.status != COMPLETE){
		DPRINTK("XOR yield\n");
		//schedule();
		yield();
	}
	tp.status = RUNNING;

	tp.tx_desc = tp.tx_first_desc;
	tp.rx_desc = tp.rx_first_desc;
	if((tp.tx_desc->func_ctrl.bits.own == CPU)/*&&(tp.rx_desc->func_ctrl.bits.own == DMA)*/){
		// prepare the tx descriptor
		raid_write_reg(RAID_FCHDMA_CURR_DESC,(unsigned int)tp.tx_desc-tx_desc_virtual_base,0xFFFFFFFF);
		tp.tx_desc->buf_addr = (unsigned int)__pa(from);	/* physical address */
		tp.tx_desc->func_ctrl.bits.buffer_size = bytes;		/* total frame byte count */
		tp.tx_desc->flg_status.bits32 = CMD_CPY;		/* memory COPY command */
		tp.tx_desc->next_desc_addr.bits.sof_eof = 0x03;		/* only one descriptor */
		tp.tx_desc->func_ctrl.bits.own = DMA;			/* set owner bit */
		tp.tx_desc->next_desc_addr.bits32 = 0x0000000b;
//    	tp.tx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.tx_desc->next_desc_addr.bits32 & 0xFFFFFFF0)+tx_desc_virtual_base);

		// prepare the rx descriptor
		raid_write_reg(RAID_STRDMA_CURR_DESC, (unsigned int)tp.rx_desc - rx_desc_virtual_base, 0xFFFFFFFF);
		tp.rx_desc->buf_addr = (unsigned int)__pa(to);
		tp.rx_desc->func_ctrl.bits.buffer_size = bytes;		/* total frame byte count */
		tp.rx_desc->flg_status.bits32 = 0;			/* linked data from XOR */
		tp.rx_cur_desc->next_desc_addr.bits.sof_eof = 0x03;	/* only one descriptor */
		tp.rx_desc->func_ctrl.bits.own = DMA;			/* set owner bit */
		/* tp.rx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.rx_cur_desc->next_desc_addr.bits32 & 0xfffffff0) + rx_desc_virtual_base); */
		tp.rx_desc->next_desc_addr.bits32 = 0x0000000b;		/* end of descriptor chain */

	} else {
		/* no free tx descriptor */
		printk("XOR: no free tx descriptor\n");
		return;
	}

	// change status
	//tp.status = RUNNING;
	status = tp.busy = 1;

	// arm the rx (store) DMA
	rxdma_ctrl.bits.rd_start = 1;
	// arm the tx (fetch) DMA
	txdma_ctrl.bits.td_start = 1;

	raid_write_reg(RAID_FCHDMA_CTRL, txdma_ctrl.bits32,0x80000000);
	raid_write_reg(RAID_STRDMA_CTRL, rxdma_ctrl.bits32,0x80000000);

#ifdef SPIN_WAIT
	gemini_xor_isr(2);
#else
	xor_queue_descriptor();
#endif

#ifdef XOR_TEST
	for(i=1; i<(bytes/sizeof(int)); i++) {
		if(to[i]!=from[i]){
			printk("pattern check error!\n");
			printk("offset=0x%x p1=%x p2=%x\n",i*4,to[i],from[i]);
			while(1);
		}
	}
#endif

	tp.tx_desc->next_desc_addr.bits32 = ((unsigned long)tp.tx_first_desc - tx_desc_virtual_base + sizeof(RAID_DESCRIPTOR_T)*1) ;
	tp.status = COMPLETE;
//	tp.rx_desc->next_desc_addr.bits32 = ((unsigned long)tp.rx_first_desc - tx_desc_virtual_base + sizeof(RAID_DESCRIPTOR_T)*1) ;
	//tp.rx_desc = tp.rx_first_desc ;
//	tp.rx_desc->func_ctrl.bits.own = DMA;

}
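
Note the asymmetric syncs at the top: the source only needs a clean (DMA_TO_DEVICE), but the destination is synced DMA_BIDIRECTIONAL so dirty lines are written back before the engine runs (a later eviction would clobber the DMA result) and stale lines are not read afterwards. The driver's own XOR_TEST loop starts at i = 1 and so never checks word 0; a sketch of a full software cross-check:

/* Hedged sketch: verify a raid_memcpy() word by word, including word 0. */
static int raid_memcpy_verify(unsigned int *to, unsigned int *from,
			      unsigned int bytes)
{
	unsigned int i;

	raid_memcpy(to, from, bytes);
	for (i = 0; i < bytes / sizeof(*to); i++)
		if (to[i] != from[i])
			return -1;	/* mismatch at word i */
	return 0;
}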
Example #11
File: xor.c Project: cilynx/dd-wrt
void
xor_gemini_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		unsigned long *p3, unsigned long *p4, unsigned long *p5)
{

	int status = 0;
	unsigned long flags;	/* spin_lock_irqsave() expects unsigned long */


	if(bytes > (1<<(SRAM_PAR_SIZE+11))){
		printk("XOR: out of SRAM partition!![0x%x]\n",(unsigned int)bytes);
	}

	spin_lock_irqsave(&raid_lock, flags);
	while (tp.status != COMPLETE) {
		spin_unlock_irqrestore(&raid_lock, flags);
#ifdef XOR_SW_FILL_IN
		/* engine busy: fall back to the software XOR */
		xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
		return;
#else
		msleep(1);
		yield();
		/* re-take the lock before re-checking tp.status (the original
		 * re-checked unlocked and unlocked twice on loop exit) */
		spin_lock_irqsave(&raid_lock, flags);
#endif
	}
	tp.status = RUNNING;
	spin_unlock_irqrestore(&raid_lock, flags);

	// flush the cache to memory before H/W XOR touches them
	consistent_sync(p1, bytes, DMA_BIDIRECTIONAL);
	consistent_sync(p2, bytes, DMA_TO_DEVICE);
	consistent_sync(p3, bytes, DMA_TO_DEVICE);
	consistent_sync(p4, bytes, DMA_TO_DEVICE);
	consistent_sync(p5, bytes, DMA_TO_DEVICE);

	tp.tx_desc = tp.tx_first_desc;
	tp.rx_desc = tp.rx_first_desc;
	if((tp.tx_desc->func_ctrl.bits.own == CPU)/*&&(tp.rx_desc->func_ctrl.bits.own == DMA)*/){
		// prepare the tx descriptors (five sources)
		raid_write_reg(RAID_FCHDMA_CURR_DESC,(unsigned int)tp.tx_desc-tx_desc_virtual_base,0xffffffff);
		tp.tx_desc->buf_addr = (unsigned int)__pa(p1);		// physical address
    	tp.tx_desc->func_ctrl.bits.buffer_size = bytes;    		/* total frame byte count */
//    	tp.tx_desc->flg_status.bits_cmd_status.bcc = 2;			// first descript
//    	tp.tx_desc->flg_status.bits_cmd_status.mode = 0;		// only support XOR command
		tp.tx_desc->flg_status.bits32 = 0x00020000;
    	tp.tx_desc->next_desc_addr.bits.sof_eof = 0x03;          /*only one descriptor*/
    	wmb();
    	tp.tx_desc->func_ctrl.bits.own = DMA;	        		/* set owner bit */
    	tp.tx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.tx_desc->next_desc_addr.bits32 & 0xfffffff0)+tx_desc_virtual_base);

    	tp.tx_desc = tp.tx_cur_desc;
	tp.tx_desc->buf_addr = (unsigned int)__pa(p2);		// physical address
    	tp.tx_desc->func_ctrl.bits.buffer_size = bytes;    		/* total frame byte count */
//    	tp.tx_desc->flg_status.bits_cmd_status.bcc = 0;			// first descript
//    	tp.tx_desc->flg_status.bits_cmd_status.mode = 0;		// only support XOR command
		tp.tx_desc->flg_status.bits32 = 0x00000000;
    	tp.tx_desc->next_desc_addr.bits.sof_eof = 0x03;          /*only one descriptor*/
    	wmb();
    	tp.tx_desc->func_ctrl.bits.own = DMA;	        		/* set owner bit */
    	tp.tx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.tx_desc->next_desc_addr.bits32 & 0xfffffff0)+tx_desc_virtual_base);

    	tp.tx_desc = tp.tx_cur_desc;
	tp.tx_desc->buf_addr = (unsigned int)__pa(p3);		// physical address
    	tp.tx_desc->func_ctrl.bits.buffer_size = bytes;    		/* total frame byte count */
//    	tp.tx_desc->flg_status.bits_cmd_status.bcc = 0;			// first descript
//    	tp.tx_desc->flg_status.bits_cmd_status.mode = 0;		// only support XOR command
		tp.tx_desc->flg_status.bits32 = 0x00000000;
    	tp.tx_desc->next_desc_addr.bits.sof_eof = 0x03;          /*only one descriptor*/
    	wmb();
    	tp.tx_desc->func_ctrl.bits.own = DMA;	        		/* set owner bit */
    	tp.tx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.tx_desc->next_desc_addr.bits32 & 0xfffffff0)+tx_desc_virtual_base);

		tp.tx_desc = tp.tx_cur_desc;
	tp.tx_desc->buf_addr = (unsigned int)__pa(p4);		// physical address
    	tp.tx_desc->func_ctrl.bits.buffer_size = bytes;    		/* total frame byte count */
//    	tp.tx_desc->flg_status.bits_cmd_status.bcc = 0;			// first descript
//    	tp.tx_desc->flg_status.bits_cmd_status.mode = 0;		// only support XOR command
		tp.tx_desc->flg_status.bits32 = 0x00000000;
    	tp.tx_desc->next_desc_addr.bits.sof_eof = 0x03;          /*only one descriptor*/
    	wmb();
    	tp.tx_desc->func_ctrl.bits.own = DMA;	        		/* set owner bit */
    	tp.tx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.tx_desc->next_desc_addr.bits32 & 0xfffffff0)+tx_desc_virtual_base);


    	tp.tx_desc = tp.tx_cur_desc;
	tp.tx_desc->buf_addr = (unsigned int)__pa(p5);		// physical address
    	tp.tx_desc->func_ctrl.bits.buffer_size = bytes;    		/* total frame byte count */
//    	tp.tx_desc->flg_status.bits_cmd_status.bcc = 1;			// last descript
//    	tp.tx_desc->flg_status.bits_cmd_status.mode = 0;		// only support XOR command
//    	tp.tx_cur_desc->next_desc_addr.bits.sof_eof = 0x03;          /*only one descriptor*/
		tp.tx_desc->flg_status.bits32 = 0x00010000;
    	tp.tx_desc->func_ctrl.bits.own = DMA;	        		/* set owner bit */
	tp.tx_desc->next_desc_addr.bits32 = 0x0000000b;		// end of descriptor chain
    	tp.tx_cur_desc = (RAID_DESCRIPTOR_T *)((tp.tx_desc->next_desc_addr.bits32 & 0xfffffff0)+tx_desc_virtual_base);
    	tp.tx_finished_desc = tp.tx_desc;								// keep last descript

		// prepare the rx (result) descriptor
		raid_write_reg(RAID_STRDMA_CURR_DESC, (unsigned int)tp.rx_desc - rx_desc_virtual_base, 0xFFFFFFFF);
		tp.rx_desc->buf_addr = (unsigned int)__pa(p1);
		tp.rx_desc->func_ctrl.bits.buffer_size = bytes;		/* total frame byte count */
		tp.rx_desc->flg_status.bits32 = 0;			/* linked data from XOR */
		/* tp.rx_cur_desc->next_desc_addr.bits.sof_eof = 0x03; */
		tp.rx_desc->func_ctrl.bits.own = DMA;			/* set owner bit */
		tp.rx_desc->next_desc_addr.bits32 = 0x0000000b;		/* end of descriptor chain */

	} else {
		/* no free tx descriptor */
		printk("XOR: no free tx descriptor\n");
		return;
	}

	// change status
//	tp.status = RUNNING;
	status = tp.busy = 1;

	// arm the rx (store) DMA
	rxdma_ctrl.bits.rd_start = 1;
	// arm the tx (fetch) DMA
	txdma_ctrl.bits.td_start = 1;
	wmb();
	raid_write_reg(RAID_FCHDMA_CTRL, txdma_ctrl.bits32,0x80000000);
	raid_write_reg(RAID_STRDMA_CTRL, rxdma_ctrl.bits32,0x80000000);

#ifdef SPIN_WAIT
	gemini_xor_isr(5);
#else
	xor_queue_descriptor();
#endif

	tp.tx_desc->next_desc_addr.bits32 = ((unsigned long)tp.tx_first_desc - tx_desc_virtual_base + sizeof(RAID_DESCRIPTOR_T)*5) | 0x0B;
	tp.status = COMPLETE;
//	tp.rx_desc->next_desc_addr.bits32 = ((unsigned long)tp.rx_first_desc - tx_desc_virtual_base + sizeof(RAID_DESCRIPTOR_T)*1) | 0x0B;
	//tp.rx_desc = tp.rx_first_desc ;
//	tp.rx_desc->func_ctrl.bits.own = DMA;

}
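
The five unrolled source-descriptor setups differ only in the buffer pointer and the flg_status word (0x00020000 marks the first source, 0x00010000 the last, 0x00000000 the middle three). A hedged refactoring sketch that queues them in a loop; field and helper names are the driver's, but the loop itself and the elision of the end-of-chain marking (bits32 = 0x0000000b) are editorial:

/* Hedged sketch: queue nsrc XOR source buffers by walking the descriptor
 * ring instead of unrolling.  End-of-chain marking on the last
 * descriptor is elided; see the driver code above. */
static void queue_xor_sources(unsigned long *src[], int nsrc, unsigned long bytes)
{
	int i;

	tp.tx_desc = tp.tx_first_desc;
	for (i = 0; i < nsrc; i++) {
		tp.tx_desc->buf_addr = (unsigned int)__pa(src[i]);
		tp.tx_desc->func_ctrl.bits.buffer_size = bytes;
		tp.tx_desc->flg_status.bits32 =
			(i == 0) ? 0x00020000 :
			(i == nsrc - 1) ? 0x00010000 : 0x00000000;
		tp.tx_desc->next_desc_addr.bits.sof_eof = 0x03;
		wmb();
		tp.tx_desc->func_ctrl.bits.own = DMA;	/* hand to the engine */
		tp.tx_desc = (RAID_DESCRIPTOR_T *)
			((tp.tx_desc->next_desc_addr.bits32 & 0xfffffff0) +
			 tx_desc_virtual_base);
	}
}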