void cy_as_hal_dump_reg(cy_as_hal_device_tag tag)
{
	u16 data16;
	int i;
	int retval = 0;

	printk(KERN_ERR "=======================================\n");
	printk(KERN_ERR "========   Astoria REG Dump   ========\n");
	for (i = 0; i < 256; i++) {
		data16 = cy_as_hal_read_register(tag, i);
		printk(KERN_ERR "%4.4x ", data16);
		if (i % 8 == 7)
			printk(KERN_ERR "\n");
	}
	printk(KERN_ERR "=======================================\n\n");

	printk(KERN_ERR "========   Astoria REG Test   ========\n");

	/* write distinct test patterns to the three MCU mailbox registers */
	cy_as_hal_write_register(tag, CY_AS_MEM_MCU_MAILBOX1, 0xAAAA);
	cy_as_hal_write_register(tag, CY_AS_MEM_MCU_MAILBOX2, 0x5555);
	cy_as_hal_write_register(tag, CY_AS_MEM_MCU_MAILBOX3, 0xB4C3);

	/* read each pattern back and flag any mismatch */
	data16 = cy_as_hal_read_register(tag, CY_AS_MEM_MCU_MAILBOX1);
	if (data16 != 0xAAAA) {
		printk(KERN_ERR "REG Test Error in CY_AS_MEM_MCU_MAILBOX1 - %4.4x\n",
			data16);
		retval = 1;
	}
	data16 = cy_as_hal_read_register(tag, CY_AS_MEM_MCU_MAILBOX2);
	if (data16 != 0x5555) {
		printk(KERN_ERR "REG Test Error in CY_AS_MEM_MCU_MAILBOX2 - %4.4x\n",
			data16);
		retval = 1;
	}
	data16 = cy_as_hal_read_register(tag, CY_AS_MEM_MCU_MAILBOX3);
	if (data16 != 0xB4C3) {
		printk(KERN_ERR "REG Test Error in CY_AS_MEM_MCU_MAILBOX3 - %4.4x\n",
			data16);
		retval = 1;
	}

	if (retval)
		printk(KERN_ERR "REG Test fail !!!!!\n");
	else
		printk(KERN_ERR "REG Test success !!!!!\n");

	printk(KERN_ERR "=======================================\n\n");
}
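/*
 * The mailbox round-trip above doubles as a quick bus-integrity check:
 * 0xAAAA and 0x5555 toggle alternate data lines, so a stuck or bridged
 * line on the processor-port bus shows up as a read-back mismatch, and
 * 0xB4C3 is an arbitrary mixed pattern. A minimal bring-up sketch (the
 * wrapper name is illustrative, not from this driver):
 */
static void astoria_bus_sanity_check(cy_as_hal_device_tag tag)
{
	/* dumps all 256 registers, then round-trips the mailbox regs */
	cy_as_hal_dump_reg(tag);
}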
/*
 * Set the DRQ mask register for the given endpoint number.  If state is
 * CyTrue, the DRQ interrupt for the given endpoint is enabled, otherwise
 * it is disabled.
 */
static void
cy_as_dma_set_drq(cy_as_device *dev_p,
		cy_as_end_point_number_t ep, cy_bool state)
{
	uint16_t mask;
	uint16_t v;
	uint32_t intval;

	/*
	 * there are no DRQ register bits for EP0 and EP1
	 */
	if (ep == 0 || ep == 1)
		return;

	/*
	 * disable interrupts while we do this to be sure the state of the
	 * DRQ mask register is always well defined.
	 */
	intval = cy_as_hal_disable_interrupts();

	/*
	 * set the DRQ bit to the given state for the given ep
	 */
	mask = (1 << ep);
	v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK);

	if (state)
		v |= mask;
	else
		v &= ~mask;

	cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK, v);
	cy_as_hal_enable_interrupts(intval);
}
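/*
 * A typical caller keeps an endpoint's DRQ bit unmasked only while a
 * request is outstanding. A minimal usage sketch; the begin/end pairing
 * shown is an assumed calling convention, not taken from this source:
 */
static void ep_request_begin(cy_as_device *dev_p,
		cy_as_end_point_number_t ep)
{
	/* unmask this EP's DRQ bit while the request is pending */
	cy_as_dma_set_drq(dev_p, ep, cy_true);
}

static void ep_request_end(cy_as_device *dev_p,
		cy_as_end_point_number_t ep)
{
	/* mask it again once the request completes */
	cy_as_dma_set_drq(dev_p, ep, cy_false);
}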
void hal_reset(cy_as_hal_device_tag tag)
{
	cy_as_hal_print_message("<1> sending hard reset: "
				"MEM_RST_CTRL_REG_HARD...\n");
	cy_as_hal_write_register(tag, CY_AS_MEM_RST_CTRL_REG,
				CY_AS_MEM_RST_CTRL_REG_HARD);
	mdelay(60);

	cy_as_hal_print_message("<1> after RST: si_rev_REG:%x, "
				"PNANDCFG_reg:%x\n",
				cy_as_hal_read_register(tag, CY_AS_MEM_CM_WB_CFG_ID),
				cy_as_hal_read_register(tag, CY_AS_MEM_PNAND_CFG));

	/* set it to LBD */
	cy_as_hal_write_register(tag, CY_AS_MEM_PNAND_CFG,
				PNAND_REG_CFG_INIT_VAL);
}
void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
{
	DBGPRN("cy_as_hal_dma_cancel_request on ep:%d", ep);
	if (end_points[ep].pending)
		cy_as_hal_write_register(tag,
				CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);

	end_points[ep].buffer_valid = cy_false ;
	end_points[ep].type = cy_as_hal_none;
}
uint32_t cy_as_intr_stop(cy_as_device *dev_p)
{
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	if (cy_as_device_is_intr_running(dev_p) == 0)
		return CY_AS_ERROR_NOT_RUNNING;

	cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, 0);
	cy_as_device_set_intr_stopped(dev_p);

	return CY_AS_ERROR_SUCCESS;
}
/*
 * Called On AstDevice LKM exit
 */
int cy_as_hal_omap_cram_stop(const char *pgm, cy_as_hal_device_tag tag)
{
	cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;

	/*
	 * TODO: Need to disable the WB interrupt handler first
	 */
	if (!dev_p)
		return 1;

	cy_as_hal_print_message("<1>_stopping OMAP34xx HAL layer object\n");
	if (dev_p->m_sig != CY_AS_OMAP_CRAM_HAL_SIG) {
		cy_as_hal_print_message("<1>%s: %s: bad HAL tag\n",
								pgm, __func__);
		return 1;
	}

	/*
	 * disable interrupt
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

#if 0
	if (dev_p->thread_flag == 0) {
		dev_p->thread_flag = 1;
		wait_for_completion(&dev_p->thread_complete);
		cy_as_hal_print_message("cyasomaphal:"
			"done cleaning thread\n");
		cy_as_hal_destroy_sleep_channel(&dev_p->thread_sc);
	}
	}
#endif

	cy_as_hal_omap_hardware_deinit(dev_p);

	/*
	 * Rearrange the list
	 */
	if (m_omap_list_p == dev_p)
		m_omap_list_p = dev_p->m_next_p;

	cy_as_hal_free(dev_p);

	cy_as_hal_print_message(KERN_INFO"OMAP_kernel_hal stopped\n");
	return 0;
}
int omap_start_intr(cy_as_hal_device_tag tag)
{
	cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
	int ret = 0;
	const uint16_t mask = CY_AS_MEM_P0_INTR_REG_DRQINT |
				CY_AS_MEM_P0_INTR_REG_MBINT;

	/*
	 * register for interrupts
	 */
	ret = cy_as_hal_configure_interrupts(dev_p);

	/*
	 * enable only MBox & DRQ interrupts for now
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
				CY_AS_MEM_P0_INT_MASK_REG, mask);

	return 1;
}
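/*
 * Taken together with hal_reset() above, a plausible bring-up order is:
 * hard-reset the part, give it time to boot, then hook and unmask
 * interrupts. A sketch under those assumptions; this exact sequence is
 * not shown in the source, and error handling is elided:
 */
static int astoria_bring_up(cy_as_hal_device_tag tag)
{
	hal_reset(tag);			/* hard reset + pNAND config */
	return omap_start_intr(tag);	/* ISR hookup, unmask MBox/DRQ */
}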
uint32_t cy_as_intr_start(cy_as_device *dev_p, cy_bool dmaintr)
{
	uint16_t v;

	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	if (cy_as_device_is_intr_running(dev_p) != 0)
		return CY_AS_ERROR_ALREADY_RUNNING;

	v = CY_AS_MEM_P0_INT_MASK_REG_MMCUINT |
		CY_AS_MEM_P0_INT_MASK_REG_MMBINT |
		CY_AS_MEM_P0_INT_MASK_REG_MPMINT;

	if (dmaintr)
		v |= CY_AS_MEM_P0_INT_MASK_REG_MDRQINT;

	/* Enable the interrupts of interest */
	cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, v);

	/* Mark the interrupt module as initialized */
	cy_as_device_set_intr_running(dev_p);

	return CY_AS_ERROR_SUCCESS;
}
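/*
 * cy_as_intr_start() and cy_as_intr_stop() (shown earlier) form a
 * matched pair guarded by the intr_running flag, so calls must
 * alternate. A minimal usage sketch; the reconfigure scenario is
 * hypothetical, not from this source:
 */
static uint32_t quiesce_and_restart(cy_as_device *dev_p)
{
	uint32_t ret;

	ret = cy_as_intr_stop(dev_p);
	if (ret != CY_AS_ERROR_SUCCESS && ret != CY_AS_ERROR_NOT_RUNNING)
		return ret;

	/* ... reconfigure the device here ... */

	return cy_as_intr_start(dev_p, cy_true); /* cy_true: DRQ too */
}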
/*
 * Astoria DMA read request, APP_CPU reads from WB ep buffer
 */
static void cy_service_e_p_dma_read_request(
			cy_as_omap_dev_kernel *dev_p, uint8_t ep)
{
	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
	uint16_t v, i, size;
	uint16_t *dptr;
	uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
	register void *read_addr;
	register uint16_t a, b, c, d, e, f, g, h;

	/*
	 * get the XFER size from the WB EP DMA register
	 */
	v = cy_as_hal_read_register(tag, ep_dma_reg);

	/*
	 * amount of data in the EP buffer, in bytes
	 */
	size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;

	/*
	 * memory pointer for this DMA packet xfer (sub_segment)
	 */
	dptr = (uint16_t *) end_points[ep].data_p;

	read_addr = dev_p->m_vma_addr_base + CYAS_DEV_CALC_EP_ADDR(ep);

	cy_as_hal_assert(size != 0);

	if (size) {
		/*
		 * Now, read the data from the device. The main loop
		 * moves 16 bytes (eight 16-bit words) per iteration.
		 */
		for (i = size / 16; i > 0; i--) {
			a = (unsigned short) readw(read_addr);
			b = (unsigned short) readw(read_addr);
			c = (unsigned short) readw(read_addr);
			d = (unsigned short) readw(read_addr);
			e = (unsigned short) readw(read_addr);
			f = (unsigned short) readw(read_addr);
			g = (unsigned short) readw(read_addr);
			h = (unsigned short) readw(read_addr);
			*dptr++ = a;
			*dptr++ = b;
			*dptr++ = c;
			*dptr++ = d;
			*dptr++ = e;
			*dptr++ = f;
			*dptr++ = g;
			*dptr++ = h;
		}

		/*
		 * read the remaining full 16-bit words; every
		 * case below intentionally falls through
		 */
		switch ((size & 0xF) / 2) {
		case 7:
			*dptr++ = (unsigned short) readw(read_addr);
		case 6:
			*dptr++ = (unsigned short) readw(read_addr);
		case 5:
			*dptr++ = (unsigned short) readw(read_addr);
		case 4:
			*dptr++ = (unsigned short) readw(read_addr);
		case 3:
			*dptr++ = (unsigned short) readw(read_addr);
		case 2:
			*dptr++ = (unsigned short) readw(read_addr);
		case 1:
			*dptr++ = (unsigned short) readw(read_addr);
			break;
		}

		if (size & 1) {
			/* odd-sized packet: keep only the low byte */
			uint16_t w = (unsigned short) readw(read_addr);
			*((uint8_t *)dptr) = (w & 0xff);
		}
	}

	/*
	 * clear DMAVALID bit indicating that the data has been read
	 */
	cy_as_hal_write_register(tag, ep_dma_reg, 0);

	end_points[ep].seg_xfer_cnt += size;
	end_points[ep].req_xfer_cnt += size;

	/*
	 * pre-advance the data pointer (if it ends up outside
	 * the sg list it will be reset anyway)
	 */
	end_points[ep].data_p += size;

	if (prep_for_next_xfer(tag, ep)) {
		/*
		 * we have more data to read in this request;
		 * set up the next DMA packet and tell WB how
		 * much data we are going to xfer next
		 */
		v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
		cy_as_hal_write_register(tag, ep_dma_reg, v);
	} else {
		end_points[ep].pending = cy_false;
		end_points[ep].type = cy_as_hal_none;
		end_points[ep].buffer_valid = cy_false;

		/*
		 * notify the API that we are done with rq on this EP
		 */
		if (callback) {
			DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
				end_points[ep].req_xfer_cnt);
			callback(tag, ep,
				end_points[ep].req_xfer_cnt,
				CY_AS_ERROR_SUCCESS);
		}
	}
}
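/*
 * The unrolled loop and fall-through switch above are purely a PIO
 * throughput optimization; functionally the whole read reduces to
 * floor(size/2) word reads plus an optional trailing byte. A
 * readability sketch of the equivalent copy, not a drop-in
 * replacement since it gives up the unrolling:
 */
static void ep_fifo_read_simple(void *read_addr, uint16_t *dptr,
				uint16_t size)
{
	uint16_t i;

	/* full 16-bit words: size/16*8 + (size & 0xF)/2 == size/2 */
	for (i = size / 2; i > 0; i--)
		*dptr++ = (unsigned short) readw(read_addr);

	/* odd-sized packet: keep only the low byte of one more word */
	if (size & 1)
		*((uint8_t *)dptr) =
			((unsigned short) readw(read_addr)) & 0xff;
}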
/*
 * west bridge astoria ISR (Interrupt handler)
 */
static irqreturn_t cy_astoria_int_handler(int irq,
				void *dev_id, struct pt_regs *regs)
{
	cy_as_omap_dev_kernel *dev_p;
	uint16_t read_val = 0;
	uint16_t mask_val = 0;

	/*
	 * debug stuff: counts the number of DRQ loops per one intr trigger
	 */
	uint16_t drq_loop_cnt = 0;
	uint8_t irq_pin;
	/*
	 * flags to watch
	 */
	const uint16_t	sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
				CY_AS_MEM_P0_INTR_REG_MBINT |
				CY_AS_MEM_P0_INTR_REG_PMINT |
				CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);

	/*
	 * sample IRQ pin level (just for statistics)
	 */
	irq_pin = __gpio_get_value(AST_INT);

	/*
	 * this one just for debugging
	 */
	intr_sequence_num++;

	/*
	 * astoria device handle
	 */
	dev_p = dev_id;

	/*
	 * read Astoria intr register
	 */
	read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
						CY_AS_MEM_P0_INTR_REG);

	/*
	 * save current mask value
	 */
	mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
						CY_AS_MEM_P0_INT_MASK_REG);

	DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
			intr_sequence_num, read_val);

	/*
	 * Disable WB interrupt signal generation while we are in ISR
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

	/*
	* this is a DRQ Interrupt
	*/
	if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {

		do {
			/*
			 * handle DRQ interrupt
			 */
			drq_loop_cnt++;

			cy_handle_d_r_q_interrupt(dev_p);

			/*
			 * spending too much time in the ISR may impact
			 * average system performance
			 */
			if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
				break;

		/*
		 * Keep processing if there is another DRQ int flag
		 */
		} while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INTR_REG) &
					CY_AS_MEM_P0_INTR_REG_DRQINT);
	}

	if (read_val & sentinel)
		cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p);

	DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x,"
			"int_pin:%d DRQ_jobs:%d\n",
			intr_sequence_num,
			mask_val,
			irq_pin,
			drq_loop_cnt);

	/*
	 * re-enable WB hw interrupts
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INT_MASK_REG, mask_val);

	return IRQ_HANDLED;
}
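/*
 * Registration sketch: the handler above uses the old three-argument
 * form (struct pt_regs *), i.e. a pre-2.6.19 kernel. The IRQ number,
 * trigger type and name string below are illustrative assumptions,
 * not taken from this source:
 */
static int astoria_hook_irq(int irq_num, cy_as_omap_dev_kernel *dev_p)
{
	return request_irq(irq_num, cy_astoria_int_handler,
			IRQF_TRIGGER_FALLING, "cy_astoria", dev_p);
}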
/*
 * This function must be defined to transfer a block of data from
 * the WestBridge device.  This function can use the burst read
 * (DMA) capabilities of WestBridge to do this, or it can just
 * copy the data using reads.
 */
void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
					uint8_t ep, void *buf,
					uint32_t size, uint16_t maxsize)
{
	uint32_t addr;
	uint16_t v;

	/*
	 * Note: "size" is the actual request size
	 * "maxsize" - is the P port fragment size
	 * No EP0 or EP1 traffic should get here
	 */
	cy_as_hal_assert(ep != 0 && ep != 1);

	/*
	 * If this asserts, we have an ordering problem.
	 * Another DMA request is coming down before the
	 * previous one has completed. We should not get new
	 * requests while the current one is still in progress.
	 */
	cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);

	end_points[ep].buffer_valid = cy_true;
	end_points[ep].type = cy_as_hal_read;
	end_points[ep].pending = cy_true;
	end_points[ep].req_xfer_cnt = 0;
	end_points[ep].req_length = size;

	if (size >= maxsize) {
		/*
		 * set the xfer size for the very first DMA operation
		 * to the port max packet size (typically 512 or 1024)
		 */
		end_points[ep].dma_xfer_sz = maxsize;
	} else {
		/*
		 * so that we can handle small xfers in the
		 * case of non-storage EPs
		 */
		end_points[ep].dma_xfer_sz = size;
	}

	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;

	if (end_points[ep].sg_list_enabled) {
		/*
		 * Handle sg-list assisted EPs
		 * seg_xfer_cnt - keeps track of N of sent packets
		 * buf - pointer to the SG list
		 * data_p - data pointer for the 1st DMA segment
		 */
		end_points[ep].seg_xfer_cnt = 0;
		end_points[ep].sg_p = buf;
		end_points[ep].data_p = sg_virt(end_points[ep].sg_p);

		#ifdef DBGPRN_DMA_SETUP_RD
		DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
			   "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
				ep,
				buf,
				end_points[ep].data_p,
				size,
				maxsize);
		#endif
		v = (end_points[ep].dma_xfer_sz &
				CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
		cy_as_hal_write_register(tag, addr, v);
	} else {
		/*
		 * A non-sg-list EP passes a void *buf rather
		 * than a scatterlist *sg
		 */
		#ifdef DBGPRN_DMA_SETUP_RD
			DBGPRN("%s:non-sg_list EP:%d,"
					"RQ_sz:%d, maxsz:%d\n",
					__func__, ep, size,  maxsize);
		#endif

		end_points[ep].sg_p = NULL;

		/*
		 * must be a VMA of a membuf in kernel space
		 */
		end_points[ep].data_p = buf;

		/*
		 * Program the EP DMA register for Storage endpoints only.
		 */
		if (is_storage_e_p(ep)) {
			v = (end_points[ep].dma_xfer_sz &
					CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
					CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
			cy_as_hal_write_register(tag, addr, v);
		}
	}
}
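/*
 * A caller sketch for the non-sg path: hand the HAL a kernel-virtual
 * buffer and let the DRQ/ISR machinery complete the request through
 * the registered callback. The EP number and sizes below are
 * illustrative assumptions:
 */
static uint8_t rd_buf[512];

static void queue_ep2_read(cy_as_hal_device_tag tag)
{
	/* request size == maxsize, so one DMA fragment covers it */
	cy_as_hal_dma_setup_read(tag, 2, rd_buf, sizeof(rd_buf), 512);
	/* completion arrives later via the registered DMA callback */
}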
/*
 * This function must be defined to transfer a block of data to
 * the WestBridge device.  This function can use the burst write
 * (DMA) capabilities of WestBridge to do this, or it can just copy
 * the data using writes.
 */
void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
						uint8_t ep, void *buf,
						uint32_t size, uint16_t maxsize)
{
	uint32_t addr = 0;
	uint16_t v = 0;

	/*
	 * Note: "size" is the actual request size
	 * "maxsize" - is the P port fragment size
	 * No EP0 or EP1 traffic should get here
	 */
	cy_as_hal_assert(ep != 0 && ep != 1);

	/*
	 * If this asserts, we have an ordering problem.  Another DMA request
	 * is coming down before the previous one has completed.
	 */
	cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
	end_points[ep].buffer_valid = cy_true;
	end_points[ep].type = cy_as_hal_write;
	end_points[ep].pending = cy_true;

	/*
	 * total length of the request
	 */
	end_points[ep].req_length = size;

	if (size >= maxsize) {
		/*
		 * set the xfer size for the very first DMA operation
		 * to the port max packet size (typically 512 or 1024)
		 */
		end_points[ep].dma_xfer_sz = maxsize;
	} else {
		/*
		 * smaller xfers for non-storage EPs
		 */
		end_points[ep].dma_xfer_sz = size;
	}

	/*
	 * check whether the EP transfer mode uses an sg_list rather
	 * than a memory buffer; block devices pass it to the HAL so
	 * the HAL can get the real physical address of each segment
	 * and set up a DMA controller in hardware (if there is one)
	 */
	if (end_points[ep].sg_list_enabled) {
		/*
		 * buf -  pointer to the SG list
		 * data_p - data pointer to the 1st DMA segment
		 * seg_xfer_cnt - keeps track of N of bytes sent in current
		 *		sg_list segment
		 * req_xfer_cnt - keeps track of the total N of bytes
		 *		transferred for the request
		 */
		end_points[ep].sg_p = buf;
		end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
		end_points[ep].seg_xfer_cnt = 0;
		end_points[ep].req_xfer_cnt = 0;

#ifdef DBGPRN_DMA_SETUP_WR
		DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
				"req_sz:%d, maxsz:%d\n",
				__func__,
				ep,
				buf,
				end_points[ep].data_p,
				size,
				maxsize);
#endif

	} else {
		/*
		 * setup XFER for non sg_list assisted EPs
		 */

		#ifdef DBGPRN_DMA_SETUP_WR
			DBGPRN("<1>%s non storage or sz < 512:"
					"EP:%d, sz:%d\n", __func__, ep, size);
		#endif

		end_points[ep].sg_p = NULL;

		/*
		 * must be a VMA of a membuf in kernel space
		 */
		end_points[ep].data_p = buf;

		/*
		 * will keep track No of bytes xferred for the request
		 */
		end_points[ep].req_xfer_cnt = 0;
	}

	/*
	 * Tell WB we are ready to send data on the given endpoint
	 */
	v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
			| CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;

	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;

	cy_as_hal_write_register(tag, addr, v);
}
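/*
 * The write path mirrors the read path with one visible difference:
 * the EP DMA register is always armed at the end here, whereas the
 * non-sg read path arms it only for storage endpoints. A caller
 * sketch; the EP number and sizes are illustrative assumptions:
 */
static uint8_t wr_buf[512];

static void queue_ep4_write(cy_as_hal_device_tag tag)
{
	cy_as_hal_dma_setup_write(tag, 4, wr_buf, sizeof(wr_buf), 512);
	/* DRQ fires when WB is ready; the service routine below
	   then pushes the data into the EP buffer */
}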
/*
 * omap_cpu needs to transfer data to ASTORIA EP buffer
 */
static void cy_service_e_p_dma_write_request(
			cy_as_omap_dev_kernel *dev_p, uint8_t ep)
{
	uint16_t addr;
	uint16_t v = 0, i = 0;
	uint32_t size;
	uint16_t *dptr;
	register void *write_addr;
	register uint16_t a, b, c, d;

	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;

	/*
	 * note: size here is the size of the DMA transfer; it can be
	 * anything > 0 and up to the P_PORT packet size
	 */
	size = end_points[ep].dma_xfer_sz;
	dptr = end_points[ep].data_p;

	write_addr = (void *)(dev_p->m_vma_addr_base +
				CYAS_DEV_CALC_EP_ADDR(ep));

	/*
	 * perform the "soft" DMA transfer: the CPU copies
	 * the data to the device itself
	 */
	if (size) {
		/*
		 * Now, write the data to the device. The main loop
		 * moves 8 bytes (four 16-bit words) per iteration.
		 */
		for (i = size / 8; i > 0; i--) {
			a = *dptr++;
			b = *dptr++;
			c = *dptr++;
			d = *dptr++;
			writew(a, write_addr);
			writew(b, write_addr);
			writew(c, write_addr);
			writew(d, write_addr);
		}

		/*
		 * write the remaining full 16-bit words; every
		 * case below intentionally falls through
		 */
		switch ((size & 7) / 2) {
		case 3:
			writew(*dptr++, write_addr);
		case 2:
			writew(*dptr++, write_addr);
		case 1:
			writew(*dptr++, write_addr);
			break;
		}

		if (size & 1) {
			/* odd byte count: send the last byte as a word */
			uint16_t w = *((uint8_t *)dptr);
			writew(w, write_addr);
		}
	}

	end_points[ep].seg_xfer_cnt += size;
	end_points[ep].req_xfer_cnt += size;
	/*
	 * pre-advance data pointer
	 * (if it's outside sg list it will be reset anyway)
	 */
	end_points[ep].data_p += size;

	/*
	 * now clear DMAVAL bit to indicate we are done
	 * transferring data and that the data can now be
	 * sent via USB to the USB host, sent to storage,
	 * or used internally.
	 */

	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
	cy_as_hal_write_register(tag, addr, size);

	/*
	 * finally, tell the USB subsystem that the
	 * data is gone and we can accept the
	 * next request if one exists.
	 */
	if (prep_for_next_xfer(tag, ep)) {
		/*
		 * There is more data to go. Re-init the WestBridge DMA side
		 */
		v = end_points[ep].dma_xfer_sz |
			CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
		cy_as_hal_write_register(tag, addr, v);
	} else {
		end_points[ep].pending = cy_false;
		end_points[ep].type = cy_as_hal_none;
		end_points[ep].buffer_valid = cy_false;

		/*
		 * notify the API that we are done with rq on this EP
		 */
		if (callback) {
			/*
			 * this callback will wake up the process that might be
			 * sleeping on the EP which data is being transferred
			 */
			callback(tag, ep,
					end_points[ep].req_xfer_cnt,
					CY_AS_ERROR_SUCCESS);
		}
	}
}
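/*
 * As on the read side, the unrolled write loop is equivalent to a
 * plain word-at-a-time copy of floor(size/2) words plus an optional
 * padded trailing byte. A readability sketch, again dropping the
 * unrolling:
 */
static void ep_fifo_write_simple(void *write_addr, uint16_t *dptr,
				uint32_t size)
{
	uint32_t i;

	/* full 16-bit words: size/8*4 + (size & 7)/2 == size/2 */
	for (i = size / 2; i > 0; i--)
		writew(*dptr++, write_addr);

	/* odd byte count: send the last byte padded into a word */
	if (size & 1)
		writew(*((uint8_t *)dptr), write_addr);
}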