Example no. 1
static void
create_register_settings(cy_as_device *dev_p,
	cy_as_physical_endpoint_state epstate[4])
{
	int i;
	uint8_t v;

	for (i = 0; i < 4; i++) {
		if (i == 0) {
			/* Start with the values that specify size */
			dev_p->usb_pepcfg[i] =
				pep_register_values
					[dev_p->usb_phy_config - 1][0];
		} else if (i == 2) {
			/* Start with the values that specify size */
			dev_p->usb_pepcfg[i] =
				pep_register_values
					[dev_p->usb_phy_config - 1][1];
		} else
			dev_p->usb_pepcfg[i] = 0;

		/* Adjust direction if it is in */
		if (epstate[i] == cy_as_e_p_iso_in ||
			epstate[i] == cy_as_e_p_in)
			dev_p->usb_pepcfg[i] |= (1 << 6);
	}

	/* Configure the logical EP registers */
	for (i = 0; i < 10; i++) {
		int val;
		int epnum = end_point_map[i];

		v = 0x10;	  /* PEP 1, Bulk Endpoint, EP not valid */
		if (dev_p->usb_config[epnum].enabled) {
			v |= (1 << 7);	 /* Enabled */

			val = dev_p->usb_config[epnum].physical - 1;
			cy_as_hal_assert(val >= 0 && val <= 3);
			v |= (val << 5);

			switch (dev_p->usb_config[epnum].type) {
			case cy_as_usb_bulk:
				val = 2;
				break;
			case cy_as_usb_int:
				val = 3;
				break;
			case cy_as_usb_iso:
				val = 1;
				break;
			default:
				cy_as_hal_assert(cy_false);
				break;
			}
			v |= (val << 3);
		}

		dev_p->usb_lepcfg[i] = v;
	}
}
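
A minimal standalone sketch (not part of the driver): it decodes the logical endpoint configuration byte composed by create_register_settings() above. The bit positions come from that function; the helper and field names are illustrative only.

#include <stdint.h>
#include <stdio.h>

static void decode_lepcfg(uint8_t v)
{
	int enabled = (v >> 7) & 1;        /* bit 7: endpoint enabled/valid */
	int pep     = ((v >> 5) & 3) + 1;  /* bits 6:5: physical EP (1..4) */
	int type    = (v >> 3) & 3;        /* bits 4:3: 1=iso, 2=bulk, 3=int */

	printf("enabled=%d pep=%d type=%d\n", enabled, pep, type);
}

int main(void)
{
	decode_lepcfg(0x10);	/* default value: PEP 1, bulk, not enabled */
	return 0;
}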
Example no. 2
uint32_t cy_as_intr_stop(cy_as_device *dev_p)
{
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	if (cy_as_device_is_intr_running(dev_p) == 0)
		return CY_AS_ERROR_NOT_RUNNING;

	cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, 0);
	cy_as_device_set_intr_stopped(dev_p);

	return CY_AS_ERROR_SUCCESS;
}
Example no. 3
void cy_as_intr_service_interrupt(cy_as_hal_device_tag tag)
{
	uint16_t v;
	cy_as_device *dev_p;

	dev_p = cy_as_device_find_from_tag(tag);

	/*
	 * only power management interrupts can occur before the
	 * antioch API setup is complete. if this is a PM interrupt
	 *  handle it here; otherwise output a warning message.
	 */
	if (dev_p == 0) {
		v = cy_as_hal_read_register(tag, CY_AS_MEM_P0_INTR_REG);
		if (v == CY_AS_MEM_P0_INTR_REG_PMINT) {
			/* Read the PWR_MAGT_STAT register
			 * to clear this interrupt. */
			v = cy_as_hal_read_register(tag,
				CY_AS_MEM_PWR_MAGT_STAT);
		} else
			cy_as_hal_print_message("stray antioch "
				"interrupt detected"
				", tag not associated "
				"with any created device.");
		return;
	}

	/* Make sure we got a valid object from cy_as_device_find_from_tag */
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG);

	if (v & CY_AS_MEM_P0_INTR_REG_MCUINT)
		cy_as_mcu_interrupt_handler(dev_p);

	if (v & CY_AS_MEM_P0_INTR_REG_PMINT)
		cy_as_power_management_interrupt_handler(dev_p);

	if (v & CY_AS_MEM_P0_INTR_REG_PLLLOCKINT)
		cy_as_pll_lock_loss_interrupt_handler(dev_p);

	/* If the interrupt module is not running, no mailbox
	 * interrupts are expected from the west bridge. */
	if (cy_as_device_is_intr_running(dev_p) == 0)
		return;

	if (v & CY_AS_MEM_P0_INTR_REG_MBINT)
		cy_as_mail_box_interrupt_handler(dev_p);
}
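
Hedged sketch (not from the driver): a platform HAL interrupt handler would normally just forward to cy_as_intr_service_interrupt() with the tag it registered for the device. The Linux IRQ plumbing shown here is an assumption for illustration.

#include <linux/interrupt.h>

static irqreturn_t example_hal_isr(int irq, void *context)
{
	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)context;

	(void)irq;
	cy_as_intr_service_interrupt(tag);
	return IRQ_HANDLED;
}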
Example no. 4
/*
 * This function is used to kick start DMA on a given
 * channel.  If DMA is already running on the given
 * endpoint, nothing happens.  If DMA is not running,
 * the first entry is pulled from the DMA queue and
 * sent/received to/from the West Bridge device.
 */
cy_as_return_status_t
cy_as_dma_kick_start(cy_as_device *dev_p, cy_as_end_point_number_t ep)
{
	cy_as_dma_end_point *ep_p ;
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE) ;

	ep_p = CY_AS_NUM_EP(dev_p, ep) ;

	/* We are already running */
	if (cy_as_dma_end_point_is_running(ep_p))
		return CY_AS_ERROR_SUCCESS ;

	cy_as_dma_send_next_dma_request(dev_p, ep_p);
	return CY_AS_ERROR_SUCCESS ;
}
Example no. 5
/*
 * Get a DMA queue entry from the free list.
 */
static cy_as_dma_queue_entry *
cy_as_dma_get_dma_queue_entry(cy_as_device *dev_p)
{
	cy_as_dma_queue_entry *req_p ;
	uint32_t imask ;

	cy_as_hal_assert(dev_p->dma_freelist_p != 0) ;

	imask = cy_as_hal_disable_interrupts() ;
	req_p = dev_p->dma_freelist_p ;
	dev_p->dma_freelist_p = req_p->next_p ;
	cy_as_hal_enable_interrupts(imask) ;

	return req_p ;
}
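
Hedged sketch of the inverse operation: the real driver provides cy_as_dma_add_request_to_free_queue() for this; the helper below only mirrors the interrupt-masking pattern of the getter above and is illustrative.

static void
example_dma_put_dma_queue_entry(cy_as_device *dev_p,
	cy_as_dma_queue_entry *req_p)
{
	uint32_t imask ;

	imask = cy_as_hal_disable_interrupts() ;
	req_p->next_p = dev_p->dma_freelist_p ;	/* push back onto the free list */
	dev_p->dma_freelist_p = req_p ;
	cy_as_hal_enable_interrupts(imask) ;
}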
Example no. 6
uint32_t cy_as_intr_start(cy_as_device *dev_p, cy_bool dmaintr)
{
	uint16_t v;

	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	if (cy_as_device_is_intr_running(dev_p) != 0)
		return CY_AS_ERROR_ALREADY_RUNNING;

	v = CY_AS_MEM_P0_INT_MASK_REG_MMCUINT |
		CY_AS_MEM_P0_INT_MASK_REG_MMBINT |
		CY_AS_MEM_P0_INT_MASK_REG_MPMINT;

	if (dmaintr)
		v |= CY_AS_MEM_P0_INT_MASK_REG_MDRQINT;

	/* Enable the interrupts of interest */
	cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, v);

	/* Mark the interrupt module as initialized */
	cy_as_device_set_intr_running(dev_p);

	return CY_AS_ERROR_SUCCESS;
}
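
Illustrative usage only, assuming a device handle that has completed basic setup: pair cy_as_intr_start() with cy_as_intr_stop() around normal operation.

static int example_intr_bringup(cy_as_device *dev_p)
{
	uint32_t ret;

	/* cy_true also unmasks the DRQ (DMA request) interrupt */
	ret = cy_as_intr_start(dev_p, cy_true);
	if (ret != CY_AS_ERROR_SUCCESS)
		return -1;

	/* ... normal operation; interrupts are serviced via
	 * cy_as_intr_service_interrupt() ... */

	return (cy_as_intr_stop(dev_p) == CY_AS_ERROR_SUCCESS) ? 0 : -1;
}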
Example no. 7
/*
 * This function is called when the HAL layer has
 * completed the last requested DMA operation.
 * This function sends/receives the next batch of
 * data associated with the current DMA request,
 * or, if it is complete, moves to the next DMA request.
 */
void
cy_as_dma_completed_callback(cy_as_hal_device_tag tag,
	cy_as_end_point_number_t ep, uint32_t cnt, cy_as_return_status_t status)
{
	uint32_t mask ;
	cy_as_dma_queue_entry *req_p ;
	cy_as_dma_end_point *ep_p ;
	cy_as_device *dev_p = cy_as_device_find_from_tag(tag) ;

	/* Make sure the HAL layer gave us good parameters */
	cy_as_hal_assert(dev_p != 0) ;
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE) ;
	cy_as_hal_assert(ep < 16) ;

	/* Get the endpoint ptr */
	if (ep < 16)
		ep_p = CY_AS_NUM_EP(dev_p, ep) ;
	else
		return;

	cy_as_hal_assert(ep_p->queue_p != 0) ;

	/* Get a pointer to the current entry in the queue */
	mask = cy_as_hal_disable_interrupts() ;
	req_p = ep_p->queue_p ;

	/* Update the offset to reflect the data actually received or sent */
	req_p->offset += cnt ;

	/*
	 * if we are still sending/receiving the current packet,
	 * send/receive the next chunk.  basically, we keep going
	 * if we have not sent/received enough data, and we are
	 * not doing a packet operation, and the last packet
	 * sent or received was a full sized packet.  in other
	 * words, when we are NOT doing a packet operation, a
	 * less than full size packet (a short packet) will
	 * terminate the operation.
	 *
	 * note: if this is an EP1 request and the request has
	 * timed out, it means the buffer is not free.
	 * we have to resend the data.
	 *
	 * note: for the MTP data transfers, the DMA transfer
	 * for the next packet can only be started asynchronously,
	 * after a firmware event notifies that the device is ready.
	 */
	if (((req_p->offset != req_p->size) && (req_p->packet == cy_false) &&
		((cnt == ep_p->maxhaldata) || ((cnt == ep_p->maxhwdata) &&
		((ep != CY_AS_MTP_READ_ENDPOINT) ||
		(cnt == dev_p->usb_max_tx_size)))))
			|| ((ep == 1) && (status == CY_AS_ERROR_TIMEOUT))) {
		cy_as_hal_enable_interrupts(mask) ;

		/*
		 * and send the request again to send the next block of
		 * data. special handling for MTP transfers on E_ps 2
		 * and 6. the send_next_request will be processed based
		 * on the event sent by the firmware.
		 */
		if ((ep == CY_AS_MTP_WRITE_ENDPOINT) || (
				(ep == CY_AS_MTP_READ_ENDPOINT) &&
				(!cy_as_dma_end_point_is_direction_in(ep_p)))) {
			cy_as_dma_end_point_set_stopped(ep_p) ;
			cy_as_dma_set_drq(dev_p, ep, cy_false) ;
		} else
			cy_as_dma_send_next_dma_request(dev_p, ep_p) ;
	} else {
		/*
		 * we get here if ...
		 *	we have sent or received all of the data
		 *		 or
		 *	we are doing a packet operation
		 *		 or
		 *	we receive a short packet
		 */

		/*
		 * remove this entry from the DMA queue for this endpoint.
		 */
		cy_as_dma_end_point_clear_in_transit(ep_p) ;
		ep_p->queue_p = req_p->next_p ;
		if (ep_p->last_p == req_p) {
			/*
			 * we have removed the last packet from the DMA queue,
			 * disable the interrupt associated with this endpoint.
			 */
			ep_p->last_p = 0 ;
			cy_as_hal_enable_interrupts(mask) ;
			cy_as_dma_set_drq(dev_p, ep, cy_false) ;
		} else
			cy_as_hal_enable_interrupts(mask) ;

		if (req_p->cb) {
			/*
			 * if the request has a callback associated with it,
			 * call the callback to tell the interested party that
			 * this DMA request has completed.
			 *
			 * note, we set the in_callback bit to ensure that we
			 * cannot recursively call an API function that is
			 * synchronous only from a callback.
			 */
			cy_as_device_set_in_callback(dev_p) ;
			(*req_p->cb)(dev_p, ep, req_p->buf_p,
				req_p->offset, status) ;
			cy_as_device_clear_in_callback(dev_p) ;
		}

		/*
		 * we are done with this request, put it on the freelist to be
		 * reused at a later time.
		 */
		cy_as_dma_add_request_to_free_queue(dev_p, req_p) ;

		if (ep_p->queue_p == 0) {
			/*
			 * if the endpoint is out of DMA entries, set the
			 * endpoint as stopped.
			 */
			cy_as_dma_end_point_set_stopped(ep_p) ;

			/*
			 * the DMA queue is empty, wake any task waiting on
			 * the QUEUE to drain.
			 */
			if (cy_as_dma_end_point_is_sleeping(ep_p)) {
				cy_as_dma_end_point_set_wake_state(ep_p) ;
				cy_as_hal_wake(&ep_p->channel) ;
			}
		} else {
			/*
			 * if the queued operation is a MTP transfer,
			 * wait until firmware event before sending
			 * down the next DMA request.
			 */
			if ((ep == CY_AS_MTP_WRITE_ENDPOINT) ||
				((ep == CY_AS_MTP_READ_ENDPOINT) &&
				(!cy_as_dma_end_point_is_direction_in(ep_p))) ||
				((ep == dev_p->storage_read_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p)))
				|| ((ep == dev_p->storage_write_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p))))
				cy_as_dma_end_point_set_stopped(ep_p) ;
			else
				cy_as_dma_send_next_dma_request(dev_p, ep_p) ;
		}
	}
}
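
Illustrative only: a completion callback compatible with the (*req_p->cb)(dev_p, ep, buf_p, offset, status) call above. The callback typedef is not shown here, so this signature is inferred from the call site; note that the callback runs with the in_callback flag set, so it must not invoke synchronous-only API functions.

static void
example_dma_done(cy_as_device *dev_p, cy_as_end_point_number_t ep,
	void *buf_p, uint32_t count, cy_as_return_status_t status)
{
	(void)dev_p ;
	(void)ep ;
	(void)buf_p ;
	(void)count ;

	if (status == CY_AS_ERROR_SUCCESS)
		cy_as_hal_print_message("example DMA request completed") ;
	else
		cy_as_hal_print_message("example DMA request failed") ;
}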
Example no. 8
/*
 * Send the next DMA request for the endpoint given
 */
static void
cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p)
{
	uint32_t datacnt ;
	void *buf_p ;
	cy_as_dma_queue_entry *dma_p ;

	cy_as_log_debug_message(6, "cy_as_dma_send_next_dma_request called") ;

	/* If the queue is empty, nothing to do */
	dma_p = ep_p->queue_p ;
	if (dma_p == 0) {
		/*
		 * there are no pending DMA requests
		 * for this endpoint.  disable the DRQ
		 * mask bits to ensure no interrupts
		 * will be triggered by this endpoint
		 * until someone is interested in the data.
		 */
		cy_as_dma_set_drq(dev_p, ep_p->ep, cy_false) ;
		return ;
	}

	cy_as_dma_end_point_set_running(ep_p) ;

	/*
	 * get the number of words that still
	 * need to be xferred in this request.
	 */
	cy_as_hal_assert(dma_p->size >= dma_p->offset) ;
	datacnt = dma_p->size - dma_p->offset ;

	/*
	 * the HAL layer should never limit the size
	 * of the transfer to something less than the
	 * maxhwdata; otherwise, the data will be sent
	 * in packets that are not correct in size.
	 */
	cy_as_hal_assert(ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE
			|| ep_p->maxhaldata >= ep_p->maxhwdata) ;

	/*
	 * update the number of words that need to be xferred yet
	 * based on the limits of the HAL layer.
	 */
	if (ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE) {
		if (datacnt > ep_p->maxhwdata)
			datacnt = ep_p->maxhwdata ;
	} else {
		if (datacnt > ep_p->maxhaldata)
			datacnt = ep_p->maxhaldata ;
	}

	/*
	 * find a pointer to the data that needs to be transferred
	 */
	buf_p = (((char *)dma_p->buf_p) + dma_p->offset);

	/*
	 * mark a request in transit
	 */
	cy_as_dma_end_point_set_in_transit(ep_p) ;

	if (ep_p->ep == 0 || ep_p->ep == 1) {
		/*
		 * if this is a WRITE request on EP0 and EP1
		 * we write the data via an EP_DATA request
		 * to west bridge via the mailbox registers.
		 * if this is a READ request, we do nothing
		 * and the data will arrive via an EP_DATA
		 * request from west bridge.  in the request
		 * handler for the USB context we will pass
		 * the data back into the DMA module.
		 */
		if (dma_p->readreq == cy_false) {
			uint16_t v ;
			uint16_t len ;
			cy_as_ll_request_response *resp_p ;
			cy_as_ll_request_response *req_p ;
			cy_as_return_status_t ret ;

			/* length of the data in 16-bit words */
			len = (uint16_t)(datacnt / 2) ;
			if (datacnt % 2)
				len++ ;

			/* add one word for the EP_DATA control word */
			len++ ;

			if (ep_p->ep == 0) {
				req_p = dev_p->usb_ep0_dma_req ;
				resp_p = dev_p->usb_ep0_dma_resp ;
				dev_p->usb_ep0_dma_req = 0 ;
				dev_p->usb_ep0_dma_resp = 0 ;
			} else {
				req_p = dev_p->usb_ep1_dma_req ;
				resp_p = dev_p->usb_ep1_dma_resp ;
				dev_p->usb_ep1_dma_req = 0 ;
				dev_p->usb_ep1_dma_resp = 0 ;
			}

			cy_as_hal_assert(req_p != 0) ;
			cy_as_hal_assert(resp_p != 0) ;
			cy_as_hal_assert(len <= 64) ;

			cy_as_ll_init_request(req_p, CY_RQT_USB_EP_DATA,
				CY_RQT_USB_RQT_CONTEXT, len) ;

			v = (uint16_t)(datacnt | (ep_p->ep << 13) | (1 << 14)) ;
			if (dma_p->offset == 0)
				v |= (1 << 12) ;/* Set the first packet bit */
			if (dma_p->offset + datacnt == dma_p->size)
				v |= (1 << 11) ;/* Set the last packet bit */

			cy_as_ll_request_response__set_word(req_p, 0, v) ;
			cy_as_ll_request_response__pack(req_p,
					1, datacnt, buf_p) ;

			cy_as_ll_init_response(resp_p, 1) ;

			ret = cy_as_ll_send_request(dev_p, req_p, resp_p,
				cy_false, cy_as_dma_request_callback) ;
			if (ret == CY_AS_ERROR_SUCCESS)
				cy_as_log_debug_message(5,
				"+++ send EP 0/1 data via mailbox registers") ;
			else
				cy_as_log_debug_message(5,
				"+++ error sending EP 0/1 data via mailbox "
				"registers - CY_AS_ERROR_TIMEOUT") ;

			if (ret != CY_AS_ERROR_SUCCESS)
				cy_as_dma_completed_callback(dev_p->tag,
					ep_p->ep, 0, ret) ;
		}
	} else {
		/*
		 * this is a DMA request on an endpoint that is accessible
		 * via the P port.  ask the HAL DMA capabilities to
		 * perform this.  the amount of data sent is limited by the
		 * HAL max size as well as what we need to send.  if the
		 * ep_p->maxhaldata is set to a value larger than the
		 * endpoint buffer size, then we will pass more than a
		 * single buffer worth of data to the HAL layer and expect
		 * the HAL layer to divide the data into packets.  the last
		 * parameter here (ep_p->maxhwdata) gives the packet size for
		 * the data so the HAL layer knows what the packet size should
		 * be.
		 */
		if (cy_as_dma_end_point_is_direction_in(ep_p))
			cy_as_hal_dma_setup_write(dev_p->tag,
				ep_p->ep, buf_p, datacnt, ep_p->maxhwdata) ;
		else
			cy_as_hal_dma_setup_read(dev_p->tag,
				ep_p->ep, buf_p, datacnt, ep_p->maxhwdata) ;

		/*
		 * the DRQ interrupt for this endpoint should be enabled
		 * so that the data transfer progresses at interrupt time.
		 */
		cy_as_dma_set_drq(dev_p, ep_p->ep, cy_true) ;
	}
}
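
Minimal standalone sketch: the EP0/EP1 mailbox path above packs the byte count, endpoint number and first/last packet flags into a single control word. The helper below restates that packing for clarity; the bit positions are taken from cy_as_dma_send_next_dma_request() and the function name is hypothetical.

#include <stdint.h>

static uint16_t pack_ep_data_word(uint16_t datacnt, uint8_t ep,
	int first, int last)
{
	uint16_t v = (uint16_t)(datacnt | (ep << 13) | (1 << 14));

	if (first)
		v |= (1 << 12);	/* first packet of the request */
	if (last)
		v |= (1 << 11);	/* last packet of the request */

	return v;
}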
/*
 * Astoria DMA read request, APP_CPU reads from WB ep buffer
 */
static void cy_service_e_p_dma_read_request(
			cy_as_omap_dev_kernel *dev_p, uint8_t ep)
{
	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p ;
	uint16_t v, i, size;
	uint16_t *dptr;
	uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
	register void *read_addr ;
	register uint16_t a, b, c, d, e, f, g, h ;

	/*
	 * get the XFER size from the WB EP DMA register
	 */
	v = cy_as_hal_read_register(tag, ep_dma_reg);

	/*
	 * amount of data in the EP buffer, in bytes
	 */
	size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;

	/*
	 * memory pointer for this DMA packet xfer (sub_segment)
	 */
	dptr = (uint16_t *) end_points[ep].data_p;

	read_addr = dev_p->m_vma_addr_base + CYAS_DEV_CALC_EP_ADDR(ep) ;

	cy_as_hal_assert(size != 0);

	if (size) {
		/*
		 * Now, read the data from the device
		 */
		for (i = size/16; i > 0; i--) {
			a = (unsigned short) readw(read_addr) ;
			b = (unsigned short) readw(read_addr) ;
			c = (unsigned short) readw(read_addr) ;
			d = (unsigned short) readw(read_addr) ;
			e = (unsigned short) readw(read_addr) ;
			f = (unsigned short) readw(read_addr) ;
			g = (unsigned short) readw(read_addr) ;
			h = (unsigned short) readw(read_addr) ;
			*dptr++ = a ;
			*dptr++ = b ;
			*dptr++ = c ;
			*dptr++ = d ;
			*dptr++ = e ;
			*dptr++ = f ;
			*dptr++ = g ;
			*dptr++ = h ;
		}

		/* Read the remaining (size & 0xF)/2 residual 16-bit words;
		 * the fall-through between cases is deliberate. */
		switch ((size & 0xF)/2) {
		case 7:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 6:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 5:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 4:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 3:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 2:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 1:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
			break ;
		}
	
		if (size & 1) {
			/* Odd sized packet: copy the final byte */
			uint16_t w = (unsigned short) readw(read_addr) ;
			*((uint8_t *)dptr) = (w & 0xff) ;
		}
	}

	/*
	 * clear DMAVALID bit indicating that the data has been read
	 */
	cy_as_hal_write_register(tag, ep_dma_reg, 0) ;

	end_points[ep].seg_xfer_cnt += size;
	end_points[ep].req_xfer_cnt += size;

	/*
	 * pre-advance the data pointer (if it ends up outside the
	 * sg list it will be reset anyway)
	 */
	end_points[ep].data_p += size;

	if (prep_for_next_xfer(tag, ep)) {
		/*
		 * we have more data to read in this request,
		 * set up the next dma packet and tell WB how much
		 * data we are going to xfer next
		 */
		v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;
		cy_as_hal_write_register(tag, ep_dma_reg, v);
	} else {
		end_points[ep].pending	  = cy_false ;
		end_points[ep].type		 = cy_as_hal_none ;
		end_points[ep].buffer_valid = cy_false ;

		/*
		 * notify the API that we are done with rq on this EP
		 */
		if (callback) {
			DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
				end_points[ep].req_xfer_cnt);
			callback(tag, ep,
				end_points[ep].req_xfer_cnt,
				CY_AS_ERROR_SUCCESS);
		}
	}
}
/*
 * This function must be defined to transfer a block of data from
 * the WestBridge device.  This function can use the burst read
 * (DMA) capabilities of WestBridge to do this, or it can just
 * copy the data using reads.
 */
void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
					uint8_t ep, void *buf,
					uint32_t size, uint16_t maxsize)
{
	uint32_t addr ;
	uint16_t v ;

	/*
	 * Note: "size" is the actual request size
	 * "maxsize" - is the P port fragment size
	 * No EP0 or EP1 traffic should get here
	 */
	cy_as_hal_assert(ep != 0 && ep != 1) ;

	/*
	 * If this asserts, we have an ordering problem.
	 * Another DMA request is coming down before the
	 * previous one has completed. we should not get
	 * new requests if current is still in process
	 */

	cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);

	end_points[ep].buffer_valid = cy_true ;
	end_points[ep].type = cy_as_hal_read ;
	end_points[ep].pending = cy_true;
	end_points[ep].req_xfer_cnt = 0;
	end_points[ep].req_length = size;

	if (size >= maxsize) {
		/*
		 * set the xfer size for the very first DMA xfer operation
		 * to the port max packet size (typically 512 or 1024)
		 */
		end_points[ep].dma_xfer_sz = maxsize;
	} else {
		/*
		 * use the full request size so that we can handle
		 * small xfers in the case of non-storage EPs
		 */
		end_points[ep].dma_xfer_sz = size;
	}

	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2 ;

	if (end_points[ep].sg_list_enabled) {
		/*
		 * Handle sg-list assisted EPs
		 * seg_xfer_cnt - keeps track of the number of bytes
		 *		transferred in the current sg segment
		 * buf - pointer to the SG list
		 * data_p - data pointer for the 1st DMA segment
		 */
		end_points[ep].seg_xfer_cnt = 0 ;
		end_points[ep].sg_p = buf;
		end_points[ep].data_p = sg_virt(end_points[ep].sg_p);

		#ifdef DBGPRN_DMA_SETUP_RD
		DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
			   "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
				ep,
				buf,
				end_points[ep].data_p,
				size,
				maxsize);
		#endif
		v = (end_points[ep].dma_xfer_sz &
				CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;
		cy_as_hal_write_register(tag, addr, v);
	} else {
		/*
		 * A non sg-list EP passes a void *buf rather than a scatterlist *sg
		 */
		#ifdef DBGPRN_DMA_SETUP_RD
			DBGPRN("%s:non-sg_list EP:%d,"
					"RQ_sz:%d, maxsz:%d\n",
					__func__, ep, size,  maxsize);
		#endif

		end_points[ep].sg_p = NULL;

		/*
		 * must be a VMA of a membuf in kernel space
		 */
		end_points[ep].data_p = buf;

		/*
		 * Program the EP DMA register for Storage endpoints only.
		 */
		if (is_storage_e_p(ep)) {
			v = (end_points[ep].dma_xfer_sz &
					CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
					CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;
			cy_as_hal_write_register(tag, addr, v);
		}
	}
}
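
Illustrative call only (the buffer and endpoint number are assumptions): queue a 512-byte read on a non sg-list endpoint. For an sg-list enabled endpoint the buf argument would be the scatterlist pointer instead.

static uint8_t example_read_buf[512];

static void example_start_ep2_read(cy_as_hal_device_tag tag)
{
	/* size == maxsize, so the first DMA fragment covers the whole buffer */
	cy_as_hal_dma_setup_read(tag, 2, example_read_buf,
			sizeof(example_read_buf), 512);
}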
/*
 * This function must be defined to transfer a block of data to
 * the WestBridge device.  This function can use the burst write
 * (DMA) capabilities of WestBridge to do this, or it can just copy
 * the data using writes.
 */
void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
						uint8_t ep, void *buf,
						uint32_t size, uint16_t maxsize)
{
	uint32_t addr = 0 ;
	uint16_t v  = 0;

	/*
	 * Note: "size" is the actual request size
	 * "maxsize" - is the P port fragment size
	 * No EP0 or EP1 traffic should get here
	 */
	cy_as_hal_assert(ep != 0 && ep != 1) ;

	/*
	 * If this asserts, we have an ordering problem.  Another DMA request
	 * is coming down before the previous one has completed.
	 */
	cy_as_hal_assert(end_points[ep].buffer_valid == cy_false) ;
	end_points[ep].buffer_valid = cy_true ;
	end_points[ep].type = cy_as_hal_write ;
	end_points[ep].pending = cy_true;

	/*
	 * total length of the request
	 */
	end_points[ep].req_length = size;

	if (size >= maxsize) {
		/*
		 * set the xfer size for the very first DMA xfer operation
		 * to the port max packet size (typically 512 or 1024)
		 */
		end_points[ep].dma_xfer_sz = maxsize;
	} else {
		/*
		 * smaller xfers for non-storage EPs
		 */
		end_points[ep].dma_xfer_sz = size;
	}

	/*
	 * check whether the EP transfer mode uses an sg_list rather than a
	 * memory buffer.  block devices pass the sg_list to the HAL, so the
	 * HAL can get to the real physical address of each segment and set
	 * up DMA controller hardware (if there is one)
	 */
	if (end_points[ep].sg_list_enabled) {
		/*
		 * buf -  pointer to the SG list
		 * data_p - data pointer to the 1st DMA segment
		 * seg_xfer_cnt - keeps track of the number of bytes sent
		 *		in the current sg_list segment
		 * req_xfer_cnt - keeps track of the total number of bytes
		 *		transferred for the request
		 */
		end_points[ep].sg_p = buf;
		end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
		end_points[ep].seg_xfer_cnt = 0 ;
		end_points[ep].req_xfer_cnt = 0;

#ifdef DBGPRN_DMA_SETUP_WR
		DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
				"req_sz:%d, maxsz:%d\n",
				__func__,
				ep,
				buf,
				end_points[ep].data_p,
				size,
				maxsize);
#endif

	} else {
		/*
		 * setup XFER for non sg_list assisted EPs
		 */

		#ifdef DBGPRN_DMA_SETUP_WR
			DBGPRN("<1>%s non storage or sz < 512:"
					"EP:%d, sz:%d\n", __func__, ep, size);
		#endif

		end_points[ep].sg_p = NULL;

		/*
		 * must be a VMA of a membuf in kernel space
		 */
		end_points[ep].data_p = buf;

		/*
		 * keeps track of the number of bytes xferred for the request
		 */
		end_points[ep].req_xfer_cnt = 0;
	}

	/*
	 * Tell WB we are ready to send data on the given endpoint
	 */
	v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
			| CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;

	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2 ;

	cy_as_hal_write_register(tag, addr, v) ;
}
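
Illustrative call only, mirroring the read example (buffer and endpoint number are assumptions): queue a 1024-byte write whose first DMA fragment is capped at the 512-byte P port packet size.

static uint8_t example_write_buf[1024];

static void example_start_ep4_write(cy_as_hal_device_tag tag)
{
	/* size > maxsize, so dma_xfer_sz starts at the 512-byte fragment */
	cy_as_hal_dma_setup_write(tag, 4, example_write_buf,
			sizeof(example_write_buf), 512);
}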