Example #1
void cy_as_intr_service_interrupt(cy_as_hal_device_tag tag)
{
	uint16_t v;
	cy_as_device *dev_p;

	dev_p = cy_as_device_find_from_tag(tag);

	/*
	 * Only power management interrupts can occur before the
	 * Antioch API setup is complete. If this is a PM interrupt,
	 * handle it here; otherwise output a warning message.
	 */
	if (dev_p == 0) {
		v = cy_as_hal_read_register(tag, CY_AS_MEM_P0_INTR_REG);
		if (v == CY_AS_MEM_P0_INTR_REG_PMINT) {
			/* Read the PWR_MAGT_STAT register
			 * to clear this interrupt. */
			v = cy_as_hal_read_register(tag,
				CY_AS_MEM_PWR_MAGT_STAT);
		} else
			cy_as_hal_print_message("stray antioch "
				"interrupt detected"
				", tag not associated "
				"with any created device.");
		return;
	}

	/* Make sure we got a valid object from cy_as_device_find_from_tag */
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG);

	if (v & CY_AS_MEM_P0_INTR_REG_MCUINT)
		cy_as_mcu_interrupt_handler(dev_p);

	if (v & CY_AS_MEM_P0_INTR_REG_PMINT)
		cy_as_power_management_interrupt_handler(dev_p);

	if (v & CY_AS_MEM_P0_INTR_REG_PLLLOCKINT)
		cy_as_pll_lock_loss_interrupt_handler(dev_p);

	/* If the interrupt module is not running, no mailbox
	 * interrupts are expected from the west bridge. */
	if (cy_as_device_is_intr_running(dev_p) == 0)
		return;

	if (v & CY_AS_MEM_P0_INTR_REG_MBINT)
		cy_as_mail_box_interrupt_handler(dev_p);
}
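
The dispatcher above relies on the platform HAL to invoke it from its own
interrupt handler with the tag of the interrupting device. Below is a
minimal sketch of that wiring for a Linux HAL; the handler name
cy_as_hal_sample_isr and the use of dev_id as the device tag are
assumptions for illustration, not part of the driver code above.

#include <linux/interrupt.h>

/* Hypothetical HAL-level ISR: forward the hardware interrupt to the
 * API dispatcher, which reads CY_AS_MEM_P0_INTR_REG and fans out to
 * the MCU, power-management, PLL and mailbox handlers shown above. */
static irqreturn_t cy_as_hal_sample_isr(int irq, void *dev_id)
{
	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_id;

	cy_as_intr_service_interrupt(tag);

	return IRQ_HANDLED;
}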
Example #2
/*
 * This function is called when the HAL layer has
 * completed the last requested DMA operation.
 * It sends/receives the next batch of data
 * associated with the current DMA request, or,
 * if that request is complete, moves on to the
 * next DMA request in the queue.
 */
void
cy_as_dma_completed_callback(cy_as_hal_device_tag tag,
	cy_as_end_point_number_t ep, uint32_t cnt, cy_as_return_status_t status)
{
	uint32_t mask;
	cy_as_dma_queue_entry *req_p;
	cy_as_dma_end_point *ep_p;
	cy_as_device *dev_p = cy_as_device_find_from_tag(tag);

	/* Make sure the HAL layer gave us good parameters */
	cy_as_hal_assert(dev_p != 0);
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
	cy_as_hal_assert(ep < 16);

	/* Get the endpoint pointer, guarding against an
	 * out-of-range endpoint number. */
	if (ep < 16)
		ep_p = CY_AS_NUM_EP(dev_p, ep);
	else
		return;

	cy_as_hal_assert(ep_p->queue_p != 0);

	/* Get a pointer to the current entry in the queue */
	mask = cy_as_hal_disable_interrupts();
	req_p = ep_p->queue_p;

	/* Update the offset to reflect the data actually received or sent */
	req_p->offset += cnt;

	/*
	 * If we are still sending/receiving the current packet,
	 * send/receive the next chunk. Basically, we keep going
	 * as long as we have not sent/received enough data, we
	 * are not doing a packet operation, and the last packet
	 * sent or received was a full sized packet. In other
	 * words, when we are NOT doing a packet operation, a
	 * less than full size packet (a short packet) terminates
	 * the operation.
	 *
	 * Note: if this is an EP1 request and the request has
	 * timed out, it means the buffer is not free; we have
	 * to resend the data.
	 *
	 * Note: for MTP data transfers, the DMA transfer for the
	 * next packet can only be started asynchronously, after
	 * a firmware event notifies that the device is ready.
	 */
	if (((req_p->offset != req_p->size) && (req_p->packet == cy_false) &&
		((cnt == ep_p->maxhaldata) || ((cnt == ep_p->maxhwdata) &&
		((ep != CY_AS_MTP_READ_ENDPOINT) ||
		(cnt == dev_p->usb_max_tx_size)))))
			|| ((ep == 1) && (status == CY_AS_ERROR_TIMEOUT))) {
		cy_as_hal_enable_interrupts(mask);

		/*
		 * Send the request again to send the next block of
		 * data. Special handling is needed for MTP transfers
		 * on EPs 2 and 6: the send_next_request will be
		 * processed based on the event sent by the firmware.
		 */
		if ((ep == CY_AS_MTP_WRITE_ENDPOINT) ||
			((ep == CY_AS_MTP_READ_ENDPOINT) &&
			(!cy_as_dma_end_point_is_direction_in(ep_p)))) {
			cy_as_dma_end_point_set_stopped(ep_p);
			cy_as_dma_set_drq(dev_p, ep, cy_false);
		} else
			cy_as_dma_send_next_dma_request(dev_p, ep_p);
	} else {
		/*
		 * we get here if ...
		 *	we have sent or received all of the data
		 *		 or
		 *	we are doing a packet operation
		 *		 or
		 *	we receive a short packet
		 */

		/*
		 * remove this entry from the DMA queue for this endpoint.
		 */
		cy_as_dma_end_point_clear_in_transit(ep_p);
		ep_p->queue_p = req_p->next_p;
		if (ep_p->last_p == req_p) {
			/*
			 * we have removed the last packet from the DMA
			 * queue; disable the DRQ for this endpoint.
			 */
			ep_p->last_p = 0;
			cy_as_hal_enable_interrupts(mask);
			cy_as_dma_set_drq(dev_p, ep, cy_false);
		} else
			cy_as_hal_enable_interrupts(mask);

		if (req_p->cb) {
			/*
			 * if the request has a callback associated with it,
			 * call the callback to tell the interested party that
			 * this DMA request has completed.
			 *
			 * note: we set the in_callback bit to ensure that we
			 * cannot recursively call a synchronous-only API
			 * function from within a callback.
			 */
			cy_as_device_set_in_callback(dev_p);
			(*req_p->cb)(dev_p, ep, req_p->buf_p,
				req_p->offset, status);
			cy_as_device_clear_in_callback(dev_p);
		}

		/*
		 * we are done with this request, put it on the freelist to be
		 * reused at a later time.
		 */
		cy_as_dma_add_request_to_free_queue(dev_p, req_p);

		if (ep_p->queue_p == 0) {
			/*
			 * if the endpoint is out of DMA entries, set the
			 * endpoint as stopped.
			 */
			cy_as_dma_end_point_set_stopped(ep_p);

			/*
			 * the DMA queue is empty; wake any task waiting
			 * for the queue to drain.
			 */
			if (cy_as_dma_end_point_is_sleeping(ep_p)) {
				cy_as_dma_end_point_set_wake_state(ep_p);
				cy_as_hal_wake(&ep_p->channel);
			}
		} else {
			/*
			 * if the queued operation is an MTP transfer, or a
			 * storage transfer for which the firmware has not
			 * yet signaled that DMA can start, wait for the
			 * firmware event before sending down the next DMA
			 * request.
			 */
			if ((ep == CY_AS_MTP_WRITE_ENDPOINT) ||
				((ep == CY_AS_MTP_READ_ENDPOINT) &&
				(!cy_as_dma_end_point_is_direction_in(ep_p))) ||
				((ep == dev_p->storage_read_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p)))
				|| ((ep == dev_p->storage_write_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p))))
				cy_as_dma_end_point_set_stopped(ep_p);
			else
				cy_as_dma_send_next_dma_request(dev_p, ep_p);
		}
	}
}
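
For context, the HAL's DMA engine is expected to report each finished
transfer back through cy_as_dma_completed_callback() with the number of
bytes it actually moved. The helper below is a hedged sketch of that
handoff; my_hal_dma_done(), its parameters, and the assumption that
CY_AS_ERROR_SUCCESS is the driver's success code are illustrative, while
cy_as_dma_completed_callback() and CY_AS_ERROR_TIMEOUT come from the
code above.

/* Hypothetical HAL-side completion hook (illustrative only): tell the
 * API layer how many bytes moved so it can either queue the next chunk
 * of the current request or retire the request and move to the next
 * entry in the endpoint's DMA queue. */
static void my_hal_dma_done(cy_as_hal_device_tag tag,
	cy_as_end_point_number_t ep, uint32_t bytes_moved, int timed_out)
{
	cy_as_dma_completed_callback(tag, ep, bytes_moved,
		timed_out ? CY_AS_ERROR_TIMEOUT : CY_AS_ERROR_SUCCESS);
}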