Example #1
File: qm_dma.c Project: Hlotfy/qmsi
/*
 * Transfer interrupt handler.
 */
static void qm_dma_isr_handler(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id)
{
	uint32_t transfer_length;
	dma_cfg_prv_t *chan_cfg;
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;

	QM_ASSERT(int_reg->status_int_low & QM_DMA_INT_STATUS_TFR);
	QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id));

	/* Clear interrupt */
	int_reg->clear_tfr_low = BIT(channel_id);

	/* Mask interrupts for this channel */
	int_reg->mask_tfr_low = BIT(channel_id) << 8;
	int_reg->mask_err_low = BIT(channel_id) << 8;

	/* Call the callback if registered and pass the
	 * transfer length */
	chan_cfg = &dma_channel_config[dma][channel_id];
	if (chan_cfg->client_callback) {
		transfer_length = get_transfer_length(dma, channel_id);
		chan_cfg->client_callback(chan_cfg->callback_context,
					  transfer_length, 0);
	}
}
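
The handler above invokes client_callback as (callback_context,
transfer_length, 0), so a matching client callback takes a context
pointer, the transfer length, and an error code, with 0 meaning
success. A minimal sketch of such a callback follows; the my_dma_done
name and the note on registration are illustrative assumptions, not
part of the qmsi API.

#include <stdint.h>
#include <stdio.h>

/*
 * Callback shape inferred from the call site in qm_dma_isr_handler:
 * (void *context, uint32_t len, int error_code). Assumption: a pointer
 * to a function like this is stored in the channel config's
 * client_callback/callback_context fields before the transfer starts.
 */
static void my_dma_done(void *context, uint32_t len, int error_code)
{
	if (0 == error_code) {
		printf("DMA done: %u items (ctx=%p)\n", (unsigned)len,
		       context);
	} else {
		printf("DMA error: %d\n", error_code);
	}
}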
Example #2
/*
 * Transfer interrupt handler.
 * - Single block: TFR triggers a user callback invocation
 * - Multiblock (contiguous): TFR triggers a user callback invocation; block
 *   interrupts are silent
 * - Multiblock (linked list): the last block interrupt on each buffer triggers
 *   a user callback invocation; TFR is silent
 */
static void qm_dma_isr_handler(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id)
{
	dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];
	uint32_t transfer_length =
	    get_transfer_length(dma, channel_id, prv_cfg);

	/* The status can't be asserted here as there is a possible race
	 * condition when terminating channels. It's possible that an interrupt
	 * can be generated before the terminate function masks the
	 * interrupts. */

	if (int_reg->status_int_low & QM_DMA_INT_STATUS_TFR) {

		QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id));

		/* Transfer completed, clear interrupt */
		int_reg->clear_tfr_low = BIT(channel_id);

		/* If multiblock, the final block is also completed. */
		int_reg->clear_block_low = BIT(channel_id);

		/* Mask interrupts for this channel */
		int_reg->mask_block_low = BIT(channel_id) << 8;
		int_reg->mask_tfr_low = BIT(channel_id) << 8;
		int_reg->mask_err_low = BIT(channel_id) << 8;

		/* Clear llp register */
		chan_reg->llp_low = 0;

		/*
		 * Call the callback if registered and pass the transfer length.
		 */
		if (prv_cfg->client_callback) {
			/* Single block or contiguous multiblock. */
			prv_cfg->client_callback(prv_cfg->callback_context,
						 transfer_length, 0);
		}
	} else if (int_reg->status_int_low & QM_DMA_INT_STATUS_BLOCK) {
		/* Block interrupts are only unmasked in multiblock mode. */
		QM_ASSERT(int_reg->status_block_low & BIT(channel_id));

		/* Block completed, clear interrupt. */
		int_reg->clear_block_low = BIT(channel_id);

		prv_cfg->num_blocks_int_pending--;

		if (NULL != prv_cfg->lli_tail &&
		    0 == prv_cfg->num_blocks_int_pending) {
			/*
			 * Linked-list mode: invoke the callback if this is
			 * the last block of the buffer.
			 */
			if (prv_cfg->client_callback) {
				prv_cfg->client_callback(
				    prv_cfg->callback_context, transfer_length,
				    0);
			}

			/* Buffer done, reset the count for the next buffer. */
			prv_cfg->num_blocks_int_pending =
			    prv_cfg->num_blocks_per_buffer;

		} else if (NULL == prv_cfg->lli_tail) {
			QM_ASSERT(prv_cfg->num_blocks_int_pending <
				  prv_cfg->num_blocks_per_buffer);
			if (1 == prv_cfg->num_blocks_int_pending) {
				/*
				 * Contiguous mode. We have just processed the
				 * next-to-last block; clear CFG.RELOAD so
				 * that the next block is the last one to be
				 * transferred.
				 */
				chan_reg->cfg_low &=
				    ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
				chan_reg->cfg_low &=
				    ~QM_DMA_CFG_L_RELOAD_DST_MASK;
			}
		}
	}
}
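
Both handlers mask a channel's interrupt by writing BIT(channel_id) << 8
to the mask registers. This matches a DesignWare-style mask layout, in
which the low byte holds per-channel mask bits and the high byte holds
the corresponding write-enable bits: a write only takes effect on
channels whose write-enable bit is set, so BIT(channel_id) << 8 clears
that one channel's mask bit (disabling its interrupt) without disturbing
the other channels. A short sketch under that assumption; the macro
names are illustrative, not part of qmsi.

#include <stdint.h>

#define BIT(n) (1U << (n))

/* Write-enable bit set, mask bit clear: disable this channel's interrupt. */
#define DMA_INT_CH_DISABLE(ch) (BIT(ch) << 8)
/* Write-enable bit and mask bit both set: enable this channel's interrupt. */
#define DMA_INT_CH_ENABLE(ch) ((BIT(ch) << 8) | BIT(ch))

/*
 * Usage, mirroring the handler:
 *   int_reg->mask_tfr_low = DMA_INT_CH_DISABLE(channel_id);
 */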