Exemplo n.º 1
0
/**
 * @brief Find the currently executing interrupt vector, if any
 *
 * This routine finds the vector of the interrupt that is being processed.
 * The ISR (In-Service Register) register contains the vectors of the
 * interrupts in service, and the highest set bit is the identification of
 * the interrupt being currently processed.
 *
 * MVIC ISR registers' offsets:
 * --------------------
 * | Offset | bits    |
 * --------------------
 * | 0110H  |  32:63  |
 * --------------------
 *
 * @return The vector of the interrupt that is currently being processed, or
 * -1 if no interrupt is in service.
 */
int _loapic_isr_vector_get(void)
{
	/* pointer to ISR vector table (MVIC implements vectors 32:63 only) */
	volatile int *pReg;
	int isr;

	pReg = (volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_MVIC_ISR);
	isr = *pReg;

	/* find_msb_set() returns 0 for an all-zero argument, which would
	 * make the expression below yield the bogus vector 31; report -1
	 * instead when nothing is in service.
	 */
	if (!isr) {
		return -1;
	}

	return 32 + (find_msb_set(isr) - 1);
}
Exemplo n.º 2
0
Arquivo: mvic.c Projeto: 01org/zephyr
/**
 * @brief Find the currently executing interrupt vector, if any
 *
 * Reads the MVIC In-Service Register (ISR); its set bits are the vectors
 * of the interrupts currently in service, and the highest set bit
 * identifies the interrupt being processed right now.
 *
 * MVIC ISR registers' offsets:
 * --------------------
 * | Offset | bits    |
 * --------------------
 * | 0110H  |  32:63  |
 * --------------------
 *
 * @return The vector of the interrupt that is currently being processed, or
 * -1 if this can't be determined
 */
int __irq_controller_isr_vector_get(void)
{
	int in_service = sys_read32(MVIC_ISR);

	if (likely(in_service)) {
		/* find_msb_set() is 1-based; MVIC vectors start at 32 */
		return 32 + find_msb_set(in_service) - 1;
	}

	return -1;
}
Exemplo n.º 3
0
/**
 * @brief Find the currently executing interrupt vector, if any
 *
 * Scans the LOAPIC In-Service Registers (ISR); their set bits are the
 * vectors of the interrupts currently in service, and the highest set bit
 * identifies the interrupt being processed right now.
 *
 * This function must be called with interrupts locked in interrupt context.
 *
 * ISR registers' offsets:
 * --------------------
 * | Offset | bits    |
 * --------------------
 * | 0100H  |   0:31  |
 * | 0110H  |  32:63  |
 * | 0120H  |  64:95  |
 * | 0130H  |  96:127 |
 * | 0140H  | 128:159 |
 * | 0150H  | 160:191 |
 * | 0160H  | 192:223 |
 * | 0170H  | 224:255 |
 * --------------------
 *
 * @return The vector of the interrupt that is currently being processed, or -1
 * if no IRQ is being serviced.
 */
int __irq_controller_isr_vector_get(void)
{
	int block;

	/* Walk the ISR words from the highest vector group downward.
	 * Block 0 is skipped: its bits never light up as vectors 0-31 are
	 * all exception or reserved vectors.
	 */
	for (block = 7; likely(block > 0); block--) {
		int bits = sys_read32(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_ISR +
				      (block * 0x10));

		if (bits != 0) {
			/* find_msb_set() is 1-based */
			return (block * 32) + find_msb_set(bits) - 1;
		}
	}

	return -1;
}
Exemplo n.º 4
0
/* Interrupt handler, gets messages on all incoming enabled mailboxes.
 *
 * Looks up the IPM device for the signalled channel, invokes its
 * registered callback with the received control word and data, then
 * acknowledges the interrupt by clearing the channel's IRQ and status
 * bits.
 */
void quark_se_ipm_isr(void *param)
{
    int channel;
    int sts, bit;
    struct device *d;
    struct quark_se_ipm_config_info *config;
    struct quark_se_ipm_driver_data *driver_data;
    volatile struct quark_se_ipm *ipm;
    unsigned int key;

    ARG_UNUSED(param);
    sts = quark_se_ipm_sts_get();

    __ASSERT(sts, "spurious IPM interrupt");
    /* Each channel owns two consecutive bits in the status word (see the
     * 0x3 mask below), so the highest set bit maps to channel bit/2.
     * find_msb_set() is 1-based, hence the -1.
     */
    bit = find_msb_set(sts) - 1;
    channel = bit / 2;
    d = device_by_channel[channel];

    __ASSERT(d, "got IRQ on channel with no IPM device");
    config = d->config->config_info;
    driver_data = d->driver_data;
    ipm = config->ipm;

    /* Deliver the message before acknowledging the hardware */
    __ASSERT(driver_data->callback, "enabled IPM channel with no callback");
    driver_data->callback(driver_data->callback_ctx, ipm->ctrl.ctrl,
                          &ipm->data);

    key = irq_lock();

    ipm->sts.irq = 1; /* Clear the interrupt bit */
    ipm->sts.sts = 1; /* Clear channel status bit */

    /* Wait for the above register writes to clear the channel
     * to propagate to the global channel status register
     */
    while (quark_se_ipm_sts_get() & (0x3 << (channel * 2))) {
        /* Busy-wait */
    }
    irq_unlock(key);
}
Exemplo n.º 5
0
/**
 * @brief Configure a DesignWare DMA channel for a (possibly linked) transfer.
 *
 * Validates the request, allocates a linked list of hardware descriptors
 * (LLIs) sized to cfg->block_count, fills one descriptor per block starting
 * from cfg->head_block, and records the completion callback. If the last
 * block chains back to another block the descriptor list is made circular.
 *
 * @param dev     DMA controller device.
 * @param channel Channel number; must be < DW_MAX_CHAN.
 * @param cfg     Transfer configuration. Source/dest data sizes and burst
 *                lengths must match; data size must be BYTE, WORD or DWORD.
 *
 * @return 0 on success, -EINVAL on invalid arguments, -ENOMEM if the
 *         descriptor list cannot be allocated.
 */
static int dw_dma_config(struct device *dev, u32_t channel,
			 struct dma_config *cfg)
{
	struct dw_dma_dev_data *const dev_data = DEV_DATA(dev);
	struct dma_chan_data *chan_data;
	struct dma_block_config *cfg_blocks;
	u32_t cnt;
	u32_t m_size;
	u32_t tr_width;

	struct dw_lli2 *lli_desc;
	struct dw_lli2 *lli_desc_tail;

	if (channel >= DW_MAX_CHAN) {
		return -EINVAL;
	}

	__ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size);
	__ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length);

	if (cfg->source_data_size != BYTE && cfg->source_data_size != WORD &&
	    cfg->source_data_size != DWORD) {
		SYS_LOG_ERR("Invalid 'source_data_size' value");
		return -EINVAL;
	}

	/* The descriptor loop below dereferences head_block unconditionally;
	 * reject an empty transfer up front.
	 */
	if (cfg->head_block == NULL || cfg->block_count == 0) {
		SYS_LOG_ERR("Invalid block configuration");
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	/* default channel config */
	chan_data->direction = cfg->channel_direction;
	chan_data->cfg_lo = DW_CFG_LOW_DEF;
	/* NOTE(review): cfg_hi is initialized from DW_CFG_LOW_DEF; confirm
	 * that a DW_CFG_HIGH_DEF default was not intended here.
	 */
	chan_data->cfg_hi = DW_CFG_LOW_DEF;

	/* data_size = (2 ^ tr_width) */
	tr_width = find_msb_set(cfg->source_data_size) - 1;
	SYS_LOG_DBG("tr_width=%d", tr_width);

	/* burst_size = (2 ^ msize) */
	m_size = find_msb_set(cfg->source_burst_length) - 1;
	SYS_LOG_DBG("m_size=%d", m_size);

	cfg_blocks = cfg->head_block;

	/* Allocate space for the linked list */
	chan_data->lli = (struct dw_lli2 *)k_malloc(sizeof(struct dw_lli2)
							* (cfg->block_count));
	if (chan_data->lli == NULL) {
		SYS_LOG_ERR("not enough memory\n");
		return -ENOMEM;
	}

	(void)memset(chan_data->lli, 0,
		     (sizeof(struct dw_lli2) * cfg->block_count));
	lli_desc = chan_data->lli;
	lli_desc_tail = lli_desc + cfg->block_count - 1;

	/* initialize descriptors */
	cnt = cfg->block_count;

	do {
		lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(tr_width);
		lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(tr_width);
		lli_desc->ctrl_lo |= DW_CTLL_SRC_MSIZE(m_size);
		lli_desc->ctrl_lo |= DW_CTLL_DST_MSIZE(m_size);

		/* enable interrupt */
		lli_desc->ctrl_lo |= DW_CTLL_INT_EN;

		switch (cfg->channel_direction) {

		case MEMORY_TO_MEMORY:
			lli_desc->ctrl_lo |= DW_CTLL_FC_M2M;
			lli_desc->ctrl_lo |= DW_CTLL_SRC_INC | DW_CTLL_DST_INC;
			break;

		case MEMORY_TO_PERIPHERAL:
			lli_desc->ctrl_lo |= DW_CTLL_FC_M2P;
			lli_desc->ctrl_lo |= DW_CTLL_SRC_INC | DW_CTLL_DST_FIX;

			/* Assign a hardware handshaking interface (0-15) to the
			 * destination of channel
			 */
			chan_data->cfg_hi |=
				DW_CFGH_DST_PER(cfg->dma_slot);
			break;

		case PERIPHERAL_TO_MEMORY:
			lli_desc->ctrl_lo |= DW_CTLL_FC_P2M;
			lli_desc->ctrl_lo |= DW_CTLL_SRC_FIX | DW_CTLL_DST_INC;

			/* Assign a hardware handshaking interface (0-15) to the
			 * source of channel
			 */
			chan_data->cfg_hi |=
				DW_CFGH_SRC_PER(cfg->dma_slot);
			break;

		default:
			SYS_LOG_ERR("channel_direction %d is not supported",
				    cfg->channel_direction);
			/* Don't leak the descriptor list allocated above */
			k_free(chan_data->lli);
			chan_data->lli = NULL;
			return -EINVAL;
		}

		lli_desc->sar = cfg_blocks->source_address;
		lli_desc->dar = cfg_blocks->dest_address;

		/* Block size */
		lli_desc->ctrl_hi = DW_CFG_CLASS(
				dev_data->channel_data->chan[channel].class) |
				cfg_blocks->block_size;

		/* set next descriptor in list */
		lli_desc->llp = (u32_t)(lli_desc + 1);
		lli_desc->ctrl_lo |= DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN;

		/* next descriptor */
		lli_desc++;
		cfg_blocks = cfg_blocks->next_block;
		cnt--;
	} while (cfg_blocks && cnt);

	/* check if application requests circular list */
	if (cfg_blocks) {
		/*
		 * if the last block was pointing to another block, then
		 * it means the application is requesting a circular list
		 */
		lli_desc_tail->llp = (u32_t)chan_data->lli;
	} else {
		lli_desc_tail->llp = 0x0;
		lli_desc_tail->ctrl_lo &=
			~(DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN);
	}

#ifdef CONFIG_DCACHE_WRITEBACK
	/* Flush the cache so that the descriptors are written to the memory.
	 * If this is not done, DMA engine will read the old stale data at
	 * that location and hence the DMA operation will not succeed.
	 */
	dcache_writeback_region(chan_data->lli,
			sizeof(struct dw_lli2) * cfg->block_count);
#endif

	/* Configure a callback appropriately depending on whether the
	 * interrupt is requested at the end of transaction completion or
	 * at the end of each block.
	 */
	if (cfg->complete_callback_en) {
		chan_data->dma_blkcallback = cfg->dma_callback;
	} else {
		chan_data->dma_tfrcallback = cfg->dma_callback;
	}

	return 0;
}
Exemplo n.º 6
0
/**
 * @brief Configure an Atmel SAM XDMAC channel for a single-block transfer.
 *
 * Validates the request, builds the channel configuration word for the
 * requested direction, programs the channel, records the callback, and
 * finally programs the transfer addresses and microblock length.
 *
 * @param dev     DMA controller device.
 * @param channel Channel number; must be < DMA_CHANNELS_NO.
 * @param cfg     Transfer configuration; only block_count == 1 supported.
 *
 * @return 0 on success, -EINVAL on invalid arguments, otherwise the error
 *         from the channel/transfer configuration helpers.
 */
static int sam_xdmac_config(struct device *dev, u32_t channel,
			    struct dma_config *cfg)
{
	struct sam_xdmac_dev_data *const ddata = DEV_DATA(dev);
	struct sam_xdmac_channel_config ch_cfg;
	struct sam_xdmac_transfer_config xfer_cfg;
	u32_t msize_log2;
	u32_t dwidth_log2;
	int err;

	if (channel >= DMA_CHANNELS_NO) {
		return -EINVAL;
	}

	__ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size);
	__ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length);

	if (cfg->source_data_size != 1 && cfg->source_data_size != 2 &&
	    cfg->source_data_size != 4) {
		SYS_LOG_ERR("Invalid 'source_data_size' value");
		return -EINVAL;
	}

	if (cfg->block_count != 1) {
		SYS_LOG_ERR("Only single block transfer is currently supported."
			    " Please submit a patch.");
		return -EINVAL;
	}

	/* Both fields are log2 encodings of the configured values */
	msize_log2 = find_msb_set(cfg->source_burst_length) - 1;
	SYS_LOG_DBG("burst_size=%d", msize_log2);
	dwidth_log2 = find_msb_set(cfg->source_data_size) - 1;
	SYS_LOG_DBG("data_size=%d", dwidth_log2);

	switch (cfg->channel_direction) {
	case MEMORY_TO_MEMORY:
		ch_cfg.cfg = XDMAC_CC_TYPE_MEM_TRAN
			   | XDMAC_CC_MBSIZE(msize_log2 == 0 ? 0
							     : msize_log2 - 1)
			   | XDMAC_CC_SAM_INCREMENTED_AM
			   | XDMAC_CC_DAM_INCREMENTED_AM;
		break;
	case MEMORY_TO_PERIPHERAL:
		ch_cfg.cfg = XDMAC_CC_TYPE_PER_TRAN
			   | XDMAC_CC_CSIZE(msize_log2)
			   | XDMAC_CC_DSYNC_MEM2PER
			   | XDMAC_CC_SAM_INCREMENTED_AM
			   | XDMAC_CC_DAM_FIXED_AM;
		break;
	case PERIPHERAL_TO_MEMORY:
		ch_cfg.cfg = XDMAC_CC_TYPE_PER_TRAN
			   | XDMAC_CC_CSIZE(msize_log2)
			   | XDMAC_CC_DSYNC_PER2MEM
			   | XDMAC_CC_SAM_FIXED_AM
			   | XDMAC_CC_DAM_INCREMENTED_AM;
		break;
	default:
		SYS_LOG_ERR("'channel_direction' value %d is not supported",
			    cfg->channel_direction);
		return -EINVAL;
	}

	/* Fields common to every direction */
	ch_cfg.cfg |= XDMAC_CC_DWIDTH(dwidth_log2)
		    | XDMAC_CC_SIF_AHB_IF1
		    | XDMAC_CC_DIF_AHB_IF1
		    | XDMAC_CC_PERID(cfg->dma_slot);
	ch_cfg.ds_msp = 0;
	ch_cfg.sus = 0;
	ch_cfg.dus = 0;
	/* Interrupt on block end vs. linked-list end, plus optional errors */
	ch_cfg.cie = (cfg->complete_callback_en ? XDMAC_CIE_BIE
						: XDMAC_CIE_LIE)
		   | (cfg->error_callback_en ? XDMAC_INT_ERR : 0);

	err = sam_xdmac_channel_configure(dev, channel, &ch_cfg);
	if (err < 0) {
		return err;
	}

	ddata->dma_channels[channel].callback = cfg->dma_callback;

	(void)memset(&xfer_cfg, 0, sizeof(xfer_cfg));
	xfer_cfg.sa = cfg->head_block->source_address;
	xfer_cfg.da = cfg->head_block->dest_address;
	/* ublen counts data units, so scale the byte size down by the width */
	xfer_cfg.ublen = cfg->head_block->block_size >> dwidth_log2;

	return sam_xdmac_transfer_configure(dev, channel, &xfer_cfg);
}
}