/**
 * Initialize a QMSI I2C controller device.
 *
 * Sets up the driver's semaphores, connects and enables the controller's
 * interrupt, ungates the peripheral clock, applies the default bus
 * configuration and marks the device PM-active.
 *
 * @param dev I2C controller device to initialize.
 *
 * @return 0 on success, -EIO if the controller instance is unknown, or a
 *         negative error code propagated from i2c_qmsi_configure().
 */
static int i2c_qmsi_init(struct device *dev)
{
	struct i2c_qmsi_driver_data *driver_data = GET_DRIVER_DATA(dev);
	const struct i2c_qmsi_config_info *config = dev->config->config_info;
	qm_i2c_t instance = GET_CONTROLLER_INSTANCE(dev);
	int err;

	/* device_sync_sem starts at 0 (presumably given by the transfer
	 * completion path — confirm against the transfer code); sem starts
	 * at 1 and serializes access to the controller.
	 */
	k_sem_init(&driver_data->device_sync_sem, 0, UINT_MAX);
	k_sem_init(&driver_data->sem, 1, UINT_MAX);

	switch (instance) {
	case QM_I2C_0:
		/* Register interrupt handler, unmask IRQ and route it
		 * to Lakemont core.
		 */
		IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_I2C_0_INT),
			    CONFIG_I2C_0_IRQ_PRI, qm_i2c_0_irq_isr, NULL,
			    (IOAPIC_LEVEL | IOAPIC_HIGH));
		irq_enable(IRQ_GET_NUMBER(QM_IRQ_I2C_0_INT));
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->i2c_master_0_int_mask);
		break;
#ifdef CONFIG_I2C_1
	case QM_I2C_1:
		IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_I2C_1_INT),
			    CONFIG_I2C_1_IRQ_PRI, qm_i2c_1_irq_isr, NULL,
			    (IOAPIC_LEVEL | IOAPIC_HIGH));
		irq_enable(IRQ_GET_NUMBER(QM_IRQ_I2C_1_INT));
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->i2c_master_1_int_mask);
		break;
#endif /* CONFIG_I2C_1 */
	default:
		/* Unknown controller instance: nothing was enabled. */
		return -EIO;
	}

	/* Ungate the controller's clock before configuring it. */
	clk_periph_enable(config->clock_gate);

	err = i2c_qmsi_configure(dev, config->default_cfg);
	if (err < 0) {
		return err;
	}

	dev->driver_api = &api;

	i2c_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
	return 0;
}
/**
 * Initialize the AIO comparator device.
 *
 * Connects the comparator ISR (via aio_cmp_config()), masks and powers
 * down all comparators, captures the resulting hardware configuration
 * into the driver's config struct, clears all per-comparator callback
 * slots and finally enables the comparator interrupt line.
 *
 * @param dev Comparator device to initialize.
 *
 * @return Always 0.
 */
static int aio_qmsi_cmp_init(struct device *dev)
{
	u8_t i;
	struct aio_qmsi_cmp_dev_data_t *dev_data =
		(struct aio_qmsi_cmp_dev_data_t *)dev->driver_data;

	aio_cmp_config(dev);

	/* Disable all comparator interrupts */
	CMP_INTR_ROUTER |= INT_COMPARATORS_MASK;

	/* Clear status and disable all comparators */
	QM_SCSS_CMP->cmp_stat_clr |= INT_COMPARATORS_MASK;
	QM_SCSS_CMP->cmp_pwr &= ~INT_COMPARATORS_MASK;
	QM_SCSS_CMP->cmp_en &= ~INT_COMPARATORS_MASK;

	/* Don't use the QMSI callback */
	config.callback = NULL;
	/* Get Initial configuration from HW.
	 * NOTE(review): cmp_pwr and cmp_en were just cleared above, so
	 * these two reads capture the disabled state rather than the
	 * pre-init state — confirm this is intended.
	 */
	config.reference = QM_SCSS_CMP->cmp_ref_sel;
	config.polarity = QM_SCSS_CMP->cmp_ref_pol;
	config.power = QM_SCSS_CMP->cmp_pwr;
	config.cmp_en = QM_SCSS_CMP->cmp_en;

	/* Clear callback pointers */
	for (i = 0; i < dev_data->num_cmp; i++) {
		dev_data->cb[i].cb = NULL;
		dev_data->cb[i].param = NULL;
	}

	irq_enable(IRQ_GET_NUMBER(QM_IRQ_COMPARATOR_0_INT));

	return 0;
}
/**
 * Connect the comparator ISR to its interrupt line.
 *
 * Only performs the compile-time IRQ_CONNECT wiring; the interrupt is
 * enabled separately by the caller (aio_qmsi_cmp_init()).
 *
 * @param dev Unused; IRQ_CONNECT needs compile-time arguments, so the
 *            device is obtained via DEVICE_GET instead.
 *
 * @return Always 0.
 */
static int aio_cmp_config(struct device *dev)
{
	ARG_UNUSED(dev);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_COMPARATOR_0_INT),
		    CONFIG_AIO_COMPARATOR_0_IRQ_PRI, aio_qmsi_cmp_isr,
		    DEVICE_GET(aio_qmsi_cmp), 0);

	return 0;
}
/**
 * Initialize a QMSI SPI master controller device.
 *
 * Connects and enables the controller's interrupt, ungates its register
 * clock, unmasks its interrupt routing, sets up the chip-select GPIO and
 * the driver semaphores, then marks the device PM-active.
 *
 * @param dev SPI controller device to initialize.
 *
 * @return 0 on success, -EIO if the controller instance is unknown.
 */
static int spi_qmsi_init(struct device *dev)
{
	const struct spi_qmsi_config *spi_config = dev->config->config_info;
	struct spi_qmsi_runtime *context = dev->driver_data;

	switch (spi_config->spi) {
	case QM_SPI_MST_0:
		IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_0_INT),
			    CONFIG_SPI_0_IRQ_PRI, qm_spi_master_0_isr, 0,
			    IOAPIC_LEVEL | IOAPIC_HIGH);
		irq_enable(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_0_INT));
		clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M0_REGISTER);
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->spi_master_0_int_mask);
		break;
#ifdef CONFIG_SPI_1
	case QM_SPI_MST_1:
		IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_1_INT),
			    CONFIG_SPI_1_IRQ_PRI, qm_spi_master_1_isr, 0,
			    IOAPIC_LEVEL | IOAPIC_HIGH);
		irq_enable(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_1_INT));
		clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M1_REGISTER);
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->spi_master_1_int_mask);
		break;
#endif /* CONFIG_SPI_1 */
	default:
		/* Unknown controller instance: nothing was enabled. */
		return -EIO;
	}

	/* NOTE(review): gpio_cs_init() result is stored but not checked
	 * for failure here — confirm whether a NULL CS GPIO is acceptable.
	 */
	context->gpio_cs = gpio_cs_init(spi_config);

	/* device_sync_sem starts at 0 (presumably given on transfer
	 * completion — confirm against the transfer path); sem starts at 1
	 * and serializes access to the controller.
	 */
	k_sem_init(&context->device_sync_sem, 0, UINT_MAX);
	k_sem_init(&context->sem, 1, UINT_MAX);

	spi_master_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);

	dev->driver_api = &spi_qmsi_api;

	return 0;
}
/**
 * Initialize the always-on periodic timer device.
 *
 * Installs the driver API, resets the user callback, connects/enables
 * the AONPT interrupt and unmasks its routing, optionally initializes
 * the reentrancy semaphore, then marks the device PM-active.
 *
 * @param dev AON timer device to initialize.
 *
 * @return Always 0.
 */
static int aon_timer_init(struct device *dev)
{
	dev->driver_api = &aon_timer_qmsi_api;

	/* Clear the file-scope user callback before interrupts can fire. */
	user_cb = NULL;

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_AONPT_0_INT),
		    CONFIG_AON_TIMER_IRQ_PRI, qm_aonpt_0_isr, NULL,
		    IOAPIC_EDGE | IOAPIC_HIGH);

	irq_enable(IRQ_GET_NUMBER(QM_IRQ_AONPT_0_INT));

	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->aonpt_0_int_mask);

	/* Reentrancy protection is optional; the semaphore starts at 1
	 * so the first caller acquires it immediately.
	 */
	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_init(RP_GET(dev), 1, UINT_MAX);
	}

	aonpt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);

	return 0;
}
/**
 * Connect, enable and unmask all DMA controller interrupt lines.
 *
 * Channels 0-1 are wired unconditionally; channels 2-7 only exist on
 * Quark SE C1000. The error interrupt is unmasked for whichever core
 * (Lakemont or Sensor Subsystem) this image is built for.
 *
 * @param dev Unused; IRQ_CONNECT needs compile-time arguments, so the
 *            device is obtained via DEVICE_GET instead.
 */
static void dma_qmsi_config(struct device *dev)
{
	ARG_UNUSED(dev);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_0), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_0, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_0));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_0_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_1), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_1, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_1));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_1_mask);

#if (CONFIG_SOC_QUARK_SE_C1000)
	/* Quark SE C1000 exposes six additional DMA channels (2-7). */
	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_2), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_2, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_2));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_2_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_3), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_3, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_3));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_3_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_4), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_4, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_4));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_4_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_5), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_5, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_5));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_5_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_6), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_6, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_6));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_6_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_7), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_7, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_7));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_7_mask);
#endif /* CONFIG_SOC_QUARK_SE_C1000 */

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_ERROR_INT),
		    CONFIG_DMA_0_IRQ_PRI, qm_dma_0_error_isr,
		    DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_ERROR_INT));
	/* Unmask the error interrupt only toward the core we run on;
	 * the mask register routes it per-core, unlike the per-channel
	 * QM_IR_UNMASK_INTERRUPTS helper used above.
	 */
#if (QM_LAKEMONT)
	QM_INTERRUPT_ROUTER->dma_0_error_int_mask &=
		~QM_IR_DMA_ERROR_HOST_MASK;
#elif (QM_SENSOR)
	QM_INTERRUPT_ROUTER->dma_0_error_int_mask &=
		~QM_IR_DMA_ERROR_SS_MASK;
#endif
}