/*
 * Driver init hook for a QMSI I2C controller instance.
 *
 * Sets up the driver's semaphores, wires the instance's IRQ, enables the
 * peripheral clock and applies the default-bitrate master configuration.
 * Returns 0 on success, -EIO for an unknown controller instance, or the
 * negative error from i2c_qmsi_configure().
 */
static int i2c_qmsi_init(struct device *dev)
{
	struct i2c_qmsi_driver_data *driver_data = GET_DRIVER_DATA(dev);
	const struct i2c_qmsi_config_info *config = dev->config->config_info;
	qm_i2c_t instance = GET_CONTROLLER_INSTANCE(dev);
	u32_t bitrate_cfg;
	int err;

	/* device_sync_sem starts at 0: it is given from the transfer
	 * completion path and taken by the waiting caller.  sem starts at 1
	 * and serializes access to the controller.
	 */
	k_sem_init(&driver_data->device_sync_sem, 0, UINT_MAX);
	k_sem_init(&driver_data->sem, 1, UINT_MAX);

	switch (instance) {
	case QM_I2C_0:
		/* Register interrupt handler, unmask IRQ and route it
		 * to Lakemont core.
		 */
		IRQ_CONNECT(CONFIG_I2C_0_IRQ, CONFIG_I2C_0_IRQ_PRI,
			    qm_i2c_0_irq_isr, NULL, CONFIG_I2C_0_IRQ_FLAGS);
		irq_enable(CONFIG_I2C_0_IRQ);
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->i2c_master_0_int_mask);
		break;
#ifdef CONFIG_I2C_1
	case QM_I2C_1:
		IRQ_CONNECT(CONFIG_I2C_1_IRQ, CONFIG_I2C_1_IRQ_PRI,
			    qm_i2c_1_irq_isr, NULL, CONFIG_I2C_1_IRQ_FLAGS);
		irq_enable(CONFIG_I2C_1_IRQ);
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->i2c_master_1_int_mask);
		break;
#endif /* CONFIG_I2C_1 */
	default:
		/* Instance not supported by this build. */
		return -EIO;
	}

	/* Clock must be running before the controller is configured. */
	clk_periph_enable(config->clock_gate);

	bitrate_cfg = _i2c_map_dt_bitrate(config->bitrate);

	err = i2c_qmsi_configure(dev, I2C_MODE_MASTER | bitrate_cfg);
	if (err < 0) {
		return err;
	}

	dev->driver_api = &api;

	i2c_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
	return 0;
}
/*
 * Connect, enable and unmask every interrupt line of DMA controller 0.
 *
 * Channels 0 and 1 exist on all supported SoCs; channels 2-7 only on
 * Quark SE C1000.  The error interrupt is unmasked for whichever core
 * this image runs on (Lakemont host or ARC sensor subsystem).
 */
static void dma_qmsi_config(struct device *dev)
{
	ARG_UNUSED(dev);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_0), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_0, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_0));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_0_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_1), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_1, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_1));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_1_mask);

#if (CONFIG_SOC_QUARK_SE_C1000)
	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_2), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_2, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_2));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_2_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_3), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_3, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_3));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_3_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_4), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_4, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_4));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_4_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_5), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_5, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_5));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_5_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_6), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_6, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_6));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_6_mask);

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_7), CONFIG_DMA_0_IRQ_PRI,
		    qm_dma_0_isr_7, DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_7));
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_7_mask);
#endif /* CONFIG_SOC_QUARK_SE_C1000 */

	/* DMA error interrupt: the router mask is adjusted by hand here
	 * (instead of QM_IR_UNMASK_INTERRUPTS) because only the bit for
	 * the executing core must be cleared.
	 */
	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_ERROR_INT),
		    CONFIG_DMA_0_IRQ_PRI, qm_dma_0_error_isr,
		    DEVICE_GET(dma_qmsi), 0);
	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_ERROR_INT));
#if (QM_LAKEMONT)
	QM_INTERRUPT_ROUTER->dma_0_error_int_mask &= ~QM_IR_DMA_ERROR_HOST_MASK;
#elif (QM_SENSOR)
	QM_INTERRUPT_ROUTER->dma_0_error_int_mask &= ~QM_IR_DMA_ERROR_SS_MASK;
#endif
}
/*
 * Driver init hook for a QMSI SPI master instance.
 *
 * Wires the instance's IRQ, gates its clock on, unmasks the interrupt
 * in the router, initializes the chip-select GPIO and the driver
 * semaphores, then publishes the API.  Returns 0 on success or -EIO
 * for an unknown controller instance.
 */
static int spi_qmsi_init(struct device *dev)
{
	const struct spi_qmsi_config *spi_config = dev->config->config_info;
	struct spi_qmsi_runtime *context = dev->driver_data;

	switch (spi_config->spi) {
	case QM_SPI_MST_0:
		IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_0_INT),
			    CONFIG_SPI_0_IRQ_PRI, qm_spi_master_0_isr, 0,
			    IOAPIC_LEVEL | IOAPIC_HIGH);
		irq_enable(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_0_INT));
		clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M0_REGISTER);
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->spi_master_0_int_mask);
		break;
#ifdef CONFIG_SPI_1
	case QM_SPI_MST_1:
		IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_1_INT),
			    CONFIG_SPI_1_IRQ_PRI, qm_spi_master_1_isr, 0,
			    IOAPIC_LEVEL | IOAPIC_HIGH);
		irq_enable(IRQ_GET_NUMBER(QM_IRQ_SPI_MASTER_1_INT));
		clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M1_REGISTER);
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->spi_master_1_int_mask);
		break;
#endif /* CONFIG_SPI_1 */
	default:
		/* Instance not supported by this build. */
		return -EIO;
	}

	context->gpio_cs = gpio_cs_init(spi_config);

	/* device_sync_sem: signaled on transfer completion; sem: serializes
	 * controller access.
	 */
	k_sem_init(&context->device_sync_sem, 0, UINT_MAX);
	k_sem_init(&context->sem, 1, UINT_MAX);

	spi_master_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);

	dev->driver_api = &spi_qmsi_api;
	return 0;
}
/*
 * Sensor-subsystem GPIO interrupt demo: drives SS GPIO pin 2 as output,
 * loops it back onto pin 3 (externally wired), and counts rising-edge
 * interrupts on pin 3.  Reports success when the ISR counter matches the
 * number of toggles.  Registers are programmed directly through the ARC
 * auxiliary-register write builtin.
 *
 * NOTE: the INTMASK register masks a pin when its bit is 1 (see the
 * ~BIT(3) write below, which leaves only pin 3 unmasked).
 */
static void ss_gpio_interrupt_example(void)
{
	uint32_t i;

	QM_PUTS("Starting: SS GPIO interrupt");

	/* Enable clock to interrupt controller. */
	__builtin_arc_sr(BIT(31) + BIT(0),
			 QM_SS_GPIO_0_BASE + QM_SS_GPIO_LS_SYNC);

	/* Set SS GPIO pin 2 as OUTPUT. */
	__builtin_arc_sr(BIT(2), QM_SS_GPIO_0_BASE + QM_SS_GPIO_SWPORTA_DDR);

	/* Register the SS GPIO interrupt. */
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->ss_gpio_0_int_mask);
	qm_ss_irq_request(QM_SS_IRQ_GPIO_0_INT, ss_gpio_interrupt_isr);

	/* Set the bit 3 to rising edge-sensitive. */
	__builtin_arc_sr(BIT(3), QM_SS_GPIO_0_BASE + QM_SS_GPIO_INTTYPE_LEVEL);

	/* Unmask SS GPIO pin 3 interrupt only (all other bits stay 1 =
	 * masked).
	 */
	__builtin_arc_sr(~BIT(3), QM_SS_GPIO_0_BASE + QM_SS_GPIO_INTMASK);

	/* Clear SS GPIO interrupt requests. */
	__builtin_arc_sr(BIT(3), QM_SS_GPIO_0_BASE + QM_SS_GPIO_PORTA_EOI);

	/* Enable SS GPIO interrupt. */
	__builtin_arc_sr(BIT(3), QM_SS_GPIO_0_BASE + QM_SS_GPIO_INTEN);

	for (i = 0; i < NUM_LOOPS; i++) {
		/*
		 * Toggle the SS GPIO 2, will trigger the interrupt on SS
		 * GPIO 3.
		 */
		clk_sys_udelay(DELAY);
		__builtin_arc_sr(BIT(2),
				 QM_SS_GPIO_0_BASE + QM_SS_GPIO_SWPORTA_DR);
		QM_GPIO[0]->gpio_swporta_dr |= BIT(LED_BIT);
		clk_sys_udelay(DELAY);
		__builtin_arc_sr(0, QM_SS_GPIO_0_BASE + QM_SS_GPIO_SWPORTA_DR);
		QM_GPIO[0]->gpio_swporta_dr &= ~BIT(LED_BIT);
	}

	/* Mask all SS GPIO interrupts again (1 = masked in INTMASK). */
	__builtin_arc_sr(0xff, QM_SS_GPIO_0_BASE + QM_SS_GPIO_INTMASK);

	if (counter == NUM_LOOPS) {
		QM_PUTS("Success");
	} else {
		QM_PUTS("Error: Check are pins 14 and 16 on J14 connector "
			"short connected?");
	}

	QM_PUTS("Finished: SS GPIO interrupt");
}
/*
 * SS GPIO driver-API demo: configures PIN_OUT as output and PIN_INTR as
 * a debounced, falling-edge interrupt input, toggles PIN_OUT high then
 * low to fire the interrupt (pins externally wired together), waits for
 * the callback, then verifies PIN_OUT reads back low.
 * Returns 0 on success, 1 on any verification failure.
 */
int main(void)
{
	qm_ss_gpio_state_t state;
	qm_ss_gpio_port_config_t conf;

	QM_PUTS("Starting: SS GPIO");
	pin_mux_setup();

	/* Request IRQ and write SS GPIO port config. */
	conf.direction = BIT(PIN_OUT);      /* Set PIN_OUT to output. */
	conf.int_en = BIT(PIN_INTR);        /* Interrupt enabled. */
	conf.int_type = BIT(PIN_INTR);      /* Edge sensitive interrupt. */
	conf.int_polarity = ~BIT(PIN_INTR); /* Falling edge. */
	conf.int_debounce = BIT(PIN_INTR);  /* Debounce enabled. */
	conf.callback = gpio_example_callback;
	conf.callback_data = NULL;

	/* Enable clock. */
	ss_clk_gpio_enable(QM_SS_GPIO_0);

	/* Route and register the ISR before applying the config so no
	 * edge is lost.
	 */
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->ss_gpio_0_int_mask);
	qm_ss_irq_request(QM_SS_IRQ_GPIO_0_INT, qm_ss_gpio_0_isr);

	qm_ss_gpio_set_config(QM_SS_GPIO_0, &conf);

	/* Clear PIN_OUT to trigger PIN_INTR interrupt. */
	qm_ss_gpio_set_pin(QM_SS_GPIO_0, PIN_OUT);
	qm_ss_gpio_clear_pin(QM_SS_GPIO_0, PIN_OUT);

	/* Wait for callback to be invoked */
	while (!callback_invoked)
		;

	QM_PRINTF("Callback fired, status: 0x%x\n", callback_status);

	if (qm_ss_gpio_read_pin(QM_SS_GPIO_0, PIN_OUT, &state)) {
		QM_PUTS("Error: read pin failed");
		return 1;
	}
	if (state != QM_SS_GPIO_LOW) {
		QM_PUTS("Error: SS GPIO pin out comparison failed");
		return 1;
	}

	QM_PUTS("Finished: SS GPIO");
	return 0;
}
/*
 * Driver init hook for the QMSI RTC: optional reentrancy semaphore,
 * IRQ wiring, interrupt-router routing and initial PM state.
 * Always returns 0.
 */
static int rtc_qmsi_init(struct device *dev)
{
	if (IS_ENABLED(CONFIG_RTC_QMSI_API_REENTRANCY)) {
		/* Binary-style semaphore serializing API calls. */
		k_sem_init(RP_GET(dev), 1, UINT_MAX);
	}

	IRQ_CONNECT(DT_RTC_0_IRQ, CONFIG_RTC_0_IRQ_PRI, qm_rtc_0_isr, NULL,
		    DT_RTC_0_IRQ_FLAGS);

	/* Unmask RTC interrupt */
	irq_enable(DT_RTC_0_IRQ);

	/* Route RTC interrupt to the current core */
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->rtc_0_int_mask);

	rtc_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);

	return 0;
}
/*
 * Select how an SRAM MPR (memory protection region) violation is handled.
 *
 * MPR_VIOL_MODE_INTERRUPT: store the user callback and deliver the
 * violation as a maskable interrupt (halt path masked).
 * MPR_VIOL_MODE_PROBE / MPR_VIOL_MODE_RESET: deliver it as a halt, with
 * the ss_cfg redirection bit choosing probe-mode entry vs. warm reset.
 *
 * Returns 0 on success, -EINVAL for an out-of-range mode.
 */
int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
				qm_mpr_callback_t callback_fn, void *cb_data)
{
	QM_CHECK(mode <= MPR_VIOL_MODE_PROBE, -EINVAL);

	/* interrupt mode */
	if (MPR_VIOL_MODE_INTERRUPT == mode) {
		callback = callback_fn;
		callback_data = cb_data;

		/* unmask interrupt */
		QM_IR_UNMASK_INTERRUPTS(
			QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
		QM_IR_MASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);

		/* NOTE(review): clearing the redirection bit here looks
		 * defensive (halt path is masked anyway) — confirm against
		 * the SoC register spec.
		 */
		QM_SCSS_SS->ss_cfg &= ~QM_SS_STS_HALT_INTERRUPT_REDIRECTION;
	}

	/* probe or reset mode */
	else {
		/* mask interrupt */
		QM_IR_MASK_INTERRUPTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
		QM_IR_UNMASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);

		if (MPR_VIOL_MODE_PROBE == mode) {
			/* When an enabled host halt interrupt occurs, this bit
			 * determines if the interrupt event triggers a warm
			 * reset
			 * or an entry into Probe Mode.
			 * 0b : Warm Reset
			 * 1b : Probe Mode Entry
			 */
			QM_SCSS_SS->ss_cfg |=
				QM_SS_STS_HALT_INTERRUPT_REDIRECTION;
		} else {
			QM_SCSS_SS->ss_cfg &=
				~QM_SS_STS_HALT_INTERRUPT_REDIRECTION;
		}
	}
	return 0;
}
void _sys_soc_power_state_post_ops(enum power_states state) { u32_t limit; switch (state) { case SYS_POWER_STATE_CPU_LPS_1: /* Expire the timer as it is disabled in SS2. */ limit = _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT); _arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, limit - 1); case SYS_POWER_STATE_CPU_LPS: __builtin_arc_seti(0); break; case SYS_POWER_STATE_DEEP_SLEEP: qm_ss_power_soc_lpss_disable(); /* If flag is cleared it means the system entered in * sleep state while we were in LPS. In that case, we * must set ARC_READY flag so x86 core can continue * its execution. */ if ((QM_SCSS_GP->gp0 & GP0_BIT_SLEEP_READY) == 0) { _quark_se_ss_ready(); __builtin_arc_seti(0); } else { QM_SCSS_GP->gp0 &= ~GP0_BIT_SLEEP_READY; QM_SCSS_GP->gps0 &= ~QM_GPS0_BIT_SENSOR_WAKEUP; } break; case SYS_POWER_STATE_DEEP_SLEEP_1: case SYS_POWER_STATE_DEEP_SLEEP_2: /* Route RTC interrupt to the current core */ QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->rtc_0_int_mask); __builtin_arc_seti(0); break; break; default: break; } }
/*
 * Driver init hook for the always-on periodic timer: publishes the API,
 * clears the user callback, wires and unmasks the AONPT IRQ, optionally
 * creates the reentrancy semaphore, and sets the active PM state.
 * Always returns 0.
 */
static int aon_timer_init(struct device *dev)
{
	dev->driver_api = &aon_timer_qmsi_api;

	/* No user alarm callback until counter_set_alarm() installs one. */
	user_cb = NULL;

	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_AONPT_0_INT),
		    CONFIG_AON_TIMER_IRQ_PRI, qm_aonpt_0_isr, NULL,
		    IOAPIC_EDGE | IOAPIC_HIGH);

	irq_enable(IRQ_GET_NUMBER(QM_IRQ_AONPT_0_INT));

	/* Route the AONPT interrupt to this core. */
	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->aonpt_0_int_mask);

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_init(RP_GET(dev), 1, UINT_MAX);
	}

	aonpt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);

	return 0;
}