qm_rc_t qm_flash_page_erase(const qm_flash_t flash, qm_flash_region_t region,
			    uint32_t page_num)
{
	QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
	QM_CHECK(region < QM_FLASH_REGION_NUM, QM_RC_EINVAL);
	QM_CHECK(page_num <= QM_FLASH_MAX_PAGE_NUM, QM_RC_EINVAL);

	switch (region) {
	case QM_FLASH_REGION_SYS:
#if (QUARK_D2000)
		/* On D2000 the system region follows the data region, so
		 * offset the page index accordingly; intentional fall-through
		 * into the data-region case. */
		page_num += QM_FLASH_REGION_DATA_0_PAGES;
	case QM_FLASH_REGION_DATA:
#endif
		QM_FLASH[flash].flash_wr_ctrl =
		    (page_num << (QM_FLASH_PAGE_SIZE_BITS + WR_ADDR_OFFSET)) |
		    ER_REQ;
		break;
	case QM_FLASH_REGION_OTP:
		QM_FLASH[flash].rom_wr_ctrl =
		    (page_num << (QM_FLASH_PAGE_SIZE_BITS + WR_ADDR_OFFSET)) |
		    ER_REQ;
		break;
	default:
		return QM_RC_EINVAL;
	}

	/* Wait for the erase to complete. */
	while (!(QM_FLASH[flash].flash_stts & ER_DONE))
		;

	return QM_RC_OK;
}

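/*
 * Example usage (illustrative sketch, not part of the driver): erase the
 * first page of the system flash region. The instance QM_FLASH_0 is assumed
 * to exist on the target SoC; pick the instance and page for your part.
 */
static qm_rc_t sample_flash_erase(void)
{
	return qm_flash_page_erase(QM_FLASH_0, QM_FLASH_REGION_SYS, 0);
}
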
int qm_spi_dma_transfer_terminate(qm_spi_t spi)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(dma_context_tx[spi].cb_pending
		     ? (dma_context_tx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	QM_CHECK(dma_context_rx[spi].cb_pending
		     ? (dma_context_rx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);

	int ret = 0;

	if (dma_context_tx[spi].cb_pending) {
		if (0 != qm_dma_transfer_terminate(
			     dma_core[spi],
			     dma_context_tx[spi].dma_channel_id)) {
			ret = -EIO;
		}
	}

	if (dma_context_rx[spi].cb_pending) {
		if (0 != qm_dma_transfer_terminate(
			     dma_core[spi],
			     dma_context_rx[spi].dma_channel_id)) {
			ret = -EIO;
		}
	}

	return ret;
}

int vreg_aon_set_mode(const vreg_mode_t mode)
{
	QM_CHECK(mode < VREG_MODE_NUM, -EINVAL);
	/* Switching mode is not supported by the AON regulator. */
	QM_CHECK(mode != VREG_MODE_SWITCHING, -EINVAL);

	return vreg_set_mode(AON_VR, mode);
}

int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(cfg, -EINVAL);

	/* The controller must be disabled before it can be reconfigured. */
	if (0 != QM_SPI[spi]->ssienr) {
		return -EBUSY;
	}

	qm_spi_reg_t *const controller = QM_SPI[spi];

	/* Apply the selected cfg options. */
	controller->ctrlr0 = (cfg->frame_size << QM_SPI_CTRLR0_DFS_32_OFFSET) |
			     (cfg->transfer_mode << QM_SPI_CTRLR0_TMOD_OFFSET) |
			     (cfg->bus_mode << QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET);

	controller->baudr = cfg->clk_divider;

	/* Record the current data frame size in bytes. frame_size holds the
	 * register encoding (frame length in bits minus one), so this gives:
	 * - 1 byte for DFS set from 4 to 8 bits;
	 * - 2 bytes for DFS set from 9 to 16 bits;
	 * - 3 bytes for DFS set from 17 to 24 bits;
	 * - 4 bytes for DFS set from 25 to 32 bits. */
	dfs[spi] = (cfg->frame_size / 8) + 1;

	tmode[spi] = cfg->transfer_mode;

	return 0;
}

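/*
 * Example usage (illustrative sketch): configure SPI 0 for 8-bit frames in
 * full-duplex mode. QM_SPI_0, QM_SPI_FRAME_SIZE_8_BIT and QM_SPI_BMODE_0 are
 * assumed from the SoC headers; QM_SPI_TMOD_TX_RX appears in the terminate
 * path below.
 */
static int sample_spi_setup(void)
{
	qm_spi_config_t cfg = {0};

	cfg.frame_size = QM_SPI_FRAME_SIZE_8_BIT;
	cfg.transfer_mode = QM_SPI_TMOD_TX_RX;
	cfg.bus_mode = QM_SPI_BMODE_0; /* CPOL = 0, CPHA = 0. */
	cfg.clk_divider = 32; /* SPI clock = controller clock / 32. */

	return qm_spi_set_config(QM_SPI_0, &cfg);
}
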
int qm_aonc_get_value(const qm_aonc_t aonc, uint32_t *const val)
{
	QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
	QM_CHECK(val != NULL, -EINVAL);

	*val = QM_AONC[aonc].aonc_cnt;

	return 0;
}

int qm_aonpt_get_status(const qm_scss_aon_t aonc, bool *const status)
{
	QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
	QM_CHECK(status != NULL, -EINVAL);

	*status = QM_SCSS_AON[aonc].aonpt_stat & BIT(0);

	return 0;
}

int qm_aonpt_get_value(const qm_scss_aon_t aonc, uint32_t *const val)
{
	QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
	QM_CHECK(val != NULL, -EINVAL);

	*val = QM_SCSS_AON[aonc].aonpt_cnt;

	return 0;
}

qm_rc_t qm_flash_page_write(const qm_flash_t flash, qm_flash_region_t region,
			    uint32_t page_num, uint32_t *data, uint32_t len)
{
	QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
	QM_CHECK(region < QM_FLASH_REGION_NUM, QM_RC_EINVAL);
	QM_CHECK(page_num <= QM_FLASH_MAX_PAGE_NUM, QM_RC_EINVAL);
	QM_CHECK(data != NULL, QM_RC_EINVAL);
	QM_CHECK(len <= QM_FLASH_PAGE_SIZE, QM_RC_EINVAL);

	uint32_t i;
	volatile uint32_t *p_wr_data, *p_wr_ctrl;

	/* ROM and flash write registers are laid out the same, but at
	 * different locations in memory, so point at the selected register
	 * set and share one code path for main and ROM updates. */
	switch (region) {
	case QM_FLASH_REGION_SYS:
#if (QUARK_D2000)
		/* On D2000 the system region follows the data region;
		 * intentional fall-through. */
		page_num += QM_FLASH_REGION_DATA_0_PAGES;
	case QM_FLASH_REGION_DATA:
#endif
		p_wr_data = &QM_FLASH[flash].flash_wr_data;
		p_wr_ctrl = &QM_FLASH[flash].flash_wr_ctrl;
		break;
	case QM_FLASH_REGION_OTP:
		p_wr_data = &QM_FLASH[flash].rom_wr_data;
		p_wr_ctrl = &QM_FLASH[flash].rom_wr_ctrl;
		break;
	default:
		return QM_RC_ERROR;
	}

	/* Update address to include the write_address offset. */
	page_num <<= (QM_FLASH_PAGE_SIZE_BITS + WR_ADDR_OFFSET);

	/* Erase the flash page and wait for the erase to complete. */
	*p_wr_ctrl = page_num | ER_REQ;
	while (!(QM_FLASH[flash].flash_stts & ER_DONE))
		;

	/* Write words into flash. */
	for (i = 0; i < len; i++) {
		*p_wr_data = data[i];
		*p_wr_ctrl = page_num;
		*p_wr_ctrl |= WR_REQ;
		page_num += QM_FLASH_ADDR_INC;

		/* Wait for the write to finish. */
		while (!(QM_FLASH[flash].flash_stts & WR_DONE))
			;
	}

	return QM_RC_OK;
}

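/*
 * Example usage (illustrative sketch): program one full page of the system
 * flash region. Assumes QM_FLASH_0 exists on the target and that
 * QM_FLASH_PAGE_SIZE is expressed in 32-bit words, as implied by the
 * word-wise write loop above.
 */
static qm_rc_t sample_flash_program(void)
{
	static uint32_t page_buf[QM_FLASH_PAGE_SIZE];
	uint32_t i;

	for (i = 0; i < QM_FLASH_PAGE_SIZE; i++) {
		page_buf[i] = i; /* Arbitrary test pattern. */
	}

	return qm_flash_page_write(QM_FLASH_0, QM_FLASH_REGION_SYS, 0,
				   page_buf, QM_FLASH_PAGE_SIZE);
}
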
qm_rc_t qm_gpio_clear_pin(const qm_gpio_t gpio, const uint8_t pin)
{
	QM_CHECK(gpio < QM_GPIO_NUM, QM_RC_EINVAL);
	/* Valid pins are 0 .. QM_NUM_GPIO_PINS - 1. */
	QM_CHECK(pin < QM_NUM_GPIO_PINS, QM_RC_EINVAL);

	QM_GPIO[gpio]->gpio_swporta_dr &= ~(1 << pin);

	return QM_RC_OK;
}

qm_rc_t qm_wdt_get_config(const qm_wdt_t wdt, qm_wdt_config_t *const cfg)
{
	QM_CHECK(wdt < QM_WDT_NUM, QM_RC_EINVAL);
	QM_CHECK(cfg != NULL, QM_RC_EINVAL);

	cfg->timeout = QM_WDT[wdt].wdt_torr & QM_WDT_TIMEOUT_MASK;
	cfg->mode = (QM_WDT[wdt].wdt_cr & QM_WDT_MODE) >> QM_WDT_MODE_OFFSET;
	cfg->callback = callback[wdt];

	return QM_RC_OK;
}

int qm_flash_save_context(const qm_flash_t flash, qm_flash_context_t *const ctx)
{
	QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_flash_reg_t *const controller = QM_FLASH[flash];

	ctx->tmg_ctrl = controller->tmg_ctrl;
	ctx->ctrl = controller->ctrl;

	return 0;
}

int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_spi_reg_t *const regs = QM_SPI[spi];

	ctx->ctrlr0 = regs->ctrlr0;
	ctx->ser = regs->ser;
	ctx->baudr = regs->baudr;

	return 0;
}

int qm_flash_word_write(const qm_flash_t flash, const qm_flash_region_t region,
			uint32_t f_addr, const uint32_t data)
{
	QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
	QM_CHECK(region < QM_FLASH_REGION_NUM, -EINVAL);
	QM_CHECK(f_addr < QM_FLASH_MAX_ADDR, -EINVAL);

	volatile uint32_t *p_wr_data, *p_wr_ctrl;
	qm_flash_reg_t *const controller = QM_FLASH[flash];

	/* ROM and flash write registers are laid out the same, but at
	 * different locations in memory, so point at the selected register
	 * set and share one code path for main and ROM updates. */
	switch (region) {
	case QM_FLASH_REGION_SYS:
		p_wr_data = &controller->flash_wr_data;
		p_wr_ctrl = &controller->flash_wr_ctrl;
#if (QUARK_D2000)
		/* Main flash memory starts after the flash data section. */
		f_addr += QM_FLASH_REGION_DATA_0_SIZE;
#endif
		break;
#if (QUARK_D2000)
	case QM_FLASH_REGION_DATA:
		p_wr_data = &controller->flash_wr_data;
		p_wr_ctrl = &controller->flash_wr_ctrl;
		break;
#endif
	case QM_FLASH_REGION_OTP:
		p_wr_data = &controller->rom_wr_data;
		p_wr_ctrl = &controller->rom_wr_ctrl;
		break;
	default:
		return -EINVAL;
	}

	/* Update address to include the write_address offset. */
	f_addr <<= WR_ADDR_OFFSET;

	*p_wr_data = data;
	*p_wr_ctrl = f_addr | WR_REQ;

	/* Wait for the write to finish. */
	while (!(controller->flash_stts & WR_DONE))
		;

	return 0;
}

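/*
 * Example usage (illustrative sketch): write a single 32-bit word at offset 0
 * of the data region on a D2000-class part. QM_FLASH_0 and the interpretation
 * of f_addr as a word-granular offset (implied by the WR_ADDR_OFFSET shift
 * above) are assumptions about the target.
 */
static int sample_flash_word(void)
{
	return qm_flash_word_write(QM_FLASH_0, QM_FLASH_REGION_DATA, 0,
				   0xDEADBEEF);
}
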
qm_rc_t qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg)
{
	QM_CHECK(wdt < QM_WDT_NUM, QM_RC_EINVAL);
	QM_CHECK(cfg != NULL, QM_RC_EINVAL);

	QM_WDT[wdt].wdt_cr &= ~QM_WDT_MODE;
	QM_WDT[wdt].wdt_cr |= cfg->mode << QM_WDT_MODE_OFFSET;
	QM_WDT[wdt].wdt_torr = cfg->timeout;

	/* Kick the WDT to load the Timeout Period (TOP) value. */
	qm_wdt_reload(wdt);

	callback[wdt] = cfg->callback;

	return QM_RC_OK;
}

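/*
 * Example usage (illustrative sketch): configure and arm WDT 0 with a
 * pre-timeout callback. QM_WDT_0, the timeout encoding, the interrupt-mode
 * enum name and the no-argument callback signature are assumptions taken
 * from the SoC headers.
 */
static void sample_wdt_cb(void)
{
	/* Service or log the pre-reset interrupt here. */
}

static qm_rc_t sample_wdt_setup(void)
{
	qm_wdt_config_t cfg;

	cfg.timeout = 10; /* Timeout Period (TOP) encoding, not milliseconds. */
	cfg.mode = QM_WDT_MODE_INTERRUPT_RESET; /* Assumed enum name. */
	cfg.callback = sample_wdt_cb;

	return qm_wdt_set_config(QM_WDT_0, &cfg);
}
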
int qm_ss_spi_get_status(const qm_ss_spi_t spi,
			 qm_ss_spi_status_t *const status)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(status, -EINVAL);

	if (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY) {
		*status = QM_SS_SPI_BUSY;
	} else {
		*status = QM_SS_SPI_IDLE;
	}

	return 0;
}

int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
			   const qm_ss_spi_async_transfer_t *const xfer)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	/* Load and save the initial control register. */
	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
				  QM_SS_SPI_CTRL_TMOD_OFFS);
	uint8_t bytes = BYTES_PER_FRAME(ctrl);

	/* In full-duplex mode both directions must transfer the same number
	 * of frames. */
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
					       : 1,
		 -EINVAL);

	spi_async_transfer[spi] = xfer;
	tx_c[spi] = xfer->tx_len;
	rx_c[spi] = xfer->rx_len;

	/* Set NDF (Number of Data Frames, minus one) in RX or EEPROM-read
	 * mode. */
	if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
		ctrl &= ~QM_SS_SPI_CTRL_NDF_MASK;
		ctrl |= ((xfer->rx_len - 1) << QM_SS_SPI_CTRL_NDF_OFFS) &
			QM_SS_SPI_CTRL_NDF_MASK;
		__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);
	}

	/* Set the RX FIFO threshold to the watermark or, for short transfers,
	 * to the transfer length. */
	uint32_t ftlr =
	    (((FIFO_RX_W_MARK < xfer->rx_len ? FIFO_RX_W_MARK : xfer->rx_len) -
	      1)
	     << QM_SS_SPI_FTLR_RFT_OFFS) &
	    QM_SS_SPI_FTLR_RFT_MASK;
	__builtin_arc_sr(ftlr, base[spi] + QM_SS_SPI_FTLR);

	/* Unmask all interrupts. */
	__builtin_arc_sr(QM_SS_SPI_INTR_ALL, base[spi] + QM_SS_SPI_INTR_MASK);

	/* Enable the SPI device. */
	QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);

	/* RX-only transfers need a dummy frame to be sent to start clocking. */
	if (tmode == QM_SS_SPI_TMOD_RX) {
		fifo_write(spi, (uint8_t *)&dummy_frame, bytes);
	}

	return 0;
}

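/*
 * Example usage (illustrative sketch): start an interrupt-driven full-duplex
 * transfer on SS SPI 0. QM_SS_SPI_0 and the buffer field names (tx, rx) are
 * assumptions from the SoC headers; the callback signature follows the
 * invocation in qm_ss_spi_transfer_terminate() below.
 */
static void sample_ss_spi_cb(void *data, int error, qm_ss_spi_status_t status,
			     uint16_t len)
{
	/* error is -ECANCELED if the user terminated the transfer (see
	 * below); len is the number of frames handled so far. */
}

static uint8_t sample_tx[4] = {0x0A, 0x0B, 0x0C, 0x0D};
static uint8_t sample_rx[4];

static int sample_ss_spi_transfer(void)
{
	static const qm_ss_spi_async_transfer_t xfer = {
	    .tx = sample_tx,
	    .tx_len = sizeof(sample_tx),
	    .rx = sample_rx,
	    .rx_len = sizeof(sample_rx),
	    .callback = sample_ss_spi_cb,
	    .callback_data = NULL,
	};

	return qm_ss_spi_irq_transfer(QM_SS_SPI_0, &xfer);
}
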
int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);

	const qm_ss_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];

	spi_disable(spi);

	if (transfer->callback) {
		uint32_t len = 0;
		uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
		uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
					  QM_SS_SPI_CTRL_TMOD_OFFS);

		if (tmode == QM_SS_SPI_TMOD_TX ||
		    tmode == QM_SS_SPI_TMOD_TX_RX) {
			len = transfer->tx_len - tx_c[spi];
		} else {
			len = transfer->rx_len - rx_c[spi];
		}

		/*
		 * NOTE: change this to return a controller-specific
		 * 'user aborted' code.
		 */
		transfer->callback(transfer->callback_data, -ECANCELED,
				   QM_SS_SPI_IDLE, (uint16_t)len);
	}

	return 0;
}

qm_rc_t qm_aonpt_get_config(const qm_scss_aon_t aonc,
			    qm_aonpt_config_t *const cfg)
{
	QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
	QM_CHECK(cfg != NULL, QM_RC_EINVAL);

	cfg->count = QM_SCSS_AON[aonc].aonpt_cfg;
	cfg->int_en = (callback != NULL);
	cfg->callback = callback;

	return QM_RC_OK;
}

int rar_set_mode(const rar_state_t mode)
{
	QM_CHECK(mode <= RAR_RETENTION, -EINVAL);

	volatile uint32_t i = 32;
	volatile uint32_t reg;

	switch (mode) {
	case RAR_RETENTION:
		QM_SCSS_PMU->aon_vr |=
		    (QM_AON_VR_PASS_CODE | QM_AON_VR_ROK_BUF_VREG_MASK);
		QM_SCSS_PMU->aon_vr |=
		    (QM_AON_VR_PASS_CODE | QM_AON_VR_VREG_SEL);
		break;

	case RAR_NORMAL:
		reg = QM_SCSS_PMU->aon_vr & ~QM_AON_VR_VREG_SEL;
		QM_SCSS_PMU->aon_vr = QM_AON_VR_PASS_CODE | reg;

		/* Wait for >= 2 usec, at most 64 clock cycles. */
		while (i--) {
			__asm__ __volatile__("nop");
		}

		reg = QM_SCSS_PMU->aon_vr & ~QM_AON_VR_ROK_BUF_VREG_MASK;
		QM_SCSS_PMU->aon_vr = QM_AON_VR_PASS_CODE | reg;
		break;
	}

	return 0;
}

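/*
 * Example usage (illustrative sketch): drop the retention alternating
 * regulator to retention mode around a low-power window and restore normal
 * operation afterwards. The sleep-entry step is a placeholder; the exact
 * ordering relative to sleep entry is SoC-specific.
 */
static void sample_low_power_window(void)
{
	rar_set_mode(RAR_RETENTION);

	/* ... enter the SoC low-power state here ... */

	rar_set_mode(RAR_NORMAL);
}
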
qm_rc_t qm_flash_get_config(const qm_flash_t flash, qm_flash_config_t *cfg)
{
	QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
	QM_CHECK(cfg != NULL, QM_RC_EINVAL);

	cfg->wait_states =
	    (QM_FLASH[flash].tmg_ctrl & QM_FLASH_WAIT_STATE_MASK) >>
	    QM_FLASH_WAIT_STATE_OFFSET;
	cfg->us_count =
	    QM_FLASH[flash].tmg_ctrl & QM_FLASH_MICRO_SEC_COUNT_MASK;
	cfg->write_disable =
	    (QM_FLASH[flash].ctrl & QM_FLASH_WRITE_DISABLE_VAL) >>
	    QM_FLASH_WRITE_DISABLE_OFFSET;

	return QM_RC_OK;
}

int qm_spi_irq_transfer_terminate(const qm_spi_t spi)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);

	qm_spi_reg_t *const controller = QM_SPI[spi];
	const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];

	/* Mask the interrupts. */
	controller->imr = QM_SPI_IMR_MASK_ALL;
	/* Disable the SPI device. */
	controller->ssienr = 0;

	if (transfer->callback) {
		uint16_t len = 0;

		if (tmode[spi] == QM_SPI_TMOD_TX ||
		    tmode[spi] == QM_SPI_TMOD_TX_RX) {
			len = tx_counter[spi];
		} else {
			len = rx_counter[spi];
		}

		/*
		 * NOTE: change this to return a controller-specific
		 * 'user aborted' code.
		 */
		transfer->callback(transfer->callback_data, -ECANCELED,
				   QM_SPI_IDLE, len);
	}

	tx_counter[spi] = 0;
	rx_counter[spi] = 0;

	return 0;
}

static int vreg_set_mode(const vreg_t id, const vreg_mode_t mode)
{
	QM_CHECK(mode < VREG_MODE_NUM, -EINVAL);

	uint32_t vr = *vreg[id];

	switch (mode) {
	case VREG_MODE_SWITCHING:
		vr |= QM_SCSS_VR_EN;
		vr &= ~QM_SCSS_VR_VREG_SEL;
		break;
	case VREG_MODE_LINEAR:
		vr |= QM_SCSS_VR_EN;
		vr |= QM_SCSS_VR_VREG_SEL;
		break;
	case VREG_MODE_SHUTDOWN:
		vr &= ~QM_SCSS_VR_EN;
		break;
	default:
		break;
	}

	*vreg[id] = vr;

	/* When entering switching mode, wait until the regulator signals
	 * Regulation OK. */
	while ((mode == VREG_MODE_SWITCHING) &&
	       (*vreg[id] & QM_SCSS_VR_ROK) == 0) {
	}

	return 0;
}

qm_rc_t qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val)
{
	QM_CHECK(gpio < QM_GPIO_NUM, QM_RC_EINVAL);

	QM_GPIO[gpio]->gpio_swporta_dr = val;

	return QM_RC_OK;
}

qm_rc_t qm_aonpt_set_config(const qm_scss_aon_t aonc,
			    const qm_aonpt_config_t *const cfg)
{
	QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
	QM_CHECK(cfg != NULL, QM_RC_EINVAL);

	/* Clear pending interrupts. */
	QM_SCSS_AON[aonc].aonpt_ctrl |= BIT(0);
	QM_SCSS_AON[aonc].aonpt_cfg = cfg->count;

	if (cfg->int_en) {
		callback = cfg->callback;
	} else {
		callback = NULL;
	}

	/* Reset the timer back to the count value. */
	QM_SCSS_AON[aonc].aonpt_ctrl |= BIT(1);

	return QM_RC_OK;
}

qm_rc_t qm_aonc_disable(const qm_scss_aon_t aonc)
{
	QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);

	QM_SCSS_AON[aonc].aonc_cfg = 0x0;

	return QM_RC_OK;
}

qm_rc_t qm_aonpt_reset(const qm_scss_aon_t aonc)
{
	QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);

	QM_SCSS_AON[aonc].aonpt_ctrl |= BIT(1);

	return QM_RC_OK;
}

qm_rc_t qm_wdt_reload(const qm_wdt_t wdt)
{
	QM_CHECK(wdt < QM_WDT_NUM, QM_RC_EINVAL);

	QM_WDT[wdt].wdt_crr = QM_WDT_RELOAD_VALUE;

	return QM_RC_OK;
}

int qm_aonc_disable(const qm_aonc_t aonc)
{
	QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);

	QM_AONC[aonc].aonc_cfg = 0x0;

	return 0;
}

int qm_aonpt_set_config(const qm_aonc_t aonc,
			const qm_aonpt_config_t *const cfg)
{
	QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
	QM_CHECK(cfg != NULL, -EINVAL);

	QM_AONC[aonc].aonpt_cfg = cfg->count;

	if (cfg->int_en) {
		callback = cfg->callback;
		callback_data = cfg->callback_data;
	} else {
		callback = NULL;
	}

	pt_reset(aonc);

	return 0;
}

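/*
 * Example usage (illustrative sketch): fire a periodic-timer callback every
 * `count` ticks of the always-on counter. QM_AONC_0, the 32 kHz tick rate,
 * and the single-argument callback signature (implied by the callback_data
 * field above) are assumptions about the target SoC.
 */
static void sample_aonpt_cb(void *data)
{
	/* Periodic timer expired; data is the callback_data set below. */
}

static int sample_aonpt_setup(void)
{
	qm_aonpt_config_t cfg;

	cfg.count = 32768; /* Roughly one second at a 32 kHz tick rate. */
	cfg.int_en = true;
	cfg.callback = sample_aonpt_cb;
	cfg.callback_data = NULL;

	return qm_aonpt_set_config(QM_AONC_0, &cfg);
}
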
int qm_wdt_reload(const qm_wdt_t wdt)
{
	QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);

	QM_WDT[wdt].wdt_crr = QM_WDT_RELOAD_VALUE;

	return 0;
}