/*
 * Pop one frame from the SPI RX FIFO into 'data'.
 *
 * A write of QM_SS_SPI_DR_R_MASK to the DR register selects read access;
 * the following DR read then returns the frame. 'size' is the frame width
 * in bytes (1 or 2) and selects how the value is stored.
 */
static __inline__ void fifo_read(const qm_ss_spi_t spi, void *data,
				 uint8_t size)
{
	uint32_t frame;

	/* Select DR read access, then fetch the frame (order matters). */
	__builtin_arc_sr(QM_SS_SPI_DR_R_MASK, base[spi] + QM_SS_SPI_DR);
	frame = __builtin_arc_lr(base[spi] + QM_SS_SPI_DR);

	if (size == 1) {
		*(uint8_t *)data = (uint8_t)frame;
	} else {
		*(uint16_t *)data = (uint16_t)frame;
	}
}
int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi) { QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL); const qm_ss_spi_async_transfer_t *const transfer = spi_async_transfer[spi]; spi_disable(spi); if (transfer->callback) { uint32_t len = 0; uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL); uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >> QM_SS_SPI_CTRL_TMOD_OFFS); if (tmode == QM_SS_SPI_TMOD_TX || tmode == QM_SS_SPI_TMOD_TX_RX) { len = transfer->tx_len - tx_c[spi]; } else { len = transfer->rx_len - rx_c[spi]; } /* * NOTE: change this to return controller-specific code * 'user aborted'. */ transfer->callback(transfer->callback_data, -ECANCELED, QM_SS_SPI_IDLE, (uint16_t)len); }
int qm_ss_spi_slave_select(const qm_ss_spi_t spi, const qm_ss_spi_slave_select_t ss) { QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL); /* Check if the device reports as busy. */ /* NOTE: check if QM_ASSERT is the right thing to do here */ QM_ASSERT( !(__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)); uint32_t spien = __builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN); spien &= ~QM_SS_SPI_SPIEN_SER_MASK; spien |= (ss << QM_SS_SPI_SPIEN_SER_OFFS); __builtin_arc_sr(spien, base[spi] + QM_SS_SPI_SPIEN); return 0; }
/* Public Functions */ int qm_ss_spi_set_config(const qm_ss_spi_t spi, const qm_ss_spi_config_t *const cfg) { QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL); QM_CHECK(cfg, -EINVAL); /* Configuration can be changed only when SPI is disabled */ /* NOTE: check if QM_ASSERT is the right thing to do here */ QM_ASSERT((__builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN) & QM_SS_SPI_SPIEN_EN) == 0); uint32_t ctrl = __builtin_arc_lr(QM_SS_SPI_0_BASE + QM_SS_SPI_CTRL); ctrl &= QM_SS_SPI_CTRL_CLK_ENA; ctrl |= cfg->frame_size << QM_SS_SPI_CTRL_DFS_OFFS; ctrl |= cfg->transfer_mode << QM_SS_SPI_CTRL_TMOD_OFFS; ctrl |= cfg->bus_mode << QM_SS_SPI_CTRL_BMOD_OFFS; __builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL); __builtin_arc_sr(cfg->clk_divider, base[spi] + QM_SS_SPI_TIMING); return 0; }
/*
 * Report whether the SPI controller is currently busy.
 *
 * Writes QM_SS_SPI_BUSY or QM_SS_SPI_IDLE into *status based on the
 * BUSY bit of the status register. Returns 0, or -EINVAL on bad args.
 */
int qm_ss_spi_get_status(const qm_ss_spi_t spi,
			 qm_ss_spi_status_t *const status)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(status, -EINVAL);

	const uint32_t sr = __builtin_arc_lr(base[spi] + QM_SS_SPI_SR);
	*status = (sr & QM_SS_SPI_SR_BUSY) ? QM_SS_SPI_BUSY : QM_SS_SPI_IDLE;

	return 0;
}
/*
 * Switch the RTC interrupt line to level-sensitive triggering.
 *
 * The sensor subsystem cannot be woken by an edge-triggered interrupt
 * from the RTC / AON counter, so the trigger mode is changed to level
 * before sleeping; the previous trigger configuration is returned so
 * the caller can restore it on wake-up.
 */
static uint32_t switch_rtc_to_level(void)
{
	uint32_t saved_trigger;

	/* Select the RTC vector, capture its current trigger mode, then
	 * force level-sensitive triggering (register order matters). */
	__builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
	saved_trigger = __builtin_arc_lr(QM_SS_AUX_IRQ_TRIGGER);
	__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);

	return saved_trigger;
}
/*
 * Start an interrupt-driven SPI transfer.
 *
 * Saves the transfer descriptor and frame counters, programs NDF and the
 * RX FIFO threshold, unmasks all SPI interrupts and enables the
 * controller. The setup order below is intentional — do not reorder.
 *
 * NOTE(review): a second definition of qm_ss_spi_irq_transfer appears
 * later in this file (using transfer[]/tx_p[]/rx_p[] state); one of the
 * two must be removed before this translation unit can compile.
 *
 * Returns 0 on success, -EINVAL on invalid arguments.
 */
int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
			   const qm_ss_spi_async_transfer_t *const xfer)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	/* Load and save initial control register */
	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
				  QM_SS_SPI_CTRL_TMOD_OFFS);
	/* Frame width in bytes (1 or 2), derived from the DFS field. */
	uint8_t bytes = BYTES_PER_FRAME(ctrl);

	/* Full-duplex mode requires equal TX and RX lengths. */
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
					       : 1,
		 -EINVAL);

	/* Record the transfer and the remaining-frame counters used by
	 * the ISR. */
	spi_async_transfer[spi] = xfer;
	tx_c[spi] = xfer->tx_len;
	rx_c[spi] = xfer->rx_len;

	/* Set NDF (Number of Data Frames) in RX or EEPROM Read mode. (-1) */
	if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
		ctrl &= ~QM_SS_SPI_CTRL_NDF_MASK;
		ctrl |= ((xfer->rx_len - 1) << QM_SS_SPI_CTRL_NDF_OFFS) &
			QM_SS_SPI_CTRL_NDF_MASK;
		__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);
	}

	/* RX FIFO threshold: the watermark, capped by rx_len, minus one
	 * (the register holds threshold - 1). */
	uint32_t ftlr =
	    (((FIFO_RX_W_MARK < xfer->rx_len ? FIFO_RX_W_MARK : xfer->rx_len) -
	      1)
	     << QM_SS_SPI_FTLR_RFT_OFFS) &
	    QM_SS_SPI_FTLR_RFT_MASK;
	__builtin_arc_sr(ftlr, base[spi] + QM_SS_SPI_FTLR);

	/* Unmask all interrupts */
	__builtin_arc_sr(QM_SS_SPI_INTR_ALL, base[spi] + QM_SS_SPI_INTR_MASK);

	/* Enable SPI device */
	QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);

	/* RX only transfers need a dummy frame byte to be sent. */
	if (tmode == QM_SS_SPI_TMOD_RX) {
		fifo_write(spi, (uint8_t *)&dummy_frame, bytes);
	}

	return 0;
}
/*
 * Start an interrupt-driven SPI transfer (pointer/counter variant).
 *
 * Saves the transfer descriptor plus explicit buffer pointers and frame
 * counters for the ISR, programs the RX FIFO threshold, unmasks all SPI
 * interrupts and enables the controller.
 *
 * NOTE(review): this duplicates the earlier qm_ss_spi_irq_transfer
 * definition in this file (which uses spi_async_transfer[] and writes a
 * dummy frame directly); one of the two must be removed before this
 * translation unit can compile.
 *
 * Returns 0 on success, -EINVAL on invalid arguments.
 */
int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
			   const qm_ss_spi_async_transfer_t *const xfer)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	/* Load and save initial control register */
	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
				  QM_SS_SPI_CTRL_TMOD_OFFS);

	/* Full-duplex mode requires equal TX and RX lengths. */
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
					       : 1,
		 -EINVAL);

	/* Record the transfer, counters and buffer pointers used by the
	 * ISR. */
	transfer[spi] = xfer;
	tx_c[spi] = xfer->tx_len;
	rx_c[spi] = xfer->rx_len;
	tx_p[spi] = xfer->tx;
	rx_p[spi] = xfer->rx;

	/* RX only transfers need a dummy frame byte to be sent. */
	if (tmode == QM_SS_SPI_TMOD_RX) {
		/* Point TX at a single dummy frame instead of user data. */
		tx_p[spi] = (uint8_t *)&dummy_frame;
		tx_c[spi] = 1;
	}

	/* RX FIFO threshold: the watermark, capped by rx_len, minus one
	 * (the register holds threshold - 1). */
	uint32_t ftlr =
	    (((FIFO_RX_W_MARK < xfer->rx_len ? FIFO_RX_W_MARK : xfer->rx_len) -
	      1)
	     << QM_SS_SPI_FTLR_RFT_OFFS) &
	    QM_SS_SPI_FTLR_RFT_MASK;
	__builtin_arc_sr(ftlr, base[spi] + QM_SS_SPI_FTLR);

	/* Unmask all interrupts */
	__builtin_arc_sr(QM_SS_SPI_INTR_ALL, base[spi] + QM_SS_SPI_INTR_MASK);

	/* Enable SPI device */
	QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);

	return 0;
}
/*
 * Perform a blocking (polled) SPI transfer.
 *
 * Validates that the buffer lengths match the configured transfer mode,
 * programs NDF where needed, then services the TX and RX FIFOs in a
 * polling loop until all frames have moved, finally waiting for the
 * controller to go idle and disabling it.
 *
 * Returns 0 on success, -EINVAL on invalid arguments, -EIO on RX FIFO
 * overflow (QM_SS_SPI_RX_OVERFLOW is also OR-ed into *status if given).
 */
int qm_ss_spi_transfer(const qm_ss_spi_t spi,
		       const qm_ss_spi_transfer_t *const xfer,
		       qm_ss_spi_status_t *const status)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
				  QM_SS_SPI_CTRL_TMOD_OFFS);

	/* Length sanity checks per transfer mode: full-duplex needs equal
	 * lengths, TX-only no RX, RX / EEPROM-read a non-zero RX, and
	 * RX-only no TX data. */
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
					       : 1,
		 -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
		 -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_EEPROM_READ ? (xfer->rx_len > 0) : 1,
		 -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_RX ? (xfer->rx_len > 0) : 1, -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1,
		 -EINVAL);

	uint32_t tx_cnt = xfer->tx_len;
	uint32_t rx_cnt = xfer->rx_len;
	uint8_t *rx_buffer = xfer->rx;
	uint8_t *tx_buffer = xfer->tx;
	int ret = 0;

	/* Disable all SPI interrupts — this transfer is fully polled. */
	__builtin_arc_sr(0, base[spi] + QM_SS_SPI_INTR_MASK);

	/* Set NDF (Number of Data Frames) in RX or EEPROM Read mode. (-1) */
	if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
		ctrl &= ~QM_SS_SPI_CTRL_NDF_MASK;
		ctrl |= ((xfer->rx_len - 1) << QM_SS_SPI_CTRL_NDF_OFFS) &
			QM_SS_SPI_CTRL_NDF_MASK;
		__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);
	}

	/* RX only transfers need a dummy frame to be sent. */
	if (tmode == QM_SS_SPI_TMOD_RX) {
		tx_buffer = (uint8_t *)&dummy_frame;
		tx_cnt = 1;
	}

	/* Calculate number of bytes per frame (1 or 2)*/
	uint8_t bytes = BYTES_PER_FRAME(ctrl);

	/* Enable SPI device */
	QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);

	while (tx_cnt || rx_cnt) {
		uint32_t sr = __builtin_arc_lr(base[spi] + QM_SS_SPI_SR);
		/* Break and report error if RX FIFO has overflown */
		if (__builtin_arc_lr(base[spi] + QM_SS_SPI_INTR_STAT) &
		    QM_SS_SPI_INTR_RXOI) {
			ret = -EIO;
			if (status) {
				/* NOTE(review): *status is OR-ed here, so
				 * the caller-supplied value is assumed to be
				 * pre-initialized — confirm at call sites. */
				*status |= QM_SS_SPI_RX_OVERFLOW;
			}
			break;
		}
		/* Copy data to buffer as long RX-FIFO is not empty */
		if (sr & QM_SS_SPI_SR_RFNE && rx_cnt) {
			fifo_read(spi, rx_buffer, bytes);
			rx_buffer += bytes;
			rx_cnt--;
		}
		/* Copy data from buffer as long TX-FIFO is not full. */
		if (sr & QM_SS_SPI_SR_TFNF && tx_cnt) {
			fifo_write(spi, tx_buffer, bytes);
			tx_buffer += bytes;
			tx_cnt--;
		}
	}
	/* Wait for the last frame to be transferred (controller idle). */
	while (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)
		;

	spi_disable(spi);

	return ret;
}