/*
 * Transmit one character from the circular buffer in PIO mode.
 *
 * Tears down any in-flight TX DMA first, waits for the transmit data
 * register to drain (TXE), re-arms the TXE interrupt and pushes the next
 * byte from the xmit ring into TDR.
 */
static void stm32_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int isr;
	int ret;

	/* Switching to PIO: disable the USART's TX DMA request first. */
	if (stm32_port->tx_dma_busy) {
		stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		stm32_port->tx_dma_busy = false;
	}

	/* Busy-wait (up to 100 ms, 10 us period) for TXE before writing. */
	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TXE),
						10, 100000);

	/*
	 * NOTE(review): on timeout we only log and still write TDR below —
	 * presumably a deliberate best-effort; confirm this cannot drop a
	 * byte on a wedged transmitter.
	 */
	if (ret)
		dev_err(port->dev, "tx empty not set\n");

	/* Re-enable the TXE interrupt so remaining bytes are sent from IRQ. */
	stm32_set_bits(port, ofs->cr1, USART_CR1_TXEIE);

	writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	port->icount.tx++;
}
/*
 * DMA completion callback for USART transmit.
 *
 * Waits for the transmission-complete flag (TC), acknowledges it, detaches
 * the DMA request from the USART and kicks the TX path again in case more
 * data queued up while the DMA transfer was in flight.
 */
static void stm32_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned int isr;
	int ret;

	/* Wait (up to 100 ms) for the last frame to fully leave the shifter. */
	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	if (ret)
		dev_err(port->dev, "terminal count not set\n");

	/*
	 * Acknowledge TC: older IP has no ICR register (UNDEF_REG) and TC is
	 * cleared by writing 0 to the flag in ISR; newer IP clears via ICR.
	 * NOTE(review): stm32_set_bits() does a read-modify-write on ICR,
	 * which is typically a write-1-to-clear register — a plain
	 * writel_relaxed() of USART_CR_TC may be intended; confirm against
	 * the reference manual.
	 */
	if (ofs->icr == UNDEF_REG)
		stm32_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		stm32_set_bits(port, ofs->icr, USART_CR_TC);

	/* Detach DMA from the USART transmitter. */
	stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32port->tx_dma_busy = false;

	/* Let's see if we have pending data to send */
	stm32_transmit_chars(port);
}
static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, const struct spi_mem_op *op) { void (*tx_fifo)(u8 *val, void __iomem *addr); u32 len = op->data.nbytes, sr; u8 *buf; int ret; if (op->data.dir == SPI_MEM_DATA_IN) { tx_fifo = stm32_qspi_read_fifo; buf = op->data.buf.in; } else { tx_fifo = stm32_qspi_write_fifo; buf = (u8 *)op->data.buf.out; } while (len--) { ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr, (sr & SR_FTF), 1, STM32_FIFO_TIMEOUT_US); if (ret) { dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n", len, sr); return ret; } tx_fifo(buf++, qspi->io_base + QSPI_DR); } return 0; }
/*
 * Spin until the controller's BUSY flag deasserts, or time out after
 * STM32_BUSY_TIMEOUT_US. Returns 0 when idle, -ETIMEDOUT-style error
 * from the poll helper otherwise.
 */
static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
	u32 status;

	return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
						 status, !(status & SR_BUSY),
						 1, STM32_BUSY_TIMEOUT_US);
}
/*
 * Force a fresh shadow-register synchronisation: clear RSF, then wait for
 * hardware to set it again, which guarantees the calendar registers read
 * afterwards are coherent.
 */
static int stm32_rtc_wait_sync(struct stm32_rtc *rtc)
{
	const struct stm32_rtc_registers *regs = &rtc->data->regs;
	unsigned int flags;

	/* Clear RSF so the poll below observes a new sync event. */
	flags = readl_relaxed(rtc->base + regs->isr);
	flags &= ~STM32_RTC_ISR_RSF;
	writel_relaxed(flags, rtc->base + regs->isr);

	/*
	 * Wait for RSF to be set to ensure the calendar registers are
	 * synchronised; it takes around 2 rtc_ck clock cycles.
	 */
	return readl_relaxed_poll_timeout_atomic(rtc->base + regs->isr, flags,
						 (flags & STM32_RTC_ISR_RSF),
						 10, 100000);
}
/*
 * Put the RTC into initialization mode (calendar registers writable).
 * Returns 0 immediately if INITF is already set; otherwise requests init
 * mode and polls for the hardware to acknowledge it.
 */
static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
{
	const struct stm32_rtc_registers *regs = &rtc->data->regs;
	unsigned int isr = readl_relaxed(rtc->base + regs->isr);

	/* Already in the initialization phase — nothing to do. */
	if (isr & STM32_RTC_ISR_INITF)
		return 0;

	/* Request initialization mode. */
	writel_relaxed(isr | STM32_RTC_ISR_INIT, rtc->base + regs->isr);

	/*
	 * It takes around 2 rtc_ck clock cycles to enter the initialization
	 * phase (and have INITF set). As the slowest rtc_ck frequency may be
	 * 32kHz and the highest should be 1MHz, poll every 10 us with a
	 * timeout of 100ms.
	 */
	return readl_relaxed_poll_timeout_atomic(rtc->base + regs->isr, isr,
						 (isr & STM32_RTC_ISR_INITF),
						 10, 100000);
}
/*
 * Program and execute one spi-mem operation on the QSPI controller.
 *
 * Chooses the functional mode (memory-mapped read, indirect read, or
 * indirect write), programs prescaler/chip-select, data length, the CCR
 * command word (instruction, address, dummy cycles, data phase) and the
 * address register, then runs the transfer and waits for completion.
 * On any error — or after a memory-mapped read — the current operation is
 * aborted and the transfer-complete flag is cleared.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
	u32 ccr, cr, addr_max;
	int timeout, err = 0;

	dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	/* The controller must be idle before reprogramming it. */
	err = stm32_qspi_wait_nobusy(qspi);
	if (err)
		goto abort;

	addr_max = op->addr.val + op->data.nbytes + 1;

	/*
	 * Reads that fit inside the memory-mapped window (and carry an
	 * address phase) use memory-mapped mode; otherwise fall back to
	 * indirect read. Writes always use indirect write.
	 */
	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (addr_max < qspi->mm_size && op->addr.buswidth)
			qspi->fmode = CCR_FMODE_MM;
		else
			qspi->fmode = CCR_FMODE_INDR;
	} else {
		qspi->fmode = CCR_FMODE_INDW;
	}

	/* Select this flash's prescaler and chip-select in CR. */
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	cr &= ~CR_PRESC_MASK & ~CR_FSEL;
	cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
	cr |= FIELD_PREP(CR_FSEL, flash->cs);
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/*
	 * DLR holds (length - 1). A data-less command is issued as an
	 * indirect write regardless of the mode chosen above.
	 */
	if (op->data.nbytes)
		writel_relaxed(op->data.nbytes - 1,
			       qspi->io_base + QSPI_DLR);
	else
		qspi->fmode = CCR_FMODE_INDW;

	/* Build the CCR command word: fmode, opcode and phase widths. */
	ccr = qspi->fmode;
	ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
	ccr |= FIELD_PREP(CCR_IMODE_MASK,
			  stm32_qspi_get_mode(qspi, op->cmd.buswidth));

	if (op->addr.nbytes) {
		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
				  stm32_qspi_get_mode(qspi, op->addr.buswidth));
		/* ADSIZE encodes address size as (bytes - 1). */
		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
	}

	/* Dummy phase expressed in clock cycles: bits / buswidth. */
	if (op->dummy.buswidth && op->dummy.nbytes)
		ccr |= FIELD_PREP(CCR_DCYC_MASK,
				  op->dummy.nbytes * 8 / op->dummy.buswidth);

	if (op->data.nbytes) {
		ccr |= FIELD_PREP(CCR_DMODE_MASK,
				  stm32_qspi_get_mode(qspi, op->data.buswidth));
	}

	/* Writing CCR triggers the command in indirect mode. */
	writel_relaxed(ccr, qspi->io_base + QSPI_CCR);

	/* In memory-mapped mode the address comes from the AHB access. */
	if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
		writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);

	err = stm32_qspi_tx(qspi, op);

	/*
	 * Abort in:
	 * -error case
	 * -read memory map: prefetching must be stopped if we read the last
	 *  byte of device (device size - fifo size). As the device size is
	 *  not known here, the prefetching is always stopped.
	 */
	if (err || qspi->fmode == CCR_FMODE_MM)
		goto abort;

	/* wait end of tx in indirect mode */
	err = stm32_qspi_wait_cmd(qspi, op);
	if (err)
		goto abort;

	return 0;

abort:
	/* Request abort of the current operation. */
	cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* wait clear of abort bit by hw */
	timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
						    cr, !(cr & CR_ABORT), 1,
						    STM32_ABT_TIMEOUT_US);

	/* Clear the transfer-complete flag before the next command. */
	writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);

	/*
	 * NOTE(review): err keeps its original value here, so a successful
	 * memory-mapped read that reaches this abort path still returns 0 —
	 * only the abort timeout itself is logged, not returned.
	 */
	if (err || timeout)
		dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
			__func__, err, timeout);

	return err;
}