/*
 * Stop both DMA streams and re-arm the RX side for the next packet.
 *
 * DMAT/DMAR are cleared in CR3 first so the USART is detached from DMA
 * while the streams are reconfigured.  The SR read followed by two DR
 * reads discharges pending USART status (presumably IDLE/error flags —
 * this is the usual STM32 clear sequence; confirm against the reference
 * manual for this part).  Only RX is re-armed here; TX is set up
 * per-reply by the callers.
 */
static void
dma_reset(void)
{
	/* detach the USART from both DMA streams while we reconfigure */
	rCR3 &= ~(USART_CR3_DMAT | USART_CR3_DMAR);

	/* drain status and data registers to clear any stale flags */
	(void)rSR;
	(void)rDR;
	(void)rDR;

	/* kill any pending DMA */
	stm32_dmastop(tx_dma);
	stm32_dmastop(rx_dma);

	/* reset the RX side */
	stm32_dmasetup(
		rx_dma,
		(uint32_t)&rDR,
		(uint32_t)&dma_packet,
		sizeof(dma_packet),
		DMA_CCR_MINC |
		DMA_CCR_PSIZE_8BITS |
		DMA_CCR_MSIZE_8BITS |
		DMA_CCR_PRIVERYHI);

	/* start receive DMA ready for the next packet */
	stm32_dmastart(rx_dma, rx_dma_callback, NULL, false);

	/* reconnect the receiver to DMA; DMAT stays off until a reply is queued */
	rCR3 |= USART_CR3_DMAR;
}
/*
 * Receive-DMA completion callback: process the packet that just arrived,
 * re-arm reception, then queue the reply on the TX stream.
 *
 * NOTE(review): the status word is read from rSR but tested against
 * SPI_SR_BSY, so in this variant rSR is presumably a SPI status register
 * despite the USART-style register names used elsewhere — confirm against
 * the register macro definitions for this build.
 *
 * NOTE(review): dma_reset() is only invoked when BSY is clear; when the
 * bus is still busy the RX stream is left stopped here — verify that
 * another path re-arms reception in that case.
 */
static void
rx_dma_callback(DMA_HANDLE handle, uint8_t status, void *arg)
{
	/* latch the status register before the streams are stopped */
	uint16_t sr = rSR;

	stm32_dmastop(rx_dma);
	stm32_dmastop(tx_dma);

	/* handle the received packet */
	rx_handle_packet();

	/* re-set DMA for reception first, so we are ready to receive before we start sending */
	if (!(sr & SPI_SR_BSY)) {
		dma_reset();
	}

	/* send the reply to the just-processed request */
	/* CRC is computed over the packet with its crc field zeroed */
	dma_packet.crc = 0;
	dma_packet.crc = crc_packet(&dma_packet);
	stm32_dmasetup(
		tx_dma,
		(uint32_t)&rDR,
		(uint32_t)&dma_packet,
		PKT_SIZE(dma_packet),
		DMA_CCR_DIR |
		DMA_CCR_MINC |
		DMA_CCR_PSIZE_8BITS |
		DMA_CCR_MSIZE_8BITS |
		DMA_CCR_PRIMED);
	stm32_dmastart(tx_dma, NULL, NULL, false);

	/* transaction measurement started elsewhere (perf_begin not visible here) */
	perf_end(pc_txns);
}
/*
 * Receive-DMA completion callback (serial variant).
 *
 * Runs when DMA completes, or when UART reception went idle and a packet
 * is assumed to be in the buffer.  Detaches the UART from DMA, processes
 * the request, re-arms reception, then streams the reply out via TX DMA.
 */
static void
rx_dma_callback(DMA_HANDLE handle, uint8_t status, void *arg)
{
	/*
	 * We are here because DMA completed, or UART reception stopped and
	 * we think we have a packet in the buffer.
	 */
	perf_begin(pc_txns);

	/* disable UART DMA */
	rCR3 &= ~(USART_CR3_DMAT | USART_CR3_DMAR);

	/* handle the received packet */
	rx_handle_packet();

	/* re-set DMA for reception first, so we are ready to receive before we start sending */
	dma_reset();

	/* send the reply to the just-processed request */
	/* CRC is computed over the packet with its crc field zeroed */
	dma_packet.crc = 0;
	dma_packet.crc = crc_packet(&dma_packet);
	stm32_dmasetup(
		tx_dma,
		(uint32_t)&rDR,
		(uint32_t)&dma_packet,
		PKT_SIZE(dma_packet),
		DMA_CCR_DIR |
		DMA_CCR_MINC |
		DMA_CCR_PSIZE_8BITS |
		DMA_CCR_MSIZE_8BITS);
	stm32_dmastart(tx_dma, NULL, NULL, false);

	/* attach the transmitter to DMA only after the stream is armed */
	rCR3 |= USART_CR3_DMAT;

	perf_end(pc_txns);
}
/****************************************************************************
 * Name: dac_send
 *
 * Description:
 *   Send one DAC message.  The channel is enabled first; with DMA support
 *   the configured buffer is streamed to the data register, otherwise the
 *   single value is written directly and completion is reported at once.
 *
 * Input Parameters:
 *   dev - DAC device state
 *   msg - message to output (am_data used only in the non-DMA path)
 *
 * Returned Value:
 *   OK (always; hardware errors are not reported from here)
 *
 ****************************************************************************/

static int dac_send(FAR struct dac_dev_s *dev, FAR struct dac_msg_s *msg)
{
	FAR struct stm32_chan_s *chan = dev->ad_priv;

	/* Enable DAC Channel */

	stm32_dac_modify_cr(chan, 0, DAC_CR_EN);

#ifdef HAVE_DMA
	if (chan->hasdma) {
		/* Configure the DMA stream/channel.
		 *
		 * - Channel number
		 * - Peripheral address
		 * - Direction: Memory to peripheral
		 * - Disable peripheral address increment
		 * - Enable memory address increment
		 * - Peripheral data size: half word
		 * - Mode: circular???
		 * - Priority: ?
		 * - FIFO mode: disable
		 * - FIFO threshold: half full
		 * - Memory Burst: single
		 * - Peripheral Burst: single
		 */

		/* NOTE(review): the actual transfer settings come from
		 * DAC_DMA_CONTROL_WORD, defined elsewhere — verify it matches
		 * the intent listed above. */

		stm32_dmasetup(chan->dma, chan->dro, (uint32_t)chan->dmabuffer,
			       chan->buffer_len, DAC_DMA_CONTROL_WORD);

		/* Enable DMA */

		stm32_dmastart(chan->dma, dac_dmatxcallback, chan, false);

		/* Enable DMA for DAC Channel */

		stm32_dac_modify_cr(chan, 0, DAC_CR_DMAEN);
	} else
#endif
	{
		/* Non-DMA transfer: write the value and signal completion now */

		putreg16(msg->am_data, chan->dro);
		dac_txdone(dev);
	}

	/* Reset counters (generate an update). Only when timer is not HRTIM */

#ifdef HAVE_TIMER
	if (chan->timer != TIM_INDEX_HRTIM) {
		tim_modifyreg(chan, STM32_BTIM_EGR_OFFSET, 0, ATIM_EGR_UG);
	}
#endif

	return OK;
}
static void i2c_rx_setup(void) { /* * Note that we configure DMA in circular mode; this means that a too-long * transfer will overwrite the buffer, but that avoids us having to deal with * bailing out of a transaction while the master is still babbling at us. */ rx_len = 0; stm32_dmasetup(rx_dma, (uintptr_t)&rDR, (uintptr_t)&rx_buf[0], sizeof(rx_buf), DMA_CCR_CIRC | DMA_CCR_MINC | DMA_CCR_PSIZE_32BITS | DMA_CCR_MSIZE_8BITS | DMA_CCR_PRIMED); stm32_dmastart(rx_dma, NULL, NULL, false); }
static void i2c_tx_setup(void) { /* * Note that we configure DMA in circular mode; this means that a too-long * transfer will copy the buffer more than once, but that avoids us having * to deal with bailing out of a transaction while the master is still * babbling at us. */ stm32_dmasetup(tx_dma, (uintptr_t)&rDR, (uintptr_t)&tx_buf[0], tx_len, DMA_CCR_DIR | DMA_CCR_CIRC | DMA_CCR_MINC | DMA_CCR_PSIZE_8BITS | DMA_CCR_MSIZE_8BITS | DMA_CCR_PRIMED); stm32_dmastart(tx_dma, NULL, NULL, false); }
static void dma_reset(void) { /* kill any pending DMA */ stm32_dmastop(tx_dma); stm32_dmastop(rx_dma); /* reset the RX side */ stm32_dmasetup( rx_dma, (uint32_t)&rDR, (uint32_t)&dma_packet, sizeof(dma_packet), DMA_CCR_MINC | DMA_CCR_PSIZE_8BITS | DMA_CCR_MSIZE_8BITS | DMA_CCR_PRIMED); /* start receive DMA ready for the next packet */ stm32_dmastart(rx_dma, rx_dma_callback, NULL, false); }
/*
 * Perform one request/reply exchange with IO over the serial link.
 *
 * Arms RX DMA for the expected reply first, then streams the request out
 * via TX DMA, and blocks (up to 10 ms) on the completion semaphore that
 * _dma_callback posts.  On wakeup the reply is validated (DMA transfer
 * error flag, then CRC / PKT_CODE_CORRUPT).
 *
 * Returns OK on a completed transaction (which may still carry an
 * error code in the packet), or -EIO on DMA/CRC failure.  On timeout
 * the DMA state is torn down via _abort_dma() and sem_timedwait()'s
 * return value (nonzero) is passed through.
 */
int
PX4IO_serial_f4::_bus_exchange(IOPacket *_packet)
{
	_current_packet = _packet;

	/* clear any lingering error status */
	(void)rSR;
	(void)rDR;

	/* start RX DMA */
	perf_begin(_pc_txns);
	perf_begin(_pc_dmasetup);

	/* DMA setup time ~3µs */
	_rx_dma_status = _dma_status_waiting;

	/*
	 * Note that we enable circular buffer mode as a workaround for
	 * there being no API to disable the DMA FIFO. We need direct mode
	 * because otherwise when the line idle interrupt fires there
	 * will be packet bytes still in the DMA FIFO, and we will assume
	 * that the idle was spurious.
	 *
	 * XXX this should be fixed with a NuttX change.
	 */
	stm32_dmasetup(
		_rx_dma,
		PX4IO_SERIAL_BASE + STM32_USART_DR_OFFSET,
		reinterpret_cast<uint32_t>(_current_packet),
		sizeof(*_current_packet),
		DMA_SCR_CIRC		| /* XXX see note above */
		DMA_SCR_DIR_P2M		|
		DMA_SCR_MINC		|
		DMA_SCR_PSIZE_8BITS	|
		DMA_SCR_MSIZE_8BITS	|
		DMA_SCR_PBURST_SINGLE	|
		DMA_SCR_MBURST_SINGLE);
	stm32_dmastart(_rx_dma, _dma_callback, this, false);
	rCR3 |= USART_CR3_DMAR;

	/* start TX DMA - no callback if we also expect a reply */
	/* DMA setup time ~3µs */
	stm32_dmasetup(
		_tx_dma,
		PX4IO_SERIAL_BASE + STM32_USART_DR_OFFSET,
		reinterpret_cast<uint32_t>(_current_packet),
		PKT_SIZE(*_current_packet),
		DMA_SCR_DIR_M2P		|
		DMA_SCR_MINC		|
		DMA_SCR_PSIZE_8BITS	|
		DMA_SCR_MSIZE_8BITS	|
		DMA_SCR_PBURST_SINGLE	|
		DMA_SCR_MBURST_SINGLE);
	stm32_dmastart(_tx_dma, nullptr, nullptr, false);
	//rCR1 &= ~USART_CR1_TE;
	//rCR1 |= USART_CR1_TE;
	rCR3 |= USART_CR3_DMAT;

	perf_end(_pc_dmasetup);

	/* compute the deadline for a 10ms timeout */
	struct timespec abstime;
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_nsec += 10 * 1000 * 1000;

	if (abstime.tv_nsec >= 1000 * 1000 * 1000) {
		abstime.tv_sec++;
		abstime.tv_nsec -= 1000 * 1000 * 1000;
	}

	/* wait for the transaction to complete - 64 bytes @ 1.5Mbps ~426µs */
	int ret;

	for (;;) {
		ret = sem_timedwait(&_completion_semaphore, &abstime);

		if (ret == OK) {
			/* check for DMA errors */
			if (_rx_dma_status & DMA_STATUS_TEIF) {
				perf_count(_pc_dmaerrs);
				ret = -EIO;
				break;
			}

			/* check packet CRC - corrupt packet errors mean IO receive CRC error */
			/* CRC is computed with the crc field zeroed, so save and clear it first */
			uint8_t crc = _current_packet->crc;
			_current_packet->crc = 0;

			if ((crc != crc_packet(_current_packet)) || (PKT_CODE(*_current_packet) == PKT_CODE_CORRUPT)) {
				perf_count(_pc_crcerrs);
				ret = -EIO;
				break;
			}

			/* successful txn (may still be reporting an error) */
			break;
		}

		if (errno == ETIMEDOUT) {
			/* something has broken - clear out any partial DMA state and reconfigure */
			_abort_dma();
			perf_count(_pc_timeouts);
			perf_cancel(_pc_txns);		/* don't count this as a transaction */
			break;
		}

		/* we might? see this for EINTR */
		/* NOTE(review): any other sem_timedwait failure (e.g. EINTR) just
		 * logs and retries the wait against the same deadline */
		syslog(LOG_ERR, "unexpected ret %d/%d\n", ret, errno);
	}

	/* reset DMA status */
	_rx_dma_status = _dma_status_inactive;

	/* update counters */
	perf_end(_pc_txns);
	return ret;
}