static void rx_handle_packet(void) { /* check packet CRC */ uint8_t crc = dma_packet.crc; dma_packet.crc = 0; if (crc != crc_packet(&dma_packet)) { perf_count(pc_crcerr); /* send a CRC error reply */ dma_packet.count_code = PKT_CODE_CORRUPT; dma_packet.page = 0xff; dma_packet.offset = 0xff; return; } if (PKT_CODE(dma_packet) == PKT_CODE_WRITE) { /* it's a blind write - pass it on */ if (registers_set(dma_packet.page, dma_packet.offset, &dma_packet.regs[0], PKT_COUNT(dma_packet))) { perf_count(pc_regerr); dma_packet.count_code = PKT_CODE_ERROR; } else { dma_packet.count_code = PKT_CODE_SUCCESS; } return; } if (PKT_CODE(dma_packet) == PKT_CODE_READ) { /* it's a read - get register pointer for reply */ unsigned count; uint16_t *registers; if (registers_get(dma_packet.page, dma_packet.offset, ®isters, &count) < 0) { perf_count(pc_regerr); dma_packet.count_code = PKT_CODE_ERROR; } else { /* constrain reply to requested size */ if (count > PKT_MAX_REGS) count = PKT_MAX_REGS; if (count > PKT_COUNT(dma_packet)) count = PKT_COUNT(dma_packet); /* copy reply registers into DMA buffer */ memcpy((void *)&dma_packet.regs[0], registers, count * 2); dma_packet.count_code = count | PKT_CODE_SUCCESS; } return; } /* send a bad-packet error reply */ dma_packet.count_code = PKT_CODE_CORRUPT; dma_packet.page = 0xff; dma_packet.offset = 0xfe; }
/**
 * Perform one full packet exchange (TX then RX) with the IO MCU over DMA.
 *
 * @param _packet	Packet buffer: transmitted as the request, then
 *			overwritten in place by the reply received via RX DMA.
 * @return		OK on a completed transaction (the reply may still carry
 *			an error code from IO), -EIO on DMA/CRC failure, or the
 *			sem_timedwait() error result on timeout/other failure.
 */
int PX4IO_serial_f4::_bus_exchange(IOPacket *_packet) {
	_current_packet = _packet;

	/* clear any lingering error status - reading SR then DR clears USART error flags */
	(void)rSR;
	(void)rDR;

	/* start RX DMA */
	perf_begin(_pc_txns);
	perf_begin(_pc_dmasetup);

	/* DMA setup time ~3µs */
	_rx_dma_status = _dma_status_waiting;

	/*
	 * Note that we enable circular buffer mode as a workaround for
	 * there being no API to disable the DMA FIFO. We need direct mode
	 * because otherwise when the line idle interrupt fires there
	 * will be packet bytes still in the DMA FIFO, and we will assume
	 * that the idle was spurious.
	 *
	 * XXX this should be fixed with a NuttX change.
	 */
	stm32_dmasetup(
		_rx_dma,
		PX4IO_SERIAL_BASE + STM32_USART_DR_OFFSET,
		reinterpret_cast<uint32_t>(_current_packet),
		sizeof(*_current_packet),	/* RX is sized for a maximal reply */
		DMA_SCR_CIRC		| /* XXX see note above */
		DMA_SCR_DIR_P2M		|
		DMA_SCR_MINC		|
		DMA_SCR_PSIZE_8BITS	|
		DMA_SCR_MSIZE_8BITS	|
		DMA_SCR_PBURST_SINGLE	|
		DMA_SCR_MBURST_SINGLE);
	stm32_dmastart(_rx_dma, _dma_callback, this, false);
	rCR3 |= USART_CR3_DMAR;

	/* start TX DMA - no callback if we also expect a reply */
	/* DMA setup time ~3µs */
	stm32_dmasetup(
		_tx_dma,
		PX4IO_SERIAL_BASE + STM32_USART_DR_OFFSET,
		reinterpret_cast<uint32_t>(_current_packet),
		PKT_SIZE(*_current_packet),	/* TX sends only the populated part of the packet */
		DMA_SCR_DIR_M2P		|
		DMA_SCR_MINC		|
		DMA_SCR_PSIZE_8BITS	|
		DMA_SCR_MSIZE_8BITS	|
		DMA_SCR_PBURST_SINGLE	|
		DMA_SCR_MBURST_SINGLE);
	stm32_dmastart(_tx_dma, nullptr, nullptr, false);
	//rCR1 &= ~USART_CR1_TE;
	//rCR1 |= USART_CR1_TE;
	rCR3 |= USART_CR3_DMAT;

	perf_end(_pc_dmasetup);

	/* compute the deadline for a 10ms timeout */
	struct timespec abstime;
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_nsec += 10 * 1000 * 1000;

	/* normalise the timespec after adding 10ms of nanoseconds */
	if (abstime.tv_nsec >= 1000 * 1000 * 1000) {
		abstime.tv_sec++;
		abstime.tv_nsec -= 1000 * 1000 * 1000;
	}

	/* wait for the transaction to complete - 64 bytes @ 1.5Mbps ~426µs */
	int ret;

	/* loop so that a spurious wakeup (e.g. EINTR) retries the wait */
	for (;;) {
		ret = sem_timedwait(&_completion_semaphore, &abstime);

		if (ret == OK) {
			/* check for DMA errors - TEIF is set by the RX DMA callback on transfer error */
			if (_rx_dma_status & DMA_STATUS_TEIF) {
				perf_count(_pc_dmaerrs);
				ret = -EIO;
				break;
			}

			/* check packet CRC - corrupt packet errors mean IO receive CRC error */
			uint8_t crc = _current_packet->crc;
			_current_packet->crc = 0;

			if ((crc != crc_packet(_current_packet)) || (PKT_CODE(*_current_packet) == PKT_CODE_CORRUPT)) {
				perf_count(_pc_crcerrs);
				ret = -EIO;
				break;
			}

			/* successful txn (may still be reporting an error) */
			break;
		}

		if (errno == ETIMEDOUT) {
			/* something has broken - clear out any partial DMA state and reconfigure */
			_abort_dma();
			perf_count(_pc_timeouts);
			perf_cancel(_pc_txns);		/* don't count this as a transaction */
			break;				/* NOTE: ret is still sem_timedwait's error result here */
		}

		/* we might? see this for EINTR */
		syslog(LOG_ERR, "unexpected ret %d/%d\n", ret, errno);
	}

	/* reset DMA status */
	_rx_dma_status = _dma_status_inactive;

	/* update counters */
	perf_end(_pc_txns);

	return ret;
}