/** Reset a DMA stream to its post-power-on state.
 * Disables the stream, waits for the disable to take effect, and restores
 * every stream register and interrupt flag to its reset value.
 * \param dma DMA controller base address (DMA1 or DMA2).
 * \param stream Stream number (0..7).
 */
void dma_stream_reset(u32 dma, u8 stream)
{
    /* Disable stream (must be done before register is otherwise changed). */
    DMA_SCR(dma, stream) &= ~DMA_SxCR_EN;
    /* RM0090 - 9.3.17: wait until the enable bit reads back '0' before
     * writing any other stream register (other setup functions in this
     * file already honour this; the reset path must too). */
    while (DMA_SCR(dma, stream) & DMA_SxCR_EN)
        ;
    /* Reset all config bits. */
    DMA_SCR(dma, stream) = 0;
    /* Reset data transfer number. */
    DMA_SNDTR(dma, stream) = 0;
    /* Reset peripheral and memory addresses. */
    DMA_SPAR(dma, stream) = 0;
    DMA_SM0AR(dma, stream) = 0;
    DMA_SM1AR(dma, stream) = 0;
    /* 0x21 is the FCR register's documented reset value. */
    DMA_SFCR(dma, stream) = 0x21;
    /* Reset all stream interrupt flags using the interrupt flag clear
     * register.  The xIFCR registers are write-only (reads return 0), so a
     * plain write is correct; read-modify-write buys nothing here. */
    u32 mask = DMA_ISR_MASK(stream);
    if (stream < 4) {
        DMA_LIFCR(dma) = mask;
    } else {
        DMA_HIFCR(dma) = mask;
    }
}
/** Helper function that schedules a new transfer with the DMA controller if
 * needed.
 *
 * Points the stream at the unread region of the TX ring buffer, programs the
 * transfer length (up to the write pointer, or to the end of the buffer when
 * the data wraps), and enables the stream.  Must only be called when the
 * stream is idle and with DMA interrupts prevented from racing against it.
 *
 * \param s The USART DMA state structure.
 */
static void dma_schedule(usart_tx_dma_state* s)
{
    /* TODO: We shouldn't have to check for this now that we are called
     * atomically but leaving it in for now just in case. */
    if (DMA_SCR(s->dma, s->stream) & DMA_SxCR_EN)
        screaming_death("DMA TX scheduled while DMA channel running");

    /* DMA source is the first unread byte in the ring buffer. */
    DMA_SM0AR(s->dma, s->stream) = &(s->buff[s->rd]);

    /* Save the transfer length so we can increment the read index after the
     * transfer is finished. */
    if (s->rd < s->wr)
        /* DMA up until write pointer. */
        s->xfer_len = s->wr - s->rd;
    else
        /* Data wraps: DMA up until the end of the buffer; the remainder is
         * scheduled after the transfer-complete interrupt. */
        s->xfer_len = USART_TX_BUFFER_LEN - s->rd;

    /* Set the number of datas in the DMA controller. */
    DMA_SNDTR(s->dma, s->stream) = s->xfer_len;

    /* Clear USART_TC flag so transfer completion can be detected. */
    USART_SR(s->usart) &= ~USART_SR_TC;

    /* Enable DMA stream to start transfer. */
    DMA_SCR(s->dma, s->stream) |= DMA_SxCR_EN;
}
/** Perform a blocking SPI1 transfer using DMA2 stream 0 (RX) / stream 3 (TX).
 * \param n_bytes  Number of bytes to transfer.  Transfers are clamped to the
 *                 size of the internal bounce buffer (128 bytes) — the
 *                 original code would overflow the buffer for larger values.
 * \param data_in  Destination for received bytes, or NULL to discard them.
 * \param data_out Bytes to transmit.
 */
void spi1_xfer_dma(u16 n_bytes, u8 data_in[], const u8 data_out[])
{
    /* We use a static buffer here for DMA transfers as data_in/data_out
     * often are on the stack in CCM which is not accessible by DMA. */
    static volatile u8 spi_dma_buf[128];

    /* Guard the bounce buffer: never copy more than it can hold. */
    if (n_bytes > sizeof(spi_dma_buf))
        n_bytes = sizeof(spi_dma_buf);

    memcpy((u8*)spi_dma_buf, data_out, n_bytes);

    /* Setup transmit stream. */
    DMA_SM0AR(DMA2, 3) = spi_dma_buf;
    DMA_SNDTR(DMA2, 3) = n_bytes;

    /* Setup receive stream. */
    DMA_SM0AR(DMA2, 0) = spi_dma_buf;
    DMA_SNDTR(DMA2, 0) = n_bytes;

    /* We need a memory barrier here to avoid a transfer error: ensure the
     * buffer and register writes complete before the streams are enabled. */
    asm volatile ("dmb");

    /* Enable the DMA RX channel. */
    DMA_SCR(DMA2, 0) |= DMA_SxCR_EN;
    /* Enable the transmit channel to begin the transaction. */
    DMA_SCR(DMA2, 3) |= DMA_SxCR_EN;

    /* Yield the CPU while we wait for the transaction to complete. */
    chBSemWait(&spi_dma_sem);

    if (data_in != NULL)
        memcpy(data_in, (u8*)spi_dma_buf, n_bytes);
}
/** Select the data transfer direction for a DMA stream.
 * \param dma DMA controller base address.
 * \param stream Stream number.
 * \param direction One of the DMA_SxCR_DIR_* values.
 */
void dma_set_transfer_mode(u32 dma, u8 stream, u32 direction)
{
    u32 ctrl = DMA_SCR(dma, stream);
    ctrl &= ~DMA_SxCR_DIR_MASK;
    /* Circular and double-buffer modes are invalid for memory-to-memory
     * transfers, so clear them in that case (Direct Mode is disabled
     * automatically by hardware). */
    if (direction == DMA_SxCR_DIR_MEM_TO_MEM)
        ctrl &= ~(DMA_SxCR_CIRC | DMA_SxCR_DBM);
    ctrl |= direction;
    DMA_SCR(dma, stream) = ctrl;
}
/** Configure a DMA stream for SPI receive (peripheral-to-memory).
 * Leaves the transfer count and memory address unset; they are programmed
 * per-transaction by the initiating code.
 * \param spi SPI peripheral base address.
 * \param dma DMA controller base address.
 * \param stream Stream number to use.
 * \param channel Request channel that maps this stream to the SPI RX line.
 */
static void spi_dma_setup_rx(uint32_t spi, uint32_t dma, u8 stream, u8 channel)
{
    spi_enable_rx_dma(spi);

    /* Make sure stream is disabled to start. */
    DMA_SCR(dma, stream) &= ~DMA_SxCR_EN;
    /* RM0090 - 9.3.17 : Supposed to wait until enable bit reads '0' before we
     * write to registers. */
    while (DMA_SCR(dma, stream) & DMA_SxCR_EN) ;
    /* RM0090 - 9.3.17 : Supposed to clear any interrupts in DMA status register
     * before we reconfigure registers. */
    dma_clear_interrupt_flags(dma, stream, DMA_ISR_FLAGS);

    /* Configure the DMA controller. */
    DMA_SCR(dma, stream) = 0;
    DMA_SCR(dma, stream) =
        /* Error interrupts. */
        DMA_SxCR_DMEIE | DMA_SxCR_TEIE |
        DMA_SxCR_DIR_PERIPHERAL_TO_MEM |
        /* Enable DMA transfer complete interrupt */
        DMA_SxCR_TCIE |
        /* Increment the memory address after each transfer. */
        DMA_SxCR_MINC |
        /* 8 bit transfers from SPI peripheral. */
        DMA_SxCR_PSIZE_8BIT |
        /* and to memory. */
        DMA_SxCR_MSIZE_8BIT |
        /* Very high priority (NOTE: earlier comment said "low"; the code
         * has always set PL_VERY_HIGH). */
        DMA_SxCR_PL_VERY_HIGH |
        /* The channel selects which request line will trigger a transfer.
         * (see CD00225773.pdf Table 23). */
        DMA_SxCR_CHSEL(channel);

    /* Transfer count is left at zero here; set per-transaction later. */
    DMA_SNDTR(dma, stream) = 0;

    /* DMA from the SPI data register... */
    DMA_SPAR(dma, stream) = &SPI_DR(spi);

    /* Enable DMA interrupts for this stream with the NVIC. */
    if (dma == DMA1)
        nvicEnableVector(dma_irq_lookup[0][stream],
                         CORTEX_PRIORITY_MASK(CORTEX_MAX_KERNEL_PRIORITY+2));
    else if (dma == DMA2)
        nvicEnableVector(dma_irq_lookup[1][stream],
                         CORTEX_PRIORITY_MASK(CORTEX_MAX_KERNEL_PRIORITY+2));
}
/** Disable USART TX DMA.
 * Masks the stream's NVIC interrupt, stops the stream (waiting for the
 * disable to take effect), then turns off TX DMA requests at the USART.
 * \param s The USART DMA state structure.
 */
void usart_tx_dma_disable(usart_tx_dma_state* s)
{
    /* Disable DMA stream interrupts with the NVIC. */
    if (s->dma == DMA1)
        nvicDisableVector(dma_irq_lookup[0][s->stream]);
    else if (s->dma == DMA2)
        nvicDisableVector(dma_irq_lookup[1][s->stream]);

    /* Disable DMA stream and wait for the enable bit to read back '0'. */
    DMA_SCR(s->dma, s->stream) &= ~DMA_SxCR_EN;
    while (DMA_SCR(s->dma, s->stream) & DMA_SxCR_EN) ;

    /* Disable TX DMA on the USART. */
    usart_disable_tx_dma(s->usart);
}
/** Disable USART RX DMA.
 * Masks the stream's NVIC interrupt, stops the stream (waiting for the
 * disable to take effect), turns off RX DMA requests at the USART, and
 * clears any pending stream interrupt flags.
 * \param s The USART DMA state structure.
 */
void usart_rx_dma_disable(usart_rx_dma_state* s)
{
    /* Disable DMA stream interrupts with the NVIC. */
    if (s->dma == DMA1)
        nvicDisableVector(dma_irq_lookup[0][s->stream]);
    else if (s->dma == DMA2)
        nvicDisableVector(dma_irq_lookup[1][s->stream]);

    /* Disable DMA stream and wait for the enable bit to read back '0'. */
    DMA_SCR(s->dma, s->stream) &= ~DMA_SxCR_EN;
    while (DMA_SCR(s->dma, s->stream) & DMA_SxCR_EN) ;

    /* Disable RX DMA on the USART. */
    usart_disable_rx_dma(s->usart);

    /* Clear the DMA transfer complete and half complete interrupt flags. */
    dma_clear_interrupt_flags(s->dma, s->stream, DMA_HTIF | DMA_TCIF);
}
/** Configure a DMA stream for SPI transmit (memory-to-peripheral).
 * Leaves the transfer count and memory address unset; they are programmed
 * per-transaction by the initiating code.
 * \param spi SPI peripheral base address.
 * \param dma DMA controller base address.
 * \param stream Stream number to use.
 * \param channel Request channel that maps this stream to the SPI TX line.
 */
static void spi_dma_setup_tx(uint32_t spi, uint32_t dma, u8 stream, u8 channel)
{
    spi_enable_tx_dma(spi);

    /* Make sure stream is disabled to start. */
    DMA_SCR(dma, stream) &= ~DMA_SxCR_EN;
    /* RM0090 - 9.3.17 : wait until the enable bit reads '0' before writing
     * to the stream registers (mirrors spi_dma_setup_rx, which already
     * performed this required wait). */
    while (DMA_SCR(dma, stream) & DMA_SxCR_EN)
        ;

    /* Configure the DMA controller. */
    DMA_SCR(dma, stream) = 0;
    DMA_SCR(dma, stream) =
        /* Error interrupts. */
        DMA_SxCR_DMEIE | DMA_SxCR_TEIE |
        DMA_SxCR_DIR_MEM_TO_PERIPHERAL |
        /* Increment the memory address after each transfer. */
        DMA_SxCR_MINC |
        /* 8 bit transfers to the SPI peripheral... */
        DMA_SxCR_PSIZE_8BIT |
        /* ...and from memory. */
        DMA_SxCR_MSIZE_8BIT |
        /* Very high priority. */
        DMA_SxCR_PL_VERY_HIGH |
        /* The channel selects which request line will trigger a transfer.
         * (see CD00225773.pdf Table 23). */
        DMA_SxCR_CHSEL(channel);

    /* Transfer count is left at zero here; set per-transaction later. */
    DMA_SNDTR(dma, stream) = 0;

    /* DMA into the SPI data register... */
    DMA_SPAR(dma, stream) = &SPI_DR(spi);

    /* Enable DMA interrupts for this stream with the NVIC. */
    if (dma == DMA1)
        nvicEnableVector(dma_irq_lookup[0][stream],
                         CORTEX_PRIORITY_MASK(CORTEX_MAX_KERNEL_PRIORITY+2));
    else if (dma == DMA2)
        nvicEnableVector(dma_irq_lookup[1][stream],
                         CORTEX_PRIORITY_MASK(CORTEX_MAX_KERNEL_PRIORITY+2));
}
/*******************************************************************************
 * void bsp_uart_start_dma(struct CB_UART* pctl);
 * @brief  : If DMA driven uart, start DMA sending if not already sending
 * @param  : pctl: control block pointer
 * @return : none
 *
 * NOTE: flag == 2 marks the control block as DMA-driven; 0x1 in the stream
 * control register is the EN (stream enable) bit.
 ******************************************************************************/
void bsp_uart_start_dma(struct CB_UART* pctl)
{
    if (pctl->flag != 2) return; // Return, not DMA driven

    /* Are we already running? */
    // First look at the enable bit (bit 0 of SxCR = EN)
    if ((DMA_SCR(pctl->idma, pctl->txdma_stream) & 0x1) != 0)
        return; // Already running

    // Check if the counter has gone to zero (a non-zero count means a
    // transfer is still pending even though EN has cleared)
    if ( DMA_SNDTR(pctl->idma,pctl->txdma_stream) != 0) return;

    /* Not running. If there are any to send, set up the DMA. */
    common_dma(pctl);
    return;
}
/** Start a DMA transmit of whatever is buffered in the TX ring buffer.
 *
 * NOTE: This routine is entered from mainline only if the DMA is idle, and
 * entered after a DMA interrupt, after it is disabled.  Therefore, the
 * following is based on a given stream DMA interrupt not occurring while
 * the mainline is mucking around in this routine.
 *
 * \param pctl Control block pointer.
 */
static void common_dma(struct CB_UART* pctl)
{
    int tmp;
    u8* ptmp;

    /* Are there bytes buffered? (Certainly yes if entered from mainline.) */
    tmp = (pctl->txbuff_in - pctl->txbuff_out);
    if (tmp == 0)
        return;

    if (tmp < 0) /* Wrap-around check. */
    {
        /* Here, there is wrap-around, so send what remains in the
         * (non-circular) buffer.  (Upon the next DMA interrupt what remains
         * after the wrap-around will be sent.) */
        /* Compute number of bytes remaining to the end of the buffer. */
        tmp = (pctl->txbuff_end - pctl->txbuff_out);            // Remaining ct
        DMA_SM0AR(pctl->idma,pctl->txdma_stream) = pctl->txbuff_out; // Set TX mem address
        pctl->txbuff_dmanext = pctl->txbuff_base;               // Save new start ptr
    }
    else
    {
        /* Here, no wrap around, so all buffered bytes can be sent with one
         * setting.  (The original carried a redundant — and syntactically
         * broken — recomputation of tmp here; tmp already holds the count.) */
        DMA_SM0AR(pctl->idma,pctl->txdma_stream) = pctl->txbuff_out; // Set TX mem address
        ptmp = pctl->txbuff_out + tmp;
        if (ptmp >= pctl->txbuff_end)
            ptmp = pctl->txbuff_base;
        pctl->txbuff_dmanext = ptmp;                            // Save new start ptr
    }
    DMA_SNDTR(pctl->idma,pctl->txdma_stream) = tmp;  // Set number of bytes
    DMA_SCR (pctl->idma,pctl->txdma_stream) |= 0x1;  // Enable DMA (EN bit) and away we go!
    return;
}
/** Write out data over the USART using DMA.
 * Note that this function is not reentrant and does not guard against DMA IRQs
 * running at the same time which will also cause spurious behaviours. Ensure
 * that the calling function prevents this from happening.
 *
 * \param s The USART DMA state structure.
 * \param data A pointer to the data to write out.
 * \param len The number of bytes to write.
 * \return len if the data was queued, or 0 if len exceeds the free space in
 *         the TX buffer (no partial writes are performed).
 */
u32 usart_write_dma(usart_tx_dma_state* s, u8 data[], u32 len)
{
    /* If there is no data to write, just return. */
    if (len == 0) return 0;

    /* Refuse the whole write if it doesn't fit in the free space — nothing
     * is queued in that case. */
    u32 n_free = usart_tx_n_free(s);
    if (len > n_free)
        return 0;

    u32 old_wr = s->wr;
    s->wr = (s->wr + len) % USART_TX_BUFFER_LEN;

    if (old_wr + len <= USART_TX_BUFFER_LEN)
        memcpy(&(s->buff[old_wr]), data, len);
    else {
        /* Deal with case where write wraps the buffer. */
        memcpy(&(s->buff[old_wr]), &data[0], USART_TX_BUFFER_LEN - old_wr);
        memcpy(&(s->buff[0]), &data[USART_TX_BUFFER_LEN - old_wr],
               len - (USART_TX_BUFFER_LEN - old_wr));
    }

    /* Check if there is a DMA transfer either in progress or waiting for its
     * interrupt to be serviced. Its very important to also check the interrupt
     * flag as EN will be cleared when the transfer finishes but we really need
     * to make sure the ISR has been run to finish up the bookkeeping for the
     * transfer. Also, make sure that this is done atomically without a DMA
     * interrupt squeezing in there. */
    if (!((DMA_SCR(s->dma, s->stream) & DMA_SxCR_EN) ||
          dma_get_interrupt_flag(s->dma, s->stream, DMA_TCIF)))
        dma_schedule(s);

    s->byte_counter += len;
    return len;
}
/** Turn on double-buffer (ping-pong) mode for a DMA stream. */
void dma_enable_double_buffer_mode(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr |= DMA_SxCR_DBM;
    DMA_SCR(dma, stream) = cr;
}
/** Turn off double-buffer (ping-pong) mode for a DMA stream. */
void dma_disable_double_buffer_mode(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr &= ~DMA_SxCR_DBM;
    DMA_SCR(dma, stream) = cr;
}
/** Program memory address register 1 (the second double-buffer target).
 * The write is only performed when it is legal: either the stream is
 * disabled, or double-buffer mode is active and target 0 is currently in
 * use (so M1AR is not being read by the hardware).
 */
void dma_set_memory_address_1(u32 dma, u8 stream, u32 address)
{
    u32 cr = DMA_SCR(dma, stream);
    int stream_idle = !(cr & DMA_SxCR_EN);
    int m1_not_in_use = (cr & DMA_SxCR_DBM) && !(cr & DMA_SxCR_CT);

    if (stream_idle || m1_not_in_use)
        DMA_SM1AR(dma, stream) = (u32 *) address;
}
/** Program the peripheral address register; only legal while the stream is
 * disabled, so the write is skipped when EN is set.
 */
void dma_set_peripheral_address(u32 dma, u8 stream, u32 address)
{
    int enabled = (DMA_SCR(dma, stream) & DMA_SxCR_EN) != 0;
    if (enabled)
        return;
    DMA_SPAR(dma, stream) = (u32 *) address;
}
/** Enable the half-transfer interrupt, clearing any stale HT flag first so
 * an old event does not fire immediately.
 */
void dma_enable_half_transfer_interrupt(u32 dma, u8 stream)
{
    dma_clear_interrupt_flags(dma, stream, DMA_ISR_HTIF);
    u32 cr = DMA_SCR(dma, stream);
    cr |= DMA_SxCR_HTIE;
    DMA_SCR(dma, stream) = cr;
}
/** Select which double-buffer target (memory 0 or memory 1) the stream uses
 * first.
 * \param memory 1 selects memory 1 (CT set); any other value selects memory 0.
 */
void dma_set_initial_target(u32 dma, u8 stream, u8 memory)
{
    u32 cr = DMA_SCR(dma, stream) & ~DMA_SxCR_CT;
    DMA_SCR(dma, stream) = (memory == 1) ? (cr | DMA_SxCR_CT) : cr;
}
/** Hand flow control to the peripheral (PFCTRL set): the peripheral, not the
 * DMA counter, decides when the transfer ends.
 */
void dma_set_peripheral_flow_control(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr |= DMA_SxCR_PFCTRL;
    DMA_SCR(dma, stream) = cr;
}
/** Hand flow control back to the DMA controller (PFCTRL cleared): the
 * transfer ends when the programmed item count is exhausted.
 */
void dma_set_dma_flow_control(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr &= ~DMA_SxCR_PFCTRL;
    DMA_SCR(dma, stream) = cr;
}
/** Mask the transfer-complete interrupt for a DMA stream. */
void dma_disable_transfer_complete_interrupt(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr &= ~DMA_SxCR_TCIE;
    DMA_SCR(dma, stream) = cr;
}
/** Enable the transfer-complete interrupt, clearing any stale TC flag first
 * so an old event does not fire immediately.
 */
void dma_enable_transfer_complete_interrupt(u32 dma, u8 stream)
{
    dma_clear_interrupt_flags(dma, stream, DMA_ISR_TCIF);
    u32 cr = DMA_SCR(dma, stream);
    cr |= DMA_SxCR_TCIE;
    DMA_SCR(dma, stream) = cr;
}
/** Mask the half-transfer interrupt for a DMA stream. */
void dma_disable_half_transfer_interrupt(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr &= ~DMA_SxCR_HTIE;
    DMA_SCR(dma, stream) = cr;
}
/** Setup the USART for transmission with DMA.
 * This function sets up the DMA controller and additional USART parameters for
 * DMA transmit. The USART must already be configured for normal operation.
 *
 * \param s The USART DMA state structure.
 * \param usart The USART base address.
 * \param dma The DMA controller base address.
 * \param stream The DMA stream number to use.
 * \param channel The DMA channel to use. The stream and channel must
 *                correspond to a USART TX request line.
 */
void usart_tx_dma_setup(usart_tx_dma_state* s, u32 usart,
                        u32 dma, u8 stream, u8 channel)
{
    s->dma = dma;
    s->usart = usart;
    s->stream = stream;
    s->channel = channel;
    s->byte_counter = 0;
    s->last_byte_ticks = chTimeNow();

    /* Enable clock to DMA peripheral. */
    if (dma == DMA1)
        RCC_AHB1ENR |= RCC_AHB1ENR_DMA1EN;
    else if (dma == DMA2)
        RCC_AHB1ENR |= RCC_AHB1ENR_DMA2EN;

    /* Enable TX DMA on the USART. */
    usart_enable_tx_dma(usart);

    /* Make sure stream is disabled to start. */
    DMA_SCR(dma, stream) &= ~DMA_SxCR_EN;

    /* Configure the DMA controller. */
    DMA_SCR(dma, stream) = 0;
    DMA_SCR(dma, stream) =
        /* Error interrupts. */
        DMA_SxCR_DMEIE | DMA_SxCR_TEIE |
        /* Transfer complete interrupt. */
        DMA_SxCR_TCIE |
        DMA_SxCR_DIR_MEM_TO_PERIPHERAL |
        /* Increment the memory address after each transfer. */
        DMA_SxCR_MINC |
        /* 4 bytes written to the FIFO from memory at a time */
        DMA_SxCR_MBURST_INCR4 |
        /* 8 bit transfers from USART peripheral. */
        DMA_SxCR_PSIZE_8BIT |
        /* and to memory. */
        DMA_SxCR_MSIZE_8BIT |
        /* TODO: what priority level is necessary? */
        /* Very high priority. */
        DMA_SxCR_PL_VERY_HIGH |
        /* The channel selects which request line will trigger a transfer.
         * (see CD00225773.pdf Table 23). */
        DMA_SxCR_CHSEL(channel);

    /* For now, don't transfer any number of datas
     * (will be set in the initiating function). */
    DMA_SNDTR(dma, stream) = 0;

    /* DMA into the USART data register... */
    DMA_SPAR(dma, stream) = &USART_DR(usart);
    /* ...from the TX buffer. */
    DMA_SM0AR(dma, stream) = s->buff;

    /* TODO: Investigate more about the best FIFO settings. */
    DMA_SFCR(dma, stream) =
        DMA_SxFCR_DMDIS |         /* Enable DMA stream FIFO. */
        DMA_SxFCR_FTH_2_4_FULL |  /* Trigger level 2/4 full. */
        DMA_SxFCR_FEIE;           /* Enable FIFO error interrupt. */

    s->wr = s->rd = 0;  /* Buffer is empty to begin with. */

    /* Enable DMA interrupts for this stream with the NVIC. */
    if (dma == DMA1)
        nvicEnableVector(dma_irq_lookup[0][stream],
                         CORTEX_PRIORITY_MASK(USART_DMA_ISR_PRIORITY));
    else if (dma == DMA2)
        nvicEnableVector(dma_irq_lookup[1][stream],
                         CORTEX_PRIORITY_MASK(USART_DMA_ISR_PRIORITY));
}
/** Set the EN bit to start a DMA stream. */
void dma_enable_stream(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr |= DMA_SxCR_EN;
    DMA_SCR(dma, stream) = cr;
}
/** Report which double-buffer target the stream is currently using.
 * \return 1 when the CT bit is set (memory 1), otherwise 0 (memory 0).
 */
u8 dma_get_target(u32 dma, u8 stream)
{
    return (DMA_SCR(dma, stream) & DMA_SxCR_CT) ? 1 : 0;
}
/** Clear the EN bit to stop a DMA stream.  (Callers that reconfigure the
 * stream afterwards should poll EN until it reads back '0'.)
 */
void dma_disable_stream(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr &= ~DMA_SxCR_EN;
    DMA_SCR(dma, stream) = cr;
}
/** Enable the direct-mode-error interrupt, clearing any stale DME flag first
 * so an old event does not fire immediately.
 */
void dma_enable_direct_mode_error_interrupt(u32 dma, u8 stream)
{
    dma_clear_interrupt_flags(dma, stream, DMA_ISR_DMEIF);
    u32 cr = DMA_SCR(dma, stream);
    cr |= DMA_SxCR_DMEIE;
    DMA_SCR(dma, stream) = cr;
}
/** Mask the transfer-error interrupt for a DMA stream. */
void dma_disable_transfer_error_interrupt(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr &= ~DMA_SxCR_TEIE;
    DMA_SCR(dma, stream) = cr;
}
/** Setup the USART for receive with DMA.
 * This function sets up the DMA controller and additional USART parameters for
 * DMA receive. The USART must already be configured for normal operation.
 * The stream runs in circular mode over the RX buffer and is enabled before
 * this function returns.
 *
 * \param s The USART DMA state structure.
 * \param usart The USART base address.
 * \param dma The DMA controller base address.
 * \param stream The DMA stream number to use.
 * \param channel The DMA channel to use. The stream and channel must
 *                correspond to a USART RX request line.
 */
void usart_rx_dma_setup(usart_rx_dma_state* s, u32 usart,
                        u32 dma, u8 stream, u8 channel)
{
    s->dma = dma;
    s->usart = usart;
    s->stream = stream;
    s->channel = channel;
    chBSemInit(&s->ready_sem, TRUE);
    s->byte_counter = 0;
    s->last_byte_ticks = chTimeNow();

    /* Enable clock to DMA peripheral. */
    if (dma == DMA1)
        RCC_AHB1ENR |= RCC_AHB1ENR_DMA1EN;
    else if (dma == DMA2)
        RCC_AHB1ENR |= RCC_AHB1ENR_DMA2EN;

    /* Enable RX DMA on the USART. */
    usart_enable_rx_dma(usart);

    /* Make sure stream is disabled to start. */
    DMA_SCR(dma, stream) &= ~DMA_SxCR_EN;
    /* RM0090 - 9.3.17 : Supposed to wait until enable bit reads '0' before we
     * write to registers. */
    while (DMA_SCR(dma, stream) & DMA_SxCR_EN) ;
    /* RM0090 - 9.3.17 : Supposed to clear any interrupts in DMA status register
     * before we reconfigure registers. */
    dma_clear_interrupt_flags(dma, stream, DMA_ISR_FLAGS);

    /* Configure the DMA controller. */
    DMA_SCR(dma, stream) = 0;
    DMA_SCR(dma, stream) =
        /* Error interrupts. */
        DMA_SxCR_DMEIE | DMA_SxCR_TEIE |
        /* Transfer complete interrupt. */
        DMA_SxCR_TCIE |
        /* Enable circular buffer mode. */
        DMA_SxCR_CIRC |
        DMA_SxCR_DIR_PERIPHERAL_TO_MEM |
        /* Increment the memory address after each transfer. */
        DMA_SxCR_MINC |
        /* 8 bit transfers from USART peripheral. */
        DMA_SxCR_PSIZE_8BIT |
        /* and to memory. */
        DMA_SxCR_MSIZE_8BIT |
        /* Low priority. */
        DMA_SxCR_PL_LOW |
        /* The channel selects which request line will trigger a transfer.
         * (see CD00225773.pdf Table 23). */
        DMA_SxCR_CHSEL(channel);

    /* Transfer up to the length of the buffer. */
    DMA_SNDTR(dma, stream) = USART_RX_BUFFER_LEN;

    /* DMA from the USART data register... */
    DMA_SPAR(dma, stream) = &USART_DR(usart);
    /* ...to the RX buffer. */
    DMA_SM0AR(dma, stream) = s->buff;

    /* Buffer is empty to begin with. */
    s->rd = 0;
    s->rd_wraps = s->wr_wraps = 0;

    /* Enable DMA interrupts for this stream with the NVIC. */
    if (dma == DMA1)
        nvicEnableVector(dma_irq_lookup[0][stream],
                         CORTEX_PRIORITY_MASK(USART_DMA_ISR_PRIORITY));
    else if (dma == DMA2)
        nvicEnableVector(dma_irq_lookup[1][stream],
                         CORTEX_PRIORITY_MASK(USART_DMA_ISR_PRIORITY));

    /* These reads clear error flags before enabling DMA */
    (void)USART_SR(usart);
    (void)USART_DR(usart);

    /* Enable the DMA channel. */
    DMA_SCR(dma, stream) |= DMA_SxCR_EN;
}
/** Mask the direct-mode-error interrupt for a DMA stream. */
void dma_disable_direct_mode_error_interrupt(u32 dma, u8 stream)
{
    u32 cr = DMA_SCR(dma, stream);
    cr &= ~DMA_SxCR_DMEIE;
    DMA_SCR(dma, stream) = cr;
}