/* Run a check of memory-to-memory DMA */
void dma_test(enum dma_channel stream)
{
	stm32_dma_stream_t *dma_stream = dma_get_channel(stream);
	uint32_t ctrl;
	/* "periph" plays the role of the peripheral source; both buffers
	 * live on the stack, so this is really memory-to-memory. */
	char periph[32], memory[32];
	unsigned count = sizeof(periph);
	int i;

	/* Known patterns: destination zeroed, source = 10, 11, 12, ... */
	memset(memory, '\0', sizeof(memory));
	for (i = 0; i < count; i++)
		periph[i] = 10 + i;

	dma_clear_isr(stream);
	/* Following the order in Doc ID 15965 Rev 5 p194 */
	dma_stream->spar = (uint32_t)periph;
	dma_stream->sm0ar = (uint32_t)memory;
	dma_stream->sndtr = count;
	/* Direct mode (FIFO disabled) */
	dma_stream->sfcr &= ~STM32_DMA_SFCR_DMDIS;
	ctrl = STM32_DMA_CCR_PL_MEDIUM;
	dma_stream->scr = ctrl;

	/* Increment both pointers, memory-to-memory direction */
	ctrl |= STM32_DMA_CCR_MINC;
	ctrl |= STM32_DMA_CCR_DIR_M2M;
	ctrl |= STM32_DMA_CCR_PINC;
	dma_stream->scr = ctrl;
	dma_dump(stream);
	/* Kick off the transfer */
	dma_stream->scr = ctrl | STM32_DMA_CCR_EN;

	/*
	 * NOTE(review): results are read back immediately without waiting
	 * for transfer complete; presumably M2M DMA finishes faster than
	 * the console prints — confirm if this test ever shows zeros.
	 */
	for (i = 0; i < count; i++)
		CPRINTF("%d/%d ", periph[i], memory[i]);
	CPRINTF("\ncount=%d\n", dma_stream->sndtr);
	dma_dump(stream);
}
/*
 * Prepare a channel for transmit. Not yet implemented on this MCU: the
 * channel is looked up but no transaction is configured.
 *
 * @param option	DMA option (channel/peripheral/flags) to use
 * @param count		Number of bytes to transfer
 * @param memory	Source buffer for the transmit
 */
void dma_prepare_tx(const struct dma_option *option, unsigned count,
		    const void *memory)
{
	rotor_mcu_dma_chan_t *chan = dma_get_channel(option->channel);

	/* Silence unused warnings until the transfer is implemented */
	(void)chan;
	(void)count;
	(void)memory;

	/* TODO(aaboagye): Actually set up a transaction. */
}
/* Stop a DMA channel by clearing its enable bit, if it is running. */
void dma_disable(enum dma_channel channel)
{
	stm32_dma_chan_t *dma_chan = dma_get_channel(channel);

	/* Only touch the register when the channel is actually enabled */
	if (dma_chan->ccr & STM32_DMA_CCR_EN)
		dma_chan->ccr &= ~STM32_DMA_CCR_EN;
}
/*
 * Start sending a response packet to the master without blocking.
 *
 * Copies the packet into the shared out_msg buffer (unless it is already
 * there), hand-shakes with the master, sends the first byte via the data
 * register and arms DMA for the rest of the packet.
 *
 * @param resp	Response packet to send (header + payload)
 * @return EC_SUCCESS, or EC_ERROR_OVERFLOW if the packet is too big.
 */
int spi_slave_send_response_async(struct spi_comm_packet *resp)
{
	int size = resp->size + SPI_PACKET_HEADER_SIZE;
	stm32_spi_regs_t *spi = STM32_SPI1_REGS;

	if (size > SPI_PACKET_MAX_SIZE)
		return EC_ERROR_OVERFLOW;

	/* Skip the copy if the caller built the packet in place */
	if (out_msg != (uint8_t *)resp)
		memcpy(out_msg, resp, size);

	master_slave_sync(100);

	/* Drain any stale byte left in the data register */
	if (spi->sr & STM32_SPI_SR_RXNE)
		in_msg[0] = spi->dr;
	/* Pre-load the first byte by hand; DMA takes over from byte 1 */
	spi->dr = out_msg[0];

	/* Set N_CHG (master SPI_NSS) to high */
	STM32_GPIO_BSRR(GPIO_A) = 1 << 1;

	/* Wait for the clocked-in byte that matches our first byte out */
	while (!(spi->sr & STM32_SPI_SR_RXNE))
		;
	in_msg[0] = spi->dr;

	/* Arm DMA for the remaining size-1 bytes in both directions */
	dma_clear_isr(STM32_DMAC_SPI1_TX);
	dma_clear_isr(STM32_DMAC_SPI1_RX);
	dma_start_rx(&dma_rx_option, size - 1, in_msg);
	dma_prepare_tx(&dma_tx_option, size - 1, out_msg + 1);
	dma_go(dma_get_channel(STM32_DMAC_SPI1_TX));

	master_slave_sync(5);

	return EC_SUCCESS;
}
/* Run a check of memory-to-memory DMA */
void dma_test(void)
{
	enum dma_channel channel = STM32_DMAC_CH4;
	stm32_dma_chan_t *chan = dma_get_channel(channel);
	uint32_t ctrl;
	/* "periph" plays the role of the peripheral source; both buffers
	 * live on the stack, so this is really memory-to-memory. */
	char periph[16], memory[16];
	unsigned count = sizeof(periph);
	int i;

	/* Known patterns: destination zeroed, source = 10, 11, 12, ... */
	memset(memory, '\0', sizeof(memory));
	for (i = 0; i < count; i++)
		periph[i] = 10 + i;

	/* Following the order in Doc ID 15965 Rev 5 p194 */
	chan->cpar = (uint32_t)periph;
	chan->cmar = (uint32_t)memory;
	chan->cndtr = count;
	ctrl = STM32_DMA_CCR_PL_MEDIUM;
	chan->ccr = ctrl;

	/* Increment both pointers; MEM2MEM mode, byte-sized transfers */
	ctrl |= STM32_DMA_CCR_MINC; /* | STM32_DMA_CCR_DIR */;
	ctrl |= STM32_DMA_CCR_MEM2MEM;
	ctrl |= STM32_DMA_CCR_PINC;
	/* ctrl |= STM32_DMA_CCR_MSIZE_32_BIT; */
	/* ctrl |= STM32_DMA_CCR_PSIZE_32_BIT; */
	chan->ccr = ctrl;
	/* Kick off the transfer */
	chan->ccr = ctrl | STM32_DMA_CCR_EN;

	/*
	 * NOTE(review): results are read back immediately without waiting
	 * for transfer complete; presumably M2M DMA finishes faster than
	 * the console prints — confirm if this test ever shows zeros.
	 */
	for (i = 0; i < count; i++)
		CPRINTF("%d/%d ", periph[i], memory[i]);
	CPRINTF("\ncount=%d\n", chan->cndtr);
}
/** * Called for V2 protocol to indicate that a command has completed * * Some commands can continue for a while. This function is called by * host_command when it completes. * */ static void spi_send_response(struct host_cmd_handler_args *args) { enum ec_status result = args->result; stm32_dma_chan_t *txdma; /* * If we're not processing, then the AP has already terminated the * transaction, and won't be listening for a response. */ if (state != SPI_STATE_PROCESSING) return; /* state == SPI_STATE_PROCESSING */ if (args->response_size > args->response_max) result = EC_RES_INVALID_RESPONSE; /* Transmit the reply */ txdma = dma_get_channel(STM32_DMAC_SPI1_TX); reply(txdma, result, args->response, args->response_size); /* * Before the state is set to SENDING, any CS de-assertion would * set setup_transaction_later to 1. */ state = SPI_STATE_SENDING; check_setup_transaction_later(); }
/* Configure a receive channel (peripheral -> memory) and start it. */
void dma_start_rx(const struct dma_option *option, unsigned count,
		  void *memory)
{
	stm32_dma_chan_t *rx_chan = dma_get_channel(option->channel);

	/* Memory pointer increments; peripheral address stays fixed */
	prepare_channel(rx_chan, count, option->periph, memory,
			option->flags | STM32_DMA_CCR_MINC);
	dma_go(rx_chan);
}
/* Mask the transfer-complete interrupt for a channel. */
void dma_disable_tc_interrupt(enum dma_channel channel)
{
	stm32_dma_chan_t *dma_chan = dma_get_channel(channel);

	/* Forget the waiting task first, so a late IRQ wakes nobody */
	id[channel] = TASK_ID_INVALID;

	dma_chan->ccr &= ~STM32_DMA_CCR_TCIE;
	task_disable_irq(dma_get_irq(channel));
}
/* Unmask the transfer-complete interrupt for a channel. */
void dma_enable_tc_interrupt(enum dma_channel channel)
{
	stm32_dma_chan_t *dma_chan = dma_get_channel(channel);

	/* Store task ID so the ISR knows which task to wake */
	id[channel] = task_get_current();

	dma_chan->ccr |= STM32_DMA_CCR_TCIE;
	task_enable_irq(dma_get_irq(channel));
}
/* Stop a DMA stream and wait for the hardware to acknowledge. */
void dma_disable(enum dma_channel ch)
{
	stm32_dma_stream_t *regs = dma_get_channel(ch);

	/* Nothing to do if the stream is not running */
	if (!(regs->scr & STM32_DMA_CCR_EN))
		return;

	regs->scr &= ~STM32_DMA_CCR_EN;
	/* The EN bit stays set until any in-flight transfer drains */
	while (regs->scr & STM32_DMA_CCR_EN)
		;
}
/* Mask the transfer-complete interrupt for a stream. */
void dma_disable_tc_interrupt(enum dma_channel stream)
{
	stm32_dma_stream_t *regs = dma_get_channel(stream);

	/* Mask at the stream and the NVIC before dropping the callback,
	 * so the ISR can never run with a NULL handler. */
	regs->scr &= ~STM32_DMA_CCR_TCIE;
	task_disable_irq(dma_get_irq(stream));

	dma_irq[stream].cb = NULL;
	dma_irq[stream].cb_data = NULL;
}
/* Configure a receive stream (peripheral -> memory) and start it. */
void dma_start_rx(const struct dma_option *option, unsigned count,
		  void *memory)
{
	stm32_dma_stream_t *rx_stream = dma_get_channel(option->channel);

	/* Memory pointer increments; direction is peripheral-to-memory */
	prepare_stream(option->channel, count, option->periph, memory,
		       option->flags | STM32_DMA_CCR_MINC |
		       STM32_DMA_CCR_DIR_P2M);
	dma_go(rx_stream);
}
/* Unmask the transfer-complete interrupt and register its callback. */
void dma_enable_tc_interrupt_callback(enum dma_channel stream,
				      void (*callback)(void *),
				      void *callback_data)
{
	stm32_dma_stream_t *regs = dma_get_channel(stream);

	/* Install the handler before the interrupt can fire */
	dma_irq[stream].cb = callback;
	dma_irq[stream].cb_data = callback_data;

	regs->scr |= STM32_DMA_CCR_TCIE;
	task_enable_irq(dma_get_irq(stream));
}
/* Print the stream registers and interrupt status for debugging. */
void dma_dump(enum dma_channel stream)
{
	stm32_dma_stream_t *regs = dma_get_channel(stream);

	CPRINTF("scr=%x, sndtr=%x, spar=%x, sm0ar=%x, sfcr=%x\n",
		regs->scr, regs->sndtr, regs->spar, regs->sm0ar,
		regs->sfcr);
	CPRINTF("stream %d, isr=%x, ifcr=%x\n", stream,
		STM32_DMA_GET_ISR(stream), STM32_DMA_GET_IFCR(stream));
}
/* Print the channel registers and interrupt status for debugging. */
void dma_dump(enum dma_channel channel)
{
	stm32_dma_regs_t *dma = STM32_DMA1_REGS;
	stm32_dma_chan_t *chan = dma_get_channel(channel);
	/* Each channel owns one 4-bit nibble of ISR/IFCR */
	int shift = channel * 4;

	CPRINTF("ccr=%x, cndtr=%x, cpar=%x, cmar=%x\n", chan->ccr,
		chan->cndtr, chan->cpar, chan->cmar);
	CPRINTF("chan %d, isr=%x, ifcr=%x\n", channel,
		(dma->isr >> shift) & 0xf, (dma->ifcr >> shift) & 0xf);
}
/* Configure a transmit channel (memory -> peripheral); caller starts it. */
void dma_prepare_tx(const struct dma_option *option, unsigned count,
		    const void *memory)
{
	stm32_dma_chan_t *tx_chan = dma_get_channel(option->channel);

	/*
	 * Dropping the const qualifier is safe here: a transmit channel
	 * only reads from the memory buffer.
	 */
	prepare_channel(tx_chan, count, option->periph, (void *)memory,
			option->flags | STM32_DMA_CCR_MINC |
			STM32_DMA_CCR_DIR);
}
/*
 * Exchange sz bytes over SPI via DMA: transmit out_buf while receiving
 * into in_buf. Returns 0 on success, non-zero if either wait failed.
 */
static int spi_master_read_write_byte(uint8_t *in_buf, uint8_t *out_buf,
				      int sz)
{
	int rv;

	/* Arm RX first so no incoming byte is lost, then start TX */
	dma_start_rx(&dma_rx_option, sz, in_buf);
	dma_prepare_tx(&dma_tx_option, sz, out_buf);
	dma_go(dma_get_channel(STM32_DMAC_SPI1_TX));

	rv = dma_wait(STM32_DMAC_SPI1_TX);
	rv |= dma_wait(STM32_DMAC_SPI1_RX);

	/* Tear down both channels and clear pending status */
	dma_disable(STM32_DMAC_SPI1_TX);
	dma_disable(STM32_DMAC_SPI1_RX);
	dma_clear_isr(STM32_DMAC_SPI1_TX);
	dma_clear_isr(STM32_DMAC_SPI1_RX);

	return rv;
}
/*
 * Debug helper: sample a stream's remaining-transfer count twice, 100 ms
 * apart, dumping the buffer contents after each delay to show progress.
 */
void dma_check(enum dma_channel stream, char *buf)
{
	stm32_dma_stream_t *regs = dma_get_channel(stream);
	int remaining = regs->sndtr;
	int pass, j;

	CPRINTF("c=%d\n", remaining);
	for (pass = 0; pass < 2; pass++) {
		udelay(100 * MSEC);
		CPRINTF("c=%d\n", regs->sndtr);
		for (j = 0; j < remaining; j++)
			CPRINTF("%02x ", buf[j]);
	}
}
/*
 * Busy-wait until at least nb bytes have been captured by the RX DMA
 * channel for this port, or the timer signals an abort condition.
 *
 * @param port	PD port index
 * @param nb	Number of bytes required
 * @return nb on success, -1 on timeout/abort before nb bytes arrived.
 */
static int wait_bits(int port, int nb)
{
	int avail;
	stm32_dma_chan_t *rx = dma_get_channel(DMAC_TIM_RX(port));

	avail = dma_bytes_done(rx, PD_MAX_RAW_SIZE);
	if (avail < nb) { /* no received yet ... */
		/*
		 * Spin until enough bytes arrive or the timer status
		 * register flags an event (bit 2 — NOTE(review): confirm
		 * which CCxIF/event flag this is for the RX timer).
		 */
		while ((dma_bytes_done(rx, PD_MAX_RAW_SIZE) < nb)
			&& !(pd_phy[port].tim_rx->sr & 4))
			; /* optimized for latency, not CPU usage ... */
		if (dma_bytes_done(rx, PD_MAX_RAW_SIZE) < nb) {
			CPRINTS("PD TMOUT RX %d/%d",
				dma_bytes_done(rx, PD_MAX_RAW_SIZE), nb);
			return -1;
		}
	}
	return nb;
}
/*
 * Debug helper: sample a channel's remaining-transfer count twice, 100 ms
 * apart, dumping the buffer contents after each delay to show progress.
 */
void dma_check(enum dma_channel channel, char *buf)
{
	stm32_dma_chan_t *chan = dma_get_channel(channel);
	int remaining = chan->cndtr;
	int pass, j;

	CPRINTF("c=%d\n", remaining);
	for (pass = 0; pass < 2; pass++) {
		udelay(100 * MSEC);
		CPRINTF("c=%d\n", chan->cndtr);
		for (j = 0; j < remaining; j++)
			CPRINTF("%02x ", buf[j]);
	}
}
/*
 * NSS-assertion handler: receive one command packet from the master via
 * DMA, validate it, and dispatch it (or NACK on any error).
 */
static void spi_nss_interrupt(void)
{
	const struct spi_comm_packet *cmd =
		(const struct spi_comm_packet *)in_msg;
	stm32_spi_regs_t *spi = STM32_SPI1_REGS;

	/* Drain any stale byte left in the data register */
	if (spi->sr & STM32_SPI_SR_RXNE)
		in_msg[0] = spi->dr;

	master_slave_sync(5);

	/* Read in the packet size */
	while (!(spi->sr & STM32_SPI_SR_RXNE))
		;
	in_msg[0] = spi->dr;

	/* Read in the rest of the packet */
	dma_clear_isr(STM32_DMAC_SPI1_RX);
	dma_start_rx(&dma_rx_option,
		     in_msg[0] + SPI_PACKET_HEADER_SIZE - 1, in_msg + 1);
	dma_prepare_tx(&dma_tx_option,
		       in_msg[0] + SPI_PACKET_HEADER_SIZE - 1, out_msg);
	dma_go(dma_get_channel(STM32_DMAC_SPI1_TX));

	master_slave_sync(5);

	/* Reject short transfers and CRC failures before dispatching */
	if (dma_wait(STM32_DMAC_SPI1_RX) != EC_SUCCESS) {
		debug_printf("SPI: Incomplete packet\n");
		spi_slave_nack();
		return;
	}
	if (spi->sr & STM32_SPI_SR_CRCERR) {
		debug_printf("SPI: CRC mismatch\n");
		spi_slave_nack();
		return;
	}

	/* Dispatch on command type; anything unknown gets a NACK */
	if (cmd->cmd_sts == TS_CMD_HELLO)
		spi_slave_hello_back(cmd);
	else if (cmd->cmd_sts == TS_CMD_FULL_SCAN)
		touch_scan_slave_start();
	else
		spi_slave_nack();
}
/*
 * Transmit len bytes from buf as an I2C slave using DMA.
 *
 * Works from both task and interrupt context: in interrupt context the
 * transfer completion is polled; otherwise the task sleeps on the DMA
 * transfer-complete interrupt (with timeout and bus reset on failure).
 *
 * @param port	I2C port number
 * @param buf	Data to transmit
 * @param len	Number of bytes to transmit
 * @return len (the transfer is assumed complete or the bus was reset).
 */
static int i2c_write_raw_slave(int port, void *buf, int len)
{
	stm32_dma_chan_t *chan;
	int rv;

	/* we don't want to race with TxE interrupt event */
	disable_i2c_interrupt(port);

	/* Configuring DMA1 channel DMAC_SLAVE_TX */
	enable_ack(port);
	chan = dma_get_channel(DMAC_SLAVE_TX);
	dma_prepare_tx(dma_tx_option + port, len, buf);

	/* Start the DMA */
	dma_go(chan);

	/* Configuring i2c to use DMA (CR2 DMA-enable bit) */
	STM32_I2C_CR2(port) |= (1 << 11);

	if (in_interrupt_context()) {
		/* Poll for the transmission complete flag */
		dma_wait(DMAC_SLAVE_TX);
		dma_clear_isr(DMAC_SLAVE_TX);
	} else {
		/* Wait for the transmission complete Interrupt */
		dma_enable_tc_interrupt(DMAC_SLAVE_TX);
		rv = task_wait_event(DMA_TRANSFER_TIMEOUT_US);
		dma_disable_tc_interrupt(DMAC_SLAVE_TX);
		if (!(rv & TASK_EVENT_WAKE)) {
			/* Timed out: reinitialize the port to recover */
			CPRINTS("Slave timeout, resetting i2c");
			i2c_init_port(port);
		}
	}

	/* Tear down DMA and restore interrupt-driven operation */
	dma_disable(DMAC_SLAVE_TX);
	STM32_I2C_CR2(port) &= ~(1 << 11);

	enable_i2c_interrupt(port);

	return len;
}
/**
 * Prepare a stream for use and start it
 *
 * @param stream	stream to read
 * @param count		Number of bytes to transfer
 * @param periph	Pointer to peripheral data register
 * @param memory	Pointer to memory address for receive/transmit
 * @param flags		DMA flags for the control register.
 */
static void prepare_stream(enum dma_channel stream, unsigned count,
			   void *periph, void *memory, unsigned flags)
{
	stm32_dma_stream_t *dma_stream = dma_get_channel(stream);
	uint32_t ccr = STM32_DMA_CCR_PL_VERY_HIGH;

	/* The stream must be fully stopped before it can be reprogrammed */
	dma_disable(stream);
	dma_clear_isr(stream);

	/* Following the order in DocID026448 Rev 1 (RM0383) p181 */
	dma_stream->spar = (uint32_t)periph;
	dma_stream->sm0ar = (uint32_t)memory;
	dma_stream->sndtr = count;
	dma_stream->scr = ccr;
	/* Select the peripheral request channel before the other flags */
	ccr |= flags & STM32_DMA_CCR_CHANNEL_MASK;
	dma_stream->scr = ccr;
	/* Direct mode (FIFO disabled) */
	dma_stream->sfcr &= ~STM32_DMA_SFCR_DMDIS;
	ccr |= flags;
	dma_stream->scr = ccr;
}
/**
 * Called to send a response back to the host.
 *
 * Some commands can continue for a while. This function is called by
 * host_command when it completes.
 *
 */
static void spi_send_response_packet(struct host_packet *pkt)
{
	stm32_dma_chan_t *txdma;

	/*
	 * If we're not processing, then the AP has already terminated the
	 * transaction, and won't be listening for a response.
	 */
	if (state != SPI_STATE_PROCESSING)
		return;

	/* state == SPI_STATE_PROCESSING */

	/* Append our past-end byte, which we reserved space for. */
	((uint8_t *)pkt->response)[pkt->response_size + 0] = EC_SPI_PAST_END;
#ifdef CHIP_FAMILY_STM32F0
	/* Make sure we are going to be outputting it properly when the DMA
	 * ends due to the TX FIFO bug on the F0. See crbug.com/31390
	 */
	((uint8_t *)pkt->response)[pkt->response_size + 1] = EC_SPI_PAST_END;
	((uint8_t *)pkt->response)[pkt->response_size + 2] = EC_SPI_PAST_END;
	((uint8_t *)pkt->response)[pkt->response_size + 3] = EC_SPI_PAST_END;
#endif

	/* Transmit the reply: preamble + response + past-end trailer */
	txdma = dma_get_channel(STM32_DMAC_SPI1_TX);
	dma_prepare_tx(&dma_tx_option,
		       sizeof(out_preamble) + pkt->response_size +
		       EC_SPI_PAST_END_LENGTH, out_msg);
	dma_go(txdma);

	/*
	 * Before the state is set to SENDING, any CS de-assertion would
	 * set setup_transaction_later to 1.
	 */
	state = SPI_STATE_SENDING;
	check_setup_transaction_later();
}
/*
 * Wait for the slave to signal a response, then start receiving it via
 * DMA without blocking on the full transfer.
 *
 * Reads the size byte by hand, then arms RX/TX DMA for the remainder of
 * the packet.
 *
 * @return EC_SUCCESS if the transfer was started,
 *         EC_ERROR_TIMEOUT if the slave never signalled readiness.
 */
int spi_master_wait_response_async(void)
{
	stm32_spi_regs_t *spi = STM32_SPI1_REGS;
	int size;

	master_slave_sync(40);

	/* Wait (bounded) for the slave-ready signal on PA0 */
	if (wait_for_signal(GPIO_A, 1 << 0, 1, 40 * MSEC))
		goto err_wait_resp_async;

	/* Discard potential garbage in SPI DR */
	if (spi->sr & STM32_SPI_SR_RXNE)
		in_msg[0] = spi->dr;

	/* Get the packet size */
	spi->dr = DUMMY_DATA;
	while (!(spi->sr & STM32_SPI_SR_RXNE))
		;
	in_msg[0] = spi->dr;
	size = in_msg[0] + SPI_PACKET_HEADER_SIZE;

	master_slave_sync(5);

	dma_clear_isr(STM32_DMAC_SPI1_TX);
	dma_clear_isr(STM32_DMAC_SPI1_RX);
	/* Get the rest of the packet*/
	dma_start_rx(&dma_rx_option, size - 1, in_msg + 1);
	dma_prepare_tx(&dma_tx_option, size - 1, out_msg);
	dma_go(dma_get_channel(STM32_DMAC_SPI1_TX));

	return EC_SUCCESS;

err_wait_resp_async:
	/* Set CS1 (slave SPI_NSS) to high */
	STM32_GPIO_BSRR(GPIO_A) = 1 << 6;
	return EC_ERROR_TIMEOUT;
}
/**
 * Handle an event on the NSS pin
 *
 * A falling edge of NSS indicates that the master is starting a new
 * transaction. A rising edge indicates that we have finished.
 *
 * @param signal	GPIO signal for the NSS pin
 */
void spi_event(enum gpio_signal signal)
{
	stm32_dma_chan_t *rxdma;
	uint16_t *nss_reg;
	uint32_t nss_mask;
	uint16_t i;

	/* If not enabled, ignore glitches on NSS */
	if (!enabled)
		return;

	/* Check chip select. If it's high, the AP ended a transaction. */
	nss_reg = gpio_get_level_reg(GPIO_SPI1_NSS, &nss_mask);
	if (REG16(nss_reg) & nss_mask) {
		enable_sleep(SLEEP_MASK_SPI);

		/*
		 * If the buffer is still used by the host command, postpone
		 * the DMA rx setup.
		 */
		if (state == SPI_STATE_PROCESSING) {
			setup_transaction_later = 1;
			return;
		}

		/* Set up for the next transaction */
		spi_init(); /* Fix for bug chrome-os-partner:31390 */
		return;
	}
	disable_sleep(SLEEP_MASK_SPI);

	/* Chip select is low = asserted */
	if (state != SPI_STATE_READY_TO_RX) {
		/*
		 * AP started a transaction but we weren't ready for it.
		 * Tell AP we weren't ready, and ignore the received data.
		 */
		CPRINTS("SPI not ready");
		tx_status(EC_SPI_NOT_READY);
		state = SPI_STATE_RX_BAD;
		return;
	}

	/* We're now inside a transaction */
	state = SPI_STATE_RECEIVING;
	tx_status(EC_SPI_RECEIVING);
	rxdma = dma_get_channel(STM32_DMAC_SPI1_RX);

	/* Wait for version, command, length bytes */
	if (wait_for_bytes(rxdma, 3, nss_reg, nss_mask))
		goto spi_event_error;

	if (in_msg[0] == EC_HOST_REQUEST_VERSION) {
		/* Protocol version 3 */
		struct ec_host_request *r = (struct ec_host_request *)in_msg;
		int pkt_size;

		/* Wait for the rest of the command header */
		if (wait_for_bytes(rxdma, sizeof(*r), nss_reg, nss_mask))
			goto spi_event_error;

		/*
		 * Check how big the packet should be. We can't just wait to
		 * see how much data the host sends, because it will keep
		 * sending dummy data until we respond.
		 */
		pkt_size = host_request_expected_size(r);
		if (pkt_size == 0 || pkt_size > sizeof(in_msg))
			goto spi_event_error;

		/* Wait for the packet data */
		if (wait_for_bytes(rxdma, pkt_size, nss_reg, nss_mask))
			goto spi_event_error;

		spi_packet.send_response = spi_send_response_packet;

		spi_packet.request = in_msg;
		spi_packet.request_temp = NULL;
		spi_packet.request_max = sizeof(in_msg);
		spi_packet.request_size = pkt_size;

		/* Response must start with the preamble */
		memcpy(out_msg, out_preamble, sizeof(out_preamble));
		spi_packet.response = out_msg + sizeof(out_preamble);
		/* Reserve space for the preamble and trailing past-end byte */
		spi_packet.response_max = sizeof(out_msg)
			- sizeof(out_preamble) - EC_SPI_PAST_END_LENGTH;
		spi_packet.response_size = 0;

		spi_packet.driver_result = EC_RES_SUCCESS;

		/* Move to processing state */
		state = SPI_STATE_PROCESSING;
		tx_status(EC_SPI_PROCESSING);

		host_packet_receive(&spi_packet);
		return;
	} else if (in_msg[0] >= EC_CMD_VERSION0) {
		/*
		 * Protocol version 2
		 *
		 * TODO(crosbug.com/p/20257): Remove once kernel supports
		 * version 3.
		 */
#ifdef CHIP_FAMILY_STM32F0
		CPRINTS("WARNING: Protocol version 2 is not supported on the F0"
			" line due to crbug.com/31390");
#endif
		args.version = in_msg[0] - EC_CMD_VERSION0;
		args.command = in_msg[1];
		args.params_size = in_msg[2];

		/* Wait for parameters */
		if (wait_for_bytes(rxdma, 3 + args.params_size,
				   nss_reg, nss_mask))
			goto spi_event_error;

		/*
		 * Params are not 32-bit aligned in protocol version 2. As a
		 * workaround, move them to the beginning of the input buffer
		 * so they are aligned.
		 */
		if (args.params_size)
			memmove(in_msg, in_msg + 3, args.params_size);

		args.params = in_msg;
		args.send_response = spi_send_response;

		/* Allow room for the header bytes */
		args.response = out_msg + SPI_PROTO2_OFFSET;
		args.response_max = sizeof(out_msg) - SPI_PROTO2_OVERHEAD;
		args.response_size = 0;
		args.result = EC_RES_SUCCESS;

		/* Move to processing state */
		state = SPI_STATE_PROCESSING;
		tx_status(EC_SPI_PROCESSING);

		host_command_received(&args);
		return;
	}

spi_event_error:
	/* Error, timeout, or protocol we can't handle. Ignore data. */
	tx_status(EC_SPI_RX_BAD_DATA);
	state = SPI_STATE_RX_BAD;
	CPRINTS("SPI rx bad data");

	/* Dump whatever bytes the RX DMA managed to capture */
	CPRINTF("in_msg=[");
	for (i = 0; i < dma_bytes_done(rxdma, sizeof(in_msg)); i++)
		CPRINTF("%02x ", in_msg[i]);
	CPRINTF("]\n");
}