esp_err_t spi_bus_free(spi_host_device_t host)
{
    int x;
    SPI_CHECK(host>=SPI_HOST && host<=VSPI_HOST, "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host]!=NULL, "host not in use", ESP_ERR_INVALID_STATE);
    //All devices have to be removed from the bus before it can be freed.
    for (x=0; x<NO_CS; x++) {
        SPI_CHECK(spihost[host]->device[x]==NULL, "not all CSses freed", ESP_ERR_INVALID_STATE);
    }
    if (spihost[host]->dma_chan > 0) {
        spicommon_dma_chan_free(spihost[host]->dma_chan);
    }
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_delete(spihost[host]->pm_lock);
#endif
    //Disable the transaction-done interrupt before detaching the handler and releasing the peripheral.
    spihost[host]->hw->slave.trans_inten=0;
    spihost[host]->hw->slave.trans_done=0;
    esp_intr_free(spihost[host]->intr);
    spicommon_periph_free(host);
    free(spihost[host]->dmadesc_tx);
    free(spihost[host]->dmadesc_rx);
    free(spihost[host]);
    spihost[host]=NULL;
    return ESP_OK;
}
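//Illustrative sketch (not part of the driver): the teardown order spi_bus_free() expects, assuming
//driver/spi_master.h is included. Every device added with spi_bus_add_device() must be removed first,
//otherwise the "not all CSses freed" check above fails. The handle and host choice are hypothetical.
esp_err_t example_spi_master_teardown(spi_device_handle_t dev)
{
    esp_err_t ret = spi_bus_remove_device(dev);   //frees the CS slot this device occupied
    if (ret != ESP_OK) return ret;
    return spi_bus_free(HSPI_HOST);               //all device[x] slots are now NULL, so this succeeds
}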
esp_err_t spi_slave_free(spi_host_device_t host)
{
    SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
    if (spihost[host]->trans_queue) vQueueDelete(spihost[host]->trans_queue);
    if (spihost[host]->ret_queue) vQueueDelete(spihost[host]->ret_queue);
    free(spihost[host]->dmadesc_tx);
    free(spihost[host]->dmadesc_rx);
    free(spihost[host]);
    spihost[host] = NULL;
    spicommon_periph_free(host);
    return ESP_OK;
}
esp_err_t spi_bus_initialize(spi_host_device_t host, const spi_bus_config_t *bus_config, int dma_chan)
{
    bool native, spi_chan_claimed, dma_chan_claimed;
    /* ToDo: remove this when we have flash operations cooperating with this */
    SPI_CHECK(host!=SPI_HOST, "SPI1 is not supported", ESP_ERR_NOT_SUPPORTED);
    SPI_CHECK(host>=SPI_HOST && host<=VSPI_HOST, "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(dma_chan >= 0 && dma_chan <= 2, "invalid dma channel", ESP_ERR_INVALID_ARG);

    spi_chan_claimed=spicommon_periph_claim(host);
    SPI_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    if (dma_chan != 0) {
        dma_chan_claimed=spicommon_dma_chan_claim(dma_chan);
        if (!dma_chan_claimed) {
            spicommon_periph_free(host);
            SPI_CHECK(dma_chan_claimed, "dma channel already in use", ESP_ERR_INVALID_STATE);
        }
    }

    spihost[host]=malloc(sizeof(spi_host_t));
    if (spihost[host]==NULL) goto nomem;
    memset(spihost[host], 0, sizeof(spi_host_t));
#ifdef CONFIG_PM_ENABLE
    esp_err_t err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_master",
            &spihost[host]->pm_lock);
    if (err != ESP_OK) {
        goto nomem;
    }
#endif //CONFIG_PM_ENABLE

    spicommon_bus_initialize_io(host, bus_config, dma_chan, SPICOMMON_BUSFLAG_MASTER|SPICOMMON_BUSFLAG_QUAD, &native);
    spihost[host]->no_gpio_matrix=native;
    spihost[host]->dma_chan=dma_chan;
    if (dma_chan == 0) {
        //Without DMA, transfers are limited to what fits in the SPI data buffer.
        spihost[host]->max_transfer_sz = 32;
    } else {
        //See how many dma descriptors we need and allocate them
        int dma_desc_ct=(bus_config->max_transfer_sz+SPI_MAX_DMA_LEN-1)/SPI_MAX_DMA_LEN;
        if (dma_desc_ct==0) dma_desc_ct=1; //default to 4k when max is not given
        spihost[host]->max_transfer_sz = dma_desc_ct*SPI_MAX_DMA_LEN;
        spihost[host]->dmadesc_tx=heap_caps_malloc(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA);
        spihost[host]->dmadesc_rx=heap_caps_malloc(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA);
        if (!spihost[host]->dmadesc_tx || !spihost[host]->dmadesc_rx) goto nomem;
    }
    esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void*)spihost[host], &spihost[host]->intr);
    spihost[host]->hw=spicommon_hw_for_host(host);
    spihost[host]->cur_cs = NO_CS;
    spihost[host]->prev_cs = NO_CS;

    //Reset DMA
    spihost[host]->hw->dma_conf.val|=SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST;
    spihost[host]->hw->dma_out_link.start=0;
    spihost[host]->hw->dma_in_link.start=0;
    spihost[host]->hw->dma_conf.val&=~(SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST);

    //Reset timing
    spihost[host]->hw->ctrl2.val=0;

    //Disable unneeded ints
    spihost[host]->hw->slave.rd_buf_done=0;
    spihost[host]->hw->slave.wr_buf_done=0;
    spihost[host]->hw->slave.rd_sta_done=0;
    spihost[host]->hw->slave.wr_sta_done=0;
    spihost[host]->hw->slave.rd_buf_inten=0;
    spihost[host]->hw->slave.wr_buf_inten=0;
    spihost[host]->hw->slave.rd_sta_inten=0;
    spihost[host]->hw->slave.wr_sta_inten=0;

    //Force a transaction done interrupt. This interrupt won't fire yet because we initialized the SPI interrupt as
    //disabled. This way, we can just enable the SPI interrupt and the interrupt handler will kick in, handling
    //any transactions that are queued.
    spihost[host]->hw->slave.trans_inten=1;
    spihost[host]->hw->slave.trans_done=1;

    return ESP_OK;

nomem:
    if (spihost[host]) {
        free(spihost[host]->dmadesc_tx);
        free(spihost[host]->dmadesc_rx);
#ifdef CONFIG_PM_ENABLE
        if (spihost[host]->pm_lock) {
            esp_pm_lock_delete(spihost[host]->pm_lock);
        }
#endif
    }
    free(spihost[host]);
    spihost[host]=NULL;
    spicommon_periph_free(host);
    //Only release the DMA channel if one was actually claimed above.
    if (dma_chan != 0) {
        spicommon_dma_chan_free(dma_chan);
    }
    return ESP_ERR_NO_MEM;
}
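//Illustrative sketch (not part of the driver): a typical spi_bus_initialize() call as seen from
//application code, assuming driver/spi_master.h is included. The pin numbers, DMA channel, clock
//speed and CS pin below are hypothetical placeholders, not values mandated by this driver.
esp_err_t example_spi_master_setup(spi_device_handle_t *out_handle)
{
    spi_bus_config_t buscfg = {
        .mosi_io_num = 23,          //hypothetical pin assignment
        .miso_io_num = 19,          //hypothetical pin assignment
        .sclk_io_num = 18,          //hypothetical pin assignment
        .quadwp_io_num = -1,        //-1 = pin not used
        .quadhd_io_num = -1,
        .max_transfer_sz = 4096,    //sizes the DMA descriptor allocation above
    };
    spi_device_interface_config_t devcfg = {
        .clock_speed_hz = 1*1000*1000,  //1 MHz, hypothetical
        .mode = 0,
        .spics_io_num = 5,              //hypothetical CS pin
        .queue_size = 3,
    };
    //DMA channel 1; pass 0 instead to take the size-limited non-DMA path handled above.
    esp_err_t ret = spi_bus_initialize(HSPI_HOST, &buscfg, 1);
    if (ret != ESP_OK) return ret;
    return spi_bus_add_device(HSPI_HOST, &devcfg, out_handle);
}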
esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *bus_config, const spi_slave_interface_config_t *slave_config, int dma_chan)
{
    bool native, claimed;
    //We only support HSPI/VSPI, period.
    SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
    claimed = spicommon_periph_claim(host);
    SPI_CHECK(claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spihost[host] = malloc(sizeof(spi_slave_t));
    if (spihost[host] == NULL) goto nomem;
    memset(spihost[host], 0, sizeof(spi_slave_t));
    memcpy(&spihost[host]->cfg, slave_config, sizeof(spi_slave_interface_config_t));

    spicommon_bus_initialize_io(host, bus_config, dma_chan, SPICOMMON_BUSFLAG_SLAVE, &native);
    gpio_set_direction(slave_config->spics_io_num, GPIO_MODE_INPUT);
    spicommon_cs_initialize(host, slave_config->spics_io_num, 0, native == false);
    spihost[host]->no_gpio_matrix = native;
    spihost[host]->dma_chan = dma_chan;

    if (dma_chan != 0) {
        //See how many dma descriptors we need and allocate them
        int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;
        if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given
        spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;
        spihost[host]->dmadesc_tx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        spihost[host]->dmadesc_rx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        if (!spihost[host]->dmadesc_tx || !spihost[host]->dmadesc_rx) goto nomem;
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
        spihost[host]->max_transfer_sz = 16 * 4;
    }

    //Create queues
    spihost[host]->trans_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
    spihost[host]->ret_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
    if (!spihost[host]->trans_queue || !spihost[host]->ret_queue) goto nomem;

    esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void *)spihost[host], &spihost[host]->intr);
    spihost[host]->hw = spicommon_hw_for_host(host);

    //Configure slave
    spihost[host]->hw->clock.val = 0;
    spihost[host]->hw->user.val = 0;
    spihost[host]->hw->ctrl.val = 0;
    spihost[host]->hw->slave.wr_rd_buf_en = 1; //not sure if needed
    spihost[host]->hw->user.doutdin = 1; //we only support full duplex
    spihost[host]->hw->user.sio = 0;
    spihost[host]->hw->slave.slave_mode = 1;
    spihost[host]->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
    spihost[host]->hw->dma_out_link.start = 0;
    spihost[host]->hw->dma_in_link.start = 0;
    spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);
    spihost[host]->hw->dma_conf.out_data_burst_en = 1;
    spihost[host]->hw->slave.sync_reset = 1;
    spihost[host]->hw->slave.sync_reset = 0;

    bool nodelay = true;
    spihost[host]->hw->ctrl.rd_bit_order = (slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST) ? 1 : 0;
    spihost[host]->hw->ctrl.wr_bit_order = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0;

    //Set clock polarity/phase and the matching MISO delay for the requested SPI mode
    if (slave_config->mode == 0) {
        spihost[host]->hw->pin.ck_idle_edge = 0;
        spihost[host]->hw->user.ck_i_edge = 1;
        spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 2;
    } else if (slave_config->mode == 1) {
        spihost[host]->hw->pin.ck_idle_edge = 0;
        spihost[host]->hw->user.ck_i_edge = 0;
        spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 1;
    } else if (slave_config->mode == 2) {
        spihost[host]->hw->pin.ck_idle_edge = 1;
        spihost[host]->hw->user.ck_i_edge = 0;
        spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 1;
    } else if (slave_config->mode == 3) {
        spihost[host]->hw->pin.ck_idle_edge = 1;
        spihost[host]->hw->user.ck_i_edge = 1;
        spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 2;
    }

    //Reset DMA
    spihost[host]->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
    spihost[host]->hw->dma_out_link.start = 0;
    spihost[host]->hw->dma_in_link.start = 0;
    spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);

    //Disable unneeded ints
    spihost[host]->hw->slave.rd_buf_done = 0;
    spihost[host]->hw->slave.wr_buf_done = 0;
    spihost[host]->hw->slave.rd_sta_done = 0;
    spihost[host]->hw->slave.wr_sta_done = 0;
    spihost[host]->hw->slave.rd_buf_inten = 0;
    spihost[host]->hw->slave.wr_buf_inten = 0;
    spihost[host]->hw->slave.rd_sta_inten = 0;
    spihost[host]->hw->slave.wr_sta_inten = 0;

    //Force a transaction done interrupt. This interrupt won't fire yet because we initialized the SPI interrupt as
    //disabled. This way, we can just enable the SPI interrupt and the interrupt handler will kick in, handling
    //any transactions that are queued.
    spihost[host]->hw->slave.trans_inten = 1;
    spihost[host]->hw->slave.trans_done = 1;

    return ESP_OK;

nomem:
    if (spihost[host]) {
        if (spihost[host]->trans_queue) vQueueDelete(spihost[host]->trans_queue);
        if (spihost[host]->ret_queue) vQueueDelete(spihost[host]->ret_queue);
        free(spihost[host]->dmadesc_tx);
        free(spihost[host]->dmadesc_rx);
    }
    free(spihost[host]);
    spihost[host] = NULL;
    spicommon_periph_free(host);
    return ESP_ERR_NO_MEM;
}
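//Illustrative sketch (not part of the driver): typical slave setup plus one blocking transaction,
//assuming driver/spi_slave.h is included. Pin numbers, host choice and buffer sizes are hypothetical;
//the buffers are word-aligned statics in internal RAM so they can be used with a non-zero dma_chan.
esp_err_t example_spi_slave_echo(void)
{
    static uint8_t sendbuf[128] __attribute__((aligned(4)));
    static uint8_t recvbuf[128] __attribute__((aligned(4)));

    spi_bus_config_t buscfg = {
        .mosi_io_num = 23,      //hypothetical pin assignment
        .miso_io_num = 19,
        .sclk_io_num = 18,
        .quadwp_io_num = -1,    //-1 = pin not used
        .quadhd_io_num = -1,
    };
    spi_slave_interface_config_t slvcfg = {
        .mode = 0,
        .spics_io_num = 5,      //hypothetical CS pin
        .queue_size = 3,
        .flags = 0,
    };

    esp_err_t ret = spi_slave_initialize(VSPI_HOST, &buscfg, &slvcfg, 1);
    if (ret != ESP_OK) return ret;

    spi_slave_transaction_t t = {
        .length = sizeof(sendbuf) * 8,  //transaction length is given in bits
        .tx_buffer = sendbuf,
        .rx_buffer = recvbuf,
    };
    //Blocks until the master clocks a transaction; t.trans_len then holds the bits actually transferred.
    return spi_slave_transmit(VSPI_HOST, &t, portMAX_DELAY);
}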