static int tegra_spi_dma_finish(struct tegra_spi_channel *spi) { int ret; unsigned int todo; struct apb_dma * const apb_dma = (struct apb_dma *)TEGRA_APB_DMA_BASE; todo = read32(&spi->dma_in->regs->wcount); if (spi->dma_in) { while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) || dma_busy(spi->dma_in)) ; dma_stop(spi->dma_in); clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN); /* Disable secure access for the channel. */ clrbits_le32(&apb_dma->security_reg, SECURITY_EN_BIT(spi->dma_in->num)); dma_release(spi->dma_in); } if (spi->dma_out) { while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) || dma_busy(spi->dma_out)) ; clrbits_le32(&spi->regs->command1, SPI_CMD1_TX_EN); dma_stop(spi->dma_out); /* Disable secure access for the channel. */ clrbits_le32(&apb_dma->security_reg, SECURITY_EN_BIT(spi->dma_out->num)); dma_release(spi->dma_out); } if (fifo_error(spi)) { printk(BIOS_ERR, "%s: ERROR:\n", __func__); dump_dma_regs(spi->dma_out); dump_dma_regs(spi->dma_in); dump_spi_regs(spi); dump_fifo_status(spi); ret = -1; goto done; } ret = 0; done: spi->dma_in = NULL; spi->dma_out = NULL; return ret; }
/*
 * Write @p len bytes from @p data out on @p uart.
 *
 * Blocking and synchronous: on every path the function waits for the
 * transmitter to finish (wait_for_tx_complete) before returning.  With the
 * DMA module compiled in and a DMA stream configured for this UART, the
 * transfer is done by DMA; from interrupt context an in-flight DMA transfer
 * is suspended, the bytes are pushed out by polling, and the DMA transfer
 * is resumed afterwards.
 */
void uart_write(uart_t uart, const uint8_t *data, size_t len)
{
    assert(uart < UART_NUMOF);
#if DEVELHELP
    /* If tx is not enabled don't try to send */
    if (!(dev(uart)->CR1 & USART_CR1_TE)) {
        return;
    }
#endif
#ifdef MODULE_PERIPH_DMA
    /* Zero-length DMA transfers are not allowed; nothing to do anyway. */
    if (!len) {
        return;
    }
    if (uart_config[uart].dma != DMA_STREAM_UNDEF) {
        if (irq_is_in()) {
            /* Can't block on the DMA lock in interrupt context: park any
             * ongoing DMA transfer and fall back to polled byte TX. */
            uint16_t todo = 0;
            if (dev(uart)->CR3 & USART_CR3_DMAT) {
                /* DMA transfer for UART on-going */
                todo = dma_suspend(uart_config[uart].dma);
            }
            if (todo) {
                /* A transfer was actually in flight: halt the stream and
                 * detach the UART from DMA while we poll bytes out. */
                dma_stop(uart_config[uart].dma);
                dev(uart)->CR3 &= ~USART_CR3_DMAT;
            }
            for (unsigned i = 0; i < len; i++) {
                send_byte(uart, data[i]);
            }
            if (todo > 0) {
                /* Re-attach DMA and resume the suspended transfer with the
                 * remaining word count. */
                wait_for_tx_complete(uart);
                dev(uart)->CR3 |= USART_CR3_DMAT;
                dma_resume(uart_config[uart].dma, todo);
            }
        }
        else {
            /* Thread context: serialize on the stream lock and hand the
             * whole buffer to the DMA engine. */
            dma_acquire(uart_config[uart].dma);
            dev(uart)->CR3 |= USART_CR3_DMAT;
            dma_transfer(uart_config[uart].dma, uart_config[uart].dma_chan,
                         data, (void *)&dev(uart)->TDR_REG, len,
                         DMA_MEM_TO_PERIPH, DMA_INC_SRC_ADDR);
            dma_release(uart_config[uart].dma);
            /* make sure the function is synchronous by waiting for the transfer to
             * finish */
            wait_for_tx_complete(uart);
            dev(uart)->CR3 &= ~USART_CR3_DMAT;
        }
        return;
    }
#endif
    /* No DMA stream for this UART: plain polled transmission. */
    for (size_t i = 0; i < len; i++) {
        send_byte(uart, data[i]);
    }
    /* make sure the function is synchronous by waiting for the transfer to
     * finish */
    wait_for_tx_complete(uart);
}
static int tegra_spi_dma_finish(struct tegra_spi_channel *spi) { int ret; unsigned int todo; todo = read32(&spi->dma_in->regs->wcount); if (spi->dma_in) { while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) || dma_busy(spi->dma_in)) ; /* this shouldn't take long, no udelay */ dma_stop(spi->dma_in); clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN); dma_release(spi->dma_in); } if (spi->dma_out) { while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) || dma_busy(spi->dma_out)) spi_delay(spi, todo - spi_byte_count(spi)); clrbits_le32(&spi->regs->command1, SPI_CMD1_TX_EN); dma_stop(spi->dma_out); dma_release(spi->dma_out); } if (fifo_error(spi)) { printk(BIOS_ERR, "%s: ERROR:\n", __func__); dump_dma_regs(spi->dma_out); dump_dma_regs(spi->dma_in); dump_spi_regs(spi); dump_fifo_status(spi); ret = -1; goto done; } ret = 0; done: spi->dma_in = NULL; spi->dma_out = NULL; return ret; }
/*
 * Block until the current grab of @v completes (or times out / is
 * interrupted), then sync the DMA pages for CPU access and mark the buffer
 * unused.
 *
 * Only frame 0 is supported.  Returns 0 on success, -EINVAL for a bad
 * frame or buffer state, -EINTR if a signal arrived while waiting, -EIO if
 * the grab never finished.  On -EINTR/-EIO the DMA engine is force-stopped.
 */
static int vino_waitfor(struct vino_device *v, int frame)
{
	wait_queue_t wait;
	int i, err = 0;

	if (frame != 0)
		return -EINVAL;

	spin_lock_irq(&v->state_lock);
	switch (v->buffer_state) {
	case VINO_BUF_GRABBING:
		init_waitqueue_entry(&wait, current);
		/* add ourselves into wait queue */
		add_wait_queue(&v->dma_wait, &wait);
		/* and set current state */
		set_current_state(TASK_INTERRUPTIBLE);
		/* before releasing spinlock */
		spin_unlock_irq(&v->state_lock);
		/* to ensure that schedule_timeout will return imediately
		 * if VINO interrupt was triggred meanwhile */
		schedule_timeout(HZ / 10);
		if (signal_pending(current))
			err = -EINTR;
		spin_lock_irq(&v->state_lock);
		remove_wait_queue(&v->dma_wait, &wait);
		/* don't rely on schedule_timeout return value and check what
		 * really happened */
		if (!err && v->buffer_state == VINO_BUF_GRABBING)
			err = -EIO;
		/* fall through */
	case VINO_BUF_DONE:
		/* Hand the DMA'd pages back to the CPU before userspace
		 * reads them. */
		for (i = 0; i < v->page_count; i++)
			pci_dma_sync_single(NULL,
					    v->dma_desc.cpu[PAGE_RATIO * i],
					    PAGE_SIZE, PCI_DMA_FROMDEVICE);
		v->buffer_state = VINO_BUF_UNUSED;
		break;
	default:
		err = -EINVAL;
	}
	spin_unlock_irq(&v->state_lock);

	if (err && err != -EINVAL) {
		DEBUG("VINO: waiting for frame failed\n");
		/* NOTE(review): `Vino` is presumably the driver-global state
		 * holding the hardware lock — confirm against the rest of
		 * the file. */
		spin_lock_irq(&Vino->vino_lock);
		dma_stop(v);
		clear_eod(v);
		spin_unlock_irq(&Vino->vino_lock);
	}

	return err;
}
/*
 * One-time audio/PCM bring-up: routes DMA0 to the audio FIFO, programs
 * interrupt priorities, resets the IIS interface, initializes the codec
 * (UDA1380 or TLV320 depending on build), and leaves everything stopped
 * via dma_stop().
 *
 * Register writes are order-dependent hardware setup — do not reorder.
 */
void pcm_init(void)
{
    pcm_playing = false;
    pcm_paused = false;

    BUSMASTER_CTRL = 0x81; /* PARK[1,0]=10 + BCR24BIT */
    DIVR0 = 54;            /* DMA0 is mapped into vector 54 in system.c */
    DMAROUTE = (DMAROUTE & 0xffffff00) | DMA0_REQ_AUDIO_1;
    DMACONFIG = 1;         /* DMA0Req = PDOR3 */

    /* Reset the audio FIFO */
    IIS2CONFIG = IIS_RESET;

    /* Enable interrupt at level 7, priority 0 */
    ICR4 = (ICR4 & 0xffff00ff) | 0x00001c00;
    IMR &= ~(1 << 14); /* bit 14 is DMA0 */

    pcm_set_frequency(44100);

    /* Prevent pops (resets DAC to zero point) */
    IIS2CONFIG = IIS_DEFPARM(pcm_freq) | IIS_RESET;

#if defined(HAVE_UDA1380)
    /* Initialize default register values. */
    uda1380_init();

    /* Sleep a while so the power can stabilize (especially a long
       delay is needed for the line out connector). */
    sleep(HZ);
    /* Power on FSDAC and HP amp. */
    uda1380_enable_output(true);

    /* Unmute the master channel (DAC should be at zero point now). */
    uda1380_mute(false);
#elif defined(HAVE_TLV320)
    tlv320_init();
    tlv320_enable_output(true);
    sleep(HZ / 4);
    tlv320_mute(false);
#endif

    /* Call dma_stop to initialize everything. */
    dma_stop();
}
void dma_start(int ch, unsigned int srcAddr, unsigned int dstAddr, unsigned int count) { // printf("dma gao start1\n"); dma_stop(ch); //set_dma_addr REG_DMAC_DSAR(ch) = srcAddr; REG_DMAC_DDAR(ch) = dstAddr; //set_dma_count REG_DMAC_DTCR(ch) = count / dma_unit_size[ch]; //enable_dma REG_DMAC_DCMD(ch) = dma_mode[ch]; REG_DMAC_DCCSR(ch) &= ~(DMAC_DCCSR_HLT|DMAC_DCCSR_TC|DMAC_DCCSR_AR); REG_DMAC_DCCSR(ch) |= DMAC_DCCSR_NDES; /* No-descriptor transfer */ __dmac_enable_channel(ch); if (dma_irq[ch]) __dmac_channel_enable_irq(ch); }
static void tx_stream_disable(struct stream *stream, struct device *dev) { const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); struct i2s_stm32_data *const dev_data = DEV_DATA(dev); struct device *dev_dma = dev_data->dev_dma; LL_I2S_DisableDMAReq_TX(cfg->i2s); LL_I2S_DisableIT_ERR(cfg->i2s); dma_stop(dev_dma, stream->dma_channel); if (stream->mem_block != NULL) { k_mem_slab_free(stream->cfg.mem_slab, &stream->mem_block); stream->mem_block = NULL; } LL_I2S_Disable(cfg->i2s); active_dma_tx_channel[stream->dma_channel] = NULL; }
/*
 * Worker thread driving DMA writes to the DEV9 device.
 *
 * Loops forever: sleeps until woken and the semaphore is taken, then
 * programs the IOP DMA channel with dma_buffer, kicks off the ATA command
 * held in dma_transfer, and waits for the DMA-done/ATA-done event flags
 * before releasing the semaphore.  Never returns.
 */
static void write_thread(void *arg)
{
	USE_SPD_REGS;
	volatile iop_dmac_chan_t *dev9_chan =
		(volatile iop_dmac_chan_t *)DEV9_DMAC_BASE;
	struct eng_args *args = (struct eng_args *)arg;
	ata_dma_transfer_t *t = &dma_transfer;
	u32 res;

	while (1) {
		/* Block until woken AND the work semaphore is available. */
		while (SleepThread() || WaitSema(args->semid))
			;

		ClearEventFlag(args->evflg, 0);
		dma_setup(1);
		EnableIntr(IOP_IRQ_DMA_DEV9);

		/* Initiate the DMA transfer. */
		dev9_chan->madr = (u32)dma_buffer;
		/* NOTE(review): block count = size/128 words per block, 32
		 * blocks — confirm against the DEV9 DMA block format used
		 * by dma_setup(). */
		dev9_chan->bcr = ((t->size / 128) << 16) | 32;
		dev9_chan->chcr = 0x01000201;

		SPD_REG8(0x4e) = t->command; /* ATA command register. */
		SPD_REG8(SPD_R_PIO_DIR) = 1;
		SPD_REG8(SPD_R_PIO_DATA) = 0;
		/* Start the SPEED transfer engine. */
		SPD_REG8(SPD_R_XFR_CTRL) |= 0x80;

		/* Wait for either completion flag (mode 0x11). */
		WaitEventFlag(args->evflg, (EF_DMA_DONE | EF_ATA_DONE), 0x11,
			      &res);

		SPD_REG8(SPD_R_XFR_CTRL) &= 0x7f;
		DisableIntr(IOP_IRQ_DMA_DEV9, NULL);

		/* If we got the ATA end signal, force stop the transfer. */
		if (res & EF_ATA_DONE)
			dma_stop(1);

		SignalSema(args->semid);
	}
}
/*
 * Interrupt handler for the DesignWare MMC controller.
 *
 * Handles the SDMMC controller status (command/data errors, command done,
 * auto-command done, data transfer over, card detect), then either
 * services PIO reads/writes or acknowledges DMA-engine interrupts, and
 * finally runs the tasklet that advances the current request.
 *
 * NOTE(review): the error branches dereference sc->curcmd without a NULL
 * check — presumably an error interrupt can only fire with a command in
 * flight; confirm against the request path.
 */
static void dwmmc_intr(void *arg)
{
	struct mmc_command *cmd;
	struct dwmmc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWMMC_LOCK(sc);

	cmd = sc->curcmd;

	/* First handle SDMMC controller interrupts */
	reg = READ4(sc, SDMMC_MINTSTS);
	if (reg) {
		dprintf("%s 0x%08x\n", __func__, reg);

		if (reg & DWMMC_CMD_ERR_FLAGS) {
			/* Ack the error bits before recording the failure. */
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
			dprintf("cmd err 0x%08x cmd 0x%08x\n",
				reg, cmd->opcode);
			cmd->error = MMC_ERR_TIMEOUT;
		}

		if (reg & DWMMC_DATA_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
			dprintf("data err 0x%08x cmd 0x%08x\n",
				reg, cmd->opcode);
			cmd->error = MMC_ERR_FAILED;
			if (!sc->use_pio) {
				/* Tear down the DMA transfer on data error. */
				dma_done(sc, cmd);
				dma_stop(sc);
			}
		}

		if (reg & SDMMC_INTMASK_CMD_DONE) {
			dwmmc_cmd_done(sc);
			sc->cmd_done = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
		}

		if (reg & SDMMC_INTMASK_ACD) {
			sc->acd_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
		}

		if (reg & SDMMC_INTMASK_DTO) {
			sc->dto_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
		}

		if (reg & SDMMC_INTMASK_CD) {
			/* XXX: Handle card detect */
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
		}
	}

	if (sc->use_pio) {
		/* PIO mode: move data on receive-ready / transmit-ready /
		 * transfer-over status (reg still holds MINTSTS here). */
		if (reg & (SDMMC_INTMASK_RXDR | SDMMC_INTMASK_DTO)) {
			pio_read(sc, cmd);
		}
		if (reg & (SDMMC_INTMASK_TXDR | SDMMC_INTMASK_DTO)) {
			pio_write(sc, cmd);
		}
	} else {
		/* Now handle DMA interrupts */
		reg = READ4(sc, SDMMC_IDSTS);
		if (reg) {
			dprintf("dma intr 0x%08x\n", reg);
			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
				WRITE4(sc, SDMMC_IDSTS,
				       (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI));
				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
				dma_done(sc, cmd);
			}
		}
	}

	dwmmc_tasklet(sc);

	DWMMC_UNLOCK(sc);
}
/* Stop PCM playback; a no-op when nothing is playing. */
void pcm_play_stop(void)
{
    if (!pcm_playing)
        return;

    dma_stop();
}
/*
 * Close a CDI DMA handle: halt the channel, then release its buffer.
 *
 * Returns -1 if the channel could not be stopped (the buffer is kept);
 * otherwise returns the result of freeing the buffer.
 */
int cdi_dma_close(struct cdi_dma_handle *handle)
{
	int rc = -1;

	if (dma_stop(handle->channel) != -1)
		rc = dma_free(handle->dmabuf, handle->length);

	return rc;
}