static void clear_page_dma(void *to)
{
	extern unsigned long empty_zero_page[1024];

	/*
	 * We get invoked quite early on; if the DMAC hasn't been
	 * initialized yet, fall back on the slow manual implementation.
	 */
	if (dma_info[dma_channel].chan != dma_channel) {
		clear_page_slow(to);
		return;
	}

	dma_write_page(dma_channel, (unsigned long)empty_zero_page,
		       (unsigned long)to);

	/*
	 * FIXME: Something is a bit racy here: if we poll the counter
	 * right away, we seem to lock up. Flushing the page from the
	 * dcache doesn't seem to make a difference one way or the other,
	 * though either a full icache or dcache flush does.
	 *
	 * The location of this call is important as well; it must happen
	 * prior to the completion loop but after the transfer has been
	 * initiated.
	 *
	 * Oddly enough, this doesn't appear to be an issue for
	 * copy_page()..
	 */
	flush_icache_range((unsigned long)to, (unsigned long)to + PAGE_SIZE);

	dma_wait_for_completion(dma_channel);
}
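/*
 * A minimal sketch of the clear_page_slow() fallback referenced above: only
 * the name appears at the call site, so a plain CPU memset of one page is
 * assumed here.
 */
static void clear_page_slow(void *to)
{
	memset(to, 0, PAGE_SIZE);
}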
static int aica_dma_transfer(int channels, int buffer_size,
			     struct snd_pcm_substream *substream)
{
	int q, err, period_offset;
	struct snd_card_aica *dreamcastcard;
	struct snd_pcm_runtime *runtime;
	unsigned long flags;

	err = 0;
	dreamcastcard = substream->pcm->private_data;
	period_offset = dreamcastcard->clicks;
	period_offset %= (AICA_PERIOD_NUMBER / channels);
	runtime = substream->runtime;

	for (q = 0; q < channels; q++) {
		local_irq_save(flags);
		err = dma_xfer(AICA_DMA_CHANNEL,
			       (unsigned long)(runtime->dma_area +
					       (AICA_BUFFER_SIZE * q) / channels +
					       AICA_PERIOD_SIZE * period_offset),
			       AICA_CHANNEL0_OFFSET + q * CHANNEL_OFFSET +
			       AICA_PERIOD_SIZE * period_offset,
			       buffer_size / channels, AICA_DMA_MODE);
		if (unlikely(err < 0)) {
			local_irq_restore(flags);
			break;
		}
		dma_wait_for_completion(AICA_DMA_CHANNEL);
		local_irq_restore(flags);
	}

	return err;
}
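/*
 * Illustrative helper (not from the driver): the source-address arithmetic
 * in aica_dma_transfer() pulled out for readability. The assumed layout is
 * `channels` consecutive blocks of AICA_BUFFER_SIZE / channels bytes in
 * dma_area, stepped through one AICA_PERIOD_SIZE chunk at a time.
 */
static unsigned long aica_period_source(struct snd_pcm_runtime *runtime,
					int channels, int channel,
					int period_offset)
{
	return (unsigned long)(runtime->dma_area +
			       (AICA_BUFFER_SIZE * channel) / channels +
			       AICA_PERIOD_SIZE * period_offset);
}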
static int snd_stm_pcm_player_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_stm_pcm_player *pcm_player =
			snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_stm_printd(1, "snd_stm_pcm_player_hw_free(substream=0x%p)\n",
		       substream);

	BUG_ON(!pcm_player);
	BUG_ON(!snd_stm_magic_valid(pcm_player));
	BUG_ON(!runtime);

	/* This callback may be called more than once... */

	if (snd_stm_buffer_is_allocated(pcm_player->buffer)) {
		/* Let the FDMA stop */
		dma_wait_for_completion(pcm_player->fdma_channel);

		/* Free buffer */
		snd_stm_buffer_free(pcm_player->buffer);

		/* Free FDMA parameters & configuration */
		dma_params_free(&pcm_player->fdma_params);
		dma_req_free(pcm_player->fdma_channel,
			     pcm_player->fdma_request);
	}

	return 0;
}
/*
 * Setup FDMA transfer. Add 'oob' to the list, if present. Assumes the FDMA
 * channel has been initialised and that data areas are suitably aligned.
 */
static int nand_write_dma(struct mtd_info *mtd, const uint8_t *buf,
			  int buf_len, uint8_t *oob, int oob_len)
{
	struct nand_chip *chip = mtd->priv;
	struct stm_nand_emi *data = chip->priv;
	unsigned long nand_dma;
	dma_addr_t buf_dma;
	dma_addr_t oob_dma = 0;
	unsigned long res = 0;

	/* Check the channel is ready for use */
	if (dma_get_status(data->dma_chan) != DMA_CHANNEL_STATUS_IDLE) {
		printk(KERN_ERR NAME ": requested channel not idle\n");
		return 1;
	}

	/* Set up and map DMA addresses */
	nand_dma = data->nand_phys_addr;
	buf_dma = dma_map_single(NULL, (void *)buf, buf_len, DMA_TO_DEVICE);
	dma_params_addrs(&data->dma_params[0], buf_dma, nand_dma, buf_len);

	/* Are we doing a data+oob linked transfer? */
	if (oob) {
		oob_dma = dma_map_single(NULL, oob, oob_len, DMA_TO_DEVICE);
		dma_params_link(&data->dma_params[0], &data->dma_params[1]);
		dma_params_addrs(&data->dma_params[1], oob_dma, nand_dma,
				 oob_len);
	} else {
		data->dma_params[0].next = NULL;
	}

	/* Compile the transfer list */
	res = dma_compile_list(data->dma_chan, &data->dma_params[0],
			       GFP_ATOMIC);
	if (res != 0) {
		printk(KERN_ERR NAME ": DMA compile list failed "
		       "(err_code = %ld)\n", res);
		goto out_unmap;
	}

	/* Initiate the transfer */
	res = dma_xfer_list(data->dma_chan, &data->dma_params[0]);
	if (res != 0) {
		printk(KERN_ERR NAME ": transfer failed (err_code = %ld)\n",
		       res);
		goto out_unmap;
	}

	/* Wait for completion... */
	dma_wait_for_completion(data->dma_chan);

out_unmap:
	/* Unmap DMA memory (the error paths must not leak the mappings) */
	dma_unmap_single(NULL, buf_dma, buf_len, DMA_TO_DEVICE);
	if (oob)
		dma_unmap_single(NULL, oob_dma, oob_len, DMA_TO_DEVICE);

	return res != 0 ? 1 : 0;
}
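/*
 * The same compile-then-fire FDMA pattern as nand_write_dma(), reduced to
 * its skeleton. Only the calls visible above are used; the helper name, the
 * `struct stm_dma_params` type name, and the address parameters are
 * assumptions for illustration.
 */
static int fdma_linked_xfer(int chan, struct stm_dma_params *params,
			    dma_addr_t src0, int len0,
			    dma_addr_t src1, int len1,
			    unsigned long dst)
{
	dma_params_addrs(&params[0], src0, dst, len0);
	dma_params_link(&params[0], &params[1]);
	dma_params_addrs(&params[1], src1, dst, len1);

	if (dma_compile_list(chan, &params[0], GFP_ATOMIC) != 0)
		return -EIO;
	if (dma_xfer_list(chan, &params[0]) != 0)
		return -EIO;

	dma_wait_for_completion(chan);
	return 0;
}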
static void copy_page_dma(void *to, void *from)
{
	/*
	 * This doesn't seem to get triggered until further along in the
	 * boot process, at which point the DMAC is already initialized.
	 * Fix this in the same fashion as clear_page_dma() in the event
	 * that this crashes due to the DMAC not being initialized.
	 */
	flush_icache_range((unsigned long)from,
			   (unsigned long)from + PAGE_SIZE);
	dma_write_page(dma_channel, (unsigned long)from, (unsigned long)to);
	dma_wait_for_completion(dma_channel);
}
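/*
 * Hedged sketch of how the DMA page ops above might be installed at boot.
 * request_dma() matches the SH dma-api; the init function name, the
 * "page ops" dev_id string, and the hook assignments are illustrative
 * assumptions, not the file's actual registration code.
 */
static int __init pg_dma_init(void)
{
	if (request_dma(dma_channel, "page ops") != 0)
		return -EBUSY;	/* DMAC unavailable; keep the CPU paths */

	/* Assumes the page ops are assignable hooks on this arch */
	clear_page = clear_page_dma;
	copy_page = copy_page_dma;

	return 0;
}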
static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id,
				      struct pt_regs *regs)
{
	static unsigned int count;	/* limits the debug spew below */

	if (get_dma_residue(PVR2_CASCADE_CHAN)) {
		printk(KERN_WARNING "DMA: SH DMAC did not complete transfer "
		       "on channel %d, waiting..\n", PVR2_CASCADE_CHAN);
		dma_wait_for_completion(PVR2_CASCADE_CHAN);
	}

	if (count++ < 10)
		pr_debug("Got a pvr2 dma interrupt for channel %d\n",
			 irq - HW_EVENT_PVR2_DMA);

	return IRQ_HANDLED;
}
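/*
 * Illustrative variant (not from the driver): dma_wait_for_completion()
 * appears to be a polling wait on these platforms (aica_dma_transfer()
 * above calls it with interrupts disabled), and it waits without bound.
 * A caller that cannot tolerate that could poll get_dma_residue() itself;
 * the helper name and loop bound are assumptions.
 */
static int pvr2_dma_wait_bounded(unsigned int chan, unsigned int loops)
{
	while (get_dma_residue(chan)) {
		if (!loops--)
			return -ETIMEDOUT;	/* transfer appears stalled */
		cpu_relax();
	}
	return 0;
}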
void ksnd_pcm_streaming_stop(ksnd_pcm_streaming_t handle)
{
	/* Raise the flag */
	set_bit(FLAG_STOPPED, &handle->flags);

	/* Clear the ALSA notification callback */
	handle->capture_handle->substream->runtime->transfer_ack_end = NULL;

	/* Break the transfer (if any) */
	dma_stop_channel(handle->fdma_channel);
	dma_wait_for_completion(handle->fdma_channel);

	/* Free FDMA resources */
	dma_params_free(&handle->fdma_params);
	free_dma(handle->fdma_channel);

	/* Free the descriptor memory */
	handle->magic = magic_bad;
	kfree(handle);
}
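/*
 * The stop-then-drain idiom from ksnd_pcm_streaming_stop() as a reusable
 * helper; the name is illustrative. Stopping the channel first breaks any
 * in-flight node, and the completion wait then lets the engine settle
 * before the parameter list is freed.
 */
static void fdma_abort_and_drain(int chan)
{
	dma_stop_channel(chan);
	dma_wait_for_completion(chan);
}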