static int omap_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	struct omap_dma_channel_params dma_params;
	int bytes;

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!prtd->dma_data)
		return 0;

	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.data_type = dma_data->data_type;
	dma_params.trigger = dma_data->dma_req;
	dma_params.sync_mode = dma_data->sync_mode;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
		dma_params.src_start = runtime->dma_addr;
		dma_params.dst_start = dma_data->port_addr;
		dma_params.dst_port = OMAP_DMA_PORT_MPUI;
		dma_params.dst_fi = dma_data->packet_size;
	} else {
		dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
		dma_params.src_start = dma_data->port_addr;
		dma_params.dst_start = runtime->dma_addr;
		dma_params.src_port = OMAP_DMA_PORT_MPUI;
		dma_params.src_fi = dma_data->packet_size;
	}
	/*
	 * Set DMA transfer frame size equal to ALSA period size and frame
	 * count as no. of ALSA periods. Then with DMA frame interrupt enabled,
	 * we can transfer the whole ALSA buffer with a single DMA transfer but
	 * still get an interrupt at each period boundary.
	 */
	bytes = snd_pcm_lib_period_bytes(substream);
	dma_params.elem_count = bytes >> dma_data->data_type;
	dma_params.frame_count = runtime->periods;
	omap_set_dma_params(prtd->dma_ch, &dma_params);

	if (cpu_is_omap1510())
		omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ |
			OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ);
	else
		omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);

	if (!(cpu_class_is_omap1())) {
		omap_set_dma_src_burst_mode(prtd->dma_ch,
					    OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(prtd->dma_ch,
					     OMAP_DMA_DATA_BURST_16);
	}

	return 0;
}
static int omap_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	struct omap_dma_channel_params dma_params;
	int sync_mode;

	memset(&dma_params, 0, sizeof(dma_params));

	/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
	if (cpu_is_omap34xx() &&
	    (prtd->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD))
		sync_mode = OMAP_DMA_SYNC_FRAME;
	else
		sync_mode = OMAP_DMA_SYNC_ELEMENT;

	/*
	 * Note: Regardless of interface data formats supported by OMAP McBSP
	 * or EAC blocks, internal representation is always fixed 16-bit/sample
	 */
	dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
	dma_params.trigger = dma_data->dma_req;
	dma_params.sync_mode = sync_mode;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
		dma_params.src_start = runtime->dma_addr;
		dma_params.dst_start = dma_data->port_addr;
		dma_params.dst_port = OMAP_DMA_PORT_MPUI;
	} else {
		dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
		dma_params.src_start = dma_data->port_addr;
		dma_params.dst_start = runtime->dma_addr;
		dma_params.src_port = OMAP_DMA_PORT_MPUI;
	}
	/*
	 * Set DMA transfer frame size equal to ALSA period size and frame
	 * count as no. of ALSA periods. Then with DMA frame interrupt enabled,
	 * we can transfer the whole ALSA buffer with a single DMA transfer but
	 * still get an interrupt at each period boundary.
	 */
	dma_params.elem_count = snd_pcm_lib_period_bytes(substream) / 2;
	dma_params.frame_count = runtime->periods;
	omap_set_dma_params(prtd->dma_ch, &dma_params);

	omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);

	omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);

	return 0;
}
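/*
 * [Illustrative sketch, not part of either driver variant above.]  Both
 * versions of omap_pcm_prepare() map ALSA geometry onto sDMA geometry the
 * same way: one DMA element per sample word, one DMA frame per ALSA period,
 * and frame_count equal to the number of periods, so a single DMA block
 * covers the whole ring buffer while the frame IRQ still fires once per
 * period.  One variant shifts the period size right by data_type (relying
 * on OMAP_DMA_DATA_TYPE_S8/S16/S32 being 0, 1 and 2); the other hard-codes
 * 16-bit samples and divides by 2.  The helper below is hypothetical and
 * only demonstrates that arithmetic.
 */
#include <assert.h>
#include <stdio.h>

struct pcm_dma_geometry {
	unsigned elem_count;	/* samples per period -> sDMA elements per frame */
	unsigned frame_count;	/* periods per buffer -> sDMA frames per block   */
};

static struct pcm_dma_geometry map_pcm_to_sdma(unsigned period_bytes,
					       unsigned periods,
					       unsigned data_type /* 0=S8, 1=S16, 2=S32 */)
{
	struct pcm_dma_geometry g = {
		.elem_count  = period_bytes >> data_type,
		.frame_count = periods,
	};
	return g;
}

int main(void)
{
	/* e.g. 16-bit samples, 4096-byte periods, 4 periods per buffer */
	struct pcm_dma_geometry g = map_pcm_to_sdma(4096, 4, 1);

	assert(g.elem_count == 2048 && g.frame_count == 4);
	printf("elements/frame=%u frames/block=%u\n", g.elem_count, g.frame_count);
	return 0;
}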
static int abe_dbg_start_dma(struct omap_abe *abe, int circular) { struct omap_dma_channel_params dma_params; int err; /* start the DMA in either :- * * 1) circular buffer mode where the DMA will restart when it get to * the end of the buffer. * 2) default mode, where DMA stops at the end of the buffer. */ abe->debugfs.dma_req = OMAP44XX_DMA_ABE_REQ_7; err = omap_request_dma(abe->debugfs.dma_req, "ABE debug", abe_dbg_dma_irq, abe, &abe->debugfs.dma_ch); if (abe->debugfs.circular) { /* * Link channel with itself so DMA doesn't need any * reprogramming while looping the buffer */ omap_dma_link_lch(abe->debugfs.dma_ch, abe->debugfs.dma_ch); } memset(&dma_params, 0, sizeof(dma_params)); dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; dma_params.trigger = abe->debugfs.dma_req; dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dma_params.src_start = OMAP_ABE_D_DEBUG_FIFO_ADDR + ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU; dma_params.dst_start = abe->debugfs.buffer_addr; dma_params.src_port = OMAP_DMA_PORT_MPUI; dma_params.src_ei = 1; dma_params.src_fi = 1 - abe->debugfs.elem_bytes; /* 128 bytes shifted into words */ dma_params.elem_count = abe->debugfs.elem_bytes >> 2; dma_params.frame_count = abe->debugfs.buffer_bytes / abe->debugfs.elem_bytes; omap_set_dma_params(abe->debugfs.dma_ch, &dma_params); omap_enable_dma_irq(abe->debugfs.dma_ch, OMAP_DMA_FRAME_IRQ); omap_set_dma_src_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16); abe->debugfs.reader_offset = 0; pm_runtime_get_sync(abe->dev); omap_start_dma(abe->debugfs.dma_ch); return 0; }
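/*
 * [Illustrative sketch with a hypothetical helper.]  abe_dbg_start_dma()
 * reads the ABE debug FIFO with a double-indexed source: src_ei = 1 keeps
 * the address contiguous inside a frame, and src_fi = 1 - elem_bytes pulls
 * it back to the start of the FIFO window at every frame boundary.  The
 * walk below assumes the sDMA indexing convention that is consistent with
 * every double-indexed user in this file: after each element the address
 * advances by element_size + (EI - 1), and when crossing a frame boundary
 * by element_size + (FI - 1).  With S32 elements and the 128-byte window
 * noted in the code above, the source pointer returns to the FIFO base
 * after each frame.
 */
#include <assert.h>
#include <stdio.h>

static long dma_addr_walk(long start, int elem_size, int ei, int fi,
			  unsigned elems_per_frame, unsigned frames)
{
	long addr = start;
	unsigned f, e;

	for (f = 0; f < frames; f++) {
		for (e = 0; e + 1 < elems_per_frame; e++)
			addr += elem_size + (ei - 1);	/* step inside a frame */
		addr += elem_size + (fi - 1);		/* step across the frame boundary */
	}
	return addr;
}

int main(void)
{
	const int elem_bytes = 128;	/* FIFO window size, per the comment above */
	long end;

	/* S32 elements: elem_count = elem_bytes >> 2, src_ei = 1, src_fi = 1 - elem_bytes */
	end = dma_addr_walk(0x1000, 4, 1, 1 - elem_bytes, elem_bytes >> 2, 3);

	assert(end == 0x1000);	/* back at the FIFO base after every frame */
	printf("source address after 3 frames: %#lx\n", (unsigned long)end);
	return 0;
}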
/* Prepare to transfer the next segment of a scatterlist */ static void mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) { int dma_ch = host->dma_ch; unsigned long data_addr; u16 buf, frame; u32 count; struct scatterlist *sg = &data->sg[host->sg_idx]; int src_port = 0; int dst_port = 0; int sync_dev = 0; data_addr = host->phys_base + OMAP_MMC_REG_DATA; frame = data->blksz; count = sg_dma_len(sg); if ((data->blocks == 1) && (count > data->blksz)) count = frame; host->dma_len = count; /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. * Use 16 or 32 word frames when the blocksize is at least that large. * Blocksize is usually 512 bytes; but not for some SD reads. */ if (cpu_is_omap15xx() && frame > 32) frame = 32; else if (frame > 64) frame = 64; count /= frame; frame >>= 1; if (!(data->flags & MMC_DATA_WRITE)) { buf = 0x800f | ((frame - 1) << 8); if (cpu_class_is_omap1()) { src_port = OMAP_DMA_PORT_TIPB; dst_port = OMAP_DMA_PORT_EMIFF; } if (cpu_is_omap24xx()) sync_dev = OMAP24XX_DMA_MMC1_RX; omap_set_dma_src_params(dma_ch, src_port, OMAP_DMA_AMODE_CONSTANT, data_addr, 0, 0); omap_set_dma_dest_params(dma_ch, dst_port, OMAP_DMA_AMODE_POST_INC, sg_dma_address(sg), 0, 0); omap_set_dma_dest_data_pack(dma_ch, 1); omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); } else { buf = 0x0f80 | ((frame - 1) << 0); if (cpu_class_is_omap1()) { src_port = OMAP_DMA_PORT_EMIFF; dst_port = OMAP_DMA_PORT_TIPB; } if (cpu_is_omap24xx()) sync_dev = OMAP24XX_DMA_MMC1_TX; omap_set_dma_dest_params(dma_ch, dst_port, OMAP_DMA_AMODE_CONSTANT, data_addr, 0, 0); omap_set_dma_src_params(dma_ch, src_port, OMAP_DMA_AMODE_POST_INC, sg_dma_address(sg), 0, 0); omap_set_dma_src_data_pack(dma_ch, 1); omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); } /* Max limit for DMA frame count is 0xffff */ BUG_ON(count > 0xffff); OMAP_MMC_WRITE(host, BUF, buf); omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, frame, count, OMAP_DMA_SYNC_FRAME, sync_dev, 0); }
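/*
 * [Illustrative sketch with a hypothetical helper.]  mmc_omap_prepare_dma()
 * sizes DMA frames to the controller FIFO: at most 32 bytes on OMAP15xx
 * (16 x 2-byte FIFO), at most 64 bytes elsewhere, and never more than the
 * block size.  It then divides the scatterlist segment into frames and
 * converts the frame size from bytes to 16-bit elements (frame >>= 1),
 * with the frame count limited to the 16-bit field the hardware provides.
 * The helper below only reproduces that arithmetic.
 */
#include <assert.h>
#include <stdio.h>

struct mmc_dma_shape {
	unsigned elems_per_frame;	/* 16-bit words per DMA frame */
	unsigned frame_count;		/* frames in this segment */
};

static struct mmc_dma_shape compute_mmc_dma_shape(unsigned blksz,
						  unsigned seg_bytes,
						  int is_omap15xx)
{
	unsigned frame = blksz;
	struct mmc_dma_shape s;

	if (is_omap15xx && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;

	s.frame_count = seg_bytes / frame;
	s.elems_per_frame = frame >> 1;		/* bytes -> S16 elements */

	assert(s.frame_count <= 0xffff);	/* sDMA frame counter is 16 bits */
	return s;
}

int main(void)
{
	/* a 512-byte block size, 64 KiB segment, on a non-15xx part */
	struct mmc_dma_shape s = compute_mmc_dma_shape(512, 65536, 0);

	assert(s.elems_per_frame == 32 && s.frame_count == 1024);
	printf("%u x 16-bit elements per frame, %u frames\n",
	       s.elems_per_frame, s.frame_count);
	return 0;
}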
static void OMAPLFBFliepNoLock_HDMI(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_DEVINFO *psDevInfo, struct omapfb_info *ofbi, struct fb_info *framebuffer, unsigned long fb_offset) { struct omap_overlay *ovl_hdmi; struct omap_overlay_info info; struct omap_overlay *hdmi; struct omap_overlay_manager *manager; bool overlay_change_requested = false; enum omap_dss_overlay_s3d_type s3d_type_in_video_hdmi = omap_dss_overlay_s3d_none; ovl_hdmi = omap_dss_get_overlay(3); if(ovl_hdmi->info.enabled) s3d_type_in_video_hdmi = ovl_hdmi->info.s3d_type; hdmi = psSwapChain->stHdmiTiler.overlay; manager = hdmi->manager; hdmi->get_overlay_info(hdmi, &info); //not good... if ( omap_overlay_info_req[hdmi->id].status==2 ) { info.enabled = omap_overlay_info_req[hdmi->id].enabled; info.rotation = omap_overlay_info_req[hdmi->id].rotation; info.pos_x = omap_overlay_info_req[hdmi->id].pos_x; info.pos_y = omap_overlay_info_req[hdmi->id].pos_y; info.out_width = omap_overlay_info_req[hdmi->id].out_width; info.out_height = omap_overlay_info_req[hdmi->id].out_height; info.global_alpha = omap_overlay_info_req[hdmi->id].global_alpha; info.zorder = omap_overlay_info_req[hdmi->id].zorder; printk("GUI HDMI layer change requested. req_enabled(%d)\n", omap_overlay_info_req[hdmi->id].enabled); omap_overlay_info_req[hdmi->id].status = 0; overlay_change_requested = true; } if ( info.enabled ) { mutex_lock(&psSwapChain->stHdmiTiler.lock); if ( !psSwapChain->stHdmiTiler.alloc ) { // if ( AllocTilerForHdmi(psSwapChain, psDevInfo) ) { ERROR_PRINTK("Tiler memory for HDMI GUI cloning is not allocated\n"); mutex_unlock(&psSwapChain->stHdmiTiler.lock); return; // } } if ( psSwapChain->stHdmiTiler.alloc ) //if Tiler memory is allocated { unsigned long line_offset; unsigned long w, h; unsigned long src_stride, dst_stride; unsigned long i; unsigned char *dst, *src; unsigned long pStride; u32 j, *src_4, *dst_4, *dst1_4; int ch; src_stride = psDevInfo->sFBInfo.ulByteStride; dst_stride = psSwapChain->stHdmiTiler.vStride; line_offset = fb_offset / src_stride; h = psDevInfo->sFBInfo.ulHeight; w = psDevInfo->sFBInfo.ulWidth; pStride = psSwapChain->stHdmiTiler.pStride; //Copy dst = (unsigned char*)psSwapChain->stHdmiTiler.vAddr + (line_offset * dst_stride); src = (unsigned char*)framebuffer->screen_base + fb_offset; DEBUG_PRINTK("Copy Start h:%d, src:0x%p src_stride:%d, dst:0x%p dst_stride:%d, line offset:%d, pStride:%d\n", h, src, src_stride, dst, dst_stride, line_offset, pStride); if( psSwapChain->s3d_type==omap_dss_overlay_s3d_side_by_side && s3d_type_in_video_hdmi == omap_dss_overlay_s3d_top_bottom) { for(j=0; j<h/2; j++) { src_4 = (u32 *)(src + src_stride*j); dst_4 = (u32 *)(dst + dst_stride*2*j); dst1_4 = (u32 *)(dst + dst_stride*2*j + w*2); for(i=0;i<w/2;i++) { *dst_4++ = *src_4; *dst1_4++ = *src_4++; src_4++; } src_4 = (u32 *)(src + src_stride*j); dst_4 = (u32 *)(dst + dst_stride*(2*j+1)); dst1_4 = (u32 *)(dst + dst_stride*(2*j+1) + w*2); for(i=0;i<w/2;i++) { *dst_4++ = *src_4; *dst1_4++ = *src_4++; src_4++; } } info.s3d_type = omap_dss_overlay_s3d_top_bottom; } else { if(hdmi_dma.state == HDMI_DMA_DONE) { ch = hdmi_dma.frame_pos; hdmi->get_overlay_info(hdmi, &hdmi_dma.info[ch]); hdmi_dma.hdmi = hdmi; printk("S:%x\n", psSwapChain->stHdmiTiler.pAddr + (h * pStride * ch)); omap_set_dma_transfer_params(hdmi_dma.lch, OMAP_DMA_DATA_TYPE_S32, src_stride>>2, h, 0, 0, 0); omap_set_dma_src_params(hdmi_dma.lch, 0, OMAP_DMA_AMODE_POST_INC, framebuffer->fix.smem_start + fb_offset, 1, 1); omap_set_dma_dest_params(hdmi_dma.lch, 0, OMAP_DMA_AMODE_DOUBLE_IDX, 
psSwapChain->stHdmiTiler.pAddr + (h * pStride * ch), 1, pStride - src_stride + 1 ); omap_dma_set_prio_lch(hdmi_dma.lch, DMA_CH_PRIO_HIGH, DMA_CH_PRIO_HIGH); omap_set_dma_src_burst_mode(hdmi_dma.lch, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_burst_mode(hdmi_dma.lch, OMAP_DMA_DATA_BURST_16); omap_start_dma(hdmi_dma.lch); hdmi_dma.state = HDMI_DMA_TRANSFERRING; if ( omap_overlay_info_req[hdmi->id].status==2 ) { hdmi_dma.info[ch].enabled = omap_overlay_info_req[hdmi->id].enabled; hdmi_dma.info[ch].rotation = omap_overlay_info_req[hdmi->id].rotation; hdmi_dma.info[ch].pos_x = omap_overlay_info_req[hdmi->id].pos_x; hdmi_dma.info[ch].pos_y = omap_overlay_info_req[hdmi->id].pos_y; hdmi_dma.info[ch].out_width = omap_overlay_info_req[hdmi->id].out_width; hdmi_dma.info[ch].out_height = omap_overlay_info_req[hdmi->id].out_height; hdmi_dma.info[ch].global_alpha = omap_overlay_info_req[hdmi->id].global_alpha; hdmi_dma.info[ch].zorder = omap_overlay_info_req[hdmi->id].zorder; } //fill info //@todo not good find another way later hdmi_dma.info[ch].color_mode = ofbi->overlays[0]->info.color_mode; hdmi_dma.info[ch].paddr = psSwapChain->stHdmiTiler.pAddr + (h * pStride * ch); hdmi_dma.info[ch].vaddr = NULL; //no need if ( hdmi_dma.info[ch].rotation==OMAP_DSS_ROT_90 || hdmi_dma.info[ch].rotation==OMAP_DSS_ROT_270 ) { hdmi_dma.info[ch].width = h; hdmi_dma.info[ch].height = w; hdmi_dma.info[ch].screen_width = h; } else { hdmi_dma.info[ch].width =w; hdmi_dma.info[ch].height = h; hdmi_dma.info[ch].screen_width = w; } hdmi_dma.info[ch].rotation_type = OMAP_DSS_ROT_TILER; hdmi_dma.info[ch].s3d_type = psSwapChain->s3d_type; hdmi_dma.curr_frame = hdmi_dma.frame_pos; if( ++hdmi_dma.frame_pos >= HDMI_DMA_MAX ) hdmi_dma.frame_pos = 0; if( omap_overlay_info_req[hdmi->id].status == 2) { omap_overlay_info_req[hdmi->id].status = 0; } } else printk("DOLCOM : DMA busy!!!!!!!!!!!!!!!!!\n");
static int omap2_onenand_probe(struct platform_device *pdev) { struct omap_onenand_platform_data *pdata; struct omap2_onenand *c; struct onenand_chip *this; int r; struct resource *res; struct mtd_part_parser_data ppdata = {}; pdata = dev_get_platdata(&pdev->dev); if (pdata == NULL) { dev_err(&pdev->dev, "platform data missing\n"); return -ENODEV; } c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL); if (!c) return -ENOMEM; init_completion(&c->irq_done); init_completion(&c->dma_done); c->flags = pdata->flags; c->gpmc_cs = pdata->cs; c->gpio_irq = pdata->gpio_irq; c->dma_channel = pdata->dma_channel; if (c->dma_channel < 0) { /* if -1, don't use DMA */ c->gpio_irq = 0; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { r = -EINVAL; dev_err(&pdev->dev, "error getting memory resource\n"); goto err_kfree; } c->phys_base = res->start; c->mem_size = resource_size(res); if (request_mem_region(c->phys_base, c->mem_size, pdev->dev.driver->name) == NULL) { dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n", c->phys_base, c->mem_size); r = -EBUSY; goto err_kfree; } c->onenand.base = ioremap(c->phys_base, c->mem_size); if (c->onenand.base == NULL) { r = -ENOMEM; goto err_release_mem_region; } if (pdata->onenand_setup != NULL) { r = pdata->onenand_setup(c->onenand.base, &c->freq); if (r < 0) { dev_err(&pdev->dev, "Onenand platform setup failed: " "%d\n", r); goto err_iounmap; } c->setup = pdata->onenand_setup; } if (c->gpio_irq) { if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) { dev_err(&pdev->dev, "Failed to request GPIO%d for " "OneNAND\n", c->gpio_irq); goto err_iounmap; } gpio_direction_input(c->gpio_irq); if ((r = request_irq(gpio_to_irq(c->gpio_irq), omap2_onenand_interrupt, IRQF_TRIGGER_RISING, pdev->dev.driver->name, c)) < 0) goto err_release_gpio; } if (c->dma_channel >= 0) { r = omap_request_dma(0, pdev->dev.driver->name, omap2_onenand_dma_cb, (void *) c, &c->dma_channel); if (r == 0) { omap_set_dma_write_mode(c->dma_channel, OMAP_DMA_WRITE_NON_POSTED); omap_set_dma_src_data_pack(c->dma_channel, 1); omap_set_dma_src_burst_mode(c->dma_channel, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_data_pack(c->dma_channel, 1); omap_set_dma_dest_burst_mode(c->dma_channel, OMAP_DMA_DATA_BURST_8); } else { dev_info(&pdev->dev, "failed to allocate DMA for OneNAND, " "using PIO instead\n"); c->dma_channel = -1; } } dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base, c->onenand.base, c->freq); c->pdev = pdev; c->mtd.name = dev_name(&pdev->dev); c->mtd.priv = &c->onenand; c->mtd.owner = THIS_MODULE; c->mtd.dev.parent = &pdev->dev; this = &c->onenand; if (c->dma_channel >= 0) { this->wait = omap2_onenand_wait; if (c->flags & ONENAND_IN_OMAP34XX) { this->read_bufferram = omap3_onenand_read_bufferram; this->write_bufferram = omap3_onenand_write_bufferram; } else { this->read_bufferram = omap2_onenand_read_bufferram; this->write_bufferram = omap2_onenand_write_bufferram; } } if (pdata->regulator_can_sleep) { c->regulator = regulator_get(&pdev->dev, "vonenand"); if (IS_ERR(c->regulator)) { dev_err(&pdev->dev, "Failed to get regulator\n"); r = PTR_ERR(c->regulator); goto err_release_dma; } c->onenand.enable = omap2_onenand_enable; c->onenand.disable = omap2_onenand_disable; } if (pdata->skip_initial_unlocking) this->options |= ONENAND_SKIP_INITIAL_UNLOCKING; if ((r = onenand_scan(&c->mtd, 1)) < 0) goto err_release_regulator; ppdata.of_node = pdata->of_node; r = 
mtd_device_parse_register(&c->mtd, NULL, &ppdata, pdata ? pdata->parts : NULL, pdata ? pdata->nr_parts : 0); if (r) goto err_release_onenand; platform_set_drvdata(pdev, c); return 0; err_release_onenand: onenand_release(&c->mtd); err_release_regulator: regulator_put(c->regulator); err_release_dma: if (c->dma_channel != -1) omap_free_dma(c->dma_channel); if (c->gpio_irq) free_irq(gpio_to_irq(c->gpio_irq), c); err_release_gpio: if (c->gpio_irq) gpio_free(c->gpio_irq); err_iounmap: iounmap(c->onenand.base); err_release_mem_region: release_mem_region(c->phys_base, c->mem_size); err_kfree: kfree(c); return r; }
static int __devinit omap2_onenand_probe(struct platform_device *pdev) { struct omap_onenand_platform_data *pdata; struct omap2_onenand *c; int r; pdata = pdev->dev.platform_data; if (pdata == NULL) { dev_err(&pdev->dev, "platform data missing\n"); return -ENODEV; } c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL); if (!c) return -ENOMEM; init_completion(&c->irq_done); init_completion(&c->dma_done); c->gpmc_cs = pdata->cs; c->gpio_irq = pdata->gpio_irq; c->dma_channel = pdata->dma_channel; if (c->dma_channel < 0) { c->gpio_irq = 0; } r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base); if (r < 0) { dev_err(&pdev->dev, "Cannot request GPMC CS\n"); goto err_kfree; } if (request_mem_region(c->phys_base, ONENAND_IO_SIZE, pdev->dev.driver->name) == NULL) { dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, " "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE); r = -EBUSY; goto err_free_cs; } c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE); if (c->onenand.base == NULL) { r = -ENOMEM; goto err_release_mem_region; } if (pdata->onenand_setup != NULL) { r = pdata->onenand_setup(c->onenand.base, c->freq); if (r < 0) { dev_err(&pdev->dev, "Onenand platform setup failed: " "%d\n", r); goto err_iounmap; } c->setup = pdata->onenand_setup; } if (c->gpio_irq) { if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) { dev_err(&pdev->dev, "Failed to request GPIO%d for " "OneNAND\n", c->gpio_irq); goto err_iounmap; } gpio_direction_input(c->gpio_irq); if ((r = request_irq(gpio_to_irq(c->gpio_irq), omap2_onenand_interrupt, IRQF_TRIGGER_RISING, pdev->dev.driver->name, c)) < 0) goto err_release_gpio; } if (c->dma_channel >= 0) { r = omap_request_dma(0, pdev->dev.driver->name, omap2_onenand_dma_cb, (void *) c, &c->dma_channel); if (r == 0) { omap_set_dma_write_mode(c->dma_channel, OMAP_DMA_WRITE_NON_POSTED); omap_set_dma_src_data_pack(c->dma_channel, 1); omap_set_dma_src_burst_mode(c->dma_channel, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_data_pack(c->dma_channel, 1); omap_set_dma_dest_burst_mode(c->dma_channel, OMAP_DMA_DATA_BURST_8); } else { dev_info(&pdev->dev, "failed to allocate DMA for OneNAND, " "using PIO instead\n"); c->dma_channel = -1; } } dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " "base %p\n", c->gpmc_cs, c->phys_base, c->onenand.base); c->pdev = pdev; c->mtd.name = dev_name(&pdev->dev); c->mtd.priv = &c->onenand; c->mtd.owner = THIS_MODULE; c->mtd.dev.parent = &pdev->dev; if (c->dma_channel >= 0) { struct onenand_chip *this = &c->onenand; this->wait = omap2_onenand_wait; if (cpu_is_omap34xx()) { this->read_bufferram = omap3_onenand_read_bufferram; this->write_bufferram = omap3_onenand_write_bufferram; } else { this->read_bufferram = omap2_onenand_read_bufferram; this->write_bufferram = omap2_onenand_write_bufferram; } } if ((r = onenand_scan(&c->mtd, 1)) < 0) goto err_release_dma; switch ((c->onenand.version_id >> 4) & 0xf) { case 0: c->freq = 40; break; case 1: c->freq = 54; break; case 2: c->freq = 66; break; case 3: c->freq = 83; break; } #ifdef CONFIG_MTD_PARTITIONS if (pdata->parts != NULL) r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts); else #endif r = add_mtd_device(&c->mtd); if (r < 0) goto err_release_onenand; platform_set_drvdata(pdev, c); return 0; err_release_onenand: onenand_release(&c->mtd); err_release_dma: if (c->dma_channel != -1) omap_free_dma(c->dma_channel); if (c->gpio_irq) free_irq(gpio_to_irq(c->gpio_irq), c); err_release_gpio: if (c->gpio_irq) gpio_free(c->gpio_irq); err_iounmap: 
iounmap(c->onenand.base); err_release_mem_region: release_mem_region(c->phys_base, ONENAND_IO_SIZE); err_free_cs: gpmc_cs_free(c->gpmc_cs); err_kfree: kfree(c); return r; }
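/*
 * [Illustrative sketch, generic idiom; all names below are hypothetical.]
 * Both omap2_onenand_probe() variants above rely on the usual kernel
 * "goto ladder": resources are acquired in a fixed order, and the labels
 * at the bottom release them in exactly the reverse order, so every error
 * path frees only what was already acquired.  The standalone analogue
 * below shows the shape of that pattern.
 */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *h) { printf("release %s\n", what); free(h); }

static int probe_like(void)
{
	void *region, *mapping, *irq;
	int err = -1;

	region = acquire("mem region");
	if (!region)
		goto out;
	mapping = acquire("ioremap");
	if (!mapping)
		goto err_region;
	irq = acquire("irq");
	if (!irq)
		goto err_mapping;

	/* a real probe() would keep these until remove(); released here
	 * only so this demo does not leak */
	release("irq", irq);
	release("ioremap", mapping);
	release("mem region", region);
	return 0;

err_mapping:
	release("ioremap", mapping);
err_region:
	release("mem region", region);
out:
	return err;
}

int main(void)
{
	return probe_like() ? EXIT_FAILURE : EXIT_SUCCESS;
}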
static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, u8 rndis_mode, dma_addr_t dma_addr, u32 len) { struct tusb_omap_dma_ch *chdat = to_chdat(channel); struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; struct musb *musb = chdat->musb; struct device *dev = musb->controller; struct musb_hw_ep *hw_ep = chdat->hw_ep; void __iomem *mbase = musb->mregs; void __iomem *ep_conf = hw_ep->conf; dma_addr_t fifo = hw_ep->fifo_sync; struct omap_dma_channel_params dma_params; u32 dma_remaining; int src_burst, dst_burst; u16 csr; int ch; s8 dmareq; s8 sync_dev; if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz)) return false; /* * HW issue #10: Async dma will eventually corrupt the XFR_SIZE * register which will cause missed DMA interrupt. We could try to * use a timer for the callback, but it is unsafe as the XFR_SIZE * register is corrupt, and we won't know if the DMA worked. */ if (dma_addr & 0x2) return false; /* * Because of HW issue #10, it seems like mixing sync DMA and async * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before * using the channel for DMA. */ if (chdat->tx) dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); else dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); if (dma_remaining) { dev_dbg(musb->controller, "Busy %s dma ch%i, not using: %08x\n", chdat->tx ? "tx" : "rx", chdat->ch, dma_remaining); return false; } chdat->transfer_len = len & ~0x1f; if (len < packet_sz) chdat->transfer_packet_sz = chdat->transfer_len; else chdat->transfer_packet_sz = packet_sz; if (tusb_dma->multichannel) { ch = chdat->ch; dmareq = chdat->dmareq; sync_dev = chdat->sync_dev; } else { if (tusb_omap_use_shared_dmareq(chdat) != 0) { dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum); return false; } if (tusb_dma->ch < 0) { /* REVISIT: This should get blocked earlier, happens * with MSC ErrorRecoveryTest */ WARN_ON(1); return false; } ch = tusb_dma->ch; dmareq = tusb_dma->dmareq; sync_dev = tusb_dma->sync_dev; omap_set_dma_callback(ch, tusb_omap_dma_cb, channel); } chdat->packet_sz = packet_sz; chdat->len = len; channel->actual_len = 0; chdat->dma_addr = dma_addr; channel->status = MUSB_DMA_STATUS_BUSY; /* Since we're recycling dma areas, we need to clean or invalidate */ if (chdat->tx) dma_map_single(dev, phys_to_virt(dma_addr), len, DMA_TO_DEVICE); else dma_map_single(dev, phys_to_virt(dma_addr), len, DMA_FROM_DEVICE); /* Use 16-bit transfer if dma_addr is not 32-bit aligned */ if ((dma_addr & 0x3) == 0) { dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; dma_params.elem_count = 8; /* Elements in frame */ } else { dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; dma_params.elem_count = 16; /* Elements in frame */ fifo = hw_ep->fifo_async; } dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", chdat->epnum, chdat->tx ? 
"tx" : "rx", ch, dma_addr, chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz); /* * Prepare omap DMA for transfer */ if (chdat->tx) { dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; dma_params.src_start = (unsigned long)dma_addr; dma_params.src_ei = 0; dma_params.src_fi = 0; dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX; dma_params.dst_start = (unsigned long)fifo; dma_params.dst_ei = 1; dma_params.dst_fi = -31; /* Loop 32 byte window */ dma_params.trigger = sync_dev; dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; dma_params.src_or_dst_synch = 0; /* Dest sync */ src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */ dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */ } else { dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; dma_params.src_start = (unsigned long)fifo; dma_params.src_ei = 1; dma_params.src_fi = -31; /* Loop 32 byte window */ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; dma_params.dst_start = (unsigned long)dma_addr; dma_params.dst_ei = 0; dma_params.dst_fi = 0; dma_params.trigger = sync_dev; dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; dma_params.src_or_dst_synch = 1; /* Source sync */ src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */ dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ } dev_dbg(musb->controller, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", chdat->epnum, chdat->tx ? "tx" : "rx", (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, ((dma_addr & 0x3) == 0) ? "sync" : "async", dma_params.src_start, dma_params.dst_start); omap_set_dma_params(ch, &dma_params); omap_set_dma_src_burst_mode(ch, src_burst); omap_set_dma_dest_burst_mode(ch, dst_burst); omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED); /* * Prepare MUSB for DMA transfer */ if (chdat->tx) { musb_ep_select(mbase, chdat->epnum); csr = musb_readw(hw_ep->regs, MUSB_TXCSR); csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); csr &= ~MUSB_TXCSR_P_UNDERRUN; musb_writew(hw_ep->regs, MUSB_TXCSR, csr); } else { musb_ep_select(mbase, chdat->epnum); csr = musb_readw(hw_ep->regs, MUSB_RXCSR); csr |= MUSB_RXCSR_DMAENAB; csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); musb_writew(hw_ep->regs, MUSB_RXCSR, csr | MUSB_RXCSR_P_WZC_BITS); } /* * Start DMA transfer */ omap_start_dma(ch); if (chdat->tx) { /* Send transfer_packet_sz packets at a time */ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, chdat->transfer_packet_sz); musb_writel(ep_conf, TUSB_EP_TX_OFFSET, TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); } else { /* Receive transfer_packet_sz packets at a time */ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, chdat->transfer_packet_sz << 16); musb_writel(ep_conf, TUSB_EP_RX_OFFSET, TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); } return true; }
int omap_vout_prepare_vrfb(struct omap_vout_device *vout, struct videobuf_buffer *vb) { dma_addr_t dmabuf; struct vid_vrfb_dma *tx; enum dss_rotation rotation; u32 dest_frame_index = 0, src_element_index = 0; u32 dest_element_index = 0, src_frame_index = 0; u32 elem_count = 0, frame_count = 0, pixsize = 2; if (!is_rotation_enabled(vout)) return 0; dmabuf = vout->buf_phy_addr[vb->i]; /* If rotation is enabled, copy input buffer into VRFB * memory space using DMA. We are copying input buffer * into VRFB memory space of desired angle and DSS will * read image VRFB memory for 0 degree angle */ pixsize = vout->bpp * vout->vrfb_bpp; /* * DMA transfer in double index mode */ /* Frame index */ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) - (vout->pix.width * vout->bpp)) + 1; /* Source and destination parameters */ src_element_index = 0; src_frame_index = 0; dest_element_index = 1; /* Number of elements per frame */ elem_count = vout->pix.width * vout->bpp; frame_count = vout->pix.height; tx = &vout->vrfb_dma_tx; tx->tx_status = 0; omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32, (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT, tx->dev_id, 0x0); /* src_port required only for OMAP1 */ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, dmabuf, src_element_index, src_frame_index); /*set dma source burst mode for VRFB */ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16); rotation = calc_rotation(vout); /* dest_port required only for OMAP1 */ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX, vout->vrfb_context[vb->i].paddr[0], dest_element_index, dest_frame_index); /*set dma dest burst mode for VRFB */ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16); omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0); omap_start_dma(tx->dma_ch); wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1, VRFB_TX_TIMEOUT); if (tx->tx_status == 0) { omap_stop_dma(tx->dma_ch); return -EINVAL; } /* Store buffers physical address into an array. Addresses * from this array will be used to configure DSS */ vout->queued_buf_addr[vb->i] = (u8 *) vout->vrfb_context[vb->i].paddr[rotation]; return 0; }
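/*
 * [Illustrative sketch with a hypothetical helper.]  omap_vout_prepare_vrfb()
 * copies each image line into the fixed-width VRFB aperture with a
 * double-indexed destination: dest_element_index = 1 keeps writes contiguous
 * within a line, and dest_frame_index skips the padding between the end of
 * one image line and the start of the next VRFB line, i.e.
 * (MAX_PIXELS_PER_LINE * pixsize) - (width * bpp) + 1.  The helper below
 * just evaluates that expression for one concrete case; the value 2048 for
 * MAX_PIXELS_PER_LINE is the VRFB virtual line width assumed here.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_PIXELS_PER_LINE	2048	/* VRFB virtual line width in pixels (assumed) */

static int vrfb_dest_frame_index(unsigned width, unsigned bpp, unsigned vrfb_bpp)
{
	unsigned pixsize = bpp * vrfb_bpp;	/* bytes per pixel in the VRFB view */

	return (MAX_PIXELS_PER_LINE * pixsize) - (width * bpp) + 1;
}

int main(void)
{
	/* 640-pixel-wide RGB565 source (2 bytes/pixel), vrfb_bpp = 1 */
	int fi = vrfb_dest_frame_index(640, 2, 1);

	/* 2048*2 - 640*2 + 1: jump over the unused tail of each VRFB line */
	assert(fi == 2817);
	printf("dest_frame_index = %d\n", fi);
	return 0;
}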
static int aes_dma_init(struct aes_hwa_ctx *ctx) { int err = -ENOMEM; struct omap_dma_channel_params dma_params; ctx->dma_lch_out = -1; ctx->dma_lch_in = -1; ctx->buflen = PAGE_SIZE; ctx->buflen &= ~(AES_BLOCK_SIZE - 1); dprintk(KERN_INFO "aes_dma_init(%p)\n", ctx); /* Allocate and map cache buffers */ ctx->buf_in = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_in, GFP_KERNEL); if (!ctx->buf_in) { dprintk(KERN_ERR "SMC: Unable to alloc AES in cache buffer\n"); return -ENOMEM; } ctx->buf_out = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_out, GFP_KERNEL); if (!ctx->buf_out) { dprintk(KERN_ERR "SMC: Unable to alloc AES out cache buffer\n"); dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in); return -ENOMEM; } /* Request DMA channels */ err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback, ctx, &ctx->dma_lch_in); if (err) { dprintk(KERN_ERR "SMC: Unable to request AES RX DMA channel\n"); goto err_dma_in; } err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback, ctx, &ctx->dma_lch_out); if (err) { dprintk(KERN_ERR "SMC: Unable to request AES TX DMA channel\n"); goto err_dma_out; } dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES; dma_params.src_ei = 0; dma_params.src_fi = 0; dma_params.dst_ei = 0; dma_params.dst_fi = 0; dma_params.read_prio = 0; dma_params.write_prio = 0; dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; /* IN */ dma_params.trigger = ctx->dma_in; dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC; dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60; dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; omap_set_dma_params(ctx->dma_lch_in, &dma_params); omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_data_pack(ctx->dma_lch_in, 1); /* OUT */ dma_params.trigger = ctx->dma_out; dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dma_params.src_start = AES1_REGS_HW_ADDR + 0x60; dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; omap_set_dma_params(ctx->dma_lch_out, &dma_params); omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1); omap_dma_base = ioremap(0x4A056000, 0x1000); if (!omap_dma_base) { printk(KERN_ERR "SMC: Unable to ioremap DMA registers\n"); goto err_dma_out; } dprintk(KERN_INFO "aes_dma_init(%p) configured DMA channels" "(RX = %d, TX = %d)\n", ctx, ctx->dma_lch_in, ctx->dma_lch_out); return 0; err_dma_out: omap_free_dma(ctx->dma_lch_in); err_dma_in: dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in); dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out); return err; }
static int aes_dma_start(struct aes_hwa_ctx *ctx) { int err, fast = 0, in, out; size_t count; dma_addr_t addr_in, addr_out; struct omap_dma_channel_params dma_params; struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req)); static size_t last_count; unsigned long flags; in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32)); out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32)); fast = in && out; if (fast) { count = min(ctx->total, sg_dma_len(ctx->in_sg)); count = min(count, sg_dma_len(ctx->out_sg)); if (count != ctx->total) return -EINVAL; /* Only call dma_map_sg if it has not yet been done */ if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) { err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE); if (!err) return -EINVAL; err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE); if (!err) { dma_unmap_sg( NULL, ctx->in_sg, 1, DMA_TO_DEVICE); return -EINVAL; } } ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE; addr_in = sg_dma_address(ctx->in_sg); addr_out = sg_dma_address(ctx->out_sg); ctx->flags |= FLAGS_FAST; } else { count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in, ctx->buflen, ctx->total, 0); addr_in = ctx->dma_addr_in; addr_out = ctx->dma_addr_out; ctx->flags &= ~FLAGS_FAST; } ctx->total -= count; /* Configure HWA */ tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0); OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG) | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT); ctx->dma_size = count; if (!fast) dma_sync_single_for_device(NULL, addr_in, count, DMA_TO_DEVICE); dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; dma_params.frame_count = count / AES_BLOCK_SIZE; dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES; dma_params.src_ei = 0; dma_params.src_fi = 0; dma_params.dst_ei = 0; dma_params.dst_fi = 0; dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; dma_params.read_prio = 0; dma_params.write_prio = 0; /* IN */ dma_params.trigger = ctx->dma_in; dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC; dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60; dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.src_start = addr_in; dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; if (reconfigure_dma) { omap_set_dma_params(ctx->dma_lch_in, &dma_params); omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_data_pack(ctx->dma_lch_in, 1); } else { if (last_count != count) omap_set_dma_transfer_params(ctx->dma_lch_in, dma_params.data_type, dma_params.elem_count, dma_params.frame_count, dma_params.sync_mode, dma_params.trigger, dma_params.src_or_dst_synch); /* Configure input start address */ __raw_writel(dma_params.src_start, omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c)); } /* OUT */ dma_params.trigger = ctx->dma_out; dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dma_params.src_start = AES1_REGS_HW_ADDR + 0x60; dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.dst_start = addr_out; dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; if (reconfigure_dma) { omap_set_dma_params(ctx->dma_lch_out, &dma_params); omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1); reconfigure_dma = false; } else { if (last_count != count) { omap_set_dma_transfer_params(ctx->dma_lch_out, 
dma_params.data_type, dma_params.elem_count, dma_params.frame_count, dma_params.sync_mode, dma_params.trigger, dma_params.src_or_dst_synch); last_count = count; } /* Configure output start address */ __raw_writel(dma_params.dst_start, omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0)); } /* Is this really needed? */ omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ); omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ); wmb(); omap_start_dma(ctx->dma_lch_in); omap_start_dma(ctx->dma_lch_out); spin_lock_irqsave(&ctx->lock, flags); if (ctx->next_req) { struct ablkcipher_request *req = ablkcipher_request_cast(ctx->next_req); if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) { err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE); if (!err) { /* Silently fail for now... */ spin_unlock_irqrestore(&ctx->lock, flags); return 0; } err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE); if (!err) { dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE); /* Silently fail for now... */ spin_unlock_irqrestore(&ctx->lock, flags); return 0; } ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE; ctx->next_req = NULL; } } if (ctx->backlog) { ctx->backlog->complete(ctx->backlog, -EINPROGRESS); ctx->backlog = NULL; } spin_unlock_irqrestore(&ctx->lock, flags); return 0; }
/* *Static function, perform AES encryption/decryption using the DMA for data *transfer. * *inputs: src : pointer of the input data to process * nb_blocks : number of block to process * dma_use : PUBLIC_CRYPTO_DMA_USE_IRQ (use irq to monitor end of DMA) * | PUBLIC_CRYPTO_DMA_USE_POLLING (poll the end of DMA) *output: dest : pointer of the output data (can be eq to src) */ static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, u32 ctrl, bool is_kernel) { /* *Note: The DMA only sees physical addresses ! */ int dma_ch0; int dma_ch1; struct omap_dma_channel_params ch0_parameters; struct omap_dma_channel_params ch1_parameters; u32 length = nb_blocks * AES_BLOCK_SIZE; u32 length_loop = 0; u32 nb_blocksLoop = 0; struct tf_device *dev = tf_get_device(); dprintk(KERN_INFO "%s: In=0x%08x, Out=0x%08x, Len=%u\n", __func__, (unsigned int)src, (unsigned int)dest, (unsigned int)length); /*lock the DMA */ while (!mutex_trylock(&dev->sm.dma_mutex)) cpu_relax(); if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { mutex_unlock(&dev->sm.dma_mutex); return false; } if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { omap_free_dma(dma_ch0); mutex_unlock(&dev->sm.dma_mutex); return false; } while (length > 0) { /* * At this time, we are sure that the DMAchannels *are available and not used by other public crypto operation */ /*DMA used for Input and Output */ OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG) | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT); /*check length */ if (length <= dev->dma_buffer_length) length_loop = length; else length_loop = dev->dma_buffer_length; /*The length is always a multiple of the block size */ nb_blocksLoop = length_loop / AES_BLOCK_SIZE; /* * Copy the data from the user input buffer into a preallocated * buffer which has correct properties from efficient DMA * transfers. 
*/ if (!is_kernel) { if (copy_from_user( dev->dma_buffer, src, length_loop)) { omap_free_dma(dma_ch0); omap_free_dma(dma_ch1); mutex_unlock(&dev->sm.dma_mutex); return false; } } else { memcpy(dev->dma_buffer, src, length_loop); } /*DMA1: Mem -> AES */ tf_dma_set_channel_common_params(&ch0_parameters, nb_blocksLoop, DMA_CEN_Elts_per_Frame_AES, AES1_REGS_HW_ADDR + 0x60, (u32)dev->dma_buffer_phys, OMAP44XX_DMA_AES1_P_DATA_IN_REQ); ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC; ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT; ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC; dprintk(KERN_INFO "%s: omap_set_dma_params(ch0)\n", __func__); omap_set_dma_params(dma_ch0, &ch0_parameters); omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_data_pack(dma_ch0, 1); /*DMA2: AES -> Mem */ tf_dma_set_channel_common_params(&ch1_parameters, nb_blocksLoop, DMA_CEN_Elts_per_Frame_AES, (u32)dev->dma_buffer_phys, AES1_REGS_HW_ADDR + 0x60, OMAP44XX_DMA_AES1_P_DATA_OUT_REQ); ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT; ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC; ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dprintk(KERN_INFO "%s: omap_set_dma_params(ch1)\n", __func__); omap_set_dma_params(dma_ch1, &ch1_parameters); omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_data_pack(dma_ch1, 1); wmb(); dprintk(KERN_INFO "%s: Start DMA channel %d\n", __func__, (unsigned int)dma_ch1); tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ); dprintk(KERN_INFO "%s: Start DMA channel %d\n", __func__, (unsigned int)dma_ch0); tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ); dprintk(KERN_INFO "%s: Waiting for IRQ\n", __func__); tf_dma_wait(2); /*Unset DMA synchronisation requests */ OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG) & (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT) & (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT)); omap_clear_dma(dma_ch0); omap_clear_dma(dma_ch1); /* *The dma transfer is complete */ pr_info("%s completing\n", __func__); #ifdef CONFIG_TF_DRIVER_FAULT_INJECTION tf_aes_fault_injection(ctrl, dev->dma_buffer); #endif /*The DMA output is in the preallocated aligned buffer *and needs to be copied to the output buffer.*/ if (!is_kernel) { if (copy_to_user( dest, dev->dma_buffer, length_loop)) { omap_free_dma(dma_ch0); omap_free_dma(dma_ch1); mutex_unlock(&dev->sm.dma_mutex); return false; } } else { memcpy(dest, dev->dma_buffer, length_loop); } src += length_loop; dest += length_loop; length -= length_loop; } /*For safety reasons, let's clean the working buffer */ memset(dev->dma_buffer, 0, length_loop); /*release the DMA */ omap_free_dma(dma_ch0); omap_free_dma(dma_ch1); mutex_unlock(&dev->sm.dma_mutex); dprintk(KERN_INFO "%s: Success\n", __func__); return true; }
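/*
 * [Illustrative sketch with hypothetical names.]  tf_aes_update_dma() never
 * maps the caller's buffer for DMA; it streams the data through a fixed,
 * DMA-friendly bounce buffer (dev->dma_buffer), handling
 * min(remaining, dma_buffer_length) bytes per iteration -- always a whole
 * number of AES blocks -- and copying in and out around each hardware pass.
 * The loop below shows that chunking structure with a plain memcpy standing
 * in for the two DMA transfers through the AES engine.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE	16
#define BOUNCE_LEN	(4 * AES_BLOCK_SIZE)	/* stand-in for dma_buffer_length */

static void process_chunked(const unsigned char *src, unsigned char *dst,
			    unsigned len /* multiple of AES_BLOCK_SIZE */)
{
	unsigned char bounce[BOUNCE_LEN];

	while (len > 0) {
		unsigned chunk = len < BOUNCE_LEN ? len : BOUNCE_LEN;

		memcpy(bounce, src, chunk);	/* copy_from_user() in the driver */
		/* ...DMA in/out transfers through the AES engine happen here... */
		memcpy(dst, bounce, chunk);	/* copy_to_user() in the driver */

		src += chunk;
		dst += chunk;
		len -= chunk;
	}
}

int main(void)
{
	unsigned char in[10 * AES_BLOCK_SIZE], out[10 * AES_BLOCK_SIZE];

	memset(in, 0xab, sizeof(in));
	process_chunked(in, out, sizeof(in));
	assert(memcmp(in, out, sizeof(in)) == 0);
	printf("processed %zu bytes in %u-byte chunks\n", sizeof(in), (unsigned)BOUNCE_LEN);
	return 0;
}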
static int omapvout_dss_perform_vrfb_dma(struct omapvout_device *vout, int buf_idx, bool vrfb_cfg) { int rc = 0; int rot = 0; struct omapvout_dss_vrfb *vrfb; u32 src_paddr; u32 dst_paddr; /* It is assumed that the caller has locked the vout mutex */ if (vout->dss->vrfb.req_status != DMA_CHAN_ALLOTED) return -EINVAL; vrfb = &vout->dss->vrfb; if (vrfb_cfg) { enum omap_color_mode dss_fmt; int bytespp; int w, h; u32 fmt = vout->pix.pixelformat; w = vout->crop.width; h = vout->crop.height; dss_fmt = omapvout_dss_color_mode(vout->pix.pixelformat); omap_vrfb_setup(&vrfb->ctx[0], vrfb->phy_addr[0], w, h, dss_fmt, vout->rotation); omap_vrfb_setup(&vrfb->ctx[1], vrfb->phy_addr[1], w, h, dss_fmt, vout->rotation); bytespp = omapvout_dss_format_bytespp(vout->pix.pixelformat); vrfb->en = (w * bytespp) / 4; /* 32 bit ES */ vrfb->fn = h; vrfb->dst_ei = 1; if (fmt == V4L2_PIX_FMT_YUYV || fmt == V4L2_PIX_FMT_UYVY) { vrfb->dst_fi = (OMAP_VRFB_LINE_LEN * bytespp * 2) - (vrfb->en * 4) + 1; } else { vrfb->dst_fi = (OMAP_VRFB_LINE_LEN * bytespp) - (vrfb->en * 4) + 1; } } switch (vout->rotation) { case 1: rot = 3; break; case 3: rot = 1; break; default: rot = vout->rotation; break; } src_paddr = vout->queue.bufs[buf_idx]->baddr; dst_paddr = vrfb->ctx[vrfb->next].paddr[rot]; omap_set_dma_transfer_params(vrfb->dma_ch, OMAP_DMA_DATA_TYPE_S32, vrfb->en, vrfb->fn, OMAP_DMA_SYNC_ELEMENT, vrfb->dma_id, 0x0); omap_set_dma_src_params(vrfb->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, src_paddr, 0, 0); omap_set_dma_src_burst_mode(vrfb->dma_ch, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_params(vrfb->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX, dst_paddr, vrfb->dst_ei, vrfb->dst_fi); omap_set_dma_dest_burst_mode(vrfb->dma_ch, OMAP_DMA_DATA_BURST_16); omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0); vrfb->dma_complete = false; omap_start_dma(vrfb->dma_ch); wait_event_interruptible_timeout(vrfb->wait, vrfb->dma_complete, VRFB_TX_TIMEOUT); if (!vrfb->dma_complete) { DBG("VRFB DMA timeout\n"); omap_stop_dma(vrfb->dma_ch); return -EINVAL; } return rc; }