/** * previewer_vbq_release - Videobuffer queue release * @q: Structure containing the videobuffer queue. * @vb: Structure containing the videobuffer used for previewer processing. **/ static void previewer_vbq_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct prev_fh *fh = q->priv_data; struct prev_device *device = fh->device; if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ispmmu_unmap(device->isp_addr_read); device->isp_addr_read = 0; spin_lock(&device->inout_vbq_lock); vb->state = VIDEOBUF_NEEDS_INIT; spin_unlock(&device->inout_vbq_lock); } else if (q->type == V4L2_BUF_TYPE_PRIVATE) { ispmmu_unmap(device->isp_addr_lsc); device->isp_addr_lsc = 0; spin_lock(&device->lsc_vbq_lock); vb->state = VIDEOBUF_NEEDS_INIT; spin_unlock(&device->lsc_vbq_lock); } if (vb->memory != V4L2_MEMORY_MMAP) { videobuf_dma_unmap(q, videobuf_to_dma(vb)); videobuf_dma_free(videobuf_to_dma(vb)); } dev_dbg(prev_dev, "previewer_vbq_release\n"); }
/**
 * unmap_buffer_from_kernel - Unmaps memory from isp and kernel address space.
 * @ibuffer: Pointer to internal buffer to be unmapped.
 *
 * Releases the buffer's ISP MMU mapping (if one exists) and then releases
 * the pinned user pages and the page-pointer array that were set up when
 * the buffer was mapped.
 *
 * No return value.
 **/
void unmap_buffer_from_kernel(struct hp3a_internal_buffer *ibuffer)
{
	if (ibuffer->isp_addr) {
#if defined(CONFIG_VIDEO_OLDOMAP3)
		/* Old ISP MMU API reports unmap failure; log it but keep going. */
		if (ispmmu_unmap(ibuffer->isp_addr) != 0) {
			printk(KERN_ERR "Error unmapping from ispmmu (0x%x)!",
				(unsigned int)ibuffer->isp_addr);
		}
#else
		/* Newer ISP MMU API returns void; nothing to check here. */
		ispmmu_vunmap(ibuffer->isp_addr);
#endif
		ibuffer->isp_addr = 0;
	}

	if (ibuffer->pages != NULL && ibuffer->buffer_size > 0) {
		/* Unpin the user pages, then free the page-pointer array. */
		unmap_user_memory(ibuffer->pages,
			NR_PAGES(ibuffer->user_addr, ibuffer->buffer_size));
		kfree(ibuffer->pages);
		ibuffer->pages = NULL;
	}
}
/** * rsz_start - Enables Resizer Wrapper * @arg: Currently not used. * @device: Structure containing ISP resizer wrapper global information * * Submits a resizing task specified by the rsz_resize structure. The call can * either be blocked until the task is completed or returned immediately based * on the value of the blocking argument in the rsz_resize structure. If it is * blocking, the status of the task can be checked by calling ioctl * RSZ_G_STATUS. Only one task can be outstanding for each logical channel. * * Returns 0 if successful, or -EINVAL if could not set callback for RSZR IRQ * event or the state of the channel is not configured. **/ int rsz_start(int *arg, struct rsz_fh *fh) { struct channel_config *rsz_conf_chan = fh->config; struct rsz_mult *multipass = fh->multipass; struct videobuf_queue *q = &fh->vbq; int ret; if (rsz_conf_chan->config_state) { dev_err(rsz_device, "State not configured \n"); goto err_einval; } rsz_conf_chan->status = CHANNEL_BUSY; rsz_hardware_setup(rsz_conf_chan); if (isp_set_callback(CBK_RESZ_DONE, rsz_isr, (void *) NULL, (void *)NULL)) { dev_err(rsz_device, "No callback for RSZR\n"); goto err_einval; } mult: device_config->compl_isr.done = 0; ispresizer_enable(1); ret = wait_for_completion_interruptible(&device_config->compl_isr); if (ret != 0) { dev_dbg(rsz_device, "Unexpected exit from " "wait_for_completion_interruptible\n"); wait_for_completion(&device_config->compl_isr); } if (multipass->active) { rsz_set_multipass(multipass, rsz_conf_chan); goto mult; } if (fh->isp_addr_read) { ispmmu_unmap(fh->isp_addr_read); fh->isp_addr_read = 0; } if (fh->isp_addr_write) { ispmmu_unmap(fh->isp_addr_write); fh->isp_addr_write = 0; } rsz_conf_chan->status = CHANNEL_FREE; q->bufs[rsz_conf_chan->input_buf_index]->state = VIDEOBUF_NEEDS_INIT; q->bufs[rsz_conf_chan->output_buf_index]->state = VIDEOBUF_NEEDS_INIT; rsz_conf_chan->register_config.rsz_sdr_outadd = 0; rsz_conf_chan->register_config.rsz_sdr_inadd = 0; /* Unmap and free the DMA 
memory allocated for buffers */ videobuf_dma_unmap(q, videobuf_to_dma( q->bufs[rsz_conf_chan->input_buf_index])); videobuf_dma_unmap(q, videobuf_to_dma( q->bufs[rsz_conf_chan->output_buf_index])); videobuf_dma_free(videobuf_to_dma( q->bufs[rsz_conf_chan->input_buf_index])); videobuf_dma_free(videobuf_to_dma( q->bufs[rsz_conf_chan->output_buf_index])); isp_unset_callback(CBK_RESZ_DONE); return 0; err_einval: return -EINVAL; }