static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); int i; BUG_ON(in_interrupt()); dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, &buf->vb, buf->vb.baddr, buf->vb.bsize); /* * This waits until this buffer is out of danger, i.e., until it is no * longer in STATE_QUEUED or STATE_ACTIVE */ videobuf_waiton(vq, &buf->vb, 0, 0); videobuf_dma_unmap(vq->dev, dma); videobuf_dma_free(dma); for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) { if (buf->dmas[i].sg_cpu) dma_free_coherent(ici->v4l2_dev.dev, buf->dmas[i].sg_size, buf->dmas[i].sg_cpu, buf->dmas[i].sg_dma); buf->dmas[i].sg_cpu = NULL; } buf->vb.state = VIDEOBUF_NEEDS_INIT; }
/** * previewer_vbq_release - Videobuffer queue release * @q: Structure containing the videobuffer queue. * @vb: Structure containing the videobuffer used for previewer processing. **/ static void previewer_vbq_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct prev_fh *fh = q->priv_data; struct prev_device *device = fh->device; if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ispmmu_unmap(device->isp_addr_read); device->isp_addr_read = 0; spin_lock(&device->inout_vbq_lock); vb->state = VIDEOBUF_NEEDS_INIT; spin_unlock(&device->inout_vbq_lock); } else if (q->type == V4L2_BUF_TYPE_PRIVATE) { ispmmu_unmap(device->isp_addr_lsc); device->isp_addr_lsc = 0; spin_lock(&device->lsc_vbq_lock); vb->state = VIDEOBUF_NEEDS_INIT; spin_unlock(&device->lsc_vbq_lock); } if (vb->memory != V4L2_MEMORY_MMAP) { videobuf_dma_unmap(q, videobuf_to_dma(vb)); videobuf_dma_free(videobuf_to_dma(vb)); } dev_dbg(prev_dev, "previewer_vbq_release\n"); }
int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma) { struct videobuf_queue q; q.dev = dev; return videobuf_dma_unmap(&q, dma); }
int videobuf_pci_dma_unmap(struct pci_dev *pci,struct videobuf_dmabuf *dma) { struct videobuf_queue q; q.dev=pci; return (videobuf_dma_unmap(&q,dma)); }
/*
 * saa7134_dma_free - release the DMA mapping embedded in @buf and mark the
 * buffer as needing re-initialization.  May sleep, hence the in_interrupt()
 * check.
 */
void saa7134_dma_free(struct videobuf_queue *q,struct saa7134_buf *buf)
{
	struct videobuf_dmabuf *dma = &buf->vb.dma;

	BUG_ON(in_interrupt());

	/* wait until the buffer is no longer queued/active */
	videobuf_waiton(&buf->vb, 0, 0);

	videobuf_dma_unmap(q, dma);
	videobuf_dma_free(dma);

	buf->vb.state = STATE_NEEDS_INIT;
}
/*
 * saa7134_dma_free - tear down a capture buffer's DMA mapping and reset its
 * videobuf state.  Must be called from process context.
 */
void saa7134_dma_free(struct videobuf_queue *q,struct saa7134_buf *buf)
{
	struct videobuf_dmabuf *dmabuf;

	BUG_ON(in_interrupt());

	dmabuf = videobuf_to_dma(&buf->vb);

	/* block until the buffer leaves the queued/active states */
	videobuf_waiton(&buf->vb, 0, 0);

	videobuf_dma_unmap(q, dmabuf);
	videobuf_dma_free(dmabuf);

	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
void cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf) { struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); BUG_ON(in_interrupt()); videobuf_waiton(q, &buf->vb, 0, 0); videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc); buf->vb.state = VIDEOBUF_NEEDS_INIT; }
void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q, struct saa7146_buf *buf) { struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); DEB_EE(("dev:%p, buf:%p\n",dev,buf)); BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb,0,0); videobuf_dma_unmap(q, dma); videobuf_dma_free(dma); buf->vb.state = VIDEOBUF_NEEDS_INIT; }
/*
 * tw68_dma_free - release the scatter-gather DMA mapping and the RISC
 * program memory associated with a capture buffer, then mark the buffer
 * as needing re-initialization.
 *
 * Must be called from process context: videobuf_waiton() may sleep.
 * The #if blocks select the videobuf API signature matching the kernel
 * this driver is being built against.
 */
void tw68_dma_free(struct videobuf_queue *q, struct tw68_buf *buf)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

	if (core_debug & DBG_FLOW)
		printk(KERN_DEBUG "%s: called\n", __func__);
	BUG_ON(in_interrupt());

	/* pre-2.6.37 kernels: videobuf_waiton() takes no queue argument */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
	videobuf_waiton(&buf->vb, 0, 0);
#else
	videobuf_waiton(q, &buf->vb, 0, 0);
#endif
	/* pre-2.6.36 kernels: videobuf_dma_unmap() takes the queue, not the device */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,35)
	videobuf_dma_unmap(q, dma);
#else
	videobuf_dma_unmap(q->dev, dma);
#endif
	videobuf_dma_free(dma);
	/* if no risc area allocated, btcx_riscmem_free just returns */
	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
/*
 * dsp_buffer_free - tear down the audio DMA buffer of @chip.
 *
 * Unmaps and frees the videobuf DMA area, releases the RISC program
 * memory and the buffer descriptor, then clears the bookkeeping fields
 * so a stale pointer/size cannot be reused.  Expects that a buffer is
 * actually allocated (BUG otherwise).
 *
 * Always returns 0.
 */
static int dsp_buffer_free(struct cx23885_audio_dev *chip)
{
	BUG_ON(!chip->dma_size);

	dprintk(2, "Freeing buffer\n");
	videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc);
	videobuf_dma_free(chip->dma_risc);
	btcx_riscmem_free(chip->pci, &chip->buf->risc);
	kfree(chip->buf);

	/* invalidate the bookkeeping only after everything is released */
	chip->dma_risc = NULL;
	chip->dma_size = 0;

	return 0;
}
static void omap24xxcam_vbq_release(struct videobuf_queue *vbq, struct videobuf_buffer *vb) { struct videobuf_dmabuf *dma = videobuf_to_dma(vb); /* wait for buffer, especially to get out of the sgdma queue */ videobuf_waiton(vbq, vb, 0, 0); if (vb->memory == V4L2_MEMORY_MMAP) { dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen, dma->direction); dma->direction = DMA_NONE; } else { videobuf_dma_unmap(vbq->dev, videobuf_to_dma(vb)); videobuf_dma_free(videobuf_to_dma(vb)); } vb->state = VIDEOBUF_NEEDS_INIT; }
static void usbcam_videobuf_free(struct videobuf_queue *vq, struct usbcam_frame *framep) { struct videobuf_dmabuf *dma = usbframe_get_dmabuf(&framep->vbb); videobuf_waiton(&framep->vbb, 0, 0); videobuf_dma_unmap(vq, dma); videobuf_dma_free(dma); if (framep->vbb.state != STATE_NEEDS_INIT) { if (framep->vmap_base) { vunmap(framep->vmap_base); framep->vmap_base = NULL; framep->vmap_sof = NULL; } assert(list_empty(&framep->cap_links)); framep->vbb.state = STATE_NEEDS_INIT; } }
static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf, enum omap1_cam_vb_mode vb_mode) { struct videobuf_buffer *vb = &buf->vb; BUG_ON(in_interrupt()); videobuf_waiton(vq, vb, 0, 0); if (vb_mode == OMAP1_CAM_DMA_CONTIG) { videobuf_dma_contig_free(vq, vb); } else { struct soc_camera_device *icd = vq->priv_data; struct device *dev = icd->dev.parent; struct videobuf_dmabuf *dma = videobuf_to_dma(vb); videobuf_dma_unmap(dev, dma); videobuf_dma_free(dma); } vb->state = VIDEOBUF_NEEDS_INIT; }
/** * prev2resz_vbq_release - Videobuffer queue release */ static void prev2resz_vbq_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct prev2resz_fhdl *fhdl = q->priv_data; if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ispmmu_vunmap(fhdl->isp, fhdl->dst_buff_addr); fhdl->dst_buff_addr = 0; spin_lock(&fhdl->dst_vbq_lock); vb->state = VIDEOBUF_NEEDS_INIT; spin_unlock(&fhdl->dst_vbq_lock); } else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ispmmu_vunmap(fhdl->isp, fhdl->src_buff_addr); fhdl->src_buff_addr = 0; spin_lock(&fhdl->src_vbq_lock); vb->state = VIDEOBUF_NEEDS_INIT; spin_unlock(&fhdl->src_vbq_lock); } if (vb->memory != V4L2_MEMORY_MMAP) { videobuf_dma_unmap(q, videobuf_to_dma(vb)); videobuf_dma_free(videobuf_to_dma(vb)); } }
/*
 * tw686x_audio_create - set up the audio capture buffer for one channel.
 *
 * Initializes the per-device bookkeeping, allocates the capture buffer
 * (period_size * periods bytes) and DMA-maps it.  On any failure the
 * size bookkeeping is reset / the buffer is freed again before the error
 * is returned.  Returns 0 on success, a negative error code otherwise.
 */
int tw686x_audio_create(struct tw686x_adev *dev)
{
	unsigned int period_size, periods;
	int err;

	daprintk(DPRT_LEVEL0, dev, "%s()\n", __func__);

	mutex_init(&dev->lock);

	period_size = TW686X_AUDIO_PERIOD_SIZE;	/* fixed at 4096 */
	periods = 4;				/* minimum is 2 */

	daprintk(DPRT_LEVEL0, dev, "%s(bufsize=%d)\n", __func__,
		 period_size * periods);

	dev->blocks = periods;
	dev->blksize = period_size;
	dev->bufsize = period_size * periods;
	dev->period_idx = 0;
	dev->read_offset = 0;
	dev->substream = NULL;
	dev->card = NULL;

	err = tw686x_audio_buffer_init(dev);
	if (0 != err) {
		/* roll back the size bookkeeping on allocation failure */
		dev->blocks = 0;
		dev->blksize = 0;
		dev->bufsize = 0;
		return err;
	}

	/* pre-2.6.32 kernels name this videobuf_sg_dma_map() */
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
	if (0 != (err = videobuf_dma_map(&dev->chip->pci->dev, &dev->dma))) {
#else
	if (0 != (err = videobuf_sg_dma_map(&dev->chip->pci->dev, &dev->dma))) {
#endif
		tw686x_audio_buffer_free(dev);
		return err;
	}

	daprintk(DPRT_LEVEL0, dev,
		 "%s_%d: period_size %d, periods %d, sglen %d.\n",
		 __func__, dev->channel_id, period_size, periods,
		 dev->dma.sglen);

	return 0;
}

/*
 * tw686x_audio_free - undo tw686x_audio_create() for a channel that has an
 * attached substream: unmap the DMA area, free the buffer and detach the
 * substream.  A channel without a substream is left untouched.
 *
 * Always returns 0.
 */
int tw686x_audio_free(struct tw686x_adev *dev)
{
	if (dev->substream != NULL) {
		/* pre-2.6.32 kernels name this videobuf_sg_dma_unmap() */
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		videobuf_dma_unmap(&dev->chip->pci->dev, &dev->dma);
#else
		videobuf_sg_dma_unmap(&dev->chip->pci->dev, &dev->dma);
#endif
		tw686x_audio_buffer_free(dev);
		dev->substream = NULL;
	}

	return 0;
}
/**
 * rsz_start - Enables Resizer Wrapper
 * @arg: Currently not used.
 * @fh: Per-open handle carrying the channel configuration, the multipass
 *      state and the videobuffer queue.
 *
 * Submits a resizing task specified by the rsz_resize structure. The call can
 * either be blocked until the task is completed or returned immediately based
 * on the value of the blocking argument in the rsz_resize structure. If it is
 * blocking, the status of the task can be checked by calling ioctl
 * RSZ_G_STATUS. Only one task can be outstanding for each logical channel.
 *
 * Returns 0 if successful, or -EINVAL if could not set callback for RSZR
 * event or the state of the channel is not configured.
 **/
int rsz_start(int *arg, struct rsz_fh *fh)
{
	struct channel_config *rsz_conf_chan = fh->config;
	struct rsz_mult *multipass = fh->multipass;
	struct videobuf_queue *q = &fh->vbq;
	int ret;

	if (rsz_conf_chan->config_state) {
		dev_err(rsz_device, "State not configured \n");
		goto err_einval;
	}

	rsz_conf_chan->status = CHANNEL_BUSY;

	rsz_hardware_setup(rsz_conf_chan);

	if (isp_set_callback(CBK_RESZ_DONE, rsz_isr, (void *) NULL,
							(void *)NULL)) {
		dev_err(rsz_device, "No callback for RSZR\n");
		goto err_einval;
	}
mult:
	/* one pass of the resizer; looped while multipass stays active */
	device_config->compl_isr.done = 0;

	ispresizer_enable(1);

	ret = wait_for_completion_interruptible(&device_config->compl_isr);
	if (ret != 0) {
		/* interrupted: fall back to an uninterruptible wait so the
		 * hardware pass still finishes before we tear down */
		dev_dbg(rsz_device, "Unexpected exit from "
				"wait_for_completion_interruptible\n");
		wait_for_completion(&device_config->compl_isr);
	}

	if (multipass->active) {
		rsz_set_multipass(multipass, rsz_conf_chan);
		goto mult;
	}

	/* drop the ISP MMU mappings of the source/destination buffers */
	if (fh->isp_addr_read) {
		ispmmu_unmap(fh->isp_addr_read);
		fh->isp_addr_read = 0;
	}
	if (fh->isp_addr_write) {
		ispmmu_unmap(fh->isp_addr_write);
		fh->isp_addr_write = 0;
	}

	rsz_conf_chan->status = CHANNEL_FREE;
	q->bufs[rsz_conf_chan->input_buf_index]->state = VIDEOBUF_NEEDS_INIT;
	q->bufs[rsz_conf_chan->output_buf_index]->state = VIDEOBUF_NEEDS_INIT;
	rsz_conf_chan->register_config.rsz_sdr_outadd = 0;
	rsz_conf_chan->register_config.rsz_sdr_inadd = 0;

	/* Unmap and free the DMA memory allocated for buffers */
	videobuf_dma_unmap(q, videobuf_to_dma(
				q->bufs[rsz_conf_chan->input_buf_index]));
	videobuf_dma_unmap(q, videobuf_to_dma(
				q->bufs[rsz_conf_chan->output_buf_index]));
	videobuf_dma_free(videobuf_to_dma(
				q->bufs[rsz_conf_chan->input_buf_index]));
	videobuf_dma_free(videobuf_to_dma(
				q->bufs[rsz_conf_chan->output_buf_index]));

	isp_unset_callback(CBK_RESZ_DONE);

	return 0;
err_einval:
	return -EINVAL;
}