void videobuf_vmalloc_free(struct videobuf_buffer *buf) { struct videobuf_vmalloc_memory *mem = buf->priv; /* mmapped memory can't be freed here, otherwise mmapped region would be released, while still needed. In this case, the memory release should happen inside videobuf_vm_close(). So, it should free memory only if the memory were allocated for read() operation. */ if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr) return; if (!mem) return; MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM); vfree(mem->vaddr); mem->vaddr = NULL; return; }
/*
 * Lock/prepare buffer memory for I/O (scatter-gather backend).
 *
 * For MMAP/USERPTR buffers with no user-space address, allocates a
 * kernel bounce buffer sized from vb->size.
 *
 * NOTE(review): this definition is truncated in the visible chunk --
 * the USERPTR path with a user address continues past it.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	int err, pages;
	dma_addr_t bus;
	struct videobuf_dma_sg_memory *mem = vb->priv;

	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
	case V4L2_MEMORY_USERPTR:
		if (0 == vb->baddr) {
			/* no userspace addr -- kernel bounce buffer */
			pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
			err = videobuf_dma_init_kernel(&mem->dma,
						       DMA_FROM_DEVICE,
						       pages);
			if (0 != err)
				return err;
		} else if (vb->memory == V4L2_MEMORY_USERPTR) {
int videobuf_dma_free(struct videobuf_dmabuf *dma) { MAGIC_CHECK(dma->magic,MAGIC_DMABUF); BUG_ON(dma->sglen); if (dma->pages) { int i; for (i=0; i < dma->nr_pages; i++) page_cache_release(dma->pages[i]); kfree(dma->pages); dma->pages = NULL; } vfree(dma->vmalloc); dma->vmalloc = NULL; dma->varea = NULL; if (dma->bus_addr) { dma->bus_addr = 0; } dma->direction = PCI_DMA_NONE; return 0; }
/* Free the vmalloc'ed memory attached to @buf (read() buffers only). */
void videobuf_vmalloc_free(struct videobuf_buffer *buf)
{
	struct videobuf_vmalloc_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise the mmapped
	 * region would be released while still needed; that release
	 * happens inside videobuf_vm_close().  So only free memory
	 * that was allocated for the read() operation (USERPTR with
	 * no user-space address). */
	if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	vfree(mem->vaddr);
	mem->vaddr = NULL;

	return;
}
/*
 * Free all buffers on @q, failing with -EBUSY if any buffer is still
 * mmapped by user space.  Locking: caller holds q->vb_lock.
 */
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	unsigned int i;

	if (!q)
		return 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* First pass: refuse while any buffer is still mapped. */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (q->bufs[i] && q->bufs[i]->map)
			return -EBUSY;
	}

	/* Second pass: release and free every allocated buffer. */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (!q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}
/*
 * SIP session close callback.
 *
 * Invoked when the session terminates, either due to a local or
 * transport error (@err != 0) or a final SIP response (@msg != NULL).
 * When a transfer notification subscription exists (call->not), the
 * outcome is forwarded as a SIP fragment.  Media streams are stopped
 * and CALL_EVENT_CLOSED is reported with the textual reason.
 */
static void sipsess_close_handler(int err, const struct sip_msg *msg,
				  void *arg)
{
	struct call *call = arg;
	char reason[128] = "";

	MAGIC_CHECK(call);

	if (err) {
		/* Local/transport failure. */
		info("%s: session closed: %m\n", call->peer_uri, err);

		if (call->not) {
			(void)call_notify_sipfrag(call, 500, "%m", err);
		}
	}
	else if (msg) {
		/* Remote side sent a final response. */
		call->scode = msg->scode;

		(void)re_snprintf(reason, sizeof(reason), "%u %r",
				  msg->scode, &msg->reason);

		info("%s: session closed: %u %r\n", call->peer_uri,
		     msg->scode, &msg->reason);

		if (call->not) {
			(void)call_notify_sipfrag(call, msg->scode,
						  "%r", &msg->reason);
		}
	}
	else {
		info("%s: session closed\n", call->peer_uri);
	}

	call_stream_stop(call);
	call_event_handler(call, CALL_EVENT_CLOSED, reason);
}
/*
 * VMA close handler (scatter-gather backend).  Drops one mapping
 * reference; on the last unmap, detaches and releases every queue
 * buffer that belonged to this mapping.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	struct videobuf_dma_sg_memory *mem;
	int i;

	dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
		map->count,vma->vm_start,vma->vm_end);

	map->count--;
	if (0 == map->count) {
		dprintk(1,"munmap %p q=%p\n",map,q);
		mutex_lock(&q->vb_lock);
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;
			mem=q->bufs[i]->priv;

			if (!mem)
				continue;

			MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

			/* Only touch buffers owned by this mapping. */
			if (q->bufs[i]->map != map)
				continue;
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
			q->ops->buf_release(q,q->bufs[i]);
		}
		mutex_unlock(&q->vb_lock);
		kfree(map);
	}
	return;
}
static int __videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, struct v4l2_framebuffer *fbuf) { struct videobuf_marucam_memory *mem = vb->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_MARUCAM_MEM); switch (vb->memory) { case V4L2_MEMORY_MMAP: if (!mem->mapped) { marucam_err("The memory is not mmapped.\n"); return -EINVAL; } break; default: marucam_err("Memory method currently unsupported.\n"); return -EINVAL; } return 0; }
/*
 * Create a new Evas_GL handle bound to canvas @e.  Returns NULL on an
 * invalid canvas, allocation failure, or when the active engine has
 * no GL context support.
 */
EAPI Evas_GL *
evas_gl_new(Evas *e)
{
   Evas_GL *gl;

   /* Magic check: bails out (returning NULL) on a bad canvas. */
   MAGIC_CHECK(e, Evas, MAGIC_EVAS);
   return NULL;
   MAGIC_CHECK_END();

   gl = calloc(1, sizeof(Evas_GL));
   if (!gl) return NULL;

   gl->magic = MAGIC_EVAS_GL;
   gl->evas = eo_data_ref(e, EVAS_CLASS);

   if (!gl->evas->engine.func->gl_context_create)
     {
        ERR("Evas GL engine not available.");
        free(gl);
        return NULL;
     }

   return gl;
}
/*
 * Destroy @surf and remove it from @evas_gl's surface list.
 */
EAPI void
evas_gl_surface_destroy(Evas_GL *evas_gl, Evas_GL_Surface *surf)
{
   /* Magic check: silently returns on a bad Evas_GL handle. */
   MAGIC_CHECK(evas_gl, Evas_GL, MAGIC_EVAS_GL);
   return;
   MAGIC_CHECK_END();

   if (!surf)
     {
        ERR("Trying to destroy a NULL surface pointer!");
        return;
     }

   /* Ask the engine to tear down its side of the surface first. */
   evas_gl->evas->engine.func->gl_surface_destroy(evas_gl->evas->engine.data.output, surf->data);

   /* Unlink, then release the wrapper object. */
   evas_gl->surfaces = eina_list_remove(evas_gl->surfaces, surf);
   free(surf);
}
/*
 * Destroy @ctx and remove it from @evas_gl's context list.
 */
EAPI void
evas_gl_context_destroy(Evas_GL *evas_gl, Evas_GL_Context *ctx)
{
   /* Magic check: silently returns on a bad Evas_GL handle. */
   MAGIC_CHECK(evas_gl, Evas_GL, MAGIC_EVAS_GL);
   return;
   MAGIC_CHECK_END();

   if (!ctx)
     {
        ERR("Trying to destroy a NULL context pointer!");
        return;
     }

   /* Let the engine release its context first. */
   evas_gl->evas->engine.func->gl_context_destroy(evas_gl->evas->engine.data.output, ctx->data);

   /* Unlink, then free the wrapper. */
   evas_gl->contexts = eina_list_remove(evas_gl->contexts, ctx);
   free(ctx);
}
/* Locking: Only usage in bttv unsafe find way to remove */ int videobuf_queue_is_busy(struct videobuf_queue *q) { int i; MAGIC_CHECK(q->int_ops->magic,MAGIC_QTYPE_OPS); if (q->streaming) { dprintk(1,"busy: streaming active\n"); return 1; } if (q->reading) { dprintk(1,"busy: pending read #1\n"); return 1; } if (q->read_buf) { dprintk(1,"busy: pending read #2\n"); return 1; } for (i = 0; i < VIDEO_MAX_FRAME; i++) { if (NULL == q->bufs[i]) continue; if (q->bufs[i]->map) { dprintk(1,"busy: buffer #%d mapped\n",i); return 1; } if (q->bufs[i]->state == STATE_QUEUED) { dprintk(1,"busy: buffer #%d queued\n",i); return 1; } if (q->bufs[i]->state == STATE_ACTIVE) { dprintk(1,"busy: buffer #%d avtive\n",i); return 1; } } return 0; }
/*
 * Free all buffers on @q after asking the backend (mmap_free op) for
 * permission.  Locking: caller holds q->lock.
 */
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	unsigned int i;
	int rc;

	if (!q)
		return 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* The backend may veto the free (e.g. buffers still mapped). */
	rc = CALL(q, mmap_free, q);
	if (rc < 0)
		return rc;

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (!q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return rc;
}
/*
 * Return the object immediately below @obj in stacking order, skipping
 * objects marked for deletion; NULL when nothing valid lies below.
 * For a smart member the walk uses the parent's member inlist.
 */
EAPI Evas_Object *
evas_object_below_get(const Evas_Object *obj)
{
   MAGIC_CHECK(obj, Evas_Object, MAGIC_OBJ);
   return NULL;
   MAGIC_CHECK_END();
   if (obj->smart.parent)
     {
        do
          {
             /* Step to the previous sibling inside the smart parent. */
             obj = (Evas_Object *)((EINA_INLIST_GET(obj))->prev);
             if ((obj) && (!obj->delete_me)) return (Evas_Object *)obj;
          }
        while (obj);
        return NULL;
     }
   obj = evas_object_below_get_internal(obj);
   while (obj)
     {
        if (!obj->delete_me) return (Evas_Object *)obj;
        obj = evas_object_below_get_internal(obj);
     }
   return NULL;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) { struct videobuf_contig_pmem *mem; struct videobuf_mapping *map; int retval; unsigned long size; D("%s\n", __func__); /* create mapping + update buffer list */ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); if (!map) { pr_err("%s: kzalloc failed.\n", __func__); return -ENOMEM; } buf->map = map; map->q = q; buf->baddr = vma->vm_start; mem = buf->priv; D("mem = 0x%x\n", (u32)mem); D("buf = 0x%x\n", (u32)buf); BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_PMEM); mem->size = PAGE_ALIGN(buf->bsize); mem->y_off = 0; mem->cbcr_off = (buf->bsize)*2/3; if (buf->i >= 0 && buf->i <= 3) mem->buffer_type = OUTPUT_TYPE_P; else mem->buffer_type = OUTPUT_TYPE_V; buf->bsize = mem->size; mem->phyaddr = msm_mem_allocate(mem->size); if (IS_ERR((void *)mem->phyaddr)) { pr_err("%s : pmem memory allocation failed\n", __func__); goto error; } /* Try to remap memory */ size = vma->vm_end - vma->vm_start; size = (size < mem->size) ? size : mem->size; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); retval = remap_pfn_range(vma, vma->vm_start, mem->phyaddr >> PAGE_SHIFT, size, vma->vm_page_prot); if (retval) { pr_err("mmap: remap failed with error %d. ", retval); retval = msm_mem_free(mem->phyaddr); if (retval < 0) printk(KERN_ERR "%s: Invalid memory location\n", __func__); else { mem->phyaddr = 0; } goto error; } vma->vm_ops = &videobuf_vm_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_private_data = map; D("mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n", map, q, vma->vm_start, vma->vm_end, (long int)buf->bsize, vma->vm_pgoff, buf->i); videobuf_vm_open(vma); return 0; error: kfree(map); return -ENOMEM; }
/*
 * Lock/prepare buffer memory for I/O (vmalloc backend).
 *
 * MMAP buffers must already have been mmapped; USERPTR is accepted
 * only with a NULL user address (the kernel buffer used by read());
 * OVERLAY and anything else is rejected.
 *
 * NOTE(review): 'pages' actually holds a byte count (PAGE_ALIGN of
 * vb->size), which vmalloc_user() expects -- only the log wording
 * "(%d pages)" is misleading.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_vmalloc_memory *mem = vb->priv;
	int pages;

	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dprintk(1, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			printk(KERN_ERR "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		pages = PAGE_ALIGN(vb->size);

		dprintk(1, "%s memory method USERPTR\n", __func__);

		if (vb->baddr) {
			printk(KERN_ERR "USERPTR is currently not supported\n");
			return -EINVAL;
		}

		/* The only USERPTR currently supported is the one needed for
		 * read() method.
		 */

		mem->vaddr = vmalloc_user(pages);
		if (!mem->vaddr) {
			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
			return -ENOMEM;
		}
		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
			mem->vaddr, pages);

#if 0
		int rc;
		/* Kernel userptr is used also by read() method. In this case,
		   there's no need to remap, since data will be copied to user
		 */
		if (!vb->baddr)
			return 0;

		/* FIXME: to properly support USERPTR, remap should occur.
		   The code below won't work, since mem->vma = NULL
		 */
		/* Try to remap memory */
		rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
		if (rc < 0) {
			printk(KERN_ERR "mmap: remap failed with error %d", rc);
			return -ENOMEM;
		}
#endif

		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);

		/* Currently, doesn't support V4L2_MEMORY_OVERLAY */
		printk(KERN_ERR "Memory method currently unsupported.\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Lock/prepare buffer memory for I/O (vmalloc backend, uncommented
 * twin of the version above).
 *
 * MMAP buffers must already have been mmapped by the mapper; USERPTR
 * is accepted only with a NULL user address (read() bounce buffer);
 * OVERLAY and anything else is rejected with -EINVAL.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_vmalloc_memory *mem = vb->priv;
	int pages;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dprintk(1, "%s memory method MMAP\n", __func__);
		/* Allocation is done by __videobuf_mmap_mapper(); just
		 * verify it happened. */
		if (!mem->vaddr) {
			printk(KERN_ERR "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		/* NOTE(review): 'pages' holds a byte count here, which
		 * is what vmalloc_user() expects. */
		pages = PAGE_ALIGN(vb->size);

		dprintk(1, "%s memory method USERPTR\n", __func__);

		if (vb->baddr) {
			printk(KERN_ERR "USERPTR is currently not supported\n");
			return -EINVAL;
		}

		/* Only the kernel-allocated USERPTR used by read() is
		 * supported. */
		mem->vaddr = vmalloc_user(pages);
		if (!mem->vaddr) {
			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
			return -ENOMEM;
		}
		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
			mem->vaddr, pages);

#if 0
		int rc;

		/* Dead code kept for reference: remap for true USERPTR
		 * support; does not work since mem->vma = NULL. */
		if (!vb->baddr)
			return 0;

		rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
		if (rc < 0) {
			printk(KERN_ERR "mmap: remap failed with error %d", rc);
			return -ENOMEM;
		}
#endif

		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
		printk(KERN_ERR "Memory method currently unsupported.\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Per-call event dispatcher (UA layer).
 *
 * Handles the call state machine events: incoming call screening and
 * answer-mode handling, ring/progress/established notifications,
 * close handling (with error tone), and blind transfer by allocating
 * and connecting a new call to the transfer target.  @arg is the
 * owning struct ua.
 */
static void call_event_handler(struct call *call, enum call_event ev,
			       const char *str, void *arg)
{
	struct ua *ua = arg;
	const char *peeruri;
	struct call *call2 = NULL;
	int err;

	MAGIC_CHECK(ua);

	peeruri = call_peeruri(call);

	/* stop any ringtones */
	ua->play = mem_deref(ua->play);

	switch (ev) {

	case CALL_EVENT_INCOMING:
		/* Reject calls from blocked contacts outright. */
		if (contact_block_access(peeruri)) {

			info("ua: blocked access: \"%s\"\n", peeruri);

			ua_event(ua, UA_EVENT_CALL_CLOSED, call, str);
			mem_deref(call);
			break;
		}
		switch (ua->acc->answermode) {

		case ANSWERMODE_EARLY:
			(void)call_progress(call);
			break;

		case ANSWERMODE_AUTO:
			(void)call_answer(call, 200);
			break;

		case ANSWERMODE_MANUAL:
		default:
			if (list_count(&ua->calls) > 1) {
				(void)play_file(&ua->play,
						"callwaiting.wav", 3);
			}
			else {
				/* Alert user */
				(void)play_file(&ua->play, "ring.wav", -1);
			}

			ua_event(ua, UA_EVENT_CALL_INCOMING, call, peeruri);
			break;
		}
		break;

	case CALL_EVENT_RINGING:
		(void)play_file(&ua->play, "ringback.wav", -1);
		ua_event(ua, UA_EVENT_CALL_RINGING, call, peeruri);
		break;

	case CALL_EVENT_PROGRESS:
		ua_printf(ua, "Call in-progress: %s\n", peeruri);
		ua_event(ua, UA_EVENT_CALL_PROGRESS, call, peeruri);
		break;

	case CALL_EVENT_ESTABLISHED:
		ua_printf(ua, "Call established: %s\n", peeruri);
		ua_event(ua, UA_EVENT_CALL_ESTABLISHED, call, peeruri);
		break;

	case CALL_EVENT_CLOSED:
		/* Play an error tone matching the SIP status code. */
		if (call_scode(call)) {
			const char *tone;
			tone = translate_errorcode(call_scode(call));
			if (tone)
				(void)play_file(&ua->play, tone, 1);
		}
		ua_event(ua, UA_EVENT_CALL_CLOSED, call, str);
		mem_deref(call);
		break;

	case CALL_EVENT_TRANSFER:
		/*
		 * Create a new call to transfer target.
		 *
		 * NOTE: we will automatically connect a new call to the
		 *       transfer target
		 */
		ua_printf(ua, "transferring call to %s\n", str);

		err = ua_call_alloc(&call2, ua, VIDMODE_ON, NULL, call,
				    call_localuri(call));
		if (!err) {
			struct pl pl;

			pl_set_str(&pl, str);

			err = call_connect(call2, &pl);
			if (err) {
				warning("ua: transfer: connect error: %m\n",
					err);
			}
		}

		if (err) {
			(void)call_notify_sipfrag(call, 500, "Call Error");
			mem_deref(call2);
		}
		break;

	case CALL_EVENT_TRANSFER_FAILED:
		ua_event(ua, UA_EVENT_CALL_TRANSFER_FAILED, call, str);
		break;
	}
}
/*
 * read() implementation that captures one frame and copies it to user
 * space.  Tries zero-copy first when the user buffer can hold a whole
 * frame; otherwise captures into a kernel bounce buffer.  Returns the
 * number of bytes copied or a negative errno.
 */
ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags=0;
	unsigned size, nbufs;
	int retval;

	MAGIC_CHECK(q->int_ops->magic,MAGIC_QTYPE_OPS);

	mutex_lock(&q->lock);

	nbufs = 1; size = 0;
	q->ops->buf_setup(q,&nbufs,&size);

	if (NULL == q->read_buf && count >= size && !nonblocking) {
		retval = videobuf_read_zerocopy(q,data,count,ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc(q);
		dprintk(1,"video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q,q->read_buf,field);
		if (0 != retval) {
			kfree (q->read_buf);
			q->read_buf = NULL;
			goto done;
		}
		if (q->irqlock)
			spin_lock_irqsave(q->irqlock,flags);
		q->ops->buf_queue(q,q->read_buf);
		if (q->irqlock)
			spin_unlock_irqrestore(q->irqlock,flags);
		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q,sync,q,q->read_buf);

	if (STATE_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q,q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval=CALL(q,video_copy_to_user,q,data,count,nonblocking);
	if (retval<0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q,q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	mutex_unlock(&q->lock);
	return retval;
}
/*
 * VMA close handler (PMEM backend).  On the last unmap of this
 * mapping: cancel any active streaming, free the physical memory of
 * every buffer owned by the mapping, then free the queue's buffer
 * structures via videobuf_mmap_free().
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i, rc;

	D("vm_close %p [count=%u,vma=%08lx-%08lx]\n",
	  map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_contig_pmem *mem;

		D("munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);
		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;
			if (q->bufs[i]->map != map)
				continue;
			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				 * allocated memory and this memory is mmapped.
				 * In this case, memory should be freed,
				 * in order to do memory unmap. */
				MAGIC_CHECK(mem->magic, MAGIC_PMEM);
				/* vfree is not atomic - can't be called
				   with IRQ's disabled */
				D("buf[%d] freeing physical %d\n",
				  i, mem->phyaddr);
				rc = msm_mem_free(mem->phyaddr);
				if (rc < 0)
					D("%s: Invalid memory location\n",
					  __func__);
				else {
					mem->phyaddr = 0;
				}
			}
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}
		kfree(map);
		mutex_unlock(&q->vb_lock);
		/* deallocate the q->bufs[i] structure not a good solution
		   as it will result in unnecessary iterations but right now
		   this looks like the only cleaner way */
		videobuf_mmap_free(q);
	}
}
/*
 * VIDIOC_QBUF implementation: validate the user-supplied v4l2_buffer,
 * prepare the matching queue buffer and append it to the stream list
 * (queueing it to the driver immediately when streaming).
 *
 * NOTE(review): "&current" below was mangled in extraction
 * ("¤t") and has been restored; verify against the upstream file.
 */
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* mmap_sem is taken before the queue lock (lock ordering vs.
	 * the fault path). */
	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		/* Output types carry payload info from user space. */
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = v4l2_timeval_to_ns(&b->timestamp);
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		/* Release/re-prepare when the user pointer changed. */
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) { struct videobuf_vmalloc_memory *mem; struct videobuf_mapping *map; int retval, pages; dprintk(1, "%s\n", __func__); /* create mapping + update buffer list */ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); if (NULL == map) return -ENOMEM; buf->map = map; map->q = q; buf->baddr = vma->vm_start; mem = buf->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM); pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); mem->vaddr = vmalloc_user(pages); if (!mem->vaddr) { printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); goto error; } dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); /* Try to remap memory */ retval = remap_vmalloc_range(vma, mem->vaddr, 0); if (retval < 0) { printk(KERN_ERR "mmap: remap failed with error %d. ", retval); vfree(mem->vaddr); goto error; } vma->vm_ops = &videobuf_vm_ops; #if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; #else vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; #endif vma->vm_private_data = map; dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n", map, q, vma->vm_start, vma->vm_end, (long int)buf->bsize, vma->vm_pgoff, buf->i); videobuf_vm_open(vma); return 0; error: mem = NULL; kfree(map); return -ENOMEM; }
/*
 * Map canvas coordinates (x,y) back to the map's (u,v) source
 * coordinates using scanline/edge interpolation.  Returns EINA_TRUE
 * when (x,y) hits the mapped polygon (or when @grab forces a hit),
 * storing the interpolated source coordinates in *mx / *my.
 */
Eina_Bool
evas_map_coords_get(const Evas_Map *m, Evas_Coord x, Evas_Coord y,
                    Evas_Coord *mx, Evas_Coord *my, int grab)
{
   MAGIC_CHECK(m, Evas_Map, MAGIC_MAP);
   return EINA_FALSE;
   MAGIC_CHECK_END();

   int i, j, edges, edge[m->count][2], douv;
   Evas_Coord xe[2];
   double u[2] = { 0.0, 0.0 };
   double v[2] = { 0.0, 0.0 };

   if (m->count < 4) return 0;
   // FIXME need to handle grab mode and extrapolte coords outside
   // map
   if (grab)
     {
        /* Clamp y inside the polygon's vertical extent so the
         * scanline always crosses at least one edge pair. */
        Evas_Coord ymin, ymax;

        ymin = m->points[0].y;
        ymax = m->points[0].y;
        for (i = 1; i < m->count; i++)
          {
             if (m->points[i].y < ymin) ymin = m->points[i].y;
             else if (m->points[i].y > ymax) ymax = m->points[i].y;
          }
        if (y <= ymin) y = ymin + 1;
        if (y >= ymax) y = ymax - 1;
     }

   /* Collect every polygon edge crossed by the horizontal scanline
    * at height y, stored as (lower-vertex, upper-vertex) pairs. */
   edges = 0;
   for (i = 0; i < m->count; i++)
     {
        j = (i + 1) % m->count;
        if ((m->points[i].y <= y) && (m->points[j].y > y))
          {
             edge[edges][0] = i;
             edge[edges][1] = j;
             edges++;
          }
        else if ((m->points[j].y <= y) && (m->points[i].y > y))
          {
             edge[edges][0] = j;
             edge[edges][1] = i;
             edges++;
          }
     }

   /* Only interpolate u/v when the caller asked for them. */
   douv = 0;
   if ((mx) || (my)) douv = 1;

   /* Walk edge pairs: each pair bounds one horizontal span. */
   for (i = 0; i < (edges - 1); i+= 2)
     {
        Evas_Coord yp, yd;

        j = i + 1;
        /* Interpolate the first span end: x plus optional u/v. */
        yd = m->points[edge[i][1]].y - m->points[edge[i][0]].y;
        if (yd > 0)
          {
             yp = y - m->points[edge[i][0]].y;
             xe[0] = m->points[edge[i][1]].x - m->points[edge[i][0]].x;
             xe[0] = m->points[edge[i][0]].x + ((xe[0] * yp) / yd);
             if (douv)
               {
                  u[0] = m->points[edge[i][1]].u - m->points[edge[i][0]].u;
                  u[0] = m->points[edge[i][0]].u + ((u[0] * yp) / yd);
                  v[0] = m->points[edge[i][1]].v - m->points[edge[i][0]].v;
                  v[0] = m->points[edge[i][0]].v + ((v[0] * yp) / yd);
               }
          }
        else
          {
             /* Horizontal edge: take the vertex values directly. */
             xe[0] = m->points[edge[i][0]].x;
             if (douv)
               {
                  u[0] = m->points[edge[i][0]].u;
                  v[0] = m->points[edge[i][0]].v;
               }
          }
        /* Interpolate the second span end the same way. */
        yd = m->points[edge[j][1]].y - m->points[edge[j][0]].y;
        if (yd > 0)
          {
             yp = y - m->points[edge[j][0]].y;
             xe[1] = m->points[edge[j][1]].x - m->points[edge[j][0]].x;
             xe[1] = m->points[edge[j][0]].x + ((xe[1] * yp) / yd);
             if (douv)
               {
                  u[1] = m->points[edge[j][1]].u - m->points[edge[j][0]].u;
                  u[1] = m->points[edge[j][0]].u + ((u[1] * yp) / yd);
                  v[1] = m->points[edge[j][1]].v - m->points[edge[j][0]].v;
                  v[1] = m->points[edge[j][0]].v + ((v[1] * yp) / yd);
               }
          }
        else
          {
             xe[1] = m->points[edge[j][0]].x;
             if (douv)
               {
                  u[1] = m->points[edge[j][0]].u;
                  v[1] = m->points[edge[j][0]].v;
               }
          }
        /* Ensure xe[0] is the left end; swap u/v along with it. */
        if (xe[0] > xe[1])
          {
             int ti;

             ti = xe[0]; xe[0] = xe[1]; xe[1] = ti;
             if (douv)
               {
                  double td;

                  td = u[0]; u[0] = u[1]; u[1] = td;
                  td = v[0]; v[0] = v[1]; v[1] = td;
               }
          }
        if ((x >= xe[0]) && (x < xe[1]))
          {
             /* Hit inside this span: interpolate u/v across it. */
             if (douv)
               {
                  if (mx)
                    *mx = u[0] + (((x - xe[0]) * (u[1] - u[0])) /
                                  (xe[1] - xe[0]));
                  if (my)
                    *my = v[0] + (((x - xe[0]) * (v[1] - v[0])) /
                                  (xe[1] - xe[0]));
               }
             return EINA_TRUE;
          }
        if (grab)
          {
             /* In grab mode extrapolate along the span even when x
              * falls outside it -- NOTE(review): presumably the
              * intended grab semantics; confirm against callers. */
             if (douv)
               {
                  if (mx)
                    *mx = u[0] + (((x - xe[0]) * (u[1] - u[0])) /
                                  (xe[1] - xe[0]));
                  if (my)
                    *my = v[0] + (((x - xe[0]) * (v[1] - v[0])) /
                                  (xe[1] - xe[0]));
               }
             return EINA_TRUE;
          }
     }
   return EINA_FALSE;
}
/**
 * Sets the geometric type displayed by the given gradient object.
 * @param obj The given gradient object.
 * @param name Name of the geometric type that the gradient is to be drawn as.
 * @param params List of allowable params that the given gradient type allows.
 * Can be NULL.
 */
EAPI void
evas_object_gradient_type_set(Evas_Object *obj, const char *name,
                              const char *params)
{
   Evas_Object_Gradient *o;

   MAGIC_CHECK(obj, Evas_Object, MAGIC_OBJ);
   return;
   MAGIC_CHECK_END();
   o = (Evas_Object_Gradient *)(obj->object_data);
   MAGIC_CHECK(o, Evas_Object_Gradient, MAGIC_OBJ_GRADIENT);
   return;
   MAGIC_CHECK_END();
   /* Empty/NULL name falls back to the default "linear" type. */
   if (!name || !*name)
     {
        name = "linear";
        params = NULL;
     }
   if (params && !*params) params = NULL;
   /* Same type name: only the params may need to change. */
   if ((o->cur.type.name) && (!strcmp(o->cur.type.name, name)))
     {
        if ((!o->cur.type.params) && (!params)) return;
        if ((o->cur.type.params) && (params) &&
            (!strcmp(o->cur.type.params, params))) return;
        if (o->cur.type.params)
          {
             /* Detach prev from the shared string before freeing cur. */
             if (o->prev.type.params == o->cur.type.params)
               o->prev.type.params = strdup(o->cur.type.params);
             free(o->cur.type.params);
             o->cur.type.params = NULL;
          }
        if (params) o->cur.type.params = strdup(params);
        o->changed = 1;
        o->gradient_changed = 1;
        o->type_changed = 1;
        evas_object_change(obj);
        return;
     }
   /* Type name changed: replace both the name and the params. */
   if (o->cur.type.name)
     {
        if (o->prev.type.name == o->cur.type.name)
          o->prev.type.name = strdup(o->cur.type.name);
        free(o->cur.type.name);
        o->cur.type.name = NULL;
     }
   o->cur.type.name = strdup(name);
   if (o->cur.type.params)
     {
        if (o->prev.type.params == o->cur.type.params)
          o->prev.type.params = strdup(o->cur.type.params);
        free(o->cur.type.params);
        o->cur.type.params = NULL;
     }
   if (params) o->cur.type.params = strdup(params);
   o->changed = 1;
   o->gradient_changed = 1;
   o->type_changed = 1;
   evas_object_change(obj);
}
/*
 * mmap handler (dma-contig backend): find the buffer whose boff
 * matches the vma offset, allocate coherent DMA memory for it, and
 * remap the pages into user space.
 */
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	unsigned int first;
	int retval;
	unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;

	dev_dbg(q->dev, "%s\n", __func__);
	/* Only writable shared mappings make sense for capture. */
	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (!q->bufs[first])
			continue;

		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		if (q->bufs[first]->boff == offset)
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
			offset);
		return -EINVAL;
	}

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	q->bufs[first]->map = map;
	map->start = vma->vm_start;
	map->end = vma->vm_end;
	map->q = q;

	q->bufs[first]->baddr = vma->vm_start;

	mem = q->bufs[first]->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
	mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
					&mem->dma_handle, GFP_KERNEL);
	if (!mem->vaddr) {
		dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
			mem->size);
		goto error;
	}
	dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
		mem->vaddr, mem->size);

	/* Try to remap memory */
	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	retval = remap_pfn_range(vma, vma->vm_start,
				 mem->dma_handle >> PAGE_SHIFT,
				 size, vma->vm_page_prot);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)q->bufs[first]->bsize, vma->vm_pgoff, first);

	videobuf_vm_open(vma);

	return 0;

error:
	kfree(map);
	return -ENOMEM;
}
/* Return the current reference count of a vb2 PMEM buffer. */
static unsigned int msm_vb2_mem_ops_num_users(void *buf_priv)
{
	struct videobuf2_contig_pmem *mem;

	mem = buf_priv;
	MAGIC_CHECK(mem->magic, MAGIC_PMEM);

	return mem->count;
}
/*
 * Streaming read(): copy captured frames to user space, requeueing
 * each buffer once it has been fully consumed.
 *
 * NOTE(review): this definition is truncated in the visible chunk --
 * the 'done:' unlock/return path referenced below lies past it.
 */
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags=0;

	MAGIC_CHECK(q->int_ops->magic,MAGIC_QTYPE_OPS);

	dprintk(2,"%s\n",__FUNCTION__);
	mutex_lock(&q->lock);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q->read_buf, nonblocking, 1);
		if (rc < 0) {
			/* keep the first error we saw */
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == STATE_DONE) {
			rc = CALL (q,copy_stream, q, data + retval, count,
				   retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval += rc;
			count -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			if (q->irqlock)
				spin_lock_irqsave(q->irqlock,flags);
			q->ops->buf_queue(q,q->read_buf);
			if (q->irqlock)
				spin_unlock_irqrestore(q->irqlock,flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}
static int __videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma) { struct videobuf_vmalloc_memory *mem; struct videobuf_mapping *map; unsigned int first; int retval, pages; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; dprintk(1, "%s\n", __func__); if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) return -EINVAL; /* look for first buffer to map */ for (first = 0; first < VIDEO_MAX_FRAME; first++) { if (NULL == q->bufs[first]) continue; if (V4L2_MEMORY_MMAP != q->bufs[first]->memory) continue; if (q->bufs[first]->boff == offset) break; } if (VIDEO_MAX_FRAME == first) { dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n", (vma->vm_pgoff << PAGE_SHIFT)); return -EINVAL; } /* create mapping + update buffer list */ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); if (NULL == map) return -ENOMEM; q->bufs[first]->map = map; map->start = vma->vm_start; map->end = vma->vm_end; map->q = q; q->bufs[first]->baddr = vma->vm_start; mem = q->bufs[first]->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM); pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); mem->vmalloc = vmalloc_user(pages); if (!mem->vmalloc) { printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); goto error; } dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vmalloc, pages); /* Try to remap memory */ retval = remap_vmalloc_range(vma, mem->vmalloc, 0); if (retval < 0) { printk(KERN_ERR "mmap: remap failed with error %d. ", retval); vfree(mem->vmalloc); goto error; } vma->vm_ops = &videobuf_vm_ops; vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; vma->vm_private_data = map; dprintk(1,"mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n", map, q, vma->vm_start, vma->vm_end, (long int) q->bufs[first]->bsize, vma->vm_pgoff, first); videobuf_vm_open(vma); return 0; error: mem = NULL; kfree(map); return -ENOMEM; }
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b) { struct videobuf_buffer *buf; enum v4l2_field field; unsigned long flags=0; int retval; MAGIC_CHECK(q->int_ops->magic,MAGIC_QTYPE_OPS); if (b->memory == V4L2_MEMORY_MMAP) down_read(¤t->mm->mmap_sem); mutex_lock(&q->lock); retval = -EBUSY; if (q->reading) { dprintk(1,"qbuf: Reading running...\n"); goto done; } retval = -EINVAL; if (b->type != q->type) { dprintk(1,"qbuf: Wrong type.\n"); goto done; } if (b->index < 0 || b->index >= VIDEO_MAX_FRAME) { dprintk(1,"qbuf: index out of range.\n"); goto done; } buf = q->bufs[b->index]; if (NULL == buf) { dprintk(1,"qbuf: buffer is null.\n"); goto done; } MAGIC_CHECK(buf->magic,MAGIC_BUFFER); if (buf->memory != b->memory) { dprintk(1,"qbuf: memory type is wrong.\n"); goto done; } if (buf->state != STATE_NEEDS_INIT && buf->state != STATE_IDLE) { dprintk(1,"qbuf: buffer is already queued or active.\n"); goto done; } if (b->flags & V4L2_BUF_FLAG_INPUT) { if (b->input >= q->inputs) { dprintk(1,"qbuf: wrong input.\n"); goto done; } buf->input = b->input; } else { buf->input = UNSET; } switch (b->memory) { case V4L2_MEMORY_MMAP: if (0 == buf->baddr) { dprintk(1,"qbuf: mmap requested but buffer addr is zero!\n"); goto done; } break; case V4L2_MEMORY_USERPTR: if (b->length < buf->bsize) { dprintk(1,"qbuf: buffer length is not enough\n"); goto done; } if (STATE_NEEDS_INIT != buf->state && buf->baddr != b->m.userptr) q->ops->buf_release(q,buf); buf->baddr = b->m.userptr; break; case V4L2_MEMORY_OVERLAY: buf->boff = b->m.offset; break; default: dprintk(1,"qbuf: wrong memory type\n"); goto done; } dprintk(1,"qbuf: requesting next field\n"); field = videobuf_next_field(q); retval = q->ops->buf_prepare(q,buf,field); if (0 != retval) { dprintk(1,"qbuf: buffer_prepare returned %d\n",retval); goto done; } list_add_tail(&buf->stream,&q->stream); if (q->streaming) { if (q->irqlock) spin_lock_irqsave(q->irqlock,flags); q->ops->buf_queue(q,buf); if (q->irqlock) 
spin_unlock_irqrestore(q->irqlock,flags); } dprintk(1,"qbuf: succeded\n"); retval = 0; done: mutex_unlock(&q->lock); if (b->memory == V4L2_MEMORY_MMAP) up_read(¤t->mm->mmap_sem); return retval; }
/*
 * Restack @obj directly below @below within the same layer or smart
 * parent.  Runs stacking intercepts, emits restack events, and
 * re-feeds the pointer position when it sits over the moved object.
 */
EAPI void
evas_object_stack_below(Evas_Object *obj, Evas_Object *below)
{
   MAGIC_CHECK(obj, Evas_Object, MAGIC_OBJ);
   return;
   MAGIC_CHECK_END();
   MAGIC_CHECK(below, Evas_Object, MAGIC_OBJ);
   return;
   MAGIC_CHECK_END();
   if (obj == below) return;
   if (evas_object_intercept_call_stack_below(obj, below)) return;
   if (!below)
     {
        /* No reference object: drop to the bottom of the stack. */
        evas_object_lower(obj);
        return;
     }
   /* Already directly above @below: only report the restack. */
   if ((EINA_INLIST_GET(obj))->next == EINA_INLIST_GET(below))
     {
        evas_object_inform_call_restack(obj);
        return;
     }
   if (obj->smart.parent)
     {
        /* Smart members can only be restacked within one parent. */
        if (obj->smart.parent != below->smart.parent)
          {
             ERR("BITCH! evas_object_stack_below(), %p not inside same smart as %p!", obj, below);
             return;
          }
        evas_object_smart_member_stack_below(obj, below);
     }
   else
     {
        if (below->smart.parent)
          {
             ERR("BITCH! evas_object_stack_below(), %p stack below %p, but below has smart parent, obj does not", obj, below);
             return;
          }
        if (obj->layer != below->layer)
          {
             ERR("BITCH! evas_object_stack_below(), %p stack below %p, not matching layers", obj, below);
             return;
          }
        if (obj->in_layer)
          {
             /* Re-insert obj just before below in the layer inlist. */
             obj->layer->objects = (Evas_Object *)eina_inlist_remove(EINA_INLIST_GET(obj->layer->objects),
                                                                     EINA_INLIST_GET(obj));
             obj->layer->objects = (Evas_Object *)eina_inlist_prepend_relative(EINA_INLIST_GET(obj->layer->objects),
                                                                               EINA_INLIST_GET(obj),
                                                                               EINA_INLIST_GET(below));
          }
     }
   if (obj->clip.clipees)
     {
        evas_object_inform_call_restack(obj);
        return;
     }
   if (obj->layer) evas_render_invalidate(obj->layer->evas);
   obj->restack = 1;
   evas_object_change(obj);
   evas_object_inform_call_restack(obj);
   if (obj->layer->evas->events_frozen <= 0)
     {
        if (!evas_event_passes_through(obj))
          {
             if (!obj->smart.smart)
               {
                  /* Pointer may now hover a different object: re-feed
                   * the current pointer position. */
                  if (evas_object_is_in_output_rect(obj,
                                                    obj->layer->evas->pointer.x,
                                                    obj->layer->evas->pointer.y,
                                                    1, 1) &&
                      obj->cur.visible)
                    evas_event_feed_mouse_move(obj->layer->evas,
                                               obj->layer->evas->pointer.x,
                                               obj->layer->evas->pointer.y,
                                               obj->layer->evas->last_timestamp,
                                               NULL);
               }
          }
     }
}