static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	struct drm_device *dev = obj->dev;
	int ret;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	/* set up the core GEM mapping under struct_mutex... */
	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	mutex_unlock(&dev->struct_mutex);
	if (ret < 0)
		return ret;

	/* ...then let the driver fix up vm_ops and page protection */
	return omap_gem_mmap_obj(obj, vma);
}
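/*
 * Hedged sketch (assumed names, not verbatim from this file): on its
 * own, omap_gem_dmabuf_mmap() does nothing until it is wired into the
 * exporter's dma_buf_ops table and the GEM object is exported, roughly
 * as below. The other hooks (attach, map_dma_buf, kmap, cpu access,
 * ...) are elided; only the .mmap wiring is the point.
 */
static struct dma_buf_ops omap_dmabuf_ops = {
	/* ... attach/map/kmap hooks elided ... */
	.mmap = omap_gem_dmabuf_mmap,
};

struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	/* the GEM object itself becomes the dma_buf's ->priv */
	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
}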
static int ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_info *args = data;
	struct drm_gem_object *obj;

	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	/* report the mmap'able size and fake mmap offset for this buffer */
	args->size = omap_gem_mmap_size(obj);
	args->offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
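/*
 * Hedged userspace sketch showing how the ioctl above is typically
 * consumed, assuming the uapi omap_drm.h definitions of
 * DRM_IOCTL_OMAP_GEM_INFO and struct drm_omap_gem_info (handle in;
 * size and offset out, as the handler fills them). The returned
 * offset is a fake mmap offset to pass to mmap() on the DRM fd.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <omap_drm.h>	/* assumed install path of the uapi header */

static void *map_omap_bo(int drm_fd, uint32_t handle, uint32_t *size_out)
{
	struct drm_omap_gem_info info = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_OMAP_GEM_INFO, &info))
		return MAP_FAILED;

	*size_out = info.size;
	return mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			drm_fd, info.offset);
}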
/*
 * TODO maybe we can split up drm_gem_mmap to avoid duplicating
 * some here.. or at least have a drm_dmabuf_mmap helper.
 */
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	/* Check for valid size. */
	if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	vma->vm_ops->open(vma);

	return omap_gem_mmap_obj(obj, vma);
}
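/*
 * Hedged sketch of the vm_operations_struct that gem_vm_ops is assumed
 * to point at, so that the vm_ops->open(vma) call above has the
 * refcounting effect its comment describes: one GEM reference held per
 * live vma, dropped again from vm_close. omap_gem_fault is the
 * driver's fault handler and is assumed here, not shown.
 */
static void omap_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}

static void omap_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}

static const struct vm_operations_struct omap_gem_vm_ops = {
	.fault = omap_gem_fault,	/* assumed, fills pages on demand */
	.open = omap_gem_vm_open,
	.close = omap_gem_vm_close,
};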
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
	struct omap_framebuffer *omap_fb;
	struct drm_framebuffer *fb = NULL;
	const struct format *format = NULL;
	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);

	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
			(char *)&mode_cmd->pixel_format);

	/* find the entry in the supported-formats table */
	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].pixel_format == mode_cmd->pixel_format) {
			format = &formats[i];
			break;
		}
	}

	if (!format) {
		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
				(char *)&mode_cmd->pixel_format);
		ret = -EINVAL;
		goto fail;
	}

	omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
	if (!omap_fb) {
		ret = -ENOMEM;
		goto fail;
	}

	fb = &omap_fb->base;
	omap_fb->format = format;

	/* validate pitch and size of each plane against its backing bo */
	for (i = 0; i < n; i++) {
		struct plane *plane = &omap_fb->planes[i];
		int size, pitch = mode_cmd->pitches[i];

		if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
			dev_err(dev->dev,
					"provided buffer pitch is too small! %d < %d\n",
					pitch,
					mode_cmd->width * format->planes[i].stride_bpp);
			ret = -EINVAL;
			goto fail;
		}

		size = pitch * mode_cmd->height / format->planes[i].sub_y;

		if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
			dev_err(dev->dev,
					"provided buffer object is too small! %d < %d\n",
					omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i],
					size);
			ret = -EINVAL;
			goto fail;
		}

		plane->bo     = bos[i];
		plane->offset = mode_cmd->offsets[i];
		plane->pitch  = pitch;
		plane->paddr  = 0;
	}

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		goto fail;
	}

	DBG("create: FB ID: %d (%p)", fb->base.id, fb);

	return fb;

fail:
	if (fb)
		omap_framebuffer_destroy(fb);

	return ERR_PTR(ret);
}
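/*
 * Hedged reconstruction of the formats[] table searched above, based
 * only on how its fields are used there (pixel_format matched against
 * mode_cmd, width * stride_bpp as the minimum pitch, height / sub_y as
 * the per-plane height). Entries are illustrative and any additional
 * driver-specific fields are omitted. E.g. for NV12 at 640x480, the
 * checks require plane 0 (Y) to carry pitch >= 640 and at least
 * 640*480 bytes, and plane 1 (interleaved CbCr, sub_y = 2) the same
 * pitch but only half the bytes.
 */
static const struct format {
	uint32_t pixel_format;
	struct {
		int stride_bpp;	/* bytes per pixel in this plane */
		int sub_y;	/* vertical subsampling factor */
	} planes[4];
} formats[] = {
	{ DRM_FORMAT_RGB565,   {{2, 1}} },
	{ DRM_FORMAT_XRGB8888, {{4, 1}} },
	{ DRM_FORMAT_NV12,     {{1, 1}, {1, 2}} },
};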