Code Example #1
/**
 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
 *     object
 * @file_priv: DRM file-private structure containing the GEM object
 * @drm: DRM device
 * @handle: GEM object handle
 * @offset: return location for the fake mmap offset
 *
 * This function looks up an object by its handle and returns the fake mmap
 * offset associated with it. Drivers using the CMA helpers should set this
 * as their DRM driver's ->dumb_map_offset() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *drm, u32 handle,
				u64 *offset)
{
	struct drm_gem_object *gem_obj;

	mutex_lock(&drm->struct_mutex);

	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
	if (!gem_obj) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference(gem_obj);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}
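A usage note on the example above: the kernel-doc says drivers built on the CMA helpers should expose this function as their ->dumb_map_offset() callback. Below is a minimal sketch of that wiring; the driver struct, its feature flags and the other dumb-buffer callbacks are illustrative assumptions, not taken from the example.

/* Hypothetical driver glue: hand the CMA dumb-buffer helpers to DRM.
 * Headers, file operations and the rest of the driver are elided. */
static struct drm_driver example_cma_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	/* ... name, fops, ioctls and so on ... */
};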
Code Example #2
/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 */
		drm_gem_object_unreference(obj);
	} else
		spin_unlock(&dev->object_name_lock);

}
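For context on when this destructor runs: in kernels of this vintage the per-object handle count was a kref, and an inline helper along the lines of the sketch below (an approximation, not a verbatim quote of drmP.h) dropped the last handle and let kref_put() invoke drm_gem_object_handle_free().

/* Approximate caller side: once the handle refcount reaches zero, kref_put()
 * calls drm_gem_object_handle_free(), then the handle's own reference on the
 * object is dropped. */
static inline void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	kref_put(&obj->handlecount, drm_gem_object_handle_free);
	drm_gem_object_unreference(obj);
}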
Code Example #3
/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	args->tiling_mode = obj->tiling_mode;
	switch (obj->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(&obj->base);

	return 0;
}
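The handler above services the I915_GEM_GET_TILING ioctl. A hedged userspace sketch of the matching call follows; query_tiling() is a made-up helper name and the error handling is deliberately minimal, but the ioctl number and struct are the uapi ones used by the kernel code.

/* Userspace sketch: ask the kernel for an object's tiling mode. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_tiling(int drm_fd, uint32_t handle, uint32_t *tiling_mode)
{
	struct drm_i915_gem_get_tiling get;

	memset(&get, 0, sizeof(get));
	get.handle = handle;
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_TILING, &get))
		return -1;			/* errno carries the kernel error */

	*tiling_mode = get.tiling_mode;		/* I915_TILING_NONE, _X or _Y */
	return 0;
}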
Code Example #4
File: gem.c  Project: Aeternam/gdev
/**
 *	psb_gem_create		-	create a mappable object
 *	@file: the DRM file of the client
 *	@dev: our device
 *	@size: the size requested
 *	@handlep: returned handle (opaque number)
 *
 *	Create a GEM object, fill in the boilerplate and attach a handle to
 *	it so that userspace can speak about it. This does the core work
 *	for the various methods that do/will create GEM objects for things
 */
static int psb_gem_create(struct drm_file *file,
	struct drm_device *dev, uint64_t size, uint32_t *handlep)
{
	struct gtt_range *r;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate our object - for now a direct gtt range which is not
	   stolen memory backed */
	r = psb_gtt_alloc_range(dev, size, "gem", 0);
	if (r == NULL) {
		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
		return -ENOSPC;
	}
	/* Initialize the extra goodies GEM needs to do all the hard work */
	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
		psb_gtt_free_range(dev, r);
		/* GEM doesn't give an error code so use -ENOMEM */
		dev_err(dev->dev, "GEM init failed for %lld\n", size);
		return -ENOMEM;
	}
	/* Give the object a handle so we can carry it more easily */
	ret = drm_gem_handle_create(file, &r->gem, &handle);
	if (ret) {
		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
							&r->gem, size);
		drm_gem_object_release(&r->gem);
		psb_gtt_free_range(dev, r);
		return ret;
	}
	/* We have the initial and handle reference but need only one now */
	drm_gem_object_unreference(&r->gem);
	*handlep = handle;
	return 0;
}
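As the comment says, psb_gem_create() does the core work for the driver's dumb-buffer entry point. A sketch of how that callback could sit on top of it is below; the pitch/size arithmetic (64-byte aligned pitch) is an assumption for illustration, not quoted from the driver.

/* Sketch of a dumb_create callback built on psb_gem_create(): compute a
 * 64-byte aligned pitch and the total size, then delegate creation. */
static int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return psb_gem_create(file, dev, args->size, &args->handle);
}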
Code Example #5
File: bochs_mm.c  Project: helbard/linux
int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
                           uint32_t handle, uint64_t *offset)
{
    struct drm_gem_object *obj;
    int ret;
    struct bochs_bo *bo;

    mutex_lock(&dev->struct_mutex);
    obj = drm_gem_object_lookup(dev, file, handle);
    if (obj == NULL) {
        ret = -ENOENT;
        goto out_unlock;
    }

    bo = gem_to_bochs_bo(obj);
    *offset = bochs_bo_mmap_offset(bo);

    drm_gem_object_unreference(obj);
    ret = 0;
out_unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}
Code Example #6
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;

	if (dev_priv->hw_contexts_disabled)
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	i915_gem_object_unpin(dctx->obj);

	/* When default context is created and switched to, base object refcount
	 * will be 2 (+1 from object creation and +1 from do_switch()).
	 * i915_gem_context_fini() will be called after gpu_idle() has switched
	 * to default context. So we need to unreference the base object once
	 * to offset the do_switch part, so that i915_gem_context_unreference()
	 * can then free the base object correctly. */
	drm_gem_object_unreference(&dctx->obj->base);
	i915_gem_context_unreference(dctx);
}
Code Example #7
/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj_priv->tiling_mode;
	switch (obj_priv->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
Code Example #8
static int intelfb_create(struct intel_fbdev *ifbdev,
                          struct drm_fb_helper_surface_size *sizes)
{
    struct drm_device *dev = ifbdev->helper.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct fb_info *info;
    struct drm_framebuffer *fb;
    struct drm_mode_fb_cmd2 mode_cmd;
    struct drm_i915_gem_object *obj;
    struct device *device = &dev->pdev->dev;
    int size, ret;


    if (sizes->surface_bpp == 24)
        sizes->surface_bpp = 32;

    mode_cmd.width = sizes->surface_width;
    mode_cmd.height = sizes->surface_height;

    mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
                                8), 64);
    mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
                            sizes->surface_depth);

    size = mode_cmd.pitches[0] * mode_cmd.height;
    size = ALIGN(size, PAGE_SIZE);
    obj = i915_gem_alloc_object(dev, size);
    if (!obj) {
        DRM_ERROR("failed to allocate framebuffer\n");
        ret = -ENOMEM;
        goto out;
    }

    mutex_lock(&dev->struct_mutex);


    ret = intel_pin_and_fence_fb_obj(dev, obj, false);
    if (ret) {
        DRM_ERROR("failed to pin fb: %d\n", ret);
        goto out_unref;
    }

    info = framebuffer_alloc(0, device);
    if (!info) {
        ret = -ENOMEM;
        goto out_unpin;
    }

    info->par = ifbdev;

    ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
    if (ret)
        goto out_unpin;

    fb = &ifbdev->ifb.base;

    ifbdev->helper.fb = fb;
    ifbdev->helper.fbdev = info;

    strcpy(info->fix.id, "inteldrmfb");

    info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
    info->fbops = &intelfb_ops;

    ret = fb_alloc_cmap(&info->cmap, 256, 0);
    if (ret) {
        ret = -ENOMEM;
        goto out_unpin;
    }

    info->apertures = alloc_apertures(1);
    if (!info->apertures) {
        ret = -ENOMEM;
        goto out_unpin;
    }
    info->apertures->ranges[0].base = dev->mode_config.fb_base;
    info->apertures->ranges[0].size =
        dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

    info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
    info->fix.smem_len = size;

    info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
    if (!info->screen_base) {
        ret = -ENOSPC;
        goto out_unpin;
    }
    info->screen_size = size;


    drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
    drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

    DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
                  fb->width, fb->height,
                  obj->gtt_offset, obj);


    mutex_unlock(&dev->struct_mutex);
    vga_switcheroo_client_fb_set(dev->pdev, info);
    return 0;

out_unpin:
    i915_gem_object_unpin(obj);
out_unref:
    drm_gem_object_unreference(&obj->base);
    mutex_unlock(&dev->struct_mutex);
out:
    return ret;
}
Code Example #9
File: i915_gem_tiling.c  Project: bahamas10/openzfs
/*ARGSUSED*/
int
i915_gem_set_tiling(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_set_tiling args;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
            (struct drm_i915_gem_set_tiling __user *) data, sizeof(args));

	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args.stride, obj->size, args.tiling_mode)) {
		drm_gem_object_unreference(obj);
		DRM_DEBUG("i915 tiling is not OK");
		return EINVAL;
	}

	spin_lock(&dev->struct_mutex);

	if (args.tiling_mode == I915_TILING_NONE) {
		args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args.tiling_mode == I915_TILING_X)
			args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		/* If we can't handle the swizzling, make it untiled. */
		if (args.swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args.tiling_mode = I915_TILING_NONE;
			args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}

	if (args.tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj, 1);
		if (ret != 0) {
			args.tiling_mode = obj_priv->tiling_mode;
			spin_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);
			DRM_ERROR("tiling switch!! unbind error %d", ret);
			return ret;
		}
		obj_priv->tiling_mode = args.tiling_mode;
	}
	obj_priv->stride = args.stride;

	ret = DRM_COPY_TO_USER((struct drm_i915_gem_set_tiling __user *) data, &args, sizeof(args));
	if (ret != 0)
		DRM_ERROR(" gem set tiling error! %d", ret);

	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	return 0;
}
Code Example #10
File: intel_fb.c  Project: SylvestreG/bitrig
static int intelfb_create(struct intel_fbdev *ifbdev,
    struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
	struct fb_info *info;
#endif
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
							 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = roundup2(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	DRM_LOCK();

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

#if 0
	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;
#endif

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
#if 0
	ifbdev->helper.fbdev = info;

	strlcpy(info->fix.id, "inteldrmfb", sizeof(info->fix.id));

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#else
{
	struct rasops_info *ri = &dev_priv->ro;
	bus_space_handle_t bsh;
	int err;

	err = agp_map_subregion(dev_priv->agph, obj->gtt_offset, size, &bsh);
	if (err) {
		ret = -err;
		goto out_unpin;
	}

	ri->ri_bits = bus_space_vaddr(dev->bst, bsh);
	ri->ri_depth = fb->bits_per_pixel;
	ri->ri_stride = fb->pitches[0];
	ri->ri_width = sizes->fb_width;
	ri->ri_height = sizes->fb_height;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XRGB8888:
		ri->ri_rnum = 8;
		ri->ri_rpos = 16;
		ri->ri_gnum = 8;
		ri->ri_gpos = 8;
		ri->ri_bnum = 8;
		ri->ri_bpos = 0;
		break;
	case DRM_FORMAT_RGB565:
		ri->ri_rnum = 5;
		ri->ri_rpos = 11;
		ri->ri_gnum = 6;
		ri->ri_gpos = 5;
		ri->ri_bnum = 5;
		ri->ri_bpos = 0;
		break;
	}
}
#endif

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);

	DRM_UNLOCK();
#if 1
	DRM_DEBUG_KMS("skipping call to vga_switcheroo_client_fb_set\n");
#else
	vga_switcheroo_client_fb_set(dev->pdev, info);
#endif
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK();
out:
	return ret;
}
Code Example #11
int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
					struct drm_file *file,
					int prime_fd, unsigned int *handle)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	dmabuf = dma_buf_get(prime_fd);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto out;
	}

	/*
	 * if there is same dmabuf as the one to prime_fd
	 * in file_priv->prime list then return the handle.
	 *
	 * Note:
	 * but if the prime_fd from user belongs to another process
	 * then there couldn't be the dmabuf in file_priv->prime list
	 * because file_priv is unique to process.
	 */
	ret = drm_prime_lookup_fd_handle_mapping(&file_priv->prime,
							dmabuf, handle);
	if (!ret) {
		/* drop reference we got above. */
		dma_buf_put(dmabuf);
		goto out;
	}

	attach = dma_buf_attach(dmabuf, drm_dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto fail_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto fail_unmap;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dmabuf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_handle_create(file, &exynos_gem_obj->base, handle);
	if (ret < 0)
		goto fail_handle;

	/* consider physically non-continuous memory with IOMMU. */

	buffer->dma_addr = sg_dma_address(sgt->sgl);
	buffer->size = sg_dma_len(sgt->sgl);
	buffer->sgt = sgt;

	/*
	 * import(fd to handle) means that the physical memory region
	 * from the sgt is being shared with others so shared_refcount
	 * should be 1.
	 */
	atomic_set(&buffer->shared_refcount, 1);

	exynos_gem_obj->base.import_attach = attach;

	ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
							dmabuf, *handle);
	if (ret < 0)
		goto fail_handle;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0) {
		drm_prime_remove_fd_handle_mapping(&file_priv->prime, dmabuf);
		goto fail_handle;
	}

	DRM_DEBUG_KMS("fd = %d, handle = %d, dma_addr = 0x%x, size = 0x%lx\n",
			prime_fd, *handle, buffer->dma_addr, buffer->size);

	drm_gem_object_unreference(&exynos_gem_obj->base);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;

fail_handle:
	drm_gem_object_unreference(&exynos_gem_obj->base);
	kfree(buffer);
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt);
fail_detach:
	dma_buf_detach(dmabuf, attach);
fail_put:
	dma_buf_put(dmabuf);
out:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}
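On the userspace side the import path above is normally reached through libdrm rather than a raw ioctl. A hedged sketch: drmPrimeFDToHandle() is libdrm's wrapper for DRM_IOCTL_PRIME_FD_TO_HANDLE, while import_dmabuf() is only an illustrative helper name.

/* Userspace sketch: convert a dma-buf fd received from another process into
 * a GEM handle on this DRM device fd. */
#include <stdint.h>
#include <xf86drm.h>

static int import_dmabuf(int drm_fd, int dmabuf_fd, uint32_t *handle)
{
	/* Returns 0 on success, non-zero on failure. */
	return drmPrimeFDToHandle(drm_fd, dmabuf_fd, handle);
}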
Code Example #12
static int do_switch(struct drm_i915_gem_object *from_obj,
		     struct i915_hw_context *to,
		     u32 seqno)
{
	struct intel_ring_buffer *ring = NULL;
	u32 hw_flags = 0;
	int ret;

	BUG_ON(to == NULL);
	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);

	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
	if (ret)
		return ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 * XXX: We need a real interface to do this instead of trickery. */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;
	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
		hw_flags |= MI_FORCE_RESTORE;

	ring = to->ring;
	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from_obj != NULL) {
		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_gem_object_move_to_active(from_obj, ring, seqno);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from_obj->dirty = 1;
		BUG_ON(from_obj->ring != to->ring);
		i915_gem_object_unpin(from_obj);

		drm_gem_object_unreference(&from_obj->base);
	}

	drm_gem_object_reference(&to->obj->base);
	ring->last_context_obj = to->obj;
	to->is_initialized = true;

	return 0;
}
Code Example #13
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj_priv->tiling_mode ||
	    args->stride != obj_priv->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
		    ret = i915_gem_object_unbind(obj);
		else
		    ret = i915_gem_object_put_fence_reg(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to reset object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			args->stride = obj_priv->stride;
			goto err;
		}

		/* If we've changed tiling, GTT-mappings of the object
		 * need to re-fault to ensure that the correct fence register
		 * setup is in place.
		 */
		i915_gem_release_mmap(obj);

		obj_priv->tiling_mode = args->tiling_mode;
		obj_priv->stride = args->stride;
	}
err:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
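The ioctl handled above has a straightforward userspace mirror. A hedged sketch of a SET_TILING request for X tiling follows; set_x_tiling() is an illustrative helper name, while the struct and ioctl number are the uapi ones this handler consumes.

/* Userspace sketch: request X tiling with a given stride and read back the
 * swizzle mode the kernel settled on for bit 6 of object addresses. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_x_tiling(int drm_fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling set;

	memset(&set, 0, sizeof(set));
	set.handle = handle;
	set.tiling_mode = I915_TILING_X;
	set.stride = stride;
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_TILING, &set))
		return -1;

	return set.swizzle_mode;	/* how bit 6 of addresses gets swizzled */
}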
Code Example #14
static int evdifb_create(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct evdi_fbdev *ufbdev = (struct evdi_fbdev *)helper;
	struct drm_device *dev = ufbdev->helper.dev;
	struct fb_info *info;
	struct device *device = dev->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct evdi_gem_object *obj;
	uint32_t size;
	int ret = 0;

	if (sizes->surface_bpp == 24) {
		sizes->surface_bpp = 32;
	} else if (sizes->surface_bpp != 32) {
		EVDI_ERROR("Not supported pixel format (bpp=%d)\n",
			   sizes->surface_bpp);
		return -EINVAL;
	}

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	obj = evdi_gem_alloc_object(dev, size);
	if (!obj) {
		ret = -ENOMEM;
		goto out;
	}

	ret = evdi_gem_vmap(obj);
	if (ret) {
		DRM_ERROR("failed to vmap fb\n");
		goto out_gfree;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_gfree;
	}
	info->par = ufbdev;

	ret = evdi_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
	if (ret)
		goto out_gfree;

	fb = &ufbdev->ufb.base;

	ufbdev->helper.fb = fb;
	ufbdev->helper.fbdev = info;

	strcpy(info->fix.id, "evdidrmfb");

	info->screen_base = ufbdev->ufb.obj->vmapping;
	info->fix.smem_len = size;
	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &evdifb_ops;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_gfree;
	}

	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
		      fb->width, fb->height, ufbdev->ufb.obj->vmapping);

	return ret;
 out_gfree:
	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
 out:
	return ret;
}
Code Example #15
File: i915_gem_stolen.c  Project: HEATHlabs/ravlinux
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	BUG_ON(stolen_offset & 4095);
	BUG_ON(size & 4095);

	if (WARN_ON(size == 0))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		drm_mm_remove_node(stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_vma_create(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

	obj->has_global_gtt_mapping = 1;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	drm_mm_remove_node(stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}
Code Example #16
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	DRM_LOCK(dev);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_alignment =
				i915_gem_get_unfenced_gtt_alignment(dev,
								    obj->base.size,
								    args->tiling_mode);
			if (obj->gtt_offset & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->fenced_gpu_access ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	return ret;
}
Code Example #17
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}
	if (args->tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to unbind object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			mutex_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);

			return ret;
		}
		obj_priv->tiling_mode = args->tiling_mode;
	}
	obj_priv->stride = args->stride;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
Code Example #18
File: intel_fbdev.c  Project: nemumu/linux
static int intelfb_create(struct drm_fb_helper *helper,
                          struct drm_fb_helper_surface_size *sizes)
{
    struct intel_fbdev *ifbdev =
        container_of(helper, struct intel_fbdev, helper);
    struct intel_framebuffer *intel_fb = ifbdev->fb;
    struct drm_device *dev = helper->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct fb_info *info;
    struct drm_framebuffer *fb;
    struct drm_i915_gem_object *obj;
    int size, ret;
    bool prealloc = false;

    mutex_lock(&dev->struct_mutex);

    if (intel_fb &&
            (sizes->fb_width > intel_fb->base.width ||
             sizes->fb_height > intel_fb->base.height)) {
        DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
                      " releasing it\n",
                      intel_fb->base.width, intel_fb->base.height,
                      sizes->fb_width, sizes->fb_height);
        drm_framebuffer_unreference(&intel_fb->base);
        intel_fb = ifbdev->fb = NULL;
    }
    if (!intel_fb || WARN_ON(!intel_fb->obj)) {
        DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
        ret = intelfb_alloc(helper, sizes);
        if (ret)
            goto out_unlock;
        intel_fb = ifbdev->fb;
    } else {
        DRM_DEBUG_KMS("re-using BIOS fb\n");
        prealloc = true;
        sizes->fb_width = intel_fb->base.width;
        sizes->fb_height = intel_fb->base.height;
    }

    obj = intel_fb->obj;
    size = obj->base.size;

    info = framebuffer_alloc(0, &dev->pdev->dev);
    if (!info) {
        ret = -ENOMEM;
        goto out_unpin;
    }

    info->par = helper;

    fb = &ifbdev->fb->base;

    ifbdev->helper.fb = fb;
    ifbdev->helper.fbdev = info;

    strcpy(info->fix.id, "inteldrmfb");

    info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
    info->fbops = &intelfb_ops;

    ret = fb_alloc_cmap(&info->cmap, 256, 0);
    if (ret) {
        ret = -ENOMEM;
        goto out_unpin;
    }
    /* setup aperture base/size for vesafb takeover */
    info->apertures = alloc_apertures(1);
    if (!info->apertures) {
        ret = -ENOMEM;
        goto out_unpin;
    }
    info->apertures->ranges[0].base = dev->mode_config.fb_base;
    info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

    info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
    info->fix.smem_len = size;

    info->screen_base =
        ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                   size);
    if (!info->screen_base) {
        ret = -ENOSPC;
        goto out_unpin;
    }
    info->screen_size = size;

    /* This driver doesn't need a VT switch to restore the mode on resume */
    info->skip_vt_switch = true;

    drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
    drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

    /* If the object is shmemfs backed, it will have given us zeroed pages.
     * If the object is stolen however, it will be full of whatever
     * garbage was left in there.
     */
    if (ifbdev->fb->obj->stolen && !prealloc)
        memset_io(info->screen_base, 0, info->screen_size);

    /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

    DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
                  fb->width, fb->height,
                  i915_gem_obj_ggtt_offset(obj), obj);

    mutex_unlock(&dev->struct_mutex);
    vga_switcheroo_client_fb_set(dev->pdev, info);
    return 0;

out_unpin:
    i915_gem_object_ggtt_unpin(obj);
    drm_gem_object_unreference(&obj->base);
out_unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}
Code Example #19
File: msm_ringbuffer.c  Project: 03199618/linux
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
	if (ring->bo)
		drm_gem_object_unreference(ring->bo);
	kfree(ring);
}
Code Example #20
struct drm_i915_gem_object *
kos_gem_fb_object_create(struct drm_device *dev,
                           u32 gtt_offset,
                           u32 size)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *ggtt = &dev_priv->gtt.base;
    struct drm_i915_gem_object *obj;
    struct drm_mm_node *fb_node;
    struct i915_vma *vma;
    int ret;

    DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n",
                  gtt_offset, size);

    /* KISS and expect everything to be page-aligned */
    BUG_ON(size & 4095);

    if (WARN_ON(size == 0))
        return NULL;

    fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
    if (!fb_node)
        return NULL;

    fb_node->start = gtt_offset;
    fb_node->size = size;

    obj = _kos_fb_object_create(dev, fb_node);
    if (obj == NULL) {
        DRM_DEBUG_KMS("failed to preallocate framebuffer object\n");
        kfree(fb_node);
        return NULL;
    }

    vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
    if (IS_ERR(vma)) {
        ret = PTR_ERR(vma);
        goto err_out;
    }

    /* To simplify the initialisation sequence between KMS and GTT,
     * we allow construction of the stolen object prior to
     * setting up the GTT space. The actual reservation will occur
     * later.
     */
    vma->node.start = gtt_offset;
    vma->node.size = size;
    if (drm_mm_initialized(&ggtt->mm)) {
        ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
        if (ret) {
            DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n");
            goto err_vma;
        }
    }

//    obj->has_global_gtt_mapping = 1;

    list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
    list_add_tail(&vma->mm_list, &ggtt->inactive_list);

    mutex_lock(&dev->object_name_lock);
    idr_preload(GFP_KERNEL);

    if (!obj->base.name) {
        ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
        if (ret < 0)
            goto err_gem;

        obj->base.name = ret;

        /* Allocate a reference for the name table.  */
        drm_gem_object_reference(&obj->base);

        DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
    }

    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
    drm_gem_object_unreference(&obj->base);
    return obj;

err_gem:
    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
err_vma:
    i915_gem_vma_destroy(vma);
err_out:
    kfree(fb_node);
    drm_gem_object_unreference(&obj->base);
    return NULL;
}
Code Example #21
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = &ifbdev->ifb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	int size, ret;

	mutex_lock(&dev->struct_mutex);

	if (!intel_fb->obj) {
		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			goto out_unlock;
	} else {
		DRM_DEBUG_KMS("re-using BIOS fb\n");
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
	}

	obj = intel_fb->obj;
	size = obj->base.size;

	info = framebuffer_alloc(0, &dev->pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = helper;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

	info->screen_base =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (ifbdev->ifb.obj->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
		      fb->width, fb->height,
		      i915_gem_obj_ggtt_offset(obj), obj);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Code Example #22
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
						      8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_stolen(dev, size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = helper;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_fbrelease;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strncpy(info->fix.id, "inteldrmfb", sizeof(info->fix.id) - 1);

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_fbdestroy;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_decmap;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

	info->screen_base =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_freeap;
	}
	info->screen_size = size;

	/* This driver doesn't need a VT switch to restore the mode on resume */
	info->skip_vt_switch = true;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (ifbdev->ifb.obj->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
		      fb->width, fb->height,
		      i915_gem_obj_ggtt_offset(obj), obj);


	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_freeap:
	kfree(info->apertures);
out_decmap:
	fb_dealloc_cmap(&info->cmap);
out_fbdestroy:
	fb->funcs->destroy(fb);
out_fbrelease:
	kfree(info);
out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
Code Example #23
File: intel_fb.c  Project: ziozzang/kernel-rhel6
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd mode_cmd;
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->aperture_base = dev->mode_config.fb_base;
	if (!IS_GEN2(dev))
		info->aperture_size = pci_resource_len(dev->pdev, 2);
	else
		info->aperture_size = pci_resource_len(dev->pdev, 0);

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);


	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
Code Example #24
File: radeon_fb.c  Project: 383530895/linux
static int radeonfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct radeon_fbdev *rfbdev =
		container_of(helper, struct radeon_fbdev, helper);
	struct radeon_device *rdev = rfbdev->rdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	struct device *device = &rdev->pdev->dev;
	int ret;
	unsigned long tmp;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* avivo can't scanout real 24bpp */
	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
		sizes->surface_bpp = 32;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon object %d\n", ret);
		return ret;
	}

	rbo = gem_to_radeon_bo(gobj);

	/* okay we have an object now allocate the framebuffer */
	info = framebuffer_alloc(0, device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = rfbdev;

	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
	if (ret) {
		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
		goto out_unref;
	}

	fb = &rfbdev->rfb.base;

	/* setup helper */
	rfbdev->helper.fb = fb;
	rfbdev->helper.fbdev = info;

	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));

	strcpy(info->fix.id, "radeondrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &radeonfb_ops;

	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
	info->fix.smem_start = rdev->mc.aper_base + tmp;
	info->fix.smem_len = radeon_bo_size(rbo);
	info->screen_base = rbo->kptr;
	info->screen_size = radeon_bo_size(rbo);

	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}
	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = rdev->mc.aper_size;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
	return 0;

out_unref:
	if (rbo) {

	}
	if (fb && ret) {
		drm_gem_object_unreference(gobj);
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	return ret;
}
Code Example #25
File: i915_gem_tiling.c  Project: membry/linux-2.6
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		obj->tiling_changed = true;
		obj->tiling_mode = args->tiling_mode;
		obj->stride = args->stride;
	}
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
Code Example #26
File: intel_fb.c  Project: ChristosKa/freebsd
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
#endif
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
							 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = roundup2(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	DRM_LOCK(dev);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

#if 0
	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;
#else
	info = malloc(sizeof(struct fb_info), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	info->fb_size = size;
	info->fb_bpp = sizes->surface_bpp;
	info->fb_width = sizes->fb_width;
	info->fb_height = sizes->fb_height;
	info->fb_pbase = dev->agp->base + obj->gtt_offset;
	info->fb_vbase = (vm_offset_t)pmap_mapdev_attr(info->fb_pbase, size,
	    PAT_WRITE_COMBINING);

#endif

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;
#if 0

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#endif
	DRM_DEBUG_KMS("allocated %dx%d (s %dbits) fb: 0x%08x, bo %p\n",
		      fb->width, fb->height, fb->depth,
		      obj->gtt_offset, obj);

	DRM_UNLOCK(dev);
#if 1
	KIB_NOTYET();
#else
	vga_switcheroo_client_fb_set(dev->pdev, info);
#endif
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);
out:
	return ret;
}
Code Example #27
File: i915_gem_render_state.c  Project: 020gzh/linux
void i915_gem_render_state_fini(struct render_state *so)
{
	i915_gem_object_ggtt_unpin(so->obj);
	drm_gem_object_unreference(&so->obj->base);
}
Code Example #28
int exynos_dmabuf_prime_handle_to_fd(struct drm_device *drm_dev,
					struct drm_file *file,
					unsigned int handle, int *prime_fd)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	obj = drm_gem_object_lookup(drm_dev, file, handle);
	if (!obj) {
		DRM_DEBUG_KMS("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto err1;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (obj->prime_fd != -1) {
		/* we have a prime fd already referencing the object. */
		goto have_fd;
	}

	/*
	 * get the dmabuf object for a gem object after registering
	 * the gem object to allocated dmabuf.
	 *
	 * P.S. dma_buf_export function performs the followings:
	 *	- create a new dmabuf object.
	 *	- dmabuf->priv = gem object.
	 *	- file->private_data = dmabuf.
	 */
	obj->export_dma_buf = dma_buf_export(obj, &exynos_dmabuf_ops,
						obj->size, 0600);
	if (!obj->export_dma_buf) {
		ret = PTR_ERR(obj->export_dma_buf);
		goto err2;
	}

	/* get file descriptor for a given dmabuf object. */
	obj->prime_fd = dma_buf_fd(obj->export_dma_buf);
	if (obj->prime_fd < 0) {
		DRM_DEBUG_KMS("failed to get fd from dmabuf.\n");
		dma_buf_put(obj->export_dma_buf);
		ret = obj->prime_fd;
		goto err2;
	}

	/*
	 * this gem object is referenced by the fd so
	 * the object refcount should be increased.
	 * after that when dmabuf_ops->release() is called,
	 * it will be decreased again.
	 */
	drm_gem_object_reference(obj);

have_fd:
	*prime_fd = obj->prime_fd;
err2:
	drm_gem_object_unreference(obj);
err1:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}
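The export direction implemented above also has a standard libdrm entry point. A hedged sketch: drmPrimeHandleToFD() wraps DRM_IOCTL_PRIME_HANDLE_TO_FD, and export_dmabuf() plus the DRM_CLOEXEC flag choice are the only assumptions beyond that.

/* Userspace sketch: turn an existing GEM handle into a dma-buf fd that can
 * be handed to another process or API. */
#include <fcntl.h>		/* O_CLOEXEC, used by DRM_CLOEXEC */
#include <stdint.h>
#include <xf86drm.h>

static int export_dmabuf(int drm_fd, uint32_t handle, int *dmabuf_fd)
{
	/* Returns 0 on success, non-zero on failure. */
	return drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, dmabuf_fd);
}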