Code Example #1
File: drm_gem.c  Project: E-LLP/n900
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, 0);
	if (IS_ERR(obj->filp)) {
		kfree(obj);
		return NULL;
	}

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		fput(obj->filp);
		kfree(obj);
		return NULL;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
}
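
A typical caller turns the allocated object into a userspace handle and then drops its own reference. A minimal sketch under that assumption; the surrounding my_gem_create() wrapper is hypothetical, and only the drm_gem_* helpers are taken from the DRM GEM API of the same era:

static int my_gem_create(struct drm_device *dev, struct drm_file *file,
			 size_t size, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	/* GEM objects must be a whole number of pages. */
	obj = drm_gem_object_alloc(dev, PAGE_ALIGN(size));
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	/* The handle now holds its own reference; drop the allocation one. */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}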
Code Example #2
File: ttm_tt.c  Project: AlexShiLucky/linux
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
Code Example #3
File: ttm_tt.c  Project: RyanMallon/linux-ep93xx
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		copy_highpage(to_page, from_page);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
Code Example #4
File: memfd.c  Project: marb73/kdbus
/* create file and install a new file descriptor */
int kdbus_memfd_new(int *fd)
{
	struct kdbus_memfile *mf;
	struct file *shmemfp;
	struct file *fp;
	int f;
	int ret;

	mf = kzalloc(sizeof(struct kdbus_memfile), GFP_KERNEL);
	if (!mf)
		return -ENOMEM;

	mutex_init(&mf->lock);

	/* allocate a new unlinked shmem file */
	shmemfp = shmem_file_setup("kdbus-memfd", 0, 0);
	if (IS_ERR(shmemfp)) {
		ret = PTR_ERR(shmemfp);
		goto exit;
	}
	mf->fp = shmemfp;

	f = get_unused_fd_flags(O_CLOEXEC);
	if (f < 0) {
		ret = f;
		goto exit_shmem;
	}

	/* The anonymous exported inode ops cannot reach the otherwise
	 * invisible shmem inode. We rely on the fact that nothing else
	 * can create a new file for the shmem inode, like by opening the
	 * fd in /proc/$PID/fd/ */
	fp = anon_inode_getfile("[kdbus]", &kdbus_memfd_fops, mf, O_RDWR);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		goto exit_fd;
	}

	fp->f_mode |= FMODE_LSEEK|FMODE_PREAD|FMODE_PWRITE;
	fp->f_mapping = shmemfp->f_mapping;
	fd_install(f, fp);

	*fd = f;
	return 0;

exit_fd:
	put_unused_fd(f);
exit_shmem:
	fput(shmemfp);
exit:
	kfree(mf);
	return ret;
}
Code Example #5
File: pool.c  Project: Mayzie/bus1
/**
 * bus1_pool_create_internal() - create memory pool
 * @pool:	(uninitialized) pool to operate on
 * @size:	size of the pool
 *
 * Initialize a new pool object. This allocates a backing shmem object of the
 * given size.
 *
 * NOTE: All pools must be embedded into a parent bus1_peer_info object. The
 *       code still works if you don't, but the lockdep annotations will fail
 *       horribly. They rely on bus1_peer_info_from_pool() to be valid on every
 *       pool. Use the bus1_pool_create_for_peer() macro to make sure you
 *       never violate this rule.
 *
 * Return: 0 on success, negative error code on failure.
 */
int bus1_pool_create_internal(struct bus1_pool *pool, size_t size)
{
	struct bus1_pool_slice *slice;
	struct file *f;
	int r;

	/* cannot calculate width of bitfields, so hardcode '4' as flag-size */
	BUILD_BUG_ON(BUS1_POOL_SLICE_SIZE_BITS + 4 > 32);
	BUILD_BUG_ON(BUS1_POOL_SIZE_MAX >=
		     (1ULL <<
		      (sizeof(((struct bus1_pool_slice *)0)->offset) * 8)));

	size = ALIGN(size, 8);
	if (size == 0 || size > BUS1_POOL_SIZE_MAX)
		return -EMSGSIZE;

	f = shmem_file_setup(KBUILD_MODNAME "-peer", size, 0);
	if (IS_ERR(f))
		return PTR_ERR(f);

	r = get_write_access(file_inode(f));
	if (r < 0)
		goto error_put_file;

	slice = bus1_pool_slice_new(0, size);
	if (IS_ERR(slice)) {
		r = PTR_ERR(slice);
		goto error_put_write;
	}

	slice->free = true;
	slice->accounted = false;
	slice->ref_kernel = false;
	slice->ref_user = false;

	pool->f = f;
	pool->size = size;
	pool->accounted_size = 0;
	INIT_LIST_HEAD(&pool->slices);
	pool->slices_free = RB_ROOT;
	pool->slices_busy = RB_ROOT;

	list_add(&slice->entry, &pool->slices);
	bus1_pool_slice_link_free(slice, pool);

	return 0;

error_put_write:
	put_write_access(file_inode(f));
error_put_file:
	fput(f);
	return r;
}
Code Example #6
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
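
Drivers normally embed struct drm_gem_object in a driver-private buffer object and pass the embedded member to drm_gem_object_init(). A minimal sketch under that assumption; the my_bo structure and constructor are hypothetical, and only drm_gem_object_init() comes from the example above:

struct my_bo {
	struct drm_gem_object base;
	/* driver-private fields would follow */
};

static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
{
	struct my_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* drm_gem_object_init() expects a page-aligned size. */
	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}
	return bo;
}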
Code Example #7
File: tiny-shmem.c  Project: 274914765/C
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
    struct file *file;
    loff_t size = vma->vm_end - vma->vm_start;

    file = shmem_file_setup("dev/zero", size, vma->vm_flags);
    if (IS_ERR(file))
        return PTR_ERR(file);

    if (vma->vm_file)
        fput(vma->vm_file);
    vma->vm_file = file;
    vma->vm_ops = &generic_file_vm_ops;
    return 0;
}
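
shmem_zero_setup() is reached whenever userspace creates a shared anonymous mapping: the kernel attaches an unlinked shmem file (built with shmem_file_setup()) to the vma so the pages have a tmpfs backing object. A minimal userspace sketch that exercises this path (illustrative only, not part of the kernel example above):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p;

	/* MAP_SHARED | MAP_ANONYMOUS is what makes the kernel call
	 * shmem_zero_setup() on the new vma. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	strcpy(p, "backed by an unlinked shmem file");
	printf("%s\n", p);
	munmap(p, len);
	return 0;
}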
Code Example #8
File: drm_gem.c  Project: yyzreal/cedarview-drm
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj->dev = dev;
    obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
    if (IS_ERR(obj->filp))
        return -ENOMEM;

    kref_init(&obj->refcount);
    atomic_set(&obj->handle_count, 0);
    obj->size = size;

    return 0;
}
Code Example #9
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		goto free;

	/* Basically we want to disable the OOM killer and handle ENOMEM
	 * ourselves by sacrificing pages from cached buffers.
	 * XXX shmem_file_[gs]et_gfp_mask()
	 */
	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
			     GFP_HIGHUSER |
			     __GFP_COLD |
			     __GFP_FS |
			     __GFP_RECLAIMABLE |
			     __GFP_NORETRY |
			     __GFP_NOWARN |
			     __GFP_NOMEMALLOC);

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
fput:
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
Code Example #10
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->gemo_shm_uao = uao_create(size, 0);
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);
#endif

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
Code Example #11
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
#ifdef VMW_HAS_STACK_KMAP_ATOMIC
		from_virtual = kmap_atomic(from_page);
		to_virtual = kmap_atomic(to_page);
#else
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
#endif
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
#ifdef VMW_HAS_STACK_KMAP_ATOMIC
		kunmap_atomic(to_virtual);
		kunmap_atomic(from_virtual);
#else
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
#endif
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}
Code Example #12
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);
	obj->size = size;
	return 0;
}
Code Example #13
File: memfd.c  Project: RPajak/kdbus
/**
 * kdbus_memfd_new() - create and install a memfd and file descriptor
 * @name:		Name of the (deleted) file which shows up in
 *			/proc, used for debugging
 * @size:		Initial size of the file
 * @fd:			Installed file descriptor
 *
 * Return: 0 on success, negative errno on failure.
 */
int kdbus_memfd_new(const char *name, size_t size, int *fd)
{
	const char *shmem_name = NULL;
	const char *anon_name = NULL;
	struct kdbus_memfile *mf;
	struct file *shmemfp;
	struct file *fp;
	int f, ret;

	mf = kzalloc(sizeof(*mf), GFP_KERNEL);
	if (!mf)
		return -ENOMEM;

	mutex_init(&mf->lock);

	if (name) {
		mf->name = kstrdup(name, GFP_KERNEL);
		shmem_name = kasprintf(GFP_KERNEL,
				       KBUILD_MODNAME "-memfd:%s", name);
		anon_name = kasprintf(GFP_KERNEL,
				      "[" KBUILD_MODNAME "-memfd:%s]", name);
		if (!mf->name || !shmem_name || !anon_name) {
			ret = -ENOMEM;
			goto exit;
		}
	}

	/* allocate a new unlinked shmem file */
	shmemfp = shmem_file_setup(name ? shmem_name : KBUILD_MODNAME "-memfd",
				   size, 0);
	if (IS_ERR(shmemfp)) {
		ret = PTR_ERR(shmemfp);
		goto exit;
	}
	mf->fp = shmemfp;

	f = get_unused_fd_flags(O_CLOEXEC);
	if (f < 0) {
		ret = f;
		goto exit_shmem;
	}

	/*
	 * The anonymous exported inode ops cannot reach the otherwise
	 * invisible shmem inode. We rely on the fact that nothing else
	 * can create a new file for the shmem inode, like by opening the
	 * fd in /proc/$PID/fd/
	 */
	fp = anon_inode_getfile(name ? anon_name : "[" KBUILD_MODNAME "-memfd]",
				&kdbus_memfd_fops, mf, O_RDWR);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		goto exit_fd;
	}

	fp->f_mode |= FMODE_LSEEK|FMODE_PREAD|FMODE_PWRITE;
	fp->f_mapping = shmemfp->f_mapping;
	fd_install(f, fp);

	kfree(anon_name);
	kfree(shmem_name);
	*fd = f;
	return 0;

exit_fd:
	put_unused_fd(f);
exit_shmem:
	fput(shmemfp);
exit:
	kfree(anon_name);
	kfree(shmem_name);
	kfree(mf->name);
	kfree(mf);
	return ret;
}