Example #1
/*----------------------- Privcmd char device methods ------------------------*/
static int
privcmd_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *object, int nprot)
{
	struct privcmd_map *map;

	map = malloc(sizeof(*map), M_PRIVCMD, M_WAITOK | M_ZERO);

	map->size = OFF_TO_IDX(size);
	map->pseudo_phys_res_id = 0;

	/* Reserve a range of unused physical address space to back the mapping. */
	map->pseudo_phys_res = xenmem_alloc(privcmd_dev,
	    &map->pseudo_phys_res_id, size);
	if (map->pseudo_phys_res == NULL) {
		free(map, M_PRIVCMD);
		return (ENOMEM);
	}

	map->phys_base_addr = rman_get_start(map->pseudo_phys_res);
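	/* Create a managed device pager object whose faults are serviced by privcmd_pg_ops. */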
	map->mem = cdev_pager_allocate(map, OBJT_MGTDEVICE, &privcmd_pg_ops,
	    size, nprot, *offset, NULL);
	if (map->mem == NULL) {
		xenmem_free(privcmd_dev, map->pseudo_phys_res_id,
		    map->pseudo_phys_res);
		free(map, M_PRIVCMD);
		return (ENOMEM);
	}

	*object = map->mem;

	return (0);
}
Example #2
int
drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct drm_gem_object *gem_obj;
	struct vm_object *vm_obj;

	/* Look up the GEM object backing this mmap offset. */
	DRM_LOCK(dev);
	gem_obj = drm_gem_object_from_offset(dev, *offset);
	if (gem_obj == NULL) {
		DRM_UNLOCK(dev);
		return (ENODEV);
	}
	drm_gem_object_reference(gem_obj);
	DRM_UNLOCK(dev);
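	/* Wrap the GEM object in a managed device pager object; faults go through the driver's gem_pager_ops. */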
	vm_obj = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
	    dev->driver->gem_pager_ops, size, nprot,
	    DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
	if (vm_obj == NULL) {
		drm_gem_object_unreference_unlocked(gem_obj);
		return (EINVAL);
	}
	*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
	*obj_res = vm_obj;
	return (0);
}
Example #3
/* OBJT_DEVICE allocation built on cdev_pager_allocate() with the old_dev_pager_ops callbacks. */
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{

	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
	    size, prot, foff, cred));
}
Example #4
static int
netmap_mmap_single(struct dev_mmap_single_args *ap)
{
    int error;
    struct cdev *cdev = ap->a_head.a_dev;
    vm_ooffset_t *foff = ap->a_offset;
    vm_object_t *objp = ap->a_object;
    vm_size_t objsize = ap->a_size;
    struct netmap_vm_handle_t *vmh;
    struct netmap_priv_d *priv;
    int prot = ap->a_nprot;
    vm_object_t obj;

    D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
      (intmax_t )*foff, (intmax_t )objsize, objp, prot);

    vmh = kmalloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
                  M_NOWAIT | M_ZERO);
    if (vmh == NULL)
        return ENOMEM;
    vmh->dev = cdev;

    /* Fetch the per-open netmap state for this file and take a reference on it. */
    NMG_LOCK();
    error = devfs_get_cdevpriv(ap->a_fp, (void**)&priv);
    if (error)
        goto err_unlock;
    vmh->priv = priv;
    priv->np_refcount++;
    NMG_UNLOCK();

    error = netmap_get_memory(priv);
    if (error)
        goto err_deref;

    /* Create the device pager object; faults are serviced by netmap_cdev_pager_ops. */
    obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
                              &netmap_cdev_pager_ops, objsize, prot,
                              *foff, NULL);
    if (obj == NULL) {
        D("cdev_pager_allocate failed");
        error = EINVAL;
        goto err_deref;
    }

    *objp = obj;
    return 0;

err_deref:
    NMG_LOCK();
    priv->np_refcount--;
err_unlock:
    NMG_UNLOCK();
// err:
    kfree(vmh, M_DEVBUF);
    return error;
}
Example #5
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	*obj_res = NULL;

	/* Look up the buffer object covering the requested range and take a reference. */
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		kref_get(&bo->kref);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	if (unlikely(bo == NULL)) {
		kprintf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);

	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
Example #6
/*
 * No requirements.
 */
vm_object_t
dev_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
	    size, prot, foff, NULL));
}
Example #7
/**
 * Get the VM object representing a given memory mapping of the compositor. This
 * gets or allocates a CFB pool corresponding to the FD being used to perform
 * the user's mmap() call. If a new FD is mmap()ped, a new CFB pool is allocated
 * and returned. If the same FD is mmap()ped again, the same CFB pool is
 * returned. Each vm_object corresponds directly with a CFB pool.
 *
 * offset is a guaranteed-page-aligned offset into the FD requested by the user
 * in their call to mmap(). We may modify it.
 * size is a guaranteed-page-rounded size for the mapping as requested by the
 * user in their call to mmap().
 */
static int
cheri_compositor_cfb_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct cheri_compositor_softc *sc;
	struct cfb_vm_object *cfb_vm_obj;
	struct vm_object *vm_obj = NULL;
	struct file *cdev_fd;
	struct compositor_cfb_pool *cfb_pool;
	int error;

	sc = dev->si_drv1;

	error = 0;

	CHERI_COMPOSITOR_DEBUG(sc,
	    "dev: %p, offset: %lu, size: %lu, nprot: %i", dev, *offset, size,
	    nprot);

	cdev_fd = curthread->td_fpop;
	KASSERT(cdev_fd != NULL, ("mmap_single td_fpop == NULL"));

	CHERI_COMPOSITOR_DEBUG(sc, "cdev_fd: %p", cdev_fd);

	/* Allocate a CFB VM object to associate the cdev with the CFB pool
	 * mapping. Note: The ordering here is fairly sensitive to changes, as
	 * the cdev_pager_allocate() call results in sub-calls to
	 * cheri_compositor_cfb_pg_fault(), which assumes various fields in the
	 * CFB VM object have been initialised.
	 *
	 * The CFB VM object gets destroyed in
	 * cheri_compositor_cfb_pg_dtor(). */
	cfb_vm_obj =
	    malloc(sizeof(*cfb_vm_obj), M_CHERI_COMPOSITOR, M_WAITOK | M_ZERO);

	CHERI_COMPOSITOR_LOCK(sc);

	/* Find/Allocate a pool mapping for this FD. */
	if (dup_or_allocate_cfb_pool_for_cdev_fd(sc, cdev_fd,
	    NULL /* set later */, &cfb_pool) != 0) {
		free(cfb_vm_obj, M_CHERI_COMPOSITOR);
		error = ENOMEM;
		goto done;
	}

	/* Update the CFB VM object with the pool mapping and cdev. These have
	 * both been referenced, and the references are transferred to the CFB
	 * VM object. */
	cfb_vm_obj->dev = dev;
	cfb_vm_obj->pool = cfb_pool;

	/* If a pool had already been allocated for this FD, re-use it. */
	if (cfb_pool->vm_obj != NULL) {
		vm_object_reference(cfb_pool->vm_obj);
		vm_obj = cfb_pool->vm_obj;
		goto done;
	}

	/* Allocate a device pager VM object. */
	vm_obj = cdev_pager_allocate(cfb_vm_obj, OBJT_DEVICE,
	    &cheri_compositor_cfb_pager_ops, size, nprot,
	    *offset, curthread->td_ucred);

	if (vm_obj == NULL) {
		CHERI_COMPOSITOR_UNLOCK(sc);
		cheri_compositor_cfb_pg_dtor(cfb_vm_obj);
		error = EINVAL;
		goto done_unlocked;
	}

	/* Update the CFB pool to store the VM object. Transfer the reference
	 * from allocation. */
	cfb_pool->vm_obj = vm_obj;

done:
	CHERI_COMPOSITOR_UNLOCK(sc);
done_unlocked:
	CHERI_COMPOSITOR_DEBUG(sc,
	    "Finished with vm_obj: %p, cfb_pool: %p (retval: %u).",
	    vm_obj, cfb_pool, error);

	*obj_res = vm_obj;

	/* Don't need to modify the offset. It was originally passed by the user
	 * as an offset from the start of the cdev FD. Since the cdev FD maps
	 * directly to a CFB pool/VM object, the offset becomes an offset from
	 * the start of the CFB pool/VM object. */

	return (error);
}
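Common pattern (sketch). Every example above implements an mmap_single handler that resolves some per-mapping handle and passes it to cdev_pager_allocate() together with a struct cdev_pager_ops whose callbacks are not shown. The following is a minimal sketch, assuming the FreeBSD KPI; the my_* names are hypothetical and the fault callback is a stub, so it illustrates the call shape rather than the code of any driver above.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/* Per-mapping constructor; *color sets the new object's page color. */
static int
my_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	*color = 0;
	return (0);
}

/* Per-mapping destructor: drop whatever reference the handle represents. */
static void
my_pg_dtor(void *handle)
{
}

/* Fault handler: a real driver would supply a page for `offset' and
 * return VM_PAGER_OK; this stub simply fails the fault. */
static int
my_pg_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	return (VM_PAGER_FAIL);
}

static struct cdev_pager_ops my_pg_ops = {
	.cdev_pg_ctor = my_pg_ctor,
	.cdev_pg_dtor = my_pg_dtor,
	.cdev_pg_fault = my_pg_fault,
};

/* d_mmap_single entry point: produce the VM object that backs the mapping.
 * The handle may be any per-mapping cookie; the cdev is used here for brevity. */
static int
my_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *object, int nprot)
{
	vm_object_t obj;

	obj = cdev_pager_allocate(cdev, OBJT_DEVICE, &my_pg_ops,
	    size, nprot, *offset, curthread->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*object = obj;
	return (0);
}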