Example #1
/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
Example #2
int qxl_surface_id_alloc(struct qxl_device *qdev,
		      struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
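	/* Preload IDR memory so the GFP_NOWAIT allocation below can succeed
	 * under the spinlock; only the ID is used, so the slot holds NULL. */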
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
Example #3
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
Example #4
int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
{
	struct device *cd;
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&dca_idr_lock);

	ret = idr_alloc(&dca_idr, dca, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		dca->id = ret;

	spin_unlock(&dca_idr_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
	if (IS_ERR(cd)) {
		spin_lock(&dca_idr_lock);
		idr_remove(&dca_idr, dca->id);
		spin_unlock(&dca_idr_lock);
		return PTR_ERR(cd);
	}
	dca->cd = cd;
	return 0;
}
Example #5
/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch is changed if this wraps.
 *
 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
 * widely scattered throughout the number space, so we shall need to retire
 * connections that have, say, an ID more than four times the maximum number of
 * client conns away from the current allocation point to try and keep the IDs
 * concentrated.  We will also need to retire connections from an old epoch.
 */
int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, gfp_t gfp)
{
	u32 epoch;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	epoch = rxrpc_epoch;

	/* We could use idr_alloc_cyclic() here, but we really need to know
	 * when the thing wraps so that we can advance the epoch.
	 */
	if (rxrpc_client_conn_ids.cur == 0)
		rxrpc_client_conn_ids.cur = 1;
	id = idr_alloc(&rxrpc_client_conn_ids, conn,
		       rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
	if (id < 0) {
		if (id != -ENOSPC)
			goto error;
		id = idr_alloc(&rxrpc_client_conn_ids, conn,
			       1, 0x40000000, GFP_NOWAIT);
		if (id < 0)
			goto error;
		epoch++;
		rxrpc_epoch = epoch;
	}
	rxrpc_client_conn_ids.cur = id + 1;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x:%x]", epoch, conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
Example #6
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		obj->name = ret;
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		idr_preload_end();

		if (ret < 0)
			goto err;
		ret = 0;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		idr_preload_end();
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
Example #7
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
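	/* Only the ID matters here, so NULL is stored in the IDR slot. */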
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}
Example #8
int
arch_phys_wc_add(unsigned long base, unsigned long size)
{
#if defined(MTRR)
	struct mtrr *mtrr;
	int n = 1;
	int id;
	int ret;

	mtrr = kmem_alloc(sizeof(*mtrr), KM_SLEEP);
	mtrr->base = base;
	mtrr->len = size;
	mtrr->type = MTRR_TYPE_WC;
	mtrr->flags = MTRR_VALID;

	/* XXX errno NetBSD->Linux */
	ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
	if (ret) {
		KASSERT(n == 0);
		goto fail0;
	}
	KASSERT(n == 1);

	idr_preload(GFP_KERNEL);
	mutex_spin_enter(&linux_writecomb.lock);
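	/* The allocation must not sleep while the spin mutex is held, so it
	 * relies on the memory preloaded above and GFP_NOWAIT. */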
	id = idr_alloc(&linux_writecomb.idr, mtrr, 0, 0, GFP_NOWAIT);
	mutex_spin_exit(&linux_writecomb.lock);
	idr_preload_end();
	if (id < 0)
		goto fail1;

	return id;

fail1:	KASSERT(id < 0);
	mtrr->type = 0;
	mtrr->flags = 0;
	/* XXX errno NetBSD->Linux */
	ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
	KASSERT(ret == 0);
	KASSERT(n == 1);
	ret = id;
fail0:	KASSERT(ret < 0);
	kmem_free(mtrr, sizeof(*mtrr));
	return ret;
#else
	return -1;
#endif
}
Example #9
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
{
	int ret, min, max;

	min = max_t(int, start, PASID_MIN);
	max = min_t(int, end, intel_pasid_max_id);

	WARN_ON(in_interrupt());
	idr_preload(gfp);
	spin_lock(&pasid_lock);
	ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
	spin_unlock(&pasid_lock);
	idr_preload_end();

	return ret;
}
Example #10
/*
 * Get a unique NFSv4.0 callback identifier which will be used
 * by the V4.0 callback service to lookup the nfs_client struct
 */
static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
{
	int ret = 0;
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);

	if (clp->rpc_ops->version != 4 || minorversion != 0)
		return ret;
	idr_preload(GFP_KERNEL);
	spin_lock(&nn->nfs_client_lock);
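	/* Idents start at 1; an end of 0 means there is no upper limit. */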
	ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		clp->cl_cb_ident = ret;
	spin_unlock(&nn->nfs_client_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
Example #11
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
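	/* Each minor type owns a 64-entry ID range; the pointer slot stays
	 * NULL for now. */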
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}
Example #12
int tegra_uapi_open_channel(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_drm_context_v1 *context;
	struct drm_syncobj *syncobj;
	int err;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = drm_syncobj_create(&syncobj, 0, NULL);
	if (err)
		goto err_free_context;

	kref_init(&context->refcount);
	context->host1x_class = args->client;
	context->syncobj = syncobj;

	idr_preload(GFP_KERNEL);
	spin_lock(&tegra->context_lock);

	err = idr_alloc(&fpriv->uapi_v1_contexts, context, 1, 0, GFP_NOWAIT);
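	/* On success, err holds the new context ID handed back to userspace. */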

	spin_unlock(&tegra->context_lock);
	idr_preload_end();

	if (err < 0)
		goto err_put_syncobj;

	args->context = err;

	return 0;

err_put_syncobj:
	drm_syncobj_put(syncobj);

err_free_context:
	kfree(context);

	return err;
}
Example #13
/**
 * drm_gem_handle_create_tail - internal functions to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 * 
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
Example #14
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
Example #15
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                struct sigevent __user *, timer_event_spec,
                timer_t __user *, created_timer_id)
{
    struct k_clock *kc = clockid_to_kclock(which_clock);
    struct k_itimer *new_timer;
    int error, new_timer_id;
    sigevent_t event;
    int it_id_set = IT_ID_NOT_SET;

    if (!kc)
        return -EINVAL;
    if (!kc->timer_create)
        return -EOPNOTSUPP;

    new_timer = alloc_posix_timer();
    if (unlikely(!new_timer))
        return -EAGAIN;

    spin_lock_init(&new_timer->it_lock);

    idr_preload(GFP_KERNEL);
    spin_lock_irq(&idr_lock);
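    /* Interrupts are off under idr_lock, so the allocation cannot sleep;
     * it relies on the preloaded memory and GFP_NOWAIT. */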
    error = idr_alloc(&posix_timers_id, new_timer, 0, 0, GFP_NOWAIT);
    spin_unlock_irq(&idr_lock);
    idr_preload_end();
    if (error < 0) {
        /*
         * Weird looking, but we return EAGAIN if the IDR is
         * full (proper POSIX return value for this)
         */
        if (error == -ENOSPC)
            error = -EAGAIN;
        goto out;
    }
    new_timer_id = error;

    it_id_set = IT_ID_SET;
    new_timer->it_id = (timer_t) new_timer_id;
    new_timer->it_clock = which_clock;
    new_timer->it_overrun = -1;

    if (timer_event_spec) {
        if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
            error = -EFAULT;
            goto out;
        }
        rcu_read_lock();
        new_timer->it_pid = get_pid(good_sigevent(&event));
        rcu_read_unlock();
        if (!new_timer->it_pid) {
            error = -EINVAL;
            goto out;
        }
    } else {
        memset(&event.sigev_value, 0, sizeof(event.sigev_value));
        event.sigev_notify = SIGEV_SIGNAL;
        event.sigev_signo = SIGALRM;
        event.sigev_value.sival_int = new_timer->it_id;
        new_timer->it_pid = get_pid(task_tgid(current));
    }

    new_timer->it_sigev_notify     = event.sigev_notify;
    new_timer->sigq->info.si_signo = event.sigev_signo;
    new_timer->sigq->info.si_value = event.sigev_value;
    new_timer->sigq->info.si_tid   = new_timer->it_id;
    new_timer->sigq->info.si_code  = SI_TIMER;

    if (copy_to_user(created_timer_id,
                     &new_timer_id, sizeof (new_timer_id))) {
        error = -EFAULT;
        goto out;
    }

    error = kc->timer_create(new_timer);
    if (error)
        goto out;

    spin_lock_irq(&current->sighand->siglock);
    new_timer->it_signal = current->signal;
    list_add(&new_timer->list, &current->signal->posix_timers);
    spin_unlock_irq(&current->sighand->siglock);

    return 0;
    /*
     * In the case of the timer belonging to another task, after
     * the task is unlocked, the timer is owned by the other task
     * and may cease to exist at any time.  Don't use or modify
     * new_timer after the unlock call.
     */
out:
    release_posix_timer(new_timer, it_id_set);
    return error;
}
Example #16
/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
    int i;

    spin_lock_init(&ctx->sste_lock);
    ctx->afu = afu;
    ctx->master = master;
    ctx->pid = NULL; /* Set in start work ioctl */

    /*
     * Allocate the segment table before we put it in the IDR so that we
     * can always access it when dereferenced from IDR. For the same
     * reason, the segment table is only destroyed after the context is
     * removed from the IDR.  Access to this in the IOCTL is protected by
     * Linux filesystem semantics (can't IOCTL until open is complete).
     */
    i = cxl_alloc_sst(ctx);
    if (i)
        return i;

    INIT_WORK(&ctx->fault_work, cxl_handle_fault);

    init_waitqueue_head(&ctx->wq);
    spin_lock_init(&ctx->lock);

    ctx->irq_bitmap = NULL;
    ctx->pending_irq = false;
    ctx->pending_fault = false;
    ctx->pending_afu_err = false;

    /*
     * When we have to destroy all contexts in cxl_context_detach_all() we
     * end up with afu_release_irqs() called from inside a
     * idr_for_each_entry(). Hence we need to make sure that anything
     * dereferenced from this IDR is ok before we allocate the IDR here.
     * This clears out the IRQ ranges to ensure this.
     */
    for (i = 0; i < CXL_IRQ_RANGES; i++)
        ctx->irqs.range[i] = 0;

    mutex_init(&ctx->status_mutex);

    ctx->status = OPENED;

    /*
     * About to allocate the IDR entry, so make sure everything that will
     * be dereferenced from it is already set up.
     */
    idr_preload(GFP_KERNEL);
    spin_lock(&afu->contexts_lock);
    i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
                  ctx->afu->num_procs, GFP_NOWAIT);
    spin_unlock(&afu->contexts_lock);
    idr_preload_end();
    if (i < 0)
        return i;

    ctx->pe = i;
    ctx->elem = &ctx->afu->spa[i];
    ctx->pe_inserted = false;
    return 0;
}
Example #17
struct drm_i915_gem_object *
kos_gem_fb_object_create(struct drm_device *dev,
                           u32 gtt_offset,
                           u32 size)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *ggtt = &dev_priv->gtt.base;
    struct drm_i915_gem_object *obj;
    struct drm_mm_node *fb_node;
    struct i915_vma *vma;
    int ret;

    DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n",
                  gtt_offset, size);

    /* KISS and expect everything to be page-aligned */
    BUG_ON(size & 4095);

    if (WARN_ON(size == 0))
        return NULL;

    fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
    if (!fb_node)
        return NULL;

    fb_node->start = gtt_offset;
    fb_node->size = size;

    obj = _kos_fb_object_create(dev, fb_node);
    if (obj == NULL) {
        DRM_DEBUG_KMS("failed to preallocate framebuffer object\n");
        kfree(fb_node);
        return NULL;
    }

    vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
    if (IS_ERR(vma)) {
        ret = PTR_ERR(vma);
        goto err_out;
    }

    /* To simplify the initialisation sequence between KMS and GTT,
     * we allow construction of the stolen object prior to
     * setting up the GTT space. The actual reservation will occur
     * later.
     */
    vma->node.start = gtt_offset;
    vma->node.size = size;
    if (drm_mm_initialized(&ggtt->mm)) {
        ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
        if (ret) {
            DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n");
            goto err_vma;
        }
    }

//    obj->has_global_gtt_mapping = 1;

    list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
    list_add_tail(&vma->mm_list, &ggtt->inactive_list);

    mutex_lock(&dev->object_name_lock);
    idr_preload(GFP_KERNEL);

    if (!obj->base.name) {
        ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
        if (ret < 0)
            goto err_gem;

        obj->base.name = ret;

        /* Allocate a reference for the name table.  */
        drm_gem_object_reference(&obj->base);

        DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
    }

    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
    drm_gem_object_unreference(&obj->base);
    return obj;

err_gem:
    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
err_vma:
    i915_gem_vma_destroy(vma);
err_out:
    kfree(fb_node);
    drm_gem_object_unreference(&obj->base);
    return NULL;
}