static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
{
	int id, ret;

	while (true) {
		if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id);
		if (ret == 0)
			break;
		else if (ret != -EAGAIN)
			return -ENOMEM;
	}

	/*
	 * IDR always returns the lowest free id, so there is no wrapping
	 * issue here.
	 */
	if (id >= (s32)1 << (31 - PAGE_SHIFT)) {
		dev_err(hwmem_device.this_device, "Out of IDs!\n");
		idr_remove(&hwfile->idr, id);
		return -ENOMSG;
	}

	return (s32)id << PAGE_SHIFT;
}
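All of the pre-3.9 examples in this listing share the same idiom: reserve memory with idr_pre_get(), allocate with idr_get_new_above(), and retry on -EAGAIN. On Linux 3.9 and later the idiom collapses into a single idr_alloc() call (Example #20 below shows both variants side by side). A minimal sketch of the modern equivalent; my_idr, my_lock and my_alloc_id() are hypothetical names:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);		/* hypothetical IDR instance */
static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock guarding it */

/* Allocate an id >= 1 for ptr; returns the id or a negative errno. */
static int my_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
	spin_lock(&my_lock);
	/* start == 1, end == 0: any id >= 1, no upper bound */
	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;	/* -ENOMEM or -ENOSPC on failure */
}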
Example #2
/**
 * spmi_add_controller: Controller bring-up.
 * @ctrl: controller to be registered.
 * A controller is registered with the framework using this API. ctrl->nr is the
 * desired number with which SPMI framework registers the controller.
 * Function will return -EBUSY if the number is in use.
 */
int spmi_add_controller(struct spmi_controller *ctrl)
{
	int	id;
	int	status;

	if (!ctrl)
		return -EINVAL;

	pr_debug("adding controller for bus %d (0x%p)\n", ctrl->nr, ctrl);

	if (ctrl->nr & ~MAX_ID_MASK) {
		pr_err("invalid bus identifier %d\n", ctrl->nr);
		return -EINVAL;
	}

retry:
	if (idr_pre_get(&ctrl_idr, GFP_KERNEL) == 0) {
		pr_err("no free memory for idr\n");
		return -ENOMEM;
	}

	mutex_lock(&board_lock);
	status = idr_get_new_above(&ctrl_idr, ctrl, ctrl->nr, &id);
	if (status == 0 && id != ctrl->nr) {
		status = -EBUSY;
		idr_remove(&ctrl_idr, id);
	}
	mutex_unlock(&board_lock);
	if (status == -EAGAIN)
		goto retry;

	if (status == 0)
		status = spmi_register_controller(ctrl);
	return status;
}
Example #3
static int drm_minor_get_id(struct drm_device *dev, int type)
{
	int new_id;
	int ret;
	int base = 0, limit = 63;

again:
	if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
		DRM_ERROR("Out of memory expanding drawable idr\n");
		return -ENOMEM;
	}
	mutex_lock(&dev->struct_mutex);
	ret = idr_get_new_above(&drm_minors_idr, NULL,
				base, &new_id);
	mutex_unlock(&dev->struct_mutex);
	if (ret == -EAGAIN) {
		goto again;
	} else if (ret) {
		return ret;
	}

	if (new_id >= limit) {
		idr_remove(&drm_minors_idr, new_id);
		return -EINVAL;
	}
	return new_id;
}
Example #4
/**
 * Allocate drawable ID and memory to store information about it.
 */
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
    unsigned long irqflags;
    struct drm_draw *draw = data;
    int new_id = 0;
    int ret;

again:
    if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
        DRM_ERROR("Out of memory expanding drawable idr\n");
        return -ENOMEM;
    }

    spin_lock_irqsave(&dev->drw_lock, irqflags);
    ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
    if (ret == -EAGAIN) {
        spin_unlock_irqrestore(&dev->drw_lock, irqflags);
        goto again;
    }

    spin_unlock_irqrestore(&dev->drw_lock, irqflags);

    if (ret)
        return ret;

    draw->handle = new_id;

    DRM_DEBUG("%d\n", draw->handle);

    return 0;
}
Example #5
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}
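Example #5 covers a caller-specified minor. For dynamic allocation, the same _minor_idr can hand out the lowest free minor via idr_get_new() (the start-at-zero variant of idr_get_new_above()). A hedged sketch of such a counterpart, reusing the _minor_idr, _minor_lock and MINOR_ALLOCED sentinel assumed above; next_free_minor() is an illustrative name:

static int next_free_minor(struct mapped_device *md, int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	/* grab the lowest free id and use it as the minor number */
	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		/* minor space exhausted: roll back and fail */
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;
out:
	spin_unlock(&_minor_lock);
	return r;
}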
Example #6
static inline int siw_add_obj(spinlock_t *lock, struct idr *idr,
			      struct siw_objhdr *obj)
{
	u32		pre_id, id;
	unsigned long	flags;
	int		rv;

	get_random_bytes(&pre_id, sizeof pre_id);
	pre_id &= 0xffffff;
again:
	do {
		if (!(idr_pre_get(idr, GFP_KERNEL)))
			return -ENOMEM;

		spin_lock_irqsave(lock, flags);
		rv = idr_get_new_above(idr, obj, pre_id, &id);
		spin_unlock_irqrestore(lock, flags);

	} while (rv == -EAGAIN);

	if (rv == 0) {
		siw_objhdr_init(obj);
		obj->id = id;
		dprint(DBG_OBJ, "(OBJ%d): IDR New Object\n", id);
	} else if (rv == -ENOSPC && pre_id != 1) {
		pre_id = 1;
		goto again;
	} else {
		dprint(DBG_OBJ|DBG_ON, "(OBJ??): IDR New Object failed!\n");
	}
	return rv;
}
Example #7
File: drm_gem.c Project: E-LLP/n900
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       int *handlep)
{
	int	ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
Example #8
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	/* The old code would re-promote on fork, we don't do that
	 * when using slices as it could cause problem promoting slices
	 * that have been forced down to 4K
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	mm->context.id = index;

	return 0;
}
Example #9
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	int	ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
Example #10
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, md, minor, &m);
	if (r) {
		goto out;
	}

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	up(&_minor_lock);
	return r;
}
Example #11
/* Return a server_id with a unique task_id element.  Free the
 * returned pointer to de-allocate the task_id via a talloc destructor
 * (ie, use talloc_free()) */
static struct server_id *new_server_id_task(TALLOC_CTX *mem_ctx)
{
	struct server_id *server_id;
	int task_id;
	if (!task_id_tree) {
		task_id_tree = idr_init(NULL);
		if (!task_id_tree) {
			return NULL;
		}
	}

	server_id = talloc(mem_ctx, struct server_id);

	if (!server_id) {
		return NULL;
	}
	*server_id = procid_self();

	/* 0 is the default server_id, so we need to start with 1 */
	task_id = idr_get_new_above(task_id_tree, server_id, 1, INT32_MAX);

	if (task_id == -1) {
		talloc_free(server_id);
		return NULL;
	}

	talloc_set_destructor(server_id, free_task_id);
	server_id->task_id = task_id;
	return server_id;
}
Example #12
/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
		struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
		void *priv, u32 addr)
{
	int err, tmpaddr, request;
	struct rpmsg_endpoint *ept;
	struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;

	if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
		return NULL;

	ept = kzalloc(sizeof(*ept), GFP_KERNEL);
	if (!ept) {
		dev_err(dev, "failed to kzalloc a new ept\n");
		return NULL;
	}

	kref_init(&ept->refcount);
	mutex_init(&ept->cb_lock);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;

	/* do we need to allocate a local address ? */
	request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;

	mutex_lock(&vrp->endpoints_lock);

	/* bind the endpoint to an rpmsg address (and allocate one if needed) */
	err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
	if (err) {
		dev_err(dev, "idr_get_new_above failed: %d\n", err);
		goto free_ept;
	}

	if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
		dev_err(dev, "address 0x%x already in use\n", addr);
		goto rem_idr;
	}

	ept->addr = tmpaddr;

	mutex_unlock(&vrp->endpoints_lock);

	return ept;

rem_idr:
	idr_remove(&vrp->endpoints, tmpaddr);	/* free the id that was actually allocated */
free_ept:
	mutex_unlock(&vrp->endpoints_lock);
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}
Example #13
NTSTATUS smb2srv_queue_pending(struct smb2srv_request *req)
{
	NTSTATUS status;
	bool signing_used = false;
	int id;
	uint16_t credits = SVAL(req->in.hdr, SMB2_HDR_CREDIT);

	if (credits == 0) {
		credits = 1;
	}

	if (req->pending_id) {
		return NT_STATUS_INTERNAL_ERROR;
	}

	if (req->smb_conn->connection->event.fde == NULL) {
		/* the socket has been destroyed - no point trying to send an error! */
		return NT_STATUS_REMOTE_DISCONNECT;
	}

	id = idr_get_new_above(req->smb_conn->requests2.idtree_req, req, 
			       1, req->smb_conn->requests2.idtree_limit);
	if (id == -1) {
		return NT_STATUS_INSUFFICIENT_RESOURCES;
	}

	DLIST_ADD_END(req->smb_conn->requests2.list, req, struct smb2srv_request *);
	req->pending_id = id;

	talloc_set_destructor(req, smb2srv_request_deny_destructor);

	status = smb2srv_setup_reply(req, 8, true, 0);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	SIVAL(req->out.hdr, SMB2_HDR_STATUS, NT_STATUS_V(STATUS_PENDING));
	SSVAL(req->out.hdr, SMB2_HDR_CREDIT, credits);

	SSVAL(req->out.body, 0x02, 0);
	SIVAL(req->out.body, 0x04, 0);

	/* if the real reply will be signed set the signed flags, but don't sign */
	if (req->is_signed) {
		SIVAL(req->out.hdr, SMB2_HDR_FLAGS, IVAL(req->out.hdr, SMB2_HDR_FLAGS) | SMB2_HDR_FLAG_SIGNED);
		signing_used = req->is_signed;
		req->is_signed = false;
	}

	smb2srv_send_reply(req);

	req->is_signed = signing_used;

	talloc_set_destructor(req, smb2srv_request_destructor);
	return NT_STATUS_OK;
}
Example #14
uint32_t ctdb_reqid_new(struct ctdb_context *ctdb, void *state)
{
	int id = idr_get_new_above(ctdb->idr, state, ctdb->lastid+1, INT_MAX);
	if (id < 0) {
		DEBUG(DEBUG_DEBUG, ("Reqid wrap!\n"));
		id = idr_get_new(ctdb->idr, state, INT_MAX);
	}
	ctdb->lastid = id;
	return id;
}
Example #15
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
		  struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret, id;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
	if (ctx->obj == NULL) {
		kfree(ctx);
		DRM_DEBUG_DRIVER("Context object allocation failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* The ring associated with the context object is handled by the normal
	 * object tracking code. We give an initial ring value simply to pass an
	 * assertion in the context switch code.
	 */
	ctx->ring = &dev_priv->ring[RCS];

	/* Default context will never have a file_priv */
	if (file_priv == NULL)
		return ctx;

	ctx->file_priv = file_priv;

again:
	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		DRM_DEBUG_DRIVER("idr allocation failed\n");
		goto err_out;
	}

	ret = idr_get_new_above(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_ID + 1, &id);
	if (ret == 0)
		ctx->id = id;

	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		goto err_out;

	return ctx;

err_out:
	do_destroy(ctx);
	return ERR_PTR(ret);
}
Example #16
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
		struct rpmsg_channel *rpdev,
		void (*cb)(struct rpmsg_channel *, void *, int, void *, u32),
		void *priv, u32 addr)
{
	int err, tmpaddr, request;
	struct rpmsg_endpoint *ept;
	struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;

	if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
		return NULL;

	ept = kzalloc(sizeof(*ept), GFP_KERNEL);
	if (!ept) {
		dev_err(dev, "failed to kzalloc a new ept\n");
		return NULL;
	}

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;

	/* do we need to allocate a local address ? */
	request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;

	spin_lock(&vrp->endpoints_lock);

	/* bind the endpoint to an rpmsg address (and allocate one if needed) */
	err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
	if (err) {
		dev_err(dev, "idr_get_new_above failed: %d\n", err);
		goto free_ept;
	}

	if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
		dev_err(dev, "address 0x%x already in use\n", addr);
		goto rem_idr;
	}

	ept->addr = tmpaddr;

	spin_unlock(&vrp->endpoints_lock);

	return ept;

rem_idr:
	idr_remove(&vrp->endpoints, tmpaddr);	/* free the id that was actually allocated */
free_ept:
	spin_unlock(&vrp->endpoints_lock);
	kfree(ept);
	return NULL;
}
Example #17
File: c2_qp.c Project: 274914765/C
static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
	int ret;

	do {
		spin_lock_irq(&c2dev->qp_table.lock);
		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
					c2dev->qp_table.last++, &qp->qpn);
		spin_unlock_irq(&c2dev->qp_table.lock);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
	return ret;
}
Example #18
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Example #19
/* get new free unit number and associate pointer with it */
static int btn_get_unit(struct idr *p, void *ptr)
{
	int unit, err;

again:
	if (!idr_pre_get(p, GFP_KERNEL)) {
		printk(KERN_ERR "BTN: No free memory for idr\n");
		return -ENOMEM;
	}

	err = idr_get_new_above(p, ptr, 0, &unit);
	if (err == -EAGAIN)
		goto again;
	if (err)
		return err;

	return unit;
}
Example #20
int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
{
	mali_mem_backend *mem_backend = NULL;
	s32 ret = -ENOSPC;
	s32 index = -1;
	*backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
	if (NULL == *backend) {
		MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
		return -1;
	}
	mem_backend = *backend;
	mem_backend->size = psize;
	mutex_init(&mem_backend->mutex);
	INIT_LIST_HEAD(&mem_backend->list);
	mem_backend->using_count = 0;


	/* link backend with id */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
again:
	if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
		kfree(mem_backend);
		return -ENOMEM;
	}
	mutex_lock(&mali_idr_mutex);
	ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
	mutex_unlock(&mali_idr_mutex);

	if (-ENOSPC == ret) {
		kfree(mem_backend);
		return -ENOSPC;
	}
	if (-EAGAIN == ret)
		goto again;
#else
	mutex_lock(&mali_idr_mutex);
	ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
	mutex_unlock(&mali_idr_mutex);
	index = ret;
	if (ret < 0) {
		MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
		kfree(mem_backend);
		return -ENOSPC;
	}
#endif
	return index;
}
Example #21
File: drm_gem.c Project: E-LLP/n900
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		args->name = obj->name;
		spin_unlock(&dev->object_name_lock);
		return 0;
	}
	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
				 &obj->name);
	spin_unlock(&dev->object_name_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/*
	 * Leave the reference from the lookup around as the
	 * name table now holds one
	 */
	args->name = (uint64_t) obj->name;

	return 0;
}
Example #22
/*
 * Stag lookup is based on its index part only (24 bits)
 * It is assumed that the idr_get_new_above(,,1,) function will
 * always return a new id within this range (0x1...0xffffff),
 * if one is available.
 * The code avoids special Stag of zero and tries to randomize
 * STag values.
 */
int siw_mem_add(struct siw_dev *sdev, struct siw_mem *m)
{
	u32		id, pre_id;
	unsigned long	flags;
	int		rv;

	do {
		get_random_bytes(&pre_id, sizeof pre_id);
		pre_id &= 0xffff;
	} while (pre_id == 0);
again:
	do {
		if (!(idr_pre_get(&sdev->mem_idr, GFP_KERNEL)))
			return -ENOMEM;

		spin_lock_irqsave(&sdev->idr_lock, flags);
		rv = idr_get_new_above(&sdev->mem_idr, m, pre_id, &id);
		spin_unlock_irqrestore(&sdev->idr_lock, flags);

	} while (rv == -EAGAIN);

	if (rv == -ENOSPC || (rv == 0 && id > SIW_STAG_MAX)) {
		if (rv == 0) {
			spin_lock_irqsave(&sdev->idr_lock, flags);
			idr_remove(&sdev->mem_idr, id);
			spin_unlock_irqrestore(&sdev->idr_lock, flags);
		}
		if (pre_id == 1) {
			dprint(DBG_OBJ|DBG_MM|DBG_ON,
				"(IDR): New Object failed: %d\n", pre_id);
			return -ENOSPC;
		}
		pre_id = 1;
		goto again;
	} else if (rv) {
		dprint(DBG_OBJ|DBG_MM|DBG_ON,
			"(IDR%d): New Object failed: rv %d\n", id, rv);
		return rv;
	}
	siw_objhdr_init(&m->hdr);
	m->hdr.id = id;
	m->hdr.sdev = sdev;
	dprint(DBG_OBJ|DBG_MM, "(IDR%d): New Object\n", id);

	return 0;
}
Example #23
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;
	int new_context = (mm->context.id == 0);

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	mm->context.id = index;
#ifdef CONFIG_PPC_MM_SLICES
	/* The old code would re-promote on fork, we don't do that
	 * when using slices as it could cause problem promoting slices
	 * that have been forced down to 4K
	 */
	if (new_context)
		slice_set_user_psize(mm, mmu_virtual_psize);
#else
	mm->context.user_psize = mmu_virtual_psize;
	mm->context.sllp = SLB_VSID_USER |
		mmu_psize_defs[mmu_virtual_psize].sllp;
#endif

	return 0;
}
Example #24
static int save_opened_pipe(struct ipc_pipe *pipe)
{
	int id;
	int status;

__retry:
	if (idr_pre_get(&ipc_pipe_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	status = idr_get_new_above(&ipc_pipe_idr, pipe, pipe->id, &id);
	if (status == 0 && id != pipe->id) {
		status = -EBUSY;
		idr_remove(&ipc_pipe_idr, id);
	}

	if (status == -EAGAIN)
		goto __retry;

	return status;
}
Example #25
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{
	struct ib_ucm_context *ctx;
	int result;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->ref  = 1; /* user reference */
	ctx->file = file;

	INIT_LIST_HEAD(&ctx->events);
	init_MUTEX(&ctx->mutex);

	list_add_tail(&ctx->file_list, &file->ctxs);

	ctx_id_rover = (ctx_id_rover + 1) & INT_MAX;
retry:
	result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
	if (!result)
		goto error;

	down(&ctx_id_mutex);
	result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id);
	up(&ctx_id_mutex);

	if (result == -EAGAIN)
		goto retry;
	if (result)
		goto error;

	ucm_dbg("Allocated CM ID <%d>\n", ctx->id);

	return ctx;
error:
	list_del(&ctx->file_list);
	kfree(ctx);

	return NULL;
}
Example #26
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                      struct drm_gem_object *obj,
                      u32 *handlep)
{
    struct drm_device *dev = obj->dev;
    int ret;

    /*
     * Get the user-visible handle using idr.
     */
again:
    /* ensure there is space available to allocate a handle */
    if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
        return -ENOMEM;

    /* do the allocation under our spinlock */
    spin_lock(&file_priv->table_lock);
    ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
    spin_unlock(&file_priv->table_lock);
    if (ret == -EAGAIN)
        goto again;

    if (ret != 0)
        return ret;

    drm_gem_object_handle_reference(obj);

    if (dev->driver->gem_open_object) {
        ret = dev->driver->gem_open_object(obj, file_priv);
        if (ret) {
            drm_gem_handle_delete(file_priv, *handlep);
            return ret;
        }
    }

    return 0;
}
Example #27
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
Example #28
int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
    return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}
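This wrapper just forwards to the underlying idr with a NULL pointer, since an ida tracks bare ids rather than pointers. A minimal usage sketch, assuming a companion ida_pre_get() analogous to idr_pre_get(); my_ida and my_get_unit() are hypothetical names:

static DEFINE_IDA(my_ida);	/* Linux-style static initializer; an assumption here */

static int my_get_unit(void)
{
	int unit, err;

again:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))	/* assumed companion helper */
		return -ENOMEM;

	err = ida_get_new_above(&my_ida, 0, &unit);
	if (err == -EAGAIN)
		goto again;

	return err ? err : unit;	/* unit id on success, -errno on failure */
}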
Example #29
int ambsync_proc_open(struct inode *inode, struct file *file)
{
	int				retval = 0;
	struct ambsync_proc_pinfo	*pinfo = file->private_data;
	struct proc_dir_entry		*dp;
	struct ambsync_proc_hinfo	*hinfo;
	int				id;

	dp = PDE(inode);
	hinfo = (struct ambsync_proc_hinfo *)dp->data;
	if (!hinfo) {
		retval = -EPERM;
		goto ambsync_proc_open_exit;
	}
	if (hinfo->maxid > AMBA_SYNC_PROC_MAX_ID) {
		retval = -EPERM;
		goto ambsync_proc_open_exit;
	}

	if (pinfo) {
		retval = -EPERM;
		goto ambsync_proc_open_exit;
	}
	pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo) {
		retval = -ENOMEM;
		goto ambsync_proc_open_exit;
	}
	memset(pinfo, 0, sizeof(*pinfo));

	if (idr_pre_get(&hinfo->sync_proc_idr, GFP_KERNEL) == 0) {
		retval = -ENOMEM;
		goto ambsync_proc_open_kfree_p;
	}
	mutex_lock(&hinfo->sync_proc_lock);
	retval = idr_get_new_above(&hinfo->sync_proc_idr, pinfo, 0, &id);
	mutex_unlock(&hinfo->sync_proc_lock);
	if (retval != 0)
		goto ambsync_proc_open_kfree_p;
	if (id > 31) {
		retval = -ENOMEM;
		goto ambsync_proc_open_remove_id;
	}

	if (!(pinfo->page = (char*) __get_free_page(GFP_KERNEL))) {
		retval = -ENOMEM;
		goto ambsync_proc_open_remove_id;
	}
	pinfo->id = id;
	pinfo->mask = (0x01 << id);

	file->private_data = pinfo;
	file->f_version = 0;
	file->f_mode &= ~FMODE_PWRITE;

	goto ambsync_proc_open_exit;

ambsync_proc_open_remove_id:
	mutex_lock(&hinfo->sync_proc_lock);
	idr_remove(&hinfo->sync_proc_idr, id);
	mutex_unlock(&hinfo->sync_proc_lock);

ambsync_proc_open_kfree_p:
	kfree(pinfo);

ambsync_proc_open_exit:
	return retval;
}
Example #30
static bool torture_local_idtree_simple(struct torture_context *tctx)
{
	struct idr_context *idr;
	int i, ret;
	int *ids;
	int *present;
	extern int torture_numops;
	int n = torture_numops;
	TALLOC_CTX *mem_ctx = tctx;

	idr = idr_init(mem_ctx);

	ids = talloc_zero_array(mem_ctx, int, n);
	present = talloc_zero_array(mem_ctx, int, n);

	for (i=0;i<n;i++) {
		ids[i] = -1;
	}

	for (i=0;i<n;i++) {
		int ii = random() % n;
		void *p = idr_find(idr, ids[ii]);
		if (present[ii]) {
			if (p != &ids[ii]) {
				torture_fail(tctx, talloc_asprintf(tctx, 
						"wrong ptr at %d - %p should be %p", 
				       ii, p, &ids[ii]));
			}
			if (random() % 7 == 0) {
				if (idr_remove(idr, ids[ii]) != 0) {
					torture_fail(tctx, talloc_asprintf(tctx,
						"remove failed at %d (id=%d)", 
					       i, ids[ii]));
				}
				present[ii] = 0;
				ids[ii] = -1;
			}
		} else {
			if (p != NULL) {
				torture_fail(tctx, 
					     talloc_asprintf(tctx,
							     "non-present at %d gave %p (would be %d)", 
							     ii, p, 
							     (int)((((char *)p) - (char *)(&ids[0])) / sizeof(int))));
			}
			if (random() % 5) {
				ids[ii] = idr_get_new(idr, &ids[ii], n);
				if (ids[ii] < 0) {
					torture_fail(tctx, talloc_asprintf(tctx,
						"alloc failure at %d (ret=%d)", 
					       ii, ids[ii]));
				} else {
					present[ii] = 1;
				}
			}
		}
	}

	torture_comment(tctx, "done %d random ops\n", i);

	for (i=0;i<n;i++) {
		if (present[i]) {
			if (idr_remove(idr, ids[i]) != 0) {
				torture_fail(tctx, talloc_asprintf(tctx,
						"delete failed on cleanup at %d (id=%d)", 
				       i, ids[i]));
			}
		}
	}

	/* now test some limits */
	for (i=0;i<25000;i++) {
		ret = idr_get_new_above(idr, &ids[0], random() % 25000, 0x10000-3);
		torture_assert(tctx, ret != -1, "idr_get_new_above failed");
	}

	ret = idr_get_new_above(idr, &ids[0], 0x10000-2, 0x10000);
	torture_assert_int_equal(tctx, ret, 0x10000-2, "idr_get_new_above failed");
	ret = idr_get_new_above(idr, &ids[0], 0x10000-1, 0x10000);
	torture_assert_int_equal(tctx, ret, 0x10000-1, "idr_get_new_above failed");
	ret = idr_get_new_above(idr, &ids[0], 0x10000, 0x10000);
	torture_assert_int_equal(tctx, ret, 0x10000, "idr_get_new_above failed");
	ret = idr_get_new_above(idr, &ids[0], 0x10000+1, 0x10000);
	torture_assert_int_equal(tctx, ret, -1, "idr_get_new_above succeeded above limit");
	ret = idr_get_new_above(idr, &ids[0], 0x10000+2, 0x10000);
	torture_assert_int_equal(tctx, ret, -1, "idr_get_new_above succeeded above limit");

	torture_comment(tctx, "cleaned up\n");
	return true;
}