Example #1
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		if (resource->release == release_iso_resource)
			schedule_iso_resource(container_of(resource,
						struct iso_resource, resource));
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}
Example #2
struct kgsl_context *
kgsl_create_context(struct kgsl_device_private *dev_priv)
{
	struct kgsl_context *context;
	int ret, id;

	context = kzalloc(sizeof(*context), GFP_KERNEL);

	if (context == NULL)
		return NULL;

	while (1) {
		if (idr_pre_get(&dev_priv->device->context_idr,
				GFP_KERNEL) == 0) {
			kfree(context);
			return NULL;
		}

		ret = idr_get_new(&dev_priv->device->context_idr,
				  context, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (ret) {
		kfree(context);
		return NULL;
	}

	context->id = id;
	context->dev_priv = dev_priv;

	return context;
}
Example #3
int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
{
	struct device *cd;
	int err = 0;

idr_try_again:
	if (!idr_pre_get(&dca_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&dca_idr_lock);
	err = idr_get_new(&dca_idr, dca, &dca->id);
	spin_unlock(&dca_idr_lock);
	switch (err) {
	case 0:
		break;
	case -EAGAIN:
		goto idr_try_again;
	default:
		return err;
	}

	cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
	if (IS_ERR(cd)) {
		spin_lock(&dca_idr_lock);
		idr_remove(&dca_idr, dca->id);
		spin_unlock(&dca_idr_lock);
		return PTR_ERR(cd);
	}
	dca->cd = cd;
	return 0;
}
Example #4
/**
 * hwmon_device_register - register w/ hwmon
 * @dev: the device to register
 *
 * hwmon_device_unregister() must be called when the device is no
 * longer needed.
 *
 * Returns the pointer to the new device.
 */
struct device *hwmon_device_register(struct device *dev)
{
	struct device *hwdev;
	int id, err;

again:
	if (unlikely(idr_pre_get(&hwmon_idr, GFP_KERNEL) == 0))
		return ERR_PTR(-ENOMEM);

	spin_lock(&idr_lock);
	err = idr_get_new(&hwmon_idr, NULL, &id);
	spin_unlock(&idr_lock);

	if (unlikely(err == -EAGAIN))
		goto again;
	else if (unlikely(err))
		return ERR_PTR(err);

	id = id & MAX_ID_MASK;
	hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
			      HWMON_ID_FORMAT, id);

	if (IS_ERR(hwdev)) {
		spin_lock(&idr_lock);
		idr_remove(&hwmon_idr, id);
		spin_unlock(&idr_lock);
	}

	return hwdev;
}
Example #5
static int next_free_minor(struct mapped_device *md, int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r) {
		goto out;
	}

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
Example #6
static int new_bat_id(void)
{
	int ret;

	while (1) {
		int id;

		ret = idr_pre_get(&bat_idr, GFP_KERNEL);
		if (ret == 0)
			return -ENOMEM;

		mutex_lock(&bat_idr_lock);
		ret = idr_get_new(&bat_idr, NULL, &id);
		mutex_unlock(&bat_idr_lock);

		if (ret == 0) {
			ret = id & MAX_ID_MASK;
			break;
		} else if (ret == -EAGAIN) {
			continue;
		} else {
			break;
		}
	}

	return ret;
}
Example #7
static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
	int r;
	unsigned int m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, md, &m);
	if (r) {
		goto out;
	}

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	up(&_minor_lock);
	return r;
}
Example #8
static int uio_get_minor(struct uio_device *idev)
{
	int retval = -ENOMEM;
	int id;

	mutex_lock(&minor_lock);
	if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
		goto exit;

	retval = idr_get_new(&uio_idr, idev, &id);
	if (retval < 0) {
		if (retval == -EAGAIN)
			retval = -ENOMEM;
		goto exit;
	}
	if (id < UIO_MAX_DEVICES) {
		idev->minor = id;
	} else {
		dev_err(idev->dev, "too many uio devices\n");
		retval = -EINVAL;
		idr_remove(&uio_idr, id);
	}
exit:
	mutex_unlock(&minor_lock);
	return retval;
}
Example #9
int v9fs_get_idpool(struct v9fs_idpool *p)
{
	int i = 0;
	int error;

retry:
	if (idr_pre_get(&p->pool, GFP_KERNEL) == 0)
		return 0;

	if (down_interruptible(&p->lock) == -EINTR) {
		eprintk(KERN_WARNING, "Interrupted while locking\n");
		return -1;
	}

	/* no need to store exactly p, we just need something non-null */
	error = idr_get_new(&p->pool, p, &i);
	up(&p->lock);

	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return -1;

	return i;
}
Example #10
/**
 * blk_alloc_devt - allocate a dev_t for a partition
 * @part: partition to allocate dev_t for
 * @devt: out parameter for resulting dev_t
 *
 * Allocate a dev_t for block device.
 *
 * RETURNS:
 * 0 on success, allocated dev_t is returned in *@devt.  -errno on
 * failure.
 *
 * CONTEXT:
 * Might sleep.
 */
int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
{
	struct gendisk *disk = part_to_disk(part);
	int idx, rc;

	/* in consecutive minor range? */
	if (part->partno < disk->minors) {
		*devt = MKDEV(disk->major, disk->first_minor + part->partno);
		return 0;
	}

	/* allocate ext devt */
	do {
		if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
			return -ENOMEM;
		rc = idr_get_new(&ext_devt_idr, part, &idx);
	} while (rc == -EAGAIN);

	if (rc)
		return rc;

	if (idx > MAX_EXT_DEVT) {
		idr_remove(&ext_devt_idr, idx);
		return -EBUSY;
	}

	*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
	return 0;
}
Example #11
int pps_register_cdev(struct pps_device *pps)
{
	int err;
	dev_t devt;

	mutex_lock(&pps_idr_lock);
	
	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
		mutex_unlock(&pps_idr_lock);
		return -ENOMEM;
	}

	err = idr_get_new(&pps_idr, pps, &pps->id);
	mutex_unlock(&pps_idr_lock);

	if (err < 0)
		return err;

	pps->id &= MAX_ID_MASK;
	if (pps->id >= PPS_MAX_SOURCES) {
		pr_err("%s: too many PPS sources in the system\n",
					pps->info.name);
		err = -EBUSY;
		goto free_idr;
	}

	devt = MKDEV(MAJOR(pps_devt), pps->id);

	cdev_init(&pps->cdev, &pps_cdev_fops);
	pps->cdev.owner = pps->info.owner;

	err = cdev_add(&pps->cdev, devt, 1);
	if (err) {
		pr_err("%s: failed to add char device %d:%d\n",
				pps->info.name, MAJOR(pps_devt), pps->id);
		goto free_idr;
	}
	pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
							"pps%d", pps->id);
	if (IS_ERR(pps->dev))
		goto del_cdev;

	pps->dev->release = pps_device_destruct;

	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
			MAJOR(pps_devt), pps->id);

	return 0;

del_cdev:
	cdev_del(&pps->cdev);

free_idr:
	mutex_lock(&pps_idr_lock);
	idr_remove(&pps_idr, pps->id);
	mutex_unlock(&pps_idr_lock);

	return err;
}
Example #12
static int bq27x00_battery_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
{
	char *name;
	struct bq27x00_device_info *di;
	int num;
	int retval = 0;

	/* Get new ID for the new battery device */
	retval = idr_pre_get(&battery_id, GFP_KERNEL);
	if (retval == 0)
		return -ENOMEM;
	mutex_lock(&battery_mutex);
	retval = idr_get_new(&battery_id, client, &num);
	mutex_unlock(&battery_mutex);
	if (retval < 0)
		return retval;

	name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
	if (!name) {
		dev_err(&client->dev, "failed to allocate device name\n");
		retval = -ENOMEM;
		goto batt_failed_1;
	}

	di = kzalloc(sizeof(*di), GFP_KERNEL);
	if (!di) {
		dev_err(&client->dev, "failed to allocate device info data\n");
		retval = -ENOMEM;
		goto batt_failed_2;
	}

	di->id = num;
	di->dev = &client->dev;
	di->chip = id->driver_data;
	di->bat.name = name;
	di->bus.read = &bq27x00_read_i2c;

	if (bq27x00_powersupply_init(di))
		goto batt_failed_3;

	i2c_set_clientdata(client, di);

	return 0;

batt_failed_3:
	kfree(di);
batt_failed_2:
	kfree(name);
batt_failed_1:
	mutex_lock(&battery_mutex);
	idr_remove(&battery_id, num);
	mutex_unlock(&battery_mutex);

	return retval;
}
Example #13
uint32_t ctdb_reqid_new(struct ctdb_context *ctdb, void *state)
{
	int id = idr_get_new_above(ctdb->idr, state, ctdb->lastid+1, INT_MAX);
	if (id < 0) {
		DEBUG(DEBUG_DEBUG, ("Reqid wrap!\n"));
		id = idr_get_new(ctdb->idr, state, INT_MAX);
	}
	ctdb->lastid = id;
	return id;
}
Example #14
static int ocrdma_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
		return -1;
	if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
		return -1;
	return instance;
}
Example #15
struct display_device *display_device_register(struct display_driver *driver,
						struct device *parent, void *devdata)
{
	struct display_device *new_dev = NULL;
	int ret = -EINVAL;

	if (unlikely(!driver))
		return ERR_PTR(ret);

	mutex_lock(&allocated_dsp_lock);
	ret = idr_pre_get(&allocated_dsp, GFP_KERNEL);
	mutex_unlock(&allocated_dsp_lock);
	if (!ret)
		return ERR_PTR(ret);

	new_dev = kzalloc(sizeof(struct display_device), GFP_KERNEL);
	if (!new_dev)
		return ERR_PTR(-ENOMEM);
	
	ret = driver->probe(new_dev, devdata);
	if (ret < 0)
		goto out;
	
	/* Reserve the index for this display */
	mutex_lock(&allocated_dsp_lock);
	ret = idr_get_new(&allocated_dsp, new_dev, &new_dev->idx);
	mutex_unlock(&allocated_dsp_lock);

	if (ret)
		goto out;
	
	new_dev->dev = device_create_drvdata(display_class, parent,
			MKDEV(0,0), new_dev, "display%d", new_dev->idx);

	if (IS_ERR(new_dev->dev)) {
		mutex_lock(&allocated_dsp_lock);
		idr_remove(&allocated_dsp, new_dev->idx);
		mutex_unlock(&allocated_dsp_lock);
		ret = -EINVAL;
		goto out;
	}

	new_dev->parent = parent;
	new_dev->driver = driver;
	new_dev->priv_data = devdata;
	mutex_init(&new_dev->lock);
	return new_dev;

 out:
	kfree(new_dev);
	return ERR_PTR(ret);
}
Example #16
/*
  make a remote ctdb call - async send. Called in daemon context.

  This constructs a ctdb_call request and queues it for processing. 
  This call never blocks.
*/
struct ctdb_call_state *ctdb_daemon_call_send_remote(struct ctdb_db_context *ctdb_db, 
						     struct ctdb_call *call, 
						     struct ctdb_ltdb_header *header)
{
	uint32_t len;
	struct ctdb_call_state *state;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	state = talloc_zero(ctdb_db, struct ctdb_call_state);
	CTDB_NO_MEMORY_NULL(ctdb, state);

	len = offsetof(struct ctdb_req_call, data) + call->key.dsize + call->call_data.dsize;
	state->c = ctdb->methods->allocate_pkt(state, len);
	CTDB_NO_MEMORY_NULL(ctdb, state->c);
	talloc_set_name_const(state->c, "req_call packet");

	state->c->hdr.length    = len;
	state->c->hdr.ctdb_magic = CTDB_MAGIC;
	state->c->hdr.ctdb_version = CTDB_VERSION;
	state->c->hdr.operation = CTDB_REQ_CALL;
	state->c->hdr.destnode  = header->dmaster;
	state->c->hdr.srcnode   = ctdb->vnn;
	/* this limits us to 16k outstanding messages - not unreasonable */
	state->c->hdr.reqid     = idr_get_new(ctdb->idr, state, 0xFFFF);
	state->c->flags         = call->flags;
	state->c->db_id         = ctdb_db->db_id;
	state->c->callid        = call->call_id;
	state->c->keylen        = call->key.dsize;
	state->c->calldatalen   = call->call_data.dsize;
	memcpy(&state->c->data[0], call->key.dptr, call->key.dsize);
	memcpy(&state->c->data[call->key.dsize], 
	       call->call_data.dptr, call->call_data.dsize);
	state->call                = *call;
	state->call.call_data.dptr = &state->c->data[call->key.dsize];
	state->call.key.dptr       = &state->c->data[0];

	state->node   = ctdb->nodes[header->dmaster];
	state->state  = CTDB_CALL_WAIT;
	state->header = *header;
	state->ctdb_db = ctdb_db;

	talloc_set_destructor(state, ctdb_call_destructor);

	ctdb_queue_packet(ctdb, &state->c->hdr);

	event_add_timed(ctdb->ev, state, timeval_current_ofs(CTDB_REQ_TIMEOUT, 0), 
			ctdb_call_timeout, state);
	return state;
}
Example #17
/*
 * Get a unique NFSv4.0 callback identifier which will be used
 * by the V4.0 callback service to lookup the nfs_client struct
 */
static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
{
	int ret = 0;
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);

	if (clp->rpc_ops->version != 4 || minorversion != 0)
		return ret;
retry:
	if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&nn->nfs_client_lock);
	ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident);
	spin_unlock(&nn->nfs_client_lock);
	if (ret == -EAGAIN)
		goto retry;
	return ret;
}
Example #18
/**
 * iio_trigger_register_id() - get a unique id for this trigger
 * @trig_info:	the trigger
 **/
static int iio_trigger_register_id(struct iio_trigger *trig_info)
{
	int ret = 0;

idr_again:
	if (unlikely(idr_pre_get(&iio_trigger_idr, GFP_KERNEL) == 0))
		return -ENOMEM;

	spin_lock(&iio_trigger_idr_lock);
	ret = idr_get_new(&iio_trigger_idr, NULL, &trig_info->id);
	spin_unlock(&iio_trigger_idr_lock);
	if (unlikely(ret == -EAGAIN))
		goto idr_again;
	else if (likely(!ret))
		trig_info->id = trig_info->id & MAX_ID_MASK;

	return ret;
}
Example #19
struct display_device *display_device_register(struct display_driver *driver,
						struct device *parent, void *devdata)
{
	struct display_device *new_dev = NULL;
	int ret = -EINVAL;

	if (unlikely(!driver))
		return ERR_PTR(ret);

	mutex_lock(&allocated_dsp_lock);
	ret = idr_pre_get(&allocated_dsp, GFP_KERNEL);
	mutex_unlock(&allocated_dsp_lock);
	if (!ret)
		return ERR_PTR(ret);

	new_dev = kzalloc(sizeof(struct display_device), GFP_KERNEL);
	if (likely(new_dev) && unlikely(driver->probe(new_dev, devdata))) {
		// Reserve the index for this display
		mutex_lock(&allocated_dsp_lock);
		ret = idr_get_new(&allocated_dsp, new_dev, &new_dev->idx);
		mutex_unlock(&allocated_dsp_lock);

		if (!ret) {
			new_dev->dev = device_create_drvdata(display_class,
							     parent,
							     MKDEV(0,0),
							     new_dev,
							     "display%d",
							     new_dev->idx);
			if (!IS_ERR(new_dev->dev)) {
				new_dev->parent = parent;
				new_dev->driver = driver;
				mutex_init(&new_dev->lock);
				return new_dev;
			}
			mutex_lock(&allocated_dsp_lock);
			idr_remove(&allocated_dsp, new_dev->idx);
			mutex_unlock(&allocated_dsp_lock);
			ret = -EINVAL;
		}
	}
	kfree(new_dev);
	return ERR_PTR(ret);
}
Example #20
static int uio_get_minor(struct uio_device *idev)
{
	int retval = -ENOMEM;
	int id;

	mutex_lock(&minor_lock);
	if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
		goto exit;

	retval = idr_get_new(&uio_idr, idev, &id);
	if (retval < 0) {
		if (retval == -EAGAIN)
			retval = -ENOMEM;
		goto exit;
	}
	idev->minor = id & MAX_ID_MASK;
exit:
	mutex_unlock(&minor_lock);
	return retval;
}
Example #21
int p9_idpool_get(struct p9_idpool *p)
{
	int i = 0;
	int error;
	unsigned long flags;

retry:
	if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
		return -1;

	spin_lock_irqsave(&p->lock, flags);

	/* no need to store exactly p, we just need something non-null */
	error = idr_get_new(&p->pool, p, &i);
	spin_unlock_irqrestore(&p->lock, flags);

	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return -1;

	return i;
}
Example #22
/**
 * hwmon_device_register - register w/ hwmon sysfs class
 * @dev: the device to register
 *
 * hwmon_device_unregister() must be called when the class device is no
 * longer needed.
 *
 * Returns the pointer to the new struct class device.
 */
struct class_device *hwmon_device_register(struct device *dev)
{
	struct class_device *cdev;
	int id;

	if (idr_pre_get(&hwmon_idr, GFP_KERNEL) == 0)
		return ERR_PTR(-ENOMEM);

	if (idr_get_new(&hwmon_idr, NULL, &id) < 0)
		return ERR_PTR(-ENOMEM);

	id = id & MAX_ID_MASK;
	/**
	 * Remember: RHEL4 class_device_create does not have the second
	 * parent argument.
	 */
	cdev = class_device_create(hwmon_class, MKDEV(0,0), dev,
					HWMON_ID_FORMAT, id);

	if (IS_ERR(cdev))
		idr_remove(&hwmon_idr, id);

	return cdev;
}
Example #23
int p9_idpool_get(struct p9_idpool *p)
{
	int i = 0;
	int error;
	unsigned long flags;

retry:
	if (idr_pre_get(&p->pool, GFP_KERNEL) == 0)
		return 0;

	spin_lock_irqsave(&p->lock, flags);

	/* no need to store exactly p, we just need something non-null */
	error = idr_get_new(&p->pool, p, &i);
	spin_unlock_irqrestore(&p->lock, flags);

	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return -1;

	P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
	return i;
}
Example #24
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *init_attr,
			     struct ib_udata *udata)
{
	static int da_rc_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
	static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 };
	struct ehca_qp *my_qp;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0;
	u8 daqp_completion, isdaqp;
	unsigned long flags;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
		init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		return ERR_PTR(-EINVAL);
	}

	/* save daqp completion bits */
	daqp_completion = init_attr->qp_type & 0x60;
	/* save daqp bit */
	isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
	init_attr->qp_type = init_attr->qp_type & 0x1F;

	if (init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_SMI &&
	    init_attr->qp_type != IB_QPT_GSI &&
	    init_attr->qp_type != IB_QPT_UC &&
	    init_attr->qp_type != IB_QPT_RC) {
		ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
	    && isdaqp) {
		ehca_err(pd->device, "unsupported LL QP Type=%x",
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	} else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
		   (init_attr->cap.max_send_wr > 255 ||
		    init_attr->cap.max_recv_wr > 255 )) {
		       ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
				"or max_rq_wr=%x for QP Type=%x",
				init_attr->cap.max_send_wr,
				init_attr->cap.max_recv_wr,init_attr->qp_type);
		       return ERR_PTR(-EINVAL);
	} else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
		  init_attr->cap.max_send_wr > 255) {
		ehca_err(pd->device,
			 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
			 init_attr->cap.max_send_wr, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (pd->uobject && udata)
		context = pd->uobject->context;

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		return ERR_PTR(-ENOMEM);
	}

	memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);

	my_qp->recv_cq =
		container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	my_qp->send_cq =
		container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	my_qp->init_attr = *init_attr;

	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
		goto create_qp_exit0;
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		parms.sigtype = HCALL_SIGT_EVERY;
	else
		parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (IB_QPT_UD == init_attr->qp_type ||
	    IB_QPT_GSI == init_attr->qp_type ||
	    IB_QPT_SMI == init_attr->qp_type) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
	parms.daqp_ctrl = isdaqp | daqp_completion;
	parms.pd = my_pd->fw_pd;
	parms.max_recv_sge = max_recv_sge;
	parms.max_send_sge = max_send_sge;

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);

	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	my_qp->ib_qp.qp_num = my_qp->real_qp_num;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	        if (isdaqp == 0) {
			swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
					     (parms.act_nr_send_sges)]);
			rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
					     (parms.act_nr_recv_sges)]);
		} else { /* for daqp we need to use msg size, not wqe size */
Example #25
static int bq24196_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	char *name;
	struct bq24196_device_info *di;
	struct bq24196_access_methods *bus;
	int num;
	int retval = 0;
	
	printk("lfc bq24196_probe\n");
	if(!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		printk("lfc bq24196_probe,i2c_func error\n");
		goto err_check_functionality_failed;
		}
	/* Get new ID for the new battery device */
	retval = idr_pre_get(&bq24196_charger_id, GFP_KERNEL);
	if (retval == 0)
		return -ENOMEM;
	retval = idr_get_new(&bq24196_charger_id, client, &num);
	if (retval < 0)
		return retval;
	
	name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
	if (!name) {
		dev_err(&client->dev, "failed to allocate device name\n");
		retval = -ENOMEM;
		goto bq24196_chg_failed_1;
	}

	di = kzalloc(sizeof(*di), GFP_KERNEL);
	if (!di) {
		dev_err(&client->dev, "failed to allocate device info data\n");
		retval = -ENOMEM;
		goto bq24196_chg_failed_2;
	}
	di->id = num;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		dev_err(&client->dev, "failed to allocate access method "
					"data\n");
		retval = -ENOMEM;
		goto bq24196_chg_failed_3;
	}
	
	i2c_set_clientdata(client, di);
	di->dev = &client->dev;
	bus->read = &bq24196_read_i2c;
	bus->write = &bq24196_write_i2c;
	di->bus = bus;
	di->client = client;
	bq24196_client = client;
	bq24196_di = di;
	mutex_init(&di->i2c_lock);
	bq24196_hw_config_init(di);
	
	return 0;
	
err_check_functionality_failed:
	printk("lfc bq24196_probe fail\n");
	return 0;

bq24196_chg_failed_3:
	kfree(di);
bq24196_chg_failed_2:
	kfree(name);
bq24196_chg_failed_1:
	idr_remove(&bq24196_charger_id,num);
	return retval;	
}
Example #26
/* 
   list files in a directory matching a wildcard pattern
*/
static NTSTATUS pvfs_search_first_trans2(struct ntvfs_module_context *ntvfs,
					 struct ntvfs_request *req, union smb_search_first *io, 
					 void *search_private, 
					 bool (*callback)(void *, const union smb_search_data *))
{
	struct pvfs_dir *dir;
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
				  struct pvfs_state);
	struct pvfs_search_state *search;
	unsigned int reply_count;
	uint16_t search_attrib, max_count;
	const char *pattern;
	NTSTATUS status;
	struct pvfs_filename *name;
	int id;

	search_attrib = io->t2ffirst.in.search_attrib;
	pattern       = io->t2ffirst.in.pattern;
	max_count     = io->t2ffirst.in.max_count;

	/* resolve the cifs name to a posix name */
	status = pvfs_resolve_name(pvfs, req, pattern, PVFS_RESOLVE_WILDCARD, &name);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if (!name->has_wildcard && !name->exists) {
		return NT_STATUS_NO_SUCH_FILE;
	}

	status = pvfs_access_check_parent(pvfs, req, name, SEC_DIR_TRAVERSE | SEC_DIR_LIST);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* we initially make search a child of the request, then if we
	   need to keep it long term we steal it for the private
	   structure */
	search = talloc(req, struct pvfs_search_state);
	if (!search) {
		return NT_STATUS_NO_MEMORY;
	}

	/* do the actual directory listing */
	status = pvfs_list_start(pvfs, name, search, &dir);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	id = idr_get_new(pvfs->search.idtree, search, MAX_SEARCH_HANDLES);
	if (id == -1) {
		return NT_STATUS_INSUFFICIENT_RESOURCES;
	}

	search->pvfs = pvfs;
	search->handle = id;
	search->dir = dir;
	search->current_index = 0;
	search->search_attrib = search_attrib;
	search->must_attrib = 0;
	search->last_used = 0;
	search->num_ea_names = io->t2ffirst.in.num_names;
	search->ea_names = io->t2ffirst.in.ea_names;
	search->te = NULL;

	DLIST_ADD(pvfs->search.list, search);
	talloc_set_destructor(search, pvfs_search_destructor);

	status = pvfs_search_fill(pvfs, req, max_count, search, io->generic.data_level,
				  &reply_count, search_private, callback);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* not matching any entries is an error */
	if (reply_count == 0) {
		return NT_STATUS_NO_SUCH_FILE;
	}

	io->t2ffirst.out.count = reply_count;
	io->t2ffirst.out.handle = search->handle;
	io->t2ffirst.out.end_of_search = pvfs_list_eos(dir, search->current_index) ? 1 : 0;

	/* work out if we are going to keep the search state
	   and allow for a search continue */
	if ((io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) ||
	    ((io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) && 
	     io->t2ffirst.out.end_of_search)) {
		talloc_free(search);
	} else {
		talloc_steal(pvfs, search);
	}

	return NT_STATUS_OK;
}
Example #27
/* 
   list files in a directory matching a wildcard pattern - old SMBsearch interface
*/
static NTSTATUS pvfs_search_first_old(struct ntvfs_module_context *ntvfs,
				      struct ntvfs_request *req, union smb_search_first *io, 
				      void *search_private, 
				      bool (*callback)(void *, const union smb_search_data *))
{
	struct pvfs_dir *dir;
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
				  struct pvfs_state);
	struct pvfs_search_state *search;
	unsigned int reply_count;
	uint16_t search_attrib;
	const char *pattern;
	NTSTATUS status;
	struct pvfs_filename *name;
	int id;

	search_attrib = io->search_first.in.search_attrib;
	pattern       = io->search_first.in.pattern;

	/* resolve the cifs name to a posix name */
	status = pvfs_resolve_name(pvfs, req, pattern, PVFS_RESOLVE_WILDCARD, &name);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if (!name->has_wildcard && !name->exists) {
		return STATUS_NO_MORE_FILES;
	}

	status = pvfs_access_check_parent(pvfs, req, name, SEC_DIR_TRAVERSE | SEC_DIR_LIST);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* we initially make search a child of the request, then if we
	   need to keep it long term we steal it for the private
	   structure */
	search = talloc(req, struct pvfs_search_state);
	if (!search) {
		return NT_STATUS_NO_MEMORY;
	}

	/* do the actual directory listing */
	status = pvfs_list_start(pvfs, name, search, &dir);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* we need to give a handle back to the client so it
	   can continue a search */
	id = idr_get_new(pvfs->search.idtree, search, MAX_OLD_SEARCHES);
	if (id == -1) {
		pvfs_search_cleanup(pvfs);
		id = idr_get_new(pvfs->search.idtree, search, MAX_OLD_SEARCHES);
	}
	if (id == -1) {
		return NT_STATUS_INSUFFICIENT_RESOURCES;
	}

	search->pvfs = pvfs;
	search->handle = id;
	search->dir = dir;
	search->current_index = 0;
	search->search_attrib = search_attrib & 0xFF;
	search->must_attrib = (search_attrib>>8) & 0xFF;
	search->last_used = time(NULL);
	search->te = NULL;

	DLIST_ADD(pvfs->search.list, search);

	talloc_set_destructor(search, pvfs_search_destructor);

	status = pvfs_search_fill(pvfs, req, io->search_first.in.max_count, search, io->generic.data_level,
				  &reply_count, search_private, callback);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	io->search_first.out.count = reply_count;

	/* not matching any entries is an error */
	if (reply_count == 0) {
		return STATUS_NO_MORE_FILES;
	}

	talloc_steal(pvfs, search);

	return NT_STATUS_OK;
}
Example #28
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
 retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
Example #29
static bool torture_local_idtree_simple(struct torture_context *tctx)
{
	struct idr_context *idr;
	int i, ret;
	int *ids;
	int *present;
	extern int torture_numops;
	int n = torture_numops;
	TALLOC_CTX *mem_ctx = tctx;

	idr = idr_init(mem_ctx);

	ids = talloc_zero_array(mem_ctx, int, n);
	present = talloc_zero_array(mem_ctx, int, n);

	for (i=0;i<n;i++) {
		ids[i] = -1;
	}

	for (i=0;i<n;i++) {
		int ii = random() % n;
		void *p = idr_find(idr, ids[ii]);
		if (present[ii]) {
			if (p != &ids[ii]) {
				torture_fail(tctx, talloc_asprintf(tctx, 
						"wrong ptr at %d - %p should be %p", 
				       ii, p, &ids[ii]));
			}
			if (random() % 7 == 0) {
				if (idr_remove(idr, ids[ii]) != 0) {
					torture_fail(tctx, talloc_asprintf(tctx,
						"remove failed at %d (id=%d)", 
					       i, ids[ii]));
				}
				present[ii] = 0;
				ids[ii] = -1;
			}
		} else {
			if (p != NULL) {
				torture_fail(tctx, 
					     talloc_asprintf(tctx,
							     "non-present at %d gave %p (would be %d)", 
							     ii, p, 
							     (int)((((char *)p) - (char *)(&ids[0])) / sizeof(int))));
			}
			if (random() % 5) {
				ids[ii] = idr_get_new(idr, &ids[ii], n);
				if (ids[ii] < 0) {
					torture_fail(tctx, talloc_asprintf(tctx,
						"alloc failure at %d (ret=%d)", 
					       ii, ids[ii]));
				} else {
					present[ii] = 1;
				}
			}
		}
	}

	torture_comment(tctx, "done %d random ops\n", i);

	for (i=0;i<n;i++) {
		if (present[i]) {
			if (idr_remove(idr, ids[i]) != 0) {
				torture_fail(tctx, talloc_asprintf(tctx,
						"delete failed on cleanup at %d (id=%d)", 
				       i, ids[i]));
			}
		}
	}

	/* now test some limits */
	for (i=0;i<25000;i++) {
		ret = idr_get_new_above(idr, &ids[0], random() % 25000, 0x10000-3);
		torture_assert(tctx, ret != -1, "idr_get_new_above failed");
	}

	ret = idr_get_new_above(idr, &ids[0], 0x10000-2, 0x10000);
	torture_assert_int_equal(tctx, ret, 0x10000-2, "idr_get_new_above failed");
	ret = idr_get_new_above(idr, &ids[0], 0x10000-1, 0x10000);
	torture_assert_int_equal(tctx, ret, 0x10000-1, "idr_get_new_above failed");
	ret = idr_get_new_above(idr, &ids[0], 0x10000, 0x10000);
	torture_assert_int_equal(tctx, ret, 0x10000, "idr_get_new_above failed");
	ret = idr_get_new_above(idr, &ids[0], 0x10000+1, 0x10000);
	torture_assert_int_equal(tctx, ret, -1, "idr_get_new_above succeeded above limit");
	ret = idr_get_new_above(idr, &ids[0], 0x10000+2, 0x10000);
	torture_assert_int_equal(tctx, ret, -1, "idr_get_new_above succeeded above limit");

	torture_comment(tctx, "cleaned up\n");
	return true;
}
Example #30
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i, vproc_id;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	if (!idr_pre_get(&vprocs, GFP_KERNEL))
		goto free_vrp;

	mutex_lock(&vprocs_mutex);

	err = idr_get_new(&vprocs, vrp, &vproc_id);

	mutex_unlock(&vprocs_mutex);

	if (err) {
		dev_err(&vdev->dev, "idr_get_new failed: %d\n", err);
		goto free_vrp;
	}

	vrp->id = vproc_id;

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto rem_idr;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				RPMSG_TOTAL_BUF_SPACE,
				&vrp->bufs_dma, GFP_KERNEL);
	if (!bufs_va)
		goto vqs_del;

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers is dedicated for RX */
	vrp->rbufs = bufs_va;

	/* and half is dedicated for TX */
	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;

	/* set up the receive buffers */
	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/* tell the remote processor it can start sending messages */
	virtqueue_kick(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
					bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
rem_idr:
	mutex_lock(&vprocs_mutex);
	idr_remove(&vprocs, vproc_id);
	mutex_unlock(&vprocs_mutex);
free_vrp:
	kfree(vrp);
	return err;
}
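Nearly all of the kernel examples above share the same two-step shape: preload the IDR outside the lock with idr_pre_get(), allocate an ID under the lock with idr_get_new(), and retry on -EAGAIN because another CPU may have consumed the preallocated layer in between. The snippet below is a minimal sketch of that pattern, not taken from any of the examples, written against the pre-3.9 IDR API; identifiers such as my_idr, my_lock and my_alloc_id are placeholders.

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_alloc_id(void *object, int *out_id)
{
	int ret;

retry:
	/* Preload free IDR layers; GFP_KERNEL may sleep, so do it unlocked. */
	if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* The allocation itself must be serialized against other users. */
	spin_lock(&my_lock);
	ret = idr_get_new(&my_idr, object, out_id);
	spin_unlock(&my_lock);

	/* Someone else used up the preallocated node: preload and retry. */
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}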