/*
 * Allocate and initialise an i915 hardware context.
 *
 * Allocates the context struct and its backing GEM object; when
 * @file_priv is non-NULL the context is also registered in the
 * per-file IDR with an id above DEFAULT_CONTEXT_ID.  Returns the new
 * context or an ERR_PTR() on failure.
 */
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
		  struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret, id;

	/* Three-argument kmalloc/kfree: BSD DRM port allocator, zeroed */
	ctx = kmalloc(sizeof(*ctx), DRM_I915_GEM, M_WAITOK | M_ZERO);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
	if (ctx->obj == NULL) {
		kfree(ctx, DRM_I915_GEM);
		DRM_DEBUG_DRIVER("Context object allocated failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Gen7+ wants the context object at LLC+MLC cache level */
	if (INTEL_INFO(dev)->gen >= 7) {
		ret = i915_gem_object_set_cache_level(ctx->obj,
						      I915_CACHE_LLC_MLC);
		if (ret)
			goto err_out;
	}

	/* The ring associated with the context object is handled by the normal
	 * object tracking code. We give an initial ring value simple to pass an
	 * assertion in the context switch code.
	 */
	ctx->ring = &dev_priv->ring[RCS];

	/* Default context will never have a file_priv */
	if (file_priv == NULL)
		return ctx;

	ctx->file_priv = file_priv;

again:
	/* Old-style IDR API: preload, then retry the allocation on -EAGAIN */
	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		DRM_DEBUG_DRIVER("idr allocation failed\n");
		goto err_out;
	}

	ret = idr_get_new_above(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_ID + 1, &id);
	if (ret == 0)
		ctx->id = id;

	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		goto err_out;

	return ctx;

err_out:
	/* do_destroy() releases both the GEM object and the context struct */
	do_destroy(ctx);
	return ERR_PTR(ret);
}
Ejemplo n.º 2
0
/*
 * c2port_device_register - register a new C2 port interface
 * @name:	human-readable port name (at most C2PORT_NAME_LEN-1 chars kept)
 * @ops:	low-level pin operations; every callback is mandatory
 * @devdata:	opaque driver data
 *
 * Allocates a c2port_device, assigns it an id from the c2port idr,
 * creates the sysfs device plus its binary attribute, and leaves C2
 * port access disabled by default.  Returns the new device or an
 * ERR_PTR() on failure.
 */
struct c2port_device *c2port_device_register(char *name,
					struct c2port_ops *ops, void *devdata)
{
	struct c2port_device *c2dev;
	int id, ret;

	if (unlikely(!ops) || unlikely(!ops->access) || \
		unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \
		unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set))
		return ERR_PTR(-EINVAL);

	c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
	if (unlikely(!c2dev))
		return ERR_PTR(-ENOMEM);

	/* Old idr API: returns 0 when preallocation failed */
	ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto error_idr_get_new;
	}

	spin_lock_irq(&c2port_idr_lock);
	ret = idr_get_new(&c2port_idr, c2dev, &id);
	spin_unlock_irq(&c2port_idr_lock);

	if (ret < 0)
		goto error_idr_get_new;
	c2dev->id = id;

	c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
					"c2port%d", id);
	/* device_create() returns ERR_PTR() on failure, never NULL */
	if (unlikely(IS_ERR(c2dev->dev))) {
		ret = PTR_ERR(c2dev->dev);
		goto error_device_create;
	}
	dev_set_drvdata(c2dev->dev, c2dev);

	/* strncpy() alone would leave a long name unterminated */
	strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1);
	c2dev->name[C2PORT_NAME_LEN - 1] = '\0';
	c2dev->ops = ops;
	mutex_init(&c2dev->mutex);

	/* Create binary file */
	c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
	ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
	if (unlikely(ret))
		goto error_device_create_bin_file;

	/* By default C2 port access is off */
	c2dev->access = c2dev->flash_access = 0;
	ops->access(c2dev, 0);

	dev_info(c2dev->dev, "C2 port %s added\n", name);
	dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes "
				"(%d bytes total)\n",
				name, ops->blocks_num, ops->block_size,
				ops->blocks_num * ops->block_size);

	return c2dev;

	/* Error unwinding, in reverse order of acquisition */
error_device_create_bin_file:
	device_destroy(c2port_class, 0);

error_device_create:
	spin_lock_irq(&c2port_idr_lock);
	idr_remove(&c2port_idr, id);
	spin_unlock_irq(&c2port_idr_lock);

error_idr_get_new:
	kfree(c2dev);

	return ERR_PTR(ret);
}
Ejemplo n.º 3
0
/*
 * Probe a ds2782/ds2786 battery monitor: take an id from the battery
 * idr, allocate per-device state, register the power supply, and
 * unwind everything in reverse order on failure.
 */
static int ds278x_battery_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct ds278x_platform_data *pdata = client->dev.platform_data;
	struct ds278x_info *info;
	int ret;
	int num;

	/*
	 * ds2786 should have the sense resistor value set
	 * in the platform data
	 */
	if (id->driver_data == DS2786 && !pdata) {
		dev_err(&client->dev, "missing platform data for ds2786\n");
		return -EINVAL;
	}

	/* Get an ID for this battery */
	ret = idr_pre_get(&battery_id, GFP_KERNEL);
	if (ret == 0) {
		/* idr_pre_get() returns 0 when preallocation failed */
		ret = -ENOMEM;
		goto fail_id;
	}

	mutex_lock(&battery_lock);
	ret = idr_get_new(&battery_id, client, &num);
	mutex_unlock(&battery_lock);
	if (ret < 0)
		goto fail_id;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto fail_info;
	}

	/* Unique supply name, e.g. "<client-name>-0" */
	info->battery.name = kasprintf(GFP_KERNEL, "%s-%d", client->name, num);
	if (!info->battery.name) {
		ret = -ENOMEM;
		goto fail_name;
	}

	if (id->driver_data == DS2786)
		info->rsns = pdata->rsns;

	i2c_set_clientdata(client, info);
	info->client = client;
	info->id = num;
	info->ops  = &ds278x_ops[id->driver_data];
	ds278x_power_supply_init(&info->battery);

	ret = power_supply_register(&client->dev, &info->battery);
	if (ret) {
		dev_err(&client->dev, "failed to register battery\n");
		goto fail_register;
	}

	return 0;

	/* Error unwinding, in reverse order of acquisition */
fail_register:
	kfree(info->battery.name);
fail_name:
	kfree(info);
fail_info:
	mutex_lock(&battery_lock);
	idr_remove(&battery_id, num);
	mutex_unlock(&battery_lock);
fail_id:
	return ret;
}
Ejemplo n.º 4
0
/*
 * timer_create(2): create a POSIX per-process timer on @which_clock.
 *
 * Allocates a k_itimer, assigns it an id from posix_timers_id, fills
 * in the sigevent-derived notification state, hands the timer to the
 * clock's timer_create hook and finally links it onto the signal
 * struct's posix_timers list.  On any failure the partially built
 * timer is torn down via release_posix_timer().
 */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
 retry:
	/* Old idr API: preload outside the lock, retry on -EAGAIN */
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/* POSIX wants EAGAIN when the id space is exhausted */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		/* Resolve the target pid under RCU and take a reference */
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		/* No sigevent: default to SIGALRM directed at this process */
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	/* Let the clock do its per-type setup (e.g. hrtimer init) */
	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	/* Publish the timer on the signal struct's list */
	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
Ejemplo n.º 5
0
/*
 * pps_register_source - add a new PPS source into the system
 * @info:		source description (mode bits, name, echo hook, ...)
 * @default_params:	default parameters for the new source; must be a
 *			subset of info->mode
 *
 * Validates @info, allocates the pps_device, assigns it an id from
 * pps_idr (bounded by PPS_MAX_SOURCES) and creates the char device.
 * Returns the new source id, or a negative errno on failure.
 */
int pps_register_source(struct pps_source_info *info, int default_params)
{
	struct pps_device *pps;
	int id;
	int err;

	/* Sanity checks */
	if ((info->mode & default_params) != default_params) {
		printk(KERN_ERR "pps: %s: unsupported default parameters\n",
					info->name);
		err = -EINVAL;
		goto pps_register_source_exit;
	}
	if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
			info->echo == NULL) {
		printk(KERN_ERR "pps: %s: echo function is not defined\n",
					info->name);
		err = -EINVAL;
		goto pps_register_source_exit;
	}
	if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
		printk(KERN_ERR "pps: %s: unspecified time format\n",
					info->name);
		err = -EINVAL;
		goto pps_register_source_exit;
	}

	/* Allocate memory for the new PPS source struct */
	pps = kzalloc(sizeof(struct pps_device), GFP_KERNEL);
	if (pps == NULL) {
		err = -ENOMEM;
		goto pps_register_source_exit;
	}

	/* These initializations must be done before calling idr_get_new()
	 * in order to avoid races into pps_event().
	 */
	pps->params.api_version = PPS_API_VERS;
	pps->params.mode = default_params;
	pps->info = *info;

	init_waitqueue_head(&pps->queue);
	spin_lock_init(&pps->lock);
	atomic_set(&pps->usage, 1);

	/* Get new ID for the new PPS source */
	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
		err = -ENOMEM;
		goto kfree_pps;
	}

	spin_lock_irq(&pps_idr_lock);

	/* Now really allocate the PPS source.
	 * After idr_get_new() calling the new source will be freely available
	 * into the kernel.
	 */
	err = idr_get_new(&pps_idr, pps, &id);
	if (err < 0) {
		spin_unlock_irq(&pps_idr_lock);
		goto kfree_pps;
	}

	/* Enforce the bounded id space for PPS sources */
	id = id & MAX_ID_MASK;
	if (id >= PPS_MAX_SOURCES) {
		spin_unlock_irq(&pps_idr_lock);

		printk(KERN_ERR "pps: %s: too many PPS sources in the system\n",
					info->name);
		err = -EBUSY;
		goto free_idr;
	}
	pps->id = id;

	spin_unlock_irq(&pps_idr_lock);

	/* Create the char device */
	err = pps_register_cdev(pps);
	if (err < 0) {
		printk(KERN_ERR "pps: %s: unable to create char device\n",
					info->name);
		goto free_idr;
	}

	pr_info("new PPS source %s at ID %d\n", info->name, id);

	return id;

	/* Error unwinding, in reverse order of acquisition */
free_idr:
	spin_lock_irq(&pps_idr_lock);
	idr_remove(&pps_idr, id);
	spin_unlock_irq(&pps_idr_lock);

kfree_pps:
	kfree(pps);

pps_register_source_exit:
	printk(KERN_ERR "pps: %s: unable to register source\n", info->name);

	return err;
}
Ejemplo n.º 6
0
/*
 * Probe a virtio rpmsg device: allocate the virtproc_info, take an id
 * from the vprocs idr, set up the rx/tx virtqueues and their coherent
 * buffers, optionally create the name-service endpoint, then kick the
 * remote processor.  Returns 0 on success or a negative errno.
 */
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i, vproc_id;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	if (!idr_pre_get(&vprocs, GFP_KERNEL)) {
		/* bug fix: err was still 0 here, so probe returned success */
		err = -ENOMEM;
		goto free_vrp;
	}

	mutex_lock(&vprocs_mutex);

	err = idr_get_new(&vprocs, vrp, &vproc_id);

	mutex_unlock(&vprocs_mutex);

	if (err) {
		dev_err(&vdev->dev, "idr_get_new failed: %d\n", err);
		goto free_vrp;
	}

	vrp->id = vproc_id;

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto rem_idr;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				RPMSG_TOTAL_BUF_SPACE,
				&vrp->bufs_dma, GFP_KERNEL);
	if (!bufs_va) {
		/* bug fix: without this, allocation failure returned 0 */
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers is dedicated for RX */
	vrp->rbufs = bufs_va;

	/* and half is dedicated for TX */
	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;

	/* set up the receive buffers */
	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/* tell the remote processor it can start sending messages */
	virtqueue_kick(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

	/* Error unwinding, in reverse order of acquisition */
free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
					bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
rem_idr:
	mutex_lock(&vprocs_mutex);
	idr_remove(&vprocs, vproc_id);
	mutex_unlock(&vprocs_mutex);
free_vrp:
	kfree(vrp);
	return err;
}
Ejemplo n.º 7
0
/*
 * Probe the bq275x0 fuel gauge: verify I2C functionality, wake the
 * chip and read its firmware version, create the driver "state" sysfs
 * attribute, take an id from the battery idr and register the power
 * supply helpers.  Returns 0 on success or a negative errno.
 */
static int bq275x0_battery_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
{
	int num;
	char *name;
	int retval = 0;
	struct bq275x0_device_info *di;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		BQ275x0_ERR("[%s,%d]: need I2C_FUNC_I2C\n",__FUNCTION__,__LINE__);
		return -ENODEV;
	}

	/* CONTROL subcommand 0x0008 (firmware version), then read it back */
	i2c_smbus_write_word_data(client,0x00,0x0008);
	mdelay(2);
	retval = i2c_smbus_read_word_data(client,0x00);
	if (retval < 0)
		printk(KERN_ERR "[%s,%d] Coulometer Damaged or Firmware Error\n",__FUNCTION__,__LINE__);
	else
		printk(KERN_ERR "Normal Mode and read Firmware version=%04x\n", retval);

	retval = driver_create_file(&(bq275x0_battery_driver.driver), &driver_attr_state);
	if (0 != retval) {
		printk("failed to create sysfs entry(state): %d\n", retval);
		/* bug fix: propagate the real error instead of -1 (-EPERM) */
		return retval;
	}

	power_set_batt_measurement_type(BATT_MEASURE_BY_BQ275x0);

	/* Get new ID for the new battery device */
	retval = idr_pre_get(&bq275x0_battery_id, GFP_KERNEL);
	if (retval == 0) {
		retval = -ENOMEM;
		goto batt_failed_0;
	}
	mutex_lock(&bq275x0_battery_mutex);
	retval = idr_get_new(&bq275x0_battery_id, client, &num);
	mutex_unlock(&bq275x0_battery_mutex);
	if (retval < 0)
		goto batt_failed_0;

	name = kasprintf(GFP_KERNEL, "bq275x0-%d", num);
	if (!name) {
		dev_err(&client->dev, "failed to allocate device name\n");
		retval = -ENOMEM;
		goto batt_failed_1;
	}

	di = kzalloc(sizeof(*di), GFP_KERNEL);
	if (!di) {
		dev_err(&client->dev, "failed to allocate device info data\n");
		retval = -ENOMEM;
		goto batt_failed_2;
	}
	di->id = num;

	i2c_set_clientdata(client, di);
	di->dev = &client->dev;
	di->bat.name = name;
	di->client = client;

	bq275x0_powersupply_init(di);

	g_battery_measure_by_bq275x0_i2c_client = client;

	dev_info(&client->dev, "bq275x0 support ver. %s enabled\n", DRIVER_VERSION);

	return 0;

	/* Error unwinding, in reverse order of acquisition */
batt_failed_2:
	kfree(name);
batt_failed_1:
	mutex_lock(&bq275x0_battery_mutex);
	idr_remove(&bq275x0_battery_id, num);
	mutex_unlock(&bq275x0_battery_mutex);
batt_failed_0:
	/* bug fix: drop the sysfs attribute created above so a later
	 * re-probe does not leak or collide */
	driver_remove_file(&(bq275x0_battery_driver.driver), &driver_attr_state);

	power_set_batt_measurement_type(BATT_MEASURE_UNKNOW);

	return retval;
}
/*
 * reg_mr verb: register a userspace memory region.
 *
 * Pins the user pages described by cmd.start/cmd.length, asks the
 * device to create an MR with cmd.access_flags, installs the MR in
 * the uverbs MR idr and copies the new handle plus lkey/rkey back to
 * userspace.  Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_umem_object       *obj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* start and hca_va must share the same offset within a page */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->uobject.context = file->ucontext;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
			  (void *) (unsigned long) cmd.start, cmd.length,
			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
	if (ret)
		goto err_free;

	obj->umem.virt_base = cmd.hca_va;

	mutex_lock(&ib_uverbs_idr_mutex);

	/* Validate the PD handle against this process's context */
	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	if (!pd->device->reg_user_mr) {
		ret = -ENOSYS;
		goto err_up;
	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_up;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = &obj->uobject;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	/* Zero resp so no uninitialised padding reaches userspace */
	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

retry:
	/* Old idr API: preload, then retry the allocation on -EAGAIN */
	if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_unreg;
	}

	ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_unreg;

	resp.mr_handle = obj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	mutex_unlock(&ib_uverbs_idr_mutex);

	return in_len;

	/* Error unwinding, in reverse order of acquisition */
err_idr:
	idr_remove(&ib_uverbs_mr_idr, obj->uobject.id);

err_unreg:
	ib_dereg_mr(mr);
	atomic_dec(&pd->usecnt);

err_up:
	mutex_unlock(&ib_uverbs_idr_mutex);

	ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
	kfree(obj);
	return ret;
}
/*
 * create_cq verb: create a completion queue for userspace.
 *
 * Optionally binds the CQ to a completion-event channel, asks the
 * device for a CQ of cmd.cqe entries, installs it in the uverbs CQ
 * idr and returns the handle plus actual cqe count to userspace.
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *uobj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* A negative comp_channel means "no completion channel" */
	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	uobj->uobject.user_handle   = cmd.user_handle;
	uobj->uobject.context       = file->ucontext;
	uobj->uverbs_file	    = file;
	uobj->comp_events_reported  = 0;
	uobj->async_events_reported = 0;
	INIT_LIST_HEAD(&uobj->comp_list);
	INIT_LIST_HEAD(&uobj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &uobj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	mutex_lock(&ib_uverbs_idr_mutex);

retry:
	/* Old idr API: preload, then retry the allocation on -EAGAIN */
	if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	/* Zero resp so no uninitialised padding reaches userspace */
	memset(&resp, 0, sizeof resp);
	resp.cq_handle = uobj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	mutex_unlock(&ib_uverbs_idr_mutex);

	return in_len;

	/* Error unwinding, in reverse order of acquisition */
err_idr:
	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);

err_up:
	mutex_unlock(&ib_uverbs_idr_mutex);
	ib_destroy_cq(cq);

err:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, uobj);
	kfree(uobj);
	return ret;
}
/*
 * create_ah verb: create an address handle for userspace.
 *
 * Validates the PD handle, translates the userspace ib_uverbs_ah_attr
 * into a kernel ib_ah_attr, creates the AH, installs it in the uverbs
 * AH idr and returns the handle to userspace.  Returns in_len on
 * success or a negative errno.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	mutex_lock(&ib_uverbs_idr_mutex);

	/* Validate the PD handle against this process's context */
	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	uobj->user_handle = cmd.user_handle;
	uobj->context     = file->ucontext;

	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	ah->uobject = uobj;

retry:
	/* Old idr API: preload, then retry the allocation on -EAGAIN */
	if (!idr_pre_get(&ib_uverbs_ah_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_ah_idr, ah, &uobj->id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	/* bug fix: zero resp before copy_to_user so uninitialised stack
	 * padding never leaks to userspace (the sibling verbs memset
	 * their resp structs too) */
	memset(&resp, 0, sizeof resp);
	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	mutex_unlock(&ib_uverbs_idr_mutex);

	return in_len;

	/* Error unwinding, in reverse order of acquisition */
err_idr:
	idr_remove(&ib_uverbs_ah_idr, uobj->id);

err_destroy:
	ib_destroy_ah(ah);

err_up:
	mutex_unlock(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
/*
 * alloc_pd verb: allocate a protection domain for userspace.
 *
 * Asks the device for a PD, installs it in the uverbs PD idr and
 * returns the handle to userspace.  Returns in_len on success or a
 * negative errno.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->context = file->ucontext;

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	mutex_lock(&ib_uverbs_idr_mutex);

retry:
	/* Old idr API: preload, then retry the allocation on -EAGAIN */
	if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	/* Zero resp so no uninitialised padding reaches userspace */
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	mutex_unlock(&ib_uverbs_idr_mutex);

	return in_len;

	/* Error unwinding, in reverse order of acquisition */
err_idr:
	idr_remove(&ib_uverbs_pd_idr, uobj->id);

err_up:
	mutex_unlock(&ib_uverbs_idr_mutex);
	ib_dealloc_pd(pd);

err:
	kfree(uobj);
	return ret;
}
Ejemplo n.º 12
0
/*
 * Probe the bq27425 fuel gauge: reserve an id, allocate device/bus
 * state, register the power supply, arm the poll timer, hook up the
 * battery interrupt and create the sysfs class/device nodes.
 *
 * NOTE(review): several late error paths below return directly and
 * leak earlier acquisitions (idr id, name, di, bus, the registered
 * power supply, the armed timer and the requested irq); they should
 * unwind through the goto chain instead — confirm before relying on
 * a clean re-probe after failure.
 */
static int bq27425_battery_probe(struct i2c_client *client,
                                 const struct i2c_device_id *id)
{
    char *name;
    int num;
    int retval = 0;
    struct bq275xx_device_info *di;
    struct bq275xx_access_methods *bus;
    struct device *bq27425_dev;

    /* Get new ID for the new battery device */
    retval = idr_pre_get(&battery_id, GFP_KERNEL);
    if (retval == 0)
        return -ENOMEM;
    mutex_lock(&battery_mutex);
    retval = idr_get_new(&battery_id, client, &num);
    mutex_unlock(&battery_mutex);
    if (retval < 0)
        return retval;

    /* Unique supply name, e.g. "bq27425-0" */
    name = kasprintf(GFP_KERNEL, "bq27425-%d", num);
    if (!name) {
        dev_err(&client->dev, "failed to allocate device name\n");
        retval = -ENOMEM;
        goto batt_failed_1;
    }

    di = kzalloc(sizeof(*di), GFP_KERNEL);
    if (!di) {
        dev_err(&client->dev, "failed to allocate device info data\n");
        retval = -ENOMEM;
        goto batt_failed_2;
    }
    di->id = num;

    bus = kzalloc(sizeof(*bus), GFP_KERNEL);
    if (!bus) {
        dev_err(&client->dev, "failed to allocate access method "
                "data\n");
        retval = -ENOMEM;
        goto batt_failed_3;
    }

    bq27425_gpio_init();
    i2c_set_clientdata(client, di);
    di->dev = &client->dev;
    di->bat.name = name;
    bus->read = &bq27425_read;
    di->bus = bus;
    di->client = client;
    bq_this_chip = di;

    //bq27425_first_run(di);

    /*	retval = bq275xx_battery_flags(di);
    	if((retval & 0x20) == 0x20)
    	{     //power on reset
    	       bq27425_g1_support_4350mv(di);
    	}
    */

    bq275xx_powersupply_init(di);

    retval = power_supply_register(&client->dev, &di->bat);

    if (retval) {
        dev_err(&client->dev, "failed to register battery\n");
        goto batt_failed_4;
    }

    /* Start the 1 s poll timer before the irq is wired up */
    setup_timer(&bq27425_timer, bq27425_timer_func, 0);
    mod_timer(&bq27425_timer,
              jiffies + msecs_to_jiffies(1000));
    //init workqueue
    INIT_WORK(&bq27425_work, bq27425_charger_work_func);

    retval = request_irq(
                 gpio_to_irq(EXYNOS4_GPX0(0)),
                 bq27425_int_handler,
                 IRQF_TRIGGER_FALLING,
                 "bq27425_int_handler", NULL);
    if (retval < 0)
    {
        /* NOTE(review): request_irq() failed, so there is no irq to
         * free here; returning -1 (-EPERM) also skips all cleanup. */
        printk("bq27425 request_irq(uok_irq_handler) failed due to %d !!!\n",retval);
        free_irq(gpio_to_irq(EXYNOS4_GPX0(0)),NULL);
        return -1;
    }
    enable_irq_wake(gpio_to_irq(EXYNOS4_GPX0(0)));

    /* NOTE(review): the two direct returns below skip all cleanup
     * (power supply, timer, irq, idr id, allocations). */
    bq27425_class = class_create(THIS_MODULE, "fuel_gaugle");
    if (IS_ERR((void *)bq27425_class))
        return PTR_ERR((void *)bq27425_class);
    bq27425_dev = device_create(bq27425_class, NULL,
                                MKDEV(0, 0), NULL, "bq27425");
    if (IS_ERR((void *)bq27425_dev))
        return PTR_ERR((void *)bq27425_dev);

    retval = device_create_file(bq27425_dev, &dev_attr_update);
    if (retval < 0)
        goto err_create_file_1;

    return 0;

err_create_file_1:
    /* NOTE(review): falls through to the kfree chain without
     * power_supply_unregister, del_timer, free_irq or class_destroy */
    device_destroy(bq27425_class, MKDEV(0, 0));
    printk(KERN_ERR "switch: Failed to register driver %s\n", "bq27425");
batt_failed_4:
    kfree(bus);
batt_failed_3:
    kfree(di);
batt_failed_2:
    kfree(name);
batt_failed_1:
    mutex_lock(&battery_mutex);
    idr_remove(&battery_id, num);
    mutex_unlock(&battery_mutex);

    return retval;
}
/*
 * Probe a bq27x00-family fuel gauge: take an id from the battery idr,
 * allocate the device info, verify the chip responds over I2C,
 * initialise the power supply, request the battery interrupt and
 * (on PXA988) register an optional gpio wakeup source.
 * Returns 0 on success or a negative errno.
 */
static int bq27x00_battery_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
{
	char *name;
	struct bq27x00_device_info *di;
	int num, val;
	int retval = 0;
	struct gpio_edge_desc *c;

	/* Get new ID for the new battery device */
	retval = idr_pre_get(&battery_id, GFP_KERNEL);
	if (retval == 0)
		return -ENOMEM;
	mutex_lock(&battery_mutex);
	retval = idr_get_new(&battery_id, client, &num);
	mutex_unlock(&battery_mutex);
	if (retval < 0)
		return retval;

	/* Unique supply name, e.g. "bq27425-0" */
	name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
	if (!name) {
		dev_err(&client->dev, "failed to allocate device name\n");
		retval = -ENOMEM;
		goto batt_failed_1;
	}

	di = kzalloc(sizeof(*di), GFP_KERNEL);
	if (!di) {
		dev_err(&client->dev, "failed to allocate device info data\n");
		retval = -ENOMEM;
		goto batt_failed_2;
	}

	di->id = num;
	di->dev = &client->dev;
	di->chip = id->driver_data;
	di->bat.name = name;
	di->bus.read = &bq27x00_read_i2c;
	di->bus.write = &bq27x00_write_i2c;
	di->irq = client->irq;

	i2c_set_clientdata(client, di);

	/* CONTROL subcommand 0x0002, used here as a presence check */
	val = 0x2;
	retval = bq27x00_write(di, BQ27425_REG_CNTL, &val, false);
	if (retval < 0) {
		dev_dbg(di->dev, "probe failed!\n");
		goto batt_failed_3;
	}

	retval = bq27x00_powersupply_init(di);
	if (retval)
		goto batt_failed_3;

	if (di->chip == BQ27425) {
		val = 0xc;
		bq27x00_write(di, BQ27425_REG_CNTL, &val, false);
		dev_info(di->dev, "battery inserted!\n");

		val = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
		dev_dbg(di->dev, "flags-0: 0x%x\n", val);
	}

	retval = request_threaded_irq(di->irq, NULL, bq27425_irq_handler,
				      IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
				      "bq27425 battery", di);
	if (retval < 0)
		goto batt_failed_3;

	/* initialize the gpio wakeup source */
#ifdef CONFIG_CPU_PXA988
	/* bug fix: the kmalloc() result was dereferenced unchecked, which
	 * would oops on allocation failure.  The wakeup source is an
	 * optional feature, so skip it gracefully instead.
	 */
	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (c) {
		/* FIXME: hardcode here*/
		c->mfp = di->irq - IRQ_GPIO_START;
		c->handler = NULL;
		di->gpio_wakeup = c;

		device_init_wakeup(di->dev, 1);
	} else {
		dev_warn(di->dev, "failed to allocate gpio wakeup source\n");
	}
#endif

	return 0;

	/* Error unwinding, in reverse order of acquisition */
batt_failed_3:
	/* NOTE(review): if bq27x00_powersupply_init() registered the
	 * supply, this path leaks that registration — confirm against
	 * its definition. */
	kfree(di);
batt_failed_2:
	kfree(name);
batt_failed_1:
	mutex_lock(&battery_mutex);
	idr_remove(&battery_id, num);
	mutex_unlock(&battery_mutex);

	return retval;
}
Ejemplo n.º 14
0
/*
 * sys_timer_create: create a POSIX per-process timer (older variant).
 *
 * Allocates a k_itimer, assigns it an id from posix_timers_id, runs
 * the per-clock timer_create dispatch, copies the id to userspace and
 * finally links the timer onto the owning process's posix_timers list
 * under siglock.  On failure the partially built timer is torn down
 * via release_posix_timer().
 */
asmlinkage long
sys_timer_create(const clockid_t which_clock,
		 struct sigevent __user *timer_event_spec,
		 timer_t __user * created_timer_id)
{
	int error = 0;
	struct k_itimer *new_timer = NULL;
	int new_timer_id;
	struct task_struct *process = NULL;
	unsigned long flags;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
 retry:
	/* Old idr API: preload outside the lock, retry on -EAGAIN */
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, (void *) new_timer,
			    &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error) {
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;
	/* Per-clock setup hook for this timer type */
	error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
	if (error)
		goto out;

	/*
	 * return the timer_id now.  The next step is hard to
	 * back out if there is an error.
	 */
	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		new_timer->it_sigev_notify = event.sigev_notify;
		new_timer->it_sigev_signo = event.sigev_signo;
		new_timer->it_sigev_value = event.sigev_value;

		read_lock(&tasklist_lock);
		if ((process = good_sigevent(&event))) {
			/*
			 * We may be setting up this process for another
			 * thread.  It may be exiting.  To catch this
			 * case the we check the PF_EXITING flag.  If
			 * the flag is not set, the siglock will catch
			 * him before it is too late (in exit_itimers).
			 *
			 * The exec case is a bit more involved but easy
			 * to code.  If the process is in our thread
			 * group (and it must be or we would not allow
			 * it here) and is doing an exec, it will cause
			 * us to be killed.  In this case it will wait
			 * for us to die which means we can finish this
			 * linkage with our last gasp. I.e. no code :)
			 */
			spin_lock_irqsave(&process->sighand->siglock, flags);
			if (!(process->flags & PF_EXITING)) {
				new_timer->it_process = process;
				list_add(&new_timer->list,
					 &process->signal->posix_timers);
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
					get_task_struct(process);
			} else {
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				process = NULL;
			}
		}
		read_unlock(&tasklist_lock);
		if (!process) {
			error = -EINVAL;
			goto out;
		}
	} else {
		/* No sigevent supplied: default to SIGALRM at group leader */
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->it_sigev_signo = SIGALRM;
		new_timer->it_sigev_value.sival_int = new_timer->it_id;
		process = current->group_leader;
		spin_lock_irqsave(&process->sighand->siglock, flags);
		new_timer->it_process = process;
		list_add(&new_timer->list, &process->signal->posix_timers);
		spin_unlock_irqrestore(&process->sighand->siglock, flags);
	}

	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */

out:
	if (error)
		release_posix_timer(new_timer, it_id_set);

	return error;
}
Ejemplo n.º 15
0
/*
 * pps_register_cdev - register a character device for a PPS source
 * @pps: the PPS source descriptor to expose to user space
 *
 * Assigns the source an ID from pps_idr (under pps_idr_lock), then creates
 * the character device and the class device node for it.  Returns 0 on
 * success or a negative errno; on failure after the ID was assigned, the
 * ID is released again.
 */
int pps_register_cdev(struct pps_device *pps)
{
	int err;
	dev_t devt;

	mutex_lock(&pps_idr_lock);
	/* Get new ID for the new PPS source */
	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
		mutex_unlock(&pps_idr_lock);
		return -ENOMEM;
	}

	/* Now really allocate the PPS source.
	 * After idr_get_new() calling the new source will be freely available
	 * into the kernel.
	 */
	err = idr_get_new(&pps_idr, pps, &pps->id);
	mutex_unlock(&pps_idr_lock);

	if (err < 0)
		return err;

	/* NOTE(review): masking with MAX_ID_MASK assumes the idr layer may
	 * set bits above the usable ID range — confirm against the idr
	 * version in use. */
	pps->id &= MAX_ID_MASK;
	if (pps->id >= PPS_MAX_SOURCES) {
		pr_err("%s: too many PPS sources in the system\n",
					pps->info.name);
		err = -EBUSY;
		goto free_idr;
	}

	devt = MKDEV(MAJOR(pps_devt), pps->id);

	/* The char device is added before the class device, so the minor is
	 * functional by the time user space sees the /dev node appear. */
	cdev_init(&pps->cdev, &pps_cdev_fops);
	pps->cdev.owner = pps->info.owner;

	err = cdev_add(&pps->cdev, devt, 1);
	if (err) {
		pr_err("%s: failed to add char device %d:%d\n",
				pps->info.name, MAJOR(pps_devt), pps->id);
		goto free_idr;
	}
	pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
							"pps%d", pps->id);
	if (IS_ERR(pps->dev)) {
		err = PTR_ERR(pps->dev);
		goto del_cdev;
	}

	pps->dev->release = pps_device_destruct;

	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
			MAJOR(pps_devt), pps->id);

	return 0;

	/* Error unwinding: tear down in strict reverse order of setup. */
del_cdev:
	cdev_del(&pps->cdev);

free_idr:
	mutex_lock(&pps_idr_lock);
	idr_remove(&pps_idr, pps->id);
	mutex_unlock(&pps_idr_lock);

	return err;
}
/*
 * ib_uverbs_create_qp - handle the user-space "create QP" verb
 * @file: the uverbs file the request arrived on
 * @buf: user buffer holding a struct ib_uverbs_create_qp command
 * @in_len: length of the command buffer
 * @out_len: length available for the response
 *
 * Looks up the PD, CQs and (optionally) SRQ handles under
 * ib_uverbs_idr_mutex, asks the device driver to create the QP, bumps the
 * reference counts of every object the QP now points at, publishes the QP
 * in ib_uverbs_qp_idr, and copies the response back to user space.
 * Returns @in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *uobj;
	struct ib_pd                   *pd;
	struct ib_cq                   *scq, *rcq;
	struct ib_srq                  *srq;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* The mutex is held across handle lookup, QP creation and idr
	 * publication so none of the looked-up objects can be destroyed
	 * underneath us. */
	mutex_lock(&ib_uverbs_idr_mutex);

	pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
	rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
	srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

	/* Every handle must exist and belong to this file's context;
	 * otherwise one process could use another's objects. */
	if (!pd  || pd->uobject->context  != file->ucontext ||
	    !scq || scq->uobject->context != file->ucontext ||
	    !rcq || rcq->uobject->context != file->ucontext ||
	    (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	uobj->uevent.uobject.user_handle = cmd.user_handle;
	uobj->uevent.uobject.context     = file->ucontext;
	uobj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&uobj->uevent.event_list);
	INIT_LIST_HEAD(&uobj->mcast_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_up;
	}

	/* The driver returned a bare QP; fill in the generic fields and
	 * pin every object it references. */
	qp->device     	  = pd->device;
	qp->pd         	  = pd;
	qp->send_cq    	  = attr.send_cq;
	qp->recv_cq    	  = attr.recv_cq;
	qp->srq	       	  = attr.srq;
	qp->uobject       = &uobj->uevent.uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context    = attr.qp_context;
	qp->qp_type	  = attr.qp_type;
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;

	/* idr_pre_get()/idr_get_new() loop: -EAGAIN means preallocation was
	 * consumed by a concurrent insert, so preallocate and try again. */
retry:
	if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.qp_handle       = uobj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	mutex_unlock(&ib_uverbs_idr_mutex);

	return in_len;

	/* Error unwinding, in reverse order of setup: drop the idr entry,
	 * destroy the QP and undo the refcount bumps, then release the
	 * lock and the uobject. */
err_idr:
	idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id);

err_destroy:
	ib_destroy_qp(qp);
	atomic_dec(&pd->usecnt);
	atomic_dec(&attr.send_cq->usecnt);
	atomic_dec(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_dec(&attr.srq->usecnt);

err_up:
	mutex_unlock(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
Ejemplo n.º 17
0
/*
 * timer_create - create a new POSIX interval timer
 * @which_clock: clock the new timer measures against
 * @timer_event_spec: optional user sigevent describing how to notify
 * @created_timer_id: user pointer that receives the new timer id
 *
 * Allocates a k_itimer, assigns it an id from posix_timers_id, resolves
 * the notification target, and links the timer onto the current signal
 * struct's posix_timers list.  Returns 0 on success or a negative errno;
 * on any failure the timer (and, once set, its id) is released via
 * release_posix_timer().
 */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;	/* tells release path whether to free the idr slot */

	if (invalid_clockid(which_clock))
		return -EINVAL;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	/* idr_pre_get()/idr_get_new() loop: -EAGAIN means the preallocated
	 * node was consumed by a concurrent insert, so preallocate again. */
 retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;
	/* Clock-specific creation hook (per-clock timer_create method). */
	error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
	if (error)
		goto out;

	/*
	 * return the timer_id now.  The next step is hard to
	 * back out if there is an error.
	 */
	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		/* good_sigevent() walks task structs; RCU keeps the target
		 * pid stable while we take a reference on it. */
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		/* No sigevent supplied: default to SIGALRM delivered to the
		 * thread group, with the timer id as the signal value. */
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	/* Publish the timer on our signal struct; from here on other
	 * threads in the group can see (and delete) it. */
	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
 	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
Ejemplo n.º 18
0
/*
 * bq27541_battery_probe - i2c probe handler for the bq27541 fuel gauge
 * @client: the i2c client being bound
 * @id: matched device id (its name is used to build the device name)
 *
 * Allocates an instance id from battery_id, sets up the device-info and
 * access-method structures, optionally registers the test platform device,
 * and schedules the delayed hardware configuration work.  Returns 0 on
 * success or a negative errno; on failure everything allocated so far is
 * released in reverse order.
 */
static int bq27541_battery_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
{
	char *name;
	struct bq27541_device_info *di;
	struct bq27541_access_methods *bus;
	int num;
	int retval = 0;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	/* Get new ID for the new battery device.  idr_pre_get() returns 0
	 * when it could NOT preallocate, hence the inverted check. */
	retval = idr_pre_get(&battery_id, GFP_KERNEL);
	if (retval == 0)
		return -ENOMEM;
	mutex_lock(&battery_mutex);
	retval = idr_get_new(&battery_id, client, &num);
	mutex_unlock(&battery_mutex);
	if (retval < 0)
		return retval;

	name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
	if (!name) {
		dev_err(&client->dev, "failed to allocate device name\n");
		retval = -ENOMEM;
		goto batt_failed_1;
	}

	di = kzalloc(sizeof(*di), GFP_KERNEL);
	if (!di) {
		dev_err(&client->dev, "failed to allocate device info data\n");
		retval = -ENOMEM;
		goto batt_failed_2;
	}
	di->id = num;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		dev_err(&client->dev, "failed to allocate access method "
					"data\n");
		retval = -ENOMEM;
		goto batt_failed_3;
	}

	i2c_set_clientdata(client, di);
	di->dev = &client->dev;
	bus->read = &bq27541_read_i2c;
	di->bus = bus;
	di->client = client;

#ifdef CONFIG_BQ27541_TEST_ENABLE
	platform_set_drvdata(&this_device, di);
	retval = platform_device_register(&this_device);
	if (!retval) {
		retval = sysfs_create_group(&this_device.dev.kobj,
			 &fs_attr_group);
		if (retval)
			goto batt_failed_4;
	} else
		goto batt_failed_4;
#endif

	/* NOTE(review): retval is always 0 here — every failure above jumps
	 * to a cleanup label — so these two checks are defensive no-ops
	 * kept for when setup/powerup steps are (re)introduced. */
	if (retval) {
		dev_err(&client->dev, "failed to setup bq27541\n");
		goto batt_failed_4;
	}

	if (retval) {
		dev_err(&client->dev, "failed to powerup bq27541\n");
		goto batt_failed_4;
	}

	spin_lock_init(&lock);

	bq27541_di = di;
	INIT_WORK(&di->counter, bq27541_coulomb_counter_work);
	INIT_DELAYED_WORK(&di->hw_config, bq27541_hw_config);
	schedule_delayed_work(&di->hw_config, BQ27541_INIT_DELAY);
	/* The generated name is never stored in di or registered anywhere,
	 * so it must be freed here to avoid leaking it on every successful
	 * probe (presumably it was meant for power-supply registration —
	 * TODO confirm before wiring it up). */
	kfree(name);
	return 0;

	/* Cleanup in strict reverse order of allocation. */
batt_failed_4:
	kfree(bus);
batt_failed_3:
	kfree(di);
batt_failed_2:
	kfree(name);
batt_failed_1:
	mutex_lock(&battery_mutex);
	idr_remove(&battery_id, num);
	mutex_unlock(&battery_mutex);

	return retval;
}
Ejemplo n.º 19
0
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *init_attr,
                             struct ib_udata *udata)
{
    static int da_rc_msg_size[]= { 128, 256, 512, 1024, 2048, 4096 };
    static int da_ud_sq_msg_size[]= { 128, 384, 896, 1920, 3968 };
    struct ehca_qp *my_qp;
    struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
    struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
                                          ib_device);
    struct ib_ucontext *context = NULL;
    u64 h_ret;
    int max_send_sge, max_recv_sge, ret;

    /* h_call's out parameters */
    struct ehca_alloc_qp_parms parms;
    u32 swqe_size = 0, rwqe_size = 0;
    u8 daqp_completion, isdaqp;
    unsigned long flags;

    if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
            init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
        ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
                 init_attr->sq_sig_type);
        return ERR_PTR(-EINVAL);
    }

    /* save daqp completion bits */
    daqp_completion = init_attr->qp_type & 0x60;
    /* save daqp bit */
    isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
    init_attr->qp_type = init_attr->qp_type & 0x1F;

    if (init_attr->qp_type != IB_QPT_UD &&
            init_attr->qp_type != IB_QPT_SMI &&
            init_attr->qp_type != IB_QPT_GSI &&
            init_attr->qp_type != IB_QPT_UC &&
            init_attr->qp_type != IB_QPT_RC) {
        ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    }
    if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
            && isdaqp) {
        ehca_err(pd->device, "unsupported LL QP Type=%x",
                 init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    } else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
               (init_attr->cap.max_send_wr > 255 ||
                init_attr->cap.max_recv_wr > 255 )) {
        ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
                 "or max_rq_wr=%x for QP Type=%x",
                 init_attr->cap.max_send_wr,
                 init_attr->cap.max_recv_wr,init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    } else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
               init_attr->cap.max_send_wr > 255) {
        ehca_err(pd->device,
                 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
                 init_attr->cap.max_send_wr, init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    }

    if (pd->uobject && udata)
        context = pd->uobject->context;

    my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
    if (!my_qp) {
        ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
        return ERR_PTR(-ENOMEM);
    }

    memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
    spin_lock_init(&my_qp->spinlock_s);
    spin_lock_init(&my_qp->spinlock_r);

    my_qp->recv_cq =
        container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
    my_qp->send_cq =
        container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

    my_qp->init_attr = *init_attr;

    do {
        if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
            ret = -ENOMEM;
            ehca_err(pd->device, "Can't reserve idr resources.");
            goto create_qp_exit0;
        }

        spin_lock_irqsave(&ehca_qp_idr_lock, flags);
        ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
        spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

    } while (ret == -EAGAIN);

    if (ret) {
        ret = -ENOMEM;
        ehca_err(pd->device, "Can't allocate new idr entry.");
        goto create_qp_exit0;
    }

    parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
    if (parms.servicetype < 0) {
        ret = -EINVAL;
        ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
        goto create_qp_exit0;
    }

    if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
        parms.sigtype = HCALL_SIGT_EVERY;
    else
        parms.sigtype = HCALL_SIGT_BY_WQE;

    /* UD_AV CIRCUMVENTION */
    max_send_sge = init_attr->cap.max_send_sge;
    max_recv_sge = init_attr->cap.max_recv_sge;
    if (IB_QPT_UD == init_attr->qp_type ||
            IB_QPT_GSI == init_attr->qp_type ||
            IB_QPT_SMI == init_attr->qp_type) {
        max_send_sge += 2;
        max_recv_sge += 2;
    }

    parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
    parms.daqp_ctrl = isdaqp | daqp_completion;
    parms.pd = my_pd->fw_pd;
    parms.max_recv_sge = max_recv_sge;
    parms.max_send_sge = max_send_sge;

    h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);

    if (h_ret != H_SUCCESS) {
        ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
                 h_ret);
        ret = ehca2ib_return_code(h_ret);
        goto create_qp_exit1;
    }

    switch (init_attr->qp_type) {
    case IB_QPT_RC:
        if (isdaqp == 0) {
            swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
                                     (parms.act_nr_send_sges)]);
            rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
                                     (parms.act_nr_recv_sges)]);
        } else { /* for daqp we need to use msg size, not wqe size */
Ejemplo n.º 20
0
/*
 * ambsync_proc_open - open handler for an ambsync proc entry
 * @inode: the proc inode being opened
 * @file: the file being set up
 *
 * Validates the handler info attached to the proc entry, allocates a
 * per-open pinfo structure, reserves one of the 32 reader slots via the
 * entry's idr, and attaches the pinfo (with a scratch page) to the file.
 * Returns 0 on success or a negative errno.
 */
int ambsync_proc_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry		*entry;
	struct ambsync_proc_hinfo	*hinfo;
	struct ambsync_proc_pinfo	*info;
	int				slot;
	int				err = 0;

	entry = PDE(inode);
	hinfo = (struct ambsync_proc_hinfo *)entry->data;

	/* Refuse the open when the handler info is missing or claims more
	 * ids than the subsystem supports. */
	if (!hinfo || hinfo->maxid > AMBA_SYNC_PROC_MAX_ID) {
		err = -EPERM;
		goto done;
	}

	/* A second open on the same struct file must not re-init state. */
	if (file->private_data) {
		err = -EPERM;
		goto done;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto done;
	}
	memset(info, 0, sizeof(*info));

	if (idr_pre_get(&hinfo->sync_proc_idr, GFP_KERNEL) == 0) {
		err = -ENOMEM;
		goto free_info;
	}
	mutex_lock(&hinfo->sync_proc_lock);
	err = idr_get_new_above(&hinfo->sync_proc_idr, info, 0, &slot);
	mutex_unlock(&hinfo->sync_proc_lock);
	if (err != 0)
		goto free_info;

	/* Only 32 concurrent readers fit in the per-reader bitmask. */
	if (slot > 31) {
		err = -ENOMEM;
		goto drop_slot;
	}

	info->page = (char *)__get_free_page(GFP_KERNEL);
	if (!info->page) {
		err = -ENOMEM;
		goto drop_slot;
	}
	info->id = slot;
	info->mask = (0x01 << slot);

	file->private_data = info;
	file->f_version = 0;
	file->f_mode &= ~FMODE_PWRITE;

	goto done;

drop_slot:
	mutex_lock(&hinfo->sync_proc_lock);
	idr_remove(&hinfo->sync_proc_idr, slot);
	mutex_unlock(&hinfo->sync_proc_lock);

free_info:
	kfree(info);

done:
	return err;
}