int init_display_pm(struct display_driver *dispdrv)
{
	init_display_pm_status(dispdrv);

	spin_lock_init(&dispdrv->pm_status.slock);
	mutex_init(&dispdrv->pm_status.pm_lock);
	mutex_init(&dispdrv->pm_status.clk_lock);
#ifdef CONFIG_FB_HIBERNATION_DISPLAY
	set_default_hibernation_mode(dispdrv);
#else
	dispdrv->pm_status.clock_gating_on = false;
	dispdrv->pm_status.power_gating_on = false;
	dispdrv->pm_status.hotplug_gating_on = false;
#endif

	init_kthread_worker(&dispdrv->pm_status.control_clock_gating);

	dispdrv->pm_status.control_clock_gating_thread = kthread_run(kthread_worker_fn,
			&dispdrv->pm_status.control_clock_gating,
			"decon_clk_thread");
	if (IS_ERR(dispdrv->pm_status.control_clock_gating_thread)) {
		int err = PTR_ERR(dispdrv->pm_status.control_clock_gating_thread);
		dispdrv->pm_status.control_clock_gating_thread = NULL;

		pr_err("failed to run control_clock_gating_thread\n");
		return err;
	}
	init_kthread_work(&dispdrv->pm_status.control_clock_gating_work,
		decon_clock_gating_handler);

	init_kthread_worker(&dispdrv->pm_status.control_power_gating);

	dispdrv->pm_status.control_power_gating_thread = kthread_run(kthread_worker_fn,
			&dispdrv->pm_status.control_power_gating,
			"decon_power_thread");
	if (IS_ERR(dispdrv->pm_status.control_power_gating_thread)) {
		int err = PTR_ERR(dispdrv->pm_status.control_power_gating_thread);
		dispdrv->pm_status.control_power_gating_thread = NULL;

		pr_err("failed to run control_power_gating_thread\n");
		/* don't leak the clock gating thread created above */
		kthread_stop(dispdrv->pm_status.control_clock_gating_thread);
		dispdrv->pm_status.control_clock_gating_thread = NULL;
		return err;
	}
	init_kthread_work(&dispdrv->pm_status.control_power_gating_work,
		decon_power_gating_handler);

#ifdef CONFIG_FB_HIBERNATION_DISPLAY
	dispdrv->pm_status.ops = &display_block_ops;
	dispdrv->decon_driver.ops = &decon_pm_ops;
	dispdrv->dsi_driver.ops = &dsi_pm_ops;
#ifdef CONFIG_DECON_MIC
	dispdrv->mic_driver.ops = &mic_pm_ops;
#endif
#endif
	return 0;
}
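Every example on this page follows the same pattern: initialize a kthread_worker, spawn a dedicated thread running kthread_worker_fn(), then initialize one or more kthread_work items to queue on it. Below is a minimal, self-contained sketch of that pattern (pre-4.9 API names; in v4.9 these helpers were renamed kthread_init_worker(), kthread_queue_work(), and kthread_flush_worker()). The names my_ctx, my_work_fn, and "my_worker" are illustrative, not taken from any example here.

#include <linux/err.h>
#include <linux/kthread.h>

struct my_ctx {
	struct kthread_worker worker;
	struct task_struct *worker_task;
	struct kthread_work work;
};

/* Runs in worker_task context; items on one worker execute serially. */
static void my_work_fn(struct kthread_work *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	(void)ctx;	/* do the deferred work here */
}

static int my_ctx_start(struct my_ctx *ctx)
{
	init_kthread_worker(&ctx->worker);
	ctx->worker_task = kthread_run(kthread_worker_fn, &ctx->worker,
				       "my_worker");
	if (IS_ERR(ctx->worker_task))	/* kthread_run() never returns NULL */
		return PTR_ERR(ctx->worker_task);

	init_kthread_work(&ctx->work, my_work_fn);
	queue_kthread_work(&ctx->worker, &ctx->work);
	return 0;
}

static void my_ctx_stop(struct my_ctx *ctx)
{
	flush_kthread_worker(&ctx->worker);	/* wait for queued work */
	kthread_stop(ctx->worker_task);
}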
Example #2
static struct mcuio_soft_hc *__setup_shc(const struct mcuio_soft_hc_ops *ops,
					 void *priv)
{
	struct mcuio_soft_hc *shc = kzalloc(sizeof(*shc), GFP_KERNEL);
	if (!shc)
		return ERR_PTR(-ENOMEM);
	init_kthread_worker(&shc->irq_kworker);
	shc->irq_kworker_task = kthread_run(kthread_worker_fn,
					    &shc->irq_kworker,
					    "shc_irq");
	if (IS_ERR(shc->irq_kworker_task)) {
		int err = PTR_ERR(shc->irq_kworker_task);

		pr_err("failed to create irq task for shc\n");
		kfree(shc);	/* don't leak shc on failure */
		return ERR_PTR(err);
	}
	init_kthread_work(&shc->do_irq, __do_irq);
	shc->ops = ops;
	shc->priv = priv;
	shc->rx_circ_buf.head = shc->rx_circ_buf.tail = 0;
	shc->rx_circ_buf.buf = shc->rx_buf;
	shc->chip.name = "MCUIO-SHC";
	shc->chip.irq_mask = mcuio_soft_hc_irq_mask;
	shc->chip.irq_unmask = mcuio_soft_hc_irq_unmask;
	shc->irqno = irq_alloc_desc(0);
	if (shc->irqno < 0) {
		int err = shc->irqno;

		kthread_stop(shc->irq_kworker_task);
		kfree(shc);
		return ERR_PTR(err);
	}
	irq_set_chip(shc->irqno, &shc->chip);
	irq_set_handler(shc->irqno, &handle_simple_irq);
	irq_modify_status(shc->irqno,
			  IRQ_NOREQUEST | IRQ_NOAUTOEN,
			  IRQ_NOPROBE);
	return shc;
}
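For completeness, a plausible sketch of the work handler wired up above: queuing shc->do_irq lets the worker thread deliver the software-raised interrupt from a sleepable context. This body is a guess based only on the fields in the example, not the real mcuio implementation.

static void __do_irq(struct kthread_work *work)
{
	struct mcuio_soft_hc *shc =
		container_of(work, struct mcuio_soft_hc, do_irq);

	/* hand the soft interrupt to whoever requested shc->irqno */
	handle_nested_irq(shc->irqno);
}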
Example #3
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		init_kthread_work(&tio->work, map_tio_request);
}
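For context, a sketch of the dispatch side: in the old, non-blk-mq request path, the tio work initialized above is queued onto the mapped device's worker, so map_tio_request() runs in md->kworker_task context. The helper name dm_dispatch_tio is illustrative; the md->kworker field is assumed from the surrounding dm.c code.

static void dm_dispatch_tio(struct mapped_device *md,
			    struct dm_rq_target_io *tio)
{
	/* map_tio_request() will be called by the worker thread */
	queue_kthread_work(&md->kworker, &tio->work);
}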
Example #4
static int __devinit bq2419x_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct bq2419x_chip *bq2419x;
	struct bq2419x_platform_data *pdata;
	int ret = 0;

	pdata = client->dev.platform_data;
	if (!pdata) {
		dev_err(&client->dev, "No Platform data");
		return -EINVAL;
	}

	bq2419x = devm_kzalloc(&client->dev, sizeof(*bq2419x), GFP_KERNEL);
	if (!bq2419x) {
		dev_err(&client->dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	bq2419x->regmap = devm_regmap_init_i2c(client, &bq2419x_regmap_config);
	if (IS_ERR(bq2419x->regmap)) {
		ret = PTR_ERR(bq2419x->regmap);
		dev_err(&client->dev, "regmap init failed with err %d\n", ret);
		return ret;
	}

	bq2419x->dev = &client->dev;

	if (pdata->bcharger_pdata) {
		bq2419x->update_status	= pdata->bcharger_pdata->update_status;
		bq2419x->rtc_alarm_time	= pdata->bcharger_pdata->rtc_alarm_time;
		bq2419x->wdt_time_sec	= pdata->bcharger_pdata->wdt_timeout;
		bq2419x->chg_restart_time =
					pdata->bcharger_pdata->chg_restart_time;
		bq2419x->chg_enable	= true;
	}

	bq2419x->wdt_refresh_timeout = 25;
	i2c_set_clientdata(client, bq2419x);
	bq2419x->irq = client->irq;

	if (bq2419x->rtc_alarm_time)
		bq2419x->rtc = alarmtimer_get_rtcdev();

	mutex_init(&bq2419x->mutex);
	bq2419x->suspended = 0;
	bq2419x->chg_restart_timeout = 0;

	ret = bq2419x_show_chip_version(bq2419x);
	if (ret < 0) {
		dev_err(&client->dev, "version read failed %d\n", ret);
		return ret;
	}

	ret = bq2419x_charger_init(bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "Charger init failed: %d\n", ret);
		return ret;
	}

	ret = bq2419x_init_charger_regulator(bq2419x, pdata);
	if (ret < 0) {
		dev_err(&client->dev,
			"Charger regulator init failed %d\n", ret);
		return ret;
	}

	ret = bq2419x_init_vbus_regulator(bq2419x, pdata);
	if (ret < 0) {
		dev_err(&client->dev,
			"VBUS regulator init failed %d\n", ret);
		goto scrub_chg_reg;
	}

	init_kthread_worker(&bq2419x->bq_kworker);
	bq2419x->bq_kworker_task = kthread_run(kthread_worker_fn,
				&bq2419x->bq_kworker,
				dev_name(bq2419x->dev));
	if (IS_ERR(bq2419x->bq_kworker_task)) {
		ret = PTR_ERR(bq2419x->bq_kworker_task);
		dev_err(&client->dev, "Kworker task creation failed %d\n", ret);
		goto scrub_vbus_reg;
	}

	init_kthread_work(&bq2419x->bq_wdt_work, bq2419x_work_thread);
	sched_setscheduler(bq2419x->bq_kworker_task,
			SCHED_FIFO, &bq2419x_param);
	queue_kthread_work(&bq2419x->bq_kworker, &bq2419x->bq_wdt_work);

	ret = bq2419x_watchdog_init(bq2419x, bq2419x->wdt_time_sec, "PROBE");
	if (ret < 0) {
		dev_err(bq2419x->dev, "BQWDT init failed %d\n", ret);
		goto scrub_kthread;
	}

	ret = bq2419x_fault_clear_sts(bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "fault clear status failed %d\n", ret);
		goto scrub_kthread;
	}

	ret = request_threaded_irq(bq2419x->irq, NULL,
		bq2419x_irq, IRQF_TRIGGER_FALLING,
			dev_name(bq2419x->dev), bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "request IRQ %d fail, err = %d\n",
				bq2419x->irq, ret);
		goto scrub_kthread;
	}

	/* enable charging */
	ret = bq2419x_charger_enable(bq2419x);
	if (ret < 0)
		goto scrub_irq;

	return 0;
scrub_irq:
	free_irq(bq2419x->irq, bq2419x);
scrub_kthread:
	bq2419x->stop_thread = true;
	flush_kthread_worker(&bq2419x->bq_kworker);
	kthread_stop(bq2419x->bq_kworker_task);
scrub_vbus_reg:
	regulator_unregister(bq2419x->vbus_rdev);
scrub_chg_reg:
	regulator_unregister(bq2419x->chg_rdev);
	mutex_destroy(&bq2419x->mutex);
	return ret;
}
Example #5
File: cq.c Project: AK101111/linux
/**
 * rvt_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @context: unused by the QLogic_IB driver
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: pointer to the completion queue or negative errno values
 * for failure.
 */
struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq;
	struct rvt_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;
	unsigned int entries = attr->cqe;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return ERR_PTR(-EINVAL);

	/* Allocate the completion queue structure. */
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	}

	spin_lock(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock(&rdi->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	init_kthread_work(&cq->comptask, send_complete);
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
Example #6
int fimc_is_lib_vra_init_task(struct fimc_is_lib_vra *lib_vra)
{
	s32 ret = 0, cnt = 0;
	struct sched_param param = {.sched_priority = 0};

	if (unlikely(!lib_vra)) {
		err_lib("VRA library is NULL");
		return -EINVAL;
	}

	spin_lock_init(&lib_vra->task_vra.work_lock);
	init_kthread_worker(&lib_vra->task_vra.worker);

	lib_vra->task_vra.task = kthread_run(kthread_worker_fn,
		&lib_vra->task_vra.worker, "fimc_is_lib_vra");
	if (IS_ERR(lib_vra->task_vra.task)) {
		/* kthread_run() returns ERR_PTR() on failure, never NULL */
		err_lib("failed to create VRA task");
		return PTR_ERR(lib_vra->task_vra.task);
	}

	ret = sched_setscheduler_nocheck(lib_vra->task_vra.task,
		SCHED_NORMAL, &param);
	if (ret) {
		err("sched_setscheduler_nocheck failed (%d)", ret);
		kthread_stop(lib_vra->task_vra.task);
		return ret;
	}

	lib_vra->task_vra.work_index = 0;

	for (cnt = 0; cnt < FIMC_IS_MAX_TASK; cnt++) {
		lib_vra->task_vra.work[cnt].func = NULL;
		lib_vra->task_vra.work[cnt].params = NULL;
		init_kthread_work(&lib_vra->task_vra.work[cnt].work,
			fimc_is_lib_vra_task_work);
	}

	return 0;
}

void fimc_is_lib_vra_control_set_event(u32 event_type)
{
	int ret;
	struct fimc_is_lib_vra *lib_vra;

	if (unlikely(!g_lib_vra)) {
		err_lib("VRA library is NULL");
		return;
	}

	lib_vra = g_lib_vra;

	switch (event_type) {
	case CTRL_TASK_SET_CH0_INT:
		lib_vra->ctl_task_type = CTRL_TASK_SET_CH0_INT;
		fimc_is_lib_vra_task_trigger(lib_vra,
			fimc_is_lib_vra_invoke_contol_event);
		break;
	case CTRL_TASK_SET_CH1_INT:
		lib_vra->ctl_task_type = CTRL_TASK_SET_CH1_INT;
		fimc_is_lib_vra_task_trigger(lib_vra,
			fimc_is_lib_vra_invoke_contol_event);
		break;
	case CTRL_TASK_SET_NEWFR:
		lib_vra->ctl_task_type = CTRL_TASK_SET_NEWFR;
		ret = fimc_is_lib_vra_invoke_contol_event(lib_vra);
		if (ret) {
			err_lib("vra control set is fail(%#x)", ret);
			return;
		}
		break;
	case CTRL_TASK_SET_ABORT:
		lib_vra->ctl_task_type = CTRL_TASK_SET_ABORT;
		ret = fimc_is_lib_vra_invoke_contol_event(lib_vra);
		if (ret) {
			err_lib("vra control set is fail(%d)", ret);
			return;
		}
		break;
	case CTRL_TASK_SET_FWALGS:
		lib_vra->ctl_task_type = CTRL_TASK_SET_FWALGS;
		fimc_is_lib_vra_task_trigger(lib_vra,
			fimc_is_lib_vra_invoke_contol_event);
		break;
	default:
		err_lib("vra_control_set_event is undefine (%d)", event_type);
		break;
	}
}
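fimc_is_lib_vra_task_trigger(), called above, is not shown in these examples. Based only on the fields initialized in fimc_is_lib_vra_init_task() (work_lock, work_index, work[].func, work[].params), a hypothetical sketch of how the trigger side could queue a control event onto the worker:

/* Hypothetical: not the actual fimc_is_lib_vra_task_trigger() body. */
static void fimc_is_lib_vra_task_trigger(struct fimc_is_lib_vra *lib_vra,
	int (*func)(struct fimc_is_lib_vra *lib_vra))
{
	u32 index;
	unsigned long flags;

	spin_lock_irqsave(&lib_vra->task_vra.work_lock, flags);
	index = lib_vra->task_vra.work_index++ % FIMC_IS_MAX_TASK;
	lib_vra->task_vra.work[index].func = (void *)func;
	lib_vra->task_vra.work[index].params = lib_vra;
	spin_unlock_irqrestore(&lib_vra->task_vra.work_lock, flags);

	queue_kthread_work(&lib_vra->task_vra.worker,
		&lib_vra->task_vra.work[index].work);
}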