Code example #1
/* disp_pm_sched_power_on - called at the start of fb_ioctl
 * to exit HDM */
int disp_pm_sched_power_on(struct display_driver *dispdrv, unsigned int cmd)
{
	struct s3c_fb *sfb = dispdrv->decon_driver.sfb;

	init_gating_idle_count(dispdrv);

	if (dispdrv->platform_status < DISP_STATUS_PM1) {
		if (cmd == S3CFB_WIN_CONFIG)
			disp_pm_init_status(dispdrv);
	}

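	/* let any queued gating work finish so power_state below is current */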
	flush_kthread_worker(&dispdrv->pm_status.control_power_gating);
	if (sfb->power_state == POWER_HIBER_DOWN) {
		switch (cmd) {
		case S3CFB_WIN_PSR_EXIT:
		case S3CFB_WIN_CONFIG:
			queue_kthread_work(&dispdrv->pm_status.control_power_gating,
				&dispdrv->pm_status.control_power_gating_work);
			break;
		default:
			return -EBUSY;
		}
	}

	return 0;
}
Code example #2
/* disp_pm_sched_power_on - called at the start of fb_ioctl
 * to exit HDM */
int disp_pm_sched_power_on(struct display_driver *dispdrv, unsigned int cmd)
{
	struct s3c_fb *sfb = dispdrv->decon_driver.sfb;

	init_gating_idle_count(dispdrv);

	/* Enable clock and power-gating starting with the first WIN_CONFIG */
	if (dispdrv->platform_status < DISP_STATUS_PM1) {
		if (cmd == S3CFB_WIN_CONFIG)
			disp_pm_set_plat_status(dispdrv, true);
	}

	flush_kthread_worker(&dispdrv->pm_status.control_power_gating);
	if (sfb->power_state == POWER_HIBER_DOWN) {
		switch (cmd) {
		case S3CFB_PLATFORM_RESET:
			disp_pm_gate_lock(dispdrv, true);
			queue_kthread_work(&dispdrv->pm_status.control_power_gating,
				&dispdrv->pm_status.control_power_gating_work);
			/* Prevent next clock and power-gating */
			disp_pm_set_plat_status(dispdrv, false);
			break;
		case S3CFB_WIN_PSR_EXIT:
		case S3CFB_WIN_CONFIG:
		case S3CFB_SET_VSYNC_INT:
			request_dynamic_hotplug(false);
			disp_pm_gate_lock(dispdrv, true);
			queue_kthread_work(&dispdrv->pm_status.control_power_gating,
				&dispdrv->pm_status.control_power_gating_work);
			break;
		default:
			return -EBUSY;
		}
	} else {
		switch (cmd) {
		case S3CFB_PLATFORM_RESET:
			/* Prevent next clock and power-gating */
			disp_pm_set_plat_status(dispdrv, false);
			break;
		}
	}

	return 0;
}
Code example #3
File: sec_os_booster.c Project: ColinIanKing/m576
int mc_timer(void)
{
	struct timer_work t_work = {
		KTHREAD_WORK_INIT(t_work.work, mc_timer_work_func),
	};

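	/* queue_kthread_work() returns false if the work is already pending */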
	if (!queue_kthread_work(&mc_timer_worker, &t_work.work))
		return false;

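	/* t_work lives on this stack frame, so wait for the handler to
	 * finish before returning */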
	flush_kthread_work(&t_work.work);
	return true;
}
Code example #4
/* disp_pm_te_triggered - decide whether clock gating should run;
 * called from the TE interrupt handler */
void disp_pm_te_triggered(struct display_driver *dispdrv)
{
	te_count++;

	if (!dispdrv->pm_status.clock_gating_on)
		return;

	spin_lock(&dispdrv->pm_status.slock);
	if (dispdrv->platform_status > DISP_STATUS_PM0 &&
		atomic_read(&dispdrv->pm_status.lock_count) == 0) {
		if (dispdrv->pm_status.clock_enabled) {
			if (!dispdrv->pm_status.trigger_masked)
				enable_mask(dispdrv);
		}

		if (dispdrv->pm_status.clock_enabled &&
			MAX_CLK_GATING_COUNT > 0) {
			if (!dispdrv->pm_status.trigger_masked) {
				enable_mask(dispdrv);
			}

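			/* count idle TEs and hand clock gating off to the
			 * worker once the threshold is crossed */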
			++dispdrv->pm_status.clk_idle_count;
			if (dispdrv->pm_status.clk_idle_count > MAX_CLK_GATING_COUNT) {
				disp_pm_gate_lock(dispdrv, true);
				pm_debug("display_block_clock_off +");
				queue_kthread_work(&dispdrv->pm_status.control_clock_gating,
						&dispdrv->pm_status.control_clock_gating_work);
			}
		} else {
			++dispdrv->pm_status.pwr_idle_count;
			if (dispdrv->pm_status.power_gating_on &&
				dispdrv->pm_status.pwr_idle_count > MAX_PWR_GATING_COUNT) {
				queue_kthread_work(&dispdrv->pm_status.control_power_gating,
						&dispdrv->pm_status.control_power_gating_work);
			}
		}

	}
	spin_unlock(&dispdrv->pm_status.slock);
}
Code example #5
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		queue_kthread_work(&md->kworker, &tio->work);
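		/* ->request_fn runs under the queue lock with IRQs disabled */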
		BUG_ON(!irqs_disabled());
	}
}
Code example #6
File: mcuio-soft-hc.c Project: artynet/linux-3.3.8
int mcuio_soft_hc_push_chars(struct mcuio_soft_hc *shc, const u8 *in, int len)
{
	int s = sizeof(shc->rx_buf), available, actual;
	struct circ_buf *buf = &shc->rx_circ_buf;
	available = CIRC_SPACE_TO_END(buf->head, buf->tail, s);
	if (available < sizeof(u32)) {
		pr_debug("%s %d\n", __func__, __LINE__);
		return -EAGAIN;
	}
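	/* copy as much as fits; rx_buf is a power-of-two circular buffer,
	 * so the head index wraps with a simple (s - 1) mask */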
	actual = min(len, available);
	memcpy(&buf->buf[buf->head], in, actual);
	buf->head = (buf->head + actual) & (s - 1);
	/* set irq status register RX_RDY bit */
	shc->irqstat |= RX_RDY;
	if (shc->irq_enabled)
		queue_kthread_work(&shc->irq_kworker, &shc->do_irq);
	return actual;
}
Code example #7
/* disp_pm_te_triggered - decide whether power gating should run;
 * called from the TE interrupt handler */
void disp_pm_te_triggered(struct display_driver *dispdrv)
{
	te_count++;

	if (!dispdrv->pm_status.power_gating_on)
		return;

	spin_lock(&dispdrv->pm_status.slock);
	if (dispdrv->platform_status > DISP_STATUS_PM0 &&
		atomic_read(&dispdrv->pm_status.lock_count) == 0) {
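		/* count idle TE interrupts; schedule power gating once the
		 * threshold is exceeded */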
		++dispdrv->pm_status.pwr_idle_count;
		if (dispdrv->pm_status.power_gating_on &&
			dispdrv->pm_status.pwr_idle_count > MAX_PWR_GATING_COUNT) {
			disp_pm_gate_lock(dispdrv, true);
			queue_kthread_work(&dispdrv->pm_status.control_power_gating,
					&dispdrv->pm_status.control_power_gating_work);
		}

	}
	spin_unlock(&dispdrv->pm_status.slock);
}
Code example #8
void fimc_is_lib_vra_task_trigger(struct fimc_is_lib_vra *lib_vra,
	void *func)
{
	u32 work_index = 0;
	struct fimc_is_lib_task *task_vra;

	if (unlikely(!lib_vra)) {
		err_lib("VRA library is NULL");
		return;
	}

	task_vra = &lib_vra->task_vra;

	spin_lock(&task_vra->work_lock);

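	/* claim the next slot of the circular work array under work_lock;
	 * the queueing itself happens after the lock is dropped */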
	task_vra->work[task_vra->work_index % FIMC_IS_MAX_TASK].func = func;
	task_vra->work[task_vra->work_index % FIMC_IS_MAX_TASK].params = lib_vra;
	task_vra->work_index++;
	work_index = (task_vra->work_index - 1) % FIMC_IS_MAX_TASK;

	spin_unlock(&task_vra->work_lock);

	queue_kthread_work(&task_vra->worker, &task_vra->work[work_index].work);
}
Code example #9
File: skl-sst-ipc.c Project: DenisLug/mptcp
irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
{
	struct sst_dsp *dsp = context;
	struct skl_sst *skl = sst_dsp_get_thread_context(dsp);
	struct sst_generic_ipc *ipc = &skl->ipc;
	struct skl_ipc_header header = {0};
	u32 hipcie, hipct, hipcte;
	int ipc_irq = 0;

	if (dsp->intr_status & SKL_ADSPIS_CL_DMA)
		skl_cldma_process_intr(dsp);

	/* Here we handle IPC interrupts only */
	if (!(dsp->intr_status & SKL_ADSPIS_IPC))
		return IRQ_NONE;

	hipcie = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCIE);
	hipct = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCT);

	/* reply message from DSP */
	if (hipcie & SKL_ADSP_REG_HIPCIE_DONE) {
		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
			SKL_ADSP_REG_HIPCCTL_DONE, 0);

		/* clear DONE bit - tell DSP we have completed the operation */
		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCIE,
			SKL_ADSP_REG_HIPCIE_DONE, SKL_ADSP_REG_HIPCIE_DONE);

		ipc_irq = 1;

		/* unmask Done interrupt */
		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
			SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
	}

	/* New message from DSP */
	if (hipct & SKL_ADSP_REG_HIPCT_BUSY) {
		hipcte = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCTE);
		header.primary = hipct;
		header.extension = hipcte;
		dev_dbg(dsp->dev, "IPC irq: Firmware response primary:%x",
						header.primary);
		dev_dbg(dsp->dev, "IPC irq: Firmware response extension:%x",
						header.extension);

		if (IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
			/* Handle Immediate reply from DSP Core */
			skl_ipc_process_reply(ipc, header);
		} else {
			dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
			skl_ipc_process_notification(ipc, header);
		}
		/* clear busy interrupt */
		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCT,
			SKL_ADSP_REG_HIPCT_BUSY, SKL_ADSP_REG_HIPCT_BUSY);
		ipc_irq = 1;
	}

	if (ipc_irq == 0)
		return IRQ_NONE;

	skl_ipc_int_enable(dsp);

	/* continue to send any remaining messages... */
	queue_kthread_work(&ipc->kworker, &ipc->kwork);

	return IRQ_HANDLED;
}
Code example #10
/**
 * adf_device_post_nocopy - flip to a new set of buffers
 *
 * adf_device_post_nocopy() has the same behavior as adf_device_post(),
 * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
 * not take an extra reference on the dma-bufs in @bufs.
 *
 * @intfs, @bufs, and @custom_data must point to buffers allocated by
 * kmalloc().  On success, ADF takes ownership of these buffers and the dma-bufs
 * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
 * On failure, adf_device_post_nocopy() does NOT take ownership of these
 * buffers or the dma-bufs, and the caller must clean them up.
 *
 * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
 * Clients may find the nocopy variant useful in limited cases, but most should
 * call adf_device_post() instead.
 */
struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs,
		void *custom_data, size_t custom_data_size)
{
	struct adf_pending_post *cfg;
	struct adf_buffer_mapping *mappings;
	struct sync_fence *ret;
	size_t i;
	int err;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
	if (!mappings) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	mutex_lock(&dev->client_lock);

	for (i = 0; i < n_bufs; i++) {
		err = adf_buffer_validate(&bufs[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}

		err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}
	}

	INIT_LIST_HEAD(&cfg->head);
	cfg->config.n_bufs = n_bufs;
	cfg->config.bufs = bufs;
	cfg->config.mappings = mappings;
	cfg->config.custom_data = custom_data;
	cfg->config.custom_data_size = custom_data_size;

	err = dev->ops->validate(dev, &cfg->config, &cfg->state);
	if (err < 0) {
		ret = ERR_PTR(err);
		goto err_buf;
	}

	mutex_lock(&dev->post_lock);

	if (dev->ops->complete_fence)
		ret = dev->ops->complete_fence(dev, &cfg->config,
				cfg->state);
	else
		ret = adf_sw_complete_fence(dev);

	if (IS_ERR(ret))
		goto err_fence;

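	/* queue the validated config for the post worker thread, which
	 * applies the flip asynchronously */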
	list_add_tail(&cfg->head, &dev->post_list);
	queue_kthread_work(&dev->post_worker, &dev->post_work);
	mutex_unlock(&dev->post_lock);
	mutex_unlock(&dev->client_lock);
	kfree(intfs);
	return ret;

err_fence:
	mutex_unlock(&dev->post_lock);

err_buf:
	for (i = 0; i < n_bufs; i++)
		adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);

	mutex_unlock(&dev->client_lock);
	kfree(mappings);

err_alloc:
	kfree(cfg);
	return ret;
}
Code example #11
static int __devinit bq2419x_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct bq2419x_chip *bq2419x;
	struct bq2419x_platform_data *pdata;
	int ret = 0;

	pdata = client->dev.platform_data;
	if (!pdata) {
		dev_err(&client->dev, "No platform data\n");
		return -EINVAL;
	}

	bq2419x = devm_kzalloc(&client->dev, sizeof(*bq2419x), GFP_KERNEL);
	if (!bq2419x) {
		dev_err(&client->dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	bq2419x->regmap = devm_regmap_init_i2c(client, &bq2419x_regmap_config);
	if (IS_ERR(bq2419x->regmap)) {
		ret = PTR_ERR(bq2419x->regmap);
		dev_err(&client->dev, "regmap init failed with err %d\n", ret);
		return ret;
	}

	bq2419x->dev = &client->dev;

	if (pdata->bcharger_pdata) {
		bq2419x->update_status	= pdata->bcharger_pdata->update_status;
		bq2419x->rtc_alarm_time	= pdata->bcharger_pdata->rtc_alarm_time;
		bq2419x->wdt_time_sec	= pdata->bcharger_pdata->wdt_timeout;
		bq2419x->chg_restart_time =
					pdata->bcharger_pdata->chg_restart_time;
		bq2419x->chg_enable	= true;
	}

	bq2419x->wdt_refresh_timeout = 25;
	i2c_set_clientdata(client, bq2419x);
	bq2419x->irq = client->irq;

	if (bq2419x->rtc_alarm_time)
		bq2419x->rtc = alarmtimer_get_rtcdev();

	mutex_init(&bq2419x->mutex);
	bq2419x->suspended = 0;
	bq2419x->chg_restart_timeout = 0;

	ret = bq2419x_show_chip_version(bq2419x);
	if (ret < 0) {
		dev_err(&client->dev, "version read failed %d\n", ret);
		return ret;
	}

	ret = bq2419x_charger_init(bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "Charger init failed: %d\n", ret);
		return ret;
	}

	ret = bq2419x_init_charger_regulator(bq2419x, pdata);
	if (ret < 0) {
		dev_err(&client->dev,
			"Charger regulator init failed %d\n", ret);
		return ret;
	}

	ret = bq2419x_init_vbus_regulator(bq2419x, pdata);
	if (ret < 0) {
		dev_err(&client->dev,
			"VBUS regulator init failed %d\n", ret);
		goto scrub_chg_reg;
	}

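	/* spawn a dedicated kthread to service works queued on bq_kworker */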
	init_kthread_worker(&bq2419x->bq_kworker);
	bq2419x->bq_kworker_task = kthread_run(kthread_worker_fn,
				&bq2419x->bq_kworker,
				dev_name(bq2419x->dev));
	if (IS_ERR(bq2419x->bq_kworker_task)) {
		ret = PTR_ERR(bq2419x->bq_kworker_task);
		dev_err(&client->dev, "Kworker task creation failed %d\n", ret);
		goto scrub_vbus_reg;
	}

	init_kthread_work(&bq2419x->bq_wdt_work, bq2419x_work_thread);
	sched_setscheduler(bq2419x->bq_kworker_task,
			SCHED_FIFO, &bq2419x_param);
	queue_kthread_work(&bq2419x->bq_kworker, &bq2419x->bq_wdt_work);

	ret = bq2419x_watchdog_init(bq2419x, bq2419x->wdt_time_sec, "PROBE");
	if (ret < 0) {
		dev_err(bq2419x->dev, "BQWDT init failed %d\n", ret);
		goto scrub_kthread;
	}

	ret = bq2419x_fault_clear_sts(bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "fault clear status failed %d\n", ret);
		goto scrub_kthread;
	}

	ret = request_threaded_irq(bq2419x->irq, NULL,
		bq2419x_irq, IRQF_TRIGGER_FALLING,
			dev_name(bq2419x->dev), bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "request IRQ %d fail, err = %d\n",
				bq2419x->irq, ret);
		goto scrub_kthread;
	}

	/* enable charging */
	ret = bq2419x_charger_enable(bq2419x);
	if (ret < 0)
		goto scrub_irq;

	return 0;
scrub_irq:
	free_irq(bq2419x->irq, bq2419x);
scrub_kthread:
	bq2419x->stop_thread = true;
	flush_kthread_worker(&bq2419x->bq_kworker);
	kthread_stop(bq2419x->bq_kworker_task);
scrub_vbus_reg:
	regulator_unregister(bq2419x->vbus_rdev);
scrub_chg_reg:
	regulator_unregister(bq2419x->chg_rdev);
	mutex_destroy(&bq2419x->mutex);
	return ret;
}
Code example #12
File: cq.c Project: AK101111/linux
/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 */
void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct rvt_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data =
			(__u32 __force)entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else {
		wc->kqueue[head] = *entry;
	}
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		struct kthread_worker *worker;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		smp_read_barrier_depends(); /* see rvt_cq_exit */
		worker = cq->rdi->worker;
		if (likely(worker)) {
			cq->notify = RVT_CQ_NONE;
			cq->triggered++;
			queue_kthread_work(worker, &cq->comptask);
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}
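
Every example above uses the pre-4.9 kthread-worker API (Linux 4.9 renamed these helpers to kthread_queue_work(), kthread_flush_worker(), and so on). The sketch below condenses the recurring lifecycle: initialize a worker, run kthread_worker_fn() on a dedicated kthread, queue works to it, and flush before stopping. The demo_* names are hypothetical, chosen for illustration; only the kernel API calls come from the examples.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static struct kthread_worker demo_worker;
static struct kthread_work demo_work;
static struct task_struct *demo_task;

/* executes in process context on the dedicated kthread */
static void demo_work_fn(struct kthread_work *work)
{
	pr_info("demo work executed\n");
}

static int demo_start(void)
{
	init_kthread_worker(&demo_worker);
	demo_task = kthread_run(kthread_worker_fn, &demo_worker, "demo_worker");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);

	init_kthread_work(&demo_work, demo_work_fn);
	/* safe from IRQ/atomic context: the worker lock is taken irqsave */
	queue_kthread_work(&demo_worker, &demo_work);
	return 0;
}

static void demo_stop(void)
{
	flush_kthread_worker(&demo_worker);	/* wait for queued works to finish */
	kthread_stop(demo_task);
}

This is the same setup, and the same flush-then-stop teardown order, that bq2419x_probe() in code example #11 walks through.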