Code example #1
File: irq_sim.c  Project: ReneNyffenegger/linux
/**
 * irq_sim_fire - Enqueue an interrupt.
 *
 * @sim:        The interrupt simulator object.
 * @offset:     Offset of the simulated interrupt which should be fired.
 */
void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
{
	if (sim->irqs[offset].enabled) {
		sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
		irq_work_queue(&sim->work_ctx.work);
	}
}
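
All of the call sites on this page assume an irq_work item that was set up beforehand. A minimal sketch of that setup, using init_irq_work() from <linux/irq_work.h>; the struct my_ctx, my_irq_work_fn and my_setup names are illustrative and not taken from any of the projects above:

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Illustrative context object embedding the irq_work item. */
struct my_ctx {
	struct irq_work work;
	int value;
};

/* Runs from hard interrupt context soon after irq_work_queue(). */
static void my_irq_work_fn(struct irq_work *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	pr_info("irq_work fired, value=%d\n", ctx->value);
}

static struct my_ctx ctx;

static void my_setup(void)
{
	/*
	 * Bind the callback; irq_work_queue(&ctx.work) may then be
	 * called from any context, including NMI.
	 */
	init_irq_work(&ctx.work, my_irq_work_fn);
}

Note that irq_work_queue() returns false when the work item is already pending, so queuing the same item several times before it runs results in a single callback invocation.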
Code example #2
static void yas_work_func(struct work_struct *work)
{
	struct yas_data acc[1];
	struct yas_state *st =
		container_of((struct delayed_work *)work,
				struct yas_state, work);
	uint32_t time_before, time_after;
	int32_t delay;
	int ret, i;

	time_before = jiffies_to_msecs(jiffies);
	mutex_lock(&st->lock);
	ret = st->acc.measure(acc, 1);
	if (ret == 1) {
		for (i = 0; i < 3; i++)
			st->accel_data[i]
				= acc[0].xyz.v[i] - st->calib_bias[i];
	}
	mutex_unlock(&st->lock);

	IIF("%s: acc(x, y, z) = (%d, %d, %d)\n", __func__,
		st->accel_data[0], st->accel_data[1], st->accel_data[2]);

	if (ret == 1)
		irq_work_queue(&st->iio_irq_work);
	time_after = jiffies_to_msecs(jiffies);
	delay = MSEC_PER_SEC / st->sampling_frequency
		- (time_after - time_before);
	if (delay <= 0)
		delay = 1;
	schedule_delayed_work(&st->work, msecs_to_jiffies(delay));
}
Code example #3
File: ring_buffer.c  Project: 33d/linux-2.6.21-hh20
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
Code example #4
File: iio-trig-sysfs.c  Project: 020gzh/linux
static ssize_t iio_sysfs_trigger_poll(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct iio_trigger *trig = to_iio_trigger(dev);
	struct iio_sysfs_trig *sysfs_trig = iio_trigger_get_drvdata(trig);

	irq_work_queue(&sysfs_trig->work);

	return count;
}
Code example #5
File: ras.c  Project: avagin/linux
/*
 * See if we can recover from a machine check exception.
 * This is only called on power4 (or above) and only via
 * the Firmware Non-Maskable Interrupts (fwnmi) handler
 * which provides the error analysis for us.
 *
 * Return 1 if corrected (or delivered a signal).
 * Return 0 if there is nothing we can do.
 */
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
	int recovered = 0;
	int disposition = rtas_error_disposition(err);

	pseries_print_mce_info(regs, err);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
		recovered = 0;

	} else if (disposition == RTAS_DISP_FULLY_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;

	} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
		/* Platform corrected itself but could be degraded */
		printk(KERN_ERR "MCE: limited recovery, system may "
		       "be degraded\n");
		recovered = 1;

	} else if (user_mode(regs) && !is_global_init(current) &&
		   rtas_error_severity(err) == RTAS_SEVERITY_ERROR_SYNC) {

		/*
		 * If we received a synchronous error when in userspace
		 * kill the task. Firmware may report details of the fail
		 * asynchronously, so we can't rely on the target and type
		 * fields being valid here.
		 */
		printk(KERN_ERR "MCE: uncorrectable error, killing task "
		       "%s:%d\n", current->comm, current->pid);

		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}

	pseries_process_ue(regs, err);

	/* Queue irq work to log this rtas event later. */
	irq_work_queue(&mce_errlog_process_work);

	return recovered;
}
Code example #6
File: mce.c  Project: CCNITSilchar/linux
/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}
Code example #7
File: mce.c  Project: 1800alex/linux
/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __get_cpu_var(mce_queue_count)++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__get_cpu_var(mce_queue_count)--;
		return;
	}
	__get_cpu_var(mce_event_queue[index]) = evt;

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}
Code example #8
/*
 * Kick the current CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
}
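
Example #8 uses the old __get_cpu_var() accessor, which was later removed from the kernel in favour of this_cpu_ptr() and related helpers. A minimal sketch of the same per-CPU irq_work pattern in the newer style; the names nohz_kick_work, nohz_kick_fn and kick_this_cpu are illustrative, not taken from the kernel sources:

#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>

/* One irq_work instance per CPU, so each CPU can kick itself. */
static DEFINE_PER_CPU(struct irq_work, nohz_kick_work);

static void nohz_kick_fn(struct irq_work *work)
{
	/* Re-evaluate whatever per-CPU state needs hard-IRQ context here. */
}

static void kick_this_cpu(void)
{
	/* Safe from NMI and IRQ context; the callback runs on this CPU. */
	irq_work_queue(this_cpu_ptr(&nohz_kick_work));
}

static int __init nohz_kick_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_irq_work(per_cpu_ptr(&nohz_kick_work, cpu), nohz_kick_fn);
	return 0;
}

irq_work_queue() always queues on the calling CPU, which is why the kick only needs this_cpu_ptr(); irq_work_queue_on() exists for targeting a specific remote CPU.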