Example #1
static irqreturn_t sun3_int7(int irq, void *dev_id, struct pt_regs *fp)
{
	*sun3_intreg |=  (1 << irq);
	if (!(kstat_cpu(0).irqs[irq] % 2000))
		sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 16000) / 2000]);
	return IRQ_HANDLED;
}
Example #2
static irqreturn_t sun3_int7(int irq, void *dev_id, struct pt_regs *fp)
{
	sun3_do_irq(irq, fp);
	if (!(kstat_cpu(0).irqs[SYS_IRQS + irq] % 2000))
		sun3_leds(led_pattern[(kstat_cpu(0).irqs[SYS_IRQS + irq] % 16000)
			  / 2000]);
	return IRQ_HANDLED;
}
Example #3
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 */
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
			else
				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		} else
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
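Example #4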
static void measure_cpu(void)
{
	int i;

	PDEBUG("cpu_probe: start.\n");

	/* note: this assumes online CPU ids are contiguous from 0 */
	for (i = 0; i < num_online_cpus(); i++) {
		cpu_used[i] =
			cputime64_to_clock_t(kstat_cpu(i).cpustat.user) +
			cputime64_to_clock_t(kstat_cpu(i).cpustat.nice) +
			cputime64_to_clock_t(kstat_cpu(i).cpustat.system) +
			cputime64_to_clock_t(kstat_cpu(i).cpustat.iowait) +
			cputime64_to_clock_t(kstat_cpu(i).cpustat.irq) +
			cputime64_to_clock_t(kstat_cpu(i).cpustat.softirq) +
			cputime64_to_clock_t(kstat_cpu(i).cpustat.steal);
		cpu_total[i] = cpu_used[i] +
			cputime64_to_clock_t(kstat_cpu(i).cpustat.idle);

		//spin_lock( &(probe_data[i].lock) );
		probe_data[i].cpu_used = cpu_used[i] - cpu_used_prev[i];
		probe_data[i].cpu_total = cpu_total[i] - cpu_total_prev[i];
		//spin_unlock( &(probe_data[i].lock) );

		PDEBUG("measurements for CPU%d: used=%llu total=%llu\n",
			i, (unsigned long long)cpu_used[i],
			(unsigned long long)cpu_total[i]);

		cpu_used_prev[i] = cpu_used[i];
		cpu_total_prev[i] = cpu_total[i];
	}

	PDEBUG("cpu_probe: done\n.");
}
Example #5
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	*sun3_intreg |=  (1 << irq);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	if (!(kstat_cpu(0).irqs[irq] % 20))
		sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
	return IRQ_HANDLED;
}
Example #6
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));

	return 0;
}
Example #7
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < sh_mv.mv_nr_irqs) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;
		seq_printf(p, "%3d: ",i);
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == sh_mv.mv_nr_irqs)
		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));

	return 0;
}
Example #8
/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(desc->enabled)) {
		kstat_cpu(cpu).irqs[irq]++;

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			__do_irq(irq, desc->action, regs);

			if (likely(desc->enabled &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}
	}
}
Example #9
static int uptime_proc_show(struct seq_file *m, void *v)
{
	struct timespec calc_uptime;
	struct timespec calc_idle;
	int i;
	cputime_t calc_idletime = cputime_zero;

	if (!uptime)
	{
		do_posix_clock_monotonic_gettime(&calc_uptime);
		monotonic_to_bootbased(&calc_uptime);

		for_each_possible_cpu(i)
			calc_idletime = cputime64_add(calc_idletime, kstat_cpu(i).cpustat.idle);
		cputime_to_timespec(calc_idletime, &calc_idle);
	}
	else
	{
		calc_uptime.tv_sec = uptime * HZ + jiffies - startjiffies;
		calc_uptime.tv_nsec = 0;
		calc_idle.tv_sec = idletime * HZ + jiffies - startjiffies;
		calc_idle.tv_nsec = 0;
	}

	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
			(unsigned long) calc_uptime.tv_sec,
			(calc_uptime.tv_nsec / (NSEC_PER_SEC / 100)),
			(unsigned long) calc_idle.tv_sec,
			(calc_idle.tv_nsec / (NSEC_PER_SEC / 100)));
	return 0;
}
Example #10
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v;
	struct irqaction * action;
	unsigned long flags;

	if (i < NR_IRQS) {
		local_irq_save(flags);
		action = irq_action[i];
		if (!action) 
			goto skip;
		seq_printf(p, "%2d: %10u %c %s",
			i, kstat_cpu(0).irqs[i],
			(action->flags & SA_INTERRUPT) ? '+' : ' ',
			action->name);
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ",%s %s",
				(action->flags & SA_INTERRUPT) ? " +" : "",
				action->name);
		}
		seq_putc(p, '\n');
skip:
		local_irq_restore(flags);
	}
	return 0;
}
Example #11
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0)
		seq_puts(p, "           CPU0");

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;
		seq_printf(p, "%3d: ",i);
		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
Example #12
/* The 68k family did not have a good way to determine the source
 * of interrupts until later in the family.  The EC000 core does
 * not provide the vector number on the stack, we vector everything
 * into one vector and look in the blasted mask register...
 * This code is designed to be fast, almost constant time, not clean!
 */
void process_int(int vec, struct pt_regs *fp)
{
	int irq;
	int mask;

	/* unsigned long pend = *(volatile unsigned long *)0xfffff30c; */

	/* irq = vec + (CPM_VECTOR_BASE<<4); */
	irq = vec;

	/* unsigned long pend = *(volatile unsigned long *)pquicc->intr_cipr; */

	/* Bugger all that weirdness. For the moment, I seem to know where I came from;
	 * vec is passed from a specific ISR, so I'll use it. */

	if (int_irq_list[irq].handler) {
		int_irq_list[irq].handler(irq, int_irq_list[irq].dev_id, fp);
		kstat_cpu(0).irqs[irq]++;
		pquicc->intr_cisr = (1 << vec); /* indicate that irq has been serviced */
	} else {
		printk(KERN_ERR "unregistered interrupt %d!\nTurning it off in the CIMR...\n", irq);
		/* *(volatile unsigned long *)0xfffff304 |= mask; */
		pquicc->intr_cimr &= ~(1 << vec);
		num_spurious += 1;
	}
}
Example #13
static void dump_irqs(void)
{
	int n;
	dprintf("irqnr       total  since-last   status  name\n");
	for (n = 1; n < NR_IRQS; n++) {
		struct irqaction *act = irq_desc[n].action;
		if (!act && !kstat_cpu(0).irqs[n])
			continue;
		dprintf("%5d: %10u %11u %8x  %s\n", n,
			kstat_cpu(0).irqs[n],
			kstat_cpu(0).irqs[n] - last_irqs[n],
			irq_desc[n].status,
			(act && act->name) ? act->name : "???");
		last_irqs[n] = kstat_cpu(0).irqs[n];
	}
}
Example #14
void mac_do_irq_list(int irq, struct pt_regs *fp)
{
	irq_node_t *node, *slow_nodes;
	unsigned long flags;

	kstat_cpu(0).irqs[irq]++;

#ifdef DEBUG_SPURIOUS
	if (!mac_irq_list[irq] && (console_loglevel > 7)) {
		printk("mac_do_irq_list: spurious interrupt %d!\n", irq);
		return;
	}
#endif

	/* serve first fast and normal handlers */
	for (node = mac_irq_list[irq];
	     node && (!(node->flags & IRQ_FLG_SLOW));
	     node = node->next)
		node->handler(irq, node->dev_id, fp);
	if (!node) return;
	local_save_flags(flags);
	local_irq_restore((flags & ~0x0700) | (fp->sr & 0x0700));
	/* if slow handlers exists, serve them now */
	slow_nodes = node;
	for (; node; node = node->next) {
		node->handler(irq, node->dev_id, fp);
	}
}
Example #15
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void fastcall
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	kstat_cpu(cpu).irqs[irq]++;

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		if (desc->chip->mask)
			desc->chip->mask(irq);
		desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
		desc->status |= IRQ_PENDING;
		goto out_unlock;
	}

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}
Example #16
int show_interrupts(struct seq_file *p, void *v)
{
    int i = *(loff_t *)v, cpu;
    struct irqaction *action;
    unsigned long flags;

    if (i == 0) {
        seq_puts(p, "           ");
        for_each_online_cpu(cpu)
            seq_printf(p, "CPU%d       ", cpu);
        seq_putc(p, '\n');
    }

    if (i < NR_IRQS) {
        spin_lock_irqsave(&irq_desc[i].lock, flags);
        action = irq_desc[i].action;
        if (!action)
            goto unlock;

        seq_printf(p, "%3d: ", i);
        for_each_online_cpu(cpu)
            seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
        seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-");
        seq_printf(p, "  %s", action->name);
        for (action = action->next; action; action = action->next)
            seq_printf(p, ", %s", action->name);

        seq_putc(p, '\n');
unlock:
        spin_unlock_irqrestore(&irq_desc[i].lock, flags);
    }

    return 0;
}
Example #17
/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status had been pending. Called with the console_subchannel lock.
 */
static void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		local_bh_disable();
		irq_enter();
	}
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}
Example #18
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || !desc->enabled))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && desc->enabled) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

 running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
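Example #19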
static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
	if (S390_lowcore.clock_comparator == -1ULL)
		set_clock_comparator(S390_lowcore.clock_comparator);
}
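Example #20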
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/* Get interrupt information from lowcore. */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
			else
				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		} else
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
Example #21
static irqreturn_t sun3_int5(int irq, void *dev_id, struct pt_regs *fp)
{
	kstat_cpu(0).irqs[SYS_IRQS + irq]++;
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	*sun3_intreg &= ~(1 << irq);
	*sun3_intreg |=  (1 << irq);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	do_timer(fp);
	if (!(kstat_cpu(0).irqs[SYS_IRQS + irq] % 20))
		sun3_leds(led_pattern[(kstat_cpu(0).irqs[SYS_IRQS + irq] % 160)
			  / 20]);
	return IRQ_HANDLED;
}
Example #22
inline void amiga_do_irq(int irq, struct pt_regs *fp)
{
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction *action = desc->action;

	kstat_cpu(0).irqs[irq]++;
	action->handler(irq, action->dev_id, fp);
}
Example #23
static void rtc_sync_work_handler(struct work_struct * __unused)
{
	static unsigned int 	old_idle_tick, busy_count;
	int 			next_interval;
	int 			cpu_idle;

	if (rtc_sync_state == RS_SAVE_DELTA)
	{
		rtc_sync_save_delta();
		rtc_sync_start();
		return;
	}

	switch (rtc_sync_state)
	{
	case RS_WAIT_ADJUST_TIME:
		/* start adjust service */
		busy_count = 0;
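		/* fall through */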
	case RS_WAIT_ADJUST_TIME_AFTER_BUSY:
		/* prepare detect cpu idle */
		old_idle_tick = kstat_cpu(0).cpustat.idle + kstat_cpu(0).cpustat.iowait;
		rtc_sync_state = RS_DETECT_IDLE;
		next_interval = RTC_SYNC_DETECT_IDLE_INTERVAL;
		break;
	case RS_DETECT_IDLE:
		cpu_idle = detect_cpu_idle(old_idle_tick);

		/* when cpu idle or passing the adjust force time */
		if (cpu_idle || ++busy_count > RTC_SYNC_MAX_BUSY_COUNT)
		{
			rtc_sync_state = RS_TRY_ADJUST;
			rtc_sync_adjust();
			rtc_sync_state = RS_WAIT_ADJUST_TIME;
			next_interval = RTC_SYNC_ADJUST_INTERVAL;
		}
		else
		{
			rtc_sync_state = RS_WAIT_ADJUST_TIME_AFTER_BUSY;
			next_interval = RTC_SYNC_AFTER_BUSY_INTERVAL;
		}
		break;
	default:
		return;
	}
	schedule_delayed_work(&rtc_sync_work, next_interval);
}
Example #24
static irqreturn_t sun3_int5(int irq, void *dev_id, struct pt_regs *fp)
{
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	*sun3_intreg |=  (1 << irq);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	do_timer(fp);
#ifndef CONFIG_SMP
	update_process_times(user_mode(fp));
#endif
	if (!(kstat_cpu(0).irqs[irq] % 20))
		sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
	return IRQ_HANDLED;
}
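Example #25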
static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
	if (param32 & 0x00c40000)
		etr_timing_alert((struct etr_irq_parm *) &param32);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}
Example #26
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	s390_idle_check();
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
		}
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
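Example #27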
static cputime64_t get_idle_time(int cpu)
{
	cputime64_t idle;

	idle = kstat_cpu(cpu).cpustat.idle;
	if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
		idle += arch_idle_time(cpu);
	return idle;
}
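Example #28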
static cputime64_t get_iowait_time(int cpu)
{
	cputime64_t iowait;

	iowait = kstat_cpu(cpu).cpustat.iowait;
	if (cpu_online(cpu) && nr_iowait_cpu(cpu))
		iowait += arch_idle_time(cpu);
	return iowait;
}
Example #29
static inline int detect_cpu_idle (unsigned int old_idle_tick)
{
	unsigned int idle_tick = kstat_cpu(0).cpustat.idle + kstat_cpu(0).cpustat.iowait;
	unsigned int unit_idle_tick = idle_tick - old_idle_tick;
	int	     state = false;
	
	old_idle_tick = idle_tick;	/* updates the local copy only; the caller keeps its own previous value */

	if ( unit_idle_tick > (RTC_SYNC_DETECT_IDLE_INTERVAL * RTC_SYNC_IDLE_PERCENT / 100))
	{
		state = true;
	}

#ifdef CONFIG_RTC_S3C_SYNC_SYSTEM_TIME_DEBUG
	printk ("RTC_SYNC: %s idle_tick:%d\n", (state == true)?"<idle>":"<busy>",  unit_idle_tick);
#endif
	return state;
}
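Example #30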
/*
 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
 * Return non-zero if an interrupt was processed, zero otherwise.
 */
static int cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	int irq_context;

	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	if (tpi(NULL) != 1)
		return 0;
	kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
	if (tpi_info->adapter_IO) {
		do_adapter_IO(tpi_info->isc);
		return 1;
	}
	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) != 0) {
		/* Not status pending or not operational. */
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch) {
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	irq_enter();
	spin_lock(sch->lock);
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	spin_unlock(sch->lock);
	irq_exit();
	if (!irq_context)
		_local_bh_enable();
	return 1;
}