static irqreturn_t sun3_int5(int irq, void *dev_id)
{
	unsigned int cnt;

#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	sun3_disable_irq(5);
	sun3_enable_irq(5);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	cnt = kstat_irqs_cpu(irq, 0);
	if (!(cnt % 20))
		sun3_leds(led_pattern[cnt % 160 / 20]);
	return IRQ_HANDLED;
}
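
For context, a handler like this is registered with request_irq(); a minimal sketch, assuming clock IRQ 5 (as in the fuller variant of this handler later in the listing) with an illustrative flag and device name:

static int __init sun3_timer_setup(void)
{
	/* IRQ 5 is the Sun-3 clock level used by this handler;
	 * IRQF_TIMER marks it as a timer interrupt. */
	return request_irq(5, sun3_int5, IRQF_TIMER, "timer", NULL);
}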
Example #2
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	*sun3_intreg |= (1 << irq);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	do_timer(1);
#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	if (!(kstat_cpu(0).irqs[irq] % 20))
		sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
	return IRQ_HANDLED;
}
Example #3
void rk28_check_jiffies_at_irq( void )
{
        struct pt_regs *new_regs = get_irq_regs();
        int             print = 0;

        if( !last_jn )
                last_jn = jiffies;
        printk("last jiffies=%ld,now jiffies=%ld,goes=%ld\n" , last_jn , jiffies ,jiffies -last_jn);
        if( jiffies -last_jn < 25 || print ) {
                
                if (new_regs)
		show_regs(new_regs);
	else
		dump_stack();
        }
        last_jn = jiffies;
}
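
The snippet uses last_jn without declaring it; a minimal sketch of the file-scope state it assumes (name taken from the snippet, type assumed to match jiffies):

static unsigned long last_jn;	/* jiffies value at the previous check */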
static void sysrq_handle_showallcpus(int key)
{
	/*
	 * Fall back to the workqueue based printing if the
	 * backtrace printing did not succeed or the
	 * architecture has no support for it:
	 */
	if (!trigger_all_cpu_backtrace()) {
		struct pt_regs *regs = get_irq_regs();

		if (regs) {
			printk(KERN_INFO "CPU%d:\n", smp_processor_id());
			show_regs(regs);
		}
		schedule_work(&sysrq_showallcpus);
	}
}
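
The fallback path schedules a work item defined elsewhere in the file; a plausible companion sketch, with the worker name hypothetical and its body elided:

/* Hypothetical worker: dumps registers from process context when the
 * NMI backtrace path is unavailable; details omitted here. */
static void sysrq_showregs_othercpus(struct work_struct *dummy)
{
}

static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);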
Example #5
/*
 * timer_tick()
 *	Kernel system timer support. Needs to keep up the real-time clock,
 * 	as well as call the "do_timer()" routine every clocktick.
 */
static irqreturn_t timer_tick(int irq, void *dummy)
{
	int ticks;

	BUG_ON(!irqs_disabled());
	ticks = timer_reset(timervector, frequency);

	xtime_update(ticks);

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);

#if defined(CONFIG_SMP)
	smp_send_timer_all();
#endif
	return(IRQ_HANDLED);
}
Example #6
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update = 0;

	profile_tick(CPU_PROFILING);
	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
	 * the irq version of write_lock because, as just said, we have irqs
	 * locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);

	do_timer(1);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2
	    ) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	}

#ifdef CONFIG_HEARTBEAT
	static unsigned short n;
	n++;
	__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */

	write_sequnlock(&xtime_lock);

	update_process_times(user_mode(get_irq_regs()));

	return IRQ_HANDLED;
}
/*
 * IRQ handler for the timer
 */
static irqreturn_t ne1_timer_interrupt(int irq, void *dev_id)
{
	write_seqlock(&xtime_lock);

	/* clear the interrupt */
	writel(GTINT_TCI, IO_ADDRESS(NE1_BASE_TIMER_0 + TMR_GTINT));

	timer_tick();

#if defined(CONFIG_SMP) && !defined(CONFIG_LOCAL_TIMERS)
	smp_send_timer();
	update_process_times(user_mode(get_irq_regs()));
#endif

	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themself to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too much ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}
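
The loop at the end re-arms the tick until the next expiry lands in the future; roughly what tick_nohz_reprogram() looks like in kernels of this vintage (a sketch, not the exact source):

static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	/* Advance the per-cpu sched timer by one tick period and try to
	 * program the clock event device; a non-zero return means the
	 * expiry is already in the past and the caller must retry. */
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}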
/*
 * As System PMUs are affine to CPU0, the fact that interrupts are disabled
 * during interrupt handling is enough to serialise our actions and make this
 * safe. We do not need to grab our pmu_lock here.
 */
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}
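
Both copies of this handler gate on counter_is_saturated(); a minimal sketch of that predicate, assuming L220/PL310 event counters peg at 0xFFFFFFFF and that a l2x0pmu_read_counter() exists alongside the write helper used above:

static inline int counter_is_saturated(int idx)
{
	/* The counters saturate at the 32-bit maximum rather than wrap. */
	return l2x0pmu_read_counter(idx) == 0xFFFFFFFF;
}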
Example #11
File: err_ev6.c  Project: 274914765/C
void
ev6_machine_check(u64 vector, u64 la_ptr)
{
    struct el_common *mchk_header = (struct el_common *)la_ptr;

    /*
     * Sync the processor
     */
    mb();
    draina();

    /*
     * Parse the logout frame without printing first. If the only error(s)
     * found have a disposition of "dismiss", then just dismiss them
     * and don't print any message
     */
    if (ev6_process_logout_frame(mchk_header, 0) != 
        MCHK_DISPOSITION_DISMISS) {
        char *saved_err_prefix = err_print_prefix;
        err_print_prefix = KERN_CRIT;

        /*
         * Either a nondismissable error was detected or no
         * recognized error was detected in the logout frame
         * -- report the error in either case
         */
        printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d:\n", 
               err_print_prefix,
               (vector == SCB_Q_PROCERR)?"Correctable":"Uncorrectable",
               (unsigned int)vector, (int)smp_processor_id());
        
        ev6_process_logout_frame(mchk_header, 1);
        dik_show_regs(get_irq_regs(), NULL);

        err_print_prefix = saved_err_prefix;
    }

    /* 
     * Release the logout frame 
     */
    wrmces(0x7);
    mb();
}
Example #12
void smp_local_timer_interrupt(void)
{
//	profile_tick(CPU_PROFILING);
#ifdef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
		main_timer_handler();
	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * We might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */
}
Example #13
File: hrt.c  Project: FrozenCow/FIRE-ICE
static enum hrtimer_restart hrtimer_handler(struct hrtimer *hrtimer)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (!hrt.active)
		return HRTIMER_NORESTART;

	qm_debug_handler_sample(regs);

	if (regs)
		read_all_sources(regs, NULL);

	hrtimer_forward_now(hrtimer, ns_to_ktime(hrt.sample_period));
	qm_debug_timer_forward(regs, hrt.sample_period);

	return HRTIMER_RESTART;
}
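
For completeness, a self-forwarding sampler like this is armed once at start; a minimal sketch using the hrt fields referenced above (hrt.timer is an assumed field name):

static void hrt_start_sampling(void)
{
	hrtimer_init(&hrt.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrt.timer.function = hrtimer_handler;
	hrt.active = 1;
	/* Pin to the current CPU so samples stay on one core. */
	hrtimer_start(&hrt.timer, ns_to_ktime(hrt.sample_period),
		      HRTIMER_MODE_REL_PINNED);
}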
Example #14
File: ip32-irq.c  Project: 274914765/C
static void ip32_unknown_interrupt(void)
{
    printk("Unknown interrupt occurred!\n");
    printk("cp0_status: %08x\n", read_c0_status());
    printk("cp0_cause: %08x\n", read_c0_cause());
    printk("CRIME intr mask: %016lx\n", crime->imask);
    printk("CRIME intr status: %016lx\n", crime->istat);
    printk("CRIME hardware intr register: %016lx\n", crime->hard_int);
    printk("MACE ISA intr mask: %08lx\n", mace->perif.ctrl.imask);
    printk("MACE ISA intr status: %08lx\n", mace->perif.ctrl.istat);
    printk("MACE PCI control register: %08x\n", mace->pci.control);

    printk("Register dump:\n");
    show_regs(get_irq_regs());

    printk("Please mail this report to [email protected]\n");
    printk("Spinning...");
    while(1) ;
}
static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
{
	uint64_t counter, counter1, counter2;
	struct pt_regs *regs = get_irq_regs();
	int enabled;
	unsigned long flags;

	/*
	 * LOONGSON2 defines two 32-bit performance counters.
	 * To avoid a race updating the registers we need to stop the counters
	 * while we're messing with
	 * them ...
	 */

	/* Check whether the irq belongs to me */
	enabled = reg.cnt1_enabled | reg.cnt2_enabled;
	if (!enabled)
		return IRQ_NONE;

	counter = read_c0_perfcnt();
	counter1 = counter & 0xffffffff;
	counter2 = counter >> 32;

	spin_lock_irqsave(&sample_lock, flags);

	if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
		if (reg.cnt1_enabled)
			oprofile_add_sample(regs, 0);
		counter1 = reg.reset_counter1;
	}
	if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
		if (reg.cnt2_enabled)
			oprofile_add_sample(regs, 1);
		counter2 = reg.reset_counter2;
	}

	spin_unlock_irqrestore(&sample_lock, flags);

	write_c0_perfcnt((counter2 << 32) | counter1);

	return IRQ_HANDLED;
}
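
The handler reads its configuration from a file-scope reg structure not shown here; a sketch of the shape implied by the snippet's field accesses (field names from the snippet, type name and widths assumed):

static struct loongson2_register_config {
	unsigned int cnt1_enabled, cnt2_enabled;	/* counter enables */
	unsigned long long reset_counter1, reset_counter2; /* reload values */
} reg;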
/***********************************************************************
 * sh7109_pwm_interrupt()
 *
 *
 */
static irqreturn_t sh7109_pwm_interrupt(int irq, void *dev_id)
{
        u32 reg = 0;
        struct pt_regs *regs = get_irq_regs();

        /* Give the sample to oprofile. */
        oprofile_add_sample(regs, 0);

        /* Update the compare value. */
        reg = ctrl_inl(pwm->base + PWM1_CMP_VAL_REG);
        reg += results.compare_increment;
        ctrl_outl(reg, pwm->base + PWM1_CMP_VAL_REG);

        /* Ack active irq sources. */
        reg = ctrl_inl(pwm->base + PWM_INT_STA_REG);
        ctrl_outl(reg, pwm->base + PWM_INT_ACK_REG);


        return IRQ_HANDLED;
}
Example #17
/*
 * handle_timer_tick() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
void handle_timer_tick(void)
{
	if (current->pid)
		profile_tick(CPU_PROFILING);

#ifdef CONFIG_HEARTBEAT
	if (sh_mv.mv_heartbeat != NULL)
		sh_mv.mv_heartbeat();
#endif

	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
	 * the irq version of write_lock because, as just said, we have irqs
	 * locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);
	do_timer(1);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (rtc_sh_set_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			/* do it again in 60s */
			last_rtc_update = xtime.tv_sec - 600;
	}
	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
}
Example #18
static irqreturn_t tx3927_pcierr_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (txx9_pci_err_action != TXX9_PCI_ERR_IGNORE) {
		printk(KERN_WARNING "PCI error interrupt at 0x%08lx.\n",
		       regs->cp0_epc);
		printk(KERN_WARNING "pcistat:%02x, lbstat:%04lx\n",
		       tx3927_pcicptr->pcistat, tx3927_pcicptr->lbstat);
	}
	if (txx9_pci_err_action != TXX9_PCI_ERR_PANIC) {
		/* clear all pci errors */
		tx3927_pcicptr->pcistat |= TX3927_PCIC_PCISTATIM_ALL;
		tx3927_pcicptr->istat = TX3927_PCIC_IIM_ALL;
		tx3927_pcicptr->tstat = TX3927_PCIC_TIM_ALL;
		tx3927_pcicptr->lbstat = TX3927_PCIC_LBIM_ALL;
		return IRQ_HANDLED;
	}
	console_verbose();
	panic("PCI error.");
}
Example #19
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
	unsigned long flags;
	unsigned int cnt;

	local_irq_save(flags);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	sun3_disable_irq(5);
	sun3_enable_irq(5);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	cnt = kstat_irqs_cpu(irq, 0);
	if (!(cnt % 20))
		sun3_leds(led_pattern[cnt % 160 / 20]);
	local_irq_restore(flags);
	return IRQ_HANDLED;
}
Example #20
File: time.c  Project: CSCLOG/beaglebone
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
	profile_tick(CPU_PROFILING);
#endif
	xtime_update(1);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	/* As we return to user mode fire off the other CPU schedulers..
	   this is basically because we don't yet share IRQ's around.
	   This message is rigged to be safe on the 386 - basically it's
	   a hack, so don't look closely for now.. */

#ifdef CONFIG_SMP
	smp_local_timer_interrupt();
	smp_send_timer();
#endif

	return IRQ_HANDLED;
}
Example #21
notrace void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs,
	struct irqaction *action)
{
	struct marker *marker;
	struct serialize_long_long_short_char data;

	if (unlikely(!regs))
		regs = get_irq_regs();
	if (likely(regs)) {
		data.f1 = instruction_pointer(regs);
		data.f4 = !user_mode(regs);
	} else {
		data.f1 = 0UL;
		data.f4 = 1;
	}
	data.f2 = (unsigned long) (action ? action->handler : NULL);
	data.f3 = id;

	marker = &GET_MARKER(kernel, irq_entry);
	ltt_specialized_trace(marker, marker->single.probe_private,
		&data, serialize_sizeof(data), sizeof(long));
}
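
The payload type is not shown; a hypothetical layout matching the serialization name (long, long, short, char) and the f1..f4 assignments above:

struct serialize_long_long_short_char {
	unsigned long f1;	/* instruction pointer */
	unsigned long f2;	/* handler address */
	unsigned short f3;	/* irq id */
	unsigned char f4;	/* 1 if not user mode */
};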
Example #22
File: time.c  Project: 274914765/C
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
    profile_tick(CPU_PROFILING);
#endif
    do_timer(1);

#ifndef CONFIG_SMP
    update_process_times(user_mode(get_irq_regs()));
#endif
    /*
     * If we have an externally synchronized Linux clock, then update
     * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
     * called as close as possible to 500 ms before the new second starts.
     */
    write_seqlock(&xtime_lock);
    if (ntp_synced()
        && xtime.tv_sec > last_rtc_update + 660
        && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
        && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
    {
        if (set_rtc_mmss(xtime.tv_sec) == 0)
            last_rtc_update = xtime.tv_sec;
        else    /* do it again in 60 s */
            last_rtc_update = xtime.tv_sec - 600;
    }
    write_sequnlock(&xtime_lock);
    /* As we return to user mode fire off the other CPU schedulers..
       this is basically because we don't yet share IRQ's around.
       This message is rigged to be safe on the 386 - basically it's
       a hack, so don't look closely for now.. */

#ifdef CONFIG_SMP
    smp_local_timer_interrupt();
    smp_send_timer();
#endif

    return IRQ_HANDLED;
}
irqreturn_t px_css_isr(int irq, void * dev)
{
	struct pt_regs *regs;

	unsigned int pid;
	unsigned int tid;
	unsigned int cpu;
	unsigned long flags;
	unsigned long long ts;
	irqreturn_t ret;

	local_irq_save(flags);

	ret = IRQ_NONE;

	regs = get_irq_regs();

	pid = current->tgid;
	tid = current->pid;

	cpu = smp_processor_id();
	
	ts = get_timestamp();

#ifdef HW_TBS
	if (irq == get_timer_irq())
	{
		ret = px_timer_isr(regs, pid, tid, cpu, ts);
	}
	else
#endif
	{
		ret = px_pmu_isr(regs, pid, tid, cpu, ts);
	}

	local_irq_restore(flags);

	return ret;
}
Example #24
irqreturn_t timer_interrupt (int irq, void *dev_id)
{

	unsigned long next;

	next = get_linux_timer();

again:
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
#endif

		write_seqlock(&xtime_lock);

		do_timer(1); /* Linux handler in kernel/timer.c */

		/* Note that writing CCOMPARE clears the interrupt. */

		next += CCOUNT_PER_JIFFY;
		set_linux_timer(next);

		write_sequnlock(&xtime_lock);
	}

	/* Allow platform to do something useful (Wdog). */

	platform_heartbeat();

	/* Make sure we didn't miss any tick... */

	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	return IRQ_HANDLED;
}
irqreturn_t pdacf_interrupt(int irq, void *dev)
{
	struct snd_pdacf *chip = dev;
	unsigned short stat;

	if ((chip->chip_status & (PDAUDIOCF_STAT_IS_STALE|
				  PDAUDIOCF_STAT_IS_CONFIGURED|
				  PDAUDIOCF_STAT_IS_SUSPENDED)) != PDAUDIOCF_STAT_IS_CONFIGURED)
		return IRQ_HANDLED;	/* IRQ_NONE here? */

	stat = inw(chip->port + PDAUDIOCF_REG_ISR);
	if (stat & (PDAUDIOCF_IRQLVL|PDAUDIOCF_IRQOVR)) {
		if (stat & PDAUDIOCF_IRQOVR)	/* should never happen */
			snd_printk(KERN_ERR "PDAUDIOCF SRAM buffer overrun detected!\n");
		if (chip->pcm_substream)
			tasklet_hi_schedule(&chip->tq);
		if (!(stat & PDAUDIOCF_IRQAKM))
			stat |= PDAUDIOCF_IRQAKM;	/* check rate */
	}
	/* get_irq_regs() is non-NULL only when we were entered as a hard
	 * interrupt handler; skip the rate check for direct calls. */
	if (get_irq_regs() != NULL)
		snd_ak4117_check_rate_and_errors(chip->ak4117, 0);
	return IRQ_HANDLED;
}
static int loongson3_perfcount_handler(void)
{
	unsigned long flags;
	uint64_t counter1, counter2;
	uint32_t cause, handled = IRQ_NONE;
	struct pt_regs *regs = get_irq_regs();

	cause = read_c0_cause();
	if (!(cause & CAUSEF_PCI))
		return handled;

	counter1 = read_c0_perfhi1();
	counter2 = read_c0_perfhi2();

	local_irq_save(flags);

	if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
		if (reg.ctr1_enable)
			oprofile_add_sample(regs, 0);
		counter1 = reg.reset_counter1;
	}
	if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
		if (reg.ctr2_enable)
			oprofile_add_sample(regs, 1);
		counter2 = reg.reset_counter2;
	}

	local_irq_restore(flags);

	write_c0_perfhi1(counter1);
	write_c0_perfhi2(counter2);

	if (!(cause & CAUSEF_TI))
		handled = IRQ_HANDLED;

	return handled;
}
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (regs) {
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);

		if ((rq_info.init == 1) && (tick_do_timer_cpu == cpu)) {

			update_rq_stats();

			wakeup_user();
		}
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
Example #28
File: time.c  Project: 274914765/C
irqreturn_t timer_interrupt(int irq, void *dummy)
{
    /* last time the cmos clock got updated */
    static long last_rtc_update;

    write_seqlock(&xtime_lock);

    do_timer(1);

    profile_tick(CPU_PROFILING);

    /*
     * If we have an externally synchronized Linux clock, then update
     * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
     * called as close as possible to 500 ms before the new second starts.
     */

    if (ntp_synced() &&
        xtime.tv_sec > last_rtc_update + 660 &&
        (xtime.tv_nsec / NSEC_PER_USEC) >=
        500000 - ((unsigned)TICK_SIZE) / 2
        && (xtime.tv_nsec / NSEC_PER_USEC) <=
        500000 + ((unsigned)TICK_SIZE) / 2) {
        if (set_rtc_mmss(xtime.tv_sec) == 0)
            last_rtc_update = xtime.tv_sec;
        else
            /* Do it again in 60s. */
            last_rtc_update = xtime.tv_sec - 600;
    }
    write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
    update_process_times(user_mode(get_irq_regs()));
#endif

    return IRQ_HANDLED;
}
Example #29
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update = 0;

	/* Clear the interrupt condition */
	outw(0, timer_membase + ALTERA_TIMER_STATUS_REG);
	nios2_timer_count += NIOS2_TIMER_PERIOD;

	write_seqlock(&xtime_lock);

	do_timer(1);
	profile_tick(CPU_PROFILING);
	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;	/* do it again in 60 s */
	}

	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif

	return (IRQ_HANDLED);
}
Example #30
File: sysrq.c  Project: kzlin129/tt-gpl
static void sysrq_handle_showregs(int key, struct tty_struct *tty)
{
    struct pt_regs *regs = get_irq_regs();
    if (regs)
        show_regs(regs);
}