Example #1
void mwait_idle_with_hints(unsigned int eax, unsigned int ecx)
{
    unsigned int cpu = smp_processor_id();
    s_time_t expires = per_cpu(timer_deadline, cpu);

    if ( boot_cpu_has(X86_FEATURE_CLFLUSH_MONITOR) )
    {
        mb();
        clflush((void *)&mwait_wakeup(cpu));
        mb();
    }

    __monitor((void *)&mwait_wakeup(cpu), 0, 0);
    smp_mb();

    /*
     * Timer deadline passing is the event on which we will be woken via
     * cpuidle_mwait_wakeup. So check it now that the location is armed.
     */
    if ( (expires > NOW() || expires == 0) && !softirq_pending(cpu) )
    {
        cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
        __mwait(eax, ecx);
        cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
    }

    if ( expires <= NOW() && expires > 0 )
        raise_softirq(TIMER_SOFTIRQ);
}
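The arm-then-recheck ordering above is the load-bearing part: the monitor is armed and a full barrier issued before the deadline and softirq checks, so a wakeup that lands in between still terminates the MWAIT; the waker is the mirror image (set the condition, barrier, then test the flag). A minimal, self-contained userspace analogue of that pattern (illustrative only, not Xen code; the flag and condition names are invented):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool waiter_armed;    /* analogue of the cpuidle_mwait_flags bit */
static atomic_bool wake_condition;  /* analogue of "deadline passed / softirq pending" */

static void wait_for_event(void)
{
    atomic_store(&waiter_armed, true);            /* analogue of __monitor() */
    atomic_thread_fence(memory_order_seq_cst);    /* analogue of smp_mb() */

    /* Re-check the wakeup condition only after the "monitor" is armed. */
    while (!atomic_load(&wake_condition))
        ;                                         /* analogue of __mwait() */

    atomic_store(&waiter_armed, false);
}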
Example #2
File: tasklet.c Project: fdario/xen
/* VCPU context work */
void do_tasklet(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
    struct list_head *list = &per_cpu(tasklet_list, cpu);

    /*
     * We want to be sure any caller has checked that a tasklet is both
     * enqueued and scheduled, before calling this. And, if the caller has
     * actually checked, it's not an issue that we are outside of the
     * critical region, in fact:
     * - TASKLET_enqueued is cleared only here,
     * - TASKLET_scheduled is only cleared when schedule() finds it set,
     *   without TASKLET_enqueued being set as well.
     */
    ASSERT(tasklet_work_to_do(cpu));

    spin_lock_irq(&tasklet_lock);

    do_tasklet_work(cpu, list);

    if ( list_empty(list) )
    {
        clear_bit(_TASKLET_enqueued, work_to_do);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    spin_unlock_irq(&tasklet_lock);
}
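For context, a hedged sketch of the kind of caller the ASSERT() above expects (the idle-loop shape is an assumption, not taken from tasklet.c; only identifiers already visible above are used): the per-CPU work flags are tested first, and do_tasklet() is entered only when work is actually pending.

static void idle_loop_sketch(void)
{
    unsigned int cpu = smp_processor_id();

    for ( ; ; )
    {
        /* Both TASKLET_enqueued and TASKLET_scheduled feed into this test. */
        if ( tasklet_work_to_do(cpu) )
            do_tasklet();

        /* ... otherwise halt or handle other pending softirqs ... */
    }
}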
Example #3
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
{
    struct cpu_user_regs *old_regs = set_irq_regs(regs);
    ack_APIC_irq();
    perfc_incr(apic_timer);
    raise_softirq(TIMER_SOFTIRQ);
    set_irq_regs(old_regs);
}
/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
  mutant_covered = 1;
 /* MUTANT (del_stmt) */ /* 	local_irq_restore(flags); */ 
}
Example #6
static enum hrtimer_restart rcu_timer_func(struct hrtimer *t)
{
	ktime_t next;

	raise_softirq(RCU_SOFTIRQ);

	next = ktime_add_ns(ktime_get(), RCU_PERIOD_NS);
	hrtimer_set_expires_range_ns(&rcu_timer, next, RCU_PERIOD_DELTA_NS);
	return HRTIMER_RESTART;
}
Example #7
File: irq.c Project: kcoewoys/work
//Top half
//Interrupt handler: invoked directly by the hardware, so it should run fast
irqreturn_t my_handler(int irq, void *dev_id)
{
	//Acknowledge the interrupt
	//Raise a softirq; the softirq runs later, at a more convenient time
	//void raise_softirq(unsigned int nr)
	raise_softirq(MYTEST_SOFTIRQ);

	printk("key1  %d is pressed\n", irq);
	return IRQ_HANDLED;
}
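A hedged sketch of the pieces around my_handler() (the softirq action, IRQ number and device cookie below are assumptions, not from irq.c): the top half is installed with request_irq(), the bottom half with open_softirq(), and a custom number such as MYTEST_SOFTIRQ also has to be added to the softirq enum in <linux/interrupt.h>.

static int mytest_dev;	/* placeholder device cookie for the shared IRQ */

static void mytest_softirq_action(struct softirq_action *a)
{
	/* Bottom half: the slow work deferred by my_handler() runs here. */
}

static int __init mytest_init(void)
{
	open_softirq(MYTEST_SOFTIRQ, mytest_softirq_action);

	/* MYTEST_IRQ is a placeholder for the actual key IRQ number. */
	return request_irq(MYTEST_IRQ, my_handler, IRQF_SHARED,
			   "mytest", &mytest_dev);
}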
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
  mutant_covered = 1;
 /* MUTANT (rep_op) */	if (rcu_qsctr_help(&rcu_sched_ctrlblk) -
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
Example #9
File: time.c Project: HPSI/xen-v4v
/* Handle the firing timer */
static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    if ( irq == (timer_irq[TIMER_HYP_PPI].irq) &&
         READ_SYSREG32(CNTHP_CTL_EL2) & CNTx_CTL_PENDING )
    {
        /* Signal the generic timer code to do its work */
        raise_softirq(TIMER_SOFTIRQ);
        /* Disable the timer to avoid more interrupts */
        WRITE_SYSREG32(0, CNTHP_CTL_EL2);
    }

    if ( irq == (timer_irq[TIMER_PHYS_NONSECURE_PPI].irq) &&
         READ_SYSREG32(CNTP_CTL_EL0) & CNTx_CTL_PENDING )
    {
        /* Signal the generic timer code to do its work */
        raise_softirq(TIMER_SOFTIRQ);
        /* Disable the timer to avoid more interrupts */
        WRITE_SYSREG32(0, CNTP_CTL_EL0);
    }
}
/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk)) {
		if (!__covered10) {__covered10 = 1; total_covered += 1;}
		raise_softirq(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}
/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

 mutant_covered = 1;
 /* MUTANT (del_stmt) */ /* 	local_irq_save(flags); */ 
	if (rcu_qsctr_help(&rcu_bh_ctrlblk)) {
		if (!__covered10) {__covered10 = 1; total_covered += 1;}
		raise_softirq(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}
Example #12
static long do_poll(struct sched_poll *sched_poll)
{
    struct vcpu   *v = current;
    struct domain *d = v->domain;
    evtchn_port_t  port;
    long           rc = 0;
    unsigned int   i;

    /* Fairly arbitrary limit. */
    if ( sched_poll->nr_ports > 128 )
        return -EINVAL;

    if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
        return -EFAULT;

    set_bit(_VPF_blocked, &v->pause_flags);
    v->is_polling = 1;
    d->is_polling = 1;

    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    smp_wmb();

    for ( i = 0; i < sched_poll->nr_ports; i++ )
    {
        rc = -EFAULT;
        if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
            goto out;

        rc = -EINVAL;
        if ( port >= MAX_EVTCHNS(d) )
            goto out;

        rc = 0;
        if ( test_bit(port, shared_info_addr(d, evtchn_pending)) )
            goto out;
    }

    if ( sched_poll->timeout != 0 )
        set_timer(&v->poll_timer, sched_poll->timeout);

    TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);

    return 0;

 out:
    v->is_polling = 0;
    clear_bit(_VPF_blocked, &v->pause_flags);
    return rc;
}
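The barrier above only works because the waking side mirrors it: the event sender publishes the pending bit first and checks v->is_polling afterwards, so at least one side always observes the other's write. A hedged, simplified sketch of that sender path (the function name is an assumption; only identifiers visible above plus Xen's vcpu_unblock() are used):

static void wake_polling_vcpu_sketch(struct vcpu *v, evtchn_port_t port)
{
    struct domain *d = v->domain;

    set_bit(port, shared_info_addr(d, evtchn_pending));

    smp_mb();               /* pairs with the barrier in do_poll() */

    if ( v->is_polling )
    {
        v->is_polling = 0;
        vcpu_unblock(v);    /* clears _VPF_blocked and wakes the vcpu */
    }
}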
Example #13
File: tasklet.c Project: fdario/xen
/* Softirq context work */
static void tasklet_softirq_action(void)
{
    unsigned int cpu = smp_processor_id();
    struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);

    spin_lock_irq(&tasklet_lock);

    do_tasklet_work(cpu, list);

    if ( !list_empty(list) && !cpu_is_offline(cpu) )
        raise_softirq(TASKLET_SOFTIRQ);

    spin_unlock_irq(&tasklet_lock);
}
Example #14
static void force_quiescent_state(struct rcu_data *rdp,
                                  struct rcu_ctrlblk *rcp)
{
    cpumask_t cpumask;
    raise_softirq(SCHEDULE_SOFTIRQ);
    if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
        rdp->last_rs_qlen = rdp->qlen;
        /*
         * Don't send IPI to itself. With irqs disabled,
         * rdp->cpu is the current cpu.
         */
        cpumask = rcp->cpumask;
        cpu_clear(rdp->cpu, cpumask);
        cpumask_raise_softirq(cpumask, SCHEDULE_SOFTIRQ);
    }
}
Example #15
void tasklet_schedule(struct tasklet *t)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( !t->is_dead )
    {
        if ( !t->is_scheduled && !t->is_running )
        {
            BUG_ON(!list_empty(&t->list));
            list_add_tail(&t->list, &tasklet_list);
        }
        t->is_scheduled = 1;
        raise_softirq(TASKLET_SOFTIRQ);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}
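A hedged usage sketch of the API above (the tasklet name and work function are invented for illustration, using Xen's tasklet_init()): the tasklet is initialised once, and tasklet_schedule() may then be called from interrupt or softirq context; the work itself runs later from TASKLET_SOFTIRQ.

static void my_work_fn(unsigned long data)
{
    /* Deferred work; runs in softirq context via tasklet_action(). */
}

static struct tasklet my_tasklet;

static void my_setup(void)
{
    tasklet_init(&my_tasklet, my_work_fn, 0);
}

/* Later, e.g. from an interrupt handler: */
/*     tasklet_schedule(&my_tasklet);     */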
Example #16
/* Block the currently-executing domain until a pertinent event occurs. */
static long do_block(void)
{
    struct vcpu *v = current;

    local_event_delivery_enable();
    set_bit(_VPF_blocked, &v->pause_flags);

    /* Check for events /after/ blocking: avoids wakeup waiting race. */
    if ( local_events_need_delivery() )
    {
        clear_bit(_VPF_blocked, &v->pause_flags);
    }
    else
    {
        TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    return 0;
}
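The "check for events after blocking" ordering pairs with the waking side, which makes the event visible before clearing the blocked flag. A hedged, simplified sketch of that counterpart (modelled on Xen's vcpu_unblock(); details such as poll-timer handling are omitted):

void vcpu_unblock_sketch(struct vcpu *v)
{
    if ( !test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
        return;

    vcpu_wake(v);   /* hand the now-runnable vcpu back to the scheduler */
}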
Example #17
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
    struct rcu_head *next, *list;
    int count = 0;

    list = rdp->donelist;
    while (list) {
        next = rdp->donelist = list->next;
        list->func(list);
        list = next;
        rdp->qlen--;
        if (++count >= rdp->blimit)
            break;
    }
    if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
        rdp->blimit = blimit;
    if (!rdp->donelist)
        rdp->donetail = &rdp->donelist;
    else
        raise_softirq(RCU_SOFTIRQ);
}
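For context, a hedged sketch of how entries end up on rdp->donelist in the first place (the struct and free routine are invented for illustration): callers queue a callback with call_rcu(), and once a grace period has elapsed rcu_do_batch() invokes it as list->func(list).

struct foo {
    struct rcu_head rcu;
    int data;
};

static void free_foo(struct rcu_head *head)
{
    struct foo *f = container_of(head, struct foo, rcu);

    xfree(f);   /* kfree() in Linux; xfree() in Xen */
}

/* After unlinking 'f' so that no new reader can find it: */
/*     call_rcu(&f->rcu, free_foo);                       */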
Example #18
u32
tlbflush_clock_inc_and_return(void)
{
    u32 t, t1, t2;

    t = tlbflush_clock;
    do {
        t1 = t2 = t;
        /* Clock wrapped: someone else is leading a global TLB shootdown. */
        if (unlikely(t1 == 0))
            return t2;
        t2 = (t + 1) & WRAP_MASK;
        t = ia64_cmpxchg(acq, &tlbflush_clock, t1, t2, sizeof(tlbflush_clock));
    } while (unlikely(t != t1));

    /* Clock wrapped: we will lead a global TLB shootdown. */
    if (unlikely(t2 == 0))
        raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);

    return t2;
}
Example #19
static void tasklet_action(void)
{
    struct tasklet *t;

    spin_lock_irq(&tasklet_lock);

    if ( list_empty(&tasklet_list) )
    {
        spin_unlock_irq(&tasklet_lock);
        return;
    }

    t = list_entry(tasklet_list.next, struct tasklet, list);
    list_del_init(&t->list);

    BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
    t->is_scheduled = 0;
    t->is_running = 1;

    spin_unlock_irq(&tasklet_lock);
    t->func(t->data);
    spin_lock_irq(&tasklet_lock);

    t->is_running = 0;

    if ( t->is_scheduled )
    {
        BUG_ON(t->is_dead || !list_empty(&t->list));
        list_add_tail(&t->list, &tasklet_list);
    }

    /*
     * If there is more work to do then reschedule. We don't grab more work
     * immediately as we want to allow other softirq work to happen first.
     */
    if ( !list_empty(&tasklet_list) )
        raise_softirq(TASKLET_SOFTIRQ);

    spin_unlock_irq(&tasklet_lock);
}
Example #20
/*
 * When high resolution timers are active, try to reprogram. Note that if the
 * state has HRTIMER_STATE_CALLBACK set, neither reprogramming nor the expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check are done in hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq ! The calling site
			 * takes care of this.
			 */
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list !
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			raise_softirq(HRTIMER_SOFTIRQ);
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}
Example #21
static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
    unsigned int cpu = smp_processor_id();
    s_time_t expires = per_cpu(timer_deadline, cpu);

    __monitor((void *)&mwait_wakeup(cpu), 0, 0);
    smp_mb();

    /*
     * Timer deadline passing is the event on which we will be woken via
     * cpuidle_mwait_wakeup. So check it now that the location is armed.
     */
    if ( expires > NOW() || expires == 0 )
    {
        cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
        __mwait(eax, ecx);
        cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
    }

    if ( expires <= NOW() && expires > 0 )
        raise_softirq(TIMER_SOFTIRQ);
}
Example #22
void apic_timer_interrupt(struct cpu_user_regs * regs)
{
    ack_APIC_irq();
    perfc_incr(apic_timer);
    raise_softirq(TIMER_SOFTIRQ);
}
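For context, a hedged sketch of where that softirq goes (the handler body is an assumption; Xen registers softirq handlers with open_softirq()): raise_softirq(TIMER_SOFTIRQ) only marks the softirq pending, and the registered handler runs expired timers on the way out of the interrupt.

static void timer_softirq_action_sketch(void)
{
    /* Run timers whose deadline has passed on this CPU and reprogram the
     * timer hardware for the next deadline. */
}

static void timer_init_sketch(void)
{
    open_softirq(TIMER_SOFTIRQ, timer_softirq_action_sketch);
}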
Example #23
/* Voluntarily yield the processor for this allocation. */
static long do_yield(void)
{
    TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);
    return 0;
}
Example #24
static void
ss_ipi(struct irq_work *work)
{
	raise_softirq(NET_TX_SOFTIRQ);
}
Example #25
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}
Example #26
static inline void hrtimer_raise_softirq(void)
{
	raise_softirq(HRTIMER_SOFTIRQ);
}
Example #27
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}
Example #28
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_qsctr_inc(int cpu)
{
	if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
}
Example #29
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__remove_hrtimer(timer, base,
					 HRTIMER_STATE_CALLBACK, 0);
			timer_stats_account_hrtimer(timer);

			/*
			 * Note: We clear the CALLBACK bit after
			 * enqueue_hrtimer to avoid reprogramming of
			 * the event hardware. This happens at the end
			 * of this function anyway.
			 */
			if (timer->function(timer) != HRTIMER_NORESTART) {
				BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
				enqueue_hrtimer(timer, base, 0);
			}
			timer->state &= ~HRTIMER_STATE_CALLBACK;
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}
Example #30
File: mce.c Project: dzan/xenOnArm
/* Shared #MC handler. */
void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
    struct mca_banks *bankmask, struct mca_banks *clear_bank)
{
    uint64_t gstatus;
    mctelem_cookie_t mctc = NULL;
    struct mca_summary bs;

    mce_spin_lock(&mce_logout_lock);

    if (clear_bank != NULL) {
        memset( clear_bank->bank_map, 0x0,
            sizeof(long) * BITS_TO_LONGS(clear_bank->num));
    }
    mctc = mcheck_mca_logout(MCA_MCE_SCAN, bankmask, &bs, clear_bank);

    if (bs.errcnt) {
        /*
         * Uncorrected errors must be dealt with in softirq context.
         */
        if (bs.uc || bs.pcc) {
            add_taint(TAINT_MACHINE_CHECK);
            if (mctc != NULL)
                mctelem_defer(mctc);
            /*
             * If PCC=1, or the error cannot be recovered, context is
             * lost, so reboot now without clearing the banks and deal
             * with the telemetry after reboot (the MSRs are sticky).
             */
            if (bs.pcc || !bs.recoverable)
                cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
        } else {
            if (mctc != NULL)
                mctelem_commit(mctc);
        }
        atomic_set(&found_error, 1);

        /* The last CPU will take care of the check/clean-up etc. */
        atomic_set(&severity_cpu, smp_processor_id());

        mce_printk(MCE_CRITICAL, "MCE: clear_bank map %lx on CPU%d\n",
                *((unsigned long*)clear_bank), smp_processor_id());
        if (clear_bank != NULL)
            mcheck_mca_clearbanks(clear_bank);
    } else {
        if (mctc != NULL)
            mctelem_dismiss(mctc);
    }
    mce_spin_unlock(&mce_logout_lock);

    mce_barrier_enter(&mce_trap_bar);
    if ( mctc != NULL && mce_urgent_action(regs, mctc))
        cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
    mce_barrier_exit(&mce_trap_bar);

    /*
     * Wait until everybody has processed the trap.
     */
    mce_barrier_enter(&mce_trap_bar);
    if (atomic_read(&severity_cpu) == smp_processor_id())
    {
        /* According to the SDM, if no error bank was found on any CPU,
         * something unexpected is happening; we can't do any recovery
         * work other than resetting the system.
         */
        if (atomic_read(&found_error) == 0)
            mc_panic("MCE: No CPU found valid MCE, need reset\n");
        if (!cpumask_empty(&mce_fatal_cpus))
        {
            char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
            ebufp = ebuf + strlen(ebuf);
            cpumask_scnprintf(ebufp, 95 - strlen(ebuf), &mce_fatal_cpus);
            mc_panic(ebuf);
        }
        atomic_set(&found_error, 0);
    }
    mce_barrier_exit(&mce_trap_bar); 

    /* Clear flags after above fatal check */
    mce_barrier_enter(&mce_trap_bar);
    gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS);
    if ((gstatus & MCG_STATUS_MCIP) != 0) {
        mce_printk(MCE_CRITICAL, "MCE: Clear MCIP@ last step");
        mca_wrmsr(MSR_IA32_MCG_STATUS, gstatus & ~MCG_STATUS_MCIP);
    }
    mce_barrier_exit(&mce_trap_bar);

    raise_softirq(MACHINE_CHECK_SOFTIRQ);
}
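A hedged sketch of the other end of that final raise_softirq() (the handler body is an assumption): the telemetry deferred with mctelem_defer() above is consumed later, outside #MC context, by a handler registered for MACHINE_CHECK_SOFTIRQ.

static void mce_softirq_sketch(void)
{
    /* Process the telemetry deferred by mcheck_cmn_handler(): log it and,
     * where possible, recover the affected pages/vcpus. */
}

static void mce_init_sketch(void)
{
    open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq_sketch);
}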