Example 1
/*
 * Decide whether the caller should poll the NB.  The decision is made
 * and any poll is performed under protection of the chip-wide mutex
 * enforced at the caller's level.  That mutex already ensures that all
 * pollers on a chip are serialized - the following is simply to
 * avoid the NB poll ping-ponging between different detectors.
 */
uint64_t
ao_ms_poll_ownermask(cmi_hdl_t hdl, hrtime_t pintvl)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	hrtime_t now = gethrtime_waitfree();
	hrtime_t last = ao->ao_ms_shared->aos_nb_poll_timestamp;
	int dopoll = 0;

	if (now - last > 2 * pintvl || last == 0) {
		/*
		 * If no last value has been recorded, assume ownership.
		 * Otherwise only take over if the current "owner" seems
		 * to be making little progress.
		 */
		ao->ao_ms_shared->aos_nb_poll_owner = hdl;
		dopoll = 1;
	} else if (ao->ao_ms_shared->aos_nb_poll_owner == hdl) {
		/*
		 * This is the current owner and it is making progress.
		 */
		dopoll = 1;
	}

	if (dopoll)
		ao->ao_ms_shared->aos_nb_poll_timestamp = now;

	return (dopoll ? -1ULL : ~(1ULL << AMD_MCA_BANK_NB));
}
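The pattern here — take over the poll only when the shared timestamp shows the current owner has stalled for more than two intervals — generalizes beyond the NB poller. Below is a minimal user-space sketch of the same idea in standard C, with hypothetical names (this is not the illumos API), assuming the caller holds a lock that serializes all pollers just as the chip-wide mutex does above:

#include <stdint.h>
#include <time.h>

typedef struct shared_poll_state {
	void	*sps_owner;	/* opaque handle of the current poll owner */
	int64_t	 sps_last;	/* timestamp of the last completed poll, ns */
} shared_poll_state_t;

static int64_t
now_ns(void)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec);
}

/*
 * Return nonzero if 'me' should poll.  The caller must hold whatever
 * lock serializes all pollers, mirroring the chip-wide mutex above.
 */
int
should_poll(shared_poll_state_t *sps, void *me, int64_t intvl_ns)
{
	int64_t now = now_ns();

	if (sps->sps_last == 0 || now - sps->sps_last > 2 * intvl_ns)
		sps->sps_owner = me;	/* owner absent or stalled: take over */
	else if (sps->sps_owner != me)
		return (0);		/* healthy owner exists: stand down */

	sps->sps_last = now;
	return (1);
}

As in the original, only the thread that actually polls advances the timestamp, so a stalled owner naturally loses ownership after two missed intervals.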
Example 2
/*
 * Called from trap() when processing the ast posted by the high-level
 * interrupt handler.
 */
int
kcpc_overflow_ast()
{
	kcpc_ctx_t	*ctx = curthread->t_cpc_ctx;
	int		i;
	int		found = 0;
	uint64_t	curtick = KCPC_GET_TICK();

	ASSERT(ctx != NULL);	/* Beware of interrupt skid. */

	/*
	 * An overflow happened: sample the context to ensure that
	 * the overflow is propagated into the upper bits of the
	 * virtualized 64-bit counter(s).
	 */
	kpreempt_disable();
	ctx->kc_hrtime = gethrtime_waitfree();
	pcbe_ops->pcbe_sample(ctx);
	kpreempt_enable();

	ctx->kc_vtick += curtick - ctx->kc_rawtick;

	/*
	 * The interrupt handler has marked any pics with KCPC_PIC_OVERFLOWED
	 * if that pic generated an overflow and if the request it was counting
	 * on behalf of had CPC_OVERFLOW_REQUEST specified. We go through all
	 * pics in the context and clear the KCPC_PIC_OVERFLOWED flags. If we
	 * find any overflowed pics, keep the context frozen and return true
	 * (thus causing a signal to be sent).
	 */
	for (i = 0; i < cpc_ncounters; i++) {
		if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) {
			atomic_and_uint(&ctx->kc_pics[i].kp_flags,
			    ~KCPC_PIC_OVERFLOWED);
			found = 1;
		}
	}
	if (found)
		return (1);

	/*
	 * Otherwise, re-enable the counters and continue life as before.
	 */
	kpreempt_disable();
	atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
	pcbe_ops->pcbe_program(ctx);
	kpreempt_enable();
	return (0);
}
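Note that the loop tests KCPC_PIC_OVERFLOWED and then clears it with atomic_and_uint in two steps. That appears safe here because the interrupt handler has already stopped the counters and frozen the context, so no new overflow bits can arrive between the test and the clear. In a context without that guarantee, the test and clear would need to be one indivisible step; a minimal C11 sketch of such a combined test-and-clear, with a hypothetical flag name (not the illumos API):

#include <stdatomic.h>

#define	PIC_OVERFLOWED	0x1u

/*
 * Atomically clear the overflow bit and report whether it was set.
 * atomic_fetch_and returns the value the flags held *before* the
 * clear, so the test and the clear form a single indivisible step.
 */
static int
claim_overflow(atomic_uint *flags)
{
	unsigned prev = atomic_fetch_and(flags, ~PIC_OVERFLOWED);

	return ((prev & PIC_OVERFLOWED) != 0);
}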
Example 3
/*ARGSUSED*/
uint_t
kcpc_hw_overflow_intr(caddr_t arg1, caddr_t arg2)
{
	kcpc_ctx_t	*ctx;
	uint64_t	bitmap;

	if (pcbe_ops == NULL ||
	    (bitmap = pcbe_ops->pcbe_overflow_bitmap()) == 0)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * Prevent any further interrupts.
	 */
	pcbe_ops->pcbe_allstop();

	/*
	 * Invoke the "generic" handler.
	 *
	 * If the interrupt has occurred in the context of an lwp owning
	 * the counters, then the handler posts an AST to the lwp to
	 * trigger the actual sampling, and optionally deliver a signal or
	 * restart the counters, on the way out of the kernel using
	 * kcpc_overflow_ast() (see Example 2 above).
	 *
	 * On the other hand, if the handler returns the context to us
	 * directly, then it means that there are no other threads in
	 * the middle of updating it, no AST has been posted, and so we
	 * should sample the counters here, and restart them with no
	 * further fuss.
	 */
	if ((ctx = kcpc_overflow_intr(arg1, bitmap)) != NULL) {
		uint64_t curtick = KCPC_GET_TICK();

		ctx->kc_hrtime = gethrtime_waitfree();
		ctx->kc_vtick += curtick - ctx->kc_rawtick;
		ctx->kc_rawtick = curtick;
		pcbe_ops->pcbe_sample(ctx);
		pcbe_ops->pcbe_program(ctx);
	}

	return (DDI_INTR_CLAIMED);
}
Example 4
void
panicsys(const char *format, va_list alist, struct regs *rp, int on_panic_stack)
{
	int s = spl8();
	kthread_t *t = curthread;
	cpu_t *cp = CPU;

	caddr_t intr_stack = NULL;
	uint_t intr_actv;

	ushort_t schedflag = t->t_schedflag;
	cpu_t *bound_cpu = t->t_bound_cpu;
	char preempt = t->t_preempt;

	(void) setjmp(&t->t_pcb);
	t->t_flag |= T_PANIC;

	t->t_schedflag |= TS_DONT_SWAP;
	t->t_bound_cpu = cp;
	t->t_preempt++;

	/*
	 * Switch lbolt to event driven mode.
	 */
	lbolt_hybrid = lbolt_event_driven;

	panic_enter_hw(s);

	/*
	 * If we're on the interrupt stack and an interrupt thread is available
	 * in this CPU's pool, preserve the interrupt stack by detaching an
	 * interrupt thread and making its stack the intr_stack.
	 */
	if (CPU_ON_INTR(cp) && cp->cpu_intr_thread != NULL) {
		kthread_t *it = cp->cpu_intr_thread;

		intr_stack = cp->cpu_intr_stack;
		intr_actv = cp->cpu_intr_actv;

		cp->cpu_intr_stack = thread_stk_init(it->t_stk);
		cp->cpu_intr_thread = it->t_link;

		/*
		 * Clear only the high level bits of cpu_intr_actv.
		 * We want to indicate that high-level interrupts are
		 * not active without destroying the low-level interrupt
		 * information stored there.
		 */
		cp->cpu_intr_actv &= ((1 << (LOCK_LEVEL + 1)) - 1);
	}

	/*
	 * Record one-time panic information and quiesce the other CPUs.
	 * Then print out the panic message and stack trace.
	 */
	if (on_panic_stack) {
		panic_data_t *pdp = (panic_data_t *)panicbuf;

		pdp->pd_version = PANICBUFVERS;
		pdp->pd_msgoff = sizeof (panic_data_t) - sizeof (panic_nv_t);

		if (t->t_panic_trap != NULL)
			panic_savetrap(pdp, t->t_panic_trap);
		else
			panic_saveregs(pdp, rp);

		(void) vsnprintf(&panicbuf[pdp->pd_msgoff],
		    PANICBUFSIZE - pdp->pd_msgoff, format, alist);

		/*
		 * Call into the platform code to stop the other CPUs.
		 * We currently have all interrupts blocked, and expect that
		 * the platform code will lower ipl only as far as needed to
		 * perform cross-calls, and will acquire as *few* locks as is
		 * possible -- panicstr is not set so we can still deadlock.
		 */
		panic_stopcpus(cp, t, s);

		panicstr = (char *)format;
		va_copy(panicargs, alist);
		panic_lbolt = LBOLT_NO_ACCOUNT;
		panic_lbolt64 = LBOLT_NO_ACCOUNT64;
		panic_hrestime = hrestime;
		panic_hrtime = gethrtime_waitfree();
		panic_thread = t;
		panic_regs = t->t_pcb;
		panic_reg = rp;
		panic_cpu = *cp;
		panic_ipl = spltoipl(s);
		panic_schedflag = schedflag;
		panic_bound_cpu = bound_cpu;
		panic_preempt = preempt;

		if (intr_stack != NULL) {
			panic_cpu.cpu_intr_stack = intr_stack;
			panic_cpu.cpu_intr_actv = intr_actv;
		}

		/*
		 * Lower ipl to 10 to keep clock() from running, but allow
		 * keyboard interrupts to enter the debugger.  These callbacks
		 * are executed with panicstr set so they can bypass locks.
		 */
		splx(ipltospl(CLOCK_LEVEL));
		panic_quiesce_hw(pdp);
		(void) FTRACE_STOP();
		(void) callb_execute_class(CB_CL_PANIC, NULL);

		if (log_intrq != NULL)
			log_flushq(log_intrq);

		/*
		 * If log_consq has been initialized and syslogd has started,
		 * print any messages in log_consq that haven't been consumed.
		 */
		if (log_consq != NULL && log_consq != log_backlogq)
			log_printq(log_consq);

		fm_banner();

#if defined(__x86)
		/*
		 * A hypervisor panic originates outside of Solaris, so we
		 * don't want to prepend the panic message with misleading
		 * pointers from within Solaris.
		 */
		if (!IN_XPV_PANIC())
#endif
			printf("\n\rpanic[cpu%d]/thread=%p: ", cp->cpu_id,
			    (void *)t);
		vprintf(format, alist);
		printf("\n\n");

		if (t->t_panic_trap != NULL) {
			panic_showtrap(t->t_panic_trap);
			printf("\n");
		}

		traceregs(rp);
		printf("\n");

		if (((boothowto & RB_DEBUG) || obpdebug) &&
		    !nopanicdebug && !panic_forced) {
			if (dumpvp != NULL) {
				debug_enter("panic: entering debugger "
				    "(continue to save dump)");
			} else {
				debug_enter("panic: entering debugger "
				    "(no dump device, continue to reboot)");
			}
		}

	} else if (panic_dump != 0 || panic_sync != 0 || panicstr != NULL) {
		printf("\n\rpanic[cpu%d]/thread=%p: ", cp->cpu_id, (void *)t);
		vprintf(format, alist);
		printf("\n");
	} else
		goto spin;

	/*
	 * Prior to performing sync or dump, we make sure that do_polled_io is
	 * set, but we'll leave ipl at 10; deadman(), a CY_HIGH_LEVEL cyclic,
	 * will re-enter panic if we are not making progress with sync or dump.
	 */

	/*
	 * Sync the filesystems.  Reset t_cred if not set because much of
	 * the filesystem code depends on CRED() being valid.
	 */
	if (!in_sync && panic_trigger(&panic_sync)) {
		if (t->t_cred == NULL)
			t->t_cred = kcred;
		splx(ipltospl(CLOCK_LEVEL));
		do_polled_io = 1;
		vfs_syncall();
	}

	/*
	 * Take the crash dump.  If the dump trigger is already set, try to
	 * enter the debugger again before rebooting the system.
	 */
	if (panic_trigger(&panic_dump)) {
		panic_dump_hw(s);
		splx(ipltospl(CLOCK_LEVEL));
		errorq_panic();
		do_polled_io = 1;
		dumpsys();
	} else if (((boothowto & RB_DEBUG) || obpdebug) && !nopanicdebug) {
		debug_enter("panic: entering debugger (continue to reboot)");
	} else
		printf("dump aborted: please record the above information!\n");

	if (halt_on_panic)
		mdboot(A_REBOOT, AD_HALT, NULL, B_FALSE);
	else
		mdboot(A_REBOOT, panic_bootfcn, panic_bootstr, B_FALSE);
spin:
	/*
	 * Restore ipl to at most CLOCK_LEVEL so we don't end up spinning
	 * and unable to jump into the debugger.
	 */
	splx(MIN(s, ipltospl(CLOCK_LEVEL)));
	for (;;)
		;
}
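panic_trigger(&panic_sync) and panic_trigger(&panic_dump) gate the sync and dump so that each runs at most once even if panicsys is re-entered — for example by deadman(), which the comment above notes will re-enter panic if sync or dump stops making progress. The plausible shape of such a one-shot trigger is a simple atomic test-and-set; a minimal C11 sketch with hypothetical names, not necessarily the illumos implementation:

#include <stdatomic.h>

/*
 * The first caller to flip the flag from 0 to 1 "wins" and performs
 * the guarded action; any re-entrant caller sees 1 and skips it.
 */
static int
trigger_once(atomic_int *tp)
{
	/* atomic_exchange returns the old value: exactly one caller sees 0. */
	return (atomic_exchange(tp, 1) == 0);
}

static atomic_int dump_trigger;

void
take_dump_once(void)
{
	if (trigger_once(&dump_trigger)) {
		/* perform the dump exactly once, even across re-entry */
	}
}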