Example #1
void
softcall_init(void)
{
	softcall_t *sc;

	softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
	softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
	for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
		sc->sc_next = softfree;
		softfree = sc;
	}
	mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
	    (void *)ipltospl(SPL8));
	softcall_state = SOFT_IDLE;
	softcall_tick = lbolt;

	if (softcall_delay < 0)
		softcall_delay = 1;

	/*
	 * softcall_delay is expressed in units of 10 milliseconds
	 * (1 == 10 ms), so convert it to clock ticks here.
	 */
	softcall_delay = softcall_delay * (hz/100);
	CPUSET_ZERO(*softcall_cpuset);
}
Example #2
int
_init(void)
{
	int error = EBUSY;
	int	status;
	extern int (*acpi_fp_setwake)();
	extern kmutex_t cpu_map_lock;

	mutex_init(&acpica_module_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&cpu_map_lock, NULL, MUTEX_SPIN,
	    (ddi_iblock_cookie_t)ipltospl(DISP_LEVEL));

	if ((error = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&acpica_module_lock);
		goto load_error;
	}

	AcpiGbl_EnableInterpreterSlack = (acpica_enable_interpreter_slack != 0);

	/* global ACPI CA initialization */
	if (ACPI_FAILURE(status = AcpiInitializeSubsystem()))
		cmn_err(CE_WARN, "!AcpiInitializeSubsystem failed: %d", status);

	/* initialize table manager */
	if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 0, 0)))
		cmn_err(CE_WARN, "!AcpiInitializeTables failed: %d", status);

	acpi_fp_setwake = acpica_ddi_setwake;

load_error:
	return (error);
}
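Example #3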
int
pcmu_pbm_register_intr(pcmu_pbm_t *pcbm_p)
{
	pcmu_t		*pcmu_p = pcbm_p->pcbm_pcmu_p;
	uint32_t	mondo;
	int		r = DDI_SUCCESS;

	pcmu_ib_nintr_clear(pcmu_p->pcmu_ib_p, pcmu_p->pcmu_inos[CBNINTR_PBM]);

	/*
	 * Install the PCI error interrupt handler.
	 */
	mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p,
	    pcmu_p->pcmu_inos[CBNINTR_PBM]);

	VERIFY(add_ivintr(mondo, pcmu_pil[CBNINTR_PBM],
	    (intrfunc)pcmu_pbm_error_intr, (caddr_t)pcmu_p, NULL, NULL) == 0);

	pcbm_p->pcbm_iblock_cookie = (void *)(uintptr_t)pcmu_pil[CBNINTR_PBM];

	/*
	 * Create the pokefault mutex at the PIL below the error interrupt.
	 */

	mutex_init(&pcbm_p->pcbm_pokeflt_mutex, NULL, MUTEX_DRIVER,
	    (void *)(uintptr_t)ipltospl(spltoipl(
	    (int)(uintptr_t)pcbm_p->pcbm_iblock_cookie) - 1));

	return (PCMU_ATTACH_RETCODE(PCMU_PBM_OBJ, PCMU_OBJ_INTR_ADD, r));
}
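Example #4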
int
kcpc_hw_add_ovf_intr(kcpc_ctx_t *(*handler)(caddr_t))
{
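	/*
	 * Overflow interrupts are wired up only on P6-family CPUs.  On
	 * success, hand back the spl corresponding to APIC_PCINT_IPL
	 * for the caller's use.
	 */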
	if (x86_type != X86_TYPE_P6)
		return (0);
	overflow_intr_handler = handler;
	return (ipltospl(APIC_PCINT_IPL));
}
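Example #5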
/*
 * Called once per second on a CPU from the cyclic subsystem's
 * CY_HIGH_LEVEL interrupt.  (No longer restricted to cpu0.)
 */
void
tsc_tick(void)
{
	hrtime_t now, delta;
	ushort_t spl;

	/*
	 * Before we set the new variables, we set the shadow values.  This
	 * allows for lock free operation in dtrace_gethrtime().
	 */
	lock_set_spl((lock_t *)&shadow_hres_lock + HRES_LOCK_OFFSET,
	    ipltospl(CBE_HIGH_PIL), &spl);

	shadow_tsc_hrtime_base = tsc_hrtime_base;
	shadow_tsc_last = tsc_last;
	shadow_nsec_scale = nsec_scale;

	shadow_hres_lock++;
	splx(spl);

	CLOCK_LOCK(&spl);

	now = tsc_read();

	if (gethrtimef == tsc_gethrtime_delta)
		now += tsc_sync_tick_delta[CPU->cpu_id];

	if (now < tsc_last) {
		/*
		 * The TSC has just jumped into the past.  We assume that
		 * this is due to a suspend/resume cycle, and we're going
		 * to use the _current_ value of TSC as the delta.  This
		 * will keep tsc_hrtime_base correct.  We're also going to
		 * assume that the rate of the TSC does not change after a
		 * suspend/resume cycle (i.e., nsec_scale remains the same).
		 */
		delta = now;
		tsc_last_jumped += tsc_last;
		tsc_jumped = 1;
	} else {
		/*
		 * Determine the number of TSC ticks since the last clock
		 * tick, and add that to the hrtime base.
		 */
		delta = now - tsc_last;
	}

	TSC_CONVERT_AND_ADD(delta, tsc_hrtime_base, nsec_scale);
	tsc_last = now;

	CLOCK_UNLOCK(spl);
}
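The shadow copies above let a reader snapshot all three variables without taking a lock, retrying if a writer intervened. A simplified, hypothetical sketch of that reader side (not the actual dtrace_gethrtime() code):

	int seq;
	hrtime_t base, last;
	uint32_t scale;

	do {
		seq = shadow_hres_lock;
		base = shadow_tsc_hrtime_base;
		last = shadow_tsc_last;
		scale = shadow_nsec_scale;
	} while ((seq & ~1) != shadow_hres_lock);	/* writer held or bumped the lock */
	/* ... compute the hrtime from base, last, and scale ... */

Example #6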
int
_init(void)
{
	int status;

	status = ddi_soft_state_init(&ds1287_state, sizeof (struct ds1287), 0);
	if (status != 0) {
		return (status);
	}

	if ((status = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&ds1287_state);
		return (status);
	}

	/*
	 * The iblock cookie encodes the interrupt priority; mutex_init()
	 * treats it as an spl and creates ds1287_reg_mutex as a spin
	 * lock at that level.
	 */
	ds1287_hi_iblock = (ddi_iblock_cookie_t)(uintptr_t)
	    ipltospl(ds1287_interrupt_priority);
	mutex_init(&ds1287_reg_mutex, NULL, MUTEX_DRIVER, ds1287_hi_iblock);

	mutex_enter(&ds1287_reg_mutex);
	/* Select Bank 1 */
	select_bank(1);
	DS1287_ADDR_REG = RTC_B;
	DS1287_DATA_REG = (RTC_DM | RTC_HM);
	mutex_exit(&ds1287_reg_mutex);

	tod_ops.tod_get = todds_get;
	tod_ops.tod_set = todds_set;

	/*
	 * If v_pmc_addr_reg isn't set, it's because it wasn't set in
	 * sun4u/os/fillsysinfo.c:have_pmc(). This means the real (pmc)
	 * watchdog routines (sun4u/io/pmc.c) will not be used. If the
	 * user sets watchdog_enable in /etc/system, we'll need to use
	 * our own NOP routines.
	 */
	if (v_pmc_addr_reg == NULL) {
		tod_ops.tod_set_watchdog_timer = todds_set_watchdog_timer;
		tod_ops.tod_clear_watchdog_timer = todds_clear_watchdog_timer;
	}
	tod_ops.tod_set_power_alarm = todds_set_power_alarm;
	tod_ops.tod_clear_power_alarm = todds_clear_power_alarm;
	tod_ops.tod_get_cpufrequency = todds_get_cpufrequency;

	return (status);
}
Example #7
/*
 * Do cross call to all other CPUs with absolutely no waiting or handshaking.
 * This should only be used for extraordinary operations, like panic(), which
 * need to work, in some fashion, in a not completely functional system.
 * All other uses that want minimal waiting should use xc_call_nowait().
 */
void
xc_priority(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	xc_func_t func)
{
	extern int IGNORE_KERNEL_PREEMPTION;
	int save_spl = splr(ipltospl(XC_HI_PIL));
	int save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;

	IGNORE_KERNEL_PREEMPTION = 1;
	xc_priority_common((xc_func_t)func, arg1, arg2, arg3, set);
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
	splx(save_spl);
}
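A hypothetical caller might look like the sketch below. my_stop_func is illustrative, and CPUSET2BV() (the cpuset_t-to-bit-vector conversion used by callers of the x86 cross-call code) is an assumption about the caller's context:

	cpuset_t set;

	CPUSET_ALL(set);		/* target every CPU ... */
	CPUSET_DEL(set, CPU->cpu_id);	/* ... except this one */
	xc_priority(0, 0, 0, CPUSET2BV(set), my_stop_func);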
Example #8
/* ARGSUSED */
void
mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	ASSERT(ibc < (void *)KERNELBASE);	/* see 1215173 */

	if ((intptr_t)ibc > ipltospl(LOCK_LEVEL) && ibc < (void *)KERNELBASE) {
		ASSERT(type != MUTEX_ADAPTIVE && type != MUTEX_DEFAULT);
		MUTEX_SET_TYPE(lp, MUTEX_SPIN);
		LOCK_INIT_CLEAR(&lp->m_spin.m_spinlock);
		LOCK_INIT_HELD(&lp->m_spin.m_dummylock);
		lp->m_spin.m_minspl = (int)(intptr_t)ibc;
	} else {
		ASSERT(type != MUTEX_SPIN);
		MUTEX_SET_TYPE(lp, MUTEX_ADAPTIVE);
		MUTEX_CLEAR_LOCK_AND_WAITERS(lp);
	}
}
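The branch above gives the ibc argument its double meaning throughout these examples: NULL (or any value at or below ipltospl(LOCK_LEVEL)) yields an adaptive mutex, while an ipltospl() value above LOCK_LEVEL yields a spin lock whose minimum spl is that value. A minimal sketch of the two conventions, where the lock names and the XX_PIL interrupt level (assumed to be above LOCK_LEVEL) are hypothetical:

	kmutex_t xx_adaptive;
	kmutex_t xx_spin;

	/* Adaptive (blocking) mutex: no interrupt block cookie. */
	mutex_init(&xx_adaptive, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Spin mutex: the cookie carries the spl, so mutex_enter() will
	 * raise the priority level to at least XX_PIL while it is held.
	 */
	mutex_init(&xx_spin, NULL, MUTEX_SPIN, (void *)ipltospl(XX_PIL));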
Example #9
RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMtx)
{
    /*
     * Allocate.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (RT_UNLIKELY(!pThis))
        return VERR_NO_MEMORY;

    /*
     * Initialize.
     */
    pThis->u32Magic     = RTSEMMUTEX_MAGIC;
    pThis->cRecursions  = 0;
    pThis->cWaiters     = 0;
    pThis->cRefs        = 1;
    pThis->hOwnerThread = NIL_RTNATIVETHREAD;
    mutex_init(&pThis->Mtx, "IPRT Mutex", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
    cv_init(&pThis->Cnd, "IPRT CVM", CV_DRIVER, NULL);
    *phMtx = pThis;
    return VINF_SUCCESS;
}
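Example #10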
RTDECL(int)  RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
{
    RT_ASSERT_PREEMPTIBLE();
    AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);

    /*
     * Allocate.
     */
    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;

    /*
     * Initialize & return.
     */
    pThis->u32Magic  = RTSPINLOCK_MAGIC;
    pThis->fFlags    = fFlags;
    pThis->fIntSaved = 0;
    /** @todo Consider a different PIL when interrupt safety is not required. */
    mutex_init(&pThis->Mtx, "IPRT Spinlock", MUTEX_SPIN, (void *)ipltospl(PIL_MAX));
    *pSpinlock = pThis;
    return VINF_SUCCESS;
}
Example #11
/*
 * Initiate cross call processing.
 */
static void
xc_common(
	xc_func_t func,
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	uint_t command)
{
	int c;
	struct cpu *cpup;
	xc_msg_t *msg;
	xc_data_t *data;
	int cnt;
	int save_spl;

	if (!xc_initialized) {
		if (BT_TEST(set, CPU->cpu_id) && (CPU->cpu_flags & CPU_READY) &&
		    func != NULL)
			(void) (*func)(arg1, arg2, arg3);
		return;
	}

	save_spl = splr(ipltospl(XC_HI_PIL));

	/*
	 * fill in cross call data
	 */
	data = &CPU->cpu_m.xc_data;
	data->xc_func = func;
	data->xc_a1 = arg1;
	data->xc_a2 = arg2;
	data->xc_a3 = arg3;

	/*
	 * Post messages to all CPUs involved that are CPU_READY
	 */
	CPU->cpu_m.xc_wait_cnt = 0;
	for (c = 0; c < max_ncpus; ++c) {
		if (!BT_TEST(set, c))
			continue;
		cpup = cpu[c];
		if (cpup == NULL || !(cpup->cpu_flags & CPU_READY))
			continue;

		/*
		 * Fill out a new message.
		 */
		msg = xc_extract(&CPU->cpu_m.xc_free);
		if (msg == NULL)
			panic("Ran out of free xc_msg_t's");
		msg->xc_command = command;
		if (msg->xc_master != CPU->cpu_id)
			panic("msg %p has wrong xc_master", (void *)msg);
		msg->xc_slave = c;

		/*
		 * Increment my work count for all messages that I'll
		 * transition from DONE to FREE.
		 * Also remember how many XC_MSG_WAITINGs to look for
		 */
		(void) xc_increment(&CPU->cpu_m);
		if (command == XC_MSG_SYNC)
			++CPU->cpu_m.xc_wait_cnt;

		/*
		 * Increment the target CPU work count then insert the message
		 * in the target msgbox. If I post the first bit of work
		 * for the target to do, send an IPI to the target CPU.
		 */
		cnt = xc_increment(&cpup->cpu_m);
		xc_insert(&cpup->cpu_m.xc_msgbox, msg);
		if (cpup != CPU) {
			if (cnt == 0) {
				CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
				send_dirint(c, XC_HI_PIL);
				if (xc_collect_enable)
					++xc_total_cnt;
			} else if (xc_collect_enable) {
				++xc_multi_cnt;
			}
		}
	}

	/*
	 * Now drop into the message handler until all work is done
	 */
	(void) xc_serv(NULL, NULL);
	splx(save_spl);
}
Example #12
void
log_init(void)
{
	int log_maxzones;

	/*
	 * Create a backlog queue to consume console messages during periods
	 * when there is no console reader (e.g. before syslogd(1M) starts).
	 */
	log_backlogq = log_consq = log_makeq(0, LOG_HIWAT, NULL);

	/*
	 * Create a queue to hold free messages of size <= LOG_MSGSIZE.
	 * Calls from high-level interrupt handlers will do a getq_noenab()
	 * from this queue, so its q_lock must be a maximum SPL spin lock.
	 */
	log_freeq = log_makeq(LOG_MINFREE, LOG_MAXFREE, (void *)ipltospl(SPL8));

	/*
	 * Create a queue for messages from high-level interrupt context.
	 * These messages are drained via softcall, or explicitly by panic().
	 */
	log_intrq = log_makeq(0, LOG_HIWAT, (void *)ipltospl(SPL8));

	/*
	 * Create a queue to hold the most recent 8K of console messages.
	 * Useful for debugging.  Required by the "$<msgbuf" adb macro.
	 */
	log_recentq = log_makeq(0, LOG_RECENTSIZE, NULL);

	/*
	 * Create an id space for clone devices opened via /dev/log.
	 * Need to limit the number of zones to avoid exceeding the
	 * available minor number space.
	 */
	log_maxzones = (L_MAXMIN32 - LOG_LOGMIN) / LOG_NUMCLONES - 1;
	if (log_maxzones < maxzones)
		maxzones = log_maxzones;
	log_minorspace = id_space_create("logminor_space", LOG_LOGMIN + 1,
	    L_MAXMIN32);
	/*
	 * Put ourselves on the ZSD list.  Note that zones have not been
	 * initialized yet, but our constructor will be called on the global
	 * zone when they are.
	 */
	zone_key_create(&log_zone_key, log_zoneinit, NULL, log_zonefree);

	/*
	 * Initialize backlog structure.
	 */
	log_backlog.log_zoneid = GLOBAL_ZONEID;
	log_backlog.log_minor = LOG_BACKLOG;

	/* Allocate kmem cache for conslog's log structures */
	log_cons_cache = kmem_cache_create("log_cons_cache",
	    sizeof (struct log), 0, log_cons_constructor, log_cons_destructor,
	    NULL, NULL, NULL, 0);

	/*
	 * Let the logging begin.
	 */
	log_update(&log_backlog, log_backlogq, SL_CONSOLE, log_console);

	/*
	 * Now that logging is enabled, emit the OS banner.
	 */
	printf("\rSunOS Release %s Version %s %u-bit\n",
	    utsname.release, utsname.version, NBBY * (uint_t)sizeof (void *));
	printf("Copyright (c) 1983, 2010, Oracle and/or its affiliates. "
	    "All rights reserved.\n");
	printf("Copyright 2015 Nexenta Systems, Inc.  All rights reserved.\n");
#ifdef DEBUG
	printf("DEBUG enabled\n");
#endif
}
Example #13
void
sc_create(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	sc_t *sc_p;
	uint64_t paddr;

#ifdef lint
	dip = dip;
#endif

	if (!pci_stream_buf_exists)
		return;

	/*
	 * Allocate streaming cache state structure and link it to
	 * the pci state structure.
	 */
	sc_p = (sc_t *)kmem_zalloc(sizeof (sc_t), KM_SLEEP);
	pci_p->pci_sc_p = sc_p;
	sc_p->sc_pci_p = pci_p;

	pci_sc_setup(sc_p);
	sc_p->sc_sync_reg_pa = va_to_pa((char *)sc_p->sc_sync_reg);

	DEBUG3(DBG_ATTACH, dip, "sc_create: ctrl=%x, invl=%x, sync=%x\n",
		sc_p->sc_ctrl_reg, sc_p->sc_invl_reg,
		sc_p->sc_sync_reg);
	DEBUG2(DBG_ATTACH, dip, "sc_create: ctx_invl=%x ctx_match=%x\n",
		sc_p->sc_ctx_invl_reg, sc_p->sc_ctx_match_reg);
	DEBUG3(DBG_ATTACH, dip,
		"sc_create: data_diag=%x, tag_diag=%x, ltag_diag=%x\n",
		sc_p->sc_data_diag_acc, sc_p->sc_tag_diag_acc,
		sc_p->sc_ltag_diag_acc);

	/*
	 * Allocate the flush/sync buffer.  Make sure it's properly
	 * aligned.
	 */
	sc_p->sc_sync_flag_base =
	    vmem_xalloc(static_alloc_arena, PCI_SYNC_FLAG_SIZE,
		PCI_SYNC_FLAG_SIZE, 0, 0, NULL, NULL, VM_SLEEP);
	sc_p->sc_sync_flag_vaddr = (uint64_t *)sc_p->sc_sync_flag_base;
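	/*
	 * Translate the buffer's virtual address to a physical address:
	 * the page frame number shifted up to a page base, plus the
	 * offset within the page.
	 */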
	paddr = (uint64_t)hat_getpfnum(kas.a_hat,
	    (caddr_t)sc_p->sc_sync_flag_vaddr);
	paddr <<= MMU_PAGESHIFT;
	paddr += (uint64_t)
	    ((uintptr_t)sc_p->sc_sync_flag_vaddr & ~MMU_PAGEMASK);
	sc_p->sc_sync_flag_pa = paddr;
	DEBUG2(DBG_ATTACH, dip, "sc_create: sync buffer - vaddr=%x paddr=%x\n",
	    sc_p->sc_sync_flag_vaddr, sc_p->sc_sync_flag_pa);

	/*
	 * Create a mutex to go along with it.  While the mutex is held,
	 * all interrupts should be blocked.  This will prevent driver
	 * interrupt routines from attempting to acquire the mutex while
	 * held by a lower priority interrupt routine.  Note also that
	 * we now block cross calls as well, to prevent issues with
	 * relocation.
	 */
	mutex_init(&sc_p->sc_sync_mutex, NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XCALL_PIL));

	sc_configure(sc_p);
}
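Example #14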
RTDECL(int)  RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
                                     const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
    AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();

    AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (pThis)
    {
        pThis->u32Magic     = RTSEMEVENTMULTI_MAGIC;
        pThis->cRefs        = 1;
        pThis->fStateAndGen = RTSEMEVENTMULTISOL_STATE_GEN_INIT;
        mutex_init(&pThis->Mtx, "IPRT Multiple Release Event Semaphore", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
        cv_init(&pThis->Cnd, "IPRT CV", CV_DRIVER, NULL);

        *phEventMultiSem = pThis;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
Example #15
int
spl_xcall(void)
{
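	/*
	 * Raise pil to XCALL_PIL to block cross calls;
	 * splr() returns the previous spl for splx().
	 */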
	return (splr(ipltospl(XCALL_PIL)));
}
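A caller pairs the raised level with splx() to restore the previous priority; a minimal hypothetical sketch:

	int s = spl_xcall();	/* block cross calls */
	/* ... touch state shared with cross-call handlers ... */
	splx(s);		/* drop back to the previous level */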
Example #16
void
panicsys(const char *format, va_list alist, struct regs *rp, int on_panic_stack)
{
	int s = spl8();
	kthread_t *t = curthread;
	cpu_t *cp = CPU;

	caddr_t intr_stack = NULL;
	uint_t intr_actv;

	ushort_t schedflag = t->t_schedflag;
	cpu_t *bound_cpu = t->t_bound_cpu;
	char preempt = t->t_preempt;

	(void) setjmp(&t->t_pcb);
	t->t_flag |= T_PANIC;

	t->t_schedflag |= TS_DONT_SWAP;
	t->t_bound_cpu = cp;
	t->t_preempt++;

	/*
	 * Switch lbolt to event driven mode.
	 */
	lbolt_hybrid = lbolt_event_driven;

	panic_enter_hw(s);

	/*
	 * If we're on the interrupt stack and an interrupt thread is available
	 * in this CPU's pool, preserve the interrupt stack by detaching an
	 * interrupt thread and making its stack the intr_stack.
	 */
	if (CPU_ON_INTR(cp) && cp->cpu_intr_thread != NULL) {
		kthread_t *it = cp->cpu_intr_thread;

		intr_stack = cp->cpu_intr_stack;
		intr_actv = cp->cpu_intr_actv;

		cp->cpu_intr_stack = thread_stk_init(it->t_stk);
		cp->cpu_intr_thread = it->t_link;

		/*
		 * Clear only the high level bits of cpu_intr_actv.
		 * We want to indicate that high-level interrupts are
		 * not active without destroying the low-level interrupt
		 * information stored there.
		 */
		cp->cpu_intr_actv &= ((1 << (LOCK_LEVEL + 1)) - 1);
	}

	/*
	 * Record one-time panic information and quiesce the other CPUs.
	 * Then print out the panic message and stack trace.
	 */
	if (on_panic_stack) {
		panic_data_t *pdp = (panic_data_t *)panicbuf;

		pdp->pd_version = PANICBUFVERS;
		pdp->pd_msgoff = sizeof (panic_data_t) - sizeof (panic_nv_t);

		if (t->t_panic_trap != NULL)
			panic_savetrap(pdp, t->t_panic_trap);
		else
			panic_saveregs(pdp, rp);

		(void) vsnprintf(&panicbuf[pdp->pd_msgoff],
		    PANICBUFSIZE - pdp->pd_msgoff, format, alist);

		/*
		 * Call into the platform code to stop the other CPUs.
		 * We currently have all interrupts blocked, and expect that
		 * the platform code will lower ipl only as far as needed to
		 * perform cross-calls, and will acquire as *few* locks as is
		 * possible -- panicstr is not set so we can still deadlock.
		 */
		panic_stopcpus(cp, t, s);

		panicstr = (char *)format;
		va_copy(panicargs, alist);
		panic_lbolt = LBOLT_NO_ACCOUNT;
		panic_lbolt64 = LBOLT_NO_ACCOUNT64;
		panic_hrestime = hrestime;
		panic_hrtime = gethrtime_waitfree();
		panic_thread = t;
		panic_regs = t->t_pcb;
		panic_reg = rp;
		panic_cpu = *cp;
		panic_ipl = spltoipl(s);
		panic_schedflag = schedflag;
		panic_bound_cpu = bound_cpu;
		panic_preempt = preempt;

		if (intr_stack != NULL) {
			panic_cpu.cpu_intr_stack = intr_stack;
			panic_cpu.cpu_intr_actv = intr_actv;
		}

		/*
		 * Lower ipl to 10 to keep clock() from running, but allow
		 * keyboard interrupts to enter the debugger.  These callbacks
		 * are executed with panicstr set so they can bypass locks.
		 */
		splx(ipltospl(CLOCK_LEVEL));
		panic_quiesce_hw(pdp);
		(void) FTRACE_STOP();
		(void) callb_execute_class(CB_CL_PANIC, NULL);

		if (log_intrq != NULL)
			log_flushq(log_intrq);

		/*
		 * If log_consq has been initialized and syslogd has started,
		 * print any messages in log_consq that haven't been consumed.
		 */
		if (log_consq != NULL && log_consq != log_backlogq)
			log_printq(log_consq);

		fm_banner();

#if defined(__x86)
		/*
		 * A hypervisor panic originates outside of Solaris, so we
		 * don't want to prepend the panic message with misleading
		 * pointers from within Solaris.
		 */
		if (!IN_XPV_PANIC())
#endif
			printf("\n\rpanic[cpu%d]/thread=%p: ", cp->cpu_id,
			    (void *)t);
		vprintf(format, alist);
		printf("\n\n");

		if (t->t_panic_trap != NULL) {
			panic_showtrap(t->t_panic_trap);
			printf("\n");
		}

		traceregs(rp);
		printf("\n");

		if (((boothowto & RB_DEBUG) || obpdebug) &&
		    !nopanicdebug && !panic_forced) {
			if (dumpvp != NULL) {
				debug_enter("panic: entering debugger "
				    "(continue to save dump)");
			} else {
				debug_enter("panic: entering debugger "
				    "(no dump device, continue to reboot)");
			}
		}

	} else if (panic_dump != 0 || panic_sync != 0 || panicstr != NULL) {
		printf("\n\rpanic[cpu%d]/thread=%p: ", cp->cpu_id, (void *)t);
		vprintf(format, alist);
		printf("\n");
	} else
		goto spin;

	/*
	 * Prior to performing sync or dump, we make sure that do_polled_io is
	 * set, but we'll leave ipl at 10; deadman(), a CY_HIGH_LEVEL cyclic,
	 * will re-enter panic if we are not making progress with sync or dump.
	 */

	/*
	 * Sync the filesystems.  Reset t_cred if not set because much of
	 * the filesystem code depends on CRED() being valid.
	 */
	if (!in_sync && panic_trigger(&panic_sync)) {
		if (t->t_cred == NULL)
			t->t_cred = kcred;
		splx(ipltospl(CLOCK_LEVEL));
		do_polled_io = 1;
		vfs_syncall();
	}

	/*
	 * Take the crash dump.  If the dump trigger is already set, try to
	 * enter the debugger again before rebooting the system.
	 */
	if (panic_trigger(&panic_dump)) {
		panic_dump_hw(s);
		splx(ipltospl(CLOCK_LEVEL));
		errorq_panic();
		do_polled_io = 1;
		dumpsys();
	} else if (((boothowto & RB_DEBUG) || obpdebug) && !nopanicdebug) {
		debug_enter("panic: entering debugger (continue to reboot)");
	} else
		printf("dump aborted: please record the above information!\n");

	if (halt_on_panic)
		mdboot(A_REBOOT, AD_HALT, NULL, B_FALSE);
	else
		mdboot(A_REBOOT, panic_bootfcn, panic_bootstr, B_FALSE);
spin:
	/*
	 * Restore ipl to at most CLOCK_LEVEL so we don't end up spinning
	 * and unable to jump into the debugger.
	 */
	splx(MIN(s, ipltospl(CLOCK_LEVEL)));
	for (;;)
		;
}
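Example #17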
static int
ds1287_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct ds1287 *softsp;

	DPRINTF("ds1287_attach\n");
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (instance != -1) {
		cmn_err(CE_WARN, "ds1287_attach: Another instance is already "
		    "attached.");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	if (v_rtc_addr_reg == NULL) {
		cmn_err(CE_WARN, "ds1287_attach: v_rtc_addr_reg is NULL");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate softc information.
	 */
	if (ddi_soft_state_zalloc(ds1287_state, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to allocate "
		    "soft states.");
		return (DDI_FAILURE);
	}

	softsp = ddi_get_soft_state(ds1287_state, instance);
	DPRINTF("ds1287_attach: instance=%d softsp=0x%p\n", instance,
	    (void *)softsp);

	softsp->dip = dip;

	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "interrupt-priorities", (caddr_t)&ds1287_interrupt_priority,
	    sizeof (int)) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to create \""
		    "interrupt-priorities\" property.");
		goto error;
	}

	/* add the softint */
	ds1287_lo_iblock = (ddi_iblock_cookie_t)(uintptr_t)
	    ipltospl(ds1287_softint_priority);

	if (ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &ds1287_softintr_id,
	    &ds1287_lo_iblock, NULL, ds1287_softintr, (caddr_t)softsp) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to add low interrupt.");
		goto error1;
	}

	/* add the hi interrupt */
	if (ddi_add_intr(dip, 0, NULL, (ddi_idevice_cookie_t *)
	    &ds1287_hi_iblock, ds1287_intr, NULL) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to add high "
		    "interrupt.");
		goto error2;
	}

	/*
	 * Combination of instance number and clone number 0 is used for
	 * creating the minor node.
	 */
	if (ddi_create_minor_node(dip, "power_button", S_IFCHR,
	    (instance << 8) + 0, "ddi_power_button", NULL) == DDI_FAILURE) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to create minor node");
		goto error3;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

error3:
	ddi_remove_intr(dip, 0, NULL);
error2:
	ddi_remove_softintr(ds1287_softintr_id);
error1:
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "interrupt-priorities");
error:
	ddi_soft_state_free(ds1287_state, instance);
	return (DDI_FAILURE);
}