Example #1
void
lock_set_spl_spin(lock_t *lp, int new_pil, ushort_t *old_pil_addr, int old_pil)
{
	int spin_count = 1;
	int backoff;	/* current backoff */
	int backctr;	/* ctr for backoff */

	if (panicstr)
		return;

	if (ncpus == 1)
		panic("lock_set_spl: %p lock held and only one CPU", lp);

	ASSERT(new_pil > LOCK_LEVEL);

	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}
	do {
		splx(old_pil);
		while (LOCK_HELD(lp)) {
			if (panicstr) {
				*old_pil_addr = (ushort_t)splr(new_pil);
				return;
			}
			spin_count++;
			/*
			 * Add an exponential backoff delay before trying again
			 * to touch the mutex data structure.
			 * spin_count test and call to nulldev are to prevent
			 * compiler optimizer from eliminating the delay loop.
			 */
			if (&plat_lock_delay) {
				plat_lock_delay(&backoff);
			} else {
				for (backctr = backoff; backctr; backctr--) {
					if (!spin_count) (void) nulldev();
				}
				backoff = backoff << 1;		/* double it */
				if (backoff > BACKOFF_CAP) {
					backoff = BACKOFF_CAP;
				}

				SMT_PAUSE();
			}
		}
		old_pil = splr(new_pil);
	} while (!lock_spin_try(lp));

	*old_pil_addr = (ushort_t)old_pil;

	if (spin_count) {
		LOCKSTAT_RECORD(LS_LOCK_SET_SPL_SPIN, lp, spin_count);
	}

	LOCKSTAT_RECORD(LS_LOCK_SET_SPL_ACQUIRE, lp, spin_count);
}
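The loop above is essentially spin-with-exponential-backoff: while the lock is held, delay for a growing number of iterations (doubling up to a cap) between attempts so that contending CPUs do not hammer the same cache line. A minimal sketch of that pattern using C11 atomics follows; the BACKOFF_BASE/BACKOFF_CAP values, cpu_pause() stub and spin_acquire() name are illustrative stand-ins, not the kernel's own symbols.

#include <stdatomic.h>

#define BACKOFF_BASE	50	/* hypothetical initial delay, in pause iterations */
#define BACKOFF_CAP	1600	/* hypothetical maximum delay */

static void
cpu_pause(void)
{
	/* stand-in for SMT_PAUSE(): hint to the CPU that we are busy-waiting */
}

static void
spin_acquire(atomic_flag *lock)
{
	int backoff = BACKOFF_BASE;
	int i;

	/* spin until the test-and-set observes the lock as clear */
	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire)) {
		for (i = 0; i < backoff; i++)
			cpu_pause();		/* back off before touching the lock again */
		backoff <<= 1;			/* double the delay ... */
		if (backoff > BACKOFF_CAP)
			backoff = BACKOFF_CAP;	/* ... up to a fixed cap */
	}
}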
Example #2
/*
 * Do cross call to all other CPUs with absolutely no waiting or handshaking.
 * This should only be used for extraordinary operations, like panic(), which
 * need to work, in some fashion, in a not completely functional system.
 * All other uses that want minimal waiting should use xc_call_nowait().
 */
void
xc_priority(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	xc_func_t func)
{
	extern int IGNORE_KERNEL_PREEMPTION;
	int save_spl = splr(ipltospl(XC_HI_PIL));
	int save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;

	IGNORE_KERNEL_PREEMPTION = 1;
	xc_priority_common((xc_func_t)func, arg1, arg2, arg3, set);
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
	splx(save_spl);
}
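A hedged caller sketch for the interface above: build a CPU bitmask, then hand it to xc_priority() together with the handler. The xc_example_func()/xc_example_broadcast() names are hypothetical; BT_BITOUL/BT_SET, NCPU, max_ncpus and CPU->cpu_id are the usual Solaris kernel helpers also seen in the other examples here.

static int
xc_example_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	/* runs at XC_HI_PIL on each target CPU */
	return (0);
}

static void
xc_example_broadcast(void)
{
	ulong_t set[BT_BITOUL(NCPU)];
	int c;

	bzero(set, sizeof (set));
	for (c = 0; c < max_ncpus; c++)
		if (c != CPU->cpu_id)
			BT_SET(set, c);		/* every CPU except the current one */

	xc_priority(0, 0, 0, set, xc_example_func);
}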
Example #3
/*
 * mutex_vector_tryenter() is called from the assembly mutex_tryenter()
 * routine if the lock is held or is not of type MUTEX_ADAPTIVE.
 */
int
mutex_vector_tryenter(mutex_impl_t *lp)
{
	int s;

	if (MUTEX_TYPE_ADAPTIVE(lp))
		return (0);		/* we already tried in assembly */

	if (!MUTEX_TYPE_SPIN(lp)) {
		mutex_panic("mutex_tryenter: bad mutex", lp);
		return (0);
	}

	s = splr(lp->m_spin.m_minspl);
	if (lock_try(&lp->m_spin.m_spinlock)) {
		lp->m_spin.m_oldspl = (ushort_t)s;
		return (1);
	}
	splx(s);
	return (0);
}
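For context, a hedged sketch of how callers typically consume the tryenter path; mutex_tryenter()/mutex_exit() are the standard kernel mutex entry points that fall back to the vector routine above for spin mutexes, while some_lock and do_work() are illustrative placeholders.

static kmutex_t some_lock;	/* illustrative; initialized with mutex_init() elsewhere */

static void
do_work(void)
{
	/* hypothetical critical section */
}

static void
try_do_work(void)
{
	if (mutex_tryenter(&some_lock)) {	/* non-blocking acquisition attempt */
		do_work();
		mutex_exit(&some_lock);
	}
	/* else: the lock was busy; take a fallback path instead of blocking */
}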
Example #4
/*
 * Initiate cross call processing.
 */
static void
xc_common(
	xc_func_t func,
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	uint_t command)
{
	int c;
	struct cpu *cpup;
	xc_msg_t *msg;
	xc_data_t *data;
	int cnt;
	int save_spl;

	if (!xc_initialized) {
		if (BT_TEST(set, CPU->cpu_id) && (CPU->cpu_flags & CPU_READY) &&
		    func != NULL)
			(void) (*func)(arg1, arg2, arg3);
		return;
	}

	save_spl = splr(ipltospl(XC_HI_PIL));

	/*
	 * fill in cross call data
	 */
	data = &CPU->cpu_m.xc_data;
	data->xc_func = func;
	data->xc_a1 = arg1;
	data->xc_a2 = arg2;
	data->xc_a3 = arg3;

	/*
	 * Post messages to all CPUs involved that are CPU_READY
	 */
	CPU->cpu_m.xc_wait_cnt = 0;
	for (c = 0; c < max_ncpus; ++c) {
		if (!BT_TEST(set, c))
			continue;
		cpup = cpu[c];
		if (cpup == NULL || !(cpup->cpu_flags & CPU_READY))
			continue;

		/*
		 * Fill out a new message.
		 */
		msg = xc_extract(&CPU->cpu_m.xc_free);
		if (msg == NULL)
			panic("Ran out of free xc_msg_t's");
		msg->xc_command = command;
		if (msg->xc_master != CPU->cpu_id)
			panic("msg %p has wrong xc_master", (void *)msg);
		msg->xc_slave = c;

		/*
		 * Increment my work count for all messages that I'll
		 * transition from DONE to FREE.
		 * Also remember how many XC_MSG_WAITINGs to look for
		 */
		(void) xc_increment(&CPU->cpu_m);
		if (command == XC_MSG_SYNC)
			++CPU->cpu_m.xc_wait_cnt;

		/*
		 * Increment the target CPU work count then insert the message
		 * in the target msgbox. If I post the first bit of work
		 * for the target to do, send an IPI to the target CPU.
		 */
		cnt = xc_increment(&cpup->cpu_m);
		xc_insert(&cpup->cpu_m.xc_msgbox, msg);
		if (cpup != CPU) {
			if (cnt == 0) {
				CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
				send_dirint(c, XC_HI_PIL);
				if (xc_collect_enable)
					++xc_total_cnt;
			} else if (xc_collect_enable) {
				++xc_multi_cnt;
			}
		}
	}

	/*
	 * Now drop into the message handler until all work is done
	 */
	(void) xc_serv(NULL, NULL);
	splx(save_spl);
}
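The xc_insert()/xc_extract() helpers used above are not shown here; per-CPU message queues like this are commonly implemented as lock-free singly linked lists pushed with a compare-and-swap. A minimal sketch of that push pattern in C11 follows; msg_t and queue_push() are illustrative names, not the kernel's xc_msg_t machinery.

#include <stdatomic.h>
#include <stddef.h>

typedef struct msg {
	struct msg *next;
	int payload;
} msg_t;

static void
queue_push(_Atomic(msg_t *) *head, msg_t *m)
{
	msg_t *old = atomic_load(head);

	do {
		m->next = old;	/* link the new node in front of the current head */
	} while (!atomic_compare_exchange_weak(head, &old, m));
}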
Example #5
int
spl_xcall(void)
{
	return (splr(ipltospl(XCALL_PIL)));
}
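A hedged usage sketch: spl_xcall() raises the PIL to the cross-call level and returns the previous level, which the caller later passes back to splx(). The protect_state() helper is an illustrative placeholder.

static void
protect_state(void)
{
	/* hypothetical work that must not race with a cross call */
}

static void
do_protected_update(void)
{
	int s = spl_xcall();	/* block interrupts up to the cross-call PIL */

	protect_state();

	splx(s);		/* restore the previous PIL */
}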
Example #6
DECLHIDDEN(int) rtR0InitNative(void)
{
    /*
     * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
     */
    int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
    if (RT_SUCCESS(rc))
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /*
         * Detect whether spl*() is preserving the interrupt flag or not.
         * This is a problem on S10.
         */
        RTCCUINTREG uOldFlags = ASMIntDisableFlags();
        int iOld = splr(DISP_LEVEL);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        splx(iOld);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        ASMSetFlags(uOldFlags);
#else
        /* PORTME: See if the amd64/x86 problem applies to this architecture. */
#endif
        /*
         * Mandatory: Preemption offsets.
         */
        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_did", &g_offrtSolThreadId);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_did!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_intr", &g_offrtSolThreadIntrThread);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_intr!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_lockp", &g_offrtSolThreadLock);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_lockp!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_procp", &g_offrtSolThreadProc);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_procp!\n");
            goto errorbail;
        }
        cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx (%ld)\n",    g_offrtSolCpuPreempt, g_offrtSolCpuPreempt);
        cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx (%ld)\n",  g_offrtSolCpuForceKernelPreempt, g_offrtSolCpuForceKernelPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx (%ld)\n", g_offrtSolThreadPreempt, g_offrtSolThreadPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_did @ 0x%lx (%ld)\n",     g_offrtSolThreadId, g_offrtSolThreadId);
        cmn_err(CE_CONT, "!kthread_t::t_intr @ 0x%lx (%ld)\n",    g_offrtSolThreadIntrThread, g_offrtSolThreadIntrThread);
        cmn_err(CE_CONT, "!kthread_t::t_lockp @ 0x%lx (%ld)\n",   g_offrtSolThreadLock, g_offrtSolThreadLock);
        cmn_err(CE_CONT, "!kthread_t::t_procp @ 0x%lx (%ld)\n",   g_offrtSolThreadProc, g_offrtSolThreadProc);

        /*
         * Mandatory: CPU cross call infrastructure. Refer to the-solaris-kernel.h for details.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            if (ncpus > IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
            g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
        }
        else
        {
            g_frtSolOldIPI = true;
            g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
            if (max_cpuid + 1 == sizeof(ulong_t) * 8)
            {
                g_frtSolOldIPIUlong = true;
                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
            }
            else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid,
                        IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
        }

        /*
         * Mandatory: Thread-context hooks.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "exitctx",  NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            g_rtSolThreadCtx.Install.pfnSol_installctx = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx   = (void *)removectx;
        }
        else
        {
            g_frtSolOldThreadCtx = true;
            g_rtSolThreadCtx.Install.pfnSol_installctx_old = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx_old   = (void *)removectx;
        }

        /*
         * Optional: Timeout hooks.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic",
                                   (void **)&g_pfnrtR0Sol_timeout_generic);
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic",
                                   (void **)&g_pfnrtR0Sol_untimeout_generic);
        if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
        {
            static const char *s_apszFn[2] = { "timeout_generic", "untimeout_generic" };
            bool iMissingFn = g_pfnrtR0Sol_timeout_generic == NULL;
            cmn_err(CE_NOTE, "rtR0InitNative: Weird! Found %s but not %s!\n", s_apszFn[!iMissingFn], s_apszFn[iMissingFn]);
            g_pfnrtR0Sol_timeout_generic   = NULL;
            g_pfnrtR0Sol_untimeout_generic = NULL;
        }
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram",
                                   (void **)&g_pfnrtR0Sol_cyclic_reprogram);

        /*
         * Optional: Querying page no-relocation support.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /*pszModule */, "page_noreloc_supported",
                                   (void **)&g_pfnrtR0Sol_page_noreloc_supported);

        /*
         * Weak binding failures: contig_free
         */
        if (g_pfnrtR0Sol_contig_free == NULL)
        {
            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free",
                                            (void **)&g_pfnrtR0Sol_contig_free);
            if (RT_FAILURE(rc))
            {
                cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
                goto errorbail;
            }
        }

        g_frtSolInitDone = true;
        return VINF_SUCCESS;
    }
    else
    {
        cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
        return rc;
    }

errorbail:
    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
    return rc;
}
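A hedged sketch of how the g_frtSolSplSetsEIF detection above might be consumed by a caller that needs interrupts to stay disabled across an splx(); rtSolExampleSplx() is an illustrative helper, not an IPRT API.

static void
rtSolExampleSplx(int iOldSpl, RTCCUINTREG fSavedFlags)
{
    splx(iOldSpl);
    if (g_frtSolSplSetsEIF)
        ASMSetFlags(fSavedFlags);   /* undo the interrupt-flag change splx() made */
}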