Code example #1
void
kcpc_hw_init(cpu_t *cp)
{
	kthread_t *t = cp->cpu_idle_thread;
	uint32_t versionid;
	struct cpuid_regs cpuid;

	strands_perfmon_shared = 0;
	if (x86_feature & X86_HTT) {
		if (cpuid_getvendor(cpu[0]) == X86_VENDOR_Intel) {
			/*
			 * Intel processors that support Architectural
			 * Performance Monitoring Version 3 have per strand
			 * performance monitoring hardware.
			 * Hence we can allow use of performance counters on
			 * multiple strands on the same core simultaneously.
			 */
			cpuid.cp_eax = 0x0;
			(void) __cpuid_insn(&cpuid);
			if (cpuid.cp_eax < 0xa) {
				strands_perfmon_shared = 1;
			} else {
				cpuid.cp_eax = 0xa;
				(void) __cpuid_insn(&cpuid);

				versionid = cpuid.cp_eax & 0xFF;
				if (versionid < 3) {
					strands_perfmon_shared = 1;
				}
			}
		} else {
			strands_perfmon_shared = 1;
		}
	}

	if (strands_perfmon_shared) {
		mutex_enter(&cpu_setup_lock);
		if (setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(kcpc_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			setup_registered = 1;
		}
		mutex_exit(&cpu_setup_lock);
	}

	mutex_init(&cp->cpu_cpc_ctxlock, "cpu_cpc_ctxlock", MUTEX_DEFAULT, 0);

	if (kcpc_counts_include_idle)
		return;

	/*
	 * Exclude idle time from the counts: install context ops on the
	 * idle thread that save the counters on switch to idle and
	 * restore them on switch away.
	 */
	installctx(t, cp, kcpc_idle_save, kcpc_idle_restore,
	    NULL, NULL, NULL, NULL);
}
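The handler registered above, kcpc_cpu_setup(), is not shown on this page, but every callback passed to register_cpu_setup_func() has the cpu_setup_func_t shape: it receives a cpu_setup_t event, the CPU id, and the opaque argument supplied at registration time, and it is invoked with cpu_lock held. A minimal sketch of such a callback follows; the name my_cpu_setup and the event handling are illustrative, not the actual kcpc_cpu_setup() body.

/*
 * Illustrative cpu_setup_func_t skeleton; the cases below are
 * placeholders, not the real kcpc_cpu_setup() logic.
 */
/*ARGSUSED*/
static int
my_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	ASSERT(MUTEX_HELD(&cpu_lock));	/* callbacks run under cpu_lock */

	switch (what) {
	case CPU_ON:
		/* CPU 'id' is coming online: set up its per-CPU state. */
		break;
	case CPU_OFF:
		/* CPU 'id' is going offline: quiesce or release its state. */
		break;
	default:
		break;
	}
	return (0);
}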
Code example #2
File: pool_pset.c  Project: JackieXie168/mac-zfs
/*
 * Initialize processor set plugin.  Called once at boot time.
 */
void
pool_pset_init(void)
{
	ASSERT(pool_pset_default == NULL);
	pool_pset_default = kmem_zalloc(sizeof (pool_pset_t), KM_SLEEP);
	pool_pset_default->pset_id = PS_NONE;
	pool_pset_default->pset_npools = 1;	/* for pool_default */
	pool_default->pool_pset = pool_pset_default;
	list_create(&pool_pset_list, sizeof (pool_pset_t),
	    offsetof(pool_pset_t, pset_link));
	list_insert_head(&pool_pset_list, pool_pset_default);
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(pool_pset_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
}
Code example #3
void
clock_tick_mp_init(void)
{
	cpu_t	*cp;

	mutex_enter(&cpu_lock);

	/*
	 * Run the handler for every CPU that is already online before
	 * registering it for future state changes.
	 */
	cp = cpu_active;
	do {
		(void) clock_tick_cpu_setup(CPU_ON, cp->cpu_id, NULL);
	} while ((cp = cp->cpu_next_onln) != cpu_active);

	register_cpu_setup_func(clock_tick_cpu_setup, NULL);

	mutex_exit(&cpu_lock);
}
Code example #4
DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
{
    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
        return VERR_WRONG_ORDER;

    /*
     * Register the callback building the online cpu set as we do so.
     */
    RTCpuSetEmpty(&g_rtMpSolCpuSet);

    mutex_enter(&cpu_lock);
    register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);

    for (int i = 0; i < (int)RTMpGetCount(); ++i)
        if (cpu_is_online(cpu[i]))
            rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);

    ASMAtomicWriteBool(&g_fSolCpuWatch, true);
    mutex_exit(&cpu_lock);

    return VINF_SUCCESS;
}
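A matching termination path would remove the callback again via unregister_cpu_setup_func(). The sketch below assumes the same g_fSolCpuWatch flag and atomics used above; it is an assumed shape, not the verbatim rtR0MpNotificationNativeTerm() body.

/*
 * Sketch of the matching teardown (assumed shape): drop the callback
 * under cpu_lock and clear the "watching" flag.
 */
DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
{
    if (!ASMAtomicReadBool(&g_fSolCpuWatch))
        return;

    mutex_enter(&cpu_lock);
    unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
    ASMAtomicWriteBool(&g_fSolCpuWatch, false);
    mutex_exit(&cpu_lock);
}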
Code example #5
/*
 * Initialize IP squeues.
 */
void
ip_squeue_init(void (*callback)(squeue_t *))
{
	int i;
	squeue_set_t	*sqs;

	ASSERT(sqset_global_list == NULL);

	ip_squeue_create_callback = callback;
	squeue_init();
	mutex_init(&sqset_lock, NULL, MUTEX_DEFAULT, NULL);
	sqset_global_list =
	    kmem_zalloc(sizeof (squeue_set_t *) * (NCPU+1), KM_SLEEP);
	sqset_global_size = 0;
	/*
	 * We are called at system boot time and we don't
	 * expect memory allocation failure.
	 */
	sqs = ip_squeue_set_create(-1);
	ASSERT(sqs != NULL);

	mutex_enter(&cpu_lock);
	/* Create squeue for each active CPU available */
	for (i = 0; i < NCPU; i++) {
		cpu_t *cp = cpu_get(i);
		if (CPU_ISON(cp) && cp->cpu_squeue_set == NULL) {
			/*
			 * We are called at system boot time and we don't
			 * expect memory allocation failure then
			 */
			cp->cpu_squeue_set = ip_squeue_set_create(cp->cpu_id);
			ASSERT(cp->cpu_squeue_set != NULL);
		}
	}

	register_cpu_setup_func(ip_squeue_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
}
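Taken together, the examples share one pattern: take cpu_lock, bring the subsystem up to date for CPUs that are already online (examples #3 and #4 do this by calling their handler directly with CPU_ON), then call register_cpu_setup_func() so later online/offline transitions arrive through the callback, and finally drop cpu_lock. Below is a condensed sketch of that pattern; my_subsys_mp_init and my_subsys_cpu_setup are hypothetical names standing in for a subsystem's own functions.

/*
 * Condensed sketch of the registration pattern shared by the examples
 * above; my_subsys_cpu_setup() stands in for the subsystem's callback.
 */
static void
my_subsys_mp_init(void)
{
	cpu_t *cp;

	mutex_enter(&cpu_lock);

	/* Catch up for CPUs that came online before we registered. */
	cp = cpu_active;
	do {
		(void) my_subsys_cpu_setup(CPU_ON, cp->cpu_id, NULL);
	} while ((cp = cp->cpu_next_onln) != cpu_active);

	/* Subsequent CPU state changes are delivered via the callback. */
	register_cpu_setup_func(my_subsys_cpu_setup, NULL);

	mutex_exit(&cpu_lock);
}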