Example #1
void
sfmmu_set_tsbs()
{
	uint64_t rv;
	struct hv_tsb_block *hvbp = &ksfmmup->sfmmu_hvblock;

#ifdef DEBUG
	if (hv_use_0_tsb == 0)
		return;
#endif /* DEBUG */

	rv = hv_set_ctx0(hvbp->hv_tsb_info_cnt,
	    hvbp->hv_tsb_info_pa);
	if (rv != H_EOK)
		prom_printf("cpu%d: hv_set_ctx0() returned %lx\n",
		    getprocessorid(), rv);

#ifdef SET_MMU_STATS
	ASSERT(getprocessorid() < NCPU);
	rv = hv_mmu_set_stat_area(va_to_pa(&mmu_stat_area[getprocessorid()]),
	    sizeof (mmu_stat_area[0]));
	if (rv != H_EOK)
		prom_printf("cpu%d: hv_mmu_set_stat_area() returned %lx\n",
		    getprocessorid(), rv);
#endif /* SET_MMU_STATS */
}
Example #2
static void
sfmmu_set_fault_status_area(void)
{
	caddr_t mmfsa_va;
	extern	caddr_t mmu_fault_status_area;

	mmfsa_va = mmu_fault_status_area + (MMFSA_SIZE * getprocessorid());
	set_mmfsa_scratchpad(mmfsa_va);
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
}
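
The fault status address above is a fixed-size slice of one contiguous per-CPU area. A minimal sketch of how such a backing area could be sized, assuming a hypothetical alloc_fault_area() helper and one MMFSA_SIZE slot per possible CPU (the real area is set up elsewhere during startup):

/*
 * Illustrative only: reserve one MMFSA_SIZE slot per possible CPU so
 * that each CPU can index its own slot as base + MMFSA_SIZE * cpuid.
 * alloc_fault_area() is hypothetical.
 */
extern caddr_t mmu_fault_status_area;
extern caddr_t alloc_fault_area(size_t);

static void
example_alloc_mmfsa(void)
{
	mmu_fault_status_area = alloc_fault_area(MMFSA_SIZE * NCPU);
}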
Example #3
void
kdi_tlb_page_lock(caddr_t va, int do_dtlb)
{
	tte_t tte;
	pfn_t pfn = va_to_pfn(va);
	uint64_t ret;

	sfmmu_memtte(&tte, pfn, (PROC_TEXT | HAT_NOSYNC), TTE8K);
	ret = hv_mmu_map_perm_addr(va, KCONTEXT, *(uint64_t *)&tte,
	    MAP_ITLB | (do_dtlb ? MAP_DTLB : 0));

	if (ret != H_EOK) {
		cmn_err(CE_PANIC, "cpu%d: cannot set permanent mapping for "
		    "va=0x%p, hv error code 0x%lux",
		    getprocessorid(), (void *)va, ret);
	}
}
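
The lock above has a natural inverse. A sketch of what the matching unlock would look like, assuming an hv_mmu_unmap_perm_addr() interface that takes the same virtual address, context, and TLB-selection flags (the real kmdb counterpart may differ in detail):

/*
 * Sketch only: drop the permanent mapping installed by
 * kdi_tlb_page_lock().  Assumes hv_mmu_unmap_perm_addr() accepts the
 * same (va, context, flags) triple; the return value is ignored here.
 */
void
example_kdi_tlb_page_unlock(caddr_t va, int do_dtlb)
{
	(void) hv_mmu_unmap_perm_addr(va, KCONTEXT,
	    MAP_ITLB | (do_dtlb ? MAP_DTLB : 0));
}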
Example #4
/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return; /* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p",
		    (void *)cp, (void *)prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p",
		    (void *)prom_cpu);

	if (atomic_dec_32_nv(&prom_holdcnt) != 0)
		return; /* prom lock is held recursively by this CPU */

	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kmdb_enter();

	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		kpreempt_enable();
	}
}
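
The acquire/release pairing described above is easiest to see from a caller's point of view. A usage sketch, assuming a hypothetical example_prom_service() standing in for any real firmware call; in practice the prom_*() wrappers invoke these hooks on the caller's behalf:

/*
 * Usage sketch (not from the source): bracket every firmware service
 * with kern_preprom()/kern_postprom() so that only one CPU is inside
 * the PROM at a time.  example_prom_service() is hypothetical.
 */
extern void example_prom_service(void);

static void
example_call_prom(void)
{
	kern_preprom();			/* take the prom lock, may block */
	example_prom_service();		/* hypothetical firmware call */
	kern_postprom();		/* drop the lock, wake a waiter */
}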
Example #5
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
		CPUSET_ALL(cpu_bringup_set);
	}

	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);
	populate_idstr(CPU);

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();		/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}
	/*
	 * should we be initializing this cpu?
	 */
	bootcpu = getprocessorid();

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (pnode_t)0)
			continue;

		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN, "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}
		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		if (setup_cpu_common(cpuid)) {
			cmn_err(CE_PANIC, "cpu%d: setup failed", cpuid);
		}

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);
		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		cpu_state_change_notify(cpuid, CPU_SETUP);

		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

	if (&cpu_mp_init)
		cpu_mp_init();
}
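
Because start_other_cpus() only fills in cpu_bringup_set when it is still empty, code that runs earlier can restrict which slave CPUs are started. An illustrative sketch using the standard CPUSET macros; the CPU ids chosen here are arbitrary:

/*
 * Illustrative only: pre-populate cpu_bringup_set before
 * start_other_cpus() runs so that only CPUs 0 and 2 are brought up.
 * A missing boot CPU is added back automatically with a warning.
 */
static void
example_restrict_bringup(void)
{
	CPUSET_ZERO(cpu_bringup_set);
	CPUSET_ADD(cpu_bringup_set, 0);
	CPUSET_ADD(cpu_bringup_set, 2);
}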
Example #6
/*
 * Check for a legal set of CPUs.
 */
static void
check_cpus_set(void)
{
    int i;
    int impl;
    int npanther = 0;
    int njupiter = 0;

    impl = cpunodes[getprocessorid()].implementation;

    switch (impl) {
    case CHEETAH_PLUS_IMPL:
    case JAGUAR_IMPL:
    case PANTHER_IMPL:
        /*
         * Check for a legal heterogeneous set of CPUs.
         */
        for (i = 0; i < NCPU; i++) {
            if (cpunodes[i].nodeid == 0)
                continue;

            if (IS_PANTHER(cpunodes[i].implementation)) {
                npanther += 1;
            }

            if (!(IS_CHEETAH_PLUS(cpunodes[i].implementation) ||
                    IS_JAGUAR(cpunodes[i].implementation) ||
                    IS_PANTHER(cpunodes[i].implementation))) {
                use_mp = 0;
                break;
            }
        }
        break;
    case OLYMPUS_C_IMPL:
    case JUPITER_IMPL:
        /*
         * Check for a legal heterogeneous set of CPUs on the
         * OPL platform.
         */
        for (i = 0; i < NCPU; i++) {
            if (cpunodes[i].nodeid == 0)
                continue;

            if (IS_JUPITER(cpunodes[i].implementation)) {
                njupiter += 1;
            }
            if (!(IS_OLYMPUS_C(cpunodes[i].implementation) ||
                    IS_JUPITER(cpunodes[i].implementation))) {
                use_mp = 0;
                break;
            }
        }
        break;
    default:
        /*
         * Check for a homogeneous set of CPUs.
         */
        for (i = 0; i < NCPU; i++) {
            if (cpunodes[i].nodeid == 0)
                continue;

            if (cpunodes[i].implementation != impl) {
                use_mp = 0;
                break;
            }
        }
        break;
    }

    /*
     * Change mmu_page_sizes from 4 to 6 for totally-Panther domains,
     * where npanther == ncpunode. Also, set ecache_alignsize (and a few
     * other globals) to the correct value for totally-Panther domains.
     */
    if (&mmu_init_mmu_page_sizes) {
        (void) mmu_init_mmu_page_sizes(npanther);
    }
    if ((npanther == ncpunode) && (&cpu_fix_allpanther)) {
        cpu_fix_allpanther();
    }

    /*
     * For all-Jupiter domains the cpu module will update the hwcap features
     * for integer multiply-add instruction support.
     */
    if ((njupiter == ncpunode) && (&cpu_fix_alljupiter)) {
        cpu_fix_alljupiter();
    }

    /*
     * Set max cpus we can have based on ncpunode and use_mp
     */
    if (use_mp) {
        int (*set_max_ncpus)(void);

        set_max_ncpus = (int (*)(void))
                        kobj_getsymvalue("set_platform_max_ncpus", 0);

        if (set_max_ncpus) {
            max_ncpus = set_max_ncpus();
            if (max_ncpus < ncpunode)
                max_ncpus = ncpunode;
            boot_ncpus = boot_max_ncpus = ncpunode;
        } else {
            max_ncpus = ncpunode;
        }
    } else {
        cmn_err(CE_NOTE, "MP not supported on mismatched modules,"
                " booting UP only");

        for (i = 0; i < NCPU; i++) {
            if (cpunodes[i].nodeid == 0)
                continue;

            cmn_err(CE_NOTE, "cpu%d: %s version 0x%x", i,
                    cpunodes[i].name, cpunodes[i].version);
        }

        max_ncpus = 1;
    }
}
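
The if (&mmu_init_mmu_page_sizes) and if (&cpu_fix_allpanther) tests above rely on the weak-symbol convention: a weak declaration resolves to address zero unless some CPU module supplies a strong definition. A minimal sketch of the pattern using a hypothetical hook:

/*
 * Sketch of the optional-hook pattern: example_platform_fixup() is
 * hypothetical.  Its address is non-NULL only if some module provides
 * a real definition, so the call is safely skipped otherwise.
 */
#pragma weak example_platform_fixup
extern void example_platform_fixup(void);

static void
example_call_optional_hook(void)
{
	if (&example_platform_fixup)
		example_platform_fixup();
}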
Example #7
static void
check_cpus_ver(void)
{
    int i;
    int impl, cpuid = getprocessorid();
    int min_supported_rev;

    ASSERT(cpunodes[cpuid].nodeid != 0);

    impl = cpunodes[cpuid].implementation;
    switch (impl) {
    default:
        min_supported_rev = 0;
        break;
    case SPITFIRE_IMPL:
        min_supported_rev = SPITFIRE_MINREV_SUPPORTED;
        break;
    case CHEETAH_IMPL:
        min_supported_rev = CHEETAH_MINREV_SUPPORTED;
        break;
    }

    for (i = 0; i < NCPU; i++) {
        if (cpunodes[i].nodeid == 0)
            continue;

        if (IS_SPITFIRE(impl)) {
            if (cpunodes[i].version < min_supported_rev) {
                cmn_err(CE_PANIC, "UltraSPARC versions older "
                        "than %d.%d are no longer supported "
                        "(cpu #%d)",
                        SPITFIRE_MAJOR_VERSION(min_supported_rev),
                        SPITFIRE_MINOR_VERSION(min_supported_rev),
                        i);
            }

            /*
             * The minimum supported rev is 2.1, but we have seen
             * problems with it, so we still warn when we encounter one.
             */
            if (cpunodes[i].version < 0x22) {
                cmn_err(CE_WARN,
                        "UltraSPARC versions older than "
                        "2.2 are not supported (cpu #%d)", i);
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
                spitfire_call_bug = 1;
#endif /* SF_ERRATA_30 */
            }
        }

#ifdef SF_V9_TABLE_28	/* fp over/underflow traps may cause wrong fsr.cexc */
        if (IS_SPITFIRE(impl) || IS_BLACKBIRD(impl))
            spitfire_bb_fsr_bug = 1;
#endif /* SF_V9_TABLE_28 */

        if (IS_CHEETAH(impl)) {
            if (cpunodes[i].version < min_supported_rev) {
                cmn_err(CE_PANIC, "UltraSPARC-III versions "
                        "older than %d.%d are no longer supported "
                        "(cpu #%d)",
                        CHEETAH_MAJOR_VERSION(min_supported_rev),
                        CHEETAH_MINOR_VERSION(min_supported_rev),
                        i);
            }
        }

#ifdef JALAPENO_ERRATA_85
        if (IS_JALAPENO(impl) && (cpunodes[i].version < 0x24)) {
            jp_errata_85_allow_slow_scrub = 0;
            jp_errata_85_enable = 1;
        }
#endif /* JALAPENO_ERRATA_85 */
    }
}
Example #8
/*
 * launch slave cpus into kernel text, pause them,
 * and restore the original prom pages
 */
void
i_cpr_mp_setup(void)
{
	extern void restart_other_cpu(int);
	cpu_t *cp;

	uint64_t kctx = kcontextreg;

	/*
	 * Do not allow setting page size codes in MMU primary context
	 * register while using cif wrapper. This is needed to work
	 * around OBP incorrect handling of this MMU register.
	 */
	kcontextreg = 0;

	/*
	 * reset cpu_ready_set so x_calls work properly
	 */
	CPUSET_ZERO(cpu_ready_set);
	CPUSET_ADD(cpu_ready_set, getprocessorid());

	/*
	 * setup cif to use the cookie from the new/tmp prom
	 * and setup tmp handling for calling prom services.
	 */
	i_cpr_cif_setup(CIF_SPLICE);

	/*
	 * at this point, only the nucleus and a few cpr pages are
	 * mapped in.  once we switch to the kernel trap table,
	 * we can access the rest of kernel space.
	 */
	prom_set_traptable(&trap_table);

	if (ncpus > 1) {
		sfmmu_init_tsbs();

		mutex_enter(&cpu_lock);
		/*
		 * None of the slave cpus are ready at this time,
		 * yet the cpu structures have various cpu_flags set;
		 * clear cpu_flags and mutex_ready.
		 * Since we are coming up from a CPU suspend, the slave cpus
		 * are frozen.
		 */
		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next) {
			cp->cpu_flags = CPU_FROZEN;
			cp->cpu_m.mutex_ready = 0;
		}

		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next)
			restart_other_cpu(cp->cpu_id);

		pause_cpus(NULL, NULL);
		mutex_exit(&cpu_lock);

		i_cpr_xcall(i_cpr_clear_entries);
	} else
		i_cpr_clear_entries(0, 0);

	/*
	 * now unlink the cif wrapper;  WARNING: do not call any
	 * prom_xxx() routines until after prom pages are restored.
	 */
	i_cpr_cif_setup(CIF_UNLINK);

	(void) i_cpr_prom_pages(CPR_PROM_RESTORE);

	/* allow setting page size codes in MMU primary context register */
	kcontextreg = kctx;
}
Example #9
void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready bit.
		 * It doesn't matter if we are preempted here because we are
		 * only trying to determine if we are in the *set* of mutex
		 * ready CPUs.  We cannot disable preemption until we confirm
		 * that we are running on a CPU in this set, since a call to
		 * kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return; /* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_inc_32(&prom_holdcnt);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}