Example #1
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		cpu_die_id_get
 * @BRIEF		return SoC DIE ID (4x 32-bit integers, string).
 * @RETURNS		OMAP DIE ID string (as "DIEID3-DIEID2-DIEID1-DIEID0")
 *			NULL in case of error
 * @param[in,out]	die_id_3: DIE ID (part 3, MSB)
 * @param[in,out]	die_id_2: DIE ID (part 2)
 * @param[in,out]	die_id_1: DIE ID (part 1)
 * @param[in,out]	die_id_0: DIE ID (part 0, LSB)
 * @param[in,out]	die_id: DIE ID string ("DIEID3-DIEID2-DIEID1-DIEID0")
 * @DESCRIPTION		return SoC DIE ID (4x 32-bit integers, string).
 *//*------------------------------------------------------------------------ */
char *cpu_die_id_get(unsigned int *die_id_3, unsigned int *die_id_2,
	unsigned int *die_id_1, unsigned int *die_id_0,
	char die_id[CPU_DIE_ID_LENGTH])
{
	unsigned int die_id_add_3;
	unsigned int die_id_add_2;
	unsigned int die_id_add_1;
	unsigned int die_id_add_0;

	CHECK_NULL_ARG(die_id, NULL);

	/*
	 * The DIE ID for the AM335X/AM437X is TI proprietary information;
	 * NULL is returned since it cannot be shown.
	 */
	if (cpu_is_am335x() || cpu_is_am437x())
		return NULL;

	if (cpu_get() == DRA_75X || cpu_get() == DRA_72X) {
		die_id_add_3 = DRA7_CONTROL_STD_FUSE_DIE_ID_3;
		die_id_add_2 = DRA7_CONTROL_STD_FUSE_DIE_ID_2;
		die_id_add_1 = DRA7_CONTROL_STD_FUSE_DIE_ID_1;
		die_id_add_0 = DRA7_CONTROL_STD_FUSE_DIE_ID_0;
	} else {
		die_id_add_3 = CONTROL_STD_FUSE_DIE_ID_3;
		die_id_add_2 = CONTROL_STD_FUSE_DIE_ID_2;
		die_id_add_1 = CONTROL_STD_FUSE_DIE_ID_1;
		die_id_add_0 = CONTROL_STD_FUSE_DIE_ID_0;
	}

	if (mem_read(die_id_add_3, die_id_3) != 0)
		return NULL;
	dprintf("%s(): die_id_3 = 0x%08X\n", __func__, *die_id_3);
	if (mem_read(die_id_add_2, die_id_2) != 0)
		return NULL;
	dprintf("%s(): die_id_2 = 0x%08X\n", __func__, *die_id_2);
	if (mem_read(die_id_add_1, die_id_1) != 0)
		return NULL;
	dprintf("%s(): die_id_1 = 0x%08X\n", __func__, *die_id_1);
	if (mem_read(die_id_add_0, die_id_0) != 0)
		return NULL;
	dprintf("%s(): die_id_0 = 0x%08X\n", __func__, *die_id_0);

	sprintf(die_id, "%08X-%08X-%08X-%08X",
		*die_id_3, *die_id_2, *die_id_1, *die_id_0);
	dprintf("%s(): die_id = %s\n", __func__, die_id);

	return die_id;
}
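A minimal caller sketch for cpu_die_id_get() above; it is illustrative only (the print_die_id() wrapper and variable names are hypothetical) and assumes CPU_DIE_ID_LENGTH and <stdio.h> are available, as in the omapconf sources.
#include <stdio.h>

static void print_die_id(void)
{
	unsigned int id3, id2, id1, id0;
	char die_id[CPU_DIE_ID_LENGTH];

	/* NULL is returned on AM335X/AM437X or on a register read error */
	if (cpu_die_id_get(&id3, &id2, &id1, &id0, die_id) == NULL)
		printf("DIE ID not available\n");
	else
		printf("DIE ID: %s\n", die_id);
}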
static int
vcpu_config_poweroff(processorid_t id)
{
	int oldstate;
	int error;
	cpu_t *cp;

	mutex_enter(&cpu_lock);

	if ((cp = cpu_get(id)) == NULL) {
		mutex_exit(&cpu_lock);
		return (ESRCH);
	}

	if (cpu_get_state(cp) == P_POWEROFF) {
		mutex_exit(&cpu_lock);
		return (0);
	}

	mutex_exit(&cpu_lock);

	do {
		error = p_online_internal(id, P_OFFLINE,
		    &oldstate);

		if (error != 0)
			break;

		/*
		 * So we just changed it to P_OFFLINE.  But then we dropped
		 * cpu_lock, so now it is possible for another thread to change
		 * the cpu back to a different, non-quiesced state e.g.
		 * P_ONLINE.
		 */
		mutex_enter(&cpu_lock);
		if ((cp = cpu_get(id)) == NULL)
			error = ESRCH;
		else {
			if (cp->cpu_flags & CPU_QUIESCED)
				error = poweroff_vcpu(cp);
			else
				error = EBUSY;
		}
		mutex_exit(&cpu_lock);
	} while (error == EBUSY);

	return (error);
}
Example #3
0
/* ARGSUSED */
static int
pool_pset_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	processorid_t cpuid = id;
	struct setup_arg sarg;
	int error;
	cpu_t *c;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(INGLOBALZONE(curproc));

	if (!pool_pset_enabled())
		return (0);
	if (what != CPU_CONFIG && what != CPU_UNCONFIG &&
	    what != CPU_ON && what != CPU_OFF &&
	    what != CPU_CPUPART_IN && what != CPU_CPUPART_OUT)
		return (0);
	c = cpu_get(cpuid);
	ASSERT(c != NULL);
	sarg.psetid = cpupart_query_cpu(c);
	sarg.cpu = c;
	sarg.what = what;

	error = zone_walk(pool_pset_setup_cb, &sarg);
	ASSERT(error == 0);
	return (0);
}
Example #4
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		tps659038_is_present
 * @BRIEF		return 1 if TPS659038 is present on this platform,
 *			0 otherwise
 * @RETURNS		1 if TPS659038 chip is found
 *			0 otherwise
 * @DESCRIPTION		return 1 if TPS659038 is present on this platform,
 *			0 otherwise
 *//*------------------------------------------------------------------------ */
unsigned short int tps659038_is_present(void)
{
	int ret;
	unsigned int id_lsb, id_msb;
	unsigned short present;

	switch (cpu_get()) {
	case DRA_7XX:
		ret = i2cget(TPS659038_I2C_BUS, TPS659038_ID1_ADDR,
			TPS659038_PRODUCT_ID_LSB, &id_lsb);
		if (ret != 0)
			return 0;

		ret = i2cget(TPS659038_I2C_BUS, TPS659038_ID1_ADDR,
			TPS659038_PRODUCT_ID_MSB, &id_msb);
		if (ret != 0)
			return 0;

		present = ((id_lsb == 0x35) && (id_msb == 0xc0)) ? 1 : 0;
		break;

	default:
		present = 0;
	}

	dprintf("%s(): present=%u\n", __func__, present);
	return present;
}
Example #5
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		tps65217x_is_present
 * @BRIEF		return 1 if TPS65217X is present on this platform,
 *			0 otherwise
 * @RETURNS		1 if TPS65217X chip is found
 *			0 otherwise
 * @DESCRIPTION		return 1 if TPS65217X is present on this platform,
 *			0 otherwise
 *//*------------------------------------------------------------------------ */
unsigned short int tps65217x_is_present(void)
{
	int ret;
	unsigned int id_lsb;
	unsigned short present;

	switch (cpu_get()) {
	case AM_3352:
	case AM_3354:
	case AM_3356:
	case AM_3357:
	case AM_3358:
	case AM_3359:
		/* Check to see if address is readable */
		ret = i2cget(TPS65217X_I2C_BUS, TPS65217X_ID0_ADDR,
			0x00, &id_lsb);
		if (ret != 0)
			return 0;

		/* Check to see if chip and revision are valid */
		present = ((tps65217x_chip_get() >= 6) ||
			tps65217x_chip_revision_get() > 0) ? 1 : 0;
		break;

	default:
		present = 0;
	}

	dprintf("%s(): present=%u\n", __func__, present);
	return present;
}
/*
 * mac_soft_ring_bind
 *
 * Bind a soft ring worker thread to supplied CPU.
 */
cpu_t *
mac_soft_ring_bind(mac_soft_ring_t *ringp, processorid_t cpuid)
{
	cpu_t *cp;
	boolean_t clear = B_FALSE;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (mac_soft_ring_thread_bind == 0) {
		DTRACE_PROBE1(mac__soft__ring__no__cpu__bound,
		    mac_soft_ring_t *, ringp);
		return (NULL);
	}

	cp = cpu_get(cpuid);
	if (cp == NULL || !cpu_is_online(cp))
		return (NULL);

	mutex_enter(&ringp->s_ring_lock);
	ringp->s_ring_state |= S_RING_BOUND;
	if (ringp->s_ring_cpuid != -1)
		clear = B_TRUE;
	ringp->s_ring_cpuid = cpuid;
	mutex_exit(&ringp->s_ring_lock);

	if (clear)
		thread_affinity_clear(ringp->s_ring_worker);

	DTRACE_PROBE2(mac__soft__ring__cpu__bound, mac_soft_ring_t *,
	    ringp, processorid_t, cpuid);

	thread_affinity_set(ringp->s_ring_worker, cpuid);

	return (cp);
}
Example #7
0
/*
 * Transfer specified CPUs between processor sets.
 */
int
pool_pset_xtransfer(psetid_t src, psetid_t dst, size_t size, id_t *ids)
{
	struct cpu *cpu;
	int ret = 0;
	int id;

	ASSERT(pool_lock_held());
	ASSERT(INGLOBALZONE(curproc));

	if (size == 0 || size > max_ncpus)	/* quick sanity check */
		return (EINVAL);

	mutex_enter(&cpu_lock);
	for (id = 0; id < size; id++) {
		if ((cpu = cpu_get((processorid_t)ids[id])) == NULL ||
		    cpupart_query_cpu(cpu) != src) {
			ret = EINVAL;
			break;
		}
		if ((ret = cpupart_attach_cpu(dst, cpu, 1)) != 0)
			break;
	}
	mutex_exit(&cpu_lock);
	if (ret == 0)
		pool_pset_mod = gethrtime();
	return (ret);
}
Example #8
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		cpu_dra7xx_silicon_max_speed_get
 * @BRIEF		return silicon max speed (depending on cpu type and
 *			silicon type)
 * @RETURNS		Silicon max speed (in MHz)
 *			0 in case of error
 * @DESCRIPTION		return silicon max speed (depending on cpu type and
 *			silicon type)
 *//*------------------------------------------------------------------------ */
unsigned int cpu_dra7xx_silicon_max_speed_get(void)
{
	unsigned int max_speed;

	switch (cpu_get()) {
	/*
	 * TBD: use the DIE ID to detect the maximum speed capability. For the
	 * moment, use cpufreq entries (if any) to detect the max speed.
	 */
	case DRA_72X:
	case DRA_75X:
		switch (cpu_silicon_type_get()) {
		case STANDARD_PERF_SI:
			max_speed = 1000;
			break;
		case HIGH_PERF_SI:
			max_speed = 1500;
			break;
		default:
			max_speed = 0;
			break;
		}
		break;

	default:
		fprintf(stderr, "%s(): unknown chip!\n", __func__);
		max_speed = 0;
	}

	dprintf("%s(): max speed = %dMHz\n", __func__, max_speed);
	return max_speed;
}
Example #9
0
/*
 * cpu0 should contain bootcpu info
 */
cpu_t *
i_cpr_bootcpu(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	return (cpu_get(i_cpr_bootcpuid()));
}
/*
 * processor_info(2) - return information on a processor.
 */
int
processor_info(processorid_t cpun, processor_info_t *infop)
{
	cpu_t *cp;
	processor_info_t temp;

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cpun)) == NULL) {
		mutex_exit(&cpu_lock);
		return (set_errno(EINVAL));
	}
	bcopy(&cp->cpu_type_info, &temp, sizeof (temp));
	mutex_exit(&cpu_lock);

	/*
	 * The spec indicates that the rest of the information is meaningless
	 * if the CPU is offline, but if presented by the machine-dependent
	 * layer, it is probably still accurate.  It seems OK to copy it all in
	 * either case.
	 */
	if (copyout((caddr_t)&temp, (caddr_t)infop,
	    sizeof (processor_info_t)))
		return (set_errno(EFAULT));

	return (0);
}
Example #11
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		smps_name_get
 * @BRIEF		return PMIC SMPS name as a string
 * @RETURNS		PMIC SMPS name as a string in case of success
 *			NULL in case of error
 * @param[in]		smps_id: valid SMPS ID
 * @DESCRIPTION		return PMIC SMPS name as a string
 *//*------------------------------------------------------------------------ */
const char *smps_name_get(pmic_smps_id smps_id)
{
	CHECK_ARG_LESS_THAN(smps_id, PMIC_SMPS_ID_MAX, NULL);

	switch (cpu_get()) {
	case OMAP_4430:
	case OMAP_4460:
	case OMAP_4470:
		return smps44xx_names[smps_id];

	case OMAP_5430:
	case OMAP_5432:
		return smps54xx_names[smps_id];

	case DRA_75X:
		return smps_dra7xx_names[smps_id];

	case AM_3352:
	case AM_3354:
	case AM_3356:
	case AM_3357:
	case AM_3358:
	case AM_3359:
		return smps_am335x_names[smps_id];

	default:
		return smps44xx_names[smps_id];
	}
}
Example #12
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		twl603x_is_twl6034
 * @BRIEF		return 1 if PMIC chip is TWL6034, 0 otherwise.
 * @RETURNS		1 if PMIC chip is TWL6034
 *			0 otherwise
 * @DESCRIPTION		return 1 if PMIC chip is TWL6034, 0 otherwise.
 *//*------------------------------------------------------------------------ */
unsigned short twl603x_is_twl6034(void)
{
	int ret;
	unsigned int val1 = 0, val2 = 0;

	if (cpu_get() == DRA_75X)
		return 0;

	if (twl603x_data.chip_type != TWL603X_TYPE_MAX) {
		dprintf("%s(): flag=%d\n", __func__,
			(twl603x_data.chip_type == TWL6034));
		return twl603x_data.chip_type == TWL6034;
	}

	ret = i2cget(TWL6030_I2C_BUS, 0x49, 0x02, &val2);
	if (ret != 0)
		goto twl603x_is_twl6034_end;

	ret = i2cget(TWL6030_I2C_BUS, 0x49, 0x03, &val1);
	if (ret != 0)
		goto twl603x_is_twl6034_end;

	if ((val1 == 0x00) && (val2 == 0x00))
		twl603x_data.chip_type = TWL6034;

twl603x_is_twl6034_end:
	dprintf("%s(): val1=0x%02X val2=0x%02X flag=%d\n",
		__func__, val1, val2, (twl603x_data.chip_type == TWL6034));
	return twl603x_data.chip_type == TWL6034;
}
/* ARGSUSED */
static int
ip_squeue_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	cpu_t *cp = cpu_get(id);

	ASSERT(MUTEX_HELD(&cpu_lock));
	switch (what) {
	case CPU_CONFIG:
	case CPU_ON:
	case CPU_INIT:
	case CPU_CPUPART_IN:
		if (CPU_ISON(cp) && cp->cpu_squeue_set == NULL)
			cp->cpu_squeue_set = ip_squeue_set_create(cp->cpu_id);
		break;
	case CPU_UNCONFIG:
	case CPU_OFF:
	case CPU_CPUPART_OUT:
		if (cp->cpu_squeue_set != NULL) {
			ip_squeue_set_destroy(cp);
			cp->cpu_squeue_set = NULL;
		}
		break;
	default:
		break;
	}
	return (0);
}
static int
vcpu_config_poweron(processorid_t id)
{
	cpu_t *cp;
	int oldstate;
	int error;

	if (id >= ncpus)
		return (vcpu_config_new(id));

	mutex_enter(&cpu_lock);

	if ((cp = cpu_get(id)) == NULL) {
		mutex_exit(&cpu_lock);
		return (ESRCH);
	}

	if (cpu_get_state(cp) != P_POWEROFF) {
		mutex_exit(&cpu_lock);
		return (0);
	}

	if ((error = poweron_vcpu(cp)) != 0) {
		mutex_exit(&cpu_lock);
		return (error);
	}

	mutex_exit(&cpu_lock);

	return (p_online_internal(id, P_ONLINE, &oldstate));
}
Example #15
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		cpu_gets
 * @BRIEF		return CPU name as a string
 * @RETURNS		CPU name as a string
 * @param[in,out]	s: pre-allocated buffer in which to store the CPU name
 * @DESCRIPTION		return CPU name as a string
 *//*------------------------------------------------------------------------ */
char *cpu_gets(char s[CPU_NAME_MAX_LENGTH])
{
	omap_chip omap;

	omap = cpu_get();
	if (omap > OMAP_MAX)
		omap = OMAP_MAX;
	return strncpy(s, cpu_name[omap], CPU_NAME_MAX_LENGTH);
}
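A minimal usage sketch for cpu_gets() above; the print_cpu_name() wrapper is hypothetical and assumes CPU_NAME_MAX_LENGTH and <stdio.h> are available.
#include <stdio.h>

static void print_cpu_name(void)
{
	char name[CPU_NAME_MAX_LENGTH];

	/* cpu_gets() copies the CPU name into the caller's buffer and returns it */
	printf("Detected CPU: %s\n", cpu_gets(name));
}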
Example #16
0
/* ------------------------------------------------------------------------*//**
 * @FUNCTION		sr44xx_golden_settings_get
 * @BRIEF		return SR module golden settings.
 * @RETURNS		SR module golden settings in case of success
 *			NULL in case of error
 * @param[in]		sr_id: SR module ID
 * @param[in]		opp_id: OPP ID
 * @DESCRIPTION		return SR module golden settings, for a given chip,
 *			module and OPP.
 *//*------------------------------------------------------------------------ */
const sr_audit_settings *sr44xx_golden_settings_get(omap4_sr_module_id sr_id,
	opp44xx_id opp_id)
{
	omap_chip chip_id;

	CHECK_ARG_LESS_THAN(sr_id, OMAP4_SR_ID_MAX, NULL);
	CHECK_ARG_LESS_THAN(opp_id, OPP44XX_ID_MAX, NULL);

	chip_id = cpu_get();
	dprintf("%s(): sr_id=%d opp_id=%d chip_id=%d\n", __func__,
		sr_id, opp_id, chip_id);
	return sr44xx_golden_settings[chip_id][sr_id][opp_id];
}
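A minimal sketch of how a caller might consume sr44xx_golden_settings_get(); it only tests the returned pointer, since the sr_audit_settings fields are not shown here, and the sr_settings_available() helper is hypothetical.
static int sr_settings_available(omap4_sr_module_id sr_id, opp44xx_id opp_id)
{
	const sr_audit_settings *settings;

	/* NULL means invalid arguments or no golden settings for this chip/module/OPP */
	settings = sr44xx_golden_settings_get(sr_id, opp_id);
	return settings != NULL;
}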
Example #17
0
/*
 * Jump to the fast reboot switcher.  This function never returns.
 */
void
fast_reboot()
{
	processorid_t bootcpuid = 0;
	extern uintptr_t postbootkernelbase;
	extern char	fb_swtch_image[];
	fastboot_file_t	*fb;
	int i;

	postbootkernelbase = 0;

	fb = &newkernel.fi_files[FASTBOOT_SWTCH];

	/*
	 * Map the address into both the current proc's address
	 * space and the kernel's address space in case the panic
	 * is forced by kmdb.
	 */
	if (&kas != curproc->p_as) {
		hat_devload(curproc->p_as->a_hat, (caddr_t)fb->fb_va,
		    MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
	}

	bcopy((void *)fb_swtch_image, (void *)fb->fb_va, fb->fb_size);

	/*
	 * Set fb_va to fake_va
	 */
	for (i = 0; i < FASTBOOT_MAX_FILES_MAP; i++) {
		newkernel.fi_files[i].fb_va = fake_va;
	}

	if (panicstr && CPU->cpu_id != bootcpuid &&
	    CPU_ACTIVE(cpu_get(bootcpuid))) {
		extern void panic_idle(void);
		cpuset_t cpuset;

		CPUSET_ZERO(cpuset);
		CPUSET_ADD(cpuset, bootcpuid);
		xc_priority((xc_arg_t)&newkernel, 0, 0, CPUSET2BV(cpuset),
		    (xc_func_t)fastboot_xc_func);

		panic_idle();
	} else
		(void) fastboot_xc_func(&newkernel, 0, 0);
}
void
mp_enter_barrier(void)
{
	hrtime_t last_poke_time = 0;
	int poke_allowed = 0;
	int done = 0;
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	pause_cpus(NULL);

	while (!done) {
		done = 1;
		poke_allowed = 0;

		if (xpv_gethrtime() - last_poke_time > POKE_TIMEOUT) {
			last_poke_time = xpv_gethrtime();
			poke_allowed = 1;
		}

		for (i = 0; i < NCPU; i++) {
			cpu_t *cp = cpu_get(i);

			if (cp == NULL || cp == CPU)
				continue;

			switch (cpu_phase[i]) {
			case CPU_PHASE_NONE:
				cpu_phase[i] = CPU_PHASE_WAIT_SAFE;
				poke_cpu(i);
				done = 0;
				break;

			case CPU_PHASE_WAIT_SAFE:
				if (poke_allowed)
					poke_cpu(i);
				done = 0;
				break;

			case CPU_PHASE_SAFE:
			case CPU_PHASE_POWERED_OFF:
				break;
			}
		}

		SMT_PAUSE();
	}
}
Example #19
0
/*
 * Disable processor set plugin.
 */
int
pool_pset_disable(void)
{
	processorid_t cpuid;
	cpu_t *cpu;
	int error;

	ASSERT(pool_lock_held());
	ASSERT(INGLOBALZONE(curproc));

	mutex_enter(&cpu_lock);
	if (cp_numparts > 1) {	/* make sure only default pset is left */
		mutex_exit(&cpu_lock);
		return (EBUSY);
	}
	/*
	 * Remove all non-system CPU and processor set properties
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		if ((cpu = cpu_get(cpuid)) == NULL)
			continue;
		if (cpu->cpu_props != NULL) {
			(void) nvlist_free(cpu->cpu_props);
			cpu->cpu_props = NULL;
		}
	}

	/*
	 * We want to switch things such that everything is now visible
	 * to ALL_ZONES: first add the special "ALL_ZONES" token to the
	 * visibility list then remove individual zones.  There must
	 * only be the default pset active if pools are being disabled,
	 * so we only need to deal with it.
	 */
	error = zone_walk(pool_pset_zone_pset_set, (void *)ZONE_PS_INVAL);
	ASSERT(error == 0);
	pool_pset_visibility_add(PS_NONE, NULL);
	pool_pset_visibility_remove(PS_NONE, global_zone);
	/*
	 * pool_pset_enabled() will henceforth return B_FALSE.
	 */
	global_zone->zone_psetid = ZONE_PS_INVAL;
	mutex_exit(&cpu_lock);
	if (pool_pset_default->pset_props != NULL) {
		nvlist_free(pool_pset_default->pset_props);
		pool_pset_default->pset_props = NULL;
	}
	return (0);
}
Example #20
0
/*
 * Put new CPU property.
 * Handle special case of "cpu.status".
 */
int
pool_cpu_propput(processorid_t cpuid, nvpair_t *pair)
{
	int ret = 0;
	cpu_t *cpu;

	ASSERT(pool_lock_held());
	ASSERT(INGLOBALZONE(curproc));

	if (nvpair_type(pair) == DATA_TYPE_STRING &&
	    strcmp(nvpair_name(pair), "cpu.status") == 0) {
		char *val;
		int status;
		int old_status;
		(void) nvpair_value_string(pair, &val);
		if (strcmp(val, PS_OFFLINE) == 0)
			status = P_OFFLINE;
		else if (strcmp(val, PS_ONLINE) == 0)
			status = P_ONLINE;
		else if (strcmp(val, PS_NOINTR) == 0)
			status = P_NOINTR;
		else if (strcmp(val, PS_FAULTED) == 0)
			status = P_FAULTED;
		else if (strcmp(val, PS_SPARE) == 0)
			status = P_SPARE;
		else
			return (EINVAL);
		ret = p_online_internal(cpuid, status, &old_status);
	} else {
		mutex_enter(&cpu_lock);
		if ((cpu = cpu_get(cpuid)) == NULL) {
			/* Unknown CPU id: do not dereference a NULL cpu_t */
			ret = EINVAL;
		} else {
			if (cpu->cpu_props == NULL) {
				(void) nvlist_alloc(&cpu->cpu_props,
				    NV_UNIQUE_NAME, KM_SLEEP);
				(void) nvlist_add_string(cpu->cpu_props,
				    "cpu.comment", "");
			}
			ret = pool_propput_common(cpu->cpu_props, pair,
			    pool_cpu_props);
			if (ret == 0)
				pool_cpu_mod = gethrtime();
		}
		mutex_exit(&cpu_lock);
	}
	return (ret);
}
Example #21
0
void
apic_map(void) {
    uint i;
    struct vmm_flags flags = {.present = 1, .writeable = 1, .privileged = 1};

    /* map each lapic and ioapic base addr */
    for(i=0; i<IO_APIC_NUM; i++) {
        if(io_apic_tbl[i].base_addr == 0) {
            //*(byte *)(io_apic_tbl[i].base_addr) = '0';
            break;
        }

        vmm_map(flags, io_apic_tbl[i].base_addr, io_apic_tbl[i].base_addr);
    }

    for(i=0; i<cpu_count(); i++) {
        uint lapic_base = cpu_get(i)->lapic_base;

        vmm_map(flags, lapic_base, lapic_base);
    }
}
Example #22
0
/*
 * Stop the counters on the CPU this context is bound to.
 */
static void
kcpc_stop_hw(kcpc_ctx_t *ctx)
{
	cpu_t *cp;

	ASSERT((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED))
	    == KCPC_CTX_INVALID);

	kpreempt_disable();

	cp = cpu_get(ctx->kc_cpuid);
	ASSERT(cp != NULL);

	if (cp == CPU) {
		pcbe_ops->pcbe_allstop();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID_STOPPED);
	} else
		kcpc_remote_stop(cp);
	kpreempt_enable();
}
Example #23
0
static int
pset_assign(psetid_t pset, processorid_t cpuid, psetid_t *opset, int forced)
{
	psetid_t oldpset;
	int	error = 0;
	cpu_t	*cp;

	if (pset != PS_QUERY && secpolicy_pset(CRED()) != 0)
		return (set_errno(EPERM));

	pool_lock();
	if (pset != PS_QUERY && pool_state == POOL_ENABLED) {
		pool_unlock();
		return (set_errno(ENOTSUP));
	}

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		pool_unlock();
		return (set_errno(EINVAL));
	}

	oldpset = cpupart_query_cpu(cp);

	if (pset != PS_QUERY)
		error = cpupart_attach_cpu(pset, cp, forced);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error)
		return (set_errno(error));

	if (opset != NULL)
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));

	return (0);
}
Example #24
0
/*
 * Get dynamic property for CPUs.
 * The only dynamic property currently implemented is "cpu.status".
 */
int
pool_cpu_propget(processorid_t cpuid, char *name, nvlist_t *nvl)
{
	int ret = ESRCH;
	cpu_t *cpu;

	ASSERT(pool_lock_held());

	mutex_enter(&cpu_lock);
	if ((cpu = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		return (ESRCH);
	}
	if (strcmp(name, "cpu.status") == 0) {
		ret = nvlist_add_string(nvl, "cpu.status",
		    (char *)cpu_get_state_str(cpu));
	} else {
		ret = EINVAL;
	}
	mutex_exit(&cpu_lock);
	return (ret);
}
int
ip_squeue_cpu_move(squeue_t *sq, processorid_t cpuid)
{
	cpu_t *cpu;
	squeue_set_t *set;

	if (sq->sq_state & SQS_DEFAULT)
		return (-1);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cpu = cpu_get(cpuid);
	if (!CPU_ISON(cpu))
		return (-1);

	mutex_enter(&sqset_lock);
	set = cpu->cpu_squeue_set;
	if (set != NULL)
		ip_squeue_set_move(sq, set);
	mutex_exit(&sqset_lock);
	return ((set == NULL) ? -1 : 0);
}
/*
 * Called by the generic framework to check if it's OK to bind a set to a CPU.
 */
int
kcpc_hw_cpu_hook(processorid_t cpuid, ulong_t *kcpc_cpumap)
{
	cpu_t		*cpu, *p;
	pg_t		*chip_pg;
	pg_cpu_itr_t	itr;

	if (!strands_perfmon_shared)
		return (0);

	/*
	 * Only one logical CPU on each Pentium 4 HT CPU may be bound to at
	 * once.
	 *
	 * This loop is protected by holding cpu_lock, in order to properly
	 * access the cpu_t of the desired cpu.
	 */
	mutex_enter(&cpu_lock);
	if ((cpu = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		return (-1);
	}

	chip_pg = (pg_t *)pghw_find_pg(cpu, PGHW_CHIP);

	PG_CPU_ITR_INIT(chip_pg, itr);
	while ((p = pg_cpu_next(&itr)) != NULL) {
		if (p == cpu)
			continue;
		if (BT_TEST(kcpc_cpumap, p->cpu_id)) {
			mutex_exit(&cpu_lock);
			return (-1);
		}
	}

	mutex_exit(&cpu_lock);
	return (0);
}
/*
 * Initialize IP squeues.
 */
void
ip_squeue_init(void (*callback)(squeue_t *))
{
	int i;
	squeue_set_t	*sqs;

	ASSERT(sqset_global_list == NULL);

	ip_squeue_create_callback = callback;
	squeue_init();
	mutex_init(&sqset_lock, NULL, MUTEX_DEFAULT, NULL);
	sqset_global_list =
	    kmem_zalloc(sizeof (squeue_set_t *) * (NCPU+1), KM_SLEEP);
	sqset_global_size = 0;
	/*
	 * We are called at system boot time and we don't
	 * expect memory allocation failure.
	 */
	sqs = ip_squeue_set_create(-1);
	ASSERT(sqs != NULL);

	mutex_enter(&cpu_lock);
	/* Create squeue for each active CPU available */
	for (i = 0; i < NCPU; i++) {
		cpu_t *cp = cpu_get(i);
		if (CPU_ISON(cp) && cp->cpu_squeue_set == NULL) {
			/*
			 * We are called at system boot time and we don't
			 * expect memory allocation failure then
			 */
			cp->cpu_squeue_set = ip_squeue_set_create(cp->cpu_id);
			ASSERT(cp->cpu_squeue_set != NULL);
		}
	}

	register_cpu_setup_func(ip_squeue_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
}
Example #28
0
/* ARGSUSED */
void
cmp_error_resteer(processorid_t cpuid)
{
#ifndef	_CMP_NO_ERROR_STEERING
	cpuset_t mycores;
	cpu_t *cpu;
	chipid_t chipid;
	int i;

	if (!cmp_cpu_is_cmp(cpuid))
		return;

	ASSERT(MUTEX_HELD(&cpu_lock));
	chipid = cpunodes[cpuid].portid;
	mycores = chips[chipid];

	/* Look for an online sibling core */
	for (i = 0; i < NCPU; i++) {
		if (i == cpuid)
			continue;

		if (CPU_IN_SET(mycores, i) &&
		    (cpu = cpu_get(i)) != NULL && cpu_is_active(cpu)) {
			/* Found one, reset error steering  */
			xc_one(i, (xcfunc_t *)set_cmp_error_steering, 0, 0);
			break;
		}
	}

	/* No online sibling cores, point to this core.  */
	if (i == NCPU) {
		xc_one(cpuid, (xcfunc_t *)set_cmp_error_steering, 0, 0);
	}
#else
	/* Not all CMPs (e.g. Olympus-C by Fujitsu) support error steering */
	return;
#endif /* _CMP_NO_ERROR_STEERING */
}
void
mp_leave_barrier(void)
{
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	for (i = 0; i < NCPU; i++) {
		cpu_t *cp = cpu_get(i);

		if (cp == NULL || cp == CPU)
			continue;

		switch (cpu_phase[i]) {
		/*
		 * If we see a CPU in one of these phases, something has
		 * gone badly wrong with the guarantees
		 * mp_enter_barrier() is supposed to provide.  Rather
		 * than attempt to stumble along (and since we can't
		 * panic properly in this context), we tell the
		 * hypervisor we've crashed.
		 */
		case CPU_PHASE_NONE:
		case CPU_PHASE_WAIT_SAFE:
			(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
			break;

		case CPU_PHASE_POWERED_OFF:
			break;

		case CPU_PHASE_SAFE:
			cpu_phase[i] = CPU_PHASE_NONE;
		}
	}

	start_cpus();
}
Example #30
0
/*
 * Remove existing CPU property.
 */
int
pool_cpu_proprm(processorid_t cpuid, char *name)
{
	int ret;
	cpu_t *cpu;

	ASSERT(pool_lock_held());
	ASSERT(INGLOBALZONE(curproc));

	mutex_enter(&cpu_lock);
	if ((cpu = cpu_get(cpuid)) == NULL || cpu_is_poweredoff(cpu)) {
		ret = EINVAL;
	} else {
		if (cpu->cpu_props == NULL)
			ret = EINVAL;
		else
			ret = pool_proprm_common(cpu->cpu_props, name,
			    pool_cpu_props);
	}
	if (ret == 0)
		pool_cpu_mod = gethrtime();
	mutex_exit(&cpu_lock);
	return (ret);
}