Example #1
/*
 * Gather the system-wide kstats (sysinfo, vminfo, the DNLC stats and
 * selected system_misc fields) and aggregate the per-CPU sys and vm
 * stats of the active CPUs.
 */
int
acquire_sys(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;
	kstat_named_t *knp;
	kstat_t *ksp;

	if ((ksp = kstat_lookup(kc, "unix", 0, "sysinfo")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_sysinfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "vminfo")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_vminfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "dnlcstats")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_nc) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, NULL) == -1)
		return (errno);

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "clk_intr");
	if (knp == NULL)
		return (errno);

	ss->s_sys.ss_ticks = knp->value.l;

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "deficit");
	if (knp == NULL)
		return (errno);

	ss->s_sys.ss_deficit = knp->value.l;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if (kstat_add(&ss->s_cpus[i].cs_sys, &ss->s_sys.ss_agg_sys))
			return (errno);
		if (kstat_add(&ss->s_cpus[i].cs_vm, &ss->s_sys.ss_agg_vm))
			return (errno);
		ss->s_nr_active_cpus++;
	}

	return (0);
}
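
For readers who have not used libkstat, here is a stand-alone sketch of the lookup-then-read pattern that acquire_sys() applies repeatedly above. It is an illustration only, assuming a Solaris/illumos system and linking with -lkstat; it reads the same "unix:0:system_misc" kstat and "clk_intr" field used in the example.

#include <stdio.h>
#include <kstat.h>

/*
 * Minimal demonstration of the lookup-then-read pattern from
 * acquire_sys(); compile with -lkstat on Solaris or illumos.
 * Error handling is reduced to the bare minimum.
 */
int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *knp;

	if ((kc = kstat_open()) == NULL) {
		perror("kstat_open");
		return (1);
	}

	/* "unix:0:system_misc" is a named kstat: read it, then look up fields */
	if ((ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL ||
	    kstat_read(kc, ksp, NULL) == -1) {
		perror("system_misc");
		(void) kstat_close(kc);
		return (1);
	}

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "clk_intr");
	if (knp != NULL)
		(void) printf("clk_intr = %ld\n", (long)knp->value.l);

	(void) kstat_close(kc);
	return (0);
}
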
Example #2
/*
 * Jump to the fast reboot switcher.  This function never returns.
 */
void
fast_reboot(void)
{
	processorid_t bootcpuid = 0;
	extern uintptr_t postbootkernelbase;
	extern char	fb_swtch_image[];
	fastboot_file_t	*fb;
	int i;

	postbootkernelbase = 0;

	fb = &newkernel.fi_files[FASTBOOT_SWTCH];

	/*
	 * Map the address into both the current proc's address
	 * space and the kernel's address space in case the panic
	 * is forced by kmdb.
	 */
	if (&kas != curproc->p_as) {
		hat_devload(curproc->p_as->a_hat, (caddr_t)fb->fb_va,
		    MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
	}

	bcopy((void *)fb_swtch_image, (void *)fb->fb_va, fb->fb_size);

	/*
	 * Set fb_va to fake_va
	 */
	for (i = 0; i < FASTBOOT_MAX_FILES_MAP; i++) {
		newkernel.fi_files[i].fb_va = fake_va;
	}

	if (panicstr && CPU->cpu_id != bootcpuid &&
	    CPU_ACTIVE(cpu_get(bootcpuid))) {
		extern void panic_idle(void);
		cpuset_t cpuset;

		CPUSET_ZERO(cpuset);
		CPUSET_ADD(cpuset, bootcpuid);
		xc_priority((xc_arg_t)&newkernel, 0, 0, CPUSET2BV(cpuset),
		    (xc_func_t)fastboot_xc_func);

		panic_idle();
	} else
		(void) fastboot_xc_func(&newkernel, 0, 0);
}
Example #3
/*
 * Class callback when a CPU is actually moving partitions
 */
static void
pg_cmt_cpupart_move(cpu_t *cp, cpupart_t *oldpp, cpupart_t *newpp)
{
	cpu_t		*cpp;
	group_t		*pgs;
	pg_t		*pg;
	group_iter_t	pg_iter;
	pg_cpu_itr_t	cpu_iter;
	boolean_t	found;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&pg_iter);

	/*
	 * Iterate over the CPU's CMT PGs
	 */
	while ((pg = group_iterate(pgs, &pg_iter)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		/*
		 * Add the PG to the bitset in the new partition.
		 */
		bitset_add(&newpp->cp_cmt_pgs, pg->pg_id);

		/*
		 * Remove the PG from the bitset in the old partition
		 * if the last of the PG's CPUs have left.
		 */
		found = B_FALSE;
		PG_CPU_ITR_INIT(pg, cpu_iter);
		while ((cpp = pg_cpu_next(&cpu_iter)) != NULL) {
			if (cpp == cp)
				continue;
			if (CPU_ACTIVE(cpp) &&
			    cpp->cpu_part->cp_id == oldpp->cp_id) {
				found = B_TRUE;
				break;
			}
		}
		if (!found)
			bitset_del(&oldpp->cp_cmt_pgs, pg->pg_id);
	}
}
Example #4
/*
 * Record in each CPU's cpu_cpr_flags whether the CPU is currently
 * online, so resume can restore the same online/offline configuration.
 */
static void
cpr_save_mp_state(void)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp = cpu_list;
	do {
		cp->cpu_cpr_flags &= ~CPU_CPR_ONLINE;
		if (CPU_ACTIVE(cp))
			CPU_SET_CPR_FLAGS(cp, CPU_CPR_ONLINE);
	} while ((cp = cp->cpu_next) != cpu_list);
}
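
A note on the macro these kernel examples test: CPU_ACTIVE() comes from the kernel's <sys/cpuvar.h>. The sketch below is a paraphrase of the assumed definition, not a verbatim copy, so verify it against your source tree; the userland examples (#1, #5 and #10) instead use a CPU_ACTIVE() from the stat tools' common header, built on the cs_state obtained via p_online().

/*
 * Assumed shape of the kernel-side macro (paraphrased, not verbatim):
 * a CPU is "active" when its flags do not mark it offline, i.e. it
 * still participates in scheduling.
 */
#define	CPU_ACTIVE(cpu)	(((cpu)->cpu_flags & CPU_OFFLINE) == 0)
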
Example #5
/*
 * Probe every possible CPU id, snapshotting its state and pset
 * binding, and copy the "vm" and "sys" kstats of each active CPU.
 */
static int
acquire_cpus(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;

	ss->s_nr_cpus = sysconf(_SC_CPUID_MAX) + 1;
	ss->s_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot));
	if (ss->s_cpus == NULL)
		goto out;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		kstat_t *ksp;

		ss->s_cpus[i].cs_id = ID_NO_CPU;
		ss->s_cpus[i].cs_state = p_online(i, P_STATUS);
		/* If no valid CPU is present, move on to the next one */
		if (ss->s_cpus[i].cs_state == -1)
			continue;
		ss->s_cpus[i].cs_id = i;

		if ((ksp = kstat_lookup_read(kc, "cpu_info", i, NULL)) == NULL)
			goto out;

		(void) pset_assign(PS_QUERY, i, &ss->s_cpus[i].cs_pset_id);
		if (ss->s_cpus[i].cs_pset_id == PS_NONE)
			ss->s_cpus[i].cs_pset_id = ID_NO_PSET;

		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "vm")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_vm))
			goto out;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "sys")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_sys))
			goto out;
	}

	errno = 0;
out:
	return (errno);
}
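
The probe loop above leans on the fact that p_online() with P_STATUS fails for ids that do not correspond to a CPU. A minimal stand-alone sketch of that enumeration (Solaris/illumos only; everything beyond the system interfaces is invented for the example):

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/processor.h>

/*
 * Sketch of the probe loop acquire_cpus() relies on: every id in
 * [0, _SC_CPUID_MAX] is queried with p_online(id, P_STATUS); -1 just
 * means "no CPU with this id", so the loop skips it.
 */
int
main(void)
{
	long i, max = sysconf(_SC_CPUID_MAX);

	for (i = 0; i <= max; i++) {
		int state = p_online((processorid_t)i, P_STATUS);

		if (state == -1)
			continue;	/* no CPU with this id */
		(void) printf("cpu %ld: %s\n", i,
		    state == P_ONLINE ? "online" :
		    state == P_NOINTR ? "online, no interrupts" : "other");
	}
	return (0);
}
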
Example #6
int
cpr_mp_online(void)
{
	cpu_t *cp, *bootcpu = CPU;
	int rc = 0;

	/*
	 * Do nothing for UP.
	 */
	if (ncpus == 1)
		return (0);

	/*
	 * cpr_save_mp_state() sets CPU_CPR_ONLINE in cpu_cpr_flags
	 * to indicate a cpu was online at the time of cpr_suspend();
	 * now restart those cpus that were marked as CPU_CPR_ONLINE
	 * and actually are offline.
	 */
	mutex_enter(&cpu_lock);
	for (cp = bootcpu->cpu_next; cp != bootcpu; cp = cp->cpu_next) {
		/*
		 * Clear the CPU_FROZEN flag in all cases.
		 */
		cp->cpu_flags &= ~CPU_FROZEN;

		if (CPU_CPR_IS_OFFLINE(cp))
			continue;
		if (CPU_ACTIVE(cp))
			continue;
		if ((rc = cpr_p_online(cp, CPU_CPR_ONLINE))) {
			mutex_exit(&cpu_lock);
			return (rc);
		}
	}

	/*
	 * turn off the boot cpu if it was offlined
	 */
	if (CPU_CPR_IS_OFFLINE(bootcpu)) {
		if ((rc = cpr_p_online(bootcpu, CPU_CPR_OFFLINE))) {
			mutex_exit(&cpu_lock);
			return (rc);
		}
	}
	mutex_exit(&cpu_lock);
	return (0);
}
Example #7
/*
 * CPU ONLINE/OFFLINE CODE
 */
int
cpr_mp_offline(void)
{
	cpu_t *cp, *bootcpu;
	int rc = 0;
	int brought_up_boot = 0;

	/*
	 * Do nothing for UP.
	 */
	if (ncpus == 1)
		return (0);

	mutex_enter(&cpu_lock);

	cpr_save_mp_state();

	bootcpu = i_cpr_bootcpu();
	if (!CPU_ACTIVE(bootcpu)) {
		if ((rc = cpr_p_online(bootcpu, CPU_CPR_ONLINE))) {
			mutex_exit(&cpu_lock);
			return (rc);
		}
		brought_up_boot = 1;
	}

	cp = cpu_list;
	do {
		if (cp == bootcpu)
			continue;
		if (cp->cpu_flags & CPU_OFFLINE)
			continue;
		if ((rc = cpr_p_online(cp, CPU_CPR_OFFLINE))) {
			mutex_exit(&cpu_lock);
			return (rc);
		}
	} while ((cp = cp->cpu_next) != cpu_list);
	if (brought_up_boot && (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG6)))
		prom_printf("changed cpu %p to state %d\n",
		    (void *)bootcpu, CPU_CPR_ONLINE);
	mutex_exit(&cpu_lock);

	return (rc);
}
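
Examples #4, #6 and #7 all walk the kernel's circular cpu_list with a do/while loop that stops when the walk returns to its starting node. A toy user-space sketch of the same idiom (the node type is invented for illustration):

#include <stdio.h>

/*
 * Toy illustration of the circular-list walk used by
 * cpr_save_mp_state() and cpr_mp_offline(): start at any node, follow
 * the next pointers, stop when the walk returns to the start.
 */
struct node {
	int id;
	struct node *next;
};

int
main(void)
{
	struct node c = { 2, NULL }, b = { 1, &c }, a = { 0, &b };
	struct node *np;

	c.next = &a;	/* close the ring */

	np = &a;
	do {
		(void) printf("visiting node %d\n", np->id);
	} while ((np = np->next) != &a);

	return (0);
}
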
Example #8
/*
 * Machine-dependent boot: reboot, halt or power off the machine,
 * falling back from Fast Reboot to a regular reboot when necessary.
 */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *mdep, boolean_t invoke_cb)
{
	processorid_t bootcpuid = 0;
	static int is_first_quiesce = 1;
	static int is_first_reset = 1;
	int reset_status = 0;
	static char fallback_str[] = "Falling back to regular reboot.\n";

	if (fcn == AD_FASTREBOOT && !newkernel.fi_valid)
		fcn = AD_BOOT;

	if (!panicstr) {
		kpreempt_disable();
		if (fcn == AD_FASTREBOOT) {
			mutex_enter(&cpu_lock);
			if (CPU_ACTIVE(cpu_get(bootcpuid))) {
				affinity_set(bootcpuid);
			}
			mutex_exit(&cpu_lock);
		} else {
			affinity_set(CPU_CURRENT);
		}
	}

	if (force_shutdown_method != AD_UNKNOWN)
		fcn = force_shutdown_method;

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * Print the reboot message now, before pausing other cpus.
	 * There is a race condition in the printing support that
	 * can deadlock multiprocessor machines.
	 */
	if (!(fcn == AD_HALT || fcn == AD_POWEROFF))
		prom_printf("rebooting...\n");

	if (IN_XPV_PANIC())
		reset();

	/*
	 * We can't bring up the console from above lock level, so do it now
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	page_retire_mdboot();

#if defined(__xpv)
	/*
	 * XXPV	Should probably think some more about how we deal
	 *	with panicking before it's really safe to panic.
	 *	On hypervisors, we reboot very quickly.  Perhaps panic
	 *	should only attempt to recover by rebooting if,
	 *	say, we were able to mount the root filesystem,
	 *	or if we successfully launched init(1m).
	 */
	if (panicstr && proc_init == NULL)
		(void) HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif
	/*
	 * Stop other CPUs and raise our priority.  Since there is only
	 * one active CPU after this, and our priority will be too high
	 * for us to be preempted, we're essentially single-threaded
	 * from here on out.
	 */
	(void) spl6();
	if (!panicstr) {
		mutex_enter(&cpu_lock);
		pause_cpus(NULL, NULL);
		mutex_exit(&cpu_lock);
	}

	/*
	 * If the system is panicking, the preloaded kernel is valid,
	 * fastreboot_onpanic has been set, and the system has been up for
	 * longer than fastreboot_onpanic_uptime (defaults to 10 minutes),
	 * choose Fast Reboot.
	 */
	if (fcn == AD_BOOT && panicstr && newkernel.fi_valid &&
	    fastreboot_onpanic &&
	    (panic_lbolt - lbolt_at_boot) > fastreboot_onpanic_uptime) {
		fcn = AD_FASTREBOOT;
	}

	/*
	 * Try to quiesce devices.
	 */
	if (is_first_quiesce) {
		/*
		 * Clear is_first_quiesce before calling quiesce_devices()
		 * so that if quiesce_devices() causes panics, it will not
		 * be invoked again.
		 */
		is_first_quiesce = 0;

		quiesce_active = 1;
		quiesce_devices(ddi_root_node(), &reset_status);
		if (reset_status == -1) {
			if (fcn == AD_FASTREBOOT && !force_fastreboot) {
				prom_printf("Driver(s) not capable of fast "
				    "reboot.\n");
				prom_printf(fallback_str);
				fastreboot_capable = 0;
				fcn = AD_BOOT;
			} else if (fcn != AD_FASTREBOOT)
				fastreboot_capable = 0;
		}
		quiesce_active = 0;
	}

	/*
	 * Try to reset devices. reset_leaves() should only be called
	 * a) when there are no other threads that could be accessing devices,
	 *    and
	 * b) on a system that's not capable of fast reboot (fastreboot_capable
	 *    being 0), or on a system where quiesce_devices() failed to
	 *    complete (quiesce_active being 1).
	 */
	if (is_first_reset && (!fastreboot_capable || quiesce_active)) {
		/*
		 * Clear is_first_reset before calling reset_devices()
		 * so that if reset_devices() causes panics, it will not
		 * be invoked again.
		 */
		is_first_reset = 0;
		reset_leaves();
	}

	/* Verify newkernel checksum */
	if (fastreboot_capable && fcn == AD_FASTREBOOT &&
	    fastboot_cksum_verify(&newkernel) != 0) {
		fastreboot_capable = 0;
		prom_printf("Fast reboot: checksum failed for the new "
		    "kernel.\n");
		prom_printf(fallback_str);
	}

	(void) spl8();

	if (fastreboot_capable && fcn == AD_FASTREBOOT) {
		/*
		 * psm_shutdown is called within fast_reboot()
		 */
		fast_reboot();
	} else {
		(*psm_shutdownf)(cmd, fcn);

		if (fcn == AD_HALT || fcn == AD_POWEROFF)
			halt((char *)NULL);
		else
			prom_reboot("");
	}
	/*NOTREACHED*/
}
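
One detail worth highlighting in mdboot() is the guard pattern around quiesce_devices() and reset_leaves(): the static flag is cleared before the risky call, so a panic raised inside the call cannot re-trigger it on re-entry. A toy sketch of the pattern (all names invented):

#include <stdio.h>

/*
 * Toy sketch of the "clear the guard before the risky call" pattern
 * used with is_first_quiesce and is_first_reset in mdboot().  If
 * risky_step() panics and the shutdown path is re-entered, the step
 * is not attempted a second time.
 */
static int is_first_try = 1;

static void
risky_step(void)
{
	(void) printf("doing the risky step once\n");
}

static void
shutdown_path(void)
{
	if (is_first_try) {
		is_first_try = 0;	/* clear first, then call */
		risky_step();
	}
}

int
main(void)
{
	shutdown_path();
	shutdown_path();	/* a second entry skips the step */
	return (0);
}
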
Example #9
/*
 * Class callback when a CPU goes inactive (offline)
 *
 * This is called in a context where CPUs are paused
 */
static void
pg_cmt_cpu_inactive(cpu_t *cp)
{
	int		err;
	group_t		*pgs;
	pg_cmt_t	*pg;
	cpu_t		*cpp;
	group_iter_t	i;
	pg_cpu_itr_t	cpu_itr;
	boolean_t	found;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&i);

	while ((pg = group_iterate(pgs, &i)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		/*
		 * Remove the CPU from the CMT PG's active CPU group
		 * and bitset
		 */
		err = group_remove(&pg->cmt_cpus_actv, cp, GRP_NORESIZE);
		ASSERT(err == 0);

		bitset_del(&pg->cmt_cpus_actv_set, cp->cpu_seqid);

		/*
		 * If there are no more active CPUs in this PG over which
		 * load was balanced, remove it as a balancing candidate.
		 */
		if (GROUP_SIZE(&pg->cmt_cpus_actv) == 0 &&
		    (pg->cmt_policy & (CMT_BALANCE | CMT_COALESCE))) {
			err = group_remove(pg->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(err == 0);

			if (pg->cmt_parent == NULL &&
			    pg->cmt_siblings != &cmt_root->cl_pgs) {
				err = group_remove(&cmt_root->cl_pgs, pg,
				    GRP_NORESIZE);
				ASSERT(err == 0);
			}
		}

		/*
		 * Assert the number of active CPUs does not exceed
		 * the total number of CPUs in the PG
		 */
		ASSERT(GROUP_SIZE(&pg->cmt_cpus_actv) <=
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		/*
		 * Update the PG bitset in the CPU's old partition
		 */
		found = B_FALSE;
		PG_CPU_ITR_INIT(pg, cpu_itr);
		while ((cpp = pg_cpu_next(&cpu_itr)) != NULL) {
			if (cpp == cp)
				continue;
			if (CPU_ACTIVE(cpp) &&
			    cpp->cpu_part->cp_id == cp->cpu_part->cp_id) {
				found = B_TRUE;
				break;
			}
		}
		if (!found) {
			bitset_del(&cp->cpu_part->cp_cmt_pgs,
			    ((pg_t *)pg)->pg_id);
		}
	}
}
Example #10
/*
 * Snapshot the processor sets, attaching each active CPU to the pset
 * snapshot it belongs to (slot 0 collects CPUs in no pset).
 */
static int
acquire_psets(struct snapshot *ss)
{
	psetid_t *pids = NULL;
	struct pset_snapshot *ps;
	size_t pids_nr;
	size_t i, j;

	/*
	 * Careful in this code. We have to use pset_list
	 * twice, but in between pids_nr can change at will.
	 * We delay the setting of s_nr_psets until we have
	 * the "final" value of pids_nr.
	 */

	if (pset_list(NULL, &pids_nr) < 0)
		return (errno);

	if ((pids = calloc(pids_nr, sizeof (psetid_t))) == NULL)
		goto out;

	if (pset_list(pids, &pids_nr) < 0)
		goto out;

	ss->s_psets = calloc(pids_nr + 1, sizeof (struct pset_snapshot));
	if (ss->s_psets == NULL)
		goto out;
	ss->s_nr_psets = pids_nr + 1;

	/* CPUs not in any actual pset */
	ps = &ss->s_psets[0];
	ps->ps_id = 0;
	ps->ps_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
	if (ps->ps_cpus == NULL)
		goto out;

	/* CPUs in an actual pset */
	for (i = 1; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		ps->ps_id = pids[i - 1];
		ps->ps_cpus =
		    calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
		if (ps->ps_cpus == NULL)
			goto out;
	}

	for (i = 0; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		for (j = 0; j < ss->s_nr_cpus; j++) {
			if (!CPU_ACTIVE(&ss->s_cpus[j]))
				continue;
			if (ss->s_cpus[j].cs_pset_id != ps->ps_id)
				continue;

			ps->ps_cpus[ps->ps_nr_cpus++] = &ss->s_cpus[j];
		}
	}

	errno = 0;
out:
	free(pids);
	return (errno);
}
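
Finally, the double call to pset_list() above is the standard size-then-fill idiom for this API. A stand-alone sketch follows (Solaris/illumos only); as the comment in acquire_psets() warns, the pset count can change between the two calls, so the count returned by the second call is the authoritative one.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/pset.h>

/*
 * Sketch of the two-call pset_list() idiom from acquire_psets():
 * the first call sizes the buffer, the second fills it.
 */
int
main(void)
{
	psetid_t *pids;
	uint_t nr, i;

	if (pset_list(NULL, &nr) != 0) {
		perror("pset_list");
		return (1);
	}

	/* nr + 1 avoids a zero-size allocation when there are no psets */
	if ((pids = calloc(nr + 1, sizeof (psetid_t))) == NULL)
		return (1);

	if (pset_list(pids, &nr) != 0) {
		perror("pset_list");
		free(pids);
		return (1);
	}

	for (i = 0; i < nr; i++)
		(void) printf("pset %d\n", (int)pids[i]);

	free(pids);
	return (0);
}
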