Example #1
int
_init(void)
{
	int retval;
	extern int install_callbacks(void);
	extern void remove_callbacks(void);

	if (install_callbacks() != 0)
		return (ENXIO);

	if (boothowto & RB_HALT)
		debug_enter("obpsym: halt flag (-h) is set.\n");

	retval = mod_install(&modlinkage);

	/*
	 * If the install failed, remove the callbacks and
	 * unlock the symbols.
	 */
	if (retval) {
		printf("obpsym: Error %d installing OBP syms module\n", retval);
		remove_callbacks();
	} else {
		(void) callb_add(reset_callbacks, 0, CB_CL_CPR_OBP, "obpsym");
	}

	return (retval);
}
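A loadable module that installs callbacks in _init() generally needs a matching _fini() that tears them down, and only after a clean unload. The sketch below is hypothetical, not obpsym's actual _fini(); it assumes the same modlinkage and remove_callbacks() referenced above.

int
_fini(void)
{
	int retval;
	extern void remove_callbacks(void);

	/*
	 * Hypothetical teardown: drop the OBP callbacks only once
	 * mod_remove() has confirmed the module can unload.
	 */
	retval = mod_remove(&modlinkage);
	if (retval == 0)
		remove_callbacks();

	return (retval);
}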
Example #2
/*
 * Initialize all callout tables.  Called at boot time just before clkstart().
 */
void
callout_init(void)
{
	int f, t;
	int table_id;
	callout_table_t *ct;

	callout_fanout = MIN(CALLOUT_FANOUT, max_ncpus);

	for (t = 0; t < CALLOUT_NTYPES; t++) {
		for (f = 0; f < CALLOUT_FANOUT; f++) {
			table_id = CALLOUT_TABLE(t, f);
			if (f >= callout_fanout) {
				callout_table[table_id] =
				    callout_table[table_id - callout_fanout];
				continue;
			}
			ct = kmem_zalloc(sizeof (callout_table_t), KM_SLEEP);
			callout_table[table_id] = ct;
			ct->ct_short_id = (callout_id_t)table_id |
			    CALLOUT_COUNTER_HIGH;
			ct->ct_long_id = ct->ct_short_id | CALLOUT_LONGTERM;
			ct->ct_curtime = ct->ct_runtime = lbolt;
			if (t == CALLOUT_NORMAL) {
				/*
				 * Each callout thread consumes exactly one
				 * task structure while active.  Therefore,
				 * prepopulating with 2 * CALLOUT_THREADS tasks
				 * ensures that there's at least one task per
				 * thread that's either scheduled or on the
				 * freelist.  In turn, this guarantees that
				 * taskq_dispatch() will always either succeed
				 * (because there's a free task structure) or
				 * be unnecessary (because "callout_execute(ct)"
				 * has already scheduled).
				 */
				ct->ct_taskq =
				    taskq_create_instance("callout_taskq", f,
				    CALLOUT_THREADS, maxclsyspri,
				    2 * CALLOUT_THREADS, 2 * CALLOUT_THREADS,
				    TASKQ_PREPOPULATE | TASKQ_CPR_SAFE);
			}
		}
	}
	(void) callb_add(callout_cpr_callb, 0, CB_CL_CPR_CALLOUT, "callout");
}
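The subtle part of callout_init() is the fanout aliasing: slots whose f is at or above callout_fanout are pointed back at already-allocated tables, so an index computed from any f in [0, CALLOUT_FANOUT) always lands on a real table. Below is a standalone, hypothetical sketch of the same trick with toy sizes and a made-up TABLE_ID() macro; the kernel's actual CALLOUT_TABLE() layout may differ.

#include <stdio.h>

#define	FANOUT	8	/* compile-time slot count per type */
#define	NTYPES	2

/* Toy stand-in for CALLOUT_TABLE(); assumes a (type * FANOUT) + f layout. */
#define	TABLE_ID(t, f)	(((t) * FANOUT) + (f))

static int *table[NTYPES * FANOUT];
static int storage[NTYPES * FANOUT];

int
main(void)
{
	int live_fanout = 3;	/* e.g. MIN(FANOUT, ncpus) on a 3-CPU box */
	int t, f;

	for (t = 0; t < NTYPES; t++) {
		for (f = 0; f < FANOUT; f++) {
			int id = TABLE_ID(t, f);
			if (f >= live_fanout) {
				/* Alias back to an allocated slot. */
				table[id] = table[id - live_fanout];
				continue;
			}
			table[id] = &storage[id];
		}
	}
	/* Every slot is valid: f = 7 chains down to f = 1 when fanout is 3. */
	printf("table[%d] aliases storage slot %d\n",
	    TABLE_ID(0, 7), (int)(table[TABLE_ID(0, 7)] - storage));
	return (0);
}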
Example #3
/*
 * Validate that this processor supports deep C-states and, if so,
 * fetch the C-state data from ACPI and cache it.
 */
static int
cpu_idle_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	char name[KSTAT_STRLEN];
	int cpu_max_cstates, i;
	int ret;

	/*
	 * Cache the C-state specific ACPI data.
	 */
	if ((ret = cpu_acpi_cache_cstate_data(handle)) != 0) {
		if (ret < 0)
			cmn_err(CE_NOTE,
			    "!Support for CPU deep idle states is being "
			    "disabled due to errors parsing ACPI C-state "
			    "objects exported by BIOS.");
		cpu_idle_fini(cp);
		return (-1);
	}

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);

	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

	for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
		(void) snprintf(name, KSTAT_STRLEN - 1, "c%d", cstate->cs_type);
		/*
		 * Allocate, initialize and install cstate kstat
		 */
		cstate->cs_ksp = kstat_create("cstate", CPU->cpu_id,
		    name, "misc",
		    KSTAT_TYPE_NAMED,
		    sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);

		if (cstate->cs_ksp == NULL) {
			cmn_err(CE_NOTE, "kstat_create(c_state) failed");
		} else {
			cstate->cs_ksp->ks_data = &cpu_idle_kstat;
			cstate->cs_ksp->ks_lock = &cpu_idle_mutex;
			cstate->cs_ksp->ks_update = cpu_idle_kstat_update;
			cstate->cs_ksp->ks_data_size += MAXNAMELEN;
			cstate->cs_ksp->ks_private = cstate;
			kstat_install(cstate->cs_ksp);
			cstate++;
		}
	}

	cpupm_alloc_domains(cp, CPUPM_C_STATES);
	cpupm_alloc_ms_cstate(cp);

	if (cpu_deep_cstates_supported()) {
		uint32_t value;

		mutex_enter(&cpu_idle_callb_mutex);
		if (cpu_deep_idle_callb_id == (callb_id_t)0)
			cpu_deep_idle_callb_id = callb_add(&cpu_deep_idle_callb,
			    (void *)NULL, CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
		if (cpu_idle_cpr_callb_id == (callb_id_t)0)
			cpu_idle_cpr_callb_id = callb_add(&cpu_idle_cpr_callb,
			    (void *)NULL, CB_CL_CPR_PM, "cpu_idle_cpr");
		mutex_exit(&cpu_idle_callb_mutex);
		/*
		 * All supported CPUs (Nehalem and later) will remain in C3
		 * during Bus Master activity, so clear
		 * ACPI_BITREG_BUS_MASTER_RLD here if it is not already 0
		 * before enabling deeper C-states.
		 */
		cpu_acpi_get_register(ACPI_BITREG_BUS_MASTER_RLD, &value);
		if (value & 1)
			cpu_acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	return (0);
}
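The kstats installed in cpu_idle_init() defer all real work to ks_update and ks_private. The actual cpu_idle_kstat_update() is not shown in this excerpt; the following is a hedged sketch of how such a routine typically looks. The published fields are illustrative assumptions (cs_latency is an assumed member of cpu_acpi_cstate_t; cs_type appears above); only the ks_update contract itself (return 0 on KSTAT_READ, EACCES on writes) comes from the kstat API.

static int
cpu_idle_kstat_update(kstat_t *ksp, int rw)
{
	cpu_acpi_cstate_t *cstate = ksp->ks_private;
	kstat_named_t *knp = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);	/* the cstate kstats are read-only */

	/*
	 * Publish the cached ACPI C-state attributes.  The field
	 * names and ordering here are illustrative assumptions.
	 */
	knp[0].value.ui32 = cstate->cs_type;
	knp[1].value.ui32 = cstate->cs_latency;	/* assumed member */

	return (0);
}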