Example #1
/*
 * cmi_hdl_walk callback: record and hold the first handle that satisfies
 * cms_cpu_match() against the target handle, then terminate the walk.
 */
static int
cms_search_list_cb(cmi_hdl_t whdl, void *arg1, void *arg2, void *arg3)
{
	cmi_hdl_t thdl = (cmi_hdl_t)arg1;
	int match = *((int *)arg2);
	cmi_hdl_t *rsltp = (cmi_hdl_t *)arg3;

	if (cms_cpu_match(thdl, whdl, match)) {
		cmi_hdl_hold(whdl);	/* short-term hold */
		*rsltp = whdl;
		return (CMI_HDL_WALK_DONE);
	} else {
		return (CMI_HDL_WALK_NEXT);
	}
}
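A callback of this shape is driven through the cmi handle walker. Below is a minimal sketch of such a caller, assuming the illumos cmi_hdl_walk() interface; the wrapper name cms_search_list is illustrative, not taken from the listing above.

static cmi_hdl_t
cms_search_list(cmi_hdl_t hdl, int match)
{
	cmi_hdl_t dhdl = NULL;

	/*
	 * Walk all registered handles; the callback above ends the walk
	 * at the first match and leaves the held handle in dhdl.
	 */
	cmi_hdl_walk(cms_search_list_cb, (void *)hdl, (void *)&match,
	    (void *)&dhdl);

	return (dhdl);	/* NULL if nothing matched */
}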
Example #2
/*
 * cms_init entry point.
 *
 * This module provides broad model-specific support for AMD families
 * 0x6, 0xf and 0x10.  Future families will have to be evaluated once their
 * documentation is available.
 */
int
authamd_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	uint_t procnodeid = cmi_hdl_procnodeid(hdl);
	struct authamd_nodeshared *sp, *osp;
	uint_t family = cmi_hdl_family(hdl);
	uint32_t rev = cmi_hdl_chiprev(hdl);
	authamd_data_t *authamd;
	uint64_t cap;

	if (authamd_ms_support_disable ||
	    !authamd_supported(hdl))
		return (ENOTSUP);

	if (!is_x86_feature(x86_featureset, X86FSET_MCA))
		return (ENOTSUP);

	if (cmi_hdl_rdmsr(hdl, IA32_MSR_MCG_CAP, &cap) != CMI_SUCCESS)
		return (ENOTSUP);

	/* MCG_CAP.CTL_P set means the global MCG_CTL register is present */
	if (!(cap & MCG_CAP_CTL_P))
		return (ENOTSUP);

	authamd = *datap = kmem_zalloc(sizeof (authamd_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in fini */
	authamd->amd_hdl = hdl;

	if ((sp = authamd_shared[procnodeid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct authamd_nodeshared), KM_SLEEP);
		sp->ans_chipid = chipid;
		sp->ans_procnodeid = procnodeid;
		sp->ans_family = family;
		sp->ans_rev = rev;
		membar_producer();	/* fields visible before publication */

		/*
		 * Publish the node-shared structure; if a sibling cpu on
		 * this node raced us and won, free ours and adopt theirs.
		 */
		osp = atomic_cas_ptr(&authamd_shared[procnodeid], NULL, sp);
		if (osp != NULL) {
			kmem_free(sp, sizeof (struct authamd_nodeshared));
			sp = osp;
		}
	}
	authamd->amd_shared = sp;

	return (0);
}
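The cmi_hdl_hold() taken in authamd_init is dropped in the module's fini entry point, as the "release in fini" comment notes. A minimal sketch of that pairing follows; the hook's exact signature and the decision to leave the node-shared structure in place are assumptions for illustration, not taken from the original.

void
authamd_fini(cmi_hdl_t hdl, void *data)
{
	authamd_data_t *authamd = data;

	/*
	 * Free only the per-cpu state; the node-shared structure stays
	 * behind for sibling cpus on the same processor node.
	 */
	kmem_free(authamd, sizeof (authamd_data_t));
	cmi_hdl_rele(hdl);	/* matches the hold taken in authamd_init */
}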
/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp == NULL) {
			mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER,
			    NULL);
			mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER,
			    NULL);
		} else {
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		}
	}
	gcpu->gcpu_shared = sp;

	return (0);
}
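Both init routines above use the same lock-free publication idiom: allocate a shared structure, initialize it, publish it with a compare-and-swap, and free the local copy if a sibling cpu won the race. The standalone sketch below restates that pattern with C11 atomics; the names and types are illustrative, not from the original, and the sequentially consistent CAS supplies the store ordering that authamd_init obtains explicitly from membar_producer().

#include <stdatomic.h>
#include <stdlib.h>

#define	MAX_CHIPID	32

struct chipshared {
	int cs_chipid;
};

static _Atomic(struct chipshared *) shared[MAX_CHIPID];

/* Return the per-chip shared structure, creating it if we are first. */
static struct chipshared *
get_chipshared(int chipid)
{
	struct chipshared *expected = NULL;
	struct chipshared *sp = atomic_load(&shared[chipid]);

	if (sp != NULL)
		return (sp);

	if ((sp = calloc(1, sizeof (*sp))) == NULL)
		return (NULL);
	sp->cs_chipid = chipid;

	/* Publish; if a sibling won the race, discard ours and adopt theirs. */
	if (!atomic_compare_exchange_strong(&shared[chipid], &expected, sp)) {
		free(sp);
		sp = expected;
	}
	return (sp);
}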