/*
 * Attempt to load and initialize a model-specific (cms) module for the
 * cpu instance identified by hdl.  Candidate modules are tried from the
 * most specific match level (CMS_MATCH_STEPPING) down to the least
 * specific (CMS_MATCH_VENDOR).  On success the held module's cms_t is
 * returned and *datap holds the module's per-instance data; on failure
 * NULL is returned.  Caller must hold cms_load_lock.
 */
static cms_t *
cms_load_specific(cmi_hdl_t hdl, void **datap)
{
	cms_t *cms;
	int err;
	int i;

	ASSERT(MUTEX_HELD(&cms_load_lock));

	for (i = CMS_MATCH_STEPPING; i >= CMS_MATCH_VENDOR; i--) {
		int suffixlevel;

		/* No module at this or any coarser match level: give up. */
		if ((cms = cms_load_module(hdl, i, &suffixlevel)) == NULL)
			return (NULL);

		/*
		 * A module has loaded and has a _cms_ops structure, and the
		 * module has been held for this instance.  Call the cms_init
		 * entry point - we expect success (0) or ENOTSUP.
		 */
		if ((err = cms->cms_ops->cms_init(hdl, datap)) == 0) {
			if (boothowto & RB_VERBOSE) {
				printf("initialized model-specific "
				    "module '%s' on chip %d core %d "
				    "strand %d\n",
				    cms->cms_modp->mod_modname,
				    cmi_hdl_chipid(hdl),
				    cmi_hdl_coreid(hdl),
				    cmi_hdl_strandid(hdl));
			}
			return (cms);
		} else if (err != ENOTSUP) {
			/* Hard failure (not a polite decline): warn. */
			cmn_err(CE_WARN, "failed to init model-specific "
			    "module '%s' on chip %d core %d strand %d: err=%d",
			    cms->cms_modp->mod_modname,
			    cmi_hdl_chipid(hdl),
			    cmi_hdl_coreid(hdl),
			    cmi_hdl_strandid(hdl), err);
		}

		/*
		 * The module failed or declined to init, so release
		 * it and potentially change i to be equal to the number
		 * of suffices actually used in the last module path.
		 * (The next loop iteration then retries one level
		 * coarser than what was actually attempted.)
		 */
		cms_rele(cms);
		i = suffixlevel;
	}

	return (NULL);
}
/*
 * cms_init entry point.
 *
 * This module provides broad model-specific support for AMD families
 * 0x6, 0xf and 0x10.  Future families will have to be evaluated once
 * their documentation is available.
 *
 * Returns 0 on success with *datap pointing at the newly allocated
 * per-instance authamd_data_t, or ENOTSUP if this cpu is not supported.
 */
int
authamd_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	uint_t procnodeid = cmi_hdl_procnodeid(hdl);
	uint_t family = cmi_hdl_family(hdl);
	uint32_t rev = cmi_hdl_chiprev(hdl);
	struct authamd_nodeshared *nsp, *winner;
	authamd_data_t *amd;
	uint64_t mcg_cap;

	/* Global kill-switch, or a model we do not claim to support. */
	if (authamd_ms_support_disable || !authamd_supported(hdl))
		return (ENOTSUP);

	/* Require the MCA feature and a readable MCG_CAP with CTL_P set. */
	if (!is_x86_feature(x86_featureset, X86FSET_MCA))
		return (ENOTSUP);

	if (cmi_hdl_rdmsr(hdl, IA32_MSR_MCG_CAP, &mcg_cap) != CMI_SUCCESS)
		return (ENOTSUP);

	if (!(mcg_cap & MCG_CAP_CTL_P))
		return (ENOTSUP);

	amd = *datap = kmem_zalloc(sizeof (authamd_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in fini */
	amd->amd_hdl = hdl;

	/*
	 * Publish the per-node shared structure if no sibling has done so
	 * yet.  Fully construct it and issue a producer barrier before the
	 * CAS so that any sibling observing the pointer also observes the
	 * initialized contents.  If we lose the race, discard our copy.
	 */
	if ((nsp = authamd_shared[procnodeid]) == NULL) {
		nsp = kmem_zalloc(sizeof (struct authamd_nodeshared),
		    KM_SLEEP);
		nsp->ans_chipid = chipid;
		nsp->ans_procnodeid = procnodeid;
		nsp->ans_family = family;
		nsp->ans_rev = rev;
		membar_producer();

		winner = atomic_cas_ptr(&authamd_shared[procnodeid],
		    NULL, nsp);
		if (winner != NULL) {
			kmem_free(nsp, sizeof (struct authamd_nodeshared));
			nsp = winner;
		}
	}
	amd->amd_shared = nsp;

	return (0);
}
/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 *
 * Returns 0 on success with *datap pointing at the newly allocated
 * per-cpu gcpu_data_t, or ENOTSUP if gcpu support is disabled or the
 * chipid is out of range.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 *
	 * Fully construct the structure - mutexes included - and issue
	 * a membar_producer() *before* publishing it with atomic_cas_ptr.
	 * Publishing first and initializing the mutexes afterwards would
	 * let a sibling observe the pointer and acquire an uninitialized
	 * mutex; this matches the pattern used by authamd_init.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		membar_producer();

		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			/* Lost the race: tear down our copy, adopt winner's */
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		}
	}

	gcpu->gcpu_shared = sp;

	return (0);
}