/*
 * Handler for the CRYPTO_UNLOAD_SOFT_MODULE ioctl: force-unload the
 * kernel module backing the named software crypto provider.
 *
 * Returns CRYPTO_UNKNOWN_PROVIDER if 'name' is not a registered
 * provider, CRYPTO_BUSY if the module is in use, CRYPTO_FAILED on any
 * other modunload() error, and CRYPTO_SUCCESS otherwise.
 */
int
crypto_unload_soft_module(caddr_t name)
{
	kcf_provider_desc_t *pd;
	struct modctl *modp;
	modid_t modid;
	int rv;

	/* The name must refer to a provider the framework knows about. */
	pd = kcf_prov_tab_lookup_by_name(name);
	if (pd == NULL)
		return (CRYPTO_UNKNOWN_PROVIDER);

	/*
	 * Record the module id and drop our hold immediately:
	 * modunload() ends up in unregister, which waits for the
	 * provider refcnt to reach zero, so keeping a reference here
	 * would deadlock.
	 */
	modid = pd->pd_module_id;
	KCF_PROV_REFRELE(pd);

	/* Clear the no-auto-unload flag before requesting the unload. */
	modp = mod_hold_by_name(name);
	if (modp != NULL) {
		modp->mod_loadflags &= ~(MOD_NOAUTOUNLOAD);
		mod_release_mod(modp);
	}

	rv = modunload(modid);
	if (rv != 0)
		return (rv == EBUSY ? CRYPTO_BUSY : CRYPTO_FAILED);

	return (CRYPTO_SUCCESS);
}
/*
 * Load the generic IA32 MCA cpu module.  Model-specific cpu modules may
 * later supplement whatever support the generic module provides.
 *
 * Returns a held cmi_t on success, NULL if the module cannot be loaded
 * or its init entry point fails.
 */
static cmi_t *
cmi_load_generic(cmi_hdl_t hdl, void **datap)
{
	modctl_t *mcp;
	cmi_t *gcmi;
	int id;
	int rv;

	ASSERT(MUTEX_HELD(&cmi_load_lock));

	id = modload(CPUMOD_SUBDIR, CPUMOD_PREFIX ".generic");
	if (id == -1)
		return (NULL);

	/* Hold the modctl only long enough to look up and pin the cmi. */
	mcp = mod_hold_by_id(id);
	gcmi = cmi_load_modctl(mcp);
	if (gcmi != NULL)
		cmi_hold(gcmi);
	mod_release_mod(mcp);

	if (gcmi == NULL)
		return (NULL);

	rv = gcmi->cmi_ops->cmi_init(hdl, datap);
	if (rv != 0) {
		/* ENOTSUP is a quiet "not for this cpu"; anything else is noisy. */
		if (rv != ENOTSUP)
			cmn_err(CE_WARN, CPUMOD_PREFIX ".generic failed to "
			    "init: err=%d", rv);
		cmi_rele(gcmi);
		return (NULL);
	}

	return (gcmi);
}
/*
 * Resolve 'symname' in the module named 'modname' and return the
 * symbol's address, or 0 if the module or the symbol cannot be found.
 */
static uintptr_t
kctl_lookup_by_name(char *modname, char *symname)
{
	struct modctl *modp;
	uintptr_t value = 0;
	Sym *symp;

	/* Hold the module so its symbol table cannot go away under us. */
	modp = mod_hold_by_name(modname);
	if (modp == NULL)
		return (0);

	symp = kobj_lookup_all(modp->mod_mp, symname, 1);
	if (symp != NULL)
		value = symp->st_value;

	mod_release_mod(modp);
	return (value);
}
/**
 * Retains a kernel module and opens the CTF data associated with it.
 *
 * On success the module stays held on behalf of the caller (the
 * matching release is the caller's responsibility); on failure any
 * hold taken here is dropped before returning.
 *
 * @param   pszModule       The name of the module to open.
 * @param   ppMod           Where to store the module handle.
 * @param   ppCTF           Where to store the module's CTF handle.
 *
 * @return IPRT status code.
 */
static int rtR0DbgKrnlInfoModRetain(char *pszModule, modctl_t **ppMod, ctf_file_t **ppCTF)
{
    AssertPtrReturn(pszModule, VERR_INVALID_PARAMETER);
    AssertPtrReturn(ppMod, VERR_INVALID_PARAMETER);
    AssertPtrReturn(ppCTF, VERR_INVALID_PARAMETER);

    int rc = VINF_SUCCESS;
    modid_t ModId = mod_name_to_modid(pszModule);
    if (ModId != -1)
    {
        *ppMod = mod_hold_by_id(ModId);
        if (*ppMod)
        {
            /*
             * Hold mod_lock as ctf_modopen may update the module with uncompressed CTF data.
             */
            int err;
            mutex_enter(&mod_lock);
            *ppCTF = ctf_modopen(((modctl_t *)*ppMod)->mod_mp, &err);
            mutex_exit(&mod_lock);

            /*
             * Success: return with the module still held -- the CTF handle
             * references the module's mod_mp, so releasing here would hand
             * the caller a handle into a module that may be unloaded.
             * (Bug fix: the release used to happen unconditionally before
             * this check, defeating the "retain" contract.)
             */
            if (*ppCTF)
                return VINF_SUCCESS;

            LogRel(("rtR0DbgKrnlInfoModRetain: ctf_modopen failed for '%s' err=%d\n", pszModule, err));
            rc = VERR_INTERNAL_ERROR_3;

            /* Failure: drop the hold we acquired above. */
            mod_release_mod(*ppMod);
        }
        else
        {
            LogRel(("rtR0DbgKrnlInfoModRetain: mod_hold_by_id failed for '%s'\n", pszModule));
            rc = VERR_INTERNAL_ERROR_2;
        }
    }
    else
    {
        LogRel(("rtR0DbgKrnlInfoModRetain: mod_name_to_modid failed for '%s'\n", pszModule));
        rc = VERR_INTERNAL_ERROR;
    }
    return rc;
}
/*
 * Find or load a module offering model-specific support for this
 * vendor/family/model/stepping combination.  When loading, we search
 * CPUMOD_MS_SUBDIR for progressively looser matches: first
 * vendor/family/model/stepping, then vendor/family/model, then
 * vendor/family, and finally vendor alone.  Returns a held cms_t, or
 * NULL if nothing suitable could be found or loaded.
 */
static cms_t *
cms_load_module(cmi_hdl_t hdl, int match, int *chosenp)
{
	uint_t fms[3];
	modctl_t *mcp;
	cms_t *found;
	int id;

	ASSERT(MUTEX_HELD(&cms_load_lock));
	ASSERT(match == CMS_MATCH_STEPPING || match == CMS_MATCH_MODEL ||
	    match == CMS_MATCH_FAMILY || match == CMS_MATCH_VENDOR);

	fms[0] = cmi_hdl_family(hdl);
	fms[1] = cmi_hdl_model(hdl);
	fms[2] = cmi_hdl_stepping(hdl);

	/*
	 * If a module matching this vendor/family/model/stepping has
	 * already been loaded, reuse it rather than loading again.
	 */
	found = cms_search_list(hdl, match);
	if (found != NULL) {
		cms_hold(found);
		return (found);
	}

	id = modload_qualified(CPUMOD_MS_SUBDIR, CPUMOD_MS_PREFIX,
	    cmi_hdl_vendorstr(hdl), ".", fms, match, chosenp);
	if (id == -1)
		return (NULL);

	/* Hold the modctl only while we look up and pin its cms_t. */
	mcp = mod_hold_by_id(id);
	found = cms_load_modctl(mcp);
	if (found)
		cms_hold(found);
	mod_release_mod(mcp);

	return (found);
}
/*
 * Called from the CRYPTO_LOAD_SOFT_DISABLED ioctl: replace the set of
 * administratively disabled mechanisms for the named software provider.
 * If new_count is 0, the provider's policy entry is removed entirely.
 *
 * If the provider is currently registered it is transitioned through
 * DISABLED -> (unregister, drain) and then re-registered at the end, so
 * in-flight requests complete before the policy changes take effect.
 */
int
crypto_load_soft_disabled(char *name, uint_t new_count,
    crypto_mech_name_t *new_array)
{
	kcf_provider_desc_t *provider = NULL;
	crypto_mech_name_t *prev_array;
	uint_t prev_count = 0;
	int rv;

	provider = kcf_prov_tab_lookup_by_name(name);
	if (provider != NULL) {
		mutex_enter(&provider->pd_lock);

		/*
		 * Check if any other thread is disabling or removing
		 * this provider. We return if this is the case.
		 */
		if (provider->pd_state >= KCF_PROV_DISABLED) {
			mutex_exit(&provider->pd_lock);
			KCF_PROV_REFRELE(provider);
			return (CRYPTO_BUSY);
		}

		provider->pd_state = KCF_PROV_DISABLED;
		mutex_exit(&provider->pd_lock);

		/* Tear down the registration; B_TRUE = this is temporary. */
		undo_register_provider(provider, B_TRUE);

		/*
		 * Drop the lookup reference (and the kstat reference, if
		 * any) so outstanding holds can drain to zero.
		 * NOTE(review): 'provider' is still dereferenced below;
		 * this relies on the descriptor staying valid until the
		 * KCF_PROV_FREED transition observed below -- confirm
		 * against the framework's refcount rules.
		 */
		KCF_PROV_REFRELE(provider);
		if (provider->pd_kstat != NULL)
			KCF_PROV_REFRELE(provider);

		mutex_enter(&provider->pd_lock);
		/* Wait till the existing requests complete. */
		while (provider->pd_state != KCF_PROV_FREED) {
			cv_wait(&provider->pd_remove_cv, &provider->pd_lock);
		}
		mutex_exit(&provider->pd_lock);
	}

	if (new_count == 0) {
		/* Empty disabled list: drop the policy entry entirely. */
		kcf_policy_remove_by_name(name, &prev_count, &prev_array);
		crypto_free_mech_list(prev_array, prev_count);
		rv = CRYPTO_SUCCESS;
		goto out;
	}

	/* put disabled mechanisms into policy table */
	if ((rv = kcf_policy_load_soft_disabled(name, new_count, new_array,
	    &prev_count, &prev_array)) == CRYPTO_SUCCESS) {
		crypto_free_mech_list(prev_array, prev_count);
	}

out:
	if (provider != NULL) {
		/* Re-register and re-acquire the references dropped above. */
		redo_register_provider(provider);
		if (provider->pd_kstat != NULL)
			KCF_PROV_REFHOLD(provider);
		mutex_enter(&provider->pd_lock);
		provider->pd_state = KCF_PROV_READY;
		mutex_exit(&provider->pd_lock);
	} else if (rv == CRYPTO_SUCCESS) {
		/*
		 * There are some cases where it is useful to kCF clients
		 * to have a provider whose mechanism is enabled now to be
		 * available. So, we attempt to load it here.
		 *
		 * The check, new_count < prev_count, ensures that we do this
		 * only in the case where a mechanism(s) is now enabled.
		 * This check assumes that enable and disable are separate
		 * administrative actions and are not done in a single action.
		 */
		if (new_count < prev_count && (in_soft_config_list(name)) &&
		    (modload("crypto", name) != -1)) {
			struct modctl *mcp;
			boolean_t load_again = B_FALSE;

			if ((mcp = mod_hold_by_name(name)) != NULL) {
				/* Pin the module against auto-unload. */
				mcp->mod_loadflags |= MOD_NOAUTOUNLOAD;

				/* memory pressure may have unloaded module */
				if (!mcp->mod_installed)
					load_again = B_TRUE;
				mod_release_mod(mcp);

				if (load_again)
					(void) modload("crypto", name);
			}
		}
	}

	return (rv);
}
/*
 * Open (load) a support-driver image as a Solaris kernel module and
 * record its module id, modctl and base address in pImage.
 *
 * The image is loaded via modload() using a relative sub-directory that
 * escapes the normal module search path, so an absolute pszFilename can
 * be honored.  On success pImage->idSolMod / pSolModCtl / pvImage are
 * set and VINF_SUCCESS is returned; otherwise a VERR_* status.
 */
int VBOXCALL supdrvOSLdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, const char *pszFilename)
{
    pImage->idSolMod   = -1;
    pImage->pSolModCtl = NULL;

# if 1 /* This approach requires _init/_fini/_info stubs. */
    /*
     * Construct a filename that escapes the module search path and let us
     * specify a root path.
     */
    /** @todo change this to use modctl and use_path=0. */
    const char *pszName = RTPathFilename(pszFilename);
    AssertReturn(pszName, VERR_INVALID_PARAMETER);
    /* Enough "../" components to climb out of any plausible search-path prefix. */
    char *pszSubDir = RTStrAPrintf2("../../../../../../../../../../..%.*s", pszName - pszFilename - 1, pszFilename);
    if (!pszSubDir)
        return VERR_NO_STR_MEMORY;

    int idMod = modload(pszSubDir, pszName);
    if (idMod == -1)
    {
        /* This is an horrible hack for avoiding the mod-present check in
           modrload on S10.  Fortunately, nobody else seems to be using that
           variable... */
        extern int swaploaded;
        int saved_swaploaded = swaploaded;
        swaploaded = 0;
        idMod = modload(pszSubDir, pszName);
        swaploaded = saved_swaploaded;
    }
    RTStrFree(pszSubDir);

    if (idMod == -1)
    {
        LogRel(("modload(,%s): failed, could be anything...\n", pszFilename));
        return VERR_LDR_GENERAL_FAILURE;
    }

    modctl_t *pModCtl = mod_hold_by_id(idMod);
    if (!pModCtl)
    {
        LogRel(("mod_hold_by_id(,%s): failed, weird.\n", pszFilename));
        /* No point in calling modunload. */
        return VERR_LDR_GENERAL_FAILURE;
    }
    /* Pin the module; we manage its lifetime ourselves from here on. */
    pModCtl->mod_loadflags |= MOD_NOAUTOUNLOAD | MOD_NOUNLOAD; /* paranoia */

# else
    /* Alternative (disabled): load by path without the _init/_fini stubs. */
    const int idMod = -1;
    modctl_t *pModCtl = mod_hold_by_name(pszFilename);
    if (!pModCtl)
    {
        LogRel(("mod_hold_by_name failed for '%s'\n", pszFilename));
        return VERR_LDR_GENERAL_FAILURE;
    }

    int rc = kobj_load_module(pModCtl, 0 /*use_path*/);
    if (rc != 0)
    {
        LogRel(("kobj_load_module failed with rc=%d for '%s'\n", rc, pszFilename));
        mod_release_mod(pModCtl);
        return RTErrConvertFromErrno(rc);
    }
# endif

    /*
     * Get the module info.
     *
     * Note! The text section is actually not at mi_base, but at the next
     *       alignment boundary, and there seems to be no easy way of
     *       getting at this address.  This sabotages supdrvOSLdrLoad.
     *       Bastards!
     */
    struct modinfo ModInfo;
    kobj_getmodinfo(pModCtl->mod_mp, &ModInfo);
    pImage->pvImage = ModInfo.mi_base;
    pImage->idSolMod = idMod;
    pImage->pSolModCtl = pModCtl;

    /* MOD_NOUNLOAD keeps the module resident, so the hold can be dropped. */
    mod_release_mod(pImage->pSolModCtl);

    LogRel(("supdrvOSLdrOpen: succeeded for '%s' (mi_base=%p mi_size=%#x), id=%d ctl=%p\n", pszFilename, ModInfo.mi_base, ModInfo.mi_size, idMod, pModCtl));
    return VINF_SUCCESS;
}
/*
 * Second stage of image loading: resolve the image's exported symbols
 * and the standard module entry points against the kernel module that
 * supdrvOSLdrOpen loaded.
 *
 * Re-holds the module by id and cross-checks the modctl pointer against
 * the one cached at open time before resolving anything.  Returns
 * VINF_SUCCESS, VERR_LDR_GENERAL_FAILURE (unresolved/out-of-range
 * symbol), VERR_LDR_MISMATCH_NATIVE (modctl mismatch) or
 * VERR_NOT_SUPPORTED (service entry points).
 */
int VBOXCALL supdrvOSLdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, const uint8_t *pbImageBits, PSUPLDRLOAD pReq)
{
#if 0 /* This doesn't work because of text alignment. */
    /*
     * Comparing is very very difficult since text and data may be allocated
     * separately.
     */
    size_t cbCompare = RT_MIN(pImage->cbImageBits, 64);
    if (memcmp(pImage->pvImage, pbImageBits, cbCompare))
    {
        LogRel(("Image mismatch: %s (%p)\n", pImage->szName, pImage->pvImage));
        LogRel(("Native: %.*Rhxs\n", cbCompare, pImage->pvImage));
        LogRel(("SUPLib: %.*Rhxs\n", cbCompare, pbImageBits));
        return VERR_LDR_MISMATCH_NATIVE;
    }
#endif

    /*
     * Get the exported symbol addresses.
     */
    int rc;
    modctl_t *pModCtl = mod_hold_by_id(pImage->idSolMod);
    if (pModCtl && pModCtl == pImage->pSolModCtl)
    {
        /*
         * Walk the symbol table backwards; iSym underflows to UINT32_MAX
         * only when every symbol resolved, which is the success test below.
         */
        uint32_t iSym = pImage->cSymbols;
        while (iSym-- > 0)
        {
            const char *pszSymbol = &pImage->pachStrTab[pImage->paSymbols[iSym].offName];
            uintptr_t uValue = modlookup_by_modctl(pImage->pSolModCtl, pszSymbol);
            if (!uValue)
            {
                LogRel(("supdrvOSLdrLoad on %s failed to resolve the exported symbol: '%s'\n", pImage->szName, pszSymbol));
                break;
            }
            /* Offsets are stored as 32-bit; verify the value round-trips. */
            uintptr_t offSymbol = uValue - (uintptr_t)pImage->pvImage;
            pImage->paSymbols[iSym].offSymbol = offSymbol;
            if (pImage->paSymbols[iSym].offSymbol != (int32_t)offSymbol)
            {
                LogRel(("supdrvOSLdrLoad on %s symbol out of range: %p (%s) \n", pImage->szName, offSymbol, pszSymbol));
                break;
            }
        }
        rc = iSym == UINT32_MAX ? VINF_SUCCESS : VERR_LDR_GENERAL_FAILURE;

        /*
         * Get the standard module entry points.
         */
        if (RT_SUCCESS(rc))
        {
            rc = supdrvSolLdrResolvEp(pImage, "ModuleInit", (void **)&pImage->pfnModuleInit);
            if (RT_SUCCESS(rc))
                rc = supdrvSolLdrResolvEp(pImage, "ModuleTerm", (void **)&pImage->pfnModuleTerm);

            /* NOTE(review): no default case -- other eEPType values keep the
               rc computed above; confirm that is the intended behavior. */
            switch (pReq->u.In.eEPType)
            {
                case SUPLDRLOADEP_VMMR0:
                {
                    if (RT_SUCCESS(rc))
                        rc = supdrvSolLdrResolvEp(pImage, "VMMR0EntryInt", (void **)&pReq->u.In.EP.VMMR0.pvVMMR0EntryInt);
                    if (RT_SUCCESS(rc))
                        rc = supdrvSolLdrResolvEp(pImage, "VMMR0EntryFast", (void **)&pReq->u.In.EP.VMMR0.pvVMMR0EntryFast);
                    if (RT_SUCCESS(rc))
                        rc = supdrvSolLdrResolvEp(pImage, "VMMR0EntryEx", (void **)&pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
                    break;
                }

                case SUPLDRLOADEP_SERVICE:
                {
                    /** @todo we need the name of the entry point. */
                    return VERR_NOT_SUPPORTED;
                }
            }
        }

        mod_release_mod(pImage->pSolModCtl);
    }
    else
    {
        LogRel(("mod_hold_by_id failed in supdrvOSLdrLoad on %s: %p\n", pImage->szName, pModCtl));
        rc = VERR_LDR_MISMATCH_NATIVE;
    }
    return rc;
}