int fmd_assert(const char *expr, const char *file, int line) { fmd_panic("\"%s\", line %d: assertion failed: %s\n", file, line, expr); /*NOTREACHED*/ return (0); }
/*
 * Common entry point for fmd-created threads.  Stash the thread's
 * fmd_thread_t in the daemon's thread-specific-data slot (fmd.d_key),
 * enable asynchronous cancellation for threads that are not door servers,
 * and then run the thread's real function.  Always returns NULL.
 */
static void *
fmd_thread_start(void *arg)
{
	fmd_thread_t *thr = arg;

	if (pthread_setspecific(fmd.d_key, thr) != 0)
		fmd_panic("failed to initialize thread key to %p", arg);

	if (!thr->thr_isdoor) {
		/*
		 * Door server threads are excluded here; presumably the door
		 * framework manages their cancellation — confirm with the
		 * door-server setup code.
		 */
		(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,
		    NULL);
		(void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	}

	thr->thr_func(thr->thr_arg);
	return (NULL);
}
void fmd_rpc_init(void) { int err, prog, mode = RPC_SVC_MT_USER; uint64_t sndsize = 0, rcvsize = 0; const char *s; if (rpc_control(RPC_SVC_MTMODE_SET, &mode) == FALSE) fmd_panic("failed to enable user-MT rpc mode"); (void) fmd_conf_getprop(fmd.d_conf, "rpc.sndsize", &sndsize); (void) fmd_conf_getprop(fmd.d_conf, "rpc.rcvsize", &rcvsize); /* * Infer whether we are the "default" fault manager or an alternate one * based on whether the initial setting of rpc.adm.prog is non-zero. */ (void) fmd_conf_getprop(fmd.d_conf, "rpc.adm.prog", &prog); (void) fmd_conf_getprop(fmd.d_conf, "rpc.adm.path", &s); if (prog != 0) { err = fmd_rpc_svc_init(fmd_adm_1, "FMD_ADM", s, "rpc.adm.prog", FMD_ADM, FMD_ADM, FMD_ADM_VERSION_1, (uint_t)sndsize, (uint_t)rcvsize, TRUE); } else { err = fmd_rpc_svc_init(fmd_adm_1, "FMD_ADM", s, "rpc.adm.prog", RPC_TRANS_MIN, RPC_TRANS_MAX, FMD_ADM_VERSION_1, (uint_t)sndsize, (uint_t)rcvsize, FALSE); } if (err != 0) fmd_error(EFMD_EXIT, "failed to create rpc server bindings"); if (fmd_thread_create(fmd.d_rmod, (fmd_thread_f *)svc_run, 0) == NULL) fmd_error(EFMD_EXIT, "failed to create rpc server thread"); }
/*
 * Custom door server create callback.  Any fmd services that use doors will
 * require those threads to have their fmd-specific TSD initialized, so we
 * route door server creation through fmd_thread_create() rather than the
 * default door thread pool.  Failure to create the server is fatal.
 */
static void
fmd_door(door_info_t *dip)
{
	if (fmd_thread_create(fmd.d_rmod, fmd_door_server, dip) == NULL)
		fmd_panic("failed to create server for door %p", (void *)dip);
}
/*
 * Tear down the entire fault manager daemon state in dependency order:
 * RPC and DR services first, then the self-diagnosis module, then all
 * loaded modules, logs, the resource cache, the root module, and finally
 * the global data structures and the fmd_t itself.  If the "core"
 * configuration property is set, force a core dump at the end so memory
 * leaks can be inspected.
 */
void
fmd_destroy(fmd_t *dp)
{
	fmd_module_t *mp;
	fmd_case_t *cp;
	int core;

	(void) fmd_conf_getprop(fmd.d_conf, "core", &core);

	fmd_rpc_fini();
	fmd_dr_fini();

	/* Quiesce transports before modules start going away. */
	if (dp->d_xprt_ids != NULL)
		fmd_xprt_suspend_all();

	/*
	 * Unload the self-diagnosis module first.  This ensures that it does
	 * not get confused as we start unloading other modules, etc.  We must
	 * hold the dispq lock as a writer while doing so since it uses d_self.
	 */
	if (dp->d_self != NULL) {
		fmd_module_t *self;

		(void) pthread_rwlock_wrlock(&dp->d_disp->dq_lock);
		self = dp->d_self;
		dp->d_self = NULL;
		(void) pthread_rwlock_unlock(&dp->d_disp->dq_lock);

		fmd_module_unload(self);
		fmd_module_rele(self);
	}

	/*
	 * Unload modules in reverse order *except* for the root module, which
	 * is first in the list.  This allows it to keep its thread and trace.
	 */
	for (mp = fmd_list_prev(&dp->d_mod_list); mp != dp->d_rmod; ) {
		fmd_module_unload(mp);
		mp = fmd_list_prev(mp);
	}

	if (dp->d_mod_hash != NULL) {
		fmd_modhash_destroy(dp->d_mod_hash);
		dp->d_mod_hash = NULL;
	}

	/*
	 * Close both log files now that modules are no longer active.  We must
	 * set these pointers to NULL in case any subsequent errors occur.
	 */
	if (dp->d_errlog != NULL) {
		fmd_log_rele(dp->d_errlog);
		dp->d_errlog = NULL;
	}

	if (dp->d_fltlog != NULL) {
		fmd_log_rele(dp->d_fltlog);
		dp->d_fltlog = NULL;
	}

	/*
	 * Now destroy the resource cache: each ASRU contains a case reference,
	 * which may in turn contain a pointer to a referenced owning module.
	 */
	if (dp->d_asrus != NULL) {
		fmd_asru_hash_destroy(dp->d_asrus);
		dp->d_asrus = NULL;
	}

	/*
	 * Now that all data structures that refer to modules are torn down,
	 * no modules should be remaining on the module list except for d_rmod.
	 * If we trip one of these assertions, we're missing a rele somewhere.
	 */
	ASSERT(fmd_list_prev(&dp->d_mod_list) == dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == dp->d_rmod);

	/*
	 * Now destroy the root module.  We clear its thread key first so any
	 * calls to fmd_trace() inside of the module code will be ignored.
	 */
	(void) pthread_setspecific(dp->d_key, NULL);
	fmd_module_lock(dp->d_rmod);

	while ((cp = fmd_list_next(&dp->d_rmod->mod_cases)) != NULL)
		fmd_case_discard(cp);

	fmd_module_unlock(dp->d_rmod);
	fmd_free(dp->d_rmod->mod_stats, sizeof (fmd_modstat_t));
	dp->d_rmod->mod_stats = NULL;

	/* Mark the root module finished before dropping our reference. */
	(void) pthread_mutex_lock(&dp->d_rmod->mod_lock);
	dp->d_rmod->mod_flags |= FMD_MOD_FINI;
	(void) pthread_mutex_unlock(&dp->d_rmod->mod_lock);

	fmd_module_rele(dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == NULL);

	/*
	 * Now destroy the remaining global data structures.  If 'core' was
	 * set to true, force a core dump so we can check for memory leaks.
	 */
	if (dp->d_cases != NULL)
		fmd_case_hash_destroy(dp->d_cases);
	if (dp->d_disp != NULL)
		fmd_dispq_destroy(dp->d_disp);
	if (dp->d_timers != NULL)
		fmd_timerq_destroy(dp->d_timers);
	if (dp->d_schemes != NULL)
		fmd_scheme_hash_destroy(dp->d_schemes);
	if (dp->d_xprt_ids != NULL)
		fmd_idspace_destroy(dp->d_xprt_ids);

	if (dp->d_errstats != NULL) {
		fmd_free(dp->d_errstats,
		    sizeof (fmd_stat_t) * (EFMD_END - EFMD_UNKNOWN));
	}

	if (dp->d_conf != NULL)
		fmd_conf_close(dp->d_conf);

	if (dp->d_topo != NULL)
		topo_close(dp->d_topo);

	nvlist_free(dp->d_auth);
	(void) nv_alloc_fini(&dp->d_nva);
	dp->d_clockops->fto_fini(dp->d_clockptr);
	(void) pthread_key_delete(dp->d_key);

	/* Scrub the daemon state so stale pointers cannot be reused. */
	bzero(dp, sizeof (fmd_t));

	if (core)
		fmd_panic("forcing core dump at user request\n");
}
/*
 * Serialize one case into the checkpoint file being built in 'ckp'.  Each
 * constituent of the case — named buffers, the principal event, the event
 * list, and the suspect nvlists — is written as its own FCF section, and
 * the resulting section indices are recorded in an fcf_case_t section that
 * ties them together.  Cases attached to a remote transport are skipped.
 */
static void
fmd_ckpt_save_case(fmd_ckpt_t *ckp, fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_item_t *cit;
	fmd_case_susp_t *cis;
	fcf_case_t fcfc;
	uint_t n;

	/* Section indices for each optional part; NONE if the part is empty. */
	fcf_secidx_t bufsec = FCF_SECIDX_NONE;
	fcf_secidx_t evsec = FCF_SECIDX_NONE;
	fcf_secidx_t nvsec = FCF_SECIDX_NONE;
	fcf_secidx_t prsec = FCF_SECIDX_NONE;

	if (cip->ci_xprt != NULL)
		return; /* do not checkpoint cases from remote transports */

	if ((n = fmd_buf_hash_count(&cip->ci_bufs)) != 0) {
		size_t size = sizeof (fcf_buf_t) * n;
		/*
		 * ckp_arg is pointed at the scratch array so that the
		 * fmd_ckpt_save_buf callback can fill it in as the hash is
		 * walked — confirm against fmd_ckpt_save_buf's use of ckp_arg.
		 */
		fcf_buf_t *bufs = ckp->ckp_arg = fmd_alloc(size, FMD_SLEEP);

		fmd_buf_hash_apply(&cip->ci_bufs,
		    (fmd_buf_f *)fmd_ckpt_save_buf, ckp);

		bufsec = fmd_ckpt_section(ckp, bufs, FCF_SECT_BUFS, size);
		fmd_free(bufs, size);
	}

	if (cip->ci_principal != NULL) {
		/*
		 * The section is reserved first (data NULL), then the event
		 * is appended into it by fmd_ckpt_save_event.
		 */
		prsec = fmd_ckpt_section(ckp, NULL, FCF_SECT_EVENTS,
		    sizeof (fcf_event_t));
		fmd_ckpt_save_event(ckp, cip->ci_principal);
	}

	if (cip->ci_nitems != 0) {
		evsec = fmd_ckpt_section(ckp, NULL, FCF_SECT_EVENTS,
		    sizeof (fcf_event_t) * cip->ci_nitems);
		for (cit = cip->ci_items; cit != NULL; cit = cit->cit_next)
			fmd_ckpt_save_event(ckp, cit->cit_event);
	}

	if (cip->ci_nsuspects != 0) {
		nvsec = fmd_ckpt_section(ckp, NULL, FCF_SECT_NVLISTS,
		    cip->ci_nvsz);
		for (cis = cip->ci_suspects; cis != NULL; cis = cis->cis_next)
			fmd_ckpt_save_nvlist(ckp, cis->cis_nvl);
	}

	/* Assemble the case record that links the sections written above. */
	fcfc.fcfc_uuid = fmd_ckpt_string(ckp, cip->ci_uuid);
	fcfc.fcfc_bufs = bufsec;
	fcfc.fcfc_principal = prsec;
	fcfc.fcfc_events = evsec;
	fcfc.fcfc_suspects = nvsec;

	/*
	 * Only these three in-memory states are checkpointable; any other
	 * state here indicates corruption, so we panic rather than write a
	 * bad checkpoint.
	 */
	switch (cip->ci_state) {
	case FMD_CASE_UNSOLVED:
		fcfc.fcfc_state = FCF_CASE_UNSOLVED;
		break;
	case FMD_CASE_SOLVED:
		fcfc.fcfc_state = FCF_CASE_SOLVED;
		break;
	case FMD_CASE_CLOSE_WAIT:
		fcfc.fcfc_state = FCF_CASE_CLOSE_WAIT;
		break;
	default:
		fmd_panic("case %p (%s) has invalid state %u",
		    (void *)cp, cip->ci_uuid, cip->ci_state);
	}

	(void) fmd_ckpt_section(ckp, &fcfc, FCF_SECT_CASE,
	    sizeof (fcf_case_t));
}