/*
 * Replay one event from the error log.  If some loaded module still holds
 * a reference to the event (i.e. it was only partially committed before we
 * last exited), finish the commit on that module's behalf; otherwise hand
 * the event back to the dispatch queue for redelivery.  Either way, bump
 * the corresponding replay statistic under d_stats_lock.
 */
/*ARGSUSED*/
static void
fmd_err_replay(fmd_log_t *lp, fmd_event_t *ep, fmd_t *dp)
{
	fmd_module_t *owner;
	fmd_stat_t *statp;

	/*
	 * Walk the module list under d_mod_lock looking for a module that
	 * still contains the event; hold it before dropping the lock so it
	 * cannot disappear underneath us.
	 */
	(void) pthread_mutex_lock(&dp->d_mod_lock);

	for (owner = fmd_list_next(&dp->d_mod_list); owner != NULL;
	    owner = fmd_list_next(owner)) {
		if (fmd_module_contains(owner, ep)) {
			fmd_module_hold(owner);
			break;
		}
	}

	(void) pthread_mutex_unlock(&dp->d_mod_lock);

	if (owner == NULL) {
		/* No owner: replay the event to all current subscribers. */
		fmd_dispq_dispatch(dp->d_disp, ep, FMD_EVENT_DATA(ep));
		statp = &dp->d_stats->ds_log_replayed;
	} else {
		/* Partially committed: complete the commit and release. */
		fmd_event_commit(ep);
		fmd_module_rele(owner);
		statp = &dp->d_stats->ds_log_partials;
	}

	(void) pthread_mutex_lock(&dp->d_stats_lock);
	statp->fmds_value.ui64++;
	(void) pthread_mutex_unlock(&dp->d_stats_lock);
}
bool_t fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req) { struct fmd_rpc_modinfo *rmi; fmd_module_t *mp; rvp->rml_list = NULL; rvp->rml_err = 0; rvp->rml_len = 0; if (fmd_rpc_deny(req)) { rvp->rml_err = FMD_ADM_ERR_PERM; return (TRUE); } (void) pthread_mutex_lock(&fmd.d_mod_lock); for (mp = fmd_list_next(&fmd.d_mod_list); mp != NULL; mp = fmd_list_next(mp)) { if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) { rvp->rml_err = FMD_ADM_ERR_NOMEM; break; } fmd_module_lock(mp); /* * If mod_info is NULL, the module is in the middle of loading: * do not report its presence to observability tools yet. */ if (mp->mod_info == NULL) { fmd_module_unlock(mp); free(rmi); continue; } rmi->rmi_name = strdup(mp->mod_name); rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc); rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers); rmi->rmi_faulty = mp->mod_error != 0; rmi->rmi_next = rvp->rml_list; fmd_module_unlock(mp); rvp->rml_list = rmi; rvp->rml_len++; if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) { rvp->rml_err = FMD_ADM_ERR_NOMEM; break; } } (void) pthread_mutex_unlock(&fmd.d_mod_lock); return (TRUE); }
/*
 * Write a module's checkpoint state: first the per-case sections, then the
 * SERD and buffer sections, and finally the FCF_SECT_MODULE section that
 * ties them together.  The emission order here must mirror the reservation
 * order in fmd_ckpt_resv_module().
 */
static void
fmd_ckpt_save_module(fmd_ckpt_t *ckp, fmd_module_t *mp)
{
	fcf_secidx_t bufsec = FCF_SECIDX_NONE;
	fcf_module_t fcfm;
	fmd_case_t *cp;
	uint_t n;

	/* Save each open case owned by this module. */
	for (cp = fmd_list_next(&mp->mod_cases); cp; cp = fmd_list_next(cp))
		fmd_ckpt_save_case(ckp, cp);

	/*
	 * Gather the SERD engines into one contiguous array and emit it as a
	 * single FCF_SECT_SERD section.  ckp_arg is used as a cursor: each
	 * fmd_ckpt_save_serd() callback fills one fcf_serd_t and advances it.
	 */
	if ((n = fmd_serd_hash_count(&mp->mod_serds)) != 0) {
		size_t size = sizeof (fcf_serd_t) * n;
		fcf_serd_t *serds = ckp->ckp_arg = fmd_alloc(size, FMD_SLEEP);

		fmd_serd_hash_apply(&mp->mod_serds,
		    (fmd_serd_eng_f *)fmd_ckpt_save_serd, ckp);

		/* Section index is not needed for SERDs, so it is discarded. */
		(void) fmd_ckpt_section(ckp, serds, FCF_SECT_SERD, size);
		fmd_free(serds, size);
	}

	/*
	 * Same pattern for module buffers, except the resulting section index
	 * is retained so the module section below can reference it.
	 */
	if ((n = fmd_buf_hash_count(&mp->mod_bufs)) != 0) {
		size_t size = sizeof (fcf_buf_t) * n;
		fcf_buf_t *bufs = ckp->ckp_arg = fmd_alloc(size, FMD_SLEEP);

		fmd_buf_hash_apply(&mp->mod_bufs,
		    (fmd_buf_f *)fmd_ckpt_save_buf, ckp);

		bufsec = fmd_ckpt_section(ckp, bufs, FCF_SECT_BUFS, size);
		fmd_free(bufs, size);
	}

	/* Emit the module section itself, with its strings interned. */
	fcfm.fcfm_name = fmd_ckpt_string(ckp, mp->mod_name);
	fcfm.fcfm_path = fmd_ckpt_string(ckp, mp->mod_path);
	fcfm.fcfm_desc = fmd_ckpt_string(ckp, mp->mod_info->fmdi_desc);
	fcfm.fcfm_vers = fmd_ckpt_string(ckp, mp->mod_info->fmdi_vers);
	fcfm.fcfm_bufs = bufsec;

	(void) fmd_ckpt_section(ckp, &fcfm, FCF_SECT_MODULE,
	    sizeof (fcf_module_t));
}
/*
 * Reserve checkpoint space for a module's state: its cases, its SERD
 * engines, its buffers, the module section itself, and the string table
 * bytes for the module's identifying strings.  The reservation order must
 * match the section emission order in fmd_ckpt_save_module().
 */
static void
fmd_ckpt_resv_module(fmd_ckpt_t *ckp, fmd_module_t *mp)
{
	fmd_case_t *cp;
	uint_t count;

	/* Reserve space for each of the module's open cases. */
	for (cp = fmd_list_next(&mp->mod_cases); cp; cp = fmd_list_next(cp)) {
		fmd_ckpt_resv_case(ckp, cp);
	}

	/* One fcf_serd_t per SERD engine, 64-bit aligned. */
	count = fmd_serd_hash_count(&mp->mod_serds);
	fmd_serd_hash_apply(&mp->mod_serds,
	    (fmd_serd_eng_f *)fmd_ckpt_resv_serd, ckp);
	fmd_ckpt_resv(ckp, sizeof (fcf_serd_t) * count, sizeof (uint64_t));

	/* One fcf_buf_t per module buffer, 32-bit aligned. */
	count = fmd_buf_hash_count(&mp->mod_bufs);
	fmd_buf_hash_apply(&mp->mod_bufs,
	    (fmd_buf_f *)fmd_ckpt_resv_buf, ckp);
	fmd_ckpt_resv(ckp, sizeof (fcf_buf_t) * count, sizeof (uint32_t));

	/* The module section itself. */
	fmd_ckpt_resv(ckp, sizeof (fcf_module_t), sizeof (uint32_t));

	/* String-table space for the module's strings (plus NUL bytes). */
	ckp->ckp_strn += strlen(mp->mod_name) + 1;
	ckp->ckp_strn += strlen(mp->mod_path) + 1;
	ckp->ckp_strn += strlen(mp->mod_info->fmdi_desc) + 1;
	ckp->ckp_strn += strlen(mp->mod_info->fmdi_vers) + 1;
}
/*
 * Hash-apply callback: serialize one SERD engine into the fcf_serd_t array
 * being assembled in ckp_arg, first emitting an FCF_SECT_EVENTS section for
 * the engine's pending events (if any) so it can be referenced by index.
 *
 * Fix: evsec was initialized with FCF_SECT_NONE, a section *type* constant,
 * rather than FCF_SECIDX_NONE, the sentinel for an fcf_secidx_t section
 * *index* (cf. bufsec in fmd_ckpt_save_module()).  An engine with no events
 * would otherwise record a bogus section index in fcfd_events.
 */
static void
fmd_ckpt_save_serd(fmd_serd_eng_t *sgp, fmd_ckpt_t *ckp)
{
	fcf_serd_t *fcfd = ckp->ckp_arg;
	fcf_secidx_t evsec = FCF_SECIDX_NONE;
	fmd_serd_elem_t *sep;

	if (sgp->sg_count != 0) {
		/*
		 * Reserve the events section first to obtain its index, then
		 * append each pending event record into it.
		 */
		evsec = fmd_ckpt_section(ckp, NULL, FCF_SECT_EVENTS,
		    sizeof (fcf_event_t) * sgp->sg_count);

		for (sep = fmd_list_next(&sgp->sg_list);
		    sep != NULL; sep = fmd_list_next(sep))
			fmd_ckpt_save_event(ckp, sep->se_event);
	}

	fcfd->fcfd_name = fmd_ckpt_string(ckp, sgp->sg_name);
	fcfd->fcfd_events = evsec;
	fcfd->fcfd_pad = 0;
	fcfd->fcfd_n = sgp->sg_n;
	fcfd->fcfd_t = sgp->sg_t;

	/* Advance the cursor to the next array slot for the next engine. */
	ckp->ckp_arg = fcfd + 1;
}
/*
 * Fill in one fmd_rpc_serdinfo record from a SERD engine: name, age of the
 * oldest pending event (delta from now), event count, fired state, and the
 * engine's N/T parameters.  On strdup failure, rsi_err is set to
 * FMD_ADM_ERR_NOMEM and the record is left otherwise unfilled.
 */
static void
fmd_adm_serdinfo_record(fmd_serd_eng_t *sgp, struct fmd_rpc_serdinfo *rsi)
{
	const fmd_serd_elem_t *oldest;
	uint64_t first, now = fmd_time_gethrtime();

	rsi->rsi_name = strdup(sgp->sg_name);
	if (rsi->rsi_name == NULL) {
		rsi->rsi_err = FMD_ADM_ERR_NOMEM;
		return;
	}

	/*
	 * Use the hrtime of the oldest queued event as the baseline; with an
	 * empty list the delta is zero.
	 */
	oldest = fmd_list_next(&sgp->sg_list);
	first = (oldest != NULL) ? fmd_event_hrtime(oldest->se_event) : now;

	if (now >= first) {
		rsi->rsi_delta = now - first;
	} else {
		/* Clock counter wrapped: compute the modular difference. */
		rsi->rsi_delta = (UINT64_MAX - first) + now + 1;
	}

	rsi->rsi_count = sgp->sg_count;
	rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
	rsi->rsi_n = sgp->sg_n;
	rsi->rsi_t = sgp->sg_t;
}
/*
 * Tear down the entire fault management daemon state in dependency order:
 * RPC/DR services, transports, the self-diagnosis module, all client
 * modules, logs, the resource (ASRU) cache, the root module, and finally
 * the global data structures.  The ordering below is deliberate and must
 * not be rearranged: later structures are referenced by earlier ones.
 */
void
fmd_destroy(fmd_t *dp)
{
	fmd_module_t *mp;
	fmd_case_t *cp;
	int core;

	/* Fetch 'core' first: dp's config is gone by the time we need it. */
	(void) fmd_conf_getprop(fmd.d_conf, "core", &core);

	fmd_rpc_fini();
	fmd_dr_fini();

	if (dp->d_xprt_ids != NULL)
		fmd_xprt_suspend_all();

	/*
	 * Unload the self-diagnosis module first.  This ensures that it does
	 * not get confused as we start unloading other modules, etc.  We must
	 * hold the dispq lock as a writer while doing so since it uses d_self.
	 */
	if (dp->d_self != NULL) {
		fmd_module_t *self;

		(void) pthread_rwlock_wrlock(&dp->d_disp->dq_lock);
		self = dp->d_self;
		dp->d_self = NULL;
		(void) pthread_rwlock_unlock(&dp->d_disp->dq_lock);

		fmd_module_unload(self);
		fmd_module_rele(self);
	}

	/*
	 * Unload modules in reverse order *except* for the root module, which
	 * is first in the list.  This allows it to keep its thread and trace.
	 */
	for (mp = fmd_list_prev(&dp->d_mod_list); mp != dp->d_rmod; ) {
		fmd_module_unload(mp);
		mp = fmd_list_prev(mp);
	}

	if (dp->d_mod_hash != NULL) {
		fmd_modhash_destroy(dp->d_mod_hash);
		dp->d_mod_hash = NULL;
	}

	/*
	 * Close both log files now that modules are no longer active.  We must
	 * set these pointers to NULL in case any subsequent errors occur.
	 */
	if (dp->d_errlog != NULL) {
		fmd_log_rele(dp->d_errlog);
		dp->d_errlog = NULL;
	}

	if (dp->d_fltlog != NULL) {
		fmd_log_rele(dp->d_fltlog);
		dp->d_fltlog = NULL;
	}

	/*
	 * Now destroy the resource cache: each ASRU contains a case reference,
	 * which may in turn contain a pointer to a referenced owning module.
	 */
	if (dp->d_asrus != NULL) {
		fmd_asru_hash_destroy(dp->d_asrus);
		dp->d_asrus = NULL;
	}

	/*
	 * Now that all data structures that refer to modules are torn down,
	 * no modules should be remaining on the module list except for d_rmod.
	 * If we trip one of these assertions, we're missing a rele somewhere.
	 */
	ASSERT(fmd_list_prev(&dp->d_mod_list) == dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == dp->d_rmod);

	/*
	 * Now destroy the root module.  We clear its thread key first so any
	 * calls to fmd_trace() inside of the module code will be ignored.
	 */
	(void) pthread_setspecific(dp->d_key, NULL);

	/* Discard any cases still owned by the root module. */
	fmd_module_lock(dp->d_rmod);
	while ((cp = fmd_list_next(&dp->d_rmod->mod_cases)) != NULL)
		fmd_case_discard(cp);
	fmd_module_unlock(dp->d_rmod);

	fmd_free(dp->d_rmod->mod_stats, sizeof (fmd_modstat_t));
	dp->d_rmod->mod_stats = NULL;

	/* Mark the root module finished before dropping the last reference. */
	(void) pthread_mutex_lock(&dp->d_rmod->mod_lock);
	dp->d_rmod->mod_flags |= FMD_MOD_FINI;
	(void) pthread_mutex_unlock(&dp->d_rmod->mod_lock);

	fmd_module_rele(dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == NULL);

	/*
	 * Now destroy the remaining global data structures.  If 'core' was
	 * set to true, force a core dump so we can check for memory leaks.
	 */
	if (dp->d_cases != NULL)
		fmd_case_hash_destroy(dp->d_cases);
	if (dp->d_disp != NULL)
		fmd_dispq_destroy(dp->d_disp);
	if (dp->d_timers != NULL)
		fmd_timerq_destroy(dp->d_timers);
	if (dp->d_schemes != NULL)
		fmd_scheme_hash_destroy(dp->d_schemes);
	if (dp->d_xprt_ids != NULL)
		fmd_idspace_destroy(dp->d_xprt_ids);

	if (dp->d_errstats != NULL) {
		fmd_free(dp->d_errstats,
		    sizeof (fmd_stat_t) * (EFMD_END - EFMD_UNKNOWN));
	}

	if (dp->d_conf != NULL)
		fmd_conf_close(dp->d_conf);

	if (dp->d_topo != NULL)
		topo_close(dp->d_topo);

	nvlist_free(dp->d_auth);
	(void) nv_alloc_fini(&dp->d_nva);
	dp->d_clockops->fto_fini(dp->d_clockptr);

	(void) pthread_key_delete(dp->d_key);
	/* Scrub the global state so any stale use after destroy faults fast. */
	bzero(dp, sizeof (fmd_t));

	if (core)
		fmd_panic("forcing core dump at user request\n");
}