bool_t
fmd_adm_moddstat_1_svc(char *name, struct fmd_rpc_modstat *rms,
    struct svc_req *req)
{
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		fmd_module_rele(mp);
		return (TRUE);
	}

	/*
	 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
	 * are present in mp->mod_stats.  We don't use any for the daemon-
	 * maintained stats and provide this function in order to reduce the
	 * overhead of the fmstat(1M) default view, where these minimal stats
	 * must be retrieved for all of the active modules.
	 */
	(void) pthread_mutex_lock(&mp->mod_stats_lock);

	if (mp->mod_stats != NULL) {
		mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
		bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
		    sizeof (fmd_modstat_t));
	} else {
		free(rms->rms_buf.rms_buf_val);
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	}

	(void) pthread_mutex_unlock(&mp->mod_stats_lock);
	fmd_module_rele(mp);
	return (TRUE);
}
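
/*
 * Enumerate the SERD engines owned by the named module.  The engine hash is
 * walked twice: the first pass measures the total encoded length, and the
 * second pass records each engine into the buffer allocated in between.
 */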
bool_t
fmd_adm_serdlist_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	void *p;

	rvp->rsl_buf.rsl_buf_len = 0;
	rvp->rsl_buf.rsl_buf_val = NULL;
	rvp->rsl_len = 0;
	rvp->rsl_cnt = 0;
	rvp->rsl_err = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	/* In the first pass, collect the overall length of the buffer. */
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_measure, rvp);

	if (rvp->rsl_len == 0) {
		fmd_module_unlock(mp);
		fmd_module_rele(mp);
		return (TRUE);
	}

	p = malloc(rvp->rsl_len);

	if (p != NULL) {
		rvp->rsl_buf.rsl_buf_val = p;
		rvp->rsl_buf.rsl_buf_len = rvp->rsl_len;
		bzero(rvp->rsl_buf.rsl_buf_val, rvp->rsl_buf.rsl_buf_len);
		rvp->rsl_len = 0;

		/* In the second pass, populate the buffer with data. */
		fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_record,
		    rvp);
	} else {
		rvp->rsl_err = FMD_ADM_ERR_NOMEM;
	}

	fmd_module_unlock(mp);
	fmd_module_rele(mp);
	return (TRUE);
}
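
/*
 * Replay an event from the error log.  If some module still contains the
 * event (it was only partially processed before the last shutdown), commit
 * it and count it as a partial; otherwise redispatch it to all subscribers
 * and count it as replayed.
 */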
/*ARGSUSED*/
static void
fmd_err_replay(fmd_log_t *lp, fmd_event_t *ep, fmd_t *dp)
{
	fmd_module_t *mp;
	fmd_stat_t *sp;

	(void) pthread_mutex_lock(&dp->d_mod_lock);

	for (mp = fmd_list_next(&dp->d_mod_list);
	    mp != NULL; mp = fmd_list_next(mp)) {
		if (fmd_module_contains(mp, ep)) {
			fmd_module_hold(mp);
			break;
		}
	}

	(void) pthread_mutex_unlock(&dp->d_mod_lock);

	if (mp != NULL) {
		fmd_event_commit(ep);
		fmd_module_rele(mp);
		sp = &dp->d_stats->ds_log_partials;
	} else {
		fmd_dispq_dispatch(dp->d_disp, ep, FMD_EVENT_DATA(ep));
		sp = &dp->d_stats->ds_log_replayed;
	}

	(void) pthread_mutex_lock(&dp->d_stats_lock);
	sp->fmds_value.ui64++;
	(void) pthread_mutex_unlock(&dp->d_stats_lock);
}
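
/*
 * Reset the named SERD engine in the given module, provided it has not
 * already fired.  A successful reset marks the module dirty so the change
 * is reflected in its next checkpoint.
 */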
bool_t
fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		*rvp = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		if (fmd_serd_eng_fired(sgp)) {
			err = FMD_ADM_ERR_SERDFIRED;
		} else {
			fmd_serd_eng_reset(sgp);
			fmd_module_setdirty(mp);
		}
	} else
		err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}
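
/*
 * Return the current state of a single SERD engine in the given module.
 */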
bool_t
fmd_adm_serdinfo_1_svc(char *mname, char *sname, struct fmd_rpc_serdinfo *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;

	bzero(rvp, sizeof (struct fmd_rpc_serdinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rsi_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		rvp->rsi_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		fmd_adm_serdinfo_record(sgp, rvp);
	} else
		rvp->rsi_err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);
	return (TRUE);
}
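
/*
 * Reset a module by unloading it, erasing its checkpoints, and reloading it
 * from its original path.  The daemon's self-diagnosis module cannot be
 * reset.
 */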
bool_t
fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (err == 0)
		fmd_ckpt_delete(mp); /* erase any saved checkpoints */

	if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
	    mp->mod_path, mp->mod_ops) == NULL) {
		if (errno == EFMD_MOD_INIT)
			err = FMD_ADM_ERR_MODINIT;
		else
			err = FMD_ADM_ERR_MODLOAD;
	}

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}
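
/*
 * Return a snapshot of the custom (module-maintained) statistics for the
 * named module.  Unlike fmd_adm_moddstat_1_svc(), this calls into the
 * module's statistics snapshot path and can therefore fail if the module
 * has aborted.
 */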
bool_t
fmd_adm_modcstat_1_svc(char *name, struct fmd_rpc_modstat *rms,
    struct svc_req *req)
{
	fmd_ustat_snap_t snap;
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	if (fmd_modstat_snapshot(mp, &snap) == 0) {
		rms->rms_buf.rms_buf_val = snap.uss_buf;
		rms->rms_buf.rms_buf_len = snap.uss_len;
	} else if (errno == EFMD_HDL_ABORT) {
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	} else
		rms->rms_err = FMD_ADM_ERR_NOMEM;

	fmd_module_rele(mp);
	return (TRUE);
}
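
/*
 * Request an immediate garbage-collection pass for the named module.
 */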
bool_t
fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else {
		fmd_module_gc(mp);
		fmd_module_rele(mp);
	}

	*rvp = err;
	return (TRUE);
}
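
/*
 * Unload the named module.  The daemon's self-diagnosis module is always
 * busy and cannot be unloaded.
 */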
bool_t
fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}
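
/*
 * Tear down the daemon's global state: unload the self-diagnosis module,
 * then all other modules in reverse load order, close the logs, destroy the
 * resource cache and the root module, and finally free the remaining global
 * data structures.  If the 'core' property is set, panic at the end so a
 * core dump can be inspected for leaks.
 */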
void
fmd_destroy(fmd_t *dp)
{
	fmd_module_t *mp;
	fmd_case_t *cp;
	int core;

	(void) fmd_conf_getprop(fmd.d_conf, "core", &core);

	fmd_rpc_fini();
	fmd_dr_fini();

	if (dp->d_xprt_ids != NULL)
		fmd_xprt_suspend_all();

	/*
	 * Unload the self-diagnosis module first.  This ensures that it does
	 * not get confused as we start unloading other modules, etc.  We must
	 * hold the dispq lock as a writer while doing so since it uses d_self.
	 */
	if (dp->d_self != NULL) {
		fmd_module_t *self;

		(void) pthread_rwlock_wrlock(&dp->d_disp->dq_lock);
		self = dp->d_self;
		dp->d_self = NULL;
		(void) pthread_rwlock_unlock(&dp->d_disp->dq_lock);

		fmd_module_unload(self);
		fmd_module_rele(self);
	}

	/*
	 * Unload modules in reverse order *except* for the root module, which
	 * is first in the list.  This allows it to keep its thread and trace.
	 */
	for (mp = fmd_list_prev(&dp->d_mod_list); mp != dp->d_rmod; ) {
		fmd_module_unload(mp);
		mp = fmd_list_prev(mp);
	}

	if (dp->d_mod_hash != NULL) {
		fmd_modhash_destroy(dp->d_mod_hash);
		dp->d_mod_hash = NULL;
	}

	/*
	 * Close both log files now that modules are no longer active.  We
	 * must set these pointers to NULL in case any subsequent errors occur.
	 */
	if (dp->d_errlog != NULL) {
		fmd_log_rele(dp->d_errlog);
		dp->d_errlog = NULL;
	}

	if (dp->d_fltlog != NULL) {
		fmd_log_rele(dp->d_fltlog);
		dp->d_fltlog = NULL;
	}

	/*
	 * Now destroy the resource cache: each ASRU contains a case
	 * reference, which may in turn contain a pointer to a referenced
	 * owning module.
	 */
	if (dp->d_asrus != NULL) {
		fmd_asru_hash_destroy(dp->d_asrus);
		dp->d_asrus = NULL;
	}

	/*
	 * Now that all data structures that refer to modules are torn down,
	 * no modules should be remaining on the module list except for
	 * d_rmod.  If we trip one of these assertions, we're missing a rele
	 * somewhere.
	 */
	ASSERT(fmd_list_prev(&dp->d_mod_list) == dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == dp->d_rmod);

	/*
	 * Now destroy the root module.  We clear its thread key first so any
	 * calls to fmd_trace() inside of the module code will be ignored.
	 */
	(void) pthread_setspecific(dp->d_key, NULL);

	fmd_module_lock(dp->d_rmod);

	while ((cp = fmd_list_next(&dp->d_rmod->mod_cases)) != NULL)
		fmd_case_discard(cp);

	fmd_module_unlock(dp->d_rmod);
	fmd_free(dp->d_rmod->mod_stats, sizeof (fmd_modstat_t));
	dp->d_rmod->mod_stats = NULL;

	(void) pthread_mutex_lock(&dp->d_rmod->mod_lock);
	dp->d_rmod->mod_flags |= FMD_MOD_FINI;
	(void) pthread_mutex_unlock(&dp->d_rmod->mod_lock);

	fmd_module_rele(dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == NULL);

	/*
	 * Now destroy the remaining global data structures.  If 'core' was
	 * set to true, force a core dump so we can check for memory leaks.
	 */
	if (dp->d_cases != NULL)
		fmd_case_hash_destroy(dp->d_cases);
	if (dp->d_disp != NULL)
		fmd_dispq_destroy(dp->d_disp);
	if (dp->d_timers != NULL)
		fmd_timerq_destroy(dp->d_timers);
	if (dp->d_schemes != NULL)
		fmd_scheme_hash_destroy(dp->d_schemes);
	if (dp->d_xprt_ids != NULL)
		fmd_idspace_destroy(dp->d_xprt_ids);

	if (dp->d_errstats != NULL) {
		fmd_free(dp->d_errstats,
		    sizeof (fmd_stat_t) * (EFMD_END - EFMD_UNKNOWN));
	}

	if (dp->d_conf != NULL)
		fmd_conf_close(dp->d_conf);

	if (dp->d_topo != NULL)
		topo_close(dp->d_topo);

	nvlist_free(dp->d_auth);
	(void) nv_alloc_fini(&dp->d_nva);
	dp->d_clockops->fto_fini(dp->d_clockptr);

	(void) pthread_key_delete(dp->d_key);
	bzero(dp, sizeof (fmd_t));

	if (core)
		fmd_panic("forcing core dump at user request\n");
}