Example #1
bool_t
fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
{
	struct fmd_rpc_modinfo *rmi;
	fmd_module_t *mp;

	rvp->rml_list = NULL;
	rvp->rml_err = 0;
	rvp->rml_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rml_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	(void) pthread_mutex_lock(&fmd.d_mod_lock);

	for (mp = fmd_list_next(&fmd.d_mod_list);
	    mp != NULL; mp = fmd_list_next(mp)) {

		if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}

		fmd_module_lock(mp);

		/*
		 * If mod_info is NULL, the module is in the middle of loading:
		 * do not report its presence to observability tools yet.
		 */
		if (mp->mod_info == NULL) {
			fmd_module_unlock(mp);
			free(rmi);
			continue;
		}

		rmi->rmi_name = strdup(mp->mod_name);
		rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
		rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
		rmi->rmi_faulty = mp->mod_error != 0;
		rmi->rmi_next = rvp->rml_list;

		fmd_module_unlock(mp);
		rvp->rml_list = rmi;
		rvp->rml_len++;

		if (rmi->rmi_name == NULL || rmi->rmi_desc == NULL ||
		    rmi->rmi_vers == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	(void) pthread_mutex_unlock(&fmd.d_mod_lock);
	return (TRUE);
}
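
The loop above shows the shape worth copying: take fmd.d_mod_lock, allocate one reply record per module, prepend it to the list, and stop with FMD_ADM_ERR_NOMEM when an allocation fails (the partially built list is still returned along with the error). Below is a minimal, self-contained sketch of that pattern using invented mod_t/modinfo_t stand-ins rather than the real fmd structures:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-ins for fmd_module_t and struct fmd_rpc_modinfo. */
typedef struct mod {
	const char *m_name;
	const char *m_desc;
	struct mod *m_next;
} mod_t;

typedef struct modinfo {
	char *mi_name;
	char *mi_desc;
	struct modinfo *mi_next;
} modinfo_t;

static pthread_mutex_t mod_lock = PTHREAD_MUTEX_INITIALIZER;
static mod_t mod_b = { "cpumem-diagnosis", "CPU/Memory Diagnosis", NULL };
static mod_t mod_a = { "syslog-msgs", "Syslog Messaging Agent", &mod_b };
static mod_t *mod_list = &mod_a;

/*
 * Walk the module list under the lock, prepending one reply record per
 * module.  On any allocation failure, record ENOMEM and return whatever
 * was built so far, mirroring the shape of fmd_adm_modinfo_1_svc().
 */
static modinfo_t *
modinfo_list(int *errp)
{
	modinfo_t *list = NULL, *mi;
	mod_t *mp;

	(void) pthread_mutex_lock(&mod_lock);

	for (mp = mod_list; mp != NULL; mp = mp->m_next) {
		if ((mi = malloc(sizeof (modinfo_t))) == NULL) {
			*errp = ENOMEM;
			break;
		}

		mi->mi_name = strdup(mp->m_name);
		mi->mi_desc = strdup(mp->m_desc);
		mi->mi_next = list;
		list = mi;

		if (mi->mi_name == NULL || mi->mi_desc == NULL) {
			*errp = ENOMEM;
			break;
		}
	}

	(void) pthread_mutex_unlock(&mod_lock);
	return (list);
}

int
main(void)
{
	int err = 0;
	modinfo_t *mi, *next;

	for (mi = modinfo_list(&err); mi != NULL; mi = next) {
		next = mi->mi_next;
		(void) printf("%s: %s\n", mi->mi_name, mi->mi_desc);
		free(mi->mi_name);
		free(mi->mi_desc);
		free(mi);
	}

	return (err);
}
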
Example #2
bool_t
fmd_adm_serdlist_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	void *p;

	rvp->rsl_buf.rsl_buf_len = 0;
	rvp->rsl_buf.rsl_buf_val = NULL;
	rvp->rsl_len = 0;
	rvp->rsl_cnt = 0;
	rvp->rsl_err = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);
	/* In the first pass, collect the overall length of the buffer. */
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_measure, rvp);
	if (rvp->rsl_len == 0) {
		fmd_module_unlock(mp);
		fmd_module_rele(mp);
		return (TRUE);
	}
	p = malloc(rvp->rsl_len);
	if (p) {
		rvp->rsl_buf.rsl_buf_val = p;
		rvp->rsl_buf.rsl_buf_len = rvp->rsl_len;
		bzero(rvp->rsl_buf.rsl_buf_val, rvp->rsl_buf.rsl_buf_len);
		rvp->rsl_len = 0;
		/* In the second pass, populate the buffer with data. */
		fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_record,
		    rvp);
	} else {
		rvp->rsl_err = FMD_ADM_ERR_NOMEM;
	}
	fmd_module_unlock(mp);

	fmd_module_rele(mp);
	return (TRUE);
}
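
fmd_adm_serdlist_1_svc() is a two-pass walk: the first fmd_serd_hash_apply() call only measures the total encoded length, then the buffer is allocated and zeroed, and the second call fills it while rsl_len is re-counted from zero. A small sketch of the same measure-then-record idiom, assuming a trivial invented record format (a length byte followed by the name); measure() and record() below are hypothetical helpers, not the real callbacks:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented data set: the names that will be packed into the reply buffer. */
static const char *names[] = { "cpu.fault", "mem.page", "io.retry" };

/* Pass 1: add this record's encoded size to the running total. */
static void
measure(const char *name, size_t *lenp)
{
	*lenp += 1 + strlen(name);	/* one length byte plus the name */
}

/* Pass 2: append this record to the pre-sized buffer at offset *offp. */
static void
record(const char *name, char *buf, size_t *offp)
{
	size_t n = strlen(name);

	buf[(*offp)++] = (char)n;
	(void) memcpy(buf + *offp, name, n);
	*offp += n;
}

int
main(void)
{
	size_t i, len = 0, off = 0;
	char *buf;

	for (i = 0; i < sizeof (names) / sizeof (names[0]); i++)
		measure(names[i], &len);

	if (len == 0 || (buf = calloc(1, len)) == NULL)
		return (1);

	for (i = 0; i < sizeof (names) / sizeof (names[0]); i++)
		record(names[i], buf, &off);

	(void) printf("packed %zu records into %zu bytes\n", i, len);
	free(buf);
	return (0);
}
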
Example #3
static void
fmd_ckpt_restore_serd(fmd_ckpt_t *ckp, fmd_module_t *mp, const fcf_sec_t *sp)
{
	const fcf_serd_t *fcfd = fmd_ckpt_dataptr(ckp, sp);
	uint_t i, n = sp->fcfs_size / sp->fcfs_entsize;
	const fcf_sec_t *esp;
	const char *s;

	for (i = 0; i < n; i++) {
		esp = fmd_ckpt_secptr(ckp, fcfd->fcfd_events, FCF_SECT_EVENTS);

		if (esp == NULL) {
			fmd_ckpt_error(ckp, EFMD_CKPT_INVAL,
			    "invalid events link %u\n", fcfd->fcfd_events);
		}

		if ((s = fmd_ckpt_strptr(ckp, fcfd->fcfd_name, NULL)) == NULL) {
			fmd_ckpt_error(ckp, EFMD_CKPT_INVAL,
			    "serd name %u is corrupt\n", fcfd->fcfd_name);
		}

		fmd_serd_create((fmd_hdl_t *)mp, s, fcfd->fcfd_n, fcfd->fcfd_t);
		fmd_module_lock(mp);

		fmd_ckpt_restore_events(ckp, fcfd->fcfd_events,
		    (void (*)(void *, fmd_event_t *))fmd_serd_eng_record,
		    fmd_serd_eng_lookup(&mp->mod_serds, s));

		fmd_module_unlock(mp);
		fcfd = (fcf_serd_t *)((uintptr_t)fcfd + sp->fcfs_entsize);
	}
}
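
Note that the walk advances by sp->fcfs_entsize rather than sizeof (fcf_serd_t), so the loop still lines up with the on-disk records even if their entry size differs from the in-memory view of the structure. A stand-alone sketch of that stride idiom with an invented record type:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented fixed-size record; the real FCF sections carry similar entries. */
typedef struct rec {
	uint32_t r_id;
	uint32_t r_count;
} rec_t;

int
main(void)
{
	/*
	 * Pretend the section was written with 16-byte entries even though
	 * we only understand the first 8 bytes of each one.
	 */
	const size_t entsize = 16;
	union {
		uint8_t u_bytes[3 * 16];
		uint64_t u_align;		/* force alignment */
	} section;
	const rec_t *rp;
	size_t i, n;

	(void) memset(&section, 0, sizeof (section));
	for (i = 0; i < 3; i++) {
		rec_t r = { (uint32_t)i, (uint32_t)(i * 10) };
		(void) memcpy(section.u_bytes + i * entsize, &r, sizeof (r));
	}

	/* Walk the section using the recorded entry size as the stride. */
	n = sizeof (section.u_bytes) / entsize;
	rp = (const rec_t *)(uintptr_t)section.u_bytes;

	for (i = 0; i < n; i++) {
		(void) printf("rec %" PRIu32 " count %" PRIu32 "\n",
		    rp->r_id, rp->r_count);
		rp = (const rec_t *)((uintptr_t)rp + entsize);
	}

	return (0);
}
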
Example #4
bool_t
fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		*rvp = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		if (fmd_serd_eng_fired(sgp)) {
			err = FMD_ADM_ERR_SERDFIRED;
		} else {
			fmd_serd_eng_reset(sgp);
			fmd_module_setdirty(mp);
		}
	} else
		err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}
Example #5
bool_t
fmd_adm_serdinfo_1_svc(char *mname, char *sname, struct fmd_rpc_serdinfo *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;

	bzero(rvp, sizeof (struct fmd_rpc_serdinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rsi_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		rvp->rsi_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		fmd_adm_serdinfo_record(sgp, rvp);
	} else
		rvp->rsi_err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	return (TRUE);
}
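
fmd_adm_serdreset_1_svc() and fmd_adm_serdinfo_1_svc() share one discipline: fmd_modhash_lookup() hands back a held module, fmd_module_lock() guards the per-module SERD hash while the engine is looked up and acted on, and both the lock and the hold are dropped on every exit path. The sketch below condenses that hold/lock/operate/unlock/release shape onto an invented refcounted handle (handle_t and its helpers are illustrative only; errno values stand in for the FMD_ADM_ERR_* codes):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Invented refcounted, lockable handle standing in for fmd_module_t. */
typedef struct handle {
	pthread_mutex_t h_lock;
	int h_refs;
	int h_fired;	/* has the engine fired? */
	int h_count;	/* events recorded so far */
} handle_t;

static handle_t the_handle = { PTHREAD_MUTEX_INITIALIZER, 1, 0, 3 };

/* Lookup returns the handle with an extra hold, like fmd_modhash_lookup(). */
static handle_t *
handle_hold(const char *name)
{
	if (strcmp(name, "cpumem-diagnosis") != 0)
		return (NULL);
	the_handle.h_refs++;
	return (&the_handle);
}

static void
handle_rele(handle_t *hp)
{
	hp->h_refs--;
}

/* Reset the counter unless the engine already fired, as serdreset does. */
static int
counter_reset(const char *name)
{
	handle_t *hp;
	int err = 0;

	if ((hp = handle_hold(name)) == NULL)
		return (ENOENT);		/* cf. FMD_ADM_ERR_MODSRCH */

	(void) pthread_mutex_lock(&hp->h_lock);

	if (hp->h_fired)
		err = EBUSY;			/* cf. FMD_ADM_ERR_SERDFIRED */
	else
		hp->h_count = 0;

	(void) pthread_mutex_unlock(&hp->h_lock);
	handle_rele(hp);

	return (err);
}

int
main(void)
{
	(void) printf("reset: %d\n", counter_reset("cpumem-diagnosis"));
	(void) printf("reset: %d\n", counter_reset("no-such-module"));
	return (0);
}
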
Example #6
static void
fmd_ckpt_restore_case(fmd_ckpt_t *ckp, fmd_module_t *mp, const fcf_sec_t *sp)
{
	const fcf_case_t *fcfc = fmd_ckpt_dataptr(ckp, sp);
	const char *uuid = fmd_ckpt_strptr(ckp, fcfc->fcfc_uuid, NULL);
	fmd_case_t *cp;
	int n;

	if (uuid == NULL || fcfc->fcfc_state > FCF_CASE_CLOSE_WAIT) {
		fmd_ckpt_error(ckp, EFMD_CKPT_INVAL, "corrupt %u case uuid "
		    "and/or state\n", (uint_t)(sp - ckp->ckp_secp));
	}

	fmd_module_lock(mp);

	if ((cp = fmd_case_recreate(mp, NULL,
	    fcfc->fcfc_state != FCF_CASE_UNSOLVED ? FMD_CASE_SOLVED :
	    FMD_CASE_UNSOLVED, uuid, NULL)) == NULL) {
		fmd_ckpt_error(ckp, EFMD_CKPT_INVAL,
		    "duplicate case uuid: %s\n", uuid);
	}

	fmd_ckpt_restore_events(ckp, fcfc->fcfc_principal,
	    (void (*)(void *, fmd_event_t *))fmd_case_insert_principal, cp);

	fmd_ckpt_restore_events(ckp, fcfc->fcfc_events,
	    (void (*)(void *, fmd_event_t *))fmd_case_insert_event, cp);

	/*
	 * Once solved, treat suspects from resource cache as master copy.
	 *
	 * If !fmd.d_running, this module must be a builtin, and so we don't
	 * want to restore suspects or call fmd_case_transition_update() at this
	 * stage. The suspects will be added later from the resource cache.
	 * Calling fmd_case_transition("SOLVED") is OK here as the state is
	 * already solved, so all it does is update the case flags.
	 */
	if (fmd.d_running && (n = ((fmd_case_impl_t *)cp)->ci_nsuspects) == 0)
		n = fmd_ckpt_restore_suspects(ckp, cp, fcfc->fcfc_suspects);

	if (!fmd.d_running)
		fmd_case_transition(cp, FMD_CASE_SOLVED, FMD_CF_SOLVED);
	else if (fcfc->fcfc_state == FCF_CASE_SOLVED)
		fmd_case_transition_update(cp, FMD_CASE_SOLVED, FMD_CF_SOLVED);
	else if (fcfc->fcfc_state == FCF_CASE_CLOSE_WAIT && n != 0)
		fmd_case_transition(cp, FMD_CASE_CLOSE_WAIT, FMD_CF_SOLVED);
	else if (fcfc->fcfc_state == FCF_CASE_CLOSE_WAIT && n == 0)
		fmd_case_transition(cp, FMD_CASE_CLOSE_WAIT, 0);

	fmd_module_unlock(mp);
	fmd_ckpt_restore_bufs(ckp, mp, cp, fcfc->fcfc_bufs);
}
Example #7
/*PRINTFLIKE3*/
static void
fmd_ckpt_error(fmd_ckpt_t *ckp, int err, const char *format, ...)
{
	fmd_module_t *mp = ckp->ckp_mp;
	va_list ap;

	va_start(ap, format);
	fmd_verror(err, format, ap);
	va_end(ap);

	if (fmd_module_locked(mp))
		fmd_module_unlock(mp);

	fmd_ckpt_destroy(ckp);
	fmd_module_abort(mp, err);
}
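
fmd_ckpt_error() is a printf-like wrapper: the /*PRINTFLIKE3*/ annotation tells lint that argument 3 is the format string, and the restore callers above rely on it not returning control to the failed path, since it ends by aborting the module. A minimal sketch of the same wrapper shape, using the GCC/Clang format and noreturn attributes in place of the lint comment (ckpt_error() here is an invented example, not the fmd implementation):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Invented non-returning error helper in the spirit of fmd_ckpt_error():
 * format the message, release whatever the caller holds, and abort.
 */
static void ckpt_error(int err, const char *format, ...)
    __attribute__((format(printf, 2, 3), noreturn));

static void
ckpt_error(int err, const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	(void) vfprintf(stderr, format, ap);
	va_end(ap);

	/* The real code unlocks the module and destroys checkpoint state here. */
	exit(err);
}

int
main(void)
{
	ckpt_error(2, "invalid events link %u\n", 42U);
	/* NOTREACHED */
}
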
Example #8
File: fmd.c  Project: andreiw/polaris
void
fmd_destroy(fmd_t *dp)
{
	fmd_module_t *mp;
	fmd_case_t *cp;
	int core;

	(void) fmd_conf_getprop(fmd.d_conf, "core", &core);

	fmd_rpc_fini();
	fmd_dr_fini();

	if (dp->d_xprt_ids != NULL)
		fmd_xprt_suspend_all();

	/*
	 * Unload the self-diagnosis module first.  This ensures that it does
	 * not get confused as we start unloading other modules, etc.  We must
	 * hold the dispq lock as a writer while doing so since it uses d_self.
	 */
	if (dp->d_self != NULL) {
		fmd_module_t *self;

		(void) pthread_rwlock_wrlock(&dp->d_disp->dq_lock);
		self = dp->d_self;
		dp->d_self = NULL;
		(void) pthread_rwlock_unlock(&dp->d_disp->dq_lock);

		fmd_module_unload(self);
		fmd_module_rele(self);
	}

	/*
	 * Unload modules in reverse order *except* for the root module, which
	 * is first in the list.  This allows it to keep its thread and trace.
	 */
	for (mp = fmd_list_prev(&dp->d_mod_list); mp != dp->d_rmod; ) {
		fmd_module_unload(mp);
		mp = fmd_list_prev(mp);
	}

	if (dp->d_mod_hash != NULL) {
		fmd_modhash_destroy(dp->d_mod_hash);
		dp->d_mod_hash = NULL;
	}

	/*
	 * Close both log files now that modules are no longer active.  We must
	 * set these pointers to NULL in case any subsequent errors occur.
	 */
	if (dp->d_errlog != NULL) {
		fmd_log_rele(dp->d_errlog);
		dp->d_errlog = NULL;
	}

	if (dp->d_fltlog != NULL) {
		fmd_log_rele(dp->d_fltlog);
		dp->d_fltlog = NULL;
	}

	/*
	 * Now destroy the resource cache: each ASRU contains a case reference,
	 * which may in turn contain a pointer to a referenced owning module.
	 */
	if (dp->d_asrus != NULL) {
		fmd_asru_hash_destroy(dp->d_asrus);
		dp->d_asrus = NULL;
	}

	/*
	 * Now that all data structures that refer to modules are torn down,
	 * no modules should be remaining on the module list except for d_rmod.
	 * If we trip one of these assertions, we're missing a rele somewhere.
	 */
	ASSERT(fmd_list_prev(&dp->d_mod_list) == dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == dp->d_rmod);

	/*
	 * Now destroy the root module.  We clear its thread key first so any
	 * calls to fmd_trace() inside of the module code will be ignored.
	 */
	(void) pthread_setspecific(dp->d_key, NULL);
	fmd_module_lock(dp->d_rmod);

	while ((cp = fmd_list_next(&dp->d_rmod->mod_cases)) != NULL)
		fmd_case_discard(cp);

	fmd_module_unlock(dp->d_rmod);
	fmd_free(dp->d_rmod->mod_stats, sizeof (fmd_modstat_t));
	dp->d_rmod->mod_stats = NULL;

	(void) pthread_mutex_lock(&dp->d_rmod->mod_lock);
	dp->d_rmod->mod_flags |= FMD_MOD_FINI;
	(void) pthread_mutex_unlock(&dp->d_rmod->mod_lock);

	fmd_module_rele(dp->d_rmod);
	ASSERT(fmd_list_next(&dp->d_mod_list) == NULL);

	/*
	 * Now destroy the remaining global data structures.  If 'core' was
	 * set to true, force a core dump so we can check for memory leaks.
	 */
	if (dp->d_cases != NULL)
		fmd_case_hash_destroy(dp->d_cases);
	if (dp->d_disp != NULL)
		fmd_dispq_destroy(dp->d_disp);
	if (dp->d_timers != NULL)
		fmd_timerq_destroy(dp->d_timers);
	if (dp->d_schemes != NULL)
		fmd_scheme_hash_destroy(dp->d_schemes);
	if (dp->d_xprt_ids != NULL)
		fmd_idspace_destroy(dp->d_xprt_ids);

	if (dp->d_errstats != NULL) {
		fmd_free(dp->d_errstats,
		    sizeof (fmd_stat_t) * (EFMD_END - EFMD_UNKNOWN));
	}

	if (dp->d_conf != NULL)
		fmd_conf_close(dp->d_conf);

	if (dp->d_topo != NULL)
		topo_close(dp->d_topo);

	nvlist_free(dp->d_auth);
	(void) nv_alloc_fini(&dp->d_nva);
	dp->d_clockops->fto_fini(dp->d_clockptr);

	(void) pthread_key_delete(dp->d_key);
	bzero(dp, sizeof (fmd_t));

	if (core)
		fmd_panic("forcing core dump at user request\n");
}
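
One detail worth noting in fmd_destroy() is that each subsystem pointer is cleared as soon as its resources are released (d_self, d_mod_hash, d_errlog, d_fltlog, d_asrus), so anything that runs later in teardown sees NULL rather than a dangling pointer. A tiny sketch of that release-and-clear idiom on an invented daemon structure:

#include <stdlib.h>
#include <string.h>

/* Invented daemon state: two independently allocated subsystems. */
typedef struct daemon {
	char *d_errlog;
	char *d_fltlog;
} daemon_t;

static void
daemon_destroy(daemon_t *dp)
{
	/*
	 * Release each subsystem and clear its pointer immediately, so any
	 * code that runs later in teardown sees NULL instead of freed memory.
	 */
	if (dp->d_errlog != NULL) {
		free(dp->d_errlog);
		dp->d_errlog = NULL;
	}

	if (dp->d_fltlog != NULL) {
		free(dp->d_fltlog);
		dp->d_fltlog = NULL;
	}

	/* Finally scrub the structure itself, as fmd_destroy() does with bzero(). */
	(void) memset(dp, 0, sizeof (daemon_t));
}

int
main(void)
{
	daemon_t d = { strdup("errlog"), strdup("fltlog") };

	daemon_destroy(&d);
	return (0);
}
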