Example 1
nvlist_t *
cmd_fault_add_location(fmd_hdl_t *hdl, nvlist_t *flt, const char *locstr)
{
	char *t, *s;

	if (nvlist_lookup_string(flt, FM_FAULT_LOCATION, &t) == 0)
		return (flt); /* already has location value */

	/* Replace occurrence of ": " with "/" to avoid confusing ILOM. */
	t = fmd_hdl_zalloc(hdl, strlen(locstr) + 1, FMD_SLEEP);
	s = strstr(locstr, ": ");
	if (s != NULL) {
		(void) strncpy(t, locstr, s - locstr);
		(void) strcat(t, "/");
		(void) strcat(t, s + 2);
	} else {
		(void) strcpy(t, locstr);
	}

	/* Also, remove any J number from end of this string. */
	s = strstr(t, "/J");
	if (s != NULL)
		*s = '\0';

	if (nvlist_add_string(flt, FM_FAULT_LOCATION, t) != 0)
		fmd_hdl_error(hdl, "unable to alloc location for fault\n");
	fmd_hdl_free(hdl, t, strlen(locstr) + 1);
	return (flt);
}
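The string surgery above is easy to exercise in isolation. Below is a minimal standalone sketch of the same transformation, assuming plain libc (calloc() in place of fmd_hdl_zalloc()); rewrite_location() is our name for the helper, not part of the module.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical standalone version of the transformation above: replace
 * the first ": " with "/" and drop any trailing "/J..." component.
 */
static char *
rewrite_location(const char *locstr)
{
	/* The result can only shrink, so strlen(locstr) + 1 is enough. */
	char *t = calloc(1, strlen(locstr) + 1);
	char *s;

	if (t == NULL)
		return (NULL);

	if ((s = strstr(locstr, ": ")) != NULL) {
		(void) strncpy(t, locstr, s - locstr);
		(void) strcat(t, "/");
		(void) strcat(t, s + 2);
	} else {
		(void) strcpy(t, locstr);
	}

	if ((s = strstr(t, "/J")) != NULL)
		*s = '\0';

	return (t);
}

int
main(void)
{
	char *loc = rewrite_location("MB/CMP0: BR1/CH0/J0901");

	/* Expect "MB/CMP0/BR1/CH0" for the input above. */
	(void) printf("%s\n", loc != NULL ? loc : "(alloc failed)");
	free(loc);
	return (0);
}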
Example 2
cmd_branch_t *
cmd_branch_create(fmd_hdl_t *hdl, nvlist_t *asru)
{
	cmd_branch_t *branch;
	const char *b_unum;

	if ((b_unum = cmd_fmri_get_unum(asru)) == NULL) {
		CMD_STAT_BUMP(bad_mem_asru);
		return (NULL);
	}

	fmd_hdl_debug(hdl, "branch_create: creating new branch %s\n", b_unum);
	CMD_STAT_BUMP(branch_creat);

	branch = fmd_hdl_zalloc(hdl, sizeof (cmd_branch_t), FMD_SLEEP);
	branch->branch_nodetype = CMD_NT_BRANCH;
	branch->branch_version = CMD_BRANCH_VERSION;

	cmd_bufname(branch->branch_bufname, sizeof (branch->branch_bufname),
	    "branch_%s", b_unum);
	cmd_fmri_init(hdl, &branch->branch_asru, asru, "branch_asru_%s",
	    b_unum);

	(void) nvlist_lookup_string(branch->branch_asru_nvl, FM_FMRI_MEM_UNUM,
	    (char **)&branch->branch_unum);

	cmd_list_append(&cmd.cmd_branches, branch);
	cmd_branch_dirty(hdl, branch);

	return (branch);
}
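cmd_bufname() and cmd_fmri_init() both take printf-style name formats ("branch_%s", b_unum). A helper of that shape is a few lines of vsnprintf(); the sketch below is a hedged standalone equivalent that assumes nothing about the real cmd_bufname() beyond its calling convention.

#include <stdarg.h>
#include <stdio.h>

/* Illustrative stand-in for cmd_bufname(): format a name into buf. */
static void
bufname(char *buf, size_t bufsz, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, bufsz, fmt, ap);
	va_end(ap);
}

int
main(void)
{
	char name[64];

	bufname(name, sizeof (name), "branch_%s", "MB/CMP0/BR1");
	(void) printf("%s\n", name);	/* prints branch_MB/CMP0/BR1 */
	return (0);
}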
Example 3
/*
 * Read back the persistent representation of an active case.
 */
static zfs_case_t *
zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
{
	zfs_case_t *zcp;

	zcp = fmd_hdl_zalloc(hdl, sizeof (zfs_case_t), FMD_SLEEP);
	zcp->zc_case = cp;

	fmd_buf_read(hdl, cp, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));

	if (zcp->zc_data.zc_version > CASE_DATA_VERSION_SERD) {
		fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
		return (NULL);
	}

	/*
	 * fmd_buf_read() will have already zeroed out the remainder of the
	 * buffer, so we don't have to do anything special if the version
	 * doesn't include the SERD engine name.
	 */

	if (zcp->zc_data.zc_has_remove_timer)
		zcp->zc_remove_timer = fmd_timer_install(hdl, zcp,
		    NULL, zfs_remove_timeout);

	uu_list_node_init(zcp, &zcp->zc_node, zfs_case_pool);
	(void) uu_list_insert_before(zfs_cases, NULL, zcp);

	fmd_case_setspecific(hdl, cp, zcp);

	return (zcp);
}
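The version gate is the part worth imitating when reading persisted state: fmd_buf_read() zero-fills the destination first, so fields added by newer versions read back as zero from older records, while records newer than the module understands are rejected outright. A minimal standalone sketch of the same pattern, assuming an invented case_data_t layout rather than the real zfs-diagnosis one:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	CASE_DATA_VERSION	2	/* newest layout this code knows */

/* Invented persisted-state layout, for illustration only. */
typedef struct case_data {
	uint32_t	cd_version;
	uint32_t	cd_has_timer;
	char		cd_serd[32];	/* field added in version 2 */
} case_data_t;

/*
 * Zero-fill before copying (as fmd_buf_read() does), so a shorter,
 * older record leaves the newer fields zeroed rather than garbage.
 */
static case_data_t *
case_unserialize(const void *buf, size_t bufsz)
{
	case_data_t *cd = calloc(1, sizeof (case_data_t));

	if (cd == NULL)
		return (NULL);
	(void) memcpy(cd, buf, bufsz < sizeof (*cd) ? bufsz : sizeof (*cd));

	if (cd->cd_version > CASE_DATA_VERSION) {
		free(cd);		/* newer than we understand */
		return (NULL);
	}
	return (cd);
}

int
main(void)
{
	uint32_t v1[2] = { 1, 1 };	/* a version-1 record: no serd name */
	case_data_t *cd = case_unserialize(v1, sizeof (v1));

	if (cd != NULL) {
		(void) printf("version %u, serd \"%s\"\n",
		    cd->cd_version, cd->cd_serd);
		free(cd);
	}
	return (0);
}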
Example 4
void
_fmd_init(fmd_hdl_t *hdl)
{
	sp_monitor_t *smp;
	int error;
	char *msg;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0)
		return;

	smp = fmd_hdl_zalloc(hdl, sizeof (sp_monitor_t), FMD_SLEEP);
	fmd_hdl_setspecific(hdl, smp);

	if ((smp->sm_hdl = ipmi_open(&error, &msg, IPMI_TRANSPORT_BMC, NULL))
	    == NULL) {
		/*
		 * If /dev/ipmi0 doesn't exist on the system, then unload the
		 * module without doing anything.
		 */
		if (error != EIPMI_BMC_OPEN_FAILED)
			fmd_hdl_abort(hdl, "failed to initialize IPMI "
			    "connection: %s\n", msg);
		fmd_hdl_debug(hdl, "failed to load: no IPMI connection "
		    "present");
		fmd_hdl_free(hdl, smp, sizeof (sp_monitor_t));
		fmd_hdl_unregister(hdl);
		return;
	}

	/*
	 * Attempt an initial uptime() call.  If the IPMI command is
	 * unrecognized, then this is an unsupported platform and the module
	 * should be unloaded.  Any other error is treated as a transient
	 * failure.
	 */
	if ((error = ipmi_sunoem_uptime(smp->sm_hdl, &smp->sm_seconds,
	    &smp->sm_generation)) != 0 &&
	    ipmi_errno(smp->sm_hdl) == EIPMI_INVALID_COMMAND) {
		fmd_hdl_debug(hdl, "failed to load: uptime command "
		    "not supported");
		ipmi_close(smp->sm_hdl);
		fmd_hdl_free(hdl, smp, sizeof (sp_monitor_t));
		fmd_hdl_unregister(hdl);
		return;
	}

	smp->sm_interval = fmd_prop_get_int64(hdl, "interval");

	if (error == 0)
		fmd_hdl_debug(hdl, "successfully loaded, uptime = %u seconds "
		    "(generation %u)", smp->sm_seconds, smp->sm_generation);
	else
		fmd_hdl_debug(hdl, "successfully loaded, but uptime call "
		    "failed: %s", ipmi_errmsg(smp->sm_hdl));

	/*
	 * Set up the recurring timer.
	 */
	(void) fmd_timer_install(hdl, NULL, NULL, 0);
}
Example 5
int
cpu_offline(fmd_hdl_t *hdl, nvlist_t *asru, const char *uuid, int cpustate)
{
	int i;
	uint_t cpuid;
	cma_cpu_t *cpu;

	if (nvlist_lookup_uint32(asru, FM_FMRI_CPU_ID, &cpuid) != 0) {
		fmd_hdl_debug(hdl, "missing '%s'\n", FM_FMRI_CPU_ID);
		cma_stats.bad_flts.fmds_value.ui64++;
		return (CMA_RA_FAILURE);
	}

	/*
	 * cpu offlining using ldom_fmri_retire() may be asynchronous, so we
	 * have to set the timer and check the cpu status later.
	 */
	for (i = 0; i < cma.cma_cpu_tries;
	    i++, (void) nanosleep(&cma.cma_cpu_delay, NULL)) {
		if (cpu_cmd(hdl, asru, cpustate) != -1) {
			cma_stats.cpu_flts.fmds_value.ui64++;
			break;
		}
	}

	if (i >= cma.cma_cpu_tries) {
		cma_stats.cpu_fails.fmds_value.ui64++;
	}

	/*
	 * Check back later to see whether the cpu has gone offline.
	 */
	fmd_hdl_debug(hdl, "cpu is not offline yet - sleeping\n");

	/*
	 * Create a cpu node and add it to the head of the cpu list.
	 */
	cpu = fmd_hdl_zalloc(hdl, sizeof (cma_cpu_t), FMD_SLEEP);
	(void) nvlist_dup(asru, &cpu->cpu_fmri, 0);
	if (uuid != NULL)
		cpu->cpu_uuid = fmd_hdl_strdup(hdl, uuid, FMD_SLEEP);

	cpu->cpuid = cpuid;
	cpu->cpu_next = cma.cma_cpus;
	cma.cma_cpus = cpu;

	if (cma.cma_cpu_timerid != 0)
		fmd_timer_remove(hdl, cma.cma_cpu_timerid);

	cma.cma_cpu_curdelay = cma.cma_cpu_mindelay;

	cma.cma_cpu_timerid =
	    fmd_timer_install(hdl, NULL, NULL, cma.cma_cpu_curdelay);

	return (CMA_RA_FAILURE);
}
Example 6
void
cma_cpu_start_retry(fmd_hdl_t *hdl, nvlist_t *fmri, const char *uuid,
    boolean_t repair)
{
	cma_cpu_t *cpu;
	char *scheme;
	uint_t cpuid;
	nvlist_t *asru = NULL;
	topo_hdl_t *thp;
	int err;

	if (repair || nvlist_lookup_string(fmri, FM_FMRI_SCHEME, &scheme) != 0)
		return;
	if (strcmp(scheme, FM_FMRI_SCHEME_CPU) == 0) {
		if (nvlist_lookup_uint32(fmri, FM_FMRI_CPU_ID, &cpuid) != 0)
			return;
	} else if (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0) {
		return;
	} else {
		/* lookup cpuid from ASRU */
		thp = fmd_fmri_topo_hold(TOPO_VERSION);
		if (thp != NULL) {
			(void) topo_fmri_asru(thp, fmri, &asru, &err);
			fmd_fmri_topo_rele(thp);
		}
		if (nvlist_lookup_uint32(asru, FM_FMRI_CPU_ID, &cpuid) != 0) {
			nvlist_free(asru);
			return;
		}
	}

	/*
	 * Check back later to see whether the cpu has gone offline.
	 */
	fmd_hdl_debug(hdl, "cpu %u is not offline yet - sleeping\n", cpuid);

	/*
	 * Create a cpu node and add it to the head of the cpu list.
	 */
	cpu = fmd_hdl_zalloc(hdl, sizeof (cma_cpu_t), FMD_SLEEP);
	(void) nvlist_dup(fmri, &cpu->cpu_fmri, 0);
	if (uuid != NULL)
		cpu->cpu_uuid = fmd_hdl_strdup(hdl, uuid, FMD_SLEEP);

	cpu->cpuid = cpuid;
	cpu->cpu_next = cma.cma_cpus;
	cma.cma_cpus = cpu;

	if (cma.cma_cpu_timerid != 0)
		fmd_timer_remove(hdl, cma.cma_cpu_timerid);

	cma.cma_cpu_curdelay = cma.cma_cpu_mindelay;

	cma.cma_cpu_timerid =
	    fmd_timer_install(hdl, NULL, NULL, cma.cma_cpu_curdelay);
}
Example 7
static etm_xport_addr_t
etm_xport_dup_addr(fmd_hdl_t *hdl, etm_xport_addr_t addr)
{
    etm_xport_addr_t new_addr;	/* new transport address */

    new_addr = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_addr_t), FMD_SLEEP);
    (void) memcpy(new_addr, addr, sizeof (_etm_xport_addr_t));
    return (new_addr);

} /* etm_xport_dup_addr() */
Example 8
static int
get_notify_prefs(fmd_hdl_t *hdl, nvlist_t *ev_nvl, nvlist_t ***pref_nvl,
    uint_t *nprefs)
{
	nvlist_t *top_nvl, **np_nvlarr, *mech_nvl;
	nvlist_t **tmparr;
	int ret, i;
	uint_t nelem, nslelem;

	if ((ret = smf_notify_get_params(&top_nvl, ev_nvl)) != SCF_SUCCESS) {
		ret = scf_error();
		if (ret != SCF_ERROR_NOT_FOUND) {
			fmd_hdl_debug(hdl, "Error looking up notification "
			    "preferences (%s)", scf_strerror(ret));
		}
		return (ret);
	}

	if (nvlist_lookup_nvlist_array(top_nvl, SCF_NOTIFY_PARAMS, &np_nvlarr,
	    &nelem) != 0) {
		fmd_hdl_debug(hdl, "Malformed preference nvlist\n");
		ret = SCF_ERROR_INVALID_ARGUMENT;
		goto pref_done;
	}

	tmparr = fmd_hdl_alloc(hdl, nelem * sizeof (nvlist_t *), FMD_SLEEP);
	nslelem = 0;

	for (i = 0; i < nelem; i++) {
		if (nvlist_lookup_nvlist(np_nvlarr[i], "syslog", &mech_nvl)
		    == 0)
			tmparr[nslelem++] = fmd_nvl_dup(hdl, mech_nvl,
			    FMD_SLEEP);
	}

	if (nslelem != 0) {
		size_t sz = nslelem * sizeof (nvlist_t *);

		*pref_nvl = fmd_hdl_zalloc(hdl, sz, FMD_SLEEP);
		*nprefs = nslelem;
		bcopy(tmparr, *pref_nvl, sz);
		ret = 0;
	} else {
		*pref_nvl = NULL;
		*nprefs = 0;
		ret = SCF_ERROR_NOT_FOUND;
	}

	fmd_hdl_free(hdl, tmparr, nelem * sizeof (nvlist_t *));
pref_done:
	nvlist_free(top_nvl);
	return (ret);
}
Example 9
etm_xport_addr_t *
etm_xport_get_ev_addrv(fmd_hdl_t *hdl, nvlist_t *evp)
{
    _etm_xport_addr_t	*_addr;		/* address handle */
    _etm_xport_addr_t	**_addrv;	/* vector of addresses */

    if (evp == NULL) {

        /*
         * allocate address handles for default/policy destinations
         *
         * in reality we have just 1 dst transport addr
         */

        _addr = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_addr_t),
                               FMD_SLEEP);
    } else {

        /*
         * allocate address handles per FMA event content
         *
         * in reality we have just 1 dst transport addr
         */

        _addr = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_addr_t),
                               FMD_SLEEP);
    } /* whether caller passed in a FMA event */

    /* allocate vector with 1 non-NULL transport addr */

    _addrv = fmd_hdl_zalloc(hdl, 2 * sizeof (_etm_xport_addr_t *),
                            FMD_SLEEP);

    _addr->fn = etm_xport_get_fn(hdl, ETM_IO_OP_WR);
    _addr->magic_num = ETM_XPORT_DD_MAGIC_ADDR;
    _addrv[0] = _addr;
    _addrv[1] = NULL;

    return ((void *) _addrv);

} /* etm_xport_get_ev_addrv() */
Example 10
void
cmd_branch_add_dimm(fmd_hdl_t *hdl, cmd_branch_t *branch, cmd_dimm_t *dimm)
{
	cmd_branch_memb_t *bm;

	if (dimm == NULL)
		return;

	fmd_hdl_debug(hdl, "Attaching dimm %s to branch %s\n",
	    dimm->dimm_unum, branch->branch_unum);
	bm = fmd_hdl_zalloc(hdl, sizeof (cmd_branch_memb_t), FMD_SLEEP);
	bm->dimm = dimm;
	cmd_list_append(&branch->branch_dimms, bm);
}
Example 11
gmem_dimm_t *
gmem_dimm_create(fmd_hdl_t *hdl, nvlist_t *asru, nvlist_t *det)
{
	gmem_dimm_t *dimm;
	nvlist_t *fmri;
	char *serial;
	uint32_t chip_id;

	if (nvlist_lookup_string(asru, FM_FMRI_HC_SERIAL_ID, &serial) != 0) {
		fmd_hdl_debug(hdl, "Unable to get dimm serial\n");
		return (NULL);
	}

	if (nvlist_dup(asru, &fmri, 0) != 0) {
		fmd_hdl_debug(hdl, "dimm create nvlist dup failed");
		return (NULL);
	}

	(void) gmem_find_dimm_chip(det, &chip_id);

	fmd_hdl_debug(hdl, "dimm_create: creating new DIMM serial=%s\n",
	    serial);
	GMEM_STAT_BUMP(dimm_creat);

	dimm = fmd_hdl_zalloc(hdl, sizeof (gmem_dimm_t), FMD_SLEEP);
	dimm->dimm_nodetype = GMEM_NT_DIMM;
	dimm->dimm_version = GMEM_DIMM_VERSION;
	dimm->dimm_phys_addr_low = ULLONG_MAX;
	dimm->dimm_phys_addr_hi = 0;
	dimm->dimm_syl_error = USHRT_MAX;
	dimm->dimm_chipid = chip_id;

	gmem_bufname(dimm->dimm_bufname, sizeof (dimm->dimm_bufname), "dimm_%s",
	    serial);
	gmem_fmri_init(hdl, &dimm->dimm_asru, fmri, "dimm_asru_%s", serial);

	nvlist_free(fmri);

	(void) nvlist_lookup_string(dimm->dimm_asru_nvl, FM_FMRI_HC_SERIAL_ID,
	    (char **)&dimm->dimm_serial);

	gmem_mem_retirestat_create(hdl, &dimm->dimm_retstat, dimm->dimm_serial,
	    0, GMEM_DIMM_STAT_PREFIX);

	gmem_list_append(&gmem.gm_dimms, dimm);
	gmem_dimm_dirty(hdl, dimm);

	return (dimm);
}
Example 12
static cmd_branch_t *
branch_wrapv0(fmd_hdl_t *hdl, cmd_branch_pers_t *pers, size_t psz)
{
	cmd_branch_t *branch;

	if (psz != sizeof (cmd_branch_pers_t)) {
		fmd_hdl_abort(hdl, "size of state doesn't match size of "
		    "version 0 state (%u bytes).\n",
		    sizeof (cmd_branch_pers_t));
	}

	branch = fmd_hdl_zalloc(hdl, sizeof (cmd_branch_t), FMD_SLEEP);
	bcopy(pers, branch, sizeof (cmd_branch_pers_t));
	fmd_hdl_free(hdl, pers, psz);
	return (branch);
}
Example 13
etm_xport_conn_t *
etm_xport_get_addr_conn(fmd_hdl_t *hdl, etm_xport_conn_t *connv,
                        etm_xport_addr_t addr)
{
    _etm_xport_conn_t	**_connv; /* vector of connections */
    _etm_xport_conn_t	**_mcv;	/* matching connections vector */
    _etm_xport_addr_t	*_addr;	/* transport addr to match */
    int			n;	/* matching transport addr cnt */
    int			i;	/* vector index */

    if ((connv == NULL) || (addr == NULL)) {
        errno = EINVAL;
        etm_xport_stats.xport_get_addr_conn_badargs.fmds_value.ui64++;
        return (NULL);
    }

    _connv = (void*)connv;
    _addr = (void*)addr;

    /* count, allocate space for, and copy, all matching addrs */

    n = 0;
    for (i = 0; _connv[i] != NULL; i++) {
        if ((_connv[i]->addr == _addr) ||
                ((_connv[i]->addr != NULL) &&
                 (_connv[i]->addr->fn == _addr->fn))) {
            n++;
        }
    } /* for counting how many addresses match */

    _mcv = fmd_hdl_zalloc(hdl, (n + 1) * sizeof (_etm_xport_conn_t *),
                          FMD_SLEEP);
    n = 0;
    for (i = 0; _connv[i] != NULL; i++) {
        if ((_connv[i]->addr == _addr) ||
                ((_connv[i]->addr != NULL) &&
                 (_connv[i]->addr->fn == _addr->fn))) {
            _mcv[n] = _connv[i];
            n++;
        }
    } /* for copying matching address pointers */
    _mcv[n] = NULL;

    return ((void *) _mcv);

} /* etm_xport_get_addr_conn() */
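etm_xport_get_addr_conn() shows the standard two-pass way to build a NULL-terminated vector when the match count isn't known up front: one loop to count, one allocation of n + 1 slots, a second identical loop to copy, then the terminating NULL. The same shape reduced to a standalone demo over an int array (match() is an arbitrary stand-in predicate):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in predicate: a value "matches" if it is even. */
static int
match(int v)
{
	return (v % 2 == 0);
}

/* Build a NULL-terminated vector of pointers to matching elements. */
static int **
collect_matches(int *v, int nelem)
{
	int **mcv;
	int n, i;

	/* Pass 1: count matches so the vector can be sized exactly. */
	n = 0;
	for (i = 0; i < nelem; i++) {
		if (match(v[i]))
			n++;
	}

	/* n + 1 slots; calloc() zero-fills, so the terminator is free. */
	if ((mcv = calloc(n + 1, sizeof (int *))) == NULL)
		return (NULL);

	/* Pass 2: same test again, copying the pointers this time. */
	n = 0;
	for (i = 0; i < nelem; i++) {
		if (match(v[i]))
			mcv[n++] = &v[i];
	}
	return (mcv);
}

int
main(void)
{
	int data[] = { 1, 2, 3, 4, 5, 6 };
	int **mcv = collect_matches(data, 6);
	int i;

	for (i = 0; mcv != NULL && mcv[i] != NULL; i++)
		(void) printf("match: %d\n", *mcv[i]);
	free(mcv);
	return (0);
}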
Example 14
char *
mbd_label(fmd_hdl_t *hdl, cmd_branch_t *branch, const char *nacname)
{
	cmd_dimm_t *dimm;
	cmd_branch_memb_t *bm;
	char *p;
	size_t s;

	for (bm = cmd_list_next(&branch->branch_dimms); bm != NULL;
	    bm = cmd_list_next(bm)) {
		dimm = bm->dimm;
		if ((p = strstr(dimm->dimm_unum, nacname)) != NULL) {
			p = strchr(p, '/');	/* include instance number */
			if (p == NULL)		/* malformed unum: no '/' */
				continue;
			s = p - dimm->dimm_unum;
			p = fmd_hdl_zalloc(hdl, s+1, FMD_SLEEP);
			(void) strncpy(p, dimm->dimm_unum, s);
			*(p + s) = '\0';
			return (p);
		}
	}
	return (NULL);
}
Example 15
/*
 * Allocate and initialize a transport instance handle.
 * Return hdl pointer for success, NULL for failure.
 */
static exs_hdl_t *
exs_hdl_alloc(fmd_hdl_t *hdl, char *endpoint_id,
    int (*cb_func)(fmd_hdl_t *hdl, etm_xport_conn_t conn, etm_cb_flag_t flag,
    void *arg), void *cb_func_arg, int dom)
{
	exs_hdl_t *hp;

	hp = fmd_hdl_zalloc(hdl, sizeof (exs_hdl_t), FMD_SLEEP);

	hp->h_endpt_id = fmd_hdl_strdup(hdl, endpoint_id, FMD_SLEEP);
	hp->h_dom = dom;
	hp->h_client.c_sd = EXS_SD_FREE;
	hp->h_server.c_sd = EXS_SD_FREE;
	hp->h_tid = EXS_TID_FREE;
	hp->h_destroy = 0;
	hp->h_hdl = hdl;
	hp->h_cb_func = cb_func;
	hp->h_cb_func_arg = cb_func_arg;
	hp->h_quit = 0;

	return (hp);
}
Example 16
/*ARGSUSED*/
int
cma_page_retire(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *asru,
                const char *uuid, boolean_t repair)
{
    cma_page_t *page;
    uint64_t pageaddr;
    const char *action = repair ? "unretire" : "retire";
    int rc;
    nvlist_t *rsrc = NULL, *asrucp = NULL, *hcsp;

    (void) nvlist_lookup_nvlist(nvl, FM_FAULT_RESOURCE, &rsrc);

    if (nvlist_dup(asru, &asrucp, 0) != 0) {
        fmd_hdl_debug(hdl, "page retire nvlist dup failed\n");
        return (CMA_RA_FAILURE);
    }

    /* It should already be expanded, but we'll do it again anyway */
    if (fmd_nvl_fmri_expand(hdl, asrucp) < 0) {
        fmd_hdl_debug(hdl, "failed to expand page asru\n");
        cma_stats.bad_flts.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_FAILURE);
    }

    if (!repair && !fmd_nvl_fmri_present(hdl, asrucp)) {
        fmd_hdl_debug(hdl, "page retire overtaken by events\n");
        cma_stats.page_nonent.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_SUCCESS);
    }

    /* Figure out physaddr from resource or asru */
    if (rsrc == NULL ||
            nvlist_lookup_nvlist(rsrc, FM_FMRI_HC_SPECIFIC, &hcsp) != 0 ||
            (nvlist_lookup_uint64(hcsp, "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR,
                                  &pageaddr) != 0 && nvlist_lookup_uint64(hcsp,
                                          FM_FMRI_HC_SPECIFIC_PHYSADDR, &pageaddr) != 0)) {
        if (nvlist_lookup_uint64(asrucp, FM_FMRI_MEM_PHYSADDR,
                                 &pageaddr) != 0) {
            fmd_hdl_debug(hdl, "mem fault missing 'physaddr'\n");
            cma_stats.bad_flts.fmds_value.ui64++;
            nvlist_free(asrucp);
            return (CMA_RA_FAILURE);
        }
    }

    if (repair) {
        if (!cma.cma_page_dounretire) {
            fmd_hdl_debug(hdl, "suppressed unretire of page %llx\n",
                          (u_longlong_t)pageaddr);
            cma_stats.page_supp.fmds_value.ui64++;
            nvlist_free(asrucp);
            return (CMA_RA_SUCCESS);
        }
        /* If unretire via topo fails, we fall back to legacy way */
        if (rsrc == NULL || (rc = fmd_nvl_fmri_unretire(hdl, rsrc)) < 0)
            rc = cma_fmri_page_unretire(hdl, asrucp);
    } else {
        if (!cma.cma_page_doretire) {
            fmd_hdl_debug(hdl, "suppressed retire of page %llx\n",
                          (u_longlong_t)pageaddr);
            cma_stats.page_supp.fmds_value.ui64++;
            nvlist_free(asrucp);
            return (CMA_RA_FAILURE);
        }
        /* If retire via topo fails, we fall back to legacy way */
        if (rsrc == NULL || (rc = fmd_nvl_fmri_retire(hdl, rsrc)) < 0)
            rc = cma_fmri_page_retire(hdl, asrucp);
    }

    if (rc == FMD_AGENT_RETIRE_DONE) {
        fmd_hdl_debug(hdl, "%sd page 0x%llx\n",
                      action, (u_longlong_t)pageaddr);
        if (repair)
            cma_stats.page_repairs.fmds_value.ui64++;
        else
            cma_stats.page_flts.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_SUCCESS);
    } else if (repair || rc != FMD_AGENT_RETIRE_ASYNC) {
        fmd_hdl_debug(hdl, "%s of page 0x%llx failed, will not "
                      "retry: %s\n", action, (u_longlong_t)pageaddr,
                      strerror(errno));

        cma_stats.page_fails.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_FAILURE);
    }

    /*
     * The page didn't immediately retire.  We'll need to periodically
     * check to see if it has been retired.
     */
    fmd_hdl_debug(hdl, "page didn't retire - sleeping\n");

    page = fmd_hdl_zalloc(hdl, sizeof (cma_page_t), FMD_SLEEP);
    page->pg_addr = pageaddr;
    if (rsrc != NULL)
        (void) nvlist_dup(rsrc, &page->pg_rsrc, 0);
    page->pg_asru = asrucp;
    if (uuid != NULL)
        page->pg_uuid = fmd_hdl_strdup(hdl, uuid, FMD_SLEEP);

    page->pg_next = cma.cma_pages;
    cma.cma_pages = page;

    if (cma.cma_page_timerid != 0)
        fmd_timer_remove(hdl, cma.cma_page_timerid);

    cma.cma_page_curdelay = cma.cma_page_mindelay;

    cma.cma_page_timerid =
        fmd_timer_install(hdl, NULL, NULL, cma.cma_page_curdelay);

    /* Don't free asrucp here.  This FMRI will be needed for retry. */
    return (CMA_RA_FAILURE);
}
Example 17
etm_xport_conn_t
etm_xport_open(fmd_hdl_t *hdl, etm_xport_addr_t addr)
{
    _etm_xport_addr_t	*_addr;		/* address handle */
    _etm_xport_conn_t	*_conn;		/* connection handle */
    ssize_t			n;		/* gen use */

    if ((n = etm_xport_valid_addr(addr)) < 0) {
        errno = (-n);
        return (NULL);
    }

    _addr = etm_xport_dup_addr(hdl, addr);

    /* allocate a connection handle and start populating it */

    _conn = fmd_hdl_zalloc(hdl, sizeof (_etm_xport_conn_t), FMD_SLEEP);

    (void) pthread_mutex_lock(&etm_xport_vldc_lock);

    if (use_vldc == 0 || etm_xport_vldc_conn == NULL) {
        if ((_conn->fd = open(_addr->fn,
                              ETM_XPORT_OPEN_FLAGS, 0)) == -1) {
            /* errno assumed set by above call */
            etm_xport_free_addr(hdl, _addr);
            fmd_hdl_free(hdl, _conn, sizeof (_etm_xport_conn_t));
            etm_xport_stats.xport_os_open_fail.fmds_value.ui64++;
            (void) pthread_mutex_unlock(&etm_xport_vldc_lock);
            return (NULL);
        }
    }

    if (use_vldc && etm_xport_vldc_conn == NULL) {
        vldc_opt_op_t op;

        /* Set the channel to reliable mode */
        op.op_sel = VLDC_OP_SET;
        op.opt_sel = VLDC_OPT_MODE;
        op.opt_val = LDC_MODE_RELIABLE;

        if (ioctl(_conn->fd, VLDC_IOCTL_OPT_OP, &op) != 0) {
            /* errno assumed set by above call */
            (void) close(_conn->fd);
            etm_xport_free_addr(hdl, _addr);
            fmd_hdl_free(hdl, _conn, sizeof (_etm_xport_conn_t));
            etm_xport_stats.xport_os_ioctl_fail.fmds_value.ui64++;
            (void) pthread_mutex_unlock(&etm_xport_vldc_lock);
            return (NULL);
        }

        etm_xport_vldc_conn = _conn;
    } else if (use_vldc && etm_xport_vldc_conn != NULL) {
        _conn->fd = dup(etm_xport_vldc_conn->fd);
    }

    (void) pthread_mutex_unlock(&etm_xport_vldc_lock);

    /* return the fully formed connection handle */

    _conn->magic_num = ETM_XPORT_DD_MAGIC_CONN;
    _conn->addr = _addr;

    return (_conn);

} /* etm_xport_open() */
Example 18
static ssize_t
etm_xport_buffered_read(fmd_hdl_t *hdl, _etm_xport_conn_t *_conn,
                        void *buf, size_t byte_cnt)
{
    ssize_t		i, n;		/* gen use */

    /* perform one-time initializations */

    /*
     * Design_Note:
     *
     * These initializations are not done in etm_xport_init() because
     * the connection/device is not yet open and hence the MTU size
     * is not yet known. However, the corresponding cleanup is done
     * in etm_xport_fini(). The buffering for byte stream semantics
     * should be done on a per device vs per connection basis; the
     * MTU size is assumed to remain constant across all connections.
     */

    if (etm_xport_irb_mtu_sz == 0) {
        if ((n = etm_xport_get_opt(hdl, _conn,
                                   ETM_XPORT_OPT_MTU_SZ)) < 0) {
            etm_xport_irb_mtu_sz = ETM_XPORT_MTU_SZ_DEF;
        } else {
            etm_xport_irb_mtu_sz = n;
        }
    }
    if (etm_xport_irb_area == NULL) {
        etm_xport_irb_area = fmd_hdl_zalloc(hdl,
                                            2 * etm_xport_irb_mtu_sz, FMD_SLEEP);
        etm_xport_irb_head = etm_xport_irb_area;
        etm_xport_irb_tail = etm_xport_irb_head;
    }

    /* sanity check the byte count now that the MTU is known */

    if (byte_cnt > etm_xport_irb_mtu_sz) {
        etm_xport_stats.xport_buffread_badargs.fmds_value.ui64++;
        return (-EINVAL);
    }

    /* if intermediate buffer can satisfy request do so w/out xport read */

    if (byte_cnt <= (etm_xport_irb_tail - etm_xport_irb_head)) {
        (void) memcpy(buf, etm_xport_irb_head, byte_cnt);
        etm_xport_irb_head += byte_cnt;
        if (etm_xport_debug_lvl >= 2) {
            fmd_hdl_debug(hdl, "info: quik buffered read == %d\n",
                          byte_cnt);
        }
        return (byte_cnt);
    }

    /* slide buffer contents to front to make room for [MTU] more bytes */

    n = etm_xport_irb_tail - etm_xport_irb_head;
    (void) memmove(etm_xport_irb_area, etm_xport_irb_head, n);
    etm_xport_irb_head = etm_xport_irb_area;
    etm_xport_irb_tail = etm_xport_irb_head + n;

    /*
     * peek to see how much data is avail and read all of it;
     * there is no race condition between peeking and reading
     * due to unbuffered design of the device driver
     */
    if (use_vldc) {
        pollfd_t pollfd;

        pollfd.events = POLLIN;
        pollfd.revents = 0;
        pollfd.fd = _conn->fd;

        if ((n = poll(&pollfd, 1, -1)) < 1) {
            if (n == 0)
                return (-EIO);
            else
                return (-errno);
        }

        /*
         * set i to the maximum size --- read(..., i) below will
         * pull in n bytes (n <= i) anyway
         */
        i = etm_xport_irb_mtu_sz;
    } else {
        if ((i = etm_xport_raw_peek(hdl, _conn, etm_xport_irb_tail,
                                    etm_xport_irb_mtu_sz)) < 0) {
            return (i);
        }
    }
    if ((n = read(_conn->fd, etm_xport_irb_tail, i)) < 0) {
        /* errno assumed set by above call */
        etm_xport_stats.xport_os_read_fail.fmds_value.ui64++;
        return (-errno);
    }
    etm_xport_irb_tail += n;

    /* satisfy request as best we can with what we now have */

    n = MIN(byte_cnt, (etm_xport_irb_tail - etm_xport_irb_head));
    (void) memcpy(buf, etm_xport_irb_head, n);
    etm_xport_irb_head += n;
    if (etm_xport_debug_lvl >= 2) {
        fmd_hdl_debug(hdl, "info: slow buffered read == %d\n", n);
    }
    return (n);

} /* etm_xport_buffered_read() */
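The Design_Note's intermediate read buffer is a compaction buffer rather than a ring: unread bytes slide back to the front with memmove(), fresh bytes land at the tail, and reads advance the head. A reduced standalone model of that discipline, assuming a fixed 16-byte "MTU" and feeding from strings instead of a device:

#include <stdio.h>
#include <string.h>

#define	MTU_SZ	16

static char	irb_area[2 * MTU_SZ];	/* intermediate read buffer */
static char	*irb_head = irb_area;	/* next unread byte */
static char	*irb_tail = irb_area;	/* one past last buffered byte */

/* Consume up to byte_cnt buffered bytes into buf; return the count. */
static size_t
irb_read(void *buf, size_t byte_cnt)
{
	size_t n = (size_t)(irb_tail - irb_head);

	if (byte_cnt < n)
		n = byte_cnt;
	(void) memcpy(buf, irb_head, n);
	irb_head += n;
	return (n);
}

/* Slide unread bytes to the front, then append up to MTU_SZ new bytes. */
static void
irb_fill(const char *src, size_t len)
{
	size_t n = (size_t)(irb_tail - irb_head);

	(void) memmove(irb_area, irb_head, n);
	irb_head = irb_area;
	irb_tail = irb_head + n;

	if (len > MTU_SZ)
		len = MTU_SZ;			/* arrivals are one MTU max */
	if (len > sizeof (irb_area) - n)
		len = sizeof (irb_area) - n;	/* never overflow the area */
	(void) memcpy(irb_tail, src, len);
	irb_tail += len;
}

int
main(void)
{
	char out[8];
	size_t n;

	irb_fill("hello, buffered", 15);
	n = irb_read(out, 5);
	(void) printf("%.*s\n", (int)n, out);	/* "hello" */

	irb_fill(" world", 6);			/* compacts, then appends */
	n = irb_read(out, 8);
	(void) printf("%.*s\n", (int)n, out);	/* ", buffer" */
	return (0);
}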
Example 19
nvlist_t *
cmd_mkboard_fru(fmd_hdl_t *hdl, char *frustr, char *serialstr, char *partstr)
{
	char *nac, *nac_name;
	int n, i, len;
	nvlist_t *fru, **hc_list;

	if (frustr == NULL)
		return (NULL);

	if ((nac_name = strstr(frustr, "MB")) == NULL)
		return (NULL);

	len = strlen(nac_name) + 1;

	nac = fmd_hdl_zalloc(hdl, len, FMD_SLEEP);
	(void) strcpy(nac, nac_name);

	n = cmd_count_components(nac, '/');

	fmd_hdl_debug(hdl, "cmd_mkboard_fru: nac=%s components=%d\n", nac, n);

	hc_list = fmd_hdl_zalloc(hdl, sizeof (nvlist_t *)*n, FMD_SLEEP);

	for (i = 0; i < n; i++) {
		(void) nvlist_alloc(&hc_list[i],
		    NV_UNIQUE_NAME|NV_UNIQUE_NAME_TYPE, 0);
	}

	if (cmd_breakup_components(nac, "/", hc_list) < 0) {
		for (i = 0; i < n; i++) {
			if (hc_list[i] != NULL)
			    nvlist_free(hc_list[i]);
		}
		fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
		fmd_hdl_free(hdl, nac, len);
		return (NULL);
	}

	if (nvlist_alloc(&fru, NV_UNIQUE_NAME, 0) != 0) {
		for (i = 0; i < n; i++) {
			if (hc_list[i] != NULL)
			    nvlist_free(hc_list[i]);
		}
		fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
		fmd_hdl_free(hdl, nac, len);
		return (NULL);
	}

	if (nvlist_add_uint8(fru, FM_VERSION, FM_HC_SCHEME_VERSION) != 0 ||
	    nvlist_add_string(fru, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0 ||
	    nvlist_add_string(fru, FM_FMRI_HC_ROOT, "") != 0 ||
	    nvlist_add_uint32(fru, FM_FMRI_HC_LIST_SZ, n) != 0 ||
	    nvlist_add_nvlist_array(fru, FM_FMRI_HC_LIST, hc_list, n) != 0) {
		for (i = 0; i < n; i++) {
			if (hc_list[i] != NULL)
			    nvlist_free(hc_list[i]);
		}
		fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
		fmd_hdl_free(hdl, nac, len);
		nvlist_free(fru);
		return (NULL);
	}

	for (i = 0; i < n; i++) {
		if (hc_list[i] != NULL)
		    nvlist_free(hc_list[i]);
	}
	fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
	fmd_hdl_free(hdl, nac, len);

	if ((serialstr != NULL &&
	    nvlist_add_string(fru, FM_FMRI_HC_SERIAL_ID, serialstr) != 0) ||
	    (partstr != NULL &&
	    nvlist_add_string(fru, FM_FMRI_HC_PART, partstr) != 0)) {
		nvlist_free(fru);
		return (NULL);
	}

	return (fru);
}
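cmd_mkboard_fru() repeats the same free-everything block on four exit paths; the usual cure, visible in get_notify_prefs() in Example 8 with its pref_done label, is a single cleanup tail reached by goto. A generic standalone sketch of that shape (step1()/step2() are invented failure points, not cmd module code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
step1(char *buf)
{
	(void) strcpy(buf, "ok");
	return (0);
}

static int
step2(const char *buf)
{
	return (strcmp(buf, "ok") == 0 ? 0 : -1);
}

/*
 * Every exit path funnels through the one cleanup tail, so the
 * free() calls are written once instead of once per error branch.
 */
static int
build(void)
{
	char *a = NULL, *b = NULL;
	int ret = -1;

	if ((a = calloc(1, 16)) == NULL)
		goto done;
	if ((b = calloc(1, 16)) == NULL)
		goto done;
	if (step1(a) != 0 || step2(a) != 0)
		goto done;

	(void) printf("built: %s\n", a);
	ret = 0;
done:
	free(b);	/* free(NULL) is a no-op, so partial setup is fine */
	free(a);
	return (ret);
}

int
main(void)
{
	return (build() != 0);
}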
Example 20
cmd_dimm_t *
cmd_dimm_create(fmd_hdl_t *hdl, nvlist_t *asru)
{
	cmd_dimm_t *dimm;
	const char *unum;
	nvlist_t *fmri;
	size_t nserids = 0;
	char **serids = NULL;

	if (!fmd_nvl_fmri_present(hdl, asru)) {
		fmd_hdl_debug(hdl, "dimm_lookup: discarding old ereport\n");
		return (NULL);
	}

	if ((unum = cmd_fmri_get_unum(asru)) == NULL) {
		CMD_STAT_BUMP(bad_mem_asru);
		return (NULL);
	}

#ifdef sun4v
	if (nvlist_lookup_string_array(asru, FM_FMRI_HC_SERIAL_ID, &serids,
	    &nserids) != 0) {
		fmd_hdl_debug(hdl, "sun4v mem: FMRI does not"
		    " have serial_ids\n");
		CMD_STAT_BUMP(bad_mem_asru);
		return (NULL);
	}
#endif
	fmri = cmd_mem_fmri_create(unum, serids, nserids);
	if (fmd_nvl_fmri_expand(hdl, fmri) < 0) {
		CMD_STAT_BUMP(bad_mem_asru);
		nvlist_free(fmri);
		return (NULL);
	}

	fmd_hdl_debug(hdl, "dimm_create: creating new DIMM %s\n", unum);
	CMD_STAT_BUMP(dimm_creat);

	dimm = fmd_hdl_zalloc(hdl, sizeof (cmd_dimm_t), FMD_SLEEP);
	dimm->dimm_nodetype = CMD_NT_DIMM;
	dimm->dimm_version = CMD_DIMM_VERSION;

	cmd_bufname(dimm->dimm_bufname, sizeof (dimm->dimm_bufname), "dimm_%s",
	    unum);
	cmd_fmri_init(hdl, &dimm->dimm_asru, fmri, "dimm_asru_%s", unum);

	nvlist_free(fmri);

	(void) nvlist_lookup_string(dimm->dimm_asru_nvl, FM_FMRI_MEM_UNUM,
	    (char **)&dimm->dimm_unum);

	dimm_attach_to_bank(hdl, dimm);

	cmd_mem_retirestat_create(hdl, &dimm->dimm_retstat, dimm->dimm_unum, 0,
	    CMD_DIMM_STAT_PREFIX);

	cmd_list_append(&cmd.cmd_dimms, dimm);
	cmd_dimm_dirty(hdl, dimm);

	return (dimm);
}