static uint32_t
get_cap_conf(fmd_hdl_t *hdl)
{
    uint32_t fma_cap;
#if defined(__x86)
    int found = 0;
    cpu_tbl_t *cl, ci;

    if (fma_cap_cpu_info(&ci) == 0) {
        fmd_hdl_debug(hdl, "Got CPU info: vendor=%s, family=%d, "
            "model=%d\n", ci.vendor, ci.family, ci.model);
        for (cl = fma_cap_list; cl->propname != NULL; cl++) {
            if (strncmp(ci.vendor, cl->vendor,
                X86_VENDOR_STRLEN) == 0 &&
                ci.family == cl->family &&
                ci.model == cl->model) {
                found++;
                break;
            }
        }
    } else {
        fmd_hdl_debug(hdl, "Failed to get CPU info");
    }

    if (found) {
        fma_cap = fmd_prop_get_int32(hdl, cl->propname);
        fmd_hdl_debug(hdl, "Found property, FMA capability=0x%x",
            fma_cap);
    } else {
#endif
        fma_cap = fmd_prop_get_int32(hdl, "default_fma_cap");
        fmd_hdl_debug(hdl, "Didn't find FMA capability property, "
            "use default=0x%x", fma_cap);
#if defined(__x86)
    }
#endif

    return (fma_cap);
}
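/*
 * Illustrative sketch, not part of the original source: the lookup above
 * walks fma_cap_list, a sentinel-terminated table, which is a common idiom
 * for mapping CPU identity to configuration. A minimal standalone version
 * of the same pattern; the table contents, types, and names here are
 * hypothetical.
 */
#include <string.h>

typedef struct cap_entry {
    const char *vendor;     /* NULL vendor terminates the table */
    int family;
    const char *propname;
} cap_entry_t;

static const cap_entry_t cap_table[] = {
    { "GenuineIntel", 6, "intel_fma_cap" },
    { "AuthenticAMD", 16, "amd_fma_cap" },
    { NULL, 0, NULL }       /* sentinel */
};

static const char *
cap_lookup(const char *vendor, int family)
{
    const cap_entry_t *e;

    for (e = cap_table; e->vendor != NULL; e++) {
        if (strcmp(e->vendor, vendor) == 0 && e->family == family)
            return (e->propname);
    }
    return ("default_fma_cap");     /* fall back, as the module does */
}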
static ssize_t
etm_xport_raw_peek(fmd_hdl_t *hdl, _etm_xport_conn_t *_conn,
    void *buf, size_t byte_cnt)
{
    ssize_t rv;                     /* ret val */
    ssize_t n;                      /* gen use */
    etm_xport_msg_peek_t peek_ctl;  /* struct for peeking */

    rv = 0;

    /* sanity check args */
    if ((hdl == NULL) || (_conn == NULL) || (buf == NULL)) {
        etm_xport_stats.xport_rawpeek_badargs.fmds_value.ui64++;
        return (-EINVAL);
    }

    if ((etm_xport_irb_mtu_sz > 0) && (byte_cnt > etm_xport_irb_mtu_sz)) {
        etm_xport_stats.xport_rawpeek_badargs.fmds_value.ui64++;
        return (-EINVAL);
    }

    /* try to peek requested amt of data */
    peek_ctl.pk_buf = buf;
    peek_ctl.pk_buflen = byte_cnt;
    peek_ctl.pk_flags = 0;
    peek_ctl.pk_rsvd = 0;

    if (etm_xport_should_fake_dd) {
        n = etm_fake_ioctl(_conn->fd, ETM_XPORT_IOCTL_DATA_PEEK,
            &peek_ctl);
    } else {
        n = ioctl(_conn->fd, ETM_XPORT_IOCTL_DATA_PEEK, &peek_ctl);
    }

    if (n < 0) {
        /* errno assumed set by above call */
        etm_xport_stats.xport_os_peek_fail.fmds_value.ui64++;
        rv = (-errno);
    } else {
        rv = peek_ctl.pk_buflen;
    }

    if (etm_xport_debug_lvl >= 3) {
        fmd_hdl_debug(hdl, "info: ioctl(..., _PEEK) ~= %d bytes\n",
            rv);
    }

    return (rv);
} /* etm_xport_raw_peek() */
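/*
 * Illustrative sketch, not part of the original source: the _PEEK ioctl
 * above examines pending data without consuming it. On a plain socket the
 * same semantics come from recv() with MSG_PEEK; a later recv() without
 * the flag returns the same bytes again. sock_peek is a hypothetical
 * helper and assumes a connected socket descriptor sd.
 */
#include <sys/socket.h>

static ssize_t
sock_peek(int sd, void *buf, size_t len)
{
    /* look at queued bytes but leave them in the socket buffer */
    return (recv(sd, buf, len, MSG_PEEK)); /* -1 with errno on failure */
}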
/*ARGSUSED*/
cmd_evdisp_t
cmd_mem_synd_check(fmd_hdl_t *hdl, uint64_t afar, uint8_t afar_status,
    uint16_t synd, uint8_t synd_status, cmd_cpu_t *cpu)
{
    if (synd == CH_POISON_SYND_FROM_XXU_WRITE ||
        ((cpu->cpu_type == CPU_ULTRASPARC_IIIi ||
        cpu->cpu_type == CPU_ULTRASPARC_IIIiplus) &&
        synd == CH_POISON_SYND_FROM_XXU_WRMERGE)) {
        fmd_hdl_debug(hdl,
            "discarding UE due to magic syndrome %x\n", synd);
        return (CMD_EVD_UNUSED);
    }
    return (CMD_EVD_OK);
}
void
cmd_branch_add_dimm(fmd_hdl_t *hdl, cmd_branch_t *branch, cmd_dimm_t *dimm)
{
    cmd_branch_memb_t *bm;

    if (dimm == NULL)
        return;

    fmd_hdl_debug(hdl, "Attaching dimm %s to branch %s\n",
        dimm->dimm_unum, branch->branch_unum);

    bm = fmd_hdl_zalloc(hdl, sizeof (cmd_branch_memb_t), FMD_SLEEP);
    bm->dimm = dimm;
    cmd_list_append(&branch->branch_dimms, bm);
}
/*
 * Open a connection with the given endpoint.
 * Return etm_xport_conn_t for success, NULL and set errno for failure.
 */
etm_xport_conn_t
etm_xport_open(fmd_hdl_t *hdl, etm_xport_hdl_t tlhdl)
{
    int flags;
    exs_hdl_t *hp = (exs_hdl_t *)tlhdl;

    if (hp->h_destroy) {
        fmd_thr_destroy(hp->h_hdl, hp->h_tid);
        hp->h_tid = EXS_TID_FREE;
        hp->h_destroy = 0;
    }

    if (hp->h_client.c_sd == EXS_SD_FREE) {
        if (exs_prep_client(hp) != 0)
            return (NULL);
    }

    /* Set the socket to be non-blocking */
    flags = fcntl(hp->h_client.c_sd, F_GETFL, 0);
    (void) fcntl(hp->h_client.c_sd, F_SETFL, flags | O_NONBLOCK);

    if ((connect(hp->h_client.c_sd,
        (struct sockaddr *)&hp->h_client.c_saddr,
        hp->h_client.c_len)) == -1) {
        if (errno != EINPROGRESS) {
            fmd_hdl_debug(hdl, "xport - failed to connect to %s",
                hp->h_endpt_id);
            EXS_CLOSE_CLR(hp->h_client);
            return (NULL);
        }
    }

    fmd_hdl_debug(hdl, "xport - connected client socket for %s",
        hp->h_endpt_id);

    return (&hp->h_client);
}
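/*
 * Illustrative sketch, not part of the original source: after the
 * non-blocking connect() above returns EINPROGRESS, the connection
 * completes in the background. The conventional way to detect completion
 * is to poll the socket for writability and then read SO_ERROR for the
 * deferred connect(2) result. connect_finished is a hypothetical helper.
 */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

static int
connect_finished(int sd, int timeout_ms)
{
    struct pollfd pfd;
    int err = 0;
    socklen_t len = sizeof (err);

    pfd.fd = sd;
    pfd.events = POLLOUT;   /* socket becomes writable on completion */
    pfd.revents = 0;

    if (poll(&pfd, 1, timeout_ms) != 1)
        return (-1);        /* timed out or poll failed */

    /* SO_ERROR holds the deferred result of the connect attempt */
    if (getsockopt(sd, SOL_SOCKET, SO_ERROR, &err, &len) == -1)
        return (-1);

    if (err != 0) {
        errno = err;
        return (-1);        /* connect ultimately failed */
    }
    return (0);             /* connection is established */
}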
int
etm_xport_fini(fmd_hdl_t *hdl)
{
    fmd_hdl_debug(hdl, "info: xport finalizing\n");

    if (use_vldc && (etm_xport_vldc_conn != NULL)) {
        (void) etm_xport_close(hdl, etm_xport_vldc_conn);
        etm_xport_vldc_conn = NULL;
    }

    /* free any long-standing properties from FMD */
    fmd_prop_free_string(hdl, etm_xport_addrs);

    /* cleanup the intermediate read buffer */
    if (etm_xport_irb_tail != etm_xport_irb_head) {
        fmd_hdl_debug(hdl, "warning: xport %d bytes stale data\n",
            (int)(etm_xport_irb_tail - etm_xport_irb_head));
    }
    fmd_hdl_free(hdl, etm_xport_irb_area, 2 * etm_xport_irb_mtu_sz);
    etm_xport_irb_area = NULL;
    etm_xport_irb_head = NULL;
    etm_xport_irb_tail = NULL;
    etm_xport_irb_mtu_sz = 0;

    /* cleanup statistics from FMD */
    (void) fmd_stat_destroy(hdl,
        sizeof (etm_xport_stats) / sizeof (fmd_stat_t),
        (fmd_stat_t *)&etm_xport_stats);

    fmd_hdl_debug(hdl, "info: xport finalized ok\n");
    return (0);
} /* etm_xport_fini() */
/*
 * Initialize and set up any transport infrastructure before any connections
 * are opened.
 * Return etm_xport_hdl_t for success, NULL for failure.
 */
etm_xport_hdl_t
etm_xport_init(fmd_hdl_t *hdl, char *endpoint_id,
    int (*cb_func)(fmd_hdl_t *hdl, etm_xport_conn_t conn,
    etm_cb_flag_t flag, void *arg), void *cb_func_arg)
{
    exs_hdl_t *hp, *curr;
    int dom;

    if (exs_get_id(hdl, endpoint_id, &dom))
        return (NULL);

    (void) pthread_mutex_lock(&List_lock);

    /* Check for a duplicate endpoint_id on the list */
    for (curr = Exh_head; curr; curr = curr->h_next) {
        if (dom == curr->h_dom) {
            fmd_hdl_debug(hdl, "xport - init failed, "
                "duplicate domain id : %d\n", dom);
            (void) pthread_mutex_unlock(&List_lock);
            return (NULL);
        }
    }

    if (Exh_head == NULL) {
        /* Do one-time initializations */
        exs_filter_init(hdl);

        /* Initialize the accept/listen vars */
        Acc.c_sd = EXS_SD_FREE;
        Acc_tid = EXS_TID_FREE;
        Acc_destroy = 0;
        Acc_quit = 0;
    }

    hp = exs_hdl_alloc(hdl, endpoint_id, cb_func, cb_func_arg, dom);

    /* Add this transport instance handle to the list */
    hp->h_next = Exh_head;
    Exh_head = hp;

    (void) pthread_mutex_unlock(&List_lock);

    exs_prep_accept(hdl, dom);

    return ((etm_xport_hdl_t)hp);
}
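/*
 * Illustrative sketch, not part of the original source: the registration
 * above is a mutex-guarded scan-then-insert on a singly-linked list,
 * rejecting duplicates by key while the lock is held so the check and the
 * insert are atomic. A minimal standalone version with hypothetical names:
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct node {
    int key;
    struct node *next;
} node_t;

static node_t *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static int
register_key(int key)
{
    node_t *n, *curr;

    (void) pthread_mutex_lock(&list_lock);
    for (curr = head; curr != NULL; curr = curr->next) {
        if (curr->key == key) {
            (void) pthread_mutex_unlock(&list_lock);
            return (-1);    /* duplicate, reject */
        }
    }
    if ((n = malloc(sizeof (node_t))) == NULL) {
        (void) pthread_mutex_unlock(&list_lock);
        return (-1);
    }
    n->key = key;
    n->next = head;         /* push on the front, as the module does */
    head = n;
    (void) pthread_mutex_unlock(&list_lock);
    return (0);
}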
/*
 * If the case has been solved, there is no need to check the dimmlist.
 * If the case has not been solved, the branch is valid if there is at
 * least one existing dimm in the branch.
 */
void
cmd_branch_validate(fmd_hdl_t *hdl)
{
    cmd_branch_t *branch, *next;

    fmd_hdl_debug(hdl, "cmd_branch_validate\n");

    for (branch = cmd_list_next(&cmd.cmd_branches); branch != NULL;
        branch = next) {
        next = cmd_list_next(branch);
        if (branch->branch_case.cc_cp != NULL &&
            fmd_case_solved(hdl, branch->branch_case.cc_cp))
            continue;
        if (branch_exist(hdl, branch))
            continue;
        cmd_branch_destroy(hdl, branch);
    }
}
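/*
 * Illustrative sketch, not part of the original source: the loop above
 * saves the next element before possibly destroying the current one, the
 * standard way to delete from a linked list while iterating over it.
 * Minimal standalone form with a hypothetical item type:
 */
#include <stdlib.h>

struct item {
    struct item *next;
    int dead;
};

static void
prune(struct item **headp)
{
    struct item *it, *next, **prevp = headp;

    for (it = *headp; it != NULL; it = next) {
        next = it->next;        /* save before any free() */
        if (it->dead) {
            *prevp = next;      /* unlink the current element */
            free(it);
        } else {
            prevp = &it->next;
        }
    }
}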
gmem_dimm_t *
gmem_dimm_lookup(fmd_hdl_t *hdl, nvlist_t *asru)
{
    gmem_dimm_t *dimm;
    char *serial;
    int err;

    err = nvlist_lookup_string(asru, FM_FMRI_HC_SERIAL_ID, &serial);

    if (err != 0) {
        fmd_hdl_debug(hdl, "Can't get dimm serial number\n");
        GMEM_STAT_BUMP(bad_mem_resource);
        return (NULL);
    }

    dimm = dimm_lookup_by_serial(serial);
    return (dimm);
}
/*
 * Solve a given ZFS case. This first checks to make sure the diagnosis is
 * still valid, as well as cleaning up any pending timer associated with the
 * case.
 */
static void
zfs_case_solve(fmd_hdl_t *hdl, zfs_case_t *zcp, const char *faultname,
    boolean_t checkunusable)
{
    nvlist_t *detector, *fault;
    boolean_t serialize;
    nvlist_t *fru = NULL;

    fmd_hdl_debug(hdl, "solving fault '%s'", faultname);

    /*
     * Construct the detector from the case data. The detector is in the
     * ZFS scheme, and is either the pool or the vdev, depending on
     * whether this is a vdev or pool fault.
     */
    detector = fmd_nvl_alloc(hdl, FMD_SLEEP);

    (void) nvlist_add_uint8(detector, FM_VERSION, ZFS_SCHEME_VERSION0);
    (void) nvlist_add_string(detector, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS);
    (void) nvlist_add_uint64(detector, FM_FMRI_ZFS_POOL,
        zcp->zc_data.zc_pool_guid);
    if (zcp->zc_data.zc_vdev_guid != 0) {
        (void) nvlist_add_uint64(detector, FM_FMRI_ZFS_VDEV,
            zcp->zc_data.zc_vdev_guid);
    }

    fault = fmd_nvl_create_fault(hdl, faultname, 100, detector,
        fru, detector);
    fmd_case_add_suspect(hdl, zcp->zc_case, fault);

    nvlist_free(fru);

    fmd_case_solve(hdl, zcp->zc_case);

    serialize = B_FALSE;
    if (zcp->zc_data.zc_has_remove_timer) {
        fmd_timer_remove(hdl, zcp->zc_remove_timer);
        zcp->zc_data.zc_has_remove_timer = 0;
        serialize = B_TRUE;
    }
    if (serialize)
        zfs_case_serialize(hdl, zcp);

    nvlist_free(detector);
}
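/*
 * Illustrative sketch, not part of the original source: the detector
 * construction above builds a ZFS-scheme nvlist. A standalone libnvpair
 * version of the same shape, with error checking instead of FMD_SLEEP
 * allocation; the plain string keys ("scheme", "pool", "vdev") stand in
 * for the FM_* macros used by the module.
 */
#include <libnvpair.h>

static nvlist_t *
make_zfs_detector(uint64_t pool_guid, uint64_t vdev_guid)
{
    nvlist_t *nvl;

    if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
        return (NULL);

    if (nvlist_add_string(nvl, "scheme", "zfs") != 0 ||
        nvlist_add_uint64(nvl, "pool", pool_guid) != 0 ||
        (vdev_guid != 0 &&
        nvlist_add_uint64(nvl, "vdev", vdev_guid) != 0)) {
        nvlist_free(nvl);   /* frees all pairs added so far */
        return (NULL);
    }
    return (nvl);
}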
cmd_branch_t *
cmd_branch_lookup(fmd_hdl_t *hdl, nvlist_t *asru)
{
    cmd_branch_t *branch;
    const char *unum;

    if ((unum = cmd_fmri_get_unum(asru)) == NULL) {
        CMD_STAT_BUMP(bad_mem_asru);
        return (NULL);
    }

    for (branch = cmd_list_next(&cmd.cmd_branches); branch != NULL;
        branch = cmd_list_next(branch)) {
        if (strcmp(branch->branch_unum, unum) == 0)
            return (branch);
    }

    fmd_hdl_debug(hdl, "cmd_branch_lookup: discarding old ereport\n");
    return (NULL);
}
void
cmd_branch_remove_dimm(fmd_hdl_t *hdl, cmd_branch_t *branch, cmd_dimm_t *dimm)
{
    cmd_branch_memb_t *bm;

    fmd_hdl_debug(hdl, "Detaching dimm %s from branch %s\n",
        dimm->dimm_unum, branch->branch_unum);

    for (bm = cmd_list_next(&branch->branch_dimms); bm != NULL;
        bm = cmd_list_next(bm)) {
        if (bm->dimm == dimm) {
            cmd_list_delete(&branch->branch_dimms, bm);
            fmd_hdl_free(hdl, bm, sizeof (cmd_branch_memb_t));
            return;
        }
    }

    fmd_hdl_abort(hdl,
        "Attempt to disconnect dimm from non-parent branch\n");
}
int
cma_cpu_statechange(fmd_hdl_t *hdl, nvlist_t *asru, const char *uuid,
    int cpustate, boolean_t repair)
{
    int i;
    uint_t cpuid;

    if (nvlist_lookup_uint32(asru, FM_FMRI_CPU_ID, &cpuid) != 0) {
        fmd_hdl_debug(hdl, "missing '%s'\n", FM_FMRI_CPU_ID);
        cma_stats.bad_flts.fmds_value.ui64++;
        return (CMA_RA_FAILURE);
    }

    /*
     * cpu offlining using ldom_fmri_retire() may be asynchronous, so we
     * have to set the timer and check the cpu status later.
     */
    for (i = 0; i < cma.cma_cpu_tries;
        i++, (void) nanosleep(&cma.cma_cpu_delay, NULL)) {
        if (cpu_cmd(hdl, asru, cpustate) != -1) {
            if (repair)
                cma_stats.cpu_repairs.fmds_value.ui64++;
            else
                cma_stats.cpu_flts.fmds_value.ui64++;
            break;
        }
    }

    if (i >= cma.cma_cpu_tries) {
        cma_stats.cpu_fails.fmds_value.ui64++;
    }

    cma_cpu_start_retry(hdl, asru, uuid, repair);

    return (CMA_RA_FAILURE);
}
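/*
 * Illustrative sketch, not part of the original source: the loop above is
 * a bounded retry with a fixed delay between attempts. The same skeleton
 * standalone, with a hypothetical try_operation() callback supplied by the
 * caller:
 */
#include <time.h>

static int
retry_with_delay(int (*try_operation)(void *), void *arg,
    int max_tries, const struct timespec *delay)
{
    int i;

    for (i = 0; i < max_tries; i++) {
        if (try_operation(arg) == 0)
            return (0);             /* success */
        (void) nanosleep(delay, NULL);  /* wait, then retry */
    }
    return (-1);                    /* exhausted all attempts */
}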
/*
 * Prepare the client connection.
 * Return 0 for success, nonzero for failure.
 */
static int
exs_prep_client(exs_hdl_t *hp)
{
    int rv, optval = 1;
    struct linger ling;

    /* Find the DSCP address for the remote endpoint */
    if ((rv = dscpAddr(hp->h_dom, DSCP_ADDR_REMOTE,
        (struct sockaddr *)&hp->h_client.c_saddr,
        &hp->h_client.c_len)) != DSCP_OK) {
        fmd_hdl_debug(hp->h_hdl, "dscpAddr on the client socket "
            "failed for %s : rv = %d\n", hp->h_endpt_id, rv);
        return (1);
    }

    if ((hp->h_client.c_sd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
        fmd_hdl_error(hp->h_hdl, "Failed to create the client socket "
            "for %s", hp->h_endpt_id);
        return (2);
    }

    if (setsockopt(hp->h_client.c_sd, SOL_SOCKET, SO_REUSEADDR,
        &optval, sizeof (optval))) {
        fmd_hdl_error(hp->h_hdl, "Failed to set REUSEADDR on the "
            "client socket for %s", hp->h_endpt_id);
        EXS_CLOSE_CLR(hp->h_client);
        return (3);
    }

    /*
     * Set SO_LINGER so TCP aborts the connection when closed.
     * If the domain's client socket goes into the TIME_WAIT state,
     * ETM will be unable to connect to the SP until this clears.
     * This connection is over DSCP, which is a simple point-to-point
     * connection and therefore has no routers or multiple forwarding.
     * The risk of receiving old packets from a previously terminated
     * connection is very small.
     */
    ling.l_onoff = 1;
    ling.l_linger = 0;
    if (setsockopt(hp->h_client.c_sd, SOL_SOCKET, SO_LINGER, &ling,
        sizeof (ling))) {
        fmd_hdl_error(hp->h_hdl, "Failed to set SO_LINGER on the "
            "client socket for %s", hp->h_endpt_id);
        EXS_CLOSE_CLR(hp->h_client);
        return (4);
    }

    /* Bind the socket to the local IP address of the DSCP link */
    if ((rv = dscpBind(hp->h_dom, hp->h_client.c_sd,
        EXS_CLIENT_PORT)) != DSCP_OK) {
        if (rv == DSCP_ERROR_DOWN) {
            fmd_hdl_debug(hp->h_hdl, "xport - dscp link for %s "
                "is down", hp->h_endpt_id);
        } else {
            fmd_hdl_error(hp->h_hdl, "dscpBind on the client "
                "socket failed : rv = %d\n", rv);
        }
        EXS_CLOSE_CLR(hp->h_client);
        return (5);
    }

    hp->h_client.c_saddr.sin_port = htons(EXS_SERVER_PORT);

    /* Set IPsec security policy for this socket */
    if ((rv = dscpSecure(hp->h_dom, hp->h_client.c_sd)) != DSCP_OK) {
        fmd_hdl_error(hp->h_hdl, "dscpSecure on the client socket "
            "failed for %s : rv = %d\n", hp->h_endpt_id, rv);
        EXS_CLOSE_CLR(hp->h_client);
        return (6);
    }

    return (0);
}
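/*
 * Illustrative sketch, not part of the original source: the SO_LINGER
 * setting above (l_onoff = 1, l_linger = 0) makes close() abort the
 * connection with an RST instead of the normal FIN handshake, so the
 * local port never sits in TIME_WAIT. Standalone form of the same idiom;
 * abortive_close is a hypothetical helper.
 */
#include <sys/socket.h>
#include <unistd.h>

static int
abortive_close(int sd)
{
    struct linger ling;

    ling.l_onoff = 1;   /* enable linger */
    ling.l_linger = 0;  /* zero timeout => abortive close (RST) */

    if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &ling,
        sizeof (ling)) == -1)
        return (-1);

    return (close(sd));
}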
/*
 * Prepare to accept a connection.
 */
void
exs_prep_accept(fmd_hdl_t *hdl, int dom)
{
    int flags, optval = 1;
    int rv;

    if (Acc.c_sd != EXS_SD_FREE)
        return; /* nothing to do */

    if (Acc_destroy) {
        fmd_thr_destroy(hdl, Acc_tid);
        Acc_tid = EXS_TID_FREE;
    }

    /* Check to see if the DSCP interface is configured */
    if ((rv = dscpAddr(dom, DSCP_ADDR_LOCAL,
        (struct sockaddr *)&Acc.c_saddr, &Acc.c_len)) != DSCP_OK) {
        fmd_hdl_debug(hdl, "xport - dscpAddr on the accept socket "
            "failed for domain %d : rv = %d", dom, rv);
        return;
    }

    if ((Acc.c_sd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
        fmd_hdl_error(hdl, "Failed to create the accept socket");
        return;
    }

    if (setsockopt(Acc.c_sd, SOL_SOCKET, SO_REUSEADDR, &optval,
        sizeof (optval))) {
        fmd_hdl_error(hdl, "Failed to set REUSEADDR for the accept "
            "socket");
        EXS_CLOSE_CLR(Acc);
        return;
    }

    /* Bind the socket to the local IP address of the DSCP link */
    if ((rv = dscpBind(dom, Acc.c_sd, EXS_SERVER_PORT)) != DSCP_OK) {
        if (rv == DSCP_ERROR_DOWN) {
            fmd_hdl_debug(hdl, "xport - dscp link for domain %d "
                "is down", dom);
        } else {
            fmd_hdl_error(hdl, "dscpBind on the accept socket "
                "failed : rv = %d\n", rv);
        }
        EXS_CLOSE_CLR(Acc);
        return;
    }

    /* Activate IPsec security policy for this socket */
    if ((rv = dscpSecure(dom, Acc.c_sd)) != DSCP_OK) {
        fmd_hdl_error(hdl, "dscpSecure on the accept socket failed "
            "for domain %d : rv = %d\n", dom, rv);
        EXS_CLOSE_CLR(Acc);
        return;
    }

    if ((listen(Acc.c_sd, EXS_NUM_SOCKS)) == -1) {
        fmd_hdl_debug(hdl, "Failed to listen() for connections");
        EXS_CLOSE_CLR(Acc);
        return;
    }

    flags = fcntl(Acc.c_sd, F_GETFL, 0);
    (void) fcntl(Acc.c_sd, F_SETFL, flags | O_NONBLOCK);

    Acc_tid = fmd_thr_create(hdl, exs_listen, hdl);
}
int
etm_xport_init(fmd_hdl_t *hdl)
{
    _etm_xport_addr_t **_addrv; /* address vector */
    int i;                      /* vector index */
    ssize_t n;                  /* gen use */
    int rv;                     /* ret val */
    struct stat stat_buf;       /* file stat struct */
    char *fn;                   /* filename of dev node */

    rv = 0; /* assume good */
    _addrv = NULL;

    if (hdl == NULL) {
        rv = (-EINVAL);
        goto func_ret;
    }

    fmd_hdl_debug(hdl, "info: xport initializing\n");

    /* setup statistics and properties from FMD */
    (void) fmd_stat_create(hdl, FMD_STAT_NOALLOC,
        sizeof (etm_xport_stats) / sizeof (fmd_stat_t),
        (fmd_stat_t *)&etm_xport_stats);

    etm_xport_debug_lvl = fmd_prop_get_int32(hdl, ETM_PROP_NM_DEBUG_LVL);
    etm_xport_addrs = fmd_prop_get_string(hdl, ETM_PROP_NM_XPORT_ADDRS);
    fmd_hdl_debug(hdl, "info: etm_xport_debug_lvl %d\n",
        etm_xport_debug_lvl);
    fmd_hdl_debug(hdl, "info: etm_xport_addrs %s\n", etm_xport_addrs);

    /* decide whether to fake [some of] the device driver behavior */
    etm_xport_should_fake_dd = 0; /* default to false */

    fn = etm_xport_get_fn(hdl, ETM_IO_OP_RD);
    if (stat(fn, &stat_buf) < 0) {
        /* errno assumed set by above call */
        fmd_hdl_error(hdl, "error: bad device node %s errno %d\n",
            fn, errno);
        rv = (-errno);
        goto func_ret;
    }
    if (!S_ISCHR(stat_buf.st_mode) && use_vldc == 0) {
        etm_xport_should_fake_dd = 1; /* not a char driver */
    }
    fmd_hdl_debug(hdl, "info: etm_xport_should_fake_dd %d\n",
        etm_xport_should_fake_dd);

    /* validate each default dst transport address */
    if ((_addrv = (void *)etm_xport_get_ev_addrv(hdl, NULL)) == NULL) {
        /* errno assumed set by above call */
        rv = (-errno);
        goto func_ret;
    }

    for (i = 0; _addrv[i] != NULL; i++) {
        if ((n = etm_xport_valid_addr(_addrv[i])) < 0) {
            fmd_hdl_error(hdl, "error: bad xport addr %p\n",
                _addrv[i]);
            rv = n;
            goto func_ret;
        }
    } /* foreach dst addr */

    if (use_vldc) {
        etm_xport_vldc_conn = etm_xport_open(hdl, _addrv[0]);
        if (etm_xport_vldc_conn == NULL) {
            fmd_hdl_debug(hdl, "info: etm_xport_open() failed\n");
        }
    }

func_ret:

    if (_addrv != NULL) {
        etm_xport_free_addrv(hdl, (void *)_addrv);
    }
    if (rv >= 0) {
        fmd_hdl_debug(hdl, "info: xport initialized ok\n");
    }
    return (rv);
} /* etm_xport_init() */
etm_xport_conn_t
etm_xport_accept(fmd_hdl_t *hdl, etm_xport_addr_t *addrp)
{
    _etm_xport_addr_t *_addr;   /* address handle */
    _etm_xport_addr_t **_addrv; /* vector of addresses */
    _etm_xport_conn_t *_conn;   /* connection handle */
    _etm_xport_conn_t *rv;      /* ret val */
    uint8_t buf[4];             /* buffer for peeking */
    int n;                      /* byte cnt */
    struct timespec tms;        /* for nanosleep() */

    rv = NULL; /* default is failure */
    _conn = NULL;
    _addrv = NULL;

    tms.tv_sec = ETM_SLEEP_QUIK;
    tms.tv_nsec = 0;

    /*
     * get the default dst transport address and open a connection to it;
     * there is only 1 default addr
     */
    if ((_addrv = (void *)etm_xport_get_ev_addrv(hdl, NULL)) == NULL) {
        /* errno assumed set by above call */
        goto func_ret;
    }

    if (_addrv[0] == NULL) {
        errno = ENXIO; /* missing addr */
        etm_xport_stats.xport_accept_badargs.fmds_value.ui64++;
        goto func_ret;
    }

    if (_addrv[1] != NULL) {
        errno = E2BIG; /* too many addrs */
        etm_xport_stats.xport_accept_badargs.fmds_value.ui64++;
        goto func_ret;
    }

    _addr = _addrv[0];
    _addr->fn = etm_xport_get_fn(hdl, ETM_IO_OP_RD);

    if ((_conn = etm_xport_open(hdl, _addr)) == NULL) {
        /* errno assumed set by above call */
        goto func_ret;
    }

    if (etm_xport_should_fake_dd) {
        (void) nanosleep(&tms, NULL);   /* delay [for resp capture] */
        (void) ftruncate(_conn->fd, 0); /* act like socket/queue/pipe */
    }

    /*
     * peek from the connection to simulate an accept() system call
     * behavior; this will pend until some ETM message is written
     * from the other end
     */
    if (use_vldc) {
        pollfd_t pollfd;

        pollfd.events = POLLIN;
        pollfd.revents = 0;
        pollfd.fd = _conn->fd;

        if ((n = poll(&pollfd, 1, -1)) < 1) {
            if (n == 0) {
                errno = EIO;
            }
            goto func_ret;
        }
    } else {
        if ((n = etm_xport_raw_peek(hdl, _conn, buf, 1)) < 0) {
            errno = (-n);
            goto func_ret;
        }
    }

    rv = _conn; /* success, return the open connection */

func_ret:

    /* cleanup the connection if failed */
    if (rv == NULL) {
        if (_conn != NULL) {
            (void) etm_xport_close(hdl, _conn);
        }
    } else {
        if (addrp != NULL) {
            *addrp = _conn->addr;
        }
    }

    /* free _addrv and all its transport addresses */
    if (_addrv != NULL) {
        etm_xport_free_addrv(hdl, (void *)_addrv);
    }

    if (etm_xport_debug_lvl >= 2) {
        fmd_hdl_debug(hdl, "info: accept conn %p w/ *addrp %p\n",
            rv, (addrp != NULL ? *addrp : NULL));
    }

    return (rv);
} /* etm_xport_accept() */
nvlist_t *
cmd_mkboard_fru(fmd_hdl_t *hdl, char *frustr, char *serialstr, char *partstr)
{
    char *nac, *nac_name;
    int n, i, len;
    nvlist_t *fru, **hc_list;

    if (frustr == NULL)
        return (NULL);

    if ((nac_name = strstr(frustr, "MB")) == NULL)
        return (NULL);

    len = strlen(nac_name) + 1;

    nac = fmd_hdl_zalloc(hdl, len, FMD_SLEEP);
    (void) strcpy(nac, nac_name);

    n = cmd_count_components(nac, '/');

    fmd_hdl_debug(hdl, "cmd_mkboard_fru: nac=%s components=%d\n", nac, n);

    hc_list = fmd_hdl_zalloc(hdl, sizeof (nvlist_t *) * n, FMD_SLEEP);

    for (i = 0; i < n; i++) {
        (void) nvlist_alloc(&hc_list[i],
            NV_UNIQUE_NAME | NV_UNIQUE_NAME_TYPE, 0);
    }

    if (cmd_breakup_components(nac, "/", hc_list) < 0) {
        for (i = 0; i < n; i++) {
            if (hc_list[i] != NULL)
                nvlist_free(hc_list[i]);
        }
        fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *) * n);
        fmd_hdl_free(hdl, nac, len);
        return (NULL);
    }

    if (nvlist_alloc(&fru, NV_UNIQUE_NAME, 0) != 0) {
        for (i = 0; i < n; i++) {
            if (hc_list[i] != NULL)
                nvlist_free(hc_list[i]);
        }
        fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *) * n);
        fmd_hdl_free(hdl, nac, len);
        return (NULL);
    }

    if (nvlist_add_uint8(fru, FM_VERSION, FM_HC_SCHEME_VERSION) != 0 ||
        nvlist_add_string(fru, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0 ||
        nvlist_add_string(fru, FM_FMRI_HC_ROOT, "") != 0 ||
        nvlist_add_uint32(fru, FM_FMRI_HC_LIST_SZ, n) != 0 ||
        nvlist_add_nvlist_array(fru, FM_FMRI_HC_LIST, hc_list, n) != 0) {
        for (i = 0; i < n; i++) {
            if (hc_list[i] != NULL)
                nvlist_free(hc_list[i]);
        }
        fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *) * n);
        fmd_hdl_free(hdl, nac, len);
        nvlist_free(fru);
        return (NULL);
    }

    for (i = 0; i < n; i++) {
        if (hc_list[i] != NULL)
            nvlist_free(hc_list[i]);
    }
    fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *) * n);
    fmd_hdl_free(hdl, nac, len);

    if ((serialstr != NULL &&
        nvlist_add_string(fru, FM_FMRI_HC_SERIAL_ID, serialstr) != 0) ||
        (partstr != NULL &&
        nvlist_add_string(fru, FM_FMRI_HC_PART, partstr) != 0)) {
        nvlist_free(fru);
        return (NULL);
    }

    return (fru);
}
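/*
 * Illustrative sketch, not part of the original source: the function above
 * repeats the same free-everything block on four error paths. A common C
 * idiom is to centralize that cleanup under a single exit label; a
 * trimmed, hypothetical version of the same shape:
 */
#include <stdlib.h>

struct blob {
    char *a;
    char *b;
};

static struct blob *
build_blob(void)
{
    struct blob *bp = NULL;
    char *a = NULL, *b = NULL;

    if ((a = malloc(16)) == NULL)
        goto fail;
    if ((b = malloc(16)) == NULL)
        goto fail;
    if ((bp = malloc(sizeof (*bp))) == NULL)
        goto fail;

    bp->a = a;
    bp->b = b;
    return (bp);    /* ownership transfers to the caller */

fail:
    free(a);        /* free(NULL) is a no-op, so no flags needed */
    free(b);
    free(bp);
    return (NULL);
}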
static ssize_t
etm_xport_buffered_read(fmd_hdl_t *hdl, _etm_xport_conn_t *_conn,
    void *buf, size_t byte_cnt)
{
    ssize_t i, n; /* gen use */

    /* perform one-time initializations */

    /*
     * Design_Note:
     *
     * These initializations are not done in etm_xport_init() because
     * the connection/device is not yet open and hence the MTU size
     * is not yet known. However, the corresponding cleanup is done
     * in etm_xport_fini(). The buffering for byte stream semantics
     * should be done on a per device vs per connection basis; the
     * MTU size is assumed to remain constant across all connections.
     */

    if (etm_xport_irb_mtu_sz == 0) {
        if ((n = etm_xport_get_opt(hdl, _conn,
            ETM_XPORT_OPT_MTU_SZ)) < 0) {
            etm_xport_irb_mtu_sz = ETM_XPORT_MTU_SZ_DEF;
        } else {
            etm_xport_irb_mtu_sz = n;
        }
    }
    if (etm_xport_irb_area == NULL) {
        etm_xport_irb_area = fmd_hdl_zalloc(hdl,
            2 * etm_xport_irb_mtu_sz, FMD_SLEEP);
        etm_xport_irb_head = etm_xport_irb_area;
        etm_xport_irb_tail = etm_xport_irb_head;
    }

    /* sanity check the byte count now that we have the MTU */
    if (byte_cnt > etm_xport_irb_mtu_sz) {
        etm_xport_stats.xport_buffread_badargs.fmds_value.ui64++;
        return (-EINVAL);
    }

    /* if intermediate buffer can satisfy request do so w/out xport read */
    if (byte_cnt <= (etm_xport_irb_tail - etm_xport_irb_head)) {
        (void) memcpy(buf, etm_xport_irb_head, byte_cnt);
        etm_xport_irb_head += byte_cnt;
        if (etm_xport_debug_lvl >= 2) {
            fmd_hdl_debug(hdl, "info: quik buffered read == %d\n",
                byte_cnt);
        }
        return (byte_cnt);
    }

    /* slide buffer contents to front to make room for [MTU] more bytes */
    n = etm_xport_irb_tail - etm_xport_irb_head;
    (void) memmove(etm_xport_irb_area, etm_xport_irb_head, n);
    etm_xport_irb_head = etm_xport_irb_area;
    etm_xport_irb_tail = etm_xport_irb_head + n;

    /*
     * peek to see how much data is avail and read all of it;
     * there is no race condition between peeking and reading
     * due to unbuffered design of the device driver
     */
    if (use_vldc) {
        pollfd_t pollfd;

        pollfd.events = POLLIN;
        pollfd.revents = 0;
        pollfd.fd = _conn->fd;

        if ((n = poll(&pollfd, 1, -1)) < 1) {
            if (n == 0)
                return (-EIO);
            else
                return (-errno);
        }

        /*
         * set i to the maximum size --- read(..., i) below will
         * pull in n bytes (n <= i) anyway
         */
        i = etm_xport_irb_mtu_sz;
    } else {
        if ((i = etm_xport_raw_peek(hdl, _conn, etm_xport_irb_tail,
            etm_xport_irb_mtu_sz)) < 0) {
            return (i);
        }
    }

    if ((n = read(_conn->fd, etm_xport_irb_tail, i)) < 0) {
        /* errno assumed set by above call */
        etm_xport_stats.xport_os_read_fail.fmds_value.ui64++;
        return (-errno);
    }
    etm_xport_irb_tail += n;

    /* satisfy request as best we can with what we now have */
    n = MIN(byte_cnt, (etm_xport_irb_tail - etm_xport_irb_head));
    (void) memcpy(buf, etm_xport_irb_head, n);
    etm_xport_irb_head += n;

    if (etm_xport_debug_lvl >= 2) {
        fmd_hdl_debug(hdl, "info: slow buffered read == %d\n", n);
    }

    return (n);
} /* etm_xport_buffered_read() */
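/*
 * Illustrative sketch, not part of the original source: the intermediate
 * buffer above is a flat byte buffer with head/tail pointers that slides
 * unread data to the front before refilling, trading an occasional
 * memmove() for the simplicity of contiguous reads (versus a ring buffer).
 * A minimal standalone version of the drain/slide/refill steps, with a
 * hypothetical fixed capacity IRB_CAP:
 */
#include <string.h>
#include <unistd.h>

#define	IRB_CAP	512

static char irb_area[IRB_CAP];
static char *irb_head = irb_area;   /* next unread byte */
static char *irb_tail = irb_area;   /* one past last valid byte */

static ssize_t
buffered_read(int fd, void *buf, size_t want)
{
    size_t have = irb_tail - irb_head;
    ssize_t n;

    if (want <= have) {             /* satisfied from the buffer alone */
        (void) memcpy(buf, irb_head, want);
        irb_head += want;
        return ((ssize_t)want);
    }

    /* slide the unread remainder to the front to make room */
    (void) memmove(irb_area, irb_head, have);
    irb_head = irb_area;
    irb_tail = irb_head + have;

    if ((n = read(fd, irb_tail, IRB_CAP - have)) < 0)
        return (-1);                /* errno set by read() */
    irb_tail += n;

    have = irb_tail - irb_head;
    if (want > have)
        want = have;                /* return what we actually have */
    (void) memcpy(buf, irb_head, want);
    irb_head += want;
    return ((ssize_t)want);
}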
static char *
etm_xport_get_fn(fmd_hdl_t *hdl, int io_op)
{
    static char fn_wr[PATH_MAX] = {0}; /* fn for write */
    static char fn_rd[PATH_MAX] = {0}; /* fn for read/peek */
    char *rv;       /* ret val */
    char *prop_str; /* property string */
    char *cp;       /* char ptr */

    rv = NULL;

    /* use cached copies if avail */
    if ((io_op == ETM_IO_OP_WR) && (fn_wr[0] != '\0')) {
        return (fn_wr);
    }
    if (((io_op == ETM_IO_OP_RD) || (io_op == ETM_IO_OP_PK)) &&
        (fn_rd[0] != '\0')) {
        return (fn_rd);
    }

    /* create cached copies if empty "" property string */
    prop_str = fmd_prop_get_string(hdl, ETM_PROP_NM_XPORT_ADDRS);
    if (etm_xport_debug_lvl >= 2) {
        fmd_hdl_debug(hdl, "info: etm_xport_get_fn prop_str %s\n",
            prop_str);
    }

    if (strlen(prop_str) == 0) {
        struct stat buf;
        char *fname;

        if (stat(ETM_XPORT_DEV_VLDC, &buf) == 0) {
            use_vldc = 1;
            fname = ETM_XPORT_DEV_VLDC;
        } else {
            use_vldc = 0;
            fname = ETM_XPORT_DEV_FN_SP;
        }

        (void) strncpy(fn_wr, fname, PATH_MAX - 1);
        (void) strncpy(fn_rd, fname, PATH_MAX - 1);
        rv = fn_rd;
        if (io_op == ETM_IO_OP_WR) {
            rv = fn_wr;
        }
        goto func_ret;
    } /* if no/empty property set */

    /* create cached copies if "write[|read]" property string */
    if (io_op == ETM_IO_OP_WR) {
        (void) strncpy(fn_wr, prop_str, PATH_MAX - 1);
        if ((cp = strchr(fn_wr, '|')) != NULL) {
            *cp = '\0';
        }
        rv = fn_wr;
    } else {
        if ((cp = strchr(prop_str, '|')) != NULL) {
            cp++;
        } else {
            cp = prop_str;
        }
        (void) strncpy(fn_rd, cp, PATH_MAX - 1);
        rv = fn_rd;
    } /* whether io op is write/read/peek */

func_ret:

    if (etm_xport_debug_lvl >= 2) {
        fmd_hdl_debug(hdl, "info: etm_xport_get_fn fn_wr %s fn_rd %s\n",
            fn_wr, fn_rd);
    }
    fmd_prop_free_string(hdl, prop_str);
    return (rv);
} /* etm_xport_get_fn() */
/*
 * Ideally we would just use syslog(3C) for outputting our messages, but our
 * messaging standard defines a nice multi-line format and syslogd(1M) is very
 * inflexible and stupid when it comes to multi-line messages. It pulls data
 * out of log(7D) and splits it up by \n, printing each line to the console
 * with its usual prefix of date and sender; it uses the same behavior for the
 * messages file as well. Further, syslog(3C) provides no CE_CONT equivalent
 * for userland callers (which at least works around repeated file prefixing).
 * So with a multi-line message format, your file and console end up like this:
 *
 * Dec 02 18:08:40 hostname this is my nicely formatted
 * Dec 02 18:08:40 hostname message designed for 80 cols
 * ...
 *
 * To resolve these issues, we use our own syslog_emit() wrapper to emit
 * messages and some knowledge of how the Solaris log drivers work. We first
 * construct an enlarged format string containing the appropriate msgid(1).
 * We then format the caller's message using the provided format and buffer.
 * We send this message to log(7D) using putmsg() with SL_CONSOLE | SL_LOGONLY
 * set in the log_ctl_t. The log driver allows us to set SL_LOGONLY when we
 * construct messages ourself, indicating that syslogd should only emit the
 * message to /var/adm/messages and any remote hosts, and skip the console.
 * Then we emit the message a second time, without the special prefix, to the
 * sysmsg(7D) device, which handles console redirection and also permits us
 * to output any characters we like to the console, including \n and \r.
 */
static void
syslog_emit(fmd_hdl_t *hdl, const char *msg)
{
    struct strbuf ctl, dat;
    uint32_t msgid;
    char *buf;
    size_t buflen;

    const char *format = "fmd: [ID %u FACILITY_AND_PRIORITY] %s";
    STRLOG_MAKE_MSGID(format, msgid);

    buflen = snprintf(NULL, 0, format, msgid, msg);
    buf = alloca(buflen + 1);
    (void) snprintf(buf, buflen + 1, format, msgid, msg);

    ctl.buf = (void *)&syslog_ctl;
    ctl.len = sizeof (syslog_ctl);

    dat.buf = buf;
    dat.len = buflen + 1;

    /*
     * The underlying log driver won't accept messages longer than
     * LOG_MAXPS bytes. Therefore, messages which exceed this limit will
     * be truncated and appended with a pointer to the full message.
     */
    if (dat.len > LOG_MAXPS) {
        char *syslog_pointer, *p;
        size_t plen;

        if ((syslog_pointer = fmd_msg_gettext_id(syslog_msghdl, NULL,
            SYSLOG_POINTER)) == NULL) {
            /*
             * This shouldn't happen, but if it does we'll just
             * truncate the message.
             */
            buf[LOG_MAXPS - 1] = '\0';
            dat.len = LOG_MAXPS;
        } else {
            plen = strlen(syslog_pointer) + 1;
            buf[LOG_MAXPS - plen] = '\0';

            /*
             * If possible, the pointer is appended after a newline
             */
            if ((p = strrchr(buf, '\n')) == NULL)
                p = &buf[LOG_MAXPS - plen];

            (void) strcpy(p, syslog_pointer);
            free(syslog_pointer);
            dat.len = strlen(buf) + 1;
        }
    }

    if (syslog_file && putmsg(syslog_logfd, &ctl, &dat, 0) != 0) {
        fmd_hdl_debug(hdl, "putmsg failed: %s\n", strerror(errno));
        syslog_stats.log_err.fmds_value.ui64++;
    }

    dat.buf = strchr(buf, ']');
    dat.len -= (size_t)(dat.buf - buf);

    dat.buf[0] = '\r'; /* overwrite ']' with carriage return */
    dat.buf[1] = '\n'; /* overwrite ' ' with newline */

    if (syslog_cons && write(syslog_msgfd, dat.buf, dat.len) != dat.len) {
        fmd_hdl_debug(hdl, "write failed: %s\n", strerror(errno));
        syslog_stats.msg_err.fmds_value.ui64++;
    }
}
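/*
 * Illustrative sketch, not part of the original source: the
 * snprintf(NULL, 0, ...) call above is the standard two-pass idiom for
 * measuring a formatted string before allocating space for it. The same
 * idiom standalone, heap-allocated rather than alloca(); xasprintf is a
 * hypothetical helper.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *
xasprintf(const char *fmt, ...)
{
    va_list ap;
    int len;
    char *buf;

    va_start(ap, fmt);
    len = vsnprintf(NULL, 0, fmt, ap);  /* pass 1: measure only */
    va_end(ap);
    if (len < 0 || (buf = malloc((size_t)len + 1)) == NULL)
        return (NULL);

    va_start(ap, fmt);
    (void) vsnprintf(buf, (size_t)len + 1, fmt, ap); /* pass 2: format */
    va_end(ap);
    return (buf);   /* caller frees */
}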
void *
cmd_branch_restore(fmd_hdl_t *hdl, fmd_case_t *cp, cmd_case_ptr_t *ptr)
{
    cmd_branch_t *branch;
    size_t branchsz;

    for (branch = cmd_list_next(&cmd.cmd_branches); branch != NULL;
        branch = cmd_list_next(branch)) {
        if (strcmp(branch->branch_bufname, ptr->ptr_name) == 0)
            break;
    }

    if (branch == NULL) {
        fmd_hdl_debug(hdl, "restoring branch from %s\n", ptr->ptr_name);

        if ((branchsz = fmd_buf_size(hdl, NULL, ptr->ptr_name)) == 0) {
            fmd_hdl_abort(hdl, "branch referenced by case %s does "
                "not exist in saved state\n", fmd_case_uuid(hdl, cp));
        } else if (branchsz > CMD_BRANCH_MAXSIZE ||
            branchsz < CMD_BRANCH_MINSIZE) {
            fmd_hdl_abort(hdl, "branch buffer referenced by case %s "
                "is out of bounds (is %u bytes, max %u, min %u)\n",
                fmd_case_uuid(hdl, cp), branchsz,
                CMD_BRANCH_MAXSIZE, CMD_BRANCH_MINSIZE);
        }

        if ((branch = cmd_buf_read(hdl, NULL, ptr->ptr_name,
            branchsz)) == NULL) {
            fmd_hdl_abort(hdl, "failed to read branch buf %s",
                ptr->ptr_name);
        }

        fmd_hdl_debug(hdl, "found %d in version field\n",
            branch->branch_version);

        switch (branch->branch_version) {
        case CMD_BRANCH_VERSION_0:
            branch = branch_wrapv0(hdl, (cmd_branch_pers_t *)branch,
                branchsz);
            break;
        default:
            fmd_hdl_abort(hdl, "unknown version (found %d) "
                "for branch state referenced by case %s.\n",
                branch->branch_version, fmd_case_uuid(hdl, cp));
            break;
        }

        cmd_fmri_restore(hdl, &branch->branch_asru);

        if ((errno = nvlist_lookup_string(branch->branch_asru_nvl,
            FM_FMRI_MEM_UNUM, (char **)&branch->branch_unum)) != 0)
            fmd_hdl_abort(hdl, "failed to retrieve unum from asru");

        cmd_list_append(&cmd.cmd_branches, branch);
    }

    switch (ptr->ptr_subtype) {
    case CMD_PTR_BRANCH_CASE:
        cmd_mem_case_restore(hdl, &branch->branch_case, cp, "branch",
            branch->branch_unum);
        break;
    default:
        fmd_hdl_abort(hdl, "invalid %s subtype %d\n",
            ptr->ptr_name, ptr->ptr_subtype);
    }

    return (branch);
}
void
cmd_branch_gc(fmd_hdl_t *hdl)
{
    fmd_hdl_debug(hdl, "cmd_branch_gc\n");
    cmd_branch_validate(hdl);
}
cmd_dimm_t *
cmd_dimm_create(fmd_hdl_t *hdl, nvlist_t *asru)
{
    cmd_dimm_t *dimm;
    const char *unum;
    nvlist_t *fmri;
    uint_t nserids = 0;
    char **serids = NULL;

    if (!fmd_nvl_fmri_present(hdl, asru)) {
        fmd_hdl_debug(hdl, "dimm_lookup: discarding old ereport\n");
        return (NULL);
    }

    if ((unum = cmd_fmri_get_unum(asru)) == NULL) {
        CMD_STAT_BUMP(bad_mem_asru);
        return (NULL);
    }

#ifdef sun4v
    if (nvlist_lookup_string_array(asru, FM_FMRI_HC_SERIAL_ID,
        &serids, &nserids) != 0) {
        fmd_hdl_debug(hdl, "sun4v mem: FMRI does not"
            " have serial_ids\n");
        CMD_STAT_BUMP(bad_mem_asru);
        return (NULL);
    }
#endif

    fmri = cmd_mem_fmri_create(unum, serids, nserids);
    if (fmd_nvl_fmri_expand(hdl, fmri) < 0) {
        CMD_STAT_BUMP(bad_mem_asru);
        nvlist_free(fmri);
        return (NULL);
    }

    fmd_hdl_debug(hdl, "dimm_create: creating new DIMM %s\n", unum);
    CMD_STAT_BUMP(dimm_creat);

    dimm = fmd_hdl_zalloc(hdl, sizeof (cmd_dimm_t), FMD_SLEEP);
    dimm->dimm_nodetype = CMD_NT_DIMM;
    dimm->dimm_version = CMD_DIMM_VERSION;

    cmd_bufname(dimm->dimm_bufname, sizeof (dimm->dimm_bufname),
        "dimm_%s", unum);
    cmd_fmri_init(hdl, &dimm->dimm_asru, fmri, "dimm_asru_%s", unum);

    nvlist_free(fmri);

    (void) nvlist_lookup_string(dimm->dimm_asru_nvl, FM_FMRI_MEM_UNUM,
        (char **)&dimm->dimm_unum);

    dimm_attach_to_bank(hdl, dimm);

    cmd_mem_retirestat_create(hdl, &dimm->dimm_retstat, dimm->dimm_unum,
        0, CMD_DIMM_STAT_PREFIX);

    cmd_list_append(&cmd.cmd_dimms, dimm);
    cmd_dimm_dirty(hdl, dimm);

    return (dimm);
}
/*ARGSUSED*/
int
cma_page_retire(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *asru,
    const char *uuid, boolean_t repair)
{
    cma_page_t *page;
    uint64_t pageaddr;
    const char *action = repair ? "unretire" : "retire";
    int rc;
    nvlist_t *rsrc = NULL, *asrucp = NULL, *hcsp;

    (void) nvlist_lookup_nvlist(nvl, FM_FAULT_RESOURCE, &rsrc);

    if (nvlist_dup(asru, &asrucp, 0) != 0) {
        fmd_hdl_debug(hdl, "page retire nvlist dup failed\n");
        return (CMA_RA_FAILURE);
    }

    /* It should already be expanded, but we'll do it again anyway */
    if (fmd_nvl_fmri_expand(hdl, asrucp) < 0) {
        fmd_hdl_debug(hdl, "failed to expand page asru\n");
        cma_stats.bad_flts.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_FAILURE);
    }

    if (!repair && !fmd_nvl_fmri_present(hdl, asrucp)) {
        fmd_hdl_debug(hdl, "page retire overtaken by events\n");
        cma_stats.page_nonent.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_SUCCESS);
    }

    /* Figure out physaddr from resource or asru */
    if (rsrc == NULL ||
        nvlist_lookup_nvlist(rsrc, FM_FMRI_HC_SPECIFIC, &hcsp) != 0 ||
        (nvlist_lookup_uint64(hcsp, "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR,
        &pageaddr) != 0 && nvlist_lookup_uint64(hcsp,
        FM_FMRI_HC_SPECIFIC_PHYSADDR, &pageaddr) != 0)) {
        if (nvlist_lookup_uint64(asrucp, FM_FMRI_MEM_PHYSADDR,
            &pageaddr) != 0) {
            fmd_hdl_debug(hdl, "mem fault missing 'physaddr'\n");
            cma_stats.bad_flts.fmds_value.ui64++;
            nvlist_free(asrucp);
            return (CMA_RA_FAILURE);
        }
    }

    if (repair) {
        if (!cma.cma_page_dounretire) {
            fmd_hdl_debug(hdl, "suppressed unretire of page %llx\n",
                (u_longlong_t)pageaddr);
            cma_stats.page_supp.fmds_value.ui64++;
            nvlist_free(asrucp);
            return (CMA_RA_SUCCESS);
        }
        /* If unretire via topo fails, we fall back to legacy way */
        if (rsrc == NULL ||
            (rc = fmd_nvl_fmri_unretire(hdl, rsrc)) < 0)
            rc = cma_fmri_page_unretire(hdl, asrucp);
    } else {
        if (!cma.cma_page_doretire) {
            fmd_hdl_debug(hdl, "suppressed retire of page %llx\n",
                (u_longlong_t)pageaddr);
            cma_stats.page_supp.fmds_value.ui64++;
            nvlist_free(asrucp);
            return (CMA_RA_FAILURE);
        }
        /* If retire via topo fails, we fall back to legacy way */
        if (rsrc == NULL || (rc = fmd_nvl_fmri_retire(hdl, rsrc)) < 0)
            rc = cma_fmri_page_retire(hdl, asrucp);
    }

    if (rc == FMD_AGENT_RETIRE_DONE) {
        fmd_hdl_debug(hdl, "%sd page 0x%llx\n", action,
            (u_longlong_t)pageaddr);
        if (repair)
            cma_stats.page_repairs.fmds_value.ui64++;
        else
            cma_stats.page_flts.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_SUCCESS);
    } else if (repair || rc != FMD_AGENT_RETIRE_ASYNC) {
        fmd_hdl_debug(hdl, "%s of page 0x%llx failed, will not "
            "retry: %s\n", action, (u_longlong_t)pageaddr,
            strerror(errno));
        cma_stats.page_fails.fmds_value.ui64++;
        nvlist_free(asrucp);
        return (CMA_RA_FAILURE);
    }

    /*
     * The page didn't immediately retire. We'll need to periodically
     * check to see if it has been retired.
     */
    fmd_hdl_debug(hdl, "page didn't retire - sleeping\n");

    page = fmd_hdl_zalloc(hdl, sizeof (cma_page_t), FMD_SLEEP);
    page->pg_addr = pageaddr;
    if (rsrc != NULL)
        (void) nvlist_dup(rsrc, &page->pg_rsrc, 0);
    page->pg_asru = asrucp;
    if (uuid != NULL)
        page->pg_uuid = fmd_hdl_strdup(hdl, uuid, FMD_SLEEP);

    page->pg_next = cma.cma_pages;
    cma.cma_pages = page;

    if (cma.cma_page_timerid != 0)
        fmd_timer_remove(hdl, cma.cma_page_timerid);

    cma.cma_page_curdelay = cma.cma_page_mindelay;

    cma.cma_page_timerid = fmd_timer_install(hdl, NULL, NULL,
        cma.cma_page_curdelay);

    /* Don't free asrucp here. This FMRI will be needed for retry. */
    return (CMA_RA_FAILURE);
}
/*
 * The following is the common function for handling
 * memory UE with EID=MEM.
 * The error could be detected by either the CPU or IO.
 */
cmd_evdisp_t
opl_ue_mem(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, int hdlr_type)
{
    nvlist_t *rsrc = NULL, *asru = NULL, *fru = NULL;
    uint64_t ubc_ue_log_reg, pa;
    cmd_page_t *page;

    if (nvlist_lookup_nvlist(nvl, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
        &rsrc) != 0)
        return (CMD_EVD_BAD);

    switch (hdlr_type) {
    case CMD_OPL_HDLR_CPU:
        if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_NAME_SFAR,
            &pa) != 0)
            return (CMD_EVD_BAD);

        fmd_hdl_debug(hdl, "cmd_ue_mem: pa=%llx\n",
            (u_longlong_t)pa);
        break;

    case CMD_OPL_HDLR_IO:
        if (nvlist_lookup_uint64(nvl, OBERON_UBC_MUE,
            &ubc_ue_log_reg) != 0)
            return (CMD_EVD_BAD);

        pa = (ubc_ue_log_reg & UBC_UE_ADR_MASK);

        fmd_hdl_debug(hdl, "cmd_ue_mem: ue_log_reg=%llx\n",
            (u_longlong_t)ubc_ue_log_reg);
        fmd_hdl_debug(hdl, "cmd_ue_mem: pa=%llx\n",
            (u_longlong_t)pa);
        break;

    default:
        return (CMD_EVD_BAD);
    }

    if ((page = cmd_page_lookup(pa)) != NULL &&
        page->page_case.cc_cp != NULL &&
        fmd_case_solved(hdl, page->page_case.cc_cp))
        return (CMD_EVD_REDUND);

    if (nvlist_dup(rsrc, &asru, 0) != 0) {
        fmd_hdl_debug(hdl, "opl_ue_mem nvlist dup failed\n");
        return (CMD_EVD_BAD);
    }

    if (fmd_nvl_fmri_expand(hdl, asru) < 0) {
        nvlist_free(asru);
        CMD_STAT_BUMP(bad_mem_asru);
        return (CMD_EVD_BAD);
    }

    if ((fru = opl_mem_fru_create(hdl, asru)) == NULL) {
        nvlist_free(asru);
        return (CMD_EVD_BAD);
    }

    cmd_page_fault(hdl, asru, fru, ep, pa);
    nvlist_free(asru);
    nvlist_free(fru);

    return (CMD_EVD_OK);
}