/*
 * Starting at *cpuid, scan the next UTS2_CPUS_PER_CHIP CPU ids and look for
 * one whose FMRI is present and usable.  On success, update *cpuid to the
 * matching CPU id and return 0; otherwise return -1.
 */
static int
cpu_present(fmd_hdl_t *hdl, nvlist_t *asru, uint32_t *cpuid)
{
	nvlist_t *cp_asru;
	uint32_t i;

	if (nvlist_dup(asru, &cp_asru, 0) != 0) {
		fmd_hdl_debug(hdl, "unable to alloc asru for thread\n");
		return (-1);
	}

	for (i = *cpuid; i < *cpuid + UTS2_CPUS_PER_CHIP; i++) {
		/*
		 * Rewrite the cpu id in the copied ASRU and test whether that
		 * CPU is present and not marked unusable.
		 */
		(void) nvlist_remove_all(cp_asru, FM_FMRI_CPU_ID);
		if (nvlist_add_uint32(cp_asru, FM_FMRI_CPU_ID, i) == 0) {
			if (fmd_nvl_fmri_present(hdl, cp_asru) &&
			    !fmd_nvl_fmri_unusable(hdl, cp_asru)) {
				nvlist_free(cp_asru);
				*cpuid = i;
				return (0);
			}
		}
	}

	nvlist_free(cp_asru);
	return (-1);
}
/*
 * Solve a given ZFS case.  This first checks to make sure the diagnosis is
 * still valid, as well as cleaning up any pending timer associated with the
 * case.
 */
static void
zfs_case_solve(fmd_hdl_t *hdl, zfs_case_t *zcp, const char *faultname,
    boolean_t checkunusable)
{
	libzfs_handle_t *zhdl = fmd_hdl_getspecific(hdl);
	nvlist_t *detector, *fault;
	boolean_t serialize;
	nvlist_t *fmri, *fru;
	topo_hdl_t *thp;
	int err;

	/*
	 * Construct the detector from the case data.  The detector is in the
	 * ZFS scheme, and is either the pool or the vdev, depending on whether
	 * this is a vdev or pool fault.
	 */
	detector = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_uint8(detector, FM_VERSION, ZFS_SCHEME_VERSION0);
	(void) nvlist_add_string(detector, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS);
	(void) nvlist_add_uint64(detector, FM_FMRI_ZFS_POOL,
	    zcp->zc_data.zc_pool_guid);
	if (zcp->zc_data.zc_vdev_guid != 0) {
		(void) nvlist_add_uint64(detector, FM_FMRI_ZFS_VDEV,
		    zcp->zc_data.zc_vdev_guid);
	}

	/*
	 * We also want to make sure that the detector (pool or vdev) properly
	 * reflects the diagnosed state, when the fault corresponds to internal
	 * ZFS state (i.e. not checksum or I/O error-induced).  Otherwise, a
	 * device which was unavailable early in boot (because the driver/file
	 * wasn't available) and is now healthy will be mis-diagnosed.
	 */
	if (!fmd_nvl_fmri_present(hdl, detector) ||
	    (checkunusable && !fmd_nvl_fmri_unusable(hdl, detector))) {
		fmd_case_close(hdl, zcp->zc_case);
		nvlist_free(detector);
		return;
	}

	fru = NULL;
	if (zcp->zc_fru != NULL &&
	    (thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION)) != NULL) {
		/*
		 * If the vdev had an associated FRU, then get the FRU nvlist
		 * from the topo handle and use that in the suspect list.  We
		 * explicitly lookup the FRU because the fmri reported from the
		 * kernel may not have up to date details about the disk itself
		 * (serial, part, etc).
		 */
		if (topo_fmri_str2nvl(thp, zcp->zc_fru, &fmri, &err) == 0) {
			/*
			 * If the disk is part of the system chassis, but the
			 * FRU indicates a different chassis ID than our
			 * current system, then ignore the error.  This
			 * indicates that the device was part of another
			 * cluster head, and for obvious reasons cannot be
			 * imported on this system.
			 */
			if (libzfs_fru_notself(zhdl, zcp->zc_fru)) {
				fmd_case_close(hdl, zcp->zc_case);
				nvlist_free(fmri);
				fmd_hdl_topo_rele(hdl, thp);
				nvlist_free(detector);
				return;
			}

			/*
			 * If the device is no longer present on the system, or
			 * topo_fmri_fru() fails for other reasons, then fall
			 * back to the fmri specified in the vdev.
			 */
			if (topo_fmri_fru(thp, fmri, &fru, &err) != 0)
				fru = fmd_nvl_dup(hdl, fmri, FMD_SLEEP);
			nvlist_free(fmri);
		}

		fmd_hdl_topo_rele(hdl, thp);
	}

	fault = fmd_nvl_create_fault(hdl, faultname, 100, detector,
	    fru, detector);
	fmd_case_add_suspect(hdl, zcp->zc_case, fault);

	nvlist_free(fru);

	fmd_case_solve(hdl, zcp->zc_case);

	serialize = B_FALSE;
	if (zcp->zc_data.zc_has_remove_timer) {
		fmd_timer_remove(hdl, zcp->zc_remove_timer);
		zcp->zc_data.zc_has_remove_timer = 0;
		serialize = B_TRUE;
	}
	if (serialize)
		zfs_case_serialize(hdl, zcp);

	nvlist_free(detector);
}