/*
 * Exit a protected (poke-fault) bus access section previously entered via
 * the matching bus-enter routine.  Caller must hold pbm_pokefault_mutex
 * (asserted below); it is released on the way out.
 *
 * Before tearing down, check whether an access error was recorded against
 * the exclusive access handle, and if the handle looks clean, probe the
 * hardware directly via pci_check_error() and dispatch to the PBM error
 * handler if anything is pending.
 */
/* ARGSUSED */
void
pci_bus_exit(dev_info_t *dip, ddi_acc_handle_t handle)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ddi_fm_error_t derr;

	ASSERT(MUTEX_HELD(&pbm_p->pbm_pokefault_mutex));

	/*
	 * Memory barrier so prior accesses complete (and any resulting
	 * error state becomes visible) before we inspect the handle.
	 */
	membar_sync();

	/* FM error state is examined under the shared pci_fm_mutex. */
	mutex_enter(&pci_p->pci_common_p->pci_fm_mutex);
	ddi_fm_acc_err_get(pbm_p->pbm_excl_handle, &derr, DDI_FME_VERSION);

	if (derr.fme_status == DDI_FM_OK) {
		/*
		 * No error recorded on the handle; check the hardware
		 * state directly and run the PBM error handler if an
		 * error is pending.  Return value deliberately ignored:
		 * this is an exit-path best-effort check.
		 */
		if (pci_check_error(pci_p) != 0) {
			(void) pci_pbm_err_handler(pci_p->pci_dip, &derr,
			    (const void *)pci_p, PCI_BUS_EXIT_CALL);
		}
	}
	mutex_exit(&pci_p->pci_common_p->pci_fm_mutex);

	/*
	 * Drop the exclusive handle before releasing the pokefault mutex
	 * so no other thread can observe a stale handle while protected
	 * access is still marked active.
	 */
	pbm_p->pbm_excl_handle = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);
}
/*
 * Mark every FLAGERR access or DMA handle cached for dip as having taken
 * a non-fatal error described by derr.  Handles whose compare callback is
 * unset (unbound, or not opened with FLAGERR) are skipped.
 *
 * Returns DDI_FM_NONFATAL if at least one handle was flagged, otherwise
 * DDI_FM_UNKNOWN.
 */
int
ndi_fmc_entry_error_all(dev_info_t *dip, int flag, ddi_fm_error_t *derr)
{
	struct i_ddi_fmhdl *hdl;
	ndi_fmc_t *cache = NULL;
	ndi_fmcentry_t *entry;
	int flagged = 0;

	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	hdl = DEVI(dip)->devi_fmhdl;
	ASSERT(hdl);

	/* Select the handle cache matching the requested handle type. */
	if (flag == DMA_HANDLE) {
		if (DDI_FM_DMA_ERR_CAP(hdl->fh_cap)) {
			cache = hdl->fh_dma_cache;
			ASSERT(cache);
		}
	} else if (DDI_FM_ACC_ERR_CAP(hdl->fh_cap)) {
		cache = hdl->fh_acc_cache;
		ASSERT(cache);
	}

	if (cache == NULL)
		return (DDI_FM_UNKNOWN);

	/* Walk the active resource entries under the cache lock. */
	mutex_enter(&cache->fc_lock);
	for (entry = cache->fc_head; entry != NULL;
	    entry = entry->fce_next) {
		ddi_fmcompare_t cmp;

		if (flag == ACC_HANDLE) {
			cmp = i_ddi_fm_acc_err_cf_get(
			    (ddi_acc_handle_t)entry->fce_resource);
		} else {
			cmp = i_ddi_fm_dma_err_cf_get(
			    (ddi_dma_handle_t)entry->fce_resource);
		}

		/* No compare callback: unbound or not FLAGERR. */
		if (cmp == NULL)
			continue;

		/* Record a non-fatal error against this handle. */
		flagged++;
		if (flag == ACC_HANDLE) {
			ddi_acc_handle_t ap = entry->fce_resource;

			i_ddi_fm_acc_err_set(ap, derr->fme_ena,
			    DDI_FM_NONFATAL, DDI_FM_ERR_UNEXPECTED);
			ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
			derr->fme_acc_handle = ap;
		} else {
			ddi_dma_handle_t dp = entry->fce_resource;

			i_ddi_fm_dma_err_set(dp, derr->fme_ena,
			    DDI_FM_NONFATAL, DDI_FM_ERR_UNEXPECTED);
			ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
			derr->fme_dma_handle = dp;
		}
	}
	mutex_exit(&cache->fc_lock);

	return (flagged ? DDI_FM_NONFATAL : DDI_FM_UNKNOWN);
}
/*
 * Compare captured bus error state against each cached FLAGERR access or
 * DMA handle for dip, using the handle's registered compare callback.
 * Any handle the callback reports as affected has the error recorded
 * against it, and derr is updated to describe that handle's error.
 *
 * Returns the worst outcome seen: DDI_FM_FATAL if any handle compared
 * fatal, else DDI_FM_NONFATAL if any compared non-fatal, else
 * DDI_FM_UNKNOWN.
 */
int
ndi_fmc_entry_error(dev_info_t *dip, int flag, ddi_fm_error_t *derr,
    const void *bus_err_state)
{
	struct i_ddi_fmhdl *hdl;
	ndi_fmc_t *cache = NULL;
	ndi_fmcentry_t *entry;
	int status = DDI_FM_UNKNOWN;
	int nfatal = 0, nnonfatal = 0;

	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	hdl = DEVI(dip)->devi_fmhdl;
	ASSERT(hdl);

	/* Select the handle cache matching the requested handle type. */
	if (flag == DMA_HANDLE) {
		if (DDI_FM_DMA_ERR_CAP(hdl->fh_cap)) {
			cache = hdl->fh_dma_cache;
			ASSERT(cache);
		}
	} else if (DDI_FM_ACC_ERR_CAP(hdl->fh_cap)) {
		cache = hdl->fh_acc_cache;
		ASSERT(cache);
	}

	if (cache == NULL)
		return (DDI_FM_UNKNOWN);

	/*
	 * Check the active resource entries.  The cache lock is held for
	 * the whole walk so entries cannot be freed while we compare and
	 * record errors against them.
	 */
	mutex_enter(&cache->fc_lock);
	for (entry = cache->fc_head; entry != NULL;
	    entry = entry->fce_next) {
		ddi_fmcompare_t cmp;

		if (flag == ACC_HANDLE) {
			cmp = i_ddi_fm_acc_err_cf_get(
			    (ddi_acc_handle_t)entry->fce_resource);
		} else {
			cmp = i_ddi_fm_dma_err_cf_get(
			    (ddi_dma_handle_t)entry->fce_resource);
		}

		/* No compare callback: unbound or not FLAGERR. */
		if (cmp == NULL)
			continue;

		status = cmp(dip, entry->fce_resource, bus_err_state,
		    entry->fce_bus_specific);
		if (status == DDI_FM_UNKNOWN || status == DDI_FM_OK)
			continue;

		if (status == DDI_FM_FATAL)
			++nfatal;
		else if (status == DDI_FM_NONFATAL)
			++nnonfatal;

		/* Record the compared severity against this handle. */
		if (flag == ACC_HANDLE) {
			ddi_acc_handle_t ap = entry->fce_resource;

			i_ddi_fm_acc_err_set(ap, derr->fme_ena, status,
			    DDI_FM_ERR_UNEXPECTED);
			ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
			derr->fme_acc_handle = ap;
		} else {
			ddi_dma_handle_t dp = entry->fce_resource;

			i_ddi_fm_dma_err_set(dp, derr->fme_ena, status,
			    DDI_FM_ERR_UNEXPECTED);
			ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
			derr->fme_dma_handle = dp;
		}
	}
	mutex_exit(&cache->fc_lock);

	return (nfatal ? DDI_FM_FATAL :
	    nnonfatal ? DDI_FM_NONFATAL : DDI_FM_UNKNOWN);
}