static int
dr_is_real_device(dev_info_t *dip)
{
	struct regspec *regbuf = NULL;
	int length = 0;
	int rc;

	if (ddi_get_driver(dip) == NULL)
		return (0);

	if (DEVI(dip)->devi_pm_flags & (PMC_NEEDS_SR|PMC_PARENTAL_SR))
		return (1);
	if (DEVI(dip)->devi_pm_flags & PMC_NO_SR)
		return (0);

	/*
	 * now the general case
	 */
	rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&regbuf, &length);
	ASSERT(rc != DDI_PROP_NO_MEMORY);
	if (rc != DDI_PROP_SUCCESS) {
		return (0);
	} else {
		if ((length > 0) && (regbuf != NULL))
			kmem_free(regbuf, length);
		return (1);
	}
}

void
i_ddi_intr_devi_fini(dev_info_t *dip)
{
	devinfo_intr_t	*intr_p = DEVI(dip)->devi_intr_p;

	DDI_INTR_APIDBG((CE_CONT, "i_ddi_intr_devi_fini: dip %p\n",
	    (void *)dip));

	if ((intr_p == NULL) || i_ddi_intr_get_current_nintrs(dip))
		return;

	/*
	 * devi_intr_handle_p will only be used for devices
	 * which are using the legacy DDI Interrupt interfaces.
	 */
	if (intr_p->devi_intr_handle_p) {
		/* nintrs could be zero; so check for it first */
		if (intr_p->devi_intr_sup_nintrs) {
			kmem_free(intr_p->devi_intr_handle_p,
			    intr_p->devi_intr_sup_nintrs *
			    sizeof (ddi_intr_handle_t));
		}
	}

	/*
	 * devi_irm_req_p will only be used for devices which
	 * are mapped to an Interrupt Resource Management pool.
	 */
	if (intr_p->devi_irm_req_p)
		(void) i_ddi_irm_remove(dip);

	kmem_free(DEVI(dip)->devi_intr_p, sizeof (devinfo_intr_t));
	DEVI(dip)->devi_intr_p = NULL;
}

static void
di_dfs(dev_info_t *devi, int (*f)(dev_info_t *, int), caddr_t arg)
{
	(void) (*f)(devi, 0);
	if (devi) {
		di_dfs((dev_info_t *)DEVI(devi)->devi_child, f, arg);
		di_dfs((dev_info_t *)DEVI(devi)->devi_sibling, f, arg);
	}
}

static void
in_set_instance(dev_info_t *dip, in_drv_t *dp, major_t major)
{
	/* use preassigned instance if available */
	if (DEVI(dip)->devi_instance != -1)
		dp->ind_instance = DEVI(dip)->devi_instance;
	else
		dp->ind_instance = in_next_instance(major);
}

/*ARGSUSED*/
static int
ppb_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	register dev_info_t *pdip;

	pdip = (dev_info_t *)DEVI(dip)->devi_parent;

	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)
	    (pdip, rdip, mp, offset, len, vaddrp));
}

void
i_ndi_busop_access_exit(dev_info_t *dip, ddi_acc_handle_t handle)
{
	dev_info_t *pdip = (dev_info_t *)DEVI(dip)->devi_parent;

	/* Valid operation for BUSO_REV_6 and above */
	if (DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_6)
		return;

	if (DEVI(pdip)->devi_ops->devo_bus_ops->bus_fm_access_exit == NULL)
		return;

	(*DEVI(pdip)->devi_ops->devo_bus_ops->bus_fm_access_exit)
	    (pdip, handle);
}

uint_t
i_ddi_intr_get_current_nenables(dev_info_t *dip)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	return (intr_p ? intr_p->devi_intr_curr_nenables : 0);
}

int
i_ddi_get_msi_msix_cap_ptr(dev_info_t *dip)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	return (intr_p ? intr_p->devi_cap_ptr : 0);
}

/*
 * i_ddi_intr_get_current_navail:
 *
 *	Return the number of interrupts currently available.
 *	If a precise number set by IRM is not available, then
 *	return the limit determined by i_ddi_intr_get_limit().
 */
uint_t
i_ddi_intr_get_current_navail(dev_info_t *dip, int type)
{
	devinfo_intr_t	*intr_p;
	ddi_irm_pool_t	*pool_p;
	ddi_irm_req_t	*req_p;
	uint_t		navail;

	/* Check for a precise number from IRM */
	if (((intr_p = DEVI(dip)->devi_intr_p) != NULL) &&
	    ((req_p = intr_p->devi_irm_req_p) != NULL) &&
	    (type == req_p->ireq_type) &&
	    ((pool_p = req_p->ireq_pool_p) != NULL)) {
		/*
		 * Lock to be sure a rebalance is not in progress.
		 * (Should be changed to a rwlock.)
		 */
		mutex_enter(&pool_p->ipool_navail_lock);
		navail = req_p->ireq_navail;
		mutex_exit(&pool_p->ipool_navail_lock);
		return (navail);
	}

	/* Otherwise, return the limit */
	return (i_ddi_intr_get_limit(dip, type, NULL));
}

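/*
 * Illustrative sketch only (not part of the framework code in this
 * file): a leaf driver normally reaches the logic above through the
 * public ddi_intr_get_navail(9F) interface. The hypothetical fragment
 * below shows the usual attach-time pattern; the name
 * example_intr_navail is an assumption for illustration.
 */
static int
example_intr_navail(dev_info_t *dip)
{
	int	navail = 0;

	/* Ask the framework how many MSI-X vectors are available now */
	if (ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX, &navail) !=
	    DDI_SUCCESS || navail == 0)
		return (DDI_FAILURE);

	/*
	 * Under IRM the answer is only a snapshot; a subsequent
	 * ddi_intr_alloc(9F) may still return fewer vectors.
	 */
	return (DDI_SUCCESS);
}
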
/*
 * i_ddi_intr_get_pool()
 *
 *	Get an IRM pool that supplies interrupts of a specified type.
 *	Issues a DDI_INTROP_GETPOOL request to the bus nexus driver.
 *	Fails if no pool exists.
 */
ddi_irm_pool_t *
i_ddi_intr_get_pool(dev_info_t *dip, int type)
{
	devinfo_intr_t		*intr_p;
	ddi_irm_pool_t		*pool_p;
	ddi_irm_req_t		*req_p;
	ddi_intr_handle_impl_t	hdl;

	ASSERT(dip != NULL);
	ASSERT(DDI_INTR_TYPE_FLAG_VALID(type));

	if (((intr_p = DEVI(dip)->devi_intr_p) != NULL) &&
	    ((req_p = intr_p->devi_irm_req_p) != NULL) &&
	    ((pool_p = req_p->ireq_pool_p) != NULL) &&
	    (pool_p->ipool_types & type)) {
		return (pool_p);
	}

	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_dip = dip;
	hdl.ih_type = type;

	if (i_ddi_intr_ops(dip, dip, DDI_INTROP_GETPOOL,
	    &hdl, (void *)&pool_p) == DDI_SUCCESS)
		return (pool_p);

	return (NULL);
}

ddi_acc_handle_t
i_ddi_get_pci_config_handle(dev_info_t *dip)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	return (intr_p ? intr_p->devi_cfg_handle : NULL);
}

/*ARGSUSED*/
static int
sbdp_suspend_devices_exit(dev_info_t *dip, void *arg)
{
	struct dev_info *devi = DEVI(dip);

	ndi_devi_exit(dip, devi->devi_circular);
	return (DDI_WALK_CONTINUE);
}

void
i_ddi_set_intr_handle(dev_info_t *dip, int inum, ddi_intr_handle_t intr_hdl)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p == NULL)
		return;

	/*
	 * Return silently if an invalid inum is passed; there is
	 * no handle slot to set in that case.
	 */
	if ((inum < 0) || (inum >= intr_p->devi_intr_sup_nintrs))
		return;

	if (intr_hdl && (intr_p->devi_intr_handle_p == NULL)) {
		/* nintrs could be zero; so check for it first */
		if (intr_p->devi_intr_sup_nintrs)
			intr_p->devi_intr_handle_p = kmem_zalloc(
			    sizeof (ddi_intr_handle_t) *
			    intr_p->devi_intr_sup_nintrs, KM_SLEEP);
	}

	if (intr_p->devi_intr_handle_p)
		intr_p->devi_intr_handle_p[inum] = intr_hdl;
}

ddi_intr_msix_t *
i_ddi_get_msix(dev_info_t *dip)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	return (intr_p ? intr_p->devi_msix_p : NULL);
}

/*
 * Build a list of dev_t's for a device/devid
 *
 * The effect of this function is cumulative, adding dev_t's
 * for the device to the list of all dev_t's for a given
 * devid.
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	struct ddi_minor_data	*dmdp;
	int			minor_all = 0;
	int			ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/* are we looking for a set of minor nodes? */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	mutex_enter(&(DEVI(dip)->devi_lock));

	/* Find matching minor names */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non-matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}

	mutex_exit(&(DEVI(dip)->devi_lock));

	*devtcntp = ndevts;
}

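/*
 * Illustrative sketch only: the cumulative counting contract above
 * (entries are stored only while ndevts < ndevts_alloced, but the
 * counter keeps advancing) is typically consumed with a "count,
 * allocate, retry" loop. The caller below, example_collect_devts, is
 * a hypothetical name; only the in/out semantics of ndevts_alloced
 * and *devtcntp come from the function above.
 */
static dev_t *
example_collect_devts(dev_info_t *dip, char *minor_name, int *cntp)
{
	dev_t	*devts = NULL;
	int	alloced = 0, ndevts;

	for (;;) {
		ndevts = 0;	/* counter is cumulative; reset per pass */
		e_devid_minor_to_devlist(dip, minor_name,
		    alloced, &ndevts, devts);
		if (ndevts <= alloced)
			break;	/* everything fit this pass */
		if (devts != NULL)
			kmem_free(devts, alloced * sizeof (dev_t));
		alloced = ndevts;
		devts = kmem_zalloc(alloced * sizeof (dev_t), KM_SLEEP);
	}

	*cntp = ndevts;
	return (devts);
}
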
/*
 * Check error state against the handle resource stored in the specified
 * FM cache. If tdip != NULL, we check only the cache entries for tdip.
 * The caller must ensure that tdip is valid throughout the call and
 * that all FM data structures can be safely accessed.
 *
 * If tdip == NULL, we check all children that have registered their
 * FM_DMA_CHK or FM_ACC_CHK capabilities.
 *
 * The following status values may be returned:
 *
 *	DDI_FM_FATAL - if at least one cache entry comparison yields a
 *			fatal error.
 *
 *	DDI_FM_NONFATAL - if at least one cache entry comparison yields a
 *			non-fatal error and no comparison yields a fatal
 *			error.
 *
 *	DDI_FM_UNKNOWN - cache entry comparisons did not yield fatal or
 *			non-fatal errors.
 */
int
ndi_fmc_error(dev_info_t *dip, dev_info_t *tdip, int flag, uint64_t ena,
    const void *bus_err_state)
{
	int status, fatal = 0, nonfatal = 0;
	ddi_fm_error_t derr;
	struct i_ddi_fmhdl *fmhdl;
	struct i_ddi_fmtgt *tgt;

	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	i_ddi_fm_handler_enter(dip);
	fmhdl = DEVI(dip)->devi_fmhdl;
	ASSERT(fmhdl);

	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
	derr.fme_ena = ena;

	for (tgt = fmhdl->fh_tgts; tgt != NULL; tgt = tgt->ft_next) {

		if (tdip != NULL && tdip != tgt->ft_dip)
			continue;

		/*
		 * Attempt to find the entry in this child's handle cache
		 */
		status = ndi_fmc_entry_error(tgt->ft_dip, flag, &derr,
		    bus_err_state);

		if (status == DDI_FM_FATAL)
			++fatal;
		else if (status == DDI_FM_NONFATAL)
			++nonfatal;
		else
			continue;

		/*
		 * Call our child to process this error.
		 */
		status = tgt->ft_errhdl->eh_func(tgt->ft_dip, &derr,
		    tgt->ft_errhdl->eh_impl);

		if (status == DDI_FM_FATAL)
			++fatal;
		else if (status == DDI_FM_NONFATAL)
			++nonfatal;
	}

	i_ddi_fm_handler_exit(dip);

	if (fatal)
		return (DDI_FM_FATAL);
	else if (nonfatal)
		return (DDI_FM_NONFATAL);

	return (DDI_FM_UNKNOWN);
}

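/*
 * Illustrative sketch only: a bus nexus error handler would typically
 * generate an ENA, capture its bus-specific error state, and then feed
 * both to ndi_fmc_error() to sweep the children's handle caches. The
 * name example_nexus_err_handler is hypothetical; fm_ena_generate()
 * and ndi_fmc_error() are the real interfaces.
 */
static int
example_nexus_err_handler(dev_info_t *dip, const void *bus_err_state)
{
	uint64_t	ena;

	/* An ENA ties this detection to any ereports that follow */
	ena = fm_ena_generate(0, FM_ENA_FMT1);

	/* Check all cached DMA handles of FM-capable children */
	return (ndi_fmc_error(dip, NULL, DMA_HANDLE, ena, bus_err_state));
}
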
/*
 * Call parent busop fm clean-up routine.
 *
 * Called during driver detach(9E).
 */
void
i_ndi_busop_fm_fini(dev_info_t *dip)
{
	dev_info_t *pdip = (dev_info_t *)DEVI(dip)->devi_parent;

	if (dip == ddi_root_node())
		return;

	/* Valid operation for BUSO_REV_6 and above */
	if (DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_6)
		return;

	if (DEVI(pdip)->devi_ops->devo_bus_ops->bus_fm_fini == NULL)
		return;

	(*DEVI(pdip)->devi_ops->devo_bus_ops->bus_fm_fini)(pdip, dip);
}

/*
 * NOTE: This function is only called by i_ddi_dev_init().
 */
void
i_ddi_intr_set_supported_types(dev_info_t *dip, int intr_types)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p)
		intr_p->devi_intr_sup_types = intr_types;
}

int
iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_get_sleep_flags(handle));
}

void
iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	nexops->nops_dma_reset_cookies(dip, handle);
}

void
i_ddi_set_pci_config_handle(dev_info_t *dip, ddi_acc_handle_t handle)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p)
		intr_p->devi_cfg_handle = handle;
}

void
i_ddi_set_msix(dev_info_t *dip, ddi_intr_msix_t *msix_p)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p)
		intr_p->devi_msix_p = msix_p;
}

/*
 * i_ddi_irm_supported()
 *
 *	Query if IRM is supported by a driver using a specific interrupt
 *	type. Note that IRM support is currently limited to MSI-X users
 *	with registered callbacks.
 */
int
i_ddi_irm_supported(dev_info_t *dip, int type)
{
	ddi_cb_t *cb_p = DEVI(dip)->devi_cb_p;

	return ((DDI_IRM_HAS_CB(cb_p) && (type == DDI_INTR_TYPE_MSIX)) ?
	    DDI_SUCCESS : DDI_ENOTSUP);
}

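/*
 * Illustrative sketch only: a driver becomes eligible for IRM by
 * registering an interrupt callback with ddi_cb_register(9F) before
 * allocating its MSI-X vectors. The names example_cb_func,
 * example_enable_irm, and example_state are hypothetical.
 */
/*ARGSUSED*/
static int
example_cb_func(dev_info_t *dip, ddi_cb_action_t action, void *cbarg,
    void *arg1, void *arg2)
{
	switch (action) {
	case DDI_CB_INTR_ADD:
	case DDI_CB_INTR_REMOVE:
		/* (uintptr_t)cbarg is the count of vectors added/removed */
		return (DDI_SUCCESS);
	default:
		return (DDI_ENOTSUP);
	}
}

static int
example_enable_irm(dev_info_t *dip, void *example_state,
    ddi_cb_handle_t *hdlp)
{
	return (ddi_cb_register(dip, DDI_CB_FLAG_INTR, example_cb_func,
	    example_state, NULL, hdlp));
}
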
void
i_ddi_set_msi_msix_cap_ptr(dev_info_t *dip, int cap_ptr)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p)
		intr_p->devi_cap_ptr = cap_ptr;
}

/*
 * NOTE: This function is only called by
 * ddi_intr_alloc() and ddi_intr_free().
 */
void
i_ddi_intr_set_current_type(dev_info_t *dip, int intr_type)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p)
		intr_p->devi_intr_curr_type = intr_type;
}

int
iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dma_clear_cookies(dip, handle));
}

/*
 * NOTE: This function is only called by ddi_intr_alloc().
 */
void
i_ddi_intr_set_supported_nintrs(dev_info_t *dip, int nintrs)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p)
		intr_p->devi_intr_sup_nintrs = nintrs;
}

void
i_ddi_intr_set_current_nenables(dev_info_t *dip, int nintrs)
{
	devinfo_intr_t *intr_p = DEVI(dip)->devi_intr_p;

	if (intr_p)
		intr_p->devi_intr_curr_nenables = nintrs;
}

/*
 * ndi_fmc_insert -
 *	Add a new entry to the specified cache.
 *
 *	This function must be called at or below LOCK_LEVEL.
 */
void
ndi_fmc_insert(dev_info_t *dip, int flag, void *resource, void *bus_specific)
{
	struct dev_info *devi = DEVI(dip);
	ndi_fmc_t *fcp;
	ndi_fmcentry_t *fep, **fpp;
	struct i_ddi_fmhdl *fmhdl;

	ASSERT(devi);
	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	fmhdl = devi->devi_fmhdl;
	if (fmhdl == NULL) {
		return;
	}

	if (flag == DMA_HANDLE) {
		if (!DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap)) {
			return;
		}
		fcp = fmhdl->fh_dma_cache;
		fpp = &((ddi_dma_impl_t *)resource)->dmai_error.err_fep;
	} else if (flag == ACC_HANDLE) {
		if (!DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap)) {
			i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL,
			    DDI_NOSLEEP);
			return;
		}
		fcp = fmhdl->fh_acc_cache;
		fpp = &((ddi_acc_impl_t *)resource)->ahi_err->err_fep;
	}

	fep = kmem_cache_alloc(ndi_fm_entry_cache, KM_NOSLEEP);
	if (fep == NULL) {
		atomic_inc_64(&fmhdl->fh_kstat.fek_fmc_full.value.ui64);
		return;
	}

	/*
	 * Set up the handle resource and bus_specific information.
	 * Also remember the pointer back to the cache for quick removal.
	 */
	fep->fce_bus_specific = bus_specific;
	fep->fce_resource = resource;
	fep->fce_next = NULL;

	/* Add entry to the end of the active list */
	mutex_enter(&fcp->fc_lock);
	ASSERT(*fpp == NULL);
	*fpp = fep;
	fep->fce_prev = fcp->fc_tail;
	if (fcp->fc_tail != NULL)
		fcp->fc_tail->fce_next = fep;
	else
		fcp->fc_head = fep;
	fcp->fc_tail = fep;
	mutex_exit(&fcp->fc_lock);
}

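/*
 * Illustrative sketch only: an FM-capable nexus typically calls
 * ndi_fmc_insert() once a child's access handle has been set up, so
 * that a later ndi_fmc_error() sweep can check the cached handle.
 * The name example_cache_acc_handle and its arguments are
 * hypothetical; only ndi_fmc_insert() itself comes from above.
 */
static void
example_cache_acc_handle(dev_info_t *dip, ddi_acc_handle_t handle,
    void *bus_specific)
{
	/*
	 * The resource argument is the handle implementation itself;
	 * insertion is silently skipped unless the driver registered
	 * as DDI_FM_ACCCHK_CAPABLE.
	 */
	ndi_fmc_insert(dip, ACC_HANDLE, (void *)handle, bus_specific);
}
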
void *
iommulib_iommu_dmahdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	iommulib_nexops_t *nexops;

	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
	return (nexops->nops_dmahdl_getprivate(dip, rdip, handle));
}