/*ARGSUSED*/
static int
sbdp_suspend_devices_exit(dev_info_t *dip, void *arg)
{
	struct dev_info *devi = DEVI(dip);

	ndi_devi_exit(dip, devi->devi_circular);
	return (DDI_WALK_CONTINUE);
}
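/*
 * Minimal sketch, not taken from this file: the enter-side walk
 * callback one would expect to pair with sbdp_suspend_devices_exit()
 * above when walking the tree with ddi_walk_devs().  It stashes the
 * circularity cookie in the per-node dev_info so the exit callback
 * can replay it.  The name sbdp_suspend_devices_enter is an
 * assumption.
 */
/*ARGSUSED*/
static int
sbdp_suspend_devices_enter(dev_info_t *dip, void *arg)
{
	struct dev_info *devi = DEVI(dip);

	ndi_devi_enter(dip, &devi->devi_circular);
	return (DDI_WALK_CONTINUE);
}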
/*
 * vdds_find_node -- A common function to find a NIU nexus or NIU node.
 */
static dev_info_t *
vdds_find_node(uint64_t cookie, dev_info_t *sdip,
    int (*match_func)(dev_info_t *dip, void *arg))
{
	vdds_cb_arg_t arg;
	dev_info_t *pdip;
	int circ;

	DBG1(NULL, "Called cookie=%lx\n", cookie);

	arg.dip = NULL;
	arg.cookie = cookie;

	if ((pdip = ddi_get_parent(sdip)) != NULL) {
		ndi_devi_enter(pdip, &circ);
	}
	ddi_walk_devs(sdip, match_func, (void *)&arg);
	if (pdip != NULL) {
		ndi_devi_exit(pdip, circ);
	}

	DBG1(NULL, "Returning dip=0x%p", arg.dip);
	return (arg.dip);
}
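/*
 * Hedged illustration, not original code: one plausible shape for a
 * match_func passed to vdds_find_node().  The "reg"-property check is
 * an assumption; the real matching criterion depends on the caller.
 */
static int
vdds_example_match(dev_info_t *dip, void *arg)
{
	vdds_cb_arg_t *warg = (vdds_cb_arg_t *)arg;

	/* assumed criterion: 64-bit "reg" property equals the cookie */
	if ((uint64_t)ddi_prop_get_int64(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "reg", 0) == warg->cookie) {
		warg->dip = dip;
		return (DDI_WALK_TERMINATE);
	}
	return (DDI_WALK_CONTINUE);
}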
/*
 * Check for a possible substitute node.  This routine searches the
 * children of parent_dip, looking for a node that:
 *	1. is a prom node
 *	2. binds to the same major number
 *	3. there is no need to verify that the unit-address information
 *	   matches, since it is likely that the substitute node will have
 *	   none (e.g. disk) - this would be the reason the framework
 *	   rejected it in the first place.
 *
 * Assumes parent_dip is held.
 */
static dev_info_t *
find_alternate_node(dev_info_t *parent_dip, major_t major)
{
	int circ;
	dev_info_t *child_dip;

	/* lock down parent to keep children from being removed */
	ndi_devi_enter(parent_dip, &circ);
	for (child_dip = ddi_get_child(parent_dip); child_dip != NULL;
	    child_dip = ddi_get_next_sibling(child_dip)) {

		/* look for obp node with matching major */
		if ((ndi_dev_is_prom_node(child_dip) != 0) &&
		    (ddi_driver_major(child_dip) == major)) {
			ndi_hold_devi(child_dip);
			break;
		}
	}
	ndi_devi_exit(parent_dip, circ);

	return (child_dip);
}
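/*
 * Hedged usage sketch (hypothetical caller, not in the original file):
 * find_alternate_node() returns its match held, so the caller must
 * drop that hold with ndi_rele_devi() when done.
 */
static void
example_use_alternate(dev_info_t *parent_dip, major_t major)
{
	dev_info_t *alt;

	if ((alt = find_alternate_node(parent_dip, major)) != NULL) {
		/* ... consult the prom node, e.g. ddi_node_name(alt) ... */
		ndi_rele_devi(alt);	/* drop hold from find_alternate_node() */
	}
}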
/*
 * Translate a devfs pathname to one that will be acceptable
 * to the prom.  In most cases, there is no translation needed.
 * For systems supporting generically named devices, the prom
 * may support nodes such as 'disk' that do not have any unit
 * address information (i.e. target,lun info).  If this is the
 * case, the ddi framework will reject the node as invalid and
 * populate the devinfo tree with nodes from the .conf file
 * (e.g. sd).  In this case, the names that show up in /devices
 * are sd - since the prom only knows about 'disk' nodes, this
 * routine detects this situation and does the conversion.
 * There are also cases such as pluto where the disk node in the
 * prom is named "SUNW,ssd" but in /devices the name is "ssd".
 *
 * If MPxIO is enabled, the translation involves following
 * pathinfo nodes to the "best" parent.
 *
 * Returns 0 on success with the new device string in ret_buf.
 * Otherwise returns the appropriate error code, as we may be
 * called from the openprom driver.
 */
int
i_devname_to_promname(char *dev_name, char *ret_buf, size_t len)
{
	dev_info_t *dip, *pdip, *cdip, *alt_dip = NULL;
	mdi_pathinfo_t *pip = NULL;
	char *dev_path, *prom_path;
	char *unit_address, *minorname, *nodename;
	major_t major;
	char *rptr, *optr, *offline;
	size_t olen, rlen;
	int circ;
	int ret = 0;

	/* do some sanity checks */
	if ((dev_name == NULL) || (ret_buf == NULL) ||
	    (strlen(dev_name) > MAXPATHLEN)) {
		return (EINVAL);
	}

	/*
	 * Convert to a /devices name.  Fail the translation if
	 * the name doesn't exist.
	 */
	dev_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	if (resolve_devfs_name(dev_name, dev_path) != 0 ||
	    strncmp(dev_path, "/devices/", 9) != 0) {
		kmem_free(dev_path, MAXPATHLEN);
		return (EINVAL);
	}
	dev_name = dev_path + sizeof ("/devices") - 1;

	bzero(ret_buf, len);

	if (prom_finddevice(dev_name) != OBP_BADNODE) {
		/* we are done */
		(void) snprintf(ret_buf, len, "%s", dev_name);
		kmem_free(dev_path, MAXPATHLEN);
		return (0);
	}

	/*
	 * If we get here, then some portion of the device path is
	 * not understood by the prom.  We need to look for alternate
	 * names (e.g. replace ssd by disk) and mpxio enabled devices.
	 */
	dip = e_ddi_hold_devi_by_path(dev_name, 0);
	if (dip == NULL) {
		cmn_err(CE_NOTE, "cannot find dip for %s", dev_name);
		kmem_free(dev_path, MAXPATHLEN);
		return (EINVAL);
	}

	prom_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	rlen = len;
	rptr = ret_buf;

	if (!MDI_CLIENT(dip)) {
		ret = i_devi_to_promname(dip, prom_path, &alt_dip);
		if (ret == 0) {
			minorname = strrchr(dev_name, ':');
			if (minorname && (minorname[1] != '\0')) {
				(void) strcat(prom_path, minorname);
			}
			(void) snprintf(rptr, rlen, "%s", prom_path);
		}
	} else {
		/*
		 * If we get here, dip is a vhci client.
		 */
		offline = kmem_zalloc(len, KM_SLEEP);	/* offline paths */
		olen = len;
		optr = offline;

		/*
		 * The following code assumes that the phci client is at
		 * leaf level.
		 */
		ndi_devi_enter(dip, &circ);
		while ((pip = mdi_get_next_phci_path(dip, pip)) != NULL) {
			/*
			 * walk all paths associated with the client node
			 */
			bzero(prom_path, MAXPATHLEN);

			/*
			 * replace with mdi_hold_path() when mpxio goes
			 * into genunix
			 */
			MDI_PI_LOCK(pip);
			MDI_PI_HOLD(pip);
			MDI_PI_UNLOCK(pip);

			if (mdi_pi_pathname_obp(pip, prom_path) != NULL) {
				/*
				 * The path has a different obp path
				 */
				goto minor_pathinfo;
			}

			pdip = mdi_pi_get_phci(pip);
			ndi_hold_devi(pdip);

			/*
			 * First get the obp path name of the phci node.
			 * NOTE: if the alternate node of pdip exists,
			 * the third argument of i_devi_to_promname()
			 * is set to the alternate node.
			 */
			(void) i_devi_to_promname(pdip, prom_path, &alt_dip);
			if (alt_dip != NULL) {
				ndi_rele_devi(pdip);
				pdip = alt_dip;
				ndi_hold_devi(pdip);
			}

			nodename = ddi_node_name(dip);
			unit_address = MDI_PI(pip)->pi_addr;

			major = ddi_driver_major(dip);
			cdip = find_alternate_node(pdip, major);

			if (cdip) {
				nodename = ddi_node_name(cdip);
			}

			/*
			 * Append the node name and unit address to
			 * prom_path.
			 */
			(void) strcat(prom_path, "/");
			(void) strcat(prom_path, nodename);
			if (unit_address && (*unit_address)) {
				(void) strcat(prom_path, "@");
				(void) strcat(prom_path, unit_address);
			}

			if (cdip) {
				/* hold from find_alternate_node */
				ndi_rele_devi(cdip);
			}
			ndi_rele_devi(pdip);
minor_pathinfo:
			minorname = strrchr(dev_name, ':');
			if (minorname && (minorname[1] != '\0')) {
				(void) strcat(prom_path, minorname);
			}

			if (MDI_PI_IS_ONLINE(pip)) {
				(void) snprintf(rptr, rlen, "%s", prom_path);
				rlen -= strlen(rptr) + 1;
				rptr += strlen(rptr) + 1;
				if (rlen <= 0)	/* drop paths we can't store */
					break;
			} else {	/* path is offline */
				(void) snprintf(optr, olen, "%s", prom_path);
				olen -= strlen(optr) + 1;
				if (olen > 0)	/* drop paths we can't store */
					optr += strlen(optr) + 1;
			}

			MDI_PI_LOCK(pip);
			MDI_PI_RELE(pip);
			if (MDI_PI(pip)->pi_ref_cnt == 0)
				cv_broadcast(&MDI_PI(pip)->pi_ref_cv);
			MDI_PI_UNLOCK(pip);
		}
		ndi_devi_exit(dip, circ);

		ret = 0;
		if (rlen > 0) {
			/* now add as much of offline to ret_buf as possible */
			bcopy(offline, rptr, rlen);
		}
		kmem_free(offline, len);
	}
	/* release hold from e_ddi_hold_devi_by_path() */
	ndi_rele_devi(dip);

	/* ensure the result is always double-NUL terminated */
	ret_buf[len - 1] = '\0';
	ret_buf[len - 2] = '\0';

	kmem_free(dev_path, MAXPATHLEN);
	kmem_free(prom_path, MAXPATHLEN);
	return (ret);
}
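/*
 * Hedged usage sketch (hypothetical caller): i_devname_to_promname()
 * expects a caller-owned result buffer; on success ret_buf carries one
 * or more NUL-separated prom paths, online paths first.
 */
static int
example_translate(char *dev_name)
{
	char *buf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	int err;

	if ((err = i_devname_to_promname(dev_name, buf, MAXPATHLEN)) == 0) {
		/* buf now holds the prom-visible path(s) */
	}
	kmem_free(buf, MAXPATHLEN);
	return (err);
}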
/*
 * Search for cached entries matching a devid.
 * Returns two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
    int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;
	int maxpaths = 0;

	ndevis = 0;
	npaths = 0;
	for (np = NVF_DEVID_LIST(dcfd); np; np = NVP_DEVID_NEXT(np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n", np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for
			 * this devid.  Place a hold on it to prevent detach.
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}
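/*
 * Hedged sketch of the sizing contract described above (hypothetical
 * caller; real callers also hold the devid cache lock): when the
 * return value exceeds retmax, the arrays were too small, so release
 * any holds and retry with the indicated size.
 */
static void
example_devid_lookup(ddi_devid_t devid)
{
	int max = 8, ndevis, npaths, want, i;
	dev_info_t **devis;
	char **paths;

	for (;;) {
		devis = kmem_alloc(max * sizeof (dev_info_t *), KM_SLEEP);
		paths = kmem_alloc(max * sizeof (char *), KM_SLEEP);
		want = e_devid_cache_devi_path_lists(devid, max,
		    &ndevis, devis, &npaths, paths);
		if (want <= max)
			break;
		for (i = 0; i < ndevis; i++)	/* too small: drop holds */
			ndi_rele_devi(devis[i]);
		kmem_free(devis, max * sizeof (dev_info_t *));
		kmem_free(paths, max * sizeof (char *));
		max = want;
	}
	/* ... use the ndevis dips and npaths paths ... */
	for (i = 0; i < ndevis; i++)
		ndi_rele_devi(devis[i]);
	kmem_free(devis, max * sizeof (dev_info_t *));
	kmem_free(paths, max * sizeof (char *));
}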
static void
dr_resume_devices(dev_info_t *start, dr_sr_handle_t *srh)
{
	dr_handle_t *handle;
	dev_info_t *dip, *next, *last = NULL;
	major_t major;
	char *bn;
	int circ;

	major = (major_t)-1;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != srh->sr_failed_dip) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == srh->sr_failed_dip) {
			/* release hold acquired in dr_suspend_devices() */
			srh->sr_failed_dip = NULL;
			ndi_rele_devi(dip);
		} else if (dr_is_real_device(dip) &&
		    srh->sr_failed_dip == NULL) {

			if ((bn = ddi_binding_name(dip)) != NULL) {
				major = ddi_name_to_major(bn);
			} else {
				bn = "<null>";
			}
			if (!dr_bypass_device(bn) &&
			    !drmach_verify_sr(dip, 0)) {
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!dr_resolve_devname(dip, d_name,
				    d_alias)) {
					if (d_alias[0] != 0) {
						prom_printf("\tresuming "
						    "%s@%s (aka %s)\n",
						    d_name, d_info, d_alias);
					} else {
						prom_printf("\tresuming "
						    "%s@%s\n", d_name, d_info);
					}
				} else {
					prom_printf("\tresuming %s@%s\n",
					    bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an e_code of ESBD_RESUME,
					 * and save the driver major
					 * number in the e_rsc.
					 */
					prom_printf("\tFAILED to resume %s@%s",
					    d_name[0] ? d_name : bn, d_info);

					srh->sr_err_idx =
					    dr_add_int(srh->sr_err_ints,
					    srh->sr_err_idx, DR_MAX_ERR_INT,
					    (uint64_t)major);

					handle = srh->sr_dr_handlep;

					dr_op_err(CE_IGNORE, handle,
					    ESBD_RESUME, "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
				}
			}
		}

		/* Hold parent busy while walking its children */
		ndi_devi_enter(dip, &circ);
		dr_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}
/* * The "dip" argument's parent (if it exists) must be held busy. */ static int dr_suspend_devices(dev_info_t *dip, dr_sr_handle_t *srh) { dr_handle_t *handle; major_t major; char *dname; int circ; /* * If dip is the root node, it has no siblings and it is * always held. If dip is not the root node, dr_suspend_devices() * will be invoked with the parent held busy. */ for (; dip != NULL; dip = ddi_get_next_sibling(dip)) { char d_name[40], d_alias[40], *d_info; ndi_devi_enter(dip, &circ); if (dr_suspend_devices(ddi_get_child(dip), srh)) { ndi_devi_exit(dip, circ); return (ENXIO); } ndi_devi_exit(dip, circ); if (!dr_is_real_device(dip)) continue; major = (major_t)-1; if ((dname = ddi_binding_name(dip)) != NULL) major = ddi_name_to_major(dname); if (dr_bypass_device(dname)) { PR_QR(" bypassed suspend of %s (major# %d)\n", dname, major); continue; } if (drmach_verify_sr(dip, 1)) { PR_QR(" bypassed suspend of %s (major# %d)\n", dname, major); continue; } if ((d_info = ddi_get_name_addr(dip)) == NULL) d_info = "<null>"; d_name[0] = 0; if (dr_resolve_devname(dip, d_name, d_alias) == 0) { if (d_alias[0] != 0) { prom_printf("\tsuspending %s@%s (aka %s)\n", d_name, d_info, d_alias); } else { prom_printf("\tsuspending %s@%s\n", d_name, d_info); } } else { prom_printf("\tsuspending %s@%s\n", dname, d_info); } if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) { prom_printf("\tFAILED to suspend %s@%s\n", d_name[0] ? d_name : dname, d_info); srh->sr_err_idx = dr_add_int(srh->sr_err_ints, srh->sr_err_idx, DR_MAX_ERR_INT, (uint64_t)major); ndi_hold_devi(dip); srh->sr_failed_dip = dip; handle = srh->sr_dr_handlep; dr_op_err(CE_IGNORE, handle, ESBD_SUSPEND, "%s@%s", d_name[0] ? d_name : dname, d_info); return (DDI_FAILURE); } } return (DDI_SUCCESS); }
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t *dip, *next, *last = NULL;
	char *bn;
	sbd_error_t *sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
		    SR_FAILED_DIP(srh) == NULL) {

			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			} else {
				bn = "<null>";	/* don't use bn uninitialized */
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
				    d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s (aka %s)\n",
						    d_name, d_info, d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s\n", d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
					    bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */
					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
					    "%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep, ESGT_RESUME, NULL);
				}
			}
		}

		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);

		last = dip;
	}
}

/*
 * True if thread is virtually stopped.  Similar to CPR_VSTOPPED
 * but from the DR point of view.  These user threads are waiting in
 * the kernel.  Once they return from the kernel, they will process
 * the stop signal and stop.
 */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&			\
	((t)->t_proc_flag & TP_CHKPT))

static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread;
		    tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);

		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);

		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {
				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
				    sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
		    cache_psargs, (void *)cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
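/*
 * Hedged sketch (hypothetical helper): both CPR_ISTOPPED() and
 * SBDP_VSTOPPED() read thread state, so they must be evaluated with
 * the thread locked, as sbdp_stop_user_threads() does above.
 */
static int
example_thread_quiesced(kthread_id_t tp)
{
	int stopped;

	thread_lock(tp);
	stopped = CPR_ISTOPPED(tp) || SBDP_VSTOPPED(tp);
	thread_unlock(tp);
	return (stopped);
}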
/*
 * Setup resource map for the pci bus node based on the "available"
 * property and "bus-range" property.
 */
int
pci_resource_setup(dev_info_t *dip)
{
	pci_regspec_t *regs;
	int rlen, rcount, i;
	char bus_type[16] = "(unknown)";
	int len;
	struct busnum_ctrl ctrl;
	int circular_count;
	int rval = NDI_SUCCESS;
	struct bus_range pci_bus_range;	/* declaration restored; used below */

	/*
	 * If this is a pci bus node then look for "available" property
	 * to find the available resources on this bus.
	 */
	len = sizeof (bus_type);
	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
		return (NDI_FAILURE);

	/* it is not a pci/pci-ex bus type */
	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
		return (NDI_FAILURE);

	/*
	 * The pci-hotplug project addresses adding the call
	 * to pci_resource_setup from the pci nexus driver.
	 * However, that project would initially be only for x86,
	 * so for sparc pcmcia-pci support we still need to call
	 * pci_resource_setup in the pcic driver.  Once all pci nexus
	 * drivers are updated to call pci_resource_setup, this portion
	 * of the code would really become an assert to make sure this
	 * function is not called for the same dip twice.
	 */
	{
		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
			return (NDI_FAILURE);
		}
	}

	/*
	 * Create empty resource maps first.
	 *
	 * NOTE: If all the allocated resources are already assigned to
	 * device(s) in the hot plug slot then the "available" property may
	 * not be present.  But a subsequent hot plug operation may
	 * unconfigure the device in the slot and try to free up its
	 * resources.  So, at the minimum we should create empty maps here.
	 */
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
	    NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* read the "available" property if it is available */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
		/*
		 * create the available resource list for both memory and
		 * io space
		 */
		rcount = rlen / sizeof (pci_regspec_t);
		for (i = 0; i < rcount; i++) {
			switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
			case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
				(void) ndi_ra_free(dip,
				    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
				    ((uint64_t)(regs[i].pci_phys_low)),
				    ((uint64_t)(regs[i].pci_size_hi) << 32) |
				    ((uint64_t)(regs[i].pci_size_low)),
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_IO):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    NDI_RA_TYPE_IO,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
				break;
			default:
				cmn_err(CE_WARN,
				    "pci_resource_setup: bad addr type: %x\n",
				    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
				break;
			}
		}
		kmem_free((caddr_t)regs, rlen);
	}

	/*
	 * update resource map for available bus numbers if the node
	 * has available-bus-range or bus-range property.
	 */
	len = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
	    DDI_SUCCESS) {
		/*
		 * Add bus numbers in the range to the free list.
		 */
		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
		    (uint64_t)pci_bus_range.hi -
		    (uint64_t)pci_bus_range.lo + 1,
		    NDI_RA_TYPE_PCI_BUSNUM, 0);
	} else {
		/*
		 * We don't have an available-bus-range property.  If,
		 * instead, we have a bus-range property we add all the bus
		 * numbers in that range to the free list, but we must then
		 * scan for pci-pci bridges on this bus to find out if any
		 * of those bus numbers are already in use.  If so, we can
		 * reclaim them.
		 */
		len = sizeof (struct bus_range);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "bus-range",
		    (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
			if (pci_bus_range.lo != pci_bus_range.hi) {
				/*
				 * Add bus numbers other than the secondary
				 * bus number to the free list.
				 */
				(void) ndi_ra_free(dip,
				    (uint64_t)pci_bus_range.lo + 1,
				    (uint64_t)pci_bus_range.hi -
				    (uint64_t)pci_bus_range.lo,
				    NDI_RA_TYPE_PCI_BUSNUM, 0);

				/* scan for pci-pci bridges */
				ctrl.rv = DDI_SUCCESS;
				ctrl.dip = dip;
				ctrl.range = &pci_bus_range;
				ndi_devi_enter(dip, &circular_count);
				ddi_walk_devs(ddi_get_child(dip),
				    claim_pci_busnum, (void *)&ctrl);
				ndi_devi_exit(dip, circular_count);
				if (ctrl.rv != DDI_SUCCESS) {
					/* failed to create the map */
					(void) ndi_ra_map_destroy(dip,
					    NDI_RA_TYPE_PCI_BUSNUM);
					rval = NDI_FAILURE;
				}
			}
		}
	}

#ifdef	BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, dip);
	}
#endif

	return (rval);
}
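/*
 * Hedged usage sketch (hypothetical nexus attach path): the maps built
 * by pci_resource_setup() are typically set up once per bus node and
 * torn down again via pci_resource_destroy() on detach.
 */
static int
example_nexus_attach(dev_info_t *dip)
{
	if (pci_resource_setup(dip) != NDI_SUCCESS)
		return (DDI_FAILURE);
	/* ... remaining nexus-specific initialization ... */
	return (DDI_SUCCESS);
}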