/* * The first round search is to find: * 1) a VGA device. * 2) a PCI VGA compatible device whose IO space is enabled * and the VGA Enable bit of any PCI-PCI bridge above it is set. * If the first round search succeeds, prune the second round search. * * The second round seach does not check the VGA Enable bit. * * Return the device path as the console fb path. */ char * plat_fbpath(void) { struct find_fb_dev_param param; static char *fbpath = NULL; static char fbpath_buf[MAXPATHLEN]; /* first round search */ param.found_dip = NULL; param.vga_enable = 1; ddi_walk_devs(ddi_root_node(), find_fb_dev, ¶m); if (param.found_dip != NULL) { (void) ddi_pathname(param.found_dip, fbpath_buf); fbpath = fbpath_buf; return (fbpath); } /* * second round search, do not check the * PCI_BCNF_BCNTRL_VGA_ENABLE bit */ param.found_dip = NULL; param.vga_enable = 0; ddi_walk_devs(ddi_root_node(), find_fb_dev, ¶m); if (param.found_dip == NULL) return (NULL); (void) ddi_pathname(param.found_dip, fbpath_buf); fbpath = fbpath_buf; return (fbpath); }
/* * Return the dips or number of dips associated with a given interrupt block. * Size of dips array arg is passed in as dips_ret arg. * Number of dips returned is returned in dips_ret arg. * Array of dips gets returned in the dips argument. * Function returns number of dips existing for the given interrupt block. * * Note: this function assumes an enabled/valid INO, which is why it returns * the px node and (Internal) when it finds no other devices (and *devs_ret > 0) */ uint8_t pxtool_ib_get_ino_devs(px_t *px_p, uint32_t ino, uint32_t msi_num, uint8_t *devs_ret, pcitool_intr_dev_t *devs) { px_ib_t *ib_p = px_p->px_ib_p; px_ino_t *ino_p; px_ino_pil_t *ipil_p; px_ih_t *ih_p; uint32_t num_devs = 0; char pathname[MAXPATHLEN]; int i, j; mutex_enter(&ib_p->ib_ino_lst_mutex); ino_p = px_ib_locate_ino(ib_p, ino); if (ino_p != NULL) { for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p; ipil_p = ipil_p->ipil_next_p) { num_devs += ipil_p->ipil_ih_size; for (i = 0, ih_p = ipil_p->ipil_ih_head; ((i < ipil_p->ipil_ih_size) && (i < *devs_ret)); i++, j++, ih_p = ih_p->ih_next) { (void) ddi_pathname(ih_p->ih_dip, pathname); if (ih_p->ih_msg_code == msi_num) { num_devs = *devs_ret = 1; px_fill_in_intr_devs(&devs[0], (char *)ddi_driver_name( ih_p->ih_dip), pathname, ddi_get_instance(ih_p->ih_dip)); goto done; } px_fill_in_intr_devs(&devs[j], (char *)ddi_driver_name(ih_p->ih_dip), pathname, ddi_get_instance(ih_p->ih_dip)); } } *devs_ret = j; } else if (*devs_ret > 0) { (void) ddi_pathname(px_p->px_dip, pathname); strcat(pathname, " (Internal)"); px_fill_in_intr_devs(&devs[0], (char *)ddi_driver_name(px_p->px_dip), pathname, ddi_get_instance(px_p->px_dip)); num_devs = *devs_ret = 1; } done: mutex_exit(&ib_p->ib_ino_lst_mutex); return (num_devs); }
/*
 * Return the dips or number of dips associated with a given interrupt block.
 * Size of dips array arg is passed in as dips_ret arg.
 * Number of dips returned is returned in dips_ret arg.
 * Array of dips gets returned in the dips argument.
 * Function returns number of dips existing for the given interrupt block.
 */
uint8_t
ib_get_ino_devs(ib_t *ib_p, uint32_t ino, uint8_t *devs_ret,
    pcitool_intr_dev_t *devs)
{
	ib_ino_info_t	*ino_p;
	ib_ino_pil_t	*ipil_p;
	ih_t		*ih_p;
	uint32_t	num_devs = 0;	/* total handlers on the INO */
	int		i, j;

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = ib_locate_ino(ib_p, ino);
	if (ino_p != NULL) {
		/* j indexes the caller's devs[] across ALL pil lists */
		for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p;
		    ipil_p = ipil_p->ipil_next_p) {
			num_devs += ipil_p->ipil_ih_size;

			/*
			 * Fill devs[j], not devs[i]: i restarts at 0 for
			 * each pil list, so indexing by i overwrote the
			 * entries from earlier pil lists while *devs_ret
			 * was still set to the full count j.  Bound the
			 * output on j for the same reason.
			 */
			for (i = 0, ih_p = ipil_p->ipil_ih_head;
			    ((i < ipil_p->ipil_ih_size) && (j < *devs_ret));
			    i++, j++, ih_p = ih_p->ih_next) {
				(void) strlcpy(devs[j].driver_name,
				    ddi_driver_name(ih_p->ih_dip),
				    MAXMODCONFNAME);
				(void) ddi_pathname(ih_p->ih_dip,
				    devs[j].path);
				devs[j].dev_inst =
				    ddi_get_instance(ih_p->ih_dip);
			}
		}

		*devs_ret = j;
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);

	return (num_devs);
}
/*
 * iommulib_nex_open:
 *	Called on behalf of child 'rdip' of nexus 'dip' to bind the child
 *	to an IOMMU unit.  Each registered unit's probe entry point is
 *	asked whether it controls 'rdip'; on a match the unit handle is
 *	recorded on the child and reference counts are taken on both the
 *	unit and the nexus handle.
 *
 *	Returns DDI_SUCCESS when a unit claims the device, DDI_ENOTSUP
 *	when no unit does (or for the amd_iommu driver's own DMA), in
 *	which case the child's handle is marked IOMMU_HANDLE_UNUSED.
 */
int
iommulib_nex_open(dev_info_t *dip, dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "iommulib_nex_open";

	/* caller must have registered the nexus; child not yet bound */
	ASSERT(DEVI(dip)->devi_iommulib_nex_handle != NULL);
	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);

	/* prevent use of IOMMU for AMD IOMMU's DMA */
	if (strcmp(driver, "amd_iommu") == 0) {
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/*
	 * Use the probe entry point to determine in a hardware specific
	 * manner whether this dip is controlled by an IOMMU. If yes,
	 * return the handle corresponding to the IOMMU unit.
	 */
	mutex_enter(&iommulib_lock);
	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
			break;
	}

	if (unitp == NULL) {
		mutex_exit(&iommulib_lock);
		if (iommulib_debug) {
			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
			    "controlled by an IOMMU: path=%s", f, driver,
			    instance, (void *)rdip, ddi_pathname(rdip, buf));
			kmem_free(buf, MAXPATHLEN);
		}
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/*
	 * Record the binding under the unit lock (taken inside
	 * iommulib_lock - keep this lock order).
	 */
	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_nex = DEVI(dip)->devi_iommulib_nex_handle;
	unitp->ilu_ref++;
	DEVI(rdip)->devi_iommulib_handle = unitp;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	/* per-nexus count of children using iommulib */
	atomic_inc_uint(&DEVI(dip)->devi_iommulib_nex_handle->nex_ref);

	return (DDI_SUCCESS);
}
/*
 * Device-tree walk callback: decide whether 'dip' blocks a board
 * detach.  PCI capture/control class nodes and (when 'ref' is set)
 * referenced real devices mark the board busy via the error buffer
 * and terminate the walk; everything else lets the walk continue.
 */
static int
sbdp_check_dip(dev_info_t *dip, void *arg, uint_t ref)
{
	sbdp_ref_t	*sbrp = (sbdp_ref_t *)arg;
	char		*bname;

	if (dip == NULL)
		return (DDI_WALK_CONTINUE);

	ASSERT(sbrp->sep != NULL);
	ASSERT(sbrp->refcount != NULL);

	/* only real (hardware-backed) nodes matter */
	if (!sbdp_is_real_device(dip))
		return (DDI_WALK_CONTINUE);

	bname = ddi_binding_name(dip);

	/* these PCI class codes always make the board busy */
	if (strcmp(bname, "pciclass,060940") == 0 ||
	    strcmp(bname, "pciclass,060980") == 0) {
		(void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep));
		sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL);
		(*sbrp->refcount)++;
		return (DDI_WALK_TERMINATE);
	}

#ifdef DEBUG
	if (sbdp_bypass_device(bname))
		return (DDI_WALK_CONTINUE);
#endif

	if (ref == 0)
		return (DDI_WALK_CONTINUE);

	/* device is referenced: record it and stop walking */
	(*sbrp->refcount)++;
	SBDP_DBG_QR("\n%s (major# %d) is referenced\n", bname,
	    ddi_name_to_major(bname));
	(void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep));
	sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL);
	return (DDI_WALK_TERMINATE);
}
/*
 * sbd_attach_io:
 *	Configure the held device branch rooted at 'dip' and record its
 *	device path in the board's per-unit I/O path slot.  'ep' is
 *	unused (ARGSUSED).
 */
/*ARGSUSED*/
void
sbd_attach_io(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);

	/* branch must already be held by the caller */
	ASSERT(e_ddi_branch_held(dip));
	(void) e_ddi_branch_configure(dip, NULL, 0);

	/* path buffer is pre-allocated per unit */
	ASSERT(sbp->sb_iopath[unit] != NULL);
	(void) ddi_pathname(dip, sbp->sb_iopath[unit]);
}
/*
 * oplmsu_cmn_prt_pathname:
 *	Emit a NOTE-level debug message containing the /devices path of
 *	'dip' with a ':<letter>' minor suffix derived from the instance.
 */
void
oplmsu_cmn_prt_pathname(dev_info_t *dip)
{
	/*
	 * ddi_pathname(9F) requires a buffer of at least MAXPATHLEN
	 * bytes; the previous 128-byte buffers could be overrun by a
	 * deep device path.
	 */
	char pathname[MAXPATHLEN];
	char wrkbuf[MAXPATHLEN];

	(void) ddi_pathname(dip, wrkbuf);
	/*
	 * (The old code also rewrote the NUL at strlen(wrkbuf), which
	 * was a no-op and has been dropped.)  Use snprintf so the
	 * formatted result is bounded.
	 */
	(void) snprintf(pathname, sizeof (pathname), "/devices%s:%c",
	    wrkbuf, 'a' + ddi_get_instance(dip));

	DBG_PRINT((CE_NOTE, "oplmsu: debug-info: "
	    "Active path change to path => %s", pathname));
}
/*
 * Return the /devices path of the first USB serial device, or NULL if
 * none exists.  The path is computed once and cached for the lifetime
 * of the system (the MAXPATHLEN allocation is deliberately never
 * freed).
 */
static char *
plat_usbser_path(void)
{
	extern dev_info_t *usbser_first_device(void);

	static char	*cached_path = NULL;
	dev_info_t	*dip;

	/* fast path: already resolved on a prior call */
	if (cached_path != NULL)
		return (cached_path);

	dip = usbser_first_device();
	if (dip == NULL)
		return (NULL);

	cached_path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, cached_path);

	/* drop the hold taken by usbser_first_device() */
	ndi_rele_devi(dip);

	return (cached_path);
}
/*
 * iommulib_nex_close:
 *	Undo iommulib_nex_open() for 'rdip': clear the child's IOMMU
 *	handle and drop the references taken on the unit and on the
 *	owning nexus handle.  The unit's identity is captured under the
 *	locks for the optional debug message emitted afterwards.
 */
void
iommulib_nex_close(dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	const char *driver;
	int instance;
	uint32_t unitid;
	iommulib_nex_t *nexp;
	const char *f = "iommulib_nex_close";

	/* child must currently be bound to a unit */
	ASSERT(IOMMU_USED(rdip));

	unitp = DEVI(rdip)->devi_iommulib_handle;

	/* same lock order as iommulib_nex_open() */
	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);

	nexp = (iommulib_nex_t *)unitp->ilu_nex;
	DEVI(rdip)->devi_iommulib_handle = NULL;

	/* snapshot identity while the unit is still locked */
	unitid = unitp->ilu_unitid;
	driver = ddi_driver_name(unitp->ilu_dip);
	instance = ddi_get_instance(unitp->ilu_dip);

	unitp->ilu_ref--;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	atomic_dec_uint(&nexp->nex_ref);

	if (iommulib_debug) {
		char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(rdip, buf);
		cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
		    "unitid=%u rdip path = %s", f, driver, instance,
		    (void *)rdip, unitid, buf);
		kmem_free(buf, MAXPATHLEN);
	}
}
/*
 * plat_devpath:
 *	Resolve the first devinfo node bound to driver 'name' to a
 *	device path in the caller-supplied buffer 'path' (which must be
 *	at least MAXPATHLEN bytes for ddi_pathname()).  The node's
 *	parent hierarchy is attached and the node initialized first.
 *	Returns 'path' on success, NULL on any failure.
 */
static char *
plat_devpath(char *name, char *path)
{
	major_t major;
	dev_info_t *dip, *pdip;

	if ((major = ddi_name_to_major(name)) == (major_t)-1)
		return (NULL);

	/* first node on the driver's per-major list */
	if ((dip = devnamesp[major].dn_head) == NULL)
		return (NULL);

	/* bring ancestors up, then init this child, before pathname */
	pdip = ddi_get_parent(dip);
	if (i_ddi_attach_node_hierarchy(pdip) != DDI_SUCCESS)
		return (NULL);
	if (ddi_initchild(pdip, dip) != DDI_SUCCESS)
		return (NULL);

	(void) ddi_pathname(dip, path);

	return (path);
}
/*
 * plat_ttypath:
 *	Return the console tty path for serial port 'inum' (0 = ttya,
 *	1 = ttyb).  On pseudo-ISA systems a fixed path is returned;
 *	otherwise the asy driver's node list is searched for the node
 *	whose first minor name matches 'a' + inum.  Returns NULL if no
 *	matching attached node is found.
 */
static char *
plat_ttypath(int inum)
{
	static char *defaultpath[] = {
		"/isa/asy@1,3f8:a",
		"/isa/asy@1,2f8:b"
	};
	static char path[MAXPATHLEN];
	char *bp;
	major_t major;
	dev_info_t *dip;

	if (pseudo_isa)
		return (defaultpath[inum]);

	if ((major = ddi_name_to_major("asy")) == (major_t)-1)
		return (NULL);

	if ((dip = devnamesp[major].dn_head) == NULL)
		return (NULL);

	for (; dip != NULL; dip = ddi_get_next(dip)) {
		if (i_ddi_attach_node_hierarchy(dip) != DDI_SUCCESS)
			return (NULL);

		/* match on the first character of the minor name */
		if (DEVI(dip)->devi_minor->ddm_name[0] ==
		    ('a' + (char)inum))
			break;
	}
	if (dip == NULL)
		return (NULL);

	(void) ddi_pathname(dip, path);
	bp = path + strlen(path);
	/*
	 * size 3 bounds the suffix to ":<one char>" + NUL; asy minor
	 * names here are single letters ('a'/'b') so nothing is lost.
	 */
	(void) snprintf(bp, 3, ":%s", DEVI(dip)->devi_minor->ddm_name);

	return (path);
}
/*
 * usb_vprintf:
 *	Format a USBA log message ("<label><instance>:\t<msg>\n" or a
 *	timestamped variant) and deliver it to the in-memory debug ring
 *	buffer and/or cmn_err(9F), depending on 'level' and the
 *	usba_* tunables.  Serialized by usba_print_mutex.
 */
static void
usb_vprintf(dev_info_t *dip, int level, char *label, char *fmt, va_list ap)
{
	size_t len;
	int instance;
	char driver_name[USBA_DRVNAME_LEN];
	char *msg_ptr;

	if (usba_suppress_dprintf) {

		return;
	}

	*driver_name = '\0';
	mutex_enter(&usba_print_mutex);

	/*
	 * Check if we have a valid buf size?
	 * Suppress logging to usb_buffer if so.
	 */
	if (usba_debug_buf_size <= 0) {

		usba_buffer_dprintf = 0;
	}

	/*
	 * if there is label and dip, use <driver name><instance>:
	 * otherwise just use the label
	 */
	if (dip) {
		instance = ddi_get_instance(dip);
		(void) snprintf(driver_name, USBA_DRVNAME_LEN,
		    "%s%d", ddi_driver_name(dip), instance);
	}

	if (label == (char *)NULL) {
		len = snprintf(usba_print_buf, USBA_PRINT_BUF_LEN, "\t");
	} else if (usba_timestamp_dprintf) {
		/* microseconds since the previous message */
		hrtime_t t = gethrtime();
		hrtime_t elapsed = (t - usba_last_timestamp)/1000;
		usba_last_timestamp = t;

		if (dip) {

			len = snprintf(usba_print_buf, USBA_PRINT_BUF_LEN,
			    "+%lld->%p: %s%d: ", elapsed,
			    (void *)curthread, label, instance);
		} else {

			len = snprintf(usba_print_buf, USBA_PRINT_BUF_LEN,
			    "+%lld->%p: %s: ", elapsed,
			    (void *)curthread, label);
		}
	} else {
		if (dip) {
			len = snprintf(usba_print_buf, USBA_PRINT_BUF_LEN,
			    "%s%d:\t", label, instance);
		} else {
			len = snprintf(usba_print_buf, USBA_PRINT_BUF_LEN,
			    "%s:\t", label);
		}
	}

	/* msg_ptr points at the message body, after the prefix */
	msg_ptr = usba_print_buf + len;
	(void) vsnprintf(msg_ptr, USBA_PRINT_BUF_LEN - len - 2, fmt, ap);

	/* append newline; 2 bytes were reserved above for "\n\0" */
	len = min(strlen(usba_print_buf), USBA_PRINT_BUF_LEN - 2);
	usba_print_buf[len++] = '\n';
	usba_print_buf[len] = '\0';

	/*
	 * stuff the message in the debug buf
	 */
	if (usba_buffer_dprintf) {
		if (usba_debug_buf == NULL) {
			usba_debug_buf = kmem_alloc(
			    usba_debug_buf_size + USBA_DEBUG_SIZE_EXTRA_ALLOC,
			    KM_SLEEP);
			usba_clear_dprint_buf();
		} else if (usba_clear_debug_buf_flag) {
			usba_clear_dprint_buf();
			usba_clear_debug_buf_flag = 0;
		}

		/*
		 * overwrite >>>> that might be over the end of the
		 * the buffer
		 */
		*(usba_debug_buf + usba_debug_buf_size) = '\0';

		if ((usba_buf_sptr + len) > usba_buf_eptr) {
			/* wrap: split the copy around the ring end */
			size_t left = _PTRDIFF(usba_buf_eptr, usba_buf_sptr);

			bcopy(usba_print_buf, usba_buf_sptr, left);
			bcopy((caddr_t)usba_print_buf + left,
			    usba_debug_buf, len - left);
			usba_buf_sptr = usba_debug_buf + len - left;
		} else {
			bcopy(usba_print_buf, usba_buf_sptr, len);
			usba_buf_sptr += len;
		}
		/* add marker */
		(void) sprintf(usba_buf_sptr, ">>>>");
	}

	/*
	 * L4-L2 message may go to the log buf if not logged in usba_debug_buf
	 * L1 messages will go to the log buf in non-debug kernels and
	 * to console and log buf in debug kernels if usba_debug_chatty
	 * has been set
	 * L0 messages are warnings and will go to console and log buf and
	 * include the pathname, if available
	 */
	switch (level) {
	case USB_LOG_L4:
	case USB_LOG_L3:
	case USB_LOG_L2:
		if (!usba_buffer_dprintf) {
			cmn_err(CE_CONT, "^%s", usba_print_buf);
		}
		break;
	case USB_LOG_L1:
		if (dip) {
			char *pathname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
			if (pathname) {
				cmn_err(CE_CONT,
				    usba_debug_chatty ?
				    "%s (%s): %s" : "?%s (%s): %s",
				    ddi_pathname(dip, pathname),
				    driver_name, msg_ptr);
				kmem_free(pathname, MAXPATHLEN);
			} else {
				cmn_err(CE_CONT,
				    usba_debug_chatty ?
				    "%s" : "?%s", usba_print_buf);
			}
		} else {
			cmn_err(CE_CONT,
			    usba_debug_chatty ? "%s" : "?%s", usba_print_buf);
		}
		break;
	case USB_LOG_L0:
		/* Strip the "\n" added earlier */
		if (usba_print_buf[len - 1] == '\n') {
			usba_print_buf[len - 1] = '\0';
		}
		/*
		 * NOTE(review): 'len' is the length of the whole
		 * usba_print_buf, not of msg_ptr, so this index reaches
		 * past the message text (still inside the buffer) —
		 * confirm the intended index is strlen(msg_ptr) - 1.
		 */
		if (msg_ptr[len - 1] == '\n') {
			msg_ptr[len - 1] = '\0';
		}
		if (dip) {
			char *pathname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
			if (pathname) {
				cmn_err(CE_WARN,
				    "%s (%s): %s",
				    ddi_pathname(dip, pathname),
				    driver_name, msg_ptr);
				kmem_free(pathname, MAXPATHLEN);
			} else {
				/*
				 * Pass the message as an argument, not
				 * as the format string: a "%" in the
				 * logged text must not be interpreted
				 * by cmn_err().
				 */
				cmn_err(CE_WARN, "%s", usba_print_buf);
			}
		} else {
			cmn_err(CE_WARN, "%s", usba_print_buf);
		}
		break;
	}

	mutex_exit(&usba_print_mutex);
}
/*
 * acpinex_attach:
 *	attach(9E) entry point.  Allocates and initializes the per-
 *	instance soft state (device path, ACPI handle, lock), installs
 *	event handlers for descendants, sets pm/no-autodetach
 *	properties, and initializes FMA support.  DDI_RESUME is a no-op
 *	(no state to restore, see the "no-suspend-resume" property).
 */
static int
acpinex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance;
	acpinex_softstate_t *softsp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* Get and check instance number. */
	instance = ddi_get_instance(devi);
	if (instance >= ACPINEX_INSTANCE_MAX) {
		cmn_err(CE_WARN, "acpinex: instance number %d is out of range "
		    "in acpinex_attach(), max %d.",
		    instance, ACPINEX_INSTANCE_MAX - 1);
		return (DDI_FAILURE);
	}

	/* Get soft state structure. */
	if (ddi_soft_state_zalloc(acpinex_softstates, instance) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "!acpinex: failed to allocate soft state "
		    "object in acpinex_attach().");
		return (DDI_FAILURE);
	}
	softsp = ddi_get_soft_state(acpinex_softstates, instance);

	/* Initialize soft state structure */
	softsp->ans_dip = devi;
	(void) ddi_pathname(devi, softsp->ans_path);
	if (ACPI_FAILURE(acpica_get_handle(devi, &softsp->ans_hdl))) {
		ACPINEX_DEBUG(CE_WARN,
		    "!acpinex: failed to get ACPI handle for %s.",
		    softsp->ans_path);
		ddi_soft_state_free(acpinex_softstates, instance);
		return (DDI_FAILURE);
	}
	mutex_init(&softsp->ans_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Install event handler for child/descendant objects.
	 * A failure here is reported but does not fail the attach.
	 */
	if (acpinex_event_scan(softsp, B_TRUE) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!acpinex: failed to install event handler "
		    "for children of %s.", softsp->ans_path);
	}

	/* nothing to suspend/resume here */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "no-suspend-resume");
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, devi,
	    DDI_NO_AUTODETACH, 1);

	acpinex_fm_init(softsp);
	ddi_report_dev(devi);

	return (DDI_SUCCESS);
}
/*
 * e_devid_cache_register:
 *	Register 'devid' for the device path of 'dip' in the devid
 *	cache.  If the path is already cached the entry is reused
 *	(matching devid), replaced in place (missing/invalid devid, via
 *	the 'replace' label), or unlinked and re-added (mismatching
 *	devid).  The cache is marked dirty and the flush daemon is
 *	woken when a change was made.  Always returns DDI_SUCCESS.
 */
int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;	/* devid displaced via 'replace' */
	int pathlen;

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	/* copy the pathname into a right-sized allocation */
	fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, fullpath);
	pathlen = strlen(fullpath) + 1;
	path = kmem_alloc(pathlen, KM_SLEEP);
	bcopy(fullpath, path, pathlen);
	kmem_free(fullpath, MAXPATHLEN);

	DEVID_LOG_REG(("register", devid, path));

	/* pre-allocate before taking the lock; freed if unused */
	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(&dcfd->nvf_lock, RW_WRITER);

	for (np = NVF_DEVID_LIST(dcfd); np; np = NVP_DEVID_NEXT(np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
replace:			np->nvp_devid = new_devid;
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				NVF_MARK_DIRTY(dcfd);
				rw_exit(&dcfd->nvf_lock);
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * Replace cached devid for this path
				 * with newly registered devid. A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 */
				nfd_nvp_free_and_unlink(dcfd, NVPLIST(np));
				np = NULL;
				break;
			} else {
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(&dcfd->nvf_lock);
				/* nothing changed; free pre-allocations */
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	NVF_MARK_DIRTY(dcfd);
	nfd_nvp_link(dcfd, NVPLIST(new_nvp));

	rw_exit(&dcfd->nvf_lock);

exit:
	/* free the devid displaced by the 'replace' path, if any */
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (!devid_cache_write_disable)
		wake_nvpflush_daemon();

	return (DDI_SUCCESS);
}
/*
 * dr_io_status:
 *	Fill one sbd_dev_stat_t per present, initialized I/O unit in
 *	'devset' on the board of 'hp', including device path and
 *	reference/unsafe-device counts when a dip is available.
 *	Returns the number of status entries written, or -1 on a
 *	drmach_status() error (recorded on the unit).
 */
int
dr_io_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		i, ix;
	dr_board_t	*bp;
	sbd_io_stat_t	*isp;
	dr_io_unit_t	*ip;

	bp = hp->h_bd;

	/*
	 * Only look for requested devices that are actually present.
	 */
	devset &= DR_DEVS_PRESENT(bp);

	for (i = ix = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		drmachid_t	id;
		dev_info_t	*dip;
		sbd_error_t	*err;
		drmach_status_t	pstat;

		if (DEVSET_IN_SET(devset, SBD_COMP_IO, i) == 0)
			continue;

		ip = dr_get_io_unit(bp, i);

		if (ip->sbi_cm.sbdev_state == DR_STATE_EMPTY) {
			/* present, but not fully initialized */
			continue;
		}

		id = ip->sbi_cm.sbdev_id;
		if (id == (drmachid_t)0)
			continue;

		err = drmach_status(ip->sbi_cm.sbdev_id, &pstat);
		if (err) {
			DRERR_SET_C(&ip->sbi_cm.sbdev_error, &err);
			return (-1);
		}

		dip = NULL;
		err = drmach_get_dip(id, &dip);
		if (err) {
			/* catch this in debug kernels */
			ASSERT(0);

			sbd_err_clear(&err);
			continue;
		}

		isp = &dsp->d_io;
		bzero((caddr_t)isp, sizeof (*isp));
		isp->is_cm.c_id.c_type = ip->sbi_cm.sbdev_type;
		isp->is_cm.c_id.c_unit = ip->sbi_cm.sbdev_unum;
		/*
		 * strlcpy guarantees NUL termination; the previous
		 * strncpy could leave c_name unterminated when
		 * pstat.type exactly filled the field.
		 */
		(void) strlcpy(isp->is_cm.c_id.c_name, pstat.type,
		    sizeof (isp->is_cm.c_id.c_name));

		dr_get_comp_cond(ip, dip);
		isp->is_cm.c_cond = ip->sbi_cm.sbdev_cond;
		isp->is_cm.c_busy = ip->sbi_cm.sbdev_busy | pstat.busy;
		isp->is_cm.c_time = ip->sbi_cm.sbdev_time;
		isp->is_cm.c_ostate = ip->sbi_cm.sbdev_ostate;
		isp->is_cm.c_sflags = 0;

		if (dip == NULL) {
			isp->is_pathname[0] = '\0';
			isp->is_referenced = 0;
			isp->is_unsafe_count = 0;
		} else {
			int		refcount = 0, idx = 0;
			uint64_t	unsafe_devs[SBD_MAX_UNSAFE];

			ASSERT(e_ddi_branch_held(dip));
			(void) ddi_pathname(dip, isp->is_pathname);

			/* check reference and unsafe counts on devices */
			isp->is_unsafe_count = 0;
			dr_check_devices(dip, &refcount, hp, unsafe_devs,
			    &idx, SBD_MAX_UNSAFE);
			while (idx > 0) {
				isp->is_unsafe_list[idx - 1] =
				    unsafe_devs[idx - 1];
				--idx;
			}

			isp->is_referenced = (refcount == 0) ? 0 : 1;

			/*
			 * NOTE(review): h_err is cleared without being
			 * freed here — confirm dr_check_devices()
			 * consumes/owns any error it set.
			 */
			hp->h_err = NULL;
		}

		ix++;
		dsp++;
	}

	return (ix);
}
/*
 * Return 1 if instance block was assigned for the path.
 *
 * For multi-port NIC cards, sequential instance assignment across all
 * ports on a card is highly desirable since the ppa is typically the
 * same as the instance number, and the ppa is used in the NIC's public
 * /dev name.  This sequential assignment typically occurs as a result
 * of in_preassign_instance() after initial install, or by
 * i_ndi_init_hw_children() for NIC ports that share a common parent.
 *
 * Some NIC cards however use multi-function bridge chips, and to
 * support sequential instance assignment across all ports, without
 * disabling multi-threaded attach, we have a (currently) undocumented
 * hack to allocate instance numbers in contiguous blocks based on
 * driver.conf properties.
 *
 *                       ^
 *        /----------         ------------\
 *      pci@0                          pci@0,1      MULTI-FUNCTION BRIDGE CHIP
 *     /     \                        /       \
 * FJSV,e4ta@4  FJSV,e4ta@4,1   FJSV,e4ta@6  FJSV,e4ta@6,1    NIC PORTS
 *      n          n+1               n+2          n+3         INSTANCE
 *
 * For the above example, the following driver.conf properties would be
 * used to guarantee sequential instance number assignment.
 *
 * ddi-instance-blocks ="ib-FJSVe4ca", "ib-FJSVe4ta", "ib-generic";
 * ib-FJSVe4ca = "/pci@0/FJSV,e4ca@4", "/pci@0/FJSV,e4ca@4,1",
 *	"/pci@0,1/FJSV,e4ca@6", "/pci@0,1/FJSV,e4ca@6,1";
 * ib-FJSVe4ta = "/pci@0/FJSV,e4ta@4", "/pci@0/FJSV,e4ta@4,1",
 *	"/pci@0,1/FJSV,e4ta@6", "/pci@0,1/FJSV,e4ta@6,1";
 * ib-generic = "/pci@0/network@4", "/pci@0/network@4,1",
 *	"/pci@0,1/network@6", "/pci@0,1/network@6,1";
 *
 * The value of the 'ddi-instance-blocks' property references a series
 * of card specific properties, like 'ib-FJSVe4ta', whose value
 * defines a single 'instance block'.  The 'instance block' describes
 * all the paths below a multi-function bridge, where each path is
 * called an 'instance path'.  The 'instance block' property value is a
 * series of 'instance paths'.  The number of 'instance paths' in an
 * 'instance block' defines the size of the instance block, and the
 * ordering of the 'instance paths' defines the instance number
 * assignment order for paths going through the 'instance block'.
 *
 * In the instance assignment code below, if a (path, driver) that
 * currently has no instance number has a path that goes through an
 * 'instance block', then block instance number allocation occurs.  The
 * block allocation code will find a sequential set of unused instance
 * numbers, and assign instance numbers for all the paths in the
 * 'instance block'.  Each path is assigned a persistent instance
 * number, even paths that don't exist in the device tree or fail
 * probe(9E).
 */
static int
in_assign_instance_block(dev_info_t *dip)
{
	char		**ibn;		/* instance block names */
	uint_t		nibn;		/* number of instance block names */
	uint_t		ibni;		/* ibn index */
	char		*driver;
	major_t		major;
	char		*path;
	char		*addr;
	int		plen;
	char		**ibp;		/* instance block paths */
	uint_t		nibp;		/* number of paths in instance block */
	uint_t		ibpi;		/* ibp index */
	int		ibplen;		/* length of instance block path */
	char		*ipath;
	int		instance_base;
	int		splice;
	int		i;

	/* check for fresh install case (in miniroot) */
	if (DEVI(dip)->devi_instance != -1)
		return (0);		/* already assigned */

	/*
	 * Check to see if we need to allocate a block of contiguous instance
	 * numbers by looking for the 'ddi-instance-blocks' property.
	 */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "ddi-instance-blocks", &ibn, &nibn) != DDI_SUCCESS)
		return (0);		/* no instance block needed */

	/*
	 * Get information out about node we are processing.
	 *
	 * NOTE: Since the node is not yet at DS_INITIALIZED, ddi_pathname()
	 * will not return the unit-address of the final path component even
	 * though the node has an established devi_addr unit-address - so we
	 * need to add the unit-address by hand.
	 */
	driver = (char *)ddi_driver_name(dip);
	major = ddi_driver_major(dip);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);
	if ((addr = ddi_get_name_addr(dip)) != NULL) {
		(void) strcat(path, "@");
		(void) strcat(path, addr);
	}
	plen = strlen(path);

	/* loop through instance block names */
	for (ibni = 0; ibni < nibn; ibni++) {
		if (ibn[ibni] == NULL)
			continue;

		/* lookup instance block */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, ibn[ibni],
		    &ibp, &nibp) != DDI_SUCCESS) {
			/* typo fix: was "no devinition" */
			cmn_err(CE_WARN,
			    "no definition for instance block '%s' in %s.conf",
			    ibn[ibni], driver);
			continue;
		}

		/* Does 'path' go through this instance block? */
		for (ibpi = 0; ibpi < nibp; ibpi++) {
			if (ibp[ibpi] == NULL)
				continue;
			ibplen = strlen(ibp[ibpi]);
			if ((ibplen <= plen) &&
			    (strcmp(ibp[ibpi], path + plen - ibplen) == 0))
				break;
		}
		if (ibpi >= nibp) {
			ddi_prop_free(ibp);
			continue;	/* no; try next instance block */
		}

		/* yes, allocate and assign instances for all paths in block */

		/*
		 * determine where we splice in instance paths and verify
		 * that none of the paths are too long.
		 */
		splice = plen - ibplen;
		for (i = 0; i < nibp; i++) {
			if ((splice + strlen(ibp[i]) + 1) >= MAXPATHLEN) {
				cmn_err(CE_WARN,
				    "path %d through instance block '%s' from "
				    "%s.conf too long", i, ibn[ibni], driver);
				break;
			}
		}
		if (i < nibp) {
			ddi_prop_free(ibp);
			continue;	/* too long */
		}

		/* allocate the instance block - no more failures */
		instance_base = in_next_instance_block(major, nibp);

		ipath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		for (ibpi = 0; ibpi < nibp; ibpi++) {
			if (ibp[ibpi] == NULL)
				continue;
			(void) strcpy(ipath, path);
			(void) strcpy(ipath + splice, ibp[ibpi]);
			(void) in_pathin(ipath,
			    instance_base + ibpi, driver, NULL);
		}

		/* free allocations */
		kmem_free(ipath, MAXPATHLEN);
		ddi_prop_free(ibp);
		kmem_free(path, MAXPATHLEN);
		ddi_prop_free(ibn);

		/* notify devfsadmd to sync the path_to_inst file */
		mutex_enter(&e_ddi_inst_state.ins_serial);
		i_log_devfs_instance_mod();
		e_ddi_inst_state.ins_dirty = 1;
		mutex_exit(&e_ddi_inst_state.ins_serial);

		return (1);
	}

	/* our path did not go through any of the instance blocks */
	kmem_free(path, MAXPATHLEN);
	ddi_prop_free(ibn);
	return (0);
}
/*
 * If we don't already have a master SBBC selected,
 * get the <sbbc> property from the /chosen node. If
 * the pathname matches, this is the master SBBC and
 * we set up the console/TOD SRAM mapping here.
 */
static void
sbbc_chosen_init(sbbc_softstate_t *softsp)
{
	char	master_sbbc[MAXNAMELEN];
	/*
	 * NOTE(review): ddi_pathname(9F) documents a MAXPATHLEN-sized
	 * buffer; pn is MAXNAMELEN — confirm SBBC paths always fit.
	 */
	char	pn[MAXNAMELEN];
	int	nodeid, len;
	pnode_t	dnode;

	if (master_chosen != FALSE) {
		/*
		 * We've got one already
		 */
		return;
	}

	/*
	 * Get /chosen node info. prom interface will handle errors.
	 */
	dnode = prom_chosennode();

	/*
	 * Look for the "iosram" property on the chosen node with a prom
	 * interface as ddi_find_devinfo() couldn't be used (calls
	 * ddi_walk_devs() that creates one extra lock on the device tree).
	 */
	if (prom_getprop(dnode, IOSRAM_CHOSEN_PROP, (caddr_t)&nodeid) <= 0) {
		/*
		 * No I/O Board SBBC set up as console, what to do ?
		 */
		SBBC_ERR(CE_PANIC, "No SBBC found for Console/TOD \n");
	}

	if (prom_getprop(dnode, IOSRAM_TOC_PROP,
	    (caddr_t)&softsp->sram_toc) <= 0) {
		/*
		 * SRAM TOC Offset defaults to 0
		 */
		SBBC_ERR(CE_WARN, "No SBBC TOC Offset found\n");
		softsp->sram_toc = 0;
	}

	/*
	 * get the full OBP pathname of this node
	 */
	if (prom_phandle_to_path((phandle_t)nodeid, master_sbbc,
	    sizeof (master_sbbc)) < 0) {
		SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n",
		    nodeid);
	}
	SGSBBC_DBG_ALL("chosen pathname : %s\n", master_sbbc);
	SGSBBC_DBG_ALL("device pathname : %s\n",
	    ddi_pathname(softsp->dip, pn));

	/* is this instance the 'chosen' (master) SBBC? */
	if (strcmp(master_sbbc, ddi_pathname(softsp->dip, pn)) == 0) {
		/*
		 * map in the SBBC regs
		 */
		if (sbbc_map_regs(softsp) != DDI_SUCCESS) {
			SBBC_ERR(CE_PANIC, "Can't map the SBBC regs \n");
		}

		/*
		 * Only the 'chosen' node is used for iosram_read()/_write()
		 * Must initialise the tunnel before the console/tod
		 */
		if (iosram_tunnel_init(softsp) == DDI_FAILURE) {
			SBBC_ERR(CE_PANIC, "Can't create the SRAM <-> SC "
			    "comm. tunnel \n");
		}

		master_chosen = TRUE;

		/*
		 * Verify that an 'interrupts' property
		 * exists for this device
		 */
		if (ddi_getproplen(DDI_DEV_T_ANY, softsp->dip,
		    DDI_PROP_DONTPASS, "interrupts",
		    &len) != DDI_PROP_SUCCESS) {
			SBBC_ERR(CE_PANIC, "No 'interrupts' property for the "
			    "'chosen' SBBC \n");
		}

		/*
		 * add the interrupt handler
		 * NB: should this be a high-level interrupt ?
		 */
		if (sbbc_add_intr(softsp) == DDI_FAILURE) {
			SBBC_ERR(CE_PANIC, "Can't add interrupt handler for "
			    "'chosen' SBBC \n");
		}

		sbbc_enable_intr(softsp);

		/*
		 * Create the mailbox
		 */
		if (sbbc_mbox_create(softsp) != 0) {
			cmn_err(CE_WARN, "No IOSRAM MailBox created!\n");
		}
	}
}
/*
 * i_devi_to_promname:
 *	Compute the PROM (OBP) pathname for non-MDI-client 'dip' into
 *	'prom_path'.  If ddi_pathname_obp() can resolve it directly,
 *	that result is used.  Otherwise the path is rebuilt by walking
 *	up to the closest PROM-node ancestor and back down, preferring
 *	at each level an "alternate" PROM node bound to the same driver
 *	when one exists.  On success *alt_dipp is set to the alternate
 *	node at dip's level (or NULL).  Returns 0 on success, EINVAL
 *	for MDI clients or when no PROM ancestor is found within
 *	OBP_STACKDEPTH levels.
 */
static int
i_devi_to_promname(dev_info_t *dip, char *prom_path, dev_info_t **alt_dipp)
{
	dev_info_t *pdip, *cdip, *idip;
	char *unit_address, *nodename;
	major_t major;
	int depth, old_depth = 0;
	struct parinfo *parinfo = NULL;
	struct parinfo *info;
	int ret = 0;

	if (MDI_CLIENT(dip))
		return (EINVAL);

	if (ddi_pathname_obp(dip, prom_path) != NULL) {
		return (0);
	}
	/*
	 * ddi_pathname_obp return NULL, but the obp path still could
	 * be different with the devfs path name, so need use a parents
	 * stack to compose the path name string layer by layer.
	 */

	/* find the closest ancestor which is a prom node */
	pdip = dip;
	parinfo = kmem_alloc(OBP_STACKDEPTH * sizeof (*parinfo), KM_SLEEP);
	for (depth = 0; ndi_dev_is_prom_node(pdip) == 0; depth++) {
		if (depth == OBP_STACKDEPTH) {
			ret = EINVAL;
			/* must not have been an obp node */
			goto out;
		}
		pdip = get_parent(pdip, &parinfo[depth]);
	}
	old_depth = depth;
	ASSERT(pdip);	/* at least root is prom node */
	if (pdip)
		(void) ddi_pathname(pdip, prom_path);

	ndi_hold_devi(pdip);

	/*
	 * Walk back down the saved parent stack, appending one
	 * "/<node>[@<unit-address>]" component per level.
	 *
	 * NOTE(review): 'cdip' is only assigned inside the "if (pdip)"
	 * branch; if pdip were NULL on the first iteration, the
	 * trailing "pdip = cdip" would read an uninitialized value —
	 * the ASSERT above is the only guard.  Confirm pdip can never
	 * be NULL here in non-DEBUG kernels.
	 */
	for (depth = old_depth; depth > 0; depth--) {
		info = &parinfo[depth - 1];
		idip = info->dip;
		nodename = ddi_node_name(idip);
		unit_address = ddi_get_name_addr(idip);

		if (pdip) {
			major = ddi_driver_major(idip);
			cdip = find_alternate_node(pdip, major);
			ndi_rele_devi(pdip);
			if (cdip) {
				nodename = ddi_node_name(cdip);
			}
		}

		/*
		 * node name + unitaddr to the prom_path
		 */
		(void) strcat(prom_path, "/");
		(void) strcat(prom_path, nodename);
		if (unit_address && (*unit_address)) {
			(void) strcat(prom_path, "@");
			(void) strcat(prom_path, unit_address);
		}
		pdip = cdip;
	}

	if (pdip) {
		ndi_rele_devi(pdip); /* hold from find_alternate_node */
	}
	/*
	 * Now pdip is the alternate node which is same hierarchy as dip
	 * if it exists.
	 */
	*alt_dipp = pdip;
out:
	if (parinfo) {
		/* release holds from get_parent() */
		for (depth = old_depth; depth > 0; depth--) {
			info = &parinfo[depth - 1];
			if (info && info->pdip)
				ndi_rele_devi(info->pdip);
		}
		kmem_free(parinfo, OBP_STACKDEPTH * sizeof (*parinfo));
	}
	return (ret);
}