/*
 * Create a pci_pwr_chld_t structure for a given devinfo node.
 *
 * The new record is inserted at the head of pwr_p->pwr_info under
 * pwr_mutex.  Until pci_pwr_add_components() later learns the child's
 * components, the child holds the bus at full power (PWR_FP_HOLD).
 */
void
pci_pwr_create_info(pci_pwr_t *pwr_p, dev_info_t *dip)
{
	pci_pwr_chld_t *p;

	ASSERT(PM_CAPABLE(pwr_p));

	DEBUG2(DBG_PWR, ddi_get_parent(dip), "ADDING NEW PWR_INFO %s@%s\n",
	    ddi_node_name(dip), ddi_get_name_addr(dip));

	/* KM_SLEEP: allocation cannot fail, only block. */
	p = kmem_zalloc(sizeof (struct pci_pwr_chld), KM_SLEEP);
	p->dip = dip;

	mutex_enter(&pwr_p->pwr_mutex);
	/*
	 * Until components are created for this device, bus
	 * should be at full power since power of child device
	 * is unknown. Increment # children requiring "full power"
	 */
	p->flags |= PWR_FP_HOLD;
	pwr_p->pwr_fp++;

	/* Head insertion into the per-bus child list. */
	p->next = pwr_p->pwr_info;
	pwr_p->pwr_info = p;

	/* Re-evaluate the bus power level now that fp count changed. */
	pci_pwr_change(pwr_p, pwr_p->current_lvl, pci_pwr_new_lvl(pwr_p));
	mutex_exit(&pwr_p->pwr_mutex);
}
/*
 * Bus control ops entry point.
 *
 * Handled locally: DDI_CTLOPS_INITCHILD, DDI_CTLOPS_UNINITCHILD and
 * DDI_CTLOPS_REPORTDEV; every other request is forwarded to the parent
 * nexus via ddi_ctlops().
 */
static int
acpinex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result)
{
	int ret = DDI_SUCCESS;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		ret = init_child((dev_info_t *)arg);
		break;

	case DDI_CTLOPS_UNINITCHILD:
		impl_ddi_sunbus_removechild((dev_info_t *)arg);
		break;

	case DDI_CTLOPS_REPORTDEV:
		/* Cannot report without a requesting device node. */
		if (rdip == NULL)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?acpinex: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		break;

	default:
		/* Everything else is the parent's problem. */
		ret = ddi_ctlops(dip, rdip, op, arg, result);
		break;
	}

	return (ret);
}
/*
 * Recursive ascent
 *
 * This now only does half the job. It finds the node, then the caller
 * has to search the node for the binding name.
 *
 * Returns the in_node_t matching dip (NULL if no match) and stores the
 * matched node's parent through *ap.  When addr is NULL, dip's own unit
 * address is used for the comparison.  Caller must hold the instance
 * state busy (asserted below).
 */
static in_node_t *
in_devwalk(dev_info_t *dip, in_node_t **ap, char *addr)
{
	in_node_t *np;
	char *name;

	ASSERT(dip);
	ASSERT(e_ddi_inst_state.ins_busy);
	/* Recursion bottoms out at the devinfo root node. */
	if (dip == ddi_root_node()) {
		*ap = NULL;
		return (e_ddi_inst_state.ins_root);
	}
	/*
	 * call up to find parent, then look through the list of kids
	 * for a match
	 */
	np = in_devwalk(ddi_get_parent(dip), ap, NULL);
	if (np == NULL)
		return (np);
	*ap = np;	/* parent node found; report it to the caller */
	np = np->in_child;
	name = ddi_node_name(dip);
	if (addr == NULL)
		addr = ddi_get_name_addr(dip);
	/* Linear scan of the sibling list for name@addr. */
	while (np) {
		if (in_eqstr(np->in_node_name, name) &&
		    in_eqstr(np->in_unit_addr, addr)) {
			return (np);
		}
		np = np->in_sibling;
	}
	return (np);	/* NULL: no child of the parent matched */
}
/*
 * Undo ppb_initchild: drop power-management info for the child (if the
 * bridge is PM capable), clear its unit address, and strip minor nodes
 * and properties to return the node to prototype form.
 */
static void
ppb_removechild(dev_info_t *dip)
{
	ppb_devstate_t *ppb;

	/*
	 * NOTE(review): the soft state of the parent bridge is assumed to
	 * exist here -- ddi_get_soft_state() is not checked for NULL before
	 * the PM_CAPABLE dereference below; confirm a child can only reach
	 * UNINITCHILD while its parent's attach state is live.
	 */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(dip)));

	if (PM_CAPABLE(ppb->ppb_pwr_p)) {
		DEBUG2(DBG_PWR, ddi_get_parent(dip),
		    "UNINITCHILD: removing pwr_info for %s@%s\n",
		    ddi_node_name(dip), ddi_get_name_addr(dip));
		pci_pwr_rm_info(ppb->ppb_pwr_p, dip);
	}

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);
	impl_rem_dev_props(dip);
}
/*
 * pcmu_init_child
 *
 * This function is called from our control ops routine on a
 * DDI_CTLOPS_INITCHILD request. It builds and sets the device's
 * parent private data area.
 *
 * used by: pcmu_ctlops()
 *
 * return value: DDI_SUCCESS, or DDI_FAILURE if the child cannot be
 * named or its config space cannot be mapped.
 */
int
pcmu_init_child(pcmu_t *pcmu_p, dev_info_t *child)
{
	char name[10];
	ddi_acc_handle_t config_handle;
	uint8_t bcr;
	uint8_t header_type;

	/* Derive and install the child's unit address. */
	if (name_child(child, name, 10) != DDI_SUCCESS)
		return (DDI_FAILURE);
	ddi_set_name_addr(child, name);

	PCMU_DBG2(PCMU_DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	/*
	 * Map the child configuration space for initialization.
	 * We assume the obp will do the following in the devices
	 * config space:
	 *
	 * Set the latency-timer register to values appropriate
	 * for the devices on the bus (based on other devices
	 * MIN_GNT and MAX_LAT registers.
	 *
	 * Set the fast back-to-back enable bit in the command
	 * register if it's supported and all devices on the bus
	 * have the capability.
	 */
	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		ddi_set_name_addr(child, NULL);	/* undo naming on failure */
		return (DDI_FAILURE);
	}

	/*
	 * Determine the configuration header type.
	 */
	header_type = pci_config_get8(config_handle, PCI_CONF_HEADER);
	PCMU_DBG2(PCMU_DBG_INIT_CLD, pcmu_p->pcmu_dip, "%s: header_type=%x\n",
	    ddi_driver_name(child), header_type);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		bcr = pci_config_get8(config_handle, PCI_BCNF_BCNTRL);
		if (pcmu_command_default & PCI_COMM_PARITY_DETECT)
			bcr |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
		if (pcmu_command_default & PCI_COMM_SERR_ENABLE)
			bcr |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/* Always enable master-abort mode on bridges. */
		bcr |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		pci_config_put8(config_handle, PCI_BCNF_BCNTRL, bcr);
	}

	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
/*
 * Starting from the root node suspend all devices in the device tree.
 * Assumes that all devices have already been marked busy.
 *
 * Returns DDI_SUCCESS (0) when the whole subtree was suspended; a
 * non-zero value (ENXIO from a failed recursive call, DDI_FAILURE from
 * a failed detach at this level) otherwise -- callers, including the
 * recursive call below, only test for non-zero.
 */
static int
sbdp_suspend_devices_(dev_info_t *dip, sbdp_sr_handle_t *srh)
{
	major_t	major;
	char	*dname;

	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		char	d_name[40], d_alias[40], *d_info;

		/* Depth-first: suspend children before their parent. */
		if (sbdp_suspend_devices_(ddi_get_child(dip), srh)) {
			return (ENXIO);
		}

		if (!sbdp_is_real_device(dip))
			continue;

		major = (major_t)-1;
		if ((dname = DEVI(dip)->devi_binding_name) != NULL)
			major = ddi_name_to_major(dname);
		else
			dname = "<null>";	/* avoid NULL deref below */

#ifdef DEBUG
		if (sbdp_bypass_device(dname)) {
			SBDP_DBG_QR("bypassed suspend of %s (major# %d)\n",
			    dname, major);
			continue;
		}
#endif

		if ((d_info = ddi_get_name_addr(dip)) == NULL)
			d_info = "<null>";

		d_name[0] = 0;
		if (sbdp_resolve_devname(dip, d_name, d_alias) == 0) {
			if (d_alias[0] != 0) {
				SBDP_DBG_QR("\tsuspending %s@%s (aka %s)\n",
				    d_name, d_info, d_alias);
			} else {
				SBDP_DBG_QR("\tsuspending %s@%s\n",
				    d_name, d_info);
			}
		} else {
			SBDP_DBG_QR("\tsuspending %s@%s\n", dname, d_info);
		}

		if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
			/*
			 * Record the failing driver's major number and hold
			 * the dip; sbdp_resume_devices() releases the hold.
			 */
			(void) sprintf(sbdp_get_err_buf(&srh->sep),
			    "%d", major);

			sbdp_set_err(&srh->sep, ESGT_SUSPEND, NULL);
			ndi_hold_devi(dip);
			SR_FAILED_DIP(srh) = dip;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
/*
 * pcmu_report_dev
 *
 * Handle a DDI_CTLOPS_REPORTDEV request: print one boot-time line
 * identifying the child device as
 *
 *	PCI-device: <node>@<unit-addr>, <driver><instance>
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE when no device node was given.
 */
int
pcmu_report_dev(dev_info_t *dip)
{
	if (dip == NULL)
		return (DDI_FAILURE);

	cmn_err(CE_CONT, "?PCI-device: %s@%s, %s%d\n",
	    ddi_node_name(dip), ddi_get_name_addr(dip),
	    ddi_driver_name(dip), ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
/*
 * Allocate space for component state information in pci_pwr_chld_t.
 * Called with pwr_mutex held after the child's PM components exist;
 * on success this releases the full-power hold taken at create time.
 */
void
pci_pwr_add_components(pci_pwr_t *pwr_p, dev_info_t *cdip, pci_pwr_chld_t *p)
{
	int num_comps = PM_NUMCMPTS(cdip);
	int i;

	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));

	/*
	 * Assume the power level of a component is UNKNOWN until
	 * notified otherwise.
	 */
	if (num_comps > 0) {
		p->comp_pwr =
		    kmem_alloc(sizeof (int) * num_comps, KM_SLEEP);
		p->num_comps = num_comps;

		DEBUG3(DBG_PWR, ddi_get_parent(cdip),
		    "ADDING %d COMPONENTS FOR %s@%s\n", num_comps,
		    ddi_node_name(cdip), ddi_get_name_addr(cdip));
	} else {
		/*
		 * NOTE(review): on this early return the PWR_FP_HOLD taken
		 * in pci_pwr_create_info() is retained -- presumably so the
		 * bus stays at full power for a component-less child;
		 * confirm that is intentional.
		 */
		cmn_err(CE_WARN, "%s%d device has %d components",
		    ddi_driver_name(cdip), ddi_get_instance(cdip),
		    num_comps);
		return;
	}

	/*
	 * Release the fp hold that was made when the device
	 * was created.
	 */
	ASSERT((p->flags & PWR_FP_HOLD) == PWR_FP_HOLD);
	p->flags &= ~PWR_FP_HOLD;
	pwr_p->pwr_fp--;

	for (i = 0; i < num_comps; i++) {
		/*
		 * Initialize the component lvl so that the
		 * state reference counts will be updated correctly.
		 */
		p->comp_pwr[i] = PM_LEVEL_NOLEVEL;
		pci_pwr_update_comp(pwr_p, p, i, PM_LEVEL_UNKNOWN);
	}
}
/*
 * Bus control ops for the EoIB nexus.  INITCHILD/UNINITCHILD name and
 * un-name children, REPORTDEV logs the device (and, like ATTACH/DETACH/
 * POWER/SIDDEV/IOMIN, succeeds trivially); everything else is passed up
 * to the parent via ddi_ctlops().
 */
/*ARGSUSED*/
static int
eibnx_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	dev_info_t *cdip = arg;
	char addr[MAXNAMELEN];
	int rv;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		ENX_DPRINTF_DEBUG("EoIB device: %s@%s, %s%d",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		/*FALLTHROUGH*/
	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_DETACH:
	case DDI_CTLOPS_POWER:
	case DDI_CTLOPS_SIDDEV:
	case DDI_CTLOPS_IOMIN:
		rv = DDI_SUCCESS;
		break;

	case DDI_CTLOPS_INITCHILD:
		/* Only install the unit address if naming succeeded. */
		rv = eibnx_name_child(cdip, addr, sizeof (addr));
		if (rv == DDI_SUCCESS)
			ddi_set_name_addr(cdip, addr);
		break;

	case DDI_CTLOPS_UNINITCHILD:
		ddi_set_name_addr(cdip, NULL);
		rv = DDI_SUCCESS;
		break;

	default:
		rv = ddi_ctlops(dip, rdip, ctlop, arg, result);
		break;
	}

	return (rv);
}
/*
 * Retrieve the pci_pwr_chld_t structure for a given devinfo node.
 * Panics (CE_PANIC) if no record exists; caller holds pwr_mutex.
 */
pci_pwr_chld_t *
pci_pwr_get_info(pci_pwr_t *pwr_p, dev_info_t *dip)
{
	pci_pwr_chld_t *chld;

	ASSERT(PM_CAPABLE(pwr_p));
	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));

	chld = pwr_p->pwr_info;
	while (chld != NULL) {
		if (chld->dip == dip)
			return (chld);
		chld = chld->next;
	}

	cmn_err(CE_PANIC, "unable to find pwr info data for %s@%s",
	    ddi_node_name(dip), ddi_get_name_addr(dip));
	/*NOTREACHED*/
	return (NULL);
}
/*
 * Create "target"/"lun" properties and WWN block minor nodes for the
 * device.  Target and lun are parsed from the unit address when it has
 * the "t,l" digit form; otherwise fixed test values are used.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE on the first failed property or
 * minor-node creation.
 */
static int
gen_create_mn_disk_wwn(dev_info_t *devi)
{
	struct driver_minor_data *dmdp;
	int instance = ddi_get_instance(devi);
	char *address = ddi_get_name_addr(devi);
	int target, lun;

	/*
	 * Fix: guard against a NULL unit address before dereferencing --
	 * ddi_get_name_addr() may return NULL; fall into the test-value
	 * branch in that case instead of crashing.
	 */
	if (address != NULL && address[0] >= '0' && address[0] <= '9' &&
	    strchr(address, ',')) {
		target = atod(address);
		address = strchr(address, ',');
		lun = atod(++address);
	} else {
		/* this hack is for rm_stale_link() testing */
		target = 10;
		lun = 5;
	}

	if (ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    "target", (caddr_t)&target, sizeof (int)) != DDI_PROP_SUCCESS) {
		return (DDI_FAILURE);
	}
	if (ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    "lun", (caddr_t)&lun, sizeof (int)) != DDI_PROP_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* One minor node per entry in the disk_minor_data table. */
	for (dmdp = disk_minor_data; dmdp->name != NULL; dmdp++) {
		if (ddi_create_minor_node(devi, dmdp->name, dmdp->type,
		    (INST_TO_MINOR(instance)) | dmdp->minor,
		    DDI_NT_BLOCK_WWN, NULL) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}
	return (DDI_SUCCESS);
}
/*
 * Return 1 if instance block was assigned for the path.
 *
 * For multi-port NIC cards, sequential instance assignment across all
 * ports on a card is highly desirable since the ppa is typically the
 * same as the instance number, and the ppa is used in the NIC's public
 * /dev name. This sequential assignment typically occurs as a result
 * of in_preassign_instance() after initial install, or by
 * i_ndi_init_hw_children() for NIC ports that share a common parent.
 *
 * Some NIC cards however use multi-function bridge chips, and to
 * support sequential instance assignment across all ports, without
 * disabling multi-threaded attach, we have a (currently) undocumented
 * hack to allocate instance numbers in contiguous blocks based on
 * driver.conf properties.
 *
 *                    ^
 *      /---------- ------------\
 *   pci@0          pci@0,1	   MULTI-FUNCTION BRIDGE CHIP
 *  /     \        /       \
 * FJSV,e4ta@4 FJSV,e4ta@4,1  FJSV,e4ta@6 FJSV,e4ta@6,1	  NIC PORTS
 *	n	 n+2		n+2	    n+3		  INSTANCE
 *
 * For the above example, the following driver.conf properties would be
 * used to guarantee sequential instance number assignment.
 *
 * ddi-instance-blocks ="ib-FJSVe4ca", "ib-FJSVe4ta", "ib-generic";
 * ib-FJSVe4ca = "/pci@0/FJSV,e4ca@4", "/pci@0/FJSV,e4ca@4,1",
 *		 "/pci@0,1/FJSV,e4ca@6", "/pci@0,1/FJSV,e4ca@6,1";
 * ib-FJSVe4ta = "/pci@0/FJSV,e4ta@4", "/pci@0/FJSV,e4ta@4,1",
 *		 "/pci@0,1/FJSV,e4ta@6", "/pci@0,1/FJSV,e4ta@6,1";
 * ib-generic =  "/pci@0/network@4", "/pci@0/network@4,1",
 *		 "/pci@0,1/network@6", "/pci@0,1/network@6,1";
 *
 * The value of the 'ddi-instance-blocks' property references a series
 * of card specific properties, like 'ib-FJSV-e4ta', whose value
 * defines a single 'instance block'. The 'instance block' describes
 * all the paths below a multi-function bridge, where each path is
 * called an 'instance path'. The 'instance block' property value is a
 * series of 'instance paths'. The number of 'instance paths' in an
 * 'instance block' defines the size of the instance block, and the
 * ordering of the 'instance paths' defines the instance number
 * assignment order for paths going through the 'instance block'.
 *
 * In the instance assignment code below, if a (path, driver) that
 * currently has no instance number has a path that goes through an
 * 'instance block', then block instance number allocation occurs. The
 * block allocation code will find a sequential set of unused instance
 * numbers, and assign instance numbers for all the paths in the
 * 'instance block'. Each path is assigned a persistent instance
 * number, even paths that don't exist in the device tree or fail
 * probe(9E).
 */
static int
in_assign_instance_block(dev_info_t *dip)
{
	char **ibn;		/* instance block names */
	uint_t nibn;		/* number of instance block names */
	uint_t ibni;		/* ibn index */
	char *driver;
	major_t major;
	char *path;
	char *addr;
	int plen;
	char **ibp;		/* instance block paths */
	uint_t nibp;		/* number of paths in instance block */
	uint_t ibpi;		/* ibp index */
	int ibplen;		/* length of instance block path */
	char *ipath;
	int instance_base;
	int splice;
	int i;

	/* check for fresh install case (in miniroot) */
	if (DEVI(dip)->devi_instance != -1)
		return (0);	/* already assigned */

	/*
	 * Check to see if we need to allocate a block of contiguous instance
	 * numbers by looking for the 'ddi-instance-blocks' property.
	 */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "ddi-instance-blocks", &ibn, &nibn) != DDI_SUCCESS)
		return (0);	/* no instance block needed */

	/*
	 * Get information out about node we are processing.
	 *
	 * NOTE: Since the node is not yet at DS_INITIALIZED, ddi_pathname()
	 * will not return the unit-address of the final path component even
	 * though the node has an established devi_addr unit-address - so we
	 * need to add the unit-address by hand.
	 */
	driver = (char *)ddi_driver_name(dip);
	major = ddi_driver_major(dip);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);
	if ((addr = ddi_get_name_addr(dip)) != NULL) {
		(void) strcat(path, "@");
		(void) strcat(path, addr);
	}
	plen = strlen(path);

	/* loop through instance block names */
	for (ibni = 0; ibni < nibn; ibni++) {
		if (ibn[ibni] == NULL)
			continue;

		/* lookup instance block */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, ibn[ibni],
		    &ibp, &nibp) != DDI_SUCCESS) {
			/*
			 * NOTE(review): "devinition" typo below is in the
			 * runtime message string; left untouched here.
			 */
			cmn_err(CE_WARN,
			    "no devinition for instance block '%s' in %s.conf",
			    ibn[ibni], driver);
			continue;
		}

		/* Does 'path' go through this instance block? */
		for (ibpi = 0; ibpi < nibp; ibpi++) {
			if (ibp[ibpi] == NULL)
				continue;
			/* instance path must be a suffix of 'path' */
			ibplen = strlen(ibp[ibpi]);
			if ((ibplen <= plen) &&
			    (strcmp(ibp[ibpi],
			    path + plen - ibplen) == 0))
				break;
		}
		if (ibpi >= nibp) {
			ddi_prop_free(ibp);
			continue;	/* no; try next instance block */
		}

		/* yes, allocate and assign instances for all paths in block */

		/*
		 * determine where we splice in instance paths and verify
		 * that none of the paths are too long.
		 */
		splice = plen - ibplen;
		for (i = 0; i < nibp; i++) {
			if ((splice + strlen(ibp[i])+ 1) >= MAXPATHLEN) {
				cmn_err(CE_WARN,
				    "path %d through instance block '%s' from "
				    "%s.conf too long", i, ibn[ibni], driver);
				break;
			}
		}
		if (i < nibp) {
			ddi_prop_free(ibp);
			continue;	/* too long */
		}

		/* allocate the instance block - no more failures */
		instance_base = in_next_instance_block(major, nibp);

		/* assign sequential instances down each instance path */
		ipath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		for (ibpi = 0; ibpi < nibp; ibpi++) {
			if (ibp[ibpi] == NULL)
				continue;
			(void) strcpy(ipath, path);
			(void) strcpy(ipath + splice, ibp[ibpi]);
			(void) in_pathin(ipath,
			    instance_base + ibpi, driver, NULL);
		}

		/* free allocations */
		kmem_free(ipath, MAXPATHLEN);
		ddi_prop_free(ibp);
		kmem_free(path, MAXPATHLEN);
		ddi_prop_free(ibn);

		/* notify devfsadmd to sync of path_to_inst file */
		mutex_enter(&e_ddi_inst_state.ins_serial);
		i_log_devfs_instance_mod();
		e_ddi_inst_state.ins_dirty = 1;
		mutex_exit(&e_ddi_inst_state.ins_serial);

		return (1);
	}

	/* our path did not go through any of the instance blocks */
	kmem_free(path, MAXPATHLEN);
	ddi_prop_free(ibn);
	return (0);
}
/*
 * Look up an instance number for a dev_info node, and assign one if it does
 * not have one (the dev_info node has devi_name and devi_addr already set).
 */
uint_t
e_ddi_assign_instance(dev_info_t *dip)
{
	char *name;
	in_node_t *ap, *np;
	in_drv_t *dp;
	major_t major;
	uint_t ret;
	char *bname;

	/*
	 * Allow implementation to override
	 */
	if ((ret = impl_assign_instance(dip)) != (uint_t)-1)
		return (ret);

	/*
	 * If this is a pseudo-device, use the instance number
	 * assigned by the pseudo nexus driver. The mutex is
	 * not needed since the instance tree is not used.
	 */
	if (is_pseudo_device(dip)) {
		return (ddi_get_instance(dip));
	}

	/*
	 * Only one thread is allowed to change the state of the instance
	 * number assignments on the system at any given time.
	 */
	e_ddi_enter_instance();

	/*
	 * Look for instance node, allocate one if not found
	 */
	np = in_devwalk(dip, &ap, NULL);
	if (np == NULL) {
		/*
		 * Block allocation (driver.conf instance blocks) may have
		 * created the node; otherwise allocate and insert one.
		 */
		if (in_assign_instance_block(dip)) {
			np = in_devwalk(dip, &ap, NULL);
		} else {
			name = ddi_node_name(dip);
			np = in_alloc_node(name, ddi_get_name_addr(dip));
			ASSERT(np != NULL);
			in_enlist(ap, np);	/* insert into tree */
		}
	}
	ASSERT(np == in_devwalk(dip, &ap, NULL));

	/*
	 * Look for driver entry, allocate one if not found
	 */
	bname = (char *)ddi_driver_name(dip);
	dp = in_drvwalk(np, bname);
	if (dp == NULL) {
		dp = in_alloc_drv(bname);
		ASSERT(dp != NULL);
		major = ddi_driver_major(dip);
		ASSERT(major != DDI_MAJOR_T_NONE);
		in_endrv(np, dp);
		in_set_instance(dip, dp, major);
		/* provisional until the driver successfully attaches */
		dp->ind_state = IN_PROVISIONAL;
		in_hashdrv(dp);
	}

	ret = dp->ind_instance;

	e_ddi_exit_instance();
	return (ret);
}
/*
 * attach the module
 *
 * DDI_ATTACH allocates per-instance soft state, registers the instance
 * as an mpxio pHCI (the vHCI class is taken from the part of the unit
 * address after the ','), and creates the devctl minor node.
 * DDI_RESUME/DDI_PM_RESUME are no-ops; anything else fails.
 */
static int
tphci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char *vclass;
	int instance, phci_regis = 0;
	struct tphci_state *phci = NULL;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		return (0);	/* nothing to do */

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Allocate phci data structure.
	 */
	if (ddi_soft_state_zalloc(tphci_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	phci = ddi_get_soft_state(tphci_state, instance);
	ASSERT(phci != NULL);
	phci->dip = dip;

	/* bus_addr has the form #,<vhci_class> */
	vclass = strchr(ddi_get_name_addr(dip), ',');
	if (vclass == NULL || vclass[1] == '\0') {
		cmn_err(CE_NOTE, "tphci invalid bus_addr %s",
		    ddi_get_name_addr(dip));
		goto attach_fail;
	}

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_phci_register(vclass + 1, dip, 0) != MDI_SUCCESS) {
		cmn_err(CE_WARN, "%s mdi_phci_register failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}
	/* remember registration so the failure path can undo it */
	phci_regis++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    instance, DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s ddi_create_minor_node failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}

	/* keep the instance attached once it is a registered pHCI */
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    DDI_NO_AUTODETACH, 1);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	/* unwind in reverse order of the setup above */
	if (phci_regis)
		(void) mdi_phci_unregister(dip, 0);

	ddi_soft_state_free(tphci_state, instance);
	return (DDI_FAILURE);
}
/*
 * The function is to get the prom name for a non-client dip node.
 * It will also set *alt_dipp to dip's alternate node, if one exists
 * (which must be a PROM node).
 *
 * Returns 0 on success (prom_path filled in) or an errno value.
 */
static int
i_devi_to_promname(dev_info_t *dip, char *prom_path, dev_info_t **alt_dipp)
{
	dev_info_t *pdip, *cdip, *idip;
	char *unit_address, *nodename;
	major_t major;
	int depth, old_depth = 0;
	struct parinfo *parinfo = NULL;
	struct parinfo *info;
	int ret = 0;

	/* MDI clients are handled elsewhere */
	if (MDI_CLIENT(dip))
		return (EINVAL);

	if (ddi_pathname_obp(dip, prom_path) != NULL) {
		return (0);
	}

	/*
	 * ddi_pathname_obp return NULL, but the obp path still could
	 * be different with the devfs path name, so need use a parents
	 * stack to compose the path name string layer by layer.
	 */

	/* find the closest ancestor which is a prom node */
	pdip = dip;
	parinfo = kmem_alloc(OBP_STACKDEPTH * sizeof (*parinfo), KM_SLEEP);
	for (depth = 0; ndi_dev_is_prom_node(pdip) == 0; depth++) {
		if (depth == OBP_STACKDEPTH) {
			ret = EINVAL;
			/* must not have been an obp node */
			goto out;
		}
		/* get_parent() records and holds each ancestor */
		pdip = get_parent(pdip, &parinfo[depth]);
	}
	old_depth = depth;
	ASSERT(pdip); /* at least root is prom node */
	if (pdip)
		(void) ddi_pathname(pdip, prom_path);
	ndi_hold_devi(pdip);

	/* walk back down, appending one name@addr component per level */
	for (depth = old_depth; depth > 0; depth--) {
		info = &parinfo[depth - 1];
		idip = info->dip;
		nodename = ddi_node_name(idip);
		unit_address = ddi_get_name_addr(idip);

		/*
		 * NOTE(review): cdip is only assigned when pdip is non-NULL;
		 * once pdip goes NULL the "pdip = cdip" below re-uses the
		 * previous (NULL) value, which keeps the if skipped on later
		 * iterations -- confirm first iteration always has pdip set
		 * (the hold/ASSERT above implies it).
		 */
		if (pdip) {
			major = ddi_driver_major(idip);
			cdip = find_alternate_node(pdip, major);
			ndi_rele_devi(pdip);
			if (cdip) {
				nodename = ddi_node_name(cdip);
			}
		}

		/*
		 * node name + unitaddr to the prom_path
		 */
		(void) strcat(prom_path, "/");
		(void) strcat(prom_path, nodename);
		if (unit_address && (*unit_address)) {
			(void) strcat(prom_path, "@");
			(void) strcat(prom_path, unit_address);
		}
		pdip = cdip;
	}

	if (pdip) {
		ndi_rele_devi(pdip); /* hold from find_alternate_node */
	}

	/*
	 * Now pdip is the alternate node which is same hierarchy as dip
	 * if it exists.
	 */
	*alt_dipp = pdip;

out:
	if (parinfo) {
		/* release holds from get_parent() */
		for (depth = old_depth; depth > 0; depth--) {
			info = &parinfo[depth - 1];
			if (info && info->pdip)
				ndi_rele_devi(info->pdip);
		}
		kmem_free(parinfo, OBP_STACKDEPTH * sizeof (*parinfo));
	}
	return (ret);
}
/*
 * Resume (re-attach) all devices in the subtree rooted at 'start', in
 * reverse device-tree order.  Any error is recorded in srh->sep; the
 * hold taken on a failed dip by sbdp_suspend_devices() is released when
 * that dip is reached.
 */
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t *dip, *next, *last = NULL;
	char *bn;
	sbd_error_t *sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);

		/* advance to the last sibling not resumed in a prior pass */
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
		    SR_FAILED_DIP(srh) == NULL) {
			/*
			 * Fix: bn was previously left uninitialized when the
			 * node had no binding name, then passed to
			 * sbdp_bypass_device() and the printfs below.  Use
			 * the same "<null>" fallback as dr_resume_devices().
			 */
			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			} else {
				bn = "<null>";
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
				    d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s (aka %s)\n",
						    d_name, d_info, d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s\n", d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
					    bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */
					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
					    "%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}

		/* Hold this node busy while resuming its children. */
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);

		last = dip;
	}
}

/*
 * True if thread is virtually stopped. Similar to CPR_VSTOPPED
 * but from DR point of view. These user threads are waiting in
 * the kernel. Once they return from kernel, they will process
 * the stop signal and stop.
 */
/* "virtually stopped": asleep in the kernel with the checkpoint flag set */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&			\
	((t)->t_proc_flag & TP_CHKPT))

/*
 * Stop (checkpoint) all user threads prior to DR suspend.  Retries a
 * few times to get past fork etc.; returns DDI_SUCCESS when every
 * thread stopped (or is tolerably blocked), ESRCH otherwise with the
 * offending process recorded in srh->sep.
 */
static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t 	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread;
		    tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				/*
				 * Drop locks to register the stop request,
				 * then re-acquire them to post the AST.
				 */
				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);

		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);

		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {
				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
				    sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
		    cache_psargs, cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *
 * All others passed to parent.
 */
static int
acebus_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result)
{
#ifdef DEBUG
	ebus_devstate_t *ebus_p = get_acebus_soft_state(ddi_get_instance(dip));
#endif
	ebus_regspec_t *ebus_rp;
	int32_t reglen;
	int i, n;
	/*
	 * Fix: "%x,%x" can produce up to 17 characters plus NUL
	 * (two 32-bit values), which overflowed the old 10-byte buffer;
	 * size it for the worst case and format with snprintf.
	 */
	char name[32];

	switch (op) {
	case DDI_CTLOPS_INITCHILD: {
		dev_info_t *child = (dev_info_t *)arg;
		/*
		 * Set the address portion of the node name based on the
		 * address/offset.
		 */
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_INITCHILD: rdip=%s%d\n",
		    ddi_get_name(child), ddi_get_instance(child));

		if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&ebus_rp, &reglen) != DDI_SUCCESS) {
			DBG(D_CTLOPS, ebus_p, "can't get reg property\n");
			return (DDI_FAILURE);
		}

		(void) snprintf(name, sizeof (name), "%x,%x",
		    ebus_rp->addr_hi, ebus_rp->addr_low);
		ddi_set_name_addr(child, name);
		kmem_free((caddr_t)ebus_rp, reglen);
		ddi_set_parent_data(child, NULL);
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_UNINITCHILD:
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_UNINITCHILD: rdip=%s%d\n",
		    ddi_get_name((dev_info_t *)arg),
		    ddi_get_instance((dev_info_t *)arg));
		/* strip the node back to prototype form */
		ddi_set_name_addr((dev_info_t *)arg, NULL);
		ddi_remove_minor_node((dev_info_t *)arg, NULL);
		impl_rem_dev_props((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REPORTDEV:
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_REPORTDEV: rdip=%s%d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip));
		cmn_err(CE_CONT, "?%s%d at %s%d: offset %s\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    ddi_get_name_addr(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_REGSIZE: rdip=%s%d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip));

		if (getprop(rdip, "reg", &ebus_rp, &i) != DDI_SUCCESS) {
			DBG(D_CTLOPS, ebus_p, "can't get reg property\n");
			return (DDI_FAILURE);
		}
		n = i / sizeof (ebus_regspec_t);
		/* validate the requested register number */
		if (*(int *)arg < 0 || *(int *)arg >= n) {
			DBG(D_MAP, ebus_p, "rnumber out of range\n");
			kmem_free((caddr_t)ebus_rp, i);
			return (DDI_FAILURE);
		}
		*((off_t *)result) = ebus_rp[*(int *)arg].size;
		kmem_free((caddr_t)ebus_rp, i);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_NREGS: rdip=%s%d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip));

		if (getprop(rdip, "reg", &ebus_rp, &i) != DDI_SUCCESS) {
			DBG(D_CTLOPS, ebus_p, "can't get reg property\n");
			return (DDI_FAILURE);
		}
		*((uint_t *)result) = i / sizeof (ebus_regspec_t);
		kmem_free((caddr_t)ebus_rp, i);
		return (DDI_SUCCESS);
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DBG2(D_CTLOPS, ebus_p, "passing request to parent: rdip=%s%d\n",
	    ddi_get_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}
/*
 * Re-attach (DDI_RESUME) every real device below "start", in reverse
 * device-tree order (last sibling first), recursing into each node's
 * children.  If srh->sr_failed_dip is set, devices are only released,
 * not resumed, until the failed dip itself is reached; the hold taken
 * on it by dr_suspend_devices() is then dropped.  Resume failures are
 * reported via prom_printf() and recorded in the handle's error list,
 * but do not stop the walk.
 */
static void
dr_resume_devices(dev_info_t *start, dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	dev_info_t	*dip, *next, *last = NULL;
	major_t		major;
	char		*bn;
	int		circ;

	major = (major_t)-1;

	/* attach in reverse device tree order */
	while (last != start) {
		/*
		 * Walk the sibling list to find the last sibling not yet
		 * processed (or the failed dip, whichever comes first).
		 */
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != srh->sr_failed_dip) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == srh->sr_failed_dip) {
			/* release hold acquired in dr_suspend_devices() */
			srh->sr_failed_dip = NULL;
			ndi_rele_devi(dip);
		} else if (dr_is_real_device(dip) &&
		    srh->sr_failed_dip == NULL) {
			if ((bn = ddi_binding_name(dip)) != NULL) {
				major = ddi_name_to_major(bn);
			} else {
				bn = "<null>";
			}
			if (!dr_bypass_device(bn) &&
			    !drmach_verify_sr(dip, 0)) {
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				/* Prefer the resolved name for reporting */
				if (!dr_resolve_devname(dip, d_name, d_alias)) {
					if (d_alias[0] != 0) {
						prom_printf("\tresuming "
						    "%s@%s (aka %s)\n",
						    d_name, d_info, d_alias);
					} else {
						prom_printf("\tresuming "
						    "%s@%s\n", d_name, d_info);
					}
				} else {
					prom_printf("\tresuming %s@%s\n",
					    bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an e_code of ESBD_RESUME,
					 * and save the driver major
					 * number in the e_rsc.
					 */
					prom_printf("\tFAILED to resume %s@%s",
					    d_name[0] ? d_name : bn, d_info);

					srh->sr_err_idx =
					    dr_add_int(srh->sr_err_ints,
					    srh->sr_err_idx, DR_MAX_ERR_INT,
					    (uint64_t)major);

					handle = srh->sr_dr_handlep;

					dr_op_err(CE_IGNORE, handle,
					    ESBD_RESUME, "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
				}
			}
		}

		/* Hold parent busy while walking its children */
		ndi_devi_enter(dip, &circ);
		dr_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}
/*
 * Bus control operations entry point for the PCI-PCI bridge nexus.
 *
 * Handled completely here: REPORTDEV, INITCHILD, UNINITCHILD, SIDDEV,
 * REGSIZE, NREGS, ATTACH, DETACH, PEEK, POKE.  Everything else is
 * passed up to the parent via ddi_ctlops().
 */
/*ARGSUSED*/
static int
ppb_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pci_regspec_t *drv_regp;
	int	reglen;
	int	rn;
	int	totreg;
	ppb_devstate_t *ppb = ddi_get_soft_state(ppb_state,
	    ddi_get_instance(dip));
	struct detachspec *dsp;
	struct attachspec *asp;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?PCI-device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (ppb_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		ppb_removechild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_SIDDEV:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		/* handled below, after the switch */
		break;

	/* X86 systems support PME wakeup from suspend */
	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		asp = (struct attachspec *)arg;
		/* FMA init only under a PCIe parent, after a good attach */
		if ((ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
		    (asp->when == DDI_POST) && (asp->result == DDI_SUCCESS))
			pf_init(rdip, (void *)ppb->ppb_fm_ibc, asp->cmd);

		if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE)
			if (pci_pre_resume(rdip) != DDI_SUCCESS)
				return (DDI_FAILURE);

		return (DDI_SUCCESS);

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		dsp = (struct detachspec *)arg;
		if ((ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
		    (dsp->when == DDI_PRE))
			pf_fini(rdip, dsp->cmd);

		if (dsp->cmd == DDI_SUSPEND && dsp->when == DDI_POST)
			if (pci_post_suspend(rdip) != DDI_SUCCESS)
				return (DDI_FAILURE);

		return (DDI_SUCCESS);

	case DDI_CTLOPS_PEEK:
	case DDI_CTLOPS_POKE:
		/* only mediate peek/poke when our parent is npe */
		if (strcmp(ddi_driver_name(ddi_get_parent(dip)), "npe") != 0)
			return (ddi_ctlops(dip, rdip, ctlop, arg, result));
		return (pci_peekpoke_check(dip, rdip, ctlop, arg, result,
		    ddi_ctlops, &ppb->ppb_err_mutex,
		    &ppb->ppb_peek_poke_mutex, ppb_peekpoke_cb));

	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/* common code for REGSIZE/NREGS */
	*(int *)result = 0;
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "reg", (caddr_t)&drv_regp,
	    &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	totreg = reglen / sizeof (pci_regspec_t);
	if (ctlop == DDI_CTLOPS_NREGS)
		*(int *)result = totreg;
	else if (ctlop == DDI_CTLOPS_REGSIZE) {
		rn = *(int *)arg;
		/*
		 * rn indexes drv_regp[] directly; reject negative register
		 * numbers as well as too-large ones (previously only the
		 * upper bound was checked, allowing an OOB read).
		 */
		if (rn < 0 || rn >= totreg) {
			kmem_free(drv_regp, reglen);
			return (DDI_FAILURE);
		}
		*(off_t *)result = drv_regp[rn].pci_size_low;
	}
	kmem_free(drv_regp, reglen);

	return (DDI_SUCCESS);
}
/*
 * Suspend (DDI_SUSPEND-detach) every real device at and below "dip",
 * depth-first: children are suspended before their parent.  Devices
 * that match the bypass list or fail drmach_verify_sr() are skipped.
 * On the first suspend failure the failing dip is held (ndi_hold_devi)
 * and recorded in srh->sr_failed_dip so dr_resume_devices() can release
 * it, the error is logged, and DDI_FAILURE is returned.
 *
 * NOTE(review): the recursive-call failure path returns ENXIO while
 * other paths return DDI_FAILURE/DDI_SUCCESS; callers appear to test
 * only for non-zero, so the mix is benign — confirm before relying on
 * specific return codes.
 *
 * The "dip" argument's parent (if it exists) must be held busy.
 */
static int
dr_suspend_devices(dev_info_t *dip, dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	major_t		major;
	char		*dname;
	int		circ;

	/*
	 * If dip is the root node, it has no siblings and it is
	 * always held. If dip is not the root node, dr_suspend_devices()
	 * will be invoked with the parent held busy.
	 */
	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		char	d_name[40], d_alias[40], *d_info;

		/* Suspend the subtree first (hold this node busy meanwhile) */
		ndi_devi_enter(dip, &circ);
		if (dr_suspend_devices(ddi_get_child(dip), srh)) {
			ndi_devi_exit(dip, circ);
			return (ENXIO);
		}
		ndi_devi_exit(dip, circ);

		if (!dr_is_real_device(dip))
			continue;

		major = (major_t)-1;
		if ((dname = ddi_binding_name(dip)) != NULL)
			major = ddi_name_to_major(dname);

		/* Skip devices on the suspend bypass list */
		if (dr_bypass_device(dname)) {
			PR_QR(" bypassed suspend of %s (major# %d)\n", dname,
			    major);
			continue;
		}

		if (drmach_verify_sr(dip, 1)) {
			PR_QR(" bypassed suspend of %s (major# %d)\n", dname,
			    major);
			continue;
		}

		if ((d_info = ddi_get_name_addr(dip)) == NULL)
			d_info = "<null>";

		d_name[0] = 0;
		if (dr_resolve_devname(dip, d_name, d_alias) == 0) {
			if (d_alias[0] != 0) {
				prom_printf("\tsuspending %s@%s (aka %s)\n",
				    d_name, d_info, d_alias);
			} else {
				prom_printf("\tsuspending %s@%s\n",
				    d_name, d_info);
			}
		} else {
			prom_printf("\tsuspending %s@%s\n", dname, d_info);
		}

		if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
			prom_printf("\tFAILED to suspend %s@%s\n",
			    d_name[0] ? d_name : dname, d_info);

			/* record the failing driver's major number */
			srh->sr_err_idx = dr_add_int(srh->sr_err_ints,
			    srh->sr_err_idx, DR_MAX_ERR_INT, (uint64_t)major);

			/* hold is released in dr_resume_devices() */
			ndi_hold_devi(dip);
			srh->sr_failed_dip = dip;

			handle = srh->sr_dr_handlep;
			dr_op_err(CE_IGNORE, handle, ESBD_SUSPEND, "%s@%s",
			    d_name[0] ? d_name : dname, d_info);

			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
/*
 * DDI_CTLOPS_INITCHILD handler: name the child, merge .conf (pseudo)
 * node properties into the matching h/w node if applicable, attach
 * parent-private interrupt data, and program the child's PCI command
 * register.  Returns DDI_SUCCESS, DDI_FAILURE, or DDI_NOT_WELL_FORMED.
 */
static int
pci_initchild(dev_info_t *child)
{
	char name[80];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;

	/* Build the unit-address and name the child node */
	if (pci_common_name_child(child, name, 80) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, pci_common_name_child) ==
		    DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children) {
			/*
			 * If the "interrupts" property doesn't exist,
			 * this must be the ddivs no-intr case, and it returns
			 * DDI_SUCCESS instead of DDI_FAILURE.
			 */
			if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
			    DDI_PROP_DONTPASS, "interrupts", -1) == -1)
				return (DDI_SUCCESS);

			/*
			 * Create the ddi_parent_private_data for a pseudo
			 * child.
			 */
			pci_common_set_parent_private_data(child);
			return (DDI_SUCCESS);
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_get_name(child), ddi_get_name_addr(child),
		    ddi_get_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	/* h/w node: set up interrupt parent data only if it has interrupts */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "interrupts", -1) != -1)
		pci_common_set_parent_private_data(child);
	else
		ddi_set_parent_data(child, NULL);

	/*
	 * initialize command register
	 */
	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Support for the "command-preserve" property.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	/* keep preserved bits (and back-to-back), then apply defaults */
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (pci_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);
	pci_config_teardown(&config_handle);

	return (DDI_SUCCESS);
}
/*
 * DDI_CTLOPS_INITCHILD handler for the PCIe-aware PCI-PCI bridge:
 * name the child, merge .conf (pseudo) node properties if applicable,
 * set up PCIe FMA state, allocate interrupt parent-private data, and
 * program the child's command register.
 */
static int
ppb_initchild(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;
	ppb_devstate_t *ppb;
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;

	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	ddi_set_parent_data(child, NULL);

	/*
	 * PCIe FMA specific
	 *
	 * Note: parent_data for parent is created only if this is PCI-E
	 * platform, for which, SG take a different route to handle device
	 * errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS)
			return (DDI_FAILURE);
		pcie_init_dom(child);
	}

	/* transfer select properties from PROM to kernel */
	if (ddi_getprop(DDI_DEV_T_NONE, child, DDI_PROP_DONTPASS,
	    "interrupts", -1) != -1) {
		pdptr = kmem_zalloc((sizeof (struct ddi_parent_private_data) +
		    sizeof (struct intrspec)), KM_SLEEP);
		/* intrspec lives immediately after the parent-private data */
		pdptr->par_intr = (struct intrspec *)(pdptr + 1);
		pdptr->par_nintr = 1;
		ddi_set_parent_data(child, pdptr);
	} else
		ddi_set_parent_data(child, NULL);

	/*
	 * NOTE(review): on this failure path pcie_fini_dom() is invoked
	 * even when the parent is not a PCIe device (so pcie_init_dom()
	 * never ran), and any pdptr allocated above is not freed here —
	 * presumably uninitchild cleans it up; confirm both.
	 */
	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		pcie_fini_dom(child);
		return (DDI_FAILURE);
	}

	/*
	 * Support for the "command-preserve" property.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	/* keep preserved bits (and back-to-back), then apply defaults */
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);
	pci_config_teardown(&config_handle);

	return (DDI_SUCCESS);
}
/*
 * Bus control operations entry point (SPARC PCIe-aware variant).
 *
 * Handled completely here: REPORTDEV, INITCHILD, UNINITCHILD, ATTACH,
 * DETACH, SIDDEV, REGSIZE, NREGS.  Everything else is passed up to the
 * parent via ddi_ctlops().
 */
/*ARGSUSED*/
static int
ppb_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pci_regspec_t *drv_regp;
	int	reglen;
	int	rn;
	struct attachspec *as;
	struct detachspec *ds;
	int	totreg;
	ppb_devstate_t *ppb_p;

	ppb_p = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(dip));

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?PCI-device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (ppb_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		ppb_uninitchild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		as = (struct attachspec *)arg;
		/* FMA init only under a PCIe parent, after a good attach */
		if ((ppb_p->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
		    (as->when == DDI_POST) && (as->result == DDI_SUCCESS))
			pf_init(rdip, ppb_p->fm_ibc, as->cmd);

		return (DDI_SUCCESS);

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		ds = (struct detachspec *)arg;
		if ((ppb_p->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
		    (ds->when == DDI_PRE))
			pf_fini(rdip, ds->cmd);

		return (DDI_SUCCESS);

	case DDI_CTLOPS_SIDDEV:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		/* handled below, after the switch */
		break;

	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/* common code for REGSIZE/NREGS */
	*(int *)result = 0;
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "reg", (caddr_t)&drv_regp,
	    &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	totreg = reglen / sizeof (pci_regspec_t);
	if (ctlop == DDI_CTLOPS_NREGS)
		*(int *)result = totreg;
	else if (ctlop == DDI_CTLOPS_REGSIZE) {
		rn = *(int *)arg;
		/*
		 * rn indexes drv_regp[] directly; reject negative register
		 * numbers as well as too-large ones (previously only the
		 * upper bound was checked, allowing an OOB read).
		 */
		if (rn < 0 || rn >= totreg) {
			kmem_free(drv_regp, reglen);
			return (DDI_FAILURE);
		}
		/* assemble the full 64-bit register size */
		*(off_t *)result = drv_regp[rn].pci_size_low |
		    ((uint64_t)drv_regp[rn].pci_size_hi << 32);
	}
	kmem_free(drv_regp, reglen);

	return (DDI_SUCCESS);
}
/*
 * DDI_CTLOPS_INITCHILD handler for the power-manageable PCI-PCI
 * bridge: name the child, merge .conf (pseudo) node properties if
 * applicable, create child power-management info (forcing the bus to
 * full power), and program the child's config space (command register,
 * bridge control, cache-line-size, latency timer) unless the child
 * saved its own config regs before entering D3.
 */
static int
ppb_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;
	uint_t n;
	ushort_t bcr;
	uchar_t header_type;
	uchar_t min_gnt, latency_timer;
	ppb_devstate_t *ppb;

	/*
	 * Name the child
	 */
	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);
	ddi_set_parent_data(child, NULL);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ppb_removechild(child);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ppb_removechild(child);
		return (DDI_NOT_WELL_FORMED);
	}

	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	ddi_set_parent_data(child, NULL);

	/*
	 * If hardware is PM capable, set up the power info structure.
	 * This also ensures the the bus will not be off (0MHz) otherwise
	 * system panics during a bus access.
	 */
	if (PM_CAPABLE(ppb->ppb_pwr_p)) {
		/*
		 * Create a pwr_info struct for child.  Bus will be
		 * at full speed after creating info.
		 */
		pci_pwr_create_info(ppb->ppb_pwr_p, child);
#ifdef DEBUG
		ASSERT(ppb->ppb_pwr_p->current_lvl == PM_LEVEL_B0);
#endif
	}

	/*
	 * If configuration registers were previously saved by
	 * child (before it entered D3), then let the child do the
	 * restore to set up the config regs as it'll first need to
	 * power the device out of D3.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "config-regs-saved-by-child") == 1) {
		DEBUG2(DBG_PWR, ddi_get_parent(child),
		    "INITCHILD: config regs to be restored by child"
		    " for %s@%s\n", ddi_node_name(child),
		    ddi_get_name_addr(child));
		return (DDI_SUCCESS);
	}

	DEBUG2(DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		/* undo the pwr_info created above */
		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			pci_pwr_rm_info(ppb->ppb_pwr_p, child);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Determine the configuration header type.
	 */
	header_type = pci_config_get8(config_handle, PCI_CONF_HEADER);

	/*
	 * Support for the "command-preserve" property.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	/* keep preserved bits (and back-to-back), then apply defaults */
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		bcr = pci_config_get8(config_handle, PCI_BCNF_BCNTRL);
		if (ppb_command_default & PCI_COMM_PARITY_DETECT)
			bcr |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
		if (ppb_command_default & PCI_COMM_SERR_ENABLE)
			bcr |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		bcr |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		pci_config_put8(config_handle, PCI_BCNF_BCNTRL, bcr);
	}

	/*
	 * Initialize cache-line-size configuration register if needed.
	 */
	if (ppb_set_cache_line_size_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "cache-line-size", 0) == 0) {
		pci_config_put8(config_handle, PCI_CONF_CACHE_LINESZ,
		    ppb->ppb_cache_line_size);
		/* re-read: the register may be read-only/unimplemented */
		n = pci_config_get8(config_handle, PCI_CONF_CACHE_LINESZ);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "cache-line-size", n);
		}
	}

	/*
	 * Initialize latency timer configuration registers if needed.
	 */
	if (ppb_set_latency_timer_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "latency-timer", 0) == 0) {
		if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
			/* bridges get the secondary latency timer too */
			latency_timer = ppb->ppb_latency_timer;
			pci_config_put8(config_handle, PCI_BCNF_LATENCY_TIMER,
			    ppb->ppb_latency_timer);
		} else {
			/* derive from the device's advertised min_gnt */
			min_gnt = pci_config_get8(config_handle,
			    PCI_CONF_MIN_G);
			latency_timer = min_gnt * 8;
		}
		pci_config_put8(config_handle, PCI_CONF_LATENCY_TIMER,
		    latency_timer);
		n = pci_config_get8(config_handle, PCI_CONF_LATENCY_TIMER);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "latency-timer", n);
		}
	}

	/*
	 * SPARC PCIe FMA specific
	 *
	 * Note: parent_data for parent is created only if this is sparc PCI-E
	 * platform, for which, SG take a different route to handle device
	 * errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS) {
			pci_config_teardown(&config_handle);
			return (DDI_FAILURE);
		}
		pcie_init_dom(child);
	}

	/*
	 * Check to see if the XMITS/PCI-X workaround applies.
	 */
	n = ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_NOTPROM,
	    "pcix-update-cmd-reg", -1);

	if (n != -1) {
		extern void pcix_set_cmd_reg(dev_info_t *child, uint16_t value);
		DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
		    "Workaround: value = %x\n", n);
		pcix_set_cmd_reg(child, n);
	}
	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
/*
 * attach the module
 *
 * Attach an instance of the test vHCI driver: verify it sits directly
 * under /pshot, register with the MDI framework using the unit address
 * as the vHCI class, and create the "devctl" minor node.  DDI_RESUME /
 * DDI_PM_RESUME are no-ops; all failures unwind any registration done.
 */
static int
tvhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char *vclass;
	int instance, vhci_regis = 0;
	struct tvhci_state *vhci = NULL;
	dev_info_t *pdip;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		return (0);	/* nothing to do */

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(tvhci_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(tvhci_state, instance);
	ASSERT(vhci != NULL);
	vhci->dip = dip;

	/* parent must be /pshot */
	pdip = ddi_get_parent(dip);
	if (strcmp(ddi_driver_name(pdip), "pshot") != 0 ||
	    ddi_get_parent(pdip) != ddi_root_node()) {
		cmn_err(CE_NOTE, "tvhci must be under /pshot/");
		goto attach_fail;
	}

	/*
	 * XXX add mpxio-disable property. need to remove the check
	 * from the framework
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "mpxio-disable", "no");

	/* bus_addr is the <vhci_class> */
	vclass = ddi_get_name_addr(dip);
	/*
	 * Reject NULL and too-short class names.  The vclass[0] test
	 * also prevents reading vclass[1] past the terminator of an
	 * empty string (the original check did exactly that).
	 */
	if (vclass == NULL || vclass[0] == '\0' || vclass[1] == '\0') {
		cmn_err(CE_NOTE, "tvhci invalid vhci class");
		goto attach_fail;
	}

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(vclass, dip, &tvhci_opinfo, 0) != MDI_SUCCESS) {
		cmn_err(CE_WARN, "%s mdi_vhci_register failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}
	vhci_regis++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
	    DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s ddi_create_minor_node failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}

	/* keep the instance around once attached */
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    DDI_NO_AUTODETACH, 1);
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

attach_fail:
	/* unwind in reverse order of setup */
	if (vhci_regis)
		(void) mdi_vhci_unregister(dip, 0);

	ddi_soft_state_free(tvhci_state, instance);
	return (DDI_FAILURE);
}
/*
 * Bus control operations entry point for the x86 PCI nexus.
 *
 * Handled completely here: REPORTDEV, INITCHILD, UNINITCHILD, SIDDEV,
 * REGSIZE, NREGS, PEEK, POKE, and the PCI_PM_IDLESPEED POWER report.
 * ATTACH/DETACH do PME pre-resume/post-suspend work and then pass up;
 * everything else goes straight to ddi_ctlops().
 */
/*ARGSUSED*/
static int
pci_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pci_regspec_t *drv_regp;
	uint_t	reglen;
	int	rn;
	int	totreg;
	pci_state_t *pcip;
	struct attachspec *asp;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?PCI-device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (pci_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (pci_removechild((dev_info_t *)arg));

	case DDI_CTLOPS_SIDDEV:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);

		*(int *)result = 0;
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
		    DDI_PROP_DONTPASS, "reg", (int **)&drv_regp,
		    &reglen) != DDI_PROP_SUCCESS) {
			return (DDI_FAILURE);
		}

		/* reglen is in ints; convert to pci_regspec_t entries */
		totreg = (reglen * sizeof (int)) / sizeof (pci_regspec_t);
		if (ctlop == DDI_CTLOPS_NREGS)
			*(int *)result = totreg;
		else if (ctlop == DDI_CTLOPS_REGSIZE) {
			rn = *(int *)arg;
			/*
			 * rn indexes drv_regp[] directly; reject negative
			 * register numbers as well as too-large ones
			 * (previously only the upper bound was checked,
			 * allowing an OOB read).
			 */
			if (rn < 0 || rn >= totreg) {
				ddi_prop_free(drv_regp);
				return (DDI_FAILURE);
			}
			*(off_t *)result = drv_regp[rn].pci_size_low;
		}
		ddi_prop_free(drv_regp);

		return (DDI_SUCCESS);

	case DDI_CTLOPS_POWER: {
		power_req_t	*reqp = (power_req_t *)arg;
		/*
		 * We currently understand reporting of PCI_PM_IDLESPEED
		 * capability. Everything else is passed up.
		 */
		if ((reqp->request_type == PMR_REPORT_PMCAP) &&
		    (reqp->req.report_pmcap_req.cap == PCI_PM_IDLESPEED)) {
			return (DDI_SUCCESS);
		}
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	case DDI_CTLOPS_PEEK:
	case DDI_CTLOPS_POKE:
		pcip = ddi_get_soft_state(pci_statep, ddi_get_instance(dip));
		return (pci_peekpoke_check(dip, rdip, ctlop, arg, result,
		    pci_common_peekpoke, &pcip->pci_err_mutex,
		    &pcip->pci_peek_poke_mutex, pci_peekpoke_cb));

	/* for now only X86 systems support PME wakeup from suspended state */
	case DDI_CTLOPS_ATTACH:
		asp = (struct attachspec *)arg;
		if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE)
			if (pci_pre_resume(rdip) != DDI_SUCCESS)
				return (DDI_FAILURE);
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));

	case DDI_CTLOPS_DETACH:
		asp = (struct attachspec *)arg;
		if (asp->cmd == DDI_SUSPEND && asp->when == DDI_POST)
			if (pci_post_suspend(rdip) != DDI_SUCCESS)
				return (DDI_FAILURE);
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));

	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/* NOTREACHED */
}