/*
 * ddi_walk_devs() callback: match a PCI child node against the
 * vendor/device pair recorded in the gfxp_pci_bsf_t passed via arg.
 * On a match, sets pci_bsf->found and terminates the walk.
 */
static int
gfxp_pci_find_vd(dev_info_t *dip, void *arg)
{
	gfxp_pci_bsf_t *bsf = (gfxp_pci_bsf_t *)arg;
	int vid, did, cc;

	/*
	 * A node is only a PCI child if it carries all three of
	 * vendor-id, device-id and class-code.
	 */
	vid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	did = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	cc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "class-code", -1);
	if (vid == -1 || did == -1 || cc == -1)
		return (DDI_WALK_CONTINUE);

	if (vid == bsf->vendor && did == bsf->device) {
		bsf->found = 1;
		return (DDI_WALK_TERMINATE);
	}

	return (DDI_WALK_CONTINUE);
}
/*
 * ddi_walk_devs() callback: locate the IOAT device by matching its PCI
 * id properties and unit-address against the fipe_pci_ioat_ids table.
 * On a match, takes a hold on the dip, records it in fipe_ioat_ctrl and
 * terminates the walk.
 */
/*ARGSUSED*/
static int
fipe_search_ioat_dev(dev_info_t *dip, void *arg)
{
	char *unit;
	struct fipe_pci_ioat_id *id;
	int i, max, venid, devid, subvenid, subsysid;

	/* Query PCI id properties; skip nodes missing any of them. */
	venid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", 0xffffffff);
	if (venid == 0xffffffff) {
		return (DDI_WALK_CONTINUE);
	}
	devid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", 0xffffffff);
	if (devid == 0xffffffff) {
		return (DDI_WALK_CONTINUE);
	}
	subvenid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "subsystem-vendor-id", 0xffffffff);
	if (subvenid == 0xffffffff) {
		return (DDI_WALK_CONTINUE);
	}
	subsysid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "subsystem-id", 0xffffffff);
	/*
	 * BUG FIX: this check previously re-tested subvenid (copy/paste),
	 * so a node with a missing "subsystem-id" property was never
	 * filtered out here.
	 */
	if (subsysid == 0xffffffff) {
		return (DDI_WALK_CONTINUE);
	}
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "unit-address", &unit) != DDI_PROP_SUCCESS) {
		return (DDI_WALK_CONTINUE);
	}

	/* 0xffff in a table field is a wildcard for that id. */
	max = sizeof (fipe_pci_ioat_ids) / sizeof (fipe_pci_ioat_ids[0]);
	for (i = 0; i < max; i++) {
		id = &fipe_pci_ioat_ids[i];
		if ((id->venid == 0xffffu || id->venid == venid) &&
		    (id->devid == 0xffffu || id->devid == devid) &&
		    (id->subvenid == 0xffffu || id->subvenid == subvenid) &&
		    (id->subsysid == 0xffffu || id->subsysid == subsysid) &&
		    (id->unitaddr == NULL ||
		    strcmp(id->unitaddr, unit) == 0)) {
			break;
		}
	}
	ddi_prop_free(unit);
	if (i >= max) {
		return (DDI_WALK_CONTINUE);
	}

	/* Found IOAT device, hold one reference count. */
	ndi_hold_devi(dip);
	fipe_ioat_ctrl.ioat_dev_info = dip;

	return (DDI_WALK_TERMINATE);
}
/*
 * Checks to see if MMCFG is supported.
 * Returns: TRUE if MMCFG is supported, FALSE if not.
 *
 * If a device is attached to a parent whose "dev_type" is "pciex",
 * the device will support MMCFG access.  Otherwise, use legacy IOCFG access.
 *
 * Legacy PCI config space access is also forced for the AMD K8 north
 * bridge functions (HyperTransport Configuration, Address Map, DRAM
 * Controller, Miscellaneous Control), which do not support MMCFG access.
 */
boolean_t
npe_is_mmcfg_supported(dev_info_t *dip)
{
	int vid, did;

	vid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	did = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);

	if (npe_child_is_pci(dip))
		return (B_FALSE);
	if (IS_BAD_AMD_NTBRIDGE(vid, did))
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * ddi_walk_devs() callback: find the devinfo node for the CPU whose id
 * matches target->cpuid.  The "reg" property holds the PROM config
 * handle, which is converted to a cpuid before comparison.
 */
int
mp_find_cpu(dev_info_t *dip, void *arg)
{
	struct mp_find_cpu_arg *tgt = (struct mp_find_cpu_arg *)arg;
	char *dev_type;
	int cpuid;
	int ret = DDI_WALK_CONTINUE;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device_type", &dev_type))
		return (DDI_WALK_CONTINUE);

	if (strcmp(dev_type, "cpu") == 0) {
		cpuid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", -1);
		if (cpuid == -1) {
			/* Every cpu node must carry a "reg" property. */
			cmn_err(CE_PANIC, "reg prop not found in cpu node");
		}
		cpuid = PROM_CFGHDL_TO_CPUID(cpuid);
		if (cpuid == tgt->cpuid) {
			/* Found it */
			ret = DDI_WALK_TERMINATE;
			tgt->dip = dip;
		}
	}

	ddi_prop_free(dev_type);
	return (ret);
}
/*
 * ddi_walk_devs() callback: find the devinfo node for the CPU whose id
 * matches target->cpuid.  Prefers the "cpuid" property; falls back to
 * the port id when the property is absent.
 */
int
mp_find_cpu(dev_info_t *dip, void *arg)
{
	extern int get_portid_ddi(dev_info_t *, dev_info_t **);
	struct mp_find_cpu_arg *tgt = (struct mp_find_cpu_arg *)arg;
	char *dev_type;
	int ret = DDI_WALK_CONTINUE;
	int cpuid;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device_type", &dev_type))
		return (DDI_WALK_CONTINUE);

	if (strcmp(dev_type, "cpu") == 0) {
		cpuid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "cpuid", -1);
		if (cpuid == -1)
			cpuid = get_portid_ddi(dip, NULL);
		if (cpuid == tgt->cpuid) {
			/* Found it */
			ret = DDI_WALK_TERMINATE;
			tgt->dip = dip;
		}
	}

	ddi_prop_free(dev_type);
	return (ret);
}
/*
 * Return the port id for a devinfo node, or -1 if none can be found.
 * Looks first at the node's own "portid"/"upa-portid" properties; for a
 * virtual cpu node the property may instead live on an ancestor (see
 * comment below).  If cmpp is non-NULL it is set to the CMP node
 * (the cpu node's immediate parent) when the ancestor search succeeds,
 * and NULL otherwise.
 */
int
get_portid_ddi(dev_info_t *dip, dev_info_t **cmpp)
{
	int portid;
	int level;
	char dev_type[OBP_MAXPROPNAME];
	int len = OBP_MAXPROPNAME;
	dev_info_t *cmp_node;
	dev_info_t *node;

	if (cmpp != NULL)
		*cmpp = NULL;

	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "portid", -1);
	if (portid != -1)
		return (portid);

	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "upa-portid", -1);
	if (portid != -1)
		return (portid);

	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type, &len) != 0)
		return (-1);

	/*
	 * For a virtual cpu node that is a CMP core, the "portid"
	 * is in the parent node.
	 * For a virtual cpu node that is a CMT strand, the "portid" is
	 * in its grandparent node.
	 * So we iterate up as far as 2 levels to get the "portid".
	 */
	if (strcmp(dev_type, "cpu") != 0)
		return (-1);

	cmp_node = node = ddi_get_parent(dip);
	for (level = 0; node != NULL && level < 2; level++) {
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, node,
		    DDI_PROP_DONTPASS, "portid", -1);
		if (portid != -1) {
			if (cmpp != NULL)
				*cmpp = cmp_node;
			return (portid);
		}
		node = ddi_get_parent(node);
	}

	return (-1);
}
/*
 * The "ddi-intr-weight" property contains the weight of each interrupt
 * associated with a dev_info node.  For devices with multiple interrupts
 * per dev_info node, the total load of the device is
 * "devi_intr_weight * nintr", possibly spread out over multiple CPUs.
 *
 * Maintaining this as a property permits possible tweaking in the product
 * in response to customer problems via driver.conf property definitions
 * at the driver or the instance level.  This does not mean that
 * "ddi-intr-weight" is a formal or committed interface.
 *
 * Returns the weight, or -1 when the property is undefined or invalid.
 */
int32_t
i_ddi_get_intr_weight(dev_info_t *dip)
{
	int32_t w;

	w = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ddi-intr-weight", -1);

	/* Clamp any value below -1 to the "undefined" sentinel. */
	return ((w < -1) ? -1 : w);
}
/*
 * ddi_walk_devs() callback: match a PCI child node against the
 * bus/slot/function triple recorded in the gfxp_pci_bsf_t passed via
 * arg.  On a match, records the dip and vendor/device ids in the
 * argument structure and terminates the walk.
 */
static int
gfxp_pci_find_bsf(dev_info_t *dip, void *arg)
{
	gfxp_pci_bsf_t *bsf;
	uint8_t bus, dev, func;
	int vid, did, cc;

	/*
	 * A node is only a PCI child if it carries all three of
	 * vendor-id, device-id and class-code.
	 */
	vid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	did = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	cc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "class-code", -1);
	if (vid == -1 || did == -1 || cc == -1)
		return (DDI_WALK_CONTINUE);

	if (gfxp_pci_get_bsf(dip, &bus, &dev, &func) != DDI_SUCCESS)
		return (DDI_WALK_TERMINATE);

	bsf = (gfxp_pci_bsf_t *)arg;
	if (bus == bsf->bus && dev == bsf->slot && func == bsf->function) {
		bsf->dip = dip;
		bsf->vendor = vid;
		bsf->device = did;
		bsf->found = 1;
		return (DDI_WALK_TERMINATE);
	}

	return (DDI_WALK_CONTINUE);
}
/*
 * Check properties to set options.  (See dld.h for property definitions).
 *
 * Each property, when present and non-zero, sets the corresponding
 * DLD_OPT_* flag in the global dld_opt.  Table-driven to avoid four
 * copies of the same lookup stanza.
 */
static void
drv_set_opt(dev_info_t *dip)
{
	static const struct {
		char		*dp_prop;	/* driver.conf property name */
		uint32_t	dp_opt;		/* flag to set when non-zero */
	} opt_map[] = {
		{ DLD_PROP_NO_FASTPATH,	DLD_OPT_NO_FASTPATH },
		{ DLD_PROP_NO_POLL,	DLD_OPT_NO_POLL },
		{ DLD_PROP_NO_ZEROCOPY,	DLD_OPT_NO_ZEROCOPY },
		{ DLD_PROP_NO_SOFTRING,	DLD_OPT_NO_SOFTRING }
	};
	int i;

	for (i = 0; i < sizeof (opt_map) / sizeof (opt_map[0]); i++) {
		if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    opt_map[i].dp_prop, 0) != 0) {
			dld_opt |= opt_map[i].dp_opt;
		}
	}
}
/*
 * If the bridge is empty, disable it.
 *
 * Returns 1 (disable: do not bind a driver) only for a pcieb bridge
 * with no children that is not hotplug capable; hotplug-capable
 * bridges have already been failed above.
 */
int
npe_disable_empty_bridges_workaround(dev_info_t *child)
{
	if (ddi_driver_major(child) != ddi_name_to_major("pcieb"))
		return (0);

	if (ddi_get_child(child) != NULL)
		return (0);

	if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "pci-hotplug-type", INBAND_HPC_NONE) != INBAND_HPC_NONE)
		return (0);

	return (1);
}
/*
 * Finish final initialization of the fcoe soft state: create the
 * "admin" minor node, start the watchdog taskq, initialize the ioctl
 * and watchdog synchronization objects and the mac/pending-frame lists,
 * then start the worker threads.
 *
 * Returns FCOE_SUCCESS or FCOE_FAILURE.
 *
 * NOTE(review): if ddi_taskq_create() fails, the minor node created
 * above is not removed here — presumably cleaned up by the caller's
 * detach path; confirm.
 */
static int
fcoe_attach_init(fcoe_soft_state_t *ss)
{
	char taskq_name[TASKQ_NAME_LEN];

	if (ddi_create_minor_node(ss->ss_dip, "admin", S_IFCHR,
	    ddi_get_instance(ss->ss_dip), DDI_PSEUDO, 0) != DDI_SUCCESS) {
		FCOE_LOG("FCOE", "ddi_create_minor_node failed");
		return (FCOE_FAILURE);
	}

	/*
	 * watchdog responsible for release frame and dispatch events
	 */
	(void) snprintf(taskq_name, sizeof (taskq_name), "fcoe_mac");
	taskq_name[TASKQ_NAME_LEN - 1] = 0;
	if ((ss->ss_watchdog_taskq = ddi_taskq_create(NULL,
	    taskq_name, 2, TASKQ_DEFAULTPRI, 0)) == NULL) {
		return (FCOE_FAILURE);
	}

	ss->ss_ioctl_flags = 0;
	mutex_init(&ss->ss_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&ss->ss_mac_list, sizeof (fcoe_mac_t),
	    offsetof(fcoe_mac_t, fm_ss_node));
	list_create(&ss->ss_pfrm_list, sizeof (fcoe_i_frame_t),
	    offsetof(fcoe_i_frame_t, fmi_pending_node));

	mutex_init(&ss->ss_watch_mutex, 0, MUTEX_DRIVER, 0);
	cv_init(&ss->ss_watch_cv, NULL, CV_DRIVER, NULL);
	ss->ss_flags &= ~SS_FLAG_TERMINATE_WATCHDOG;
	(void) ddi_taskq_dispatch(ss->ss_watchdog_taskq,
	    fcoe_watchdog, ss, DDI_SLEEP);
	/*
	 * Busy-wait (with delay) until the watchdog thread announces it
	 * is running, so that workers start against a live watchdog.
	 */
	while ((ss->ss_flags & SS_FLAG_WATCHDOG_RUNNING) == 0) {
		delay(10);
	}

	/* Worker count from driver.conf; fall back to 4 if absent/bad. */
	fcoe_nworkers = ddi_prop_get_int(DDI_DEV_T_ANY, ss->ss_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    (char *)fcoe_workers_num, 4);
	if (fcoe_nworkers < 1) {
		fcoe_nworkers = 4;
	}
	fcoe_worker_init();

	ddi_report_dev(ss->ss_dip);
	return (FCOE_SUCCESS);
}
/*
 * NOTE: this function is duplicated here and in gfx_private/vgatext while
 * we work on a set of commitable interfaces to sunpci.c.
 *
 * Use the class code to determine if the device is a PCI-to-PCI bridge.
 * Returns: B_TRUE if the device is a bridge.
 *          B_FALSE if the device is not a bridge or the property cannot
 *          be retrieved.
 */
static boolean_t
is_pci_bridge(dev_info_t *dip)
{
	uint32_t cc;

	cc = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "class-code", 0xffffffff);
	if (cc == 0xffffffff || cc == DDI_PROP_NOT_FOUND)
		return (B_FALSE);

	/* Keep base class and sub-class; drop the programming interface. */
	cc &= 0x00ffff00;
	return ((cc == ((PCI_CLASS_BRIDGE << 16) | (PCI_BRIDGE_PCI << 8))) ?
	    B_TRUE : B_FALSE);
}
/*
 * cpunex_bus_ctl()
 *	This routine implements nexus bus ctl operations.  Of importance are
 *	DDI_CTLOPS_REPORTDEV, DDI_CTLOPS_INITCHILD, DDI_CTLOPS_UNINITCHILD
 *	and DDI_CTLOPS_POWER.  For DDI_CTLOPS_INITCHILD, it tries to lookup
 *	the "reg" property on the child node and builds and sets the name.
 *	All other ops are passed through to ddi_ctlops().
 */
static int
cpunex_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result)
{
	switch (op) {
	case DDI_CTLOPS_REPORTDEV: {
		dev_info_t *pdip = ddi_get_parent(rdip);
		cmn_err(CE_CONT, "?%s%d at %s%d",
		    ddi_node_name(rdip), ddi_get_instance(rdip),
		    ddi_node_name(pdip), ddi_get_instance(pdip));
		return (DDI_SUCCESS);
	}
	case DDI_CTLOPS_INITCHILD: {
		dev_info_t *cdip = (dev_info_t *)arg;
		int i;
		char caddr[MAXNAMELEN];

		i = ddi_prop_get_int(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg", -1);
		if (i == -1) {
			cmn_err(CE_NOTE, "!%s(%d): \"reg\" property "
			    "not found", ddi_node_name(cdip),
			    ddi_get_instance(cdip));
			return (DDI_NOT_WELL_FORMED);
		}

		/*
		 * FIX: use bounded snprintf() instead of sprintf().
		 * A decimal int always fits in MAXNAMELEN, but the
		 * bounded form is the safe idiom.
		 */
		(void) snprintf(caddr, sizeof (caddr), "%d", i);
		ddi_set_name_addr(cdip, caddr);
		return (DDI_SUCCESS);
	}
	case DDI_CTLOPS_UNINITCHILD: {
		ddi_prop_remove_all((dev_info_t *)arg);
		ddi_set_name_addr((dev_info_t *)arg, NULL);
		return (DDI_SUCCESS);
	}
	default: {
		/* Everything else goes to the generic implementation. */
		return (ddi_ctlops(dip, rdip, op, arg, result));
	}
	}
}
/*
 * Build and set the unit-address ("<mac_id-in-hex>,0") for a fcoe child
 * node from its "mac_id" property.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE when the property is absent.
 */
/* ARGSUSED */
static int
fcoe_initchild(dev_info_t *fcoe_dip, dev_info_t *client_dip)
{
	char client_addr[FCOE_STR_LEN];
	int mac_id;

	mac_id = ddi_prop_get_int(DDI_DEV_T_ANY, client_dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "mac_id", -1);
	if (mac_id == -1) {
		FCOE_LOG(__FUNCTION__, "no mac_id property: %p", client_dip);
		return (DDI_FAILURE);
	}

	/*
	 * FIX: use bounded snprintf() instead of sprintf(); it also
	 * guarantees NUL termination, making the bzero() redundant but
	 * harmless (kept for clarity of the full-buffer contents).
	 */
	bzero(client_addr, FCOE_STR_LEN);
	(void) snprintf(client_addr, FCOE_STR_LEN, "%x,0", mac_id);
	ddi_set_name_addr(client_dip, client_addr);
	return (DDI_SUCCESS);
}
/*
 * Return the physical address of the ACPI root table (RSDP).
 *
 * For EFI firmware, the root pointer is defined in the EFI system
 * table.  The boot code processes the table and puts the physical
 * address in the "acpi-root-tab" property on the root node.  If the
 * property is absent, fall back to AcpiFindRootPointer().
 */
ACPI_PHYSICAL_ADDRESS
AcpiOsGetRootPointer()
{
	ACPI_PHYSICAL_ADDRESS Address;

	/*
	 * FIX: ddi_prop_get_int() takes an integer default, and Address
	 * is an integer type — use 0, not the pointer constant NULL, as
	 * the default/sentinel.
	 *
	 * NOTE(review): ddi_prop_get_int() returns an int; a physical
	 * address above 4GB would be truncated here.  Confirm whether
	 * the boot code stores this as a 64-bit property and, if so,
	 * switch to ddi_prop_get_int64().
	 */
	Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "acpi-root-tab", 0);

	if ((Address == 0) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
		Address = 0;

	return (Address);
}
/*
 * Restore the saved PCI config registers of every child of dip that
 * carries the "htconfig-saved" marker property.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if any restore failed (all
 * children are still attempted).
 */
int
npe_restore_htconfig_children(dev_info_t *dip)
{
	int ret = DDI_SUCCESS;
	dev_info_t *cdip;

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {
		if (ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "htconfig-saved", 0) == 0)
			continue;

		if (pci_restore_config_regs(cdip) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed to restore HT config "
			    "regs for %s\n", ddi_node_name(cdip));
			ret = DDI_FAILURE;
		}
	}

	return (ret);
}
/*
 * Attach (or resume) an instance of the emul64 pseudo-HBA: allocate the
 * soft state and transport structure, register the SCSI HBA entry
 * points, attach the HBA, read "scsi-options", and set up the request/
 * response mutexes, default capabilities and completion taskq.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;	/* tracks cleanup on failure */
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/* Resume only needs the previously attached transport. */
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran = tran;
	emul64->emul64_dip = dip;

	/* Wire up the SCSI HBA entry points for this instance. */
	tran->tran_hba_private	= emul64;
	tran->tran_tgt_private	= NULL;
	tran->tran_tgt_init	= emul64_tran_tgt_init;
	tran->tran_tgt_probe	= scsi_hba_probe;
	tran->tran_tgt_free	= NULL;

	tran->tran_start	= emul64_scsi_start;
	tran->tran_abort	= emul64_scsi_abort;
	tran->tran_reset	= emul64_scsi_reset;
	tran->tran_getcap	= emul64_scsi_getcap;
	tran->tran_setcap	= emul64_scsi_setcap;
	tran->tran_init_pkt	= emul64_scsi_init_pkt;
	tran->tran_destroy_pkt	= emul64_scsi_destroy_pkt;
	tran->tran_dmafree	= emul64_scsi_dmafree;
	tran->tran_sync_pkt	= emul64_scsi_sync_pkt;
	tran->tran_reset_notify = emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);


	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);


	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	/* Tear down whatever was set up before the failure. */
	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}
/*
 * Attach an instance of the device.  This happens before an open
 * can succeed.
 *
 * Reads the node-id/max-devices tunables from driver.conf, initializes
 * the raw I/O provider, and creates the control minor node.  Only
 * DDI_ATTACH is supported.
 */
static int
_nsctl_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int rc;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	nsctl_dip = dip;

	/*
	 * FIX: ddi_report_dev() was called twice — once here, before
	 * anything was set up, and once after the minor node was
	 * created.  Keep only the final announcement.
	 */

	/*
	 * Get the node parameters now that we can look up.
	 */
	nsc_min_nodeid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nsc_min_nodeid", 0);
	nsc_max_nodeid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nsc_max_nodeid", 5);
	_nsc_max_devices = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nsc_max_devices", 128);
	_nsc_maxdev = _nsc_max_devices;
	nscsetup();

	/*
	 * Init raw requires the _nsc_max_devices value and so
	 * cannot be done before the nsc_max_devices property has
	 * been read which can only be done after the module is
	 * attached and we have a dip.
	 */
	if ((rc = _nsc_init_raw(_nsc_max_devices)) != 0) {
		cmn_err(CE_WARN,
		    "!nsctl: unable to initialize raw io provider: %d", rc);
		return (DDI_FAILURE);
	}

	/*
	 * Init rest of soft state structure
	 */
	rc = ddi_create_minor_node(dip, "c,nsctl", S_IFCHR, 0,
	    DDI_PSEUDO, 0);
	if (rc != DDI_SUCCESS) {
		/* free anything we allocated here */
		cmn_err(CE_WARN,
		    "!_nsctl_attach: ddi_create_minor_node failed %d", rc);
		return (DDI_FAILURE);
	}

	/* Announce presence of the device */
	ddi_report_dev(dip);

	/* mark the device as attached, opens may proceed */
	return (DDI_SUCCESS);
}
/*
 * Process acpi-user-options property if present.
 *
 * Normalizes a string-valued "acpi-user-options" root property to an
 * int, then uses the option bits to decide whether ACPI is enabled and
 * how far ACPI CA initialization should go (acpi_init_level).  Runs at
 * most once per boot.
 */
static void
acpica_process_user_options()
{
	static int processed = 0;
	int acpi_user_options;
	char *acpi_prop;

	/*
	 * return if acpi-user-options has already been processed
	 */
	if (processed)
		return;
	else
		processed = 1;

	/* converts acpi-user-options from type string to int, if any */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "acpi-user-options", &acpi_prop) ==
	    DDI_PROP_SUCCESS) {
		long data;
		int ret;
		ret = ddi_strtol(acpi_prop, NULL, 0, &data);
		if (ret == 0) {
			/* Replace the string property with the int form. */
			e_ddi_prop_remove(DDI_DEV_T_NONE, ddi_root_node(),
			    "acpi-user-options");
			e_ddi_prop_update_int(DDI_DEV_T_NONE,
			    ddi_root_node(), "acpi-user-options", data);
		}
		ddi_prop_free(acpi_prop);
	}

	/*
	 * fetch the optional options property
	 */
	acpi_user_options = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "acpi-user-options", 0);

	/*
	 * Note that 'off' has precedence over 'on'
	 * Also note - all cases of ACPI_OUSER_MASK
	 * provided here, no default: case is present
	 */
	switch (acpi_user_options & ACPI_OUSER_MASK) {
	case ACPI_OUSER_DFLT:
		/* Default: enable ACPI only on post-1999 BIOSes. */
		acpica_enable = acpica_check_bios_date(1999, 1, 1);
		break;
	case ACPI_OUSER_ON:
		acpica_enable = TRUE;
		break;
	case ACPI_OUSER_OFF:
	case ACPI_OUSER_OFF | ACPI_OUSER_ON:
		acpica_enable = FALSE;
		break;
	}

	acpi_init_level = ACPI_FULL_INITIALIZATION;

	/*
	 * special test here; may be generalized in the
	 * future - test for a machines that are known to
	 * work only in legacy mode, and set OUSER_LEGACY if
	 * we're on one
	 */
	if (acpica_metro_old_bios())
		acpi_user_options |= ACPI_OUSER_LEGACY;

	/*
	 * If legacy mode is specified, set initialization
	 * options to avoid entering ACPI mode and hooking SCI
	 * - basically try to act like legacy acpi_intp
	 */
	if ((acpi_user_options & ACPI_OUSER_LEGACY) != 0)
		acpi_init_level |=
		    (ACPI_NO_ACPI_ENABLE | ACPI_NO_HANDLER_INIT);

	/*
	 * modify default ACPI CA debug output level for non-DEBUG builds
	 * (to avoid BIOS debug chatter in /var/adm/messages)
	 */
	if (acpica_muzzle_debug_output)
		AcpiDbgLevel = 0;
}
/*
 * Attach an instance of the ii (Instant Image) driver: allocate soft
 * state, read the ii_debug/ii_bitmap/throttle/copy-direct tunables from
 * ii.conf, initialize the device and create the "ii" minor node.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (with partial state torn down via
 * iidetach on the failure path).
 */
static int
iiattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct ii_state *xsp;
	int instance;
	int i;
	intptr_t flags;

	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	/* save the dev_info_t to be used in logging using ddi_log_sysevent */
	ii_dip = dip;

	instance = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(ii_statep, instance) != 0) {
		cmn_err(CE_WARN, "!ii: no memory for instance %d state.",
		    instance);
		return (DDI_FAILURE);
	}

	flags = 0;
	xsp = ddi_get_soft_state(ii_statep, instance);
	if (xsp == NULL) {
		cmn_err(CE_WARN,
		    "!ii: attach: could not get state for instance %d.",
		    instance);
		goto out;
	}

	ii_debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_debug", 0);
	if (ii_debug != 0) {
#ifdef DEBUG
		cmn_err(CE_NOTE, "!ii: initializing ii version %d.%d.%d.%d",
		    dsw_major_rev, dsw_minor_rev,
		    dsw_micro_rev, dsw_baseline_rev);
#else
		if (dsw_micro_rev) {
			cmn_err(CE_NOTE, "!ii: initializing ii vers %d.%d.%d",
			    dsw_major_rev, dsw_minor_rev, dsw_micro_rev);
		} else {
			cmn_err(CE_NOTE, "!ii: initializing ii version %d.%d",
			    dsw_major_rev, dsw_minor_rev);
		}
#endif
		switch (ii_debug) {
		case 1:
		case 2:
			cmn_err(CE_NOTE, "!ii: ii_debug=%d is enabled.",
			    ii_debug);
			break;
		default:
			cmn_err(CE_WARN,
			    "!ii: Value of ii_debug=%d is not 0,1 or 2.",
			    ii_debug);
		}
	}

	ii_bitmap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_bitmap", II_WTHRU);
	switch (ii_bitmap) {
	case II_KMEM:
		if (ii_debug > 0)
			cmn_err(CE_NOTE, "!ii: ii_bitmap is in memory");
		break;
	case II_FWC:
		if (ii_debug > 0)
			cmn_err(CE_NOTE, "!ii: ii_bitmap is on disk,"
			    " no FWC");
		break;
	case II_WTHRU:
		if (ii_debug > 0)
			cmn_err(CE_NOTE, "!ii: ii_bitmap is on disk");
		break;
	default:
		cmn_err(CE_NOTE, "!ii: ii_bitmap=%d out of range; "
		    "defaulting WTHRU(%d)", ii_bitmap, II_WTHRU);
		ii_bitmap = II_WTHRU;
	}

	/* pick up these values if in ii.conf, otherwise leave alone */
	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_throttle_unit", 0);
	if (i > 0) {
		ii_throttle_unit = i;
		if ((ii_throttle_unit < MIN_THROTTLE_UNIT) ||
		    (ii_throttle_unit > MAX_THROTTLE_UNIT) ||
		    (ii_debug > 0))
			cmn_err(CE_NOTE, "!ii: ii_throttle_unit=%d",
			    ii_throttle_unit);
	}

	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_throttle_delay", 0);
	if (i > 0) {
		ii_throttle_delay = i;
		/*
		 * BUG FIX: the upper-bound test previously compared
		 * against MIN_THROTTLE_DELAY (copy/paste error), so the
		 * warning fired for every in-range value above the
		 * minimum.
		 */
		if ((ii_throttle_delay < MIN_THROTTLE_DELAY) ||
		    (ii_throttle_delay > MAX_THROTTLE_DELAY) ||
		    (ii_debug > 0))
			cmn_err(CE_NOTE, "!ii: ii_throttle_delay=%d",
			    ii_throttle_delay);
	}

	/*
	 * BUG FIX: this stanza previously tested and re-assigned the
	 * stale "i" left over from the ii_throttle_delay lookup,
	 * clobbering ii_copy_direct with the throttle delay value.
	 * ddi_prop_get_int() already returns the default (1) when the
	 * property is absent, so only the range warning remains.
	 */
	ii_copy_direct = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_copy_direct", 1);
	if ((ii_copy_direct < 0) || (ii_copy_direct > 1))
		cmn_err(CE_NOTE, "!ii: ii_copy_direct=%d", ii_copy_direct);

	if (_ii_init_dev()) {
		cmn_err(CE_WARN, "!ii: _ii_init_dev failed");
		goto out;
	}
	flags |= DIDINIT;

	xsp->dip = dip;
	xsp->instance = instance;

	if (ddi_create_minor_node(dip, "ii", S_IFCHR, instance,
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!ii: could not create node.");
		goto out;
	}
	flags |= DIDNODES;

	ddi_set_driver_private(dip, (caddr_t)flags);
	ddi_report_dev(dip);

	ii_create_kstats();

	return (DDI_SUCCESS);

out:
	/* Record progress flags so iidetach only tears down what exists. */
	ddi_set_driver_private(dip, (caddr_t)flags);
	(void) iidetach(dip, DDI_DETACH);
	return (DDI_FAILURE);
}
/*
 * Bus-nexus INITCHILD handler: name the child, merge .conf prototype
 * nodes into their hardware counterparts, set up parent-private data
 * for nodes with interrupts, and initialize the child's PCI command
 * register (honoring the "command-preserve" property).
 *
 * Returns DDI_SUCCESS, DDI_FAILURE (merged-away or unusable node), or
 * DDI_NOT_WELL_FORMED (unmergeable .conf node).
 */
static int
pci_initchild(dev_info_t *child)
{
	char name[80];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;

	if (pci_common_name_child(child, name, 80) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, pci_common_name_child) ==
		    DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children) {
			/*
			 * If the "interrupts" property doesn't exist,
			 * this must be the ddivs no-intr case, and it returns
			 * DDI_SUCCESS instead of DDI_FAILURE.
			 */
			if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
			    DDI_PROP_DONTPASS, "interrupts", -1) == -1)
				return (DDI_SUCCESS);

			/*
			 * Create the ddi_parent_private_data for a pseudo
			 * child.
			 */
			pci_common_set_parent_private_data(child);
			return (DDI_SUCCESS);
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_get_name(child), ddi_get_name_addr(child),
		    ddi_get_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	/* Hardware node: attach interrupt parent data only if needed. */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "interrupts", -1) != -1)
		pci_common_set_parent_private_data(child);
	else
		ddi_set_parent_data(child, NULL);

	/*
	 * initialize command register
	 */
	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Support for the "command-preserve" property.  Bits set in the
	 * property (plus back-to-back enable) are kept from the current
	 * command value; all others come from pci_command_default.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (pci_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);
	pci_config_teardown(&config_handle);

	return (DDI_SUCCESS);
}
/*
 * audioixp_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	audioixp_state_t *statep	Device soft state
 *	int		num		Port number (IXP_REC or IXP_PLAY)
 *
 * Returns:
 *	DDI_SUCCESS		Port allocated and wired up
 *	DDI_FAILURE		Allocation or binding failed
 *
 * NOTE(review): on failure after the port has been linked into statep,
 * cleanup is presumably done by the caller's teardown path — confirm.
 */
static int
audioixp_alloc_port(audioixp_state_t *statep, int num)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	char			*prop;
	audio_dev_t		*adev;
	audioixp_port_t		*port;
	uint32_t		paddr;
	int			rc;
	dev_info_t		*dip;
	audioixp_bd_entry_t	*bdentry;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	port->statep = statep;
	port->started = B_FALSE;
	port->num = num;

	switch (num) {
	case IXP_REC:
		statep->rec_port = port;
		prop = "record-interrupts";
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
		port->nchan = 2;
		break;
	case IXP_PLAY:
		statep->play_port = port;
		prop = "play-interrupts";
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
		/* This could possibly be conditionalized */
		port->nchan = 6;
		break;
	default:
		audio_dev_warn(adev, "bad port number (%d)!", num);
		/*
		 * BUG FIX: the port structure was leaked here; it has
		 * not been linked into statep yet, so nothing else can
		 * free it.
		 */
		kmem_free(port, sizeof (*port));
		return (DDI_FAILURE);
	}

	port->intrs = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, prop, IXP_INTS);

	/* make sure the values are good */
	if (port->intrs < IXP_MIN_INTS) {
		audio_dev_warn(adev, "%s too low, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	} else if (port->intrs > IXP_MAX_INTS) {
		audio_dev_warn(adev, "%s too high, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	}

	/*
	 * Figure out how much space we need.  Sample rate is 48kHz, and
	 * we need to store 8 chunks.  (Note that this means that low
	 * interrupt frequencies will require more RAM.)
	 */
	port->fragfr = 48000 / port->intrs;
	port->fragfr = IXP_ROUNDUP(port->fragfr, IXP_MOD_SIZE);
	port->fragsz = port->fragfr * port->nchan * 2;
	port->samp_size = port->fragsz * IXP_BD_NUMS;

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}

	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc == DDI_FAILURE) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * now, from here we allocate DMA memory for buffer descriptor list.
	 * we allocate adjacent DMA memory for all DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * we allocate all buffer descriptors lists in continuous dma memory.
	 */
	port->bdl_size = sizeof (audioixp_bd_entry_t) * IXP_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	/*
	 * Wire up the BD list as a circular chain: each entry points at
	 * its fragment of the sample buffer and at the next descriptor.
	 */
	paddr = port->samp_paddr;
	bdentry = (void *)port->bdl_kaddr;

	for (int i = 0; i < IXP_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, &bdentry->buf_base, paddr);
		ddi_put16(port->bdl_acch, &bdentry->status, 0);
		ddi_put16(port->bdl_acch, &bdentry->buf_len, port->fragsz / 4);
		ddi_put32(port->bdl_acch, &bdentry->next,
		    port->bdl_paddr + (((i + 1) % IXP_BD_NUMS) *
		    sizeof (audioixp_bd_entry_t)));
		paddr += port->fragsz;
		bdentry++;
	}
	(void) ddi_dma_sync(port->bdl_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);

	port->engine = audio_engine_alloc(&audioixp_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
/*
 * ppb_initchild()
 *
 * Bus-nexus INITCHILD implementation for this PCI-PCI bridge: names the
 * child node, merges .conf prototype nodes into their h/w counterparts,
 * and programs the child's PCI configuration header (command register,
 * bridge control, cache-line-size, latency-timer) from this bridge's
 * defaults.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE, or DDI_NOT_WELL_FORMED (for a
 * prototype node whose properties could not be merged).
 */
static int
ppb_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;
	uint_t n;
	ushort_t bcr;
	uchar_t header_type;
	uchar_t min_gnt, latency_timer;
	ppb_devstate_t *ppb;

	/*
	 * Name the child
	 */
	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);
	ddi_set_parent_data(child, NULL);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ppb_removechild(child);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ppb_removechild(child);
		return (DDI_NOT_WELL_FORMED);
	}

	/* Bridge soft state is keyed by the parent's instance number. */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	ddi_set_parent_data(child, NULL);

	/*
	 * If hardware is PM capable, set up the power info structure.
	 * This also ensures the the bus will not be off (0MHz) otherwise
	 * system panics during a bus access.
	 */
	if (PM_CAPABLE(ppb->ppb_pwr_p)) {
		/*
		 * Create a pwr_info struct for child. Bus will be
		 * at full speed after creating info.
		 */
		pci_pwr_create_info(ppb->ppb_pwr_p, child);
#ifdef DEBUG
		ASSERT(ppb->ppb_pwr_p->current_lvl == PM_LEVEL_B0);
#endif
	}

	/*
	 * If configuration registers were previously saved by
	 * child (before it entered D3), then let the child do the
	 * restore to set up the config regs as it'll first need to
	 * power the device out of D3.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "config-regs-saved-by-child") == 1) {
		DEBUG2(DBG_PWR, ddi_get_parent(child),
		    "INITCHILD: config regs to be restored by child"
		    " for %s@%s\n", ddi_node_name(child),
		    ddi_get_name_addr(child));
		return (DDI_SUCCESS);
	}

	DEBUG2(DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		/* Undo the pwr_info created above before failing. */
		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			pci_pwr_rm_info(ppb->ppb_pwr_p, child);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Determine the configuration header type.
	 */
	header_type = pci_config_get8(config_handle, PCI_CONF_HEADER);

	/*
	 * Support for the "command-preserve" property.  Bits listed in
	 * the property are kept as the device has them; all other bits
	 * come from ppb_command_default.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);

	/*
	 * If the device has a bus control register (header type 1, i.e.
	 * the child is itself a bridge) then program it based on the
	 * settings in the command register.
	 */
	if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		bcr = pci_config_get8(config_handle, PCI_BCNF_BCNTRL);
		if (ppb_command_default & PCI_COMM_PARITY_DETECT)
			bcr |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
		if (ppb_command_default & PCI_COMM_SERR_ENABLE)
			bcr |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		bcr |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		pci_config_put8(config_handle, PCI_BCNF_BCNTRL, bcr);
	}

	/*
	 * Initialize cache-line-size configuration register if needed.
	 * Only done when the PROM did not already supply the property.
	 */
	if (ppb_set_cache_line_size_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "cache-line-size", 0) == 0) {
		pci_config_put8(config_handle, PCI_CONF_CACHE_LINESZ,
		    ppb->ppb_cache_line_size);
		/* Read back: a device that ignores the write leaves 0. */
		n = pci_config_get8(config_handle, PCI_CONF_CACHE_LINESZ);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "cache-line-size", n);
		}
	}

	/*
	 * Initialize latency timer configuration registers if needed.
	 */
	if (ppb_set_latency_timer_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "latency-timer", 0) == 0) {
		if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
			/* Bridge child: also program its secondary timer. */
			latency_timer = ppb->ppb_latency_timer;
			pci_config_put8(config_handle, PCI_BCNF_LATENCY_TIMER,
			    ppb->ppb_latency_timer);
		} else {
			/* Leaf device: derive the timer from MIN_GNT. */
			min_gnt = pci_config_get8(config_handle,
			    PCI_CONF_MIN_G);
			latency_timer = min_gnt * 8;
		}
		pci_config_put8(config_handle, PCI_CONF_LATENCY_TIMER,
		    latency_timer);
		n = pci_config_get8(config_handle, PCI_CONF_LATENCY_TIMER);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "latency-timer", n);
		}
	}

	/*
	 * SPARC PCIe FMA specific
	 *
	 * Note: parent_data for parent is created only if this is sparc PCI-E
	 * platform, for which, SG take a different route to handle device
	 * errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS) {
			pci_config_teardown(&config_handle);
			return (DDI_FAILURE);
		}
		pcie_init_dom(child);
	}

	/*
	 * Check to see if the XMITS/PCI-X workaround applies.
	 * NOTE(review): n is uint_t, so the "-1" sentinel compares via the
	 * usual conversion to UINT_MAX -- intentional, but worth knowing.
	 */
	n = ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_NOTPROM,
	    "pcix-update-cmd-reg", -1);
	if (n != -1) {
		extern void pcix_set_cmd_reg(dev_info_t *child,
		    uint16_t value);
		DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
		    "Workaround: value = %x\n", n);
		pcix_set_cmd_reg(child, n);
	}
	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
/* * bbc_beep_hztocounter() : * Given a frequency in hz, find out the value to * be set in the Keyboard Beep Counter register * BBC beeper uses the following formula to calculate * frequency. The formulae is : * frequency generated = system freq /2^(n+2) * Where n = position of the bit of counter register * that is turned on and can range between 10 to 18. * So in this function, the inputs are frequency generated * and system frequency and we need to find out n, i.e, which * bit to turn on.(Ref. to Section 4.2.22 of the BBC programming * manual). */ unsigned long bbc_beep_hztocounter(int freq) { int i; unsigned long counter; int newfreq, oldfreq; int system_freq; /* * Get system frequency for the root dev_info properties */ system_freq = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(), 0, "clock-frequency", 0); oldfreq = 0; /* * Calculate frequency by turning on ith bit and * matching it with the passed frequency and we do this * in a loop for all the relevant bits */ for (i = BBC_BEEP_MIN_SHIFT, counter = 1 << BBC_BEEP_MSBIT; i >= BBC_BEEP_MAX_SHIFT; i--, counter >>= 1) { /* * Calculate the frequency by dividing the system * frequency by 2^i */ newfreq = system_freq >> i; /* * Check if we turn on the ith bit, the * frequency matches exactly or not */ if (newfreq == freq) { /* * Exact match of passed frequency with the * counter value */ return (counter); } /* * If calculated frequency is bigger * return the passed frequency */ if (newfreq > freq) { if (i == BBC_BEEP_MIN_SHIFT) { /* Input freq is less than the possible min */ return (counter); } /* * Find out the nearest frequency to the passed * frequency by comparing the difference between * the calculated frequency and the passed frequency */ if ((freq - oldfreq) > (newfreq - freq)) { /* Return new counter corres. 
to newfreq */ return (counter); } /* Return old counter corresponding to oldfreq */ return (counter << 1); } oldfreq = newfreq; } /* * Input freq is greater than the possible max; * Back off the counter value and return max counter * value possible in the register */ return (counter << 1); }
uint64_t * get_intr_mapping_reg(int upaid, int slave) { int affin_upaid; dev_info_t *affin_dip; uint64_t *addr = intr_map_reg[upaid]; /* If we're a UPA master, or we have a valid mapping register. */ if (!slave || addr != NULL) return (addr); /* * We only get here if we're a UPA slave only device whose interrupt * mapping register has not been set. * We need to try and install the nexus whose physical address * space is where the slaves mapping register resides. They * should call set_intr_mapping_reg() in their xxattach() to register * the mapping register with the system. */ /* * We don't know if a single- or multi-interrupt proxy is fielding * our UPA slave interrupt, we must check both cases. * Start out by assuming the multi-interrupt case. * We assume that single- and multi- interrupters are not * overlapping in UPA portid space. */ affin_upaid = upaid | 3; /* * We start looking for the multi-interrupter affinity node. * We know it's ONLY a child of the root node since the root * node defines UPA space. */ for (affin_dip = ddi_get_child(ddi_root_node()); affin_dip; affin_dip = ddi_get_next_sibling(affin_dip)) if (ddi_prop_get_int(DDI_DEV_T_ANY, affin_dip, DDI_PROP_DONTPASS, "upa-portid", -1) == affin_upaid) break; if (affin_dip) { if (i_ddi_attach_node_hierarchy(affin_dip) == DDI_SUCCESS) { /* try again to get the mapping register. */ addr = intr_map_reg[upaid]; } } /* * If we still don't have a mapping register try single -interrupter * case. */ if (addr == NULL) { affin_upaid = upaid | 1; for (affin_dip = ddi_get_child(ddi_root_node()); affin_dip; affin_dip = ddi_get_next_sibling(affin_dip)) if (ddi_prop_get_int(DDI_DEV_T_ANY, affin_dip, DDI_PROP_DONTPASS, "upa-portid", -1) == affin_upaid) break; if (affin_dip) { if (i_ddi_attach_node_hierarchy(affin_dip) == DDI_SUCCESS) { /* try again to get the mapping register. */ addr = intr_map_reg[upaid]; } } } return (addr); }
/*
 * rmc_comm_attach()
 *
 * DDI attach(9E) entry point for the RMC communication driver.
 * Handles DDI_ATTACH (full initialization: soft state, serial device,
 * data protocol, driver interface, interrupts) and DDI_RESUME
 * (hardware reset, watchdog/protocol/CPU-signature re-establishment).
 * Only instance 0 is supported.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

		/*
		 * Re-arm the hardware watchdog if it was active before
		 * the suspend (tod_lock protects the tod_ops vector).
		 */
		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer !=
		    NULL && watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		/* Restart the data protocol from its initial sequence id. */
		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

		/*
		 * Re-send the CPU signature if one was published before
		 * the suspend (symbol looked up dynamically since it may
		 * not be present in all configurations).
		 */
		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance,
	    "rmc_comm_attach")) == NULL) {
		/*
		 * NOTE(review): rcs is NULL here; rmc_comm_unattach is
		 * presumably tolerant of that and tears down by instance
		 * -- confirm against its implementation.
		 */
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);
	/* dip stays NULL until attach fully succeeds (set below). */
	rcs->dip = NULL;

	/*
	 * Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24MHz uart clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface (note the extra teardown flags
	 * passed to rmc_comm_unattach now that dp/serdev are live)
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 * Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 * All done, report success
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}
/*
 * i_vldc_mdeg_register()
 *
 * Register a machine-description-event (mdeg) callback for this vldc
 * instance.  Builds a per-instance copy of the global property spec
 * template, fills in the instance's "name" and "reg" (instance number)
 * properties, and registers i_vldc_mdeg_cb with the mdeg framework.
 *
 * On success, ownership of the allocated spec structures is recorded in
 * vldcp (inst_spec, mdeg_hdl) for later teardown.  On failure all
 * allocations made here are freed.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_vldc_mdeg_register(vldc_t *vldcp)
{
	mdeg_prop_spec_t *pspecp;
	mdeg_node_spec_t *inst_specp;
	mdeg_handle_t mdeg_hdl;
	size_t templatesz;
	int inst;
	char *name;
	size_t namesz;
	char *nameprop;
	int rv;

	/* get the unique vldc instance assigned by the LDom manager */
	inst = ddi_prop_get_int(DDI_DEV_T_ANY, vldcp->dip,
	    DDI_PROP_DONTPASS, "reg", -1);
	if (inst == -1) {
		cmn_err(CE_NOTE, "?vldc%d has no 'reg' property",
		    ddi_get_instance(vldcp->dip));
		return (DDI_FAILURE);
	}

	/* get the name of the vldc instance */
	rv = ddi_prop_lookup_string(DDI_DEV_T_ANY, vldcp->dip,
	    DDI_PROP_DONTPASS, "name", &nameprop);
	if (rv != DDI_PROP_SUCCESS) {
		cmn_err(CE_NOTE, "?vldc%d has no 'name' property",
		    ddi_get_instance(vldcp->dip));
		return (DDI_FAILURE);
	}

	D1("i_vldc_mdeg_register: name=%s, instance=%d\n", nameprop, inst);

	/*
	 * Allocate and initialize a per-instance copy
	 * of the global property spec array that will
	 * uniquely identify this vldc instance.
	 */
	templatesz = sizeof (vldc_prop_template);
	pspecp = kmem_alloc(templatesz, KM_SLEEP);

	bcopy(vldc_prop_template, pspecp, templatesz);

	/*
	 * copy in the name property; a private copy is taken so the
	 * DDI property buffer can be released immediately below
	 */
	namesz = strlen(nameprop) + 1;
	name = kmem_alloc(namesz, KM_SLEEP);

	bcopy(nameprop, name, namesz);
	VLDC_SET_MDEG_PROP_NAME(pspecp, name);
	ddi_prop_free(nameprop);

	/* copy in the instance property */
	VLDC_SET_MDEG_PROP_INST(pspecp, inst);

	/* initialize the complete prop spec structure */
	inst_specp = kmem_alloc(sizeof (mdeg_node_spec_t), KM_SLEEP);
	inst_specp->namep = "virtual-device";
	inst_specp->specp = pspecp;

	/* perform the registration */
	rv = mdeg_register(inst_specp, &vport_match, i_vldc_mdeg_cb,
	    vldcp, &mdeg_hdl);

	if (rv != MDEG_SUCCESS) {
		cmn_err(CE_NOTE, "?i_vldc_mdeg_register: mdeg_register "
		    "failed, err = %d", rv);
		/* unwind every allocation made above */
		kmem_free(name, namesz);
		kmem_free(pspecp, templatesz);
		kmem_free(inst_specp, sizeof (mdeg_node_spec_t));
		return (DDI_FAILURE);
	}

	/* save off data that will be needed later */
	vldcp->inst_spec = inst_specp;
	vldcp->mdeg_hdl = mdeg_hdl;

	return (DDI_SUCCESS);
}
/*
 * Initialize memory power management subsystem.
 * Note: This function should only be called from ATTACH.
 * Note: caller must ensure exclusive access to all fipe_xxx interfaces.
 *
 * Initializes the global control structure, reads the PM policy from
 * the device property, precomputes the hrtime scaling factor, and sets
 * up the mc/ioat/per-CPU state (and optionally a kstat).  Returns 0 on
 * success, -1 on failure (with partial state torn down via the goto
 * cleanup chain).
 */
int
fipe_init(dev_info_t *dip)
{
	size_t nsize;
	hrtime_t hrt;

	/* Initialize global control structure. */
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));
	mutex_init(&fipe_gbl_ctrl.lock, NULL, MUTEX_DRIVER, NULL);

	/* Query power management policy from device property. */
	fipe_pm_policy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    FIPE_PROP_PM_POLICY, fipe_pm_policy);
	if (fipe_pm_policy < 0 || fipe_pm_policy >= FIPE_PM_POLICY_MAX) {
		/* Out-of-range property value: fall back to BALANCE. */
		cmn_err(CE_CONT,
		    "?fipe: invalid power management policy %d.\n",
		    fipe_pm_policy);
		fipe_pm_policy = FIPE_PM_POLICY_BALANCE;
	}
	fipe_profile_curr = &fipe_profiles[fipe_pm_policy];

	/*
	 * Compute unscaled hrtime value corresponding to FIPE_STAT_INTERVAL.
	 * (1 << 36) should be big enough here.
	 */
	hrt = 1ULL << 36;
	scalehrtime(&hrt);
	fipe_idle_ctrl.tick_interval = FIPE_STAT_INTERVAL * (1ULL << 36) / hrt;

	if (fipe_mc_init(dip) != 0) {
		cmn_err(CE_WARN, "!fipe: failed to initialize mc state.");
		goto out_mc_error;
	}
	if (fipe_ioat_init() != 0) {
		cmn_err(CE_NOTE, "!fipe: failed to initialize ioat state.");
		goto out_ioat_error;
	}

	/*
	 * Allocate per-CPU structure.  The buffer is over-allocated by
	 * CPU_CACHE_COHERENCE_SIZE so the array start can be rounded up
	 * to that boundary (presumably to avoid cache-line sharing --
	 * confirm against CPU_CACHE_COHERENCE_SIZE's definition).
	 */
	nsize = max_ncpus * sizeof (fipe_cpu_state_t);
	nsize += CPU_CACHE_COHERENCE_SIZE;
	fipe_gbl_ctrl.state_buf = kmem_zalloc(nsize, KM_SLEEP);
	fipe_gbl_ctrl.state_size = nsize;
	fipe_cpu_states = (fipe_cpu_state_t *)P2ROUNDUP(
	    (intptr_t)fipe_gbl_ctrl.state_buf, CPU_CACHE_COHERENCE_SIZE);

#ifdef FIPE_KSTAT_SUPPORT
	/* kstat creation failure is non-fatal; only a notice is logged. */
	fipe_gbl_ctrl.fipe_kstat = kstat_create("fipe", 0, "fipe-pm", "misc",
	    KSTAT_TYPE_NAMED, sizeof (fipe_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (fipe_gbl_ctrl.fipe_kstat == NULL) {
		cmn_err(CE_CONT, "?fipe: failed to create kstat object.\n");
	} else {
		fipe_gbl_ctrl.fipe_kstat->ks_lock = &fipe_gbl_ctrl.lock;
		fipe_gbl_ctrl.fipe_kstat->ks_data = &fipe_kstat;
		fipe_gbl_ctrl.fipe_kstat->ks_update = fipe_kstat_update;
		kstat_install(fipe_gbl_ctrl.fipe_kstat);
	}
#endif /* FIPE_KSTAT_SUPPORT */

	return (0);

out_ioat_error:
	fipe_mc_fini();
out_mc_error:
	mutex_destroy(&fipe_gbl_ctrl.lock);
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));
	return (-1);
}
/*
 * A hotplug version of fill_cpu(). (Doesn't assume that there's a node
 * in the PROM device tree for this CPU.) We still need the PROM version
 * since it is called very early in the boot cycle before (before
 * setup_ddi()). Sigh...someday this will all be cleaned up.
 *
 * Populates cpunodes[cpuid] from the dev_info node's properties:
 * portid/cpuid, name, implementation/mask, clock frequency and tick
 * scaling, TLB sizes, external-cache geometry (CMT l2/l3 properties or
 * legacy ecache-* properties), and the msram mirroring flag.  Panics
 * if the portid is missing or the cpuid is out of range.
 */
void
fill_cpu_ddi(dev_info_t *dip)
{
	extern int cpu_get_cpu_unum(int, char *, int, int *);
	struct cpu_node *cpunode;
	processorid_t cpuid;
	int portid;
	int len = OBP_MAXPROPNAME;
	int tlbsize;
	dev_info_t *cmpnode;
	char namebuf[OBP_MAXPROPNAME], unum[UNUM_NAMLEN];
	char *namebufp;
	char dev_type[OBP_MAXPROPNAME];

	if ((portid = get_portid_ddi(dip, &cmpnode)) == -1) {
		cmn_err(CE_PANIC, "portid not found");
	}

	/* Fall back to the portid when no explicit "cpuid" property. */
	if ((cpuid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "cpuid", -1)) == -1) {
		cpuid = portid;
	}

	if (cpuid < 0 || cpuid >= NCPU) {
		cmn_err(CE_PANIC, "cpu dip %p: cpuid %d out of range",
		    (void *)dip, cpuid);
		return;
	}

	cpunode = &cpunodes[cpuid];
	cpunode->portid = portid;
	cpunode->nodeid = ddi_get_nodeid(dip);

	if (cmpnode != NULL) {
		/*
		 * For the CMT case, the parent "core" node contains
		 * properties needed below, use it instead of the
		 * cpu node.
		 */
		if ((ddi_prop_op(DDI_DEV_T_ANY, cmpnode, PROP_LEN_AND_VAL_BUF,
		    DDI_PROP_DONTPASS, "device_type",
		    (caddr_t)dev_type, &len) == DDI_PROP_SUCCESS) &&
		    (strcmp(dev_type, "core") == 0))
			dip = cmpnode;
	}

	/* FRU unum is best-effort; an empty fmri marks "unknown". */
	if (cpu_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, &len) != 0) {
		cpunode->fru_fmri[0] = '\0';
	} else {
		(void) snprintf(cpunode->fru_fmri, sizeof (cpunode->fru_fmri),
		    "%s%s", CPU_FRU_FMRI, unum);
	}

	/*
	 * CMT cores are named via "compatible"; plain CPU nodes via
	 * "name".  Vendor prefixes are stripped below.
	 */
	len = sizeof (namebuf);
	(void) ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS, (cmpnode ? "compatible" : "name"),
	    (caddr_t)namebuf, &len);

	namebufp = namebuf;
	if (strncmp(namebufp, "SUNW,", 5) == 0)
		namebufp += 5;
	else if (strncmp(namebufp, "FJSV,", 5) == 0)
		namebufp += 5;
	(void) strcpy(cpunode->name, namebufp);

	cpunode->implementation = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "implementation#", 0);
	cpunode->version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "mask#", 0);

	if (IS_CHEETAH(cpunode->implementation)) {
		/* remap mask reg */
		cpunode->version = REMAP_CHEETAH_MASK(cpunode->version);
	}

	cpunode->clock_freq = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	ASSERT(cpunode->clock_freq != 0);
	/*
	 * Compute scaling factor based on rate of %tick. This is used
	 * to convert from ticks derived from %tick to nanoseconds. See
	 * comment in sun4u/sys/clock.h for details.
	 */
	cpunode->tick_nsec_scale = (uint_t)(((uint64_t)NANOSEC <<
	    (32 - TICK_NSEC_SHIFT)) / cpunode->clock_freq);

	tlbsize = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "#itlb-entries", 0);
	ASSERT(tlbsize < USHRT_MAX); /* since we cast it */
	cpunode->itlb_size = (ushort_t)tlbsize;

	tlbsize = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "#dtlb-entries", 0);
	ASSERT(tlbsize < USHRT_MAX); /* since we cast it */
	cpunode->dtlb_size = (ushort_t)tlbsize;

	if (cmpnode != NULL) {
		/*
		 * If the CPU has a level 3 cache, then that is it's
		 * external cache. Otherwise the external cache must
		 * be the level 2 cache.
		 */
		cpunode->ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "l3-cache-size", 0);
		if (cpunode->ecache_size == 0)
			cpunode->ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY,
			    dip, DDI_PROP_DONTPASS, "l2-cache-size", 0);
		ASSERT(cpunode->ecache_size != 0);

		cpunode->ecache_linesize = ddi_prop_get_int(DDI_DEV_T_ANY,
		    dip, DDI_PROP_DONTPASS, "l3-cache-line-size", 0);
		if (cpunode->ecache_linesize == 0)
			cpunode->ecache_linesize =
			    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "l2-cache-line-size", 0);
		ASSERT(cpunode->ecache_linesize != 0);

		/*
		 * NOTE(review): associativity is read from the l2
		 * property only (no l3 fallback, unlike size/linesize)
		 * -- presumably intentional; confirm for l3 parts.
		 */
		cpunode->ecache_associativity =
		    ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "l2-cache-associativity", 0);
		ASSERT(cpunode->ecache_associativity != 0);

		cmp_add_cpu(portid, cpuid);
	} else {
		cpunode->ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "ecache-size", 0);
		ASSERT(cpunode->ecache_size != 0);

		cpunode->ecache_linesize = ddi_prop_get_int(DDI_DEV_T_ANY,
		    dip, DDI_PROP_DONTPASS, "ecache-line-size", 0);
		ASSERT(cpunode->ecache_linesize != 0);

		cpunode->ecache_associativity =
		    ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ecache-associativity", 0);
		ASSERT(cpunode->ecache_associativity != 0);
	}

	/* by default set msram to non-mirrored one */
	cpunode->msram = ECACHE_CPU_NON_MIRROR;

	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "msram")) {
		cpunode->msram = ECACHE_CPU_MIRROR;
	} else if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "msram-observed")) {
		cpunode->msram = ECACHE_CPU_MIRROR;
	}

	ASSERT(ncpunode > 0); /* fiximp not req'd */

	cpunode->ecache_setsize =
	    cpunode->ecache_size / cpunode->ecache_associativity;

	adj_ecache_setsize(cpunode->ecache_setsize);

	ncpunode++;
}
/*
 * ppb_initchild()
 *
 * INITCHILD implementation for the PCIe variant of this bridge driver:
 * names the child, merges .conf prototype nodes, sets up PCIe FMA
 * config handles on PCIe platforms, allocates parent-private interrupt
 * data when the child has an "interrupts" property, and programs the
 * child's PCI command register from the bridge defaults.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE, or DDI_NOT_WELL_FORMED (prototype
 * node that could not be merged).
 */
static int
ppb_initchild(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;
	ppb_devstate_t *ppb;
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;

	/* Bridge soft state is keyed by the parent's instance number. */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	ddi_set_parent_data(child, NULL);

	/*
	 * PCIe FMA specific
	 *
	 * Note: parent_data for parent is created only if this is PCI-E
	 * platform, for which, SG take a different route to handle device
	 * errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS)
			return (DDI_FAILURE);
		pcie_init_dom(child);
	}

	/*
	 * transfer select properties from PROM to kernel: children with
	 * an "interrupts" property get one intrspec appended to their
	 * parent-private data (allocated in a single zalloc)
	 */
	if (ddi_getprop(DDI_DEV_T_NONE, child, DDI_PROP_DONTPASS,
	    "interrupts", -1) != -1) {
		pdptr = kmem_zalloc(
		    (sizeof (struct ddi_parent_private_data) +
		    sizeof (struct intrspec)), KM_SLEEP);
		pdptr->par_intr = (struct intrspec *)(pdptr + 1);
		pdptr->par_nintr = 1;
		ddi_set_parent_data(child, pdptr);
	} else
		ddi_set_parent_data(child, NULL);

	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		/*
		 * NOTE(review): pcie_fini_dom() is called even when
		 * pcie_init_dom() was skipped (non-PCIe parent_bus) --
		 * presumably safe on an uninitialized child; confirm.
		 */
		pcie_fini_dom(child);
		return (DDI_FAILURE);
	}

	/*
	 * Support for the "command-preserve" property.  Bits listed in
	 * the property are kept as the device has them; all other bits
	 * come from ppb_command_default.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);

	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}