/*
 * Validate that the device in hand is indeed a virtio network device.
 *
 * Checks the PCI vendor id, device id range, and ABI revision against
 * the virtio legacy (ABI v0) expectations.
 *
 * Returns DDI_SUCCESS when all three identifiers match, DDI_FAILURE
 * otherwise (or when config space cannot be mapped).
 */
static int
virtio_validate_pcidev(dev_info_t *dip)
{
    ddi_acc_handle_t pcihdl;
    int rc;

    rc = pci_config_setup(dip, &pcihdl);
    if (rc != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    if (pci_config_get16(pcihdl, PCI_CONF_VENID) != VIRTIO_PCI_VENDOR) {
        cmn_err(CE_WARN, "Incorrect PCI vendor id");
        rc = DDI_FAILURE;
    }

    uint16_t devid = pci_config_get16(pcihdl, PCI_CONF_DEVID);
    /*
     * BUG FIX: the range check previously used '&&', which can never
     * be true (a value cannot be both below the minimum and above the
     * maximum), so out-of-range device ids were silently accepted.
     * Reject any id outside [VIRTIO_PCI_DEVID_MIN, VIRTIO_PCI_DEVID_MAX].
     */
    if ((devid < VIRTIO_PCI_DEVID_MIN) || (devid > VIRTIO_PCI_DEVID_MAX)) {
        cmn_err(CE_WARN, "Incorrect PCI device id");
        rc = DDI_FAILURE;
    }

    if (pci_config_get16(pcihdl, PCI_CONF_REVID) != VIRTIO_PCI_REV_ABIV0) {
        cmn_err(CE_WARN, "Unsupported virtio ABI detected");
        rc = DDI_FAILURE;
    }

    pci_config_teardown(&pcihdl);
    return (rc);
}
int oce_identify_hw(struct oce_dev *dev) { int ret = DDI_SUCCESS; dev->vendor_id = pci_config_get16(dev->pci_cfg_handle, PCI_CONF_VENID); dev->device_id = pci_config_get16(dev->pci_cfg_handle, PCI_CONF_DEVID); dev->subsys_id = pci_config_get16(dev->pci_cfg_handle, PCI_CONF_SUBSYSID); dev->subvendor_id = pci_config_get16(dev->pci_cfg_handle, PCI_CONF_SUBVENID); switch (dev->device_id) { case DEVID_TIGERSHARK: dev->chip_rev = OC_CNA_GEN2; break; case DEVID_TOMCAT: dev->chip_rev = OC_CNA_GEN3; break; default: dev->chip_rev = 0; ret = DDI_FAILURE; break; } return (ret); }
/*
 * rge_chip_cfg_init() -- capture the chip identification data held in
 * PCI configuration space and enable bus-master, memory-space and
 * I/O-space decoding on the device.
 */
void
rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
{
    ddi_acc_handle_t hdl = rgep->cfg_handle;
    uint16_t cmdval;

    /*
     * Snapshot the identification and timing registers, including
     * the PCI cache line size and subsystem vendor ID.
     */
    cidp->command = pci_config_get16(hdl, PCI_CONF_COMM);
    cidp->vendor = pci_config_get16(hdl, PCI_CONF_VENID);
    cidp->device = pci_config_get16(hdl, PCI_CONF_DEVID);
    cidp->subven = pci_config_get16(hdl, PCI_CONF_SUBVENID);
    cidp->subdev = pci_config_get16(hdl, PCI_CONF_SUBSYSID);
    cidp->revision = pci_config_get8(hdl, PCI_CONF_REVID);
    cidp->clsize = pci_config_get8(hdl, PCI_CONF_CACHE_LINESZ);
    cidp->latency = pci_config_get8(hdl, PCI_CONF_LATENCY_TIMER);

    /*
     * Turn on Master Enable (DMA), Memory Access Enable and
     * I/O Enable bits in the command register.
     */
    cmdval = cidp->command | PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
    pci_config_put16(hdl, PCI_CONF_COMM, cmdval);

    RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
        cidp->vendor, cidp->device, cidp->revision));
    RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
        cidp->subven, cidp->subdev));
    RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
        cidp->clsize, cidp->latency, cidp->command));
}
/*
 * Apply Intel chipset error-handling workarounds for the device whose
 * config space is mapped by cfg_hdl.  No-op for non-Intel devices.
 */
void
npe_intel_error_workaround(ddi_acc_handle_t cfg_hdl)
{
    uint16_t ven = pci_config_get16(cfg_hdl, PCI_CONF_VENID);
    uint16_t dev = pci_config_get16(cfg_hdl, PCI_CONF_DEVID);
    uint32_t mask;

    if (ven != INTEL_VENDOR_ID)
        return;

    /*
     * Due to an errata in Intel's ESB2 southbridge, all ECRC
     * generation/checking need to be disabled.  There is a
     * workaround by setting a proprietary bit in the ESB2, but it
     * is not well documented or understood.  If that bit is set in
     * the future, then ECRC generation/checking should be enabled
     * again.
     *
     * Disable ECRC generation/checking by masking ECRC in the AER
     * UE Mask.  The pcie misc module would then automatically
     * disable ECRC generation/checking in the AER Control register.
     */
    mask = pcie_get_aer_uce_mask() | PCIE_AER_UCE_ECRC;
    pcie_set_aer_uce_mask(mask);

    if (INTEL_NB5500_PCIE_DEV_ID(dev) || INTEL_NB5520_PCIE_DEV_ID(dev)) {
        /*
         * Turn full scan on since the Error Source ID register
         * may not have the correct ID.  See Intel 5520 and
         * Intel 5500 Chipsets errata #34 and #54 in the August
         * 2009 specification update, document number 321329-006.
         */
        pcie_force_fullscan();
    }
}
/*
 * Peek (read) a value of the requested access size from the device's
 * PCI configuration space and store it in ppd->pp_acc_data.
 */
static void
rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
{
    uint64_t regval;
    uint64_t regno;

    RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
        (void *)rgep, (void *)ppd));

    regno = ppd->pp_acc_offset;

    switch (ppd->pp_acc_size) {
    case 1:
        regval = pci_config_get8(rgep->cfg_handle, regno);
        break;

    case 2:
        regval = pci_config_get16(rgep->cfg_handle, regno);
        break;

    case 4:
        regval = pci_config_get32(rgep->cfg_handle, regno);
        break;

    case 8:
        regval = pci_config_get64(rgep->cfg_handle, regno);
        break;

    default:
        /*
         * BUG FIX: there was no default case, so an unexpected
         * access size left regval uninitialized (undefined
         * behavior) and stored garbage into pp_acc_data.
         * Return 0 for unsupported sizes instead.
         */
        regval = 0;
        break;
    }

    ppd->pp_acc_data = regval;
}
/*
 * ppb_save_config_regs
 *
 * This routine saves the state of the configuration registers of all
 * the child nodes of each PBM.
 *
 * used by: ppb_detach() on suspends
 *
 * return value: none
 */
static void
ppb_save_config_regs(ppb_devstate_t *ppb_p)
{
    int i;
    dev_info_t *dip;
    ddi_acc_handle_t config_handle;

    /* One config_state slot per child; i advances with each child. */
    for (i = 0, dip = ddi_get_child(ppb_p->dip); dip != NULL;
        i++, dip = ddi_get_next_sibling(dip)) {
        if (pci_config_setup(dip, &config_handle) != DDI_SUCCESS) {
            /*
             * NOTE(review): on failure the slot at index i is
             * left unset but i still advances with the loop,
             * leaving a gap in config_state[] -- confirm the
             * restore path tolerates slots with an unset dip.
             */
            cmn_err(CE_WARN, "%s%d: can't config space for %s%d\n",
                ddi_driver_name(ppb_p->dip),
                ddi_get_instance(ppb_p->dip),
                ddi_driver_name(dip),
                ddi_get_instance(dip));
            continue;
        }

        /* Record the child and its command register value. */
        ppb_p->config_state[i].dip = dip;
        ppb_p->config_state[i].command =
            pci_config_get16(config_handle, PCI_CONF_COMM);
        pci_config_teardown(&config_handle);
    }
    /* Number of children walked (including any skipped slots). */
    ppb_p->config_state_index = i;
}
/*
 * agp_target_cap_find()
 *
 * Description:
 *    Walk the linked PCI capability list looking for the AGP capability
 *    register and return its configuration-space offset, or 0 when not
 *    found.  This works for standard AGP chipsets, but not for some
 *    Intel chipsets like the I830M/I830MP/I852PM/I852GME/I855GME, which
 *    support AGP without advertising the capability; the offset must be
 *    set manually in those cases.
 *
 * Arguments:
 *    pci_handle    ddi acc handle of pci config
 *
 * Returns:
 *    0        No capability pointer register found
 *    nextcap    The AGP capability pointer register offset
 */
static off_t
agp_target_cap_find(ddi_acc_handle_t pci_handle)
{
    off_t cap;
    uint32_t caphdr = 0;
    uint8_t has_caps;

    /* Devices without the capability-list status bit have no list. */
    has_caps = (uint8_t)(pci_config_get16(pci_handle, PCI_CONF_STAT) &
        PCI_CONF_CAP_MASK);
    if (!has_caps)
        return (0);

    /*
     * Start at the first capability named by CAPPTR and follow the
     * next-pointer chain until the AGP capability (or the end, 0).
     */
    for (cap = (off_t)(pci_config_get8(pci_handle, AGP_CONF_CAPPTR));
        cap != 0;
        cap = (off_t)((caphdr & PCI_CONF_NCAPID_MASK) >> 8)) {
        caphdr = pci_config_get32(pci_handle, cap);
        /*
         * AGP3.0 rev1.0 p.127: the cap id was assigned by the
         * PCI SIG; see also the 845 data sheet p.69.
         */
        if ((caphdr & PCI_CONF_CAPID_MASK) == AGP_CAP_ID)
            break;
    }

    return (cap);
}
/*
 * The real intent of this routine is to return the value from PCI-E
 * config space at offset reg into the capability space.
 * ICH devices are "PCI Express"-ish.  They have a configuration space,
 * but do not contain PCI Express Capability registers, so this returns
 * the equivalent of "not supported".  Always returns 0.
 */
int32_t
e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
    uint32_t cap_offset = PCI_EX_CONF_CAP + reg;

    *value = pci_config_get16(OS_DEP(hw)->cfg_handle, cap_offset);
    return (0);
}
/*
 * Enable reporting of AER capability next pointer.
 * This needs to be done only for CK8-04 devices
 * by setting NV_XVR_VEND_CYA1 (offset 0xf40) bit 13
 * NOTE: BIOS is disabling this, it needs to be enabled temporarily
 */
void
npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl)
{
    ushort_t cya1;

    /* Only NVIDIA CK8-04 parts at or above the AER-capable revision. */
    if ((pci_config_get16(cfg_hdl, PCI_CONF_VENID) == NVIDIA_VENDOR_ID) &&
        (pci_config_get16(cfg_hdl, PCI_CONF_DEVID) ==
        NVIDIA_CK804_DEVICE_ID) &&
        (pci_config_get8(cfg_hdl, PCI_CONF_REVID) >=
        NVIDIA_CK804_AER_VALID_REVID)) {
        cya1 = pci_config_get16(cfg_hdl, NVIDIA_CK804_VEND_CYA1_OFF);
        /*
         * Proceed only when no bits outside the ERPT mask are set
         * in the vendor CYA1 register, then OR in the enable value.
         * NOTE(review): the !(cya1 & ~MASK) polarity reads oddly --
         * it writes only when the register is otherwise "clean";
         * confirm against the CK8-04 vendor documentation.
         */
        if (!(cya1 & ~NVIDIA_CK804_VEND_CYA1_ERPT_MASK))
            (void) pci_config_put16(cfg_hdl,
                NVIDIA_CK804_VEND_CYA1_OFF,
                cya1 | NVIDIA_CK804_VEND_CYA1_ERPT_VAL);
    }
}
/*
 * audio1575_pci_enable()
 *
 * Description:
 *    This routine enables all PCI IO and MEMORY accesses (and bus
 *    mastering) by setting the corresponding command-register bits.
 *
 *    Fix: guard against a NULL config handle exactly as the paired
 *    audio1575_pci_disable() routine does, so this is safe to call
 *    before config space has been mapped.
 *
 * Arguments:
 *    audio1575_state_t *statep    The device's state structure
 */
static void
audio1575_pci_enable(audio1575_state_t *statep)
{
    uint16_t pcics_reg;

    /* Consistency with audio1575_pci_disable(): no handle, no work. */
    if (statep->pcih == NULL)
        return;

    pcics_reg = pci_config_get16(statep->pcih, PCI_CONF_COMM);
    pcics_reg |= (PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
    pci_config_put16(statep->pcih, PCI_CONF_COMM, pcics_reg);
}
/*
 * Apply NVIDIA root-complex error-handling workarounds for the device
 * whose config space is mapped by cfg_hdl.  No-op for other devices.
 */
void
npe_nvidia_error_workaround(ddi_acc_handle_t cfg_hdl)
{
    uint16_t ven = pci_config_get16(cfg_hdl, PCI_CONF_VENID);
    uint16_t dev = pci_config_get16(cfg_hdl, PCI_CONF_DEVID);
    uint32_t mask;

    if (ven != NVIDIA_VENDOR_ID || !NVIDIA_PCIE_RC_DEV_ID(dev))
        return;

    /* Disable ECRC for all devices */
    mask = pcie_get_aer_uce_mask() | npe_aer_uce_mask |
        PCIE_AER_UCE_ECRC;
    pcie_set_aer_uce_mask(mask);

    /*
     * Turn full scan on since the Error Source ID register may not
     * have the correct ID.
     */
    pcie_force_fullscan();
}
/*
 * audio1575_pci_disable()
 *
 * Description:
 *    This routine disables all PCI IO and MEMORY accesses (and bus
 *    mastering) by clearing the corresponding command-register bits.
 *    Safe to call when config space was never mapped.
 *
 * Arguments:
 *    audio1575_state_t *statep    The device's state structure
 */
static void
audio1575_pci_disable(audio1575_state_t *statep)
{
    uint16_t command;

    /* Nothing to do if config space was never set up. */
    if (statep->pcih == NULL)
        return;

    command = pci_config_get16(statep->pcih, PCI_CONF_COMM);
    command &= ~(PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
    pci_config_put16(statep->pcih, PCI_CONF_COMM, command);
}
/*
 * Locate the HyperTransport MSI-mapping capability and set its enable
 * bit.  Returns DDI_FAILURE when the capability is absent, DDI_SUCCESS
 * after enabling the mapping.
 */
int
npe_enable_htmsi(ddi_acc_handle_t cfg_hdl)
{
    uint16_t cap_off;
    uint16_t mapreg;

    if (pci_htcap_locate(cfg_hdl, PCI_HTCAP_TYPE_MASK,
        PCI_HTCAP_MSIMAP_TYPE, &cap_off) != DDI_SUCCESS)
        return (DDI_FAILURE);

    mapreg = pci_config_get16(cfg_hdl, cap_off + PCI_CAP_ID_REGS_OFF);
    pci_config_put16(cfg_hdl, cap_off + PCI_CAP_ID_REGS_OFF,
        mapreg | PCI_HTCAP_MSIMAP_ENABLE);
    return (DDI_SUCCESS);
}
/*
 * Enable or disable the HyperTransport MSI-mapping capability according
 * to cmd (HT_MSIMAP_ENABLE enables; anything else disables).
 * Returns 1 when the capability was found and updated, 0 otherwise.
 */
static int
ppb_ht_msimap_set(ddi_acc_handle_t cfg_hdl, int cmd)
{
    uint16_t cap_off;
    uint16_t mapreg;

    if (pci_htcap_locate(cfg_hdl, PCI_HTCAP_TYPE_MASK,
        PCI_HTCAP_MSIMAP_TYPE, &cap_off) != DDI_SUCCESS)
        return (0);

    mapreg = pci_config_get16(cfg_hdl, cap_off + PCI_CAP_ID_REGS_OFF);
    if (cmd == HT_MSIMAP_ENABLE)
        mapreg |= PCI_HTCAP_MSIMAP_ENABLE;
    else
        mapreg &= ~(uint16_t)PCI_HTCAP_MSIMAP_ENABLE;
    pci_config_put16(cfg_hdl, cap_off + PCI_CAP_ID_REGS_OFF, mapreg);
    return (1);
}
boolean_t i40e_set_hw_bus_info(struct i40e_hw *hw) { uint8_t pcie_id = PCI_CAP_ID_PCI_E; uint16_t pcie_cap, value; int status; /* locate the pci-e capability block */ status = pci_lcap_locate((OS_DEP(hw))->ios_cfg_handle, pcie_id, &pcie_cap); if (status != DDI_SUCCESS) { i40e_error(OS_DEP(hw)->ios_i40e, "failed to locate PCIe " "capability block: %d", status); return (B_FALSE); } value = pci_config_get16(OS_DEP(hw)->ios_cfg_handle, pcie_cap + PCIE_LINKSTS); i40e_set_pci_config_data(hw, value); return (B_TRUE); }
/*
 * Decide whether this graphics device is the boot console and record
 * the result in softc->flags (GFXP_FLAG_CONSOLE).
 */
static void
gfxp_check_for_console(dev_info_t *devi, struct vgatext_softc *softc,
    int pci_pcie_bus)
{
    ddi_acc_handle_t pci_conf;
    dev_info_t *pdevi;
    uint16_t data16;

    /*
     * Based on Section 11.3, "PCI Display Subsystem Initialization",
     * of the 1.1 PCI-to-PCI Bridge Architecture Specification
     * determine if this is the boot console device.  First, see
     * if the SBIOS has turned on PCI I/O for this device.  Then if
     * this is PCI/PCI-E, verify the parent bridge has VGAEnable set.
     */
    if (pci_config_setup(devi, &pci_conf) != DDI_SUCCESS) {
        cmn_err(CE_WARN,
            MYNAME ": can't get PCI conf handle");
        return;
    }

    /* I/O decoding enabled by firmware => candidate console. */
    data16 = pci_config_get16(pci_conf, PCI_CONF_COMM);
    if (data16 & PCI_COMM_IO)
        softc->flags |= GFXP_FLAG_CONSOLE;
    pci_config_teardown(&pci_conf);

    /* If IO not enabled or ISA/EISA, just return */
    if (!(softc->flags & GFXP_FLAG_CONSOLE) || !pci_pcie_bus)
        return;

    /*
     * Check for VGA Enable in the Bridge Control register for all
     * PCI/PCIEX parents.  If not set all the way up the chain,
     * this cannot be the boot console.
     */
    pdevi = ddi_get_parent(devi);
    while (pdevi) {
        int error;
        ddi_acc_handle_t ppci_conf;
        char *parent_type = NULL;

        error = ddi_prop_lookup_string(DDI_DEV_T_ANY, pdevi,
            DDI_PROP_DONTPASS, "device_type", &parent_type);
        if (error != DDI_SUCCESS) {
            return;
        }

        /* Verify still on the PCI/PCIEX parent tree */
        if (!STREQ(parent_type, "pci") &&
            !STREQ(parent_type, "pciex")) {
            ddi_prop_free(parent_type);
            return;
        }

        /* Done with the property string; release it. */
        ddi_prop_free(parent_type);
        parent_type = NULL;

        if (pci_config_setup(pdevi, &ppci_conf) != DDI_SUCCESS) {
            /* No registers on root node, done with check */
            return;
        }
        data16 = pci_config_get16(ppci_conf, PCI_BCNF_BCNTRL);
        pci_config_teardown(&ppci_conf);

        /* Any bridge without VGA Enable disqualifies the chain. */
        if (!(data16 & PCI_BCNF_BCNTRL_VGA_ENABLE)) {
            softc->flags &= ~GFXP_FLAG_CONSOLE;
            return;
        }
        pdevi = ddi_get_parent(pdevi);
    }
}
/*
 * acebus_config()
 *
 * Program the ebus PCI configuration header (command register, cache
 * line size, latency timer) and, when built with ACEBUS_HOTPLUG, map
 * the ebus CSR and initialize the TCR registers if firmware left them
 * unprogrammed.  Returns 1 on success, 0 on failure.
 */
static int
acebus_config(ebus_devstate_t *ebus_p)
{
    ddi_acc_handle_t conf_handle;
    uint16_t comm;
#ifdef ACEBUS_HOTPLUG
    int tcr_reg;
    caddr_t csr_io;
    ddi_device_acc_attr_t csr_attr = {   /* CSR map attributes */
        DDI_DEVICE_ATTR_V0,
        DDI_STRUCTURE_LE_ACC,
        DDI_STRICTORDER_ACC
    };
    ddi_acc_handle_t csr_handle;
#endif

    /*
     * Make sure the master enable and memory access enable
     * bits are set in the config command register.
     */
    if (pci_config_setup(ebus_p->dip, &conf_handle) != DDI_SUCCESS)
        return (0);
    /*
     * NOTE(review): the trailing commas below (rather than
     * semicolons) splice the optional DEBUG statement onto the
     * expression via the comma operator, so the code parses with
     * DEBUG both defined and undefined.  Odd, but deliberate.
     */
    comm = pci_config_get16(conf_handle, PCI_CONF_COMM),
#ifdef DEBUG
    DBG1(D_ATTACH, ebus_p, "command register was 0x%x\n", comm);
#endif
    comm |= (PCI_COMM_ME|PCI_COMM_MAE|PCI_COMM_SERR_ENABLE|
        PCI_COMM_PARITY_DETECT);
    pci_config_put16(conf_handle, PCI_CONF_COMM, comm),
#ifdef DEBUG
    DBG1(D_MAP, ebus_p, "command register is now 0x%x\n",
        pci_config_get16(conf_handle, PCI_CONF_COMM));
#endif
    pci_config_put8(conf_handle, PCI_CONF_CACHE_LINESZ,
        (uchar_t)acebus_cache_line_size);
    pci_config_put8(conf_handle, PCI_CONF_LATENCY_TIMER,
        (uchar_t)acebus_latency_timer);
    pci_config_teardown(&conf_handle);

#ifdef ACEBUS_HOTPLUG
    if (acebus_update_props(ebus_p) != DDI_SUCCESS) {
        cmn_err(CE_WARN,
            "%s%d: Could not update special properties.",
            ddi_driver_name(ebus_p->dip),
            ddi_get_instance(ebus_p->dip));
        return (0);
    }

    if (ddi_regs_map_setup(ebus_p->dip, CSR_IO_RINDEX,
        (caddr_t *)&csr_io, 0, CSR_SIZE, &csr_attr,
        &csr_handle) != DDI_SUCCESS) {
        /*
         * NOTE(review): execution continues past this warning
         * with csr_handle uninitialized and later dereferenced
         * by ddi_get32() -- confirm whether a return (0) is
         * intended here.
         */
        cmn_err(CE_WARN, "%s%d: Could not map Ebus CSR.",
            ddi_driver_name(ebus_p->dip),
            ddi_get_instance(ebus_p->dip));
    }
#ifdef DEBUG
    if (acebus_debug_flags) {
        DBG3(D_ATTACH, ebus_p, "tcr[123] = %x,%x,%x\n",
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR1_OFF)),
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR2_OFF)),
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR3_OFF)));
        DBG2(D_ATTACH, ebus_p, "pmd-aux=%x, freq-aux=%x\n",
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + PMD_AUX_OFF)),
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + FREQ_AUX_OFF)));
#ifdef ACEBUS_DEBUG
        /* Note: reuses 'comm' as a loop counter for the DMA regs. */
        for (comm = 0; comm < 4; comm++)
            prom_printf("dcsr%d=%x, dacr%d=%x, dbcr%d=%x\n", comm,
                ddi_get32(csr_handle,
                (uint32_t *)((caddr_t)csr_io +
                0x700000+(0x2000*comm))), comm,
                ddi_get32(csr_handle,
                (uint32_t *)((caddr_t)csr_io +
                0x700000+(0x2000*comm)+4)), comm,
                ddi_get32(csr_handle,
                (uint32_t *)((caddr_t)csr_io +
                0x700000+(0x2000*comm)+8)));
#endif
    } /* acebus_debug_flags */
#endif
    /* If TCR registers are not initialized, initialize them here */
    tcr_reg = ddi_get32(csr_handle,
        (uint32_t *)((caddr_t)csr_io + TCR1_OFF));
    if ((tcr_reg == 0) || (tcr_reg == -1))
        ddi_put32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR1_OFF), TCR1_REGVAL);
    tcr_reg = ddi_get32(csr_handle,
        (uint32_t *)((caddr_t)csr_io + TCR2_OFF));
    if ((tcr_reg == 0) || (tcr_reg == -1))
        ddi_put32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR2_OFF), TCR2_REGVAL);
    tcr_reg = ddi_get32(csr_handle,
        (uint32_t *)((caddr_t)csr_io + TCR3_OFF));
    if ((tcr_reg == 0) || (tcr_reg == -1))
        ddi_put32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR3_OFF), TCR3_REGVAL);
#ifdef DEBUG
    if (acebus_debug_flags) {
        DBG3(D_ATTACH, ebus_p, "wrote tcr[123] = %x,%x,%x\n",
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR1_OFF)),
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR2_OFF)),
            ddi_get32(csr_handle,
            (uint32_t *)((caddr_t)csr_io + TCR3_OFF)));
    }
#endif
    ddi_regs_map_free(&csr_handle);
#endif /* ACEBUS_HOTPLUG */
    return (1);    /* return success */
}
/*
 * pcn_attach()
 *
 * DDI attach entry point for the pcn driver.  Handles DDI_ATTACH and
 * DDI_RESUME; sets up PCI config access, identifies the card, allocates
 * soft state, mutexes, register mapping, MII, DMA rings, interrupts,
 * and finally registers with the MAC framework.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
pcn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    pcn_t *pcnp;
    mac_register_t *macp;
    const pcn_type_t *pcn_type;
    int instance = ddi_get_instance(dip);
    int rc;
    ddi_acc_handle_t pci;
    uint16_t venid;
    uint16_t devid;
    uint16_t svid;
    uint16_t ssid;

    switch (cmd) {
    case DDI_RESUME:
        return (pcn_ddi_resume(dip));

    case DDI_ATTACH:
        break;

    default:
        return (DDI_FAILURE);
    }

    /* The device must be able to bus-master. */
    if (ddi_slaveonly(dip) == DDI_SUCCESS) {
        pcn_error(dip, "slot does not support PCI bus-master");
        return (DDI_FAILURE);
    }

    if (ddi_intr_hilevel(dip, 0) != 0) {
        pcn_error(dip, "hilevel interrupts not supported");
        return (DDI_FAILURE);
    }

    if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
        pcn_error(dip, "unable to setup PCI config handle");
        return (DDI_FAILURE);
    }

    /* Identification registers used to match a supported card. */
    venid = pci_config_get16(pci, PCI_CONF_VENID);
    devid = pci_config_get16(pci, PCI_CONF_DEVID);
    svid = pci_config_get16(pci, PCI_CONF_SUBVENID);
    ssid = pci_config_get16(pci, PCI_CONF_SUBSYSID);

    if ((pcn_type = pcn_match(venid, devid)) == NULL) {
        pci_config_teardown(&pci);
        pcn_error(dip, "Unable to identify PCI card");
        return (DDI_FAILURE);
    }

    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
        pcn_type->pcn_name) != DDI_PROP_SUCCESS) {
        pci_config_teardown(&pci);
        pcn_error(dip, "Unable to create model property");
        return (DDI_FAILURE);
    }

    if (ddi_soft_state_zalloc(pcn_ssp, instance) != DDI_SUCCESS) {
        pcn_error(dip, "Unable to allocate soft state");
        pci_config_teardown(&pci);
        return (DDI_FAILURE);
    }

    pcnp = ddi_get_soft_state(pcn_ssp, instance);
    pcnp->pcn_dip = dip;
    pcnp->pcn_instance = instance;
    /* -1 == no external PHY discovered yet. */
    pcnp->pcn_extphyaddr = -1;

    if (ddi_get_iblock_cookie(dip, 0, &pcnp->pcn_icookie) !=
        DDI_SUCCESS) {
        pcn_error(pcnp->pcn_dip, "ddi_get_iblock_cookie failed");
        ddi_soft_state_free(pcn_ssp, instance);
        pci_config_teardown(&pci);
        return (DDI_FAILURE);
    }

    mutex_init(&pcnp->pcn_xmtlock, NULL, MUTEX_DRIVER,
        pcnp->pcn_icookie);
    mutex_init(&pcnp->pcn_intrlock, NULL, MUTEX_DRIVER,
        pcnp->pcn_icookie);
    mutex_init(&pcnp->pcn_reglock, NULL, MUTEX_DRIVER,
        pcnp->pcn_icookie);

    /*
     * Enable bus master, IO space, and memory space accesses
     */
    pci_config_put16(pci, PCI_CONF_COMM,
        pci_config_get16(pci, PCI_CONF_COMM) |
        PCI_COMM_ME | PCI_COMM_MAE);
    pci_config_teardown(&pci);

    if (ddi_regs_map_setup(dip, 1, (caddr_t *)&pcnp->pcn_regs, 0, 0,
        &pcn_devattr, &pcnp->pcn_regshandle)) {
        pcn_error(dip, "ddi_regs_map_setup failed");
        goto fail;
    }

    /* Subsystem id word selects per-board behavior. */
    if (pcn_set_chipid(pcnp, (uint32_t)ssid << 16 | (uint32_t)svid) !=
        DDI_SUCCESS) {
        goto fail;
    }

    if ((pcnp->pcn_mii = mii_alloc(pcnp, dip, &pcn_mii_ops)) == NULL)
        goto fail;

    /* XXX: need to set based on device */
    mii_set_pauseable(pcnp->pcn_mii, B_FALSE, B_FALSE);

    if ((pcn_allocrxring(pcnp) != DDI_SUCCESS) ||
        (pcn_alloctxring(pcnp) != DDI_SUCCESS)) {
        pcn_error(dip, "unable to allocate DMA resources");
        goto fail;
    }

    pcnp->pcn_promisc = B_FALSE;

    /* Both locks held around hardware initialization. */
    mutex_enter(&pcnp->pcn_intrlock);
    mutex_enter(&pcnp->pcn_xmtlock);
    rc = pcn_initialize(pcnp, B_TRUE);
    mutex_exit(&pcnp->pcn_xmtlock);
    mutex_exit(&pcnp->pcn_intrlock);
    if (rc != DDI_SUCCESS)
        goto fail;

    if (ddi_add_intr(dip, 0, NULL, NULL, pcn_intr, (caddr_t)pcnp) !=
        DDI_SUCCESS) {
        pcn_error(dip, "unable to add interrupt");
        goto fail;
    }

    pcnp->pcn_flags |= PCN_INTR_ENABLED;

    if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
        pcn_error(pcnp->pcn_dip, "mac_alloc failed");
        goto fail;
    }

    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = pcnp;
    macp->m_dip = dip;
    macp->m_src_addr = pcnp->pcn_addr;
    macp->m_callbacks = &pcn_m_callbacks;
    macp->m_min_sdu = 0;
    macp->m_max_sdu = ETHERMTU;
    macp->m_margin = VLAN_TAGSZ;

    if (mac_register(macp, &pcnp->pcn_mh) == DDI_SUCCESS) {
        mac_free(macp);
        return (DDI_SUCCESS);
    }

    /*
     * NOTE(review): a failed mac_register() also returns
     * DDI_SUCCESS here; confirm whether this path should
     * instead goto fail.
     */
    mac_free(macp);
    return (DDI_SUCCESS);

fail:
    pcn_teardown(pcnp);
    return (DDI_FAILURE);
}
/*
 * pci_dump()
 *
 * Debug aid: dump the adapter's entire PCI configuration header, then
 * walk the capability chain (Power Management -> MSI -> MSI-X -> PCIe)
 * and finally dump the MSI-X table and pending-bit array from BAR3.
 *
 * NOTE(review): the capability walk assumes the fixed chain order
 * PM, MSI, MSI-X, PCIe rather than following cap ids -- this holds
 * for the supported igb hardware but is not generally true; confirm
 * before reusing on other devices.
 */
void
pci_dump(void *arg)
{
    igb_t *igb = (igb_t *)arg;
    ddi_acc_handle_t handle;
    uint8_t cap_ptr;
    uint8_t next_ptr;
    uint32_t msix_bar;
    uint32_t msix_ctrl;
    uint32_t msix_tbl_sz;
    uint32_t tbl_offset;
    uint32_t tbl_bir;
    uint32_t pba_offset;
    uint32_t pba_bir;
    off_t offset;
    off_t mem_size;
    uintptr_t base;
    ddi_acc_handle_t acc_hdl;
    int i;

    handle = igb->osdep.cfg_handle;

    igb_log(igb, "Begin dump PCI config space");

    /* Standard configuration header. */
    igb_log(igb, "PCI_CONF_VENID:\t0x%x\n",
        pci_config_get16(handle, PCI_CONF_VENID));
    igb_log(igb, "PCI_CONF_DEVID:\t0x%x\n",
        pci_config_get16(handle, PCI_CONF_DEVID));
    igb_log(igb, "PCI_CONF_COMMAND:\t0x%x\n",
        pci_config_get16(handle, PCI_CONF_COMM));
    igb_log(igb, "PCI_CONF_STATUS:\t0x%x\n",
        pci_config_get16(handle, PCI_CONF_STAT));
    igb_log(igb, "PCI_CONF_REVID:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_REVID));
    igb_log(igb, "PCI_CONF_PROG_CLASS:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_PROGCLASS));
    igb_log(igb, "PCI_CONF_SUB_CLASS:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_SUBCLASS));
    igb_log(igb, "PCI_CONF_BAS_CLASS:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_BASCLASS));
    igb_log(igb, "PCI_CONF_CACHE_LINESZ:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_CACHE_LINESZ));
    igb_log(igb, "PCI_CONF_LATENCY_TIMER:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_LATENCY_TIMER));
    igb_log(igb, "PCI_CONF_HEADER_TYPE:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_HEADER));
    igb_log(igb, "PCI_CONF_BIST:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_BIST));
    igb_log(igb, "PCI_CONF_BASE0:\t0x%x\n",
        pci_config_get32(handle, PCI_CONF_BASE0));
    igb_log(igb, "PCI_CONF_BASE1:\t0x%x\n",
        pci_config_get32(handle, PCI_CONF_BASE1));
    igb_log(igb, "PCI_CONF_BASE2:\t0x%x\n",
        pci_config_get32(handle, PCI_CONF_BASE2));

    /* MSI-X BAR */
    msix_bar = pci_config_get32(handle, PCI_CONF_BASE3);
    igb_log(igb, "PCI_CONF_BASE3:\t0x%x\n", msix_bar);
    igb_log(igb, "PCI_CONF_BASE4:\t0x%x\n",
        pci_config_get32(handle, PCI_CONF_BASE4));
    igb_log(igb, "PCI_CONF_BASE5:\t0x%x\n",
        pci_config_get32(handle, PCI_CONF_BASE5));

    igb_log(igb, "PCI_CONF_CIS:\t0x%x\n",
        pci_config_get32(handle, PCI_CONF_CIS));
    igb_log(igb, "PCI_CONF_SUBVENID:\t0x%x\n",
        pci_config_get16(handle, PCI_CONF_SUBVENID));
    igb_log(igb, "PCI_CONF_SUBSYSID:\t0x%x\n",
        pci_config_get16(handle, PCI_CONF_SUBSYSID));
    igb_log(igb, "PCI_CONF_ROM:\t0x%x\n",
        pci_config_get32(handle, PCI_CONF_ROM));

    /* Start of the capability chain. */
    cap_ptr = pci_config_get8(handle, PCI_CONF_CAP_PTR);

    igb_log(igb, "PCI_CONF_CAP_PTR:\t0x%x\n", cap_ptr);
    igb_log(igb, "PCI_CONF_ILINE:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_ILINE));
    igb_log(igb, "PCI_CONF_IPIN:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_IPIN));
    igb_log(igb, "PCI_CONF_MIN_G:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_MIN_G));
    igb_log(igb, "PCI_CONF_MAX_L:\t0x%x\n",
        pci_config_get8(handle, PCI_CONF_MAX_L));

    /* Power Management */
    offset = cap_ptr;

    igb_log(igb, "PCI_PM_CAP_ID:\t0x%x\n",
        pci_config_get8(handle, offset));

    next_ptr = pci_config_get8(handle, offset + 1);

    igb_log(igb, "PCI_PM_NEXT_PTR:\t0x%x\n", next_ptr);
    igb_log(igb, "PCI_PM_CAP:\t0x%x\n",
        pci_config_get16(handle, offset + PCI_PMCAP));
    igb_log(igb, "PCI_PM_CSR:\t0x%x\n",
        pci_config_get16(handle, offset + PCI_PMCSR));
    igb_log(igb, "PCI_PM_CSR_BSE:\t0x%x\n",
        pci_config_get8(handle, offset + PCI_PMCSR_BSE));
    igb_log(igb, "PCI_PM_DATA:\t0x%x\n",
        pci_config_get8(handle, offset + PCI_PMDATA));

    /* MSI Configuration */
    offset = next_ptr;

    igb_log(igb, "PCI_MSI_CAP_ID:\t0x%x\n",
        pci_config_get8(handle, offset));

    next_ptr = pci_config_get8(handle, offset + 1);

    igb_log(igb, "PCI_MSI_NEXT_PTR:\t0x%x\n", next_ptr);
    igb_log(igb, "PCI_MSI_CTRL:\t0x%x\n",
        pci_config_get16(handle, offset + PCI_MSI_CTRL));
    igb_log(igb, "PCI_MSI_ADDR:\t0x%x\n",
        pci_config_get32(handle, offset + PCI_MSI_ADDR_OFFSET));
    igb_log(igb, "PCI_MSI_ADDR_HI:\t0x%x\n",
        pci_config_get32(handle, offset + 0x8));
    igb_log(igb, "PCI_MSI_DATA:\t0x%x\n",
        pci_config_get16(handle, offset + 0xC));

    /* MSI-X Configuration */
    offset = next_ptr;

    igb_log(igb, "PCI_MSIX_CAP_ID:\t0x%x\n",
        pci_config_get8(handle, offset));

    next_ptr = pci_config_get8(handle, offset + 1);

    igb_log(igb, "PCI_MSIX_NEXT_PTR:\t0x%x\n", next_ptr);

    msix_ctrl = pci_config_get16(handle, offset + PCI_MSIX_CTRL);
    /* Low 11 bits encode table size minus one. */
    msix_tbl_sz = msix_ctrl & 0x7ff;

    igb_log(igb, "PCI_MSIX_CTRL:\t0x%x\n", msix_ctrl);

    /* Split table offset register into BIR and offset parts. */
    tbl_offset = pci_config_get32(handle, offset + PCI_MSIX_TBL_OFFSET);
    tbl_bir = tbl_offset & PCI_MSIX_TBL_BIR_MASK;
    tbl_offset = tbl_offset & ~PCI_MSIX_TBL_BIR_MASK;
    igb_log(igb, "PCI_MSIX_TBL_OFFSET:\t0x%x\n", tbl_offset);
    igb_log(igb, "PCI_MSIX_TBL_BIR:\t0x%x\n", tbl_bir);

    pba_offset = pci_config_get32(handle, offset + PCI_MSIX_PBA_OFFSET);
    pba_bir = pba_offset & PCI_MSIX_PBA_BIR_MASK;
    pba_offset = pba_offset & ~PCI_MSIX_PBA_BIR_MASK;
    igb_log(igb, "PCI_MSIX_PBA_OFFSET:\t0x%x\n", pba_offset);
    igb_log(igb, "PCI_MSIX_PBA_BIR:\t0x%x\n", pba_bir);

    /* PCI Express Configuration */
    offset = next_ptr;

    igb_log(igb, "PCIE_CAP_ID:\t0x%x\n",
        pci_config_get8(handle, offset + PCIE_CAP_ID));

    next_ptr = pci_config_get8(handle, offset + PCIE_CAP_NEXT_PTR);

    igb_log(igb, "PCIE_CAP_NEXT_PTR:\t0x%x\n", next_ptr);
    igb_log(igb, "PCIE_PCIECAP:\t0x%x\n",
        pci_config_get16(handle, offset + PCIE_PCIECAP));
    igb_log(igb, "PCIE_DEVCAP:\t0x%x\n",
        pci_config_get32(handle, offset + PCIE_DEVCAP));
    igb_log(igb, "PCIE_DEVCTL:\t0x%x\n",
        pci_config_get16(handle, offset + PCIE_DEVCTL));
    igb_log(igb, "PCIE_DEVSTS:\t0x%x\n",
        pci_config_get16(handle, offset + PCIE_DEVSTS));
    igb_log(igb, "PCIE_LINKCAP:\t0x%x\n",
        pci_config_get32(handle, offset + PCIE_LINKCAP));
    igb_log(igb, "PCIE_LINKCTL:\t0x%x\n",
        pci_config_get16(handle, offset + PCIE_LINKCTL));
    igb_log(igb, "PCIE_LINKSTS:\t0x%x\n",
        pci_config_get16(handle, offset + PCIE_LINKSTS));

    /* MSI-X Memory Space */
    if (ddi_dev_regsize(igb->dip, IGB_ADAPTER_MSIXTAB, &mem_size) !=
        DDI_SUCCESS) {
        igb_log(igb, "ddi_dev_regsize() failed");
        return;
    }

    if ((ddi_regs_map_setup(igb->dip, IGB_ADAPTER_MSIXTAB,
        (caddr_t *)&base, 0, mem_size, &igb_regs_acc_attr,
        &acc_hdl)) != DDI_SUCCESS) {
        igb_log(igb, "ddi_regs_map_setup() failed");
        return;
    }

    igb_log(igb, "MSI-X Memory Space: (mem_size = %d, base = %x)",
        mem_size, base);

    /* Each MSI-X table entry is 16 bytes: addr lo/hi, data, control. */
    for (i = 0; i <= msix_tbl_sz; i++) {
        igb_log(igb, "MSI-X Table Entry(%d):", i);
        igb_log(igb, "lo_addr:\t%x",
            ddi_get32(acc_hdl,
            (uint32_t *)(base + tbl_offset + (i * 16))));
        igb_log(igb, "up_addr:\t%x",
            ddi_get32(acc_hdl,
            (uint32_t *)(base + tbl_offset + (i * 16) + 4)));
        igb_log(igb, "msg_data:\t%x",
            ddi_get32(acc_hdl,
            (uint32_t *)(base + tbl_offset + (i * 16) + 8)));
        igb_log(igb, "vct_ctrl:\t%x",
            ddi_get32(acc_hdl,
            (uint32_t *)(base + tbl_offset + (i * 16) + 12)));
    }

    igb_log(igb, "MSI-X Pending Bits:\t%x",
        ddi_get32(acc_hdl, (uint32_t *)(base + pba_offset)));

    ddi_regs_map_free(&acc_hdl);
}
/*
 * pci_initchild()
 *
 * Bus-nexus child initialization: name the child, merge .conf
 * prototype nodes into h/w nodes where possible, set up parent
 * private data, and program the child's command register honoring
 * the "command-preserve" property.
 */
static int
pci_initchild(dev_info_t *child)
{
    char name[80];
    ddi_acc_handle_t config_handle;
    ushort_t command_preserve, command;

    if (pci_common_name_child(child, name, 80) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    ddi_set_name_addr(child, name);

    /*
     * Pseudo nodes indicate a prototype node with per-instance
     * properties to be merged into the real h/w device node.
     * The interpretation of the unit-address is DD[,F]
     * where DD is the device id and F is the function.
     */
    if (ndi_dev_is_persistent_node(child) == 0) {
        extern int pci_allow_pseudo_children;

        ddi_set_parent_data(child, NULL);

        /*
         * Try to merge the properties from this prototype
         * node into real h/w nodes.
         */
        if (ndi_merge_node(child, pci_common_name_child) ==
            DDI_SUCCESS) {
            /*
             * Merged ok - return failure to remove the node.
             */
            ddi_set_name_addr(child, NULL);
            return (DDI_FAILURE);
        }

        /* workaround for ddivs to run under PCI */
        if (pci_allow_pseudo_children) {
            /*
             * If the "interrupts" property doesn't exist,
             * this must be the ddivs no-intr case, and it
             * returns DDI_SUCCESS instead of DDI_FAILURE.
             */
            if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
                DDI_PROP_DONTPASS, "interrupts", -1) == -1)
                return (DDI_SUCCESS);

            /*
             * Create the ddi_parent_private_data for a
             * pseudo child.
             */
            pci_common_set_parent_private_data(child);
            return (DDI_SUCCESS);
        }

        /*
         * The child was not merged into a h/w node,
         * but there's not much we can do with it other
         * than return failure to cause the node to be removed.
         */
        cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
            ddi_get_name(child), ddi_get_name_addr(child),
            ddi_get_name(child));
        ddi_set_name_addr(child, NULL);
        return (DDI_NOT_WELL_FORMED);
    }

    /* Real h/w node: attach parent data only when it has interrupts. */
    if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
        "interrupts", -1) != -1)
        pci_common_set_parent_private_data(child);
    else
        ddi_set_parent_data(child, NULL);

    /*
     * initialize command register
     */
    if (pci_config_setup(child, &config_handle) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Support for the "command-preserve" property: bits named in the
     * property (plus fast back-to-back, which only firmware may set)
     * are kept as firmware left them; every other command bit is
     * forced to the driver default.
     */
    command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
        DDI_PROP_DONTPASS, "command-preserve", 0);
    command = pci_config_get16(config_handle, PCI_CONF_COMM);
    command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
    command |= (pci_command_default & ~command_preserve);
    pci_config_put16(config_handle, PCI_CONF_COMM, command);
    pci_config_teardown(&config_handle);

    return (DDI_SUCCESS);
}
/*
 * ehci_attach:
 *
 * Description:    Attach entry point is called by the Kernel.
 *        Allocates resources for each EHCI host controller instance.
 *        Initializes the EHCI Host Controller.
 *
 * Return:    DDI_SUCCESS / DDI_FAILURE.
 */
static int
ehci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int instance;
    ehci_state_t *ehcip = NULL;
    usba_hcdi_register_args_t hcdi_args;

    switch (cmd) {
    case DDI_ATTACH:
        break;
    case DDI_RESUME:
        ehcip = ehci_obtain_state(dip);

        return (ehci_cpr_resume(ehcip));
    default:
        return (DDI_FAILURE);
    }

    /* Get the instance and create soft state */
    instance = ddi_get_instance(dip);

    if (ddi_soft_state_zalloc(ehci_statep, instance) != 0) {

        return (DDI_FAILURE);
    }

    ehcip = ddi_get_soft_state(ehci_statep, instance);
    if (ehcip == NULL) {

        return (DDI_FAILURE);
    }

    ehcip->ehci_flags = EHCI_ATTACH;

    ehcip->ehci_log_hdl = usb_alloc_log_hdl(dip, "ehci", &ehci_errlevel,
        &ehci_errmask, &ehci_instance_debug, 0);

    ehcip->ehci_flags |= EHCI_ZALLOC;

    /* Set host controller soft state to initialization */
    ehcip->ehci_hc_soft_state = EHCI_CTLR_INIT_STATE;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehcip = 0x%p", (void *)ehcip);

    /* Initialize the DMA attributes */
    ehci_set_dma_attributes(ehcip);

    /* Save the dip and instance */
    ehcip->ehci_dip = dip;
    ehcip->ehci_instance = instance;

    /* Initialize the DMA attributes */
    ehci_create_stats(ehcip);

    /* Create the qtd and qh pools */
    if (ehci_allocate_pools(ehcip) != DDI_SUCCESS) {
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }

    /* Initialize the isochronous resources */
    if (ehci_isoc_init(ehcip) != DDI_SUCCESS) {
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }

    /* Map the registers */
    if (ehci_map_regs(ehcip) != DDI_SUCCESS) {
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }

    /* Get the ehci chip vendor and device id */
    ehcip->ehci_vendor_id = pci_config_get16(
        ehcip->ehci_config_handle, PCI_CONF_VENID);
    ehcip->ehci_device_id = pci_config_get16(
        ehcip->ehci_config_handle, PCI_CONF_DEVID);
    ehcip->ehci_rev_id = pci_config_get8(
        ehcip->ehci_config_handle, PCI_CONF_REVID);

    /* Register interrupts */
    if (ehci_register_intrs_and_init_mutex(ehcip) != DDI_SUCCESS) {
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }

    mutex_enter(&ehcip->ehci_int_mutex);

    /* Initialize the controller */
    if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) !=
        DDI_SUCCESS) {
        mutex_exit(&ehcip->ehci_int_mutex);
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }

    /*
     * At this point, the hardware will be okay.
     * Initialize the usba_hcdi structure
     */
    ehcip->ehci_hcdi_ops = ehci_alloc_hcdi_ops(ehcip);

    mutex_exit(&ehcip->ehci_int_mutex);

    /*
     * Make this HCD instance known to USBA
     * (dma_attr must be passed for USBA busctl's)
     */
    hcdi_args.usba_hcdi_register_version = HCDI_REGISTER_VERSION;
    hcdi_args.usba_hcdi_register_dip = dip;
    hcdi_args.usba_hcdi_register_ops = ehcip->ehci_hcdi_ops;
    hcdi_args.usba_hcdi_register_dma_attr = &ehcip->ehci_dma_attr;

    /*
     * Priority and iblock_cookie are one and the same
     * (However, retaining hcdi_soft_iblock_cookie for now,
     * assigning it with the priority.  In future all
     * iblock_cookie usage could just go.)
     */
    hcdi_args.usba_hcdi_register_iblock_cookie =
        (ddi_iblock_cookie_t)(uintptr_t)ehcip->ehci_intr_pri;

    if (usba_hcdi_register(&hcdi_args, 0) != DDI_SUCCESS) {
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }

    ehcip->ehci_flags |= EHCI_USBAREG;

    mutex_enter(&ehcip->ehci_int_mutex);

    if ((ehci_init_root_hub(ehcip)) != USB_SUCCESS) {
        mutex_exit(&ehcip->ehci_int_mutex);
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }

    mutex_exit(&ehcip->ehci_int_mutex);

    /* Finally load the root hub driver */
    if (ehci_load_root_hub_driver(ehcip) != USB_SUCCESS) {
        (void) ehci_cleanup(ehcip);

        return (DDI_FAILURE);
    }
    ehcip->ehci_flags |= EHCI_RHREG;

    /* Display information in the banner */
    ddi_report_dev(dip);

    mutex_enter(&ehcip->ehci_int_mutex);

    /* Reset the ehci initialization flag */
    ehcip->ehci_flags &= ~EHCI_ATTACH;

    /* Print the Host Control's Operational registers */
    ehci_print_caps(ehcip);
    ehci_print_regs(ehcip);

    (void) pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000);

    mutex_exit(&ehcip->ehci_int_mutex);

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_attach: dip = 0x%p done", (void *)dip);

    return (DDI_SUCCESS);
}
/*
 * Read a 16-bit value from the adapter's PCI configuration space at
 * the supplied register offset.
 */
uint16_t
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
{
    ddi_acc_handle_t hdl = OS_DEP(hw)->cfg_handle;

    return (pci_config_get16(hdl, reg));
}
/*
 * Initialize a child of this PCI-PCI bridge nexus: name the node, handle
 * .conf prototype-node merging, set up PM info (when the bridge is PM
 * capable), and program the child's command, cache-line-size and latency
 * timer configuration registers.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE, or DDI_NOT_WELL_FORMED (prototype
 * node that could not be merged and should be removed).
 */
static int
ppb_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;
	uint_t n;
	ushort_t bcr;
	uchar_t header_type;
	uchar_t min_gnt, latency_timer;
	ppb_devstate_t *ppb;

	/*
	 * Name the child
	 */
	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);
	ddi_set_parent_data(child, NULL);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ppb_removechild(child);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ppb_removechild(child);
		return (DDI_NOT_WELL_FORMED);
	}

	/* soft state is keyed by the instance of the parent bridge */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	ddi_set_parent_data(child, NULL);

	/*
	 * If hardware is PM capable, set up the power info structure.
	 * This also ensures the bus will not be off (0MHz), otherwise the
	 * system panics during a bus access.
	 */
	if (PM_CAPABLE(ppb->ppb_pwr_p)) {
		/*
		 * Create a pwr_info struct for child.  Bus will be
		 * at full speed after creating info.
		 */
		pci_pwr_create_info(ppb->ppb_pwr_p, child);
#ifdef DEBUG
		ASSERT(ppb->ppb_pwr_p->current_lvl == PM_LEVEL_B0);
#endif
	}

	/*
	 * If configuration registers were previously saved by
	 * child (before it entered D3), then let the child do the
	 * restore to set up the config regs as it'll first need to
	 * power the device out of D3.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "config-regs-saved-by-child") == 1) {
		DEBUG2(DBG_PWR, ddi_get_parent(child),
		    "INITCHILD: config regs to be restored by child"
		    " for %s@%s\n", ddi_node_name(child),
		    ddi_get_name_addr(child));

		return (DDI_SUCCESS);
	}

	DEBUG2(DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		/* undo the PM info created above before failing */
		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			pci_pwr_rm_info(ppb->ppb_pwr_p, child);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Determine the configuration header type.
	 */
	header_type = pci_config_get8(config_handle, PCI_CONF_HEADER);

	/*
	 * Support for the "command-preserve" property.  Bits named in the
	 * property (plus back-to-back enable) keep their current value;
	 * all other bits come from ppb_command_default.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		bcr = pci_config_get8(config_handle, PCI_BCNF_BCNTRL);
		if (ppb_command_default & PCI_COMM_PARITY_DETECT)
			bcr |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
		if (ppb_command_default & PCI_COMM_SERR_ENABLE)
			bcr |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		bcr |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		pci_config_put8(config_handle, PCI_BCNF_BCNTRL, bcr);
	}

	/*
	 * Initialize cache-line-size configuration register if needed,
	 * i.e. only when no "cache-line-size" property was supplied.
	 */
	if (ppb_set_cache_line_size_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "cache-line-size", 0) == 0) {
		pci_config_put8(config_handle, PCI_CONF_CACHE_LINESZ,
		    ppb->ppb_cache_line_size);
		/* read back: a zero means the device did not accept it */
		n = pci_config_get8(config_handle, PCI_CONF_CACHE_LINESZ);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "cache-line-size", n);
		}
	}

	/*
	 * Initialize latency timer configuration registers if needed.
	 * Bridges (header type 1) inherit the parent's value; leaf
	 * devices get 8 * MIN_GNT.
	 */
	if (ppb_set_latency_timer_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "latency-timer", 0) == 0) {
		if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
			latency_timer = ppb->ppb_latency_timer;
			pci_config_put8(config_handle, PCI_BCNF_LATENCY_TIMER,
			    ppb->ppb_latency_timer);
		} else {
			min_gnt = pci_config_get8(config_handle,
			    PCI_CONF_MIN_G);
			latency_timer = min_gnt * 8;
		}
		pci_config_put8(config_handle, PCI_CONF_LATENCY_TIMER,
		    latency_timer);
		n = pci_config_get8(config_handle, PCI_CONF_LATENCY_TIMER);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "latency-timer", n);
		}
	}

	/*
	 * SPARC PCIe FMA specific
	 *
	 * Note: parent_data for parent is created only if this is sparc PCI-E
	 * platform, for which, SG take a different route to handle device
	 * errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS) {
			pci_config_teardown(&config_handle);
			return (DDI_FAILURE);
		}
		pcie_init_dom(child);
	}

	/*
	 * Check to see if the XMITS/PCI-X workaround applies.
	 */
	n = ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_NOTPROM,
	    "pcix-update-cmd-reg", -1);

	if (n != -1) {
		extern void pcix_set_cmd_reg(dev_info_t *child, uint16_t value);
		DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
		    "Workaround: value = %x\n", n);
		pcix_set_cmd_reg(child, n);
	}
	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
/*
 * Autoconfiguration entry points.
 */

/*
 * efe_attach() - attach or resume an instance of the efe driver.
 *
 * Sets up PCI bus mastering, allocates soft state, maps registers,
 * allocates rx/tx descriptor rings, wires up the fixed interrupt,
 * allocates MII and MAC framework resources, and registers with mac(9E).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  On failure everything acquired so
 * far is released via the `failure' path.
 */
int
efe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ddi_acc_handle_t pci;
	int types;
	int count;
	int actual;
	uint_t pri;
	efe_t *efep;
	/*
	 * Must be initialized to NULL: the `failure' label tests macp,
	 * and several gotos are taken before mac_alloc() runs.  The
	 * original code left macp uninitialized, so those early failure
	 * paths read an indeterminate pointer.
	 */
	mac_register_t *macp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		efep = ddi_get_driver_private(dip);
		return (efe_resume(efep));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * PCI configuration: enable memory space access and bus mastering.
	 */
	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup PCI configuration!");
		return (DDI_FAILURE);
	}

	pci_config_put16(pci, PCI_CONF_COMM,
	    pci_config_get16(pci, PCI_CONF_COMM) | PCI_COMM_MAE | PCI_COMM_ME);

	pci_config_teardown(&pci);

	/* the device is driven off a single fixed (legacy) interrupt */
	if (ddi_intr_get_supported_types(dip, &types)
	    != DDI_SUCCESS || !(types & DDI_INTR_TYPE_FIXED)) {
		efe_error(dip, "fixed interrupts not supported!");
		return (DDI_FAILURE);
	}

	if (ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &count)
	    != DDI_SUCCESS || count != 1) {
		efe_error(dip, "no fixed interrupts available!");
		return (DDI_FAILURE);
	}

	/*
	 * Initialize soft state.
	 */
	efep = kmem_zalloc(sizeof (efe_t), KM_SLEEP);
	ddi_set_driver_private(dip, efep);

	efep->efe_dip = dip;

	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&efep->efe_regs, 0, 0,
	    &efe_regs_acc_attr, &efep->efe_regs_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup register mapping!");
		goto failure;
	}

	efep->efe_rx_ring = efe_ring_alloc(efep->efe_dip, RXDESCL);
	if (efep->efe_rx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate rx ring!");
		goto failure;
	}

	efep->efe_tx_ring = efe_ring_alloc(efep->efe_dip, TXDESCL);
	if (efep->efe_tx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate tx ring!");
		goto failure;
	}

	if (ddi_intr_alloc(dip, &efep->efe_intrh, DDI_INTR_TYPE_FIXED, 0,
	    count, &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS ||
	    actual != count) {
		efe_error(dip, "unable to allocate fixed interrupt!");
		goto failure;
	}

	if (ddi_intr_get_pri(efep->efe_intrh, &pri) != DDI_SUCCESS ||
	    pri >= ddi_intr_get_hilevel_pri()) {
		efe_error(dip, "unable to get valid interrupt priority!");
		goto failure;
	}

	mutex_init(&efep->efe_intrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	mutex_init(&efep->efe_txlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	/*
	 * Initialize device.
	 */
	mutex_enter(&efep->efe_intrlock);
	mutex_enter(&efep->efe_txlock);

	efe_reset(efep);

	mutex_exit(&efep->efe_txlock);
	mutex_exit(&efep->efe_intrlock);

	/* Use factory address as default */
	efe_getaddr(efep, efep->efe_macaddr);

	/*
	 * Enable the ISR.
	 */
	if (ddi_intr_add_handler(efep->efe_intrh, efe_intr, efep, NULL)
	    != DDI_SUCCESS) {
		efe_error(dip, "unable to add interrupt handler!");
		goto failure;
	}

	if (ddi_intr_enable(efep->efe_intrh) != DDI_SUCCESS) {
		efe_error(dip, "unable to enable interrupt!");
		goto failure;
	}

	/*
	 * Allocate MII resources.
	 */
	if ((efep->efe_miih = mii_alloc(efep, dip, &efe_mii_ops)) == NULL) {
		efe_error(dip, "unable to allocate mii resources!");
		goto failure;
	}

	/*
	 * Allocate MAC resources.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		efe_error(dip, "unable to allocate mac resources!");
		goto failure;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = efep;
	macp->m_dip = dip;
	macp->m_src_addr = efep->efe_macaddr;
	macp->m_callbacks = &efe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &efep->efe_mh) != 0) {
		efe_error(dip, "unable to register with mac!");
		goto failure;
	}
	/* the register handle is kept by mac; the register struct is ours */
	mac_free(macp);

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

failure:
	/*
	 * Release everything acquired so far.  efep was kmem_zalloc'ed,
	 * so unacquired resources are NULL here and are skipped.
	 */
	if (macp != NULL) {
		mac_free(macp);
	}

	if (efep->efe_miih != NULL) {
		mii_free(efep->efe_miih);
	}

	if (efep->efe_intrh != NULL) {
		(void) ddi_intr_disable(efep->efe_intrh);
		(void) ddi_intr_remove_handler(efep->efe_intrh);
		(void) ddi_intr_free(efep->efe_intrh);
	}

	mutex_destroy(&efep->efe_txlock);
	mutex_destroy(&efep->efe_intrlock);

	if (efep->efe_tx_ring != NULL) {
		efe_ring_free(&efep->efe_tx_ring);
	}
	if (efep->efe_rx_ring != NULL) {
		efe_ring_free(&efep->efe_rx_ring);
	}

	if (efep->efe_regs_acch != NULL) {
		ddi_regs_map_free(&efep->efe_regs_acch);
	}

	kmem_free(efep, sizeof (efe_t));

	return (DDI_FAILURE);
}
/*
 * UM_PCI_Services() - emulate the PCI BIOS (INT 1Ah) service dispatcher
 * for legacy adapter code, backed by DDI PCI config space accesses.
 *
 * The request and its results travel in the pseudo x86 register set
 * *pregs: AL selects the function, DI carries the config register
 * offset, CL/CX/ECX carry data, AH returns the PCI BIOS status code and
 * the carry flag signals failure.
 *
 * Always returns 0; per-call status is in pregs->h.ah / pregs->x.cflag.
 */
int
UM_PCI_Services(Adapter_Struc *pAd, union REGS *pregs)
{
	int func = (int)pregs->h.al;
	unsigned long regnum;		/* register number */
	unsigned short vendid;
	unsigned short devid;
	unsigned long compval;

	switch (func) {
	case PCI_BIOS_PRESENT:
		/* return PCI present with rev 2.1 */
		pregs->h.ah = 0;
		pregs->h.al = 0;
		pregs->h.bh = 2;
		pregs->h.bl = 1;
		pregs->h.cl = 1;
		pregs->e.edx = 0x20494350;	/* "PCI " signature */
		pregs->x.cflag = 0;
		break;
	case FIND_PCI_DEVICE:
		vendid = pregs->x.dx;
		devid = pregs->x.cx;
		compval = (((ulong_t)devid) << 16) | ((ulong_t)vendid);
		if (vendid == 0xffff) {
			/* bad vendor id */
			pregs->x.cflag = 1;
			pregs->h.ah = PCI_BAD_VENDOR_ID;
		} else {
			/*
			 * Only the device behind pAd->pcihandle is visible;
			 * compare its vendor/device dword at offset 0.
			 */
			if (pci_config_get32(
			    (ddi_acc_handle_t)pAd->pcihandle, 0) == compval) {
				pregs->h.bh = 0;	/* put 0 to fake it */
				pregs->h.bl = 0;	/* put 0 to fake it */
				pregs->h.ah = PCI_SUCCESSFUL;
				pregs->x.cflag = 0;
			} else {
				pregs->h.ah = PCI_DEVICE_NOT_FOUND;
				pregs->x.cflag = 1;
			}
		}
		break;
	case PCI_READ_CONFIG_BYTE:
		regnum = (unsigned long) pregs->h.di;
		pregs->h.cl = pci_config_get8(
		    (ddi_acc_handle_t)pAd->pcihandle, regnum);
		pregs->x.cflag = 0;
		pregs->h.ah = PCI_SUCCESSFUL;
		break;
	case PCI_READ_CONFIG_WORD:
		regnum = (unsigned long)pregs->h.di;
		/* word accesses must be 2-byte aligned */
		if (regnum & 0x1) {
			pregs->x.cflag = 1;
			pregs->h.ah = PCI_BAD_REGISTER_NUMBER;
		} else {
			pregs->x.cx = pci_config_get16(
			    (ddi_acc_handle_t)pAd->pcihandle, regnum);
			pregs->x.cflag = 0;
			pregs->h.ah = PCI_SUCCESSFUL;
		}
		break;
	case PCI_READ_CONFIG_DWORD:
		regnum = (unsigned long)pregs->h.di;
		/* dword accesses must be 4-byte aligned */
		if (regnum & 0x3) {
			pregs->x.cflag = 1;
			pregs->h.ah = PCI_BAD_REGISTER_NUMBER;
		} else {
			pregs->e.ecx = pci_config_get32(
			    (ddi_acc_handle_t)pAd->pcihandle, regnum);
			pregs->x.cflag = 0;
			pregs->h.ah = PCI_SUCCESSFUL;
		}
		break;
	case PCI_WRITE_CONFIG_BYTE:
		regnum = (unsigned long) pregs->h.di;
		pci_config_put8((ddi_acc_handle_t)pAd->pcihandle, regnum,
		    pregs->h.cl);
		pregs->x.cflag = 0;
		pregs->h.ah = PCI_SUCCESSFUL;
		break;
	case PCI_WRITE_CONFIG_WORD:
		regnum = (unsigned long)pregs->h.di;
		/* word accesses must be 2-byte aligned */
		if (regnum & 0x1) {
			pregs->x.cflag = 1;
			pregs->h.ah = PCI_BAD_REGISTER_NUMBER;
		} else {
			pci_config_put16(
			    (ddi_acc_handle_t)pAd->pcihandle, regnum,
			    pregs->x.cx);
			pregs->x.cflag = 0;
			pregs->h.ah = PCI_SUCCESSFUL;
		}
		break;
	case PCI_WRITE_CONFIG_DWORD:
		regnum = (unsigned long)pregs->h.di;
		/*
		 * Fix: dword writes must be 4-byte aligned.  The original
		 * tested (regnum & 0x1), matching the word case, which let
		 * offsets of the form 4k+2 through; use the same 0x3 mask
		 * as PCI_READ_CONFIG_DWORD.
		 */
		if (regnum & 0x3) {
			pregs->x.cflag = 1;
			pregs->h.ah = PCI_BAD_REGISTER_NUMBER;
		} else {
			pci_config_put32(
			    (ddi_acc_handle_t)pAd->pcihandle, regnum,
			    pregs->e.ecx);
			pregs->x.cflag = 0;
			pregs->h.ah = PCI_SUCCESSFUL;
		}
		break;
	default:
		pregs->x.cflag = 1;	/* set error */
		pregs->h.ah = PCI_FUNC_NOT_SUPPORTED;
		break;
	}
	return (0);
}
/*
 * Read a 16-bit PCI configuration register of the adapter and return
 * it through *val.
 */
void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{
	uint16_t v;

	v = pci_config_get16(sc->pci_regh, reg);
	*val = v;
}
/*
 * audioixp_attach()
 *
 * Description:
 *	Attach an instance of the audioixp driver. This routine does
 *	the device dependent attach tasks.
 *
 * Arguments:
 *	dev_info_t	*dip	Pointer to the device's dev_info struct
 *
 * Returns:
 *	DDI_SUCCESS		The driver was initialized properly
 *	DDI_FAILURE		The driver couldn't be initialized properly
 */
static int
audioixp_attach(dev_info_t *dip)
{
	uint16_t		cmdeg;
	audioixp_state_t	*statep;
	audio_dev_t		*adev;
	uint32_t		devid;
	const char		*name;
	const char		*rev;

	/* we don't support high level interrupts in the driver */
	if (ddi_intr_hilevel(dip, 0) != 0) {
		cmn_err(CE_WARN,
		    "!%s%d: unsupported high level interrupt",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/* allocate the soft state structure */
	statep = kmem_zalloc(sizeof (*statep), KM_SLEEP);
	statep->dip = dip;
	ddi_set_driver_private(dip, statep);

	if (ddi_get_iblock_cookie(dip, 0, &statep->iblock) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "!%s%d: cannot get iblock cookie",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		kmem_free(statep, sizeof (*statep));
		return (DDI_FAILURE);
	}
	mutex_init(&statep->inst_lock, NULL, MUTEX_DRIVER, statep->iblock);

	/* allocate framework audio device */
	if ((adev = audio_dev_alloc(dip, 0)) == NULL) {
		cmn_err(CE_WARN, "!%s%d: unable to allocate audio dev",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto error;
	}
	statep->adev = adev;

	/* map in the registers */
	if (audioixp_map_regs(statep) != DDI_SUCCESS) {
		audio_dev_warn(adev, "couldn't map registers");
		goto error;
	}

	/* set device information -- this could be smarter */
	devid = ((pci_config_get16(statep->pcih, PCI_CONF_VENID)) << 16) |
	    pci_config_get16(statep->pcih, PCI_CONF_DEVID);

	name = "ATI AC'97";
	switch (devid) {
	case IXP_PCI_ID_200:
		rev = "IXP150";
		break;
	case IXP_PCI_ID_300:
		rev = "SB300";
		break;
	case IXP_PCI_ID_400:
		/* high bit of the revision id distinguishes SB450/SB400 */
		if (pci_config_get8(statep->pcih, PCI_CONF_REVID) & 0x80) {
			rev = "SB450";
		} else {
			rev = "SB400";
		}
		break;
	case IXP_PCI_ID_SB600:
		rev = "SB600";
		break;
	default:
		rev = "Unknown";
		break;
	}
	audio_dev_set_description(adev, name);
	audio_dev_set_version(adev, rev);

	/* allocate port structures */
	if ((audioixp_alloc_port(statep, IXP_PLAY) != DDI_SUCCESS) ||
	    (audioixp_alloc_port(statep, IXP_REC) != DDI_SUCCESS)) {
		goto error;
	}

	statep->ac97 = ac97_alloc(dip, audioixp_rd97, audioixp_wr97, statep);
	if (statep->ac97 == NULL) {
		audio_dev_warn(adev, "failed to allocate ac97 handle");
		goto error;
	}

	/* set PCI command register: enable I/O and memory space access */
	cmdeg = pci_config_get16(statep->pcih, PCI_CONF_COMM);
	pci_config_put16(statep->pcih, PCI_CONF_COMM,
	    cmdeg | PCI_COMM_IO | PCI_COMM_MAE);

	/* set up kernel statistics */
	if ((statep->ksp = kstat_create(IXP_NAME, ddi_get_instance(dip),
	    IXP_NAME, "controller", KSTAT_TYPE_INTR, 1,
	    KSTAT_FLAG_PERSISTENT)) != NULL) {
		kstat_install(statep->ksp);
	}

	if (audioixp_chip_init(statep) != DDI_SUCCESS) {
		audio_dev_warn(statep->adev, "failed to init chip");
		goto error;
	}

	/* initialize the AC'97 part */
	if (ac97_init(statep->ac97, adev) != DDI_SUCCESS) {
		audio_dev_warn(adev, "ac'97 initialization failed");
		goto error;
	}

	/* set up the interrupt handler */
	if (ddi_add_intr(dip, 0, &statep->iblock,
	    NULL, audioixp_intr, (caddr_t)statep) != DDI_SUCCESS) {
		audio_dev_warn(adev, "bad interrupt specification");
		/*
		 * Fix: the original only warned and fell through, marking
		 * intr_added and continuing to attach with no handler.
		 * Bail out so cleanup doesn't remove an interrupt that was
		 * never added and the device isn't registered half-working.
		 */
		goto error;
	}
	statep->intr_added = B_TRUE;

	if (audio_dev_register(adev) != DDI_SUCCESS) {
		audio_dev_warn(adev, "unable to register with framework");
		goto error;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

error:
	audioixp_destroy(statep);
	return (DDI_FAILURE);
}
/*
 * Initialize a child of this PCI-PCI bridge nexus (PCIe variant): name
 * the node, handle .conf prototype-node merging, set up PCIe FMA error
 * handling, carry the PROM "interrupts" property into parent private
 * data, and program the child's command register.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE, or DDI_NOT_WELL_FORMED (prototype
 * node that could not be merged and should be removed).
 */
static int
ppb_initchild(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;
	ppb_devstate_t *ppb;
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;

	/* soft state is keyed by the instance of the parent bridge */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	ddi_set_parent_data(child, NULL);

	/*
	 * PCIe FMA specific
	 *
	 * Note: parent_data for parent is created only if this is PCI-E
	 * platform, for which, SG take a different route to handle device
	 * errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS)
			return (DDI_FAILURE);
		pcie_init_dom(child);
	}

	/* transfer select properties from PROM to kernel */
	if (ddi_getprop(DDI_DEV_T_NONE, child, DDI_PROP_DONTPASS, "interrupts",
	    -1) != -1) {
		/*
		 * Single allocation holds the parent-private data followed
		 * immediately by its one intrspec; par_intr points into
		 * the same block.
		 */
		pdptr = kmem_zalloc((sizeof (struct ddi_parent_private_data) +
		    sizeof (struct intrspec)), KM_SLEEP);
		pdptr->par_intr = (struct intrspec *)(pdptr + 1);
		pdptr->par_nintr = 1;
		ddi_set_parent_data(child, pdptr);
	} else
		ddi_set_parent_data(child, NULL);

	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		/* undo the FMA domain set up above before failing */
		pcie_fini_dom(child);
		return (DDI_FAILURE);
	}

	/*
	 * Support for the "command-preserve" property.  Bits named in the
	 * property (plus back-to-back enable) keep their current value;
	 * all other bits come from ppb_command_default.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);

	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
/*
 * The VGA device could be under a subtractive PCI bridge on some systems.
 * Though the PCI_BCNF_BCNTRL_VGA_ENABLE bit is not set on such subtractive
 * PCI bridge, the subtractive PCI bridge can forward VGA access if no other
 * agent claims the access.
 * The vga_enable element in param acts as a flag, if not set, ignore the
 * checking for the PCI_BCNF_BCNTRL_VGA_ENABLE bit of the PCI bridge during
 * the search.
 *
 * ddi_walk_devs(9F) callback: walks the device tree looking for the
 * console framebuffer ("display") node.  Returns DDI_WALK_CONTINUE /
 * DDI_WALK_PRUNECHILD to steer the walk and DDI_WALK_TERMINATE once a
 * candidate has been stored in p->found_dip.
 */
static int
find_fb_dev(dev_info_t *dip, void *param)
{
	struct find_fb_dev_param *p = param;
	char *dev_type;
	dev_info_t *pdip;
	char *parent_type;

	if (dip == ddi_root_node())
		return (DDI_WALK_CONTINUE);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device_type", &dev_type) != DDI_SUCCESS)
		return (DDI_WALK_PRUNECHILD);

	/* ISA/EISA buses can hold a display child; keep walking into them */
	if ((strcmp(dev_type, "isa") == 0) || (strcmp(dev_type, "eisa") == 0)) {
		ddi_prop_free(dev_type);
		return (DDI_WALK_CONTINUE);
	}

	if ((strcmp(dev_type, "pci") == 0) ||
	    (strcmp(dev_type, "pciex") == 0)) {
		ddi_acc_handle_t pci_conf;
		uint16_t data16;
		char *nodename;

		ddi_prop_free(dev_type);

		/* caller opted out of the VGAEnable filter: descend freely */
		if (!p->vga_enable)
			return (DDI_WALK_CONTINUE);

		nodename = ddi_node_name(dip);

		/*
		 * If the node is not a PCI-to-PCI bridge, continue traversing
		 * (it could be the root node), otherwise, check for the
		 * VGAEnable bit to be set in the Bridge Control Register.
		 */
		if (strcmp(nodename, "pci") == 0) {
			if (is_pci_bridge(dip) == B_FALSE)
				return (DDI_WALK_CONTINUE);
		}

		if (i_ddi_attach_node_hierarchy(dip) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		if (pci_config_setup(dip, &pci_conf) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		data16 = pci_config_get16(pci_conf, PCI_BCNF_BCNTRL);
		pci_config_teardown(&pci_conf);

		/* only descend into bridges that forward VGA cycles */
		if (data16 & PCI_BCNF_BCNTRL_VGA_ENABLE)
			return (DDI_WALK_CONTINUE);

		return (DDI_WALK_PRUNECHILD);
	}

	/* anything that is not a display leaf is of no further interest */
	if (strcmp(dev_type, "display") != 0) {
		ddi_prop_free(dev_type);
		return (DDI_WALK_CONTINUE);
	}

	ddi_prop_free(dev_type);

	/* a display node qualifies only under an ISA/EISA or PCI parent */
	if ((pdip = ddi_get_parent(dip)) == NULL)
		return (DDI_WALK_PRUNECHILD);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "device_type", &parent_type) != DDI_SUCCESS)
		return (DDI_WALK_PRUNECHILD);

	if ((strcmp(parent_type, "isa") == 0) ||
	    (strcmp(parent_type, "eisa") == 0)) {
		p->found_dip = dip;
		ddi_prop_free(parent_type);
		return (DDI_WALK_TERMINATE);
	}

	if ((strcmp(parent_type, "pci") == 0) ||
	    (strcmp(parent_type, "pciex") == 0)) {
		ddi_acc_handle_t pci_conf;
		uint16_t data16;

		ddi_prop_free(parent_type);

		if (i_ddi_attach_node_hierarchy(dip) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		if (pci_config_setup(dip, &pci_conf) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		data16 = pci_config_get16(pci_conf, PCI_CONF_COMM);
		pci_config_teardown(&pci_conf);

		/* the console VGA device must have I/O space decoding on */
		if (!(data16 & PCI_COMM_IO))
			return (DDI_WALK_PRUNECHILD);

		p->found_dip = dip;
		return (DDI_WALK_TERMINATE);
	}

	ddi_prop_free(parent_type);
	return (DDI_WALK_PRUNECHILD);
}
/*
 * Build the "ranges" property for a PCI-PCI bridge node from the I/O
 * and 32-bit memory base/limit registers in the bridge's config header.
 * A range entry is emitted only when the corresponding window is
 * actually implemented and non-empty (limit >= base).
 */
static void
ppb_create_ranges_prop(dev_info_t *dip,
    ddi_acc_handle_t config_handle)
{
	uint32_t base, limit;
	ppb_ranges_t ranges[PPB_RANGE_LEN];
	uint8_t io_base_lo, io_limit_lo;
	uint16_t io_base_hi, io_limit_hi, mem_base, mem_limit;
	int i = 0, rangelen = sizeof (ppb_ranges_t)/sizeof (int);

	io_base_lo = pci_config_get8(config_handle, PCI_BCNF_IO_BASE_LOW);
	io_limit_lo = pci_config_get8(config_handle, PCI_BCNF_IO_LIMIT_LOW);
	io_base_hi = pci_config_get16(config_handle, PCI_BCNF_IO_BASE_HI);
	io_limit_hi = pci_config_get16(config_handle, PCI_BCNF_IO_LIMIT_HI);
	mem_base = pci_config_get16(config_handle, PCI_BCNF_MEM_BASE);
	mem_limit = pci_config_get16(config_handle, PCI_BCNF_MEM_LIMIT);

	/*
	 * Create ranges for IO space
	 */
	ranges[i].size_low = ranges[i].size_high = 0;
	ranges[i].parent_mid = ranges[i].child_mid = ranges[i].parent_high = 0;
	/* same space code in both child and parent address cells */
	ranges[i].child_high = ranges[i].parent_high |=
	    (PCI_REG_REL_M | PCI_ADDR_IO);
	base = PPB_16bit_IOADDR(io_base_lo);
	limit = PPB_16bit_IOADDR(io_limit_lo);

	/*
	 * Check for 32-bit I/O support as per PCI-to-PCI Bridge Arch Spec
	 */
	if ((io_base_lo & 0xf) == PPB_32BIT_IO) {
		/* fold in the upper 16 address bits from the HI registers */
		base = PPB_LADDR(base, io_base_hi);
		limit = PPB_LADDR(limit, io_limit_hi);
	}

	/*
	 * Check if the bridge implements an I/O address range as per
	 * PCI-to-PCI Bridge Arch Spec
	 */
	if ((io_base_lo != 0 || io_limit_lo != 0) && limit >= base) {
		ranges[i].parent_low = ranges[i].child_low = base;
		/* window size is inclusive, in I/O granularity units */
		ranges[i].size_low = limit - base + PPB_IOGRAIN;
		i++;
	}

	/*
	 * Create ranges for 32bit memory space
	 */
	base = PPB_32bit_MEMADDR(mem_base);
	limit = PPB_32bit_MEMADDR(mem_limit);
	ranges[i].size_low = ranges[i].size_high = 0;
	ranges[i].parent_mid = ranges[i].child_mid = ranges[i].parent_high = 0;
	ranges[i].child_high = ranges[i].parent_high |=
	    (PCI_REG_REL_M | PCI_ADDR_MEM32);
	ranges[i].child_low = ranges[i].parent_low = base;
	if (limit >= base) {
		/* window size is inclusive, in memory granularity units */
		ranges[i].size_low = limit - base + PPB_MEMGRAIN;
		i++;
	}

	/* publish only the entries actually filled in */
	if (i) {
		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "ranges",
		    (int *)ranges, i * rangelen);
	}
}