int
pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
{
	int i;

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
		return (NDI_FAILURE);
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
		return (NDI_FAILURE);
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
		return (NDI_FAILURE);

	/* for each entry in the PCI "available" property */
	for (i = 0; i < entries; i++, avail_p++) {
		if (avail_p->pci_phys_hi == -1u)
			goto err;

		switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
			(void) ndi_ra_free(dip,
			    (uint64_t)avail_p->pci_phys_low,
			    (uint64_t)avail_p->pci_size_low,
			    (avail_p->pci_phys_hi & PCI_REG_PF_M) ?
			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
			    0);
			break;
		case PCI_REG_ADDR_G(PCI_ADDR_IO):
			(void) ndi_ra_free(dip,
			    (uint64_t)avail_p->pci_phys_low,
			    (uint64_t)avail_p->pci_size_low,
			    NDI_RA_TYPE_IO,
			    0);
			break;
		default:
			goto err;
		}
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, dip);
	}
#endif
	return (NDI_SUCCESS);

err:
	cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
	    i, avail_p->pci_phys_hi);
	return (NDI_FAILURE);
}
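/*
 * Illustrative only, not part of the driver above: a minimal caller sketch
 * showing how a hand-built "available" list could be handed to
 * pci_resource_setup_avail().  It assumes the busra interfaces and
 * <sys/pci.h> definitions used above are already visible; the helper name
 * and the two address ranges are made up for the example.
 */
static int
setup_example_avail(dev_info_t *dip)
{
	pci_regspec_t avail[2];

	bzero(avail, sizeof (avail));

	/* one 32-bit, non-prefetchable memory range: 1 MB at 0x80000000 */
	avail[0].pci_phys_hi = PCI_ADDR_MEM32;
	avail[0].pci_phys_low = 0x80000000u;
	avail[0].pci_size_low = 0x100000;

	/* one I/O range: 256 ports at 0x1000 */
	avail[1].pci_phys_hi = PCI_ADDR_IO;
	avail[1].pci_phys_low = 0x1000;
	avail[1].pci_size_low = 0x100;

	/* creates the MEM/IO/prefetch maps and frees both ranges into them */
	return (pci_resource_setup_avail(dip, avail,
	    sizeof (avail) / sizeof (avail[0])));
}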
void
gp2_fc_ops_free_handle(fco_handle_t rp)
{
	struct fc_resource *ip, *np;

	ASSERT(rp);

	if (rp->next_handle)
		fc_ops_free_handle(rp->next_handle);
	if (rp->unit_address)
		kmem_free(rp->unit_address, strlen(rp->unit_address) + 1);
	if (rp->my_args != NULL)
		kmem_free(rp->my_args, strlen(rp->my_args) + 1);

	/*
	 * Release all the resources from the resource list
	 */
	for (ip = rp->head; ip != NULL; ip = np) {
		np = ip->next;
		switch (ip->type) {
		case RT_MAP:
			FC_DEBUG1(1, CE_CONT, "gp2_fc_ops_free: "
			    "map handle - %p\n", ip->fc_map_handle);
			break;
		case RT_DMA:
			/* DMA has to be freed up at exit time */
			cmn_err(CE_CONT, "gfc_fc_ops_free: DMA seen!\n");
			break;
		case RT_CONTIGIOUS:
			FC_DEBUG2(1, CE_CONT, "gp2_fc_ops_free: "
			    "Free claim-memory resource 0x%lx size 0x%x\n",
			    ip->fc_contig_virt, ip->fc_contig_len);
			(void) ndi_ra_free(ddi_root_node(),
			    (uint64_t)ip->fc_contig_virt,
			    ip->fc_contig_len, "gptwo-contigousmem",
			    NDI_RA_PASS);
			break;
		default:
			cmn_err(CE_CONT, "gp2_fc_ops_free: "
			    "unknown resource type %d\n", ip->type);
			break;
		}
		fc_rem_resource(rp, ip);
		kmem_free(ip, sizeof (struct fc_resource));
	}
	kmem_free(rp, sizeof (struct fc_resource_list));
}
/*
 * gfc_release_memory
 *
 *	release-memory ( size vaddr -- )
 */
static int
gfc_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	int32_t vaddr, size;
	struct fc_resource *ip;

	if (fc_cell2int(cp->nargs) != 2)
		return (fc_syntax_error(cp, "nargs must be 2"));

	if (fc_cell2int(cp->nresults) != 0)
		return (fc_syntax_error(cp, "nresults must be 0"));

	vaddr = fc_cell2int(fc_arg(cp, 1));
	size = fc_cell2int(fc_arg(cp, 0));

	FC_DEBUG2(1, CE_CONT, "gfc_release_memory: vaddr=0x%x size=0x%x\n",
	    vaddr, size);

	/*
	 * Find if this request matches a mapping resource we set up.
	 */
	fc_lock_resource_list(rp);
	for (ip = rp->head; ip != NULL; ip = ip->next) {
		if (ip->type != RT_CONTIGIOUS)
			continue;
		if (ip->fc_contig_virt != (void *)(uintptr_t)vaddr)
			continue;
		if (ip->fc_contig_len == size)
			break;
	}
	fc_unlock_resource_list(rp);

	if (ip == NULL)
		return (fc_priv_error(cp, "request doesn't match a "
		    "known mapping"));

	(void) ndi_ra_free(ddi_root_node(), vaddr, size,
	    "gptwo-contigousmem", NDI_RA_PASS);

	/*
	 * remove the resource from the list and release it.
	 */
	fc_rem_resource(rp, ip);
	kmem_free(ip, sizeof (struct fc_resource));

	cp->nresults = fc_int2cell(0);

	return (fc_success_op(ap, rp, cp));
}
int
_init(void)
{
	int err = 0;

	/*
	 * Create a resource map for the contiguous memory allocated
	 * at start-of-day in startup.c
	 */
	if (ndi_ra_map_setup(ddi_root_node(), "gptwo-contigousmem")
	    == NDI_FAILURE) {
		GPTWO_DEBUG0(1, CE_WARN,
		    "Can not setup resource map - gptwo-contigousmem\n");
		return (1);
	}

	/*
	 * Put the allocated memory into the pool.
	 */
	(void) ndi_ra_free(ddi_root_node(), (uint64_t)efcode_vaddr,
	    (uint64_t)efcode_size, "gptwo-contigousmem", 0);

	/* register devices with the configurator */
	gptwocfg_register_ops(SAFPTYPE_sPCI, gptwo_configure_pci,
	    gptwo_unconfigure_pci);
	gptwocfg_register_ops(SAFPTYPE_cPCI, gptwo_configure_pci,
	    gptwo_unconfigure_pci);
	gptwocfg_register_ops(SAFPTYPE_PCIX, gptwo_configure_pci,
	    gptwo_unconfigure_pci);

	if ((err = mod_install(&modlinkage)) != 0) {
		GPTWO_DEBUG1(1, CE_WARN, "gptwo_pci (PCI Functions) "
		    "failed to load, error=%d\n", err);
		gptwocfg_unregister_ops(SAFPTYPE_sPCI);
		gptwocfg_unregister_ops(SAFPTYPE_cPCI);
		gptwocfg_unregister_ops(SAFPTYPE_PCIX);
	} else {
		GPTWO_DEBUG0(1, CE_WARN, "gptwo_pci (PCI Functions) "
		    "has been loaded.\n");
	}
	return (err);
}
/*
 * Setup resource map for the pci bus node based on the "available"
 * property and "bus-range" property.
 */
int
pci_resource_setup(dev_info_t *dip)
{
	pci_regspec_t *regs;
	int rlen, rcount, i;
	char bus_type[16] = "(unknown)";
	int len;
	struct bus_range pci_bus_range;
	struct busnum_ctrl ctrl;
	int circular_count;
	int rval = NDI_SUCCESS;

	/*
	 * If this is a pci bus node then look for "available" property
	 * to find the available resources on this bus.
	 */
	len = sizeof (bus_type);
	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
		return (NDI_FAILURE);

	/* it is not a pci/pci-ex bus type */
	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
		return (NDI_FAILURE);

	/*
	 * The pci-hotplug project addresses adding the call
	 * to pci_resource_setup from the pci nexus driver.
	 * However, that project would initially be only for x86,
	 * so for sparc pcmcia-pci support we still need to call
	 * pci_resource_setup in the pcic driver. Once all pci nexus drivers
	 * are updated to call pci_resource_setup, this portion of the
	 * code would really become an assert to make sure this
	 * function is not called for the same dip twice.
	 */
	{
		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
			return (NDI_FAILURE);
		}
	}

	/*
	 * Create empty resource maps first.
	 *
	 * NOTE: If all the allocated resources are already assigned to
	 * device(s) in the hot plug slot then the "available" property may
	 * not be present. But a subsequent hot plug operation may
	 * unconfigure the device in the slot and try to free up its
	 * resources. So, at the minimum, we should create empty maps here.
	 */
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
	    NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* read the "available" property if it is available */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
		/*
		 * create the available resource list for both memory and
		 * io space
		 */
		rcount = rlen / sizeof (pci_regspec_t);
		for (i = 0; i < rcount; i++) {
			switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
			case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
				(void) ndi_ra_free(dip,
				    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
				    ((uint64_t)(regs[i].pci_phys_low)),
				    ((uint64_t)(regs[i].pci_size_hi) << 32) |
				    ((uint64_t)(regs[i].pci_size_low)),
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_IO):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    NDI_RA_TYPE_IO,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
				break;
			default:
				cmn_err(CE_WARN,
				    "pci_resource_setup: bad addr type: %x\n",
				    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
				break;
			}
		}
		kmem_free((caddr_t)regs, rlen);
	}

	/*
	 * update resource map for available bus numbers if the node
	 * has an available-bus-range or bus-range property.
	 */
	len = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
	    DDI_SUCCESS) {
		/*
		 * Add bus numbers in the range to the free list.
		 */
		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
		    (uint64_t)pci_bus_range.hi -
		    (uint64_t)pci_bus_range.lo + 1,
		    NDI_RA_TYPE_PCI_BUSNUM, 0);
	} else {
		/*
		 * We don't have an available-bus-range property. If, instead,
		 * we have a bus-range property we add all the bus numbers
		 * in that range to the free list, but we must then scan
		 * for pci-pci bridges on this bus to find out if any of
		 * those bus numbers are already in use. If so, we can
		 * reclaim them.
		 */
		len = sizeof (struct bus_range);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
		    &len) == DDI_SUCCESS) {
			if (pci_bus_range.lo != pci_bus_range.hi) {
				/*
				 * Add bus numbers other than the secondary
				 * bus number to the free list.
				 */
				(void) ndi_ra_free(dip,
				    (uint64_t)pci_bus_range.lo + 1,
				    (uint64_t)pci_bus_range.hi -
				    (uint64_t)pci_bus_range.lo,
				    NDI_RA_TYPE_PCI_BUSNUM, 0);

				/* scan for pci-pci bridges */
				ctrl.rv = DDI_SUCCESS;
				ctrl.dip = dip;
				ctrl.range = &pci_bus_range;
				ndi_devi_enter(dip, &circular_count);
				ddi_walk_devs(ddi_get_child(dip),
				    claim_pci_busnum, (void *)&ctrl);
				ndi_devi_exit(dip, circular_count);
				if (ctrl.rv != DDI_SUCCESS) {
					/* failed to create the map */
					(void) ndi_ra_map_destroy(dip,
					    NDI_RA_TYPE_PCI_BUSNUM);
					rval = NDI_FAILURE;
				}
			}
		}
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, dip);
	}
#endif

	return (rval);
}
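/*
 * Illustrative only, not part of the driver above: a sketch of how a
 * hot-plug or configuration path might later carve a bus number back out
 * of the NDI_RA_TYPE_PCI_BUSNUM map that pci_resource_setup() populated.
 * The helper name and error handling are assumptions for the example; it
 * relies only on the ndi_ra_alloc()/ndi_ra_request_t interfaces used
 * elsewhere in this file.
 */
static int
example_alloc_busnum(dev_info_t *dip, uint64_t *busnum)
{
	ndi_ra_request_t req;
	uint64_t retlen;

	bzero(&req, sizeof (req));
	req.ra_len = 1;		/* any single free bus number */

	if (ndi_ra_alloc(dip, &req, busnum, &retlen,
	    NDI_RA_TYPE_PCI_BUSNUM, 0) != NDI_SUCCESS)
		return (NDI_FAILURE);

	/* an undo path would return it with ndi_ra_free(dip, *busnum, 1, ...) */
	return (NDI_SUCCESS);
}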
/*
 * isa_resource_setup
 *	check for /used-resources and initialize
 *	based on info there.  If no /used-resources,
 *	fail.
 */
int
isa_resource_setup()
{
	dev_info_t *used, *usedpdip;
	/*
	 * note that at this time bootconf creates 32 bit properties for
	 * io-space and device-memory
	 */
	struct iorange {
		uint32_t	base;
		uint32_t	len;
	} *iorange;
	struct memrange {
		uint32_t	base;
		uint32_t	len;
	} *memrange;
	uint32_t *irq;
	int proplen;
	int i, len;
	int maxrange;
	ndi_ra_request_t req;
	uint64_t retbase;
	uint64_t retlen;

	used = ddi_find_devinfo("used-resources", -1, 0);
	if (used == NULL) {
		DEBUGPRT(CE_CONT,
		    "isa_resource_setup: used-resources not found");
		return (NDI_FAILURE);
	}

	/*
	 * initialize to all resources being present
	 * and then remove the ones in use.
	 */
	usedpdip = ddi_root_node();

	DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
	    (void *)used, (void *)usedpdip);

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* initialize io space, highest end base is 0xffff */
	/* note that length is highest addr + 1 since starts from 0 */
	(void) ndi_ra_free(usedpdip, 0, 0xffff + 1, NDI_RA_TYPE_IO, 0);

	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
	    "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
		maxrange = proplen / sizeof (struct iorange);
		/* remove the "used" I/O resources */
		for (i = 0; i < maxrange; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)iorange[i].base;
			req.ra_len = (uint64_t)iorange[i].len;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_IO, 0);
		}

		kmem_free((caddr_t)iorange, proplen);
	}

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* initialize memory space where highest end base is 0xffffffff */
	/* note that length is highest addr + 1 since starts from 0 */
	(void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
	    NDI_RA_TYPE_MEM, 0);

	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
	    "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
		maxrange = proplen / sizeof (struct memrange);
		/* remove the "used" memory resources */
		for (i = 0; i < maxrange; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)memrange[i].base;
			req.ra_len = (uint64_t)memrange[i].len;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_MEM, 0);
		}

		kmem_free((caddr_t)memrange, proplen);
	}

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* initialize the interrupt space */
	(void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);

#if defined(__i386) || defined(__amd64)
	bzero(&req, sizeof (req));
	req.ra_addr = 2;	/* IRQ 2 is routed to IRQ 9, so never allow it */
	req.ra_len = 1;
	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
	(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
	    NDI_RA_TYPE_INTR, 0);
#endif

	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
	    "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
		/* Initialize available interrupts by removing the used ones */
		len = (proplen / sizeof (uint32_t));
		for (i = 0; i < len; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)irq[i];
			req.ra_len = 1;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_INTR, 0);
		}

		kmem_free((caddr_t)irq, proplen);
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, usedpdip);
	}
#endif
	return (NDI_SUCCESS);
}
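/*
 * Illustrative only, not part of the driver above: once isa_resource_setup()
 * has seeded the root-node maps, a consumer can claim a specific port range
 * the same way the "used" ranges were removed, with NDI_RA_ALLOC_SPECIFIED.
 * The helper name and the 0x3f8/8 legacy-UART range are assumptions for the
 * example; only interfaces already used in this file are called.
 */
static int
example_claim_isa_ports(uint64_t base, uint64_t len)
{
	ndi_ra_request_t req;
	uint64_t retbase, retlen;

	bzero(&req, sizeof (req));
	req.ra_addr = base;	/* e.g. 0x3f8 for a legacy UART */
	req.ra_len = len;	/* e.g. 8 ports */
	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;

	/* fails if any part of the range has already been allocated */
	return (ndi_ra_alloc(ddi_root_node(), &req, &retbase, &retlen,
	    NDI_RA_TYPE_IO, 0));
}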