/*ARGSUSED*/
/*
 * Attach entry point for the tcli pseudo driver: allocate per-instance
 * soft state and create the "client" character minor node.
 */
static int
tcli_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(devi);
	struct dstate *sp;

	/* Only DDI_ATTACH does work; other commands are accepted as no-ops */
	if (cmd != DDI_ATTACH)
		return (DDI_SUCCESS);

	if (ddi_soft_state_zalloc(dstates, instance) != DDI_SUCCESS) {
		cmn_err(CE_CONT, "%s%d: can't allocate state\n",
		    ddi_get_name(devi), instance);
		return (DDI_FAILURE);
	}

	sp = ddi_get_soft_state(dstates, instance);
	sp->dip = devi;

	if (ddi_create_minor_node(devi, "client", S_IFCHR,
	    (INST_TO_MINOR(instance)), DDI_PSEUDO, NULL) == DDI_FAILURE) {
		/* undo the partial attach before failing */
		ddi_remove_minor_node(devi, NULL);
		ddi_soft_state_free(dstates, instance);
		cmn_err(CE_WARN, "%s%d: can't create minor nodes",
		    ddi_get_name(devi), instance);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}
/*
 * Force the PHY to a fixed speed/duplex setting, disabling
 * autonegotiation.
 *
 * Returns MII_PARAM for an invalid phy or an unsupported speed,
 * MII_SUCCESS otherwise.
 */
int
mii_fixspeed(mii_handle_t mac, int phy, int speed, int fullduplex)
{
	struct phydata *phyd;

#ifdef MIIDEBUG
	cmn_err(CE_CONT, "!%s: setting speed to %d, %s duplex",
	    ddi_get_name(mac->mii_dip), speed, fullduplex ? "full" : "half");
#endif

	if (!(phyd = mii_get_valid_phydata(mac, phy)))
		return (MII_PARAM);

	/*
	 * MII only defines 10 and 100 Mb/s; reject anything else before
	 * touching the PHY.  (Previously an unsupported speed was merely
	 * logged, but the control register was still rewritten and the
	 * bogus value recorded in fix_speed.)
	 */
	if (speed != 100 && speed != 10) {
		cmn_err(CE_NOTE, "%s: mii does not support %d Mb/s speed",
		    ddi_get_name(mac->mii_dip), speed);
		return (MII_PARAM);
	}

	phyd->control &= ~MII_CONTROL_ANE;

	if (speed == 100)
		phyd->control |= MII_CONTROL_100MB;
	else
		phyd->control &= ~MII_CONTROL_100MB;

	if (fullduplex)
		phyd->control |= MII_CONTROL_FDUPLEX;
	else
		phyd->control &= ~MII_CONTROL_FDUPLEX;

	mac->mii_write(mac->mii_dip, phy, MII_CONTROL, phyd->control);
	phyd->fix_speed = speed;
	phyd->fix_duplex = fullduplex;
	return (MII_SUCCESS);
}
/*
 * Build the "ranges" property for the ISA nexus from the io-space and
 * device-memory resource lists published by the used-resources node.
 * Resources consumed here are also removed from the PCI pools.
 */
static void
isa_create_ranges_prop(dev_info_t *dip)
{
	dev_info_t *used;
	int *ioarray, *memarray, status;
	uint_t nio = 0, nmem = 0, nrng = 0, n;
	pib_ranges_t *ranges;

	used = ddi_find_devinfo(USED_RESOURCES, -1, 0);
	if (used == NULL) {
		cmn_err(CE_WARN, "Failed to find used-resources <%s>\n",
		    ddi_get_name(dip));
		return;
	}

	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, used,
	    DDI_PROP_DONTPASS, "io-space", &ioarray, &nio);
	if (status != DDI_PROP_SUCCESS && status != DDI_PROP_NOT_FOUND) {
		cmn_err(CE_WARN, "io-space property failure for %s (%x)\n",
		    ddi_get_name(used), status);
		return;
	}

	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, used,
	    DDI_PROP_DONTPASS, "device-memory", &memarray, &nmem);
	if (status != DDI_PROP_SUCCESS && status != DDI_PROP_NOT_FOUND) {
		cmn_err(CE_WARN,
		    "device-memory property failure for %s (%x)\n",
		    ddi_get_name(used), status);
		/* don't leak the io-space array looked up above */
		if (nio != 0)
			ddi_prop_free(ioarray);
		return;
	}

	n = (nio + nmem) / USED_CELL_SIZE;
	if (n == 0) {
		/* nothing to translate; avoid a zero-length kmem_zalloc() */
		if (nio != 0)
			ddi_prop_free(ioarray);
		if (nmem != 0)
			ddi_prop_free(memarray);
		return;
	}

	ranges = (pib_ranges_t *)kmem_zalloc(sizeof (pib_ranges_t) * n,
	    KM_SLEEP);
	if (nio != 0) {
		nrng = isa_used_to_ranges(ISA_ADDR_IO, ioarray, nio, ranges);
		isa_remove_res_from_pci(ISA_ADDR_IO, ioarray, nio);
		ddi_prop_free(ioarray);
	}
	if (nmem != 0) {
		nrng += isa_used_to_ranges(ISA_ADDR_MEM, memarray, nmem,
		    ranges + nrng);
		isa_remove_res_from_pci(ISA_ADDR_MEM, memarray, nmem);
		ddi_prop_free(memarray);
	}

	if (!pseudo_isa)
		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "ranges",
		    (int *)ranges,
		    nrng * sizeof (pib_ranges_t) / sizeof (int));
	kmem_free(ranges, sizeof (pib_ranges_t) * n);
}
/*
 * Attach the xpv driver: create its minor node, initialize the driver
 * core, disable the memory scrubber (scrubbing pages handed to Xen would
 * panic the domain) and report our PV version to dom0.
 */
static int
xpv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	if (ddi_create_minor_node(dip, ddi_get_name(dip), S_IFCHR,
	    ddi_get_instance(dip), DDI_PSEUDO, 0) != DDI_SUCCESS)
		return (DDI_FAILURE);

	xpv_dip = dip;

	if (xpv_drv_init() != 0) {
		/*
		 * Undo the partial attach: previously the minor node (and
		 * xpv_dip) were left behind on this failure path.
		 */
		ddi_remove_minor_node(dip, NULL);
		xpv_dip = NULL;
		return (DDI_FAILURE);
	}

	ddi_report_dev(dip);

	/*
	 * If the memscrubber attempts to scrub the pages we hand to Xen,
	 * the domain will panic.
	 */
	memscrub_disable();

	/*
	 * Report our version to dom0.
	 */
	if (xenbus_printf(XBT_NULL, "guest/xpv", "version", "%d",
	    HVMPV_XPV_VERS))
		cmn_err(CE_WARN, "xpv: couldn't write version\n");

	return (DDI_SUCCESS);
}
/*
 * Decide whether the dma nexus and a child device have slot affinity.
 * Only "esp" children qualify; both nodes must have register properties,
 * share the same slot (bustype), and the address delta must match the
 * configured restriction when restrict_affinity is set.
 */
int
dma_affinity(dev_info_t *dma, dev_info_t *cdev)
{
	if (strcmp(ddi_get_name(cdev), "esp") != 0)
		return (DDI_FAILURE);

	if ((DEVI_PD(dma) && sparc_pd_getnreg(dma) > 0) &&
	    (DEVI_PD(cdev) && sparc_pd_getnreg(cdev) > 0)) {
		uint_t dma_slot = sparc_pd_getreg(dma, 0)->regspec_bustype;
		uint_t cdev_slot = sparc_pd_getreg(cdev, 0)->regspec_bustype;
		uint_t dma_addr = sparc_pd_getreg(dma, 0)->regspec_addr;
		uint_t cdev_addr = sparc_pd_getreg(cdev, 0)->regspec_addr;
		uint_t delta;

		/* absolute distance between the two register bases */
		delta = (dma_addr > cdev_addr) ?
		    dma_addr - cdev_addr : cdev_addr - dma_addr;

		if (dma_slot == cdev_slot &&
		    (!restrict_affinity || restrict_affinity_delta == delta))
			return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
/*
 * Resolve a devinfo node to its name (in buffer) and, when the driver
 * binding name differs from the node name, its alias (in alias).
 *
 * Returns 0 on success, -1 if dip is NULL.  Both output buffers are
 * cleared first; callers must size them for the longest driver name.
 */
static int
dr_resolve_devname(dev_info_t *dip, char *buffer, char *alias)
{
	major_t devmajor;
	char *aka, *name;

	*buffer = *alias = 0;

	if (dip == NULL)
		return (-1);

	if ((name = ddi_get_name(dip)) == NULL)
		name = "<null name>";

	aka = name;

	/*
	 * major_t is unsigned, so compare against (major_t)-1 explicitly
	 * rather than relying on the implicit conversion of -1.  Also
	 * guard against ddi_major_to_name() returning NULL, which would
	 * crash the strcmp() below.
	 */
	if ((devmajor = ddi_name_to_major(aka)) != (major_t)-1) {
		char *mname = ddi_major_to_name(devmajor);

		if (mname != NULL)
			aka = mname;
	}

	strcpy(buffer, name);

	if (strcmp(name, aka) != 0)
		strcpy(alias, aka);
	else
		*alias = 0;

	return (0);
}
/*
 * Debugging function: dump the contents of the first eight standard MII
 * registers of the given PHY via cmn_err(), followed by any
 * PHY-specific register dump the vendor module provides.
 */
int
mii_dump_phy(mii_handle_t mac, int phy)
{
	struct phydata *pd;
	char *regname[] = {
		"Control ", "Status ", "PHY Id(H) ", "PHY Id(L) ",
		"Advertisement ", "Link Partner Ability", "Expansion ",
		"Next Page Transmit ", 0
	};
	int reg;

	if ((pd = mii_get_valid_phydata(mac, phy)) == NULL)
		return (MII_PARAM);

	cmn_err(CE_NOTE, "%s: PHY %d, type %s", ddi_get_name(mac->mii_dip),
	    phy, pd->description ? pd->description : "Unknown");

	for (reg = 0; regname[reg]; reg++)
		cmn_err(CE_NOTE, "%s:\t%x",
		    regname[reg], mac->mii_read(mac->mii_dip, phy, reg));

	/* Let the PHY-specific code dump its private registers, if any */
	if (pd->phy_dump)
		pd->phy_dump((struct mii_info *)mac, phy);

	return (MII_SUCCESS);
}
/*
 * Count outstanding device references on every I/O unit in devlist.
 * A unit with a non-zero reference count is flagged busy via
 * dr_dev_err(ESBD_BUSY).  Returns the total reference count across
 * all units in the list.
 */
static int
dr_check_io_refs(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	register int i, reftotal = 0;
	static fn_t f = "dr_check_io_refs";

	for (i = 0; i < devnum; i++) {
		dr_io_unit_t *ip = (dr_io_unit_t *)devlist[i];
		dev_info_t *dip;
		int ref;
		sbd_error_t *err;

		err = drmach_get_dip(ip->sbi_cm.sbdev_id, &dip);
		if (err)
			/* record the drmach error on the unit */
			DRERR_SET_C(&ip->sbi_cm.sbdev_error, &err);
		else if (dip != NULL) {
			ref = 0;
			/* the devinfo branch must be held while walked */
			ASSERT(e_ddi_branch_held(dip));
			dr_check_devices(dip, &ref, hp, NULL, NULL, 0);
			hp->h_err = NULL;
			if (ref) {
				/* at least one open reference: unit busy */
				dr_dev_err(CE_WARN, &ip->sbi_cm, ESBD_BUSY);
			}
			PR_IO("%s: dip(%s) ref = %d\n",
			    f, ddi_get_name(dip), ref);
			reftotal += ref;
		} else {
			PR_IO("%s: NO dip for id (0x%x)\n",
			    f, (uint_t)(uintptr_t)ip->sbi_cm.sbdev_id);
		}
	}

	return (reftotal);
}
/*
 * Conditional debug printf for the acebus driver: when 'flag' is enabled
 * in acebus_debug_flags, print a prefix identifying the instance and the
 * subsystem, then the formatted message.
 */
static void
acebus_debug(uint_t flag, ebus_devstate_t *ebus_p, char *fmt, uintptr_t a1,
    uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5)
{
	char *s;

	if (acebus_debug_flags & flag) {
		switch (flag) {
		case D_ATTACH:
			s = "attach";
			break;
		case D_DETACH:
			s = "detach";
			break;
		case D_MAP:
			s = "map";
			break;
		case D_CTLOPS:
			s = "ctlops";
			break;
		case D_INTR:
			s = "intr";
			break;
		default:
			/*
			 * Previously 's' was left uninitialized for a flag
			 * without a case label, which is undefined behavior
			 * when it is passed to cmn_err() below.
			 */
			s = "debug";
			break;
		}
		if (ebus_p)
			cmn_err(CE_CONT, "%s%d: %s: ",
			    ddi_get_name(ebus_p->dip),
			    ddi_get_instance(ebus_p->dip), s);
		else
			cmn_err(CE_CONT, "ebus: ");
		cmn_err(CE_CONT, fmt, a1, a2, a3, a4, a5);
	}
}
/*
 * Attach the lx_ptm pseudo driver: create its minor node, obtain an LDI
 * identity, and initialize the layered-handle bookkeeping.
 */
static int
lx_ptm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int err;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	if (ddi_create_minor_node(dip, LX_PTM_MINOR_NODE, S_IFCHR,
	    ddi_get_instance(dip), DDI_PSEUDO, 0) != DDI_SUCCESS)
		return (DDI_FAILURE);

	err = ldi_ident_from_dip(dip, &lps.lps_li);
	if (err != 0) {
		/*
		 * The node was created as LX_PTM_MINOR_NODE above, so
		 * removing by ddi_get_name(dip) (the driver name) would not
		 * match it and the node would be leaked.  Remove all minor
		 * nodes for this dip instead.
		 */
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}

	lps.lps_dip = dip;
	lps.lps_pts_major = ddi_name_to_major(LP_PTS_DRV_NAME);

	rw_init(&lps.lps_lh_rwlock, NULL, RW_DRIVER, NULL);
	lps.lps_lh_count = 0;
	lps.lps_lh_array = NULL;

	return (DDI_SUCCESS);
}
/*
 * Translate a PCI interrupt (busid/devid/ipin) to an IRQ number using
 * ACPI.  The IRQ cache is consulted first; on a miss the translation is
 * performed via acpi_translate_pci_irq(), and when only partial
 * information is available the interrupt link device is configured (or
 * queried for its current setting).  A successful result is cached and
 * the IRQ is switched to PCI (level) mode in the ELCR.
 *
 * Returns an ACPI_PSM_* status; *pci_irqp and *intr_flagp are filled in
 * on success.
 */
static int
xen_uppc_acpi_translate_pci_irq(dev_info_t *dip, int busid, int devid,
    int ipin, int *pci_irqp, iflag_t *intr_flagp)
{
	int status;
	acpi_psm_lnk_t acpipsmlnk;

	/* Fast path: a previous translation may already be cached */
	if ((status = acpi_get_irq_cache_ent(busid, devid, ipin, pci_irqp,
	    intr_flagp)) == ACPI_PSM_SUCCESS) {
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: Found irqno %d "
		    "from cache for device %s, instance #%d\n", *pci_irqp,
		    ddi_get_name(dip), ddi_get_instance(dip)));
		return (status);
	}

	bzero(&acpipsmlnk, sizeof (acpi_psm_lnk_t));

	if ((status = acpi_translate_pci_irq(dip, ipin, pci_irqp,
	    intr_flagp, &acpipsmlnk)) == ACPI_PSM_FAILURE) {
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: "
		    " acpi_translate_pci_irq failed for device %s, instance"
		    " #%d\n", ddi_get_name(dip), ddi_get_instance(dip)));
		return (status);
	}

	/*
	 * PARTIAL means the device routes through an interrupt link object
	 * that still needs configuring; fall back to its current resource
	 * setting if configuration fails.
	 */
	if (status == ACPI_PSM_PARTIAL && acpipsmlnk.lnkobj != NULL) {
		status = xen_uppc_acpi_irq_configure(&acpipsmlnk, dip,
		    pci_irqp, intr_flagp);
		if (status != ACPI_PSM_SUCCESS) {
			status = acpi_get_current_irq_resource(&acpipsmlnk,
			    pci_irqp, intr_flagp);
		}
	}

	if (status == ACPI_PSM_SUCCESS) {
		/* Remember the translation for subsequent lookups */
		acpi_new_irq_cache_ent(busid, devid, ipin, *pci_irqp,
		    intr_flagp, &acpipsmlnk);
		psm_set_elcr(*pci_irqp, 1);	/* set IRQ to PCI mode */

		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: [ACPI] "
		    "new irq %d for device %s, instance #%d\n", *pci_irqp,
		    ddi_get_name(dip), ddi_get_instance(dip)));
	}

	return (status);
}
/* ARGSUSED */
/*
 * Emit a warning message prefixed with the ncall device's name and
 * instance.  The dev argument is unused; the routine always returns 0.
 */
static int
ncallprint(dev_t dev, char *str)
{
	cmn_err(CE_WARN, "%s%d: %s",
	    ddi_get_name(ncall_dip), ddi_get_instance(ncall_dip), str);

	return (0);
}
/*
 * Return 1 if any ancestor of dip below the root node is the pseudo
 * nexus, 0 otherwise.
 */
int
is_pseudo_device(dev_info_t *dip)
{
	dev_info_t *pdip = ddi_get_parent(dip);

	while (pdip != NULL && pdip != ddi_root_node()) {
		if (strcmp(ddi_get_name(pdip), DEVI_PSEUDO_NEXNAME) == 0)
			return (1);
		pdip = ddi_get_parent(pdip);
	}

	return (0);
}
/*
 * Attach handler for the domcaps pseudo device: create its minor node
 * and record the devinfo pointer for the rest of the driver.
 */
static int
domcaps_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int rv = DDI_FAILURE;

	if (cmd == DDI_ATTACH &&
	    ddi_create_minor_node(devi, ddi_get_name(devi), S_IFCHR,
	    ddi_get_instance(devi), DDI_PSEUDO, 0) == DDI_SUCCESS) {
		domcaps_devi = devi;
		ddi_report_dev(devi);
		rv = DDI_SUCCESS;
	}

	return (rv);
}
/*
 * Periodic port monitor: runs off a timeout every 5 seconds, checks the
 * link state of every initialised PHY, and notifies the MAC driver via
 * mii_linknotify() of any change.  Re-arms its own timeout before
 * returning.
 */
static void
mii_portmon(mii_handle_t mac)
{
	int i;
	enum mii_phy_state state;
	struct phydata *phydata;

	/*
	 * There is a potential deadlock between this test and the
	 * mutex_enter
	 */
	if (!mac->mii_linknotify) /* Exiting */
		return;

	if (mac->lock)
		mutex_enter(mac->lock);

	/*
	 * For each initialised phy, see if the link state has changed, and
	 * callback to the mac driver if it has
	 */
	for (i = 0; i < 32; i++) {	/* MII allows 32 PHY addresses */
		if ((phydata = mac->phys[i]) != 0) {
			state = mii_linkup(mac, i) ?
			    phy_state_linkup : phy_state_linkdown;
			if (state != phydata->state) {
#ifdef MIIDEBUG
				if (miidebug)
					cmn_err(CE_NOTE, "%s: PHY %d link %s",
					    ddi_get_name(mac->mii_dip), i,
					    state == phy_state_linkup ?
					    "up" : "down");
#endif
				phydata->state = state;
				mac->mii_linknotify(mac->mii_dip, i, state);
			}
		}
	}

	/* Check the ports every 5 seconds */
	mac->portmon_timer = timeout((void (*)(void*))mii_portmon,
	    (void *)mac, (clock_t)(5 * drv_usectohz(1000000)));

	if (mac->lock)
		mutex_exit(mac->lock);
}
/*
 * Attach handler for the fm pseudo driver.  DDI_RESUME is a no-op
 * success; DDI_ATTACH creates the minor node, records the dip and
 * notes whether we are running on the i86xpv platform.
 */
static int
fm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	if (ddi_create_minor_node(dip, ddi_get_name(dip), S_IFCHR,
	    ddi_get_instance(dip), DDI_PSEUDO, 0) != DDI_SUCCESS) {
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}

	fm_dip = dip;
	is_i86xpv = (strcmp(platform, "i86xpv") == 0);

	return (DDI_SUCCESS);
}
/*
 * Read and cache the "ranges" property of the ebus node.  On success
 * the range array and entry count are stored in the soft state (the
 * array remains allocated for the life of the instance); on failure the
 * property buffer is released.
 */
static int
acebus_get_ranges_prop(ebus_devstate_t *ebus_p)
{
	struct ebus_pci_rangespec *rangep;
	int nrange, range_len;

	if (ddi_getlongprop(DDI_DEV_T_ANY, ebus_p->dip, DDI_PROP_DONTPASS,
	    "ranges", (caddr_t)&rangep, &range_len) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: can't get ranges property",
		    ddi_get_name(ebus_p->dip), ddi_get_instance(ebus_p->dip));
		return (DDI_ME_REGSPEC_RANGE);
	}

	nrange = range_len / sizeof (struct ebus_pci_rangespec);
	if (nrange == 0) {
		/* an empty ranges property is useless; drop the buffer */
		kmem_free(rangep, range_len);
		return (DDI_FAILURE);
	}

#ifdef DEBUG
	{
		int i;

		for (i = 0; i < nrange; i++) {
			DBG5(D_MAP, ebus_p, "ebus range addr 0x%x.0x%x "
			    "PCI range addr 0x%x.0x%x.0x%x ",
			    rangep[i].ebus_phys_hi, rangep[i].ebus_phys_low,
			    rangep[i].pci_phys_hi, rangep[i].pci_phys_mid,
			    rangep[i].pci_phys_low);
			DBG1(D_MAP, ebus_p, "Size 0x%x\n",
			    rangep[i].rng_size);
		}
	}
#endif /* DEBUG */

	ebus_p->rangep = rangep;
	ebus_p->range_cnt = nrange;

	return (DDI_SUCCESS);
}
/*
 * If the interrupt link device is already configured,
 * stores polarity and sensitivity in the structure pointed to by
 * intr_flagp, and irqno in the value pointed to by pci_irqp.
 *
 * Returns:
 *	ACPI_PSM_SUCCESS if the interrupt link device is already configured.
 *	ACPI_PSM_PARTIAL if configuration is needed.
 *	ACPI_PSM_FAILURE in case of error.
 *
 * When two devices share the same interrupt link device, and the
 * link device is already configured (i.e. found in the irq cache)
 * we need to use the already configured irq instead of reconfiguring
 * the link device.
 */
static int
acpi_eval_lnk(dev_info_t *dip, char *lnkname, int *pci_irqp,
    iflag_t *intr_flagp, acpi_psm_lnk_t *acpipsmlnkp)
{
	ACPI_HANDLE tmpobj;
	ACPI_HANDLE lnkobj;
	int status;

	/*
	 * Convert the passed-in link device name to a handle
	 */
	if (AcpiGetHandle(NULL, lnkname, &lnkobj) != AE_OK) {
		return (ACPI_PSM_FAILURE);
	}

	/*
	 * Assume that the link device is invalid if no _CRS method
	 * exists, since _CRS method is a required method
	 */
	if (AcpiGetHandle(lnkobj, "_CRS", &tmpobj) != AE_OK) {
		return (ACPI_PSM_FAILURE);
	}

	ASSERT(acpipsmlnkp != NULL);
	acpipsmlnkp->lnkobj = lnkobj;

	/* Shared link already configured?  Reuse its cached irq. */
	if ((acpi_get_irq_lnk_cache_ent(lnkobj, pci_irqp, intr_flagp)) ==
	    ACPI_PSM_SUCCESS) {
		PSM_VERBOSE_IRQ((CE_CONT, "!psm: link object found from cache "
		    " for device %s, instance #%d, irq no %d\n",
		    ddi_get_name(dip), ddi_get_instance(dip), *pci_irqp));
		return (ACPI_PSM_SUCCESS);
	} else {
		/*
		 * Not yet configured: record the link's _STA status for
		 * the caller, which must complete the configuration.
		 */
		if (acpica_eval_int(lnkobj, "_STA", &status) == AE_OK) {
			acpipsmlnkp->device_status = (uchar_t)status;
		}

		return (ACPI_PSM_PARTIAL);
	}
}
/*
 * Restart autonegotiation on a PHY.  Interrupt-driven waiting is not
 * supported; with mii_wait_user the routine polls (up to ~2 seconds)
 * for the autonegotiation-done status bit, otherwise it returns
 * immediately with MII_TIMEOUT and the caller relies on the port
 * monitor to pick up the result.
 */
int
mii_rsan(mii_handle_t mac, int phy, enum mii_wait_type wait)
{
	struct phydata *phyd;
	void *dip;
	int tries;

	if (wait == mii_wait_interrupt ||
	    !(phyd = mii_get_valid_phydata(mac, phy)))
		return (MII_PARAM);

	/* A PHY locked to a fixed speed cannot autonegotiate */
	if (phyd->fix_speed)
		return (MII_STATE);

	dip = mac->mii_dip;
	phyd->control |= MII_CONTROL_ANE;
	mac->mii_write(dip, phy, MII_CONTROL,
	    phyd->control | MII_CONTROL_RSAN);

	/*
	 * This can take ages (a second or so).  It makes more sense to use
	 * the port monitor rather than waiting for completion of this on
	 * the PHY; a busy wait here is pointless.
	 */
	if (wait == mii_wait_user) {
		for (tries = 200; tries--; ) {
			delay(drv_usectohz(10000));
			if (mac->mii_read(dip, phy, MII_STATUS) &
			    MII_STATUS_ANDONE)
				return (MII_SUCCESS);
		}
		cmn_err(CE_NOTE, "!%s:Timed out waiting for autonegotiation",
		    ddi_get_name(mac->mii_dip));
		return (MII_TIMEOUT);
	}

	return (MII_TIMEOUT);
}
/*
 * Attach entry point for the bdtrp pseudo driver: allocate per-instance
 * soft state, create the "bdtrp" minor node and arm the periodic timer.
 * On failure after soft-state allocation, bdtrp_detach() is invoked to
 * unwind whatever partial setup succeeded.
 */
static int
bdtrp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	bdtrp_devstate_t *rsp;

	switch (cmd) {
	case DDI_ATTACH:
		instance = ddi_get_instance(dip);
		if (ddi_soft_state_zalloc(bdtrp_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_CONT, "%s%d: can't allocate state\n",
			    ddi_get_name(dip), instance);
			return (DDI_FAILURE);
		} else
			rsp = ddi_get_soft_state(bdtrp_state, instance);
		if (ddi_create_minor_node(dip, "bdtrp", S_IFCHR,
		    instance, DDI_PSEUDO, 0) == DDI_FAILURE) {
			ddi_remove_minor_node(dip, NULL);
			goto attach_failed;
		}
		rsp->dip = dip;
		ddi_report_dev(dip);
		/*
		 * Arm the periodic timer (every 120 seconds).
		 * NOTE(review): the (void *)4096 argument is a magic value
		 * presumably interpreted by bdtrp_timer -- confirm against
		 * its definition; the timeout id is also discarded, so it
		 * cannot be cancelled at detach time.
		 */
		timeout(bdtrp_timer, (void *)4096, hz*120);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

attach_failed:
	/* Let detach unwind the partial attach (soft state, nodes, ...) */
	(void) bdtrp_detach(dip, DDI_DETACH);
	return (DDI_FAILURE);
}
/*ARGSUSED*/
/*
 * Translate an ISA register specification into a PCI regspec using the
 * "ranges" property of dip.  When the request is not covered by any
 * range, fall back to the isa_extra_resource list, which covers
 * serial/parallel ports enumerated by BIOS rather than ACPI.
 * Returns DDI_SUCCESS or DDI_ME_REGSPEC_RANGE.
 */
static int
isa_apply_range(dev_info_t *dip, struct regspec *isa_reg_p,
    pci_regspec_t *pci_reg_p)
{
	pib_ranges_t *ranges, *rng_p;
	int len, i, offset, nrange;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&ranges, &len) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Can't get %s ranges property",
		    ddi_get_name(dip));
		return (DDI_ME_REGSPEC_RANGE);
	}

	nrange = len / sizeof (pib_ranges_t);
	rng_p = ranges;
	for (i = 0; i < nrange; i++, rng_p++) {
		/* Check for correct space */
		if (isa_reg_p->regspec_bustype != rng_p->child_high)
			continue;

		/* Detect whether request entirely fits within a range */
		if (isa_reg_p->regspec_addr < rng_p->child_low)
			continue;
		if ((isa_reg_p->regspec_addr + isa_reg_p->regspec_size - 1) >
		    (rng_p->child_low + rng_p->size - 1))
			continue;

		/* translate the child address into the parent's space */
		offset = isa_reg_p->regspec_addr - rng_p->child_low;

		pci_reg_p->pci_phys_hi = rng_p->parent_high;
		pci_reg_p->pci_phys_mid = 0;
		pci_reg_p->pci_phys_low = rng_p->parent_low + offset;
		pci_reg_p->pci_size_hi = 0;
		pci_reg_p->pci_size_low = isa_reg_p->regspec_size;

		break;
	}

	kmem_free(ranges, len);

	/* a break above means a range matched */
	if (i < nrange)
		return (DDI_SUCCESS);

	/*
	 * Check extra resource range specially for serial and parallel
	 * devices, which are treated differently from all other ISA
	 * devices. On some machines, serial ports are not enumerated
	 * by ACPI but by BIOS, with io base addresses noted in legacy
	 * BIOS data area. Parallel port on some machines comes with
	 * illegal size.
	 */
	if (isa_reg_p->regspec_bustype != ISA_ADDR_IO) {
		cmn_err(CE_WARN, "Bus type not ISA I/O\n");
		return (DDI_ME_REGSPEC_RANGE);
	}

	for (i = 0; i < isa_extra_count; i++) {
		struct regspec *reg_p = &isa_extra_resource[i];

		/* the request must fall entirely inside the extra entry */
		if (isa_reg_p->regspec_addr < reg_p->regspec_addr)
			continue;
		if ((isa_reg_p->regspec_addr + isa_reg_p->regspec_size) >
		    (reg_p->regspec_addr + reg_p->regspec_size))
			continue;

		/* extra entries are I/O space, relative addressing */
		pci_reg_p->pci_phys_hi = PCI_ADDR_IO | PCI_REG_REL_M;
		pci_reg_p->pci_phys_mid = 0;
		pci_reg_p->pci_phys_low = isa_reg_p->regspec_addr;
		pci_reg_p->pci_size_hi = 0;
		pci_reg_p->pci_size_low = isa_reg_p->regspec_size;
		break;
	}

	if (i < isa_extra_count)
		return (DDI_SUCCESS);

	cmn_err(CE_WARN, "isa_apply_range: Out of range base <0x%x>, size <%d>", isa_reg_p->regspec_addr, isa_reg_p->regspec_size);
	return (DDI_ME_REGSPEC_RANGE);
}
/*ARGSUSED*/
/*
 * Bus ctl entry point for the tvhci nexus.  Child init/uninit and device
 * reporting are handled locally; ops a pseudo nexus should never receive
 * are logged and failed; everything else is passed to ddi_ctlops().
 */
static int
tvhci_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	dev_info_t *child;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == NULL)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?tvhci-device: %s%d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		child = (dev_info_t *)arg;
		return (tvhci_initchild(dip, child));

	case DDI_CTLOPS_UNINITCHILD:
		child = (dev_info_t *)arg;
		return (tvhci_uninitchild(dip, child));

	case DDI_CTLOPS_DMAPMAPC:
	case DDI_CTLOPS_REPORTINT:
	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
	case DDI_CTLOPS_SIDDEV:
	case DDI_CTLOPS_SLAVEONLY:
	case DDI_CTLOPS_AFFINITY:
	case DDI_CTLOPS_POKE:
	case DDI_CTLOPS_PEEK:
		/*
		 * These ops correspond to functions that "shouldn't" be called
		 * by a pseudo driver. So we whine when we're called.
		 */
		cmn_err(CE_CONT, "%s%d: invalid op (%d) from %s%d\n",
		    ddi_get_name(dip), ddi_get_instance(dip),
		    ctlop, ddi_get_name(rdip), ddi_get_instance(rdip));
		return (DDI_FAILURE);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_BTOP:
	case DDI_CTLOPS_BTOPR:
	case DDI_CTLOPS_DETACH:
	case DDI_CTLOPS_DVMAPAGESIZE:
	case DDI_CTLOPS_IOMIN:
	case DDI_CTLOPS_POWER:
	case DDI_CTLOPS_PTOB:
	default:
		/*
		 * The ops that we pass up (default). We pass up memory
		 * allocation oriented ops that we receive - these may be
		 * associated with pseudo HBA drivers below us with target
		 * drivers below them that use ddi memory allocation
		 * interfaces like scsi_alloc_consistent_buf.
		 */
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}
/*
 * The pmubus_map routine determines if it's child is attempting to map a
 * shared reg. If it is, it installs it's own vectors and bus private pointer.
 *
 * Control flow note: the DDI_MT_REGSPEC case deliberately falls through
 * to default, and 'ret' doubles as a sentinel distinguishing which error
 * applies; the 'done' label frees the property buffers on every exit.
 */
static int
pmubus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t off, off_t len, caddr_t *addrp)
{
	pmubus_devstate_t *pmubusp = ddi_get_soft_state(per_pmubus_state,
	    ddi_get_instance(dip));
	dev_info_t *pdip = (dev_info_t *)DEVI(dip)->devi_parent;
	pmubus_regspec_t pmubus_rp;
	pmubus_obpregspec_t *pmubus_regs = NULL;
	int pmubus_regs_size;
	uint64_t *pmubus_regmask = NULL;
	int pmubus_regmask_size;
	pci_regspec_t pci_reg;
	int32_t rnumber = mp->map_obj.rnumber;
	pmubus_mapreq_t *pmubus_mapreqp;
	int ret = DDI_SUCCESS;
	char *map_fail1 = "Map Type Unknown";
	char *map_fail2 = "DDI_MT_REGSPEC";
	char *s = map_fail1;

	*addrp = NULL;

	/*
	 * Handle the mapping according to its type.
	 */
	DPRINTF(PMUBUS_MAP_DEBUG, ("rdip=%s%d: off=%lx len=%lx\n",
	    ddi_get_name(rdip), ddi_get_instance(rdip), off, len));
	switch (mp->map_type) {
	case DDI_MT_RNUMBER: {
		int n;

		/*
		 * Get the "reg" property from the device node and convert
		 * it to our parent's format.
		 */
		rnumber = mp->map_obj.rnumber;
		DPRINTF(PMUBUS_MAP_DEBUG, ("rdip=%s%d: rnumber=%x "
		    "handlep=%p\n", ddi_get_name(rdip),
		    ddi_get_instance(rdip), rnumber,
		    (void *)mp->map_handlep));
		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&pmubus_regs, &pmubus_regs_size) !=
		    DDI_SUCCESS) {
			DPRINTF(PMUBUS_MAP_DEBUG, ("can't get reg "
			    "property\n"));
			ret = DDI_ME_RNUMBER_RANGE;
			goto done;
		}
		n = pmubus_regs_size / sizeof (pmubus_obpregspec_t);

		if (rnumber < 0 || rnumber >= n) {
			DPRINTF(PMUBUS_MAP_DEBUG, ("rnumber out of range\n"));
			ret = DDI_ME_RNUMBER_RANGE;
			goto done;
		}

		/* assemble the 64-bit address from the OBP hi/lo cells */
		pmubus_rp.reg_addr = ((uint64_t)
		    pmubus_regs[rnumber].reg_addr_hi << 32) |
		    (uint64_t)pmubus_regs[rnumber].reg_addr_lo;
		pmubus_rp.reg_size = pmubus_regs[rnumber].reg_size;

		/* register-mask is optional; a lookup failure is ignored */
		(void) ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "register-mask", (caddr_t)&pmubus_regmask,
		    &pmubus_regmask_size);

		/* Create our own mapping private structure */
		break;
	}
	case DDI_MT_REGSPEC:
		/*
		 * This bus has no bus children that have to map in an address
		 * space, so we can assume that we'll never see an
		 * DDI_MT_REGSPEC request
		 */
		s = map_fail2;
		ret = DDI_ME_REGSPEC_RANGE;
		/*FALLTHROUGH*/
	default:
		/* ret still DDI_SUCCESS means we got here via default */
		if (ret == DDI_SUCCESS)
			ret = DDI_ME_INVAL;
		DPRINTF(PMUBUS_MAP_DEBUG, ("rdip=%s%d: pmubus_map: "
		    "%s is an invalid map type.\nmap request handlep=0x%p\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip), s,
		    (void *)mp));
		ret = DDI_ME_RNUMBER_RANGE;
		goto done;
	}

	/* Adjust our reg property with offset and length */
	if ((pmubus_rp.reg_addr + off) >
	    (pmubus_rp.reg_addr + pmubus_rp.reg_size)) {
		ret = DDI_ME_INVAL;
		goto done;
	}
	pmubus_rp.reg_addr += off;

	if (len && (len < pmubus_rp.reg_size))
		pmubus_rp.reg_size = len;

	/* Translate our child regspec into our parents address domain */
	ret = pmubus_apply_range(pmubusp, rdip, &pmubus_rp, &pci_reg);

	/* Check if the apply range failed */
	if (ret < DDI_SUCCESS)
		goto done;

	/*
	 * If our childs xlated address falls into our shared address range,
	 * setup our mapping handle.
	 */
	if (ret > DDI_SUCCESS) {
		/* Figure out if we're mapping or unmapping */
		switch (mp->map_op) {
		case DDI_MO_MAP_LOCKED: {
			ddi_acc_impl_t *hp = (ddi_acc_impl_t *)mp->map_handlep;

			pmubus_mapreqp = kmem_alloc(sizeof (*pmubus_mapreqp),
			    KM_SLEEP);

			pmubus_mapreqp->mapreq_flags = ret;
			pmubus_mapreqp->mapreq_softsp = pmubusp;
			pmubus_mapreqp->mapreq_addr = pmubus_rp.reg_addr;
			pmubus_mapreqp->mapreq_size = pmubus_rp.reg_size;

			if (ret & MAPREQ_SHARED_BITS) {
				pmubus_mapreqp->mapreq_mask =
				    pmubus_mask(pmubus_regs, rnumber,
				    pmubus_regmask);
				DPRINTF(PMUBUS_MAP_DEBUG, ("rnumber=%d "
				    "mask=%lx\n", rnumber,
				    pmubus_mapreqp->mapreq_mask));
				if (pmubus_mapreqp->mapreq_mask == 0) {
					/* no mask bits: reject the mapping */
					kmem_free(pmubus_mapreqp,
					    sizeof (pmubus_mapreq_t));
					ret = DDI_ME_INVAL;
					break;
				}
			}

			hp->ahi_common.ah_bus_private = pmubus_mapreqp;

			/* Initialize the access vectors */
			hp->ahi_get8 = pmubus_get8;
			hp->ahi_get16 = pmubus_noget16;
			hp->ahi_get32 = pmubus_get32;
			hp->ahi_get64 = pmubus_noget64;
			hp->ahi_put8 = pmubus_put8;
			hp->ahi_put16 = pmubus_noput16;
			hp->ahi_put32 = pmubus_put32;
			hp->ahi_put64 = pmubus_noput64;
			hp->ahi_rep_get8 = pmubus_norep_get8;
			hp->ahi_rep_get16 = pmubus_norep_get16;
			hp->ahi_rep_get32 = pmubus_norep_get32;
			hp->ahi_rep_get64 = pmubus_norep_get64;
			hp->ahi_rep_put8 = pmubus_norep_put8;
			hp->ahi_rep_put16 = pmubus_norep_put16;
			hp->ahi_rep_put32 = pmubus_norep_put32;
			hp->ahi_rep_put64 = pmubus_norep_put64;

			ret = DDI_SUCCESS;
			break;
		}
		case DDI_MO_UNMAP: {
			ddi_acc_impl_t *hp = (ddi_acc_impl_t *)mp->map_handlep;

			pmubus_mapreqp = hp->ahi_common.ah_bus_private;

			/* Free the our map request struct */
			kmem_free(pmubus_mapreqp, sizeof (pmubus_mapreq_t));

			ret = DDI_SUCCESS;
			break;
		}
		default:
			ret = DDI_ME_UNSUPPORTED;
		}
	} else {
		/* Prepare the map request struct for a call to our parent */
		mp->map_type = DDI_MT_REGSPEC;
		mp->map_obj.rp = (struct regspec *)&pci_reg;

		/* Pass the mapping operation up the device tree */
		ret = (DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)
		    (pdip, rdip, mp, off, len, addrp);
	}

done:
	if (pmubus_regs != NULL)
		kmem_free(pmubus_regs, pmubus_regs_size);

	if (pmubus_regmask != NULL)
		kmem_free(pmubus_regmask, pmubus_regmask_size);

	return (ret);
}
/*ARGSUSED*/
/*
 * Bus ctl entry point for the pcmem nexus: handles device reporting and
 * child init/uninit (naming a child "techreg,cissp"); all other ops are
 * passed to ddi_ctlops().
 */
static int
pcmem_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	char name[MAXNAMELEN];
	int techreg, cissp;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0) {
			return (DDI_FAILURE);
		}

		PCMEM_DEBUG((CE_CONT,
		    "?pcmem_ctlops: %s%d at %s in socket %d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip),
		    ddi_get_name(dip),
		    ddi_getprop(DDI_DEV_T_NONE, rdip,
		    DDI_PROP_DONTPASS, "socket", -1)));

		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		PCMEM_DEBUG((CE_CONT,
		    "pcmem_ctlops - DDI_CTLOPS_INITCHILD persistent=%x\n",
		    ndi_dev_is_persistent_node((dev_info_t *)arg)));

		/* only .conf/firmware-enumerated children are accepted */
		if (!ndi_dev_is_persistent_node((dev_info_t *)arg))
			return (DDI_FAILURE);

		/*
		 * XXXX - Read card CIS to determine technology
		 *	region(tn) and CIS space(dn).
		 *	Refer to Bugid 1179336.
		 */

		/*
		 * see cis_handler.h for CISTPL_DEVICE
		 *	and CISTPL_DEVICE_A
		 *
		 * CISTPL_DEVICE_DTYPE_NULL	0x00	NULL device
		 * CISTPL_DEVICE_DTYPE_ROM	0x01	ROM
		 * CISTPL_DEVICE_DTYPE_OTPROM	0x02	OTPROM
		 * CISTPL_DEVICE_DTYPE_EPROM	0x03	EPROM
		 * CISTPL_DEVICE_DTYPE_EEPROM	0x04	EEPROM
		 * CISTPL_DEVICE_DTYPE_FLASH	0x05	FLASH
		 * CISTPL_DEVICE_DTYPE_SRAM	0x06	SRAM
		 * CISTPL_DEVICE_DTYPE_DRAM	0x07	DRAM
		 *
		 */
		/*
		 * XXXX - For now set to default SRAM device
		 */
		techreg = CISTPL_DEVICE_DTYPE_SRAM;
		cissp = 0;
		(void) sprintf(name, "%d,%d", techreg, cissp);
		ddi_set_name_addr((dev_info_t *)arg, name);

		PCMEM_DEBUG((CE_CONT,
		    "pcmem_ctlops - DDI_CTLOPS_INITCHILD name=%s\n", name));

		return (DDI_SUCCESS);

	case DDI_CTLOPS_UNINITCHILD:
		ddi_set_name_addr((dev_info_t *)arg, NULL);

		PCMEM_DEBUG((CE_CONT,
		    "pcmem_ctlops - DDI_CTLOPS_UNINITCHILD child: %s(%d)\n",
		    ddi_node_name(arg), ddi_get_instance(arg)));

		return (DDI_SUCCESS);

	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}
/*
 * Attach a pca9555/pca9556 I2C GPIO device: allocate soft state, create
 * the minor node(s) (one per port for the pca9555, one total for the
 * pca9556), register with the I2C framework, and preallocate the single
 * transfer buffer used by all reads and writes.
 */
static int
pca9556_attach(dev_info_t *dip)
{
	pca9556_unit_t *pcap;
	int instance = ddi_get_instance(dip);
	char name[MAXNAMELEN];
	char *device_name;
	minor_t minor;
	int i, num_ports;

	if (ddi_soft_state_zalloc(pca9556_soft_statep, instance) != 0) {
		cmn_err(CE_WARN, "%s%d failed to zalloc softstate",
		    ddi_get_name(dip), instance);
		return (DDI_FAILURE);
	}

	pcap = ddi_get_soft_state(pca9556_soft_statep, instance);
	if (pcap == NULL)
		return (DDI_FAILURE);

	mutex_init(&pcap->pca9556_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&pcap->pca9556_cv, NULL, CV_DRIVER, NULL);

	(void) snprintf(pcap->pca9556_name, sizeof (pcap->pca9556_name),
	    "%s_%d", ddi_driver_name(dip), instance);

	device_name = ddi_get_name(dip);
	if (strcmp(device_name, "i2c-pca9555") == 0) {
		num_ports = PCA9555_NUM_PORTS;
		pcap->pca9555_device = B_TRUE;
	} else {
		num_ports = PCA9556_NUM_PORTS;
		pcap->pca9555_device = B_FALSE;
		minor = INST_TO_MINOR(instance);
	}

	for (i = 0; i < num_ports; i++) {
		if (!(pcap->pca9555_device)) {
			(void) snprintf(pcap->pca9556_name,
			    sizeof (pcap->pca9556_name), "%s_%d",
			    ddi_driver_name(dip), instance);
			(void) snprintf(name, sizeof (name), "%s",
			    pcap->pca9556_name);
		} else {
			(void) sprintf(name, "port_%d", i);
			minor = INST_TO_MINOR(instance) |
			    PORT_TO_MINOR(I2C_PORT(i));
		}

		if (ddi_create_minor_node(dip, name, S_IFCHR, minor,
		    PCA9556_NODE_TYPE, NULL) == DDI_FAILURE) {
			cmn_err(CE_WARN, "%s: failed to create node for %s",
			    pcap->pca9556_name, name);
			pca9556_detach(dip);
			return (DDI_FAILURE);
		}
	}
	pcap->pca9556_flags |= PCA9556_MINORFLAG;

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	/*
	 * Register with the I2C framework first: i2c_transfer_alloc()
	 * below needs the handle that registration fills in.  (Previously
	 * the allocation ran before registration and was handed the
	 * still-zeroed handle from the soft state.)
	 */
	if (i2c_client_register(dip, &pcap->pca9556_hdl) != I2C_SUCCESS) {
		ddi_remove_minor_node(dip, NULL);
		cmn_err(CE_WARN, "%s i2c_client_register failed",
		    pcap->pca9556_name);
		pca9556_detach(dip);
		return (DDI_FAILURE);
	}
	pcap->pca9556_flags |= PCA9556_REGFLAG;

	/*
	 * preallocate a single buffer for all reads and writes
	 */
	if (i2c_transfer_alloc(pcap->pca9556_hdl, &pcap->pca9556_transfer,
	    2, 2, I2C_SLEEP) != I2C_SUCCESS) {
		cmn_err(CE_WARN, "%s i2c_transfer_alloc failed",
		    pcap->pca9556_name);
		pca9556_detach(dip);
		return (DDI_FAILURE);
	}
	pcap->pca9556_flags |= PCA9556_TBUFFLAG;
	pcap->pca9556_transfer->i2c_version = I2C_XFER_REV;

	/*
	 * Store the dip for future use.
	 */
	pcap->pca9556_dip = dip;

	return (DDI_SUCCESS);
}
/*
 * Interrupt-ops entry point for the acebus nexus.  Capability queries
 * are answered locally; SETCAP/SETMASK/CLRMASK/GETPENDING are not
 * supported.  For everything else the PIL is derived (by child name,
 * then device_type, then a default of 1) before passing the request up
 * to the parent nexus.
 */
static int
acebus_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
#ifdef DEBUG
	ebus_devstate_t *ebus_p = get_acebus_soft_state(ddi_get_instance(dip));
#endif
	int8_t *name, *device_type;
	int32_t i, max_children, max_device_types, len;

	/*
	 * NOTE: These ops below will never be supported in this nexus
	 * driver, hence they always return immediately.
	 */
	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		*(int *)result = DDI_INTR_FLAG_LEVEL;
		return (DDI_SUCCESS);
	case DDI_INTROP_SUPPORTED_TYPES:
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;
		return (DDI_SUCCESS);
	case DDI_INTROP_SETCAP:
	case DDI_INTROP_SETMASK:
	case DDI_INTROP_CLRMASK:
	case DDI_INTROP_GETPENDING:
		return (DDI_ENOTSUP);
	default:
		break;
	}

	/* a PIL already chosen (non-zero) is left alone */
	if (hdlp->ih_pri)
		goto done;

	/*
	 * This is a hack to set the PIL for the devices under ebus.
	 * We first look up a device by it's specific name, if we can't
	 * match the name, we try and match it's device_type property.
	 * Lastly we default a PIL level of 1.
	 */
	DBG1(D_INTR, ebus_p, "ebus_p %p\n", ebus_p);

	name = ddi_get_name(rdip);
	max_children = sizeof (acebus_name_to_pil) /
	    sizeof (struct ebus_string_to_pil);

	for (i = 0; i < max_children; i++) {
		if (strcmp(acebus_name_to_pil[i].string, name) == 0) {
			DBG2(D_INTR, ebus_p, "child name %s; match PIL %d\n",
			    acebus_name_to_pil[i].string,
			    acebus_name_to_pil[i].pil);

			hdlp->ih_pri = acebus_name_to_pil[i].pil;
			goto done;
		}
	}

	/* no name match: try the device_type property instead */
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
	    "device_type", (caddr_t)&device_type, &len) == DDI_SUCCESS) {
		max_device_types = sizeof (acebus_device_type_to_pil) /
		    sizeof (struct ebus_string_to_pil);

		for (i = 0; i < max_device_types; i++) {
			if (strcmp(acebus_device_type_to_pil[i].string,
			    device_type) == 0) {
				DBG2(D_INTR, ebus_p,
				    "Device type %s; match PIL %d\n",
				    acebus_device_type_to_pil[i].string,
				    acebus_device_type_to_pil[i].pil);

				hdlp->ih_pri =
				    acebus_device_type_to_pil[i].pil;
				break;
			}
		}

		kmem_free(device_type, len);
	}

	/*
	 * If we get here, we need to set a default value
	 * for the PIL.
	 */
	if (hdlp->ih_pri == 0) {
		hdlp->ih_pri = 1;

		cmn_err(CE_WARN, "%s%d assigning default interrupt level %d "
		    "for device %s%d", ddi_driver_name(dip),
		    ddi_get_instance(dip), hdlp->ih_pri,
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
	}

done:
	/* Pass up the request to our parent. */
	return (i_ddi_intr_ops(dip, rdip, intr_op, hdlp, result));
}
/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *
 * All others passed to parent.
 */
static int
acebus_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result)
{
#ifdef DEBUG
	ebus_devstate_t *ebus_p = get_acebus_soft_state(ddi_get_instance(dip));
#endif
	ebus_regspec_t *ebus_rp;
	int32_t reglen;
	int i, n;
	char name[10];

	switch (op) {
	case DDI_CTLOPS_INITCHILD: {
		dev_info_t *child = (dev_info_t *)arg;

		/*
		 * Set the address portion of the node name based on the
		 * address/offset.
		 */
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_INITCHILD: rdip=%s%d\n",
		    ddi_get_name(child), ddi_get_instance(child));

		/*
		 * The argument to &reglen was garbled to a "registered"
		 * sign character in this copy of the source; restored to
		 * take the address of the reglen local.
		 */
		if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&ebus_rp, &reglen) != DDI_SUCCESS) {
			DBG(D_CTLOPS, ebus_p, "can't get reg property\n");
			return (DDI_FAILURE);
		}

		(void) sprintf(name, "%x,%x", ebus_rp->addr_hi,
		    ebus_rp->addr_low);
		ddi_set_name_addr(child, name);
		kmem_free((caddr_t)ebus_rp, reglen);
		ddi_set_parent_data(child, NULL);
		return (DDI_SUCCESS);
	}
	case DDI_CTLOPS_UNINITCHILD:
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_UNINITCHILD: rdip=%s%d\n",
		    ddi_get_name((dev_info_t *)arg),
		    ddi_get_instance((dev_info_t *)arg));
		ddi_set_name_addr((dev_info_t *)arg, NULL);
		ddi_remove_minor_node((dev_info_t *)arg, NULL);
		impl_rem_dev_props((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REPORTDEV:
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_REPORTDEV: rdip=%s%d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip));
		cmn_err(CE_CONT, "?%s%d at %s%d: offset %s\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    ddi_get_name_addr(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		/* report the size of the register set *arg of the child */
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_REGSIZE: rdip=%s%d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip));
		if (getprop(rdip, "reg", &ebus_rp, &i) != DDI_SUCCESS) {
			DBG(D_CTLOPS, ebus_p, "can't get reg property\n");
			return (DDI_FAILURE);
		}
		n = i / sizeof (ebus_regspec_t);
		if (*(int *)arg < 0 || *(int *)arg >= n) {
			DBG(D_MAP, ebus_p, "rnumber out of range\n");
			kmem_free((caddr_t)ebus_rp, i);
			return (DDI_FAILURE);
		}
		*((off_t *)result) = ebus_rp[*(int *)arg].size;
		kmem_free((caddr_t)ebus_rp, i);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		/* report the number of register sets the child has */
		DBG2(D_CTLOPS, ebus_p, "DDI_CTLOPS_NREGS: rdip=%s%d\n",
		    ddi_get_name(rdip), ddi_get_instance(rdip));
		if (getprop(rdip, "reg", &ebus_rp, &i) != DDI_SUCCESS) {
			DBG(D_CTLOPS, ebus_p, "can't get reg property\n");
			return (DDI_FAILURE);
		}
		*((uint_t *)result) = i / sizeof (ebus_regspec_t);
		kmem_free((caddr_t)ebus_rp, i);
		return (DDI_SUCCESS);
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DBG2(D_CTLOPS, ebus_p, "passing request to parent: rdip=%s%d\n",
	    ddi_get_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}
/*
 * Translate an EBUS register specification into the parent's PCI
 * address space using the "ranges" entries cached in the soft state.
 *
 * On success *rp is filled in with the translated PCI regspec and
 * DDI_SUCCESS is returned; if no range entry covers the request a
 * warning naming the child node is logged and DDI_ME_REGSPEC_RANGE
 * is returned.
 */
static int
acebus_apply_range(ebus_devstate_t *ebus_p, dev_info_t *rdip,
    ebus_regspec_t *ebus_rp, pci_regspec_t *rp)
{
	struct ebus_pci_rangespec *rng = ebus_p->rangep;
	int nrange = ebus_p->range_cnt;
	int i;
	static const char out_of_range[] =
	    "Out of range register specification from device node <%s>";

	DBG3(D_MAP, ebus_p, "Range Matching Addr 0x%x.%x size 0x%x\n",
	    ebus_rp->addr_hi, ebus_rp->addr_low, ebus_rp->size);

	for (i = 0; i < nrange; i++, rng++) {
		uint_t offset;

		/* The address spaces must agree. */
		if (ebus_rp->addr_hi != rng->ebus_phys_hi)
			continue;

		/* The request must fall entirely within this range. */
		if (ebus_rp->addr_low < rng->ebus_phys_low)
			continue;
		if ((ebus_rp->addr_low + ebus_rp->size - 1) >
		    (rng->ebus_phys_low + rng->rng_size - 1))
			continue;

		offset = ebus_rp->addr_low - rng->ebus_phys_low;

		/*
		 * Use the range entry to translate the EBUS physical
		 * address into the parent's PCI space.
		 */
		rp->pci_phys_hi = rng->pci_phys_hi;
		rp->pci_phys_mid = rng->pci_phys_mid;
		rp->pci_phys_low = rng->pci_phys_low + offset;
		rp->pci_size_hi = 0;
		rp->pci_size_low = min(ebus_rp->size,
		    (rng->rng_size - offset));

		DBG2(D_MAP, ebus_p, "Child hi0x%x lo0x%x ",
		    rng->ebus_phys_hi, rng->ebus_phys_low);
		DBG4(D_MAP, ebus_p, "Parent hi0x%x "
		    "mid0x%x lo0x%x size 0x%x\n", rng->pci_phys_hi,
		    rng->pci_phys_mid, rng->pci_phys_low, rng->rng_size);

		return (DDI_SUCCESS);
	}

	cmn_err(CE_WARN, out_of_range, ddi_get_name(rdip));
	return (DDI_ME_REGSPEC_RANGE);
}
/* * bus map entry point: * * if map request is for an rnumber * get the corresponding regspec from device node * build a new regspec in our parent's format * build a new map_req with the new regspec * call up the tree to complete the mapping */ static int acebus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t off, off_t len, caddr_t *addrp) { ebus_devstate_t *ebus_p = get_acebus_soft_state(ddi_get_instance(dip)); ebus_regspec_t *ebus_rp, *ebus_regs; pci_regspec_t pci_reg; ddi_map_req_t p_map_request; int rnumber, i, n; int rval = DDI_SUCCESS; /* * Handle the mapping according to its type. */ DBG4(D_MAP, ebus_p, "rdip=%s%d: off=%x len=%x\n", ddi_get_name(rdip), ddi_get_instance(rdip), off, len); switch (mp->map_type) { case DDI_MT_REGSPEC: /* * We assume the register specification is in ebus format. * We must convert it into a PCI format regspec and pass * the request to our parent. */ DBG3(D_MAP, ebus_p, "rdip=%s%d: REGSPEC - handlep=%x\n", ddi_get_name(rdip), ddi_get_instance(rdip), mp->map_handlep); ebus_rp = (ebus_regspec_t *)mp->map_obj.rp; break; case DDI_MT_RNUMBER: /* * Get the "reg" property from the device node and convert * it to our parent's format. */ rnumber = mp->map_obj.rnumber; DBG4(D_MAP, ebus_p, "rdip=%s%d: rnumber=%x handlep=%x\n", ddi_get_name(rdip), ddi_get_instance(rdip), rnumber, mp->map_handlep); if (getprop(rdip, "reg", &ebus_regs, &i) != DDI_SUCCESS) { DBG(D_MAP, ebus_p, "can't get reg property\n"); return (DDI_ME_RNUMBER_RANGE); } n = i / sizeof (ebus_regspec_t); if (rnumber < 0 || rnumber >= n) { DBG(D_MAP, ebus_p, "rnumber out of range\n"); return (DDI_ME_RNUMBER_RANGE); } ebus_rp = &ebus_regs[rnumber]; break; default: return (DDI_ME_INVAL); } /* Adjust our reg property with offset and length */ ebus_rp->addr_low += off; if (len) ebus_rp->size = len; /* * Now we have a copy the "reg" entry we're attempting to map. * Translate this into our parents PCI address using the ranges * property. 
*/ rval = acebus_apply_range(ebus_p, rdip, ebus_rp, &pci_reg); if (mp->map_type == DDI_MT_RNUMBER) kmem_free((caddr_t)ebus_regs, i); if (rval != DDI_SUCCESS) return (rval); #ifdef ACEBUS_HOTPLUG /* * The map operation provides a translated (not a re-assigned, or * relocated) ebus address for the child in its address space(range). * Ebus address space is relocatible but its child address space * is not. As specified by their 'reg' properties, they reside * at a fixed offset in their parent's (ebus's) space. * * By setting this bit, we will not run into HostPCI nexus * trying to relocate a translated ebus address (which is already * relocated) and failing the operation. * The reason for doing this here is that the PCI hotplug configurator * always marks the ebus space as relocatible (unlike OBP) and that * information is implied for the child too, which is wrong. */ pci_reg.pci_phys_hi |= PCI_RELOCAT_B; #endif #ifdef DEBUG DBG5(D_MAP, ebus_p, "(%x,%x,%x)(%x,%x)\n", pci_reg.pci_phys_hi, pci_reg.pci_phys_mid, pci_reg.pci_phys_low, pci_reg.pci_size_hi, pci_reg.pci_size_low); #endif p_map_request = *mp; p_map_request.map_type = DDI_MT_REGSPEC; p_map_request.map_obj.rp = (struct regspec *)&pci_reg; rval = ddi_map(dip, &p_map_request, 0, 0, addrp); DBG1(D_MAP, ebus_p, "parent returned %x\n", rval); return (rval); }
/*
 * dm2s_attach - Module's attach routine.
 *
 * Allocates the per-instance soft state, initializes the lock/cv pair
 * (using an iblock cookie at SCF_EVENT_PRI so the mutex may be taken
 * from the event soft interrupt), and creates the character minor node.
 * Partial-failure teardown is driven by the ms_clean bit flags via
 * dm2s_cleanup(). Only instance 0 is supported.
 */
int
dm2s_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	dm2s_t *dm2sp;
	char name[20];

	instance = ddi_get_instance(dip);

	/* Only one instance is supported. */
	if (instance != 0) {
		cmn_err(CE_WARN, "only one instance is supported");
		return (DDI_FAILURE);
	}

	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}
	if (ddi_soft_state_zalloc(dm2s_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "softstate allocation failure");
		return (DDI_FAILURE);
	}
	dm2sp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate, instance);
	if (dm2sp == NULL) {
		ddi_soft_state_free(dm2s_softstate, instance);
		cmn_err(CE_WARN, "softstate allocation failure.");
		return (DDI_FAILURE);
	}
	dm2sp->ms_dip = dip;
	dm2sp->ms_major = ddi_name_to_major(ddi_get_name(dip));
	dm2sp->ms_ppa = instance;

	/*
	 * Get an interrupt block cookie corresponding to the
	 * interrupt priority of the event handler.
	 * Assert that the event priority is not re-defined to
	 * some higher priority.
	 */
	/* LINTED */
	ASSERT(SCF_EVENT_PRI == DDI_SOFTINT_LOW);
	if (ddi_get_soft_iblock_cookie(dip, SCF_EVENT_PRI,
	    &dm2sp->ms_ibcookie) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_get_soft_iblock_cookie failed.");
		goto error;
	}
	mutex_init(&dm2sp->ms_lock, NULL, MUTEX_DRIVER,
	    (void *)dm2sp->ms_ibcookie);

	dm2sp->ms_clean |= DM2S_CLEAN_LOCK;
	cv_init(&dm2sp->ms_wait, NULL, CV_DRIVER, NULL);
	dm2sp->ms_clean |= DM2S_CLEAN_CV;

	/*
	 * Bound the node-name construction; sprintf would overflow
	 * name[] if DM2S_MODNAME ever grew.
	 */
	(void) snprintf(name, sizeof (name), "%s%d", DM2S_MODNAME, instance);
	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_PSEUDO, NULL) == DDI_FAILURE) {
		ddi_remove_minor_node(dip, NULL);
		cmn_err(CE_WARN, "Device node creation failed.");
		goto error;
	}

	dm2sp->ms_clean |= DM2S_CLEAN_NODE;
	ddi_set_driver_private(dip, (caddr_t)dm2sp);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);
error:
	/* Tear down exactly what ms_clean says was initialized. */
	dm2s_cleanup(dm2sp);
	return (DDI_FAILURE);
}