/*
 * Exported interface to unregister a LDC endpoint with
 * the channel nexus
 *
 * Removes the channel identified by 'id' from the per-instance channel
 * list, destroys its lock and frees it.  Fails with EINVAL if the channel
 * is not on the list, or ENXIO if tx/rx interrupt handlers are still
 * registered (the caller must remove handlers first).
 */
static int
cnex_unreg_chan(dev_info_t *dip, uint64_t id)
{
	cnex_ldc_t	*cldcp, *prev_cldcp;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* find and remove channel from list */
	mutex_enter(&cnex_ssp->clist_lock);
	prev_cldcp = NULL;
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		prev_cldcp = cldcp;
		cldcp = cldcp->next;
	}

	if (cldcp == NULL) {
		/* %lx matches the 64-bit channel id (was %d, a mismatch) */
		DWARN("cnex_unreg_chan: invalid channel %lx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	/* channels with live interrupt handlers cannot be unregistered */
	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
		DWARN("cnex_unreg_chan: handlers still exist: chan %lx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}

	/* unlink the channel from the list */
	if (prev_cldcp)
		prev_cldcp->next = cldcp->next;
	else
		cnex_ssp->clist = cldcp->next;

	mutex_exit(&cnex_ssp->clist_lock);

	/* destroy mutex */
	mutex_destroy(&cldcp->lock);

	/* free channel */
	kmem_free(cldcp, sizeof (*cldcp));

	return (0);
}
/*
 * for client leaf drivers to register their desire for rmc_comm
 * to stay attached
 *
 * Returns DDI_SUCCESS and bumps the registration count when instance 0
 * exists and is attached; DDI_FAILURE otherwise.
 */
int
rmc_comm_register()
{
	struct rmc_comm_state *rcs;
	int rv = DDI_FAILURE;

	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	if (rcs != NULL && rcs->is_attached) {
		rcs->n_registrations++;
		rv = DDI_SUCCESS;
	}
	mutex_exit(&rmc_comm_attach_lock);
	return (rv);
}
/* ARGSUSED */
static int
tvhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	/* character-device opens only */
	if (otype != OTYP_CHR)
		return (EINVAL);

	/* minor number indexes the per-instance soft state */
	if (ddi_get_soft_state(tvhci_state, getminor(dev)) == NULL)
		return (ENXIO);

	return (0);
}
/*
 * Platform peek/poke handler for the pcieb nexus.  Non-root-port bridges
 * simply pass the request up the device tree; root ports go through
 * pci_peekpoke_check() with the instance's error/peekpoke mutexes.
 */
int
pcieb_plat_peekpoke(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pcieb_devstate_t *pcieb;

	pcieb = ddi_get_soft_state(pcieb_state, ddi_get_instance(dip));

	if (!PCIE_IS_RP(PCIE_DIP2BUS(dip)))
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));

	return (pci_peekpoke_check(dip, rdip, ctlop, arg, result, ddi_ctlops,
	    &pcieb->pcieb_err_mutex, &pcieb->pcieb_peek_poke_mutex,
	    pcieb_peekpoke_cb));
}
/*
 * Warn that a requested system shutdown did not complete and clear the
 * pending-shutdown flag.  Uses the file-scope 'instance' to locate the
 * soft state.
 */
static void
ds1287_log_message(void)
{
	struct ds1287 *softsp;

	softsp = ddi_get_soft_state(ds1287_state, instance);
	if (softsp == NULL) {
		cmn_err(CE_WARN, "ds1287: Failed to get internal state!");
		return;
	}

	mutex_enter(&softsp->ds1287_mutex);
	softsp->shutdown_pending = 0;
	cmn_err(CE_WARN, "ds1287: Failed to shut down the system!");
	mutex_exit(&softsp->ds1287_mutex);
}
/*ARGSUSED*/
static int
tcli_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int inst = MINOR_TO_INST(getminor(dev));

	/* no commands implemented; only validate that the instance exists */
	if (ddi_get_soft_state(dstates, inst) == NULL)
		return (ENXIO);

	return (0);
}
static int todds1307_write_rtc(struct rtc_t *rtc) { ds1307_state_t *statep = NULL; i2c_transfer_t *i2c_tp = NULL; int i2c_cmd_status = I2C_SUCCESS; if (!todds1307_attach_done) { return (todds1307_prom_setdate(rtc)); } statep = ddi_get_soft_state(ds1307_statep, instance); if (statep == NULL) { return (DDI_FAILURE); } if ((i2c_cmd_status = i2c_transfer_alloc(statep->ds1307_i2c_hdl, &i2c_tp, 8, 0, I2C_SLEEP)) != I2C_SUCCESS) { return (i2c_cmd_status); } i2c_tp->i2c_version = I2C_XFER_REV; i2c_tp->i2c_flags = I2C_WR; i2c_tp->i2c_wbuf[0] = (uchar_t)0x00; i2c_tp->i2c_wbuf[1] = rtc->rtc_sec; i2c_tp->i2c_wbuf[2] = rtc->rtc_min; i2c_tp->i2c_wbuf[3] = rtc->rtc_hrs; i2c_tp->i2c_wbuf[4] = rtc->rtc_dow; i2c_tp->i2c_wbuf[5] = rtc->rtc_dom; i2c_tp->i2c_wbuf[6] = rtc->rtc_mon; i2c_tp->i2c_wbuf[7] = rtc->rtc_year; i2c_tp->i2c_wlen = 8; if ((i2c_cmd_status = i2c_transfer(statep->ds1307_i2c_hdl, i2c_tp)) != I2C_SUCCESS) { (void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp); /* delay(drv_usectohz(I2C_DELAY)); */ drv_usecwait(I2C_DELAY); return (i2c_cmd_status); } tod_read[0] = -1; /* invalidate saved data from read routine */ (void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp); return (i2c_cmd_status); }
/*
 * xpvtap_chpoll()
 *
 * Poll entry point.  Reports POLLIN | POLLRDNORM when requests have been
 * pushed onto the user ring since the last poll; otherwise arms the
 * pollhead for later wakeup.
 */
static int
xpvtap_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	xpvtap_user_ring_t *usring;
	xpvtap_state_t *state;
	int instance;

	instance = getminor(dev);
	if (instance == -1) {
		return (EBADF);
	}
	state = ddi_get_soft_state(xpvtap_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	/* only read-side events are supported on this device */
	if (((events & (POLLIN | POLLRDNORM)) == 0) && !anyyet) {
		return (EINVAL);
	}

	/*
	 * if we pushed requests on the user ring since the last poll, wakeup
	 * the user app
	 */
	*reventsp = 0;
	usring = &state->bt_user_ring;
	if (usring->ur_prod_polled != usring->ur_ring.req_prod_pvt) {

		/*
		 * XXX - is this faster here or xpvtap_user_request_push??
		 * prelim data says here.  Because less membars or because
		 * user thread will spin in poll requests before getting to
		 * responses?
		 */
		RING_PUSH_REQUESTS(&usring->ur_ring);
		usring->ur_prod_polled = usring->ur_ring.sring->req_prod;
		*reventsp = POLLIN | POLLRDNORM;
	}

	/* arm the pollhead when nothing is pending, or for edge-triggered */
	if ((*reventsp == 0 && !anyyet) || (events & POLLET)) {
		*phpp = &state->bt_pollhead;
	}

	return (0);
}
/*
 * heci_attach - Driver Attach Routine
 *
 * Allocates per-instance soft state, initializes the HECI device and
 * creates the "AMT" character minor node.  DDI_RESUME is forwarded to
 * heci_resume(); any other command fails.
 */
static int
heci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance, status;
	struct iamt_heci_device *device;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		heci_resume(dip);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	DBG("%s - version %s\n", heci_driver_string, heci_driver_version);
	DBG("%s\n", heci_copyright);

	instance = ddi_get_instance(dip);	/* find out which unit */

	status = ddi_soft_state_zalloc(heci_soft_state_p, instance);
	if (status != DDI_SUCCESS)
		return (DDI_FAILURE);
	device = ddi_get_soft_state(heci_soft_state_p, instance);
	ASSERT(device != NULL);	/* can't fail - we only just allocated it */

	device->dip = dip;

	status = heci_initialize(dip, device);
	if (status != DDI_SUCCESS) {
		ddi_soft_state_free(heci_soft_state_p, instance);
		return (DDI_FAILURE);
	}

	status = ddi_create_minor_node(dip, "AMT", S_IFCHR,
	    MAKE_MINOR_NUM(HECI_MINOR_NUMBER, instance), DDI_PSEUDO, 0);
	if (status != DDI_SUCCESS) {
		/*
		 * NOTE(review): whatever heci_initialize() acquired is not
		 * visibly released on this path -- TODO confirm whether a
		 * matching teardown call is needed here.
		 */
		ddi_remove_minor_node(dip, NULL);
		ddi_soft_state_free(heci_soft_state_p, instance);
		return (DDI_FAILURE);
	}

	return (status);
}
/*
 * Attach routine for the AGP target driver.
 *
 * Allocates soft state, maps PCI config space, locates the AGP
 * capability (with an Intel-chipset fallback) and creates the minor
 * node.  All failure paths now release everything acquired so far;
 * previously the "capability not found, non-Intel" path leaked the
 * config handle, mutex and soft state, and the pci_config_setup()
 * failure path leaked the mutex.
 */
static int
agp_target_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	agp_target_softstate_t *softstate;
	int instance;
	int status;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agptarget_glob_soft_handle, instance) !=
	    DDI_SUCCESS)
		return (DDI_FAILURE);

	softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
	mutex_init(&softstate->tsoft_lock, NULL, MUTEX_DRIVER, NULL);
	softstate->tsoft_dip = dip;
	status = pci_config_setup(dip, &softstate->tsoft_pcihdl);
	if (status != DDI_SUCCESS) {
		mutex_destroy(&softstate->tsoft_lock);
		ddi_soft_state_free(agptarget_glob_soft_handle, instance);
		return (DDI_FAILURE);
	}
	softstate->tsoft_devid = pci_config_get32(softstate->tsoft_pcihdl,
	    PCI_CONF_VENID);
	softstate->tsoft_acaptr = agp_target_cap_find(softstate->tsoft_pcihdl);
	if (softstate->tsoft_acaptr == 0) {
		/* Make a correction for some Intel chipsets */
		if ((softstate->tsoft_devid & VENDOR_ID_MASK) ==
		    INTEL_VENDOR_ID) {
			softstate->tsoft_acaptr = AGP_CAP_OFF_DEF;
		} else {
			/* release everything acquired above (was leaked) */
			pci_config_teardown(&softstate->tsoft_pcihdl);
			mutex_destroy(&softstate->tsoft_lock);
			ddi_soft_state_free(agptarget_glob_soft_handle,
			    instance);
			return (DDI_FAILURE);
		}
	}

	status = ddi_create_minor_node(dip, AGPTARGET_NAME, S_IFCHR,
	    INST2NODENUM(instance), DDI_NT_AGP_TARGET, 0);
	if (status != DDI_SUCCESS) {
		pci_config_teardown(&softstate->tsoft_pcihdl);
		mutex_destroy(&softstate->tsoft_lock);
		ddi_soft_state_free(agptarget_glob_soft_handle, instance);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/* ARGSUSED */
static int
tphci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	/* character-device opens only */
	if (otype != OTYP_CHR)
		return (EINVAL);

	/* minor number indexes the per-instance soft state */
	if (ddi_get_soft_state(tphci_state, getminor(*devp)) == NULL)
		return (ENXIO);

	return (0);
}
/*
 * Report an address to per-address report
 *
 * damapp:	address map handle
 * address:	address in ascii string representation
 * addridp:	address ID
 * nvl:		optional nvlist of configuration-private data
 * addr_priv:	optional provider-private (passed to activate/deactivate cb)
 *
 * Returns:	DAM_SUCCESS
 *		DAM_EINVAL	Invalid argument(s)
 *		DAM_MAPFULL	address map exhausted
 */
int
damap_addr_add(damap_t *damapp, char *address, damap_id_t *addridp,
    nvlist_t *nvl, void *addr_priv)
{
	dam_t *mapp = (dam_t *)damapp;
	id_t addrid;
	dam_da_t *passp;

	/* only valid for maps in per-address report mode */
	if (!mapp || !address || (mapp->dam_rptmode != DAMAP_REPORT_PERADDR))
		return (DAM_EINVAL);

	DTRACE_PROBE3(damap__addr__add,
	    char *, mapp->dam_name, dam_t *, mapp,
	    char *, address);

	mutex_enter(&mapp->dam_lock);
	/* allocate map resources and an id for this address string */
	if ((dam_map_alloc(mapp) != DAM_SUCCESS) ||
	    ((addrid = dam_get_addrid(mapp, address)) == 0)) {
		mutex_exit(&mapp->dam_lock);
		return (DAM_MAPFULL);
	}

	passp = ddi_get_soft_state(mapp->dam_da, addrid);
	ASSERT(passp != NULL);

	/*
	 * If re-reporting the same address (add or remove) clear
	 * the existing report
	 */
	if (DAM_IN_REPORT(mapp, addrid)) {
		DTRACE_PROBE3(damap__addr__add__jitter,
		    char *, mapp->dam_name, dam_t *, mapp,
		    char *, address);
		DAM_INCR_STAT(mapp, dam_jitter);
		dam_addr_report_release(mapp, addrid);
		passp->da_jitter++;
	}
	passp->da_ppriv_rpt = addr_priv;
	if (nvl)
		(void) nvlist_dup(nvl, &passp->da_nvl_rpt, KM_SLEEP);

	/* queue the add report for this address */
	dam_addr_report(mapp, passp, addrid, RPT_ADDR_ADD);
	if (addridp != NULL)
		*addridp = (damap_id_t)addrid;
	mutex_exit(&mapp->dam_lock);
	return (DAM_SUCCESS);
}
/*
 * Property entry point.  Requests for bridges with a PCIe parent bus are
 * routed through pcie_prop_op(); all others go through the hotplug
 * framework's cb_prop_op.
 */
static int
ppb_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	ppb_devstate_t *ppb_p;
	int instance;

	instance = PCI_MINOR_NUM_TO_INSTANCE(getminor(dev));
	ppb_p = ddi_get_soft_state(ppb_state, instance);
	if (ppb_p == NULL)
		return (ENXIO);

	if (ppb_p->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV)
		return (pcie_prop_op(dev, dip, prop_op, flags, name,
		    valuep, lengthp));

	return ((pcihp_get_cb_ops())->cb_prop_op(dev, dip, prop_op, flags,
	    name, valuep, lengthp));
}
/*
 * Close entry point for the USB monitor node.  Drops the global client
 * count, unregisters the driver-election callback when the last client
 * goes away, removes this client's USB filters and frees its soft state.
 */
static int VBoxUSBMonSolarisClose(dev_t Dev, int fFlag, int fType, cred_t *pCred)
{
    vboxusbmon_state_t *pState = NULL;
    LogFunc((DEVICE_NAME ":VBoxUSBMonSolarisClose\n"));

    pState = ddi_get_soft_state(g_pVBoxUSBMonSolarisState, getminor(Dev));
    if (!pState)
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisClose: failed to get pState.\n"));
        return EFAULT;
    }

    /*
     * Last client gone: deregister the USB driver-election callback.
     * The mutex is released before usb_unregister_dev_driver() on each
     * branch, hence the asymmetric mutex_exit calls below.
     */
    mutex_enter(&g_VBoxUSBMonSolarisMtx);
    g_cVBoxUSBMonSolarisClient--;
    if (!g_cVBoxUSBMonSolarisClient)
    {
        if (RT_LIKELY(g_pDip))
        {
            mutex_exit(&g_VBoxUSBMonSolarisMtx);
            usb_unregister_dev_driver(g_pDip);
            Log((DEVICE_NAME ":Successfully deregistered driver election callback\n"));
        }
        else
        {
            mutex_exit(&g_VBoxUSBMonSolarisMtx);
            LogRel((DEVICE_NAME ":Extreme error! Missing device info during close.\n"));
        }
    }
    else
        mutex_exit(&g_VBoxUSBMonSolarisMtx);

    /*
     * Remove all filters for this client process.
     */
    VBoxUSBFilterRemoveOwner(pState->Process);

    ddi_soft_state_free(g_pVBoxUSBMonSolarisState, getminor(Dev));
    pState = NULL;

    NOREF(fFlag);
    NOREF(fType);
    NOREF(pCred);

    return 0;
}
/* * Helper used by smbfs_mount */ int smb_dev2share(int fd, struct smb_share **sspp) { file_t *fp = NULL; vnode_t *vp; smb_dev_t *sdp; smb_share_t *ssp; dev_t dev; int err; if ((fp = getf(fd)) == NULL) return (EBADF); /* rele fp below */ vp = fp->f_vnode; dev = vp->v_rdev; if (dev == 0 || dev == NODEV || getmajor(dev) != nsmb_major) { err = EINVAL; goto out; } sdp = ddi_get_soft_state(statep, getminor(dev)); if (sdp == NULL) { err = EINVAL; goto out; } ssp = sdp->sd_share; if (ssp == NULL) { err = ENOTCONN; goto out; } /* * Our caller gains a ref. to this share. */ *sspp = ssp; smb_share_hold(ssp); err = 0; out: if (fp) releasef(fd); return (err); }
/*
 * Close entry point.  Shuts down the per-instance worker thread, drops
 * the cached user-mapping state, and signals anyone waiting for the user
 * app to close the driver.
 */
/*ARGSUSED*/
static int
xpvtap_close(dev_t devp, int flag, int otyp, cred_t *cred)
{
	xpvtap_state_t *state;
	int instance;

	instance = getminor(devp);
	state = ddi_get_soft_state(xpvtap_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	/*
	 * wake thread so it can cleanup and wait for it to exit so we can
	 * be sure it's not in the middle of processing a request/response.
	 */
	mutex_enter(&state->bt_thread.ut_mutex);
	state->bt_thread.ut_wake = B_TRUE;
	state->bt_thread.ut_exit = B_TRUE;
	cv_signal(&state->bt_thread.ut_wake_cv);
	if (!state->bt_thread.ut_exit_done) {
		cv_wait(&state->bt_thread.ut_exit_done_cv,
		    &state->bt_thread.ut_mutex);
	}
	ASSERT(state->bt_thread.ut_exit_done);
	mutex_exit(&state->bt_thread.ut_mutex);

	/* the thread is gone; drop the cached user address-space mapping */
	state->bt_map.um_as = NULL;
	state->bt_map.um_guest_pages = NULL;

	/*
	 * when the ring is brought down, a userland hotplug script is run
	 * which tries to bring the userland app down. We'll wait for a bit
	 * for the user app to exit. Notify the thread waiting that the app
	 * has closed the driver.
	 */
	mutex_enter(&state->bt_open.bo_mutex);
	ASSERT(state->bt_open.bo_opened);
	state->bt_open.bo_opened = B_FALSE;
	cv_signal(&state->bt_open.bo_exit_cv);
	mutex_exit(&state->bt_open.bo_mutex);

	return (0);
}
/*
 * Open entry point.  Character opens only; requires privilege.  Honors
 * FEXCL: an exclusive open succeeds only when the device is not already
 * open, and a non-exclusive open fails while an exclusive open is held.
 * Returns EBUSY when the requested access cannot be granted.
 */
static int
ics951601_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	ics951601_unit_t *icsp;
	int instance;
	int rv = EBUSY;

	/*
	 * Make sure the open is for the right file type
	 */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(*devp);
	if (instance < 0)
		return (ENXIO);

	icsp = (ics951601_unit_t *)ddi_get_soft_state(ics951601_soft_statep,
	    instance);
	if (icsp == NULL)
		return (ENXIO);

	/* must be privileged to access this device */
	if (drv_priv(credp) != 0)
		return (EPERM);

	/*
	 * Enforce exclusive access if required
	 */
	mutex_enter(&icsp->ics951601_mutex);
	if ((flags & FEXCL) != 0) {
		/* exclusive open only succeeds if nobody has it open */
		if (icsp->ics951601_oflag == 0) {
			icsp->ics951601_oflag = FEXCL;
			rv = DDI_SUCCESS;
		}
	} else if (icsp->ics951601_oflag != FEXCL) {
		icsp->ics951601_oflag = (uint16_t)FOPEN;
		rv = DDI_SUCCESS;
	}
	mutex_exit(&icsp->ics951601_mutex);

	return (rv);
}
/*
 * Attach routine for the nsmb pseudo driver.  Only a single instance
 * (instance 0) is supported; per-open devices are cloned in the open
 * routine.  Allocates the zero'th soft state and creates the "nsmb"
 * minor node under dev_lck.
 */
static int
nsmb_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	smb_dev_t *sdp;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	/*
	 * only one instance - but we clone using the open routine
	 */
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);

	mutex_enter(&dev_lck);

	/*
	 * This is the Zero'th minor device which is created.
	 */
	if (ddi_soft_state_zalloc(statep, 0) == DDI_FAILURE) {
		cmn_err(CE_WARN, "nsmb_attach: soft state alloc");
		goto attach_failed;
	}
	if (ddi_create_minor_node(dip, "nsmb", S_IFCHR, 0, DDI_PSEUDO,
	    NULL) == DDI_FAILURE) {
		cmn_err(CE_WARN, "nsmb_attach: create minor");
		goto attach_failed;
	}
	if ((sdp = ddi_get_soft_state(statep, 0)) == NULL) {
		cmn_err(CE_WARN, "nsmb_attach: get soft state");
		ddi_remove_minor_node(dip, NULL);
		goto attach_failed;
	}

	/* initialize the zero'th device state */
	sdp->sd_dip = dip;
	sdp->sd_seq = 0;
	sdp->sd_smbfid = -1;

	mutex_exit(&dev_lck);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_failed:
	/* freeing soft state 0 is harmless even if zalloc failed above */
	ddi_soft_state_free(statep, 0);
	mutex_exit(&dev_lck);
	return (DDI_FAILURE);
}
/*
 * getinfo(9E) entry point: translate a dev_t into either its dev_info
 * pointer or its instance number.
 */
/*ARGSUSED*/
static int
zc_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int instance = ZC_INSTANCE((dev_t)arg);
	zc_state_t *zcs;

	if (infocmd == DDI_INFO_DEVT2INSTANCE) {
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);
	}

	if (infocmd == DDI_INFO_DEVT2DEVINFO) {
		zcs = ddi_get_soft_state(zc_soft_state, instance);
		if (zcs == NULL)
			return (DDI_FAILURE);
		*result = zcs->zc_devinfo;
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
/*ARGSUSED*/
static int
tcli_close(dev_t dev, int flag, int otyp, cred_t *cred)
{
	struct dstate *dsp;

	/* accept block and character opens only */
	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
		return (EINVAL);

	dsp = ddi_get_soft_state(dstates, MINOR_TO_INST(getminor(dev)));
	if (dsp == NULL)
		return (ENXIO);

	/* mark the device closed */
	dsp->oflag = 0;
	return (0);
}
/*ARGSUSED*/
static int
tcli_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
	struct dstate *dsp;

	/* accept block and character opens only */
	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
		return (EINVAL);

	dsp = ddi_get_soft_state(dstates, MINOR_TO_INST(getminor(*devp)));
	if (dsp == NULL)
		return (ENXIO);

	/* mark the device open */
	dsp->oflag = 1;
	return (0);
}
/*
 * Find the zvol with the given name by scanning every minor's soft
 * state.  Returns NULL when no zvol matches.
 *
 * Bug fix: the previous version fell out of the loop with 'zv' still
 * holding the soft state of the LAST minor probed, so a failed lookup
 * could return a non-matching zvol instead of NULL whenever the highest
 * populated minor did not match.
 */
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) {
		zv = ddi_get_soft_state(zvol_state, minor);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}
/*
 * Tear down a child node: release PCIe FMA resources when the bridge's
 * parent bus is PCIe, then remove the child.
 */
static void
ppb_uninitchild(dev_info_t *child)
{
	ppb_devstate_t *ppb;

	/* per-instance state is keyed by the parent bridge's instance */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	/*
	 * SG OPL FMA specific
	 *
	 * NOTE(review): ppb is dereferenced without a NULL check;
	 * presumably uninitchild can only run after the parent bridge
	 * attached successfully -- confirm.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		pcie_fini_dom(child);
		pcie_fini_cfghdl(child);
	}

	ppb_removechild(child);
}
/*
 * Read entry point.  Does not transfer any data; it synchronizes the
 * session's mouse-position change sequence number with the device
 * extension's current value.
 *
 * Bug fix: the failure log message said "::Close" (copy/paste from the
 * close routine); it now correctly identifies the read path.
 */
static int VBoxGuestSolarisRead(dev_t Dev, struct uio *pUio, cred_t *pCred)
{
    LogFlow((DEVICE_NAME "::Read\n"));

    vboxguest_state_t *pState = ddi_get_soft_state(g_pVBoxGuestSolarisState,
                                                   getminor(Dev));
    if (!pState)
    {
        Log((DEVICE_NAME "::Read: failed to get pState.\n"));
        return EFAULT;
    }

    PVBOXGUESTSESSION pSession = pState->pSession;
    uint32_t u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
    if (pSession->u32MousePosChangedSeq != u32CurSeq)
        pSession->u32MousePosChangedSeq = u32CurSeq;

    return 0;
}
/*
 * getinfo(9E) entry point.  Maps a dev_t to its dev_info pointer or
 * instance number by first resolving the PCMCIA socket to an instance
 * through Card Services (csx_CS_DDI_Info).
 */
/* ARGSUSED */
int
pcata_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	ata_soft_t *softp;
	int ret;
	cs_ddi_info_t cs_ddi_info;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
	case DDI_INFO_DEVT2INSTANCE:
		/* ask Card Services which instance owns this socket */
		cs_ddi_info.Socket = PCATA_SOCKET((dev_t)arg);
		cs_ddi_info.driver_name = pcata_name;
		ret = csx_CS_DDI_Info(&cs_ddi_info);
		if (ret != CS_SUCCESS) {
#ifdef ATA_DEBUG
			cmn_err(CE_CONT, "_getinfo: "
			    "socket %d CS_DD_Info failed %s (0x%x)\n",
			    cs_ddi_info.Socket,
			    pcata_CS_etext(ret),
			    ret);
#endif
			return (DDI_FAILURE);
		}

		switch (cmd) {
		case DDI_INFO_DEVT2DEVINFO:
			softp = ddi_get_soft_state(pcata_soft,
			    cs_ddi_info.instance);
			/* NULL result if the instance has no soft state */
			*result = NULL;
			if (softp) {
				*result = softp->dip;
			}
			break;
		case DDI_INFO_DEVT2INSTANCE:
			*result = (void *)(uintptr_t)cs_ddi_info.instance;
			break;
		} /* switch */
		break;
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*
 * Detach routine.  DDI_SUSPEND always succeeds.  DDI_DETACH proceeds only
 * if the board is detachable (checked under the fhc board-list lock);
 * it then deletes the board kstat, unmaps the status registers and frees
 * the soft state.
 */
/* ARGSUSED */
static int
simmstat_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	struct simmstat_soft_state *softsp;

	/* get the instance of this devi */
	instance = ddi_get_instance(devi);

	/* get the soft state pointer for this device node */
	softsp = ddi_get_soft_state(simmstatp, instance);

	switch (cmd) {
	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * hold the board list lock across the detachable check;
		 * a detachable board falls through to the teardown below
		 * with the lock still held.
		 */
		(void) fhc_bdlist_lock(softsp->board);
		if (fhc_bd_detachable(softsp->board))
			break;
		else
			fhc_bdlist_unlock();
		/* FALLTHROUGH */

	default:
		return (DDI_FAILURE);
	}

	fhc_bdlist_unlock();

	/* remove the kstat for this board */
	kstat_delete(softsp->simmstat_ksp);

	/* unmap the registers */
	ddi_unmap_regs(softsp->dip, 0,
	    (caddr_t *)&softsp->simmstat_base, 0, 0);

	/* free up the soft state */
	ddi_soft_state_free(simmstatp, instance);
	ddi_prop_remove_all(devi);

	return (DDI_SUCCESS);
}
/*
 * Detach routine for the acpinex nexus.  DDI_SUSPEND is a no-op success;
 * DDI_DETACH stops event scanning, removes the minor node, tears down
 * FMA support and frees the instance soft state.
 */
static int
acpinex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	acpinex_softstate_t *softsp;
	int instance;

	instance = ddi_get_instance(devi);
	if (instance >= ACPINEX_INSTANCE_MAX) {
		cmn_err(CE_WARN, "acpinex: instance number %d is out of range "
		    "in acpinex_detach(), max %d.",
		    instance, ACPINEX_INSTANCE_MAX - 1);
		return (DDI_FAILURE);
	}

	softsp = ddi_get_soft_state(acpinex_softstates, instance);
	if (softsp == NULL) {
		ACPINEX_DEBUG(CE_WARN, "!acpinex: failed to get soft state "
		    "object for instance %d in acpinex_detach()", instance);
		return (DDI_FAILURE);
	}

	if (cmd == DDI_SUSPEND)
		return (DDI_SUCCESS);
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	/* stop event scanning before tearing anything down */
	if (acpinex_event_scan(softsp, B_FALSE) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!acpinex: failed to uninstall event "
		    "handler for children of %s.", softsp->ans_path);
		return (DDI_FAILURE);
	}
	ddi_remove_minor_node(devi, NULL);
	acpinex_fm_fini(softsp);
	mutex_destroy(&softsp->ans_lock);
	ddi_soft_state_free(acpinex_softstates, instance);
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, devi,
	    DDI_NO_AUTODETACH, 0);

	return (DDI_SUCCESS);
}
/*
 * FMA registered error callback
 *
 * On PCIe-parented platforms the SPARC PCI-E framework handles the error,
 * so report DDI_FM_OK; on SPARC PCI platforms post an ereport and return
 * the framework's status.
 */
static int
ppb_err_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *impl_data)
{
	ppb_devstate_t *ppb_p;

	ppb_p = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(dip));

	if (ppb_p->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV)
		return (DDI_FM_OK);

	ASSERT(impl_data == NULL);
	pci_ereport_post(dip, derr, NULL);
	return (derr->fme_status);
}
/*
 * Detach routine: remove the minor node and release the config-space
 * handle, mutex and soft state created in attach.
 */
/*ARGSUSED*/
static int
agp_target_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	agp_target_softstate_t *softstate;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	/*
	 * NOTE(review): softstate is used without a NULL check; presumably
	 * detach only runs after a successful attach -- confirm.
	 */
	softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);

	ddi_remove_minor_node(dip, AGPTARGET_NAME);
	pci_config_teardown(&softstate->tsoft_pcihdl);
	mutex_destroy(&softstate->tsoft_lock);
	ddi_soft_state_free(agptarget_glob_soft_handle, instance);
	return (DDI_SUCCESS);
}
/*
 * Close entry point for a zvol.  Minor 0 is the control device and has
 * no per-open state.  Decrements the per-otyp and total open counts
 * under zvol_state_lock.
 */
/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;

	if (minor == 0)		/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	/*
	 * The next statement is a workaround for the following DDI bug:
	 * 6343604 specfs race: multiple "last-close" of the same device
	 */
	if (zv->zv_total_opens == 0) {
		mutex_exit(&zvol_state_lock);
		return (0);
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	mutex_exit(&zvol_state_lock);

	return (0);
}