/*
 * attach(9E) for the tcli pseudo driver: allocate per-instance soft
 * state and create the "client" minor node.
 */
/*ARGSUSED*/
static int
tcli_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int inst = ddi_get_instance(devi);
	struct dstate *sp;

	/* Anything other than a plain attach is treated as a no-op. */
	if (cmd != DDI_ATTACH)
		return (DDI_SUCCESS);

	if (ddi_soft_state_zalloc(dstates, inst) != DDI_SUCCESS) {
		cmn_err(CE_CONT, "%s%d: can't allocate state\n",
		    ddi_get_name(devi), inst);
		return (DDI_FAILURE);
	}

	sp = ddi_get_soft_state(dstates, inst);
	sp->dip = devi;

	if (ddi_create_minor_node(devi, "client", S_IFCHR,
	    (INST_TO_MINOR(inst)), DDI_PSEUDO, NULL) == DDI_FAILURE) {
		/* Undo the partial attach before failing. */
		ddi_remove_minor_node(devi, NULL);
		ddi_soft_state_free(dstates, inst);
		cmn_err(CE_WARN, "%s%d: can't create minor nodes",
		    ddi_get_name(devi), inst);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}
/*
 * attach(9E) for the pseudo nexus: set up soft state and the "devctl"
 * minor node for this instance.
 */
/*ARGSUSED*/
static int
pseudonex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	pseudonex_state_t *statep;
	int inst;

	/* Resume needs no work; anything but attach/resume fails. */
	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Save the devi for this instance in the soft_state data.
	 */
	inst = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(pseudonex_state, inst) != DDI_SUCCESS)
		return (DDI_FAILURE);
	statep = ddi_get_soft_state(pseudonex_state, inst);
	statep->pnx_devi = devi;

	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, inst,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		/* Roll back on failure. */
		ddi_remove_minor_node(devi, NULL);
		ddi_soft_state_free(pseudonex_state, inst);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}
/*
 * attach(9E) for the zone console driver: allocate soft state and
 * create the master/slave minor-node pair for this instance.
 */
static int
zc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	zc_state_t *sp;
	int inst;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	inst = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(zc_soft_state, inst) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Create the master and slave minor nodes.  The minor number
	 * encodes the instance in the upper bits and the node type
	 * (master/slave) in bit 0.
	 */
	if (ddi_create_minor_node(dip, ZCONS_SLAVE_NAME, S_IFCHR,
	    inst << 1 | ZC_SLAVE_MINOR, DDI_PSEUDO, 0) == DDI_FAILURE ||
	    ddi_create_minor_node(dip, ZCONS_MASTER_NAME, S_IFCHR,
	    inst << 1 | ZC_MASTER_MINOR, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(dip, NULL);
		ddi_soft_state_free(zc_soft_state, inst);
		return (DDI_FAILURE);
	}

	VERIFY((sp = ddi_get_soft_state(zc_soft_state, inst)) != NULL);
	sp->zc_devinfo = dip;

	return (DDI_SUCCESS);
}
/*
 * attach(9E): attach a device to the system.
 * called once for each instance of the device on the system.
 */
static int
vldc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int i, instance;
	vldc_t *vldcp;

	switch (cmd) {
	case DDI_ATTACH:

		instance = ddi_get_instance(dip);

		if (ddi_soft_state_zalloc(vldc_ssp, instance) !=
		    DDI_SUCCESS) {
			return (DDI_FAILURE);
		}

		vldcp = ddi_get_soft_state(vldc_ssp, instance);
		if (vldcp == NULL) {
			ddi_soft_state_free(vldc_ssp, instance);
			/*
			 * BUG FIX: attach(9E) must return DDI_FAILURE;
			 * the old code returned ENXIO, an errno value
			 * that is not a valid attach(9E) return.
			 */
			return (DDI_FAILURE);
		}

		D1("vldc_attach: DDI_ATTACH instance=%d\n", instance);

		mutex_init(&vldcp->lock, NULL, MUTEX_DRIVER, NULL);
		vldcp->dip = dip;
		vldcp->detaching = B_FALSE;

		for (i = 0; i < VLDC_MAX_PORTS; i++) {
			/* No minor node association to start with */
			vldcp->port[i].minorp = NULL;
		}

		for (i = 0; i < VLDC_MAX_MINORS; i++) {
			mutex_init(&(vldcp->minor_tbl[i].lock), NULL,
			    MUTEX_DRIVER, NULL);
			cv_init(&(vldcp->minor_tbl[i].cv), NULL,
			    CV_DRIVER, NULL);
			/* No port association to start with */
			vldcp->minor_tbl[i].portno = VLDC_INVALID_PORTNO;
		}

		/* Register for MD update notification */
		if (i_vldc_mdeg_register(vldcp) != DDI_SUCCESS) {
			/*
			 * BUG FIX: destroy the mutexes and CVs
			 * initialized above before freeing the soft
			 * state; the old code leaked them.
			 */
			for (i = 0; i < VLDC_MAX_MINORS; i++) {
				cv_destroy(&(vldcp->minor_tbl[i].cv));
				mutex_destroy(&(vldcp->minor_tbl[i].lock));
			}
			mutex_destroy(&vldcp->lock);
			ddi_soft_state_free(vldc_ssp, instance);
			return (DDI_FAILURE);
		}

		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
/**
 * User context entry points
 *
 * @remarks fFlags are the flags passed to open() or to ldi_open_by_name. In
 *     the latter case the FKLYR flag is added to indicate that the caller
 *     is a kernel component rather than user land.
 */
static int vgdrvSolarisOpen(dev_t *pDev, int fFlags, int fType, cred_t *pCred)
{
    int rc;
    PVBOXGUESTSESSION pSession = NULL;
    LogFlow(("vgdrvSolarisOpen:\n"));

    /*
     * Verify we are being opened as a character device.
     */
    if (fType != OTYP_CHR)
        return EINVAL;

    /*
     * Claim the first free soft-state slot; the slot index becomes the
     * clone minor number stored into *pDev below (max 4096 concurrent
     * opens).
     */
    vboxguest_state_t *pState = NULL;
    unsigned iOpenInstance;
    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (    !ddi_get_soft_state(g_pvgdrvSolarisState, iOpenInstance) /* faster */
            &&  ddi_soft_state_zalloc(g_pvgdrvSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pvgdrvSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        Log(("vgdrvSolarisOpen: too many open instances."));
        return ENXIO;
    }

    /*
     * Create a new session.
     */
    if (!(fFlags & FKLYR))
        rc = VGDrvCommonCreateUserSession(&g_DevExt, &pSession);
    else
        rc = VGDrvCommonCreateKernelSession(&g_DevExt, &pSession);
    if (RT_SUCCESS(rc))
    {
        /* Hold a proc reference only for user-land opens; kernel (FKLYR)
           callers get none. */
        if (!(fFlags & FKLYR))
            pState->pvProcRef = proc_ref();
        else
            pState->pvProcRef = NULL;
        pState->pSession = pSession;
        *pDev = makedevice(getmajor(*pDev), iOpenInstance);
        Log(("vgdrvSolarisOpen: pSession=%p pState=%p pid=%d\n", pSession, pState, (int)RTProcSelf()));
        return 0;
    }

    /* Failed, clean up. */
    ddi_soft_state_free(g_pvgdrvSolarisState, iOpenInstance);
    /* NOTE(review): the concrete failure rc is flattened to EFAULT here;
       the real reason is only visible in the release log. */
    LogRel((DEVICE_NAME "::Open: VGDrvCommonCreateUserSession failed. rc=%d\n", rc));
    return EFAULT;
}
/*
 * attach entry point:
 *
 * Allocates per-instance soft state, caches the "reg" property and
 * initializes the access lock/CV used to serialize DMA engine access.
 */
static int
isadma_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	isadma_devstate_t *isadmap;	/* per isadma state pointer */
	int32_t instance;
	int ret = DDI_SUCCESS;

#ifdef DEBUG
	debug_print_level = 0;
	debug_info = 1;
#endif

	switch (cmd) {
	case DDI_ATTACH: {
		/*
		 * Allocate soft state for this instance.
		 */
		instance = ddi_get_instance(dip);
		if (ddi_soft_state_zalloc(per_isadma_state, instance) !=
		    DDI_SUCCESS) {
			ret = DDI_FAILURE;
			goto exit;
		}
		isadmap = ddi_get_soft_state(per_isadma_state, instance);
		isadmap->isadma_dip = dip;

		/* Cache our register property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&isadmap->isadma_regp,
		    &isadmap->isadma_reglen) != DDI_SUCCESS) {
			ret = DDI_FAILURE;
			goto fail_get_prop;
		}

		/* Initialize our mutex */
		mutex_init(&isadmap->isadma_access_lock, NULL, MUTEX_DRIVER,
		    NULL);

		/* Initialize our condition variable */
		cv_init(&isadmap->isadma_access_cv, NULL, CV_DRIVER, NULL);

		ddi_report_dev(dip);
		goto exit;
	}
	case DDI_RESUME:
		/* Nothing to restore; report success. */
		goto exit;

	default:
		/*
		 * BUG FIX: an unsupported command previously fell through
		 * to `exit` with ret still DDI_SUCCESS, reporting success
		 * for commands this driver does not handle.
		 */
		ret = DDI_FAILURE;
		goto exit;
	}

fail_get_prop:
	ddi_soft_state_free(per_isadma_state, instance);

exit:
	return (ret);
}
/*
 * open(9E) for the event-channel clone driver: opening minor 0 claims a
 * free clone slot, allocates its soft state and returns the new minor
 * number through *devp.
 */
/* ARGSUSED */
static int
evtchndrv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct evtsoftdata *ep;
	minor_t minor = getminor(*devp);

	if (otyp == OTYP_BLK)
		return (ENXIO);

	/*
	 * only allow open on minor = 0 - the clone device
	 */
	if (minor != 0)
		return (ENXIO);

	/*
	 * find a free slot and grab it
	 */
	mutex_enter(&evtchndrv_clone_tab_mutex);
	for (minor = 1; minor < evtchndrv_nclones; minor++) {
		if (evtchndrv_clone_tab[minor] == 0) {
			evtchndrv_clone_tab[minor] = 1;
			break;
		}
	}
	mutex_exit(&evtchndrv_clone_tab_mutex);
	/* loop ran off the end: every clone slot is in use */
	if (minor == evtchndrv_nclones)
		return (EAGAIN);

	/* Allocate softstate structure */
	if (ddi_soft_state_zalloc(evtchndrv_statep,
	    EVTCHNDRV_MINOR2INST(minor)) != DDI_SUCCESS) {
		/* release the clone slot claimed above */
		mutex_enter(&evtchndrv_clone_tab_mutex);
		evtchndrv_clone_tab[minor] = 0;
		mutex_exit(&evtchndrv_clone_tab_mutex);
		return (EAGAIN);
	}
	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* ... and init it */
	ep->dip = evtchndrv_dip;
	cv_init(&ep->evtchn_wait, NULL, CV_DEFAULT, NULL);
	mutex_init(&ep->evtchn_lock, NULL, MUTEX_DEFAULT, NULL);
	ep->ring = kmem_alloc(PAGESIZE, KM_SLEEP);

	/* clone driver */
	*devp = makedevice(getmajor(*devp), minor);

	return (0);
}
/*
 * attach(9E) for the xcalwd watchdog driver: verify platform support,
 * allocate soft state, create the minor node and initialize the
 * per-instance lock and timer bookkeeping.
 */
static int
xcalwd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	xcalwd_state_t *tsp;

	switch (cmd) {
	case DDI_ATTACH:
		instance = ddi_get_instance(dip);
		/* weak-symbol check: the platform must supply this hook */
		if (&plat_fan_blast == NULL) {
			cmn_err(CE_WARN, "missing plat_fan_blast function");
			return (DDI_FAILURE);
		}
		if (ddi_soft_state_zalloc(xcalwd_statep, instance) !=
		    DDI_SUCCESS) {
			/* BUG FIX: message previously rendered as
			 * "alloc%d" due to a missing space in the
			 * string concatenation */
			cmn_err(CE_WARN, "attach could not alloc "
			    "%d state structure", instance);
			return (DDI_FAILURE);
		}
		tsp = ddi_get_soft_state(xcalwd_statep, instance);
		if (tsp == NULL) {
			cmn_err(CE_WARN, "get state failed %d",
			    instance);
			/* BUG FIX: free the soft state allocated above */
			ddi_soft_state_free(xcalwd_statep, instance);
			return (DDI_FAILURE);
		}
		if (ddi_create_minor_node(dip, MINOR_DEVICE_NAME,
		    S_IFCHR, instance, DDI_PSEUDO, NULL) == DDI_FAILURE) {
			cmn_err(CE_WARN, "create minor node failed\n");
			/* BUG FIX: free the soft state allocated above */
			ddi_soft_state_free(xcalwd_statep, instance);
			return (DDI_FAILURE);
		}
		mutex_init(&tsp->lock, NULL, MUTEX_DRIVER, NULL);
		tsp->started = B_FALSE;
		tsp->intvl = 0;
		tsp->tid = 0;
		tsp->dip = dip;

		ddi_report_dev(dip);
		return (DDI_SUCCESS);
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		break;
	}
	return (DDI_FAILURE);
}
/*
 * attach(9E) for the nsmb pseudo driver.  Only instance 0 attaches; the
 * driver clones per-open in its open(9E) routine instead.
 */
static int
nsmb_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	smb_dev_t *sdp;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	/*
	 * only one instance - but we clone using the open routine
	 */
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);

	mutex_enter(&dev_lck);

	/*
	 * This is the Zero'th minor device which is created.
	 */
	if (ddi_soft_state_zalloc(statep, 0) == DDI_FAILURE) {
		cmn_err(CE_WARN, "nsmb_attach: soft state alloc");
		goto attach_failed;
	}
	if (ddi_create_minor_node(dip, "nsmb", S_IFCHR, 0, DDI_PSEUDO,
	    NULL) == DDI_FAILURE) {
		cmn_err(CE_WARN, "nsmb_attach: create minor");
		goto attach_failed;
	}
	if ((sdp = ddi_get_soft_state(statep, 0)) == NULL) {
		cmn_err(CE_WARN, "nsmb_attach: get soft state");
		ddi_remove_minor_node(dip, NULL);
		goto attach_failed;
	}

	/*
	 * Need to see if this field is required.
	 * REVISIT
	 */
	sdp->smb_dip = dip;
	sdp->sd_seq = 0;
	/* mark minor 0 permanently "opened" so the clone scan skips it */
	sdp->sd_opened = 1;

	mutex_exit(&dev_lck);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_failed:
	/*
	 * NOTE(review): reached even when the zalloc itself failed;
	 * presumably freeing an unallocated soft-state slot is a no-op —
	 * verify against ddi_soft_state_free(9F).
	 */
	ddi_soft_state_free(statep, 0);
	mutex_exit(&dev_lck);
	return (DDI_FAILURE);
}
/*
 * attach(9E) for the tsalarm LOM driver: allocate soft state, set up
 * the per-instance mutex and create the "lom" minor node.
 */
static int
tsalarm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	struct tsalarm_softc *softc = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		inst = ddi_get_instance(dip);
		/*
		 * Allocate a soft state structure for this instance.
		 */
		if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS)
			goto attach_failed;
		softc = getsoftc(inst);
		softc->dip = dip;
		mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);
		/*
		 * Create minor node.  The minor device number, inst, has no
		 * meaning.  The model number above, which will be added to
		 * the device's softc, is used to direct peculiar behavior.
		 */
		if (ddi_create_minor_node(dip, "lom", S_IFCHR, 0,
		    DDI_PSEUDO, NULL) == DDI_FAILURE)
			goto attach_failed;
		ddi_report_dev(dip);
		return (DDI_SUCCESS);
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

attach_failed:
	/* Free soft state, if allocated. remove minor node if added earlier */
	if (softc) {
		mutex_destroy(&softc->mutex);
		ddi_soft_state_free(statep, inst);
	}
	/* removing when no node was ever created is a harmless no-op */
	ddi_remove_minor_node(dip, NULL);
	return (DDI_FAILURE);
}
/*
 * attach(9E) for the AGP target driver: map PCI config space, locate
 * the AGP capability and create the control minor node.
 */
static int
agp_target_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	agp_target_softstate_t *softstate;
	int instance;
	int status;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agptarget_glob_soft_handle,
	    instance) != DDI_SUCCESS)
		return (DDI_FAILURE);

	softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
	mutex_init(&softstate->tsoft_lock, NULL, MUTEX_DRIVER, NULL);
	softstate->tsoft_dip = dip;
	status = pci_config_setup(dip, &softstate->tsoft_pcihdl);
	if (status != DDI_SUCCESS) {
		/* BUG FIX: destroy the mutex initialized above */
		mutex_destroy(&softstate->tsoft_lock);
		ddi_soft_state_free(agptarget_glob_soft_handle, instance);
		return (DDI_FAILURE);
	}

	softstate->tsoft_devid = pci_config_get32(softstate->tsoft_pcihdl,
	    PCI_CONF_VENID);
	softstate->tsoft_acaptr = agp_target_cap_find(softstate->tsoft_pcihdl);
	if (softstate->tsoft_acaptr == 0) {
		/* Make a correction for some Intel chipsets */
		if ((softstate->tsoft_devid & VENDOR_ID_MASK) ==
		    INTEL_VENDOR_ID) {
			softstate->tsoft_acaptr = AGP_CAP_OFF_DEF;
		} else {
			/*
			 * BUG FIX: the old code returned here without
			 * tearing down the PCI config handle, the mutex
			 * or the soft state.
			 */
			pci_config_teardown(&softstate->tsoft_pcihdl);
			mutex_destroy(&softstate->tsoft_lock);
			ddi_soft_state_free(agptarget_glob_soft_handle,
			    instance);
			return (DDI_FAILURE);
		}
	}

	status = ddi_create_minor_node(dip, AGPTARGET_NAME, S_IFCHR,
	    INST2NODENUM(instance), DDI_NT_AGP_TARGET, 0);
	if (status != DDI_SUCCESS) {
		pci_config_teardown(&softstate->tsoft_pcihdl);
		/* BUG FIX: destroy the mutex initialized above */
		mutex_destroy(&softstate->tsoft_lock);
		ddi_soft_state_free(agptarget_glob_soft_handle, instance);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*
 * heci_attach - Driver Attach Routine
 *
 * Allocates per-instance soft state, initializes the HECI hardware via
 * heci_initialize() and creates the "AMT" character minor node.
 */
static int
heci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance, status;
	struct iamt_heci_device *device;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		heci_resume(dip);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	DBG("%s - version %s\n", heci_driver_string, heci_driver_version);
	DBG("%s\n", heci_copyright);

	instance = ddi_get_instance(dip);	/* find out which unit */
	status = ddi_soft_state_zalloc(heci_soft_state_p, instance);
	if (status != DDI_SUCCESS)
		return (DDI_FAILURE);
	device = ddi_get_soft_state(heci_soft_state_p, instance);
	ASSERT(device != NULL);	/* can't fail - we only just allocated it */

	device->dip = dip;

	status = heci_initialize(dip, device);
	if (status != DDI_SUCCESS) {
		/*
		 * NOTE(review): presumably heci_initialize() cleans up
		 * after itself on failure — confirm; only the soft state
		 * is freed here.
		 */
		ddi_soft_state_free(heci_soft_state_p, instance);
		return (DDI_FAILURE);
	}

	status = ddi_create_minor_node(dip, "AMT", S_IFCHR,
	    MAKE_MINOR_NUM(HECI_MINOR_NUMBER, instance), DDI_PSEUDO, 0);
	if (status != DDI_SUCCESS) {
		ddi_remove_minor_node(dip, NULL);
		ddi_soft_state_free(heci_soft_state_p, instance);
		return (DDI_FAILURE);
	}

	return (status);
}
/*
 * open() entry point: claim a free soft-state slot, record the caller's
 * process id and hand the slot index back as the clone minor number.
 */
static int VBoxUSBMonSolarisOpen(dev_t *pDev, int fFlag, int fType, cred_t *pCred)
{
    vboxusbmon_state_t *pState = NULL;
    unsigned iOpenInstance;

    LogFunc((DEVICE_NAME ": VBoxUSBMonSolarisOpen\n"));

    /* Only character-device opens are supported. */
    if (fType != OTYP_CHR)
        return EINVAL;

    /* attach must have run first. */
    if (!g_pDip)
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisOpen: Invalid state for opening\n"));
        return ENXIO;
    }

    /* Scan for the first unused soft-state slot and claim it. */
    iOpenInstance = 0;
    while (iOpenInstance < 4096)
    {
        if (   ddi_get_soft_state(g_pVBoxUSBMonSolarisState, iOpenInstance) == NULL /* faster */
            && ddi_soft_state_zalloc(g_pVBoxUSBMonSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pVBoxUSBMonSolarisState, iOpenInstance);
            break;
        }
        iOpenInstance++;
    }
    if (pState == NULL)
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisOpen: Too many open instances"));
        return ENXIO;
    }

    pState->Process = RTProcSelf();
    *pDev = makedevice(getmajor(*pDev), iOpenInstance);

    NOREF(fFlag);
    NOREF(pCred);

    return 0;
}
/*
 * open(9E) for nsmb: allocate a fresh clone minor for this open and
 * return it through *dev.
 */
/*ARGSUSED*/
static int
nsmb_open(dev_t *dev, int flags, int otyp, cred_t *cr)
{
	smb_dev_t *sdp;
	minor_t m;

	mutex_enter(&dev_lck);

	/*
	 * Scan for a free minor number, starting just past the last one
	 * handed out and wrapping within [NSMB_MIN_MINOR, NSMB_MAX_MINOR];
	 * a slot is free when it has no soft state.
	 */
	for (m = last_minor + 1; m != last_minor; m++) {
		if (m > NSMB_MAX_MINOR)
			m = NSMB_MIN_MINOR;

		if (ddi_get_soft_state(statep, m) == NULL) {
			last_minor = m;
			goto found;
		}
	}

	/* No available minor units. */
	mutex_exit(&dev_lck);
	return (ENXIO);

found:
	/* NB: dev_lck still held */
	if (ddi_soft_state_zalloc(statep, m) == DDI_FAILURE) {
		mutex_exit(&dev_lck);
		return (ENXIO);
	}
	if ((sdp = ddi_get_soft_state(statep, m)) == NULL) {
		mutex_exit(&dev_lck);
		return (ENXIO);
	}
	*dev = makedevice(nsmb_major, m);
	mutex_exit(&dev_lck);

	sdp->sd_cred = cr;
	sdp->sd_smbfid = -1;
	sdp->sd_flags |= NSMBFL_OPEN;
	sdp->zoneid = crgetzoneid(cr);

	return (0);
}
/*
 * attach(9E) for the fcoe driver: allocate the (single) soft state,
 * publish it through fcoe_global_ss and run common init.
 */
static int
fcoe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int ret = DDI_FAILURE;
	int fcoe_ret;
	int instance;
	fcoe_soft_state_t *ss;

	instance = ddi_get_instance(dip);
	switch (cmd) {
	case DDI_ATTACH:
		ret = ddi_soft_state_zalloc(fcoe_state, instance);
		if (ret == DDI_FAILURE) {
			FCOE_LOG(0, "soft_state_zalloc-%x/%x", ret, instance);
			return (ret);
		}

		ss = ddi_get_soft_state(fcoe_state, instance);
		ss->ss_dip = dip;

		ASSERT(fcoe_global_ss == NULL);
		fcoe_global_ss = ss;
		fcoe_ret = fcoe_attach_init(ss);
		if (fcoe_ret == FCOE_SUCCESS) {
			ret = DDI_SUCCESS;
		} else {
			/*
			 * BUG FIX: ret still held DDI_SUCCESS from the
			 * zalloc above, so a failed fcoe_attach_init()
			 * previously reported a successful attach.
			 *
			 * NOTE(review): presumably fcoe_attach_init()
			 * unwinds its own partial setup — confirm; the
			 * soft state itself is reclaimed on detach.
			 */
			ret = DDI_FAILURE;
		}
		FCOE_LOG("fcoe", "fcoe_attach_init end with-%x", fcoe_ret);
		break;

	case DDI_RESUME:
		ret = DDI_SUCCESS;
		break;

	default:
		FCOE_LOG("fcoe", "unsupported attach cmd-%x", cmd);
		break;
	}

	return (ret);
}
/*
 * attach(9E) for the qotd example driver: allocate soft state and
 * create the device minor node.
 */
static int
qotd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	struct qotd_state *qsp;

	switch (cmd) {
	case DDI_ATTACH:
		/*
		 * BUG FIX: all three DDI calls below were previously
		 * unchecked; a failed allocation would have led to a
		 * NULL pointer dereference on qsp.
		 */
		if (ddi_soft_state_zalloc(qotd_state_head, instance) !=
		    DDI_SUCCESS)
			return DDI_FAILURE;
		qsp = ddi_get_soft_state(qotd_state_head, instance);
		if (qsp == NULL) {
			ddi_soft_state_free(qotd_state_head, instance);
			return DDI_FAILURE;
		}
		if (ddi_create_minor_node(dip, QOTD_NAME, S_IFCHR,
		    instance, DDI_PSEUDO, 0) != DDI_SUCCESS) {
			ddi_soft_state_free(qotd_state_head, instance);
			return DDI_FAILURE;
		}
		qsp->instance = instance;
		qsp->devi = dip;
		ddi_report_dev(dip);
		return DDI_SUCCESS;
	case DDI_RESUME:
		return DDI_SUCCESS;
	default:
		return DDI_FAILURE;
	}
}
/*
 * attach(9E) for the bdtrp driver: allocate soft state, create the
 * "bdtrp" minor node and arm the periodic trap timer.
 */
static int
bdtrp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	bdtrp_devstate_t *rsp;

	switch (cmd) {
	case DDI_ATTACH:
		instance = ddi_get_instance(dip);
		if (ddi_soft_state_zalloc(bdtrp_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_CONT, "%s%d: can't allocate state\n",
			    ddi_get_name(dip), instance);
			return (DDI_FAILURE);
		} else
			rsp = ddi_get_soft_state(bdtrp_state, instance);
		if (ddi_create_minor_node(dip, "bdtrp", S_IFCHR,
		    instance, DDI_PSEUDO, 0) == DDI_FAILURE) {
			ddi_remove_minor_node(dip, NULL);
			goto attach_failed;
		}
		rsp->dip = dip;
		ddi_report_dev(dip);
		/*
		 * One-shot timeout: fire bdtrp_timer after 120 seconds.
		 * NOTE(review): the argument 4096 is passed as an opaque
		 * pointer-sized value — confirm its meaning against
		 * bdtrp_timer().
		 */
		timeout(bdtrp_timer, (void *)4096, hz*120);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

attach_failed:
	/* detach undoes any partial setup, including the soft state */
	(void) bdtrp_detach(dip, DDI_DETACH);
	return (DDI_FAILURE);
}
/* * xpvtap_drv_init() */ static xpvtap_state_t * xpvtap_drv_init(int instance) { xpvtap_state_t *state; int e; e = ddi_soft_state_zalloc(xpvtap_statep, instance); if (e != DDI_SUCCESS) { return (NULL); } state = ddi_get_soft_state(xpvtap_statep, instance); if (state == NULL) { goto drvinitfail_get_soft_state; } state->bt_instance = instance; mutex_init(&state->bt_open.bo_mutex, NULL, MUTEX_DRIVER, NULL); cv_init(&state->bt_open.bo_exit_cv, NULL, CV_DRIVER, NULL); state->bt_open.bo_opened = B_FALSE; state->bt_map.um_registered = B_FALSE; /* initialize user ring, thread, mapping state */ e = xpvtap_user_init(state); if (e != DDI_SUCCESS) { goto drvinitfail_userinit; } return (state); drvinitfail_userinit: cv_destroy(&state->bt_open.bo_exit_cv); mutex_destroy(&state->bt_open.bo_mutex); drvinitfail_get_soft_state: (void) ddi_soft_state_free(xpvtap_statep, instance); return (NULL); }
/*
 * attach the module
 *
 * tvhci is a test vHCI: it must sit under /pshot, registers itself with
 * the mpxio (mdi) framework using its bus address as the vhci class,
 * and exposes a "devctl" minor node.
 */
static int
tvhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char *vclass;
	int instance, vhci_regis = 0;
	struct tvhci_state *vhci = NULL;
	dev_info_t *pdip;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		return (0);	/* nothing to do */

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(tvhci_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(tvhci_state, instance);
	ASSERT(vhci != NULL);
	vhci->dip = dip;

	/* parent must be /pshot */
	pdip = ddi_get_parent(dip);
	if (strcmp(ddi_driver_name(pdip), "pshot") != 0 ||
	    ddi_get_parent(pdip) != ddi_root_node()) {
		cmn_err(CE_NOTE, "tvhci must be under /pshot/");
		goto attach_fail;
	}

	/*
	 * XXX add mpxio-disable property. need to remove the check
	 * from the framework
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "mpxio-disable", "no");

	/* bus_addr is the <vhci_class> */
	vclass = ddi_get_name_addr(dip);
	if (vclass == NULL || vclass[1] == '\0') {
		cmn_err(CE_NOTE, "tvhci invalid vhci class");
		goto attach_fail;
	}

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(vclass, dip, &tvhci_opinfo, 0) != MDI_SUCCESS) {
		cmn_err(CE_WARN, "%s mdi_vhci_register failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}
	/* remember registration so the failure path can undo it */
	vhci_regis++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
	    DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s ddi_create_minor_node failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}

	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    DDI_NO_AUTODETACH, 1);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_regis)
		(void) mdi_vhci_unregister(dip, 0);
	ddi_soft_state_free(tvhci_state, instance);
	return (DDI_FAILURE);
}
/*
 * attach(9E) for the ntwdt application watchdog: allocate soft state
 * and the run-state block, set up the cyclic handler and the softint it
 * triggers, and create the minor node last so user applications cannot
 * reach the driver before it is fully initialized.  Error labels unwind
 * strictly in reverse order of initialization.
 */
static int
ntwdt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	ntwdt_state_t *ntwdt_ptr = NULL;	/* pointer to ntwdt_runstatep */
	ntwdt_runstate_t *ntwdt_runstatep = NULL;
	cyc_handler_t *hdlr = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* bail early if the platform has no watchdog support */
	if (ntwdt_chk_watchdog_support() != 0) {
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	ASSERT(instance == 0);	/* single-instance driver */

	if (ddi_soft_state_zalloc(ntwdt_statep, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	ntwdt_ptr = ddi_get_soft_state(ntwdt_statep, instance);
	ASSERT(ntwdt_ptr != NULL);

	ntwdt_dip = dip;

	ntwdt_ptr->ntwdt_dip = dip;
	ntwdt_ptr->ntwdt_cycl_id = CYCLIC_NONE;
	mutex_init(&ntwdt_ptr->ntwdt_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Initialize the watchdog structure
	 */
	ntwdt_ptr->ntwdt_run_state =
	    kmem_zalloc(sizeof (ntwdt_runstate_t), KM_SLEEP);
	ntwdt_runstatep = ntwdt_ptr->ntwdt_run_state;

	/* run-state mutex must interlock with the low-level softint */
	if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
	    &ntwdt_runstatep->ntwdt_runstate_mtx_cookie) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "init of iblock cookie failed "
		    "for ntwdt_runstate_mutex");
		goto err1;
	} else {
		mutex_init(&ntwdt_runstatep->ntwdt_runstate_mutex,
		    NULL, MUTEX_DRIVER,
		    (void *)ntwdt_runstatep->ntwdt_runstate_mtx_cookie);
	}

	/* Cyclic fires once per second: */
	ntwdt_runstatep->ntwdt_cyclic_interval = NTWDT_CYCLIC_INTERVAL;

	/* init the Cyclic that drives the NTWDT */
	hdlr = &ntwdt_runstatep->ntwdt_cycl_hdlr;
	hdlr->cyh_level = CY_LOCK_LEVEL;
	hdlr->cyh_func = (cyc_func_t)ntwdt_cyclic_pat;
	hdlr->cyh_arg = NULL;

	/* Softint that will be triggered by Cyclic that drives NTWDT */
	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &ntwdt_cyclic_softint_id,
	    NULL, NULL, ntwdt_cyclic_softint, (caddr_t)ntwdt_ptr)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to add cyclic softintr");
		goto err2;
	}

	/*
	 * Create Minor Node as last activity.  This prevents
	 * application from accessing our implementation until it
	 * is initialized.
	 */
	if (ddi_create_minor_node(dip, NTWDT_MINOR_NODE, S_IFCHR, 0,
	    DDI_PSEUDO, NULL) == DDI_FAILURE) {
		cmn_err(CE_WARN, "failed to create Minor Node: %s",
		    NTWDT_MINOR_NODE);
		goto err3;
	}

	/* Display our driver info in the banner */
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

err3:
	ddi_remove_softintr(ntwdt_cyclic_softint_id);
err2:
	mutex_destroy(&ntwdt_runstatep->ntwdt_runstate_mutex);
err1:
	/* clean up the driver stuff here */
	kmem_free(ntwdt_runstatep, sizeof (ntwdt_runstate_t));
	ntwdt_ptr->ntwdt_run_state = NULL;
	mutex_destroy(&ntwdt_ptr->ntwdt_mutex);
	ddi_soft_state_free(ntwdt_statep, instance);
	ntwdt_dip = NULL;

	return (DDI_FAILURE);
}
/*
 * attach entry point:
 *
 * normal attach:
 *
 *	create soft state structure (dip, reg, nreg and state fields)
 *	map in configuration header
 *	make sure device is properly configured
 *	report device
 */
static int
pmubus_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pmubus_devstate_t *pmubusp;	/* per pmubus state pointer */
	int32_t instance;

	switch (cmd) {
	case DDI_ATTACH:
		/*
		 * Allocate soft state for this instance.
		 */
		instance = ddi_get_instance(dip);
		if (ddi_soft_state_zalloc(per_pmubus_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't allocate soft "
			    "state.\n");
			goto fail_exit;
		}

		pmubusp = ddi_get_soft_state(per_pmubus_state, instance);
		pmubusp->pmubus_dip = dip;

		/* Cache our register property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&pmubusp->pmubus_regp,
		    &pmubusp->pmubus_reglen) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't acquire reg "
			    "property.\n");
			goto fail_get_regs;
		}

		/* Cache our ranges property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&pmubusp->pmubus_rangep,
		    &pmubusp->pmubus_rnglen) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't acquire the "
			    "ranges property.\n");
			goto fail_get_ranges;
		}

		/* Calculate the number of ranges */
		pmubusp->pmubus_nranges =
		    pmubusp->pmubus_rnglen / sizeof (pmu_rangespec_t);

		/* Set up the mapping to our registers */
		if (pci_config_setup(dip, &pmubusp->pmubus_reghdl) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't map in "
			    "register space.\n");
			goto fail_map_regs;
		}

		/* Initialize our register access mutex */
		mutex_init(&pmubusp->pmubus_reg_access_lock, NULL,
		    MUTEX_DRIVER, NULL);

		ddi_report_dev(dip);
		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		/*
		 * BUG FIX: there was no default case, so an unknown
		 * command fell out of the switch into the cleanup
		 * labels below and called kmem_free() through the
		 * uninitialized pmubusp pointer.
		 */
		return (DDI_FAILURE);
	}

fail_map_regs:
	kmem_free(pmubusp->pmubus_rangep, pmubusp->pmubus_rnglen);

fail_get_ranges:
	kmem_free(pmubusp->pmubus_regp, pmubusp->pmubus_reglen);

fail_get_regs:
	ddi_soft_state_free(per_pmubus_state, instance);

fail_exit:
	return (DDI_FAILURE);
}
/*
 * register ds1307 client device with i2c services, and
 * allocate & initialize soft state structure.
 *
 * The DS1307's register 00h holds the seconds counter; its top bit (CH)
 * halts the oscillator when set, so attach reads it and clears CH if
 * needed before arming the periodic TOD read.
 */
static int
todds1307_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	static ds1307_state_t *statep = NULL;
	i2c_transfer_t *i2c_tp = NULL;
	uint8_t tempVal = (uint8_t)0;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* only a single instance is supported */
	if (instance != -1) {
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/*
	 * Allocate soft state structure
	 */
	if (ddi_soft_state_zalloc(ds1307_statep, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	statep = ddi_get_soft_state(ds1307_statep, instance);
	if (statep == NULL) {
		return (DDI_FAILURE);
	}

	statep->dip = dip;

	if (i2c_client_register(dip, &statep->ds1307_i2c_hdl)
	    != I2C_SUCCESS) {
		ddi_soft_state_free(ds1307_statep, instance);
		delay(drv_usectohz(I2C_DELAY));
		return (DDI_FAILURE);
	}

	/* check and initialize the oscillator */

	(void) i2c_transfer_alloc(statep->ds1307_i2c_hdl, &i2c_tp, 1,
	    1, I2C_SLEEP);
	i2c_tp->i2c_version = I2C_XFER_REV;
	i2c_tp->i2c_flags = I2C_WR_RD;
	i2c_tp->i2c_wbuf[0] = (uchar_t)0x00;	/* Read 00h */
	i2c_tp->i2c_wlen = 1;
	i2c_tp->i2c_rlen = 1;

	if ((i2c_transfer(statep->ds1307_i2c_hdl, i2c_tp)) != I2C_SUCCESS) {
		(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);
		ddi_soft_state_free(ds1307_statep, instance);
		delay(drv_usectohz(I2C_DELAY));
		return (DDI_FAILURE);
	}

	tempVal = i2c_tp->i2c_rbuf[0];

	(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);

	if (tempVal & 0x80) {	/* check Oscillator */
		(void) i2c_transfer_alloc(statep->ds1307_i2c_hdl,
		    &i2c_tp, 2, 1, I2C_SLEEP);
		i2c_tp->i2c_version = I2C_XFER_REV;
		i2c_tp->i2c_flags = I2C_WR;
		i2c_tp->i2c_wbuf[0] = 0x00;
		/*
		 * BUG FIX: the old code masked i2c_rbuf[0] of the
		 * freshly allocated transfer, which is uninitialized;
		 * the value read from register 00h is held in tempVal.
		 */
		i2c_tp->i2c_wbuf[1] = (uchar_t)(tempVal & 0x7f);
		i2c_tp->i2c_wlen = 2;
		/* Enable oscillator */
		if ((i2c_transfer(statep->ds1307_i2c_hdl, i2c_tp))
		    != I2C_SUCCESS) {
			(void) i2c_transfer_free(statep->ds1307_i2c_hdl,
			    i2c_tp);
			ddi_soft_state_free(ds1307_statep, instance);
			return (DDI_FAILURE);
		}
		(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);
	}

	/*
	 * Create a periodical handler to read TOD.
	 */
	ASSERT(statep->cycid == NULL);
	statep->cycid = ddi_periodic_add(todds1307_cyclic, &soft_rtc,
	    i2c_cyclic_timeout, DDI_IPL_1);
	statep->state = TOD_ATTACHED;
	todds1307_attach_done = 1;
	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}
/*
 * attach(9E) for the emul64 emulated SCSI HBA: allocate soft state,
 * build and register the scsi_hba_tran vector, initialize the request/
 * response queue mutexes and default target capabilities, and start the
 * completion task queue.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int mutex_initted = 0;
	struct emul64 *emul64;
	int instance;
	scsi_hba_tran_t *tran = NULL;
	ddi_dma_attr_t tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		/* NOTE(review): emul64 is assigned but unused on resume */
		emul64 = TRAN2EMUL64(tran);
		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state", instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran = tran;
	emul64->emul64_dip = dip;

	/* wire up the HBA entry points */
	tran->tran_hba_private = emul64;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = emul64_tran_tgt_init;
	tran->tran_tgt_probe = scsi_hba_probe;
	tran->tran_tgt_free = NULL;
	tran->tran_start = emul64_scsi_start;
	tran->tran_abort = emul64_scsi_abort;
	tran->tran_reset = emul64_scsi_reset;
	tran->tran_getcap = emul64_scsi_getcap;
	tran->tran_setcap = emul64_scsi_setcap;
	tran->tran_init_pkt = emul64_scsi_init_pkt;
	tran->tran_destroy_pkt = emul64_scsi_destroy_pkt;
	tran->tran_dmafree = emul64_scsi_dmafree;
	tran->tran_sync_pkt = emul64_scsi_sync_pkt;
	tran->tran_reset_notify = emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran, 0) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);

	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_initted = 1;	/* tells the fail path what to unwind */

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);

	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}
/**
 * open() worker.
 *
 * Minor 0 grants an unrestricted session, minor 1 a restricted one.  In
 * the default (non-USE_SESSION_HASH) build each open claims a soft-state
 * slot whose index is returned as the clone minor in *pDev.
 */
static int VBoxDrvSolarisOpen(dev_t *pDev, int fFlag, int fType, cred_t *pCred)
{
    const bool fUnrestricted = getminor(*pDev) == 0;
    PSUPDRVSESSION pSession;
    int rc;
    LogFlowFunc(("VBoxDrvSolarisOpen: pDev=%p:%#x\n", pDev, *pDev));

    /*
     * Validate input
     */
    if (   (getminor(*pDev) != 0 && getminor(*pDev) != 1)
        || fType != OTYP_CHR)
        return EINVAL; /* See mmopen for precedent. */

#ifndef USE_SESSION_HASH
    /*
     * Locate a new device open instance.
     *
     * For each open call we'll allocate an item in the soft state of the device.
     * The item index is stored in the dev_t. I hope this is ok...
     */
    vbox_devstate_t *pState = NULL;
    unsigned iOpenInstance;
    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (    !ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance) /* faster */
            &&  ddi_soft_state_zalloc(g_pVBoxDrvSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        LogRel(("VBoxDrvSolarisOpen: too many open instances.\n"));
        return ENXIO;
    }

    /*
     * Create a new session.
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        pState->pSession = pSession;
        *pDev = makedevice(getmajor(*pDev), iOpenInstance);
        LogFlow(("VBoxDrvSolarisOpen: Dev=%#x pSession=%p pid=%d r0proc=%p thread=%p\n",
                 *pDev, pSession, RTProcSelf(), RTR0ProcHandleSelf(), RTThreadNativeSelf() ));
        return 0;
    }

    /* failed - clean up */
    ddi_soft_state_free(g_pVBoxDrvSolarisState, iOpenInstance);

#else
    /*
     * Create a new session.
     * Sessions in Solaris driver are mostly useless. It's however needed
     * in VBoxDrvSolarisIOCtlSlow() while calling supdrvIOCtl()
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        unsigned iHash;

        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        /*
         * Insert it into the hash table.
         */
# error "Only one entry per process!"
        iHash = SESSION_HASH(pSession->Process);
        RTSpinlockAcquire(g_Spinlock);
        pSession->pNextHash = g_apSessionHashTab[iHash];
        g_apSessionHashTab[iHash] = pSession;
        RTSpinlockRelease(g_Spinlock);
        LogFlow(("VBoxDrvSolarisOpen success\n"));
    }

    /* pick the first instance that already has soft state */
    int instance;
    for (instance = 0; instance < DEVICE_MAXINSTANCES; instance++)
    {
        vbox_devstate_t *pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, instance);
        if (pState)
            break;
    }

    if (instance >= DEVICE_MAXINSTANCES)
    {
        LogRel(("VBoxDrvSolarisOpen: All instances exhausted\n"));
        return ENXIO;
    }

    *pDev = makedevice(getmajor(*pDev), instance);
#endif

    /* rc from supdrvCreateSession is mapped to a Solaris errno here */
    return VBoxSupDrvErr2SolarisErr(rc);
}
/**
 * Attach entry point, to attach a device to the system or resume it.
 *
 * @param   pDip    The module structure instance.
 * @param   enmCmd  Operation type (attach/resume).
 *
 * @return  corresponding solaris error code.
 */
static int VBoxDrvSolarisAttach(dev_info_t *pDip, ddi_attach_cmd_t enmCmd)
{
    LogFlowFunc(("VBoxDrvSolarisAttach\n"));
    switch (enmCmd)
    {
        case DDI_ATTACH:
        {
            int rc;
#ifdef USE_SESSION_HASH
            int instance = ddi_get_instance(pDip);
            vbox_devstate_t *pState;

            if (ddi_soft_state_zalloc(g_pVBoxDrvSolarisState, instance) != DDI_SUCCESS)
            {
                LogRel(("VBoxDrvSolarisAttach: state alloc failed\n"));
                return DDI_FAILURE;
            }

            pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, instance);
#endif

            /*
             * Register for suspend/resume notifications
             */
            rc = ddi_prop_create(DDI_DEV_T_NONE, pDip, DDI_PROP_CANSLEEP /* kmem alloc can sleep */,
                                 "pm-hardware-state", "needs-suspend-resume", sizeof("needs-suspend-resume"));
            /* non-fatal: only suspend/resume notification is lost */
            if (rc != DDI_PROP_SUCCESS)
                LogRel(("vboxdrv: Suspend/Resume notification registration failed.\n"));

            /*
             * Register ourselves as a character device, pseudo-driver.
             * Minor 0 is the system (unrestricted) node; minor 1 the user
             * node.  A hardened build restricts the system node to root.
             */
#ifdef VBOX_WITH_HARDENING
            rc = ddi_create_priv_minor_node(pDip, DEVICE_NAME_SYS, S_IFCHR, 0 /*minor*/, DDI_PSEUDO,
                                            0, NULL, NULL, 0600);
#else
            rc = ddi_create_priv_minor_node(pDip, DEVICE_NAME_SYS, S_IFCHR, 0 /*minor*/, DDI_PSEUDO,
                                            0, "none", "none", 0666);
#endif
            if (rc == DDI_SUCCESS)
            {
                rc = ddi_create_priv_minor_node(pDip, DEVICE_NAME_USR, S_IFCHR, 1 /*minor*/, DDI_PSEUDO,
                                                0, "none", "none", 0666);
                if (rc == DDI_SUCCESS)
                {
#ifdef USE_SESSION_HASH
                    pState->pDip = pDip;
#endif
                    ddi_report_dev(pDip);
                    return DDI_SUCCESS;
                }

                /* second node failed: remove the first */
                ddi_remove_minor_node(pDip, NULL);
            }

            return DDI_FAILURE;
        }

        case DDI_RESUME:
        {
#if 0
            RTSemFastMutexRequest(g_DevExt.mtxGip);
            if (g_DevExt.pGipTimer)
                RTTimerStart(g_DevExt.pGipTimer, 0);
            RTSemFastMutexRelease(g_DevExt.mtxGip);
#endif
            RTPowerSignalEvent(RTPOWEREVENT_RESUME);
            LogFlow(("vboxdrv: Awakened from suspend.\n"));
            return DDI_SUCCESS;
        }

        default:
            return DDI_FAILURE;
    }

    return DDI_FAILURE;
}
/*
 * attach the module
 *
 * tphci is a test pHCI: it registers itself with the mpxio (mdi)
 * framework under the vhci class encoded in its bus address
 * ("#,<vhci_class>") and exposes a "devctl" minor node.
 */
static int
tphci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char *vclass;
	int instance, phci_regis = 0;
	struct tphci_state *phci = NULL;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		return (0);	/* nothing to do */

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Allocate phci data structure.
	 */
	if (ddi_soft_state_zalloc(tphci_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	phci = ddi_get_soft_state(tphci_state, instance);
	ASSERT(phci != NULL);
	phci->dip = dip;

	/* bus_addr has the form #,<vhci_class> */
	vclass = strchr(ddi_get_name_addr(dip), ',');
	if (vclass == NULL || vclass[1] == '\0') {
		cmn_err(CE_NOTE, "tphci invalid bus_addr %s",
		    ddi_get_name_addr(dip));
		goto attach_fail;
	}

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_phci_register(vclass + 1, dip, 0) != MDI_SUCCESS) {
		cmn_err(CE_WARN, "%s mdi_phci_register failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}
	/* remember registration so the failure path can undo it */
	phci_regis++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
	    DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s ddi_create_minor_node failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}

	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    DDI_NO_AUTODETACH, 1);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (phci_regis)
		(void) mdi_phci_unregister(dip, 0);
	ddi_soft_state_free(tphci_state, instance);
	return (DDI_FAILURE);
}
/*
 * open(9E) entry point for the USB monitor pseudo device (cloning open).
 *
 * On the first open it registers the driver-election callback with USBA;
 * every open then claims a fresh soft-state slot and rewrites *pDev so the
 * caller gets a unique minor (clone-style per-open instance).
 *
 * Returns 0 on success, or EINVAL / ENXIO errno values on failure.
 */
static int VBoxUSBMonSolarisOpen(dev_t *pDev, int fFlag, int fType, cred_t *pCred)
{
    vboxusbmon_state_t *pState = NULL;
    unsigned iOpenInstance;

    LogFunc((DEVICE_NAME ":VBoxUSBMonSolarisOpen\n"));

    /*
     * Verify we are being opened as a character device.
     */
    if (fType != OTYP_CHR)
        return EINVAL;

    /*
     * Verify that we're called after attach.
     */
    if (!g_pDip)
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisOpen invalid state for opening.\n"));
        return ENXIO;
    }

    mutex_enter(&g_VBoxUSBMonSolarisMtx);
    if (!g_cVBoxUSBMonSolarisClient)
    {
        /*
         * First client: register the election callback with USBA.  The mutex
         * is dropped because usb_register_dev_driver() may block.
         *
         * NOTE(review): between mutex_exit and the re-acquire below, a second
         * concurrent first-open could also observe a zero client count and
         * attempt a duplicate registration — confirm that opens are serialized
         * by an upper layer or that double registration is benign.
         */
        mutex_exit(&g_VBoxUSBMonSolarisMtx);
        int rc = usb_register_dev_driver(g_pDip, VBoxUSBMonSolarisElectDriver);
        if (RT_UNLIKELY(rc != DDI_SUCCESS))
        {
            LogRel((DEVICE_NAME ":Failed to register driver election callback with USBA rc=%d\n", rc));
            return EINVAL;
        }
        Log((DEVICE_NAME ":Successfully registered election callback with USBA\n"));
        mutex_enter(&g_VBoxUSBMonSolarisMtx);
    }
    g_cVBoxUSBMonSolarisClient++;
    mutex_exit(&g_VBoxUSBMonSolarisMtx);

    /*
     * Find a free soft-state slot for this open (up to 4096 concurrent opens).
     * The cheap get-check avoids a zalloc attempt on already-used slots.
     */
    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (   !ddi_get_soft_state(g_pVBoxUSBMonSolarisState, iOpenInstance) /* faster */
            && ddi_soft_state_zalloc(g_pVBoxUSBMonSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pVBoxUSBMonSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        /* No free slot: roll back the client count taken above. */
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisOpen: too many open instances."));
        mutex_enter(&g_VBoxUSBMonSolarisMtx);
        g_cVBoxUSBMonSolarisClient--;
        mutex_exit(&g_VBoxUSBMonSolarisMtx);
        return ENXIO;
    }

    /* Record the owning process and hand back the cloned minor number. */
    pState->Process = RTProcSelf();
    *pDev = makedevice(getmajor(*pDev), iOpenInstance);

    NOREF(fFlag);
    NOREF(pCred);
    return 0;
}
/*
 * wusb_df_attach:
 *	Attach or resume.
 *
 *	For attach, initialize state and device, including:
 *		state variables, locks, device node
 *		device registration with system
 *		power management, hotplugging
 *	For resume, restore device and state
 */
static int
wusb_df_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	char			*devinst;
	int			devinstlen;
	wusb_df_state_t		*wusb_dfp = NULL;
	usb_ep_data_t		*ep_datap;
	int			status;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		wusb_df_cpr_resume(dip);

		/*
		 * Always return success to work around enumeration failures.
		 * This works around an issue where devices which are present
		 * before a suspend and absent upon resume could cause a system
		 * panic on resume.
		 */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* Allocate and fetch the per-instance soft state. */
	if (ddi_soft_state_zalloc(wusb_df_statep, instance) == DDI_SUCCESS) {
		wusb_dfp = ddi_get_soft_state(wusb_df_statep, instance);
	}
	if (wusb_dfp == NULL) {

		return (DDI_FAILURE);
	}

	wusb_dfp->wusb_df_dip = dip;

	/* Build the "<driver><inst>: " prefix used in log messages. */
	devinst = kmem_zalloc(USB_MAXSTRINGLEN, KM_SLEEP);
	devinstlen = snprintf(devinst, USB_MAXSTRINGLEN, "%s%d: ",
	    ddi_driver_name(dip), instance);

	/*
	 * Fix: snprintf returns the would-be length; clamp it to what actually
	 * fits in devinst so the strncpy below never reads past the buffer.
	 */
	if (devinstlen >= USB_MAXSTRINGLEN)
		devinstlen = USB_MAXSTRINGLEN - 1;

	wusb_dfp->wusb_df_devinst = kmem_zalloc(devinstlen + 1, KM_SLEEP);
	(void) strncpy(wusb_dfp->wusb_df_devinst, devinst, devinstlen);
	kmem_free(devinst, USB_MAXSTRINGLEN);

	wusb_dfp->wusb_df_log_hdl = usb_alloc_log_hdl(dip, "wusb_df",
	    &wusb_df_errlevel, &wusb_df_errmask, &wusb_df_instance_debug, 0);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
	    "Attach: enter for attach");

	/* Register this driver instance with the USBA framework. */
	if ((status = usb_client_attach(dip, USBDRV_VERSION, 0)) !=
	    USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: usb_client_attach failed, error code:%d", status);
		goto fail;
	}

	if ((status = usb_get_dev_data(dip, &wusb_dfp->wusb_df_reg,
	    USB_PARSE_LVL_ALL, 0)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: usb_get_dev_data failed, error code:%d", status);
		goto fail;
	}

	/*
	 * Get the descriptor for an intr pipe at alt 0 of current interface.
	 * This will be used later to open the pipe.
	 */
	if ((ep_datap = usb_lookup_ep_data(dip, wusb_dfp->wusb_df_reg,
	    wusb_dfp->wusb_df_reg->dev_curr_if, 0, 0,
	    USB_EP_ATTR_INTR, USB_EP_DIR_IN)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: Error getting intr endpoint descriptor");
		goto fail;
	}
	wusb_dfp->wusb_df_intr_ep_descr = ep_datap->ep_descr;

	usb_free_descr_tree(dip, wusb_dfp->wusb_df_reg);

	mutex_init(&wusb_dfp->wusb_df_mutex, NULL, MUTEX_DRIVER,
	    wusb_dfp->wusb_df_reg->dev_iblock_cookie);

	cv_init(&wusb_dfp->wusb_df_serial_cv, NULL, CV_DRIVER, NULL);
	wusb_dfp->wusb_df_serial_inuse = B_FALSE;

	wusb_dfp->wusb_df_locks_initialized = B_TRUE;

	/* create minor node */
	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    "wusb_df", 0) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: Error creating minor node");
		goto fail;
	}

	/* Put online before PM init as can get power managed afterward. */
	wusb_dfp->wusb_df_dev_state = USB_DEV_ONLINE;

	/* initialize power management */
	wusb_df_init_power_mgmt(wusb_dfp);

	if (usb_register_hotplug_cbs(dip, wusb_df_disconnect_callback,
	    wusb_df_reconnect_callback) != USB_SUCCESS) {
		goto fail;
	}

	/* Report device */
	ddi_report_dev(dip);

	(void) wusb_df_firmware_download(wusb_dfp);

	if (usb_reset_device(dip, USB_RESET_LVL_REATTACH) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_PM, wusb_dfp->wusb_df_log_hdl,
		    "reset device failed");

		/*
		 * Fix: previously returned USB_FAILURE (a USBA status, not a
		 * DDI attach code) without running the cleanup path, leaking
		 * the soft state, log handle, locks and minor node.
		 */
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (wusb_dfp) {
		(void) wusb_df_cleanup(dip, wusb_dfp);
	}

	return (DDI_FAILURE);
}
/*
 * px_attach:
 *
 * attach(9E) entry point for the PCI Express root nexus driver.
 *
 * DDI_ATTACH builds the per-instance state and brings up each sub-module in
 * order (props, device handle, interrupt block, MMU, MSIQ/MSI, PEC, DMA, FM,
 * hotplug, pcitool, power management); any failure unwinds via the reversed
 * goto chain below.  DDI_RESUME restores a previously suspended instance.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t		*px_p;	/* per bus state pointer */
	int		instance = DIP_TO_INST(dip);
	int		ret = DDI_SUCCESS;
	devhandle_t	dev_hdl = NULL;
	pcie_hp_regops_t regops;
	pcie_bus_t	*bus_p;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/* See pci_cfgacc.c */
		pci_cfgacc_acc_p = pci_cfgacc_acc;

		/*
		 * Allocate and get the per-px soft state structure.
		 */
		if (ddi_soft_state_zalloc(px_state_p, instance)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate px state",
			    ddi_driver_name(dip), instance);
			goto err_bad_px_softstate;
		}
		px_p = INST_TO_STATE(instance);
		px_p->px_dip = dip;
		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
		px_p->px_soft_state = PCI_SOFT_STATE_CLOSED;

		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pciex");

		/* Initialize px_dbg for high pil printing */
		px_dbg_attach(dip, &px_p->px_dbg_hdl);
		pcie_rc_init_bus(dip);

		/*
		 * Get key properties of the pci bridge node and
		 * determine it's type (psycho, schizo, etc ...).
		 */
		if (px_get_props(px_p, dip) == DDI_FAILURE)
			goto err_bad_px_prop;

		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
			goto err_bad_dev_init;

		/* Initialize device handle */
		px_p->px_dev_hdl = dev_hdl;

		/* Cache the BDF of the root port nexus */
		px_p->px_bdf = px_lib_get_bdf(px_p);

		/*
		 * Initialize interrupt block.  Note that this
		 * initialize error handling for the PEC as well.
		 */
		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_ib;

		if (px_cb_attach(px_p) != DDI_SUCCESS)
			goto err_bad_cb;

		/*
		 * Start creating the modules.
		 * Note that attach() routines should
		 * register and enable their own interrupts.
		 */

		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_mmu;

		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msiq;

		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msi;

		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec;

		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma; /* nothing to uninitialize on DMA */

		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma;

		/*
		 * All of the error handlers have been registered
		 * by now so it's time to activate all the interrupt.
		 */
		if ((px_enable_err_intr(px_p)) != DDI_SUCCESS)
			goto err_bad_intr;

		if (px_lib_hotplug_init(dip, (void *)&regops) == DDI_SUCCESS) {
			/*
			 * Cleanup: use the function-scope bus_p here instead
			 * of shadowing it with a block-local declaration (it
			 * is unconditionally reassigned below before its next
			 * use, so behavior is unchanged).
			 */
			bus_p = PCIE_DIP2BUS(dip);
			bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
		}

		(void) px_set_mps(px_p);

		if (pcie_init(dip, (caddr_t)&regops) != DDI_SUCCESS)
			goto err_bad_hotplug;

		(void) pcie_hpintr_enable(dip);

		if (pxtool_init(dip) != DDI_SUCCESS)
			goto err_bad_pcitool_node;

		/*
		 * power management setup. Even if it fails, attach will
		 * succeed as this is a optional feature. Since we are
		 * always at full power, this is not critical.
		 */
		if (pwr_common_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "px_pwr_setup failed \n");
			pwr_common_teardown(dip);
		}

		/*
		 * add cpr callback
		 */
		px_cpr_add_callb(px_p);

		/*
		 * do fabric sync in case we don't need to wait for
		 * any bridge driver to be ready
		 */
		(void) px_lib_fabric_sync(dip);

		ddi_report_dev(dip);

		px_p->px_state = PX_ATTACHED;

		/*
		 * save base addr in bus_t for pci_cfgacc_xxx(), this
		 * depends of px structure being properly initialized.
		 */
		bus_p = PCIE_DIP2BUS(dip);
		bus_p->bus_cfgacc_base = px_lib_get_cfgacc_base(dip);

		/*
		 * Populate bus_t for all devices in this fabric, after FMA
		 * is initializated, so that config access errors could
		 * trigger panic.
		 */
		pcie_fab_init_bus(dip, PCIE_BUS_ALL);

		DBG(DBG_ATTACH, dip, "attach success\n");
		break;

	/* Unwind in strict reverse order of the setup sequence above. */
err_bad_pcitool_node:
		(void) pcie_hpintr_disable(dip);
		(void) pcie_uninit(dip);
err_bad_hotplug:
		(void) px_lib_hotplug_uninit(dip);
		px_disable_err_intr(px_p);
err_bad_intr:
		px_fm_detach(px_p);
err_bad_dma:
		px_pec_detach(px_p);
err_bad_pec:
		px_msi_detach(px_p);
err_bad_msi:
		px_msiq_detach(px_p);
err_bad_msiq:
		px_mmu_detach(px_p);
err_bad_mmu:
err_bad_cb:
		px_ib_detach(px_p);
err_bad_ib:
		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
			DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n");
		}
err_bad_dev_init:
		px_free_props(px_p);
err_bad_px_prop:
		pcie_rc_fini_bus(dip);
		px_dbg_detach(dip, &px_p->px_dbg_hdl);
		mutex_destroy(&px_p->px_mutex);
		ddi_soft_state_free(px_state_p, instance);
err_bad_px_softstate:
		ret = DDI_FAILURE;
		break;

	case DDI_RESUME:
		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");

		px_p = INST_TO_STATE(instance);

		mutex_enter(&px_p->px_mutex);

		/* suspend might have not succeeded */
		if (px_p->px_state != PX_SUSPENDED) {
			DBG(DBG_ATTACH, px_p->px_dip,
			    "instance NOT suspended\n");
			/*
			 * Bug fix: release px_mutex before bailing out.
			 * The original code broke out of the switch with
			 * the mutex still held, leaking the lock.
			 */
			mutex_exit(&px_p->px_mutex);
			ret = DDI_FAILURE;
			break;
		}

		px_msiq_resume(px_p);
		px_lib_resume(dip);
		(void) pcie_pwr_resume(dip);
		px_p->px_state = PX_ATTACHED;

		mutex_exit(&px_p->px_mutex);

		break;
	default:
		DBG(DBG_ATTACH, dip, "unsupported attach op\n");
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}
/*
 * attach(9E) entry point for the blkdev framework.
 *
 * Builds the per-instance bd_t from the parent driver's bd_handle_t
 * (DMA attributes, ops vector, private data), creates locks, queues,
 * the transfer kmem cache and I/O kstat, then attaches cmlb for
 * labeling/minor nodes and registers an optional devid.  Resume is a
 * no-op; any failure after soft-state allocation tears down what was
 * created and returns DDI_FAILURE.
 */
static int
bd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		inst;
	bd_handle_t	hdl;
	bd_t		*bd;
	bd_drive_t	drive;
	int		rv;
	char		name[16];
	char		kcache[32];

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* We don't do anything native for suspend/resume */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	inst = ddi_get_instance(dip);
	/* The parent (hardware) driver hands us its bd_handle_t here. */
	hdl = ddi_get_parent_data(dip);

	/* "<driver><inst>" for messages, "<driver><inst>_xfer" for the cache */
	(void) snprintf(name, sizeof (name), "%s%d",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	(void) snprintf(kcache, sizeof (kcache), "%s_xfer", name);

	if (hdl == NULL) {
		cmn_err(CE_WARN, "%s: missing parent data!", name);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(bd_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: unable to zalloc soft state!", name);
		return (DDI_FAILURE);
	}
	bd = ddi_get_soft_state(bd_state, inst);

	if (hdl->h_dma) {
		/*
		 * Copy the parent's DMA attributes; granularity is at
		 * least one disk block.
		 */
		bd->d_dma = *(hdl->h_dma);
		bd->d_dma.dma_attr_granular =
		    max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
		bd->d_use_dma = B_TRUE;

		/*
		 * NOTE(review): bd was just zalloc'ed, so d_maxxfer is 0
		 * here and this mismatch branch looks unreachable — confirm
		 * whether d_maxxfer was meant to be seeded from the handle
		 * before this check.
		 */
		if (bd->d_maxxfer &&
		    (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
			cmn_err(CE_WARN,
			    "%s: inconsistent maximum transfer size!",
			    name);
			/* We force it */
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		} else {
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		}
	} else {
		bd->d_use_dma = B_FALSE;
		if (bd->d_maxxfer == 0) {
			/* No DMA attributes: default to a 1 MiB max transfer */
			bd->d_maxxfer = 1024 * 1024;
		}
	}
	bd->d_ops = hdl->h_ops;
	bd->d_private = hdl->h_private;
	bd->d_blkshift = 9;	/* 512 bytes, to start */

	/* The maximum transfer must be a non-zero multiple of a block. */
	if (bd->d_maxxfer % DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer misaligned!", name);
		bd->d_maxxfer &= ~(DEV_BSIZE - 1);
	}
	if (bd->d_maxxfer < DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer size too small!", name);
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	/* Cross-link the soft state, the handle and the devinfo node. */
	bd->d_dip = dip;
	bd->d_handle = hdl;
	hdl->h_bd = bd;
	ddi_set_driver_private(dip, bd);

	mutex_init(&bd->d_iomutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_ocmutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_statemutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bd->d_statecv, NULL, CV_DRIVER, NULL);

	/* Wait queue and run queue for transfer scheduling. */
	list_create(&bd->d_waitq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));
	list_create(&bd->d_runq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));

	bd->d_cache = kmem_cache_create(kcache, sizeof (bd_xfer_impl_t), 8,
	    bd_xfer_ctor, bd_xfer_dtor, NULL, bd, NULL, 0);

	bd->d_ksp = kstat_create(ddi_driver_name(dip), inst, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (bd->d_ksp != NULL) {
		bd->d_ksp->ks_lock = &bd->d_iomutex;
		kstat_install(bd->d_ksp);
		bd->d_kiop = bd->d_ksp->ks_data;
	} else {
		/*
		 * Even if we cannot create the kstat, we create a
		 * scratch kstat.  The reason for this is to ensure
		 * that we can update the kstat all of the time,
		 * without adding an extra branch instruction.
		 */
		bd->d_kiop = kmem_zalloc(sizeof (kstat_io_t), KM_SLEEP);
	}

	cmlb_alloc_handle(&bd->d_cmlbh);

	bd->d_state = DKIO_NONE;

	/* Query the parent driver for drive characteristics. */
	bzero(&drive, sizeof (drive));
	bd->d_ops.o_drive_info(bd->d_private, &drive);
	bd->d_qsize = drive.d_qsize;
	bd->d_removable = drive.d_removable;
	bd->d_hotpluggable = drive.d_hotpluggable;

	/* The drive may further restrict the maximum transfer size. */
	if (drive.d_maxxfer && drive.d_maxxfer < bd->d_maxxfer)
		bd->d_maxxfer = drive.d_maxxfer;

	rv = cmlb_attach(dip, &bd_tg_ops, DTYPE_DIRECT,
	    bd->d_removable, bd->d_hotpluggable,
	    /* a non-negative LUN means a channel-addressed node type */
	    drive.d_lun >= 0 ? DDI_NT_BLOCK_CHAN : DDI_NT_BLOCK,
	    CMLB_FAKE_LABEL_ONE_PARTITION, bd->d_cmlbh, 0);
	if (rv != 0) {
		/* Tear down everything created above, in reverse order. */
		cmlb_free_handle(&bd->d_cmlbh);
		kmem_cache_destroy(bd->d_cache);
		mutex_destroy(&bd->d_iomutex);
		mutex_destroy(&bd->d_ocmutex);
		mutex_destroy(&bd->d_statemutex);
		cv_destroy(&bd->d_statecv);
		list_destroy(&bd->d_waitq);
		list_destroy(&bd->d_runq);
		if (bd->d_ksp != NULL) {
			kstat_delete(bd->d_ksp);
			bd->d_ksp = NULL;
		} else {
			kmem_free(bd->d_kiop, sizeof (kstat_io_t));
		}
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	/* Register a device ID if the parent supports one (best effort). */
	if (bd->d_ops.o_devid_init != NULL) {
		rv = bd->d_ops.o_devid_init(bd->d_private, dip, &bd->d_devid);
		if (rv == DDI_SUCCESS) {
			if (ddi_devid_register(dip, bd->d_devid) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "%s: unable to register devid", name);
			}
		}
	}

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).  Also set up properties
	 * used by HAL to identify removable media.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	if (bd->d_removable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "removable-media", NULL, 0);
	}
	if (bd->d_hotpluggable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "hotpluggable", NULL, 0);
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}