/*
 * kb8042_attach: attach(9E) entry point for the PS/2 (8042) keyboard driver.
 *
 * DDI_RESUME re-initializes the keyboard and restores the last commanded
 * LED state.  DDI_ATTACH creates the minor node, maps the 8042 registers,
 * initializes the hardware mutex, detects the scan code set (SPARC only),
 * and wires up the interrupt handler.  init_state accumulates progress
 * flags so kb8042_cleanup() can unwind a partial attach.
 */
static int
kb8042_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int rc;
	int scanset;
	int leds;
	/* Single statically-allocated unit state; only one 8042 keyboard. */
	struct kb8042 *kb8042 = &Kdws;
	static ddi_device_acc_attr_t attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC,
	};

	switch (cmd) {
	case DDI_RESUME:
		/* Save LED state; kb8042_init() resets it on the device. */
		leds = kb8042->leds.commanded;
		kb8042->w_init = 0;
		kb8042_init(kb8042, B_TRUE);
		kb8042_setled(kb8042, leds, B_FALSE);
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		/* Only one instance is supported. */
		if (kb8042_dip != NULL)
			return (DDI_FAILURE);
		/* The rest of the function is for attach */
		break;

	default:
		return (DDI_FAILURE);
	}

	/* Default kernel-debugger entry chord (set-1 scan codes). */
	kb8042->debugger.mod1 = 58;	/* Left Ctrl */
	kb8042->debugger.mod2 = 60;	/* Left Alt */
	kb8042->debugger.trigger = 33;	/* D */
	kb8042->debugger.mod1_down = B_FALSE;
	kb8042->debugger.mod2_down = B_FALSE;
	kb8042->debugger.enabled = B_FALSE;

	kb8042_dip = devi;
	kb8042->init_state = KB8042_UNINITIALIZED;

	kb8042->polled_synthetic_release_pending = B_FALSE;

	if (ddi_create_minor_node(devi, module_name, S_IFCHR, 0,
	    DDI_NT_KEYBOARD, 0) == DDI_FAILURE) {
		goto failure;
	}

	kb8042->init_state |= KB8042_MINOR_NODE_CREATED;

	rc = ddi_regs_map_setup(devi, 0, (caddr_t *)&kb8042->addr,
	    (offset_t)0, (offset_t)0, &attr, &kb8042->handle);
	if (rc != DDI_SUCCESS) {
#if defined(KD_DEBUG)
		cmn_err(CE_WARN, "kb8042_attach: can't map registers");
#endif
		goto failure;
	}

	kb8042->init_state |= KB8042_REGS_MAPPED;

	if (ddi_get_iblock_cookie(devi, 0, &kb8042->w_iblock) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "kb8042_attach: Can't get iblock cookie");
		goto failure;
	}

	mutex_init(&kb8042->w_hw_mutex, NULL, MUTEX_DRIVER, kb8042->w_iblock);
	kb8042->init_state |= KB8042_HW_MUTEX_INITTED;

	kb8042_init(kb8042, B_FALSE);

#ifdef __sparc
	/* Detect the scan code set currently in use */
	scanset = kb8042_read_scanset(kb8042, B_TRUE);

	if (scanset < 0 && kb8042_warn_unknown_scanset) {
		cmn_err(CE_WARN, "Cannot determine keyboard scan code set ");
		cmn_err(CE_CONT, "(is the keyboard plugged in?). ");
		cmn_err(CE_CONT, "Defaulting to scan code set %d. If the "
		    "keyboard does not ", kb8042_default_scanset);
		cmn_err(CE_CONT, "work properly, add "
		    "`set kb8042:kb8042_default_scanset=%d' to /etc/system ",
		    (kb8042_default_scanset == 1) ? 2 : 1);
		cmn_err(CE_CONT, "(via network or with a USB keyboard) and "
		    "restart the system. If you ");
		cmn_err(CE_CONT, "do not want to see this message in the "
		    "future, add ");
		cmn_err(CE_CONT, "`set kb8042:kb8042_warn_unknown_scanset=0' "
		    "to /etc/system.\n");

		/* Use the default scan code set. */
		scanset = kb8042_default_scanset;
	}
#else
	/* x86 systems use scan code set 1 -- no detection required */
	scanset = 1;
#endif

	if (KeyboardConvertScan_init(kb8042, scanset) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Cannot initialize keyboard scan converter: "
		    "Unknown scan code set `%d'.", scanset);
		/* Scan code set is not supported */
		goto failure;
	}

	/*
	 * Turn on interrupts...
	 */
	if (ddi_add_intr(devi, 0, &kb8042->w_iblock,
	    (ddi_idevice_cookie_t *)NULL, kb8042_intr,
	    (caddr_t)kb8042) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "kb8042_attach: cannot add interrupt");
		goto failure;
	}

	kb8042->init_state |= KB8042_INTR_ADDED;

	ddi_report_dev(devi);

#ifdef KD_DEBUG
	cmn_err(CE_CONT, "?%s #%d: version %s\n",
	    DRIVER_NAME(devi), ddi_get_instance(devi), "1.66 (06/04/07)");
#endif

	return (DDI_SUCCESS);

failure:
	/* Unwinds exactly the steps recorded in init_state. */
	kb8042_cleanup(kb8042);
	return (DDI_FAILURE);
}
/*
 * register ds1307 client device with i2c services, and
 * allocate & initialize soft state structure.
 *
 * On first attach this also checks the DS1307 control register (00h):
 * if the CH (clock halt) bit (0x80) is set, it is cleared to start the
 * oscillator.  Only a single instance is supported (guarded by the
 * file-scope `instance` variable).
 */
static int
todds1307_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	/* No reason for this to be static; it is fully re-derived here. */
	ds1307_state_t *statep = NULL;
	i2c_transfer_t *i2c_tp = NULL;
	uint8_t tempVal = (uint8_t)0;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* Only one ds1307 instance is supported. */
	if (instance != -1) {
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/*
	 * Allocate soft state structure
	 */
	if (ddi_soft_state_zalloc(ds1307_statep, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	statep = ddi_get_soft_state(ds1307_statep, instance);
	if (statep == NULL) {
		return (DDI_FAILURE);
	}

	statep->dip = dip;

	/*
	 * NOTE(review): failure paths below do not undo this registration;
	 * confirm whether i2c_client_unregister() is required on error.
	 */
	if (i2c_client_register(dip, &statep->ds1307_i2c_hdl) != I2C_SUCCESS) {
		ddi_soft_state_free(ds1307_statep, instance);
		delay(drv_usectohz(I2C_DELAY));
		return (DDI_FAILURE);
	}

	/* check and initialize the oscillator */

	(void) i2c_transfer_alloc(statep->ds1307_i2c_hdl, &i2c_tp,
	    1, 1, I2C_SLEEP);
	i2c_tp->i2c_version = I2C_XFER_REV;
	i2c_tp->i2c_flags = I2C_WR_RD;
	i2c_tp->i2c_wbuf[0] = (uchar_t)0x00;	/* Read 00h */
	i2c_tp->i2c_wlen = 1;
	i2c_tp->i2c_rlen = 1;

	if ((i2c_transfer(statep->ds1307_i2c_hdl, i2c_tp)) != I2C_SUCCESS) {
		(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);
		ddi_soft_state_free(ds1307_statep, instance);
		delay(drv_usectohz(I2C_DELAY));
		return (DDI_FAILURE);
	}

	/* Save register 00h before the transfer (and its rbuf) is freed. */
	tempVal = i2c_tp->i2c_rbuf[0];

	(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);

	if (tempVal & 0x80) {			/* check Oscillator */
		(void) i2c_transfer_alloc(statep->ds1307_i2c_hdl, &i2c_tp,
		    2, 1, I2C_SLEEP);
		i2c_tp->i2c_version = I2C_XFER_REV;
		i2c_tp->i2c_flags = I2C_WR;
		i2c_tp->i2c_wbuf[0] = 0x00;
		/*
		 * BUG FIX: write back the value read above with CH cleared.
		 * The old code read i2c_rbuf[0] of this freshly allocated
		 * transfer, which never carried a read and so held no valid
		 * register data.
		 */
		i2c_tp->i2c_wbuf[1] = (uchar_t)(tempVal & 0x7f);
		i2c_tp->i2c_wlen = 2;
		/* Enable oscillator */
		if ((i2c_transfer(statep->ds1307_i2c_hdl, i2c_tp)) !=
		    I2C_SUCCESS) {
			(void) i2c_transfer_free(statep->ds1307_i2c_hdl,
			    i2c_tp);
			ddi_soft_state_free(ds1307_statep, instance);
			return (DDI_FAILURE);
		}
		(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);
	}

	/*
	 * Create a periodical handler to read TOD.
	 */
	ASSERT(statep->cycid == NULL);
	statep->cycid = ddi_periodic_add(todds1307_cyclic, &soft_rtc,
	    i2c_cyclic_timeout, DDI_IPL_1);
	statep->state = TOD_ATTACHED;
	todds1307_attach_done = 1;
	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}
/*
 * ntwdt_attach: attach(9E) for the software watchdog pseudo-driver.
 *
 * Allocates soft state (single instance 0 only), sets up the run-state
 * structure with its soft-interrupt-protected mutex, prepares the cyclic
 * handler that pats the watchdog, registers the softint the cyclic will
 * trigger, and finally creates the minor node.  The err1/err2/err3 labels
 * unwind in strict reverse order of acquisition.
 */
static int
ntwdt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	ntwdt_state_t *ntwdt_ptr = NULL;	/* pointer to ntwdt_runstatep */
	ntwdt_runstate_t *ntwdt_runstatep = NULL;
	cyc_handler_t *hdlr = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* Bail out early if the platform cannot support the watchdog. */
	if (ntwdt_chk_watchdog_support() != 0) {
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	ASSERT(instance == 0);

	if (ddi_soft_state_zalloc(ntwdt_statep, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	ntwdt_ptr = ddi_get_soft_state(ntwdt_statep, instance);
	ASSERT(ntwdt_ptr != NULL);

	ntwdt_dip = dip;

	ntwdt_ptr->ntwdt_dip = dip;
	ntwdt_ptr->ntwdt_cycl_id = CYCLIC_NONE;
	mutex_init(&ntwdt_ptr->ntwdt_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Initialize the watchdog structure
	 */
	ntwdt_ptr->ntwdt_run_state =
	    kmem_zalloc(sizeof (ntwdt_runstate_t), KM_SLEEP);
	ntwdt_runstatep = ntwdt_ptr->ntwdt_run_state;

	/*
	 * The run-state mutex must interlock with the low-level softint,
	 * hence the soft iblock cookie.
	 */
	if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
	    &ntwdt_runstatep->ntwdt_runstate_mtx_cookie) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "init of iblock cookie failed "
		    "for ntwdt_runstate_mutex");
		goto err1;
	} else {
		mutex_init(&ntwdt_runstatep->ntwdt_runstate_mutex,
		    NULL, MUTEX_DRIVER,
		    (void *)ntwdt_runstatep->ntwdt_runstate_mtx_cookie);
	}

	/* Cyclic fires once per second: */
	ntwdt_runstatep->ntwdt_cyclic_interval = NTWDT_CYCLIC_INTERVAL;

	/* init the Cyclic that drives the NTWDT */
	hdlr = &ntwdt_runstatep->ntwdt_cycl_hdlr;
	hdlr->cyh_level = CY_LOCK_LEVEL;
	hdlr->cyh_func = (cyc_func_t)ntwdt_cyclic_pat;
	hdlr->cyh_arg = NULL;

	/* Softint that will be triggered by Cyclic that drives NTWDT */
	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &ntwdt_cyclic_softint_id,
	    NULL, NULL, ntwdt_cyclic_softint,
	    (caddr_t)ntwdt_ptr) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to add cyclic softintr");
		goto err2;
	}

	/*
	 * Create Minor Node as last activity. This prevents
	 * application from accessing our implementation until it
	 * is initialized.
	 */
	if (ddi_create_minor_node(dip, NTWDT_MINOR_NODE, S_IFCHR, 0,
	    DDI_PSEUDO, NULL) == DDI_FAILURE) {
		cmn_err(CE_WARN, "failed to create Minor Node: %s",
		    NTWDT_MINOR_NODE);
		goto err3;
	}

	/* Display our driver info in the banner */
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

	/* Error unwinding: reverse order of the setup steps above. */
err3:
	ddi_remove_softintr(ntwdt_cyclic_softint_id);
err2:
	mutex_destroy(&ntwdt_runstatep->ntwdt_runstate_mutex);
err1:
	/* clean up the driver stuff here */
	kmem_free(ntwdt_runstatep, sizeof (ntwdt_runstate_t));
	ntwdt_ptr->ntwdt_run_state = NULL;
	mutex_destroy(&ntwdt_ptr->ntwdt_mutex);
	ddi_soft_state_free(ntwdt_statep, instance);
	ntwdt_dip = NULL;

	return (DDI_FAILURE);
}
/*
 * bd_attach: attach(9E) for the generic block device (blkdev) nexus child.
 *
 * The parent driver hands us a bd_handle_t via parent data; from it we
 * inherit DMA attributes and ops, size/align the maximum transfer,
 * create the transfer kmem cache, I/O kstats, and the cmlb label handle,
 * then query the drive and attach cmlb.  cmlb_attach() failure tears
 * down everything built up to that point inline.
 */
static int
bd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		inst;
	bd_handle_t	hdl;
	bd_t		*bd;
	bd_drive_t	drive;
	int		rv;
	char		name[16];
	char		kcache[32];

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* We don't do anything native for suspend/resume */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	inst = ddi_get_instance(dip);
	hdl = ddi_get_parent_data(dip);

	/* e.g. "blkdev0" and "blkdev0_xfer" (kmem cache name). */
	(void) snprintf(name, sizeof (name), "%s%d",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	(void) snprintf(kcache, sizeof (kcache), "%s_xfer", name);

	if (hdl == NULL) {
		cmn_err(CE_WARN, "%s: missing parent data!", name);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(bd_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: unable to zalloc soft state!", name);
		return (DDI_FAILURE);
	}
	bd = ddi_get_soft_state(bd_state, inst);

	if (hdl->h_dma) {
		/* Copy parent's DMA attributes; granularity >= one sector. */
		bd->d_dma = *(hdl->h_dma);
		bd->d_dma.dma_attr_granular =
		    max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
		bd->d_use_dma = B_TRUE;

		if (bd->d_maxxfer &&
		    (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
			cmn_err(CE_WARN,
			    "%s: inconsistent maximum transfer size!",
			    name);
			/* We force it */
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		} else {
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		}
	} else {
		bd->d_use_dma = B_FALSE;
		if (bd->d_maxxfer == 0) {
			/* No DMA: default to a 1 MB maximum transfer. */
			bd->d_maxxfer = 1024 * 1024;
		}
	}

	bd->d_ops = hdl->h_ops;
	bd->d_private = hdl->h_private;
	bd->d_blkshift = 9;	/* 512 bytes, to start */

	if (bd->d_maxxfer % DEV_BSIZE) {
		/* Round down to a whole number of sectors. */
		cmn_err(CE_WARN, "%s: maximum transfer misaligned!", name);
		bd->d_maxxfer &= ~(DEV_BSIZE - 1);
	}
	if (bd->d_maxxfer < DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer size too small!",
		    name);
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	bd->d_dip = dip;
	bd->d_handle = hdl;
	hdl->h_bd = bd;
	ddi_set_driver_private(dip, bd);

	mutex_init(&bd->d_iomutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_ocmutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_statemutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bd->d_statecv, NULL, CV_DRIVER, NULL);

	list_create(&bd->d_waitq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));
	list_create(&bd->d_runq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));

	bd->d_cache = kmem_cache_create(kcache, sizeof (bd_xfer_impl_t), 8,
	    bd_xfer_ctor, bd_xfer_dtor, NULL, bd, NULL, 0);

	bd->d_ksp = kstat_create(ddi_driver_name(dip), inst, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (bd->d_ksp != NULL) {
		bd->d_ksp->ks_lock = &bd->d_iomutex;
		kstat_install(bd->d_ksp);
		bd->d_kiop = bd->d_ksp->ks_data;
	} else {
		/*
		 * Even if we cannot create the kstat, we create a
		 * scratch kstat. The reason for this is to ensure
		 * that we can update the kstat all of the time,
		 * without adding an extra branch instruction.
		 */
		bd->d_kiop = kmem_zalloc(sizeof (kstat_io_t), KM_SLEEP);
	}

	cmlb_alloc_handle(&bd->d_cmlbh);

	bd->d_state = DKIO_NONE;

	/* Ask the parent driver about the drive itself. */
	bzero(&drive, sizeof (drive));
	bd->d_ops.o_drive_info(bd->d_private, &drive);
	bd->d_qsize = drive.d_qsize;
	bd->d_removable = drive.d_removable;
	bd->d_hotpluggable = drive.d_hotpluggable;

	/* The drive itself may further restrict the maximum transfer. */
	if (drive.d_maxxfer && drive.d_maxxfer < bd->d_maxxfer)
		bd->d_maxxfer = drive.d_maxxfer;

	rv = cmlb_attach(dip, &bd_tg_ops, DTYPE_DIRECT,
	    bd->d_removable, bd->d_hotpluggable,
	    drive.d_lun >= 0 ? DDI_NT_BLOCK_CHAN : DDI_NT_BLOCK,
	    CMLB_FAKE_LABEL_ONE_PARTITION, bd->d_cmlbh, 0);
	if (rv != 0) {
		/* Tear down everything constructed above, in order. */
		cmlb_free_handle(&bd->d_cmlbh);
		kmem_cache_destroy(bd->d_cache);
		mutex_destroy(&bd->d_iomutex);
		mutex_destroy(&bd->d_ocmutex);
		mutex_destroy(&bd->d_statemutex);
		cv_destroy(&bd->d_statecv);
		list_destroy(&bd->d_waitq);
		list_destroy(&bd->d_runq);
		if (bd->d_ksp != NULL) {
			kstat_delete(bd->d_ksp);
			bd->d_ksp = NULL;
		} else {
			kmem_free(bd->d_kiop, sizeof (kstat_io_t));
		}
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	if (bd->d_ops.o_devid_init != NULL) {
		rv = bd->d_ops.o_devid_init(bd->d_private, dip, &bd->d_devid);
		if (rv == DDI_SUCCESS) {
			/* Registration failure is non-fatal; just warn. */
			if (ddi_devid_register(dip, bd->d_devid) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "%s: unable to register devid", name);
			}
		}
	}

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers). Also set up properties
	 * used by HAL to identify removable media.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	if (bd->d_removable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "removable-media", NULL, 0);
	}
	if (bd->d_hotpluggable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "hotpluggable", NULL, 0);
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}
/**
 * Attach entry point, to attach a device to the system or resume it.
 *
 * @param   pDip    The module structure instance.
 * @param   enmCmd  Operation type (attach/resume).
 *
 * @return  corresponding solaris error code.
 */
static int VBoxDrvSolarisAttach(dev_info_t *pDip, ddi_attach_cmd_t enmCmd)
{
    LogFlowFunc(("VBoxDrvSolarisAttach\n"));

    switch (enmCmd)
    {
        case DDI_ATTACH:
        {
            int rc;
#ifdef USE_SESSION_HASH
            int instance = ddi_get_instance(pDip);
            vbox_devstate_t *pState;

            if (ddi_soft_state_zalloc(g_pVBoxDrvSolarisState, instance) != DDI_SUCCESS)
            {
                LogRel(("VBoxDrvSolarisAttach: state alloc failed\n"));
                return DDI_FAILURE;
            }

            pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, instance);
#endif

            /*
             * Register for suspend/resume notifications
             */
            rc = ddi_prop_create(DDI_DEV_T_NONE, pDip, DDI_PROP_CANSLEEP /* kmem alloc can sleep */,
                                 "pm-hardware-state", "needs-suspend-resume", sizeof("needs-suspend-resume"));
            if (rc != DDI_PROP_SUCCESS)
                LogRel(("vboxdrv: Suspend/Resume notification registration failed.\n"));

            /*
             * Register ourselves as a character device, pseudo-driver.
             * Hardened builds create the system node root-only (0600);
             * non-hardened builds make both nodes world-accessible (0666).
             */
#ifdef VBOX_WITH_HARDENING
            rc = ddi_create_priv_minor_node(pDip, DEVICE_NAME_SYS, S_IFCHR, 0 /*minor*/, DDI_PSEUDO,
                                            0, NULL, NULL, 0600);
#else
            rc = ddi_create_priv_minor_node(pDip, DEVICE_NAME_SYS, S_IFCHR, 0 /*minor*/, DDI_PSEUDO,
                                            0, "none", "none", 0666);
#endif
            if (rc == DDI_SUCCESS)
            {
                /* Second, user-accessible node on minor 1. */
                rc = ddi_create_priv_minor_node(pDip, DEVICE_NAME_USR, S_IFCHR, 1 /*minor*/, DDI_PSEUDO,
                                                0, "none", "none", 0666);
                if (rc == DDI_SUCCESS)
                {
#ifdef USE_SESSION_HASH
                    pState->pDip = pDip;
#endif
                    ddi_report_dev(pDip);
                    return DDI_SUCCESS;
                }

                /* Undo the first minor node if the second one failed. */
                ddi_remove_minor_node(pDip, NULL);
            }

            return DDI_FAILURE;
        }

        case DDI_RESUME:
        {
#if 0
            RTSemFastMutexRequest(g_DevExt.mtxGip);
            if (g_DevExt.pGipTimer)
                RTTimerStart(g_DevExt.pGipTimer, 0);
            RTSemFastMutexRelease(g_DevExt.mtxGip);
#endif
            RTPowerSignalEvent(RTPOWEREVENT_RESUME);
            LogFlow(("vboxdrv: Awakened from suspend.\n"));
            return DDI_SUCCESS;
        }

        default:
            return DDI_FAILURE;
    }

    /* Not reached; all cases return above. */
    return DDI_FAILURE;
}
/*
 * bbc_beep_attach:
 *	Attach an instance of the bbc_beep driver: allocate per-instance
 *	soft state, map the BBC beeper control/counter registers, and
 *	register the beeper entry points with the generic beep module.
 */
static int
bbc_beep_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	bbc_beep_state_t *statep = NULL;	/* per-instance soft state */

	BBC_BEEP_DEBUG1((CE_CONT, "bbc_beep_attach: Start"));

	/* Resume needs no work; anything other than attach is refused. */
	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	inst = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(bbc_beep_statep, inst) != 0)
		return (DDI_FAILURE);

	statep = ddi_get_soft_state(bbc_beep_statep, inst);
	if (statep == NULL)
		return (DDI_FAILURE);

	BBC_BEEP_DEBUG1((CE_CONT, "bbc_beeptr = 0x%p, instance %x",
	    (void *)statep, inst));

	/* Record the dip and start with the beeper silenced. */
	statep->bbc_beep_dip = dip;
	statep->bbc_beep_mode = BBC_BEEP_OFF;

	/* Map the Beep Control and Beep counter Registers */
	if (bbc_beep_map_regs(dip, statep) != DDI_SUCCESS) {
		BBC_BEEP_DEBUG((CE_WARN,
		    "bbc_beep_attach: Mapping of bbc registers failed."));
		bbc_beep_cleanup(statep);
		return (DDI_FAILURE);
	}

	/* Hand our on/off/freq entry points to the common beep module. */
	(void) beep_init((void *)dip, bbc_beep_on,
	    bbc_beep_off, bbc_beep_freq);

	/* Display information in the banner */
	ddi_report_dev(dip);

	BBC_BEEP_DEBUG1((CE_CONT,
	    "bbc_beep_attach: dip = 0x%p done", (void *)dip));

	return (DDI_SUCCESS);
}
/*
 * attach entry point:
 *
 * normal attach:
 *
 *	create soft state structure (dip, reg, nreg and state fields)
 *	map in configuration header
 *	make sure device is properly configured
 *	report device
 */
static int
acebus_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ebus_devstate_t *ebus_p;	/* per ebus state pointer */
	int instance;

	DBG1(D_ATTACH, NULL, "dip=%x\n", dip);

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		/*
		 * Allocate soft state for this instance.
		 */
		if (ddi_soft_state_zalloc(per_acebus_state,
		    instance) != DDI_SUCCESS) {
			DBG(D_ATTACH, NULL, "failed to alloc soft state\n");
			return (DDI_FAILURE);
		}
		ebus_p = get_acebus_soft_state(instance);
		ebus_p->dip = dip;

		/*
		 * Make sure the master enable and memory access enable
		 * bits are set in the config command register.
		 */
		if (!acebus_config(ebus_p)) {
			free_acebus_soft_state(instance);
			return (DDI_FAILURE);
		}

		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "no-dma-interrupt-sync", NULL, 0);

		/* Get our ranges property for mapping child registers. */
		if (acebus_get_ranges_prop(ebus_p) != DDI_SUCCESS) {
			free_acebus_soft_state(instance);
			return (DDI_FAILURE);
		}

		/*
		 * Make the state as attached and report the device.
		 */
		ebus_p->state = ATTACHED;
		ddi_report_dev(dip);
		DBG(D_ATTACH, ebus_p, "returning\n");
		return (DDI_SUCCESS);

	case DDI_RESUME:
		ebus_p = get_acebus_soft_state(instance);

		/*
		 * Make sure the master enable and memory access enable
		 * bits are set in the config command register.
		 */
		if (!acebus_config(ebus_p)) {
			free_acebus_soft_state(instance);
			return (DDI_FAILURE);
		}

		ebus_p->state = RESUMED;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
/*
 * ppb_attach: attach(9E) for the PCI-to-PCI bridge nexus driver.
 *
 * DDI_ATTACH sets up FM capabilities (suppressing ereports when the
 * bridge is an immediate child of npe, which reports on its behalf),
 * walks up the tree to discover whether any ancestor is a PCIe fabric,
 * optionally programs the HyperTransport MSI mapping capability, and
 * initializes hotplug support.  DDI_RESUME restores the saved config
 * registers.
 */
/*ARGSUSED*/
static int
ppb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	dev_info_t *root = ddi_root_node();
	int instance;
	ppb_devstate_t *ppb;
	dev_info_t *pdip;
	ddi_acc_handle_t config_handle;
	char *bus;
	int ret;

	switch (cmd) {
	case DDI_ATTACH:

		/*
		 * Make sure the "device_type" property exists.
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
		    "device_type", "pci");

		/*
		 * Allocate and get soft state structure.
		 */
		instance = ddi_get_instance(devi);
		if (ddi_soft_state_zalloc(ppb_state, instance) != DDI_SUCCESS)
			return (DDI_FAILURE);
		ppb = ddi_get_soft_state(ppb_state, instance);
		ppb->dip = devi;

		/*
		 * don't enable ereports if immediate child of npe
		 */
		if (strcmp(ddi_driver_name(ddi_get_parent(devi)),
		    "npe") == 0)
			ppb->ppb_fmcap = DDI_FM_ERRCB_CAPABLE |
			    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
		else
			ppb->ppb_fmcap = DDI_FM_EREPORT_CAPABLE |
			    DDI_FM_ERRCB_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
			    DDI_FM_DMACHK_CAPABLE;

		ddi_fm_init(devi, &ppb->ppb_fmcap, &ppb->ppb_fm_ibc);

		/* Error/peek-poke mutexes interlock with the FM handler. */
		mutex_init(&ppb->ppb_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ppb->ppb_err_mutex, NULL, MUTEX_DRIVER,
		    (void *)ppb->ppb_fm_ibc);
		mutex_init(&ppb->ppb_peek_poke_mutex, NULL, MUTEX_DRIVER,
		    (void *)ppb->ppb_fm_ibc);

		if (ppb->ppb_fmcap & (DDI_FM_ERRCB_CAPABLE |
		    DDI_FM_EREPORT_CAPABLE))
			pci_ereport_setup(devi);
		if (ppb->ppb_fmcap & DDI_FM_ERRCB_CAPABLE)
			ddi_fm_handler_register(devi, ppb_fm_callback, NULL);

		if (pci_config_setup(devi, &config_handle) != DDI_SUCCESS) {
			/* Unwind FM setup in reverse order. */
			if (ppb->ppb_fmcap & DDI_FM_ERRCB_CAPABLE)
				ddi_fm_handler_unregister(devi);
			if (ppb->ppb_fmcap & (DDI_FM_ERRCB_CAPABLE |
			    DDI_FM_EREPORT_CAPABLE))
				pci_ereport_teardown(devi);
			ddi_fm_fini(devi);
			ddi_soft_state_free(ppb_state, instance);
			return (DDI_FAILURE);
		}

		/*
		 * Walk our ancestors looking for a "pciex" device_type to
		 * learn whether this bridge sits under a PCIe fabric.
		 */
		ppb->parent_bus = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
		for (pdip = ddi_get_parent(devi); pdip && (pdip != root) &&
		    (ppb->parent_bus != PCIE_PCIECAP_DEV_TYPE_PCIE_DEV);
		    pdip = ddi_get_parent(pdip)) {
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
			    DDI_PROP_DONTPASS, "device_type", &bus)
			    != DDI_PROP_SUCCESS)
				break;

			if (strcmp(bus, "pciex") == 0)
				ppb->parent_bus =
				    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV;

			ddi_prop_free(bus);
		}

		/* Tunable: 1 = enable, -1 = disable, else leave alone. */
		if (ppb_support_ht_msimap == 1)
			(void) ppb_ht_msimap_set(config_handle,
			    HT_MSIMAP_ENABLE);
		else if (ppb_support_ht_msimap == -1)
			(void) ppb_ht_msimap_set(config_handle,
			    HT_MSIMAP_DISABLE);

		pci_config_teardown(&config_handle);

		/*
		 * Initialize hotplug support on this bus.
		 */
		if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV)
			ret = pcie_init(devi, NULL);
		else
			ret = pcihp_init(devi);

		if (ret != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "pci: Failed to setup hotplug framework");
			/* ppb_detach() undoes everything done above. */
			(void) ppb_detach(devi, DDI_DETACH);
			return (ret);
		}

		ddi_report_dev(devi);
		return (DDI_SUCCESS);

	case DDI_RESUME:

		/*
		 * Get the soft state structure for the bridge.
		 */
		ppb = ddi_get_soft_state(ppb_state,
		    ddi_get_instance(devi));
		ppb_restore_config_regs(ppb);
		return (DDI_SUCCESS);

	default:
		break;
	}
	return (DDI_FAILURE);
}
/*
 * virtionet_attach: attach(9E) for the virtio network driver.
 *
 * Maps the virtio PCI header and the device-specific configuration
 * region, resets and acknowledges the device, negotiates features,
 * reads the MAC address, sets up virtqueues and interrupts, and
 * registers with the MAC framework.
 *
 * Cleanup now uses a single goto-based unwind instead of the previous
 * six copies of the same free/teardown cascade; each failure jumps to
 * the label that releases exactly what has been acquired so far.
 */
static int
virtionet_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	virtionet_state_t	*sp;
	int			instance;
	int			rc;
	off_t			len;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:	/* resume not supported */
	default:
		return (DDI_FAILURE);
	}

	/* Sanity check - make sure this is indeed virtio PCI device */
	if (virtio_validate_pcidev(dip) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(virtionet_statep, instance) !=
	    DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	sp = ddi_get_soft_state(virtionet_statep, instance);
	ASSERT(sp);
	sp->dip = dip;

	/* Map virtionet PCI header */
	rc = ddi_regs_map_setup(sp->dip, 1, &sp->hdraddr, 0,
	    VIRTIO_DEVICE_SPECIFIC, &virtio_devattr, &sp->hdrhandle);
	if (rc != DDI_SUCCESS)
		goto fail_state;

	/*
	 * The device specific portion is *always* in guest native mode,
	 * so it can be accessed directly, w/o ddi_get()/ddi_put() machinery.
	 */
	/* Map virtionet device specific configuration area */
	if (ddi_dev_regsize(sp->dip, 1, &len) != DDI_SUCCESS)
		goto fail_hdr;
	rc = ddi_regs_map_setup(sp->dip, 1, &sp->devaddr,
	    VIRTIO_DEVICE_SPECIFIC, len - VIRTIO_DEVICE_SPECIFIC,
	    &virtio_devattr, &sp->devhandle);
	if (rc != DDI_SUCCESS)
		goto fail_hdr;

	cmn_err(CE_CONT, "PCI header %p, device specific %p\n", sp->hdraddr,
	    sp->devaddr);
/*
	sp->devcfg =
	    (virtio_net_config_t *)(sp->hdraddr + VIRTIO_DEVICE_SPECIFIC);
*/

	/* Reset device - we are going to re-negotiate feature set */
	VIRTIO_DEV_RESET(sp);

	/* Acknowledge the presense of the device */
	VIRTIO_DEV_ACK(sp);

	rc = virtio_validate_netdev(sp);
	if (rc != DDI_SUCCESS)
		goto fail_dev;

	/* We know how to drive this device */
	VIRTIO_DEV_DRIVER(sp);

	rc = virtionet_negotiate_features(sp);
	if (rc != DDI_SUCCESS)
		goto fail_dev;

	virtionet_get_macaddr(sp);

	rc = virtionet_vq_setup(sp);
	if (rc != DDI_SUCCESS)
		goto fail_dev;

	rc = virtionet_intr_setup(sp);
	if (rc != DDI_SUCCESS)
		goto fail_vq;

	rc = virtionet_mac_register(sp);
	if (rc != DDI_SUCCESS)
		goto fail_intr;

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/* Unwind in strict reverse order of acquisition. */
fail_intr:
	(void) virtionet_intr_teardown(sp);
fail_vq:
	virtionet_vq_teardown(sp);
fail_dev:
	ddi_regs_map_free(&sp->devhandle);
fail_hdr:
	ddi_regs_map_free(&sp->hdrhandle);
fail_state:
	ddi_soft_state_free(virtionet_statep, instance);
	return (DDI_FAILURE);
}
/*
 * audioixp_attach()
 *
 * Description:
 *	Attach an instance of the audioixp driver. This routine does
 *	the device dependent attach tasks.
 *
 * Arguments:
 *	dev_info_t	*dip	Pointer to the device's dev_info struct
 *
 * Returns:
 *	DDI_SUCCESS		The driver was initialized properly
 *	DDI_FAILURE		The driver couldn't be initialized properly
 */
static int
audioixp_attach(dev_info_t *dip)
{
	uint16_t		cmdeg;
	audioixp_state_t	*statep;
	audio_dev_t		*adev;
	uint32_t		devid;
	const char		*name;
	const char		*rev;

	/* we don't support high level interrupts in the driver */
	if (ddi_intr_hilevel(dip, 0) != 0) {
		cmn_err(CE_WARN,
		    "!%s%d: unsupported high level interrupt",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/* allocate the soft state structure */
	statep = kmem_zalloc(sizeof (*statep), KM_SLEEP);
	statep->dip = dip;
	ddi_set_driver_private(dip, statep);

	if (ddi_get_iblock_cookie(dip, 0, &statep->iblock) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "!%s%d: cannot get iblock cookie",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		kmem_free(statep, sizeof (*statep));
		return (DDI_FAILURE);
	}
	mutex_init(&statep->inst_lock, NULL, MUTEX_DRIVER, statep->iblock);

	/*
	 * From here on, audioixp_destroy() tears down whatever has been
	 * set up, so failures just jump to "error".
	 */

	/* allocate framework audio device */
	if ((adev = audio_dev_alloc(dip, 0)) == NULL) {
		cmn_err(CE_WARN, "!%s%d: unable to allocate audio dev",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto error;
	}
	statep->adev = adev;

	/* map in the registers */
	if (audioixp_map_regs(statep) != DDI_SUCCESS) {
		audio_dev_warn(adev, "couldn't map registers");
		goto error;
	}

	/* set device information -- this could be smarter */
	devid = ((pci_config_get16(statep->pcih, PCI_CONF_VENID)) << 16) |
	    pci_config_get16(statep->pcih, PCI_CONF_DEVID);

	name = "ATI AC'97";
	switch (devid) {
	case IXP_PCI_ID_200:
		rev = "IXP150";
		break;
	case IXP_PCI_ID_300:
		rev = "SB300";
		break;
	case IXP_PCI_ID_400:
		/* High bit of the revision id distinguishes SB450/SB400. */
		if (pci_config_get8(statep->pcih, PCI_CONF_REVID) & 0x80) {
			rev = "SB450";
		} else {
			rev = "SB400";
		}
		break;
	case IXP_PCI_ID_SB600:
		rev = "SB600";
		break;
	default:
		rev = "Unknown";
		break;
	}
	audio_dev_set_description(adev, name);
	audio_dev_set_version(adev, rev);

	/* allocate port structures */
	if ((audioixp_alloc_port(statep, IXP_PLAY) != DDI_SUCCESS) ||
	    (audioixp_alloc_port(statep, IXP_REC) != DDI_SUCCESS)) {
		goto error;
	}

	statep->ac97 = ac97_alloc(dip, audioixp_rd97, audioixp_wr97, statep);
	if (statep->ac97 == NULL) {
		audio_dev_warn(adev, "failed to allocate ac97 handle");
		goto error;
	}

	/* set PCI command register: enable I/O and memory access */
	cmdeg = pci_config_get16(statep->pcih, PCI_CONF_COMM);
	pci_config_put16(statep->pcih, PCI_CONF_COMM,
	    cmdeg | PCI_COMM_IO | PCI_COMM_MAE);

	/* set up kernel statistics */
	if ((statep->ksp = kstat_create(IXP_NAME, ddi_get_instance(dip),
	    IXP_NAME, "controller", KSTAT_TYPE_INTR, 1,
	    KSTAT_FLAG_PERSISTENT)) != NULL) {
		kstat_install(statep->ksp);
	}

	if (audioixp_chip_init(statep) != DDI_SUCCESS) {
		audio_dev_warn(statep->adev, "failed to init chip");
		goto error;
	}

	/* initialize the AC'97 part */
	if (ac97_init(statep->ac97, adev) != DDI_SUCCESS) {
		audio_dev_warn(adev, "ac'97 initialization failed");
		goto error;
	}

	/* set up the interrupt handler */
	if (ddi_add_intr(dip, 0, &statep->iblock, NULL, audioixp_intr,
	    (caddr_t)statep) != DDI_SUCCESS) {
		audio_dev_warn(adev, "bad interrupt specification");
		/*
		 * BUG FIX: previously this only warned and fell through,
		 * setting intr_added and continuing attach with no
		 * interrupt handler installed.
		 */
		goto error;
	}
	statep->intr_added = B_TRUE;

	if (audio_dev_register(adev) != DDI_SUCCESS) {
		audio_dev_warn(adev, "unable to register with framework");
		goto error;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

error:
	audioixp_destroy(statep);
	return (DDI_FAILURE);
}
/*
 * gpio_attach: attach(9E) for the GPIO driver.
 *
 * DDI_ATTACH allocates soft state, maps the eight GPIO port registers,
 * dumps their contents via DBG, and creates the minor node.  DDI_RESUME
 * needs no work.  The shared attach_failed path tears down whatever was
 * set up; note ddi_regs_map_free() clears the handle, so re-checking
 * gp_handle there does not double-free — TODO confirm against the DDI.
 */
static int
gpio_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	struct gpio_softc *softc = NULL;
	ddi_device_acc_attr_t dev_attr;

	switch (cmd) {
	case DDI_ATTACH:

		/*
		 * Allocate and get the soft state structure for this
		 * instance.
		 */
		instance = ddi_get_instance(dip);
		DBG(dip, "attach: instance is %d", instance, 0, 0, 0, 0);
		if (ddi_soft_state_zalloc(statep, instance) != DDI_SUCCESS)
			goto attach_failed;
		softc = getsoftc(instance);
		softc->gp_dip = dip;
		softc->gp_state = 0;
		mutex_init(&softc->gp_mutex, NULL, MUTEX_DRIVER, NULL);

		/* Map in the gpio device registers. */
		dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
		dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
		if (ddi_regs_map_setup(dip, 0, (caddr_t *)&softc->gp_regs,
		    0, 0, &dev_attr, &softc->gp_handle) != DDI_SUCCESS)
			goto attach_failed;
		DBG(dip, "attach: regs=0x%p", (uintptr_t)softc->gp_regs,
		    0, 0, 0, 0);

		/* Dump the initial state of all eight port registers. */
		DBG(dip, "attach: port 1 data is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[0]), 0, 0, 0, 0);
		DBG(dip, "attach: port 1 direction is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[1]), 0, 0, 0, 0);
		DBG(dip, "attach: port 1 output type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[2]), 0, 0, 0, 0);
		DBG(dip, "attach: port 1 pull up control type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[3]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 data is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[4]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 direction is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[5]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 output type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[6]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 pull up control type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[7]), 0, 0, 0, 0);

		/* Create device minor nodes. */
		if (ddi_create_minor_node(dip, "gpio", S_IFCHR,
		    instance, NULL, NULL) == DDI_FAILURE) {
			ddi_regs_map_free(&softc->gp_handle);
			goto attach_failed;
		}

		ddi_report_dev(dip);
		return (DDI_SUCCESS);

	case DDI_RESUME:

		/* Nothing to do for a resume. */
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

attach_failed:
	/* softc is NULL only when soft state allocation itself failed. */
	if (softc) {
		mutex_destroy(&softc->gp_mutex);
		if (softc->gp_handle)
			ddi_regs_map_free(&softc->gp_handle);
		ddi_soft_state_free(statep, instance);
		ddi_remove_minor_node(dip, NULL);
	}
	return (DDI_FAILURE);
}
/*
 * pci_attach: attach(9E) for the PCI nexus driver.
 *
 * Creates the "device_type" property, allocates per-instance soft
 * state, brings up the hotplug and pcitool frameworks, and initializes
 * FM support.  The bad_* labels unwind in reverse order of setup.
 */
/*ARGSUSED*/
static int
pci_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	/*
	 * Use the minor number as constructed by pcihp, as the index value
	 * to ddi_soft_state_zalloc.
	 */
	int inst = ddi_get_instance(devi);
	pci_state_t *pci_p = NULL;

	switch (cmd) {
	case DDI_RESUME:
		return (DDI_SUCCESS);
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/* Property creation failure is non-fatal; just complain. */
	if (ddi_prop_update_string(DDI_DEV_T_NONE, devi, "device_type",
	    "pci") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "pci: 'device_type' prop create failed");
	}

	if (ddi_soft_state_zalloc(pci_statep, inst) == DDI_SUCCESS)
		pci_p = ddi_get_soft_state(pci_statep, inst);

	if (pci_p == NULL)
		goto bad_soft_state;

	pci_p->pci_dip = devi;
	pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;

	/*
	 * Initialize hotplug support on this bus. At minimum
	 * (for non hotplug bus) this would create ":devctl" minor
	 * node to support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls
	 * to this bus.
	 */
	if (pcihp_init(devi) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pci: Failed to setup hotplug framework");
		goto bad_pcihp_init;
	}

	/* Second arg: initialize for pci, not pci_express */
	if (pcitool_init(devi, B_FALSE) != DDI_SUCCESS)
		goto bad_pcitool_init;

	/* Fault management: error callback + access/DMA checking. */
	pci_p->pci_fmcap = DDI_FM_ERRCB_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE;
	ddi_fm_init(devi, &pci_p->pci_fmcap, &pci_p->pci_fm_ibc);
	mutex_init(&pci_p->pci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pci_p->pci_err_mutex, NULL, MUTEX_DRIVER,
	    (void *)pci_p->pci_fm_ibc);
	mutex_init(&pci_p->pci_peek_poke_mutex, NULL, MUTEX_DRIVER,
	    (void *)pci_p->pci_fm_ibc);
	if (pci_p->pci_fmcap & DDI_FM_ERRCB_CAPABLE) {
		pci_ereport_setup(devi);
		ddi_fm_handler_register(devi, pci_fm_callback, NULL);
	}

	ddi_report_dev(devi);
	return (DDI_SUCCESS);

bad_pcitool_init:
	(void) pcihp_uninit(devi);
bad_pcihp_init:
	ddi_soft_state_free(pci_statep, inst);
bad_soft_state:
	return (DDI_FAILURE);
}
/*
 * iiattach - attach the Instant Image (ii) pseudo device.
 *
 * Reads tuning properties from ii.conf (debug level, bitmap mode,
 * throttle unit/delay, copy-direct), initializes the dsw device,
 * creates the "ii" minor node and publishes kstats.  On failure the
 * progress flags are stashed in the driver-private data so iidetach()
 * can unwind exactly what was set up.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
iiattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct ii_state *xsp;
	int instance;
	int i;
	intptr_t flags;

	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	/* save the dev_info_t to be used in logging using ddi_log_sysevent */
	ii_dip = dip;

	instance = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(ii_statep, instance) != 0) {
		cmn_err(CE_WARN, "!ii: no memory for instance %d state.",
		    instance);
		return (DDI_FAILURE);
	}
	flags = 0;
	xsp = ddi_get_soft_state(ii_statep, instance);
	if (xsp == NULL) {
		cmn_err(CE_WARN,
		    "!ii: attach: could not get state for instance %d.",
		    instance);
		goto out;
	}

	ii_debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_debug", 0);
	if (ii_debug != 0) {
#ifdef DEBUG
		cmn_err(CE_NOTE, "!ii: initializing ii version %d.%d.%d.%d",
		    dsw_major_rev, dsw_minor_rev,
		    dsw_micro_rev, dsw_baseline_rev);
#else
		if (dsw_micro_rev) {
			cmn_err(CE_NOTE, "!ii: initializing ii vers %d.%d.%d",
			    dsw_major_rev, dsw_minor_rev, dsw_micro_rev);
		} else {
			cmn_err(CE_NOTE, "!ii: initializing ii version %d.%d",
			    dsw_major_rev, dsw_minor_rev);
		}
#endif
		switch (ii_debug) {
		case 1:
		case 2:
			cmn_err(CE_NOTE, "!ii: ii_debug=%d is enabled.",
			    ii_debug);
			break;
		default:
			cmn_err(CE_WARN,
			    "!ii: Value of ii_debug=%d is not 0,1 or 2.",
			    ii_debug);
		}
	}

	ii_bitmap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_bitmap", II_WTHRU);
	switch (ii_bitmap) {
	case II_KMEM:
		if (ii_debug > 0)
			cmn_err(CE_NOTE, "!ii: ii_bitmap is in memory");
		break;
	case II_FWC:
		if (ii_debug > 0)
			cmn_err(CE_NOTE, "!ii: ii_bitmap is on disk,"
			    " no FWC");
		break;
	case II_WTHRU:
		if (ii_debug > 0)
			cmn_err(CE_NOTE, "!ii: ii_bitmap is on disk");
		break;
	default:
		cmn_err(CE_NOTE, "!ii: ii_bitmap=%d out of range; "
		    "defaulting WTHRU(%d)", ii_bitmap, II_WTHRU);
		ii_bitmap = II_WTHRU;
	}

	/* pick up these values if in ii.conf, otherwise leave alone */
	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_throttle_unit", 0);
	if (i > 0) {
		ii_throttle_unit = i;
		if ((ii_throttle_unit < MIN_THROTTLE_UNIT) ||
		    (ii_throttle_unit > MAX_THROTTLE_UNIT) ||
		    (ii_debug > 0))
			cmn_err(CE_NOTE, "!ii: ii_throttle_unit=%d",
			    ii_throttle_unit);
	}

	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_throttle_delay", 0);
	if (i > 0) {
		ii_throttle_delay = i;
		/*
		 * Bug fix: the upper bound was previously compared against
		 * MIN_THROTTLE_DELAY, so too-large values were never
		 * flagged.  Compare against MAX_THROTTLE_DELAY.
		 */
		if ((ii_throttle_delay < MIN_THROTTLE_DELAY) ||
		    (ii_throttle_delay > MAX_THROTTLE_DELAY) ||
		    (ii_debug > 0))
			cmn_err(CE_NOTE, "!ii: ii_throttle_delay=%d",
			    ii_throttle_delay);
	}

	ii_copy_direct = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "ii_copy_direct", 1);
	/*
	 * Bug fix: this block used to re-test the stale "i" left over from
	 * the ii_throttle_delay lookup and clobber ii_copy_direct with that
	 * value.  Validate the property value that was just read instead.
	 */
	if ((ii_copy_direct < 0) || (ii_copy_direct > 1))
		cmn_err(CE_NOTE, "!ii: ii_copy_direct=%d", ii_copy_direct);

	if (_ii_init_dev()) {
		cmn_err(CE_WARN, "!ii: _ii_init_dev failed");
		goto out;
	}
	flags |= DIDINIT;

	xsp->dip = dip;
	xsp->instance = instance;

	if (ddi_create_minor_node(dip, "ii", S_IFCHR, instance, DDI_PSEUDO, 0)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!ii: could not create node.");
		goto out;
	}
	flags |= DIDNODES;

	ddi_set_driver_private(dip, (caddr_t)flags);
	ddi_report_dev(dip);
	ii_create_kstats();

	return (DDI_SUCCESS);

out:
	/* record how far we got so iidetach() unwinds only what exists */
	ddi_set_driver_private(dip, (caddr_t)flags);
	(void) iidetach(dip, DDI_DETACH);
	return (DDI_FAILURE);
}
/*
 * Attach an instance of the device. This happens before an open
 * can succeed.
 *
 * Only DDI_ATTACH is handled: read the node-id / max-devices tuning
 * properties, initialize the raw I/O provider (which needs the
 * nsc_max_devices value), and create the control minor node.
 */
static int
_nsctl_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int rval;

	/* Resume and everything else is unsupported. */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	nsctl_dip = dip;

	/* Announce presence of the device */
	ddi_report_dev(dip);

	/*
	 * Get the node parameters now that we can look up.
	 */
	nsc_min_nodeid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nsc_min_nodeid", 0);
	nsc_max_nodeid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nsc_max_nodeid", 5);
	_nsc_max_devices = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nsc_max_devices", 128);
	_nsc_maxdev = _nsc_max_devices;

	nscsetup();

	/*
	 * Init raw requires the _nsc_max_devices value and so
	 * cannot be done before the nsc_max_devices property has
	 * been read which can only be done after the module is
	 * attached and we have a dip.
	 */
	rval = _nsc_init_raw(_nsc_max_devices);
	if (rval != 0) {
		cmn_err(CE_WARN,
		    "!nsctl: unable to initialize raw io provider: %d", rval);
		return (DDI_FAILURE);
	}

	/*
	 * Init rest of soft state structure
	 */
	rval = ddi_create_minor_node(dip, "c,nsctl", S_IFCHR, 0,
	    DDI_PSEUDO, 0);
	if (rval != DDI_SUCCESS) {
		/* free anything we allocated here */
		cmn_err(CE_WARN,
		    "!_nsctl_attach: ddi_create_minor_node failed %d", rval);
		return (DDI_FAILURE);
	}

	/* Announce presence of the device */
	ddi_report_dev(dip);

	/* mark the device as attached, opens may proceed */
	return (DDI_SUCCESS);
}
/*
 * rmc_comm_attach - attach/resume entry point for the RMC comm driver.
 *
 * Only instance 0 is allowed.  On DDI_RESUME the hardware is reset,
 * interrupts re-enabled, the watchdog conditionally restarted, the data
 * protocol reset, and the CPU signature re-posted.  On DDI_ATTACH the
 * soft state is allocated and the serial device, data protocol and
 * driver interface layers are brought up in that order before
 * interrupts are enabled.
 */
static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

		/*
		 * Restart the hardware watchdog if it was active before
		 * the suspend; tod_lock protects the tod_ops vector.
		 */
		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		/* Reset the data protocol under its own mutex. */
		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

		/*
		 * Re-post the current CPU signature (looked up by symbol
		 * name; may legitimately be absent).
		 */
		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance,
	    "rmc_comm_attach")) == NULL) {
		/*
		 * NOTE(review): rcs is NULL here, so rmc_comm_unattach()
		 * is presumably expected to tolerate a NULL state pointer
		 * — confirm against its implementation.
		 */
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

	/* dip stays NULL until attach is far enough along to succeed */
	rcs->dip = NULL;

	/*
	 * Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24MHz uart clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		/* later failure: tell unattach to tear down dp/serdev too */
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 * Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 * All done, report success
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}
/**
 * At attach time, we allocate the soft state structure for the current
 * instance of the device.
 *
 * The card is validated first (register-page count and size), then the
 * soft state, mutex, minor node and register mapping are set up, and
 * finally the RNG hardware is reset and its modules enabled.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
quantis_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	quantis_soft_state_t *soft_state;
	ddi_device_acc_attr_t dev_acc_attr; /* Hold the device access attributes. */
	int nregs;
	off_t regsize;
	char msg[MAX_MSG_LEN];

	LOG_DEBUG0("attach\n");

	switch (cmd) {
	case DDI_ATTACH:
		instance = ddi_get_instance(dip);
		snprintf(msg, MAX_MSG_LEN,
		    "Attaching the Quantis device %d.\n", instance);
		LOG_DEBUG0(msg);

		/*
		 * PCI devices are self-identifying devices, so we check that we
		 * indeed have a Quantis QRNG card by checking that we have one
		 * register page with the correct size.
		 */
		if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not get the number of register for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}
		if (nregs < 4) {
			snprintf(msg, MAX_MSG_LEN,
			    "The Quantis device %d has %d PCI base registers, but should have at least 4.\n",
			    instance, nregs);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}
		/*
		 * Bug fix: the third argument had been garbled to "®size"
		 * (an HTML-entity mangling of "&regsize"); restore the
		 * address-of operator.
		 */
		if (ddi_dev_regsize(dip, QUANTIS_REG_IDX, &regsize)
		    != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not get the register size for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}
		if (regsize < (int)QUANTIS_REG_LENGTH) {
			/* Message fix: "Quantice" -> "Quantis". */
			snprintf(msg, MAX_MSG_LEN,
			    "The size of the Quantis device (%d) registers file is %d bytes long, "
			    "but should be at least %u bytes long.\n",
			    instance, (int)regsize,
			    (unsigned int)QUANTIS_REG_LENGTH);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}

		LOG_DEBUG0("After test of the validity of the card, before soft state alloc.\n");

		if (ddi_soft_state_zalloc(quantis_soft_state_p, instance)
		    != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not allocate soft state structure for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}

		soft_state = (quantis_soft_state_t *)
		    ddi_get_soft_state(quantis_soft_state_p, instance);
		soft_state->dip = dip;
		ddi_set_driver_private(dip, (caddr_t)soft_state);
		soft_state->cnt = 0;

		/*
		 * Initialize the mutex in the soft state. We have no interrupt,
		 * so we can set `arg' to `NULL'
		 */
		mutex_init(&soft_state->mutex, NULL, MUTEX_DRIVER, NULL);

		if (ddi_create_minor_node(dip, ddi_get_name(dip), S_IFCHR,
		    instance, DDI_PSEUDO, 0) == DDI_FAILURE) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not create minor node for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			mutex_destroy(&soft_state->mutex);
			ddi_soft_state_free(quantis_soft_state_p, instance);
			return DDI_FAILURE;
		}

		LOG_DEBUG1("ddi_get_name %s\n", ddi_get_name(dip));

		/* Little-endian, strictly-ordered register access. */
		dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
		dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
		dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

		if (ddi_regs_map_setup(dip, QUANTIS_REG_IDX,
		    (caddr_t *)&soft_state->regs, 0, QUANTIS_REG_LENGTH,
		    &dev_acc_attr, &soft_state->regs_handle) != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not map the registers space of the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			mutex_destroy(&soft_state->mutex);
			ddi_soft_state_free(quantis_soft_state_p, instance);
			return DDI_FAILURE;
		}

		mutex_enter(&quantis_mutex);
		card_count++;
		mutex_exit(&quantis_mutex);

		/* Reset the RNG and enable its modules under the lock. */
		LOG_DEBUG0("Just before mutex\n");
		mutex_enter(&soft_state->mutex);
		LOG_DEBUG0("Just before rng_reset.\n");
		quantis_rng_reset(soft_state);
		LOG_DEBUG0("Just before enable_modules.\n");
		quantis_rng_enable_modules(soft_state,
		    quantis_rng_modules_mask(soft_state));
		LOG_DEBUG0("Just before release mutex.\n");
		mutex_exit(&soft_state->mutex);

		snprintf(msg, MAX_MSG_LEN,
		    "Successfully attached the Quantis device %d. Currently, %d Quantis cards are available.\n",
		    instance, card_count);
		QUANTIS_INFO(msg);
#ifdef DEBUG
		ddi_report_dev(dip);
#endif
		return DDI_SUCCESS;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		/* Nothing to quiesce; hardware state survives suspend. */
		return DDI_SUCCESS;

	default:
		return DDI_FAILURE;
	}
}
/*
 * sbmem_attach - attach an SBus memory card instance.
 *
 * Requires "size" and "ident" properties on the node; the ident string
 * (allocated by ddi_getlongprop) names the minor node and is freed on
 * both success and failure paths.  DDI_RESUME needs no work.
 */
static int
sbmem_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	struct sbusmem_unit *un;
	int error = DDI_FAILURE;	/* default result for all paths */
	int instance, ilen;
	uint_t size;
	char *ident;

	switch (cmd) {
	case DDI_ATTACH:
		instance = ddi_get_instance(devi);
		/* no "size" property means this is not a usable card */
		size = ddi_getprop(DDI_DEV_T_NONE, devi,
		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "size", -1);
		if (size == (uint_t)-1) {
#ifdef SBUSMEM_DEBUG
			sbusmem_debug(
			    "sbmem_attach%d: No size property\n", instance);
#endif /* SBUSMEM_DEBUG */
			break;
		}
#ifdef SBUSMEM_DEBUG
		{
			struct regspec *rp = ddi_rnumber_to_regspec(devi, 0);

			if (rp == NULL) {
				sbusmem_debug(
				    "sbmem_attach%d: No reg property\n",
				    instance);
			} else {
				sbusmem_debug(
				    "sbmem_attach%d: slot 0x%x size 0x%x\n",
				    instance, rp->regspec_bustype,
				    rp->regspec_size);
			}
		}
#endif /* SBUSMEM_DEBUG */
		/* "ident" is kmem-allocated by ddi_getlongprop; free below */
		if (ddi_getlongprop(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "ident",
		    (caddr_t)&ident, &ilen) != DDI_PROP_SUCCESS) {
#ifdef SBUSMEM_DEBUG
			sbusmem_debug(
			    "sbmem_attach%d: No ident property\n", instance);
#endif /* SBUSMEM_DEBUG */
			break;
		}
		if (ddi_soft_state_zalloc(sbusmem_state_head, instance) !=
		    DDI_SUCCESS)
			break;
		if ((un = ddi_get_soft_state(sbusmem_state_head,
		    instance)) == NULL) {
			ddi_soft_state_free(sbusmem_state_head, instance);
			break;
		}
		/* minor node is named after the card's ident string */
		if (ddi_create_minor_node(devi, ident, S_IFCHR, instance,
		    DDI_PSEUDO, NULL) == DDI_FAILURE) {
			kmem_free(ident, ilen);
			ddi_remove_minor_node(devi, NULL);
			ddi_soft_state_free(sbusmem_state_head, instance);
			break;
		}
		kmem_free(ident, ilen);
		un->dip = devi;
		un->size = size;
		un->pagesize = ddi_ptob(devi, 1);
#ifdef SBUSMEM_DEBUG
		sbusmem_debug("sbmem_attach%d: dip 0x%p size 0x%x\n",
		    instance, devi, size);
#endif /* SBUSMEM_DEBUG */
		ddi_report_dev(devi);
		error = DDI_SUCCESS;
		break;
	case DDI_RESUME:
		/* nothing to restore */
		error = DDI_SUCCESS;
		break;
	default:
		break;
	}
	return (error);
}
/*
 * ds1287_attach - attach the DS1287 RTC / power-button driver.
 *
 * A single instance is enforced via the file-scope "instance" variable
 * (-1 means not yet attached).  Setup order: "interrupt-priorities"
 * property, soft interrupt, hard interrupt, minor node; the error
 * labels below tear these down in reverse order.
 */
static int
ds1287_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct ds1287 *softsp;

	DPRINTF("ds1287_attach\n");
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* file-scope "instance" doubles as the single-instance guard */
	if (instance != -1) {
		cmn_err(CE_WARN, "ds1287_attach: Another instance is already "
		    "attached.");
		return (DDI_FAILURE);
	}
	instance = ddi_get_instance(dip);

	/* the RTC address register must have been mapped earlier */
	if (v_rtc_addr_reg == NULL) {
		cmn_err(CE_WARN, "ds1287_attach: v_rtc_addr_reg is NULL");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate softc information.
	 */
	if (ddi_soft_state_zalloc(ds1287_state, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to allocate "
		    "soft states.");
		return (DDI_FAILURE);
	}

	softsp = ddi_get_soft_state(ds1287_state, instance);
	DPRINTF("ds1287_attach: instance=%d softsp=0x%p\n", instance,
	    (void *)softsp);

	softsp->dip = dip;

	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "interrupt-priorities", (caddr_t)&ds1287_interrupt_priority,
	    sizeof (int)) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to create \""
		    "interrupt-priorities\" property.");
		goto error;
	}

	/* add the softint */
	ds1287_lo_iblock = (ddi_iblock_cookie_t)(uintptr_t)
	    ipltospl(ds1287_softint_priority);

	if (ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &ds1287_softintr_id,
	    &ds1287_lo_iblock, NULL, ds1287_softintr, (caddr_t)softsp) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to add low interrupt.");
		goto error1;
	}

	/* add the hi interrupt */
	if (ddi_add_intr(dip, 0, NULL, (ddi_idevice_cookie_t *)
	    &ds1287_hi_iblock, ds1287_intr, NULL) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to add high "
		    "interrupt.");
		goto error2;
	}

	/*
	 * Combination of instance number and clone number 0 is used for
	 * creating the minor node.
	 */
	if (ddi_create_minor_node(dip, "power_button", S_IFCHR,
	    (instance << 8) + 0, "ddi_power_button", NULL) == DDI_FAILURE) {
		cmn_err(CE_WARN, "ds1287_attach: Failed to create minor node");
		goto error3;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

	/* unwind in reverse order of the setup above */
error3:
	ddi_remove_intr(dip, 0, NULL);
error2:
	ddi_remove_softintr(ds1287_softintr_id);
error1:
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "interrupt-priorities");
error:
	ddi_soft_state_free(ds1287_state, instance);
	return (DDI_FAILURE);
}
/*
 * dm2s_attach - Module's attach routine.
 *
 * Supports a single instance (0) and DDI_ATTACH only.  Allocates the
 * per-instance soft state, obtains a soft-interrupt block cookie at
 * SCF_EVENT_PRI, initializes the lock and condition variable, and
 * creates the "<modname>0" minor node.  Progress is tracked in
 * ms_clean so dm2s_cleanup() can unwind a partial attach.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
dm2s_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	dm2s_t *dm2sp;
	char name[20];

	instance = ddi_get_instance(dip);

	/* Only one instance is supported. */
	if (instance != 0) {
		cmn_err(CE_WARN, "only one instance is supported");
		return (DDI_FAILURE);
	}

	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(dm2s_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "softstate allocation failure");
		return (DDI_FAILURE);
	}

	dm2sp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate, instance);
	if (dm2sp == NULL) {
		ddi_soft_state_free(dm2s_softstate, instance);
		cmn_err(CE_WARN, "softstate allocation failure.");
		return (DDI_FAILURE);
	}
	dm2sp->ms_dip = dip;
	dm2sp->ms_major = ddi_name_to_major(ddi_get_name(dip));
	dm2sp->ms_ppa = instance;

	/*
	 * Get an interrupt block cookie corresponding to the
	 * interrupt priority of the event handler.
	 * Assert that the event priority is not re-defined to
	 * some higher priority.
	 */
	/* LINTED */
	ASSERT(SCF_EVENT_PRI == DDI_SOFTINT_LOW);
	if (ddi_get_soft_iblock_cookie(dip, SCF_EVENT_PRI,
	    &dm2sp->ms_ibcookie) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_get_soft_iblock_cookie failed.");
		goto error;
	}
	mutex_init(&dm2sp->ms_lock, NULL, MUTEX_DRIVER,
	    (void *)dm2sp->ms_ibcookie);
	dm2sp->ms_clean |= DM2S_CLEAN_LOCK;
	cv_init(&dm2sp->ms_wait, NULL, CV_DRIVER, NULL);
	dm2sp->ms_clean |= DM2S_CLEAN_CV;

	/*
	 * Robustness fix: use a bounded snprintf instead of sprintf so an
	 * unexpectedly long DM2S_MODNAME cannot overflow name[20].
	 */
	(void) snprintf(name, sizeof (name), "%s%d", DM2S_MODNAME, instance);
	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_PSEUDO, NULL) == DDI_FAILURE) {
		ddi_remove_minor_node(dip, NULL);
		cmn_err(CE_WARN, "Device node creation failed.");
		goto error;
	}

	dm2sp->ms_clean |= DM2S_CLEAN_NODE;
	ddi_set_driver_private(dip, (caddr_t)dm2sp);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

error:
	/* ms_clean tells cleanup exactly what was initialized */
	dm2s_cleanup(dm2sp);
	return (DDI_FAILURE);
}
/*
 * attach
 *
 * DDI_ATTACH brings the instance up in stages (t1394 attach, mutex,
 * bus events, isoch, async); on failure av1394_cleanup() is called
 * with a level code indicating how far setup progressed.
 * DDI_RESUME delegates to av1394_cpr_resume().
 */
static int
av1394_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	av1394_inst_t *avp;

	AV1394_TNF_ENTER(av1394_attach);

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/*
		 * NOTE(review): these resume returns skip
		 * AV1394_TNF_EXIT, unlike every other exit path —
		 * confirm whether that is intentional.
		 */
		if ((avp = AV1394_INST2STATE(instance)) == NULL) {
			return (DDI_FAILURE);
		}
		return (av1394_cpr_resume(avp));
	default:
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(av1394_statep, instance) != 0) {
		TNF_PROBE_0(av1394_attach_error_soft_state_zalloc,
		    AV1394_TNF_INST_ERROR, "");
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}
	avp = AV1394_INST2STATE(instance);

	/* cleanup level 1: only the soft state exists */
	if (av1394_t1394_attach(avp, dip) != DDI_SUCCESS) {
		av1394_cleanup(avp, 1);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

	mutex_init(&avp->av_mutex, NULL, MUTEX_DRIVER,
	    avp->av_attachinfo.iblock_cookie);

#ifndef __lock_lint
	avp->av_dip = dip;
	avp->av_instance = instance;
#endif

	/* cleanup levels 2-4 mirror the setup stages below */
	if (av1394_add_events(avp) != DDI_SUCCESS) {
		av1394_cleanup(avp, 2);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}
	if (av1394_isoch_attach(avp) != DDI_SUCCESS) {
		av1394_cleanup(avp, 3);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}
	if (av1394_async_attach(avp) != DDI_SUCCESS) {
		av1394_cleanup(avp, 4);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

#ifndef __lock_lint
	avp->av_dev_state = AV1394_DEV_ONLINE;
#endif

	ddi_report_dev(dip);

	AV1394_TNF_EXIT(av1394_attach);
	return (DDI_SUCCESS);
}
/*
 * wusb_df_attach:
 *	Attach or resume.
 *
 *	For attach, initialize state and device, including:
 *		state variables, locks, device node
 *		device registration with system
 *		power management, hotplugging
 *	For resume, restore device and state
 */
static int
wusb_df_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	char *devinst;
	int devinstlen;
	wusb_df_state_t *wusb_dfp = NULL;
	usb_ep_data_t *ep_datap;
	int status;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		wusb_df_cpr_resume(dip);

		/*
		 * Always return success to work around enumeration failures.
		 * This works around an issue where devices which are present
		 * before a suspend and absent upon resume could cause a system
		 * panic on resume.
		 */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(wusb_df_statep, instance) == DDI_SUCCESS) {
		wusb_dfp = ddi_get_soft_state(wusb_df_statep, instance);
	}
	if (wusb_dfp == NULL) {
		return (DDI_FAILURE);
	}

	wusb_dfp->wusb_df_dip = dip;

	/* build and stash a "<driver><instance>: " log prefix */
	devinst = kmem_zalloc(USB_MAXSTRINGLEN, KM_SLEEP);
	devinstlen = snprintf(devinst, USB_MAXSTRINGLEN, "%s%d: ",
	    ddi_driver_name(dip), instance);

	wusb_dfp->wusb_df_devinst = kmem_zalloc(devinstlen + 1, KM_SLEEP);
	(void) strncpy(wusb_dfp->wusb_df_devinst, devinst, devinstlen);
	kmem_free(devinst, USB_MAXSTRINGLEN);

	wusb_dfp->wusb_df_log_hdl = usb_alloc_log_hdl(dip, "wusb_df",
	    &wusb_df_errlevel, &wusb_df_errmask, &wusb_df_instance_debug, 0);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
	    "Attach: enter for attach");

	if ((status = usb_client_attach(dip, USBDRV_VERSION, 0)) !=
	    USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: usb_client_attach failed, error code:%d", status);
		goto fail;
	}

	if ((status = usb_get_dev_data(dip, &wusb_dfp->wusb_df_reg,
	    USB_PARSE_LVL_ALL, 0)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: usb_get_dev_data failed, error code:%d", status);
		goto fail;
	}

	/*
	 * Get the descriptor for an intr pipe at alt 0 of current interface.
	 * This will be used later to open the pipe.
	 */
	if ((ep_datap = usb_lookup_ep_data(dip, wusb_dfp->wusb_df_reg,
	    wusb_dfp->wusb_df_reg->dev_curr_if, 0, 0,
	    USB_EP_ATTR_INTR, USB_EP_DIR_IN)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: Error getting intr endpoint descriptor");
		goto fail;
	}
	wusb_dfp->wusb_df_intr_ep_descr = ep_datap->ep_descr;

	usb_free_descr_tree(dip, wusb_dfp->wusb_df_reg);

	mutex_init(&wusb_dfp->wusb_df_mutex, NULL, MUTEX_DRIVER,
	    wusb_dfp->wusb_df_reg->dev_iblock_cookie);

	cv_init(&wusb_dfp->wusb_df_serial_cv, NULL, CV_DRIVER, NULL);
	wusb_dfp->wusb_df_serial_inuse = B_FALSE;

	wusb_dfp->wusb_df_locks_initialized = B_TRUE;

	/*
	 * create minor node
	 * NOTE(review): "name" is not declared in this function, so it is
	 * presumably a file-scope variable — verify it holds the intended
	 * minor-node name.
	 */
	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    "wusb_df", 0) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "attach: Error creating minor node");
		goto fail;
	}

	/* Put online before PM init as can get power managed afterward. */
	wusb_dfp->wusb_df_dev_state = USB_DEV_ONLINE;

	/*
	 * initialize power management
	 * NOTE(review): return value is not checked — confirm the helper
	 * cannot fail or handles failure internally.
	 */
	wusb_df_init_power_mgmt(wusb_dfp);

	if (usb_register_hotplug_cbs(dip, wusb_df_disconnect_callback,
	    wusb_df_reconnect_callback) != USB_SUCCESS) {
		goto fail;
	}

	/* Report device */
	ddi_report_dev(dip);

	(void) wusb_df_firmware_download(wusb_dfp);
	if (usb_reset_device(dip, USB_RESET_LVL_REATTACH) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_PM, wusb_dfp->wusb_df_log_hdl,
		    "reset device failed");

		/*
		 * NOTE(review): this returns a USB_* code from a DDI attach
		 * entry point and skips the cleanup done at "fail" — should
		 * probably be DDI_FAILURE via goto fail; confirm whether the
		 * constants coincide before relying on this.
		 */
		return (USB_FAILURE);
	}

	return (DDI_SUCCESS);

fail:
	if (wusb_dfp) {
		(void) wusb_df_cleanup(dip, wusb_dfp);
	}

	return (DDI_FAILURE);
}
/*
 * ppb_attach - attach a PCI-to-PCI bridge nexus instance.
 *
 * DDI_ATTACH: sets "device_type", allocates soft state, maps config
 * space, performs power-management setup (holding the power level up
 * via pwr_fp while config registers are read), creates the "ranges"
 * property if absent, determines whether any ancestor is PCI Express,
 * and initializes hotplug/FMA support.
 * DDI_RESUME: restores bridge power state.
 */
/*ARGSUSED*/
static int
ppb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	dev_info_t *root = ddi_root_node();
	int instance;
	ppb_devstate_t *ppb;
	dev_info_t *pdip;
	ddi_acc_handle_t config_handle;
	char *bus;

	switch (cmd) {
	case DDI_ATTACH:

		/*
		 * Make sure the "device_type" property exists.
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
		    "device_type", "pci");

		/*
		 * Allocate and get soft state structure.
		 */
		instance = ddi_get_instance(devi);
		if (ddi_soft_state_zalloc(ppb_state, instance) != DDI_SUCCESS)
			return (DDI_FAILURE);
		ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
		    instance);
		ppb->dip = devi;
		mutex_init(&ppb->ppb_mutex, NULL, MUTEX_DRIVER, NULL);
		ppb->ppb_soft_state = PCI_SOFT_STATE_CLOSED;
		if (pci_config_setup(devi, &config_handle) != DDI_SUCCESS) {
			mutex_destroy(&ppb->ppb_mutex);
			ddi_soft_state_free(ppb_state, instance);
			return (DDI_FAILURE);
		}
		ppb_pwr_setup(ppb, devi);

		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			mutex_enter(&ppb->ppb_pwr_p->pwr_mutex);

			/*
			 * Before reading config registers, make sure power is
			 * on, and remains on.  pwr_fp is decremented again
			 * after the registers have been read below.
			 */
			ppb->ppb_pwr_p->pwr_fp++;

			pci_pwr_change(ppb->ppb_pwr_p,
			    ppb->ppb_pwr_p->current_lvl,
			    pci_pwr_new_lvl(ppb->ppb_pwr_p));
		}

		ppb->ppb_cache_line_size =
		    pci_config_get8(config_handle, PCI_CONF_CACHE_LINESZ);
		ppb->ppb_latency_timer =
		    pci_config_get8(config_handle, PCI_CONF_LATENCY_TIMER);

		/*
		 * Check whether the "ranges" property is present.
		 * Otherwise create the ranges property by reading
		 * the configuration registers
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "ranges") == 0) {
			ppb_create_ranges_prop(devi, config_handle);
		}

		pci_config_teardown(&config_handle);

		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			/* release the power hold taken above */
			ppb->ppb_pwr_p->pwr_fp--;

			pci_pwr_change(ppb->ppb_pwr_p,
			    ppb->ppb_pwr_p->current_lvl,
			    pci_pwr_new_lvl(ppb->ppb_pwr_p));

			mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
		}

		/*
		 * Walk up the tree looking for a "device_type" of "pciex"
		 * to decide whether this bridge sits under PCI Express.
		 */
		ppb->parent_bus = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
		for (pdip = ddi_get_parent(ppb->dip); pdip && (pdip != root) &&
		    (ppb->parent_bus != PCIE_PCIECAP_DEV_TYPE_PCIE_DEV);
		    pdip = ddi_get_parent(pdip)) {
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
			    DDI_PROP_DONTPASS, "device_type", &bus) !=
			    DDI_PROP_SUCCESS)
				break;

			if (strcmp(bus, "pciex") == 0)
				ppb->parent_bus =
				    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV;

			ddi_prop_free(bus);
		}

		/*
		 * Initialize hotplug support on this bus.
		 *
		 * NOTE(review): the "else" below binds to the inner
		 * "if (pcie_init...)" — ppb_init_hotplug() runs only when
		 * the parent is PCIe AND pcie_init succeeded.  Confirm
		 * non-PCIe parents are intentionally left without hotplug
		 * init here.
		 */
		if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV)
			if (pcie_init(devi, NULL) != DDI_SUCCESS) {
				(void) ppb_detach(devi, DDI_DETACH);
				return (DDI_FAILURE);
			} else
				ppb_init_hotplug(ppb);

		DEBUG1(DBG_ATTACH, devi,
		    "ppb_attach(): this nexus %s hotplug slots\n",
		    ppb->hotplug_capable == B_TRUE ? "has":"has no");

		ppb_fm_init(ppb);
		ddi_report_dev(devi);

		return (DDI_SUCCESS);

	case DDI_RESUME:

		/*
		 * Get the soft state structure for the bridge.
		 */
		ppb = (ppb_devstate_t *)
		    ddi_get_soft_state(ppb_state, ddi_get_instance(devi));
		pci_pwr_resume(devi, ppb->ppb_pwr_p);

		return (DDI_SUCCESS);
	}
	/* unrecognized cmd */
	return (DDI_FAILURE);
}
/*
 * attach the module
 *
 * Allocates per-instance state, registers the instance as a pHCI with
 * the mpxio (mdi) framework using the vHCI class embedded in the bus
 * address ("#,<vhci_class>"), and creates the "devctl" minor node.
 */
static int
tphci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct tphci_state *phci = NULL;
	char *vclass;
	int inst;
	int did_phci_register = 0;

	inst = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_RESUME:
	case DDI_PM_RESUME:
		/* nothing to do */
		return (0);
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Allocate phci data structure.
	 */
	if (ddi_soft_state_zalloc(tphci_state, inst) != DDI_SUCCESS)
		return (DDI_FAILURE);

	phci = ddi_get_soft_state(tphci_state, inst);
	ASSERT(phci != NULL);
	phci->dip = dip;

	/* bus_addr has the form #,<vhci_class> */
	vclass = strchr(ddi_get_name_addr(dip), ',');
	if ((vclass == NULL) || (vclass[1] == '\0')) {
		cmn_err(CE_NOTE, "tphci invalid bus_addr %s",
		    ddi_get_name_addr(dip));
		goto bail;
	}

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_phci_register(vclass + 1, dip, 0) != MDI_SUCCESS) {
		cmn_err(CE_WARN, "%s mdi_phci_register failed",
		    ddi_node_name(dip));
		goto bail;
	}
	did_phci_register = 1;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, inst,
	    DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s ddi_create_minor_node failed",
		    ddi_node_name(dip));
		goto bail;
	}

	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    DDI_NO_AUTODETACH, 1);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

bail:
	/* undo only what was done before the failure */
	if (did_phci_register)
		(void) mdi_phci_unregister(dip, 0);
	ddi_soft_state_free(tphci_state, inst);
	return (DDI_FAILURE);
}
/*
 * cnex_attach - attach the channel nexus (cnex) instance.
 *
 * Reads the "reg" property to derive the sun4v config handle,
 * initializes the channel-list mutex, registers the channel ops with
 * the LDC framework, creates the "devctl" minor node, and installs
 * the interrupt redistribution callback.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
/*ARGSUSED*/
static int
cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int rv, instance, reglen;
	cnex_regspec_t *reg_p;
	ldc_cnex_t cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the instance specific soft state structure.
	 * Save the devi for this instance in the soft_state data.
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	cnex_ssp->devi = devi;
	cnex_ssp->clist = NULL;

	/*
	 * Bug fix: the arguments had been garbled to "®_p" / "®len"
	 * (HTML-entity mangling of "&reg_p" / "&reglen").
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		/* bug fix: don't leak the soft state on this path */
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* get the sun4v config handle for this device */
	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
	kmem_free(reg_p, reglen);

	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);

	/* init channel list mutex */
	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);

	/* Register with LDC module */
	cinfo.dip = devi;
	cinfo.reg_chan = cnex_reg_chan;
	cinfo.unreg_chan = cnex_unreg_chan;
	cinfo.add_intr = cnex_add_intr;
	cinfo.rem_intr = cnex_rem_intr;
	cinfo.clr_intr = cnex_clr_intr;

	/*
	 * LDC register will fail if an nexus instance had already
	 * registered with the LDC framework
	 */
	rv = ldc_register(&cinfo);
	if (rv) {
		DWARN("cnex_attach: unable to register with LDC\n");
		/*
		 * Bug fix: the mutex lives inside the soft state, so it
		 * must be destroyed BEFORE the soft state is freed (the
		 * old order was a use-after-free).
		 */
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		/*
		 * NOTE(review): the LDC registration above is not undone on
		 * this path — confirm whether an ldc unregister call is
		 * needed here.
		 */
		ddi_remove_minor_node(devi, NULL);
		/* bug fix: destroy the mutex before freeing its container */
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* Add interrupt redistribution callback. */
	intr_dist_add_weighted(cnex_intr_redist, cnex_ssp);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}
/*
 * emul64_attach - attach the emul64 (emulated SCSI disk) HBA instance.
 *
 * Allocates soft state and a scsi_hba_tran_t, wires up the HBA entry
 * points, attaches the HBA framework, reads "scsi-options", creates
 * the request/response queue mutexes, initializes default target
 * capabilities and starts the completion taskq.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int mutex_initted = 0;	/* tracks mutex setup for the fail path */
	struct emul64 *emul64;
	int instance;
	scsi_hba_tran_t *tran = NULL;
	ddi_dma_attr_t tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		/*
		 * NOTE(review): emul64 is assigned here but nothing else is
		 * done with it before returning — confirm resume really
		 * needs no state restoration.
		 */
		emul64 = TRAN2EMUL64(tran);
		return (DDI_SUCCESS);
	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state", instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran = tran;
	emul64->emul64_dip = dip;

	/* wire the SCSA entry points to this driver's implementations */
	tran->tran_hba_private = emul64;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = emul64_tran_tgt_init;
	tran->tran_tgt_probe = scsi_hba_probe;
	tran->tran_tgt_free = NULL;

	tran->tran_start = emul64_scsi_start;
	tran->tran_abort = emul64_scsi_abort;
	tran->tran_reset = emul64_scsi_reset;
	tran->tran_getcap = emul64_scsi_getcap;
	tran->tran_setcap = emul64_scsi_setcap;
	tran->tran_init_pkt = emul64_scsi_init_pkt;
	tran->tran_destroy_pkt = emul64_scsi_destroy_pkt;
	tran->tran_dmafree = emul64_scsi_dmafree;
	tran->tran_sync_pkt = emul64_scsi_sync_pkt;
	tran->tran_reset_notify = emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);

	/*
	 * mutexes to protect the emul64 request and response queue
	 * NOTE(review): emul64_iblock is read here but is not set anywhere
	 * in this function (the soft state was zalloc'd) — confirm a NULL
	 * cookie is intended for these mutexes.
	 */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);

	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	/* tear down only what was actually set up */
	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}
/*
 * ehci_attach:
 *
 * Description: Attach entry point is called by the Kernel.
 *		Allocates resources for each EHCI host controller instance.
 *		Initializes the EHCI Host Controller.
 *
 *		The ordering below matters: soft state and logging first,
 *		then DMA attributes, pools and register mapping, then
 *		interrupts, controller init, USBA registration and finally
 *		the root hub.  Any failure funnels through ehci_cleanup(),
 *		which tears down according to the ehci_flags bits set so far.
 *
 * Return	: DDI_SUCCESS / DDI_FAILURE.
 */
static int
ehci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			instance;
	ehci_state_t		*ehcip = NULL;
	usba_hcdi_register_args_t hcdi_args;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		ehcip = ehci_obtain_state(dip);

		return (ehci_cpr_resume(ehcip));
	default:
		return (DDI_FAILURE);
	}

	/* Get the instance and create soft state */
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(ehci_statep, instance) != 0) {
		return (DDI_FAILURE);
	}

	ehcip = ddi_get_soft_state(ehci_statep, instance);
	if (ehcip == NULL) {
		return (DDI_FAILURE);
	}

	/* EHCI_ATTACH marks attach-in-progress; cleared on success below. */
	ehcip->ehci_flags = EHCI_ATTACH;

	ehcip->ehci_log_hdl = usb_alloc_log_hdl(dip, "ehci",
	    &ehci_errlevel, &ehci_errmask, &ehci_instance_debug, 0);

	ehcip->ehci_flags |= EHCI_ZALLOC;

	/* Set host controller soft state to initialization */
	ehcip->ehci_hc_soft_state = EHCI_CTLR_INIT_STATE;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehcip = 0x%p", (void *)ehcip);

	/* Initialize the DMA attributes */
	ehci_set_dma_attributes(ehcip);

	/* Save the dip and instance */
	ehcip->ehci_dip = dip;
	ehcip->ehci_instance = instance;

	/* Create the per-instance kstat structures */
	ehci_create_stats(ehcip);

	/* Create the qtd and qh pools */
	if (ehci_allocate_pools(ehcip) != DDI_SUCCESS) {
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}

	/* Initialize the isochronous resources */
	if (ehci_isoc_init(ehcip) != DDI_SUCCESS) {
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}

	/* Map the registers */
	if (ehci_map_regs(ehcip) != DDI_SUCCESS) {
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}

	/* Get the ehci chip vendor and device id */
	ehcip->ehci_vendor_id = pci_config_get16(
	    ehcip->ehci_config_handle, PCI_CONF_VENID);
	ehcip->ehci_device_id = pci_config_get16(
	    ehcip->ehci_config_handle, PCI_CONF_DEVID);
	ehcip->ehci_rev_id = pci_config_get8(
	    ehcip->ehci_config_handle, PCI_CONF_REVID);

	/* Register interrupts */
	if (ehci_register_intrs_and_init_mutex(ehcip) != DDI_SUCCESS) {
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}

	mutex_enter(&ehcip->ehci_int_mutex);

	/* Initialize the controller */
	if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
		mutex_exit(&ehcip->ehci_int_mutex);
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}

	/*
	 * At this point, the hardware will be okay.
	 * Initialize the usba_hcdi structure
	 */
	ehcip->ehci_hcdi_ops = ehci_alloc_hcdi_ops(ehcip);

	mutex_exit(&ehcip->ehci_int_mutex);

	/*
	 * Make this HCD instance known to USBA
	 * (dma_attr must be passed for USBA busctl's)
	 */
	hcdi_args.usba_hcdi_register_version = HCDI_REGISTER_VERSION;
	hcdi_args.usba_hcdi_register_dip = dip;
	hcdi_args.usba_hcdi_register_ops = ehcip->ehci_hcdi_ops;
	hcdi_args.usba_hcdi_register_dma_attr = &ehcip->ehci_dma_attr;

	/*
	 * Priority and iblock_cookie are one and the same
	 * (However, retaining hcdi_soft_iblock_cookie for now
	 * assigning it w/ priority. In future all iblock_cookie
	 * could just go)
	 */
	hcdi_args.usba_hcdi_register_iblock_cookie =
	    (ddi_iblock_cookie_t)(uintptr_t)ehcip->ehci_intr_pri;

	if (usba_hcdi_register(&hcdi_args, 0) != DDI_SUCCESS) {
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}

	ehcip->ehci_flags |= EHCI_USBAREG;

	mutex_enter(&ehcip->ehci_int_mutex);

	if ((ehci_init_root_hub(ehcip)) != USB_SUCCESS) {
		mutex_exit(&ehcip->ehci_int_mutex);
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}

	mutex_exit(&ehcip->ehci_int_mutex);

	/* Finally load the root hub driver */
	if (ehci_load_root_hub_driver(ehcip) != USB_SUCCESS) {
		(void) ehci_cleanup(ehcip);

		return (DDI_FAILURE);
	}
	ehcip->ehci_flags |= EHCI_RHREG;

	/* Display information in the banner */
	ddi_report_dev(dip);

	mutex_enter(&ehcip->ehci_int_mutex);

	/* Reset the ehci initialization flag */
	ehcip->ehci_flags &= ~EHCI_ATTACH;

	/* Print the Host Control's Operational registers */
	ehci_print_caps(ehcip);
	ehci_print_regs(ehcip);

	/* Advertise the idle-speed power management capability. */
	(void) pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000);

	mutex_exit(&ehcip->ehci_int_mutex);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_attach: dip = 0x%p done", (void *)dip);

	return (DDI_SUCCESS);
}
/*
 * attach entry point:
 *
 * normal attach:
 *
 *	create soft state structure (dip, reg, nreg and state fields)
 *	map in configuration header
 *	make sure device is properly configured
 *	report device
 *
 * Returns DDI_SUCCESS on success, DDI_FAILURE otherwise.  On failure,
 * the goto-cleanup chain below releases exactly what was acquired, in
 * reverse order.  DDI_RESUME is a no-op.
 */
static int
pmubus_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pmubus_devstate_t *pmubusp;	/* per pmubus state pointer */
	int32_t instance;

	switch (cmd) {
	case DDI_ATTACH:
		/*
		 * Allocate soft state for this instance.
		 */
		instance = ddi_get_instance(dip);
		if (ddi_soft_state_zalloc(per_pmubus_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't allocate soft "
			    "state.\n");
			goto fail_exit;
		}
		pmubusp = ddi_get_soft_state(per_pmubus_state, instance);
		pmubusp->pmubus_dip = dip;

		/* Cache our register property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&pmubusp->pmubus_regp,
		    &pmubusp->pmubus_reglen) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't acquire reg "
			    "property.\n");
			goto fail_get_regs;
		}

		/* Cache our ranges property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&pmubusp->pmubus_rangep,
		    &pmubusp->pmubus_rnglen) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't acquire the "
			    "ranges property.\n");
			goto fail_get_ranges;
		}

		/* Calculate the number of ranges */
		pmubusp->pmubus_nranges =
		    pmubusp->pmubus_rnglen / sizeof (pmu_rangespec_t);

		/* Set up the mapping to our registers */
		if (pci_config_setup(dip, &pmubusp->pmubus_reghdl) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "pmubus_attach: Can't map in "
			    "register space.\n");
			goto fail_map_regs;
		}

		/* Initialize our register access mutex */
		mutex_init(&pmubusp->pmubus_reg_access_lock, NULL,
		    MUTEX_DRIVER, NULL);

		ddi_report_dev(dip);
		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		/*
		 * BUG FIX: without this default case, an unrecognized
		 * command (e.g. DDI_PM_RESUME) fell out of the switch into
		 * the cleanup labels below and called kmem_free() through
		 * the uninitialized 'pmubusp' pointer — undefined behavior.
		 */
		return (DDI_FAILURE);
	}

fail_map_regs:
	kmem_free(pmubusp->pmubus_rangep, pmubusp->pmubus_rnglen);

fail_get_ranges:
	kmem_free(pmubusp->pmubus_regp, pmubusp->pmubus_reglen);

fail_get_regs:
	ddi_soft_state_free(per_pmubus_state, instance);

fail_exit:
	return (DDI_FAILURE);
}
/*
 * machtrace_attach()
 *
 * DDI_ATTACH: point machtrace_probe at dtrace_probe, create the
 * "machtrace" minor node and register the "mach_trap" provider with
 * dtrace.  On any failure, the probe pointer is restored to the stub,
 * the minor node removed, and DDI_FAILURE returned.  DDI_RESUME is a
 * no-op.
 *
 * NOTE: the #if !defined(__APPLE__) / #else arms each open the same
 * if-statement; the shared failure body after #endif closes it for
 * both builds.
 */
static int
machtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

#if !defined(__APPLE__)
	machtrace_probe = (void (*)())dtrace_probe;
	/* Publish the probe pointer before registering the provider. */
	membar_enter();

	if (ddi_create_minor_node(devi, "machtrace", S_IFCHR, 0,
	    DDI_PSEUDO, NULL) == DDI_FAILURE ||
	    dtrace_register("mach_trap", &machtrace_attr, DTRACE_PRIV_USER,
	    NULL, &machtrace_pops, NULL, &machtrace_id) != 0) {
		machtrace_probe = systrace_stub;
#else
	machtrace_probe = dtrace_probe;
	/* Publish the probe pointer before registering the provider. */
	membar_enter();

	if (ddi_create_minor_node(devi, "machtrace", S_IFCHR, 0,
	    DDI_PSEUDO, 0) == DDI_FAILURE ||
	    dtrace_register("mach_trap", &machtrace_attr, DTRACE_PRIV_USER,
	    NULL, &machtrace_pops, NULL, &machtrace_id) != 0) {
		machtrace_probe = (void (*))&systrace_stub;
#endif /* __APPLE__ */
		/* Shared failure path for both build variants. */
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	machtrace_devi = devi;

	return (DDI_SUCCESS);
}

d_open_t _systrace_open;

/* Character-device open entry point; always succeeds, no state kept. */
int
_systrace_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return 0;
}

#define SYSTRACE_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.  Only open is implemented; everything else is an errno stub.
 */
static struct cdevsw systrace_cdevsw =
{
	_systrace_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	eno_ioctl,		/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};

/* One-shot init guard; systrace_init() panics if re-entered. */
static int gSysTraceInited = 0;

void systrace_init( void );

/*
 * systrace_init()
 *
 * Registers the systrace cdevsw and attaches both the systrace and
 * machtrace providers, passing the major number in place of a dip.
 * Must be called exactly once.
 */
void systrace_init( void )
{
	if (0 == gSysTraceInited) {
		int majdevno = cdevsw_add(SYSTRACE_MAJOR, &systrace_cdevsw);

		if (majdevno < 0) {
			printf("systrace_init: failed to allocate a major number!\n");
			gSysTraceInited = 0;
			return;
		}

		/* The "dip" is really just the major number smuggled in. */
		systrace_attach( (dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH );
		machtrace_attach( (dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH );

		gSysTraceInited = 1;
	} else
		panic("systrace_init: called twice!\n");
}
#undef SYSTRACE_MAJOR
/* Closes an #ifdef __APPLE__ opened earlier in the file (not visible here). */
#endif /* __APPLE__ */

/*
 * systrace_getarg()
 *
 * dtrace getargval entry point: fetch syscall argument 'argno' from the
 * per-uthread saved argument array.  Returns 0 when no uthread or no
 * saved arguments are available.  NOFAULT is set around the load so a
 * bad access is absorbed by the dtrace fault machinery.
 */
static uint64_t
systrace_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
#pragma unused(arg,id,parg,aframes)	/* __APPLE__ */
	uint64_t val = 0;
	syscall_arg_t *stack = (syscall_arg_t *)NULL;

	uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

	if (uthread)
		stack = (syscall_arg_t *)uthread->t_dtrace_syscall_args;

	if (!stack)
		return(0);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	/* dtrace_probe arguments arg0 .. arg4 are 64bits wide */
	val = (uint64_t)*(stack+argno);
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	return (val);
}
/*
 * attach the module
 *
 * DDI_ATTACH: allocate per-instance soft state, require the parent to
 * be /pshot directly under the root node, register this instance with
 * the mpxio (mdi) framework under the vHCI class taken from the bus
 * address, and create the "devctl" minor node.  DDI_RESUME and
 * DDI_PM_RESUME are no-ops.
 *
 * Returns DDI_SUCCESS (0 for resume) or DDI_FAILURE; on failure any
 * partial mdi registration and the soft state are torn down.
 */
static int
tvhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char			*vclass;
	int			instance, vhci_regis = 0;
	struct tvhci_state	*vhci = NULL;
	dev_info_t		*pdip;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		return (0);		/* nothing to do */

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(tvhci_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(tvhci_state, instance);
	ASSERT(vhci != NULL);
	vhci->dip = dip;

	/* parent must be /pshot */
	pdip = ddi_get_parent(dip);
	if (strcmp(ddi_driver_name(pdip), "pshot") != 0 ||
	    ddi_get_parent(pdip) != ddi_root_node()) {
		cmn_err(CE_NOTE, "tvhci must be under /pshot/");
		goto attach_fail;
	}

	/*
	 * XXX add mpxio-disable property. need to remove the check
	 * from the framework
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "mpxio-disable", "no");

	/* bus_addr is the <vhci_class> */
	vclass = ddi_get_name_addr(dip);
	/*
	 * BUG FIX: test the first character (vclass[0]) to reject an
	 * empty class name.  The previous vclass[1] test read past the
	 * terminator of an empty string and wrongly rejected valid
	 * single-character class names.
	 */
	if (vclass == NULL || vclass[0] == '\0') {
		cmn_err(CE_NOTE, "tvhci invalid vhci class");
		goto attach_fail;
	}

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(vclass, dip, &tvhci_opinfo, 0) != MDI_SUCCESS) {
		cmn_err(CE_WARN, "%s mdi_vhci_register failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}
	vhci_regis++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
	    DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s ddi_create_minor_node failed",
		    ddi_node_name(dip));
		goto attach_fail;
	}

	/* Keep the instance attached; the test framework expects it. */
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    DDI_NO_AUTODETACH, 1);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_regis)
		(void) mdi_vhci_unregister(dip, 0);

	ddi_soft_state_free(tvhci_state, instance);
	return (DDI_FAILURE);
}
/*
 * sbbc_attach()
 *
 * DDI_ATTACH: allocate soft state, initialize it via softsp_init(),
 * require an 'interrupts' property, add the instance to the chosen
 * IOSRAM list (under chosen_lock) and, if it is the chosen IOSRAM,
 * bind it to the master and run chosen-only init.  DEBUG builds also
 * create an sbbc<instance> minor node.
 *
 * DDI_RESUME: for the suspended chosen instance, re-enable the PCI
 * INT#A lines and restore intr_in_enabled in IOSRAM so the SC can
 * interrupt us again.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
sbbc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance;
	sbbc_softstate_t *softsp;
	uint32_t *pci_intr_enable_reg;
	int len;

#ifdef DEBUG
	char name[8];
#endif /* DEBUG */

	instance = ddi_get_instance(devi);

	switch (cmd) {
	case DDI_ATTACH:

		if (ddi_soft_state_zalloc(sbbcp, instance) != 0)
			return (DDI_FAILURE);

		softsp = ddi_get_soft_state(sbbcp, instance);

		softsp->sbbc_instance = instance;

		/*
		 * Set the dip in the soft state
		 * And get interrupt cookies and initialize the
		 * per instance mutex.
		 */
		softsp_init(softsp, devi);

		/*
		 * Verify that an 'interrupts' property exists for
		 * this device. If not, this instance will be ignored.
		 *
		 * NOTE(review): this path returns without freeing the
		 * soft state allocated above — apparent leak; confirm
		 * whether it is intentional for ignored instances.
		 */
		if (ddi_getproplen(DDI_DEV_T_ANY, softsp->dip,
		    DDI_PROP_DONTPASS, "interrupts", &len)
		    != DDI_PROP_SUCCESS) {
			SBBC_ERR1(CE_WARN, "No 'interrupts' property for the "
			    "SBBC instance %d\n", instance);
			return (DDI_FAILURE);
		}
		/*
		 * Add this instance to the sbbc chosen iosram list
		 * so that it can be used for tunnel switch.
		 */
		mutex_enter(&chosen_lock);
		softsp->sbbc_state = SBBC_STATE_INIT;
		sbbc_add_instance(softsp);

		/*
		 * If this is the chosen IOSRAM and there is no master IOSRAM
		 * yet, then let's set this instance as the master.
		 * if there is a master already due to the previous tunnel
		 * switch then keep as is even though this is the chosen.
		 */
		if (sgsbbc_iosram_is_chosen(softsp)) {
			ASSERT(master_iosram);
			softsp->iosram = master_iosram;
			master_iosram->sgsbbc = softsp;

			/* Do 'chosen' init only */
			sbbc_chosen_init(softsp);
		}

		mutex_exit(&chosen_lock);

#ifdef DEBUG
		/* DEBUG-only: expose the instance as /dev node sbbc<N>. */
		(void) sprintf(name, "sbbc%d", instance);

		if (ddi_create_minor_node(devi, name, S_IFCHR, instance,
		    NULL, NULL) == DDI_FAILURE) {
			mutex_destroy(&softsp->sbbc_lock);
			ddi_remove_minor_node(devi, NULL);
			ddi_soft_state_free(sbbcp, instance);
			return (DDI_FAILURE);
		}
#endif /* DEBUG */

		ddi_report_dev(devi);

		return (DDI_SUCCESS);

	case DDI_RESUME:

		if (!(softsp = ddi_get_soft_state(sbbcp, instance)))
			return (DDI_FAILURE);

		mutex_enter(&softsp->sbbc_lock);

		/* Only the suspended chosen instance touches hardware. */
		if ((softsp->suspended == TRUE) && (softsp->chosen == TRUE)) {
			/*
			 * Enable Interrupts now, turn on both INT#A lines
			 */
			pci_intr_enable_reg = (uint32_t *)
			    ((char *)softsp->sbbc_regs +
			    SBBC_PCI_INT_ENABLE);

			ddi_put32(softsp->sbbc_reg_handle1,
			    pci_intr_enable_reg,
			    (uint32_t)SBBC_PCI_ENABLE_INT_A);

			/*
			 * Reset intr_in_enabled to the original value
			 * so the SC can send us interrupt.
			 */
			if (iosram_write(SBBC_SC_INTR_ENABLED_KEY, 0,
			    (caddr_t)&intr_in_enabled,
			    sizeof (intr_in_enabled))) {
				mutex_exit(&softsp->sbbc_lock);
				return (DDI_FAILURE);
			}
		}
		softsp->suspended = FALSE;

		mutex_exit(&softsp->sbbc_lock);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}