/*
 * px_fdvma_load - load one kernel buffer into a page slot of a
 * previously reserved fast-DVMA range and hand back the DMA cookie.
 *
 * h     - fast-DVMA handle (a ddi_dma_impl_t with dmai_fdvma set)
 * a     - kernel virtual address of the buffer to map
 * len   - buffer length in bytes
 * index - page index into the reserved DVMA window
 * cp    - receives the constructed DMA cookie (dvma|offset, len)
 *
 * Errors are reported via cmn_err(); no status is returned to the
 * caller (the fast-DVMA interface is void by design).
 */
/*ARGSUSED*/
static void
px_fdvma_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	px_t *px_p = (px_t *)fdvma_p->softsp;
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	dev_info_t *dip = px_p->px_dip;
	px_dvma_addr_t dvma_addr, dvma_pg;
	uint32_t offset;
	size_t npages, pg_index;
	io_attributes_t attr;

	/* page offset of the buffer start determines rounded page count */
	offset = (uint32_t)(uintptr_t)a & MMU_PAGE_OFFSET;
	npages = MMU_BTOPR(len + offset);
	if (!npages)
		return;

	/* make sure we don't exceed reserved boundary */
	DBG(DBG_FAST_DVMA, dip, "load index=%x: %p+%x ", index, a, len);
	if (index + npages > mp->dmai_ndvmapages) {
		cmn_err(px_panic_on_fatal_errors ? CE_PANIC : CE_WARN,
		    "%s%d: kaddr_load index(%x)+pgs(%lx) exceeds limit\n",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    index, npages);
		return;
	}
	fdvma_p->pagecnt[index] = npages;

	dvma_addr = mp->dmai_mapping + MMU_PTOB(index);
	dvma_pg = MMU_BTOP(dvma_addr);
	pg_index = dvma_pg - mmu_p->dvma_base_pg;

	/* construct the dma cookie to be returned */
	MAKE_DMA_COOKIE(cp, dvma_addr | offset, len);
	DBG(DBG_FAST_DVMA | DBG_CONT, dip, "cookie: %x+%x\n",
	    cp->dmac_address, cp->dmac_size);

	attr = PX_GET_TTE_ATTR(mp->dmai_rflags, mp->dmai_attr.dma_attr_flags);

	/* program the IOMMU; on failure the cookie is unusable */
	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,
	    PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)a, 0,
	    MMU_MAP_BUF) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: kaddr_load can't get "
		    "page frame for vaddr %lx", ddi_driver_name(dip),
		    ddi_get_instance(dip), (uintptr_t)a);
	}
}
/*
 * get_pcmu_properties
 *
 * This function is called from the attach routine to get the key
 * properties of the pci nodes.
 *
 * used by: pcmu_attach()
 *
 * return value: DDI_FAILURE on failure
 */
int
get_pcmu_properties(pcmu_t *pcmu_p, dev_info_t *dip)
{
	int i;

	/*
	 * Get the device's port id.
	 */
	if ((pcmu_p->pcmu_id = (uint32_t)pcmu_get_portid(dip)) == -1u) {
		cmn_err(CE_WARN, "%s%d: no portid property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/*
	 * Get the bus-ranges property.
	 */
	i = sizeof (pcmu_p->pcmu_bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "bus-range", (caddr_t)&pcmu_p->pcmu_bus_range, &i) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: no bus-range property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}
	PCMU_DBG2(PCMU_DBG_ATTACH, dip,
	    "get_pcmu_properties: bus-range (%x,%x)\n",
	    pcmu_p->pcmu_bus_range.lo, pcmu_p->pcmu_bus_range.hi);

	/*
	 * Get the ranges property.
	 *
	 * NOTE(review): ddi_getlongprop() allocates pcmu_ranges; the
	 * attach path is responsible for freeing it on later failure.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&pcmu_p->pcmu_ranges, &pcmu_p->pcmu_ranges_length) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: no ranges property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}
	pcmu_fix_ranges(pcmu_p->pcmu_ranges,
	    pcmu_p->pcmu_ranges_length / sizeof (pcmu_ranges_t));

	/*
	 * Determine the number upa slot interrupts.
	 */
	pcmu_p->pcmu_numproxy = pcmu_get_numproxy(pcmu_p->pcmu_dip);
	PCMU_DBG1(PCMU_DBG_ATTACH, dip, "get_pcmu_properties: numproxy=%d\n",
	    pcmu_p->pcmu_numproxy);

	return (DDI_SUCCESS);
}
/*
 * ddksample_getinfo - getinfo(9E) entry point: translate a dev_t into
 * either the owning dev_info_t or the instance number.
 *
 * Fixes vs. original: the instance is cast through uintptr_t before
 * being stored in *result (avoids int-to-pointer width warnings on
 * 64-bit kernels); the archaic `register' qualifier and the unused
 * `dev'/`minor_num' locals are removed; returns are consistently
 * parenthesized per the surrounding style.
 */
static int
ddksample_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result)
{
	int error;

	if (dip == NULL) {
		cmn_err(CE_WARN, "ddksample_getinfo: dip==NULL\n");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = dip;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		/* widen via uintptr_t so the int fits cleanly in a void * */
		*result = (void *)(uintptr_t)ddi_get_instance(dip);
		error = DDI_SUCCESS;
		break;
	default:
		*result = NULL;
		error = DDI_FAILURE;
	}

	return (error);
}
/*
 * undo whatever is done in px_pwr_setup. called by px_detach()
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	/* nothing to undo unless nexus PM info was fully set up */
	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
		return;

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	/* stop accepting PME-ACK messages before removing the intr */
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);

	/* mark the interrupt disabled in the interrupt-block bookkeeping */
	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);

	/* invalidate the cached MSIQ id used by the PM code */
	px_p->px_pm_msiq_id = (msiqid_t)-1;

	cv_destroy(&px_p->px_l23ready_cv);
	mutex_destroy(&px_p->px_l23ready_lock);
}
/*
 * tcli_attach - attach(9E) entry point: allocate per-instance soft
 * state and create the "client" character minor node.  Non-ATTACH
 * commands are accepted as no-ops.
 */
/*ARGSUSED*/
static int
tcli_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int inst = ddi_get_instance(devi);
	struct dstate *dsp;

	if (cmd != DDI_ATTACH)
		return (DDI_SUCCESS);

	if (ddi_soft_state_zalloc(dstates, inst) != DDI_SUCCESS) {
		cmn_err(CE_CONT, "%s%d: can't allocate state\n",
		    ddi_get_name(devi), inst);
		return (DDI_FAILURE);
	}

	dsp = ddi_get_soft_state(dstates, inst);
	dsp->dip = devi;

	if (ddi_create_minor_node(devi, "client", S_IFCHR,
	    (INST_TO_MINOR(inst)), DDI_PSEUDO, NULL) == DDI_FAILURE) {
		/* unwind: drop any minor nodes and the soft state */
		ddi_remove_minor_node(devi, NULL);
		ddi_soft_state_free(dstates, inst);
		cmn_err(CE_WARN, "%s%d: can't create minor nodes",
		    ddi_get_name(devi), inst);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}
/*
 * tvhci_detach - detach(9E): unregister the vHCI from the MDI
 * framework and release the instance's resources.  Suspend commands
 * are no-ops for this pseudo device.
 */
/*ARGSUSED*/
static int
tvhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst = ddi_get_instance(dip);

	if (cmd == DDI_SUSPEND || cmd == DDI_PM_SUSPEND)
		return (0);	/* nothing to do */
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (mdi_vhci_unregister(dip, 0) != MDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(tvhci_state, inst);

	return (DDI_SUCCESS);
}
static void emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) { static char hex[] = "0123456789abcdef"; struct emul64 *emul64 = ADDR2EMUL64(ap); struct emul64_cmd *sp = PKT2CMD(pkt); uint8_t *cdb = pkt->pkt_cdbp; char buf [256]; char *p; int i; (void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ", ddi_get_instance(emul64->emul64_dip), ap->a_target, ap->a_lun); p = buf + strlen(buf); *p++ = '['; for (i = 0; i < sp->cmd_cdblen; i++, cdb++) { if (i != 0) *p++ = ' '; *p++ = hex[(*cdb >> 4) & 0x0f]; *p++ = hex[*cdb & 0x0f]; } *p++ = ']'; *p++ = '\n'; *p = 0; cmn_err(CE_CONT, buf); }
/*
 * px_ib_update_intr_state - record a new software interrupt state for
 * the handler registered by rdip/inum under the given ino and pil.
 *
 * Returns DDI_SUCCESS when the matching handler was found and updated,
 * DDI_FAILURE otherwise.  The ino-list mutex serializes the lookup and
 * the state write.
 *
 * Fix vs. original: the handler lookup used a bare assignment inside
 * the if-condition (`if (ih_p = ...)`), which draws compiler warnings
 * and obscures intent; all lookups now compare explicitly to NULL.
 */
int
px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip, uint_t inum,
    devino_t ino, uint_t pil, uint_t new_intr_state,
    msiq_rec_type_t rec_type, msgcode_t msg_code)
{
	px_ib_t *ib_p = px_p->px_ib_p;
	px_ino_t *ino_p;
	px_ino_pil_t *ipil_p;
	px_ih_t *ih_p;
	int ret = DDI_FAILURE;

	DBG(DBG_IB, px_p->px_dip, "px_ib_update_intr_state: %s%d "
	    "inum %x devino %x pil %x state %x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), inum, ino, pil, new_intr_state);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	if (ino_p != NULL &&
	    (ipil_p = px_ib_ino_locate_ipil(ino_p, pil)) != NULL) {
		ih_p = px_ib_intr_locate_ih(ipil_p, rdip, inum, rec_type,
		    msg_code);
		if (ih_p != NULL) {
			ih_p->ih_intr_state = new_intr_state;
			ret = DDI_SUCCESS;
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}
/*
 * dm2s_detach - Module's detach routine.  Refuses to detach while the
 * mailbox is still initialized; otherwise tears the instance down.
 */
int
dm2s_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dm2s_t *dmp;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	dmp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate,
	    ddi_get_instance(dip));
	if (dmp == NULL)
		return (DDI_FAILURE);

	mutex_enter(&dmp->ms_lock);
	/* Check if the mailbox is still in use. */
	if (dmp->ms_state & DM2S_MB_INITED) {
		mutex_exit(&dmp->ms_lock);
		cmn_err(CE_WARN, "Mailbox in use: Detach failed");
		return (DDI_FAILURE);
	}
	mutex_exit(&dmp->ms_lock);

	dm2s_cleanup(dmp);
	return (DDI_SUCCESS);
}
/*
 * Function used to log PBM AFSR register bits and to lookup and fault
 * handle associated with PBM AFAR register. Called by
 * pcmu_pbm_err_handler with pcmu_err_mutex held.
 *
 * Returns DDI_FM_FATAL when a secondary error classifies (the multiple
 * error case is not handled), DDI_FM_NONFATAL otherwise.
 */
int
pcmu_pbm_afsr_report(dev_info_t *dip, uint64_t fme_ena,
    pcmu_pbm_errstate_t *pbm_err_p)
{
	int fatal = 0;
	/* LINTED variable */
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));

	/* classify the primary error first; result intentionally unused */
	pbm_err_p->pcbm_pri = PBM_PRIMARY;
	(void) pcmu_pbm_classify(pbm_err_p);

	/*
	 * We are currently not dealing with the multiple error
	 * case, for any secondary errors we will panic.
	 */
	pbm_err_p->pcbm_pri = PBM_SECONDARY;
	if (pcmu_pbm_classify(pbm_err_p)) {
		fatal++;
		pcmu_pbm_ereport_post(dip, fme_ena, pbm_err_p);
	}

	if (fatal) {
		return (DDI_FM_FATAL);
	}
	return (DDI_FM_NONFATAL);
}
/*
 * ppb_removechild - undo child-node initialization: drop any power
 * management info the nexus kept for the child, then strip the node
 * back to prototype form.
 */
static void
ppb_removechild(dev_info_t *dip)
{
	ppb_devstate_t *ppb;

	/* soft state is keyed by the parent (the ppb nexus) instance */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(dip)));

	if (PM_CAPABLE(ppb->ppb_pwr_p)) {
		DEBUG2(DBG_PWR, ddi_get_parent(dip),
		    "UNINITCHILD: removing pwr_info for %s@%s\n",
		    ddi_node_name(dip), ddi_get_name_addr(dip));
		pci_pwr_rm_info(ppb->ppb_pwr_p, dip);
	}

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	impl_rem_dev_props(dip);
}
/*
 * av1394_detach - detach(9E) entry point: full detach or CPR suspend;
 * all other commands fail.
 */
static int
av1394_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	av1394_inst_t *avp;

	AV1394_TNF_ENTER(av1394_detach);

	if ((avp = AV1394_INST2STATE(instance)) == NULL) {
		TNF_PROBE_0(av1394_detach_error_instance,
		    AV1394_TNF_INST_ERROR, "");
		AV1394_TNF_EXIT(av1394_detach);
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		av1394_cleanup(avp, AV1394_CLEANUP_LEVEL_MAX);
		AV1394_TNF_EXIT(av1394_detach);
		return (DDI_SUCCESS);
	case DDI_SUSPEND:
		/*
		 * NOTE(review): this path returns without the matching
		 * AV1394_TNF_EXIT probe -- likely an oversight, though
		 * harmless outside TNF tracing; confirm before changing.
		 */
		return (av1394_cpr_suspend(avp));
	default:
		AV1394_TNF_EXIT(av1394_detach);
		return (DDI_FAILURE);
	}
}
/*
 * ddi_remove_intr - legacy DDI interface: disable, unregister and free
 * the interrupt handle previously set up for (dip, inum).  Each step
 * that fails aborts the sequence, leaving only a debug trace.
 */
/* ARGSUSED */
void
ddi_remove_intr(dev_info_t *dip, uint_t inum,
    ddi_iblock_cookie_t iblock_cookie)
{
	ddi_intr_handle_t hdl;
	int rc;

	DDI_INTR_APIDBG((CE_CONT,
	    "ddi_remove_intr: name=%s%d dip=0x%p inum=0x%x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, inum));

	hdl = i_ddi_get_intr_handle(dip, inum);
	if (hdl == NULL) {
		DDI_INTR_APIDBG((CE_CONT,
		    "ddi_remove_intr: no handle found\n"));
		return;
	}

	rc = ddi_intr_disable(hdl);
	if (rc != DDI_SUCCESS) {
		DDI_INTR_APIDBG((CE_CONT,
		    "ddi_remove_intr: ddi_intr_disable failed, ret 0x%x\n",
		    rc));
		return;
	}

	rc = ddi_intr_remove_handler(hdl);
	if (rc != DDI_SUCCESS) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_remove_intr: "
		    "ddi_intr_remove_handler failed, ret 0x%x\n", rc));
		return;
	}

	rc = ddi_intr_free(hdl);
	if (rc != DDI_SUCCESS) {
		DDI_INTR_APIDBG((CE_CONT,
		    "ddi_remove_intr: ddi_intr_free failed, ret 0x%x\n",
		    rc));
	}
}
/*
 * virtionet_detach - detach(9E): unregister from the MAC layer, then
 * tear down interrupts, virtqueues, register mappings and soft state.
 * Suspend is not supported and fails like any unknown command.
 */
static int
virtionet_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	virtionet_state_t *sp;
	int inst;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);	/* DDI_SUSPEND unsupported */

	inst = ddi_get_instance(dip);
	sp = ddi_get_soft_state(virtionet_statep, inst);
	ASSERT(sp);

	/* MAC unregister must succeed before anything is torn down */
	if (virtionet_mac_unregister(sp) != DDI_SUCCESS)
		return (DDI_FAILURE);

	(void) virtionet_intr_teardown(sp);
	virtionet_vq_teardown(sp);
	ddi_regs_map_free(&sp->devhandle);
	ddi_regs_map_free(&sp->hdrhandle);
	ddi_soft_state_free(virtionet_statep, inst);

	return (DDI_SUCCESS);
}
/*
 * pci_bus_exit - leave the exclusive (pokefault) access window opened
 * by the matching bus-enter call: check for errors raised while the
 * window was open, then release the exclusive handle and mutex.
 */
/* ARGSUSED */
void
pci_bus_exit(dev_info_t *dip, ddi_acc_handle_t handle)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ddi_fm_error_t derr;

	ASSERT(MUTEX_HELD(&pbm_p->pbm_pokefault_mutex));

	/* ensure all prior accesses have reached the hardware */
	membar_sync();

	mutex_enter(&pci_p->pci_common_p->pci_fm_mutex);
	ddi_fm_acc_err_get(pbm_p->pbm_excl_handle, &derr, DDI_FME_VERSION);

	if (derr.fme_status == DDI_FM_OK) {
		/* no handle-level error; double-check hardware state */
		if (pci_check_error(pci_p) != 0) {
			(void) pci_pbm_err_handler(pci_p->pci_dip, &derr,
			    (const void *)pci_p, PCI_BUS_EXIT_CALL);
		}
	}
	mutex_exit(&pci_p->pci_common_p->pci_fm_mutex);

	pbm_p->pbm_excl_handle = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);
}
/*
 * wusb_df_detach:
 *	detach or suspend driver instance
 *
 * Note: in detach, only contention threads is from pm and disconnnect.
 *
 * Fixes vs. original: the soft state is NULL-checked before it is
 * dereferenced for logging, and the DDI_SUSPEND case now ends with an
 * explicit break instead of silently falling through into default.
 */
static int
wusb_df_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	wusb_df_state_t *wusb_dfp =
	    ddi_get_soft_state(wusb_df_statep, instance);
	int rval = DDI_FAILURE;	/* != USB_SUCCESS, so default maps to failure */

	/* guard: don't dereference a missing soft state */
	if (wusb_dfp == NULL)
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		USB_DPRINTF_L4(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "Detach: enter for detach");
		rval = wusb_df_cleanup(dip, wusb_dfp);
		break;
	case DDI_SUSPEND:
		USB_DPRINTF_L4(PRINT_MASK_ATTA, wusb_dfp->wusb_df_log_hdl,
		    "Detach: enter for suspend");
		rval = wusb_df_cpr_suspend(dip);
		break;
	default:
		break;
	}

	return ((rval == USB_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
/*
 * wusb_df_disconnect_callback:
 *	Called when device hotplug-removed.  No attempt is made to
 *	contact the device; the instance is simply marked DISCONNECTED
 *	so later entry points reject device access.
 */
static int
wusb_df_disconnect_callback(dev_info_t *dip)
{
	wusb_df_state_t *dfp = ddi_get_soft_state(wusb_df_statep,
	    ddi_get_instance(dip));

	USB_DPRINTF_L4(PRINT_MASK_CB, dfp->wusb_df_log_hdl,
	    "disconnect: enter");

	mutex_enter(&dfp->wusb_df_mutex);
	(void) wusb_df_serialize_access(dfp, WUSB_DF_SER_NOSIG);

	/*
	 * Record the disconnect; wusb_df_restore_device_state relies on
	 * this to "thaw" the device properly later.
	 */
	dfp->wusb_df_dev_state = USB_DEV_DISCONNECTED;

	wusb_df_release_access(dfp);
	mutex_exit(&dfp->wusb_df_mutex);

	return (USB_SUCCESS);
}
/*
 * pcn_ddi_resume - DDI_RESUME handling: clear the suspend flag,
 * re-initialize the chip, and restart I/O if it was running.
 */
int
pcn_ddi_resume(dev_info_t *dip)
{
	pcn_t	*pcnp;

	if ((pcnp = ddi_get_soft_state(pcn_ssp, ddi_get_instance(dip))) == NULL)
		return (DDI_FAILURE);

	mutex_enter(&pcnp->pcn_intrlock);
	mutex_enter(&pcnp->pcn_xmtlock);

	pcnp->pcn_flags &= ~PCN_SUSPENDED;

	if (!pcn_initialize(pcnp, B_FALSE)) {
		pcn_error(pcnp->pcn_dip, "unable to resume chip");
		pcnp->pcn_flags |= PCN_SUSPENDED;
		mutex_exit(&pcnp->pcn_intrlock);
		mutex_exit(&pcnp->pcn_xmtlock);
		/*
		 * NOTE(review): DDI_SUCCESS on a failed chip re-init
		 * looks deliberate (a failed resume is worse than
		 * running degraded), but confirm against the DDI
		 * framework's expectations before changing it.
		 */
		return (DDI_SUCCESS);
	}

	if (IS_RUNNING(pcnp))
		pcn_startall(pcnp);

	mutex_exit(&pcnp->pcn_xmtlock);
	mutex_exit(&pcnp->pcn_intrlock);

	mii_resume(pcnp->pcn_mii);

	return (DDI_SUCCESS);
}
/*
 * tsalarm_detach - detach(9E): remove the minor node and release the
 * per-instance state.  Suspend always succeeds with no work.
 */
static int
tsalarm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct tsalarm_softc *sc;
	int inst;

	if (cmd == DDI_SUSPEND)
		return (DDI_SUCCESS);
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	inst = ddi_get_instance(dip);
	sc = getsoftc(inst);
	if (sc == NULL)
		return (DDI_FAILURE);

	/*
	 * Free the soft state and remove minor node added earlier.
	 */
	ddi_remove_minor_node(dip, NULL);
	mutex_destroy(&sc->mutex);
	ddi_soft_state_free(statep, inst);
	return (DDI_SUCCESS);
}
/*
 * pcn_teardown - release all resources acquired at attach time.  The
 * interface must already be stopped (PCN_RUNNING clear); each helper
 * below tolerates being called on a resource that was never allocated.
 */
static void
pcn_teardown(pcn_t *pcnp)
{
	ASSERT(!(pcnp->pcn_flags & PCN_RUNNING));

	if (pcnp->pcn_mii != NULL) {
		mii_free(pcnp->pcn_mii);
		pcnp->pcn_mii = NULL;
	}

	/* only remove the interrupt if it was successfully added */
	if (pcnp->pcn_flags & PCN_INTR_ENABLED)
		ddi_remove_intr(pcnp->pcn_dip, 0, pcnp->pcn_icookie);

	/* These will exit gracefully if not yet allocated */
	pcn_freerxring(pcnp);
	pcn_freetxring(pcnp);

	if (pcnp->pcn_regshandle != NULL)
		ddi_regs_map_free(&pcnp->pcn_regshandle);

	/* locks go last: nothing above may still be using them */
	mutex_destroy(&pcnp->pcn_xmtlock);
	mutex_destroy(&pcnp->pcn_intrlock);
	mutex_destroy(&pcnp->pcn_reglock);

	ddi_soft_state_free(pcn_ssp, ddi_get_instance(pcnp->pcn_dip));
}
/*
 * sda_slot_vprintf - vcmn_err() wrapper that prefixes the caller's
 * format with driver/instance/slot identification (or a plain "sda"
 * tag when no slot is supplied).  CE_CONT messages get the "!" prefix
 * (log only) and a trailing newline.
 */
void
sda_slot_vprintf(sda_slot_t *s, int level, const char *fmt, va_list ap)
{
	char buf[256];
	const char *prefix = (level == CE_CONT) ? "!" : "";
	const char *suffix = (level == CE_CONT) ? "\n" : "";

	if (s == NULL) {
		(void) snprintf(buf, sizeof (buf), "%ssda: %s%s",
		    prefix, fmt, suffix);
	} else {
		dev_info_t *dip = s->s_hostp->h_dip;

		(void) snprintf(buf, sizeof (buf),
		    "%s%s%d: slot %d: %s%s", prefix,
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    s->s_slot_num, fmt, suffix);
	}
	vcmn_err(level, buf, ap);
}
/*
 * pcmu_debug - gated debug printer: when every bit in `flag' is
 * enabled in pcmu_debug_flags, print the message via prom_printf(),
 * prefixed with driver/instance and the flag's name unless this is a
 * PCMU_DBG_CONT continuation line.
 */
void
pcmu_debug(uint64_t flag, dev_info_t *dip, char *fmt, uintptr_t a1,
    uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5)
{
	char *label = "pcmu unknown";
	uint_t is_cont = 0;
	int idx;
	int nent = (sizeof (pcmu_dflag_strings) /
	    sizeof (pcmu_dflag_to_str_t));

	if (flag & PCMU_DBG_CONT) {
		flag &= ~PCMU_DBG_CONT;
		is_cont = 1;
	}

	/* all requested debug bits must be enabled */
	if ((pcmu_debug_flags & flag) != flag)
		return;

	for (idx = 0; idx < nent; idx++) {
		if (pcmu_dflag_strings[idx].flag == flag) {
			label = pcmu_dflag_strings[idx].string;
			break;
		}
	}

	if (label != NULL && !is_cont) {
		prom_printf("%s(%d): %s: ", ddi_driver_name(dip),
		    ddi_get_instance(dip), label);
	}
	prom_printf(fmt, a1, a2, a3, a4, a5);
}
/*
 * detach entry point:
 */
static int
pmubus_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pmubus_devstate_t *pmubusp = ddi_get_soft_state(per_pmubus_state,
	    instance);

	switch (cmd) {
	case DDI_DETACH:
		mutex_destroy(&pmubusp->pmubus_reg_access_lock);

		/* Tear down our register mappings */
		pci_config_teardown(&pmubusp->pmubus_reghdl);

		/* Free our ranges property */
		kmem_free(pmubusp->pmubus_rangep, pmubusp->pmubus_rnglen);

		/* Free the register property */
		kmem_free(pmubusp->pmubus_regp, pmubusp->pmubus_reglen);

		ddi_soft_state_free(per_pmubus_state, instance);
		break;

	case DDI_SUSPEND:
		/* no device state to save; suspend trivially succeeds */
	default:
		/*
		 * NOTE(review): unknown detach commands also report
		 * DDI_SUCCESS here -- confirm this is intended rather
		 * than returning DDI_FAILURE for unrecognized cmds.
		 */
		break;
	}

	return (DDI_SUCCESS);
}
/**
 * Virtio Net private data allocation routine.
 *
 * @param pDevice   Pointer to the Virtio device instance.
 *
 * @return Allocated private data that must only be freed by calling
 *         VirtioNetDevFree(); NULL on failure (nothing is leaked).
 */
static void *VirtioNetDevAlloc(PVIRTIODEVICE pDevice)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioNetDevAlloc pDevice=%p\n", pDevice));

    AssertReturn(pDevice, NULL);
    virtio_net_t *pNet = RTMemAllocZ(sizeof(virtio_net_t));
    if (RT_LIKELY(pNet))
    {
        /*
         * Create a kernel memory cache for frequently allocated/deallocated
         * buffers.
         */
        char szCachename[KSTAT_STRLEN];
        RTStrPrintf(szCachename, sizeof(szCachename), "VirtioNet_Cache_%d", ddi_get_instance(pDevice->pDip));
        pNet->pTxCache = kmem_cache_create(szCachename,                /* Cache name */
                                           sizeof(virtio_net_txbuf_t), /* Size of buffers in cache */
                                           0,                          /* Align */
                                           VirtioNetTxBufCreate,       /* Buffer constructor */
                                           VirtioNetTxBufDestroy,      /* Buffer destructor */
                                           NULL,                       /* pfnReclaim */
                                           pDevice,                    /* Private data */
                                           NULL,                       /* "vmp", MBZ (man page) */
                                           0                           /* "cflags", MBZ (man page) */
                                           );
        if (RT_LIKELY(pNet->pTxCache))
            return pNet;

        LogRel((VIRTIOLOGNAME ":kmem_cache_create failed.\n"));
        RTMemFree(pNet);    /* fix: previously leaked pNet when cache creation failed */
    }
    else
        LogRel((VIRTIOLOGNAME ":failed to alloc %u bytes for Net instance.\n", sizeof(virtio_net_t)));
    return NULL;
}
/*
 * pppt_drv_attach - attach(9E) for the pppt pseudo driver: only
 * DDI_ATTACH of instance 0 is permitted.  Creates the control minor
 * node and initializes the global service state.
 */
static int
pppt_drv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	/* pseudo driver: DDI_ATTACH only, and only instance 0 */
	if (cmd != DDI_ATTACH || ddi_get_instance(dip) != 0)
		return (DDI_FAILURE);

	/* create the minor node */
	if (ddi_create_minor_node(dip, PPPT_MODNAME, S_IFCHR, 0,
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pppt_drv_attach: "
		    "failed creating minor node");
		return (DDI_FAILURE);
	}

	pppt_global.global_svc_state = PSS_DISABLED;
	pppt_global.global_dip = dip;

	return (DDI_SUCCESS);
}
/**
 * Detach entry point, to detach a device to the system or suspend it.
 *
 * @param pDip     The module structure instance.
 * @param enmCmd   Operation type (detach/suspend).
 *
 * @returns corresponding solaris error code.
 */
static int VBoxVideoSolarisDetach(dev_info_t *pDip, ddi_detach_cmd_t enmCmd)
{
    LogFlow((DEVICE_NAME ":VBoxVideoSolarisDetach pDip=%p enmCmd=%d\n", pDip, enmCmd));

    switch (enmCmd)
    {
        case DDI_DETACH:
        {
            int Instance = ddi_get_instance(pDip);
            drm_device_t *pState = ddi_get_soft_state(g_pVBoxVideoSolarisState, Instance);
            if (pState)
            {
                /*
                 * NOTE(review): pState->drm_handle is read after
                 * drm_detach(pState); this assumes drm_detach() does
                 * not free pState -- confirm against the DRM support
                 * code before relying on it.
                 */
                drm_detach(pState);
                drm_supp_unregister(pState->drm_handle);
                ddi_soft_state_free(g_pVBoxVideoSolarisState, Instance);
                return DDI_SUCCESS;
            }
            else
                LogRel((DEVICE_NAME ":VBoxVideoSolarisDetach failed to get soft state.\n"));

            return DDI_FAILURE;
        }

        case DDI_RESUME:
        {
            /* Nothing to do here... */
            return DDI_SUCCESS;
        }
    }
    /* DDI_SUSPEND and any unknown command fail */
    return DDI_FAILURE;
}
/*
 * zc_attach - attach(9E) for the zone console pseudo driver: allocate
 * soft state and create the paired master/slave minor nodes.  Minor
 * numbers encode the instance in the upper bits and the role in bit 0.
 */
static int
zc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	zc_state_t *zsp;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	inst = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(zc_soft_state, inst) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Create the master and slave minor nodes.
	 */
	if (ddi_create_minor_node(dip, ZCONS_SLAVE_NAME, S_IFCHR,
	    inst << 1 | ZC_SLAVE_MINOR, DDI_PSEUDO, 0) != DDI_SUCCESS ||
	    ddi_create_minor_node(dip, ZCONS_MASTER_NAME, S_IFCHR,
	    inst << 1 | ZC_MASTER_MINOR, DDI_PSEUDO, 0) != DDI_SUCCESS) {
		ddi_remove_minor_node(dip, NULL);
		ddi_soft_state_free(zc_soft_state, inst);
		return (DDI_FAILURE);
	}

	zsp = ddi_get_soft_state(zc_soft_state, inst);
	VERIFY(zsp != NULL);
	zsp->zc_devinfo = dip;

	return (DDI_SUCCESS);
}
/*
 * acebus_debug - conditionally print a debug message for the given
 * debug category, prefixed with "<driver><inst>: <category>: " when a
 * device state is supplied.
 *
 * Fix vs. original: `s' was left uninitialized when `flag' matched
 * none of the switch cases, so an unrecognized-but-enabled flag passed
 * an indeterminate pointer to cmn_err() (undefined behavior).  It now
 * defaults to "?" and the switch has an explicit default case.
 */
static void
acebus_debug(uint_t flag, ebus_devstate_t *ebus_p, char *fmt,
    uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5)
{
	char *s = "?";

	if (acebus_debug_flags & flag) {
		switch (flag) {
		case D_ATTACH:
			s = "attach";
			break;
		case D_DETACH:
			s = "detach";
			break;
		case D_MAP:
			s = "map";
			break;
		case D_CTLOPS:
			s = "ctlops";
			break;
		case D_INTR:
			s = "intr";
			break;
		default:
			/* keep the "?" placeholder for unknown flags */
			break;
		}
		if (ebus_p)
			cmn_err(CE_CONT, "%s%d: %s: ",
			    ddi_get_name(ebus_p->dip),
			    ddi_get_instance(ebus_p->dip), s);
		else
			cmn_err(CE_CONT, "ebus: ");
		cmn_err(CE_CONT, fmt, a1, a2, a3, a4, a5);
	}
}
/*
 * bus dma alloc handle entry point:
 */
int
px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	ddi_dma_impl_t *mp;
	int rval;

	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/* only the original attribute-structure revision is supported */
	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp; /* whole object - augmented later  */
	*PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */
	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	/* (assignment in the condition is intentional: nonzero == error) */
	if (rval = px_dma_attr2hdl(px_p, mp)) {
		/* unwind the handle allocation on attribute failure */
		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}
/*
 * ppb_removechild - undo child initialization (PCIe-aware variant):
 * release per-child PCIe state or the parent-private interrupt data,
 * then strip the node back to prototype form.
 */
static void
ppb_removechild(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;
	ppb_devstate_t *ppb;

	/* soft state is keyed by the parent (the ppb nexus) instance */
	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(dip)));

	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		/* child of a PCIe fabric: free PCIe-specific bookkeeping */
		pcie_fini_dom(dip);
		pcie_fini_cfghdl(dip);
	} else if ((pdptr = ddi_get_parent_data(dip)) != NULL) {
		/* conventional PCI: free the reg/intrspec parent data */
		kmem_free(pdptr, (sizeof (*pdptr) + sizeof (struct intrspec)));
		ddi_set_parent_data(dip, NULL);
	}

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	impl_rem_dev_props(dip);
}