/**
 * Detach entry point, to detach a device from the system or suspend it.
 *
 * @param pDip      The module structure instance.
 * @param enmCmd    Detach type (ddi_detach_cmd_t).
 *
 * @return corresponding solaris error code.
 */
static int VBoxGuestSolarisDetach(dev_info_t *pDip, ddi_detach_cmd_t enmCmd)
{
    LogFlow((DEVICE_NAME "::Detach\n"));

    if (enmCmd == DDI_DETACH)
    {
        /*
         * Undo the attach work in reverse order: IRQ, register mappings,
         * minor node, then the common device extension; finally drop the
         * single-instance marker.
         */
        VBoxGuestSolarisRemoveIRQ(pDip);
        ddi_regs_map_free(&g_PciIOHandle);
        ddi_regs_map_free(&g_PciMMIOHandle);
        ddi_remove_minor_node(pDip, NULL);
        VBoxGuestDeleteDevExt(&g_DevExt);
        g_pDip = NULL;
        return DDI_SUCCESS;
    }

    if (enmCmd == DDI_SUSPEND)
    {
        /** @todo implement suspend for guest driver. */
        return DDI_SUCCESS;
    }

    return DDI_FAILURE;
}
/*
 * detach(9E) entry point.  Only DDI_DETACH is handled; DDI_SUSPEND (and
 * anything else) is rejected.  Refuses to detach while the MAC layer
 * still holds a reference, then unwinds attach in reverse order.
 */
static int
virtionet_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	virtionet_state_t	*sp;
	int			instance;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	sp = ddi_get_soft_state(virtionet_statep, instance);
	ASSERT(sp);

	/* The MAC layer must let go of us first. */
	if (virtionet_mac_unregister(sp) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Tear down in reverse order of attach. */
	(void) virtionet_intr_teardown(sp);
	virtionet_vq_teardown(sp);

	ddi_regs_map_free(&sp->devhandle);
	ddi_regs_map_free(&sp->hdrhandle);
	ddi_soft_state_free(virtionet_statep, instance);

	return (DDI_SUCCESS);
}
/*
 * Detach/suspend handler for the VGA text console.  ptr is the soft
 * state handle the caller obtained at attach time.  Unknown commands
 * are logged and rejected.
 */
/*ARGSUSED*/
int
gfxp_vgatext_detach(dev_info_t *devi, ddi_detach_cmd_t cmd,
    gfxp_vgatext_softc_ptr_t ptr)
{
	struct vgatext_softc *softc = (struct vgatext_softc *)ptr;

	(void) ddi_prop_remove(DDI_DEV_T_ANY, devi, "primary-controller");

	if (cmd == DDI_SUSPEND)
		return (vgatext_suspend(softc));

	if (cmd == DDI_DETACH) {
		/* Release whichever register mappings attach set up. */
		if (softc->fb.mapped)
			ddi_regs_map_free(&softc->fb.handle);
		if (softc->regs.mapped)
			ddi_regs_map_free(&softc->regs.handle);
		mutex_destroy(&(softc->lock));
		return (DDI_SUCCESS);
	}

	cmn_err(CE_WARN, "gfxp_vgatext_detach: unknown cmd 0x%x\n", cmd);
	return (DDI_FAILURE);
}
/*
 * Release the BAR mappings established by oce_map_regs(), in the
 * reverse order of their setup (doorbell, CSR, device config).
 */
static void
oce_unmap_regs(struct oce_dev *dev)
{
	ASSERT(dev != NULL);
	ASSERT(dev->dip != NULL);

	ddi_regs_map_free(&dev->db_handle);
	ddi_regs_map_free(&dev->csr_handle);
	ddi_regs_map_free(&dev->dev_cfg_handle);
}
/*
 * Free every resource acquired over the life of this instance.  May
 * only be called once the device has been stopped (PCN_RUNNING clear).
 */
static void
pcn_teardown(pcn_t *pcnp)
{
	int	instance = ddi_get_instance(pcnp->pcn_dip);

	ASSERT(!(pcnp->pcn_flags & PCN_RUNNING));

	if (pcnp->pcn_mii != NULL) {
		mii_free(pcnp->pcn_mii);
		pcnp->pcn_mii = NULL;
	}

	if (pcnp->pcn_flags & PCN_INTR_ENABLED)
		ddi_remove_intr(pcnp->pcn_dip, 0, pcnp->pcn_icookie);

	/* These will exit gracefully if not yet allocated */
	pcn_freerxring(pcnp);
	pcn_freetxring(pcnp);

	if (pcnp->pcn_regshandle != NULL)
		ddi_regs_map_free(&pcnp->pcn_regshandle);

	mutex_destroy(&pcnp->pcn_reglock);
	mutex_destroy(&pcnp->pcn_intrlock);
	mutex_destroy(&pcnp->pcn_xmtlock);

	ddi_soft_state_free(pcn_ssp, instance);
}
/*
 * detach(9E) entry point.
 *
 * DDI_DETACH releases everything gpio_attach() set up: the mutex, the
 * register mapping, the soft state and the minor node.  DDI_SUSPEND
 * requires no work.
 *
 * Fix: detach(9E) must return DDI_SUCCESS or DDI_FAILURE; the previous
 * code returned ENXIO when the soft state lookup failed, which is not a
 * valid DDI return value (callers treat any non-DDI_SUCCESS the same,
 * so this is backward-compatible).
 */
static int
gpio_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	struct gpio_softc *softc;

	switch (cmd) {
	case DDI_DETACH:
		instance = ddi_get_instance(dip);
		DBG(dip, "detach: instance is %d", instance, 0, 0, 0, 0);

		if ((softc = getsoftc(instance)) == NULL)
			return (DDI_FAILURE);

		/* Tear down in reverse order of attach. */
		mutex_destroy(&softc->gp_mutex);
		ddi_regs_map_free(&softc->gp_handle);
		ddi_soft_state_free(statep, instance);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/* Nothing to do in the suspend case. */
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
/*
 * detach(9E) for the PS/2 mouse.  DDI_SUSPEND merely quiesces the
 * interrupt path via state->ready; DDI_DETACH releases every resource
 * acquired in mouse8042_attach(), in reverse order.
 */
/*ARGSUSED*/
static int
mouse8042_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct mouse_state *state = ddi_get_driver_private(dip);

	if (cmd == DDI_SUSPEND) {
		/* Ignore all data from mouse8042_intr until we fully resume */
		state->ready = 0;
		return (DDI_SUCCESS);
	}

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ddi_remove_intr(dip, 0, state->ms_iblock_cookie);
	mouse8042_dip = NULL;

	cv_destroy(&state->reset_cv);
	mutex_destroy(&state->reset_mutex);
	mutex_destroy(&state->ms_mutex);

	ddi_prop_remove_all(dip);
	ddi_regs_map_free(&state->ms_handle);
	ddi_remove_minor_node(dip, NULL);
	kmem_free(state, sizeof (struct mouse_state));

	return (DDI_SUCCESS);
}
/*
 * Higher-level setup & teardown
 */

/*
 * Take the SIO offline: unmap the register space if it was mapped, and
 * clear the cached handle and register pointer so later code can test
 * them against NULL.
 */
static void
rmc_comm_offline(struct rmc_comm_state *rcs)
{
	if (rcs->sd_state.sio_handle != NULL) {
		ddi_regs_map_free(&rcs->sd_state.sio_handle);
	}

	rcs->sd_state.sio_handle = NULL;
	rcs->sd_state.sio_regs = NULL;
}
/*
 * Release a register mapping previously created for this map.
 *
 * Fix: the handle is reset to NULL after the free so that the NULL
 * guard at the top actually protects against a second call on the same
 * map (double free).  Some DDI implementations may already clear the
 * handle in ddi_regs_map_free(); the explicit store is harmless then.
 */
void
drm_ioremapfree(drm_local_map_t *map)
{
	if (map->dev_handle == NULL) {
		DRM_ERROR("drm_ioremapfree: handle is NULL");
		return;
	}
	ddi_regs_map_free(&map->dev_handle);
	map->dev_handle = NULL;	/* make repeated calls a harmless no-op */
}
/**
 * Virtio Pci detach routine, called from driver detach.
 *
 * Removes the device IRQ and unmaps the I/O register space set up at
 * attach time.
 *
 * @param pDevice Pointer to the Virtio device instance.
 *
 * @return Solaris DDI error code. DDI_SUCCESS or DDI_FAILURE.
 */
static int VirtioPciDetach(PVIRTIODEVICE pDevice)
{
    virtio_pci_t *pPci;

    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciDetach pDevice=%p\n", pDevice));

    pPci = pDevice->pvHyper;
    AssertReturn(pPci, DDI_FAILURE);

    VirtioPciRemoveIRQ(pDevice->pDip);
    ddi_regs_map_free(&pPci->hIO);

    return DDI_SUCCESS;
}
/*
 * audioixp_unmap_regs()
 *
 * Description:
 *	Releases the control-register mapping and the PCI config access
 *	handle, if either was set up.
 *
 * Arguments:
 *	audioixp_state_t *statep	The device's state structure
 */
static void
audioixp_unmap_regs(audioixp_state_t *statep)
{
	if (statep->regsh != NULL) {
		ddi_regs_map_free(&statep->regsh);
	}
	if (statep->pcih != NULL) {
		pci_config_teardown(&statep->pcih);
	}
}
/*
 * Unmap SBBC Internal registers.
 *
 * Both register sets are released under the global IOSRAM lock, and the
 * cached register pointers are cleared so other code can tell that the
 * mappings are gone.
 */
static void
sbbc_unmap_regs(sbbc_softstate_t *softsp)
{
	if (softsp == NULL)
		return;

	mutex_enter(&master_iosram->iosram_lock);

	if (softsp->sbbc_regs != NULL) {
		ddi_regs_map_free(&softsp->sbbc_reg_handle1);
		softsp->sbbc_regs = NULL;
		softsp->port_int_regs = NULL;
	}

	if (softsp->epld_regs != NULL) {
		ddi_regs_map_free(&softsp->sbbc_reg_handle2);
		softsp->epld_regs = NULL;
	}

	mutex_exit(&master_iosram->iosram_lock);
}
/*
 * detach(9E): DDI_SUSPEND delegates to efe_suspend(); DDI_DETACH
 * unwinds attach in reverse order, failing if the MAC layer refuses
 * to release us.
 */
int
efe_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	efe_t *efep = ddi_get_driver_private(dip);

	if (cmd == DDI_SUSPEND)
		return (efe_suspend(efep));
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (mac_unregister(efep->efe_mh) != 0) {
		efe_error(dip, "unable to unregister from mac!");
		return (DDI_FAILURE);
	}

	mii_free(efep->efe_miih);

	(void) ddi_intr_disable(efep->efe_intrh);
	(void) ddi_intr_remove_handler(efep->efe_intrh);
	(void) ddi_intr_free(efep->efe_intrh);

	mutex_destroy(&efep->efe_txlock);
	mutex_destroy(&efep->efe_intrlock);

	if (efep->efe_tx_ring != NULL)
		efe_ring_free(&efep->efe_tx_ring);
	if (efep->efe_rx_ring != NULL)
		efe_ring_free(&efep->efe_rx_ring);

	ddi_regs_map_free(&efep->efe_regs_acch);

	kmem_free(efep, sizeof (efe_t));

	return (DDI_SUCCESS);
}
/*
 * Undo kb8042 initialization.  Driven by the init_state bitmask so a
 * partially completed attach can be unwound safely: only the steps that
 * actually completed are reversed.
 *
 * NOTE(review): the hardware mutex is destroyed *before* the interrupt
 * is removed; if the interrupt handler takes w_hw_mutex this ordering
 * looks racy — confirm against the handler before changing anything.
 */
static void
kb8042_cleanup(struct kb8042 *kb8042)
{
	/* The global dip must still be set; it is cleared last, below. */
	ASSERT(kb8042_dip != NULL);

	if (kb8042->init_state & KB8042_HW_MUTEX_INITTED)
		mutex_destroy(&kb8042->w_hw_mutex);

	if (kb8042->init_state & KB8042_INTR_ADDED)
		ddi_remove_intr(kb8042_dip, 0, kb8042->w_iblock);

	if (kb8042->init_state & KB8042_REGS_MAPPED)
		ddi_regs_map_free(&kb8042->handle);

	if (kb8042->init_state & KB8042_MINOR_NODE_CREATED)
		ddi_remove_minor_node(kb8042_dip, NULL);

	/* Mark everything torn down and drop the single-instance marker. */
	kb8042->init_state = KB8042_UNINITIALIZED;
	kb8042_dip = NULL;
}
/*
 * Release the IOAT resources held by FIPE: cancel any pending timeout,
 * unmap the IOAT registers, and mark the subsystem not ready.
 *
 * All state is protected by ioat_lock.  The pending-timeout case is
 * order-critical: the lock is dropped around untimeout() because the
 * timeout handler may itself block on ioat_lock (holding the lock
 * across untimeout(9F) could then deadlock — see untimeout(9F)).
 * ioat_cancel is raised first so a handler that fires in the unlocked
 * window backs off instead of rearming the timeout.
 */
static void
fipe_ioat_free(void)
{
	mutex_enter(&fipe_ioat_ctrl.ioat_lock);

	/* Cancel timeout to avoid race condition. */
	if (fipe_ioat_ctrl.ioat_timerid != 0) {
		/* Tell a concurrently running handler to stop. */
		fipe_ioat_ctrl.ioat_cancel = B_TRUE;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		(void) untimeout(fipe_ioat_ctrl.ioat_timerid);
		mutex_enter(&fipe_ioat_ctrl.ioat_lock);
		fipe_ioat_ctrl.ioat_timerid = 0;
		fipe_ioat_ctrl.ioat_cancel = B_FALSE;
	}

	/* Unmap the IOAT registers if they were ever mapped. */
	if (fipe_ioat_ctrl.ioat_reg_mapped) {
		ddi_regs_map_free(&fipe_ioat_ctrl.ioat_reg_handle);
		fipe_ioat_ctrl.ioat_reg_mapped = B_FALSE;
	}

	fipe_ioat_ctrl.ioat_ready = B_FALSE;
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
}
/**
 * On cleanup, remove the minor node, unmap the register space, destroy the
 * mutex and free the soft state structure.
 *
 * Fixes over the previous revision:
 *  - `instance` was only assigned inside the DDI_DETACH arm but was logged
 *    in the suspend arms, reading an uninitialized variable; it is now
 *    computed up front.
 *  - the soft state pointer returned by ddi_get_soft_state() is checked
 *    before being dereferenced.
 */
static int
quantis_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
  int instance;
  quantis_soft_state_t *soft_state;
  char msg[MAX_MSG_LEN];

  /* Needed by every arm of the switch, including the suspend log. */
  instance = ddi_get_instance(dip);

  switch (cmd) {
  case DDI_DETACH:
    snprintf(msg, MAX_MSG_LEN,
             "Detaching the Quantis device %d.\n", instance);
    QUANTIS_INFO(msg);

    soft_state = (quantis_soft_state_t *)
        ddi_get_soft_state(quantis_soft_state_p, instance);
    if (soft_state == NULL)
      return DDI_FAILURE;

    ddi_remove_minor_node(dip, NULL);
    ddi_regs_map_free(&soft_state->regs_handle);
    mutex_destroy(&soft_state->mutex);
    ddi_soft_state_free(quantis_soft_state_p, instance);

    mutex_enter(&quantis_mutex);
    card_count--;
    mutex_exit(&quantis_mutex);
    return DDI_SUCCESS;

  case DDI_SUSPEND:
  case DDI_PM_SUSPEND:
    LOG_DEBUG1("Suspending dev %d\n", instance);
    return DDI_SUCCESS;

  default:
    return DDI_FAILURE;
  }
}
static int oce_map_regs(struct oce_dev *dev) { int ret = 0; off_t bar_size = 0; ASSERT(NULL != dev); ASSERT(NULL != dev->dip); /* get number of supported bars */ ret = ddi_dev_nregs(dev->dip, &dev->num_bars); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "%d: could not retrieve num_bars", MOD_CONFIG); return (DDI_FAILURE); } /* verify each bar and map it accordingly */ /* PCI CFG */ ret = ddi_dev_regsize(dev->dip, OCE_DEV_CFG_BAR, &bar_size); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not get sizeof BAR %d", OCE_DEV_CFG_BAR); return (DDI_FAILURE); } ret = ddi_regs_map_setup(dev->dip, OCE_DEV_CFG_BAR, &dev->dev_cfg_addr, 0, bar_size, ®_accattr, &dev->dev_cfg_handle); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not map bar %d", OCE_DEV_CFG_BAR); return (DDI_FAILURE); } /* CSR */ ret = ddi_dev_regsize(dev->dip, OCE_PCI_CSR_BAR, &bar_size); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not get sizeof BAR %d", OCE_PCI_CSR_BAR); return (DDI_FAILURE); } ret = ddi_regs_map_setup(dev->dip, OCE_PCI_CSR_BAR, &dev->csr_addr, 0, bar_size, ®_accattr, &dev->csr_handle); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not map bar %d", OCE_PCI_CSR_BAR); ddi_regs_map_free(&dev->dev_cfg_handle); return (DDI_FAILURE); } /* Doorbells */ ret = ddi_dev_regsize(dev->dip, OCE_PCI_DB_BAR, &bar_size); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "%d Could not get sizeof BAR %d", ret, OCE_PCI_DB_BAR); ddi_regs_map_free(&dev->csr_handle); ddi_regs_map_free(&dev->dev_cfg_handle); return (DDI_FAILURE); } ret = ddi_regs_map_setup(dev->dip, OCE_PCI_DB_BAR, &dev->db_addr, 0, 0, ®_accattr, &dev->db_handle); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not map bar %d", OCE_PCI_DB_BAR); ddi_regs_map_free(&dev->csr_handle); ddi_regs_map_free(&dev->dev_cfg_handle); return (DDI_FAILURE); } return (DDI_SUCCESS); }
/*
 * attach(9E) entry point.
 *
 * DDI_ATTACH: allocates per-instance soft state, maps the gpio device
 * registers (reg set 0, full length), dumps the initial port registers
 * when debugging, and creates the "gpio" character minor node.  On any
 * failure the partially built state is unwound at attach_failed.
 * DDI_RESUME: nothing to do.  Anything else fails.
 */
static int
gpio_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	struct gpio_softc *softc = NULL;
	ddi_device_acc_attr_t dev_attr;

	switch (cmd) {
	case DDI_ATTACH:

		/* Allocate and get the soft state structure for this instance. */
		instance = ddi_get_instance(dip);
		DBG(dip, "attach: instance is %d", instance, 0, 0, 0, 0);
		if (ddi_soft_state_zalloc(statep, instance) != DDI_SUCCESS)
			goto attach_failed;
		softc = getsoftc(instance);
		softc->gp_dip = dip;
		softc->gp_state = 0;
		mutex_init(&softc->gp_mutex, NULL, MUTEX_DRIVER, NULL);

		/* Map in the gpio device registers. */
		dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
		dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
		/* offset 0, length 0: map the whole register set. */
		if (ddi_regs_map_setup(dip, 0, (caddr_t *)&softc->gp_regs,
		    0, 0, &dev_attr, &softc->gp_handle) != DDI_SUCCESS)
			goto attach_failed;
		DBG(dip, "attach: regs=0x%p", (uintptr_t)softc->gp_regs,
		    0, 0, 0, 0);

		/* Debug dump of the eight byte-wide port registers. */
		DBG(dip, "attach: port 1 data is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[0]), 0, 0, 0, 0);
		DBG(dip, "attach: port 1 direction is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[1]), 0, 0, 0, 0);
		DBG(dip, "attach: port 1 output type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[2]), 0, 0, 0, 0);
		DBG(dip, "attach: port 1 pull up control type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[3]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 data is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[4]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 direction is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[5]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 output type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[6]), 0, 0, 0, 0);
		DBG(dip, "attach: port 2 pull up control type is %x",
		    (uintptr_t)ddi_get8(softc->gp_handle,
		    &softc->gp_regs[7]), 0, 0, 0, 0);

		/* Create device minor nodes. */
		if (ddi_create_minor_node(dip, "gpio", S_IFCHR,
		    instance, NULL, NULL) == DDI_FAILURE) {
			ddi_regs_map_free(&softc->gp_handle);
			goto attach_failed;
		}

		ddi_report_dev(dip);
		return (DDI_SUCCESS);

	case DDI_RESUME:

		/* Nothing to do for a resume. */
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

attach_failed:

	/*
	 * Unwind whatever was built.  If softc is non-NULL the mutex was
	 * initialized; the handle is tested before being freed.
	 * ddi_remove_minor_node(dip, NULL) is a safe no-op when no node
	 * was created.
	 */
	if (softc) {
		mutex_destroy(&softc->gp_mutex);
		if (softc->gp_handle)
			ddi_regs_map_free(&softc->gp_handle);
		ddi_soft_state_free(statep, instance);
		ddi_remove_minor_node(dip, NULL);
	}
	return (DDI_FAILURE);
}
/*
 * attach(9E) entry point for the PS/2 mouse.
 *
 * DDI_RESUME re-enables interrupt processing and pushes a 0xaa 0x00
 * sequence upstream so the vuid module resets the mouse.  DDI_ATTACH
 * builds the instance: state allocation, minor nodes, register mapping,
 * sync primitives and the interrupt handler.
 *
 * Fixes over the previous revision:
 *  - if ddi_create_internal_pathname() failed, `rc` still held the
 *    DDI_SUCCESS from the preceding call, so attach cleaned up but then
 *    *returned success*; rc is now forced to DDI_FAILURE on that path.
 *  - if ddi_add_intr() failed, the two mutexes and the condition
 *    variable initialized just above were leaked; a new fail_4 leg
 *    destroys them.
 */
static int
mouse8042_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct mouse_state *state;
	mblk_t *mp;
	int instance = ddi_get_instance(dip);
	static ddi_device_acc_attr_t attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC,
	};
	int rc;

	if (cmd == DDI_RESUME) {
		state = (struct mouse_state *)ddi_get_driver_private(dip);

		/* Ready to handle inbound data from mouse8042_intr */
		state->ready = 1;

		/*
		 * Send a 0xaa 0x00 upstream.
		 * This causes the vuid module to reset the mouse.
		 */
		if (state->ms_rqp != NULL) {
			if ((mp = allocb(1, BPRI_MED)) != NULL) {
				*mp->b_wptr++ = 0xaa;
				putnext(state->ms_rqp, mp);
			}
			if ((mp = allocb(1, BPRI_MED)) != NULL) {
				*mp->b_wptr++ = 0x0;
				putnext(state->ms_rqp, mp);
			}
		}
		return (DDI_SUCCESS);
	}

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/* Only a single instance is supported. */
	if (mouse8042_dip != NULL)
		return (DDI_FAILURE);

	/* allocate and initialize state structure */
	state = kmem_zalloc(sizeof (struct mouse_state), KM_SLEEP);
	state->ms_opened = B_FALSE;
	state->reset_state = MSE_RESET_IDLE;
	state->reset_tid = 0;
	state->bc_id = 0;
	ddi_set_driver_private(dip, state);

	/*
	 * In order to support virtual keyboard/mouse, we should distinguish
	 * between internal virtual open and external physical open.
	 *
	 * When the physical devices are opened by application, they will
	 * be unlinked from the virtual device and their data stream will
	 * not be sent to the virtual device. When the opened physical
	 * devices are closed, they will be relinked to the virtual devices.
	 *
	 * All these automatic switch between virtual and physical are
	 * transparent.
	 *
	 * So we change minor node numbering scheme to be:
	 * external node minor num == instance * 2
	 * internal node minor num == instance * 2 + 1
	 */
	rc = ddi_create_minor_node(dip, "mouse", S_IFCHR, instance * 2,
	    DDI_NT_MOUSE, NULL);
	if (rc != DDI_SUCCESS) {
		goto fail_1;
	}

	if (ddi_create_internal_pathname(dip, "internal_mouse", S_IFCHR,
	    instance * 2 + 1) != DDI_SUCCESS) {
		/* previously returned stale DDI_SUCCESS from rc here */
		rc = DDI_FAILURE;
		goto fail_2;
	}

	rc = ddi_regs_map_setup(dip, 0, (caddr_t *)&state->ms_addr,
	    (offset_t)0, (offset_t)0, &attr, &state->ms_handle);
	if (rc != DDI_SUCCESS) {
		goto fail_2;
	}

	rc = ddi_get_iblock_cookie(dip, 0, &state->ms_iblock_cookie);
	if (rc != DDI_SUCCESS) {
		goto fail_3;
	}

	mutex_init(&state->ms_mutex, NULL, MUTEX_DRIVER,
	    state->ms_iblock_cookie);
	mutex_init(&state->reset_mutex, NULL, MUTEX_DRIVER,
	    state->ms_iblock_cookie);
	cv_init(&state->reset_cv, NULL, CV_DRIVER, NULL);

	rc = ddi_add_intr(dip, 0,
	    (ddi_iblock_cookie_t *)NULL, (ddi_idevice_cookie_t *)NULL,
	    mouse8042_intr, (caddr_t)state);
	if (rc != DDI_SUCCESS) {
		goto fail_4;
	}

	mouse8042_dip = dip;

	/* Ready to handle inbound data from mouse8042_intr */
	state->ready = 1;

	/* Now that we're attached, announce our presence to the world. */
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

fail_4:
	/* previously leaked: the sync primitives initialized above */
	cv_destroy(&state->reset_cv);
	mutex_destroy(&state->reset_mutex);
	mutex_destroy(&state->ms_mutex);
fail_3:
	ddi_regs_map_free(&state->ms_handle);
fail_2:
	ddi_remove_minor_node(dip, NULL);
fail_1:
	kmem_free(state, sizeof (struct mouse_state));

	return (rc);
}
/*
 * attach(9E) entry point.  Only DDI_ATTACH is supported.
 *
 * Builds the instance step by step: soft state, PCI header mapping,
 * device-specific region mapping, virtio handshake (reset/ack/driver),
 * feature negotiation, virtqueues, interrupts, and finally MAC
 * registration.
 *
 * Improvement: the six near-identical inline cleanup blocks on the
 * error paths are collapsed into a single goto-based unwind (CERT
 * MEM12-C), which releases exactly what was acquired, in reverse
 * order — behavior on every path is unchanged.
 */
static int
virtionet_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	virtionet_state_t	*sp;
	int			instance;
	int			rc;
	off_t			len;

	/* DDI_RESUME is not implemented. */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/* Sanity check - make sure this is indeed virtio PCI device */
	if (virtio_validate_pcidev(dip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(virtionet_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	sp = ddi_get_soft_state(virtionet_statep, instance);
	ASSERT(sp);
	sp->dip = dip;

	/* Map virtionet PCI header */
	rc = ddi_regs_map_setup(sp->dip, 1, &sp->hdraddr, 0,
	    VIRTIO_DEVICE_SPECIFIC, &virtio_devattr, &sp->hdrhandle);
	if (rc != DDI_SUCCESS)
		goto fail_state;

	/*
	 * The device specific portion is *always* in guest native mode,
	 * so it can be accessed directly, w/o ddi_get()/ddi_put() machinery.
	 */
	/* Map virtionet device specific configuration area */
	if (ddi_dev_regsize(sp->dip, 1, &len) != DDI_SUCCESS)
		goto fail_hdr;
	rc = ddi_regs_map_setup(sp->dip, 1, &sp->devaddr,
	    VIRTIO_DEVICE_SPECIFIC, len - VIRTIO_DEVICE_SPECIFIC,
	    &virtio_devattr, &sp->devhandle);
	if (rc != DDI_SUCCESS)
		goto fail_hdr;

	cmn_err(CE_CONT, "PCI header %p, device specific %p\n",
	    sp->hdraddr, sp->devaddr);

	/* Reset device - we are going to re-negotiate feature set */
	VIRTIO_DEV_RESET(sp);

	/* Acknowledge the presence of the device */
	VIRTIO_DEV_ACK(sp);

	rc = virtio_validate_netdev(sp);
	if (rc != DDI_SUCCESS)
		goto fail_dev;

	/* We know how to drive this device */
	VIRTIO_DEV_DRIVER(sp);

	rc = virtionet_negotiate_features(sp);
	if (rc != DDI_SUCCESS)
		goto fail_dev;

	virtionet_get_macaddr(sp);

	rc = virtionet_vq_setup(sp);
	if (rc != DDI_SUCCESS)
		goto fail_dev;

	rc = virtionet_intr_setup(sp);
	if (rc != DDI_SUCCESS)
		goto fail_vq;

	rc = virtionet_mac_register(sp);
	if (rc != DDI_SUCCESS)
		goto fail_intr;

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/* Unwind in reverse order of acquisition. */
fail_intr:
	(void) virtionet_intr_teardown(sp);
fail_vq:
	virtionet_vq_teardown(sp);
fail_dev:
	ddi_regs_map_free(&sp->devhandle);
fail_hdr:
	ddi_regs_map_free(&sp->hdrhandle);
fail_state:
	ddi_soft_state_free(virtionet_statep, instance);
	return (DDI_FAILURE);
}
/*
 * Configure the EBUS PCI function: enable bus mastering, memory access,
 * SERR and parity detection in the command register, program the cache
 * line size and latency timer, and (under ACEBUS_HOTPLUG) initialize
 * the TCR timing registers if firmware left them unprogrammed.
 *
 * Returns 1 on success, 0 on failure.
 *
 * Fixes over the previous revision:
 *  - two config-space statements were terminated with the comma
 *    operator instead of ';', sequencing into the adjacent DBG macros
 *    and compiling only by accident;
 *  - a ddi_regs_map_setup() failure for the CSR space only logged a
 *    warning and then went on to use the uninitialized csr_handle
 *    (undefined behavior); it now returns 0.
 */
static int
acebus_config(ebus_devstate_t *ebus_p)
{
	ddi_acc_handle_t conf_handle;
	uint16_t comm;
#ifdef ACEBUS_HOTPLUG
	int tcr_reg;
	caddr_t csr_io;
	ddi_device_acc_attr_t csr_attr = {	/* CSR map attributes */
		DDI_DEVICE_ATTR_V0,
		DDI_STRUCTURE_LE_ACC,
		DDI_STRICTORDER_ACC
	};
	ddi_acc_handle_t csr_handle;
#endif

	/*
	 * Make sure the master enable and memory access enable
	 * bits are set in the config command register.
	 */
	if (pci_config_setup(ebus_p->dip, &conf_handle) != DDI_SUCCESS)
		return (0);

	comm = pci_config_get16(conf_handle, PCI_CONF_COMM);
#ifdef DEBUG
	DBG1(D_ATTACH, ebus_p, "command register was 0x%x\n", comm);
#endif
	comm |= (PCI_COMM_ME|PCI_COMM_MAE|PCI_COMM_SERR_ENABLE|
	    PCI_COMM_PARITY_DETECT);
	pci_config_put16(conf_handle, PCI_CONF_COMM, comm);
#ifdef DEBUG
	DBG1(D_MAP, ebus_p, "command register is now 0x%x\n",
	    pci_config_get16(conf_handle, PCI_CONF_COMM));
#endif
	pci_config_put8(conf_handle, PCI_CONF_CACHE_LINESZ,
	    (uchar_t)acebus_cache_line_size);
	pci_config_put8(conf_handle, PCI_CONF_LATENCY_TIMER,
	    (uchar_t)acebus_latency_timer);
	pci_config_teardown(&conf_handle);

#ifdef ACEBUS_HOTPLUG
	if (acebus_update_props(ebus_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d: Could not update special properties.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
		return (0);
	}

	if (ddi_regs_map_setup(ebus_p->dip, CSR_IO_RINDEX,
	    (caddr_t *)&csr_io, 0, CSR_SIZE, &csr_attr,
	    &csr_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not map Ebus CSR.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
		/* must not fall through and use the unset csr_handle */
		return (0);
	}
#ifdef DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR1_OFF)),
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR2_OFF)),
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR3_OFF)));
		DBG2(D_ATTACH, ebus_p, "pmd-aux=%x, freq-aux=%x\n",
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + PMD_AUX_OFF)),
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + FREQ_AUX_OFF)));
#ifdef ACEBUS_DEBUG
		for (comm = 0; comm < 4; comm++)
			prom_printf("dcsr%d=%x, dacr%d=%x, dbcr%d=%x\n", comm,
			    ddi_get32(csr_handle,
			    (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm))),
			    comm,
			    ddi_get32(csr_handle,
			    (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+4)),
			    comm,
			    ddi_get32(csr_handle,
			    (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+8)));
#endif
	} /* acebus_debug_flags */
#endif
	/* If TCR registers are not initialized, initialize them here */
	tcr_reg = ddi_get32(csr_handle,
	    (uint32_t *)((caddr_t)csr_io + TCR1_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR1_OFF), TCR1_REGVAL);
	tcr_reg = ddi_get32(csr_handle,
	    (uint32_t *)((caddr_t)csr_io + TCR2_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR2_OFF), TCR2_REGVAL);
	tcr_reg = ddi_get32(csr_handle,
	    (uint32_t *)((caddr_t)csr_io + TCR3_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR3_OFF), TCR3_REGVAL);
#ifdef DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "wrote tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR1_OFF)),
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR2_OFF)),
		    ddi_get32(csr_handle,
		    (uint32_t *)((caddr_t)csr_io + TCR3_OFF)));
	}
#endif
	ddi_regs_map_free(&csr_handle);
#endif	/* ACEBUS_HOTPLUG */
	return (1);	/* return success */
}
/**
 * Attach entry point, to attach a device to the system or resume it.
 *
 * Success path (DDI_ATTACH): PCI config access -> map reg set 1 as the
 * I/O port range -> size and map reg set 2 as the MMIO/VMMDev range ->
 * install the IRQ -> init the common device extension -> create the
 * minor node.  Each failure leg unwinds exactly the steps that
 * completed before it, in reverse order.
 *
 * @param pDip      The module structure instance.
 * @param enmCmd    Attach type (ddi_attach_cmd_t)
 *
 * @return corresponding solaris error code.
 */
static int VBoxGuestSolarisAttach(dev_info_t *pDip, ddi_attach_cmd_t enmCmd)
{
    LogFlow((DEVICE_NAME "::Attach\n"));
    switch (enmCmd)
    {
        case DDI_ATTACH:
        {
            /* Only one instance is supported; g_pDip doubles as the
               "already attached" flag. */
            if (g_pDip)
            {
                LogRel((DEVICE_NAME "::Attach: Only one instance supported.\n"));
                return DDI_FAILURE;
            }

            int instance = ddi_get_instance(pDip);

            /*
             * Enable resources for PCI access.
             */
            ddi_acc_handle_t PciHandle;
            int rc = pci_config_setup(pDip, &PciHandle);
            if (rc == DDI_SUCCESS)
            {
                /*
                 * Map the register address space.
                 */
                caddr_t baseAddr;
                ddi_device_acc_attr_t deviceAttr;
                deviceAttr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
                deviceAttr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
                deviceAttr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
                deviceAttr.devacc_attr_access = DDI_DEFAULT_ACC;
                /* Reg set 1: the I/O port range (offset 0, full length). */
                rc = ddi_regs_map_setup(pDip, 1, &baseAddr, 0, 0, &deviceAttr, &g_PciIOHandle);
                if (rc == DDI_SUCCESS)
                {
                    /*
                     * Read size of the MMIO region.
                     */
                    g_uIOPortBase = (uintptr_t)baseAddr;
                    rc = ddi_dev_regsize(pDip, 2, &g_cbMMIO);
                    if (rc == DDI_SUCCESS)
                    {
                        /* Reg set 2: the MMIO region, mapped at full size. */
                        rc = ddi_regs_map_setup(pDip, 2, &g_pMMIOBase, 0, g_cbMMIO, &deviceAttr,
                                                &g_PciMMIOHandle);
                        if (rc == DDI_SUCCESS)
                        {
                            /*
                             * Add IRQ of VMMDev.
                             */
                            rc = VBoxGuestSolarisAddIRQ(pDip);
                            if (rc == DDI_SUCCESS)
                            {
                                /*
                                 * Call the common device extension initializer.
                                 */
                                rc = VBoxGuestInitDevExt(&g_DevExt, g_uIOPortBase, g_pMMIOBase, g_cbMMIO,
#if ARCH_BITS == 64
                                                         VBOXOSTYPE_Solaris_x64,
#else
                                                         VBOXOSTYPE_Solaris,
#endif
                                                         VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
                                if (RT_SUCCESS(rc))
                                {
                                    rc = ddi_create_minor_node(pDip, DEVICE_NAME, S_IFCHR, instance, DDI_PSEUDO, 0);
                                    if (rc == DDI_SUCCESS)
                                    {
                                        /* Fully attached: publish the dip and
                                           release the (no longer needed) PCI
                                           config access handle. */
                                        g_pDip = pDip;
                                        pci_config_teardown(&PciHandle);
                                        return DDI_SUCCESS;
                                    }

                                    LogRel((DEVICE_NAME "::Attach: ddi_create_minor_node failed.\n"));
                                    VBoxGuestDeleteDevExt(&g_DevExt);
                                }
                                else
                                    LogRel((DEVICE_NAME "::Attach: VBoxGuestInitDevExt failed.\n"));
                                VBoxGuestSolarisRemoveIRQ(pDip);
                            }
                            else
                                LogRel((DEVICE_NAME "::Attach: VBoxGuestSolarisAddIRQ failed.\n"));
                            ddi_regs_map_free(&g_PciMMIOHandle);
                        }
                        else
                            LogRel((DEVICE_NAME "::Attach: ddi_regs_map_setup for MMIO region failed.\n"));
                    }
                    else
                        LogRel((DEVICE_NAME "::Attach: ddi_dev_regsize for MMIO region failed.\n"));
                    ddi_regs_map_free(&g_PciIOHandle);
                }
                else
                    LogRel((DEVICE_NAME "::Attach: ddi_regs_map_setup for IOport failed.\n"));
                pci_config_teardown(&PciHandle);
            }
            else
                LogRel((DEVICE_NAME "::Attach: pci_config_setup failed rc=%d.\n", rc));
            return DDI_FAILURE;
        }

        case DDI_RESUME:
        {
            /** @todo implement resume for guest driver. */
            return DDI_SUCCESS;
        }

        default:
            return DDI_FAILURE;
    }
}
/*
 * Autoconfiguration entry points.
 */

/*
 * attach(9E): DDI_RESUME delegates to efe_resume(); DDI_ATTACH builds
 * the instance — PCI command setup, fixed interrupt discovery, soft
 * state, register mapping, rx/tx rings, interrupt handler, MII and
 * finally MAC registration.  Any failure jumps to a common unwind.
 *
 * Fix: macp is now initialized to NULL.  Previously any `goto failure`
 * taken before mac_alloc() ran caused the cleanup code to read the
 * uninitialized pointer (`if (macp != NULL) mac_free(macp);`), which is
 * undefined behavior and could free a garbage pointer.
 */
int
efe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ddi_acc_handle_t pci;
	int types;
	int count;
	int actual;
	uint_t pri;
	efe_t *efep;
	mac_register_t *macp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		efep = ddi_get_driver_private(dip);
		return (efe_resume(efep));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * PCI configuration.
	 */
	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup PCI configuration!");
		return (DDI_FAILURE);
	}

	/* Enable memory access and bus mastering. */
	pci_config_put16(pci, PCI_CONF_COMM,
	    pci_config_get16(pci, PCI_CONF_COMM) | PCI_COMM_MAE | PCI_COMM_ME);

	pci_config_teardown(&pci);

	if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS ||
	    !(types & DDI_INTR_TYPE_FIXED)) {
		efe_error(dip, "fixed interrupts not supported!");
		return (DDI_FAILURE);
	}

	if (ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &count)
	    != DDI_SUCCESS || count != 1) {
		efe_error(dip, "no fixed interrupts available!");
		return (DDI_FAILURE);
	}

	/*
	 * Initialize soft state.
	 */
	efep = kmem_zalloc(sizeof (efe_t), KM_SLEEP);
	ddi_set_driver_private(dip, efep);

	efep->efe_dip = dip;

	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&efep->efe_regs, 0, 0,
	    &efe_regs_acc_attr, &efep->efe_regs_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup register mapping!");
		goto failure;
	}

	efep->efe_rx_ring = efe_ring_alloc(efep->efe_dip, RXDESCL);
	if (efep->efe_rx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate rx ring!");
		goto failure;
	}

	efep->efe_tx_ring = efe_ring_alloc(efep->efe_dip, TXDESCL);
	if (efep->efe_tx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate tx ring!");
		goto failure;
	}

	if (ddi_intr_alloc(dip, &efep->efe_intrh, DDI_INTR_TYPE_FIXED, 0,
	    count, &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS ||
	    actual != count) {
		efe_error(dip, "unable to allocate fixed interrupt!");
		goto failure;
	}

	if (ddi_intr_get_pri(efep->efe_intrh, &pri) != DDI_SUCCESS ||
	    pri >= ddi_intr_get_hilevel_pri()) {
		efe_error(dip, "unable to get valid interrupt priority!");
		goto failure;
	}

	mutex_init(&efep->efe_intrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	mutex_init(&efep->efe_txlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	/*
	 * Initialize device.
	 */
	mutex_enter(&efep->efe_intrlock);
	mutex_enter(&efep->efe_txlock);

	efe_reset(efep);

	mutex_exit(&efep->efe_txlock);
	mutex_exit(&efep->efe_intrlock);

	/* Use factory address as default */
	efe_getaddr(efep, efep->efe_macaddr);

	/*
	 * Enable the ISR.
	 */
	if (ddi_intr_add_handler(efep->efe_intrh, efe_intr, efep, NULL)
	    != DDI_SUCCESS) {
		efe_error(dip, "unable to add interrupt handler!");
		goto failure;
	}

	if (ddi_intr_enable(efep->efe_intrh) != DDI_SUCCESS) {
		efe_error(dip, "unable to enable interrupt!");
		goto failure;
	}

	/*
	 * Allocate MII resources.
	 */
	if ((efep->efe_miih = mii_alloc(efep, dip, &efe_mii_ops)) == NULL) {
		efe_error(dip, "unable to allocate mii resources!");
		goto failure;
	}

	/*
	 * Allocate MAC resources.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		efe_error(dip, "unable to allocate mac resources!");
		goto failure;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = efep;
	macp->m_dip = dip;
	macp->m_src_addr = efep->efe_macaddr;
	macp->m_callbacks = &efe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &efep->efe_mh) != 0) {
		efe_error(dip, "unable to register with mac!");
		goto failure;
	}
	mac_free(macp);

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

failure:
	if (macp != NULL) {
		mac_free(macp);
	}

	if (efep->efe_miih != NULL) {
		mii_free(efep->efe_miih);
	}

	if (efep->efe_intrh != NULL) {
		(void) ddi_intr_disable(efep->efe_intrh);
		(void) ddi_intr_remove_handler(efep->efe_intrh);
		(void) ddi_intr_free(efep->efe_intrh);
	}

	/*
	 * NOTE(review): the locks are destroyed even on paths where
	 * mutex_init() never ran; the kmem_zalloc'ed state appears to make
	 * this benign, but it is worth confirming.
	 */
	mutex_destroy(&efep->efe_txlock);
	mutex_destroy(&efep->efe_intrlock);

	if (efep->efe_tx_ring != NULL) {
		efe_ring_free(&efep->efe_tx_ring);
	}
	if (efep->efe_rx_ring != NULL) {
		efe_ring_free(&efep->efe_rx_ring);
	}

	if (efep->efe_regs_acch != NULL) {
		ddi_regs_map_free(&efep->efe_regs_acch);
	}

	kmem_free(efep, sizeof (efe_t));

	return (DDI_FAILURE);
}
void pci_dump(void *arg) { igb_t *igb = (igb_t *)arg; ddi_acc_handle_t handle; uint8_t cap_ptr; uint8_t next_ptr; uint32_t msix_bar; uint32_t msix_ctrl; uint32_t msix_tbl_sz; uint32_t tbl_offset; uint32_t tbl_bir; uint32_t pba_offset; uint32_t pba_bir; off_t offset; off_t mem_size; uintptr_t base; ddi_acc_handle_t acc_hdl; int i; handle = igb->osdep.cfg_handle; igb_log(igb, "Begin dump PCI config space"); igb_log(igb, "PCI_CONF_VENID:\t0x%x\n", pci_config_get16(handle, PCI_CONF_VENID)); igb_log(igb, "PCI_CONF_DEVID:\t0x%x\n", pci_config_get16(handle, PCI_CONF_DEVID)); igb_log(igb, "PCI_CONF_COMMAND:\t0x%x\n", pci_config_get16(handle, PCI_CONF_COMM)); igb_log(igb, "PCI_CONF_STATUS:\t0x%x\n", pci_config_get16(handle, PCI_CONF_STAT)); igb_log(igb, "PCI_CONF_REVID:\t0x%x\n", pci_config_get8(handle, PCI_CONF_REVID)); igb_log(igb, "PCI_CONF_PROG_CLASS:\t0x%x\n", pci_config_get8(handle, PCI_CONF_PROGCLASS)); igb_log(igb, "PCI_CONF_SUB_CLASS:\t0x%x\n", pci_config_get8(handle, PCI_CONF_SUBCLASS)); igb_log(igb, "PCI_CONF_BAS_CLASS:\t0x%x\n", pci_config_get8(handle, PCI_CONF_BASCLASS)); igb_log(igb, "PCI_CONF_CACHE_LINESZ:\t0x%x\n", pci_config_get8(handle, PCI_CONF_CACHE_LINESZ)); igb_log(igb, "PCI_CONF_LATENCY_TIMER:\t0x%x\n", pci_config_get8(handle, PCI_CONF_LATENCY_TIMER)); igb_log(igb, "PCI_CONF_HEADER_TYPE:\t0x%x\n", pci_config_get8(handle, PCI_CONF_HEADER)); igb_log(igb, "PCI_CONF_BIST:\t0x%x\n", pci_config_get8(handle, PCI_CONF_BIST)); igb_log(igb, "PCI_CONF_BASE0:\t0x%x\n", pci_config_get32(handle, PCI_CONF_BASE0)); igb_log(igb, "PCI_CONF_BASE1:\t0x%x\n", pci_config_get32(handle, PCI_CONF_BASE1)); igb_log(igb, "PCI_CONF_BASE2:\t0x%x\n", pci_config_get32(handle, PCI_CONF_BASE2)); /* MSI-X BAR */ msix_bar = pci_config_get32(handle, PCI_CONF_BASE3); igb_log(igb, "PCI_CONF_BASE3:\t0x%x\n", msix_bar); igb_log(igb, "PCI_CONF_BASE4:\t0x%x\n", pci_config_get32(handle, PCI_CONF_BASE4)); igb_log(igb, "PCI_CONF_BASE5:\t0x%x\n", pci_config_get32(handle, PCI_CONF_BASE5)); 
igb_log(igb, "PCI_CONF_CIS:\t0x%x\n", pci_config_get32(handle, PCI_CONF_CIS)); igb_log(igb, "PCI_CONF_SUBVENID:\t0x%x\n", pci_config_get16(handle, PCI_CONF_SUBVENID)); igb_log(igb, "PCI_CONF_SUBSYSID:\t0x%x\n", pci_config_get16(handle, PCI_CONF_SUBSYSID)); igb_log(igb, "PCI_CONF_ROM:\t0x%x\n", pci_config_get32(handle, PCI_CONF_ROM)); cap_ptr = pci_config_get8(handle, PCI_CONF_CAP_PTR); igb_log(igb, "PCI_CONF_CAP_PTR:\t0x%x\n", cap_ptr); igb_log(igb, "PCI_CONF_ILINE:\t0x%x\n", pci_config_get8(handle, PCI_CONF_ILINE)); igb_log(igb, "PCI_CONF_IPIN:\t0x%x\n", pci_config_get8(handle, PCI_CONF_IPIN)); igb_log(igb, "PCI_CONF_MIN_G:\t0x%x\n", pci_config_get8(handle, PCI_CONF_MIN_G)); igb_log(igb, "PCI_CONF_MAX_L:\t0x%x\n", pci_config_get8(handle, PCI_CONF_MAX_L)); /* Power Management */ offset = cap_ptr; igb_log(igb, "PCI_PM_CAP_ID:\t0x%x\n", pci_config_get8(handle, offset)); next_ptr = pci_config_get8(handle, offset + 1); igb_log(igb, "PCI_PM_NEXT_PTR:\t0x%x\n", next_ptr); igb_log(igb, "PCI_PM_CAP:\t0x%x\n", pci_config_get16(handle, offset + PCI_PMCAP)); igb_log(igb, "PCI_PM_CSR:\t0x%x\n", pci_config_get16(handle, offset + PCI_PMCSR)); igb_log(igb, "PCI_PM_CSR_BSE:\t0x%x\n", pci_config_get8(handle, offset + PCI_PMCSR_BSE)); igb_log(igb, "PCI_PM_DATA:\t0x%x\n", pci_config_get8(handle, offset + PCI_PMDATA)); /* MSI Configuration */ offset = next_ptr; igb_log(igb, "PCI_MSI_CAP_ID:\t0x%x\n", pci_config_get8(handle, offset)); next_ptr = pci_config_get8(handle, offset + 1); igb_log(igb, "PCI_MSI_NEXT_PTR:\t0x%x\n", next_ptr); igb_log(igb, "PCI_MSI_CTRL:\t0x%x\n", pci_config_get16(handle, offset + PCI_MSI_CTRL)); igb_log(igb, "PCI_MSI_ADDR:\t0x%x\n", pci_config_get32(handle, offset + PCI_MSI_ADDR_OFFSET)); igb_log(igb, "PCI_MSI_ADDR_HI:\t0x%x\n", pci_config_get32(handle, offset + 0x8)); igb_log(igb, "PCI_MSI_DATA:\t0x%x\n", pci_config_get16(handle, offset + 0xC)); /* MSI-X Configuration */ offset = next_ptr; igb_log(igb, "PCI_MSIX_CAP_ID:\t0x%x\n", pci_config_get8(handle, 
offset)); next_ptr = pci_config_get8(handle, offset + 1); igb_log(igb, "PCI_MSIX_NEXT_PTR:\t0x%x\n", next_ptr); msix_ctrl = pci_config_get16(handle, offset + PCI_MSIX_CTRL); msix_tbl_sz = msix_ctrl & 0x7ff; igb_log(igb, "PCI_MSIX_CTRL:\t0x%x\n", msix_ctrl); tbl_offset = pci_config_get32(handle, offset + PCI_MSIX_TBL_OFFSET); tbl_bir = tbl_offset & PCI_MSIX_TBL_BIR_MASK; tbl_offset = tbl_offset & ~PCI_MSIX_TBL_BIR_MASK; igb_log(igb, "PCI_MSIX_TBL_OFFSET:\t0x%x\n", tbl_offset); igb_log(igb, "PCI_MSIX_TBL_BIR:\t0x%x\n", tbl_bir); pba_offset = pci_config_get32(handle, offset + PCI_MSIX_PBA_OFFSET); pba_bir = pba_offset & PCI_MSIX_PBA_BIR_MASK; pba_offset = pba_offset & ~PCI_MSIX_PBA_BIR_MASK; igb_log(igb, "PCI_MSIX_PBA_OFFSET:\t0x%x\n", pba_offset); igb_log(igb, "PCI_MSIX_PBA_BIR:\t0x%x\n", pba_bir); /* PCI Express Configuration */ offset = next_ptr; igb_log(igb, "PCIE_CAP_ID:\t0x%x\n", pci_config_get8(handle, offset + PCIE_CAP_ID)); next_ptr = pci_config_get8(handle, offset + PCIE_CAP_NEXT_PTR); igb_log(igb, "PCIE_CAP_NEXT_PTR:\t0x%x\n", next_ptr); igb_log(igb, "PCIE_PCIECAP:\t0x%x\n", pci_config_get16(handle, offset + PCIE_PCIECAP)); igb_log(igb, "PCIE_DEVCAP:\t0x%x\n", pci_config_get32(handle, offset + PCIE_DEVCAP)); igb_log(igb, "PCIE_DEVCTL:\t0x%x\n", pci_config_get16(handle, offset + PCIE_DEVCTL)); igb_log(igb, "PCIE_DEVSTS:\t0x%x\n", pci_config_get16(handle, offset + PCIE_DEVSTS)); igb_log(igb, "PCIE_LINKCAP:\t0x%x\n", pci_config_get32(handle, offset + PCIE_LINKCAP)); igb_log(igb, "PCIE_LINKCTL:\t0x%x\n", pci_config_get16(handle, offset + PCIE_LINKCTL)); igb_log(igb, "PCIE_LINKSTS:\t0x%x\n", pci_config_get16(handle, offset + PCIE_LINKSTS)); /* MSI-X Memory Space */ if (ddi_dev_regsize(igb->dip, IGB_ADAPTER_MSIXTAB, &mem_size) != DDI_SUCCESS) { igb_log(igb, "ddi_dev_regsize() failed"); return; } if ((ddi_regs_map_setup(igb->dip, IGB_ADAPTER_MSIXTAB, (caddr_t *)&base, 0, mem_size, &igb_regs_acc_attr, &acc_hdl)) != DDI_SUCCESS) { igb_log(igb, "ddi_regs_map_setup() 
failed"); return; } igb_log(igb, "MSI-X Memory Space: (mem_size = %d, base = %x)", mem_size, base); for (i = 0; i <= msix_tbl_sz; i++) { igb_log(igb, "MSI-X Table Entry(%d):", i); igb_log(igb, "lo_addr:\t%x", ddi_get32(acc_hdl, (uint32_t *)(base + tbl_offset + (i * 16)))); igb_log(igb, "up_addr:\t%x", ddi_get32(acc_hdl, (uint32_t *)(base + tbl_offset + (i * 16) + 4))); igb_log(igb, "msg_data:\t%x", ddi_get32(acc_hdl, (uint32_t *)(base + tbl_offset + (i * 16) + 8))); igb_log(igb, "vct_ctrl:\t%x", ddi_get32(acc_hdl, (uint32_t *)(base + tbl_offset + (i * 16) + 12))); } igb_log(igb, "MSI-X Pending Bits:\t%x", ddi_get32(acc_hdl, (uint32_t *)(base + pba_offset))); ddi_regs_map_free(&acc_hdl); }
/*
 * heci_detach - Device Detach/Suspend Entry Point
 *
 * Called by the DDI framework to suspend (DDI_SUSPEND) or detach
 * (DDI_DETACH) the HECI device.  On detach it stops the watchdog,
 * disconnects the AMT/watchdog host clients from the firmware, tears
 * down interrupts, task queues and register mappings, and frees the
 * per-instance soft state.
 *
 * (This routine was derived from the Linux heci_remove() PCI removal
 * callback, hence the similar teardown sequence.)
 */
static int
heci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct iamt_heci_device *dev;
	int err;

	dev = ddi_get_soft_state(heci_soft_state_p, ddi_get_instance(dip));
	ASSERT(dev != NULL);

	switch (cmd) {
	case DDI_SUSPEND:
		/* Suspend is handled separately; no teardown below applies */
		err = heci_suspend(dip);
		if (err)
			return (DDI_FAILURE);
		else
			return (DDI_SUCCESS);
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/* Cancel the pending watchdog timeout, if armed */
	if (dev->wd_timer)
		(void) untimeout(dev->wd_timer);

	mutex_enter(&dev->device_lock);
	if (dev->wd_file_ext.state == HECI_FILE_CONNECTED &&
	    dev->wd_timeout) {
		/*
		 * The firmware watchdog is running: send it a "stop"
		 * message so the firmware does not keep expecting pings
		 * after we detach.
		 */
		dev->wd_timeout = 0;
		dev->wd_due_counter = 0;
		(void) memcpy(dev->wd_data, stop_wd_params,
		    HECI_WD_PARAMS_SIZE);
		dev->stop = 1;
		if (dev->host_buffer_is_empty &&
		    flow_ctrl_creds(dev, &dev->wd_file_ext)) {
			dev->host_buffer_is_empty = 0;

			if (!heci_send_wd(dev)) {
				DBG("send stop WD failed\n");
			} else
				flow_ctrl_reduce(dev, &dev->wd_file_ext);

			dev->wd_pending = 0;
		} else
			/* No credits/buffer now; ISR path will send it */
			dev->wd_pending = 1;

		/*
		 * Wait (bounded) for the firmware to acknowledge the stop;
		 * cv_reltimedwait() returns -1 on timeout, which ends the
		 * loop.  (sic: "wd_stoped" is the field's actual name.)
		 */
		dev->wd_stoped = 0;
		err = 0;
		while (!dev->wd_stoped && err != -1) {
			err = cv_reltimedwait(&dev->wait_stop_wd,
			    &dev->device_lock, 10*HZ, TR_CLOCK_TICK);
		}

		if (!dev->wd_stoped) {
			DBG("stop wd failed to complete.\n");
		} else {
			DBG("stop wd complete.\n");
		}
	}
	mutex_exit(&dev->device_lock);

	/* Disconnect the AMT host interface client, if connected */
	if (dev->iamthif_file_ext.state == HECI_FILE_CONNECTED) {
		dev->iamthif_file_ext.state = HECI_FILE_DISCONNECTING;
		(void) heci_disconnect_host_client(dev,
		    &dev->iamthif_file_ext);
	}
	/* Disconnect the watchdog client, if connected */
	if (dev->wd_file_ext.state == HECI_FILE_CONNECTED) {
		dev->wd_file_ext.state = HECI_FILE_DISCONNECTING;
		(void) heci_disconnect_host_client(dev, &dev->wd_file_ext);
	}

	/* remove entry if already in list */
	DBG("list del iamthif and wd file list.\n");
	heci_remove_client_from_file_list(dev, dev->wd_file_ext.
	    host_client_id);
	heci_remove_client_from_file_list(dev,
	    dev->iamthif_file_ext.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->iamthif_file_ext.file = NULL;

	/* disable interrupts */
	heci_csr_disable_interrupts(dev);

	ddi_remove_intr(dip, 0, dev->sc_iblk);

	if (dev->work)
		ddi_taskq_destroy(dev->work);
	if (dev->reinit_tsk)
		ddi_taskq_destroy(dev->reinit_tsk);
	/* mem_addr is only set when the register mapping succeeded */
	if (dev->mem_addr)
		ddi_regs_map_free(&dev->io_handle);

	if (dev->me_clients && dev->num_heci_me_clients > 0) {
		kmem_free(dev->me_clients, sizeof (struct heci_me_client) *
		    dev->num_heci_me_clients);
	}

	dev->num_heci_me_clients = 0;

	heci_destroy_locks(dev);

	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(heci_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
void pci_config_teardown(ddi_acc_handle_t *handle) { ddi_regs_map_free(handle); }
void drm_ioremapfree(struct drm_local_map *map) { if (map->acc_handle) ddi_regs_map_free(&map->acc_handle); }
/* * heci_probe - Device Initialization Routine */ static int heci_initialize(dev_info_t *dip, struct iamt_heci_device *device) { int err; ddi_device_acc_attr_t attr; err = ddi_get_iblock_cookie(dip, 0, &device->sc_iblk); if (err != DDI_SUCCESS) { cmn_err(CE_WARN, "heci_probe():" " ddi_get_iblock_cookie() failed\n"); goto end; } /* initializes the heci device structure */ init_heci_device(dip, device); attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; if (ddi_regs_map_setup(dip, 1, (caddr_t *)&device->mem_addr, 0, 0, &attr, &device->io_handle) != DDI_SUCCESS) { cmn_err(CE_WARN, "heci%d: unable to map PCI regs\n", ddi_get_instance(dip)); goto fini_heci_device; } err = ddi_add_intr(dip, 0, &device->sc_iblk, NULL, heci_isr_interrupt, (caddr_t)device); if (err != DDI_SUCCESS) { cmn_err(CE_WARN, "heci_probe(): ddi_add_intr() failed\n"); goto unmap_memory; } if (heci_hw_init(device)) { cmn_err(CE_WARN, "init hw failure.\n"); err = -ENODEV; goto release_irq; } (void) heci_initialize_clients(device); if (device->heci_state != HECI_ENABLED) { err = -ENODEV; goto release_hw; } if (device->wd_timeout) device->wd_timer = timeout(heci_wd_timer, device, 1); DBG("heci driver initialization successful.\n"); return (0); release_hw: /* disable interrupts */ device->host_hw_state = read_heci_register(device, H_CSR); heci_csr_disable_interrupts(device); release_irq: ddi_remove_intr(dip, 0, device->sc_iblk); unmap_memory: if (device->mem_addr) ddi_regs_map_free(&device->io_handle); fini_heci_device: fini_heci_device(device); end: cmn_err(CE_WARN, "heci driver initialization failed.\n"); return (err); }