/*
 * vboxUSBMonSolarisResetDevice: Reset or re-enumerate the USB device
 * identified by a devinfo path.
 *
 * @param pszDevicePath  Devinfo path of the USB device to reset.
 * @param fReattach      When true, request a reattach-level reset
 *                       (re-enumeration) instead of a default-level reset.
 *
 * @returns VBox status code mapped from the Solaris USBA return value:
 *          VINF_SUCCESS on success, VERR_* on failure.
 */
static int vboxUSBMonSolarisResetDevice(char *pszDevicePath, bool fReattach)
{
    int rc = VERR_GENERAL_FAILURE;

    LogFunc((DEVICE_NAME ": vboxUSBMonSolarisResetDevice: pszDevicePath=%s fReattach=%d\n", pszDevicePath, fReattach));

    /*
     * Try grabbing the dev_info_t.
     */
    dev_info_t *pDeviceInfo = e_ddi_hold_devi_by_path(pszDevicePath, 0);
    if (pDeviceInfo)
    {
        /*
         * NOTE(review): the hold taken above is dropped immediately, yet
         * pDeviceInfo (and ancestors obtained from it) are still used below.
         * This mirrors the upstream code and presumably relies on the device
         * not detaching concurrently — confirm against USBA locking rules.
         */
        ddi_release_devi(pDeviceInfo);

        /*
         * Grab the root device node from the parent hub for resetting.
         * Walk up the devinfo tree until the parent carries the
         * "usb-port-count" property (i.e. the parent is a hub); the node
         * we stop at is the one handed to usb_reset_device().
         */
        dev_info_t *pTmpDeviceInfo = NULL;
        for (;;)
        {
            pTmpDeviceInfo = ddi_get_parent(pDeviceInfo);
            if (!pTmpDeviceInfo)
            {
                /* Reached the tree root without finding a hub parent. */
                LogRel((DEVICE_NAME ":vboxUSBMonSolarisResetDevice: Failed to get parent device info for %s\n", pszDevicePath));
                return VERR_GENERAL_FAILURE;
            }

            if (ddi_prop_exists(DDI_DEV_T_ANY, pTmpDeviceInfo, DDI_PROP_DONTPASS, "usb-port-count")) /* parent hub */
                break;

            pDeviceInfo = pTmpDeviceInfo;
        }

        /*
         * Try re-enumerating the device.
         */
        rc = usb_reset_device(pDeviceInfo, fReattach ? USB_RESET_LVL_REATTACH : USB_RESET_LVL_DEFAULT);
        Log((DEVICE_NAME ": vboxUSBMonSolarisResetDevice: usb_reset_device for %s level=%s rc=%d\n", pszDevicePath, fReattach ? "ReAttach" : "Default", rc));

        /* Map the USBA status code onto a VBox status code. */
        switch (rc)
        {
            case USB_SUCCESS:
                rc = VINF_SUCCESS;
                break;
            case USB_INVALID_PERM:
                rc = VERR_PERMISSION_DENIED;
                break;
            case USB_INVALID_ARGS:
                rc = VERR_INVALID_PARAMETER;
                break;
            case USB_BUSY:
                rc = VERR_RESOURCE_BUSY;
                break;
            case USB_INVALID_CONTEXT:
                rc = VERR_INVALID_CONTEXT;
                break;
            case USB_FAILURE:
                rc = VERR_GENERAL_FAILURE;
                break;
            default:
                rc = VERR_UNRESOLVED_ERROR;
                break;
        }
    }
    else
    {
        rc = VERR_INVALID_HANDLE;
        LogRel((DEVICE_NAME ": vboxUSBMonSolarisResetDevice: Cannot obtain device info for %s\n", pszDevicePath));
    }

    return rc;
}
/*
 * vdds_destroy_niu_node -- Destroy the NIU node.
 *
 * Tears down the devinfo branch rooted at niu_dip and cleans up the
 * "ranges" property that was set on its nexus parent for this Hybrid
 * device.  The whole operation is serialized by vdds_dev_lock.
 *
 * @param niu_dip  Root of the NIU devinfo branch to destroy.
 * @param cookie   Identifier passed through to vdds_release_range_prop()
 *                 to locate the range entry to release.
 *
 * @returns 0 on success, EBUSY if the branch could not be destroyed.
 */
int vdds_destroy_niu_node(dev_info_t *niu_dip, uint64_t cookie)
{
    int rv;
    dev_info_t *fdip = NULL;
    /* Parent nexus: needed afterwards to release the ranges property. */
    dev_info_t *nexus_dip = ddi_get_parent(niu_dip);

    DBG1(NULL, "Called");
    ASSERT(nexus_dip != NULL);

    mutex_enter(&vdds_dev_lock);

    /* e_ddi_branch_destroy() requires the branch to be held. */
    if (!e_ddi_branch_held(niu_dip))
        e_ddi_branch_hold(niu_dip);
    /*
     * As we are destroying now, release the
     * hold that was done in during the creation.
     */
    ddi_release_devi(niu_dip);

    rv = e_ddi_branch_destroy(niu_dip, &fdip, 0);
    if (rv != 0) {
        DERR(NULL, "Failed to destroy niumx/network node dip=0x%p", niu_dip);
        /* On failure fdip, if set, is returned held and must be released. */
        if (fdip != NULL) {
            ddi_release_devi(fdip);
        }
        rv = EBUSY;
        goto dest_exit;
    }

    /*
     * Cleanup the parent's ranges property set
     * for this Hybrid device.
     */
    vdds_release_range_prop(nexus_dip, cookie);

dest_exit:
    mutex_exit(&vdds_dev_lock);
    DBG1(NULL, "returning rv=%d", rv);
    return (rv);
}
/*ARGSUSED*/
/*
 * sbd_detach_io: Unconfigure the I/O device branch rooted at dip.
 *
 * The branch must already be held by the caller (asserted below).  The
 * unconfigure is performed under the board's sb_slock.  On failure, the
 * error is decoded into ep against the failing dip: e_ddi_branch_unconfigure()
 * may return the specific failing node in fdip (held, so it is released
 * here after decoding); otherwise the branch root dip is used.
 *
 * @param hp    SBD handle; used to locate the owning board.
 * @param ep    Error descriptor filled in on failure.
 * @param dip   Root of the held devinfo branch to unconfigure.
 * @param unit  Unused (see ARGSUSED).
 */
void
sbd_detach_io(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
    int rv;
    dev_info_t *fdip = NULL;
    sbd_board_t *sbp = SBDH2BD(hp->h_sbd);

    ASSERT(e_ddi_branch_held(dip));

    mutex_enter(&sbp->sb_slock);
    rv = e_ddi_branch_unconfigure(dip, &fdip, DEVI_BRANCH_EVENT);
    mutex_exit(&sbp->sb_slock);

    if (rv) {
        /*
         * If non-NULL, fdip is returned held and must be released.
         */
        if (fdip != NULL) {
            sbd_errno_decode(rv, ep, fdip);
            ddi_release_devi(fdip);
        } else {
            sbd_errno_decode(rv, ep, dip);
        }
    }
}
static int drrput(queue_t *q, mblk_t *mp) { struct drstate *dsp; union DL_primitives *dlp; dev_info_t *dip; switch (DB_TYPE(mp)) { case M_PROTO: case M_PCPROTO: break; default: putnext(q, mp); return (0); } /* make sure size is sufficient for dl_primitive */ if (MBLKL(mp) < sizeof (t_uscalar_t)) { putnext(q, mp); return (0); } dlp = (union DL_primitives *)mp->b_rptr; switch (dlp->dl_primitive) { case DL_OK_ACK: { /* check for proper size, let upper layer deal with error */ if (MBLKL(mp) < DL_OK_ACK_SIZE) { putnext(q, mp); return (0); } dsp = q->q_ptr; switch (dlp->ok_ack.dl_correct_primitive) { case DL_ATTACH_REQ: /* * ddi_assoc_queue_with_devi() will hold dip, * so release after association. * * dip is NULL means we didn't hold dip on read side. * (unlikely, but possible), so we do nothing. */ mutex_enter(&dsp->dr_lock); dip = dsp->dr_dip[dsp->dr_nlast]; dsp->dr_dip[dsp->dr_nlast] = NULL; INCR(dsp->dr_nlast); mutex_exit(&dsp->dr_lock); if (dip) { ddi_assoc_queue_with_devi(q, dip); ddi_release_devi(dip); } break; case DL_DETACH_REQ: ddi_assoc_queue_with_devi(q, NULL); break; default: break; } break; } case DL_ERROR_ACK: if (dlp->error_ack.dl_error_primitive != DL_ATTACH_REQ) break; dsp = q->q_ptr; mutex_enter(&dsp->dr_lock); dip = dsp->dr_dip[dsp->dr_nlast]; dsp->dr_dip[dsp->dr_nlast] = NULL; INCR(dsp->dr_nlast); mutex_exit(&dsp->dr_lock); /* * Release dip on attach failure */ if (dip) { ddi_release_devi(dip); } break; default: break; } putnext(q, mp); return (0); }
/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 *
 * @param devid       Device id to look up (must be valid).
 * @param minor_name  Minor name used to select dev_t's on each matching node.
 * @param retndevts   Out: number of unique dev_t's returned.
 * @param retdevts    Out: kmem-allocated, sorted, de-duplicated dev_t array
 *                    (undevts * sizeof (dev_t) bytes; caller frees).
 *
 * @returns DDI_SUCCESS with at least one dev_t, DDI_FAILURE otherwise.
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
	int *retndevts, dev_t **retdevts)
{
	char *path, **paths;
	int i, j, n;
	dev_t *devts, *udevts;
	dev_t tdevt;
	int ndevts, undevts, ndevts_alloced;
	dev_info_t *devi, **devis;
	int ndevis, npaths, nalloced;
	ddi_devid_t match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	/*
	 * Collect the devinfo and path lists for this devid.  If the cache
	 * reports more entries than we allocated room for, drop the lock and
	 * holds, free, and retry with larger arrays.
	 */
	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *),
		    KM_SLEEP);

		rw_enter(&dcfd->nvf_lock, RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
		    &ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(&dcfd->nvf_lock);
		/* drop the holds acquired by the undersized attempt */
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	/*
	 * Copy the paths out of cache-owned storage while still under the
	 * reader lock; after rw_exit the cache entries may change.
	 */
	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(&dcfd->nvf_lock);

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	/*
	 * Accumulate dev_t's.  If the devts array overflows at any point,
	 * restart the whole accumulation with a larger allocation.
	 */
	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	/* First, the already-held devinfo nodes. */
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
		    ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	/* Then the cached paths, attaching (holding) each device by path. */
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			/* stale cache entry: path no longer attaches */
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
		    ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	/* free the path copies made above */
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent. This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}