/**
 * API for cleaning up the heap spinlock on IPRT termination.
 *
 * This is as RTMemExecDonate specific to AMD64 Linux/GNU.
 */
DECLHIDDEN(void) rtR0MemExecCleanup(void)
{
#ifdef RTMEMALLOC_EXEC_HEAP
    /* Only the exec-heap build variant owns this spinlock; destroy it and
       reset the handle so a stray second call is harmless. */
    RTSpinlockDestroy(g_HeapExecSpinlock);
    g_HeapExecSpinlock = NIL_RTSPINLOCK;
#endif
}
/**
 * Terminates the power-notification registrations.
 *
 * Detaches both the callback list and the spinlock from the globals while
 * holding the lock, then frees the list and destroys the lock outside it.
 */
DECLHIDDEN(void) rtR0PowerNotificationTerm(void)
{
    PRTPOWERNOTIFYREG   pHead;
    RTSPINLOCK          hSpinlock = g_hRTPowerNotifySpinLock;
    AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);

    /** @todo OS specific term here */

    /* pick up the list and the spinlock. */
    RTSpinlockAcquire(hSpinlock);
    ASMAtomicWriteHandle(&g_hRTPowerNotifySpinLock, NIL_RTSPINLOCK);
    pHead = g_pRTPowerCallbackHead;
    g_pRTPowerCallbackHead = NULL;
    /* Bump the generation counter; presumably consumed by the notification
       walker to detect list changes — walker not visible here. */
    ASMAtomicIncU32(&g_iRTPowerGeneration);
    RTSpinlockRelease(hSpinlock);

    /* free the list. */
    while (pHead)
    {
        PRTPOWERNOTIFYREG pFree = pHead;
        pHead = pHead->pNext;

        /* Clear the entry before freeing so stale users fault predictably. */
        pFree->pNext = NULL;
        pFree->pfnCallback = NULL;
        RTMemFree(pFree);
    }

    RTSpinlockDestroy(hSpinlock);
}
/**
 * Haiku driver detach hook: reverses the work done by VBoxGuestHaikuAttach.
 *
 * @returns B_OK on success, EBUSY while the device still has open users.
 */
static status_t VBoxGuestHaikuDetach(void)
{
    struct VBoxGuestDeviceState *pState = &sState;
    /* Refuse to unload while anyone still has the device open. */
    if (cUsers > 0)
        return EBUSY;

    /*
     * Reverse what we did in VBoxGuestHaikuAttach.
     */
    VBoxGuestHaikuRemoveIRQ(pState);

    if (pState->iVMMDevMemAreaId)
        delete_area(pState->iVMMDevMemAreaId);

    VBoxGuestDeleteDevExt(&g_DevExt);

#ifdef DO_LOG
    /* Destroy the release logger instance and drop the default logger. */
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogSetDefaultInstance(NULL);
//    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif

    RTSpinlockDestroy(g_Spinlock);
    g_Spinlock = NIL_RTSPINLOCK;

    RTR0Term();
    return B_OK;
}
/**
 * @copydoc RAWPCIDEVPORT::pfnDestroy
 */
static DECLCALLBACK(int) vboxPciDevDestroy(PRAWPCIDEVPORT pPort)
{
    PVBOXRAWPCIINS pThis = DEVPORT_2_VBOXRAWPCIINS(pPort);
    int            rc;

    rc = vboxPciOsDevDestroy(pThis);
    /* NOTE(review): strict VINF_SUCCESS check — an informational success
       status would skip the teardown below; confirm that is intended. */
    if (rc == VINF_SUCCESS)
    {
        if (pThis->hFastMtx)
        {
            RTSemFastMutexDestroy(pThis->hFastMtx);
            pThis->hFastMtx = NIL_RTSEMFASTMUTEX;
        }

        if (pThis->hSpinlock)
        {
            RTSpinlockDestroy(pThis->hSpinlock);
            pThis->hSpinlock = NIL_RTSPINLOCK;
        }

        /* Unlink from the global instance list before freeing ourselves. */
        vboxPciGlobalsLock(pThis->pGlobals);
        vboxPciUnlinkInstanceLocked(pThis->pGlobals, pThis);
        vboxPciGlobalsUnlock(pThis->pGlobals);

        RTMemFree(pThis);
    }

    return rc;
}
/**
 * Terminates the MP notification registrations.
 *
 * Uses the legacy RTSPINLOCKTMP acquire/release API. Does the native
 * teardown first, then detaches the list and lock from the globals under
 * the lock before freeing everything outside it.
 */
DECLHIDDEN(void) rtR0MpNotificationTerm(void)
{
    PRTMPNOTIFYREG  pHead;
    RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSPINLOCK      hSpinlock = g_hRTMpNotifySpinLock;
    AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);

    rtR0MpNotificationNativeTerm();

    /* pick up the list and the spinlock. */
    RTSpinlockAcquire(hSpinlock, &Tmp);
    ASMAtomicWriteHandle(&g_hRTMpNotifySpinLock, NIL_RTSPINLOCK);
    pHead = g_pRTMpCallbackHead;
    g_pRTMpCallbackHead = NULL;
    /* Bump the generation counter; presumably consumed elsewhere to detect
       list changes — not visible in this chunk. */
    ASMAtomicIncU32(&g_iRTMpGeneration);
    RTSpinlockRelease(hSpinlock, &Tmp);

    /* free the list. */
    while (pHead)
    {
        PRTMPNOTIFYREG pFree = pHead;
        pHead = pHead->pNext;

        /* Clear the entry before freeing so stale users fault predictably. */
        pFree->pNext = NULL;
        pFree->pfnCallback = NULL;
        RTMemFree(pFree);
    }

    RTSpinlockDestroy(hSpinlock);
}
/**
 * Initializes the thread database.
 *
 * Ring-3: creates the RW semaphore, does native init, and adopts the calling
 * thread as 'main'. Ring-0: creates the spinlock and does native init.
 * On any partial failure the created lock is destroyed again.
 *
 * @returns iprt status code.
 */
DECLHIDDEN(int) rtThreadInit(void)
{
#ifdef IN_RING3
    int rc = VINF_ALREADY_INITIALIZED;
    if (g_ThreadRWSem == NIL_RTSEMRW)
    {
        /*
         * We assume the caller is the 1st thread, which we'll call 'main'.
         * But first, we'll create the semaphore.
         */
        rc = RTSemRWCreateEx(&g_ThreadRWSem, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
        if (RT_SUCCESS(rc))
        {
            rc = rtThreadNativeInit();
            if (RT_SUCCESS(rc))
                rc = rtThreadAdopt(RTTHREADTYPE_DEFAULT, 0, RTTHREADINT_FLAGS_MAIN, "main");
            if (RT_SUCCESS(rc))
                rc = rtSchedNativeCalcDefaultPriority(RTTHREADTYPE_DEFAULT);
            if (RT_SUCCESS(rc))
            {
                g_frtThreadInitialized = true;
                return VINF_SUCCESS;
            }

            /* failed, clear out */
            RTSemRWDestroy(g_ThreadRWSem);
            g_ThreadRWSem = NIL_RTSEMRW;
        }
    }

#elif defined(IN_RING0)
    int rc;

    /*
     * Create the spinlock and to native init.
     */
    Assert(g_ThreadSpinlock == NIL_RTSPINLOCK);
    rc = RTSpinlockCreate(&g_ThreadSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTThread");
    if (RT_SUCCESS(rc))
    {
        rc = rtThreadNativeInit();
        if (RT_SUCCESS(rc))
        {
            g_frtThreadInitialized = true;
            return VINF_SUCCESS;
        }

        /* failed, clear out */
        RTSpinlockDestroy(g_ThreadSpinlock);
        g_ThreadSpinlock = NIL_RTSPINLOCK;
    }
#else
# error "!IN_RING0 && !IN_RING3"
#endif
    return rc;
}
/**
 * Does ring-0 per-VM GIM Hyper-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR0_INT_DECL(int) gimR0HvTermVM(PVM pVM)
{
    AssertPtr(pVM);
    Assert(GIMIsEnabled(pVM));

    /* The ring-0 spinlock is the only resource torn down here. */
    RTSpinlockDestroy(pVM->gim.s.u.Hv.hSpinlockR0);
    pVM->gim.s.u.Hv.hSpinlockR0 = NIL_RTSPINLOCK;

    return VINF_SUCCESS;
}
/**
 * Creates a timer (OS/2 flavour): lazily sets up the global spinlock,
 * allocates the handle and inserts it at the head of the global list.
 *
 * @returns VBox status code.
 * @param   ppTimer         Where to store the timer handle; set to NULL on entry.
 * @param   u64NanoInterval The interval in nanoseconds.
 * @param   fFlags          RTTIMER_FLAGS_XXX; CPU-specific timers are rejected.
 * @param   pfnTimer        Callback to invoke.
 * @param   pvUser          User argument for the callback.
 */
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * We don't support the fancy MP features.
     */
    if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        return VERR_NOT_SUPPORTED;

    /*
     * Lazy initialize the spinlock.
     */
    if (g_Spinlock == NIL_RTSPINLOCK)
    {
        RTSPINLOCK Spinlock;
        int rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerOS2");
        AssertRCReturn(rc, rc);
        //bool fRc;
        //ASMAtomicCmpXchgSize(&g_Spinlock, Spinlock, NIL_RTSPINLOCK, fRc);
        //if (!fRc)
        /* Racy init: if another thread published its lock first, drop ours. */
        if (!ASMAtomicCmpXchgPtr((void * volatile *)&g_Spinlock, Spinlock, NIL_RTSPINLOCK))
            RTSpinlockDestroy(Spinlock);
    }

    /*
     * Allocate and initialize the timer handle.
     */
    PRTTIMER pTimer = (PRTTIMER)RTMemAlloc(sizeof(*pTimer));
    if (!pTimer)
        return VERR_NO_MEMORY;

    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->pNext = NULL;
    pTimer->fSuspended = true;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
    pTimer->u64StartTS = 0;

    /*
     * Insert the timer into the list (LIFO atm).
     */
    RTSpinlockAcquire(g_Spinlock);
    g_u32ChangeNo++;
    pTimer->pNext = g_pTimerHead;
    g_pTimerHead = pTimer;
    g_cTimers++;
    RTSpinlockRelease(g_Spinlock);

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
/**
 * Terminates the thread database.
 *
 * Ring-3 currently performs no cleanup; ring-0 destroys the spinlock and
 * warns if the thread tree is still populated.
 */
DECLHIDDEN(void) rtThreadTerm(void)
{
#ifdef IN_RING3
    /* we don't cleanup here yet */

#elif defined(IN_RING0)
    /* just destroy the spinlock and assume the thread is fine... */
    RTSpinlockDestroy(g_ThreadSpinlock);
    g_ThreadSpinlock = NIL_RTSPINLOCK;
    if (g_ThreadTree != NULL)
        RTAssertMsg2Weak("WARNING: g_ThreadTree=%p\n", g_ThreadTree);
#endif
}
/**
 * Initializes the MP notification subsystem.
 *
 * Creates the global spinlock and runs the native init; the spinlock is
 * destroyed again if the native part fails.
 *
 * @returns IPRT status code.
 */
DECLHIDDEN(int) rtR0MpNotificationInit(void)
{
    int rc = RTSpinlockCreate((PRTSPINLOCK)&g_hRTMpNotifySpinLock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "RTR0Mp");
    if (RT_FAILURE(rc))
        return rc;

    rc = rtR0MpNotificationNativeInit();
    if (RT_FAILURE(rc))
    {
        /* Roll back the spinlock so termination won't touch a dead lock. */
        RTSpinlockDestroy(g_hRTMpNotifySpinLock);
        g_hRTMpNotifySpinLock = NIL_RTSPINLOCK;
    }
    return rc;
}
/**
 * Initializes the power notification subsystem.
 *
 * Currently only creates the global spinlock; the OS-specific part is a
 * placeholder, which is why the cleanup code below is compiled out.
 *
 * @returns IPRT status code.
 */
DECLHIDDEN(int) rtR0PowerNotificationInit(void)
{
    int rc = RTSpinlockCreate((PRTSPINLOCK)&g_hRTPowerNotifySpinLock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTR0Power");
    if (RT_SUCCESS(rc))
    {
        /** @todo OS specific init here */
        return rc;
#if 0 /* unreachable until the OS specific init above can fail */
        RTSpinlockDestroy(g_hRTPowerNotifySpinLock);
        g_hRTPowerNotifySpinLock = NIL_RTSPINLOCK;
#endif
    }
    return rc;
}
/**
 * Frees the resources held by an adapter slot.
 *
 * The slot must be idle, unreferenced and already marked invalid.
 */
static void vboxNetAdpSlotDestroy(PVBOXNETADP pThis)
{
    RTSEMEVENT hEventIdle = pThis->hEventIdle;
    RTSPINLOCK hSpinlock  = pThis->hSpinlock;

    Assert(pThis->cRefs == 0);
    Assert(pThis->cBusy == 0);
    Assert(vboxNetAdpGetState(pThis) == kVBoxNetAdpState_Invalid);

    /* Detach the handles from the slot, then destroy what was there. */
    pThis->hEventIdle = NIL_RTSEMEVENT;
    pThis->hSpinlock  = NIL_RTSPINLOCK;

    if (hEventIdle != NIL_RTSEMEVENT)
        RTSemEventDestroy(hEventIdle);
    if (hSpinlock != NIL_RTSPINLOCK)
        RTSpinlockDestroy(hSpinlock);
}
/**
 * Cleans up a raw PCI device instance: deinitializes the port, destroys its
 * locks, and unlinks it from the global instance list.
 */
DECLHIDDEN(void) vboxPciDevCleanup(PVBOXRAWPCIINS pThis)
{
    PVBOXRAWPCIGLOBALS pGlobals = pThis->pGlobals;

    /* Let the port undo its own initialization first. */
    pThis->DevPort.pfnDeinit(&pThis->DevPort, 0);

    if (pThis->hFastMtx)
    {
        RTSemFastMutexDestroy(pThis->hFastMtx);
        pThis->hFastMtx = NIL_RTSEMFASTMUTEX;
    }
    if (pThis->hSpinlock)
    {
        RTSpinlockDestroy(pThis->hSpinlock);
        pThis->hSpinlock = NIL_RTSPINLOCK;
    }

    /* Finally, take this instance off the global list. */
    vboxPciGlobalsLock(pGlobals);
    vboxPciUnlinkInstanceLocked(pGlobals, pThis);
    vboxPciGlobalsUnlock(pGlobals);
}
/**
 * Stop the kernel module.
 *
 * Undoes VBoxDrvDarwinStart in reverse order: sleep notifier, devfs nodes,
 * character device, device extension, spinlock, then IPRT itself.
 */
static kern_return_t VBoxDrvDarwinStop(struct kmod_info *pKModInfo, void *pvData)
{
    int rc;
    LogFlow(("VBoxDrvDarwinStop\n"));

    /** @todo I've got a nagging feeling that we'll have to keep track of users and refuse
     * unloading if we're busy. Investigate and implement this! */

    /*
     * Undo the work done during start (in reverse order).
     */
    if (g_pSleepNotifier)
    {
        g_pSleepNotifier->remove();
        g_pSleepNotifier = NULL;
    }

    devfs_remove(g_hDevFsDeviceUsr);
    g_hDevFsDeviceUsr = NULL;

    devfs_remove(g_hDevFsDeviceSys);
    g_hDevFsDeviceSys = NULL;

    rc = cdevsw_remove(g_iMajorDeviceNo, &g_DevCW);
    Assert(rc == g_iMajorDeviceNo);
    g_iMajorDeviceNo = -1;

    supdrvDeleteDevExt(&g_DevExt);

    rc = RTSpinlockDestroy(g_Spinlock);
    AssertRC(rc);
    g_Spinlock = NIL_RTSPINLOCK;

    RTR0TermForced();

    /* Scrub the extension so a reload starts from a clean slate. */
    memset(&g_DevExt, 0, sizeof(g_DevExt));

#ifdef DEBUG
    printf("VBoxDrvDarwinStop - done\n");
#endif
    return KMOD_RETURN_SUCCESS;
}
/**
 * Initializes an adapter slot: fills in the trunk port vtable, creates the
 * spinlock and idle event, then runs the OS-specific init. Each resource is
 * rolled back in reverse order if a later step fails.
 *
 * @returns IPRT status code.
 * @param   pGlobals    The driver globals the slot belongs to.
 * @param   uUnit       Unit number (currently unused in this function body).
 * @param   pNew        The slot to initialize.
 */
static int vboxNetAdpSlotCreate(PVBOXNETADPGLOBALS pGlobals, unsigned uUnit, PVBOXNETADP pNew)
{
    int rc;

    pNew->MyPort.u32Version             = INTNETTRUNKIFPORT_VERSION;
    pNew->MyPort.pfnRetain              = vboxNetAdpPortRetain;
    pNew->MyPort.pfnRelease             = vboxNetAdpPortRelease;
    pNew->MyPort.pfnDisconnectAndRelease= vboxNetAdpPortDisconnectAndRelease;
    pNew->MyPort.pfnSetState            = vboxNetAdpPortSetState;
    pNew->MyPort.pfnWaitForIdle         = vboxNetAdpPortWaitForIdle;
    pNew->MyPort.pfnXmit                = vboxNetAdpPortXmit;
    pNew->MyPort.u32VersionEnd          = INTNETTRUNKIFPORT_VERSION;
    pNew->pSwitchPort                   = NULL;
    pNew->pGlobals                      = pGlobals;
    pNew->hSpinlock                     = NIL_RTSPINLOCK;
    pNew->enmState                      = kVBoxNetAdpState_Invalid;
    pNew->cRefs                         = 0;
    pNew->cBusy                         = 0;
    pNew->hEventIdle                    = NIL_RTSEMEVENT;

    rc = RTSpinlockCreate(&pNew->hSpinlock);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pNew->hEventIdle);
        if (RT_SUCCESS(rc))
        {
            rc = vboxNetAdpOsInit(pNew);
            if (RT_SUCCESS(rc))
            {
                return rc;
            }
            /* OS init failed: tear down the event. */
            RTSemEventDestroy(pNew->hEventIdle);
            pNew->hEventIdle = NIL_RTSEMEVENT;
        }
        /* Event creation or OS init failed: tear down the spinlock. */
        RTSpinlockDestroy(pNew->hSpinlock);
        pNew->hSpinlock = NIL_RTSPINLOCK;
    }
    return rc;
}
/** * Terminates the VBoxUSB filter manager. */ void VBoxUSBFilterTerm(void) { #ifdef VBOXUSBFILTERMGR_USB_SPINLOCK RTSpinlockDestroy(g_Spinlock); g_Spinlock = NIL_RTSPINLOCK; #else RTSemFastMutexDestroy(g_Mtx); g_Mtx = NIL_RTSEMFASTMUTEX; #endif for (unsigned i = USBFILTERTYPE_FIRST; i < RT_ELEMENTS(g_aLists); i++) { PVBOXUSBFILTER pCur = g_aLists[i].pHead; g_aLists[i].pHead = g_aLists[i].pTail = NULL; while (pCur) { PVBOXUSBFILTER pNext = pCur->pNext; vboxUSBFilterFree(pCur); pCur = pNext; } } }
/**
 * Unregister VBoxGuest char device.
 *
 * Each resource is guarded so the function is safe to call on a partially
 * initialized driver (e.g. from a failed start path).
 *
 * @returns KMOD_RETURN_SUCCESS (currently always).
 */
static int VbgdDarwinCharDevRemove(void)
{
    int rc = KMOD_RETURN_SUCCESS;

    if (g_pSleepNotifier)
    {
        g_pSleepNotifier->remove();
        g_pSleepNotifier = NULL;
    }

    if (g_hDevFsDeviceSys)
    {
        devfs_remove(g_hDevFsDeviceSys);
        g_hDevFsDeviceSys = NULL;
    }

    if (g_hDevFsDeviceUsr)
    {
        devfs_remove(g_hDevFsDeviceUsr);
        g_hDevFsDeviceUsr = NULL;
    }

    if (g_iMajorDeviceNo != -1)
    {
        int rc2 = cdevsw_remove(g_iMajorDeviceNo, &g_DevCW);
        Assert(rc2 == g_iMajorDeviceNo);
        g_iMajorDeviceNo = -1;
    }

    if (g_Spinlock != NIL_RTSPINLOCK)
    {
        int rc2 = RTSpinlockDestroy(g_Spinlock);
        AssertRC(rc2);
        g_Spinlock = NIL_RTSPINLOCK;
    }

    return rc;
}
/**
 * Destroys a memory pool, freeing all entries still in it.
 *
 * NIL and the default pool are quietly ignored.
 *
 * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
 * @param   hMemPool    The pool to destroy.
 */
RTDECL(int) RTMemPoolDestroy(RTMEMPOOL hMemPool)
{
    if (hMemPool == NIL_RTMEMPOOL)
        return VINF_SUCCESS;
    PRTMEMPOOLINT pMemPool = hMemPool;
    RTMEMPOOL_VALID_RETURN_RC(pMemPool, VERR_INVALID_HANDLE);
    /* The static default pool must never be destroyed. */
    if (pMemPool == &g_rtMemPoolDefault)
        return VINF_SUCCESS;

    /*
     * Invalidate the handle and free all associated resources.
     */
    ASMAtomicWriteU32(&pMemPool->u32Magic, RTMEMPOOL_MAGIC_DEAD);

    int rc = RTSpinlockDestroy(pMemPool->hSpinLock);
    AssertRC(rc);
    pMemPool->hSpinLock = NIL_RTSPINLOCK;

    PRTMEMPOOLENTRY pEntry = pMemPool->pHead;
    pMemPool->pHead = NULL;
    while (pEntry)
    {
        PRTMEMPOOLENTRY pFree = pEntry;
        Assert(pFree->cRefs > 0 && pFree->cRefs < UINT32_MAX / 2);
        pEntry = pEntry->pNext;

        /* Poison the entry so a late RTMemPoolFree/Release asserts. */
        pFree->pMemPool = NULL;
        pFree->pNext = NULL;
        pFree->pPrev = NULL;
        pFree->cRefs = UINT32_MAX - 3;
        RTMemFree(pFree);
    }

    RTMemFree(pMemPool);

    return VINF_SUCCESS;
}
/**
 * Solaris module unload entry point.
 *
 * @returns 0 on success, otherwise the mod_remove error (unload refused).
 */
int _fini(void)
{
    LogFlowFunc(("vboxdrv:_fini\n"));

    /*
     * Undo the work we did at start (in the reverse order).
     */
    int rc = mod_remove(&g_VBoxDrvSolarisModLinkage);
    if (rc != 0)
        return rc;

    supdrvDeleteDevExt(&g_DevExt);

    rc = RTSpinlockDestroy(g_Spinlock);
    AssertRC(rc);
    g_Spinlock = NIL_RTSPINLOCK;

    RTR0TermForced();

    /* Scrub the extension so a reload starts from a clean slate. */
    memset(&g_DevExt, 0, sizeof(g_DevExt));

    ddi_soft_state_fini(&g_pVBoxDrvSolarisState);
    return 0;
}
/** * Creates a new instance. * * @returns VBox status code. * @param pGlobals The globals. * @param pszName The instance name. * @param ppDevPort Where to store the pointer to our port interface. */ static int vboxPciNewInstance(PVBOXRAWPCIGLOBALS pGlobals, uint32_t u32HostAddress, uint32_t fFlags, PRAWPCIPERVM pVmCtx, PRAWPCIDEVPORT *ppDevPort, uint32_t *pfDevFlags) { int rc; PVBOXRAWPCIINS pNew = (PVBOXRAWPCIINS)RTMemAllocZ(sizeof(*pNew)); if (!pNew) return VERR_NO_MEMORY; pNew->pGlobals = pGlobals; pNew->hSpinlock = NIL_RTSPINLOCK; pNew->cRefs = 1; pNew->pNext = NULL; pNew->HostPciAddress = u32HostAddress; pNew->pVmCtx = pVmCtx; pNew->DevPort.u32Version = RAWPCIDEVPORT_VERSION; pNew->DevPort.pfnInit = vboxPciDevInit; pNew->DevPort.pfnDeinit = vboxPciDevDeinit; pNew->DevPort.pfnDestroy = vboxPciDevDestroy; pNew->DevPort.pfnGetRegionInfo = vboxPciDevGetRegionInfo; pNew->DevPort.pfnMapRegion = vboxPciDevMapRegion; pNew->DevPort.pfnUnmapRegion = vboxPciDevUnmapRegion; pNew->DevPort.pfnPciCfgRead = vboxPciDevPciCfgRead; pNew->DevPort.pfnPciCfgWrite = vboxPciDevPciCfgWrite; pNew->DevPort.pfnPciCfgRead = vboxPciDevPciCfgRead; pNew->DevPort.pfnPciCfgWrite = vboxPciDevPciCfgWrite; pNew->DevPort.pfnRegisterIrqHandler = vboxPciDevRegisterIrqHandler; pNew->DevPort.pfnUnregisterIrqHandler = vboxPciDevUnregisterIrqHandler; pNew->DevPort.pfnPowerStateChange = vboxPciDevPowerStateChange; pNew->DevPort.u32VersionEnd = RAWPCIDEVPORT_VERSION; rc = RTSpinlockCreate(&pNew->hSpinlock); if (RT_SUCCESS(rc)) { rc = RTSemFastMutexCreate(&pNew->hFastMtx); if (RT_SUCCESS(rc)) { rc = pNew->DevPort.pfnInit(&pNew->DevPort, fFlags); if (RT_SUCCESS(rc)) { *ppDevPort = &pNew->DevPort; pNew->pNext = pGlobals->pInstanceHead; pGlobals->pInstanceHead = pNew; } else { RTSemFastMutexDestroy(pNew->hFastMtx); RTSpinlockDestroy(pNew->hSpinlock); RTMemFree(pNew); } } } return rc; }
/**
 * Kernel entry points
 *
 * Solaris module load entry point: disables autounloading, initializes IPRT,
 * the device extension, the session hash table, the spinlock and the soft
 * state, and finally installs the module. Each step is rolled back in
 * reverse order on failure.
 *
 * @returns 0 on success, errno-style failure code otherwise.
 */
int _init(void)
{
    LogFlowFunc((DEVICE_NAME ":_init\n"));

    /*
     * Prevent module autounloading.
     */
    modctl_t *pModCtl = mod_getctl(&g_VBoxDrvSolarisModLinkage);
    if (pModCtl)
        pModCtl->mod_loadflags |= MOD_NOAUTOUNLOAD;
    else
        LogRel((DEVICE_NAME ":failed to disable autounloading!\n"));

    /*
     * Initialize IPRT R0 driver, which internally calls OS-specific r0 init.
     */
    int rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the device extension
         */
        rc = supdrvInitDevExt(&g_DevExt, sizeof(SUPDRVSESSION));
        if (RT_SUCCESS(rc))
        {
            /*
             * Initialize the session hash table.
             */
            memset(g_apSessionHashTab, 0, sizeof(g_apSessionHashTab));
            rc = RTSpinlockCreate(&g_Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDrvSol");
            if (RT_SUCCESS(rc))
            {
                rc = ddi_soft_state_init(&g_pVBoxDrvSolarisState, sizeof(vbox_devstate_t), 8);
                if (!rc)
                {
                    rc = mod_install(&g_VBoxDrvSolarisModLinkage);
                    if (!rc)
                        return rc; /* success */

                    ddi_soft_state_fini(&g_pVBoxDrvSolarisState);
                    LogRel((DEVICE_NAME ":mod_install failed! rc=%d\n", rc));
                }
                else
                    LogRel((DEVICE_NAME ":failed to initialize soft state.\n"));

                RTSpinlockDestroy(g_Spinlock);
                g_Spinlock = NIL_RTSPINLOCK;
            }
            else
            {
                LogRel((DEVICE_NAME ":VBoxDrvSolarisAttach: RTSpinlockCreate failed\n"));
                /* Translate the IPRT status into an errno for the kernel. */
                rc = RTErrConvertToErrno(rc);
            }
            supdrvDeleteDevExt(&g_DevExt);
        }
        else
        {
            LogRel((DEVICE_NAME ":VBoxDrvSolarisAttach: supdrvInitDevExt failed\n"));
            rc = RTErrConvertToErrno(rc);
        }
        RTR0TermForced();
    }
    else
    {
        LogRel((DEVICE_NAME ":VBoxDrvSolarisAttach: failed to init R0Drv\n"));
        rc = RTErrConvertToErrno(rc);
    }
    memset(&g_DevExt, 0, sizeof(g_DevExt));

    return rc;
}
/**
 * Kernel entry points
 *
 * Variant of the Solaris load entry point that also reports the GIP TSC mode
 * once the device extension is up. Same initialize-then-rollback structure
 * as the sibling _init above.
 *
 * @returns 0 on success, errno-style failure code otherwise.
 */
int _init(void)
{
#if 0 /* No IPRT logging before RTR0Init() is done! */
    LogFlowFunc(("vboxdrv:_init\n"));
#endif

    /*
     * Prevent module autounloading.
     */
    modctl_t *pModCtl = mod_getctl(&g_VBoxDrvSolarisModLinkage);
    if (pModCtl)
        pModCtl->mod_loadflags |= MOD_NOAUTOUNLOAD;
    else
        cmn_err(CE_NOTE, "vboxdrv: failed to disable autounloading!\n");

    /*
     * Initialize IPRT R0 driver, which internally calls OS-specific r0 init.
     */
    int rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the device extension
         */
        rc = supdrvInitDevExt(&g_DevExt, sizeof(SUPDRVSESSION));
        if (RT_SUCCESS(rc))
        {
            cmn_err(CE_CONT, "!tsc::mode %s @ tentative %lu Hz\n", SUPGetGIPModeName(g_DevExt.pGip), g_DevExt.pGip->u64CpuHz);

            /*
             * Initialize the session hash table.
             */
            memset(g_apSessionHashTab, 0, sizeof(g_apSessionHashTab));
            rc = RTSpinlockCreate(&g_Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDrvSol");
            if (RT_SUCCESS(rc))
            {
                rc = ddi_soft_state_init(&g_pVBoxDrvSolarisState, sizeof(vbox_devstate_t), 8);
                if (!rc)
                {
                    rc = mod_install(&g_VBoxDrvSolarisModLinkage);
                    if (!rc)
                        return rc; /* success */

                    ddi_soft_state_fini(&g_pVBoxDrvSolarisState);
                    LogRel(("vboxdrv: mod_install failed! rc=%d\n", rc));
                }
                else
                    LogRel(("vboxdrv: failed to initialize soft state.\n"));

                RTSpinlockDestroy(g_Spinlock);
                g_Spinlock = NIL_RTSPINLOCK;
            }
            else
            {
                LogRel(("VBoxDrvSolarisAttach: RTSpinlockCreate failed\n"));
                rc = RTErrConvertToErrno(rc);
            }
            supdrvDeleteDevExt(&g_DevExt);
        }
        else
        {
            LogRel(("VBoxDrvSolarisAttach: supdrvInitDevExt failed\n"));
            rc = EINVAL;
        }
        RTR0TermForced();
    }
    else
    {
        LogRel(("VBoxDrvSolarisAttach: failed to init R0Drv\n"));
        rc = RTErrConvertToErrno(rc);
    }
    memset(&g_DevExt, 0, sizeof(g_DevExt));

    return rc;
}
/**
 * API for cleaning up the heap spinlock on IPRT termination.
 * This is as RTMemExecDonate specific to AMD64 Linux/GNU.
 */
DECLHIDDEN(void) rtR0MemExecCleanup(void)
{
    /* Detach the handle from the global first, then destroy it. */
    RTSPINLOCK hSpinlock = g_HeapExecSpinlock;
    g_HeapExecSpinlock = NIL_RTSPINLOCK;
    RTSpinlockDestroy(hSpinlock);
}
/**
 * Destroys a handle table, optionally notifying the caller about each
 * handle that is still allocated.
 *
 * @returns VINF_SUCCESS (also for NIL), or VERR_INVALID_HANDLE /
 *          VERR_INVALID_POINTER on bad input.
 * @param   hHandleTable    The table to destroy; NIL is quietly ignored.
 * @param   pfnDelete       Optional callback invoked for every live handle.
 * @param   pvUser          User argument passed to pfnDelete.
 */
RTDECL(int) RTHandleTableDestroy(RTHANDLETABLE hHandleTable, PFNRTHANDLETABLEDELETE pfnDelete, void *pvUser)
{
    PRTHANDLETABLEINT   pThis;
    uint32_t            i1;
    uint32_t            i;

    /*
     * Validate input, quietly ignore the NIL handle.
     */
    if (hHandleTable == NIL_RTHANDLETABLE)
        return VINF_SUCCESS;
    pThis = (PRTHANDLETABLEINT)hHandleTable;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTHANDLETABLE_MAGIC, VERR_INVALID_HANDLE);
    AssertPtrNullReturn(pfnDelete, VERR_INVALID_POINTER);

    /*
     * Mark the thing as invalid / deleted.
     * Then kill the lock.
     */
    rtHandleTableLock(pThis);
    ASMAtomicWriteU32(&pThis->u32Magic, ~RTHANDLETABLE_MAGIC);
    rtHandleTableUnlock(pThis);

    if (pThis->hSpinlock != NIL_RTSPINLOCK)
    {
        /* Take the lock once more so any concurrent user drains out before
           the spinlock is destroyed. */
        rtHandleTableLock(pThis);
        rtHandleTableUnlock(pThis);

        RTSpinlockDestroy(pThis->hSpinlock);
        pThis->hSpinlock = NIL_RTSPINLOCK;
    }

    if (pfnDelete)
    {
        /*
         * Walk all the tables looking for used handles.
         */
        uint32_t cLeft = pThis->cCurAllocated;
        if (pThis->fFlags & RTHANDLETABLE_FLAGS_CONTEXT)
        {
            /* Context tables: entries carry a pvCtx passed to the callback. */
            for (i1 = 0; cLeft > 0 && i1 < pThis->cLevel1; i1++)
            {
                PRTHTENTRYCTX paTable = (PRTHTENTRYCTX)pThis->papvLevel1[i1];
                if (paTable)
                    for (i = 0; i < RTHT_LEVEL2_ENTRIES; i++)
                        if (!RTHT_IS_FREE(paTable[i].pvObj))
                        {
                            pfnDelete(hHandleTable, pThis->uBase + i + i1 * RTHT_LEVEL2_ENTRIES,
                                      paTable[i].pvObj, paTable[i].pvCtx, pvUser);
                            Assert(cLeft > 0);
                            cLeft--;
                        }
            }
        }
        else
        {
            /* Plain tables: no per-entry context, pass NULL instead. */
            for (i1 = 0; cLeft > 0 && i1 < pThis->cLevel1; i1++)
            {
                PRTHTENTRY paTable = (PRTHTENTRY)pThis->papvLevel1[i1];
                if (paTable)
                    for (i = 0; i < RTHT_LEVEL2_ENTRIES; i++)
                        if (!RTHT_IS_FREE(paTable[i].pvObj))
                        {
                            pfnDelete(hHandleTable, pThis->uBase + i + i1 * RTHT_LEVEL2_ENTRIES,
                                      paTable[i].pvObj, NULL, pvUser);
                            Assert(cLeft > 0);
                            cLeft--;
                        }
            }
        }
        Assert(!cLeft);
    }

    /*
     * Free the memory.
     */
    for (i1 = 0; i1 < pThis->cLevel1; i1++)
        if (pThis->papvLevel1[i1])
        {
            RTMemFree(pThis->papvLevel1[i1]);
            pThis->papvLevel1[i1] = NULL;
        }

    /* The level-1 directory is only a separate allocation above this
       threshold; below it, it lives inside pThis. */
    if (pThis->cMax / RTHT_LEVEL2_ENTRIES >= RTHT_LEVEL1_DYN_ALLOC_THRESHOLD)
        RTMemFree(pThis->papvLevel1);

    RTMemFree(pThis);

    return VINF_SUCCESS;
}
/**
 * Start the kernel module.
 *
 * Initializes IPRT, the device extension, the session spinlock, the
 * character device and its devfs nodes, and registers a sleep/wakeup
 * notifier. Every step is rolled back in reverse order on failure.
 *
 * @returns KMOD_RETURN_SUCCESS / KMOD_RETURN_FAILURE.
 */
static kern_return_t VBoxDrvDarwinStart(struct kmod_info *pKModInfo, void *pvData)
{
    int rc;
#ifdef DEBUG
    printf("VBoxDrvDarwinStart\n");
#endif

    /*
     * Initialize IPRT.
     */
    rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the device extension.
         */
        rc = supdrvInitDevExt(&g_DevExt, sizeof(SUPDRVSESSION));
        if (RT_SUCCESS(rc))
        {
            /*
             * Initialize the session hash table.
             */
            memset(g_apSessionHashTab, 0, sizeof(g_apSessionHashTab)); /* paranoia */
            rc = RTSpinlockCreate(&g_Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDrvDarwin");
            if (RT_SUCCESS(rc))
            {
                /*
                 * Registering ourselves as a character device.
                 */
                g_iMajorDeviceNo = cdevsw_add(-1, &g_DevCW);
                if (g_iMajorDeviceNo >= 0)
                {
                    /* Hardened builds restrict the system node to root. */
#ifdef VBOX_WITH_HARDENING
                    g_hDevFsDeviceSys = devfs_make_node(makedev(g_iMajorDeviceNo, 0), DEVFS_CHAR,
                                                        UID_ROOT, GID_WHEEL, 0600, DEVICE_NAME_SYS);
#else
                    g_hDevFsDeviceSys = devfs_make_node(makedev(g_iMajorDeviceNo, 0), DEVFS_CHAR,
                                                        UID_ROOT, GID_WHEEL, 0666, DEVICE_NAME_SYS);
#endif
                    if (g_hDevFsDeviceSys)
                    {
                        g_hDevFsDeviceUsr = devfs_make_node(makedev(g_iMajorDeviceNo, 1), DEVFS_CHAR,
                                                            UID_ROOT, GID_WHEEL, 0666, DEVICE_NAME_USR);
                        if (g_hDevFsDeviceUsr)
                        {
                            LogRel(("VBoxDrv: version " VBOX_VERSION_STRING " r%d; IOCtl version %#x; IDC version %#x; dev major=%d\n",
                                    VBOX_SVN_REV, SUPDRV_IOC_VERSION, SUPDRV_IDC_VERSION, g_iMajorDeviceNo));

                            /* Register a sleep/wakeup notification callback */
                            g_pSleepNotifier = registerPrioritySleepWakeInterest(&VBoxDrvDarwinSleepHandler, &g_DevExt, NULL);
                            if (g_pSleepNotifier == NULL)
                                LogRel(("VBoxDrv: register for sleep/wakeup events failed\n"));

                            /* Find kernel symbols that are kind of optional. */
                            vboxdrvDarwinResolveSymbols();
                            return KMOD_RETURN_SUCCESS;
                        }

                        LogRel(("VBoxDrv: devfs_make_node(makedev(%d,1),,,,%s) failed\n", g_iMajorDeviceNo, DEVICE_NAME_USR));
                        devfs_remove(g_hDevFsDeviceSys);
                        g_hDevFsDeviceSys = NULL;
                    }
                    else
                        LogRel(("VBoxDrv: devfs_make_node(makedev(%d,0),,,,%s) failed\n", g_iMajorDeviceNo, DEVICE_NAME_SYS));

                    cdevsw_remove(g_iMajorDeviceNo, &g_DevCW);
                    g_iMajorDeviceNo = -1;
                }
                else
                    LogRel(("VBoxDrv: cdevsw_add failed (%d)\n", g_iMajorDeviceNo));

                RTSpinlockDestroy(g_Spinlock);
                g_Spinlock = NIL_RTSPINLOCK;
            }
            else
                LogRel(("VBoxDrv: RTSpinlockCreate failed (rc=%d)\n", rc));

            supdrvDeleteDevExt(&g_DevExt);
        }
        else
            printf("VBoxDrv: failed to initialize device extension (rc=%d)\n", rc);

        RTR0TermForced();
    }
    else
        printf("VBoxDrv: failed to initialize IPRT (rc=%d)\n", rc);

    memset(&g_DevExt, 0, sizeof(g_DevExt));
    return KMOD_RETURN_FAILURE;
}