/**
 * Acquires the spinlock, NT ring-0 variant.
 *
 * For interrupt-safe spinlocks the caller's EFLAGS are saved and interrupts
 * are disabled before taking the lock; the saved state is stashed in the
 * lock structure so the matching release call can restore it.
 *
 * @param   Spinlock    The spinlock handle (PRTSPINLOCKINTERNAL in disguise).
 */
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC, ("magic=%#x\n", pThis->u32Magic));

    KIRQL SavedIrql;
    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#ifndef RTSPINLOCK_NT_HACK_NOIRQ
        RTCCUINTREG fIntSaved = ASMGetFlags();
        ASMIntDisable();
        KeAcquireSpinLock(&pThis->Spinlock, &SavedIrql);
        /* Fix: the saved interrupt flags were captured but never stored, so the
           release path would restore stale state and leave interrupts disabled. */
        pThis->fIntSaved = fIntSaved;
#else
        /* NOIRQ hack: raise to DISPATCH_LEVEL manually and spin on our own
           lock word instead of using KeAcquireSpinLock. */
        SavedIrql = KeGetCurrentIrql();
        if (SavedIrql < DISPATCH_LEVEL)
        {
            KeRaiseIrql(DISPATCH_LEVEL, &SavedIrql);
            Assert(SavedIrql < DISPATCH_LEVEL);
        }
        RTCCUINTREG fIntSaved = ASMGetFlags();
        ASMIntDisable();
        if (!ASMAtomicCmpXchgU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_TAKEN, RTSPINLOCK_NT_HACK_NOIRQ_FREE))
        {
            while (!ASMAtomicCmpXchgU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_TAKEN, RTSPINLOCK_NT_HACK_NOIRQ_FREE))
                ASMNopPause();
        }
        pThis->fIntSaved = fIntSaved;
#endif
    }
    else
        KeAcquireSpinLock(&pThis->Spinlock, &SavedIrql);
    pThis->SavedIrql = SavedIrql;
}
/**
 * Acquires the spinlock, Darwin ring-0 variant.
 *
 * Interrupt-safe locks record the caller's flags and run with interrupts
 * disabled while held; plain locks go straight to lck_spin_lock.
 *
 * @param   Spinlock    The spinlock handle (PRTSPINLOCKINTERNAL in disguise).
 */
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (!(pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE))
        lck_spin_lock(pThis->pSpinLock);
    else
    {
        /* Save the interrupt flag state and disable interrupts before taking
           the lock; the release path restores from pThis->fIntSaved. */
        uint32_t fSavedFlags = ASMGetFlags();
        ASMIntDisable();
        lck_spin_lock(pThis->pSpinLock);
        pThis->fIntSaved = fSavedFlags;
    }
}
/**
 * Hook function for the thread-preempting event.
 *
 * @param   pPreemptNotifier    Pointer to the preempt_notifier struct.
 * @param   pNext               Pointer to the task that is preempting the
 *                              current thread.
 *
 * @remarks Called with the rq (runqueue) lock held and with preemption and
 *          interrupts disabled!
 */
static void rtThreadCtxHooksLnxSchedOut(struct preempt_notifier *pPreemptNotifier, struct task_struct *pNext)
{
    PRTTHREADCTXINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXINT, hPreemptNotifier);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    /* Snapshot EFLAGS and set AC so the callback can run with SMAP relaxed;
       restored below before returning to the scheduler. */
    RTCCUINTREG fSavedEFlags = ASMGetFlags();
    stac();
#endif
    AssertPtr(pThis);
    AssertPtr(pThis->pfnThreadCtxHook);
    Assert(pThis->fRegistered);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* Invoke the user callback for the preemption event. */
    pThis->pfnThreadCtxHook(RTTHREADCTXEVENT_PREEMPTING, pThis->pvUser);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    ASMSetFlags(fSavedEFlags);
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
    /* Remember the flags (AC bit in particular) so the sched-in hook can
       carry the AC state across the schedule; see rtThreadCtxHooksLnxSchedIn. */
    pThis->fSavedRFlags = fSavedEFlags;
# endif
#endif
}
/**
 * Hook function for the thread-resumed event.
 *
 * @param   pPreemptNotifier    Pointer to the preempt_notifier struct.
 * @param   iCpu                The CPU this thread is scheduled on.
 *
 * @remarks Called without holding the rq (runqueue) lock and with preemption
 *          enabled!
 */
static void rtThreadCtxHooksLnxSchedIn(struct preempt_notifier *pPreemptNotifier, int iCpu)
{
    PRTTHREADCTXINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXINT, hPreemptNotifier);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    /* Snapshot EFLAGS and set AC while the callback runs; restored below. */
    RTCCUINTREG fSavedEFlags = ASMGetFlags();
    stac();
#endif
    AssertPtr(pThis);
    AssertPtr(pThis->pfnThreadCtxHook);
    Assert(pThis->fRegistered);

    /* Invoke the user callback for the resumed event. */
    pThis->pfnThreadCtxHook(RTTHREADCTXEVENT_RESUMED, pThis->pvUser);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
    /* Splice in the AC bit (EFLAGS bit 18) saved by the sched-out hook, so
       the AC state set before the thread was preempted is carried over. */
    fSavedEFlags &= ~RT_BIT_64(18) /*X86_EFL_AC*/;
    fSavedEFlags |= pThis->fSavedRFlags & RT_BIT_64(18) /*X86_EFL_AC*/;
# endif
    ASMSetFlags(fSavedEFlags);
#endif
}
/**
 * Acquires the spinlock, generic (portable) variant.
 *
 * Spins on an atomic compare-exchange of pThis->fLocked.  On x86/AMD64 the
 * interrupt-safe mode saves EFLAGS and disables interrupts for the duration
 * of the lock; the saved flags are stored in pThis->fIntSaved for release.
 * When RT_CFG_SPINLOCK_GENERIC_DO_SLEEP is non-zero, the spin is bounded and
 * interleaved with RTThreadYield() to avoid monopolizing the CPU.
 *
 * @param   Spinlock    The spinlock handle (PRTSPINLOCKINTERNAL in disguise).
 */
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /* Capture flags once up front; re-used on every spin iteration. */
        uint32_t fIntSaved = ASMGetFlags();
#endif

#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        /* Bounded spin, then yield and retry; interrupts are re-enabled
           (flags restored) around the yield so we never sleep with them off. */
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                    /* Got it: remember the flags for the release path. */
                    pThis->fIntSaved = fIntSaved;
# endif
                    return;
                }
                ASMNopPause();
            }
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
#endif
            RTThreadYield();
        }
#else
        /* Pure spin variant: never yields, just pauses between attempts. */
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                pThis->fIntSaved = fIntSaved;
# endif
                return;
            }
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            /* Re-enable interrupts while backing off to limit IRQ latency. */
            ASMSetFlags(fIntSaved);
#endif
            ASMNopPause();
        }
#endif
    }
    else
    {
        /* Non-interrupt-safe: plain CAS spin, no flag fiddling needed. */
#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                    return;
                ASMNopPause();
            }
            RTThreadYield();
        }
#else
        while (!ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            ASMNopPause();
#endif
    }
}
static int VBoxDrvLinuxIOCtl(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
#endif
{
    PSUPDRVSESSION pSession = (PSUPDRVSESSION)pFilp->private_data;
    int rc;
#if defined(VBOX_STRICT) || defined(VBOX_WITH_EFLAGS_AC_SET_IN_VBOXDRV)
    RTCCUINTREG fSavedEfl;

    /*
     * Refuse all I/O control calls if we've ever detected EFLAGS.AC being cleared.
     *
     * This isn't a problem, as there is absolutely nothing in the kernel context that
     * depend on user context triggering cleanups.  That would be pretty wild, right?
     */
    if (RT_UNLIKELY(g_DevExt.cBadContextCalls > 0))
    {
        SUPR0Printf("VBoxDrvLinuxIOCtl: EFLAGS.AC=0 detected %u times, refusing all I/O controls!\n", g_DevExt.cBadContextCalls);
        return ESPIPE;
    }

    /* Set AC and remember the full flags so we can verify + restore them below. */
    fSavedEfl = ASMAddFlags(X86_EFL_AC);
# else
    stac();
# endif

    /*
     * Deal with the two high-speed IOCtl that takes it's arguments from
     * the session and iCmd, and only returns a VBox status code.
     */
#ifdef HAVE_UNLOCKED_IOCTL
    if (RT_LIKELY(   (   uCmd == SUP_IOCTL_FAST_DO_RAW_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_HM_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_NOP)
                  && pSession->fUnrestricted == true))
        rc = supdrvIOCtlFast(uCmd, ulArg, &g_DevExt, pSession);
    else
        rc = VBoxDrvLinuxIOCtlSlow(pFilp, uCmd, ulArg, pSession);
#else   /* !HAVE_UNLOCKED_IOCTL */
    /* Old ioctl entry point holds the BKL; drop it around the real work. */
    unlock_kernel();
    if (RT_LIKELY(   (   uCmd == SUP_IOCTL_FAST_DO_RAW_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_HM_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_NOP)
                  && pSession->fUnrestricted == true))
        rc = supdrvIOCtlFast(uCmd, ulArg, &g_DevExt, pSession);
    else
        rc = VBoxDrvLinuxIOCtlSlow(pFilp, uCmd, ulArg, pSession);
    lock_kernel();
#endif  /* !HAVE_UNLOCKED_IOCTL */

#if defined(VBOX_STRICT) || defined(VBOX_WITH_EFLAGS_AC_SET_IN_VBOXDRV)
    /*
     * Before we restore AC and the rest of EFLAGS, check if the IOCtl handler code
     * accidentally modified it or some other important flag.
     */
    if (RT_UNLIKELY(   (ASMGetFlags() & (X86_EFL_AC | X86_EFL_IF | X86_EFL_DF | X86_EFL_IOPL))
                    != ((fSavedEfl    & (X86_EFL_AC | X86_EFL_IF | X86_EFL_DF | X86_EFL_IOPL)) | X86_EFL_AC) ))
    {
        char szTmp[48];
        RTStrPrintf(szTmp, sizeof(szTmp), "uCmd=%#x: %#x->%#x!", _IOC_NR(uCmd), (uint32_t)fSavedEfl, (uint32_t)ASMGetFlags());
        supdrvBadContext(&g_DevExt, "SUPDrv-linux.c", __LINE__, szTmp);
    }
    ASMSetFlags(fSavedEfl);
#else
    clac();
#endif
    return rc;
}