RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
{
    IPRT_LINUX_SAVE_EFL_AC();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
/* On Linux 4.20, time.h includes time64.h and we have to use 64-bit times. */
# ifdef _LINUX_TIME64_H
    struct timespec64 Ts;
    ktime_get_real_ts64(&Ts);
# else
    struct timespec Ts;
    ktime_get_real_ts(&Ts);
# endif
    IPRT_LINUX_RESTORE_EFL_AC();
# ifdef _LINUX_TIME64_H
    return RTTimeSpecSetTimespec64(pTime, &Ts);
# else
    return RTTimeSpecSetTimespec(pTime, &Ts);
# endif
#else   /* < 2.6.16 */
    struct timeval Tv;
    do_gettimeofday(&Tv);
    IPRT_LINUX_RESTORE_EFL_AC();
    return RTTimeSpecSetTimeval(pTime, &Tv);
#endif
}
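A minimal caller sketch for reference (the helper name rtExampleLogNow is hypothetical, not part of IPRT); it assumes only the documented RTTIMESPEC accessors from iprt/time.h:

/* Hypothetical usage: fetch the current wall-clock time and convert it
 * to nanoseconds since the Unix epoch with RTTimeSpecGetNano(). */
static void rtExampleLogNow(void)
{
    RTTIMESPEC Now;
    RTTimeNow(&Now);
    int64_t cNsSinceEpoch = RTTimeSpecGetNano(&Now); /* ns since 1970-01-01 UTC */
    NOREF(cNsSinceEpoch);
}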
Example 2
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    RTThreadPreemptDisable(&PreemptState);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
#else /* older kernels */
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    Assert(rc == 0); NOREF(rc);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
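For context, a hedged sketch of the PFNRTMPWORKER contract this function expects (rtExampleWorker and the invocation counter are illustrative, not from the source): the worker runs on every other online CPU in IPI context with preemption disabled, so it must not block.

/* Hypothetical worker: counts how many CPUs executed it. */
static DECLCALLBACK(void) rtExampleWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    uint32_t *pcInvocations = (uint32_t *)pvUser1;
    ASMAtomicIncU32(pcInvocations);     /* IPI context: keep it short and non-blocking. */
    NOREF(idCpu); NOREF(pvUser2);
}

/* Hypothetical caller: */
static int rtExampleBroadcast(void)
{
    uint32_t cInvocations = 0;
    int rc = RTMpOnOthers(rtExampleWorker, &cInvocations, NULL /* pvUser2 */);
    /* On success, cInvocations equals the number of other online CPUs. */
    return rc;
}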
Example 3
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();

    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_OFFLINE;

# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
# else  /* older kernels */
#  error oops
# endif /* older kernels */
    NOREF(rc);
    Assert(rc == 0);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;

#else  /* older kernels */
    /* No single-CPU cross call on these kernels (no smp_call_function_single). */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
RTDECL(int) RTThreadCtxHookEnable(RTTHREADCTXHOOK hCtxHook)
{
    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis = hCtxHook;
    AssertPtr(pThis);
    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(pThis->hOwner == RTThreadNativeSelf());
    Assert(!pThis->fEnabled);
    if (!pThis->fEnabled)
    {
        IPRT_LINUX_SAVE_EFL_AC();
        Assert(pThis->PreemptOps.sched_out == rtThreadCtxHooksLnxSchedOut);
        Assert(pThis->PreemptOps.sched_in == rtThreadCtxHooksLnxSchedIn);

        /*
         * Register the callback.
         */
        preempt_disable();
        pThis->fEnabled = true;
        preempt_notifier_register(&pThis->LnxPreemptNotifier);
        preempt_enable();

        IPRT_LINUX_RESTORE_EFL_AC();
    }

    return VINF_SUCCESS;
}
RTDECL(int) RTThreadCtxHookDisable(RTTHREADCTXHOOK hCtxHook)
{
    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis = hCtxHook;
    if (pThis != NIL_RTTHREADCTXHOOK)
    {
        AssertPtr(pThis);
        AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                        VERR_INVALID_HANDLE);
        Assert(pThis->hOwner == RTThreadNativeSelf());

        /*
         * Deregister the callback.
         */
        if (pThis->fEnabled)
        {
            IPRT_LINUX_SAVE_EFL_AC();
            rtThreadCtxHookDisable(pThis);
            IPRT_LINUX_RESTORE_EFL_AC();
        }
    }
    return VINF_SUCCESS;
}
Example 6
/**
 * OS specific free function.
 */
DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
{
    IPRT_LINUX_SAVE_EFL_AC();

    pHdr->u32Magic += 1;
    if (pHdr->fFlags & RTMEMHDR_FLAG_KMALLOC)
        kfree(pHdr);
#ifdef RTMEMALLOC_EXEC_HEAP
    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_HEAP)
    {
        RTSpinlockAcquire(g_HeapExecSpinlock);
        RTHeapSimpleFree(g_HeapExec, pHdr);
        RTSpinlockRelease(g_HeapExecSpinlock);
    }
#endif
#ifdef RTMEMALLOC_EXEC_VM_AREA
    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_VM_AREA)
    {
        PRTMEMLNXHDREX pHdrEx    = RT_FROM_MEMBER(pHdr, RTMEMLNXHDREX, Hdr);
        size_t         iPage     = pHdrEx->pVmArea->nr_pages;
        struct page  **papPages  = pHdrEx->pVmArea->pages;
        void          *pvMapping = pHdrEx->pVmArea->addr;

        vunmap(pvMapping);

        while (iPage-- > 0)
            __free_page(papPages[iPage]);
        kfree(papPages);
    }
#endif
    else
        vfree(pHdr);

    IPRT_LINUX_RESTORE_EFL_AC();
}
Example 7
RTDECL(int)  RTSemEventDestroy(RTSEMEVENT hEventSem)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTSEMEVENTINTERNAL pThis = hEventSem;
    if (pThis == NIL_RTSEMEVENT)
        return VINF_SUCCESS;
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
    Assert(pThis->cRefs > 0);

    /*
     * Invalidate it and signal the object just in case.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC);
    ASMAtomicWriteU32(&pThis->fState, 0);
    Assert(!waitqueue_active(&pThis->Head));
    wake_up_all(&pThis->Head);
    rtR0SemEventLnxRelease(pThis);

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
Example 8
/**
 * Frees memory allocated using RTMemContAlloc().
 *
 * @param   pv      Pointer to return from RTMemContAlloc().
 * @param   cb      The cb parameter passed to RTMemContAlloc().
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    if (pv)
    {
        int             cOrder;
        unsigned        cPages;
        unsigned        iPage;
        struct page    *paPages;
        IPRT_LINUX_SAVE_EFL_AC();

        /* validate */
        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
        Assert(cb > 0);

        /* calc order and get pages */
        cb = RT_ALIGN_Z(cb, PAGE_SIZE);
        cPages = cb >> PAGE_SHIFT;
        cOrder = CalcPowerOf2Order(cPages);
        paPages = virt_to_page(pv);

        /*
         * Restore page attributes freeing the pages.
         */
        for (iPage = 0; iPage < cPages; iPage++)
        {
            ClearPageReserved(&paPages[iPage]);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
            MY_SET_PAGES_NOEXEC(&paPages[iPage], 1);
#endif
        }
        __free_pages(paPages, cOrder);
        IPRT_LINUX_RESTORE_EFL_AC();
    }
}
Example 9
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID  idCpu;
    uint32_t cLoops;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker  = pfnWorker;
    Args.pvUser1    = pvUser1;
    Args.pvUser2    = pvUser2;
    Args.idCpu      = NIL_RTCPUID;
    Args.cHits      = 0;

    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
#else
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
#endif
        Assert(!rc); NOREF(rc);
    }

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

    /* Wait for all of them to finish. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
Example 10
DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    IPRT_LINUX_SAVE_EFL_AC();
    printk(KERN_EMERG
           "\r\n!!Assertion Failed!!\r\n"
           "Expression: %s\r\n"
           "Location  : %s(%d) %s\r\n",
           pszExpr, pszFile, uLine, pszFunction);
    IPRT_LINUX_RESTORE_EFL_AC();
}
Example 11
static int rtR0ThreadLnxSleepCommon(RTMSINTERVAL cMillies)
{
    IPRT_LINUX_SAVE_EFL_AC();
    long cJiffies = msecs_to_jiffies(cMillies);
    set_current_state(TASK_INTERRUPTIBLE);
    cJiffies = schedule_timeout(cJiffies);
    IPRT_LINUX_RESTORE_EFL_AC();
    if (!cJiffies)
        return VINF_SUCCESS;
    return VERR_INTERRUPTED;
}
Example 12
DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
{
    char szMsg[256];
    IPRT_LINUX_SAVE_EFL_AC();

    RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
    szMsg[sizeof(szMsg) - 1] = '\0';
    printk(KERN_EMERG "%s", szMsg);

    NOREF(fInitial);
    IPRT_LINUX_RESTORE_EFL_AC();
}
Example 13
RTDECL(int) RTThreadCtxHookCreate(PRTTHREADCTXHOOK phCtxHook, uint32_t fFlags, PFNRTTHREADCTXHOOK pfnCallback, void *pvUser)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis;
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
    AssertReturn(fFlags == 0, VERR_INVALID_FLAGS);

    /*
     * Allocate and initialize a new hook.  We don't register it yet, just
     * create it.
     */
    pThis = (PRTTHREADCTXHOOKINT)RTMemAllocZ(sizeof(*pThis));
    if (RT_UNLIKELY(!pThis))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }
    pThis->u32Magic     = RTTHREADCTXHOOKINT_MAGIC;
    pThis->hOwner       = RTThreadNativeSelf();
    pThis->fEnabled     = false;
    pThis->pfnCallback  = pfnCallback;
    pThis->pvUser       = pvUser;
    preempt_notifier_init(&pThis->LnxPreemptNotifier, &pThis->PreemptOps);
    pThis->PreemptOps.sched_out = rtThreadCtxHooksLnxSchedOut;
    pThis->PreemptOps.sched_in  = rtThreadCtxHooksLnxSchedIn;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    preempt_notifier_inc();
#endif

    *phCtxHook = pThis;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
{
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();

# ifdef CPU_DOWN_FAILED
    RTCpuSetEmpty(&g_MpPendingOfflineSet);
# endif

    rc = register_cpu_notifier(&g_NotifierBlock);
    IPRT_LINUX_RESTORE_EFL_AC();
    AssertMsgReturn(!rc, ("%d\n", rc), RTErrConvertFromErrno(rc));
    return VINF_SUCCESS;
}
Example 15
RTDECL(bool) RTThreadYield(void)
{
    IPRT_LINUX_SAVE_EFL_AC();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 20)
    yield();
#else
    /** @todo r=ramshankar: Can we use cond_resched() instead?  */
    set_current_state(TASK_RUNNING);
    sys_sched_yield();
    schedule();
#endif
    IPRT_LINUX_RESTORE_EFL_AC();
    return true;
}
Example 16
RTDECL(int) SUPR0Printf(const char *pszFormat, ...)
{
    va_list va;
    char    szMsg[512];
    IPRT_LINUX_SAVE_EFL_AC();

    va_start(va, pszFormat);
    RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
    va_end(va);
    szMsg[sizeof(szMsg) - 1] = '\0';

    printk("%s", szMsg);

    IPRT_LINUX_RESTORE_EFL_AC();
    return 0;
}
Example 17
static int vboxPciLinuxDevRegisterWithIommu(PVBOXRAWPCIINS pIns)
{
#ifdef VBOX_WITH_IOMMU
    int rc = VINF_SUCCESS;
    struct pci_dev *pPciDev = pIns->pPciDev;
    PVBOXRAWPCIDRVVM pData = VBOX_DRV_VMDATA(pIns);
    IPRT_LINUX_SAVE_EFL_AC();

    if (RT_LIKELY(pData))
    {
        if (RT_LIKELY(pData->pIommuDomain))
        {
            /** @todo KVM checks IOMMU_CAP_CACHE_COHERENCY and sets the
             *  IOMMU_CACHE flag, which it later uses when mapping physical
             *  addresses; doing the same could improve performance.
             */
            int rcLnx = iommu_attach_device(pData->pIommuDomain, &pPciDev->dev);
            if (!rcLnx)
            {
                vbpci_printk(KERN_DEBUG, pPciDev, "attached to IOMMU\n");
                pIns->fIommuUsed = true;
                rc = VINF_SUCCESS;
            }
            else
            {
                vbpci_printk(KERN_DEBUG, pPciDev, "failed to attach to IOMMU, error %d\n", rcLnx);
                rc = VERR_INTERNAL_ERROR;
            }
        }
        else
        {
            vbpci_printk(KERN_DEBUG, pPciDev, "cannot attach to IOMMU, no domain\n");
            rc = VERR_NOT_FOUND;
        }
    }
    else
    {
        vbpci_printk(KERN_DEBUG, pPciDev, "cannot attach to IOMMU, no VM data\n");
        rc = VERR_INVALID_PARAMETER;
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
Example 18
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = idCpu;
    Args.cHits = 0;

    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;

    RTThreadPreemptDisable(&PreemptState);
    if (idCpu != RTMpCpuId())
    {
        if (RTMpIsCpuOnline(idCpu))
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#else /* older kernels */
            rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
            Assert(rc == 0);
            rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
    {
        rtmpLinuxWrapper(&Args);
        rc = VINF_SUCCESS;
    }
    RTThreadPreemptRestore(&PreemptState);

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
Example 19
int VBOXCALL    supdrvOSMsrProberWrite(uint32_t uMsr, RTCPUID idCpu, uint64_t uValue)
{
# ifdef SUPDRV_LINUX_HAS_SAFE_MSR_API
    int rc;

    IPRT_LINUX_SAVE_EFL_AC();
    if (idCpu == NIL_RTCPUID)
        rc = wrmsr_safe(uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue));
    else if (RTMpIsCpuOnline(idCpu))
        rc = wrmsr_safe_on_cpu(idCpu, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue));
    else
        return VERR_CPU_OFFLINE;
    IPRT_LINUX_RESTORE_EFL_AC();

    if (rc == 0)
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED;
# else
    return VERR_NOT_SUPPORTED;
# endif
}
Example 20
RTDECL(int)  RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
{
    PRTSEMEVENTINTERNAL pThis;
    IPRT_LINUX_SAVE_EFL_AC();

    AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
    Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));

    pThis = (PRTSEMEVENTINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;

    pThis->u32Magic = RTSEMEVENT_MAGIC;
    pThis->fState   = 0;
    pThis->cRefs    = 1;
    init_waitqueue_head(&pThis->Head);

    *phEventSem = pThis;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
Example 21
RTDECL(int)  RTSemEventSignal(RTSEMEVENT hEventSem)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
    rtR0SemEventLnxRetain(pThis);

    /*
     * Signal the event object.
     */
    ASMAtomicWriteU32(&pThis->fState, 1);
    wake_up(&pThis->Head);

    rtR0SemEventLnxRelease(pThis);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
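Putting the event-semaphore pieces in this collection together, a hedged lifecycle sketch (RTSemEventCreate is the plain creation wrapper; the immediate signal-then-wait round trip is purely illustrative):

static int rtExampleEventRoundTrip(void)
{
    RTSEMEVENT hEvent = NIL_RTSEMEVENT;
    int rc = RTSemEventCreate(&hEvent);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventSignal(hEvent);          /* sets fState and wakes a waiter */
        if (RT_SUCCESS(rc))
            rc = RTSemEventWait(hEvent, 50);    /* ms; returns at once, already signalled */
        RTSemEventDestroy(hEvent);
    }
    return rc;
}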
Example 22
static int vboxPciLinuxDevUnregisterWithIommu(PVBOXRAWPCIINS pIns)
{
#ifdef VBOX_WITH_IOMMU
    int rc = VINF_SUCCESS;
    struct pci_dev *pPciDev = pIns->pPciDev;
    PVBOXRAWPCIDRVVM pData = VBOX_DRV_VMDATA(pIns);
    IPRT_LINUX_SAVE_EFL_AC();

    if (RT_LIKELY(pData))
    {
        if (RT_LIKELY(pData->pIommuDomain))
        {
            if (pIns->fIommuUsed)
            {
                iommu_detach_device(pData->pIommuDomain, &pIns->pPciDev->dev);
                vbpci_printk(KERN_DEBUG, pPciDev, "detached from IOMMU\n");
                pIns->fIommuUsed = false;
            }
        }
        else
        {
            vbpci_printk(KERN_DEBUG, pPciDev,
                         "cannot detach from IOMMU, no domain\n");
            rc = VERR_NOT_FOUND;
        }
    }
    else
    {
        vbpci_printk(KERN_DEBUG, pPciDev,
                     "cannot detach from IOMMU, no VM data\n");
        rc = VERR_INVALID_PARAMETER;
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
Example 23
static int vboxPciLinuxDevReset(PVBOXRAWPCIINS pIns)
{
    int rc = VINF_SUCCESS;
    IPRT_LINUX_SAVE_EFL_AC();

    if (RT_LIKELY(pIns->pPciDev))
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        if (pci_reset_function(pIns->pPciDev))
        {
            vbpci_printk(KERN_DEBUG, pIns->pPciDev,
                         "pci_reset_function() failed\n");
            rc = VERR_INTERNAL_ERROR;
        }
#else
        rc = VERR_NOT_SUPPORTED;
#endif
    }
    else
        rc = VERR_INVALID_PARAMETER;

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
Example 24
RTDECL(int) RTThreadCtxHookDestroy(RTTHREADCTXHOOK hCtxHook)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis = hCtxHook;
    if (pThis == NIL_RTTHREADCTXHOOK)
        return VINF_SUCCESS;
    AssertPtr(pThis);
    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pThis->fEnabled || pThis->hOwner == RTThreadNativeSelf());

    /*
     * If there's still a registered thread-context hook, deregister it now before destroying the object.
     */
    if (pThis->fEnabled)
    {
        Assert(pThis->hOwner == RTThreadNativeSelf());
        rtThreadCtxHookDisable(pThis);
        Assert(!pThis->fEnabled); /* paranoia */
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    preempt_notifier_dec();
#endif

    ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
    RTMemFree(pThis);

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
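A hedged lifecycle sketch for the context-hook API above (the callback name and empty body are illustrative; FNRTTHREADCTXHOOK receives RTTHREADCTXEVENT_OUT when the thread is scheduled out and RTTHREADCTXEVENT_IN when it is scheduled back in):

static DECLCALLBACK(void) rtExampleCtxHookCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
{
    /* Invoked by the scheduler with preemption disabled; must not block. */
    NOREF(enmEvent); NOREF(pvUser);
}

static int rtExampleCtxHookRoundTrip(void)
{
    RTTHREADCTXHOOK hHook = NIL_RTTHREADCTXHOOK;
    int rc = RTThreadCtxHookCreate(&hHook, 0 /* fFlags */, rtExampleCtxHookCallback, NULL);
    if (RT_SUCCESS(rc))
    {
        rc = RTThreadCtxHookEnable(hHook);  /* registers the preempt notifier */
        /* ... the thread does its work here ... */
        RTThreadCtxHookDisable(hHook);
        RTThreadCtxHookDestroy(hHook);
    }
    return rc;
}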
Example 25
int VBOXCALL    supdrvOSMsrProberRead(uint32_t uMsr, RTCPUID idCpu, uint64_t *puValue)
{
# ifdef SUPDRV_LINUX_HAS_SAFE_MSR_API
    uint32_t u32Low, u32High;
    int rc;

    IPRT_LINUX_SAVE_EFL_AC();
    if (idCpu == NIL_RTCPUID)
        rc = rdmsr_safe(uMsr, &u32Low, &u32High);
    else if (RTMpIsCpuOnline(idCpu))
        rc = rdmsr_safe_on_cpu(idCpu, uMsr, &u32Low, &u32High);
    else
        return VERR_CPU_OFFLINE;
    IPRT_LINUX_RESTORE_EFL_AC();
    if (rc == 0)
    {
        *puValue = RT_MAKE_U64(u32Low, u32High);
        return VINF_SUCCESS;
    }
    return VERR_ACCESS_DENIED;
# else
    return VERR_NOT_SUPPORTED;
# endif
}
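A hedged caller sketch (MSR 0x10 is IA32_TIME_STAMP_COUNTER, chosen only as a harmless example; NIL_RTCPUID selects the current CPU):

static int rtExampleReadTsc(uint64_t *puTsc)
{
    /* rdmsr_safe catches the #GP a non-existent MSR would raise. */
    return supdrvOSMsrProberRead(0x10 /* IA32_TIME_STAMP_COUNTER */, NIL_RTCPUID, puTsc);
}

supdrvOSMsrProberWrite() is the mirror image, taking the 64-bit value to store instead of a destination pointer.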
Example 26
/**
 * Allocates physical contiguous memory (below 4GB).
 * The allocation is page aligned and the content is undefined.
 *
 * @returns Pointer to the memory block. This is page aligned.
 * @param   pPhys   Where to store the physical address.
 * @param   cb      The allocation size in bytes. This is always
 *                  rounded up to PAGE_SIZE.
 */
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
{
    int             cOrder;
    unsigned        cPages;
    struct page    *paPages;
    void           *pvRet;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * validate input.
     */
    Assert(VALID_PTR(pPhys));
    Assert(cb > 0);

    /*
     * Allocate page pointer array.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    cPages = cb >> PAGE_SHIFT;
    cOrder = CalcPowerOf2Order(cPages);
#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    paPages = alloc_pages(GFP_DMA32 | __GFP_NOWARN, cOrder);
    if (!paPages)
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA: 0-16MB */
        paPages = alloc_pages(GFP_DMA | __GFP_NOWARN, cOrder);
#else
        /* ZONE_NORMAL: 0-896MB */
        paPages = alloc_pages(GFP_USER | __GFP_NOWARN, cOrder);
#endif
    if (paPages)
    {
        /*
         * Reserve the pages and mark them executable.
         */
        unsigned iPage;
        for (iPage = 0; iPage < cPages; iPage++)
        {
            Assert(!PageHighMem(&paPages[iPage]));
            if (iPage + 1 < cPages)
            {
                AssertMsg(          (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage])) + PAGE_SIZE
                                ==  (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage + 1]))
                          &&        page_to_phys(&paPages[iPage]) + PAGE_SIZE
                                ==  page_to_phys(&paPages[iPage + 1]),
                          ("iPage=%i cPages=%u [0]=%#llx,%p [1]=%#llx,%p\n", iPage, cPages,
                           (long long)page_to_phys(&paPages[iPage]),     phys_to_virt(page_to_phys(&paPages[iPage])),
                           (long long)page_to_phys(&paPages[iPage + 1]), phys_to_virt(page_to_phys(&paPages[iPage + 1])) ));
            }

            SetPageReserved(&paPages[iPage]);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
            MY_SET_PAGES_EXEC(&paPages[iPage], 1);
#endif
        }
        *pPhys = page_to_phys(paPages);
        pvRet = phys_to_virt(page_to_phys(paPages));
    }
    else
        pvRet = NULL;

    IPRT_LINUX_RESTORE_EFL_AC();
    return pvRet;
}
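A hedged pairing sketch for the two contiguous-memory routines (the size and the zeroing are illustrative):

static int rtExampleContBuffer(void)
{
    RTCCPHYS PhysAddr = 0;
    void    *pv = RTMemContAlloc(&PhysAddr, 2 * PAGE_SIZE);
    if (!pv)
        return VERR_NO_MEMORY;
    /* pv is the kernel virtual mapping; PhysAddr is the matching physical
     * address, e.g. for programming a DMA-capable device. */
    memset(pv, 0, 2 * PAGE_SIZE);
    RTMemContFree(pv, 2 * PAGE_SIZE);
    return VINF_SUCCESS;
}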
Example 27
/**
 * OS specific allocation function.
 */
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    PRTMEMHDR pHdr;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Allocate.
     */
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
            return VERR_NOT_SUPPORTED;

#if defined(RT_ARCH_AMD64)
# ifdef RTMEMALLOC_EXEC_HEAP
        if (g_HeapExec != NIL_RTHEAPSIMPLE)
        {
            RTSpinlockAcquire(g_HeapExecSpinlock);
            pHdr = (PRTMEMHDR)RTHeapSimpleAlloc(g_HeapExec, cb + sizeof(*pHdr), 0);
            RTSpinlockRelease(g_HeapExecSpinlock);
            fFlags |= RTMEMHDR_FLAG_EXEC_HEAP;
        }
        else
            pHdr = NULL;

# elif defined(RTMEMALLOC_EXEC_VM_AREA)
        pHdr = rtR0MemAllocExecVmArea(cb);
        fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;

# else  /* !RTMEMALLOC_EXEC_HEAP */
# error "you don not want to go here..."
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
# endif /* !RTMEMALLOC_EXEC_HEAP */

#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
#else
        pHdr = (PRTMEMHDR)vmalloc(cb + sizeof(*pHdr));
#endif
    }
    else
    {
        if (
#if 1 /* vmalloc has serious performance issues, avoid it. */
               cb <= PAGE_SIZE*16 - sizeof(*pHdr)
#else
               cb <= PAGE_SIZE
#endif
            || (fFlags & RTMEMHDR_FLAG_ANY_CTX)
           )
        {
            fFlags |= RTMEMHDR_FLAG_KMALLOC;
            pHdr = kmalloc(cb + sizeof(*pHdr),
                           (fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC) ? (GFP_ATOMIC | __GFP_NOWARN)
                                                                  : (GFP_KERNEL | __GFP_NOWARN));
            if (RT_UNLIKELY(   !pHdr
                            && cb > PAGE_SIZE
                            && !(fFlags & RTMEMHDR_FLAG_ANY_CTX) ))
            {
                fFlags &= ~RTMEMHDR_FLAG_KMALLOC;
                pHdr = vmalloc(cb + sizeof(*pHdr));
            }
        }
        else
            pHdr = vmalloc(cb + sizeof(*pHdr));
    }
    if (RT_UNLIKELY(!pHdr))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    /*
     * Initialize.
     */
    pHdr->u32Magic  = RTMEMHDR_MAGIC;
    pHdr->fFlags    = fFlags;
    pHdr->cb        = cb;
    pHdr->cbReq     = cb;

    *ppHdr = pHdr;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
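The allocator and rtR0MemFree() above pair through the RTMEMHDR flags; a hedged sketch of that round trip (the direct calls are illustrative, real callers go through the RTMemAlloc* front ends):

static int rtExampleHdrRoundTrip(void)
{
    PRTMEMHDR pHdr;
    int rc = rtR0MemAllocEx(64 /* cb */, 0 /* fFlags */, &pHdr);
    if (RT_SUCCESS(rc))
    {
        void *pv = pHdr + 1;    /* user data follows the header */
        memset(pv, 0, 64);
        rtR0MemFree(pHdr);      /* dispatches on pHdr->fFlags (kmalloc vs vmalloc etc.) */
    }
    return rc;
}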
DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
{
    IPRT_LINUX_SAVE_EFL_AC();
    unregister_cpu_notifier(&g_NotifierBlock);
    IPRT_LINUX_RESTORE_EFL_AC();
}
Example 29
/**
 * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis           The event semaphore.
 * @param   fFlags          See RTSemEventWaitEx.
 * @param   uTimeout        See RTSemEventWaitEx.
 * @param   pSrcPos         The source code position of the wait.
 */
static int rtR0SemEventLnxWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                               PCRTLOCKVALSRCPOS pSrcPos)
{
    int rc;

    /*
     * Validate the input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    rtR0SemEventLnxRetain(pThis);

    /*
     * Try grab the event without setting up the wait.
     */
    if (   1 /** @todo check whether someone is already waiting (waitqueue_active), but then what do we do below? */
        && ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait.
         */
        IPRT_LINUX_SAVE_EFL_AC();
        RTR0SEMLNXWAIT Wait;
        rc = rtR0SemLnxWaitInit(&Wait, fFlags, uTimeout, &pThis->Head);
        if (RT_SUCCESS(rc))
        {
            IPRT_DEBUG_SEMS_STATE(pThis, 'E');
            for (;;)
            {
                /* The destruction test. */
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
                    rc = VERR_SEM_DESTROYED;
                else
                {
                    rtR0SemLnxWaitPrepare(&Wait);

                    /* Check the exit conditions. */
                    if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
                        rc = VERR_SEM_DESTROYED;
                    else if (ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
                        rc = VINF_SUCCESS;
                    else if (rtR0SemLnxWaitHasTimedOut(&Wait))
                        rc = VERR_TIMEOUT;
                    else if (rtR0SemLnxWaitWasInterrupted(&Wait))
                        rc = VERR_INTERRUPTED;
                    else
                    {
                        /* Do the wait and then recheck the conditions. */
                        rtR0SemLnxWaitDoIt(&Wait);
                        continue;
                    }
                }
                break;
            }

            rtR0SemLnxWaitDelete(&Wait);
            IPRT_DEBUG_SEMS_STATE_RC(pThis, 'E', rc);
        }
        IPRT_LINUX_RESTORE_EFL_AC();
    }

    rtR0SemEventLnxRelease(pThis);
    return rc;
}
Example 30
RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & ~RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);

    /*
     * Check that both CPUs are online before doing the broadcast call.
     */
    RTThreadPreemptDisable(&PreemptState);
    if (   RTMpIsCpuOnline(idCpu1)
        && RTMpIsCpuOnline(idCpu2))
    {
        /*
         * Use the smp_call_function variant taking a cpu mask where available,
         * falling back on broadcast with filter.  Slight snag: if one of the
         * CPUs is the one we're running on, we must do the call and the
         * post-call wait ourselves.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        cpumask_t   DstCpuMask;
#endif
        RTCPUID     idCpuSelf = RTMpCpuId();
        bool const  fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
        RTMPARGS    Args;
        Args.pfnWorker = pfnWorker;
        Args.pvUser1 = pvUser1;
        Args.pvUser2 = pvUser2;
        Args.idCpu   = idCpu1;
        Args.idCpu2  = idCpu2;
        Args.cHits   = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        cpumask_clear(&DstCpuMask);
        cpumask_set_cpu(idCpu1, &DstCpuMask);
        cpumask_set_cpu(idCpu2, &DstCpuMask);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        cpus_clear(DstCpuMask);
        cpu_set(idCpu1, DstCpuMask);
        cpu_set(idCpu2, DstCpuMask);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
        smp_call_function_many(&DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
        rc = 0;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        rc = smp_call_function_many(&DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
#else /* older kernels */
        rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
#endif /* older kernels */
        Assert(rc == 0);

        /* Call ourselves if necessary and wait for the other party to be done. */
        if (fCallSelf)
        {
            uint32_t cLoops = 0;
            rtmpLinuxWrapper(&Args);
            while (ASMAtomicReadU32(&Args.cHits) < 2)
            {
                if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
                    break;
                cLoops++;
                ASMNopPause();
            }
        }

        Assert(Args.cHits <= 2);
        if (Args.cHits == 2)
            rc = VINF_SUCCESS;
        else if (Args.cHits == 1)
            rc = VERR_NOT_ALL_CPUS_SHOWED;
        else if (Args.cHits == 0)
            rc = VERR_CPU_OFFLINE;
        else
            rc = VERR_CPU_IPE_1;
    }
    /*
     * A CPU must be present to be considered just offline.
     */
    else if (   RTMpIsCpuPresent(idCpu1)
             && RTMpIsCpuPresent(idCpu2))
        rc = VERR_CPU_OFFLINE;
    else
        rc = VERR_CPU_NOT_FOUND;
    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
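Finally, a hedged caller sketch for RTMpOnPair reusing the hypothetical rtExampleWorker from the RTMpOnOthers example (idCpu1/idCpu2 must name two distinct CPUs):

static int rtExampleOnPair(RTCPUID idCpu1, RTCPUID idCpu2)
{
    uint32_t cInvocations = 0;
    int rc = RTMpOnPair(idCpu1, idCpu2, 0 /* fFlags */, rtExampleWorker, &cInvocations, NULL);
    /* VINF_SUCCESS: the worker ran on both CPUs.
       VERR_NOT_ALL_CPUS_SHOWED: one CPU went offline mid-call. */
    return rc;
}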