/**
 * Worker for supdrvOSMsrProberModify.
 */
static DECLCALLBACK(void) supdrvLnxMsrProberModifyOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PSUPMSRPROBER               pReq    = (PSUPMSRPROBER)pvUser1;
    register uint32_t           uMsr    = pReq->u.In.uMsr;
    bool const                  fFaster = pReq->u.In.enmOp == SUPMSRPROBEROP_MODIFY_FASTER;
    uint64_t                    uBefore;
    uint64_t                    uWritten;
    uint64_t                    uAfter;
    int                         rcBefore, rcWrite, rcAfter, rcRestore;
    RTCCUINTREG                 fOldFlags;

    /* Initialize result variables. */
    uBefore = uWritten = uAfter    = 0;
    rcWrite = rcAfter  = rcRestore = -EIO;

    /*
     * Do the job.
     */
    fOldFlags = ASMIntDisableFlags();
    ASMCompilerBarrier(); /* paranoia */
    if (!fFaster)
        ASMWriteBackAndInvalidateCaches();

    rcBefore = rdmsrl_safe(uMsr, &uBefore);
    if (rcBefore >= 0)
    {
        register uint64_t uRestore = uBefore;
        uWritten  = uRestore;
        uWritten &= pReq->u.In.uArgs.Modify.fAndMask;
        uWritten |= pReq->u.In.uArgs.Modify.fOrMask;

        rcWrite   = wrmsr_safe(uMsr, RT_LODWORD(uWritten), RT_HIDWORD(uWritten));
        rcAfter   = rdmsrl_safe(uMsr, &uAfter);
        rcRestore = wrmsr_safe(uMsr, RT_LODWORD(uRestore), RT_HIDWORD(uRestore));

        if (!fFaster)
        {
            ASMWriteBackAndInvalidateCaches();
            ASMReloadCR3();
            ASMNopPause();
        }
    }

    ASMCompilerBarrier(); /* paranoia */
    ASMSetFlags(fOldFlags);

    /*
     * Write out the results.
     */
    pReq->u.Out.uResults.Modify.uBefore    = uBefore;
    pReq->u.Out.uResults.Modify.uWritten   = uWritten;
    pReq->u.Out.uResults.Modify.uAfter     = uAfter;
    pReq->u.Out.uResults.Modify.fBeforeGp  = rcBefore  != 0;
    pReq->u.Out.uResults.Modify.fModifyGp  = rcWrite   != 0;
    pReq->u.Out.uResults.Modify.fAfterGp   = rcAfter   != 0;
    pReq->u.Out.uResults.Modify.fRestoreGp = rcRestore != 0;
    RT_ZERO(pReq->u.Out.uResults.Modify.afReserved);
}
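/*
 * A hedged sketch (not verbatim from the source) of how the worker above is
 * typically dispatched: run it directly when no particular CPU is requested,
 * otherwise route it to the target CPU with RTMpOnSpecific.  The wrapper
 * name exampleMsrProberModify and the NIL_RTCPUID convention are assumptions
 * made for illustration only.
 */
static int exampleMsrProberModify(RTCPUID idCpu, PSUPMSRPROBER pReq)
{
    if (idCpu == NIL_RTCPUID)
    {
        /* No specific CPU requested: just run the worker on the current one. */
        supdrvLnxMsrProberModifyOnCpu(RTMpCpuId(), pReq, NULL);
        return VINF_SUCCESS;
    }
    return RTMpOnSpecific(idCpu, supdrvLnxMsrProberModifyOnCpu, pReq, NULL);
}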
/**
 * Refreshes globals from GIP after one or more CPUs were added.
 *
 * There are potential races here.  We might race other threads and we may race
 * more CPUs being added.
 */
static void rtMpWinRefreshGip(void)
{
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    if (   pGip
        && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC)
    {
        /*
         * Since CPUs cannot be removed, we only have to update the IDs and
         * indexes of CPUs that we think are inactive and the group member counts.
         */
        for (;;)
        {
            unsigned const cbGip          = pGip->cPages * PAGE_SIZE;
            uint32_t const cGipActiveCpus = pGip->cOnlineCpus;
            uint32_t const cMyActiveCpus  = ASMAtomicReadU32(&g_cRtMpWinActiveCpus);
            ASMCompilerBarrier();

            for (uint32_t idxGroup = 0; idxGroup < g_cRtMpWinMaxCpuGroups; idxGroup++)
            {
                unsigned offCpuGroup = pGip->aoffCpuGroup[idxGroup];
                if (offCpuGroup < cbGip)
                {
                    PSUPGIPCPUGROUP pGipCpuGrp  = (PSUPGIPCPUGROUP)((uintptr_t)pGip + offCpuGroup);
                    uint32_t        cMaxMembers = pGipCpuGrp->cMaxMembers;
                    AssertStmt(cMaxMembers < RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers),
                               cMaxMembers = RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers));
                    for (uint32_t idxMember = g_aRtMpWinCpuGroups[idxGroup].cActiveCpus; idxMember < cMaxMembers; idxMember++)
                    {
                        int16_t idxSet = pGipCpuGrp->aiCpuSetIdxs[idxMember];
                        g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxSet;
                        if ((unsigned)idxSet < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx))
# ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
                            g_aidRtMpWinByCpuSetIdx[idxSet] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember);
# else
                            g_aidRtMpWinByCpuSetIdx[idxSet] = idxSet;
# endif
                    }
                    g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = RT_MIN(pGipCpuGrp->cMembers, cMaxMembers);
                    g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = RT_MIN(pGipCpuGrp->cMembers, cMaxMembers);
                }
                else
                    Assert(g_aRtMpWinCpuGroups[idxGroup].cActiveCpus == 0);
            }

            ASMCompilerBarrier();
            if (cGipActiveCpus == pGip->cOnlineCpus)
                if (ASMAtomicCmpXchgU32(&g_cRtMpWinActiveCpus, cGipActiveCpus, cMyActiveCpus))
                    break;
        }
    }
}
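/*
 * The refresh loop above follows a common lock-free pattern: snapshot a
 * version value, rebuild the derived state, then publish only if the value
 * is still what we started with.  A minimal generic sketch of the same
 * pattern follows; the function and parameter names are illustrative and do
 * not come from the source.
 */
static void rtExampleRefreshUntilStable(uint32_t volatile *pcVersion, uint32_t volatile *pcMyVersion)
{
    for (;;)
    {
        uint32_t const cBefore = ASMAtomicReadU32(pcVersion);
        uint32_t const cMine   = ASMAtomicReadU32(pcMyVersion);
        ASMCompilerBarrier();

        /* ... rebuild caches / derived tables from the shared data here ... */

        ASMCompilerBarrier();
        if (   cBefore == ASMAtomicReadU32(pcVersion)            /* nothing changed while we worked */
            && ASMAtomicCmpXchgU32(pcMyVersion, cBefore, cMine)) /* publish our new version */
            break;
    }
}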
RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    uint32_t                cLockers;
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Get the saved state and try release the semaphore.
     */
    State = pThis->SavedState;
    ASMCompilerBarrier();
    ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc);
    AssertMsgReturn(fRc,
                    ("hOwner=%p hSelf=%p cLockers=%d\n", pThis->hOwner, hSelf, pThis->cLockers),
                    VERR_NOT_OWNER);

    cLockers = ASMAtomicDecS32(&pThis->cLockers);
    rtSemSpinMutexLeave(&State);
    if (cLockers > 0)
    {
        int rc = RTSemEventSignal(pThis->hEventSem);
        AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
    }
    return VINF_SUCCESS;
}
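/*
 * Hedged usage sketch (not from the source): the request/release pairing for
 * a spin mutex protecting a shared counter.  The global handle, the counter
 * and the function name are illustrative; only RTSemSpinMutexRequest and
 * RTSemSpinMutexRelease are the real IPRT API.
 */
static RTSEMSPINMUTEX g_hExampleSpinMtx = NIL_RTSEMSPINMUTEX; /* created elsewhere with RTSemSpinMutexCreate */
static uint32_t       g_cExampleCounter = 0;

static int exampleBumpCounter(void)
{
    int rc = RTSemSpinMutexRequest(g_hExampleSpinMtx);
    if (RT_SUCCESS(rc))
    {
        g_cExampleCounter++;                        /* the protected state */
        rc = RTSemSpinMutexRelease(g_hExampleSpinMtx);
    }
    return rc;
}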
/**
 * Update the queue, notify the host.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Pointer to the Queue that is doing the notification.
 *
 * @return Solaris DDI error code. DDI_SUCCESS or DDI_FAILURE.
 */
static int VirtioPciNotifyQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciNotifyQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    virtio_pci_t *pPciData = pDevice->pvHyper;
    AssertReturn(pPciData, DDI_FAILURE);

    pQueue->Ring.pRingAvail->Index += pQueue->cBufs;
    pQueue->cBufs = 0;

    ASMCompilerBarrier();

    ddi_put16(pPciData->hIO, (uint16_t *)(pPciData->addrIOBase + VIRTIO_PCI_QUEUE_NOTIFY), pQueue->QueueIndex);
    cmn_err(CE_NOTE, "VirtioPciNotifyQueue\n");
    return DDI_SUCCESS;
}
static void rtR0MemObjNtResolveDynamicApis(void)
{
    ULONG uBuildNumber  = 0;
    PsGetVersion(&g_uMajorVersion, &g_uMinorVersion, &uBuildNumber, NULL);

#ifndef IPRT_TARGET_NT4 /* MmGetSystemRoutineAddress was introduced in w2k. */

    UNICODE_STRING RoutineName;
    RtlInitUnicodeString(&RoutineName, L"MmProtectMdlSystemAddress");
    g_pfnMmProtectMdlSystemAddress = (decltype(MmProtectMdlSystemAddress) *)MmGetSystemRoutineAddress(&RoutineName);

#endif
    ASMCompilerBarrier();
    g_fResolvedDynamicApis = true;
}
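/*
 * Hedged sketch (not from the source) of how a dynamically resolved routine
 * is typically consumed: make sure resolution ran, check the pointer, and
 * degrade gracefully when the kernel doesn't export the API.  The wrapper
 * name and the STATUS_NOT_SUPPORTED fallback are assumptions for
 * illustration.
 */
static NTSTATUS rtExampleProtectMdl(PMDL pMdl, ULONG fProtect)
{
    if (!g_fResolvedDynamicApis)
        rtR0MemObjNtResolveDynamicApis();
    if (g_pfnMmProtectMdlSystemAddress)
        return g_pfnMmProtectMdlSystemAddress(pMdl, fProtect);
    return STATUS_NOT_SUPPORTED; /* older kernels don't export MmProtectMdlSystemAddress */
}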
/**
 * Insert pending notification
 *
 * @param   pVM             Pointer to the VM.
 * @param   pRec            Notification record to insert
 */
static void remNotifyHandlerInsert(PVM pVM, PREMHANDLERNOTIFICATION pRec)
{
    /*
     * Fetch a free record.
     */
    uint32_t                cFlushes = 0;
    uint32_t                idxFree;
    PREMHANDLERNOTIFICATION pFree;
    do
    {
        idxFree = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
        if (idxFree == UINT32_MAX)
        {
            do
            {
                cFlushes++;
                Assert(cFlushes != 128);
                AssertFatal(cFlushes < _1M);
                VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS, 0);
                idxFree = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
            } while (idxFree == UINT32_MAX);
        }
        pFree = &pVM->rem.s.aHandlerNotifications[idxFree];
    } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, pFree->idxNext, idxFree));

    /*
     * Copy the record.
     */
    pFree->enmKind = pRec->enmKind;
    pFree->u = pRec->u;

    /*
     * Insert it into the pending list.
     */
    uint32_t idxNext;
    do
    {
        idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxPendingList);
        ASMAtomicWriteU32(&pFree->idxNext, idxNext);
        ASMCompilerBarrier();
    } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxPendingList, idxFree, idxNext));

    VM_FF_SET(pVM, VM_FF_REM_HANDLER_NOTIFY);
}
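/*
 * The insertion loop above is an index-based lock-free LIFO push.  Below is
 * a minimal generic sketch of the same pattern: read the current head, link
 * the new node to it, then compare-and-exchange the head, retrying on races.
 * The EXAMPLENODE structure and the function name are illustrative and not
 * part of the source.
 */
typedef struct EXAMPLENODE
{
    uint32_t volatile idxNext;
    uint32_t          uPayload;
} EXAMPLENODE;

static void examplePushIndexLifo(EXAMPLENODE *paNodes, uint32_t volatile *pidxHead, uint32_t idxNew)
{
    uint32_t idxOldHead;
    do
    {
        idxOldHead = ASMAtomicUoReadU32(pidxHead);
        ASMAtomicWriteU32(&paNodes[idxNew].idxNext, idxOldHead);
        ASMCompilerBarrier();
    } while (!ASMAtomicCmpXchgU32(pidxHead, idxNew, idxOldHead));
}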
DECLR0VBGL(int) VbglR0GRPerform(VMMDevRequestHeader *pReq)
{
    int rc = vbglR0Enter();
    if (RT_SUCCESS(rc))
    {
        if (pReq)
        {
            RTCCPHYS PhysAddr = VbglR0PhysHeapGetPhysAddr(pReq);
            if (   PhysAddr != 0
                && PhysAddr < _4G) /* Port IO is 32 bit. */
            {
                ASMOutU32(g_vbgldata.portVMMDev + VMMDEV_PORT_OFF_REQUEST, (uint32_t)PhysAddr);
                /* Make the compiler aware that the host has changed memory. */
                ASMCompilerBarrier();
                rc = pReq->rc;
            }
            else
                rc = VERR_VBGL_INVALID_ADDR;
        }
        else
            rc = VERR_INVALID_PARAMETER;
    }
    return rc;
}
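/*
 * Hedged usage sketch (not from the source): allocate a VMMDev request on
 * the physical heap, hand it to VbglR0GRPerform and read back the result.
 * VMMDevReq_GetHostTime and the VMMDevReqHostTime layout are the documented
 * guest/host interface; the wrapper name is illustrative and error handling
 * is trimmed for brevity.
 */
static int exampleQueryHostTimeMs(uint64_t *puHostTimeMs)
{
    VMMDevReqHostTime *pReq = NULL;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_GetHostTime);
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0GRPerform(&pReq->header);
        if (RT_SUCCESS(rc))
            *puHostTimeMs = pReq->time;             /* milliseconds since the unix epoch */
        VbglR0GRFree(&pReq->header);
    }
    return rc;
}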
/**
 * Push a buffer into the ring.
 *
 * @param pQueue            Pointer to the Virtio queue.
 * @param physBuf           Physical address of the buffer.
 * @param cbBuf             Size of the buffer in bytes.
 * @param fFlags            Buffer flags, see VIRTIO_FLAGS_RING_DESC_*.
 *
 * @return IPRT error code.
 */
int VirtioRingPush(PVIRTIOQUEUE pQueue, paddr_t physBuf, uint32_t cbBuf, uint16_t fFlags)
{
    /*
     * Claim a slot, fill the buffer and move head pointer.
     */
    uint_t FreeIndex          = pQueue->FreeHeadIndex;
    PVIRTIORING pRing         = &pQueue->Ring;

    if (FreeIndex >= pRing->cDesc - 1)
    {
        LogRel((VIRTIOLOGNAME ":VirtioRingPush: failed. No free descriptors. cDesc=%u\n", pRing->cDesc));
        return VERR_BUFFER_OVERFLOW;
    }

    PVIRTIORINGDESC pRingDesc = &pRing->pRingDesc[FreeIndex];

    AssertCompile(sizeof(physBuf) == sizeof(pRingDesc->AddrBuf));

    pQueue->cBufs++;
    uint_t AvailIndex = (pRing->pRingAvail->Index + pQueue->cBufs) % pQueue->Ring.cDesc;
    pRing->pRingAvail->aRings[AvailIndex - 1] = FreeIndex;

    pRingDesc->AddrBuf = physBuf;
    pRingDesc->cbBuf   = cbBuf;
    pRingDesc->fFlags  = fFlags;

    pQueue->FreeHeadIndex = pRingDesc->Next;

    ASMCompilerBarrier();

    cmn_err(CE_NOTE, "VirtioRingPush: cbBuf=%u FreeIndex=%u AvailIndex=%u cDesc=%u Queue->cBufs=%u\n",
            cbBuf, FreeIndex, AvailIndex, pQueue->Ring.cDesc,
            pQueue->cBufs);

    return VINF_SUCCESS;
}
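/*
 * Hedged usage sketch (not from the source): push a buffer and then notify
 * the host so it starts processing the queue.  The pfnNotify parameter
 * stands in for the device-specific notify routine (such as
 * VirtioPciNotifyQueue earlier in this collection); the exact hookup is an
 * assumption made for illustration.
 */
static int exampleQueueAndNotify(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue, paddr_t physBuf, uint32_t cbBuf,
                                 int (*pfnNotify)(PVIRTIODEVICE, PVIRTIOQUEUE))
{
    int rc = VirtioRingPush(pQueue, physBuf, cbBuf, 0 /* fFlags: host only reads the buffer */);
    if (RT_SUCCESS(rc))
    {
        if (pfnNotify(pDevice, pQueue) != DDI_SUCCESS)
            rc = VERR_GENERAL_FAILURE;
    }
    return rc;
}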
static void doTest(RTTEST hTest)
{
    NOREF(hTest);
    uint32_t iAllocCpu = 0;
    while (iAllocCpu < RTCPUSET_MAX_CPUS)
    {
        const uint32_t cbTestSet   = _1M * 32;
        const uint32_t cIterations = 384;

        /*
         * Change CPU and allocate a chunk of memory.
         */
        RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(iAllocCpu)));

        void *pvTest = RTMemPageAlloc(cbTestSet); /* may be leaked, who cares */
        RTTESTI_CHECK_RETV(pvTest != NULL);
        memset(pvTest, 0xef, cbTestSet);

        /*
         * Do the tests.
         */
        uint32_t iAccessCpu = 0;
        while (iAccessCpu < RTCPUSET_MAX_CPUS)
        {
            RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(iAccessCpu)));

            /*
             * The write test.
             */
            RTTimeNanoTS(); RTThreadYield();
            uint64_t u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                memset(pvTest, i, cbTestSet);
            }
            uint64_t const cNsElapsedWrite = RTTimeNanoTS() - u64StartTS;
            uint64_t cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                            / ((long double)cNsElapsedWrite / RT_NS_1SEC_64) /* seconds */
                                            / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-write", iAllocCpu, iAccessCpu);

            /*
             * The read test.
             */
            memset(pvTest, 0, cbTestSet);
            RTTimeNanoTS(); RTThreadYield();
            u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
#if 1
                size_t register u = 0;
                size_t volatile *puCur = (size_t volatile *)pvTest;
                size_t volatile *puEnd = puCur + cbTestSet / sizeof(size_t);
                while (puCur != puEnd)
                    u += *puCur++;
#else
                ASMCompilerBarrier(); /* paranoia */
                void *pvFound = memchr(pvTest, (i & 127) + 1, cbTestSet);
                RTTESTI_CHECK(pvFound == NULL);
#endif
            }
            uint64_t const cNsElapsedRead = RTTimeNanoTS() - u64StartTS;
            cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                   / ((long double)cNsElapsedRead / RT_NS_1SEC_64) /* seconds */
                                   / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read", iAllocCpu, iAccessCpu);

            /*
             * The read/write test.
             */
            RTTimeNanoTS(); RTThreadYield();
            u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                memcpy(pvTest, (uint8_t *)pvTest + cbTestSet / 2, cbTestSet / 2);
            }
            uint64_t const cNsElapsedRW = RTTimeNanoTS() - u64StartTS;
            cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                   / ((long double)cNsElapsedRW / RT_NS_1SEC_64) /* seconds */
                                   / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read-write", iAllocCpu, iAccessCpu);

            /*
             * Total time.
             */
            RTTestIValueF(cNsElapsedRead + cNsElapsedWrite + cNsElapsedRW, RTTESTUNIT_NS,
                          "cpu%02u-mem%02u-time", iAllocCpu, iAccessCpu);

            /* advance */
            iAccessCpu = getNextCpu(iAccessCpu);
        }

        /*
         * Clean up and advance to the next CPU.
         */
        RTMemPageFree(pvTest, cbTestSet);
        iAllocCpu = getNextCpu(iAllocCpu);
    }
}
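/*
 * Hedged sketch (not part of this excerpt) of the getNextCpu helper used by
 * doTest above: advance to the next online CPU set index, returning
 * RTCPUSET_MAX_CPUS to terminate the caller's loop once the set is
 * exhausted.  This is an assumption about what the real helper does, based
 * solely on how it is called.
 */
static uint32_t getNextCpu(uint32_t iCurCpu)
{
    RTCPUSET OnlineSet;
    RTMpGetOnlineSet(&OnlineSet);
    for (uint32_t i = iCurCpu + 1; i < RTCPUSET_MAX_CPUS; i++)
        if (RTCpuSetIsMemberByIndex(&OnlineSet, i))
            return i;
    return RTCPUSET_MAX_CPUS;
}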
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /*
     * Don't try to mess with an offline CPU.
     */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
              ? VERR_CPU_NOT_FOUND
              : VERR_CPU_OFFLINE;

    /*
     * Use the broadcast IPI routine if there are no more than two CPUs online,
     * or if the current IRQL is unsuitable for KeWaitForSingleObject.
     */
    int rc;
    uint32_t cHits = 0;
    if (   g_pfnrtKeIpiGenericCall
        && (   RTMpGetOnlineCount() <= 2
            || KeGetCurrentIrql()   > APC_LEVEL)
       )
    {
        rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper,
                                       idCpu, NIL_RTCPUID, &cHits);
        if (RT_SUCCESS(rc))
        {
            if (cHits == 1)
                return VINF_SUCCESS;
            rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
        }
        return rc;
    }

#if 0
    rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu, NIL_RTCPUID, &cHits);
    if (RT_SUCCESS(rc))
    {
        if (cHits == 1)
            return VINF_SUCCESS;
        rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
    }
    return rc;

#else
    /*
     * Initialize the argument package and the objects within it.
     * The package is reference counted to avoid unnecessary spinning to
     * synchronize cleanup and prevent stack corruption.
     */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pArgs), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;
    pArgs->fExecuting             = false;
    pArgs->fDone                  = false;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    pArgs->CallbackArgs.idCpu     = idCpu;
    pArgs->CallbackArgs.cHits     = 0;
    pArgs->CallbackArgs.cRefs     = 2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
    KeSetImportanceDpc(&pArgs->Dpc, HighImportance);
    KeSetTargetProcessorDpc(&pArgs->Dpc, (int)idCpu);

    /*
     * Disable preemption while we check the current processor and insert the DPC.
     */
    KIRQL bOldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
    ASMCompilerBarrier(); /* paranoia */

    if (RTMpCpuId() == idCpu)
    {
        /* Just execute the callback on the current CPU. */
        pfnWorker(idCpu, pvUser1, pvUser2);
        KeLowerIrql(bOldIrql);

        ExFreePool(pArgs);
        return VINF_SUCCESS;
    }

    /* Different CPU, so queue it if the CPU is still online. */
    if (RTMpIsCpuOnline(idCpu))
    {
        BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
        Assert(fRc);
        KeLowerIrql(bOldIrql);

        uint64_t const nsRealWaitTS = RTTimeNanoTS();

        /*
         * Wait actively for a while in case the CPU/thread responds quickly.
         */
        uint32_t cLoopsLeft = 0x20000;
        while (cLoopsLeft-- > 0)
        {
            if (pArgs->fDone)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
            ASMNopPause();
        }

        /*
         * It didn't respond, so wait on the event object, poking the CPU if it's slow.
         */
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -10000; /* 1ms */
        NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
        if (rcNt == STATUS_SUCCESS)
        {
            rtMpNtOnSpecificRelease(pArgs);
            return VINF_SUCCESS;
        }

        /* If it hasn't responded yet, maybe poke it and wait some more. */
        if (rcNt == STATUS_TIMEOUT)
        {
#ifndef IPRT_TARGET_NT4
            if (   !pArgs->fExecuting
                && (   g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalSendSoftwareInterrupt
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
                RTMpPokeCpu(idCpu);
#endif

            Timeout.QuadPart = -1280000; /* 128ms */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt == STATUS_SUCCESS)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
        }

        /*
         * Something weird is happening; try to bail out.
         */
        if (KeRemoveQueueDpc(&pArgs->Dpc))
        {
            ExFreePool(pArgs); /* DPC was still queued, so we can return without further ado. */
            LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        else
        {
            /* DPC is running, wait a good while for it to complete. */
            LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));

            Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt != STATUS_SUCCESS)
                LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        rc = RTErrConvertFromNtStatus(rcNt);
    }
    else
    {
        /* CPU is offline.*/
        KeLowerIrql(bOldIrql);
        rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    }

    rtMpNtOnSpecificRelease(pArgs);
    return rc;
#endif
}
/**
 * Internal worker for the RTMpOn* APIs.
 *
 * @returns IPRT status code.
 * @param   pfnWorker   The callback.
 * @param   pvUser1     User argument 1.
 * @param   pvUser2     User argument 2.
 * @param   enmCpuid    What to do / is idCpu valid.
 * @param   idCpu       Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
 *                      RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   idCpu2      Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   pcHits      Where to return the number of CPUs the worker was invoked on. Optional.
 */
static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
                             RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
{
    PRTMPARGS pArgs;
    KDPC     *paExecCpuDpcs;

#if 0
    /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
     * driver verifier doesn't complain...
     */
    AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
#endif

#ifdef IPRT_TARGET_NT4
    KAFFINITY Mask;
    /* g_pfnrtNt* are not present on NT anyway. */
    return VERR_NOT_SUPPORTED;
#else
    KAFFINITY Mask = KeQueryActiveProcessors();
#endif

    /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
    if (!g_pfnrtNtKeFlushQueuedDpcs)
        return VERR_NOT_SUPPORTED;

    pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS*sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;

    pArgs->pfnWorker = pfnWorker;
    pArgs->pvUser1   = pvUser1;
    pArgs->pvUser2   = pvUser2;
    pArgs->idCpu     = NIL_RTCPUID;
    pArgs->idCpu2    = NIL_RTCPUID;
    pArgs->cHits     = 0;
    pArgs->cRefs     = 1;

    paExecCpuDpcs = (KDPC *)(pArgs + 1);

    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;

        KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
        pArgs->idCpu2 = idCpu2;
    }
    else
    {
        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
            KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
        }
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    /*
     * We cannot do other than assume a 1:1 relationship between the
     * affinity mask bits and the processor numbers despite the warnings
     * in the docs.  If someone knows a better way to get this done,
     * please let bird know.
     */
    ASMCompilerBarrier(); /* paranoia */
    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);

        ASMAtomicIncS32(&pArgs->cRefs);
        ret = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
        Assert(ret);
    }
    else
    {
        unsigned iSelf = KeGetCurrentProcessorNumber();

        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            if (    (i != iSelf)
                &&  (Mask & RT_BIT_64(i)))
            {
                ASMAtomicIncS32(&pArgs->cRefs);
                BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
                Assert(ret);
            }
        }
        if (enmCpuid != RT_NT_CPUID_OTHERS)
            pfnWorker(iSelf, pvUser1, pvUser2);
    }

    KeLowerIrql(oldIrql);

    /* Flush all DPCs and wait for completion. (can take long!) */
    /** @todo Consider changing this to an active wait using some atomic inc/dec
     *  stuff (and check for the current cpu above in the specific case). */
    /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
     *        executed. Seen pArgs being freed while some CPU was using it before
     *        cRefs was added. */
    g_pfnrtNtKeFlushQueuedDpcs();

    if (pcHits)
        *pcHits = pArgs->cHits;

    /* Dereference the argument structure. */
    int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
    Assert(cRefs >= 0);
    if (cRefs == 0)
        ExFreePool(pArgs);

    return VINF_SUCCESS;
}
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
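/*
 * Hedged usage sketch (not from the source): the canonical enter/leave
 * pairing around state owned by a PDM critical section.  The rcBusy value
 * handed to PDMCritSectEnter is a caller choice; VINF_IOM_R3_MMIO_WRITE is
 * merely an illustrative pick, and the handler name is invented.
 */
static int exampleMmioWriteHandler(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnter(pCritSect, VINF_IOM_R3_MMIO_WRITE /* rcBusy for RC/R0 contention */);
    if (rc == VINF_SUCCESS)
    {
        /* ... touch the device state the section protects ... */
        PDMCritSectLeave(pCritSect);
    }
    return rc; /* may be rcBusy, telling the caller to redo the access in ring-3 */
}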
/**
 * @interface_method_impl{TXSTRANSPORT,pfnRecvPkt}
 */
static DECLCALLBACK(int) txsTcpRecvPkt(PPTXSPKTHDR ppPktHdr)
{
    int rc = VINF_SUCCESS;
    *ppPktHdr = NULL;

    /*
     * Do we have to wait for a client to connect?
     */
    RTSOCKET hTcpClient = g_hTcpClient;
    if (hTcpClient == NIL_RTSOCKET)
    {
        rc = txsTcpConnect();
        if (RT_FAILURE(rc))
            return rc;
        hTcpClient = g_hTcpClient; Assert(hTcpClient != NIL_RTSOCKET);
    }

    /*
     * Read state.
     */
    size_t      offData       = 0;
    size_t      cbData        = 0;
    size_t      cbDataAlloced;
    uint8_t    *pbData        = NULL;

    /*
     * Any stashed data?
     */
    if (g_cbTcpStashedAlloced)
    {
        offData               = g_cbTcpStashed;
        cbDataAlloced         = g_cbTcpStashedAlloced;
        pbData                = g_pbTcpStashed;

        g_cbTcpStashed        = 0;
        g_cbTcpStashedAlloced = 0;
        g_pbTcpStashed        = NULL;
    }
    else
    {
        cbDataAlloced = RT_ALIGN_Z(64,  TXSPKT_ALIGNMENT);
        pbData = (uint8_t *)RTMemAlloc(cbDataAlloced);
        if (!pbData)
            return VERR_NO_MEMORY;
    }

    /*
     * Read and validate the length.
     */
    while (offData < sizeof(uint32_t))
    {
        size_t cbRead;
        rc = RTTcpRead(hTcpClient, pbData + offData, sizeof(uint32_t) - offData, &cbRead);
        if (RT_FAILURE(rc))
            break;
        if (cbRead == 0)
        {
            Log(("txsTcpRecvPkt: RTTcpRead -> %Rrc / cbRead=0 -> VERR_NET_NOT_CONNECTED (#1)\n", rc));
            rc = VERR_NET_NOT_CONNECTED;
            break;
        }
        offData += cbRead;
    }
    if (RT_SUCCESS(rc))
    {
        ASMCompilerBarrier(); /* paranoia^3 */
        cbData = *(uint32_t volatile *)pbData;
        if (cbData >= sizeof(TXSPKTHDR) && cbData <= TXSPKT_MAX_SIZE)
        {
            /*
             * Align the length and reallocate the return packet if necessary.
             */
            cbData = RT_ALIGN_Z(cbData, TXSPKT_ALIGNMENT);
            if (cbData > cbDataAlloced)
            {
                void *pvNew = RTMemRealloc(pbData, cbData);
                if (pvNew)
                {
                    pbData = (uint8_t *)pvNew;
                    cbDataAlloced = cbData;
                }
                else
                    rc = VERR_NO_MEMORY;
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Read the remainder of the data.
                 */
                while (offData < cbData)
                {
                    size_t cbRead;
                    rc = RTTcpRead(hTcpClient, pbData + offData, cbData - offData, &cbRead);
                    if (RT_FAILURE(rc))
                        break;
                    if (cbRead == 0)
                    {
                        Log(("txsTcpRecvPkt: RTTcpRead -> %Rrc / cbRead=0 -> VERR_NET_NOT_CONNECTED (#2)\n", rc));
                        rc = VERR_NET_NOT_CONNECTED;
                        break;
                    }
                    offData += cbRead;
                }
            }
        }
        else
            rc = VERR_NET_PROTOCOL_ERROR;
    }
    if (RT_SUCCESS(rc))
        *ppPktHdr = (PTXSPKTHDR)pbData;
    else
    {
        /*
         * Deal with errors.
         */
        if (rc == VERR_INTERRUPTED)
        {
            /* stash it away for the next call. */
            g_cbTcpStashed        = cbData;
            g_cbTcpStashedAlloced = cbDataAlloced;
            g_pbTcpStashed        = pbData;
        }
        else
        {
            RTMemFree(pbData);

            /* assume fatal connection error. */
            Log(("txsTcpRecvPkt: RTTcpRead -> %Rrc -> txsTcpDisconnectClient(%RTsock)\n", rc, g_hTcpClient));
            txsTcpDisconnectClient();
        }
    }

    return rc;
}
RTDECL(int) RTSemSpinMutexRequest(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;
    int                     rc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Check context, disable preemption and save flags if necessary.
     */
    rc = rtSemSpinMutexEnter(&State, pThis);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Try take the ownership.
     */
    ASMAtomicIncS32(&pThis->cLockers);
    ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
    if (!fRc)
    {
        uint32_t cSpins;

        /*
         * It's busy. Check if it's an attempt at nested access.
         */
        if (RT_UNLIKELY(pThis->hOwner == hSelf))
        {
            AssertMsgFailed(("%p attempt at nested access\n", pThis));
            rtSemSpinMutexLeave(&State);
            return VERR_SEM_NESTED;
        }

        /*
         * Return if we're in interrupt context and the semaphore isn't
         * configure to be interrupt safe.
         */
        if (rc == VINF_SEM_BAD_CONTEXT)
        {
            rtSemSpinMutexLeave(&State);
            return VERR_SEM_BAD_CONTEXT;
        }

        /*
         * Ok, we have to wait.
         */
        if (State.fSpin)
        {
            for (cSpins = 0; ; cSpins++)
            {
                ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
                if (fRc)
                    break;
                ASMNopPause();
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
                {
                    rtSemSpinMutexLeave(&State);
                    return VERR_SEM_DESTROYED;
                }

                /*
                 * "Yield" once in a while. This may lower our IRQL/PIL which
                 * may preempting us, and it will certainly stop the hammering
                 * of hOwner for a little while.
                 */
                if ((cSpins & 0x7f) == 0x1f)
                {
                    rtSemSpinMutexLeave(&State);
                    rtSemSpinMutexEnter(&State, pThis);
                    Assert(State.fSpin);
                }
            }
        }
        else
        {
            for (cSpins = 0;; cSpins++)
            {
                ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
                if (fRc)
                    break;
                ASMNopPause();
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
                {
                    rtSemSpinMutexLeave(&State);
                    return VERR_SEM_DESTROYED;
                }

                if ((cSpins & 15) == 15) /* spin a bit before going sleep (again). */
                {
                    rtSemSpinMutexLeave(&State);

                    rc = RTSemEventWait(pThis->hEventSem, RT_INDEFINITE_WAIT);
                    ASMCompilerBarrier();
                    if (RT_SUCCESS(rc))
                        AssertReturn(pThis->u32Magic == RTSEMSPINMUTEX_MAGIC, VERR_SEM_DESTROYED);
                    else if (rc == VERR_INTERRUPTED)
                        AssertRC(rc);       /* shouldn't happen */
                    else
                    {
                        AssertRC(rc);
                        return rc;
                    }

                    rc = rtSemSpinMutexEnter(&State, pThis);
                    AssertRCReturn(rc, rc);
                    Assert(!State.fSpin);
                }
            }
        }
    }

    /*
     * We're the semaphore owner.
     */
    pThis->SavedState = State;
    Assert(pThis->hOwner == hSelf);
    return VINF_SUCCESS;
}
/**
 * Check the credentials and return the gid/uid of the user.
 *
 * @param    pszUser     The username.
 * @param    pszPasswd   The password.
 * @param    pGid        Where to store the GID of the user.
 * @param    pUid        Where to store the UID of the user.
 * @returns IPRT status code
 */
static int rtCheckCredentials(const char *pszUser, const char *pszPasswd, gid_t *pGid, uid_t *pUid)
{
#if defined(RT_OS_DARWIN)
    RTLogPrintf("rtCheckCredentials\n");

    /*
     * Resolve user to UID and GID.
     */
    char            szBuf[_4K];
    struct passwd   Pw;
    struct passwd  *pPw;
    if (getpwnam_r(pszUser, &Pw, szBuf, sizeof(szBuf), &pPw) != 0)
        return VERR_AUTHENTICATION_FAILURE;
    if (!pPw)
        return VERR_AUTHENTICATION_FAILURE;

    *pUid = pPw->pw_uid;
    *pGid = pPw->pw_gid;

    /*
     * Use PAM for the authentication.
     * Note! libpam.2.dylib was introduced with 10.6.x (OpenPAM).
     */
    void *hModPam = dlopen("libpam.dylib", RTLD_LAZY | RTLD_GLOBAL);
    if (hModPam)
    {
        int (*pfnPamStart)(const char *, const char *, struct pam_conv *, pam_handle_t **);
        int (*pfnPamAuthenticate)(pam_handle_t *, int);
        int (*pfnPamAcctMgmt)(pam_handle_t *, int);
        int (*pfnPamSetItem)(pam_handle_t *, int, const void *);
        int (*pfnPamEnd)(pam_handle_t *, int);
        *(void **)&pfnPamStart        = dlsym(hModPam, "pam_start");
        *(void **)&pfnPamAuthenticate = dlsym(hModPam, "pam_authenticate");
        *(void **)&pfnPamAcctMgmt     = dlsym(hModPam, "pam_acct_mgmt");
        *(void **)&pfnPamSetItem      = dlsym(hModPam, "pam_set_item");
        *(void **)&pfnPamEnd          = dlsym(hModPam, "pam_end");
        ASMCompilerBarrier();
        if (   pfnPamStart
            && pfnPamAuthenticate
            && pfnPamAcctMgmt
            && pfnPamSetItem
            && pfnPamEnd)
        {
#define pam_start           pfnPamStart
#define pam_authenticate    pfnPamAuthenticate
#define pam_acct_mgmt       pfnPamAcctMgmt
#define pam_set_item        pfnPamSetItem
#define pam_end             pfnPamEnd

            /* Do the PAM stuff.
               Note! Abusing 'login' here for now... */
            pam_handle_t   *hPam        = NULL;
            RTPROCPAMARGS   PamConvArgs = { pszUser, pszPasswd };
            struct pam_conv PamConversation;
            RT_ZERO(PamConversation);
            PamConversation.appdata_ptr = &PamConvArgs;
            PamConversation.conv        = rtPamConv;
            int rc = pam_start("login", pszUser, &PamConversation, &hPam);
            if (rc == PAM_SUCCESS)
            {
                rc = pam_set_item(hPam, PAM_RUSER, pszUser);
                if (rc == PAM_SUCCESS)
                    rc = pam_authenticate(hPam, 0);
                if (rc == PAM_SUCCESS)
                {
                    rc = pam_acct_mgmt(hPam, 0);
                    if (   rc == PAM_SUCCESS
                        || rc == PAM_AUTHINFO_UNAVAIL /*??*/)
                    {
                        pam_end(hPam, PAM_SUCCESS);
                        dlclose(hModPam);
                        return VINF_SUCCESS;
                    }
                    Log(("rtCheckCredentials: pam_acct_mgmt -> %d\n", rc));
                }
                else
                    Log(("rtCheckCredentials: pam_authenticate -> %d\n", rc));
                pam_end(hPam, rc);
            }
            else
                Log(("rtCheckCredentials: pam_start -> %d\n", rc));
        }
        else
            Log(("rtCheckCredentials: failed to resolve symbols: %p %p %p %p %p\n",
                 pfnPamStart, pfnPamAuthenticate, pfnPamAcctMgmt, pfnPamSetItem, pfnPamEnd));
        dlclose(hModPam);
    }
    else
        Log(("rtCheckCredentials: Loading libpam.dylib failed\n"));
    return VERR_AUTHENTICATION_FAILURE;

#elif defined(RT_OS_LINUX)
    struct passwd *pw;

    pw = getpwnam(pszUser);
    if (!pw)
        return VERR_AUTHENTICATION_FAILURE;

    if (!pszPasswd)
        pszPasswd = "";

    struct spwd *spwd;
    /* works only if /etc/shadow is accessible */
    spwd = getspnam(pszUser);
    if (spwd)
        pw->pw_passwd = spwd->sp_pwdp;

    /* Default fCorrect=true if no password was specified.  In that case the
     * account must not have a password set either (pw->pw_passwd NULL or empty).
     * Fail if a password is specified but the user does not have one assigned. */
    int fCorrect = !pszPasswd || !*pszPasswd;
    if (pw->pw_passwd && *pw->pw_passwd)
    {
        struct crypt_data *data = (struct crypt_data*)RTMemTmpAllocZ(sizeof(*data));
        /* be reentrant */
        char *pszEncPasswd = crypt_r(pszPasswd, pw->pw_passwd, data);
        fCorrect = pszEncPasswd && !strcmp(pszEncPasswd, pw->pw_passwd);
        RTMemTmpFree(data);
    }
    if (!fCorrect)
        return VERR_AUTHENTICATION_FAILURE;

    *pGid = pw->pw_gid;
    *pUid = pw->pw_uid;
    return VINF_SUCCESS;

#elif defined(RT_OS_SOLARIS)
    struct passwd *ppw, pw;
    char szBuf[1024];

    if (getpwnam_r(pszUser, &pw, szBuf, sizeof(szBuf), &ppw) != 0 || ppw == NULL)
        return VERR_AUTHENTICATION_FAILURE;

    if (!pszPasswd)
        pszPasswd = "";

    struct spwd spwd;
    char szPwdBuf[1024];
    /* works only if /etc/shadow is accessible */
    if (getspnam_r(pszUser, &spwd, szPwdBuf, sizeof(szPwdBuf)) != NULL)
        ppw->pw_passwd = spwd.sp_pwdp;

    char *pszEncPasswd = crypt(pszPasswd, ppw->pw_passwd);
    if (strcmp(pszEncPasswd, ppw->pw_passwd))
        return VERR_AUTHENTICATION_FAILURE;

    *pGid = ppw->pw_gid;
    *pUid = ppw->pw_uid;
    return VINF_SUCCESS;

#else
    NOREF(pszUser); NOREF(pszPasswd); NOREF(pGid); NOREF(pUid);
    return VERR_AUTHENTICATION_FAILURE;
#endif
}
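/*
 * Hedged usage sketch (not from the source): resolving a username/password
 * pair to a uid/gid before dropping privileges.  The caller name is
 * illustrative; rtCheckCredentials is the function defined above.
 */
static int exampleAuthenticate(const char *pszUser, const char *pszPasswd)
{
    gid_t gid = 0;
    uid_t uid = 0;
    int rc = rtCheckCredentials(pszUser, pszPasswd, &gid, &uid);
    if (RT_SUCCESS(rc))
        LogFlow(("exampleAuthenticate: %s -> uid=%u gid=%u\n", pszUser, (unsigned)uid, (unsigned)gid));
    return rc;
}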