Example #1
VOID
NTAPI
InbvAcquireLock(VOID)
{
    KIRQL OldIrql;

    /* Check if we're at dispatch level or lower */
    OldIrql = KeGetCurrentIrql();
    if (OldIrql <= DISPATCH_LEVEL)
    {
        /* Loop until the lock is free */
        while (!KeTestSpinLock(&BootDriverLock));

        /* Raise IRQL to dispatch level */
        KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
    }

    /* Acquire the lock */
    KiAcquireSpinLock(&BootDriverLock);
    InbvOldIrql = OldIrql;
}
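For context, the matching release side of this lock mirrors the sequence above: drop the lock first, then restore the IRQL that was saved in InbvOldIrql. A minimal sketch of that counterpart (InbvReleaseLock and KiReleaseSpinLock are assumed to belong to the same module; this is not necessarily the exact implementation):

VOID
NTAPI
InbvReleaseLock(VOID)
{
    KIRQL OldIrql;

    /* Capture the IRQL that InbvAcquireLock saved */
    OldIrql = InbvOldIrql;

    /* Release the lock, then drop back only if we had raised to dispatch level */
    KiReleaseSpinLock(&BootDriverLock);
    if (OldIrql <= DISPATCH_LEVEL) KeLowerIrql(OldIrql);
}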
Example #2
/*
 * @implemented
 */
NTSTATUS
NTAPI
KdEnableDebugger(VOID)
{
    KIRQL OldIrql;

    /* Raise IRQL */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

    /* TODO: Re-enable any breakpoints */

    /* Enable the Debugger */
    KdDebuggerEnabled = TRUE;
    SharedUserData->KdDebuggerEnabled = TRUE;

    /* Lower the IRQL */
    KeLowerIrql(OldIrql);

    /* Return success */
    return STATUS_SUCCESS;
}
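The disable counterpart presumably uses the same DISPATCH_LEVEL bracket and simply clears the two flags; a minimal sketch, not claimed to be the exact implementation:

NTSTATUS
NTAPI
KdDisableDebugger(VOID)
{
    KIRQL OldIrql;

    /* Raise IRQL */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

    /* Disable the Debugger */
    KdDebuggerEnabled = FALSE;
    SharedUserData->KdDebuggerEnabled = FALSE;

    /* Lower the IRQL */
    KeLowerIrql(OldIrql);

    /* Return success */
    return STATUS_SUCCESS;
}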
Example #3
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
              ? VERR_CPU_NOT_FOUND
              : VERR_CPU_OFFLINE;

    int rc = g_pfnrtSendIpi(idCpu);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Fallback. */
    if (!fPokeDPCsInitialized)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(aPokeDpcs); i++)
        {
            KeInitializeDpc(&aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&aPokeDpcs[i], (int)i);
        }
        fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    KeSetImportanceDpc(&aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
     * @note: not true on at least Vista & Windows 7
     */
    BOOLEAN bRet = KeInsertQueueDpc(&aPokeDpcs[idCpu], 0, 0);

    KeLowerIrql(oldIrql);
    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
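rtMpNtPokeCpuDummy, referenced by the DPC initialization above, is presumably an empty DPC routine: queuing the DPC on the target CPU is what forces the IPI. A sketch:

static VOID rtMpNtPokeCpuDummy(IN PKDPC pDpc, IN PVOID pvDeferredContext, IN PVOID pvSystemArgument1, IN PVOID pvSystemArgument2)
{
    /* Nothing to do; queuing the DPC is what pokes the CPU. */
    NOREF(pDpc);
    NOREF(pvDeferredContext);
    NOREF(pvSystemArgument1);
    NOREF(pvSystemArgument2);
}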
Example #4
VOID CreateTrampoline()
{
	PSHARED_DISP_DATA disp = GetSharedData();
	if (disp->Signature != SHARED_SIGNATURE)
	{
		KdPrint (("ngvid:" __FUNCTION__ ": Damaged shared block %X signature %X should be %X\n",
			disp, disp->Signature, SHARED_SIGNATURE));

		return;
	}

	if (disp->Trampoline)
	{
		KdPrint(("Trampoline already exists at %X\n", disp->Trampoline));

		TrampolineIsr = (PTRAMPOLINE) disp->Trampoline;
	}
	else
	{
		TrampolineIsr = (PTRAMPOLINE) ExAllocatePool (NonPagedPool, sizeof(TRAMPOLINE));

		KdPrint(("Trampoline allocated at %X\n", TrampolineIsr));
	}

	KIRQL Irql;
	KeRaiseIrql (HIGH_LEVEL, &Irql);
	TrampolineIsr->e1.PushOpcode = 0x68;
	TrampolineIsr->e1.Address = IsrHookRoutine;
	TrampolineIsr->e1.RetOpcode = 0xC3;
	KeLowerIrql (Irql);

	KdPrint(("Trampoline created at %X\n", TrampolineIsr));
	
	if (disp->Trampoline == NULL)
	{
		I8042HookKeyboard  ((PI8042_KEYBOARD_ISR) TrampolineIsr);
		disp->Trampoline = TrampolineIsr;
	}
}
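Example #5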
void _irqlevel_changed_(_irqL *irqlevel, u8 bLower)
{

#ifdef PLATFORM_OS_XP

	if (bLower == LOWER) {
		*irqlevel = KeGetCurrentIrql();

		if (*irqlevel > PASSIVE_LEVEL) {
				KeLowerIrql(PASSIVE_LEVEL);
			//DEBUG_ERR(("\n <=== KeLowerIrql.\n"));
		}
	} else {
		if (KeGetCurrentIrql() == PASSIVE_LEVEL) {
			KeRaiseIrql(DISPATCH_LEVEL, irqlevel);
			//DEBUG_ERR(("\n <=== KeRaiseIrql.\n"));
		}
	}

#endif

}
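In the drivers this helper comes from it is typically used to bracket a call that must run at PASSIVE_LEVEL from code otherwise running at DISPATCH_LEVEL. A usage sketch (RAISE is assumed to be the counterpart of the LOWER constant used above):

	_irqL oldirql;

	_irqlevel_changed_(&oldirql, LOWER);   /* drop to PASSIVE_LEVEL for the pageable call */
	/* ... work that must not run at raised IRQL ... */
	_irqlevel_changed_(&oldirql, RAISE);   /* back up to DISPATCH_LEVEL */

Example #6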
/**
 * Device I/O Control entry point.
 *
 * @param   pDevObj     Device object.
 * @param   pIrp        Request packet.
 */
NTSTATUS _stdcall VBoxDrvNtDeviceControl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    PSUPDRVDEVEXT       pDevExt  = SUPDRVNT_GET_DEVEXT(pDevObj);
    PIO_STACK_LOCATION  pStack   = IoGetCurrentIrpStackLocation(pIrp);
    PSUPDRVSESSION      pSession = (PSUPDRVSESSION)pStack->FileObject->FsContext;

    /*
     * Deal with the high-speed IOCtls that take their arguments from
     * the session and iCmd, and only return a VBox status code.
     *
     * Note: The previous method of returning the rc prior to IOC version
     *       7.4 has been abandoned; we're no longer compatible with that
     *       interface.
     */
    ULONG ulCmd = pStack->Parameters.DeviceIoControl.IoControlCode;
    if (   (   ulCmd == SUP_IOCTL_FAST_DO_RAW_RUN
            || ulCmd == SUP_IOCTL_FAST_DO_HM_RUN
            || ulCmd == SUP_IOCTL_FAST_DO_NOP)
        && pSession->fUnrestricted == true)
    {
        int rc = supdrvIOCtlFast(ulCmd, (unsigned)(uintptr_t)pIrp->UserBuffer /* VMCPU id */, pDevExt, pSession);

#if 0   /* When preemption was not used i.e. !VBOX_WITH_VMMR0_DISABLE_PREEMPTION. That's no longer required. */
        /* Raise the IRQL to DISPATCH_LEVEL to prevent Windows from rescheduling us to another CPU/core. */
        Assert(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        KIRQL oldIrql;
        KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
        int rc = supdrvIOCtlFast(ulCmd, (unsigned)(uintptr_t)pIrp->UserBuffer /* VMCPU id */, pDevExt, pSession);
        KeLowerIrql(oldIrql);
#endif

        /* Complete the I/O request. */
        NTSTATUS rcNt = pIrp->IoStatus.Status = RT_SUCCESS(rc) ? STATUS_SUCCESS : STATUS_INVALID_PARAMETER;
        IoCompleteRequest(pIrp, IO_NO_INCREMENT);
        return rcNt;
    }

    return VBoxDrvNtDeviceControlSlow(pDevExt, pSession, pIrp, pStack);
}
Example #7
/*
 * @implemented
 */
BOOLEAN
NTAPI
KeDeregisterBugCheckCallback(IN PKBUGCHECK_CALLBACK_RECORD CallbackRecord)
{
    KIRQL OldIrql;
    BOOLEAN Status = FALSE;

    /* Raise IRQL to High */
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    /* Check the Current State */
    if (CallbackRecord->State == BufferInserted)
    {
        /* Reset state and remove from list */
        CallbackRecord->State = BufferEmpty;
        RemoveEntryList(&CallbackRecord->Entry);
        Status = TRUE;
    }

    /* Lower IRQL and return */
    KeLowerIrql(OldIrql);
    return Status;
}
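For comparison, the registration side uses the same HIGH_LEVEL bracket, filling in the record and linking it before lowering. A condensed sketch (the list head name is an assumption and the checksum bookkeeping is omitted):

BOOLEAN
NTAPI
KeRegisterBugCheckCallback(IN PKBUGCHECK_CALLBACK_RECORD CallbackRecord,
                           IN PKBUGCHECK_CALLBACK_ROUTINE CallbackRoutine,
                           IN PVOID Buffer,
                           IN ULONG Length,
                           IN PUCHAR Component)
{
    KIRQL OldIrql;
    BOOLEAN Status = FALSE;

    /* Raise IRQL to High */
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    /* Only insert records that are not already on the list */
    if (CallbackRecord->State == BufferEmpty)
    {
        /* Fill in the record and link it in (list head name assumed) */
        CallbackRecord->CallbackRoutine = CallbackRoutine;
        CallbackRecord->Buffer = Buffer;
        CallbackRecord->Length = Length;
        CallbackRecord->Component = Component;
        CallbackRecord->State = BufferInserted;
        InsertTailList(&KeBugCheckCallbackListHead, &CallbackRecord->Entry);
        Status = TRUE;
    }

    /* Lower IRQL and return */
    KeLowerIrql(OldIrql);
    return Status;
}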
Example #8
/* Stops and cleans any tracing if needed */
void stopTracing()
{
    KIRQL old_irql = 0;

    PAGED_CODE();
    
    /* Raise the IRQL, otherwise a new thread could be created while we are cleaning up */
    old_irql = KeGetCurrentIrql();
    if (old_irql < APC_LEVEL) {
        KeRaiseIrql (APC_LEVEL, &old_irql);
    }

    KdPrint( ("Oregano: stopTracing: Got a stop trace command\r\n") );
    if (TRUE == is_new_thread_handler_installed) {
        PsRemoveCreateThreadNotifyRoutine(newThreadHandler);
        is_new_thread_handler_installed = FALSE;
    } else {
        KdPrint(( "Oregano: stopTracing: Not new thread notifier\r\n" ));
    }
    if (0 != targetProcessId) {
        unsetTrapFlagForAllThreads(targetProcessId);
        targetProcessId = 0;
    }
    if (NULL != targetEProcess) {
        ObDereferenceObject( targetEProcess );
        targetEProcess = NULL;
    }
    target_process = NULL;
    RtlZeroMemory( loggingRanges, sizeof(loggingRanges) );

    /* Set back the Irql */
    if (old_irql < APC_LEVEL) {
        KeLowerIrql( old_irql );
    }

    return;
}
Example #9
BOOLEAN
FASTCALL
ExiTryToAcquireFastMutex(PFAST_MUTEX FastMutex)
{
    KIRQL OldIrql;

    /* Raise to APC_LEVEL */
    KeRaiseIrql(APC_LEVEL, &OldIrql);

    /* Check if we can quickly acquire it */
    if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
    {
        /* We have, set us as owners */
        FastMutex->Owner = KeGetCurrentThread();
        FastMutex->OldIrql = OldIrql;
        return TRUE;
    }
    else
    {
        /* Acquire attempt failed */
        KeLowerIrql(OldIrql);
        return FALSE;
    }
}
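The release path roughly inverts this: clear the owner, restore the count, wake a waiter if one blocked on the mutex event, and only then lower the IRQL saved at acquire time. A sketch under those assumptions (the Event field and the KeSetEventBoostPriority wake-up are assumptions about the contended path):

VOID
FASTCALL
ExiReleaseFastMutex(PFAST_MUTEX FastMutex)
{
    KIRQL OldIrql;

    /* Capture the IRQL saved on acquire and clear ownership */
    OldIrql = FastMutex->OldIrql;
    FastMutex->Owner = NULL;

    /* Give the mutex back; a non-positive result means someone is waiting */
    if (InterlockedIncrement(&FastMutex->Count) <= 0)
    {
        /* Wake one waiter blocked on the mutex event */
        KeSetEventBoostPriority(&FastMutex->Event, NULL);
    }

    /* Drop back below APC_LEVEL */
    KeLowerIrql(OldIrql);
}

Example #10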
int rtMpPokeCpuUsingDpc(RTCPUID idCpu)
{
    /*
     * APC fallback.
     */
    static KDPC s_aPokeDpcs[MAXIMUM_PROCESSORS] = {0};
    static bool s_fPokeDPCsInitialized = false;

    if (!s_fPokeDPCsInitialized)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(s_aPokeDpcs); i++)
        {
            KeInitializeDpc(&s_aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&s_aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&s_aPokeDpcs[i], (int)i);
        }
        s_fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    KeSetImportanceDpc(&s_aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&s_aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
     * @note: not true on at least Vista & Windows 7
     */
    BOOLEAN bRet = KeInsertQueueDpc(&s_aPokeDpcs[idCpu], 0, 0);

    KeLowerIrql(oldIrql);
    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
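Example #11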
/**
 * Internal worker for the RTMpOn* APIs.
 *
 * @returns IPRT status code.
 * @param   pfnWorker   The callback.
 * @param   pvUser1     User argument 1.
 * @param   pvUser2     User argument 2.
 * @param   enmCpuid    What to do / is idCpu valid.
 * @param   idCpu       Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
 *                      RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   idCpu2      Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   pcHits      Where to return the number of this. Optional.
 */
static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
                             RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
{
    PRTMPARGS pArgs;
    KDPC     *paExecCpuDpcs;

#if 0
    /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
     * driver verifier doesn't complain...
     */
    AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
#endif

#ifdef IPRT_TARGET_NT4
    KAFFINITY Mask;
    /* g_pfnrtNt* are not present on NT anyway. */
    return VERR_NOT_SUPPORTED;
#else
    KAFFINITY Mask = KeQueryActiveProcessors();
#endif

    /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
    if (!g_pfnrtNtKeFlushQueuedDpcs)
        return VERR_NOT_SUPPORTED;

    pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS*sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;

    pArgs->pfnWorker = pfnWorker;
    pArgs->pvUser1   = pvUser1;
    pArgs->pvUser2   = pvUser2;
    pArgs->idCpu     = NIL_RTCPUID;
    pArgs->idCpu2    = NIL_RTCPUID;
    pArgs->cHits     = 0;
    pArgs->cRefs     = 1;

    paExecCpuDpcs = (KDPC *)(pArgs + 1);

    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;

        KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
        pArgs->idCpu2 = idCpu2;
    }
    else
    {
        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
            KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
        }
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    /*
     * We cannot do other than assume a 1:1 relationship between the
     * affinity mask and the process despite the warnings in the docs.
     * If someone knows a better way to get this done, please let bird know.
     */
    ASMCompilerBarrier(); /* paranoia */
    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);

        ASMAtomicIncS32(&pArgs->cRefs);
        ret = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
        Assert(ret);
    }
    else
    {
        unsigned iSelf = KeGetCurrentProcessorNumber();

        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            if (    (i != iSelf)
                &&  (Mask & RT_BIT_64(i)))
            {
                ASMAtomicIncS32(&pArgs->cRefs);
                BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
                Assert(ret);
            }
        }
        if (enmCpuid != RT_NT_CPUID_OTHERS)
            pfnWorker(iSelf, pvUser1, pvUser2);
    }

    KeLowerIrql(oldIrql);

    /* Flush all DPCs and wait for completion. (can take long!) */
    /** @todo Consider changing this to an active wait using some atomic inc/dec
     *  stuff (and check for the current cpu above in the specific case). */
    /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
     *        executed. Seen pArgs being freed while some CPU was using it before
     *        cRefs was added. */
    g_pfnrtNtKeFlushQueuedDpcs();

    if (pcHits)
        *pcHits = pArgs->cHits;

    /* Dereference the argument structure. */
    int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
    Assert(cRefs >= 0);
    if (cRefs == 0)
        ExFreePool(pArgs);

    return VINF_SUCCESS;
}
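rtmpNtDPCWrapper, which the code above queues on each target CPU, presumably counts the hit, runs the worker, and drops its reference on pArgs, freeing the package on the last release. A simplified sketch that ignores the idCpu/idCpu2 filtering the specific and pair cases need:

static VOID rtmpNtDPCWrapper(IN PKDPC pDpc, IN PVOID pvDeferredContext, IN PVOID pvSystemArgument1, IN PVOID pvSystemArgument2)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvDeferredContext;

    /* Count the hit and call the worker on this CPU. */
    ASMAtomicIncU32(&pArgs->cHits);
    pArgs->pfnWorker(KeGetCurrentProcessorNumber(), pArgs->pvUser1, pArgs->pvUser2);

    /* Release our reference; free the package if we were the last user. */
    if (ASMAtomicDecS32(&pArgs->cRefs) == 0)
        ExFreePool(pArgs);

    NOREF(pDpc); NOREF(pvSystemArgument1); NOREF(pvSystemArgument2);
}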
Example #12
NTSTATUS TransferPktComplete(IN PDEVICE_OBJECT NullFdo, IN PIRP Irp, IN PVOID Context)
{
    PTRANSFER_PACKET pkt = (PTRANSFER_PACKET)Context;
    PFUNCTIONAL_DEVICE_EXTENSION fdoExt = pkt->Fdo->DeviceExtension;
    PCLASS_PRIVATE_FDO_DATA fdoData = fdoExt->PrivateFdoData;
    PIO_STACK_LOCATION origCurrentSp = IoGetCurrentIrpStackLocation(pkt->OriginalIrp);
    BOOLEAN packetDone = FALSE;

    /*
     *  Put all the assertions and spew in here so we don't have to look at them.
     */
    DBGCHECKRETURNEDPKT(pkt);    
    
    if (SRB_STATUS(pkt->Srb.SrbStatus) == SRB_STATUS_SUCCESS){
        
        fdoData->LoggedTURFailureSinceLastIO = FALSE;
        
        /*
         *  The port driver should not have allocated a sense buffer
         *  if the SRB succeeded.
         */
        ASSERT(!PORT_ALLOCATED_SENSE(fdoExt, &pkt->Srb));

        /*
         *  Add this packet's transferred length to the original IRP's.
         */
        InterlockedExchangeAdd((PLONG)&pkt->OriginalIrp->IoStatus.Information, 
                              (LONG)pkt->Srb.DataTransferLength);

        if (pkt->InLowMemRetry){
            packetDone = StepLowMemRetry(pkt);
        }
        else {
            packetDone = TRUE;
        }
        
    }
    else {
        /*
         *  The packet failed.  We may retry it if possible.
         */
        BOOLEAN shouldRetry;
        
        /*
         *  Make sure IRP status matches SRB error status (since we propagate it).
         */
        if (NT_SUCCESS(Irp->IoStatus.Status)){
            Irp->IoStatus.Status = STATUS_UNSUCCESSFUL;
        }

        /*
         *  Interpret the SRB error (to a meaningful IRP status)
         *  and determine if we should retry this packet.
         *  This call looks at the returned SENSE info to figure out what to do.
         */
        shouldRetry = InterpretTransferPacketError(pkt);

        /*
         *  Sometimes the port driver can allocate a new 'sense' buffer
         *  to report transfer errors, e.g. when the default sense buffer
         *  is too small.  If so, it is up to us to free it.
         *  Now that we're done interpreting the sense info, free it if appropriate.
         */
        if (PORT_ALLOCATED_SENSE(fdoExt, &pkt->Srb)) {
            DBGTRACE(ClassDebugSenseInfo, ("Freeing port-allocated sense buffer for pkt %ph.", pkt));
            FREE_PORT_ALLOCATED_SENSE_BUFFER(fdoExt, &pkt->Srb);
            pkt->Srb.SenseInfoBuffer = &pkt->SrbErrorSenseData;
            pkt->Srb.SenseInfoBufferLength = sizeof(SENSE_DATA);
        }

        /*
         *  If the SRB queue is locked-up, release it.
         *  Do this after calling the error handler.
         */
        if (pkt->Srb.SrbStatus & SRB_STATUS_QUEUE_FROZEN){
            ClassReleaseQueue(pkt->Fdo);
        }
        
        if (shouldRetry && (pkt->NumRetries > 0)){           
            packetDone = RetryTransferPacket(pkt);
        }
        else {
            packetDone = TRUE;
        }
        
    }

    /*
     *  If the packet is completed, put it back in the free list.
     *  If it is the last packet servicing the original request, complete the original irp.
     */
    if (packetDone){
        LONG numPacketsRemaining;
        PIRP deferredIrp;
        PDEVICE_OBJECT Fdo = pkt->Fdo;
        UCHAR uniqueAddr;
        
        /*
         *  In case a remove is pending, bump the lock count so we don't get freed
         *  right after we complete the original irp.
         */
        ClassAcquireRemoveLock(Fdo, (PIRP)&uniqueAddr);        

        /*
         *  The original IRP should get an error code
         *  if any one of the packets failed.
         */
        if (!NT_SUCCESS(Irp->IoStatus.Status)){
            pkt->OriginalIrp->IoStatus.Status = Irp->IoStatus.Status;

            /*
             *  If the original I/O originated in user space (i.e. it is thread-queued), 
             *  and the error is user-correctable (e.g. media is missing, for removable media),
             *  alert the user.
             *  Since this is only one of possibly several packets completing for the original IRP,
             *  we may do this more than once for a single request.  That's ok; this allows
             *  us to test each returned status with IoIsErrorUserInduced().
             */
            if (IoIsErrorUserInduced(Irp->IoStatus.Status) &&
                pkt->CompleteOriginalIrpWhenLastPacketCompletes &&
                pkt->OriginalIrp->Tail.Overlay.Thread){

                IoSetHardErrorOrVerifyDevice(pkt->OriginalIrp, pkt->Fdo);
            }
        }

        /*
         *  We use a field in the original IRP to count
         *  down the transfer pieces as they complete.
         */
        numPacketsRemaining = InterlockedDecrement(
            (PLONG)&pkt->OriginalIrp->Tail.Overlay.DriverContext[0]);
            
        if (numPacketsRemaining > 0){
            /*
             *  More transfer pieces remain for the original request.
             *  Wait for them to complete before completing the original irp.
             */
        }
        else {

            /*
             *  All the transfer pieces are done.
             *  Complete the original irp if appropriate.
             */
            ASSERT(numPacketsRemaining == 0);
            if (pkt->CompleteOriginalIrpWhenLastPacketCompletes){  
                if (NT_SUCCESS(pkt->OriginalIrp->IoStatus.Status)){
                    ASSERT((ULONG)pkt->OriginalIrp->IoStatus.Information == origCurrentSp->Parameters.Read.Length);
                    ClasspPerfIncrementSuccessfulIo(fdoExt);
                }
                ClassReleaseRemoveLock(pkt->Fdo, pkt->OriginalIrp);

                ClassCompleteRequest(pkt->Fdo, pkt->OriginalIrp, IO_DISK_INCREMENT);

                /*
                 *  We may have been called by one of the class drivers (e.g. cdrom)
                 *  via the legacy API ClassSplitRequest.  
                 *  This is the only case for which the packet engine is called for an FDO
                 *  with a StartIo routine; in that case, we have to call IoStartNextPacket
                 *  now that the original irp has been completed.
                 */
                if (fdoExt->CommonExtension.DriverExtension->InitData.ClassStartIo) {
                    if (TEST_FLAG(pkt->Srb.SrbFlags, SRB_FLAGS_DONT_START_NEXT_PACKET)){
                        DBGTRAP(("SRB_FLAGS_DONT_START_NEXT_PACKET should never be set here (?)"));
                    }
                    else {
                        KIRQL oldIrql;
                        KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
                        IoStartNextPacket(pkt->Fdo, FALSE);
                        KeLowerIrql(oldIrql);
                    }
                }              
            }            
        }

        /*
         *  If the packet was synchronous, write the final
         *  result back to the issuer's status buffer and
         *  signal his event.
         */
        if (pkt->SyncEventPtr){
            KeSetEvent(pkt->SyncEventPtr, 0, FALSE);
            pkt->SyncEventPtr = NULL;
        }

        /*
         *  Free the completed packet.
         */
        pkt->OriginalIrp = NULL;
        pkt->InLowMemRetry = FALSE;
        EnqueueFreeTransferPacket(pkt->Fdo, pkt);

        /*
         *  Now that we have freed some resources,
         *  try again to send one of the previously deferred irps.
         */
        deferredIrp = DequeueDeferredClientIrp(fdoData);
        if (deferredIrp){
            DBGWARN(("... retrying deferred irp %xh.", deferredIrp)); 
            ServiceTransferRequest(pkt->Fdo, deferredIrp);
        }

        ClassReleaseRemoveLock(Fdo, (PIRP)&uniqueAddr);        
    }

    return STATUS_MORE_PROCESSING_REQUIRED;
}
Example #13
/****************************************************************************
REMARKS:
Increase the thread priority to maximum, if possible.
****************************************************************************/
ulong PMAPI PM_setMaxThreadPriority(void)
{
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL+1,&oldIrql);
    return oldIrql;
}
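The returned value is the saved IRQL; the matching restore routine in this library presumably just hands it back to KeLowerIrql. A sketch (the name and exact signature are assumptions):

/****************************************************************************
REMARKS:
Restore the thread priority saved by PM_setMaxThreadPriority.
****************************************************************************/
void PMAPI PM_restoreThreadPriority(ulong oldPriority)
{
    /* Drop back to the IRQL captured when the priority was raised */
    KeLowerIrql((KIRQL)oldPriority);
}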
Example #14
BOOLEAN
HalEnableSystemInterrupt (
    IN ULONG Vector,
    IN KIRQL Irql,
    IN KINTERRUPT_MODE InterruptMode
    )

/*++

Routine Description:

    This routine enables the specified system interrupt.

Arguments:

    Vector - Supplies the vector of the system interrupt that is enabled.

    Irql - Supplies the IRQL of the interrupting source.

    InterruptMode - Supplies the mode of the interrupt; LevelSensitive or
        Latched.

Return Value:

    TRUE if the system interrupt was enabled

--*/

{
    BOOLEAN Enabled = FALSE;
    KIRQL OldIrql;

    //
    // Raise IRQL to the highest level.
    //

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // If the vector number is within the range of the EISA interrupts, then
    // enable the EISA interrupt and set the Level/Edge register.
    //

    if (Vector >= EISA_VECTORS &&
        Vector < MAXIMUM_EISA_VECTOR &&
        Irql == DEVICE_HIGH_LEVEL) {
        HalpEnableEisaInterrupt( Vector, InterruptMode );
        Enabled = TRUE;
    }

    //
    // If the vector number is within the range of the PCI interrupts, then
    // enable the PCI interrupt.
    //

    if (Vector >= PCI_VECTORS &&
        Vector < MAXIMUM_PCI_VECTOR &&
        Irql == DEVICE_HIGH_LEVEL) {
        HalpEnablePciInterrupt( Vector, InterruptMode );
        Enabled = TRUE;
    }

    //
    // If the vector is a performance counter vector we will ignore
    // the enable - the performance counters are enabled directly by
    // the wrperfmon callpal.  Wrperfmon must be controlled directly
    // by the driver.
    //

    switch (Vector) {

    case PC0_VECTOR:
    case PC1_VECTOR:
    case PC2_VECTOR:

        Enabled = TRUE;
        break;


    case CORRECTABLE_VECTOR:

    //
    // Enable the correctable error interrupt.
    //

    {
      CIA_ERR_MASK CiaErrMask;

      CiaErrMask.all = READ_CIA_REGISTER(
               &((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask);

      CiaErrMask.CorErr = 0x1;

      WRITE_CIA_REGISTER(&((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask,
                 CiaErrMask.all
                 );

      HalpSetMachineCheckEnables( FALSE, FALSE, FALSE );
    }

        Enabled = TRUE;
        break;


    } //end switch Vector

    //
    // Lower IRQL to the previous level.
    //

    KeLowerIrql(OldIrql);
    return Enabled;

}
Example #15
VOID
HalDisableSystemInterrupt (
    IN ULONG Vector,
    IN KIRQL Irql
    )

/*++

Routine Description:

    This routine disables the specified system interrupt.

Arguments:

    Vector - Supplies the vector of the system interrupt that is disabled.

    Irql - Supplies the IRQL of the interrupting source.

Return Value:

    None.

--*/

{

    KIRQL OldIrql;

    //
    // Raise IRQL to the highest level.
    //

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // If the vector number is within the range of the EISA interrupts, then
    // disable the EISA interrupt.
    //

    if (Vector >= EISA_VECTORS &&
        Vector < MAXIMUM_EISA_VECTOR &&
        Irql == DEVICE_HIGH_LEVEL) {
        HalpDisableEisaInterrupt(Vector);
    }

    //
    // If the vector number is within the range of the PCI interrupts, then
    // disable the PCI interrupt.
    //

    if (Vector >= PCI_VECTORS &&
        Vector < MAXIMUM_PCI_VECTOR &&
        Irql == DEVICE_HIGH_LEVEL) {
        HalpDisablePciInterrupt(Vector);
    }

    //
    // If the vector is a performance counter vector we will ignore
    // the disable - the performance counters are enabled directly by
    // the wrperfmon callpal.  Wrperfmon must be controlled directly
    // by the driver.
    //

    switch (Vector) {

    case PC0_VECTOR:
    case PC1_VECTOR:
    case PC2_VECTOR:

        break;

    case CORRECTABLE_VECTOR:

    //
    // Disable the correctable error interrupt.
    //

    {
      CIA_ERR_MASK CiaErrMask;

      CiaErrMask.all = READ_CIA_REGISTER(
               &((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask);

      CiaErrMask.CorErr = 0x0;

      WRITE_CIA_REGISTER(&((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask,
                 CiaErrMask.all
                 );

      HalpSetMachineCheckEnables( FALSE, TRUE, TRUE );
    }

    break;

    } //end switch Vector

    //
    // Lower IRQL to the previous level.
    //

    KeLowerIrql(OldIrql);
    return;
}
Example #16
BOOLEAN
HalSetRealTimeClock (
    IN PTIME_FIELDS TimeFields
    )

/*++

Routine Description:

    This routine sets the realtime clock.

    N.B. This routine is required to provide any synchronization necessary
         to set the realtime clock information.

Arguments:

    TimeFields - Supplies a pointer to a time structure that specifies the
        realtime clock information.

Return Value:

    If the power to the realtime clock has not failed, then the time
    values are written to the realtime clock and a value of TRUE is
    returned. Otherwise, a value of FALSE is returned.

--*/

{

    UCHAR DataByte;
    KIRQL OldIrql;

    //
    // If the realtime clock battery is still functioning, then write
    // the realtime clock values, and return a function value of TRUE.
    // Otherwise, return a function value of FALSE.
    //

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    DataByte = HalpReadRawClockRegister(RTC_CONTROL_REGISTERD);
    if (((PRTC_CONTROL_REGISTER_D)(&DataByte))->ValidTime == 1) {

        //
        // Set the realtime clock control to set the time.
        //

        DataByte = 0;
        ((PRTC_CONTROL_REGISTER_B)(&DataByte))->HoursFormat = 1;


        ((PRTC_CONTROL_REGISTER_B)(&DataByte))->SetTime = 1;
        HalpWriteRawClockRegister(RTC_CONTROL_REGISTERB, DataByte);

        //
        // Write the realtime clock values.
        //

        if (TimeFields->Year > 1999)
          HalpWriteClockRegister(RTC_YEAR, (UCHAR)(TimeFields->Year - 2000));
        else
          HalpWriteClockRegister(RTC_YEAR, (UCHAR)(TimeFields->Year - 1900));

        HalpWriteClockRegister(RTC_MONTH, (UCHAR)TimeFields->Month);
        HalpWriteClockRegister(RTC_DAY_OF_MONTH, (UCHAR)TimeFields->Day);
        HalpWriteClockRegister(RTC_DAY_OF_WEEK, (UCHAR)(TimeFields->Weekday + 1));
        HalpWriteClockRegister(RTC_HOUR, (UCHAR)TimeFields->Hour);
        HalpWriteClockRegister(RTC_MINUTE, (UCHAR)TimeFields->Minute);
        HalpWriteClockRegister(RTC_SECOND, (UCHAR)TimeFields->Second);

        //
        // Set the realtime clock control to update the time.
        //

        ((PRTC_CONTROL_REGISTER_B)(&DataByte))->SetTime = 0;
        HalpWriteRawClockRegister(RTC_CONTROL_REGISTERB, DataByte);
        KeLowerIrql(OldIrql);
        return TRUE;

    } else {
        KeLowerIrql(OldIrql);
        return FALSE;
    }
}
Example #17
VOID
HalReturnToFirmware(
    IN FIRMWARE_REENTRY Routine
    )

/*++

Routine Description:

    This function returns control to the specified firmware routine.
    In most cases it generates a soft reset by asserting the reset line
    through the keyboard controller (STRIKER and DUO). However, for
    FALCON we will use the Port92 register in the 82374 to generate
    a software reset (restart) through the ALT_RESET signal.

Arguments:

    Routine - Supplies a value indicating which firmware routine to invoke.

Return Value:

    Does not return.

--*/

{

    KIRQL OldIrql;


    //
    // Mask interrupts
    //

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // Do the right thing!
    //

    switch (Routine) {

	    case HalHaltRoutine:

		//
		// Hang looping.
		//	

		for (;;) {
		}

	     case HalPowerDownRoutine:

		 //
		 // Power down the system
		 //

		 {
		     ULONG EPCValue;
		     ULONG EPC = (ULONG)HalpEisaControlBase + EXTERNAL_PMP_CONTROL_OFFSET;

		     EPCValue = READ_REGISTER_ULONG( EPC );
                     EPCValue &= ~EPC_POWER;
		     WRITE_REGISTER_ULONG( EPC, EPCValue );
		     EPCValue |= EPC_POWER;
		     WRITE_REGISTER_ULONG( EPC, EPCValue );
		 }

	    case HalRestartRoutine:
	    case HalRebootRoutine:
	    case HalInteractiveModeRoutine:

		//
		// Reset ISA Display Adapter to 80x25 color text mode.
		//

		HalpResetX86DisplayAdapter();

		//
		// Enable Port92 register in 82374
		//	

		WRITE_REGISTER_UCHAR( &((PEISA_CONTROL)HalpEisaControlBase)->Reserved1[0], 0x4F);
		WRITE_REGISTER_UCHAR( &((PEISA_CONTROL)HalpEisaControlBase)->Reserved1[1], 0x7F);

		//
		// Generate soft reset through ALT_RESET signal from 82374
		//

		WRITE_REGISTER_UCHAR( &((PEISA_CONTROL)HalpEisaControlBase)->Reserved2[2], 0);
		WRITE_REGISTER_UCHAR( &((PEISA_CONTROL)HalpEisaControlBase)->Reserved2[2], 0x01);

		//
		// Hang
		//

		for (;;) {
		}

	    default:
	        KdPrint(("HalReturnToFirmware invalid argument\n"));
	        KeLowerIrql(OldIrql);
	        DbgBreakPoint();

    }

}
Example #18
NDIS_STATUS NTAPI
ProSend(
    IN  NDIS_HANDLE     MacBindingHandle,
    IN  PNDIS_PACKET    Packet)
/*
 * FUNCTION: Forwards a request to send a packet to an NDIS miniport
 * ARGUMENTS:
 *     MacBindingHandle = Adapter binding handle
 *     Packet           = Pointer to NDIS packet descriptor
 * RETURNS:
 *     NDIS_STATUS_SUCCESS if the packet was successfully sent
 *     NDIS_STATUS_PENDING if the miniport was busy or a serialized miniport returned NDIS_STATUS_RESOURCES
 */
{
  PADAPTER_BINDING AdapterBinding;
  PLOGICAL_ADAPTER Adapter;
  PNDIS_BUFFER NdisBuffer;
  PDMA_CONTEXT Context;
  NDIS_STATUS NdisStatus;
  UINT PacketLength;
  KIRQL OldIrql;

  NDIS_DbgPrint(MAX_TRACE, ("Called.\n"));

  ASSERT(MacBindingHandle);
  AdapterBinding = GET_ADAPTER_BINDING(MacBindingHandle);

  ASSERT(AdapterBinding);
  Adapter = AdapterBinding->Adapter;

  ASSERT(Adapter);

  /* if the following is not true, KeRaiseIrql() below will break */
  ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);

  /* XXX what is this crazy black magic? */
  Packet->Reserved[1] = (ULONG_PTR)MacBindingHandle;

  /*
   * Test the packet to see if it is a MAC loopback.
   *
   * We may have to loop this packet if miniport cannot.
   * If dest MAC address of packet == MAC address of adapter,
   * this is a loopback frame.
   */

  if ((Adapter->NdisMiniportBlock.MacOptions & NDIS_MAC_OPTION_NO_LOOPBACK) &&
      MiniAdapterHasAddress(Adapter, Packet))
    {
#if WORKER_TEST
        MiniQueueWorkItem(Adapter, NdisWorkItemSendLoopback, Packet, FALSE);
        return NDIS_STATUS_PENDING;
#else
        return ProIndicatePacket(Adapter, Packet);
#endif
    } else {
        if (Adapter->NdisMiniportBlock.ScatterGatherListSize != 0)
        {
            NDIS_DbgPrint(MID_TRACE, ("Using Scatter/Gather DMA\n"));

            NdisQueryPacket(Packet,
                            NULL,
                            NULL,
                            &NdisBuffer,
                            &PacketLength);

            Context = ExAllocatePool(NonPagedPool, sizeof(DMA_CONTEXT));
            if (!Context) {
                NDIS_DbgPrint(MIN_TRACE, ("Insufficient resources\n"));
                return NDIS_STATUS_RESOURCES;
            }

            Context->Adapter = Adapter;
            Context->Packet = Packet;

            KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

            KeFlushIoBuffers(NdisBuffer, FALSE, TRUE);

            NdisStatus = Adapter->NdisMiniportBlock.SystemAdapterObject->DmaOperations->GetScatterGatherList(
                          Adapter->NdisMiniportBlock.SystemAdapterObject,
                          Adapter->NdisMiniportBlock.PhysicalDeviceObject,
                          NdisBuffer,
                          MmGetMdlVirtualAddress(NdisBuffer),
                          PacketLength,
                          ScatterGatherSendPacket,
                          Context,
                          TRUE);

            KeLowerIrql(OldIrql);

            if (!NT_SUCCESS(NdisStatus)) {
                NDIS_DbgPrint(MIN_TRACE, ("GetScatterGatherList failed! (%x)\n", NdisStatus));
                return NdisStatus;
            }

            return NDIS_STATUS_PENDING;
        }


        return proSendPacketToMiniport(Adapter, Packet);
    }
}
Example #19
VOID NTAPI
ProSendPackets(
    IN  NDIS_HANDLE     NdisBindingHandle,
    IN  PPNDIS_PACKET   PacketArray,
    IN  UINT            NumberOfPackets)
{
    PADAPTER_BINDING AdapterBinding = NdisBindingHandle;
    PLOGICAL_ADAPTER Adapter = AdapterBinding->Adapter;
    KIRQL RaiseOldIrql;
    NDIS_STATUS NdisStatus;
    UINT i;

    if(Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendPacketsHandler)
    {
       if(Adapter->NdisMiniportBlock.Flags & NDIS_ATTRIBUTE_DESERIALIZE)
       {
          (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendPacketsHandler)(
           Adapter->NdisMiniportBlock.MiniportAdapterContext, PacketArray, NumberOfPackets);
       }
       else
       {
          /* SendPackets is called at DISPATCH_LEVEL for all serialized miniports */
          KeRaiseIrql(DISPATCH_LEVEL, &RaiseOldIrql);
          (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendPacketsHandler)(
           Adapter->NdisMiniportBlock.MiniportAdapterContext, PacketArray, NumberOfPackets);
          KeLowerIrql(RaiseOldIrql);
          for (i = 0; i < NumberOfPackets; i++)
          {
             NdisStatus = NDIS_GET_PACKET_STATUS(PacketArray[i]);
             if (NdisStatus != NDIS_STATUS_PENDING)
                 MiniSendComplete(Adapter, PacketArray[i], NdisStatus);
          }
       }
     }
     else
     {
       if(Adapter->NdisMiniportBlock.Flags & NDIS_ATTRIBUTE_DESERIALIZE)
       {
          for (i = 0; i < NumberOfPackets; i++)
          {
             NdisStatus = (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendHandler)(
                           Adapter->NdisMiniportBlock.MiniportAdapterContext, PacketArray[i], PacketArray[i]->Private.Flags);
             if (NdisStatus != NDIS_STATUS_PENDING)
                 MiniSendComplete(Adapter, PacketArray[i], NdisStatus);
          }
       }
       else
       {
         /* Send is called at DISPATCH_LEVEL for all serialized miniports */
         KeRaiseIrql(DISPATCH_LEVEL, &RaiseOldIrql);
         for (i = 0; i < NumberOfPackets; i++)
         {
            NdisStatus = (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendHandler)(
                           Adapter->NdisMiniportBlock.MiniportAdapterContext, PacketArray[i], PacketArray[i]->Private.Flags);
            if (NdisStatus != NDIS_STATUS_PENDING)
                MiniSendComplete(Adapter, PacketArray[i], NdisStatus);
         }
         KeLowerIrql(RaiseOldIrql);
       }
     }
}
Example #20
VOID
NTAPI
PspUserThreadStartup(IN PKSTART_ROUTINE StartRoutine,
                     IN PVOID StartContext)
{
    PETHREAD Thread;
    PTEB Teb;
    BOOLEAN DeadThread = FALSE;
    KIRQL OldIrql;
    PAGED_CODE();
    PSTRACE(PS_THREAD_DEBUG,
            "StartRoutine: %p StartContext: %p\n", StartRoutine, StartContext);

    /* Go to Passive Level */
    KeLowerIrql(PASSIVE_LEVEL);
    Thread = PsGetCurrentThread();

    /* Check if the thread is dead */
    if (Thread->DeadThread)
    {
        /* Remember that we're dead */
        DeadThread = TRUE;
    }
    else
    {
        /* Get the Locale ID and save Preferred Proc */
        Teb =  NtCurrentTeb();
        Teb->CurrentLocale = MmGetSessionLocaleId();
        Teb->IdealProcessor = Thread->Tcb.IdealProcessor;
    }

    /* Check if this is a dead thread, or if we're hiding */
    if (!(Thread->DeadThread) && !(Thread->HideFromDebugger))
    {
        /* We're not, so notify the debugger */
        DbgkCreateThread(Thread, StartContext);
    }

    /* Make sure we're not already dead */
    if (!DeadThread)
    {
        /* Check if the Prefetcher is enabled */
        if (CcPfEnablePrefetcher)
        {
            /* FIXME: Prepare to prefetch this process */
        }

        /* Raise to APC */
        KeRaiseIrql(APC_LEVEL, &OldIrql);

        /* Queue the User APC */
        KiInitializeUserApc(KeGetExceptionFrame(&Thread->Tcb),
                            KeGetTrapFrame(&Thread->Tcb),
                            PspSystemDllEntryPoint,
                            NULL,
                            PspSystemDllBase,
                            NULL);

        /* Lower it back to passive */
        KeLowerIrql(PASSIVE_LEVEL);
    }
    else
    {
        /* We're dead, kill us now */
        PspTerminateThreadByPointer(Thread,
                                    STATUS_THREAD_IS_TERMINATING,
                                    TRUE);
    }

    /* Do we have a cookie set yet? */
    while (!SharedUserData->Cookie)
    {
        LARGE_INTEGER SystemTime;
        ULONG NewCookie;
        PKPRCB Prcb;

        /* Generate a new cookie */
        KeQuerySystemTime(&SystemTime);
        Prcb = KeGetCurrentPrcb();
        NewCookie = (Prcb->MmPageFaultCount ^ Prcb->InterruptTime ^
                    SystemTime.u.LowPart ^ SystemTime.u.HighPart ^
                    (ULONG)(ULONG_PTR)&SystemTime);

        /* Set the new cookie*/
        InterlockedCompareExchange((LONG*)&SharedUserData->Cookie,
                                   NewCookie,
                                   0);
    }
}
Example #21
NDIS_STATUS
proSendPacketToMiniport(PLOGICAL_ADAPTER Adapter, PNDIS_PACKET Packet)
{
#if WORKER_TEST
   MiniQueueWorkItem(Adapter, NdisWorkItemSend, Packet, FALSE);
   return NDIS_STATUS_PENDING;
#else
   KIRQL RaiseOldIrql;
   NDIS_STATUS NdisStatus;

   if(MiniIsBusy(Adapter, NdisWorkItemSend)) {
      MiniQueueWorkItem(Adapter, NdisWorkItemSend, Packet, FALSE);
      return NDIS_STATUS_PENDING;
   }

   if(Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendPacketsHandler)
   {
        if(Adapter->NdisMiniportBlock.Flags & NDIS_ATTRIBUTE_DESERIALIZE)
        {
            NDIS_DbgPrint(MAX_TRACE, ("Calling miniport's SendPackets handler\n"));
            (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendPacketsHandler)(
             Adapter->NdisMiniportBlock.MiniportAdapterContext, &Packet, 1);
             NdisStatus = NDIS_STATUS_PENDING;
        } else {
            /* SendPackets is called at DISPATCH_LEVEL for all serialized miniports */
            KeRaiseIrql(DISPATCH_LEVEL, &RaiseOldIrql);
            {
               NDIS_DbgPrint(MAX_TRACE, ("Calling miniport's SendPackets handler\n"));
               (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendPacketsHandler)(
                Adapter->NdisMiniportBlock.MiniportAdapterContext, &Packet, 1);
            }
            KeLowerIrql(RaiseOldIrql);

            NdisStatus = NDIS_GET_PACKET_STATUS(Packet);
            if (NdisStatus == NDIS_STATUS_RESOURCES) {
                MiniQueueWorkItem(Adapter, NdisWorkItemSend, Packet, TRUE);
                NdisStatus = NDIS_STATUS_PENDING;
            }
        }

        if (NdisStatus != NDIS_STATUS_PENDING) {
            MiniWorkItemComplete(Adapter, NdisWorkItemSend);
        }

        return NdisStatus;
   } else {
        if(Adapter->NdisMiniportBlock.Flags & NDIS_ATTRIBUTE_DESERIALIZE)
        {
            NDIS_DbgPrint(MAX_TRACE, ("Calling miniport's Send handler\n"));
            NdisStatus = (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendHandler)(
                          Adapter->NdisMiniportBlock.MiniportAdapterContext, Packet, Packet->Private.Flags);
            NDIS_DbgPrint(MAX_TRACE, ("back from miniport's send handler\n"));
        } else {
            /* Send is called at DISPATCH_LEVEL for all serialized miniports */
            KeRaiseIrql(DISPATCH_LEVEL, &RaiseOldIrql);
            NDIS_DbgPrint(MAX_TRACE, ("Calling miniport's Send handler\n"));
            NdisStatus = (*Adapter->NdisMiniportBlock.DriverHandle->MiniportCharacteristics.SendHandler)(
                          Adapter->NdisMiniportBlock.MiniportAdapterContext, Packet, Packet->Private.Flags);
            NDIS_DbgPrint(MAX_TRACE, ("back from miniport's send handler\n"));
            KeLowerIrql(RaiseOldIrql);

            if (NdisStatus == NDIS_STATUS_RESOURCES) {
                MiniQueueWorkItem(Adapter, NdisWorkItemSend, Packet, TRUE);
                NdisStatus = NDIS_STATUS_PENDING;
            }
        }

        if (NdisStatus != NDIS_STATUS_PENDING) {
            MiniWorkItemComplete(Adapter, NdisWorkItemSend);
        }

        return NdisStatus;
   }
#endif
}
Example #22
NTSTATUS
NtGetContextThread(
    IN HANDLE ThreadHandle,
    IN OUT PCONTEXT ThreadContext
    )

/*++

Routine Description:

    This function returns the usermode context of the specified thread. This
    function will fail if the specified thread is a system thread. It will
    return the wrong answer if the thread is a non-system thread that does
    not execute in user-mode.

Arguments:

    ThreadHandle - Supplies an open handle to the thread object from
                   which to retrieve context information.  The handle
                   must allow THREAD_GET_CONTEXT access to the thread.

    ThreadContext - Supplies the address of a buffer that will receive
                    the context of the specified thread.

Return Value:

    None.

--*/

{

    ULONG Alignment;
    ULONG ContextFlags;
    GETSETCONTEXT ContextFrame;
    ULONG ContextLength;
    KIRQL Irql;
    KPROCESSOR_MODE Mode;
    NTSTATUS Status;
    PETHREAD Thread;

    PAGED_CODE();

    //
    // Get previous mode and reference specified thread.
    //

    Mode = KeGetPreviousMode();
    Status = ObReferenceObjectByHandle(ThreadHandle,
                                   THREAD_GET_CONTEXT,
                                   PsThreadType,
                                   Mode,
                                   (PVOID *)&Thread,
                                   NULL);

    //
    // If the reference was successful, then check if the specified thread
    // is a system thread.
    //

    if (NT_SUCCESS(Status)) {

        //
        // If the thread is not a system thread, then attempt to get the
        // context of the thread.
        //

        if (IS_SYSTEM_THREAD(Thread) == FALSE) {

            //
            // Attempt to get the context of the specified thread.
            //

            try {

                //
                // Set the default alignment, capture the context flags,
                // and set the default size of the context record.
                //

                Alignment = CONTEXT_ALIGN;
                ContextFlags = ProbeAndReadUlong(&ThreadContext->ContextFlags);
                ContextLength = sizeof(CONTEXT);

#if defined(_X86_)
                //
                // If CONTEXT_EXTENDED_REGISTERS is set, then we want sizeof(CONTEXT) set above
                // otherwise (not set) we only want the old part of the context record.
                //
                if ((ContextFlags & CONTEXT_EXTENDED_REGISTERS) != CONTEXT_EXTENDED_REGISTERS) {
                    ContextLength = FIELD_OFFSET(CONTEXT, ExtendedRegisters);
                }
#endif

#if defined(_MIPS_)

                //
                // The following code is included for backward compatibility
                // with old code that does not understand extended context
                // records on MIPS systems.
                //

                if ((ContextFlags & CONTEXT_EXTENDED_INTEGER) != CONTEXT_EXTENDED_INTEGER) {
                    Alignment = sizeof(ULONG);
                    ContextLength = FIELD_OFFSET(CONTEXT, ContextFlags) + 4;
                }

#endif

                if (Mode != KernelMode) {
                    ProbeForWrite(ThreadContext, ContextLength, Alignment);
                }

            } except(EXCEPTION_EXECUTE_HANDLER) {
                Status = GetExceptionCode();
            }

            //
            // If an exception did not occur during the probe of the thread
            // context, then get the context of the target thread.
            //

            if (NT_SUCCESS(Status)) {
                KeInitializeEvent(&ContextFrame.OperationComplete,
                                  NotificationEvent,
                                  FALSE);

                ContextFrame.Context.ContextFlags = ContextFlags;

                ContextFrame.Mode = Mode;
                if (Thread == PsGetCurrentThread()) {
                    ContextFrame.Apc.SystemArgument1 = NULL;
                    ContextFrame.Apc.SystemArgument2 = Thread;
                    KeRaiseIrql(APC_LEVEL, &Irql);
                    PspGetSetContextSpecialApc(&ContextFrame.Apc,
                                               NULL,
                                               NULL,
                                               &ContextFrame.Apc.SystemArgument1,
                                               &ContextFrame.Apc.SystemArgument2);

                    KeLowerIrql(Irql);

                    //
                    // Move context to specified context record. If an exception
                    // occurs, then silently handle it and return success.
                    //

                    try {
                        RtlMoveMemory(ThreadContext,
                                      &ContextFrame.Context,
                                      ContextLength);

                    } except(EXCEPTION_EXECUTE_HANDLER) {
                    }

                } else {
                    KeInitializeApc(&ContextFrame.Apc,
                                    &Thread->Tcb,
                                    OriginalApcEnvironment,
                                    PspGetSetContextSpecialApc,
                                    NULL,
                                    NULL,
                                    KernelMode,
                                    NULL);

                    if (!KeInsertQueueApc(&ContextFrame.Apc, NULL, Thread, 2)) {
                        Status = STATUS_UNSUCCESSFUL;

                    } else {
                        KeWaitForSingleObject(&ContextFrame.OperationComplete,
                                              Executive,
                                              KernelMode,
                                              FALSE,
                                              NULL);
                        //
                        // Move context to specified context record. If an
                        // exception occurs, then silently handle it and
                        // return success.
                        //

                        try {
                            RtlMoveMemory(ThreadContext,
                                          &ContextFrame.Context,
                                          ContextLength);

                        } except(EXCEPTION_EXECUTE_HANDLER) {
                        }
                    }
                }
            }

        } else {
Example #23
BOOLEAN
HalpCreateSioStructures (
    VOID
    )

/*++

Routine Description:

    This routine initializes the structures necessary for SIO operations
    and connects the intermediate interrupt dispatcher.

Arguments:

    None.

Return Value:

    If the second level interrupt dispatcher is connected, then a value of
    TRUE is returned. Otherwise, a value of FALSE is returned.

--*/

{

    UCHAR DataByte;
    KIRQL oldIrql;


    //
    // Initialize the Machine Check interrupt handler
    //

    if (HalpEnableInterruptHandler(&HalpMachineCheckInterrupt,
                                   HalpHandleMachineCheck,
                                   NULL,
                                   NULL,
                                   MACHINE_CHECK_VECTOR,
                                   MACHINE_CHECK_LEVEL,
                                   MACHINE_CHECK_LEVEL,
                                   Latched,
                                   FALSE,
                                   0,
                                   FALSE,
                                   InternalUsage,
                                   MACHINE_CHECK_VECTOR
                                   ) == FALSE) {
        KeBugCheck(HAL_INITIALIZATION_FAILED);
    }

    //
    // Enable NMI IOCHK# and PCI SERR#
    //

    DataByte = READ_REGISTER_UCHAR(&((PEISA_CONTROL)HalpIoControlBase)->NmiStatus);
    WRITE_REGISTER_UCHAR(&((PEISA_CONTROL)HalpIoControlBase)->NmiStatus,
                        DataByte & ~DISABLE_IOCHK_NMI & ~DISABLE_PCI_SERR_NMI);

    //
    // Clear the SIO NMI disable bit.  This bit is the high order of the
    // NMI enable register.
    //

    DataByte = 0;


    WRITE_REGISTER_UCHAR(
      &((PEISA_CONTROL) HalpIoControlBase)->NmiEnable,
      DataByte
      );

    //
    // Connect the external interrupt handler
    //

    PCR->InterruptRoutine[EXTERNAL_INTERRUPT_VECTOR] = (PKINTERRUPT_ROUTINE) HalpHandleExternalInterrupt;

    //
    // register the interrupt vector
    //

    HalpRegisterVector(InternalUsage,
                       EXTERNAL_INTERRUPT_VECTOR,
                       EXTERNAL_INTERRUPT_VECTOR,
                       HIGH_LEVEL);




    //
    // Connect directly to the decrementer handler.  This is done
    // directly rather than thru HalpEnableInterruptHandler due to
    // special handling required because the handler calls KdPollBreakIn().
    //

    PCR->InterruptRoutine[DECREMENT_VECTOR] = (PKINTERRUPT_ROUTINE) HalpHandleDecrementerInterrupt;


    //
    // Initialize and connect the Timer 1 interrupt (IRQ0)
    //

    if (HalpEnableInterruptHandler( &HalpProfileInterrupt,
                           (PKSERVICE_ROUTINE) HalpHandleProfileInterrupt,
                           (PVOID) NULL,
                           (PKSPIN_LOCK)NULL,
                           PROFILE_VECTOR,
                           PROFILE_LEVEL,
                           PROFILE_LEVEL,
                           Latched,
                           TRUE,
                           0,
                           FALSE,
                           DeviceUsage,
                           PROFILE_VECTOR
                           ) == FALSE) {
        KeBugCheck(HAL_INITIALIZATION_FAILED);
    }


    //
    // Disable Timer 1; only used by profiling
    //

    HalDisableSystemInterrupt(PROFILE_VECTOR, PROFILE_LEVEL);

    //
    // Set default profile rate
    //

    HalSetProfileInterval(5000);

    //
    // Raise the IRQL while the SIO interrupt controller is initialized.
    //

    KeRaiseIrql(CLOCK2_LEVEL, &oldIrql);

    //
    // Initialize any planar registers
    //

    HalpInitPlanar();

    
    //
    // Enable the clock interrupt
    //
    HalpUpdateDecrementer(1000);        // Get those decrementer ticks going


    //
    // Set ISA bus interrupt affinity.
    //

    HalpIsaBusAffinity = PCR->SetMember;


    //
    // Restore IRQL level.
    //

    KeLowerIrql(oldIrql);


    //
    // DMA command - set assert level
    //

    DataByte = READ_REGISTER_UCHAR(&((PEISA_CONTROL)HalpIoControlBase)->Dma1BasePort.DmaStatus);
    WRITE_REGISTER_UCHAR(&((PEISA_CONTROL)HalpIoControlBase)->Dma1BasePort.DmaStatus,
                        DataByte & ~DACK_ASSERT_HIGH & ~DREQ_ASSERT_LOW);

    //
    // Initialize the DMA mode registers to a default value.
    // Disable all of the DMA channels except channel 4 which is that
    // cascade of channels 0-3.
    //

    WRITE_REGISTER_UCHAR(
        &((PEISA_CONTROL) HalpIoControlBase)->Dma1BasePort.AllMask,
        0x0F
        );

    WRITE_REGISTER_UCHAR(
        &((PEISA_CONTROL) HalpIoControlBase)->Dma2BasePort.AllMask,
        0x0E
        );

    return(TRUE);
}
Example #24
BOOLEAN
HalQueryRealTimeClock (
    OUT PTIME_FIELDS TimeFields
    )

/*++

Routine Description:

    This routine queries the realtime clock.

    N.B. This routine is required to provide any synchronization necessary
         to query the realtime clock information.

Arguments:

    TimeFields - Supplies a pointer to a time structure that receives
        the realtime clock information.

Return Value:

    If the power to the realtime clock has not failed, then the time
    values are read from the realtime clock and a value of TRUE is
    returned. Otherwise, a value of FALSE is returned.

--*/

{

    UCHAR DataByte;
    KIRQL OldIrql;

    //
    // If the realtime clock battery is still functioning, then read
    // the realtime clock values, and return a function value of TRUE.
    // Otherwise, return a function value of FALSE.
    //

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    DataByte = HalpReadRawClockRegister(RTC_CONTROL_REGISTERD);
    if (((PRTC_CONTROL_REGISTER_D)(&DataByte))->ValidTime == 1) {

        //
        // Wait until the realtime clock is not being updated.
        //

        do {
            DataByte = HalpReadRawClockRegister(RTC_CONTROL_REGISTERA);
        } while (((PRTC_CONTROL_REGISTER_A)(&DataByte))->UpdateInProgress == 1);

        //
        // Read the realtime clock values.
        //

        TimeFields->Year = 1900 + (CSHORT)HalpReadClockRegister(RTC_YEAR);
        if (TimeFields->Year < 1980) TimeFields->Year += 100;

        TimeFields->Month = (CSHORT)HalpReadClockRegister(RTC_MONTH);
        TimeFields->Day = (CSHORT)HalpReadClockRegister(RTC_DAY_OF_MONTH);
        TimeFields->Weekday  = (CSHORT)HalpReadClockRegister(RTC_DAY_OF_WEEK) - 1;
        TimeFields->Hour = (CSHORT)HalpReadClockRegister(RTC_HOUR);
        TimeFields->Minute = (CSHORT)HalpReadClockRegister(RTC_MINUTE);
        TimeFields->Second = (CSHORT)HalpReadClockRegister(RTC_SECOND);
        TimeFields->Milliseconds = 0;
        KeLowerIrql(OldIrql);
        return TRUE;

    } else {
        KeLowerIrql(OldIrql);
        return FALSE;
    }
}
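Example #25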
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /*
     * Don't try mess with an offline CPU.
     */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
              ? VERR_CPU_NOT_FOUND
              : VERR_CPU_OFFLINE;

    /*
     * Use the broadcast IPI routine if there are no more than two CPUs online,
     * or if the current IRQL is unsuitable for KeWaitForSingleObject.
     */
    int rc;
    uint32_t cHits = 0;
    if (   g_pfnrtKeIpiGenericCall
        && (   RTMpGetOnlineCount() <= 2
            || KeGetCurrentIrql()   > APC_LEVEL)
       )
    {
        rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper,
                                       idCpu, NIL_RTCPUID, &cHits);
        if (RT_SUCCESS(rc))
        {
            if (cHits == 1)
                return VINF_SUCCESS;
            rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
        }
        return rc;
    }

#if 0
    rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu, NIL_RTCPUID, &cHits);
    if (RT_SUCCESS(rc))
    {
        if (cHits == 1)
            return VINF_SUCCESS;
        rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
    }
    return rc;

#else
    /*
     * Initialize the argument package and the objects within it.
     * The package is reference counted to avoid unnecessary spinning while
     * synchronizing cleanup, and to prevent stack corruption.
     */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pArgs), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;
    pArgs->fExecuting             = false;
    pArgs->fDone                  = false;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    pArgs->CallbackArgs.idCpu     = idCpu;
    pArgs->CallbackArgs.cHits     = 0;
    pArgs->CallbackArgs.cRefs     = 2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
    KeSetImportanceDpc(&pArgs->Dpc, HighImportance);
    KeSetTargetProcessorDpc(&pArgs->Dpc, (int)idCpu);

    /*
     * Disable preemption while we check the current processor and insert the DPC.
     */
    KIRQL bOldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
    ASMCompilerBarrier(); /* paranoia */

    if (RTMpCpuId() == idCpu)
    {
        /* Just execute the callback on the current CPU. */
        pfnWorker(idCpu, pvUser1, pvUser2);
        KeLowerIrql(bOldIrql);

        ExFreePool(pArgs);
        return VINF_SUCCESS;
    }

    /* Different CPU, so queue it if the CPU is still online. */
    if (RTMpIsCpuOnline(idCpu))
    {
        BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
        Assert(fRc);
        KeLowerIrql(bOldIrql);

        uint64_t const nsRealWaitTS = RTTimeNanoTS();

        /*
         * Wait actively for a while in case the CPU/thread responds quickly.
         */
        uint32_t cLoopsLeft = 0x20000;
        while (cLoopsLeft-- > 0)
        {
            if (pArgs->fDone)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
            ASMNopPause();
        }

        /*
         * It didn't respond, so wait on the event object, poking the CPU if it's slow.
         */
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -10000; /* 1ms */
        NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
        if (rcNt == STATUS_SUCCESS)
        {
            rtMpNtOnSpecificRelease(pArgs);
            return VINF_SUCCESS;
        }

        /* If it hasn't responded yet, maybe poke it and wait some more. */
        if (rcNt == STATUS_TIMEOUT)
        {
#ifndef IPRT_TARGET_NT4
            if (   !pArgs->fExecuting
                && (   g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalSendSoftwareInterrupt
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
                RTMpPokeCpu(idCpu);
#endif

            Timeout.QuadPart = -1280000; /* 128ms */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt == STATUS_SUCCESS)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
        }

        /*
         * Something weird is happening; try to bail out.
         */
        if (KeRemoveQueueDpc(&pArgs->Dpc))
        {
            ExFreePool(pArgs); /* DPC was still queued, so we can return without further ado. */
            LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        else
        {
            /* DPC is running, wait a good while for it to complete. */
            LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));

            Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt != STATUS_SUCCESS)
                LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        rc = RTErrConvertFromNtStatus(rcNt);
    }
    else
    {
        /* CPU is offline.*/
        KeLowerIrql(bOldIrql);
        rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    }

    rtMpNtOnSpecificRelease(pArgs);
    return rc;
#endif
}
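A hedged usage sketch for the routine above: the worker follows the PFNRTMPWORKER shape from IPRT, and the names sampleWorker and sampleRunOnCpu are illustrative, not part of VirtualBox.

/* Illustrative only: a PFNRTMPWORKER-shaped callback and a caller. */
static DECLCALLBACK(void) sampleWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    /* Runs on the target CPU at elevated IRQL (or in IPI context), so only
     * non-blocking, non-paged work is safe here. */
    uint32_t *pcExecuted = (uint32_t *)pvUser1;
    ASMAtomicIncU32(pcExecuted);
    NOREF(idCpu); NOREF(pvUser2);
}

static int sampleRunOnCpu(RTCPUID idCpu)
{
    uint32_t cExecuted = 0;
    /* Returns VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND without running the
     * worker if the CPU isn't usable. */
    int rc = RTMpOnSpecific(idCpu, sampleWorker, &cExecuted, NULL);
    return rc;
}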
Example #26
LARGE_INTEGER
KeQueryPerformanceCounter (
    OUT PLARGE_INTEGER Frequency OPTIONAL
    )

/*++

Routine Description:

    This routine returns the current performance counter value and the
    performance counter frequency.

Arguments:

    Frequency - Supplies an optional pointer to a variable which receives
        the performance counter frequency in Hertz.

Return Value:

    The current performance counter value is returned as the function
    value.

--*/

{

    ULONG CurrentCount;
    KIRQL OldIrql;
    LARGE_INTEGER PerformanceCounter;

    //
    // Raise IRQL to PROFILE_LEVEL, read the current value of the count
    // register, read the performance counter, and lower IRQL to its
    // previous value.
    //
    // N.B. The minimum, maximum, and default values for the profile
    //      count are chosen such that the count register only overflows
    //      after about 20 seconds at 50 MHz. Therefore, there is never
    //      a problem with the counter wrapping in the following code.
    //

    KeRaiseIrql(PROFILE_LEVEL, &OldIrql);
    CurrentCount = HalpReadCountRegister();
    PerformanceCounter = HalpPerformanceCounter[KeGetCurrentPrcb()->Number];
    KeLowerIrql(OldIrql);

    //
    // If the frequency parameter is specified, then return the performance
    // counter frequency as the current system time frequency.
    //

    if (ARGUMENT_PRESENT(Frequency) != FALSE) {
        Frequency->QuadPart = HalpProfileCountRate;
    }

    //
    // Return the value of the performance counter.
    //

    PerformanceCounter.QuadPart += CurrentCount;
    return PerformanceCounter;
}
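A short illustrative sketch (not part of the HAL) showing the usual way the returned counter and frequency are combined to measure an elapsed interval; the routine name is a placeholder.

/* Illustrative sketch: time a block of work in microseconds. */
ULONGLONG
SampleElapsedMicroseconds (
    VOID
    )
{
    LARGE_INTEGER Frequency;
    LARGE_INTEGER Start;
    LARGE_INTEGER End;

    Start = KeQueryPerformanceCounter(&Frequency);

    //
    // ... the work being timed goes here ...
    //

    End = KeQueryPerformanceCounter(NULL);

    //
    // Counts * 1,000,000 / counts-per-second gives microseconds.
    //

    return ((ULONGLONG)(End.QuadPart - Start.QuadPart) * 1000000) /
           (ULONGLONG)Frequency.QuadPart;
}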
Example #27
BOOLEAN
HalpHandleExternalInterrupt(
    IN PKINTERRUPT Interrupt,
    IN PVOID ServiceContext,
    IN PVOID TrapFrame
    )

/*++

Routine Description:

    This routine is entered as the result of an interrupt being generated
    via the vector that is connected to an interrupt object that describes
    the SIO device interrupts. Its function is to call the second
    level interrupt dispatch routine and acknowledge the interrupt at the SIO
    controller.

    N.B. This routine is entered and left with external interrupts disabled.


Arguments:

    Interrupt - Supplies a pointer to the interrupt object.

    ServiceContext - Supplies a pointer to the SIO interrupt acknowledge
        register.

    TrapFrame - Supplies a pointer to the trap frame.

Return Value:

    Returns the value returned from the second level routine.

--*/

{
    PSECONDARY_DISPATCH SioHandler;
    PKINTERRUPT SioInterrupt;
    USHORT interruptVector;
    BOOLEAN returnValue;
    UCHAR OldIrql;
    USHORT Isr;
    UCHAR Irql;


    //
    // Read the interrupt vector.
    //

    interruptVector = READ_REGISTER_UCHAR(HalpInterruptBase);

    //
    // Check for an NMI interrupt before we raise IRQL, since we would raise
    // to a bogus level.
    //

    if (interruptVector == 0xFF) {

       HalpHandleMachineCheck(NULL, NULL);
    }

    //
    // check for spurious interrupt
    //

    if (interruptVector == SPURIOUS_VECTOR) {

       WRITE_REGISTER_UCHAR(&((PEISA_CONTROL)HalpIoControlBase)->Interrupt1ControlPort0,
                            0x0B);
       Isr = READ_REGISTER_UCHAR(&((PEISA_CONTROL)HalpIoControlBase)->Interrupt1ControlPort0);
       if (!(Isr & 0x80)) {

       //
       // Spurious interrupt
       //

#if DBG
         //DbgPrint("A spurious interrupt occurred. \n");
         HalpSpuriousInterruptCount++;
#endif
         return(0);

       }
    }

    if (interruptVector > HIGHEST_8259_VECTOR) {
#if DBG
         DbgPrint("A bogus interrupt (0x%02x) occurred. \n", interruptVector);
         HalpBogusInterruptCount++;
#endif
      return (0);
    }

    //
    // Translate vector to IRQL and raise IRQL
    //

    Irql = HalpTranslateVectorToIrql(interruptVector);
    KeRaiseIrql( Irql, &OldIrql);

    //
    // Dispatch to the secondary interrupt service routine.
    //

    SioHandler = (PSECONDARY_DISPATCH)
                    PCR->InterruptRoutine[DEVICE_VECTORS + interruptVector];
    SioInterrupt = CONTAINING_RECORD(SioHandler,
                                      KINTERRUPT,
                                      DispatchCode[0]);

    returnValue = SioHandler(SioInterrupt,
                              SioInterrupt->ServiceContext,
                              TrapFrame
                              );

    //
    // Dismiss the interrupt in the SIO interrupt controllers.
    //

    //
    // If this is a cascaded interrupt then the interrupt must be dismissed in
    // both controllers.
    //

    if (interruptVector & 0x08) {

        WRITE_REGISTER_UCHAR(
            &((PEISA_CONTROL) HalpIoControlBase)->Interrupt2ControlPort0,
            NONSPECIFIC_END_OF_INTERRUPT
            );

    }

    WRITE_REGISTER_UCHAR(
        &((PEISA_CONTROL) HalpIoControlBase)->Interrupt1ControlPort0,
        NONSPECIFIC_END_OF_INTERRUPT
        );

    //
    // Lower IRQL but disable external interrupts.
    // Return to caller with interrupts disabled.
    //


    HalpResetIrqlAfterInterrupt(OldIrql);

    return(returnValue);

}
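The secondary dispatch above goes through PCR->InterruptRoutine[DEVICE_VECTORS + interruptVector], which gets populated when a device driver connects its interrupt object. The sketch below is illustrative only; the vector, IRQL, and affinity values are placeholders, and a real driver would take them from its translated resource list.

/* Illustrative only: connect an ISR so it can be reached through the
   secondary dispatch above. */
static KSPIN_LOCK SampleInterruptLock;
static PKINTERRUPT SampleInterruptObject;

static BOOLEAN
NTAPI
SampleIsr (
    IN PKINTERRUPT Interrupt,
    IN PVOID ServiceContext
    )
{
    UNREFERENCED_PARAMETER(Interrupt);
    UNREFERENCED_PARAMETER(ServiceContext);

    //
    // Acknowledge the device here and return TRUE if the interrupt was ours.
    //

    return TRUE;
}

NTSTATUS
SampleConnectInterrupt (
    VOID
    )
{
    KeInitializeSpinLock(&SampleInterruptLock);
    return IoConnectInterrupt(&SampleInterruptObject,
                              SampleIsr,
                              NULL,                  // ServiceContext
                              &SampleInterruptLock,
                              4,                     // placeholder vector
                              11,                    // placeholder Irql
                              11,                    // SynchronizeIrql
                              Latched,
                              TRUE,                  // ShareVector
                              1,                     // ProcessorEnableMask
                              FALSE);                // FloatingSave
}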
Example #28
/*++
 * @name KiDeliverApc
 * @implemented @NT4
 *
 *     The KiDeliverApc routine is called from IRQL switching code if the
 *     thread is returning from an IRQL >= APC_LEVEL and Kernel-Mode APCs are
 *     pending.
 *
 * @param DeliveryMode
 *        Specifies the current processor mode.
 *
 * @param ExceptionFrame
 *        Pointer to the Exception Frame on non-i386 builds.
 *
 * @param TrapFrame
 *        Pointer to the Trap Frame.
 *
 * @return None.
 *
 * @remarks First, Special APCs are delivered, followed by Kernel-Mode APCs and
 *          User-Mode APCs. Note that the TrapFrame is only valid if the
 *          delivery mode is User-Mode.
 *          Upon entry, this routine executes at APC_LEVEL.
 *
 *--*/
VOID
NTAPI
KiDeliverApc(IN KPROCESSOR_MODE DeliveryMode,
             IN PKEXCEPTION_FRAME ExceptionFrame,
             IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread = KeGetCurrentThread();
    PKPROCESS Process = Thread->ApcState.Process;
    PKTRAP_FRAME OldTrapFrame;
    PLIST_ENTRY ApcListEntry;
    PKAPC Apc;
    KLOCK_QUEUE_HANDLE ApcLock;
    PKKERNEL_ROUTINE KernelRoutine;
    PVOID NormalContext;
    PKNORMAL_ROUTINE NormalRoutine;
    PVOID SystemArgument1;
    PVOID SystemArgument2;
    ASSERT_IRQL_EQUAL(APC_LEVEL);

    /* Save the old trap frame and set current one */
    OldTrapFrame = Thread->TrapFrame;
    Thread->TrapFrame = TrapFrame;

    /* Clear Kernel APC Pending */
    Thread->ApcState.KernelApcPending = FALSE;

    /* Check if Special APCs are disabled */
    if (Thread->SpecialApcDisable) goto Quickie;

    /* Do the Kernel APCs first */
    while (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))
    {
        /* Lock the APC Queue */
        KiAcquireApcLockAtApcLevel(Thread, &ApcLock);

        /* Check if the list became empty now */
        if (IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))
        {
            /* It is, release the lock and break out */
            KiReleaseApcLock(&ApcLock);
            break;
        }

        /* Kernel APC is not pending anymore */
        Thread->ApcState.KernelApcPending = FALSE;

        /* Get the next Entry */
        ApcListEntry = Thread->ApcState.ApcListHead[KernelMode].Flink;
        Apc = CONTAINING_RECORD(ApcListEntry, KAPC, ApcListEntry);

        /* Save Parameters so that it's safe to free the Object in the Kernel Routine*/
        NormalRoutine = Apc->NormalRoutine;
        KernelRoutine = Apc->KernelRoutine;
        NormalContext = Apc->NormalContext;
        SystemArgument1 = Apc->SystemArgument1;
        SystemArgument2 = Apc->SystemArgument2;

        /* Special APC */
        if (!NormalRoutine)
        {
            /* Remove the APC from the list */
            RemoveEntryList(ApcListEntry);
            Apc->Inserted = FALSE;

            /* Release the APC lock */
            KiReleaseApcLock(&ApcLock);

            /* Call the Special APC */
            KernelRoutine(Apc,
                          &NormalRoutine,
                          &NormalContext,
                          &SystemArgument1,
                          &SystemArgument2);

            /* Make sure it returned correctly */
            if (KeGetCurrentIrql() != ApcLock.OldIrql)
            {
                KeBugCheckEx(IRQL_UNEXPECTED_VALUE,
                             (KeGetCurrentIrql() << 16) |
                             (ApcLock.OldIrql << 8),
                             (ULONG_PTR)KernelRoutine,
                             (ULONG_PTR)Apc,
                             (ULONG_PTR)NormalRoutine);
            }
        }
        else
        {
            /* Normal Kernel APC, make sure it's safe to deliver */
            if ((Thread->ApcState.KernelApcInProgress) ||
                (Thread->KernelApcDisable))
            {
                /* Release lock and return */
                KiReleaseApcLock(&ApcLock);
                goto Quickie;
            }

            /* Dequeue the APC */
            RemoveEntryList(ApcListEntry);
            Apc->Inserted = FALSE;

            /* Go back to APC_LEVEL */
            KiReleaseApcLock(&ApcLock);

            /* Call the Kernel APC */
            KernelRoutine(Apc,
                          &NormalRoutine,
                          &NormalContext,
                          &SystemArgument1,
                          &SystemArgument2);

            /* Make sure it returned correctly */
            if (KeGetCurrentIrql() != ApcLock.OldIrql)
            {
                KeBugCheckEx(IRQL_UNEXPECTED_VALUE,
                             (KeGetCurrentIrql() << 16) |
                             (ApcLock.OldIrql << 8),
                             (ULONG_PTR)KernelRoutine,
                             (ULONG_PTR)Apc,
                             (ULONG_PTR)NormalRoutine);
            }

            /* Check if there still is a Normal Routine */
            if (NormalRoutine)
            {
                /* At Passive Level, an APC can be preempted by a Special APC */
                Thread->ApcState.KernelApcInProgress = TRUE;
                KeLowerIrql(PASSIVE_LEVEL);

                /* Call and Raise IRQL back to APC_LEVEL */
                NormalRoutine(NormalContext, SystemArgument1, SystemArgument2);
                KeRaiseIrql(APC_LEVEL, &ApcLock.OldIrql);
            }

            /* Set Kernel APC in progress to false and loop again */
            Thread->ApcState.KernelApcInProgress = FALSE;
        }
    }

    /* Now we do the User APCs */
    if ((DeliveryMode == UserMode) &&
        !(IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])) &&
         (Thread->ApcState.UserApcPending))
    {
        /* Lock the APC Queue */
        KiAcquireApcLockAtApcLevel(Thread, &ApcLock);

        /* It's not pending anymore */
        Thread->ApcState.UserApcPending = FALSE;

        /* Check if the list became empty now */
        if (IsListEmpty(&Thread->ApcState.ApcListHead[UserMode]))
        {
            /* It is, release the lock and break out */
            KiReleaseApcLock(&ApcLock);
            goto Quickie;
        }

        /* Get the actual APC object */
        ApcListEntry = Thread->ApcState.ApcListHead[UserMode].Flink;
        Apc = CONTAINING_RECORD(ApcListEntry, KAPC, ApcListEntry);

        /* Save Parameters so that it's safe to free the Object in the Kernel Routine*/
        NormalRoutine = Apc->NormalRoutine;
        KernelRoutine = Apc->KernelRoutine;
        NormalContext = Apc->NormalContext;
        SystemArgument1 = Apc->SystemArgument1;
        SystemArgument2 = Apc->SystemArgument2;

        /* Remove the APC from Queue, and release the lock */
        RemoveEntryList(ApcListEntry);
        Apc->Inserted = FALSE;
        KiReleaseApcLock(&ApcLock);

        /* Call the kernel routine */
        KernelRoutine(Apc,
                      &NormalRoutine,
                      &NormalContext,
                      &SystemArgument1,
                      &SystemArgument2);

        /* Check if there's no normal routine */
        if (!NormalRoutine)
        {
            /* Check if more User APCs are Pending */
            KeTestAlertThread(UserMode);
        }
        else
        {
            /* Set up the Trap Frame and prepare for Execution in NTDLL.DLL */
            KiInitializeUserApc(ExceptionFrame,
                                TrapFrame,
                                NormalRoutine,
                                NormalContext,
                                SystemArgument1,
                                SystemArgument2);
        }
    }

Quickie:
    /* Make sure we're still in the same process */
    if (Process != Thread->ApcState.Process)
    {
        /* Erm, we got attached or something! BAD! */
        KeBugCheckEx(INVALID_PROCESS_ATTACH_ATTEMPT,
                     (ULONG_PTR)Process,
                     (ULONG_PTR)Thread->ApcState.Process,
                     Thread->ApcStateIndex,
                     KeGetCurrentPrcb()->DpcRoutineActive);
    }

    /* Restore the trap frame */
    Thread->TrapFrame = OldTrapFrame;
}
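For context, a sketch of the producer side of this path: a special kernel APC (one with no normal routine) queued against a thread is exactly what the kernel-mode loop above dequeues. KeInitializeApc and KeInsertQueueApc are ntoskrnl exports that are not in the public WDK headers, so the prototypes, the KAPC_ENVIRONMENT value, the pool tag, and the helper names below are assumptions drawn from the same codebase as the routine above and may differ between kernel versions.

/* Sketch only: relies on non-public kernel interfaces as noted above. */
static VOID
NTAPI
SampleKernelRoutine(IN PKAPC Apc,
                    IN OUT PKNORMAL_ROUTINE *NormalRoutine,
                    IN OUT PVOID *NormalContext,
                    IN OUT PVOID *SystemArgument1,
                    IN OUT PVOID *SystemArgument2)
{
    /* Runs at APC_LEVEL in the target thread. Freeing the KAPC here is safe
       because KiDeliverApc copies the fields it needs before calling us. */
    ExFreePoolWithTag(Apc, 'cpAS');
}

static VOID
SampleQueueSpecialApc(IN PKTHREAD Thread)
{
    PKAPC Apc = ExAllocatePoolWithTag(NonPagedPool, sizeof(KAPC), 'cpAS');
    if (!Apc) return;

    /* Passing no normal routine makes this a special kernel APC, so only
       the kernel routine above will be called during delivery. */
    KeInitializeApc(Apc,
                    Thread,
                    OriginalApcEnvironment,
                    SampleKernelRoutine,
                    NULL,               /* no rundown routine */
                    NULL,               /* no normal routine */
                    KernelMode,
                    NULL);              /* no normal context */

    if (!KeInsertQueueApc(Apc, NULL, NULL, IO_NO_INCREMENT))
    {
        /* Queuing can fail if APC queuing is disabled for the thread. */
        ExFreePoolWithTag(Apc, 'cpAS');
    }
}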
Example #29
VOID
NTAPI
DbgDumpGdiHandleTable(void)
{
    static int leak_reported = 0;
    int i, j, idx, nTraces = 0;
    KIRQL OldIrql;

    if (leak_reported)
    {
        DPRINT1("gdi handle abusers already reported!\n");
        return;
    }

    leak_reported = 1;
    DPRINT1("reporting gdi handle abusers:\n");

    /* We've got serious business to do */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

    /* Step through GDI handle table and find out who our culprit is... */
    for (idx = RESERVE_ENTRIES_COUNT; idx < GDI_HANDLE_COUNT; idx++)
    {
        /* If the handle is free, continue */
        if (!IS_HANDLE_VALID(idx)) continue;

        /* Step through all previous backtraces */
        for (j = 0; j < nTraces; j++)
        {
            /* Check if the backtrace matches */
            if (CompareBacktraces(idx, AllocatorTable[j].idx))
            {
                /* It matches, increment count and break out */
                AllocatorTable[j].count++;
                break;
            }
        }

        /* Did we find a new backtrace? */
        if (j == nTraces)
        {
            /* Break out, if we reached the maximum */
            if (nTraces == MAX_BACKTRACES) break;

            /* Initialize this entry */
            AllocatorTable[j].idx = idx;
            AllocatorTable[j].count = 1;
            nTraces++;
        }
    }

    /* Sort the table by allocation count, biggest first (simple insertion sort) */
    for (i = 1; i < nTraces; i++)
    {
        struct DbgOpenGDIHandle temp;

        temp = AllocatorTable[i];
        j = i;
        while (j > 0 && AllocatorTable[j-1].count < temp.count)
        {
            AllocatorTable[j] = AllocatorTable[j-1];
            j--;
        }
        AllocatorTable[j] = temp;
    }

    /* Print the worst offenders... */
    DbgPrint("Worst GDI Handle leak offenders (out of %i unique locations):\n", nTraces);
    for (i = 0; i < nTraces && AllocatorTable[i].count > 1; i++)
    {
        /* Print out the allocation count */
        DbgPrint(" %i allocs, type = 0x%lx:\n",
                 AllocatorTable[i].count,
                 GdiHandleTable->Entries[AllocatorTable[i].idx].Type);

        /* Dump the frames */
        KeRosDumpStackFrames(GDIHandleAllocator[AllocatorTable[i].idx], GDI_STACK_LEVELS);
        //KeRosDumpStackFrames(GDIHandleShareLocker[AllocatorTable[i].idx], GDI_STACK_LEVELS);

        /* Print new line for better readability */
        DbgPrint("\n");
    }

    if (i < nTraces)
        DbgPrint("(list terminated - the remaining entries have 1 allocation only)\n");

    KeLowerIrql(OldIrql);

    ASSERT(FALSE);
}
Example #30
/*++////////////////////////////////////////////////////////////////////////////

ClassIoCompleteAssociated()

Routine Description:

    This routine executes when the port driver has completed a request.
    It looks at the SRB status in the completing SRB and if not success
    it checks for valid request sense buffer information. If valid, the
    info is used to update status with more precise message of type of
    error. This routine deallocates the SRB.  This routine is used for
    requests which were built by a split request.  After it has processed
    the request it decrements the Irp count in the master Irp.  If the
    count goes to zero then the master Irp is completed.

Arguments:

    Fdo - Supplies the functional device object which represents the target.

    Irp - Supplies the Irp which has completed.

    Context - Supplies a pointer to the SRB.

Return Value:

    NT status

--*/
NTSTATUS
NTAPI
ClassIoCompleteAssociated(
    IN PDEVICE_OBJECT Fdo,
    IN PIRP Irp,
    IN PVOID Context
    )
{
    PFUNCTIONAL_DEVICE_EXTENSION fdoExtension = Fdo->DeviceExtension;

    PIO_STACK_LOCATION irpStack = IoGetCurrentIrpStackLocation(Irp);
    PSCSI_REQUEST_BLOCK srb = Context;

    PIRP originalIrp = Irp->AssociatedIrp.MasterIrp;
    LONG irpCount;

    NTSTATUS status;
    BOOLEAN retry;

    DBGWARN(("ClassIoCompleteAssociated is OBSOLETE !"));

    //
    // Check SRB status for success of completing request.
    //

    if (SRB_STATUS(srb->SrbStatus) != SRB_STATUS_SUCCESS) {

        ULONG retryInterval;

        DebugPrint((2,"ClassIoCompleteAssociated: IRP %p, SRB %p", Irp, srb));

        //
        // Release the queue if it is frozen.
        //

        if (srb->SrbStatus & SRB_STATUS_QUEUE_FROZEN) {
            ClassReleaseQueue(Fdo);
        }

        retry = ClassInterpretSenseInfo(
                    Fdo,
                    srb,
                    irpStack->MajorFunction,
                    irpStack->MajorFunction == IRP_MJ_DEVICE_CONTROL ?
                        irpStack->Parameters.DeviceIoControl.IoControlCode :
                        0,
                    MAXIMUM_RETRIES -
                        ((ULONG)(ULONG_PTR)irpStack->Parameters.Others.Argument4),
                    &status,
                    &retryInterval);

        //
        // If the status is verify required and this request should
        // bypass verify required, then retry the request.
        //

        if (irpStack->Flags & SL_OVERRIDE_VERIFY_VOLUME &&
            status == STATUS_VERIFY_REQUIRED) {

            status = STATUS_IO_DEVICE_ERROR;
            retry = TRUE;
        }

        if (retry && ((*(PCHAR*)&irpStack->Parameters.Others.Argument4)--)) {

            //
            // Retry request. If the class driver has supplied a StartIo,
            // call it directly for retries.
            //

            DebugPrint((1, "Retry request %p\n", Irp));

            if (PORT_ALLOCATED_SENSE(fdoExtension, srb)) {
                FREE_PORT_ALLOCATED_SENSE_BUFFER(fdoExtension, srb);
            }

            RetryRequest(Fdo, Irp, srb, TRUE, retryInterval);

            return STATUS_MORE_PROCESSING_REQUIRED;
        }

    } else {

        //
        // Set status for successful request.
        //

        status = STATUS_SUCCESS;

    } // end if (SRB_STATUS(srb->SrbStatus) ...

    //
    // Return SRB to list.
    //

    if (PORT_ALLOCATED_SENSE(fdoExtension, srb)) {
        FREE_PORT_ALLOCATED_SENSE_BUFFER(fdoExtension, srb);
    }

    ClassFreeOrReuseSrb(fdoExtension, srb);

    //
    // Set status in completing IRP.
    //

    Irp->IoStatus.Status = status;

    DebugPrint((2, "ClassIoCompleteAssociated: Partial xfer IRP %p\n", Irp));

    //
    // Get next stack location. This original request is unused
    // except to keep track of the completing partial IRPs so the
    // stack location is valid.
    //

    irpStack = IoGetNextIrpStackLocation(originalIrp);

    //
    // Update the status only on error so that if any partial transfer
    // completes with an error, the original IRP will return with an error.
    // If any of the asynchronous partial transfer IRPs fails with an error,
    // then the original IRP will return 0 bytes transferred.
    // This is an optimization for successful transfers.
    //

    if (!NT_SUCCESS(status)) {

        originalIrp->IoStatus.Status = status;
        originalIrp->IoStatus.Information = 0;

        //
        // Set the hard error if necessary.
        //

        if (IoIsErrorUserInduced(status)) {

            //
            // Store DeviceObject for filesystem.
            //

            IoSetHardErrorOrVerifyDevice(originalIrp, Fdo);
        }
    }

    //
    // Decrement and get the count of remaining IRPs.
    //

    irpCount = InterlockedDecrement(
                    (PLONG)&irpStack->Parameters.Others.Argument1);

    DebugPrint((2, "ClassIoCompleteAssociated: Partial IRPs left %d\n",
                irpCount));

    //
    // Ensure that the irpCount doesn't go negative.  This was happening once
    // because classpnp would get confused if it ran out of resources when
    // splitting the request.
    //

    ASSERT(irpCount >= 0);

    if (irpCount == 0) {

        //
        // All partial IRPs have completed.
        //

        DebugPrint((2,
                 "ClassIoCompleteAssociated: All partial IRPs complete %p\n",
                 originalIrp));

        if (fdoExtension->CommonExtension.DriverExtension->InitData.ClassStartIo) {

            //
            // Acquire a separate copy of the remove lock so the debugging code
            // works okay and we don't have to hold up the completion of this
            // irp until after we start the next packet(s).
            //

            KIRQL oldIrql;
            UCHAR uniqueAddress;
            ClassAcquireRemoveLock(Fdo, (PIRP)&uniqueAddress);
            ClassReleaseRemoveLock(Fdo, originalIrp);
            ClassCompleteRequest(Fdo, originalIrp, IO_DISK_INCREMENT);

            KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
            IoStartNextPacket(Fdo, FALSE);
            KeLowerIrql(oldIrql);

            ClassReleaseRemoveLock(Fdo, (PIRP)&uniqueAddress);

        } else {

            //
            // just complete this request
            //

            ClassReleaseRemoveLock(Fdo, originalIrp);
            ClassCompleteRequest(Fdo, originalIrp, IO_DISK_INCREMENT);

        }

    }

    //
    // Deallocate IRP and indicate the I/O system should not attempt any more
    // processing.
    //

    IoFreeIrp(Irp);
    return STATUS_MORE_PROCESSING_REQUIRED;

} // end ClassIoCompleteAssociated()
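A hedged sketch of the other side of the contract described above: whoever splits the original request is expected to store the number of associated IRPs in the master IRP's next stack location (Parameters.Others.Argument1), which is what the InterlockedDecrement here counts down. The fragment below is illustrative and is not the classpnp splitting code itself; the routine name is a placeholder.

/* Illustrative fragment only: prime a master IRP before sending its
   associated IRPs, matching what ClassIoCompleteAssociated expects. */
VOID
SamplePrimeMasterIrp(
    IN PIRP MasterIrp,
    IN ULONG AssociatedIrpCount
    )
{
    PIO_STACK_LOCATION nextStack = IoGetNextIrpStackLocation(MasterIrp);

    //
    // Count of outstanding partial IRPs. The completion routine above
    // decrements this and completes the master IRP when it reaches zero.
    //

    nextStack->Parameters.Others.Argument1 =
        (PVOID)(ULONG_PTR)AssociatedIrpCount;

    //
    // Start with a clean status; a failing partial transfer overrides it.
    //

    MasterIrp->IoStatus.Status = STATUS_SUCCESS;
    MasterIrp->IoStatus.Information = 0;
}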