Example #1
VOID
FASTCALL
MiDecrementShareCount (
    IN PMMPFN Pfn1,
    IN PFN_NUMBER PageFrameIndex
    )

/*++

Routine Description:

    This routine decrements the share count within the PFN element
    for the specified physical page.  If the share count becomes
    zero, the corresponding PTE is converted to the transition state,
    the reference count is decremented, and the ValidPte count of the
    PTE frame is decremented.

Arguments:

    Pfn1 - Supplies the PFN database entry to decrement.

    PageFrameIndex - Supplies the physical page number for which to
                     decrement the share count.

Return Value:

    None.

Environment:

    Must be holding the PFN database lock with APCs disabled.

--*/

{
    ULONG FreeBit;
    MMPTE TempPte;
    PMMPTE PointerPte;
    PEPROCESS Process;

    ASSERT (PageFrameIndex > 0);
    ASSERT (MI_IS_PFN (PageFrameIndex));
    ASSERT (Pfn1 == MI_PFN_ELEMENT (PageFrameIndex));

    if (Pfn1->u3.e1.PageLocation != ActiveAndValid &&
        Pfn1->u3.e1.PageLocation != StandbyPageList) {
        KeBugCheckEx (PFN_LIST_CORRUPT,
                      0x99,
                      PageFrameIndex,
                      Pfn1->u3.e1.PageLocation,
                      0);
    }

    Pfn1->u2.ShareCount -= 1;

    ASSERT (Pfn1->u2.ShareCount < 0xF000000);

    if (Pfn1->u2.ShareCount == 0) {

        if (PERFINFO_IS_GROUP_ON(PERF_MEMORY)) {
            PERFINFO_PFN_INFORMATION PerfInfoPfn;

            PerfInfoPfn.PageFrameIndex = PageFrameIndex;
            PerfInfoLogBytes(PERFINFO_LOG_TYPE_ZEROSHARECOUNT, 
                             &PerfInfoPfn, 
                             sizeof(PerfInfoPfn));
        }

        //
        // The share count is now zero, decrement the reference count
        // for the PFN element and turn the referenced PTE into
        // the transition state if it refers to a prototype PTE.
        // PTEs which are not prototype PTEs do not need to be placed
        // into transition as they are placed in transition when
        // they are removed from the working set (working set free routine).
        //

        //
        // If the PTE referenced by this PFN element is actually
        // a prototype PTE, it must be mapped into hyperspace and
        // then operated on.
        //

        if (Pfn1->u3.e1.PrototypePte == 1) {

            if (MiIsProtoAddressValid (Pfn1->PteAddress)) {
                Process = NULL;
                PointerPte = Pfn1->PteAddress;
            }
            else {

                //
                // The address is not valid in this process, map it into
                // hyperspace so it can be operated upon.
                //

                Process = PsGetCurrentProcess ();
                PointerPte = (PMMPTE) MiMapPageInHyperSpaceAtDpc (Process,
                                                                  Pfn1->u4.PteFrame);
                PointerPte = (PMMPTE)((PCHAR)PointerPte +
                                        MiGetByteOffset(Pfn1->PteAddress));
            }

            TempPte = *PointerPte;

            MI_MAKE_VALID_PTE_TRANSITION (TempPte,
                                          Pfn1->OriginalPte.u.Soft.Protection);
            MI_WRITE_INVALID_PTE (PointerPte, TempPte);

            if (Process != NULL) {
                MiUnmapPageInHyperSpaceFromDpc (Process, PointerPte);
            }

            //
            // There is no need to flush the translation buffer at this
            // time as we only invalidated a prototype PTE.
            //
        }

        //
        // Change the page location to inactive (from active and valid).
        //

        Pfn1->u3.e1.PageLocation = TransitionPage;

        //
        // Decrement the reference count as the share count is now zero.
        //

        MM_PFN_LOCK_ASSERT();

        ASSERT (Pfn1->u3.e2.ReferenceCount != 0);

        if (Pfn1->u3.e2.ReferenceCount == 1) {

            if (MI_IS_PFN_DELETED (Pfn1)) {

                Pfn1->u3.e2.ReferenceCount = 0;

                //
                // There is no referenced PTE for this page, delete the page
                // file space (if any), and place the page on the free list.
                //

                ASSERT (Pfn1->OriginalPte.u.Soft.Prototype == 0);

                FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);

                if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
                    MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
                }

                //
                // Temporarily mark the frame as active and valid so that
                // MiIdentifyPfn knows it is safe to walk back through the
                // containing frames for a more accurate identification.
                // Note the page will be immediately re-marked as it is
                // inserted into the freelist.
                //

                Pfn1->u3.e1.PageLocation = ActiveAndValid;

                MiInsertPageInFreeList (PageFrameIndex);
            }
            else {
                MiDecrementReferenceCount (Pfn1, PageFrameIndex);
            }
        }
        else {
            InterlockedDecrementPfn ((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
    }

    return;
}
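
To make the counting scheme above concrete, here is a minimal user-mode sketch of the policy (plain C; PFN_MODEL, DecrementShareCount, and OnFreeList are hypothetical stand-ins, not WRK names): every mapping of the page holds one share count unit, a nonzero share count in turn holds one reference count unit, and the page can reach the free list only after both counts drain.

#include <assert.h>
#include <stdio.h>

typedef struct _PFN_MODEL {
    unsigned ShareCount;      /* number of valid PTEs mapping the page  */
    unsigned ReferenceCount;  /* pins the physical page itself          */
    int OnFreeList;           /* set once both counts have reached zero */
} PFN_MODEL;

static void DecrementShareCount(PFN_MODEL *Pfn)
{
    assert(Pfn->ShareCount != 0);
    Pfn->ShareCount -= 1;

    if (Pfn->ShareCount == 0) {
        /* The last mapping is gone: release the reference that the
           nonzero share count was holding (cf. MiDecrementReferenceCount). */
        assert(Pfn->ReferenceCount != 0);
        Pfn->ReferenceCount -= 1;
        if (Pfn->ReferenceCount == 0) {
            Pfn->OnFreeList = 1;   /* stand-in for MiInsertPageInFreeList */
        }
    }
}

int main(void)
{
    PFN_MODEL Pfn = { 2, 1, 0 };
    DecrementShareCount(&Pfn);     /* still shared: nothing is released   */
    DecrementShareCount(&Pfn);     /* last sharer: page goes to free list */
    printf("OnFreeList=%d\n", Pfn.OnFreeList);
    return 0;
}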
Example #2
VOID
KiCalibrateTimeAdjustment (
    PADJUST_INTERRUPT_TIME_CONTEXT Adjust
    )

/*++

Routine Description:

    This function calibrates the adjustment of time on all processors.

Arguments:

    Adjust - Supplies the operation context.

Return Value:

    None.

--*/

{

    ULONG cl;
    ULONG divisor;
    BOOLEAN Enable;
    LARGE_INTEGER InterruptTime;
    ULARGE_INTEGER li;
    PERFINFO_PO_CALIBRATED_PERFCOUNTER LogEntry;
    LARGE_INTEGER NewTickCount;
    ULONG NewTickOffset;
    LARGE_INTEGER PerfCount;
    LARGE_INTEGER PerfFreq;
    LARGE_INTEGER SetTime;

    //
    // As each processor arrives, decrement the remaining processor count. If
    // this is the last processor to arrive, then compute the time change, and
    // signal all processors when to apply the performance counter change.
    //

    if (InterlockedDecrement((PLONG)&Adjust->KiNumber)) {
        Enable = KeDisableInterrupts();

        //
        // It is possible to deadlock if one or more of the other processors
        // receives and processes a freeze request while this processor has
        // interrupts disabled. Poll for a freeze request until all processors
        // are known to be in this code.
        //

        do {
            KiPollFreezeExecution();
        } while (Adjust->KiNumber != (ULONG)-1);

        //
        // Wait to perform the time set.
        //

        while (Adjust->Barrier);

    } else {

        //
        // Set the timer expiration DPC to scan the timer queues once for any
        // expired timers.
        //

        KeRemoveQueueDpc(&KiTimerExpireDpc);
        KeInsertQueueDpc(&KiTimerExpireDpc,
                         ULongToPtr(KiQueryLowTickCount() - TIMER_TABLE_SIZE),
                         NULL);

        //
        // Disable interrupts and indicate that this processor is now
        // in final portion of this code.
        //

        Enable = KeDisableInterrupts();
        InterlockedDecrement((PLONG) &Adjust->KiNumber);

        //
        // Adjust Interrupt Time.
        //

        InterruptTime.QuadPart = KeQueryInterruptTime() + Adjust->Adjustment;
        SetTime.QuadPart = Adjust->Adjustment;

        //
        // Query the current performance counter value and its frequency.
        //

        PerfCount = KeQueryPerformanceCounter(&PerfFreq);

        //
        // Compute performance counter for current time.
        //
        // Multiply SetTime * PerfCount and obtain 96-bit result in cl,
        // li.LowPart, li.HighPart.  Then divide the 96-bit result by
        // 10,000,000 to get new performance counter value.
        //

        li.QuadPart = RtlEnlargedUnsignedMultiply((ULONG)SetTime.LowPart,
                                                  (ULONG)PerfFreq.LowPart).QuadPart;

        cl = li.LowPart;
        li.QuadPart =
            li.HighPart + RtlEnlargedUnsignedMultiply((ULONG)SetTime.LowPart,
                                                      (ULONG)PerfFreq.HighPart).QuadPart;

        li.QuadPart =
            li.QuadPart + RtlEnlargedUnsignedMultiply((ULONG)SetTime.HighPart,
                                                      (ULONG)PerfFreq.LowPart).QuadPart;

        li.HighPart = li.HighPart + SetTime.HighPart * PerfFreq.HighPart;
        divisor = 10000000;
        Adjust->NewCount.HighPart = RtlEnlargedUnsignedDivide(li,
                                                              divisor,
                                                              &li.HighPart);

        li.LowPart = cl;
        Adjust->NewCount.LowPart = RtlEnlargedUnsignedDivide(li,
                                                             divisor,
                                                             NULL);

        Adjust->NewCount.QuadPart += PerfCount.QuadPart;

        //
        // Compute tick count and tick offset for current interrupt time.
        //

        NewTickCount = RtlExtendedLargeIntegerDivide(InterruptTime,
                                                     KeMaximumIncrement,
                                                     &NewTickOffset);

        //
        // Apply changes to interrupt time, tick count, tick offset, and the
        // performance counter.
        //

        KiTickOffset = KeMaximumIncrement - NewTickOffset;
        KeInterruptTimeBias += Adjust->Adjustment;
        SharedUserData->TickCount.High2Time = NewTickCount.HighPart;

#if defined(_WIN64)

        SharedUserData->TickCountQuad = NewTickCount.QuadPart;

#else

        SharedUserData->TickCount.LowPart   = NewTickCount.LowPart;
        SharedUserData->TickCount.High1Time = NewTickCount.HighPart;

#endif

        //
        // N.B. There is no global tick count variable on AMD64.
        //

#if defined(_X86_)

        KeTickCount.High2Time = NewTickCount.HighPart;
        KeTickCount.LowPart   = NewTickCount.LowPart;
        KeTickCount.High1Time = NewTickCount.HighPart;

#endif

#if defined(_AMD64_)

        SharedUserData->InterruptTime.High2Time = InterruptTime.HighPart;
        *((volatile ULONG64 *)&SharedUserData->InterruptTime) = InterruptTime.QuadPart;

#else

        SharedUserData->InterruptTime.High2Time = InterruptTime.HighPart;
        SharedUserData->InterruptTime.LowPart   = InterruptTime.LowPart;
        SharedUserData->InterruptTime.High1Time = InterruptTime.HighPart;

#endif

        //
        // Apply the performance counter change.
        //

        Adjust->Barrier = 0;
    }

    KeGetCurrentPrcb()->TickOffset = KiTickOffset;

#if defined(_AMD64_)

    KeGetCurrentPrcb()->MasterOffset = KiTickOffset;

#endif

    HalCalibratePerformanceCounter((LONG volatile *)&Adjust->HalNumber,
                                   (ULONGLONG)Adjust->NewCount.QuadPart);

    //
    // Log an event that the performance counter has been calibrated
    // properly and indicate the new performance counter value.
    //

    if (PERFINFO_IS_GROUP_ON(PERF_POWER)) {
        LogEntry.PerformanceCounter = KeQueryPerformanceCounter(NULL);
        PerfInfoLogBytes(PERFINFO_LOG_TYPE_PO_CALIBRATED_PERFCOUNTER,
                         &LogEntry,
                         sizeof(LogEntry));
    }

    KeEnableInterrupts(Enable);
    return;
}
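
The multiply/divide sequence above computes NewCount = SetTime * PerfFreq / 10,000,000, i.e. it converts a 100ns-unit time adjustment into performance counter ticks using only 32x32->64 multiplies and two chained 64/32 divides. Below is a standalone sketch of the same arithmetic (ScaleTimeToCounts is a hypothetical name; like the kernel code, it assumes the product fits in 96 bits and the quotient in 64):

#include <stdint.h>
#include <stdio.h>

static uint64_t ScaleTimeToCounts(uint64_t SetTime, uint64_t PerfFreq)
{
    uint32_t sLo = (uint32_t)SetTime,  sHi = (uint32_t)(SetTime >> 32);
    uint32_t fLo = (uint32_t)PerfFreq, fHi = (uint32_t)(PerfFreq >> 32);

    /* 96-bit product: bits 31..0 in lo32, bits 95..32 in hi64, matching
       the kernel's cl / li.LowPart / li.HighPart decomposition. */
    uint64_t lo   = (uint64_t)sLo * fLo;
    uint32_t lo32 = (uint32_t)lo;
    uint64_t hi64 = (lo >> 32)
                  + (uint64_t)sLo * fHi
                  + (uint64_t)sHi * fLo
                  + ((uint64_t)sHi * fHi << 32);

    /* Divide the 96-bit value by 10,000,000 (100ns units per second) in
       two steps, like the paired RtlEnlargedUnsignedDivide calls. */
    const uint32_t divisor = 10000000;
    uint64_t qHi = hi64 / divisor;             /* upper quotient bits */
    uint64_t rem = hi64 % divisor;
    uint64_t qLo = ((rem << 32) | lo32) / divisor;
    return (qHi << 32) | (uint32_t)qLo;
}

int main(void)
{
    /* One second (10,000,000 * 100ns) at a 3,579,545 Hz counter
       should convert to exactly 3,579,545 ticks. */
    printf("%llu\n", (unsigned long long)ScaleTimeToCounts(10000000ULL, 3579545ULL));
    return 0;
}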
Example #3
VOID
FASTCALL
PerfProfileInterrupt(
    IN KPROFILE_SOURCE Source,
    IN PVOID InstructionPointer
    )
/*++

Routine Description:

    Implements instruction profiling.  If the source is not the one we're
    sampling on, we return.  If caching is off, we write any incoming
    samples immediately to the log.  If caching is on, we wrap the cache
    update with writes to the two versions so that the flush routine can
    tell whether it has a valid buffer.

Arguments:
    
    Source - Type of profile interrupt

    InstructionPointer - IP at the time of the interrupt

Return Value:

    None.

--*/
{
    ULONG i;
    PERFINFO_SAMPLED_PROFILE_INFORMATION SampleData;
#ifdef _X86_
    ULONG_PTR TwiddledIP;
#endif // _X86_
    ULONG ThreadId;

    if (!PERFINFO_IS_GROUP_ON(PERF_PROFILE) ||
        (Source != PerfInfoProfileSourceActive)
        ) {
        //
        // We don't handle multiple sources.
        //
        return;
    }

    ThreadId = HandleToUlong(PsGetCurrentThread()->Cid.UniqueThread);

    if (!PerfInfoSampledProfileCaching ||
        PerfInfoSampledProfileFlushInProgress != 0) {
        //
        // Caching is off or a flush is in progress. Log the sample and return.
        //
        SampleData.ThreadId = ThreadId;
        SampleData.InstructionPointer = InstructionPointer;
        SampleData.Count = 1;

        PerfInfoLogBytes(PERFINFO_LOG_TYPE_SAMPLED_PROFILE,
                         &SampleData,
                         sizeof(PERFINFO_SAMPLED_PROFILE_INFORMATION)
                         );
        return;
    }

#ifdef _X86_
    //
    // Clear the low two bits to have more cache hits for loops.  Don't waste
    // cycles on other architectures.
    //
    TwiddledIP = (ULONG_PTR)InstructionPointer & ~3;
#endif // _X86_

    //
    // Initial walk through the instruction pointer cache. Bump the count
    // if the address is already in the cache.
    //
    for (i = 0; i < PerfProfileCache.Entries; i++) {

        if ((PerfProfileCache.Sample[i].ThreadId == ThreadId) &&
#ifdef _X86_
            (((ULONG_PTR)PerfProfileCache.Sample[i].InstructionPointer & ~3) == TwiddledIP)
#else
            (PerfProfileCache.Sample[i].InstructionPointer == InstructionPointer)
#endif // _X86_
            ) {
            //
            // If we find the instruction pointer in the cache, bump the count
            //

            PerfProfileCache.Sample[i].Count++;
            return;
        }
    }
    if (PerfProfileCache.Entries < PERFINFO_SAMPLED_PROFILE_CACHE_MAX) {
        //
        // The cache is not full; use the next free slot (i == Entries)
        // for this instruction pointer.
        //

        PerfProfileCache.Sample[i].ThreadId = ThreadId;
        PerfProfileCache.Sample[i].InstructionPointer = InstructionPointer;
        PerfProfileCache.Sample[i].Count = 1;
        PerfProfileCache.Entries++;
        return;
    }

    //
    // The cache is full: flush it, then start a new cache with this sample.
    //
    PerfInfoLogBytes(PERFINFO_LOG_TYPE_SAMPLED_PROFILE_CACHE,
                     &PerfProfileCache,
                     sizeof(PERFINFO_SAMPLED_PROFILE_CACHE)
                     );

    PerfProfileCache.Sample[0].ThreadId = ThreadId;
    PerfProfileCache.Sample[0].InstructionPointer = InstructionPointer;
    PerfProfileCache.Sample[0].Count = 1;
    PerfProfileCache.Entries = 1;
    return;
}