ULONG ParaNdis_GetIndexFromAffinity(KAFFINITY affinity)
{
    DWORD index = 0;
    BOOLEAN result;
#ifdef _WIN64
    result = BitScanForward64(&index, affinity);
#else
    result = BitScanForward(&index, affinity);
#endif
    if (result && ((KAFFINITY)1 << index) == affinity)
    {
        return index;
    }
    return INVALID_PROCESSOR_INDEX;
}
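For context, a minimal usage sketch of the helper above (hypothetical values; INVALID_PROCESSOR_INDEX is whatever sentinel the driver defines):

/* Hypothetical caller: the helper accepts only single-bit affinity masks. */
KAFFINITY single = (KAFFINITY)1 << 3;                   /* CPU 3 only */
ULONG idx = ParaNdis_GetIndexFromAffinity(single);      /* returns 3 */
ULONG bad = ParaNdis_GetIndexFromAffinity(single | 1);  /* two bits set -> INVALID_PROCESSOR_INDEX */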
Example n. 2
// Reads and stores all MTRRs to set a correct memory type for EPT
_Use_decl_annotations_ void EptInitializeMtrrEntries() {
	PAGED_CODE();

	int index = 0;
	MtrrData *mtrr_entries = g_eptp_mtrr_entries;

	// Get and store the default memory type
	Ia32MtrrDefaultTypeMsr default_type = { UtilReadMsr64(Msr::kIa32MtrrDefType) };
	g_eptp_mtrr_default_type = default_type.fields.default_mtemory_type;

	// Read MTRR capability
	Ia32MtrrCapabilitiesMsr mtrr_capabilities = {
		UtilReadMsr64(Msr::kIa32MtrrCap) };
	HYPERPLATFORM_LOG_DEBUG(
		"MTRR Default=%lld, VariableCount=%lld, FixedSupported=%lld, FixedEnabled=%lld",
		default_type.fields.default_mtemory_type,
		mtrr_capabilities.fields.variable_range_count,
		mtrr_capabilities.fields.fixed_range_supported,
		default_type.fields.fixed_mtrrs_enabled);

	// Read fixed range MTRRs if supported
	if (mtrr_capabilities.fields.fixed_range_supported &&
		default_type.fields.fixed_mtrrs_enabled) {
		static const auto k64kBase = 0x0;
		static const auto k64kManagedSize = 0x10000;
		static const auto k16kBase = 0x80000;
		static const auto k16kManagedSize = 0x4000;
		static const auto k4kBase = 0xC0000;
		static const auto k4kManagedSize = 0x1000;

		// The kIa32MtrrFix64k00000 manages 8 ranges of memory. The first range
		// starts at 0x0, and each range manages a 64k (0x10000) range. For example,
		//  entry[0]:     0x0 : 0x10000 - 1
		//  entry[1]: 0x10000 : 0x20000 - 1
		//  ...
		//  entry[7]: 0x70000 : 0x80000 - 1
		ULONG64 offset = 0;
		Ia32MtrrFixedRangeMsr fixed_range = {
			UtilReadMsr64(Msr::kIa32MtrrFix64k00000) };
		for (auto memory_type : fixed_range.fields.types) {
			// Each entry manages 64k (0x10000) length.
			ULONG64 base = k64kBase + offset;
			offset += k64kManagedSize;

			// Saves the MTRR
			mtrr_entries[index].enabled = true;
			mtrr_entries[index].fixedMtrr = true;
			mtrr_entries[index].type = memory_type;
			mtrr_entries[index].range_base = base;
			mtrr_entries[index].range_end = base + k64kManagedSize - 1;
			index++;
		}
		NT_ASSERT(k64kBase + offset == k16kBase);

		// kIa32MtrrFix16k80000 manages 8 ranges of memory. The first range starts
		// at 0x80000, and each range manages a 16k (0x4000) range. For example,
		//  entry[0]: 0x80000 : 0x84000 - 1
		//  entry[1]: 0x84000 : 0x88000 - 1
		//  ...
		//  entry[7]: 0x9C000 : 0xA0000 - 1
		// Also, subsequent memory ranges are managed by other MSR,
		// kIa32MtrrFix16kA0000, which manages 8 ranges of memory starting at
		// 0xA0000 in the same fashion. For example,
		//  entry[0]: 0xA0000 : 0xA4000 - 1
		//  entry[1]: 0xA4000 : 0xA8000 - 1
		//  ...
		//  entry[7]: 0xBC000 : 0xC0000 - 1
		offset = 0;
		for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix16k80000);
			msr <= static_cast<ULONG>(Msr::kIa32MtrrFix16kA0000); msr++) {
			fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
			for (auto memory_type : fixed_range.fields.types) {
				// Each entry manages 16k (0x4000) length.
				ULONG64 base = k16kBase + offset;
				offset += k16kManagedSize;

				// Saves the MTRR
				mtrr_entries[index].enabled = true;
				mtrr_entries[index].fixedMtrr = true;
				mtrr_entries[index].type = memory_type;
				mtrr_entries[index].range_base = base;
				mtrr_entries[index].range_end = base + k16kManagedSize - 1;
				index++;
			}
		}
		NT_ASSERT(k16kBase + offset == k4kBase);

		// kIa32MtrrFix4kC0000 manages 8 ranges of memory. The first range starts
		// at 0xC0000, and each range manages a 4k (0x1000) range. For example,
		//  entry[0]: 0xC0000 : 0xC1000 - 1
		//  entry[1]: 0xC1000 : 0xC2000 - 1
		//  ...
		//  entry[7]: 0xC7000 : 0xC8000 - 1
		// Also, subsequent memory ranges are managed by other MSRs such as
		// kIa32MtrrFix4kC8000, kIa32MtrrFix4kD0000, and kIa32MtrrFix4kF8000. Each
		// MSR manages 8 ranges of memory in the same fashion up to 0x100000.
		offset = 0;
		for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix4kC0000);
			msr <= static_cast<ULONG>(Msr::kIa32MtrrFix4kF8000); msr++) {
			fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
			for (auto memory_type : fixed_range.fields.types) {
				// Each entry manages 4k (0x1000) length.
				ULONG64 base = k4kBase + offset;
				offset += k4kManagedSize;

				// Saves the MTRR
				mtrr_entries[index].enabled = true;
				mtrr_entries[index].fixedMtrr = true;
				mtrr_entries[index].type = memory_type;
				mtrr_entries[index].range_base = base;
				mtrr_entries[index].range_end = base + k4kManagedSize - 1;
				index++;
			}
		}
		NT_ASSERT(k4kBase + offset == 0x100000);
	}

	// Read all variable range MTRRs
	for (auto i = 0; i < mtrr_capabilities.fields.variable_range_count; i++) {
		// Read MTRR mask and check if it is in use
		const auto phy_mask = static_cast<ULONG>(Msr::kIa32MtrrPhysMaskN) + i * 2;
		Ia32MtrrPhysMaskMsr mtrr_mask = { UtilReadMsr64(static_cast<Msr>(phy_mask)) };
		if (!mtrr_mask.fields.valid) {
			continue;
		}

		// Get a length this MTRR manages
		ULONG length;
		BitScanForward64(&length, mtrr_mask.fields.phys_mask * PAGE_SIZE);

		// Read MTRR base and calculate a range this MTRR manages
		const auto phy_base = static_cast<ULONG>(Msr::kIa32MtrrPhysBaseN) + i * 2;
		Ia32MtrrPhysBaseMsr mtrr_base = { UtilReadMsr64(static_cast<Msr>(phy_base)) };
		ULONG64 base = mtrr_base.fields.phys_base * PAGE_SIZE;
		ULONG64 end = base + (1ull << length) - 1;

		// Save it
		mtrr_entries[index].enabled = true;
		mtrr_entries[index].fixedMtrr = false;
		mtrr_entries[index].type = mtrr_base.fields.type;
		mtrr_entries[index].range_base = base;
		mtrr_entries[index].range_end = end;
		index++;
	}
}
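The variable-range loop above derives each range's length from its mask: a valid MTRR mask has all bits set from MAXPHYADDR-1 down to log2 of the range size, so the lowest set bit of phys_mask * PAGE_SIZE is log2 of the length. A stand-alone sketch of that calculation with a made-up mask value (MSVC x64 _BitScanForward64 intrinsic; not part of HyperPlatform):

#include <intrin.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical variable MTRR covering a 256 MB (2^28 byte) range,
	   assuming MAXPHYADDR == 36: the full mask is 0xFF0000000 (bits 35:28),
	   and phys_mask stores it shifted right by 12 (page-frame bits only). */
	unsigned __int64 phys_mask = 0xFF0000000ULL >> 12;

	unsigned long length_log2 = 0;
	/* Rebuild the full mask by multiplying with the 4 KB page size; its
	   lowest set bit gives log2 of the range length. */
	if (_BitScanForward64(&length_log2, phys_mask * 0x1000)) {
		unsigned __int64 length = 1ULL << length_log2;
		printf("range length = 0x%llx bytes\n", length);  /* 0x10000000 */
	}
	return 0;
}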
Example n. 3
DECLSPEC_NOINLINE
PKAFFINITY
KiIpiSendRequest (
    IN KAFFINITY TargetSet,
    IN ULONG64 Parameter,
    IN ULONG64 Count,
    IN ULONG64 RequestType
    )

/*++

Routine Description:

    This routine executes the specified immediate request on the specified
    set of processors.

    N.B. This function MUST be called from a non-context switchable state.

Arguments:

   TargetSet - Supplies the set of processors on which the specified
       operation is to be executed.

   Parameter - Supplies the parameter data that will be packed into the
       request summary.

   Count - Supplies the count data that will be packed into the request summary.

   RequestType - Supplies the type of immediate request.

Return Value:

    The address of the appropriate request barrier is returned as the function
    value.

--*/

{

#if !defined(NT_UP)

    PKAFFINITY Barrier;
    PKPRCB Destination;
    ULONG Number;
    KAFFINITY PacketTargetSet;
    PKPRCB Prcb;
    ULONG Processor;
    PREQUEST_MAILBOX RequestMailbox;
    KAFFINITY SetMember;
    PVOID *Start;
    KAFFINITY SummarySet;
    KAFFINITY TargetMember;
    REQUEST_SUMMARY Template;
    PVOID *Virtual;

    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    //
    // Initialize request template.
    //

    Prcb = KeGetCurrentPrcb();
    Template.Summary = 0;
    Template.IpiRequest = RequestType;
    Template.Count = Count;
    Template.Parameter = Parameter;

    //
    // If the target set contains one and only one processor, then use the
    // target set for signal done synchronization. Otherwise, use packet
    // barrier for signal done synchronization.
    //

    Prcb->TargetSet = TargetSet;
    if ((TargetSet & (TargetSet - 1)) == 0) {
        Template.IpiSynchType = TRUE;
        Barrier = (PKAFFINITY)&Prcb->TargetSet;

    } else {
        Prcb->PacketBarrier = 1;
        Barrier = (PKAFFINITY)&Prcb->PacketBarrier;
    }

    //
    // Loop through the target set of processors and set the request summary.
    // If a target processor is already processing a request, then remove
    // that processor from the target set of processor that will be sent an
    // interprocessor interrupt.
    //
    // N.B. It is guaranteed that there is at least one bit set in the target
    //      set.
    //

    Number = Prcb->Number;
    SetMember = Prcb->SetMember;
    SummarySet = TargetSet;
    PacketTargetSet = TargetSet;
    BitScanForward64(&Processor, SummarySet);
    do {
        Destination = KiProcessorBlock[Processor];
        PrefetchForWrite(&Destination->SenderSummary);
        RequestMailbox = &Destination->RequestMailbox[Number];
        PrefetchForWrite(RequestMailbox);
        TargetMember = AFFINITY_MASK(Processor);

        //
        // Make sure that processing of the last IPI is complete before sending
        // another IPI to the same processor.
        // 

        while ((Destination->SenderSummary & SetMember) != 0) {
            KeYieldProcessor();
        }

        //
        // If the request type is flush multiple and the flush entries will
        // fit in the mailbox, then copy the virtual address array to the
        // destination mailbox and change the request type to flush immediate.
        //
        // If the request type is packet ready, then copy the packet to the
        // destination mailbox.
        //

        if (RequestType == IPI_FLUSH_MULTIPLE) {
            Virtual = &RequestMailbox->Virtual[0];
            Start = (PVOID *)Parameter;
            switch (Count) {

                //
                // Copy of up to seven virtual addresses and a conversion of
                // the request type to flush multiple immediate.
                //

            case 7:
                Virtual[6] = Start[6];
            case 6:
                Virtual[5] = Start[5];
            case 5:
                Virtual[4] = Start[4];
            case 4:
                Virtual[3] = Start[3];
            case 3:
                Virtual[2] = Start[2];
            case 2:
                Virtual[1] = Start[1];
            case 1:
                Virtual[0] = Start[0];
                Template.IpiRequest = IPI_FLUSH_MULTIPLE_IMMEDIATE;
                break;
            }

        } else if (RequestType == IPI_PACKET_READY) {
            RequestMailbox->RequestPacket = *(PKREQUEST_PACKET)Parameter;
        }

        RequestMailbox->RequestSummary = Template.Summary;
        if (InterlockedExchangeAdd64((LONG64 volatile *)&Destination->SenderSummary,
                                     SetMember) != 0) {

            TargetSet ^= TargetMember;
        }

        SummarySet ^= TargetMember;
    } while (BitScanForward64(&Processor, SummarySet) != FALSE);

    //
    // Request interprocessor interrupts on the remaining target set of
    // processors.
    //
    //
    // N.B. For packet sends, there exists a potential deadlock situation
    //      unless an IPI is sent to the original set of target processors.
    //      The deadlock arises from the fact that the targets will spin in
    //      their IPI routines.
    //

    if (RequestType == IPI_PACKET_READY) {
        TargetSet = PacketTargetSet;
    }

    if (TargetSet != 0) {
        HalRequestIpi(TargetSet);
    }

    return Barrier;

#else

    UNREFERENCED_PARAMETER(TargetSet);
    UNREFERENCED_PARAMETER(Parameter);
    UNREFERENCED_PARAMETER(Count);
    UNREFERENCED_PARAMETER(RequestType);

    return NULL;

#endif

}
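Both KiIpiSendRequest above and KiIpiProcessRequests below walk an affinity mask one set bit at a time: BitScanForward64 finds the lowest set processor bit, that processor is handled, its bit is cleared with an XOR, and the scan repeats until it fails. A stand-alone sketch of the idiom (MSVC x64 intrinsic; values are illustrative only):

#include <intrin.h>
#include <stdio.h>

int main(void)
{
    unsigned __int64 targetSet = 0x15;   /* example set: processors 0, 2 and 4 */
    unsigned long processor = 0;

    /* The kernel routines use do/while because their target set is
       guaranteed to be non-empty; a plain while suffices for the sketch. */
    while (_BitScanForward64(&processor, targetSet)) {
        printf("visiting processor %lu\n", processor);
        targetSet ^= 1ULL << processor;  /* clear the bit just handled */
    }
    return 0;
}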
Example n. 4
DECLSPEC_NOINLINE
VOID
KiIpiProcessRequests (
    VOID
    )

/*++

Routine Description:

    This routine processes the interprocessor requests that are pending for
    the current processor.

Arguments:

    None.

Return Value:

    None.

--*/

{

#if !defined(NT_UP)

    PVOID *End;
    ULONG64 Number;
    PKPRCB Packet;
    PKPRCB Prcb;
    ULONG Processor;
    REQUEST_SUMMARY Request;
    PREQUEST_MAILBOX RequestMailbox;
    PKREQUEST_PACKET RequestPacket;
    LONG64 SetMember;
    PKPRCB Source;
    KAFFINITY SummarySet;
    KAFFINITY TargetSet;
    PVOID *Virtual;

    //
    // Loop until the sender summary is zero.
    //

    Prcb = KeGetCurrentPrcb();
    TargetSet = ReadForWriteAccess(&Prcb->SenderSummary);
    SetMember = Prcb->SetMember;
    while (TargetSet != 0) {
        SummarySet = TargetSet;
        BitScanForward64(&Processor, SummarySet);
        do {
            Source = KiProcessorBlock[Processor];
            RequestMailbox = &Prcb->RequestMailbox[Processor];
            Request.Summary = RequestMailbox->RequestSummary;

            //
            // If the request type is flush multiple immediate, flush process,
            // flush single, or flush all, then packet done can be signaled
            // before processing the request. Otherwise, the request type must
            // be a packet request, a cache invalidate, or a flush multiple.
            //

            if (Request.IpiRequest <= IPI_FLUSH_ALL) {

                //
                // If the synchronization type is target set, then the IPI was
                // only between two processors and target set should be used
                // for synchronization. Otherwise, packet barrier is used for
                // synchronization.
                //
    
                if (Request.IpiSynchType == 0) {
                    if (SetMember == InterlockedXor64((PLONG64)&Source->TargetSet,
                                                      SetMember)) {
    
                        Source->PacketBarrier = 0;
                    }
    
                } else {
                    Source->TargetSet = 0;
                }

                if (Request.IpiRequest == IPI_FLUSH_MULTIPLE_IMMEDIATE) {
                    Number = Request.Count;
                    Virtual = &RequestMailbox->Virtual[0];
                    End = Virtual + Number;
                    do {
                        KiFlushSingleTb(*Virtual);
                        Virtual += 1;
                    } while (Virtual < End);

                } else if (Request.IpiRequest == IPI_FLUSH_PROCESS) {
                    KiFlushProcessTb();
        
                } else if (Request.IpiRequest == IPI_FLUSH_SINGLE) {
                    KiFlushSingleTb((PVOID)Request.Parameter);
        
                } else {

                    ASSERT(Request.IpiRequest == IPI_FLUSH_ALL);

                    KeFlushCurrentTb();
                }

            } else {

                //
                // If the request type is packet ready, then call the worker
                // function. Otherwise, the request must be either a flush
                // multiple or a cache invalidate.
                //
        
                if (Request.IpiRequest == IPI_PACKET_READY) {
                    Packet = Source;
                    if (Request.IpiSynchType != 0) {
                        Packet = (PKPRCB)((ULONG64)Source + 1);
                    }
    
                    RequestPacket = (PKREQUEST_PACKET)&RequestMailbox->RequestPacket;
                    (RequestPacket->WorkerRoutine)((PKIPI_CONTEXT)Packet,
                                                   RequestPacket->CurrentPacket[0],
                                                   RequestPacket->CurrentPacket[1],
                                                   RequestPacket->CurrentPacket[2]);
        
                } else {
                    if (Request.IpiRequest == IPI_FLUSH_MULTIPLE) {
                        Number = Request.Count;
                        Virtual = (PVOID *)Request.Parameter;
                        End = Virtual + Number;
                        do {
                            KiFlushSingleTb(*Virtual);
                            Virtual += 1;
                        } while (Virtual < End);

                    } else if (Request.IpiRequest == IPI_INVALIDATE_ALL) {
                        WritebackInvalidate();

                    } else {

                        ASSERT(FALSE);

                    }
        
                    //
                    // If the synchronization type is target set, then the IPI was
                    // only between two processors and target set should be used
                    // for synchronization. Otherwise, packet barrier is used for
                    // synchronization.
                    //
        
                    if (Request.IpiSynchType == 0) {
                        if (SetMember == InterlockedXor64((PLONG64)&Source->TargetSet,
                                                          SetMember)) {
        
                            Source->PacketBarrier = 0;
                        }
        
                    } else {
                        Source->TargetSet = 0;
                    }
                }
            }
            
            SummarySet ^= AFFINITY_MASK(Processor);
        } while (BitScanForward64(&Processor, SummarySet) != FALSE);

        //
        // Clear target set in sender summary.
        //

        TargetSet = 
            InterlockedExchangeAdd64((LONG64 volatile *)&Prcb->SenderSummary,
                                     -(LONG64)TargetSet) - TargetSet;
    }

#endif

    return;
}
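The final statement of the outer loop above relies on InterlockedExchangeAdd64 returning the value the addend held before the addition: after atomically subtracting the processed bits from SenderSummary, subtracting TargetSet from that returned value yields the summary as it now stands, so bits set concurrently by new senders are picked up on the next pass. A small user-mode sketch of that return-value convention (illustrative values; _InterlockedExchangeAdd64 is the compiler intrinsic behind the kernel interlocked API):

#include <intrin.h>
#include <stdio.h>

int main(void)
{
    volatile __int64 senderSummary = 0x0F;   /* bits 0-3 pending */
    __int64 processed = 0x05;                /* bits 0 and 2 just handled */

    /* The intrinsic returns the PRIOR value of the addend. */
    __int64 before = _InterlockedExchangeAdd64(&senderSummary, -processed);
    __int64 remaining = before - processed;  /* 0x0A: bits 1 and 3 still pending */

    printf("before=0x%llx remaining=0x%llx\n", before, remaining);
    return 0;
}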