Example #1
/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}
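
The macro itself (per wdm.h) is essentially (BYTE_OFFSET(Base) + Length + PAGE_SIZE - 1) >> PAGE_SHIFT, i.e. the number of pages the byte range touches. A minimal sketch of how a caller might pair MmSizeOfMdl with a manual allocation follows; the helper name and pool tag are made up for illustration.

/* Hedged sketch, not from the sources above: allocate storage sized by
 * MmSizeOfMdl() and initialize it as an MDL. 'lMdE' is an illustrative
 * pool tag only. */
PMDL
AllocateMdlForBuffer(IN PVOID Buffer,
                     IN SIZE_T Length)
{
    PMDL Mdl = ExAllocatePoolWithTag(NonPagedPool,
                                     MmSizeOfMdl(Buffer, Length),
                                     'lMdE');
    if (Mdl != NULL)
    {
        MmInitializeMdl(Mdl, Buffer, Length);
        /* Caller must still probe/lock or build the MDL before use */
    }
    return Mdl;
}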
Example #2
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapIoSpace(IN PVOID BaseAddress,
               IN SIZE_T NumberOfBytes)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Get the page count
    //
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, NumberOfBytes);

    //
    // Get the PTE and PFN
    //
    PointerPte = MiAddressToPte(BaseAddress);
    Pfn = PFN_FROM_PTE(PointerPte);

    //
    // Is this an I/O mapping?
    //
    if (!MiGetPfnEntry(Pfn))
    {
        //
        // Destroy the PTE
        //
        RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));

        //
        // Blow the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);
    }

    //
    // Release the PTEs
    //
    MiReleaseSystemPtes(PointerPte, PageCount, 0);
}
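
MmUnmapIoSpace undoes MmMapIoSpace. A hedged sketch of the usual pairing, with a placeholder physical address and length:

/* Hedged sketch: map a device register block, touch it, unmap it.
 * 0xFED00000 and 0x1000 are illustrative values only. */
VOID
ExampleMapAndUnmap(VOID)
{
    PHYSICAL_ADDRESS RegPhys;
    PVOID Regs;

    RegPhys.QuadPart = 0xFED00000;
    Regs = MmMapIoSpace(RegPhys, 0x1000, MmNonCached);
    if (Regs != NULL)
    {
        /* ... read/write the registers ... */
        MmUnmapIoSpace(Regs, 0x1000);
    }
}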
Example #3
/*
 * @implemented
 */
VOID
NTAPI
IoBuildPartialMdl(IN PMDL SourceMdl,
                  IN PMDL TargetMdl,
                  IN PVOID VirtualAddress,
                  IN ULONG Length)
{
    PPFN_NUMBER TargetPages = (PPFN_NUMBER)(TargetMdl + 1);
    PPFN_NUMBER SourcePages = (PPFN_NUMBER)(SourceMdl + 1);
    ULONG Offset;
    ULONG FlagsMask = (MDL_IO_PAGE_READ |
                       MDL_SOURCE_IS_NONPAGED_POOL |
                       MDL_MAPPED_TO_SYSTEM_VA |
                       MDL_IO_SPACE);

    /* Calculate the offset */
    Offset = (ULONG)((ULONG_PTR)VirtualAddress -
                     (ULONG_PTR)SourceMdl->StartVa) -
                     SourceMdl->ByteOffset;

    /* Check if we don't have a length and calculate it */
    if (!Length) Length = SourceMdl->ByteCount - Offset;

    /* Write the process, start VA and byte data */
    TargetMdl->StartVa = (PVOID)PAGE_ROUND_DOWN(VirtualAddress);
    TargetMdl->Process = SourceMdl->Process;
    TargetMdl->ByteCount = Length;
    TargetMdl->ByteOffset = BYTE_OFFSET(VirtualAddress);

    /* Recalculate the length in pages */
    Length = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, Length);

    /* Set the MDL Flags */
    TargetMdl->MdlFlags &= (MDL_ALLOCATED_FIXED_SIZE | MDL_ALLOCATED_MUST_SUCCEED);
    TargetMdl->MdlFlags |= SourceMdl->MdlFlags & FlagsMask;
    TargetMdl->MdlFlags |= MDL_PARTIAL;

    /* Set the mapped VA */
    TargetMdl->MappedSystemVa = (PCHAR)SourceMdl->MappedSystemVa + Offset;

    /* Now do the copy */
    Offset = (ULONG)(((ULONG_PTR)TargetMdl->StartVa -
                      (ULONG_PTR)SourceMdl->StartVa) >> PAGE_SHIFT);
    SourcePages += Offset;
    RtlCopyMemory(TargetPages, SourcePages, Length * sizeof(PFN_NUMBER));
}
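
IoBuildPartialMdl is typically used to split one large, already-locked transfer into pieces. A hedged sketch, assuming SourceMdl describes at least two locked pages; the helper name is made up:

/* Hedged sketch: carve the second page out of the buffer that SourceMdl
 * already describes. Error handling is reduced to the NULL check. */
NTSTATUS
ExampleSplitMdl(IN PMDL SourceMdl)
{
    PVOID Start = (PUCHAR)MmGetMdlVirtualAddress(SourceMdl) + PAGE_SIZE;
    PMDL PartialMdl = IoAllocateMdl(Start, PAGE_SIZE, FALSE, FALSE, NULL);

    if (PartialMdl == NULL) return STATUS_INSUFFICIENT_RESOURCES;

    IoBuildPartialMdl(SourceMdl, PartialMdl, Start, PAGE_SIZE);
    /* ... issue I/O with PartialMdl ... */
    IoFreeMdl(PartialMdl);
    return STATUS_SUCCESS;
}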
Example #4
VOID
SetMemory(
    ULONG_PTR BaseAddress,
    SIZE_T Size,
    TYPE_OF_MEMORY MemoryType)
{
    ULONG_PTR BasePage, PageCount;

    BasePage = BaseAddress / PAGE_SIZE;
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, Size);

    /* Add the memory descriptor */
    PcMapCount = AddMemoryDescriptor(PcMemoryMap,
                                     MAX_BIOS_DESCRIPTORS,
                                     BasePage,
                                     PageCount,
                                     MemoryType);
}
Example #5
VOID
ReserveMemory(
    ULONG_PTR BaseAddress,
    SIZE_T Size,
    TYPE_OF_MEMORY MemoryType,
    PCHAR Usage)
{
    ULONG_PTR BasePage, PageCount;
    ULONG i;

    BasePage = BaseAddress / PAGE_SIZE;
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, Size);

    for (i = 0; i < PcMapCount; i++)
    {
        /* Check for conflicting descriptor */
        if ((PcMemoryMap[i].BasePage < BasePage + PageCount) &&
            (PcMemoryMap[i].BasePage + PcMemoryMap[i].PageCount > BasePage))
        {
            /* Check if the memory is free */
            if (PcMemoryMap[i].MemoryType != LoaderFree)
            {
                FrLdrBugCheckWithMessage(
                    MEMORY_INIT_FAILURE,
                    __FILE__,
                    __LINE__,
                    "Failed to reserve memory in the range 0x%Ix - 0x%Ix for %s",
                    BaseAddress,
                    Size,
                    Usage);
            }
        }
    }

    /* Add the memory descriptor */
    PcMapCount = AddMemoryDescriptor(PcMemoryMap,
                                     MAX_BIOS_DESCRIPTORS,
                                     BasePage,
                                     PageCount,
                                     MemoryType);
}
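
The conflict test above is the standard half-open interval overlap check. Factored out as a hedged helper (the name is made up):

/* Page ranges [Base1, Base1 + Count1) and [Base2, Base2 + Count2)
 * overlap exactly when each one starts below the other's end. */
static BOOLEAN
RangesOverlap(ULONG_PTR Base1, ULONG_PTR Count1,
              ULONG_PTR Base2, ULONG_PTR Count2)
{
    return (BOOLEAN)((Base1 < Base2 + Count2) && (Base2 < Base1 + Count1));
}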
Example #6
NTSTATUS
NICWrite(
    __in  PFDO_DATA     FdoData,
    __in  PIRP          Irp
)
/*++

Routine Description:

    This routine handles the hardware specific write request.
    If the device is not ready, fail the request. Otherwise
    get scatter-gather list for the request buffer and send the
    list to the hardware for DMA.

Arguments:

    FdoData - Pointer to the device context.
    Irp     - Pointer to the write request.

Return Value:

    NT Status code.

--*/
{
    NTSTATUS     returnStatus, status;
    PVOID        virtualAddress;
    ULONG        pageCount = 0, length = 0;
    PMDL         tempMdl, mdl;
    KIRQL        oldIrql;
#if defined(DMA_VER2)
    PVOID        sgListBuffer;
#endif

    DebugPrint(TRACE, DBG_WRITE, "--> PciDrvWrite %p\n", Irp);

    Irp->Tail.Overlay.DriverContext[3] = NULL;
    Irp->Tail.Overlay.DriverContext[2] = NULL;
    returnStatus = status = STATUS_SUCCESS;

    //
    // Is this adapter ready for sending?
    //
    if (MP_SHOULD_FAIL_SEND(FdoData))
    {
        DebugPrint(ERROR, DBG_WRITE, "Device not ready %p\n", Irp);
        returnStatus = status = STATUS_DEVICE_NOT_READY;
        goto Error;
    }

    tempMdl = mdl = Irp->MdlAddress;

    //
    // Check for zero length buffer
    //
    if (mdl == NULL || MmGetMdlByteCount(mdl) == 0)
    {
        DebugPrint(ERROR, DBG_WRITE, "Zero length buffer %p\n", Irp);
        status = returnStatus = STATUS_INVALID_DEVICE_REQUEST;
        goto Error;
    }

    //
    // Calculate the total packet length and the number of pages
    // spanned by all the buffers by walking the MDL chain.
    // NOTE: If this driver is used in the miniport configuration, it will
    // not get chained MDLs because the upper filter (NDISEDGE.SYS)
    // coalesces the fragments into a single contiguous buffer before presenting
    // the packet to us.
    //
    while (tempMdl != NULL)
    {
        ULONG mdlByteCount = MmGetMdlByteCount(tempMdl);

        virtualAddress = MmGetMdlVirtualAddress(tempMdl);
        length += mdlByteCount;

        //
        // Count the pages spanned by this MDL alone; using the running
        // total here would overstate the page count for chained MDLs.
        //
        pageCount += ADDRESS_AND_SIZE_TO_SPAN_PAGES(virtualAddress, mdlByteCount);
        tempMdl = tempMdl->Next;
    }

    if (length < NIC_MIN_PACKET_SIZE)
    {
        //
        // This will never happen in our case because the ndis-edge
        // pads smaller size packets with zero to make it NIC_MIN_PACKET_SIZE
        // long.
        //
        DebugPrint(ERROR, DBG_WRITE, "Packet size is less than %d\n", NIC_MIN_PACKET_SIZE);
        status = returnStatus = STATUS_INVALID_DEVICE_REQUEST;
        goto Error;
    }

    //
    // Check to see if the packet spans more physical pages than our
    // hardware can handle, or if pageCount exceeds the total number of
    // map registers allocated. If so, we should coalesce the scattered
    // buffers to fit the limit. We can't really break the transfer up and
    // DMA it in small chunks, because each packet has to be DMA'ed in one
    // shot. The code showing how to coalesce the packet for this hardware
    // is present in the original E100BEX sample.
    //
    if (pageCount > NIC_MAX_PHYS_BUF_COUNT ||
            pageCount > FdoData->AllocatedMapRegisters)
    {
        // TODO: Packet needs to be coalesced
        DebugPrint(ERROR, DBG_WRITE, "Packet needs to be coalesced\n");
        status = returnStatus = STATUS_INVALID_DEVICE_REQUEST;
        goto Error;
    }
    //
    // Build a scatter-gather list of the packet buffer and send the packet.
    //
    // If DMA_VER2 is not defined, use GetScatterGatherList. If the driver
    // is meant to work on XP and above, define DMA_VER2, so that you can
    // use BuildScatterGatherList.
    //
    // Since Build/GetScatterGatherList should be called at DISPATCH_LEVEL,
    // let us raise the IRQL.
    //

    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    //
    // Let us mark the IRP pending, because NICProcessSGList is an asynchronous
    // callback and we wouldn't know the status of the IRP. This IRP may either
    // get completed by the DPC handler after the DMA transfer or may
    // get queued if we are low on resources. So the safest thing
    // to do for us here is return STATUS_PENDING irrespective of what happens
    // to the IRP.
    //
    IoMarkIrpPending(Irp);
    returnStatus = STATUS_PENDING;

#if defined(DMA_VER2)

    sgListBuffer = ExAllocateFromNPagedLookasideList(
                            &FdoData->SGListLookasideList);
    if (sgListBuffer)
    {
        Irp->Tail.Overlay.DriverContext[2] =  sgListBuffer;
        status = FdoData->DmaAdapterObject->DmaOperations->BuildScatterGatherList(
                        FdoData->DmaAdapterObject,
                        FdoData->Self,
                        mdl,
                        MmGetMdlVirtualAddress(mdl),
                        length,
                        NICProcessSGList,
                        Irp,
                        TRUE,
                        sgListBuffer,
                        FdoData->ScatterGatherListSize);

        if (!NT_SUCCESS(status))
        {
            DebugPrint(ERROR, DBG_WRITE, "BuildScatterGatherList %x\n", status);
            ExFreeToNPagedLookasideList(&FdoData->SGListLookasideList, sgListBuffer);
            Irp->Tail.Overlay.DriverContext[2] =  NULL;
        }
    }

#else

    status = FdoData->DmaAdapterObject->DmaOperations->GetScatterGatherList(
                    FdoData->DmaAdapterObject,
                    FdoData->Self,
                    mdl,
                    MmGetMdlVirtualAddress(mdl),
                    length,
                    NICProcessSGList,
                    Irp,
                    TRUE);

    if (!NT_SUCCESS(status))
    {
        DebugPrint(ERROR, DBG_WRITE, "GetScatterGatherList %x\n", status);
    }

#endif

    KeLowerIrql(oldIrql);

Error:
    if (!NT_SUCCESS(status)) {
        //
        // Our call to get the scatter-gather list failed. We know the
        // NICProcessSGList is not called for sure in that case. So let us
        // complete the IRP here with failure status. Since we marked the
        // IRP pending, we have no choice but to return status-pending
        // even though we are completing the IRP in the incoming thread
        // context.
        //
        NICCompleteSendRequest(FdoData, Irp, status, 0, FALSE);
    }

    DebugPrint(LOUD, DBG_WRITE, "<-- PciDrvWrite %x\n", returnStatus);

    return returnStatus;
}
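
Both GetScatterGatherList and BuildScatterGatherList hand the finished list to the driver-supplied callback, NICProcessSGList above, which has the documented DRIVER_LIST_CONTROL shape. A hedged skeleton; the body is illustrative, not the sample's actual implementation:

/* Hedged skeleton of the scatter-gather completion callback. */
VOID
NICProcessSGList(
    __in  PDEVICE_OBJECT          DeviceObject,
    __in  PIRP                    Irp,
    __in  PSCATTER_GATHER_LIST    ScatterGather,
    __in  PVOID                   Context
    )
{
    //
    // Program the DMA engine from ScatterGather->Elements[0 ..
    // ScatterGather->NumberOfElements - 1], and remember the list so it
    // can be returned with PutScatterGatherList when the transfer is done.
    //
}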
Example #7
VOID
NICWritePacket(
    __in  PFDO_DATA   FdoData,
    __in  PIRP        Irp,
    __in  BOOLEAN     bFromQueue
    )
/*++
Routine Description:

    Do the work to send a packet
    Assumption: Send spinlock has been acquired

Arguments:

    FdoData     Pointer to our FdoData
    Irp         Pointer to the write IRP carrying the packet
    bFromQueue  TRUE if it's taken from the send wait queue

Return Value:

    None.

--*/
{
    PMP_TCB         pMpTcb = NULL;
    ULONG           packetLength;
    PVOID           virtualAddress;

    DebugPrint(TRACE, DBG_WRITE, "--> NICWritePacket, Irp= %p\n", Irp);

    //
    // Get the next free TCB and initialize it to represent the
    // request buffer.
    //
    pMpTcb = FdoData->CurrSendTail;
    ASSERT(!MP_TEST_FLAG(pMpTcb, fMP_TCB_IN_USE));

    //
    // If the adapter is not ready, fail the request.
    //
    if (MP_IS_NOT_READY(FdoData)) {
        MP_FREE_SEND_PACKET(FdoData, pMpTcb, STATUS_DEVICE_NOT_READY);
        return;
    }

    pMpTcb->FirstBuffer = Irp->MdlAddress;
    virtualAddress = MmGetMdlVirtualAddress(Irp->MdlAddress);
    pMpTcb->BufferCount = 1;
    pMpTcb->PacketLength = packetLength = MmGetMdlByteCount(Irp->MdlAddress);
    pMpTcb->PhysBufCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(virtualAddress,
                                            packetLength);
    pMpTcb->Irp = Irp;
    MP_SET_FLAG(pMpTcb, fMP_TCB_IN_USE);

    //
    // Call the send handler, it only needs to deal with the frag list
    //
    NICSendPacket(FdoData, pMpTcb, Irp->Tail.Overlay.DriverContext[3]);

    FdoData->nBusySend++;
    ASSERT(FdoData->nBusySend <= FdoData->NumTcb);
    FdoData->CurrSendTail = FdoData->CurrSendTail->Next;

    DebugPrint(TRACE, DBG_WRITE, "<-- NICWritePacket\n");
    return;

}
Example #8
///////////////////////////////////////////////////////////////////////////////
//
//  OsrStartReadIrp
//
//    This routine is called by the OsrRead and DPC routines in order to
//    begin a new Read operation.
//
//  INPUTS:
//
//      DeviceObject - Address of the DEVICE_OBJECT for our device.
//  
//      Irp - Address of the IRP representing the IRP_MJ_READ call.
//
//  OUTPUTS:
//
//      None.
//
//  RETURNS:
//
//      None.
//
//  IRQL:
//
//      This routine is called at DISPATCH_LEVEL.
//
//  NOTES:
//      *** Called (and returns) with the WriteQueueLock held.
//
///////////////////////////////////////////////////////////////////////////////
VOID
OsrStartReadIrp(PDEVICE_OBJECT DeviceObject, PIRP Irp)
{
    POSR_DEVICE_EXT devExt = DeviceObject->DeviceExtension;
    PIO_STACK_LOCATION ioStack;
    ULONG mapRegsNeeded;

    ioStack = IoGetCurrentIrpStackLocation(Irp);

    //
    // In progress IRPs cannot be cancelled
    //
    IoSetCancelRoutine(Irp, NULL);

#if DBG
    DbgPrint("OsrRead: Transfer length %d.\n",
                                ioStack->Parameters.Read.Length);
#endif

    //
    // We're starting a request... therefore, we clear the StopEvent
    // flag.
    //
    KeClearEvent(&devExt->StopEvent);

    //
    // There is no in-progress request.  Start this request on the
    // device.
    //
    devExt->CurrentReadIrp = Irp;

    devExt->ReadTotalLength = ioStack->Parameters.Read.Length;

    devExt->ReadSoFar = 0;

    devExt->ReadStartingOffset = 0;

    //
    // Start the watchdog timer on this IRP
    //
    Irp->Tail.Overlay.DriverContext[0] = (PVOID)(ULONG_PTR)OSR_WATCHDOG_INTERVAL;

    //
    // Flush the requestor's buffer back from cache on non-dma coherent
    // machines.
    //
    KeFlushIoBuffers(Irp->MdlAddress, TRUE, TRUE);

    //
    // Determine number of map registers required by this read
    //
    mapRegsNeeded = 
        ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Irp->MdlAddress),
                                        ioStack->Parameters.Read.Length);
        
#if DBG
    DbgPrint("StartReadIrp: %d. map regs needed\n", mapRegsNeeded);
#endif

    //
    // Limit the number of map registers used to the maximum allowed by the
    // HAL.  We determined this max when we called HalGetAdapter() during
    // our DriverEntry processing.
    //
    devExt->MapRegsThisRead = ((mapRegsNeeded > devExt->ReadMapRegsGot) ? 
                              devExt->ReadMapRegsGot : mapRegsNeeded);

#if DBG
    DbgPrint("StartReadIrp: %d. map regs this xfer\n", devExt->MapRegsThisRead);
#endif

    IoAllocateAdapterChannel(devExt->ReadAdapter,
                             DeviceObject, 
                             devExt->MapRegsThisRead,
                             OsrAdapterControlRead,
                             Irp);
}
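
To make the map-register arithmetic concrete: with 4 KB pages, a 16 KB transfer starting 0x200 bytes into a page spans five pages, so five map registers are needed. A hedged check with illustrative values:

/* (0x200 + 0x4000 + 0xFFF) >> 12 == 5 on a 4 KB-page machine. */
ASSERT(ADDRESS_AND_SIZE_TO_SPAN_PAGES((PVOID)0x10200, 0x4000) == 5);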
Example #9
/*
 * @implemented
 */
PMDL
NTAPI
IoAllocateMdl(IN PVOID VirtualAddress,
              IN ULONG Length,
              IN BOOLEAN SecondaryBuffer,
              IN BOOLEAN ChargeQuota,
              IN PIRP Irp)
{
    PMDL Mdl = NULL, p;
    ULONG Flags = 0;
    ULONG Size;

    /* Make sure we got a valid length */
    ASSERT(Length != 0);

    /* Fail if allocation is over 2GB */
    if (Length & 0x80000000) return NULL;

    /* Calculate the number of pages for the allocation */
    Size = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, Length);
    if (Size > 23)
    {
        /* This is bigger than our fixed-size MDLs. Calculate real size */
        Size *= sizeof(PFN_NUMBER);
        Size += sizeof(MDL);
        if (Size > MAXUSHORT) return NULL;
    }
    else
    {
        /* Use an internal fixed MDL size */
        Size = (23 * sizeof(PFN_NUMBER)) + sizeof(MDL);
        Flags |= MDL_ALLOCATED_FIXED_SIZE;

        /* Allocate one from the lookaside list */
        Mdl = IopAllocateMdlFromLookaside(LookasideMdlList);
    }

    /* Check if we don't have an mdl yet */
    if (!Mdl)
    {
        /* Allocate one from pool */
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    /* Initialize it */
    MmInitializeMdl(Mdl, VirtualAddress, Length);
    Mdl->MdlFlags |= Flags;

    /* Check if an IRP was given too */
    if (Irp)
    {
        /* Check if it came with a secondary buffer */
        if (SecondaryBuffer)
        {
            /* Insert the MDL at the end */
            p = Irp->MdlAddress;
            while (p->Next) p = p->Next;
            p->Next = Mdl;
        }
        else
        {
            /* Otherwise, insert it directly */
            Irp->MdlAddress = Mdl;
        }
    }

    /* Return the allocated mdl */
    return Mdl;
}
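
A hedged sketch of the common caller-side pattern around IoAllocateMdl when preparing direct I/O on a user buffer; the helper name is made up, and UserBuffer/Length stand in for request fields:

/* Build and lock an MDL for a user buffer. MmProbeAndLockPages raises
 * an exception on invalid buffers, hence the __try. */
PMDL
ExampleLockUserBuffer(IN PVOID UserBuffer,
                      IN ULONG Length)
{
    PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);

    if (Mdl == NULL) return NULL;

    __try
    {
        MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        IoFreeMdl(Mdl);
        return NULL;
    }
    return Mdl; /* later: MmUnlockPages(Mdl); IoFreeMdl(Mdl); */
}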
Example #10
/*++

Routine Description:

    Called by each processor to initialize its local APIC.
    The first processor to run this routine will map the
    local APICs for all processors.

    Note that all interrupts are blocked on entry since
    we are being called from HalInitializeProcessor().

Arguments:

    Processor - Supplies a logical processor number

Return Value:

    None.

--*/
VOID
CbusInitializeLocalApic(
IN ULONG Processor,
IN PVOID PhysicalApicLocation,
IN ULONG SpuriousVector
)
{
        ULONG           ProcessorBit;
        ULONG           ApicIDBit;
        REDIRECTION_T   RedirectionEntry = { 0 };

        //
        // If the APIC mapping has not been set up yet,
        // do it now.  Given the NT startup architecture,
        // this will always be done by the boot processor.
        //
        // We map in the APIC into global space instead of in
        // the PCR because all processors see it at the
        // same _physical_ address.  Note the page is mapped PWT.
        //

        //
        // Note that all idle threads will share a common
        // page directory, and the HAL PDE is inherited when
        // new processes are created.  Hence, a single
        // HalpMapMemory for the APIC is enough for all
        // processors to be able to see their APICs.
        //
        if (!CbusLocalApic) {
                CbusLocalApic = (PAPIC_REGISTERS) HalpMapPhysicalMemoryWriteThrough (
                                PhysicalApicLocation,
                                (ULONG)ADDRESS_AND_SIZE_TO_SPAN_PAGES(
                                PhysicalApicLocation, LOCAL_APIC_SIZE));
        }
        
        KeGetPcr()->HalReserved[PCR_TASKPRI] =
                 (ULONG)&CbusLocalApic->ApicTaskPriority;

        //
        // Here we initialize our destination format and
        // logical destination registers so that we can get IPIs
        // from other processors.
        //
        // Specify full decode mode in the destination format register -
        // ie: each processor sets only his own bit, and a "match" requires
        // that at least one bit match.  The alternative is encoded mode,
        // in which _ALL_ encoded bits must match the sender's target for
        // this processor to see the sent IPI.
        //
        CbusLocalApic->ApicDestinationFormat = APIC_ALL_PROCESSORS;

        //
        // the logical destination register is what the redirection destination
        // entry compares against.  only the high 8 bits will be supported
        // in Intel's future APICs, although this isn't documented anywhere!
        //
        ProcessorBit = KeGetPcr()->HalReserved[PCR_BIT];

        ApicIDBit = (ProcessorBit << APIC_BIT_TO_ID);

        CbusLocalApic->ApicLogicalDestination = ApicIDBit;

        //
        // designate the spurious interrupt vector we want to see,
        // and inform this processor's APIC to enable interrupt
        // acceptance.
        //
        CbusLocalApic->ApicSpuriousVector =
                                SpuriousVector | LOCAL_APIC_ENABLE;

        //
        // as each processor comes online here, we must have ALL
        // processors resync their arbitration IDs to take into
        // account the new processor.  note that we will set:
        // arb id == APIC id == processor number.
        //
        // the strange ID setting is to satisfy Intel's need for
        // uniqueness amongst I/O and local unit ID numbering.
        //

        CbusLocalApic->LocalUnitID = ((2 * Processor + 1) << APIC_BIT_TO_ID);

        //
        // sync up our new ID with everyone else
        //

        CbusApicArbsync();

        //
        // Create the NMI routing linkage for this processor
        // It is set as level sensitive, enabled and generating NMI trap 2.
        //

        RedirectionEntry.ra.Trigger = APIC_LEVEL;
        RedirectionEntry.ra.Mask = APIC_INTR_UNMASKED;
        RedirectionEntry.ra.Delivery_mode = APIC_INTR_NMI;
        RedirectionEntry.ra.Vector = 2;
        RedirectionEntry.ra.Destination = ApicIDBit;
        CbusLocalApic->ApicLocalInt1 = RedirectionEntry;

        //
        // Create the spurious interrupt IDT entry for this processor
        //

        KiSetHandlerAddressToIDT(SpuriousVector, HalpSpuriousInterrupt);

        //
        // we must specify HIGH_LEVEL when we enable the spurious vector
        // here because it will overwrite the CbusVectorToIrql[] entry
        // for the HIGH_LEVEL (0xFF!).  the spurious vector really only
        // needs an IDT entry and doesn't need any other software tables,
        // but make the enable call for tracking purposes.
        //
        HalEnableSystemInterrupt(SpuriousVector, HIGH_LEVEL, Latched);

        //
        // start off at IRQL 0 - we are still protected by cli.
        //
        CbusLocalApic->ApicTaskPriority.rb.LowDword = 0;
}
Example #11
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Do the math
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}
Example #12
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
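
MmFreePagesFromMdl is normally paired with MmAllocatePagesForMdl, and the MDL storage itself must still be released with ExFreePool afterwards. A hedged sketch of the round trip; the 16-page size is illustrative, and the allocator may return fewer pages than requested:

/* Allocate up to 16 pages anywhere in physical memory, then return them. */
VOID
ExampleAllocAndFreePages(VOID)
{
    PHYSICAL_ADDRESS Low, High, Skip;
    PMDL Mdl;

    Low.QuadPart = 0;
    High.QuadPart = MAXLONGLONG;
    Skip.QuadPart = 0;
    Mdl = MmAllocatePagesForMdl(Low, High, Skip, 16 * PAGE_SIZE);
    if (Mdl != NULL)
    {
        /* ... map with MmMapLockedPagesSpecifyCache, use, unmap ... */
        MmFreePagesFromMdl(Mdl);
        ExFreePool(Mdl);
    }
}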
Example #13
ARC_STATUS
FloppyWrite (
    IN ULONG FileId,
    IN PVOID Buffer,
    IN ULONG Length,
    OUT PULONG Count
    )

/*++

Routine Description:

    This function writes data to the floppy starting at the position
    specified in the file table.


Arguments:

    FileId - Supplies the file table index.

    Buffer - Supplies a pointer to the buffer that contains the data
        to be written.

    Length - Supplies the number of bytes to be written.

    Count - Supplies a pointer to a variable that receives the number of
        bytes actually written.

Return Value:


    The write completion status is returned.

--*/

{

    ARC_STATUS ArcStatus;
    ULONG FrameNumber;
    ULONG Index;
    ULONG Limit;
    PMDL MdlAddress;
    UCHAR MdlBuffer[sizeof(MDL) + ((64 / 4) + 1) * sizeof(ULONG)];
    ULONG NumberOfPages;
    ULONG Offset;
    PULONG PageFrame;
    ULONG Position;
    CHAR TempBuffer[SECTOR_SIZE + 32];
    PCHAR TempPointer;

    //
    // If the requested size of the transfer is zero return ESUCCESS
    //
    if (Length == 0) {
        return ESUCCESS;
    }
    //
    // If the current position is not at a sector boundary, then
    // read the first and/or last sector separately and copy the data.
    //

    Offset = BlFileTable[FileId].Position.LowPart & (SECTOR_SIZE - 1);
    if (Offset != 0) {
        //
        // Adjust position to the sector boundary, align the transfer address
        // and read that first sector.
        //
        BlFileTable[FileId].Position.LowPart -= Offset;
        TempPointer =  (PVOID) ((ULONG) (TempBuffer + KeGetDcacheFillSize() - 1)
            & ~(KeGetDcacheFillSize() - 1));

        ArcStatus = FloppyRead(FileId, TempPointer, SECTOR_SIZE, Count);

        //
        // If the transfer was not successful, then reset the position
        // and return the completion status.
        //
        if (ArcStatus != ESUCCESS) {
            BlFileTable[FileId].Position.LowPart += Offset;
            return ArcStatus;
        } else {
            //
            // Reset the position as it was before the read.
            //
            BlFileTable[FileId].Position.LowPart -= SECTOR_SIZE;
        }

        //
        // If the length of the write is less than the number of bytes from
        // the offset to the end of the sector, then copy only the number
        // of bytes required to fulfil the request. Otherwise copy to the end
        // of the sector and write the remaining data.
        //

        if ((SECTOR_SIZE - Offset) > Length) {
            Limit = Offset + Length;
        } else {
            Limit = SECTOR_SIZE;
        }

        //
        // Merge the data from the specified buffer.
        //
        for (Index = Offset; Index < Limit; Index += 1) {
            *(TempPointer + Index) = *((PCHAR)Buffer)++;
        }

        //
        // Write the modified sector.
        //
        ArcStatus = FloppyWrite(FileId, TempPointer, SECTOR_SIZE, Count);

        if (ArcStatus != ESUCCESS) {
            return ArcStatus;
        }

        //
        // Adjust the current position and
        // Write the remaining part of the specified transfer.
        //

        BlFileTable[FileId].Position.LowPart -= SECTOR_SIZE-Limit;
        Position = BlFileTable[FileId].Position.LowPart;
        ArcStatus = FloppyWrite(FileId,
                               Buffer,
                               Length - (Limit - Offset),
                               Count);

        //
        // If the transfer was not successful, then reset the device
        // position and return the completion status.
        //

        if (ArcStatus != ESUCCESS) {
            BlFileTable[FileId].Position.LowPart = Position;
            return ArcStatus;
        } else {
            *Count = Length;
            return ESUCCESS;
        }
    } else {
        //
        // if the size of requested data is not a multiple of the sector
        // size then write the last sector separately.
        //
        if (Length & (SECTOR_SIZE - 1)) {

            //
            // Do the transfer of the complete sectors in the middle
            //
            Position = BlFileTable[FileId].Position.LowPart;
            ArcStatus = FloppyWrite(FileId,
                                   Buffer,
                                   Length & (~(SECTOR_SIZE - 1)),
                                   Count);

            //
            // If the transfer was not successful, then reset the device
            // position and return the completion status.
            //
            if (ArcStatus != ESUCCESS) {
                BlFileTable[FileId].Position.LowPart = Position;
                return ArcStatus;
            }

            //
            // Read the last sector and copy the requested data.
            //
            TempPointer =  (PVOID) ((ULONG) (TempBuffer + KeGetDcacheFillSize() - 1)
                & ~(KeGetDcacheFillSize() - 1));

            ArcStatus = FloppyRead(FileId, TempPointer, SECTOR_SIZE, Count);

            //
            // If the transfer was not successful return the completion status.
            //
            if (ArcStatus != ESUCCESS) {
                return ArcStatus;
            }

            //
            // Copy the data from the specified buffer.
            //
            (PCHAR)Buffer += Length & (~(SECTOR_SIZE - 1));
            Limit =  Length & (SECTOR_SIZE - 1);
            for (Index = 0; Index < Limit; Index += 1) {
                *(TempPointer + Index) = *((PCHAR)Buffer)++;
            }
            //
            // Adjust the position and write the data.
            //
            BlFileTable[FileId].Position.LowPart -= SECTOR_SIZE;
            ArcStatus = FloppyWrite(FileId, TempPointer, SECTOR_SIZE, Count);

            //
            // Set the position for the requested transfer
            //
            BlFileTable[FileId].Position.LowPart -= SECTOR_SIZE - Limit;

            *Count = Length;
            return ArcStatus;

        } else {

            //
            // Build the memory descriptor list.
            //

            MdlAddress = (PMDL)&MdlBuffer[0];
            MdlAddress->Next = NULL;
            MdlAddress->Size = sizeof(MDL) +
                  ADDRESS_AND_SIZE_TO_SPAN_PAGES(Buffer, Length) * sizeof(ULONG);
            MdlAddress->StartVa = (PVOID)PAGE_ALIGN(Buffer);
            MdlAddress->ByteCount = Length;
            MdlAddress->ByteOffset = BYTE_OFFSET(Buffer);
            PageFrame = (PULONG)(MdlAddress + 1);
            FrameNumber = (((ULONG)MdlAddress->StartVa) & 0x1fffffff) >> PAGE_SHIFT;
            NumberOfPages = (MdlAddress->ByteCount +
                  MdlAddress->ByteOffset + PAGE_SIZE - 1) >> PAGE_SHIFT;
            for (Index = 0; Index < NumberOfPages; Index += 1) {
                *PageFrame++ = FrameNumber++;
            }

            //
            // Flush I/O buffers and write to the boot device.
            //

            HalFlushIoBuffers(MdlAddress, FALSE, TRUE);
            ArcStatus = FloppyBootIO(MdlAddress,
                                     BlFileTable[FileId].Position.LowPart >> SECTOR_SHIFT,
                                     FileId,
                                     TRUE);

            if (ArcStatus == ESUCCESS) {
                BlFileTable[FileId].Position.LowPart += Length;
                *Count = Length;
                return ESUCCESS;
            } else {
                *Count = 0;
                return EIO;
            }
        }
    }
}
Example #14
NTSTATUS
MiCcPrepareReadInfo (
    IN PMI_READ_INFO MiReadInfo
    )

/*++

Routine Description:

    This routine constructs MDLs that describe the pages in the argument
    read-list. The caller will then issue the I/O on return.

Arguments:

    MiReadInfo - Supplies a pointer to the read-list.

Return Value:

    Various NTSTATUS codes.

Environment:

    Kernel mode, PASSIVE_LEVEL.

--*/

{
    UINT64 PteOffset;
    NTSTATUS Status;
    PMMPTE ProtoPte;
    PMMPTE LastProto;
    PMMPTE *ProtoPteArray;
    PCONTROL_AREA ControlArea;
    PSUBSECTION Subsection;
    PMMINPAGE_SUPPORT InPageSupport;
    PMDL Mdl;
    PMDL IoMdl;
    PMDL ApiMdl;
    ULONG i;
    PFN_NUMBER NumberOfPages;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (MiReadInfo->FileOffset.LowPart, MiReadInfo->LengthInBytes);

    //
    // Translate the section object into the relevant control area.
    //

    ControlArea = (PCONTROL_AREA)MiReadInfo->FileObject->SectionObjectPointer->DataSectionObject;

    //
    // If the section is backed by a ROM, then there's no need to prefetch
    // anything as it would waste RAM.
    //

    if (ControlArea->u.Flags.Rom == 1) {
        return STATUS_NOT_SUPPORTED;
    }

    //
    // Initialize the internal Mi readlist.
    //

    MiReadInfo->ControlArea = ControlArea;

    //
    // Allocate and initialize an inpage support block for this run.
    //

    InPageSupport = MiGetInPageSupportBlock (MM_NOIRQL, &Status);
    
    if (InPageSupport == NULL) {
        ASSERT (!NT_SUCCESS (Status));
        return Status;
    }
    
    MiReadInfo->InPageSupport = InPageSupport;

    //
    // Allocate and initialize an MDL to return to our caller.  The actual
    // frame numbers are filled in when all the pages are reference counted.
    //

    ApiMdl = MmCreateMdl (NULL, NULL, NumberOfPages << PAGE_SHIFT);

    if (ApiMdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    ApiMdl->MdlFlags |= MDL_PAGES_LOCKED;

    MiReadInfo->ApiMdl = ApiMdl;

    //
    // Allocate and initialize an MDL to use for the actual transfer (if any).
    //

    IoMdl = MmCreateMdl (NULL, NULL, NumberOfPages << PAGE_SHIFT);

    if (IoMdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    MiReadInfo->IoMdl = IoMdl;
    Mdl = IoMdl;

    //
    // Make sure the section is really prefetchable - physical and
    // pagefile-backed sections are not.
    //

    if ((ControlArea->u.Flags.PhysicalMemory) ||
        (ControlArea->u.Flags.Image == 1) ||
        (ControlArea->FilePointer == NULL)) {

        return STATUS_INVALID_PARAMETER_1;
    }

    //
    // Start the read at the proper file offset.
    //

    InPageSupport->ReadOffset = MiReadInfo->FileOffset;
    ASSERT (BYTE_OFFSET (InPageSupport->ReadOffset.LowPart) == 0);
    InPageSupport->FilePointer = MiReadInfo->FileObject;

    //
    // Stash a pointer to the start of the prototype PTE array (the values
    // in the array are not contiguous as they may cross subsections)
    // in the inpage block so we can walk it quickly later when the pages
    // are put into transition.
    //

    ProtoPteArray = (PMMPTE *)(Mdl + 1);

    InPageSupport->BasePte = (PMMPTE) ProtoPteArray;

    //
    // Data (but not image) reads use the whole page and the filesystems
    // zero fill any remainder beyond valid data length so we don't
    // bother to handle this here.  It is important to specify the
    // entire page where possible so the filesystem won't post this
    // which will hurt perf.  LWFIX: must use CcZero to make this true.
    //

    ASSERT (((ULONG_PTR)Mdl & (sizeof(QUAD) - 1)) == 0);
    InPageSupport->u1.e1.PrefetchMdlHighBits = ((ULONG_PTR)Mdl >> 3);

    //
    // Initialize the prototype PTE pointers.
    //

    ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0);

    if (ControlArea->u.Flags.Rom == 0) {
        Subsection = (PSUBSECTION)(ControlArea + 1);
    }
    else {
        Subsection = (PSUBSECTION)((PLARGE_CONTROL_AREA)ControlArea + 1);
    }

#if DBG
    if (MiCcDebug & MI_CC_FORCE_PREFETCH) {
        MiRemoveUserPages ();
    }
#endif

    //
    // Calculate the first prototype PTE address.
    //

    PteOffset = (UINT64)(MiReadInfo->FileOffset.QuadPart >> PAGE_SHIFT);

    //
    // Make sure the PTEs are not in the extended part of the segment.
    //

    while (TRUE) {
            
        //
        // A memory barrier is needed to read the subsection chains
        // in order to ensure the writes to the actual individual
        // subsection data structure fields are visible in correct
        // order.  This avoids the need to acquire any stronger
        // synchronization (ie: PFN lock), thus yielding better
        // performance and pageability.
        //

        KeMemoryBarrier ();

        if (PteOffset < (UINT64) Subsection->PtesInSubsection) {
            break;
        }

        PteOffset -= Subsection->PtesInSubsection;
        Subsection = Subsection->NextSubsection;
    }

    Status = MiAddViewsForSectionWithPfn ((PMSUBSECTION) Subsection,
                                          Subsection->PtesInSubsection);

    if (!NT_SUCCESS (Status)) {
        return Status;
    }

    MiReadInfo->FirstReferencedSubsection = Subsection;
    MiReadInfo->LastReferencedSubsection = Subsection;

    ProtoPte = &Subsection->SubsectionBase[PteOffset];
    LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];

    for (i = 0; i < NumberOfPages; i += 1) {

        //
        // Calculate which PTE maps the given logical block offset.
        //
        // Always look forwards (as an optimization) in the subsection chain.
        //
        // A quick check is made first to avoid recalculations and loops where
        // possible.
        //
    
        if (ProtoPte >= LastProto) {

            //
            // Handle extended subsections.  Increment the view count for
            // every subsection spanned by this request, creating prototype
            // PTEs if needed.
            //

            ASSERT (i != 0);

            Subsection = Subsection->NextSubsection;

            Status = MiAddViewsForSectionWithPfn ((PMSUBSECTION) Subsection,
                                                  Subsection->PtesInSubsection);

            if (!NT_SUCCESS (Status)) {
                return Status;
            }

            MiReadInfo->LastReferencedSubsection = Subsection;

            ProtoPte = Subsection->SubsectionBase;

            LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
        }

        *ProtoPteArray = ProtoPte;
        ProtoPteArray += 1;

        ProtoPte += 1;
    }

    return STATUS_SUCCESS;
}
Example #15
BOOLEAN
VideoPortDoDma(
    IN PVOID                        HwDeviceExtension,
    IN PVIDEO_REQUEST_PACKET        pVrp
    )
{
    PDEVICE_EXTENSION            deviceExtension =
                                ((PDEVICE_EXTENSION) HwDeviceExtension) - 1;
    PPUBLIC_VIDEO_REQUEST_BLOCK pPVRB;
    PDMA_PARAMETERS             pIoVrb;
    PIRP                        pIrp;

    GET_PVRB_FROM_PVRP(pPVRB, pVrp);

    //
    // Bail out before the request block is dereferenced below.
    //

    if (!pPVRB) {
        return FALSE;
    }

    pIoVrb = pVideoPortGetDmaParameters(deviceExtension, pPVRB);

    if (!pIoVrb) {

        //
        // Can't get DmaParameter storage. set flag and return
        //

        deviceExtension->VRBFlags |= INSUFFICIENT_DMA_RESOURCES;
        return FALSE;
    }

    pIrp                              = pPVRB->pIrp;
    deviceExtension->MapDmaParameters = pIoVrb;

    //
    // Get Mdl for user buffer.
    //

    if (!IoAllocateMdl(pPVRB->vrp.InputBuffer,
                       pPVRB->vrp.InputBufferLength,
                       FALSE,
                       FALSE,
                       pIrp)) {

            VideoPortDebugPrint(0,
                        "VideoPortIoStartRequest: Can't allocate Mdl\n");

            pPVRB->vrp.StatusBlock->Status = VRB_STATUS_INVALID_REQUEST;

            VideoPortNotification(RequestComplete,
                                 deviceExtension,
                                 pIoVrb);

            VideoPortNotification(NextRequest,
                                 deviceExtension);

            //
            // Queue a DPC to process the work that was just indicated.
            //

            IoRequestDpc(deviceExtension->DeviceObject, NULL, NULL);
            return FALSE;
    }

    //
    // Save the Mdl virtual address
    //

    pIoVrb->DataOffset = MmGetMdlVirtualAddress(pIrp->MdlAddress);

    //
    // Determine if the device needs mapped memory.
    //

    if (deviceExtension->bMapBuffers) {

        if (pIrp->MdlAddress) {
            pIoVrb->DataOffset = MmGetSystemAddressForMdl(pIrp->MdlAddress);

            pPVRB->vrp.InputBuffer  = ((PUCHAR)pIoVrb->DataOffset) +
                                 (ULONG)(((PUCHAR)pPVRB->vrp.InputBuffer) - ((PUCHAR)MmGetMdlVirtualAddress(pIrp->MdlAddress)));
        }
    }

    if (deviceExtension->DmaAdapterObject) {

        //
        // If the buffer is not mapped then the I/O buffer must be flushed
        // to aid in cache coherency.
        //

        KeFlushIoBuffers(pIrp->MdlAddress, TRUE, TRUE);
    }

    //
    // Determine if this adapter needs map registers
    //

    if (deviceExtension->bMasterWithAdapter) {

        //
        // Calculate the number of map registers needed for this transfer.
        // Note that this may be recalculated if the miniport really wants
        // to do DMA
        //

        pIoVrb->NumberOfMapRegisters = ADDRESS_AND_SIZE_TO_SPAN_PAGES(
                pPVRB->vrp.InputBuffer,
                pPVRB->vrp.InputBufferLength
                );
    }

    //
    // The miniport may have requested too big of a buffer, so iteratively
    // chop it in half until we find one we can do. This changes the
    // vrp.InputBufferLength, which the miniport must check to see how much
    // is actually sent and queue up the remainder.
    //

    while (pIoVrb->NumberOfMapRegisters >
        deviceExtension->Capabilities.MaximumPhysicalPages) {

        pPVRB->vrp.InputBufferLength /= 2;

        pIoVrb->NumberOfMapRegisters = ADDRESS_AND_SIZE_TO_SPAN_PAGES(
            pPVRB->vrp.InputBuffer,
            pPVRB->vrp.InputBufferLength
            );

    }

    //
    // Allocate the adapter channel with sufficient map registers
    // for the transfer.
    //

    IoAllocateAdapterChannel(
        deviceExtension->DmaAdapterObject,  // AdapterObject
        deviceExtension->DeviceObject,      // DeviceObject
        pIoVrb->NumberOfMapRegisters,       // NumberOfMapRegisters
        pVideoPortBuildScatterGather,       // ExecutionRoutine (Must return DeallocateObjectKeepRegisters)
        pIoVrb);                            // Context

    //
    // The execution routine called via IoAllocateAdapterChannel will do
    // the rest of the work, so just return.
    //

    return TRUE;

}
Example #16
static int vbsfTransferCommon(VBSFTRANSFERCTX *pCtx)
{
    int rc = VINF_SUCCESS;
    BOOLEAN fProcessed = FALSE;

    uint32_t cbTransferred = 0;

    uint32_t cbToTransfer;
    uint32_t cbIO;

    if (VbglR0CanUsePhysPageList())
    {
        ULONG offFirstPage = MmGetMdlByteOffset(pCtx->pMdl);
        ULONG cPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pCtx->pMdl), pCtx->cbData);
        ULONG cPagesToTransfer = RT_MIN(cPages, VBSF_MAX_READ_WRITE_PAGES);
        RTGCPHYS64 *paPages = (RTGCPHYS64 *)RTMemTmpAlloc(cPagesToTransfer * sizeof(RTGCPHYS64));

        Log(("VBOXSF: vbsfTransferCommon: using page list: %d pages, offset 0x%03X\n", cPages, offFirstPage));

        if (paPages)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pCtx->pMdl);
            ULONG cPagesTransferred = 0;
            cbTransferred = 0;

            while (cPagesToTransfer != 0)
            {
                ULONG iPage;
                cbToTransfer = cPagesToTransfer * PAGE_SIZE - offFirstPage;

                if (cbToTransfer > pCtx->cbData - cbTransferred)
                    cbToTransfer = pCtx->cbData - cbTransferred;

                if (cbToTransfer == 0)
                {
                    /* Nothing to transfer. */
                    break;
                }

                cbIO = cbToTransfer;

                Log(("VBOXSF: vbsfTransferCommon: transferring %d pages at %d; %d bytes at %d\n",
                     cPagesToTransfer, cPagesTransferred, cbToTransfer, cbTransferred));

                for (iPage = 0; iPage < cPagesToTransfer; iPage++)
                    paPages[iPage] = (RTGCPHYS64)paPfns[iPage + cPagesTransferred] << PAGE_SHIFT;

                rc = pCtx->pfnTransferPages(pCtx->pClient, pCtx->pMap, pCtx->hFile,
                                            pCtx->offset + cbTransferred, &cbIO,
                                            (uint16_t)offFirstPage, (uint16_t)cPagesToTransfer, paPages);
                if (RT_FAILURE(rc))
                {
                    Log(("VBOXSF: vbsfTransferCommon: pfnTransferPages %Rrc, cbTransferred %d\n", rc, cbTransferred));

                    /* If some data was transferred, then it is no error. */
                    if (cbTransferred > 0)
                        rc = VINF_SUCCESS;

                    break;
                }

                cbTransferred += cbIO;

                if (cbIO < cbToTransfer)
                {
                    /* Transferred less than requested, do not continue with the possibly remaining data. */
                    break;
                }

                cPagesTransferred += cPagesToTransfer;
                offFirstPage = 0;

                cPagesToTransfer = cPages - cPagesTransferred;
                if (cPagesToTransfer > VBSF_MAX_READ_WRITE_PAGES)
                    cPagesToTransfer = VBSF_MAX_READ_WRITE_PAGES;
            }

            RTMemTmpFree(paPages);

            fProcessed = TRUE;
        }
    }

    if (fProcessed != TRUE)
    {
        /* Split large transfers. */
        cbTransferred = 0;
        cbToTransfer = RT_MIN(pCtx->cbData, VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE);

        /* Page list not supported or a fallback. */
        Log(("VBOXSF: vbsfTransferCommon: using linear address\n"));

        while (cbToTransfer != 0)
        {
            cbIO = cbToTransfer;

            Log(("VBOXSF: vbsfTransferCommon: transferring %d bytes at %d\n",
                 cbToTransfer, cbTransferred));

            rc = pCtx->pfnTransferBuffer(pCtx->pClient, pCtx->pMap, pCtx->hFile,
                                         pCtx->offset + cbTransferred, &cbIO,
                                         pCtx->pBuffer + cbTransferred, true /* locked */);

            if (RT_FAILURE(rc))
            {
                Log(("VBOXSF: vbsfTransferCommon: pfnTransferBuffer %Rrc, cbTransferred %d\n", rc, cbTransferred));

                /* If some data was transferred, then it is no error. */
                if (cbTransferred > 0)
                    rc = VINF_SUCCESS;

                break;
            }

            cbTransferred += cbIO;

            if (cbIO < cbToTransfer)
            {
                /* Transferred less than requested, do not continue with the possibly remaining data. */
                break;
            }

            cbToTransfer = pCtx->cbData - cbTransferred;
            if (cbToTransfer > VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE)
                cbToTransfer = VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE;
        }
    }

    pCtx->cbData = cbTransferred;

    return rc;
}
Example #17
PFREELDR_MEMORY_DESCRIPTOR
PcMemGetMemoryMap(ULONG *MemoryMapSize)
{
    ULONG i, EntryCount;
    ULONG ExtendedMemorySizeAtOneMB;
    ULONG ExtendedMemorySizeAtSixteenMB;
    ULONG EbdaBase, EbdaSize;
    TRACE("PcMemGetMemoryMap()\n");

    EntryCount = PcMemGetBiosMemoryMap(PcMemoryMap, MAX_BIOS_DESCRIPTORS);

    /* If the BIOS didn't provide a memory map, synthesize one */
    if (EntryCount == 0)
    {
        GetExtendedMemoryConfiguration(&ExtendedMemorySizeAtOneMB,
                                       &ExtendedMemorySizeAtSixteenMB);

        /* Conventional memory */
        AddMemoryDescriptor(PcMemoryMap,
                            MAX_BIOS_DESCRIPTORS,
                            0,
                            PcMemGetConventionalMemorySize() * 1024 / PAGE_SIZE,
                            LoaderFree);

        /* Extended memory */
        PcMapCount = AddMemoryDescriptor(PcMemoryMap,
                                         MAX_BIOS_DESCRIPTORS,
                                         1024 * 1024 / PAGE_SIZE,
                                         ExtendedMemorySizeAtOneMB * 1024 / PAGE_SIZE,
                                         LoaderFree);

        if (ExtendedMemorySizeAtSixteenMB != 0)
        {
            /* Extended memory at 16MB */
            PcMapCount = AddMemoryDescriptor(PcMemoryMap,
                                             MAX_BIOS_DESCRIPTORS,
                                             0x1000000 / PAGE_SIZE,
                                             ExtendedMemorySizeAtSixteenMB * 64 * 1024 / PAGE_SIZE,
                                             LoaderFree);
        }

        /* Check if we have an EBDA and get its location */
        if (GetEbdaLocation(&EbdaBase, &EbdaSize))
        {
            /* Add the descriptor */
            PcMapCount = AddMemoryDescriptor(PcMemoryMap,
                                             MAX_BIOS_DESCRIPTORS,
                                             (EbdaBase / PAGE_SIZE),
                                             ADDRESS_AND_SIZE_TO_SPAN_PAGES(EbdaBase, EbdaSize),
                                             LoaderFirmwarePermanent);
        }
    }

    /* Setup some protected ranges */
    SetMemory(0x000000, 0x01000, LoaderFirmwarePermanent); // Realmode IVT / BDA
    SetMemory(0x0A0000, 0x50000, LoaderFirmwarePermanent); // Video memory
    SetMemory(0x0F0000, 0x10000, LoaderSpecialMemory); // ROM
    SetMemory(0xFFF000, 0x01000, LoaderSpecialMemory); // unusable memory (do we really need this?)

    /* Reserve some static ranges for freeldr */
    ReserveMemory(0x1000, STACKLOW - 0x1000, LoaderFirmwareTemporary, "BIOS area");
    ReserveMemory(STACKLOW, STACKADDR - STACKLOW, LoaderOsloaderStack, "FreeLdr stack");
    ReserveMemory(FREELDR_BASE, FrLdrImageSize, LoaderLoadedProgram, "FreeLdr image");

    /* Default to 1 page above freeldr for the disk read buffer */
    DiskReadBuffer = (PUCHAR)ALIGN_UP_BY(FREELDR_BASE + FrLdrImageSize, PAGE_SIZE);
    DiskReadBufferSize = PAGE_SIZE;

    /* Scan for free range above freeldr image */
    for (i = 0; i < PcMapCount; i++)
    {
        if ((PcMemoryMap[i].BasePage > (FREELDR_BASE / PAGE_SIZE)) &&
            (PcMemoryMap[i].MemoryType == LoaderFree))
        {
            /* Use this range for the disk read buffer */
            DiskReadBuffer = (PVOID)(PcMemoryMap[i].BasePage * PAGE_SIZE);
            DiskReadBufferSize = min(PcMemoryMap[i].PageCount * PAGE_SIZE,
                                     MAX_DISKREADBUFFER_SIZE);
            break;
        }
    }

    TRACE("DiskReadBuffer=%p, DiskReadBufferSize=%lx\n",
          DiskReadBuffer, DiskReadBufferSize);

    /* Now reserve the range for the disk read buffer */
    ReserveMemory((ULONG_PTR)DiskReadBuffer,
                  DiskReadBufferSize,
                  LoaderFirmwareTemporary,
                  "Disk read buffer");

    TRACE("Dumping resulting memory map:\n");
    for (i = 0; i < PcMapCount; i++)
    {
        TRACE("BasePage=0x%lx, PageCount=0x%lx, Type=%s\n",
              PcMemoryMap[i].BasePage,
              PcMemoryMap[i].PageCount,
              MmGetSystemMemoryMapTypeString(PcMemoryMap[i].MemoryType));
    }

    *MemoryMapSize = PcMapCount;
    return PcMemoryMap;
}
Example #18
/*++

Routine Description:

    Note that all interrupts are blocked on entry since
    this routine is called from HalInitializeProcessor.
    Initialize this processor's local and I/O APIC units.

Arguments:

    Processor - Supplies a logical processor number

Return Value:

    None.

--*/
VOID
CbusInitializeIOApic(
IN ULONG Processor,
IN PVOID PhysicalApicLocation,
IN ULONG RedirVector,
IN ULONG RebootVector,
IN ULONG IrqPolarity
)
{
        ULONG                           ProcessorBit;
        ULONG                           ApicIDBit;
        ULONG                           ApicBusNumber;
        ULONG                           RedirectionAddress;
        REDIRECTION_T           RedirectionEntry = { 0 };

        if (CbusIOApicCount >= MAX_CBUS_ELEMENTS) {
                return;
        }

        CbusIOApic[CbusIOApicCount] =
                (PVOID) HalpMapPhysicalMemoryWriteThrough (
                                PhysicalApicLocation,
                                (ULONG)ADDRESS_AND_SIZE_TO_SPAN_PAGES(
                                        PhysicalApicLocation, IO_APIC_SIZE));

        CbusApicBrandIOUnitID(Processor);

        //
        // Disable all 8259 inputs except the irq0 clock.
        // remember the irq0 clock and the irq13 DMA
        // chaining interrupts are internal to the Intel EISA
        // chipset (specifically, the ISP chip), and if the HAL
        // wants to enable them, it must be done here.
        // This is done by enabling the 8259 ISP to send them
        // to the processor(s) via the APIC.  However, the Corollary HAL
        // currently uses the local APIC timers for clocks.  The irq0
        // clock is enabled solely for the performance counter because
        // we want to use a separate clock for it, (rather than the system
        // timer which creates race conditions).
        //
        // Note that all other EISA bus device interrupts only need to
        // be enabled in the APIC for processors to see them.
        //
        CbusDisable8259s(0xFFFE);

        //
        // All redirection table entries are disabled by default when the
        // processor emerges from reset.  Later, each entry is individually
        // enabled from their respective drivers via HalEnableSystemInterrupt.
        //

        //
        // Indicate that the APIC (not the 8259s) will now provide
        // the interrupt vectors to the processor during an INTA cycle.
        // This is done by writing to the APMode port.  Note that at this
        // time we will also sync the APIC polarity control registers with
        // the ELCR.  Since irq0 has no polarity control, the hardware
        // uses bit0 for the APMode enable, so make sure this bit is on too.
        //

        CbusLocalApic->APMode = (UCHAR)((IrqPolarity & 0xFF) | 0x1);
        CbusLocalApic->PolarityPortHigh = (UCHAR)((IrqPolarity >> 8) & 0xFF);

        //
        // Create an interrupt gate so other processors can
        // let the boot processor know about desired I/O APIC
        // modifications (ie: enabling & disabling interrupts).
        // This is necessary since each processor can only access
        // its own I/O APIC, and only the boot processor's I/O APIC
        // is attached to the EISA bus interrupt inputs.  This only
        // needs to be done once regardless of how many I/O APICs are
        // present in the system.
        //

        if (CbusIOApicCount == 0) {
                KiSetHandlerAddressToIDT(RedirVector, IOApicUpdate);
                HalEnableSystemInterrupt(RedirVector, IPI_LEVEL, Latched);

                KiSetHandlerAddressToIDT(RebootVector, CbusRebootHandler);
                HalEnableSystemInterrupt(RebootVector, IPI_LEVEL, Latched);
        }

#define TRAP2	2

        ProcessorBit = (ULONG) KeGetPcr()->HalReserved[PCR_BIT];

        ApicIDBit = (ProcessorBit << APIC_BIT_TO_ID);

        /*
         * Support NMIs from the EISA bridge as trap 2.
         */
        RedirectionEntry.ra.Mask = APIC_INTR_UNMASKED;
        RedirectionEntry.ra.Trigger = APIC_LEVEL;
        RedirectionEntry.ra.Dest_mode = APIC_LOGICAL_MODE;
        RedirectionEntry.ra.Vector = TRAP2;
        RedirectionEntry.ra.Destination = ApicIDBit;
        RedirectionEntry.ra.Delivery_mode = APIC_INTR_FIXED;

        //
        // Support multiple I/O buses by initializing
        // our current bus number...
        //
        ApicBusNumber = CbusIOApicCount;

        RedirectionAddress = (ULONG)CbusApicLinkVector((PBUS_HANDLER)0,
                                                (ULONG)-1, TRAP2);

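        //
        // Note the write order below: the destination (high dword) goes
        // first, so the entry is fully described before the low dword
        // (which holds the mask bit) is written.
        //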
        WRITE_IOAPIC_ULONG(ApicBusNumber, RedirectionAddress + 1,
                           RedirectionEntry.ra.Destination);
        WRITE_IOAPIC_ULONG(ApicBusNumber, RedirectionAddress,
                           RedirectionEntry.rb.dword1);

        //
        // we've initialized another I/O APIC...
        //
        CbusIOApicCount++;
}
Beispiel #19
0
NTSTATUS
MiCcPutPagesInTransition (
    IN PMI_READ_INFO MiReadInfo
    )

/*++

Routine Description:

    This routine allocates physical memory for the specified read-list and
    puts all the pages in transition (so collided faults from other threads
    for these same pages remain coherent).  I/O for any pages not already
    resident are issued here.  The caller must wait for their completion.

Arguments:

    MiReadInfo - Supplies a pointer to the read-list.

Return Value:

    STATUS_SUCCESS - all the pages were already resident, reference counts
                     have been applied and no I/O needs to be waited for.

    STATUS_ISSUE_PAGING_IO - the I/O has been issued and the caller must wait.

    Various other failure status values indicate the operation failed.

Environment:

    Kernel mode. PASSIVE_LEVEL.

--*/

{
    NTSTATUS status;
    PMMPTE LocalPrototypePte;
    PVOID StartingVa;
    PFN_NUMBER MdlPages;
    KIRQL OldIrql;
    MMPTE PteContents;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER ResidentAvailableCharge;
    PPFN_NUMBER IoPage;
    PPFN_NUMBER ApiPage;
    PPFN_NUMBER Page;
    PPFN_NUMBER DestinationPage;
    ULONG PageColor;
    PMMPTE PointerPte;
    PMMPTE *ProtoPteArray;
    PMMPTE *EndProtoPteArray;
    PFN_NUMBER DummyPage;
    PMDL Mdl;
    PMDL FreeMdl;
    PMMPFN PfnProto;
    PMMPFN Pfn1;
    PMMPFN DummyPfn1;
    ULONG i;
    PFN_NUMBER DummyTrim;
    ULONG NumberOfPagesNeedingIo;
    MMPTE TempPte;
    PMMPTE PointerPde;
    PEPROCESS CurrentProcess;
    PMMINPAGE_SUPPORT InPageSupport;
    PKPRCB Prcb;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    MiReadInfo->DummyPagePfn = NULL;

    FreeMdl = NULL;
    CurrentProcess = PsGetCurrentProcess();

    PfnProto = NULL;
    PointerPde = NULL;

    InPageSupport = MiReadInfo->InPageSupport;
    
    Mdl = MI_EXTRACT_PREFETCH_MDL (InPageSupport);
    ASSERT (Mdl == MiReadInfo->IoMdl);

    IoPage = (PPFN_NUMBER)(Mdl + 1);
    ApiPage = (PPFN_NUMBER)(MiReadInfo->ApiMdl + 1);

    StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
    
    MdlPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                               Mdl->ByteCount);

    if (MdlPages + 1 > MAXUSHORT) {

        //
        // The PFN ReferenceCount for the dummy page could wrap, refuse the
        // request.
        //

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    NumberOfPagesNeedingIo = 0;

    ProtoPteArray = (PMMPTE *)InPageSupport->BasePte;
    EndProtoPteArray = ProtoPteArray + MdlPages;

    ASSERT (*ProtoPteArray != NULL);

    LOCK_PFN (OldIrql);

    //
    // Ensure sufficient pages exist for the transfer plus the dummy page.
    //

    if (((SPFN_NUMBER)MdlPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) ||
        (MI_NONPAGEABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)MdlPages)) {

        UNLOCK_PFN (OldIrql);

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // Charge resident available immediately as the PFN lock may get released
    // and reacquired below before all the pages have been locked down.
    // Note the dummy page is immediately charged separately.
    //

    MI_DECREMENT_RESIDENT_AVAILABLE (MdlPages, MM_RESAVAIL_ALLOCATE_BUILDMDL);

    ResidentAvailableCharge = MdlPages;

    //
    // Allocate a dummy page to map discarded pages that aren't skipped.
    //

    DummyPage = MiRemoveAnyPage (0);
    Pfn1 = MI_PFN_ELEMENT (DummyPage);

    ASSERT (Pfn1->u2.ShareCount == 0);
    ASSERT (Pfn1->u3.e2.ReferenceCount == 0);

    MiInitializePfnForOtherProcess (DummyPage, MI_PF_DUMMY_PAGE_PTE, 0);

    //
    // Give the page a containing frame so MiIdentifyPfn won't crash.
    //

    Pfn1->u4.PteFrame = PsInitialSystemProcess->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;

    //
    // Always bias the reference count by 1 and charge for this locked page
    // up front so the myriad increments and decrements don't get slowed
    // down with needless checking.
    //

    Pfn1->u3.e1.PrototypePte = 0;

    MI_ADD_LOCKED_PAGE_CHARGE (Pfn1);

    Pfn1->u3.e1.ReadInProgress = 1;

    MiReadInfo->DummyPagePfn = Pfn1;

    DummyPfn1 = Pfn1;

    DummyPfn1->u3.e2.ReferenceCount =
        (USHORT)(DummyPfn1->u3.e2.ReferenceCount + MdlPages);

    //
    // Properly initialize the inpage support block fields we overloaded.
    //

    InPageSupport->BasePte = *ProtoPteArray;

    //
    // Build the proper InPageSupport and MDL to describe this run.
    //

    for (; ProtoPteArray < EndProtoPteArray; ProtoPteArray += 1, IoPage += 1, ApiPage += 1) {
    
        //
        // Fill the MDL entry for this RLE.
        //
    
        PointerPte = *ProtoPteArray;

        ASSERT (PointerPte != NULL);

        //
        // The PointerPte better be inside a prototype PTE allocation
        // so that subsequent page trims update the correct PTEs.
        //

        ASSERT (((PointerPte >= (PMMPTE)MmPagedPoolStart) &&
                (PointerPte <= (PMMPTE)MmPagedPoolEnd)) ||
                ((PointerPte >= (PMMPTE)MmSpecialPoolStart) && (PointerPte <= (PMMPTE)MmSpecialPoolEnd)));

        //
        // Check the state of this prototype PTE now that the PFN lock is held.
        // If the page is not resident, the PTE must be put in transition with
        // read in progress before the PFN lock is released.
        //

        //
        // Lock page containing prototype PTEs in memory by
        // incrementing the reference count for the page.
        // Unlock any page locked earlier containing prototype PTEs if
        // the containing page is not the same for both.
        //

        if (PfnProto != NULL) {

            if (PointerPde != MiGetPteAddress (PointerPte)) {

                ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
                MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
                PfnProto = NULL;
            }
        }

        if (PfnProto == NULL) {

            ASSERT (!MI_IS_PHYSICAL_ADDRESS (PointerPte));
   
            PointerPde = MiGetPteAddress (PointerPte);
 
            if (PointerPde->u.Hard.Valid == 0) {
                MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
            }

            PfnProto = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
            MI_ADD_LOCKED_PAGE_CHARGE (PfnProto);
            ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
        }

recheck:
        PteContents = *PointerPte;

        // LWFIX: are zero or dzero PTEs possible here?
        ASSERT (PteContents.u.Long != 0);

        if (PteContents.u.Hard.Valid == 1) {
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u3.e1.PrototypePte == 1);
            MI_ADD_LOCKED_PAGE_CHARGE (Pfn1);
            *ApiPage = PageFrameIndex;
            *IoPage = DummyPage;
            continue;
        }

        if ((PteContents.u.Soft.Prototype == 0) &&
            (PteContents.u.Soft.Transition == 1)) {

            //
            // The page is in transition.  If there is an inpage still in
            // progress, wait for it to complete.  Reference the PFN and
            // then march on.
            //

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u3.e1.PrototypePte == 1);

            if (Pfn1->u4.InPageError) {

                //
                // There was an in-page read error and there are other
                // threads colliding for this page, delay to let the
                // other threads complete and then retry.
                //

                UNLOCK_PFN (OldIrql);
                KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmHalfSecond);
                LOCK_PFN (OldIrql);
                goto recheck;
            }

            if (Pfn1->u3.e1.ReadInProgress) {
                    // LWFIX - start with temp\aw.c
            }

            //
            // PTE refers to a normal transition PTE.
            //

            ASSERT ((SPFN_NUMBER)MmAvailablePages >= 0);

            if (MmAvailablePages == 0) {

                //
                // This can only happen if the system is utilizing a hardware
                // compression cache.  This ensures that only a safe amount
                // of the compressed virtual cache is directly mapped so that
                // if the hardware gets into trouble, we can bail it out.
                //

                UNLOCK_PFN (OldIrql);
                KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmHalfSecond);
                LOCK_PFN (OldIrql);
                goto recheck;
            }

            //
            // The PFN reference count will be 1 already here if the
            // modified writer has begun a write of this page.  Otherwise
            // it's ordinarily 0.
            //

            MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1);

            *IoPage = DummyPage;
            *ApiPage = PageFrameIndex;
            continue;
        }

        // LWFIX: need to handle protos that are now pagefile (or dzero)
        // backed - prefetching it from the file here would cause us to lose
        // the contents.  Note this can happen for session-space images
        // as we back modified (ie: for relocation fixups or IAT
        // updated) portions from the pagefile.  remove the assert below too.
        ASSERT (PteContents.u.Soft.Prototype == 1);

        if ((MmAvailablePages < MM_HIGH_LIMIT) &&
            (MiEnsureAvailablePageOrWait (NULL, OldIrql))) {

            //
            // Had to wait so recheck all state.
            //

            goto recheck;
        }

        NumberOfPagesNeedingIo += 1;

        //
        // Allocate a physical page.
        //

        PageColor = MI_PAGE_COLOR_VA_PROCESS (
                        MiGetVirtualAddressMappedByPte (PointerPte),
                        &CurrentProcess->NextPageColor);

        PageFrameIndex = MiRemoveAnyPage (PageColor);

        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        ASSERT (Pfn1->u2.ShareCount == 0);
        ASSERT (PointerPte->u.Hard.Valid == 0);

        //
        // Initialize read-in-progress PFN.
        //
    
        MiInitializePfn (PageFrameIndex, PointerPte, 0);

        //
        // These pieces of MiInitializePfn initialization are overridden
        // here as these pages are only going into prototype
        // transition and not into any page tables.
        //

        Pfn1->u3.e1.PrototypePte = 1;
        Pfn1->u2.ShareCount -= 1;
        ASSERT (Pfn1->u2.ShareCount == 0);
        Pfn1->u3.e1.PageLocation = ZeroedPageList;
        Pfn1->u3.e2.ReferenceCount -= 1;
        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1);

        //
        // Initialize the I/O specific fields.
        //
    
        Pfn1->u1.Event = &InPageSupport->Event;
        Pfn1->u3.e1.ReadInProgress = 1;
        ASSERT (Pfn1->u4.InPageError == 0);

        //
        // Increment the PFN reference count in the control area for
        // the subsection.
        //

        MiReadInfo->ControlArea->NumberOfPfnReferences += 1;
    
        //
        // Put the prototype PTE into the transition state.
        //

        MI_MAKE_TRANSITION_PTE (TempPte,
                                PageFrameIndex,
                                PointerPte->u.Soft.Protection,
                                PointerPte);

        MI_WRITE_INVALID_PTE (PointerPte, TempPte);

        *IoPage = PageFrameIndex;
        *ApiPage = PageFrameIndex;
    }
    
    //
    // If all the pages were resident, dereference the dummy page references
    // now and notify our caller that I/O is not necessary.
    //
    
    if (NumberOfPagesNeedingIo == 0) {
        ASSERT (DummyPfn1->u3.e2.ReferenceCount > MdlPages);
        DummyPfn1->u3.e2.ReferenceCount =
            (USHORT)(DummyPfn1->u3.e2.ReferenceCount - MdlPages);

        //
        // Unlock page containing prototype PTEs.
        //

        if (PfnProto != NULL) {
            ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
            MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
        }

        UNLOCK_PFN (OldIrql);

        //
        // Return the upfront resident available charge as the
        // individual charges have all been made at this point.
        //

        MI_INCREMENT_RESIDENT_AVAILABLE (ResidentAvailableCharge,
                                         MM_RESAVAIL_FREE_BUILDMDL_EXCESS);

        return STATUS_SUCCESS;
    }

    //
    // Carefully trim leading dummy pages.
    //

    Page = (PPFN_NUMBER)(Mdl + 1);

    DummyTrim = 0;
    for (i = 0; i < MdlPages - 1; i += 1) {
        if (*Page == DummyPage) {
            DummyTrim += 1;
            Page += 1;
        }
        else {
            break;
        }
    }

    if (DummyTrim != 0) {

        Mdl->Size = (USHORT)(Mdl->Size - (DummyTrim * sizeof(PFN_NUMBER)));
        Mdl->ByteCount -= (ULONG)(DummyTrim * PAGE_SIZE);
        ASSERT (Mdl->ByteCount != 0);
        InPageSupport->ReadOffset.QuadPart += (DummyTrim * PAGE_SIZE);
        DummyPfn1->u3.e2.ReferenceCount =
                (USHORT)(DummyPfn1->u3.e2.ReferenceCount - DummyTrim);

        //
        // Shuffle down the PFNs in the MDL.
        // Recalculate BasePte to adjust for the shuffle.
        //

        Pfn1 = MI_PFN_ELEMENT (*Page);

        ASSERT (Pfn1->PteAddress->u.Hard.Valid == 0);
        ASSERT ((Pfn1->PteAddress->u.Soft.Prototype == 0) &&
                 (Pfn1->PteAddress->u.Soft.Transition == 1));

        InPageSupport->BasePte = Pfn1->PteAddress;

        DestinationPage = (PPFN_NUMBER)(Mdl + 1);

        do {
            *DestinationPage = *Page;
            DestinationPage += 1;
            Page += 1;
            i += 1;
        } while (i < MdlPages);

        MdlPages -= DummyTrim;
    }

    //
    // Carefully trim trailing dummy pages.
    //

    ASSERT (MdlPages != 0);

    Page = (PPFN_NUMBER)(Mdl + 1) + MdlPages - 1;

    if (*Page == DummyPage) {

        ASSERT (MdlPages >= 2);

        //
        // Trim the last page specially as it may be a partial page.
        //

        Mdl->Size -= sizeof(PFN_NUMBER);
        if (BYTE_OFFSET(Mdl->ByteCount) != 0) {
            Mdl->ByteCount &= ~(PAGE_SIZE - 1);
        }
        else {
            Mdl->ByteCount -= PAGE_SIZE;
        }
        ASSERT (Mdl->ByteCount != 0);
        DummyPfn1->u3.e2.ReferenceCount -= 1;

        //
        // Now trim any other trailing pages.
        //

        Page -= 1;
        DummyTrim = 0;
        while (Page != ((PPFN_NUMBER)(Mdl + 1))) {
            if (*Page != DummyPage) {
                break;
            }
            DummyTrim += 1;
            Page -= 1;
        }
        if (DummyTrim != 0) {
            ASSERT (Mdl->Size > (USHORT)(DummyTrim * sizeof(PFN_NUMBER)));
            Mdl->Size = (USHORT)(Mdl->Size - (DummyTrim * sizeof(PFN_NUMBER)));
            Mdl->ByteCount -= (ULONG)(DummyTrim * PAGE_SIZE);
            DummyPfn1->u3.e2.ReferenceCount =
                (USHORT)(DummyPfn1->u3.e2.ReferenceCount - DummyTrim);
        }

        ASSERT (MdlPages > DummyTrim + 1);
        MdlPages -= (DummyTrim + 1);

#if DBG
        StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
    
        ASSERT (MdlPages == ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
                                                               Mdl->ByteCount));
#endif
    }

    //
    // If the MDL is not already embedded in the inpage block, see if its
    // final size qualifies it - if so, embed it now.
    //

    if ((Mdl != &InPageSupport->Mdl) &&
        (Mdl->ByteCount <= (MM_MAXIMUM_READ_CLUSTER_SIZE + 1) * PAGE_SIZE)) {

#if DBG
        RtlFillMemoryUlong (&InPageSupport->Page[0],
                            (MM_MAXIMUM_READ_CLUSTER_SIZE+1) * sizeof (PFN_NUMBER),
                            0xf1f1f1f1);
#endif

        RtlCopyMemory (&InPageSupport->Mdl, Mdl, Mdl->Size);

        FreeMdl = Mdl;

        Mdl = &InPageSupport->Mdl;

        ASSERT (((ULONG_PTR)Mdl & (sizeof(QUAD) - 1)) == 0);
        InPageSupport->u1.e1.PrefetchMdlHighBits = ((ULONG_PTR)Mdl >> 3);
    }
Beispiel #20
0
/*
 * @implemented
 */
PVOID
NTAPI
MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
             IN SIZE_T NumberOfBytes,
             IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    MMPTE TempPte;
    PMMPFN Pfn1 = NULL;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    BOOLEAN IsIoMapping;

    //
    // Must be called with a non-zero count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Make sure the upper bits are 0 if this system
    // can't describe more than 4 GB of physical memory.
    // FIXME: This doesn't respect PAE, but we currently don't
    // define a PAE build flag since there is no such build.
    //
#if !defined(_M_AMD64)
    ASSERT(PhysicalAddress.HighPart == 0);
#endif

    //
    // Normalize and validate the caching attributes
    //
    CacheType &= 0xFF;
    if (CacheType >= MmMaximumCacheType) return NULL;

    //
    // Calculate page count
    //
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(PhysicalAddress.LowPart,
                                               NumberOfBytes);

    //
    // Compute the PFN and check if it's a known I/O mapping
    // Also translate the cache attribute
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    Pfn1 = MiGetPfnEntry(Pfn);
    IsIoMapping = (Pfn1 == NULL) ? TRUE : FALSE;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    //
    // Now allocate system PTEs for the mapping, and get the VA
    //
    PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
    if (!PointerPte) return NULL;
    BaseAddress = MiPteToAddress(PointerPte);

    //
    // Check if this is uncached
    //
    if (CacheAttribute != MiCached)
    {
        //
        // Flush all caches
        //
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    //
    // Now compute the VA offset
    //
    BaseAddress = (PVOID)((ULONG_PTR)BaseAddress +
                          BYTE_OFFSET(PhysicalAddress.LowPart));

    //
    // Get the template and configure caching
    //
    TempPte = ValidKernelPte;
    switch (CacheAttribute)
    {
        case MiNonCached:

            //
            // Disable the cache
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiCached:

            //
            // Leave defaults
            //
            break;

        case MiWriteCombined:

            //
            // We don't support write combining yet
            //
            ASSERT(FALSE);
            break;

        default:

            //
            // Should never happen
            //
            ASSERT(FALSE);
            break;
    }

    //
    // Sanity check and re-flush
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    ASSERT((Pfn1 == MiGetPfnEntry(Pfn)) || (Pfn1 == NULL));
    KeFlushEntireTb(TRUE, TRUE);
    KeInvalidateAllCaches();

    //
    // Do the mapping
    //
    do
    {
        //
        // Write the PFN
        //
        TempPte.u.Hard.PageFrameNumber = Pfn++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    //
    // We're done!
    //
    return BaseAddress;
}
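A typical usage sketch for this routine and its counterpart MmUnmapIoSpace (REG_BASE_PHYS and REG_SIZE are hypothetical device constants):

PHYSICAL_ADDRESS Phys;
PUCHAR Regs;

Phys.QuadPart = REG_BASE_PHYS;
Regs = (PUCHAR)MmMapIoSpace(Phys, REG_SIZE, MmNonCached);
if (Regs != NULL)
{
    //
    // Read the device's (hypothetical) status register at offset 0
    //
    UCHAR DeviceStatus = READ_REGISTER_UCHAR(Regs);
    UNREFERENCED_PARAMETER(DeviceStatus);

    //
    // Unmap with the same byte count that was mapped
    //
    MmUnmapIoSpace(Regs, REG_SIZE);
}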
Beispiel #21
0
/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the process associated and capture the flags which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automatically undo any MmGetSystemAddressForMdl calls for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
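For context, a sketch of the canonical probe-lock-use-unlock pattern this routine pairs with; UserBuffer and Length stand in for a caller-supplied request:

PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
if (Mdl != NULL)
{
    __try
    {
        MmProbeAndLockPages(Mdl, UserMode, IoReadAccess);

        /* ... use the buffer, e.g. via
           MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority) ... */

        MmUnlockPages(Mdl);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        /* MmProbeAndLockPages raises a status on failure */
    }
    IoFreeMdl(Mdl);
}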
Beispiel #22
0
/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
{
    PFN_COUNT PageCount, MdlPageCount;
    PFN_NUMBER PageFrameIndex;
    PHYSICAL_ADDRESS LowAddress, HighAddress, SkipBytes;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMDL Mdl;
    PVOID BaseAddress;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Get the page count
    //
    ASSERT(NumberOfBytes != 0);
    PageCount = (PFN_COUNT)BYTES_TO_PAGES(NumberOfBytes);

    //
    // Use the MDL allocator for simplicity, so setup the parameters
    //
    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = -1;
    SkipBytes.QuadPart = 0;
    CacheAttribute = MiPlatformCacheAttributes[0][MmNonCached];

    //
    // Now call the MDL allocator
    //
    Mdl = MiAllocatePagesForMdl(LowAddress,
                                HighAddress,
                                SkipBytes,
                                NumberOfBytes,
                                CacheAttribute,
                                0);
    if (!Mdl) return NULL;

    //
    // Get the MDL VA and check how many pages we got (could be partial)
    //
    BaseAddress = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    MdlPageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, Mdl->ByteCount);
    if (PageCount != MdlPageCount)
    {
        //
        // Unlike MDL allocations, a partial result is not acceptable
        // for a noncached allocation, so fail
        //
        ASSERT(PageCount > MdlPageCount);
        MmFreePagesFromMdl(Mdl);
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Allocate system PTEs for the base address
    // We use an extra PTE to store the MDL pointer for the later free
    //
    PointerPte = MiReserveSystemPtes(PageCount + 1, SystemPteSpace);
    if (!PointerPte)
    {
        //
        // Out of memory...
        //
        MmFreePagesFromMdl(Mdl);
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Store the MDL pointer
    //
    *(PMDL*)PointerPte++ = Mdl;

    //
    // Okay, now see what range we got
    //
    BaseAddress = MiPteToAddress(PointerPte);

    //
    // This is our array of pages
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Setup the template PTE
    //
    TempPte = ValidKernelPte;

    //
    // Now check what kind of caching we should use
    //
    switch (CacheAttribute)
    {
        case MiNonCached:

            //
            // Disable caching
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiWriteCombined:

            //
            // Enable write combining
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_COMBINED(&TempPte);
            break;

        default:
            //
            // Nothing to do
            //
            break;
    }

    //
    // Now loop the MDL pages
    //
    do
    {
        //
        // Get the PFN
        //
        PageFrameIndex = *MdlPages++;

        //
        // Set the PFN in the page and write it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    //
    // Return the base address
    //
    return BaseAddress;
}
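The extra PTE that stores the MDL pointer implies the free path can recover everything from the base address alone. A sketch of the matching free routine under that layout assumption (what an MmFreeNonCachedMemory implementation would need to do; not verbatim):

VOID
NTAPI
SketchFreeNonCachedMemory(IN PVOID BaseAddress,
                          IN SIZE_T NumberOfBytes)
{
    PMDL Mdl;
    PMMPTE PointerPte;
    PFN_COUNT PageCount = (PFN_COUNT)BYTES_TO_PAGES(NumberOfBytes);

    //
    // The slot just before the first mapping PTE holds the MDL pointer
    //
    PointerPte = MiAddressToPte(BaseAddress) - 1;
    Mdl = *(PMDL*)PointerPte;

    //
    // Release the physical pages, the MDL, and the PTEs (including
    // the extra pointer slot)
    //
    MmFreePagesFromMdl(Mdl);
    ExFreePoolWithTag(Mdl, TAG_MDL);
    MiReleaseSystemPtes(PointerPte, PageCount + 1, SystemPteSpace);
}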
Beispiel #23
0
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    UNIMPLEMENTED;
    return NULL;
}
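Drivers rarely call this routine directly; the usual entry point is the MmGetSystemAddressForMdlSafe macro, conventionally defined along these lines (a sketch, not copied from any particular header):

#define MmGetSystemAddressForMdlSafe(MDL, PRIORITY)                    \
    (((MDL)->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |                     \
                         MDL_SOURCE_IS_NONPAGED_POOL)) ?               \
         ((MDL)->MappedSystemVa) :                                     \
         (MmMapLockedPagesSpecifyCache((MDL),                          \
                                       KernelMode,                     \
                                       MmCached,                       \
                                       NULL,                           \
                                       FALSE,                          \
                                       (PRIORITY))))

The flag check short-circuits the common case where the MDL is already mapped (or describes nonpaged pool), falling through to MmMapLockedPagesSpecifyCache only when a fresh mapping is needed.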
Beispiel #24
0
/// <summary>
/// Allocate kernel memory and map into User space. Or free previously allocated memory
/// </summary>
/// <param name="pProcess">Target process object</param>
/// <param name="pAllocFree">Request params.</param>
/// <param name="pResult">Allocated region info.</param>
/// <returns>Status code</returns>
NTSTATUS BBAllocateFreePhysical( IN PEPROCESS pProcess, IN PALLOCATE_FREE_MEMORY pAllocFree, OUT PALLOCATE_FREE_MEMORY_RESULT pResult )
{
    NTSTATUS status = STATUS_SUCCESS;
    PVOID pRegionBase = NULL;
    PMDL pMDL = NULL;

    ASSERT( pProcess != NULL && pResult != NULL );
    if (pProcess == NULL || pResult == NULL)
        return STATUS_INVALID_PARAMETER;

    // MDL doesn't support regions this large
    if (pAllocFree->size > 0xFFFFFFFF)
    {
        DPRINT( "BlackBone: %s: Region size if too big: 0x%p\n", __FUNCTION__, pAllocFree->size );
        return STATUS_INVALID_PARAMETER;
    }

    // Align on page boundaries
    pAllocFree->base = (ULONGLONG)PAGE_ALIGN( pAllocFree->base );
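    // Base is page-aligned above, so this rounds the size up to a whole
    // number of pages (e.g. a 0x1001-byte request becomes 0x2000)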
    pAllocFree->size = ADDRESS_AND_SIZE_TO_SPAN_PAGES( pAllocFree->base, pAllocFree->size ) << PAGE_SHIFT;

    // Allocate
    if (pAllocFree->allocate != FALSE)
    {
        PMMVAD_SHORT pVad = NULL;
        if (pAllocFree->base != 0 && BBFindVAD( pProcess, pAllocFree->base, &pVad ) != STATUS_NOT_FOUND)
            return STATUS_ALREADY_COMMITTED;

        pRegionBase = ExAllocatePoolWithTag( NonPagedPool, pAllocFree->size, BB_POOL_TAG );
        if (!pRegionBase)
            return STATUS_NO_MEMORY;

        // Cleanup buffer before mapping it into UserMode to prevent exposure of kernel data
        RtlZeroMemory( pRegionBase, pAllocFree->size );

        pMDL = IoAllocateMdl( pRegionBase, (ULONG)pAllocFree->size, FALSE, FALSE, NULL );
        if (pMDL == NULL)
        {
            ExFreePoolWithTag( pRegionBase, BB_POOL_TAG );
            return STATUS_NO_MEMORY;
        }

        MmBuildMdlForNonPagedPool( pMDL );

        // Map at original base
        __try {
            pResult->address = (ULONGLONG)MmMapLockedPagesSpecifyCache( 
                pMDL, UserMode, MmCached, (PVOID)pAllocFree->base, FALSE, NormalPagePriority 
                );
        }
        __except (EXCEPTION_EXECUTE_HANDLER) { }

        // Map at any suitable
        if (pResult->address == 0 && pAllocFree->base != 0)
        {
            __try {
                pResult->address = (ULONGLONG)MmMapLockedPagesSpecifyCache(
                    pMDL, UserMode, MmCached, NULL, FALSE, NormalPagePriority
                    );
            }
            __except (EXCEPTION_EXECUTE_HANDLER) { }
        }
Beispiel #25
0
/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Get the process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif

    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
Beispiel #26
0
NTSTATUS
kmdf1394_AllocateAddressRange (
                               IN WDFDEVICE Device,
                               IN WDFREQUEST Request,
                               IN OUT PALLOCATE_ADDRESS_RANGE AllocateAddrRange)
/*++

Routine Description:

    Allocate Address Range routine.

Arguments:

    Device - the current WDFDEVICE Object.

    Request - the current request.

    AllocateAddrRange - the data buffer from usermode to be worked on.

Return Value:

    NTSTATUS
--*/
{
    NTSTATUS ntStatus = STATUS_SUCCESS;
    PDEVICE_EXTENSION deviceExtension = GetDeviceContext(Device);
    PIRB pIrb = NULL;
    PASYNC_ADDRESS_DATA pAsyncAddressData = NULL;
    ULONG nPages;
    WDFMEMORY Memory;
    CONTEXT_BUNDLE ContextBundle;

    ENTER("kmdf1394_AllocateAddressRange");

    pIrb = ExAllocatePoolWithTag (NonPagedPool, sizeof(IRB), POOLTAG_KMDF_VDEV);
    if (!pIrb)
    {
        TRACE(TL_ERROR, ("Failed to allocate pIrb!\n"));
        return STATUS_INSUFFICIENT_RESOURCES;
    } 

    ContextBundle.Context0 = AllocateAddrRange;

    pAsyncAddressData = ExAllocatePoolWithTag (
        NonPagedPool,
        sizeof(ASYNC_ADDRESS_DATA),
        POOLTAG_KMDF_VDEV);
    if (!pAsyncAddressData)
    {
        TRACE(TL_ERROR, ("Failed to allocate pAsyncAddressData!\n"));

        ExFreePoolWithTag (pIrb, POOLTAG_KMDF_VDEV);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    ContextBundle.Context1 = pAsyncAddressData;

    pAsyncAddressData->Buffer = ExAllocatePoolWithTag (
        NonPagedPool, 
        AllocateAddrRange->nLength, 
        POOLTAG_KMDF_VDEV);
    if (!pAsyncAddressData->Buffer) 
    {
        TRACE(TL_ERROR, ("Failed to allocate Buffer!\n"));

        ExFreePoolWithTag (pAsyncAddressData, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag (pIrb, POOLTAG_KMDF_VDEV);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    if ((0 == AllocateAddrRange->MaxSegmentSize) || 
        (PAGE_SIZE == AllocateAddrRange->MaxSegmentSize))
    {
        nPages = \
            ADDRESS_AND_SIZE_TO_SPAN_PAGES (
            AllocateAddrRange->Data, 
            AllocateAddrRange->nLength);
    }
    else
    {
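        //
        // Ceiling division: equivalent to
        // (nLength + MaxSegmentSize - 1) / MaxSegmentSize
        //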
        nPages = \
            (AllocateAddrRange->nLength % AllocateAddrRange->MaxSegmentSize) ? \
            AllocateAddrRange->nLength / AllocateAddrRange->MaxSegmentSize + 1 : \
            AllocateAddrRange->nLength / AllocateAddrRange->MaxSegmentSize;
    }

    pAsyncAddressData->AddressRange = ExAllocatePoolWithTag (
        NonPagedPool,
        sizeof(ADDRESS_RANGE)*nPages,
        POOLTAG_KMDF_VDEV);
    if (!pAsyncAddressData->AddressRange) 
    {
        TRACE(TL_ERROR, ("Failed to allocate AddressRange!\n"));

        ExFreePoolWithTag(pAsyncAddressData->Buffer, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag(pAsyncAddressData, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag(pIrb, POOLTAG_KMDF_VDEV);

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    pAsyncAddressData->pMdl = IoAllocateMdl (
        pAsyncAddressData->Buffer,
        AllocateAddrRange->nLength,
        FALSE,
        FALSE,
        NULL);

    if (!pAsyncAddressData->pMdl) 
    {
        TRACE(TL_ERROR, ("Failed to create pMdl!\n"));

        ExFreePoolWithTag (pAsyncAddressData->AddressRange, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag (pAsyncAddressData->Buffer, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag (pAsyncAddressData, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag (pIrb, POOLTAG_KMDF_VDEV);

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    MmBuildMdlForNonPagedPool (pAsyncAddressData->pMdl);

    TRACE(TL_TRACE, ("pMdl = %p\n", pAsyncAddressData->pMdl));

    //
    // copy over the contents of data to our driver buffer
    //
    RtlCopyMemory (
        pAsyncAddressData->Buffer, 
        AllocateAddrRange->Data, 
        AllocateAddrRange->nLength);

    pAsyncAddressData->nLength = AllocateAddrRange->nLength;

    RtlZeroMemory (pIrb, sizeof (IRB));
    pIrb->FunctionNumber = REQUEST_ALLOCATE_ADDRESS_RANGE;
    pIrb->Flags = 0;
    pIrb->u.AllocateAddressRange.Mdl = pAsyncAddressData->pMdl;
    pIrb->u.AllocateAddressRange.fulFlags = AllocateAddrRange->fulFlags;
    pIrb->u.AllocateAddressRange.nLength = AllocateAddrRange->nLength;
    pIrb->u.AllocateAddressRange.MaxSegmentSize = \
        AllocateAddrRange->MaxSegmentSize;
    pIrb->u.AllocateAddressRange.fulAccessType = \
        AllocateAddrRange->fulAccessType;
    pIrb->u.AllocateAddressRange.fulNotificationOptions = \
        AllocateAddrRange->fulNotificationOptions;


    pIrb->u.AllocateAddressRange.Callback = NULL;
    pIrb->u.AllocateAddressRange.Context = NULL;

    pIrb->u.AllocateAddressRange.Required1394Offset = \
        AllocateAddrRange->Required1394Offset;
    pIrb->u.AllocateAddressRange.FifoSListHead = NULL;
    pIrb->u.AllocateAddressRange.FifoSpinLock = NULL;
    pIrb->u.AllocateAddressRange.AddressesReturned = 0;
    pIrb->u.AllocateAddressRange.p1394AddressRange = \
        pAsyncAddressData->AddressRange;
    pIrb->u.AllocateAddressRange.DeviceExtension = deviceExtension;

    //
    // We need to create a WDF Memory object for the IRB to nestle in
    // for an async request.
    //
    ntStatus = WdfMemoryCreatePreallocated (
        WDF_NO_OBJECT_ATTRIBUTES,
        pIrb,
        sizeof (IRB),
        &Memory);
    if (!NT_SUCCESS (ntStatus))
    {
        TRACE (
            TL_ERROR,
            ("Failed WdfMemoryCreate %d\n", 
            ntStatus));

        IoFreeMdl (pAsyncAddressData->pMdl);
        ExFreePoolWithTag (pAsyncAddressData->AddressRange, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag (pAsyncAddressData->Buffer, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag (pAsyncAddressData, POOLTAG_KMDF_VDEV);
        ExFreePoolWithTag (pIrb, POOLTAG_KMDF_VDEV);

        return ntStatus;
    }

    ContextBundle.Context2 = Memory;
    ContextBundle.Context3 = deviceExtension;

    WdfRequestSetCompletionRoutine (
        Request, 
        kmdf1394_AllocateAddressRangeCompletion, 
        &ContextBundle);

    ntStatus = kmdf1394_SubmitIrpAsync (
        deviceExtension->StackIoTarget, 
        Request, 
        Memory);

    EXIT ("kmdf1394_AllocateAddressRange", ntStatus);
    return ntStatus;
} //kmdf1394_AllocateAddressRange
Beispiel #27
0
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
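
Because nonpaged pool is always resident, callers of MmBuildMdlForNonPagedPool pair it with IoAllocateMdl and skip MmProbeAndLockPages entirely. A hedged usage sketch follows; the pool tag and size are illustrative placeholders.

PVOID Buffer;
PMDL Mdl;
SIZE_T Size = 0x2000;

//
// Allocate a nonpaged buffer and describe it with an MDL
//
Buffer = ExAllocatePoolWithTag(NonPagedPool, Size, 'lpmx');
if (Buffer)
{
    Mdl = IoAllocateMdl(Buffer, (ULONG)Size, FALSE, FALSE, NULL);
    if (Mdl)
    {
        //
        // Fill the PFN array straight from the PTEs; no page locking
        // is needed since nonpaged pool never gets paged out
        //
        MmBuildMdlForNonPagedPool(Mdl);
    }
}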
Beispiel #28
0
static VOID
XenUsb_EvtIoInternalDeviceControl_PVURB(
  WDFQUEUE queue,
  WDFREQUEST request,
  size_t output_buffer_length,
  size_t input_buffer_length,
  ULONG io_control_code)
{
  NTSTATUS status;
  WDFDEVICE device = WdfIoQueueGetDevice(queue);
  PXENUSB_DEVICE_DATA xudd = GetXudd(device);
  WDF_REQUEST_PARAMETERS wrp;
  pvurb_t *pvurb;
  partial_pvurb_t *partial_pvurb;
  KIRQL old_irql;
  
  UNREFERENCED_PARAMETER(input_buffer_length);
  UNREFERENCED_PARAMETER(output_buffer_length);
  UNREFERENCED_PARAMETER(io_control_code);

  FUNCTION_ENTER();

  ASSERT(io_control_code == IOCTL_INTERNAL_PVUSB_SUBMIT_URB);

  WDF_REQUEST_PARAMETERS_INIT(&wrp);
  WdfRequestGetParameters(request, &wrp);
  pvurb = (pvurb_t *)wrp.Parameters.Others.Arg1;
  ASSERT(pvurb);
  RtlZeroMemory(&pvurb->rsp, sizeof(pvurb->rsp));
  pvurb->status = STATUS_SUCCESS;
  pvurb->request = request;
  pvurb->ref = 1;
  pvurb->total_length = 0;
  partial_pvurb = ExAllocatePoolWithTag(NonPagedPool, sizeof(*partial_pvurb), XENUSB_POOL_TAG); /* todo - use lookaside */
  if (!partial_pvurb) {
    WdfRequestComplete(request, STATUS_INSUFFICIENT_RESOURCES);
    FUNCTION_EXIT();
    return;
  }
  KeAcquireSpinLock(&xudd->urb_ring_lock, &old_irql);
  status = WdfRequestMarkCancelableEx(request, XenUsb_EvtRequestCancelPvUrb);
  if (!NT_SUCCESS(status)) {
    KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql);
    FUNCTION_MSG("WdfRequestMarkCancelableEx returned %08x\n", status);
    /* free the partial_pvurb allocated above rather than leaking it,
       and complete the request with the actual failure code */
    ExFreePoolWithTag(partial_pvurb, XENUSB_POOL_TAG);
    WdfRequestComplete(request, status);
    FUNCTION_EXIT();
    return;
  }

  partial_pvurb->req = pvurb->req;
  partial_pvurb->mdl = pvurb->mdl; /* 1:1 right now, but may need to split up large pvurb into smaller partial_pvurb's */
  partial_pvurb->pvurb = pvurb;
  partial_pvurb->other_partial_pvurb = NULL;
  partial_pvurb->on_ring = FALSE;
  if (!partial_pvurb->mdl) {
    partial_pvurb->req.nr_buffer_segs = 0;
    partial_pvurb->req.buffer_length = 0;
  } else {
    ULONG remaining = MmGetMdlByteCount(partial_pvurb->mdl);
    USHORT offset = (USHORT)MmGetMdlByteOffset(partial_pvurb->mdl);
    int i;
    partial_pvurb->req.buffer_length = (USHORT)MmGetMdlByteCount(partial_pvurb->mdl);
    partial_pvurb->req.nr_buffer_segs = (USHORT)ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(partial_pvurb->mdl), MmGetMdlByteCount(partial_pvurb->mdl));
    for (i = 0; i < partial_pvurb->req.nr_buffer_segs; i++) {
      partial_pvurb->req.seg[i].gref = XnGrantAccess(xudd->handle, (ULONG)MmGetMdlPfnArray(partial_pvurb->mdl)[i], FALSE, INVALID_GRANT_REF, (ULONG)'XUSB');
      partial_pvurb->req.seg[i].offset = (USHORT)offset;
      partial_pvurb->req.seg[i].length = (USHORT)min((USHORT)remaining, (USHORT)PAGE_SIZE - offset);
      offset = 0;
      remaining -= partial_pvurb->req.seg[i].length;
      FUNCTION_MSG("seg = %d\n", i);
      FUNCTION_MSG(" gref = %d\n", partial_pvurb->req.seg[i].gref);
      FUNCTION_MSG(" offset = %d\n", partial_pvurb->req.seg[i].offset);
      FUNCTION_MSG(" length = %d\n", partial_pvurb->req.seg[i].length);
    }
    FUNCTION_MSG("buffer_length = %d\n", partial_pvurb->req.buffer_length);
    FUNCTION_MSG("nr_buffer_segs = %d\n", partial_pvurb->req.nr_buffer_segs);
  }
  InsertTailList(&xudd->partial_pvurb_queue, &partial_pvurb->entry);
  PutRequestsOnRing(xudd);
  KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql);  
  
  FUNCTION_EXIT();
}
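
The seg[] loop above relies on ADDRESS_AND_SIZE_TO_SPAN_PAGES counting every page the transfer touches, including the partial first and last pages. A hedged arithmetic sketch, assuming 4 KB pages and an illustrative address:

/* a 0x2000-byte buffer starting at page offset 0x234 touches three pages,
 * producing three segments: 0xDCC + 0x1000 + 0x234 == 0x2000 */
PUCHAR va = (PUCHAR)0x12340234; /* illustrative value, not a real mapping */
ULONG span = ADDRESS_AND_SIZE_TO_SPAN_PAGES(va, 0x2000);
ASSERT(span == 3);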
Beispiel #29
0
PVOID
NTAPI
GdiPoolAllocate(
    PGDI_POOL pPool)
{
    PGDI_POOL_SECTION pSection;
    ULONG ulIndex, cjOffset, ulPageBit;
    PLIST_ENTRY ple;
    PVOID pvAlloc, pvBaseAddress;
    SIZE_T cjSize;
    NTSTATUS status;

    /* Disable APCs and acquire the pool lock */
    KeEnterCriticalRegion();
    ExAcquirePushLockExclusive(&pPool->pushlock);

    /* Check if we have a ready section */
    if (!IsListEmpty(&pPool->leReadyList))
    {
        /* Get a free section */
        ple = pPool->leReadyList.Flink;
        pSection = CONTAINING_RECORD(ple, GDI_POOL_SECTION, leReadyLink);
        if (pSection->cAllocCount >= pPool->cSlotsPerSection)
        {
            DPRINT1("pSection->cAllocCount=%lu, pPool->cSlotsPerSection=%lu\n",
                    pSection->cAllocCount, pPool->cSlotsPerSection);
            DBG_DUMP_EVENT_LIST(&pPool->slhLog);
            ASSERT(FALSE);
        }
        ASSERT(pSection->cAllocCount < pPool->cSlotsPerSection);
    }
    else
    {
        /* No, check if we have something on the empty list */
        if (!IsListEmpty(&pPool->leEmptyList))
        {
            /* Yes, remove it from the empty list */
            ple = RemoveHeadList(&pPool->leEmptyList);
            pSection = CONTAINING_RECORD(ple, GDI_POOL_SECTION, leInUseLink);
            pPool->cEmptySections--;
            ASSERT(pSection->cAllocCount == 0);
        }
        else
        {
            /* No, allocate a new section */
            pSection = GdiPoolAllocateSection(pPool);
            if (!pSection)
            {
                DPRINT1("Couldn't allocate a section\n");
                pvAlloc = NULL;
                goto done;
            }
        }

        /* Insert it into the in-use and ready list */
        InsertHeadList(&pPool->leInUseList, &pSection->leInUseLink);
        InsertHeadList(&pPool->leReadyList, &pSection->leReadyLink);
    }

    /* Find and set a single bit */
    ulIndex = RtlFindClearBitsAndSet(&pSection->bitmap, 1, 0);
    ASSERT(ulIndex != MAXULONG);

    /* Calculate the allocation address */
    cjOffset = ulIndex * pPool->cjAllocSize;
    pvAlloc = (PVOID)((ULONG_PTR)pSection->pvBaseAddress + cjOffset);

    /* Check if memory is committed */
    ulPageBit = 1 << (cjOffset / PAGE_SIZE);
    ulPageBit |= 1 << ((cjOffset + pPool->cjAllocSize - 1) / PAGE_SIZE);
    if ((pSection->ulCommitBitmap & ulPageBit) != ulPageBit)
    {
        /* Commit the pages */
        pvBaseAddress = PAGE_ALIGN(pvAlloc);
        cjSize = ADDRESS_AND_SIZE_TO_SPAN_PAGES(pvAlloc, pPool->cjAllocSize) * PAGE_SIZE;
        status = ZwAllocateVirtualMemory(NtCurrentProcess(),
                                         &pvBaseAddress,
                                         0,
                                         &cjSize,
                                         MEM_COMMIT,
                                         PAGE_READWRITE);
        if (!NT_SUCCESS(status))
        {
            pvAlloc = NULL;
            goto done;
        }

        pSection->ulCommitBitmap |= ulPageBit;
    }

    /* Increase alloc count */
    pSection->cAllocCount++;
    ASSERT(RtlNumberOfSetBits(&pSection->bitmap) == pSection->cAllocCount);
    DBG_LOGEVENT(&pPool->slhLog, EVENT_ALLOCATE, pvAlloc);

    /* Check if section is now busy */
    if (pSection->cAllocCount == pPool->cSlotsPerSection)
    {
        /* Remove the section from the ready list */
        RemoveEntryList(&pSection->leReadyLink);
    }

done:
    /* Release the pool lock and enable APCs */
    ExReleasePushLockExclusive(&pPool->pushlock);
    KeLeaveCriticalRegion();

    DPRINT("GdiPoolallocate: %p\n", pvAlloc);
    return pvAlloc;
}
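
The ulPageBit computation above marks the first and the last page a slot touches, so an allocation straddling a page boundary commits both pages in one ZwAllocateVirtualMemory call. A hedged arithmetic sketch, assuming 4 KB pages and an illustrative slot size:

/* with cjAllocSize = 0x300, the slot at cjOffset = 0xF00 spans
   0xF00..0x11FF, i.e. pages 0 and 1 of its section */
ULONG cjOffset = 0xF00, cjAllocSize = 0x300, ulPageBit;
ulPageBit  = 1 << (cjOffset / PAGE_SIZE);                     /* bit 0 */
ulPageBit |= 1 << ((cjOffset + cjAllocSize - 1) / PAGE_SIZE); /* bit 1 */
ASSERT(ulPageBit == 3);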
Beispiel #30
0
///////////////////////////////////////////////////////////////////////////////
//
//  OsrStartWriteIrp
//
//    This routine is called by the OsrWrite and DpcForIsr routines to
//    start a new Write operation.  The request started is the IRP located
//    at the head of the write queue.
//
//  INPUTS:
//
//      DeviceObject - Address of the DEVICE_OBJECT for our device.
//  
//      Irp - Address of the IRP representing the IRP_MJ_WRITE call.
//
//  OUTPUTS:
//
//      None.
//
//  RETURNS:
//
//      None.
//
//  IRQL:
//
//      This routine is called at DISPATCH_LEVEL.
//
//  NOTES:
//      *** Called (and returns) with the WriteQueueLock held.
//
///////////////////////////////////////////////////////////////////////////////
VOID
OsrStartWriteIrp(PDEVICE_OBJECT DeviceObject, PIRP Irp)
{
    POSR_DEVICE_EXT devExt = DeviceObject->DeviceExtension;
    PIO_STACK_LOCATION ioStack;
    ULONG mapRegsNeeded;
    
    ioStack = IoGetCurrentIrpStackLocation(Irp);

    //
    // In progress IRPs cannot be cancelled
    //
    IoSetCancelRoutine(Irp, NULL);

#if DBG
    DbgPrint("OsrWrite: Transfer length %d.\n",
                                ioStack->Parameters.Write.Length);
#endif

    //
    // We're starting a request... therefore, we clear the StopEvent
    // flag.
    //
    KeClearEvent(&devExt->StopEvent);

    //
    // There is no in-progress request.  Start this request on the
    // device.
    //
    devExt->CurrentWriteIrp = Irp;

    devExt->WriteTotalLength = ioStack->Parameters.Write.Length;

    devExt->WriteSoFar = 0;

    devExt->WriteStartingOffset = 0;

    //
    // Start the watchdog timer on this IRP
    //
    Irp->Tail.Overlay.DriverContext[0] = (PVOID)(ULONG_PTR)OSR_WATCHDOG_INTERVAL;

    //
    // Since we're about to initiate a DMA operation, ensure the user's data
    // buffer is flushed from the cache back into memory, on processors that
    // are non-DMA cache coherent.
    //
    KeFlushIoBuffers(Irp->MdlAddress, FALSE, TRUE);

    //
    // Determine the number of map registers we'll need for this transfer
    //
    mapRegsNeeded = 
        ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Irp->MdlAddress),
                                        ioStack->Parameters.Write.Length);
        
#if DBG
    DbgPrint("StartWrite: %d. map regs needed\n", mapRegsNeeded);
#endif

    //
    // If the number of map registers required for this transfer exceeds the
    // maximum we're allowed to use (as reported to us by HalGetAdapter()),
    // we limit ourselves to that maximum.
    //
    devExt->MapRegsThisWrite = ((mapRegsNeeded > devExt->WriteMapRegsGot) ? 
                              devExt->WriteMapRegsGot : mapRegsNeeded);

#if DBG
    DbgPrint("StartWrite: %d. map regs this xfer\n", devExt->MapRegsThisWrite);
#endif

    //
    // Ready to GO! Allocate the appropriate Adapter Object and map registers.
    //
    IoAllocateAdapterChannel(devExt->WriteAdapter,
                             DeviceObject, 
                             devExt->MapRegsThisWrite,
                             OsrAdapterControlWrite,
                             Irp);
}
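
IoAllocateAdapterChannel does not start the transfer itself: it queues the AdapterControl routine (OsrAdapterControlWrite here) to run once the requested map registers are available, and that callback programs them with IoMapTransfer. A hedged sketch of what such a callback typically looks like — the field names follow the extension used above, but the name and body are illustrative rather than the OSR sample's actual code:

IO_ALLOCATION_ACTION
SampleAdapterControlWrite(PDEVICE_OBJECT DeviceObject,
                          PIRP Irp,
                          PVOID MapRegisterBase,
                          PVOID Context)
{
    POSR_DEVICE_EXT devExt = DeviceObject->DeviceExtension;

    //
    // Map no more than the registers we were granted can cover
    //
    ULONG length = min(devExt->WriteTotalLength,
                       devExt->MapRegsThisWrite * PAGE_SIZE);

    UNREFERENCED_PARAMETER(Context);

    //
    // Load the map registers for the first piece of the transfer;
    // IoMapTransfer updates 'length' to the number of bytes mapped.
    //
    IoMapTransfer(devExt->WriteAdapter,
                  Irp->MdlAddress,
                  MapRegisterBase,
                  MmGetMdlVirtualAddress(Irp->MdlAddress),
                  &length,
                  TRUE);                // TRUE: memory -> device

    //
    // Hold on to the map registers until the DPC completes the transfer
    //
    return KeepObject;
}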