Example #1
VOID
CcSetAdditionalCacheAttributes (
    __in PFILE_OBJECT FileObject,
    __in BOOLEAN DisableReadAhead,
    __in BOOLEAN DisableWriteBehind
    )

/*++

Routine Description:

    This routine sets or clears the disable-read-ahead and disable-write-behind
    flags that control Cache Manager operation.  This routine may be
    called any time after calling CcInitializeCacheMap.  Initially both
    read ahead and write behind are enabled.  Note that the state of both
    of these flags must be specified on each call to this routine.

Arguments:

    FileObject - File object for which the respective flags are to be set.

    DisableReadAhead - FALSE to enable read ahead, TRUE to disable it.

    DisableWriteBehind - FALSE to enable write behind, TRUE to disable it.

Return Value:

    None.

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  Now set the flags and return.
    //

    CcAcquireMasterLock( &OldIrql );
    if (DisableReadAhead) {
        SetFlag(SharedCacheMap->Flags, DISABLE_READ_AHEAD);
    } else {
        ClearFlag(SharedCacheMap->Flags, DISABLE_READ_AHEAD);
    }
    if (DisableWriteBehind) {
        SetFlag(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND | MODIFIED_WRITE_DISABLED);
    } else {
        ClearFlag(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND);
    }
    CcReleaseMasterLock( OldIrql );
}
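
A minimal usage sketch (hypothetical file-system code; MyFsDisableReadAhead is
not a real routine, and caching is assumed to already be initiated on the
FileObject via CcInitializeCacheMap):

VOID
MyFsDisableReadAhead (
    IN PFILE_OBJECT FileObject
    )
{
    //
    //  Both flag states must be restated on every call, so to disable
    //  read ahead while keeping write behind enabled we pass TRUE and
    //  FALSE explicitly.
    //

    CcSetAdditionalCacheAttributes( FileObject, TRUE, FALSE );
}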
Example #2
NTKERNELAPI
BOOLEAN
CcSetPrivateWriteFile(
    PFILE_OBJECT FileObject
    )

/*++

Routine Description:

    This routine will instruct the cache manager to treat the file as
    a private-write stream, so that a caller can implement a private
    logging mechanism for it.  We will turn on both Mm's modify-no-write
    and our disable-write-behind, and disallow non-aware flush/purge for
    the file.

    Caching must already be initiated on the file.

    This routine is only exported to the kernel.

Arguments:

    FileObject - File to make private-write.

Return Value:

    TRUE if the file was made private-write, FALSE if caching is not
    initiated on the file or modified write could not be disabled.

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    BOOLEAN Disabled;
    KIRQL OldIrql;
    PVACB Vacb;
    ULONG ActivePage;
    ULONG PageIsDirty;

    //
    //  Pick up the file exclusive to synchronize against readahead and
    //  other purge/map activity.
    //

    FsRtlAcquireFileExclusive( FileObject );

    //
    //  Get a pointer to the SharedCacheMap. Be sure to release the FileObject
    //  in case an error condition forces a premature exit.
    //
    
    if ((FileObject->SectionObjectPointer == NULL) ||
        ((SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap) == NULL)) {

        FsRtlReleaseFile( FileObject );
        return FALSE;
    }
    
    //
    //  Unmap all the views in preparation for making the disable mw call.
    //

    //
    //  We still need to wait for any dangling cache read or writes.
    //
    //  In fact we have to loop and wait because the lazy writer can
    //  sneak in and do a CcGetVirtualAddressIfMapped, and we are not
    //  synchronized.
    //
    //  This is the same bit of code that our purge will do.  We assume
    //  that a private writer has successfully blocked out other activity.
    //

    //
    //  If there is an active Vacb, then delete it now (before waiting!).
    //

    CcAcquireMasterLock( &OldIrql );
    GetActiveVacbAtDpcLevel( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
    CcReleaseMasterLock( OldIrql );
    
    if (Vacb != NULL) {

        CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
    }

    while ((SharedCacheMap->Vacbs != NULL) &&
           !CcUnmapVacbArray( SharedCacheMap, NULL, 0, FALSE )) {

        CcWaitOnActiveCount( SharedCacheMap );
    }

    //
    //  Knock the file down.
    // 

    CcFlushCache( FileObject->SectionObjectPointer, NULL, 0, NULL );

    //
    //  Now the file is clean and unmapped. We can still have a racing
    //  lazy writer, though.
    //
    //  We just wait for the lazy writer queue to drain before disabling
    //  modified write.  A better way might be to have an event for the
    //  WRITE_QUEUED flag; that would also let us
    //  dispense with the pagingio pick/drop in the FS cache coherency
    //  paths, but there could be reasons why CcFlushCache shouldn't
    //  always do such a block.  Investigate this.
    //
    //  This wait takes on the order of ~.5s avg. case.
    //

    CcAcquireMasterLock( &OldIrql );
    
    if (FlagOn( SharedCacheMap->Flags, WRITE_QUEUED ) ||
        FlagOn( SharedCacheMap->Flags, READ_AHEAD_QUEUED )) {
        
        CcReleaseMasterLock( OldIrql );
        FsRtlReleaseFile( FileObject );
        CcWaitForCurrentLazyWriterActivity();
        FsRtlAcquireFileExclusive( FileObject );

    } else {

        CcReleaseMasterLock( OldIrql );
    }

    //
    //  Now set the flags and return.  We do not set our MODIFIED_WRITE_DISABLED
    //  since we don't want to fully promote this cache map.  Future?
    //

    Disabled = MmDisableModifiedWriteOfSection( FileObject->SectionObjectPointer );

    if (Disabled) {
        CcAcquireMasterLock( &OldIrql );
        SetFlag(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND | PRIVATE_WRITE);
        CcReleaseMasterLock( OldIrql );
    }

    //
    //  Now release the file for regular operation.
    //

    FsRtlReleaseFile( FileObject );

    return Disabled;
}
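
A sketch of how a kernel logging component might use this routine
(MyLogMakeStreamPrivateWrite and the status choice are hypothetical; the only
real API here is CcSetPrivateWriteFile, which requires that caching already
be initiated on the file):

NTSTATUS
MyLogMakeStreamPrivateWrite (
    IN PFILE_OBJECT FileObject
    )
{
    //
    //  CcSetPrivateWriteFile returns FALSE if caching is not initiated
    //  on the file or if modified write could not be disabled; in that
    //  case the stream is not private-write and we fail the request.
    //

    if (!CcSetPrivateWriteFile( FileObject )) {

        return STATUS_UNSUCCESSFUL;
    }

    return STATUS_SUCCESS;
}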
Example #3
BOOLEAN
CcIsThereDirtyData (
    __in PVPB Vpb
    )

/*++

Routine Description:

    This routine returns TRUE if the volume specified by the Vpb has any
    unwritten dirty data in the cache.

Arguments:

    Vpb - Specifies the Vpb (volume) to check for dirty data.

Return Value:

    FALSE - if the Vpb has no dirty data
    TRUE - if the Vpb has dirty data

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;
    ULONG LoopsWithLockHeld = 0;

    //
    //  Synchronize with changes to the SharedCacheMap list.
    //

    CcAcquireMasterLock( &OldIrql );

    SharedCacheMap = CONTAINING_RECORD( CcDirtySharedCacheMapList.SharedCacheMapLinks.Flink,
                                        SHARED_CACHE_MAP,
                                        SharedCacheMapLinks );

    while (&SharedCacheMap->SharedCacheMapLinks != &CcDirtySharedCacheMapList.SharedCacheMapLinks) {

        //
        //  Look at this one if the Vpb matches and if there is dirty data.
        //  For what it's worth, don't worry about dirty data in temporary files,
        //  as that should not concern the caller if it wants to dismount.
        //

        if (!FlagOn(SharedCacheMap->Flags, IS_CURSOR) &&
            (SharedCacheMap->FileObject->Vpb == Vpb) &&
            (SharedCacheMap->DirtyPages != 0) &&
            !FlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE)) {

            CcReleaseMasterLock( OldIrql );
            return TRUE;
        }

        //
        //  Make sure we occasionally drop the lock.  Set WRITE_QUEUED
        //  to keep the guy from going away, and increment DirtyPages to
        //  keep it in this list.
        //

        if ((++LoopsWithLockHeld >= 20) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED | IS_CURSOR)) {

            SetFlag( *((ULONG volatile *)&SharedCacheMap->Flags), WRITE_QUEUED);
            *((ULONG volatile *)&SharedCacheMap->DirtyPages) += 1;
            CcReleaseMasterLock( OldIrql );
            LoopsWithLockHeld = 0;
            CcAcquireMasterLock( &OldIrql );
            ClearFlag( *((ULONG volatile *)&SharedCacheMap->Flags), WRITE_QUEUED);
            *((ULONG volatile *)&SharedCacheMap->DirtyPages) -= 1;
        }

        //
        //  Now loop back for the next cache map.
        //

        SharedCacheMap =
            CONTAINING_RECORD( SharedCacheMap->SharedCacheMapLinks.Flink,
                               SHARED_CACHE_MAP,
                               SharedCacheMapLinks );
    }

    CcReleaseMasterLock( OldIrql );

    return FALSE;
}
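
A sketch of the typical dismount-time check (hypothetical file-system code;
MY_VCB is an assumed per-volume structure that holds the VPB pointer):

typedef struct _MY_VCB {
    PVPB Vpb;
} MY_VCB, *PMY_VCB;

BOOLEAN
MyFsOkToDismount (
    IN PMY_VCB Vcb
    )
{
    //
    //  Refuse the dismount while the cache still holds unwritten dirty
    //  data for this volume.  Temporary files are already ignored by
    //  CcIsThereDirtyData itself.
    //

    return !CcIsThereDirtyData( Vcb->Vpb );
}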
Example #4
LARGE_INTEGER
CcGetDirtyPages (
    __in PVOID LogHandle,
    __in PDIRTY_PAGE_ROUTINE DirtyPageRoutine,
    __in PVOID Context1,
    __in PVOID Context2
    )

/*++

Routine Description:

    This routine may be called to return all of the dirty pages in all files
    for a given log handle.  Each page is returned by an individual call to
    the Dirty Page Routine.  The Dirty Page Routine is defined by a prototype
    in ntos\inc\cache.h.

Arguments:

    LogHandle - Log Handle which must match the log handle previously stored
                for all files which are to be returned.

    DirtyPageRoutine -- The routine to call as each dirty page for this log
                        handle is found.

    Context1 - First context parameter to be passed to the Dirty Page Routine.

    Context2 - Second context parameter to be passed to the Dirty Page Routine.

Return Value:

    LARGE_INTEGER - Oldest Lsn found of all the dirty pages, or 0 if no dirty pages

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PBCB Bcb, BcbToUnpin = NULL;
    KLOCK_QUEUE_HANDLE LockHandle;
    LARGE_INTEGER SavedFileOffset, SavedOldestLsn, SavedNewestLsn;
    ULONG SavedByteLength;
    LARGE_INTEGER OldestLsn = {0,0};

    //
    //  Synchronize with changes to the SharedCacheMap list.
    //

    CcAcquireMasterLock( &LockHandle.OldIrql );

    SharedCacheMap = CONTAINING_RECORD( CcDirtySharedCacheMapList.SharedCacheMapLinks.Flink,
                                        SHARED_CACHE_MAP,
                                        SharedCacheMapLinks );

    //
    //  Use try/finally for cleanup.  The only spot where we can raise is out of the
    //  filesystem callback, but we have the exception handler out here so we aren't
    //  constantly setting/unsetting it.
    //

    try {

        while (&SharedCacheMap->SharedCacheMapLinks != &CcDirtySharedCacheMapList.SharedCacheMapLinks) {

            //
            //  Skip over cursors, SharedCacheMaps for other LogHandles, and ones with
            //  no dirty pages
            //

            if (!FlagOn(SharedCacheMap->Flags, IS_CURSOR) && (SharedCacheMap->LogHandle == LogHandle) &&
                (SharedCacheMap->DirtyPages != 0)) {

                //
                //  This SharedCacheMap should stick around for a while in the dirty list.
                //

                CcIncrementOpenCount( SharedCacheMap, 'pdGS' );
                SharedCacheMap->DirtyPages += 1;
                CcReleaseMasterLock( LockHandle.OldIrql );

                //
                //  Set our initial resume point and point to first Bcb in List.
                //

                KeAcquireInStackQueuedSpinLock( &SharedCacheMap->BcbSpinLock, &LockHandle );
                Bcb = CONTAINING_RECORD( SharedCacheMap->BcbList.Flink, BCB, BcbLinks );

                //
                //  Scan to the end of the Bcb list.
                //

                while (&Bcb->BcbLinks != &SharedCacheMap->BcbList) {

                    //
                    //  If the Bcb is dirty, then capture the inputs for the
                    //  callback routine so we can call without holding a spinlock.
                    //

                    if ((Bcb->NodeTypeCode == CACHE_NTC_BCB) && Bcb->Dirty) {

                        SavedFileOffset = Bcb->FileOffset;
                        SavedByteLength = Bcb->ByteLength;
                        SavedOldestLsn = Bcb->OldestLsn;
                        SavedNewestLsn = Bcb->NewestLsn;

                        //
                        //  Increment PinCount so the Bcb sticks around
                        //

                        Bcb->PinCount += 1;

                        KeReleaseInStackQueuedSpinLock( &LockHandle );

                        //
                        //  Any Bcb to unref from a previous loop?
                        //

                        if (BcbToUnpin != NULL) {
                            CcUnpinFileData( BcbToUnpin, TRUE, UNREF );
                            BcbToUnpin = NULL;
                        }

                        //
                        //  Call the file system.  This callback may raise status.
                        //

                        (*DirtyPageRoutine)( SharedCacheMap->FileObject,
                                             &SavedFileOffset,
                                             SavedByteLength,
                                             &SavedOldestLsn,
                                             &SavedNewestLsn,
                                             Context1,
                                             Context2 );

                        //
                        //  Possibly update OldestLsn
                        //

                        if ((SavedOldestLsn.QuadPart != 0) &&
                            ((OldestLsn.QuadPart == 0) || (SavedOldestLsn.QuadPart < OldestLsn.QuadPart ))) {
                            OldestLsn = SavedOldestLsn;
                        }

                        //
                        //  Now reacquire the spinlock and scan from the resume
                        //  point to the next Bcb to return in the descending list.
                        //

                        KeAcquireInStackQueuedSpinLock( &SharedCacheMap->BcbSpinLock, &LockHandle );

                        //
                        //  Normally the Bcb can stay around a while, but if not,
                        //  we will just remember it for the next time we do not
                        //  have the spin lock.  We cannot unpin it now, because
                        //  we would lose our place in the list.
                        //
                        //  This is cheating, but it works and is sane since we're
                        //  already traversing the bcb list - dropping the bcb count
                        //  is OK, as long as we don't hit zero.  Zero requires a
                        //  bit more attention (unmapping the view) that shouldn't
                        //  be replicated here.
                        //

                        if (Bcb->PinCount > 1) {
                            Bcb->PinCount -= 1;
                        } else {
                            BcbToUnpin = Bcb;
                        }
                    }

                    Bcb = CONTAINING_RECORD( Bcb->BcbLinks.Flink, BCB, BcbLinks );
                }
                KeReleaseInStackQueuedSpinLock( &LockHandle );

                //
                //  We need to unref any Bcb we are holding before moving on to
                //  the next SharedCacheMap, or else CcDeleteSharedCacheMap will
                //  also delete this Bcb.
                //

                if (BcbToUnpin != NULL) {

                    CcUnpinFileData( BcbToUnpin, TRUE, UNREF );
                    BcbToUnpin = NULL;
                }

                CcAcquireMasterLock( &LockHandle.OldIrql );

                //
                //  Now release the SharedCacheMap, leaving it in the dirty list.
                //

                CcDecrementOpenCount( SharedCacheMap, 'pdGF' );
                SharedCacheMap->DirtyPages -= 1;
            }

            //
            //  Now loop back for the next cache map.
            //

            SharedCacheMap =
                CONTAINING_RECORD( SharedCacheMap->SharedCacheMapLinks.Flink,
                                   SHARED_CACHE_MAP,
                                   SharedCacheMapLinks );
        }

        CcReleaseMasterLock( LockHandle.OldIrql );

    } finally {

        //
        //  Drop the Bcb if we are being ejected.  We are guaranteed that the
        //  only raise is from the callback, at which point we have an incremented
        //  pincount.
        //

        if (AbnormalTermination()) {

            CcUnpinFileData( Bcb, TRUE, UNPIN );
        }
    }

    return OldestLsn;
}
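
A sketch of a matching callback (all My* names are hypothetical; the prototype
follows PDIRTY_PAGE_ROUTINE as used above, and while the scan above tolerates
a raise out of the callback, this sketch never raises):

typedef struct _MY_DIRTY_SCAN {
    ULONG PageCount;
} MY_DIRTY_SCAN, *PMY_DIRTY_SCAN;

VOID
MyDirtyPageRoutine (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN PLARGE_INTEGER OldestLsn,
    IN PLARGE_INTEGER NewestLsn,
    IN PVOID Context1,
    IN PVOID Context2
    )
{
    PMY_DIRTY_SCAN Scan = (PMY_DIRTY_SCAN)Context1;

    UNREFERENCED_PARAMETER( FileObject );
    UNREFERENCED_PARAMETER( FileOffset );
    UNREFERENCED_PARAMETER( OldestLsn );
    UNREFERENCED_PARAMETER( NewestLsn );
    UNREFERENCED_PARAMETER( Context2 );

    //
    //  Count whole pages covered by this dirty Bcb.  No spinlock is
    //  held while we are called, but the callback should still be quick.
    //

    Scan->PageCount += (Length + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

and a caller would pass it along with its context:

    MY_DIRTY_SCAN Scan = {0};
    LARGE_INTEGER OldestLsn;

    OldestLsn = CcGetDirtyPages( LogHandle, &MyDirtyPageRoutine, &Scan, NULL );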
Example #5
VOID
CcMdlWriteComplete2 (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN PMDL MdlChain
    )

/*++

Routine Description:

    This routine must be called at IRQL 0 (PASSIVE_LEVEL) after a call to
    CcPrepareMdlWrite.  The caller supplies the original FileOffset and the
    MdlChain returned from the corresponding call to CcPrepareMdlWrite.

    This call does the following:

        Makes sure the data described by the MdlChain eventually gets written.
        If WriteThrough is FALSE, the data will not be written immediately.
        If WriteThrough is TRUE, then the data is written synchronously.

        Unmaps the pages (if mapped), unlocks them and deletes the MdlChain

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Original file offset passed to the corresponding call to
                 CcPrepareMdlWrite.

    MdlChain - same as returned from corresponding call to CcPrepareMdlWrite.

Return Value:

    None

--*/

{
    PMDL MdlNext;
    PSHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER FOffset;
    IO_STATUS_BLOCK IoStatus;
    KIRQL OldIrql;
    NTSTATUS StatusToRaise = STATUS_SUCCESS;

    DebugTrace(+1, me, "CcMdlWriteComplete\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace( 0, me, "    MdlChain = %08lx\n", MdlChain );

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  Deallocate the Mdls
    //

    FOffset.QuadPart = *(LONGLONG UNALIGNED *)FileOffset;
    while (MdlChain != NULL) {

        MdlNext = MdlChain->Next;

        DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
        DebugTrace( 0, mm, "    Mdl = %08lx\n", MdlChain );

        //
        //  Now clear the dirty bits in the Pte and set them in the
        //  Pfn.
        //

        MmUnlockPages( MdlChain );

        //
        //  Extract the File Offset for this part of the transfer.
        //

        if (FlagOn(FileObject->Flags, FO_WRITE_THROUGH)) {

            MmFlushSection ( FileObject->SectionObjectPointer,
                             &FOffset,
                             MdlChain->ByteCount,
                             &IoStatus,
                             TRUE );

            //
            //  If we got an I/O error, remember it.
            //

            if (!NT_SUCCESS(IoStatus.Status)) {
                StatusToRaise = IoStatus.Status;
            }

        } else {

            //
            //  Ignore the only exception (allocation error), and console
            //  ourselves for having tried.
            //

            CcSetDirtyInMask( SharedCacheMap, &FOffset, MdlChain->ByteCount );
        }

        FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)(MdlChain->ByteCount);

        IoFreeMdl( MdlChain );

        MdlChain = MdlNext;
    }

    //
    //  Now release our open count.
    //

    CcAcquireMasterLock( &OldIrql );

    CcDecrementOpenCount( SharedCacheMap, 'ldmC' );

    if ((SharedCacheMap->OpenCount == 0) &&
        !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
        (SharedCacheMap->DirtyPages == 0)) {

        //
        //  Move to the dirty list.
        //

        RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
        InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                        &SharedCacheMap->SharedCacheMapLinks );

        //
        //  Make sure the Lazy Writer will wake up, because we
        //  want him to delete this SharedCacheMap.
        //

        LazyWriter.OtherWork = TRUE;
        if (!LazyWriter.ScanActive) {
            CcScheduleLazyWriteScan();
        }
    }

    CcReleaseMasterLock( OldIrql );

    //
    //  If we got an I/O error, raise it now.
    //

    if (!NT_SUCCESS(StatusToRaise)) {
        ExRaiseStatus( FsRtlNormalizeNtstatus( StatusToRaise,
                                               STATUS_UNEXPECTED_IO_ERROR ));
    }

    DebugTrace(-1, me, "CcMdlWriteComplete -> VOID\n", 0 );

    return;
}
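
Because the write-through path raises on I/O errors, a caller of the exported
CcMdlWriteComplete (which on this codebase ends up in the routine above) would
typically complete inside an exception filter.  A minimal sketch, assuming the
MdlChain and FileOffset came from a prior CcPrepareMdlWrite and that
MyFsCompleteMdlWrite is a hypothetical wrapper:

NTSTATUS
MyFsCompleteMdlWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN PMDL MdlChain
    )
{
    NTSTATUS Status = STATUS_SUCCESS;

    try {

        //
        //  Unlocks the pages and frees the chain; raises if a
        //  write-through flush fails.
        //

        CcMdlWriteComplete( FileObject, FileOffset, MdlChain );

    } except(EXCEPTION_EXECUTE_HANDLER) {

        Status = GetExceptionCode();
    }

    return Status;
}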
Example #6
VOID
CcPrepareMdlWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    This routine attempts to lock the specified file data in the cache
    and return a description of it in an Mdl along with the correct
    I/O status.  Pages to be completely overwritten may be satisfied
    with empty pages.  It is *not* safe to call this routine from Dpc level.

    This call is synchronous and raises on error.

    When this call returns, the caller may immediately begin
    to transfer data into the buffers via the Mdl.

    When the call returns, the pages described by the Mdl are
    locked in memory, but not mapped in system space.  If the caller
    needs the pages mapped in system space, then it must map them.
    On the subsequent call to CcMdlWriteComplete the pages will be
    unmapped if they were mapped, and in any case unlocked and the Mdl
    deallocated.

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    MdlChain - On output it returns a pointer to an Mdl chain describing
               the desired data.  Note that this routine raises on error,
               and any Mdls already allocated are unlocked and freed during
               the unwind, so the chain is only valid on success.

    IoStatus - Pointer to standard I/O status block to receive the status
               for the in-transfer of the data.  (STATUS_SUCCESS guaranteed
               for cache hits, otherwise the actual I/O status is returned.)
               The I/O Information Field indicates how many bytes have been
               successfully locked down in the Mdl Chain.

Return Value:

    None

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PVOID CacheBuffer;
    LARGE_INTEGER FOffset;
    PMDL Mdl = NULL;
    PMDL MdlTemp;
    LARGE_INTEGER Temp;
    ULONG SavedState = 0;
    ULONG ZeroFlags = 0;
    ULONG Information = 0;

    KIRQL OldIrql;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB Vacb = NULL;

    DebugTrace(+1, me, "CcPrepareMdlWrite\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                                                          FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  See if we have an active Vacb, that we need to free.
    //

    GetActiveVacb( SharedCacheMap, OldIrql, Vacb, ActivePage, PageIsDirty );

    //
    //  If there is an end of a page to be zeroed, then free that page now,
    //  so it does not cause our data to get zeroed.  If there is an active
    //  page, free it so we have the correct ValidDataGoal.
    //

    if ((Vacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {

        CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
        Vacb = NULL;
    }

    FOffset = *FileOffset;

    //
    //  Put try-finally around the loop to deal with exceptions
    //

    try {

        //
        //  Not all of the transfer will come back at once, so we have to loop
        //  until the entire transfer is complete.
        //

        while (Length != 0) {

            ULONG ReceivedLength;
            LARGE_INTEGER BeyondLastByte;

            //
            //  Map and see how much we could potentially access at this
            //  FileOffset, then cut it down if it is more than we need.
            //

            CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                               FOffset,
                                               &Vacb,
                                               &ReceivedLength );

            if (ReceivedLength > Length) {
                ReceivedLength = Length;
            }

            BeyondLastByte.QuadPart = FOffset.QuadPart + (LONGLONG)ReceivedLength;

            //
            //  At this point we can calculate the ZeroFlags.
            //

            //
            //  We can always zero middle pages, if any.
            //

            ZeroFlags = ZERO_MIDDLE_PAGES;

            //
            //  See if we are completely overwriting the first or last page.
            //

            if (((FOffset.LowPart & (PAGE_SIZE - 1)) == 0) &&
                (ReceivedLength >= PAGE_SIZE)) {
                ZeroFlags |= ZERO_FIRST_PAGE;
            }

            if ((BeyondLastByte.LowPart & (PAGE_SIZE - 1)) == 0) {
                ZeroFlags |= ZERO_LAST_PAGE;
            }

            //
            //  See if the entire transfer is beyond valid data length,
            //  or at least starting from the second page.
            //

            Temp = FOffset;
            Temp.LowPart &= ~(PAGE_SIZE -1);
            ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            Temp.QuadPart = SharedCacheMap->ValidDataGoal.QuadPart - Temp.QuadPart;
            ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            if (Temp.QuadPart <= 0) {
                ZeroFlags |= ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
            } else if ((Temp.HighPart == 0) && (Temp.LowPart <= PAGE_SIZE)) {
                ZeroFlags |= ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
            }

            (VOID)CcMapAndRead( SharedCacheMap,
                                &FOffset,
                                ReceivedLength,
                                ZeroFlags,
                                TRUE,
                                CacheBuffer );

            //
            //  Now attempt to allocate an Mdl to describe the mapped data.
            //

            DebugTrace( 0, mm, "IoAllocateMdl:\n", 0 );
            DebugTrace( 0, mm, "    BaseAddress = %08lx\n", CacheBuffer );
            DebugTrace( 0, mm, "    Length = %08lx\n", ReceivedLength );

            Mdl = IoAllocateMdl( CacheBuffer,
                                 ReceivedLength,
                                 FALSE,
                                 FALSE,
                                 NULL );

            DebugTrace( 0, mm, "    <Mdl = %08lx\n", Mdl );

            if (Mdl == NULL) {
                DebugTrace( 0, 0, "Failed to allocate Mdl\n", 0 );

                ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
            }

            DebugTrace( 0, mm, "MmProbeAndLockPages:\n", 0 );
            DebugTrace( 0, mm, "    Mdl = %08lx\n", Mdl );

            MmDisablePageFaultClustering(&SavedState);
            MmProbeAndLockPages( Mdl, KernelMode, IoWriteAccess );
            MmEnablePageFaultClustering(SavedState);
            SavedState = 0;

            //
            //  Now that some data (maybe zeros) is locked in memory and
            //  set dirty, it is safe, and necessary for us to advance
            //  valid data goal, so that we will not subsequently ask
            //  for a zero page.  Note if we are extending valid data,
            //  our caller has the file exclusive.
            //

            ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            if (BeyondLastByte.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart) {
                SharedCacheMap->ValidDataGoal = BeyondLastByte;
            }
            ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            //
            //  Unmap the data now, now that the pages are locked down.
            //

            CcFreeVirtualAddress( Vacb );
            Vacb = NULL;

            //
            //  Now link the Mdl into the caller's chain
            //

            if ( *MdlChain == NULL ) {
                *MdlChain = Mdl;
            } else {
                MdlTemp = *MdlChain;
                while (MdlTemp->Next != NULL) {
                    MdlTemp = MdlTemp->Next;
                }
                MdlTemp->Next = Mdl;
            }
            Mdl = NULL;

            //
            //  Assume we did not get all the data we wanted, and set FOffset
            //  to the end of the returned data.
            //

            FOffset = BeyondLastByte;

            //
            //  Update number of bytes transferred.
            //

            Information += ReceivedLength;

            //
            //  Calculate length left to transfer.
            //

            Length -= ReceivedLength;
        }
    }
    finally {

        if (AbnormalTermination()) {

            if (SavedState != 0) {
                MmEnablePageFaultClustering(SavedState);
            }

            if (Vacb != NULL) {
                CcFreeVirtualAddress( Vacb );
            }
            
            if (Mdl != NULL) {
                IoFreeMdl( Mdl );
            }

            //
            //  Otherwise loop to deallocate the Mdls
            //

            FOffset = *FileOffset;
            while (*MdlChain != NULL) {
                MdlTemp = (*MdlChain)->Next;

                DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
                DebugTrace( 0, mm, "    Mdl = %08lx\n", *MdlChain );

                MmUnlockPages( *MdlChain );

                //
                //  Extract the File Offset for this part of the transfer, and
                //  tell the lazy writer to write these pages, since we have
                //  marked them dirty.  Ignore the only exception (allocation
                //  error), and console ourselves for having tried.
                //

                CcSetDirtyInMask( SharedCacheMap, &FOffset, (*MdlChain)->ByteCount );

                FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)((*MdlChain)->ByteCount);

                IoFreeMdl( *MdlChain );

                *MdlChain = MdlTemp;
            }

            DebugTrace(-1, me, "CcPrepareMdlWrite -> Unwinding\n", 0 );
        }
        else {

            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = Information;

            //
            //  Make sure the SharedCacheMap does not go away while
            //  the Mdl write is in progress.  The open count is decremented
            //  in CcMdlWriteComplete2.
            //

            CcAcquireMasterLock( &OldIrql );
            CcIncrementOpenCount( SharedCacheMap, 'ldmP' );
            CcReleaseMasterLock( OldIrql );
        }
    }

    DebugTrace( 0, me, "    <MdlChain = %08lx\n", *MdlChain );
    DebugTrace(-1, me, "CcPrepareMdlWrite -> VOID\n", 0 );

    return;
}
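
Putting the prepare/complete halves together, a hypothetical caller might look
like the sketch below (all My* names are illustrative).  CcPrepareMdlWrite
leaves the pages locked but not mapped in system space, so each Mdl is mapped
with MmGetSystemAddressForMdlSafe before copying; CcMdlWriteAbort is assumed
available for the failure path.  A production caller would also guard
CcMdlWriteComplete against a raise, as shown after Example #5.

NTSTATUS
MyFsMdlWrite (
    IN PFILE_OBJECT FileObject,
    IN LARGE_INTEGER FileOffset,
    IN PUCHAR SourceData,
    IN ULONG Length
    )
{
    IO_STATUS_BLOCK IoStatus;
    PMDL MdlChain = NULL;
    PMDL Mdl;
    PUCHAR Buffer;

    try {

        CcPrepareMdlWrite( FileObject, &FileOffset, Length, &MdlChain, &IoStatus );

    } except(EXCEPTION_EXECUTE_HANDLER) {

        return GetExceptionCode();
    }

    //
    //  Copy the caller's data into each locked run, mapping the pages
    //  into system space one Mdl at a time.
    //

    for (Mdl = MdlChain; Mdl != NULL; Mdl = Mdl->Next) {

        Buffer = (PUCHAR)MmGetSystemAddressForMdlSafe( Mdl, NormalPagePriority );

        if (Buffer == NULL) {

            CcMdlWriteAbort( FileObject, MdlChain );
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        RtlCopyMemory( Buffer, SourceData, Mdl->ByteCount );
        SourceData += Mdl->ByteCount;
    }

    //
    //  Hand the chain back to the Cache Manager to be unmapped,
    //  unlocked, freed, and (for write-through files) flushed.
    //

    CcMdlWriteComplete( FileObject, &FileOffset, MdlChain );

    return STATUS_SUCCESS;
}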