/*
* TsmiHandleMemWrite
*
* Purpose:
*
* Patch vbox dll in memory.
*
* Parameters:
*   SrcAddress  - bytes to write.
*   DestAddress - target address inside the loaded module.
*   Size        - number of bytes to copy.
*
* Returns STATUS_SUCCESS, STATUS_INSUFFICIENT_RESOURCES (MDL allocation
* failure) or STATUS_ACCESS_VIOLATION (target invalid/unmappable).
*
* Warning: potential BSOD-generator due to nonstandard way of loading, take care with patch offsets.
*
*/
NTSTATUS TsmiHandleMemWrite(
    _In_ PVOID SrcAddress,
    _In_ PVOID DestAddress,
    _In_ ULONG Size
)
{
    PMDL mdl;
    NTSTATUS status = STATUS_SUCCESS;

    PAGED_CODE();

    mdl = IoAllocateMdl(DestAddress, Size, FALSE, FALSE, NULL);
    if (mdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    // Reject obviously invalid kernel-space targets before probing.
    if (DestAddress >= MmSystemRangeStart)
        if (!MmIsAddressValid(DestAddress)) {
            IoFreeMdl(mdl); // FIX: the original returned here without freeing the MDL (leak)
            return STATUS_ACCESS_VIOLATION;
        }

    // Lock the destination pages and get a writable system mapping so the
    // patch works regardless of the target's page protection.
    MmProbeAndLockPages(mdl, KernelMode, IoReadAccess);
    DestAddress = MmGetSystemAddressForMdlSafe(mdl, HighPagePriority);
    if (DestAddress != NULL) {
        status = MmProtectMdlSystemAddress(mdl, PAGE_EXECUTE_READWRITE);
        __movsb((PUCHAR)DestAddress, (const UCHAR *)SrcAddress, Size);
        MmUnmapLockedPages(DestAddress, mdl);
        MmUnlockPages(mdl);
    }
    else {
        status = STATUS_ACCESS_VIOLATION;
    }

    IoFreeMdl(mdl);
    return status;
}
VOID DispTdiQueryInformationExComplete(
    PVOID Context,
    ULONG Status,
    UINT ByteCount)
/*
 * FUNCTION: Completes a TDI QueryInformationEx request
 * ARGUMENTS:
 *     Context   = Pointer to the IRP for the request
 *     Status    = TDI status of the request
 *     ByteCount = Number of bytes returned in output buffer
 */
{
    PTI_QUERY_CONTEXT QueryContext;

    QueryContext = (PTI_QUERY_CONTEXT)Context;

    // On success, copy the (possibly updated) query context back into the
    // caller's TCP_REQUEST_QUERY_INFORMATION_EX structure via its MDL.
    if (NT_SUCCESS(Status))
    {
        CopyBufferToBufferChain(
            QueryContext->InputMdl,
            FIELD_OFFSET(TCP_REQUEST_QUERY_INFORMATION_EX, Context),
            (PCHAR)&QueryContext->QueryInfo.Context,
            CONTEXT_SIZE);
    }

    // Input MDL is always present and locked; the output MDL is optional.
    MmUnlockPages(QueryContext->InputMdl);
    IoFreeMdl(QueryContext->InputMdl);
    if( QueryContext->OutputMdl ) {
        MmUnlockPages(QueryContext->OutputMdl);
        IoFreeMdl(QueryContext->OutputMdl);
    }

    // Record the result on the IRP.
    // NOTE(review): the IRP is not completed here — presumably the caller
    // completes it after this routine returns; confirm against call sites.
    QueryContext->Irp->IoStatus.Information = ByteCount;
    QueryContext->Irp->IoStatus.Status = Status;

    ExFreePoolWithTag(QueryContext, QUERY_CONTEXT_TAG);
}
/* Completion routine: tears down the MDLs attached to the forwarded IRP and
 * the MDL carried in the completion context, then releases the context. */
NTSTATUS
NTAPI
IoCompletion (
    PDEVICE_OBJECT DeviceObject,
    PIRP Irp,
    PVOID Ctx)
{
    PKSSTREAM_HEADER Header;
    ULONG Length = 0;
    PMDL Mdl, NextMdl;
    PWDMAUD_COMPLETION_CONTEXT Context = (PWDMAUD_COMPLETION_CONTEXT)Ctx;

    /* get stream header */
    Header = (PKSSTREAM_HEADER)Irp->AssociatedIrp.SystemBuffer;

    /* sanity check */
    ASSERT(Header);

    /* time to free all allocated mdls */
    Mdl = Irp->MdlAddress;
    while(Mdl)
    {
        /* get next mdl */
        NextMdl = Mdl->Next;

        /* unlock pages */
        MmUnlockPages(Mdl);

        /* grab next mdl */
        Mdl = NextMdl;
    }
    /* clear mdl list
     * NOTE(review): the chain MDLs above are unlocked but never passed to
     * IoFreeMdl, and detaching them here prevents the I/O manager from
     * freeing them — looks like a leak; confirm ownership. */
    Irp->MdlAddress = NULL;

    /* check if mdl is locked */
    if (Context->Mdl->MdlFlags & MDL_PAGES_LOCKED)
    {
        /* unlock pages */
        MmUnlockPages(Context->Mdl);
    }

    /* now free the mdl */
    IoFreeMdl(Context->Mdl);

    DPRINT("IoCompletion Irp %p IoStatus %lx Information %lx Length %lu\n", Irp, Irp->IoStatus.Status, Irp->IoStatus.Information, Length);

    if (!NT_SUCCESS(Irp->IoStatus.Status))
    {
        /* failed */
        Irp->IoStatus.Information = 0;
    }

    /* free context */
    FreeItem(Context);
    return STATUS_SUCCESS;
}
FORCEINLINE
FxIoContext::~FxIoContext(
    VOID
    )
{
    //
    // Release the request buffer; its freeing is intentionally deferred
    // until destruction so it could be reused for same-size requests.
    //
    ClearBuffer();

    //
    // No MDL owned by this context — nothing more to tear down.
    //
    if (m_MdlToFree == NULL) {
        return;
    }

    //
    // Defensive: ReleaseAndRestore should already have unlocked the pages.
    //
    if (m_UnlockPages) {
        MmUnlockPages(m_MdlToFree);
        m_UnlockPages = FALSE;
    }

    FxMdlFree(m_DriverGlobals, m_MdlToFree);
    m_MdlToFree = NULL;
}
/// <summary> /// Unmap memory region, release corresponding MDL, and remove region form list /// </summary> /// <param name="pPageEntry">Region data</param> /// <param name="pFoundEntry">Process data</param> /// <returns>Status code</returns> NTSTATUS BBUnmapRegionEntry( IN PMAP_ENTRY pPageEntry, IN PPROCESS_MAP_ENTRY pFoundEntry ) { NTSTATUS status = STATUS_SUCCESS; UNREFERENCED_PARAMETER( pFoundEntry ); // MDL is valid if (pPageEntry->pMdl) { // If MDL is mapped if (pPageEntry->newPtr) { DPRINT( "BlackBone: %s: Unmapping region at 0x%p from process %u\n", __FUNCTION__, pPageEntry->newPtr, pFoundEntry->target.pid ); MmUnmapLockedPages( (PVOID)pPageEntry->newPtr, pPageEntry->pMdl ); pPageEntry->newPtr = 0; } if (pPageEntry->locked) MmUnlockPages( pPageEntry->pMdl ); IoFreeMdl( pPageEntry->pMdl ); } RemoveEntryList( &pPageEntry->link ); ExFreePoolWithTag( pPageEntry, BB_POOL_TAG ); return status; }
void UserMemoryManager::CleanupCurrentProcess() { PEPROCESS curProc = PsGetCurrentProcess(); mLMIList.PublicLock(); LockedMemInfo *pLMI = mLMIList.Head(); // Unlock memory for this process. while (NULL!=(int)(pLMI)) { LockedMemInfo *pNext = mLMIList.Next(pLMI); if (pLMI->proc == curProc) { MmUnlockPages(pLMI->pMdl); IoFreeMdl(pLMI->pMdl); mLMIList.Remove(pLMI); mLMIAllocator.Free(pLMI); } pLMI = pNext; } mLMIList.PublicUnlock(); }
// Completion routine for an internally-built IRP: propagates the final
// status, releases the MDL, signals the caller's event, and frees the IRP.
// Always returns STATUS_MORE_PROCESSING_REQUIRED because the IRP is freed
// here and must not be touched by the I/O manager afterwards.
NTSTATUS
IoCompletionRoutine(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp,
    IN PVOID Context
    )
{
    //DbgPrint(("IoCompletionRoutine!\n"));

    // FIX: guard the UserIosb dereference — IRPs assembled by hand frequently
    // leave UserIosb NULL, and the original dereferenced it unconditionally.
    if (Irp->UserIosb)
        *Irp->UserIosb = Irp->IoStatus;

    if (Irp->MdlAddress)
    {
        //MmUnmapLockedPages( Irp->MdlAddress,
        //        MmGetSystemAddressForMdlSafe( Irp->MdlAddress, NormalPagePriority ) );
        MmUnlockPages( Irp->MdlAddress );
        IoFreeMdl(Irp->MdlAddress);
        Irp->MdlAddress = NULL;
    }

    if (Irp->UserEvent)
        KeSetEvent(Irp->UserEvent, IO_NO_INCREMENT, 0);

    IoFreeIrp(Irp);

    return STATUS_MORE_PROCESSING_REQUIRED;
    //return STATUS_SUCCESS;
}
/* Completion routine for simple paging reads: unlocks the pages of every
 * MDL on the IRP, then detaches and frees the whole MDL chain. */
NTSTATUS
NTAPI
MiSimpleReadComplete(PDEVICE_OBJECT DeviceObject,
                     PIRP Irp,
                     PVOID Context)
{
    PMDL Mdl;

    /* Unlock MDL Pages, page 167. */
    DPRINT("MiSimpleReadComplete %p\n", Irp);
    for (Mdl = Irp->MdlAddress; Mdl != NULL; Mdl = Mdl->Next)
    {
        DPRINT("MDL Unlock %p\n", Mdl);
        MmUnlockPages(Mdl);
    }

    /* Detach and free each MDL, leaving the IRP's list empty */
    Mdl = Irp->MdlAddress;
    while (Mdl != NULL)
    {
        PMDL Next = Mdl->Next;
        Irp->MdlAddress = Next;
        IoFreeMdl(Mdl);
        Mdl = Next;
    }

    return STATUS_SUCCESS;
}
/*
* SafeCopyMemory
*
* Purpose: copy Size bytes from a nonpaged source buffer to DstAddr through
* a locked system mapping, so the write succeeds regardless of the
* destination's protection. Returns STATUS_SUCCESS on a completed copy,
* STATUS_UNSUCCESSFUL if any mapping step failed, or the raised exception
* code if probing/copying faulted.
*
* Fixes vs. original:
*  - removed unused locals (r, bInit);
*  - track whether MmProbeAndLockPages actually succeeded: the original
*    called MmUnlockPages in the __except path even when the probe itself
*    raised, which unlocks an MDL whose pages were never locked (bugcheck).
*/
NTSTATUS SafeCopyMemory(PVOID SrcAddr, PVOID DstAddr, ULONG Size)
{
    PMDL pSrcMdl, pDstMdl;
    PUCHAR pSrcAddress, pDstAddress;
    NTSTATUS st = STATUS_UNSUCCESSFUL;
    BOOLEAN bLocked = FALSE;   // TRUE only after MmProbeAndLockPages succeeded

    pSrcMdl = IoAllocateMdl(SrcAddr, Size, FALSE, FALSE, NULL);
    if (MmIsAddressValidEx(pSrcMdl))
    {
        // Source is assumed nonpaged: describe it without probing.
        MmBuildMdlForNonPagedPool(pSrcMdl);
        pSrcAddress = (PUCHAR)MmGetSystemAddressForMdlSafe(pSrcMdl, NormalPagePriority);
        if (MmIsAddressValidEx(pSrcAddress))
        {
            pDstMdl = IoAllocateMdl(DstAddr, Size, FALSE, FALSE, NULL);
            if (MmIsAddressValidEx(pDstMdl))
            {
                __try
                {
                    MmProbeAndLockPages(pDstMdl, KernelMode, IoWriteAccess);
                    bLocked = TRUE;
                    pDstAddress = (PUCHAR)MmGetSystemAddressForMdlSafe(pDstMdl, NormalPagePriority);
                    if (MmIsAddressValidEx(pDstAddress))
                    {
                        RtlZeroMemory(pDstAddress, Size);
                        RtlCopyMemory(pDstAddress, pSrcAddress, Size);
                        st = STATUS_SUCCESS;
                    }
                    MmUnlockPages(pDstMdl);
                    bLocked = FALSE;
                }
                __except (EXCEPTION_EXECUTE_HANDLER)
                {
                    // Unlock only if the probe completed; always free both MDLs.
                    if (bLocked)
                        MmUnlockPages(pDstMdl);
                    IoFreeMdl(pDstMdl);
                    IoFreeMdl(pSrcMdl);
                    return GetExceptionCode();
                }
                IoFreeMdl(pDstMdl);
            }
        }
        IoFreeMdl(pSrcMdl);
    }
    return st;
}
VOID
CcMdlReadComplete2 (
    IN PFILE_OBJECT FileObject,
    IN PMDL MdlChain
    )

/*++

Routine Description:

    This routine must be called at IPL0 after a call to CcMdlRead.  The
    caller supplies the MdlChain returned by CcMdlRead; every MDL in the
    chain is unlocked and deallocated.

Arguments:

    FileObject - Pointer to the file object for a file which was opened with
                 NO_INTERMEDIATE_BUFFERING clear, i.e., for which
                 CcInitializeCacheMap was called by the file system.

    MdlChain - same as returned from corresponding call to CcMdlRead.

Return Value:

    None.

--*/

{
    PMDL MdlNext;

    DebugTrace(+1, me, "CcMdlReadComplete\n", 0 );
    DebugTrace( 0, me, " FileObject = %08lx\n", FileObject );
    DebugTrace( 0, me, " MdlChain = %08lx\n", MdlChain );

    //
    // Walk the chain, unlocking and freeing each MDL in turn.
    //

    for (; MdlChain != NULL; MdlChain = MdlNext) {

        MdlNext = MdlChain->Next;

        DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
        DebugTrace( 0, mm, " Mdl = %08lx\n", MdlChain );

        MmUnlockPages( MdlChain );
        IoFreeMdl( MdlChain );
    }

    DebugTrace(-1, me, "CcMdlReadComplete -> VOID\n", 0 );
}
// Releases both pinned regions owned by the pin (data buffer first, then the
// position pointer): unlock the pages, free the MDL, and clear the member so
// a repeated close is harmless. Delegates the rest to the base class.
NTSTATUS RtAudioPin::ClosePin()
{
    PAGED_CODE();

    PMDL* pinnedMdls[] = { &m_BufferMdl, &m_PositionPointerMdl };

    for (ULONG i = 0; i < 2; i++)
    {
        PMDL mdl = *pinnedMdls[i];
        if (mdl != NULL)
        {
            MmUnlockPages(mdl);
            IoFreeMdl(mdl);
            *pinnedMdls[i] = NULL;
        }
    }

    return KsPin::ClosePin();
}
// Releases a WSK buffer descriptor: unlocks the pages its MDL pins and
// frees the MDL itself. The WSK_BUF structure is owned by the caller.
VOID
FreeWskBuffer(
    __in PWSK_BUF WskBuffer
)
{
    PMDL mdl;

    ASSERT(WskBuffer);

    mdl = WskBuffer->Mdl;
    MmUnlockPages(mdl);
    IoFreeMdl(mdl);
}
/* * TsmiHandleMemWrite * * Purpose: * * Patch vbox dll in memory. * * Warning: If compiled not in ReleaseSigned configuration this function is a * potential BSOD-generator due to nonstandard way of loading, take care with patch offsets. * */ NTSTATUS TsmiHandleMemWrite( _In_ PVOID SrcAddress, _In_ PVOID DestAddress, _In_ ULONG Size ) { PMDL mdl; NTSTATUS status = STATUS_SUCCESS; PAGED_CODE(); mdl = IoAllocateMdl(DestAddress, Size, FALSE, FALSE, NULL); if (mdl == NULL) { #ifdef _DEBUGMSG DbgPrint("[TSMI] Failed to create MDL at write\n"); #endif return STATUS_INSUFFICIENT_RESOURCES; } #ifdef _SIGNED_BUILD __try { #endif //_SIGNED_BUILD if (DestAddress >= MmSystemRangeStart) if (!MmIsAddressValid(DestAddress)) { #ifdef _DEBUGMSG DbgPrint("[TSMI] Invalid address\n"); #endif //_DEBUGMSG return STATUS_ACCESS_VIOLATION; } MmProbeAndLockPages(mdl, KernelMode, IoReadAccess); DestAddress = MmGetSystemAddressForMdlSafe(mdl, HighPagePriority); if (DestAddress != NULL) { status = MmProtectMdlSystemAddress(mdl, PAGE_EXECUTE_READWRITE); __movsb((PUCHAR)DestAddress, (const UCHAR *)SrcAddress, Size); MmUnmapLockedPages(DestAddress, mdl); MmUnlockPages(mdl); } else { status = STATUS_ACCESS_VIOLATION; } #ifdef _SIGNED_BUILD } __except (EXCEPTION_EXECUTE_HANDLER) { status = STATUS_ACCESS_VIOLATION; #ifdef _DEBUGMSG DbgPrint("[TSMI] MmProbeAndLockPages failed at write DestAddress = %p\n", DestAddress); #endif //_DEBUGMSG } #endif //_SIGNED_BUILD IoFreeMdl(mdl); return status; }
// Tears down a previously locked kernel mapping: unlocks the MDL's pages and
// frees the MDL. A NULL MDL is tolerated and treated as a no-op.
VOID
UnmapMappedKernelAddress(
    IN PMDL pMdl
    )
{
    if (!pMdl)
        return;

    MmUnlockPages(pMdl);
    IoFreeMdl(pMdl);
}
// Copies Size bytes of kernel memory from SrcAddr (assumed nonpaged) to
// DstAddr through locked MDL mappings, zero-filling any source byte whose
// address is not valid. Returns STATUS_SUCCESS only if the destination
// mapping was obtained; STATUS_UNSUCCESSFUL otherwise.
// NOTE(review): this definition appears truncated in the visible chunk —
// the trailing IoFreeMdl(pSrcMdl)/return are not in view.
NTSTATUS DumpKernelMemory(PVOID DstAddr, PVOID SrcAddr, ULONG Size)
{
    PMDL pSrcMdl, pDstMdl;
    PUCHAR pAddress, pDstAddress;
    NTSTATUS st = STATUS_UNSUCCESSFUL;
    ULONG r;

    // Create an MDL for the source buffer
    pSrcMdl = IoAllocateMdl(SrcAddr, Size, FALSE, FALSE, NULL);
    if (pSrcMdl)
    {
        // Build the MDL (source assumed to be nonpaged pool)
        MmBuildMdlForNonPagedPool(pSrcMdl);
        // Obtain a system-space address from the MDL
        pAddress = (PUCHAR)MmGetSystemAddressForMdlSafe(pSrcMdl, NormalPagePriority);
        zDbgPrint("pAddress = %x", pAddress);
        if (pAddress != NULL)
        {
            pDstMdl = IoAllocateMdl(DstAddr, Size, FALSE, FALSE, NULL);
            zDbgPrint("pDstMdl = %x", pDstMdl);
            if (pDstMdl != NULL)
            {
                __try
                {
                    MmProbeAndLockPages(pDstMdl, KernelMode, IoWriteAccess);
                    pDstAddress = (PUCHAR)MmGetSystemAddressForMdlSafe(pDstMdl, NormalPagePriority);
                    zDbgPrint("pDstAddress = %x", pDstAddress);
                    if (pDstAddress != NULL)
                    {
                        memset(pDstAddress, 0, Size);
                        zDbgPrint("Copy block");
                        // NOTE(review): loop starts at r = 1, so only Size-1
                        // bytes are copied — looks like an off-by-one; confirm.
                        for (r = 1; r < Size; r++)
                        {
                            if (MmIsAddressValid(pAddress))
                                *pDstAddress = *pAddress;
                            else
                                *pDstAddress = 0;
                            pAddress++;
                            pDstAddress++;
                        }
                        st = STATUS_SUCCESS;
                    }
                    MmUnlockPages(pDstMdl);
                }
                __except(EXCEPTION_EXECUTE_HANDLER)
                {
                    zDbgPrint("Copy block exception");
                }
                IoFreeMdl(pDstMdl);
            }
        }
/* Completion routine: unlocks every MDL chained on the forwarded IRP, then
 * re-attaches the context's original MDL so the I/O manager disposes of it,
 * drops the file object reference, and frees the completion context. */
NTSTATUS
NTAPI
IoCompletion (
    PDEVICE_OBJECT DeviceObject,
    PIRP Irp,
    PVOID Ctx)
{
    PKSSTREAM_HEADER Header;
    PMDL Mdl, NextMdl;
    PWDMAUD_COMPLETION_CONTEXT Context = (PWDMAUD_COMPLETION_CONTEXT)Ctx;

    /* get stream header */
    Header = (PKSSTREAM_HEADER)Irp->AssociatedIrp.SystemBuffer;

    /* sanity check */
    ASSERT(Header);

    /* time to free all allocated mdls */
    Mdl = Irp->MdlAddress;
    while(Mdl)
    {
        /* get next mdl */
        NextMdl = Mdl->Next;

        /* unlock pages */
        MmUnlockPages(Mdl);

        /* grab next mdl */
        Mdl = NextMdl;
    }
    //IoFreeMdl(Mdl);
    /* restore the original MDL; the I/O manager will free it with the IRP.
     * NOTE(review): the chain MDLs above are unlocked but never freed here —
     * presumably they are released elsewhere; confirm ownership. */
    Irp->MdlAddress = Context->Mdl;

    DPRINT("IoCompletion Irp %p IoStatus %lx Information %lx\n", Irp, Irp->IoStatus.Status, Irp->IoStatus.Information);

    if (!NT_SUCCESS(Irp->IoStatus.Status))
    {
        /* failed */
        Irp->IoStatus.Information = 0;
    }

    /* dereference file object */
    ObDereferenceObject(Context->FileObject);

    /* free context */
    FreeItem(Context);
    return STATUS_SUCCESS;
}
// Restores the IRP fields that were swapped out when this context captured
// the request (system buffer, user buffer, MDL, flags), unlocks — but keeps —
// the context-owned MDL for reuse, and releases the secondary memory
// reference. The buffer and MDL themselves are freed later, in the destructor.
FORCEINLINE
VOID
FxIoContext::ReleaseAndRestore(
    __in FxRequestBase* Request
    )
{
    FxIrp* irp = NULL;

    irp = Request->GetSubmitFxIrp();

    // Put the caller's original pointers back on the IRP before completion.
    if (m_RestoreState) {
        irp->SetSystemBuffer(m_OriginalSystemBuffer);
        irp->SetUserBuffer(m_OriginalUserBuffer);
        irp->SetMdlAddress(m_OriginalMdl);
        irp->SetFlags(m_OriginalFlags);
        m_OriginalSystemBuffer = NULL;
        m_OriginalUserBuffer = NULL;
        m_OriginalMdl = NULL;
        // NOTE(review): flags reset with NULL rather than 0 — same value,
        // but 0 would express the intent better.
        m_OriginalFlags = NULL;
        m_RestoreState = FALSE;
    }

    //
    // If there was a buffer present don't free the buffer here so that
    // it can be reused for any request with the same size.
    // Similarly if there was an MDL to be freed unlock the pages but dont free
    // the Mdl so that it can be reused.
    //
    if (m_MdlToFree != NULL) {
        if (m_UnlockPages) {
            MmUnlockPages(m_MdlToFree);
            m_UnlockPages = FALSE;
        }

        // Remember the globals so the destructor can free the MDL later.
        m_DriverGlobals = Request->GetDriverGlobals();
    }

    //
    // Release the 2ndary buffer if we have an outstanding reference
    //
    if (m_OtherMemory != NULL) {
        m_OtherMemory->RELEASE(this);
        m_OtherMemory = NULL;
    }

    //
    // Release the other buffer and all __super related fields
    //
    __super::ReleaseAndRestore(Request);
}
// Destructor: releases the wrapped MDL, unlocking its pages first when this
// instance locked them. A never-allocated wrapper destructs as a no-op.
CMdl::~CMdl()
{
    if (!m_mdl)
        return;

    // Pages must be unlocked before the MDL itself is returned.
    if (m_locked)
    {
        MmUnlockPages(m_mdl);
        m_locked = false;
    }

    IoFreeMdl(m_mdl);
}
// Frees the pool allocation described by an MDL, unlocking its pages first
// if they are locked.
VOID
MemFreePoolMemoryForMdl(
    __in PMDL Mem
    )
{
    PVOID PagedMem;

    // Recover the virtual address of the pool block the MDL describes.
    PagedMem = MmGetMdlVirtualAddress(Mem);

    // Unlock only if the pages were actually locked.
    if (Mem->MdlFlags & MDL_PAGES_LOCKED)
        MmUnlockPages(Mem);

    // Free the pool block itself.
    // NOTE(review): the MDL structure is not freed here (no IoFreeMdl) —
    // presumably the caller owns and frees it; confirm against call sites.
    ExFreePoolWithTag(PagedMem,VFD_POOL_TAG);
}
// Destroys an entire MDL chain: for each MDL, unlock its pages when they are
// locked, then free it. The chain pointer is consumed; caller must not reuse it.
VOID
Ext2DestroyMdl (IN PMDL Mdl)
{
    PMDL Current;

    ASSERT (Mdl != NULL);

    for (Current = Mdl; Current != NULL; )
    {
        PMDL Following = Current->Next;

        if (IsFlagOn(Current->MdlFlags, MDL_PAGES_LOCKED)) {
            MmUnlockPages (Current);
        }

        IoFreeMdl (Current);
        Current = Following;
    }
}
// Unpins the user pages described by the memdesc's MDL, frees the MDL, and
// clears the stale pointer. Returns 0 on success or -EINVAL when the
// descriptor (or its MDL) is missing.
int hax_unpin_user_pages(hax_memdesc_user *memdesc)
{
    PMDL pmdl;

    if (!memdesc) {
        hax_error("%s: memdesc == NULL\n", __func__);
        return -EINVAL;
    }

    pmdl = memdesc->pmdl;
    if (!pmdl) {
        hax_error("%s: memdesc->pmdl == NULL\n", __func__);
        return -EINVAL;
    }

    MmUnlockPages(pmdl);
    IoFreeMdl(pmdl);
    memdesc->pmdl = NULL;

    return 0;
}
// Releases the page lock for a WSK buffer whose MDL came from an NBL.
// The MDL structure itself is intentionally not freed here.
static
VOID
FreeWskBuffer_NBL(
    IN PWSK_BUF	WskBuffer
)
{
    ASSERT(WskBuffer);
    TRACE_ENTER();

    // NOTE(review): this unlocks only when NONE of the three flags
    // (mapped-to-system-VA, pages-locked, nonpaged-pool source) are set.
    // Calling MmUnlockPages on an MDL whose MDL_PAGES_LOCKED flag is clear
    // looks inverted — confirm the intent against the matching allocation
    // path before changing it.
    if ((WskBuffer->Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) != MDL_MAPPED_TO_SYSTEM_VA &&
        (WskBuffer->Mdl->MdlFlags & MDL_PAGES_LOCKED) != MDL_PAGES_LOCKED &&
        (WskBuffer->Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) != MDL_SOURCE_IS_NONPAGED_POOL)
    {
        MmUnlockPages(WskBuffer->Mdl);
    }

    // NOTE(review): no IoFreeMdl here, unlike FreeWskBuffer — presumably the
    // NBL owner frees the MDL; verify.
    TRACE_EXIT();
}
// Frees an IRP together with its whole MDL chain: every MDL is freed, and
// those whose pages are still locked are unlocked first.
VOID
ImDiskFreeIrpWithMdls(PIRP Irp)
{
    PMDL mdl = Irp->MdlAddress;

    while (mdl != NULL)
    {
        PMDL following = mdl->Next;

        // Only unlock what was actually locked.
        if (mdl->MdlFlags & MDL_PAGES_LOCKED)
        {
            MmUnlockPages(mdl);
        }

        IoFreeMdl(mdl);
        mdl = following;
    }

    Irp->MdlAddress = NULL;
    IoFreeIrp(Irp);
}
// Undoes vbglLockLinear: releases the page-locking context created for a
// linear buffer. On Windows pvCtx is the MDL; elsewhere it is an IPRT memory
// object. pv/u32Size identify the original buffer and are unused here.
void vbglUnlockLinear (void *pvCtx, void *pv, uint32_t u32Size)
{
#ifdef RT_OS_WINDOWS
    PMDL pMdl = (PMDL)pvCtx;

    Assert(pMdl);
    if (pMdl != NULL)
    {
        // Unlock the pinned pages, then free the MDL itself.
        MmUnlockPages (pMdl);
        IoFreeMdl (pMdl);
    }
#else
    RTR0MEMOBJ MemObj = (RTR0MEMOBJ)pvCtx;
    int rc = RTR0MemObjFree(MemObj, false);
    AssertRC(rc);
#endif

    NOREF(pv);
    NOREF(u32Size);
}
// Completion routine for a hand-built SRB-ioctl IRP: on success, copies the
// returned payload back into the caller's buffer, then releases every
// resource attached to the IRP (system buffer, MDL, the IRP itself) and the
// send context. Returns STATUS_MORE_PROCESSING_REQUIRED because the IRP is
// freed here and must not be completed further by the I/O manager.
NTSTATUS
NdasDluSendSrbIoCompletion(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp,
    IN PVOID Context
    ){
    PMINISENDSRB_CONTEXT sendSrb = (PMINISENDSRB_CONTEXT)Context;

    UNREFERENCED_PARAMETER(DeviceObject);

    KDPrint(1,("STATUS=%08lx\n", Irp->IoStatus.Status));

    //
    // get the result
    //
    if(Irp->IoStatus.Status == STATUS_SUCCESS) {
        // Payload follows the SRB_IO_CONTROL header in the context.
        if(sendSrb->UserBuffer && sendSrb->UserBufferLen)
            RtlCopyMemory(
                sendSrb->UserBuffer,
                (PUCHAR)&sendSrb->SrbIoctl + sizeof(SRB_IO_CONTROL),
                sendSrb->UserBufferLen);
    }

    // Free the IRP resources
    if(Irp->AssociatedIrp.SystemBuffer)
        ExFreePool(Irp->AssociatedIrp.SystemBuffer);

    if( Irp->MdlAddress != NULL ) {
        MmUnlockPages( Irp->MdlAddress );
        IoFreeMdl( Irp->MdlAddress );
        Irp->MdlAddress = NULL;
    }

    // Free the IRP
    IoFreeIrp(Irp);
    ExFreePool(sendSrb);

    return STATUS_MORE_PROCESSING_REQUIRED;
}
NTSTATUS
W2KNtfsPerformVerifyDiskRead (
    IN PDEVICE_OBJECT DeviceObject,
    IN PVOID Buffer,
    IN LONGLONG Offset,
    IN ULONG NumberOfBytesToRead
    )

/*++

Routine Description:

    This routine is used to read in a range of bytes from the disk.  It
    bypasses all of the caching and regular I/O logic, and builds and issues
    the requests itself.  It does this operation overriding the verify
    volume flag in the device object.

Arguments:

    DeviceObject - Supplies the device to read from

    Buffer - Supplies the buffer that will recieve the results of this operation

    Offset - Supplies the offset of where to start reading

    NumberOfBytesToRead - Supplies the number of bytes to read, this must
        be in multiple of bytes units acceptable to the disk driver.

Return Value:

    The final status of the read operation.

--*/

{
    KEVENT Event;
    PIRP Irp;
    NTSTATUS Status;

    PAGED_CODE();

    //
    // Initialize the event we're going to use
    //
    KeInitializeEvent( &Event, NotificationEvent, FALSE );

    //
    // Build the irp for the operation and also set the overrride flag
    //
    // Note that we may be at APC level, so do this asyncrhonously and
    // use an event for synchronization normal request completion
    // cannot occur at APC level.
    //
    Irp = IoBuildAsynchronousFsdRequest( IRP_MJ_READ, DeviceObject, Buffer, NumberOfBytesToRead, (PLARGE_INTEGER)&Offset, NULL );

    if ( Irp == NULL ) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    SetFlag( IoGetNextIrpStackLocation( Irp )->Flags, SL_OVERRIDE_VERIFY_VOLUME );

    //
    // Set up the completion routine (it signals Event on completion)
    //
    IoSetCompletionRoutine( Irp, W2KNtfsVerifyReadCompletionRoutine, &Event, TRUE, TRUE, TRUE );

    //
    // Call the device to do the write and wait for it to finish.
    //
    try {

        (VOID)IoCallDriver( DeviceObject, Irp );
        (VOID)KeWaitForSingleObject( &Event, Executive, KernelMode, FALSE, (PLARGE_INTEGER)NULL );

        //
        // Grab the Status.
        //
        Status = Irp->IoStatus.Status;

    } finally {

        //
        // If there is an MDL (or MDLs) associated with this I/O
        // request, Free it (them) here.  This is accomplished by
        // walking the MDL list hanging off of the IRP and deallocating
        // each MDL encountered.
        //
        while (Irp->MdlAddress != NULL) {

            PMDL NextMdl;

            NextMdl = Irp->MdlAddress->Next;

            MmUnlockPages( Irp->MdlAddress );
            IoFreeMdl( Irp->MdlAddress );

            Irp->MdlAddress = NextMdl;
        }

        IoFreeIrp( Irp );
    }

    //
    // If it doesn't succeed then raise the error
    //
    return Status;
}
// Dispatch function NTSTATUS DriverDispatch(DEVICE_OBJECT *device_object, IRP *irp) { NTSTATUS ret = STATUS_SUCCESS; IO_STACK_LOCATION *stack; void *buf; bool ok; // Validate arguments if (wfp == NULL || device_object == NULL || irp == NULL || wfp->Halting) { return NDIS_STATUS_FAILURE; } // Get the IRP stack stack = IoGetCurrentIrpStackLocation(irp); // Initialize the number of bytes irp->IoStatus.Information = 0; irp->IoStatus.Status = STATUS_SUCCESS; buf = irp->UserBuffer; if (wfp->Halting != FALSE) { // Device driver is terminating irp->IoStatus.Information = STATUS_UNSUCCESSFUL; IoCompleteRequest(irp, IO_NO_INCREMENT); return STATUS_UNSUCCESSFUL; } ok = false; // Branch to each operation switch (stack->MajorFunction) { case IRP_MJ_CREATE: // Open ok = true; break; case IRP_MJ_CLOSE: // Close ok = true; break; case IRP_MJ_READ: // Read ResetEvent(wfp->Event); break; case IRP_MJ_WRITE: // Write if ((stack->Parameters.Write.Length % sizeof(WFP_LOCAL_IP)) == 0) { // Address check bool check_ok = true; __try { ProbeForRead(buf, stack->Parameters.Write.Length, 1); } __except (EXCEPTION_EXECUTE_HANDLER) { check_ok = false; } if (check_ok) { MDL *mdl = IoAllocateMdl(buf, stack->Parameters.Write.Length, false, false, NULL); UINT size = MIN(WFP_MAX_LOCAL_IP_COUNT * sizeof(WFP_LOCAL_IP), stack->Parameters.Write.Length); UCHAR *copied_buf = Malloc(size); UCHAR *old_buf; if (mdl != NULL) { MmProbeAndLockPages(mdl, KernelMode, IoWriteAccess); } Copy(copied_buf, buf, size); SpinLock(wfp->LocalIPListLock); { old_buf = wfp->LocalIPListData; wfp->LocalIPListData = copied_buf; wfp->LocalIPListSize = size; } SpinUnlock(wfp->LocalIPListLock); if (old_buf != NULL) { Free(old_buf); } if (mdl != NULL) { MmUnlockPages(mdl); IoFreeMdl(mdl); } } } irp->IoStatus.Information = stack->Parameters.Write.Length; ok = true; break; }
// Frees a native NT memory object. Each object type has its own teardown:
// mappings are unmapped, locks are unlocked, page/contiguous allocations are
// returned, and the MDL(s) describing the memory are released.
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                // Unmap first, then release any secured region, then the pages.
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* rtR0MemObjNativeEnterPhys? */
            if (!pMemNt->Core.u.Phys.fAllocated)
            {
#ifndef IPRT_TARGET_NT4
                Assert(!pMemNt->fAllocatedPagesForMdl);
#endif
                /* Nothing to do here. */
                break;
            }
            /* fall thru */

        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            // Unsecure first, then unlock and free every per-chunk MDL.
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                // Mapping was made from the parent's MDL.
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                        ||  pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                // Physical (I/O space) mapping without an MDL.
                Assert(     pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                        &&  !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
/*
 * @implemented
 *
 * Probes the virtual address range described by the MDL for the requested
 * access, faults in any non-resident pages, references each physical page,
 * and records the PFNs in the MDL's page array. Raises STATUS_ACCESS_VIOLATION
 * (or the probe's exception code) on failure — callers must wrap in SEH.
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks: the MDL must describe something and must not already be
    // locked, mapped, partial, or describing nonpaged pool / I/O space.
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_MAPPED_TO_SYSTEM_VA | MDL_SOURCE_IS_NONPAGED_POOL | MDL_PARTIAL | MDL_IO_SPACE)) == 0);

    //
    // Get page array and base information (PFN array follows the MDL header)
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access: user-mode ranges must stay below the probe limit */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Only user-space ranges have an owning process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them: touch each page (and write-probe user pages for write
    // operations) so any access violation surfaces here, under SEH.
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) && (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go? A probe failure re-raises with no pages locked.
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE (and PPE/PXE on larger paging levels)
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Record the operation kind on the MDL
    //
    if (Operation != IoReadAccess)
    {
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now* — the cleanup path relies on this flag.
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Kernel ranges are protected by the PFN lock; user ranges by the
    // process working-set lock.
    //
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages, TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages; fault them in with
        // the lock dropped, then re-acquire and re-test.
        //
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN and reference it (or mark the MDL as I/O space)
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif
    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
// Walks the session's opened-file queue. For every user file that has not
// been cleaned up yet, builds a synchronous IRP_MJ_CLEANUP by hand and sends
// it through FatFsdCleanup (with a PRIMARY_REQUEST_INFO smuggled in as the
// top-level IRP), then dereferences the file object and closes its handles.
// When Remove is TRUE the entry is also unlinked and freed.
VOID
CloseOpenFiles (
    IN PPRIMARY_SESSION PrimarySession,
    IN BOOLEAN Remove
    )
{
    PLIST_ENTRY openFileEntry;

    for (openFileEntry = PrimarySession->Thread.OpenedFileQueue.Flink;
         openFileEntry != &PrimarySession->Thread.OpenedFileQueue; )
    {
        POPEN_FILE openFile;
        NTSTATUS closeStatus;

        openFile = CONTAINING_RECORD( openFileEntry, OPEN_FILE, ListEntry );
        // Advance before the entry is potentially unlinked below.
        openFileEntry = openFileEntry->Flink;

        //ASSERT( FALSE );

        if (openFile->CleanUp == FALSE) {

            TYPE_OF_OPEN typeOfOpen;
            PVCB vcb;
            PFCB fcb;
            //PSCB scb;
            PCCB ccb;

            typeOfOpen = FatDecodeFileObject( openFile->FileObject, &vcb, &fcb, &ccb );

            if (typeOfOpen == UserFileOpen) {

                PIRP irp;
                PFILE_OBJECT fileObject;
                PDEVICE_OBJECT deviceObject;
                KPROCESSOR_MODE requestorMode;
                PIO_STACK_LOCATION irpSp;
                BOOLEAN synchronousIo;
                PKEVENT eventObject = (PKEVENT) NULL;
                ULONG keyValue = 0;
                LARGE_INTEGER fileOffset = {0,0};
                PULONG majorFunction;
                PETHREAD currentThread;
                KEVENT event;
                NDFS_WINXP_REQUEST_HEADER ndfsWinxpRequestHeader;
                PIRP topLevelIrp;
                PRIMARY_REQUEST_INFO primaryRequestInfo;
                NTSTATUS cleanupStatus;

                do {

                    synchronousIo = openFile->FileObject ? BooleanFlagOn(openFile->FileObject->Flags, FO_SYNCHRONOUS_IO) : TRUE;
                    ASSERT( synchronousIo == TRUE );

                    deviceObject = &PrimarySession->VolDo->DeviceObject;
                    fileObject = openFile->FileObject;
                    currentThread = PsGetCurrentThread ();

                    ASSERT( deviceObject->StackSize >= 1 );

                    // Build the cleanup IRP manually (no IoBuildXxx helper).
                    irp = IoAllocateIrp( deviceObject->StackSize, TRUE );
                    requestorMode = KernelMode;

                    if (!irp) {
                        ASSERT( NDASFAT_INSUFFICIENT_RESOURCES );
                        break;
                    }

                    irp->Tail.Overlay.OriginalFileObject = fileObject;
                    irp->Tail.Overlay.Thread = currentThread;
                    irp->Tail.Overlay.AuxiliaryBuffer = (PVOID) NULL;
                    irp->RequestorMode = requestorMode;
                    irp->PendingReturned = FALSE;
                    irp->Cancel = FALSE;
                    irp->CancelRoutine = (PDRIVER_CANCEL) NULL;
                    irp->UserEvent = eventObject;
                    irp->UserIosb = NULL; //&ioStatusBlock;
                    irp->Overlay.AsynchronousParameters.UserApcRoutine = NULL; //ApcRoutine;
                    irp->Overlay.AsynchronousParameters.UserApcContext = NULL; //ApcContext;

                    // Completion routine signals this event for STATUS_PENDING.
                    KeInitializeEvent( &event, NotificationEvent, FALSE );
                    IoSetCompletionRoutine( irp, PrimaryCompletionRoutine, &event, TRUE, TRUE, TRUE );
                    IoSetNextIrpStackLocation( irp );
                    irpSp = IoGetCurrentIrpStackLocation( irp ); // = &currentIrpSp; // = IoGetNextIrpStackLocation( irp );

                    majorFunction = (PULONG) (&irpSp->MajorFunction);
                    *majorFunction = IRP_MJ_CLEANUP;
                    irpSp->Control = (SL_INVOKE_ON_SUCCESS | SL_INVOKE_ON_ERROR | SL_INVOKE_ON_CANCEL);
                    irpSp->MinorFunction = IRP_MJ_CLEANUP;
                    irpSp->FileObject = fileObject;
                    irpSp->DeviceObject = deviceObject;

                    irp->AssociatedIrp.SystemBuffer = (PVOID) NULL;
                    irp->MdlAddress = (PMDL) NULL;

                    // Snapshot the file sizes for the remote cleanup request.
                    ndfsWinxpRequestHeader.CleanUp.AllocationSize = fcb->Header.AllocationSize.QuadPart;
                    ndfsWinxpRequestHeader.CleanUp.FileSize = fcb->Header.FileSize.LowPart;
                    ndfsWinxpRequestHeader.CleanUp.ValidDataLength = fcb->Header.FileSize.LowPart;
                    ndfsWinxpRequestHeader.CleanUp.VaildDataToDisk = fcb->Header.FileSize.LowPart;

                    primaryRequestInfo.PrimaryTag = 0xe2027482;
                    primaryRequestInfo.PrimarySession = PrimarySession;
                    primaryRequestInfo.NdfsWinxpRequestHeader = &ndfsWinxpRequestHeader;

                    // Pass the request info to the FSD via the top-level IRP slot.
                    topLevelIrp = IoGetTopLevelIrp();
                    ASSERT( topLevelIrp == NULL );
                    IoSetTopLevelIrp( (PIRP)&primaryRequestInfo );

                    cleanupStatus = FatFsdCleanup( PrimarySession->VolDo, irp );

                    if (cleanupStatus == STATUS_PENDING) {
                        KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL );
                    }

                    IoSetTopLevelIrp( topLevelIrp );

                    cleanupStatus = irp->IoStatus.Status;
                    ASSERT( cleanupStatus == STATUS_SUCCESS );

                    // Release any MDL the FSD attached, then the IRP itself.
                    if (irp->MdlAddress != NULL) {
                        MmUnlockPages( irp->MdlAddress );
                        IoFreeMdl( irp->MdlAddress );
                    }
                    IoFreeIrp( irp );

                } while (0);
            }

            openFile->CleanUp = TRUE;
        }

        if (openFile->FileObject) {
            ObDereferenceObject( openFile->FileObject );
            openFile->FileObject = NULL;
        }

        if (openFile->FileHandle) {
            closeStatus = ZwClose( openFile->FileHandle );
            openFile->FileHandle = NULL;
            ASSERT( closeStatus == STATUS_SUCCESS );
        }

        if (openFile->EventHandle) {
            closeStatus = ZwClose( openFile->EventHandle );
            openFile->EventHandle = NULL;
            ASSERT(closeStatus == STATUS_SUCCESS);
        }

        if (Remove) {
            RemoveEntryList( &openFile->ListEntry );
            InitializeListHead( &openFile->ListEntry );
            PrimarySession_FreeOpenFile( PrimarySession, openFile );
        }
    }

    return;
}