/*
 * Queues Message on a port's LpcDataInfoChainHead so that the data-info
 * portion can be retrieved later by a reply.
 *
 * Port      - port to save the message on; for connected port types the
 *             message is linked on the port's connection port instead.
 * Message   - LPC message to link.
 * LockFlags - LPCP_LOCK_HELD means the caller already owns LpcpLock, in
 *             which case this routine neither acquires nor releases it.
 */
VOID
NTAPI
LpcpSaveDataInfoMessage(IN PLPCP_PORT_OBJECT Port,
                        IN PLPCP_MESSAGE Message,
                        IN ULONG LockFlags)
{
    BOOLEAN LockHeld = (LockFlags & LPCP_LOCK_HELD);
    PAGED_CODE();

    /* Acquire the lock unless the caller already holds it */
    if (!LockHeld) KeAcquireGuardedMutex(&LpcpLock);

    /* Check if the port we want is the connection port */
    if ((Port->Flags & LPCP_PORT_TYPE_MASK) > LPCP_UNCONNECTED_PORT)
    {
        /* Use it instead of the port we were given */
        Port = Port->ConnectionPort;
        if (!Port)
        {
            /* No connection port anymore: release the lock and return */
            if (!LockHeld) KeReleaseGuardedMutex(&LpcpLock);
            return;
        }
    }

    /* Link the message into the port's data-info chain */
    InsertTailList(&Port->LpcDataInfoChainHead, &Message->Entry);

    /* Release the lock only if we acquired it ourselves */
    if (!LockHeld) KeReleaseGuardedMutex(&LpcpLock);
}
/*
 * Detaches an exiting thread from the LPC reply machinery.
 * Must run in the context of the exiting thread itself (asserted below).
 */
VOID
NTAPI
LpcExitThread(IN PETHREAD Thread)
{
    PLPCP_MESSAGE Message;
    ASSERT(Thread == PsGetCurrentThread());

    /* Acquire the LPC database lock */
    KeAcquireGuardedMutex(&LpcpLock);

    /* Make sure that the Reply Chain is empty */
    if (!IsListEmpty(&Thread->LpcReplyChain))
    {
        /* It's not, remove the entry from whatever chain it is on */
        RemoveEntryList(&Thread->LpcReplyChain);
    }

    /* Set the thread in exit mode so no further LPC sends are accepted */
    Thread->LpcExitThreadCalled = TRUE;
    Thread->LpcReplyMessageId = 0;

    /* Check if there's a reply message still attached to the thread */
    Message = LpcpGetMessageFromThread(Thread);
    if (Message)
    {
        /* FIXME: TODO -- pending reply cleanup is not implemented yet */
        ASSERT(FALSE);
    }

    /* Release the lock */
    KeReleaseGuardedMutex(&LpcpLock);
}
// // TdDeleteProtectNameCallback // NTSTATUS TdDeleteProtectNameCallback () { NTSTATUS Status = STATUS_SUCCESS; DbgPrintEx ( DPFLTR_IHVDRIVER_ID, DPFLTR_TRACE_LEVEL, "ObCallbackTest: TdDeleteProtectNameCallback entering\n"); KeAcquireGuardedMutex (&TdCallbacksMutex); // if the callbacks are active - remove them if (bCallbacksInstalled == TRUE) { ObUnRegisterCallbacks(pCBRegistrationHandle); pCBRegistrationHandle = NULL; bCallbacksInstalled = FALSE; } KeReleaseGuardedMutex (&TdCallbacksMutex); DbgPrintEx ( DPFLTR_IHVDRIVER_ID, DPFLTR_TRACE_LEVEL, "ObCallbackTest: TdDeleteProtectNameCallback exiting - status 0x%x\n", Status ); return Status; }
/*
 * Writes a VACB's data back to disk and, on success, clears its dirty
 * state and unlinks it from the dirty VACB list.
 * Returns the status of the underlying write.
 */
NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    /* Flush the view's contents to disk first */
    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: global ViewLock before the per-map CacheMapLock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        /* The view is clean now: unlink it, fix the global dirty page
           accounting, and drop the reference held by the dirty state */
        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
/*
 * @implemented
 *
 * Looks up the run containing Vbn; all OUT parameters are optional.
 * Thin synchronized wrapper over FsRtlLookupBaseMcbEntry.
 */
BOOLEAN
NTAPI
FsRtlLookupLargeMcbEntry(IN PLARGE_MCB Mcb,
                         IN LONGLONG Vbn,
                         OUT PLONGLONG Lbn OPTIONAL,
                         OUT PLONGLONG SectorCountFromLbn OPTIONAL,
                         OUT PLONGLONG StartingLbn OPTIONAL,
                         OUT PLONGLONG SectorCountFromStartingLbn OPTIONAL,
                         OUT PULONG Index OPTIONAL)
{
    BOOLEAN Found;

    DPRINT("FsRtlLookupLargeMcbEntry Mcb %p Vbn %I64d\n", Mcb, Vbn);

    /* Delegate to the base-MCB lookup while holding the MCB's mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    Found = FsRtlLookupBaseMcbEntry(&Mcb->BaseMcb,
                                    Vbn,
                                    Lbn,
                                    SectorCountFromLbn,
                                    StartingLbn,
                                    SectorCountFromStartingLbn,
                                    Index);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done %u\n", Found);
    return Found;
}
/*
 * @unimplemented
 *
 * Queues a work item to be executed when the system shuts down.
 * Fails with STATUS_SYSTEM_SHUTDOWN once shutdown processing has begun.
 */
NTSTATUS
NTAPI
PoQueueShutdownWorkItem(
    _In_ PWORK_QUEUE_ITEM WorkItem)
{
    NTSTATUS Status;

    /* Serialize access to the shutdown queue */
    KeAcquireGuardedMutex(&PopShutdownListMutex);

    if (!PopShutdownListAvailable)
    {
        /* Shutdown already in progress: refuse new work items */
        Status = STATUS_SYSTEM_SHUTDOWN;
    }
    else
    {
        /* Queue the work item for execution at shutdown time */
        InsertTailList(&PopShutdownQueue, &WorkItem->List);
        Status = STATUS_SUCCESS;
    }

    KeReleaseGuardedMutex(&PopShutdownListMutex);
    return Status;
}
/* Wakes any waiter on this selinfo by signalling its notification event.
 * The guarded mutex serializes against nm_os_selrecord. */
void nm_os_selwakeup(NM_SELINFO_T *queue)
{
    KeAcquireGuardedMutex(&queue->mutex);
    KeSetEvent(&queue->queue, PI_NET, FALSE);
    KeReleaseGuardedMutex(&queue->mutex);
}
/*
 * Releases an LPC message back to the paged lookaside list, first
 * unlinking it and dropping any thread / client-port references it holds.
 *
 * LockFlags: LPCP_LOCK_HELD    - caller owns LpcpLock on entry.
 *            LPCP_LOCK_RELEASE - caller wants the lock left released.
 *
 * NOTE: LpcpLock is always dropped before the dereference/free below;
 * it is re-acquired on exit only when the caller held it and did not
 * ask for it to be released.
 */
VOID
NTAPI
LpcpFreeToPortZone(IN PLPCP_MESSAGE Message,
                   IN ULONG LockFlags)
{
    PLPCP_CONNECTION_MESSAGE ConnectMessage;
    PLPCP_PORT_OBJECT ClientPort = NULL;
    PETHREAD Thread = NULL;
    BOOLEAN LockHeld = (LockFlags & LPCP_LOCK_HELD);
    BOOLEAN ReleaseLock = (LockFlags & LPCP_LOCK_RELEASE);
    PAGED_CODE();
    LPCTRACE(LPC_CLOSE_DEBUG, "Message: %p. LockFlags: %lx\n", Message, LockFlags);

    /* Acquire the lock if not already held by the caller */
    if (!LockHeld) KeAcquireGuardedMutex(&LpcpLock);

    /* If the message is still linked on a queue, unlink it */
    if (!IsListEmpty(&Message->Entry))
    {
        /* Remove and re-initialize so the entry is safely unlinked */
        RemoveEntryList(&Message->Entry);
        InitializeListHead(&Message->Entry);
    }

    /* Check if we've already replied */
    if (Message->RepliedToThread)
    {
        /* Remember the thread so we can dereference it after unlocking */
        Thread = Message->RepliedToThread;
        Message->RepliedToThread = NULL;
    }

    /* Check if this is a connection request */
    if (Message->Request.u2.s2.Type == LPC_CONNECTION_REQUEST)
    {
        /* The connection message immediately follows the message header */
        ConnectMessage = (PLPCP_CONNECTION_MESSAGE)(Message + 1);

        /* Capture and clear the client port for later dereference */
        ClientPort = ConnectMessage->ClientPort;
        if (ClientPort) ConnectMessage->ClientPort = NULL;
    }

    /* Release the lock (unconditionally: the work below may block) */
    KeReleaseGuardedMutex(&LpcpLock);

    /* Drop any references we collected above */
    if (Thread) ObDereferenceObject(Thread);
    if (ClientPort) ObDereferenceObject(ClientPort);

    /* Return the message to the lookaside list */
    ExFreeToPagedLookasideList(&LpcpMessagesLookaside, Message);

    /* Re-acquire the lock if the caller held it and didn't want it released */
    if ((LockHeld) && !(ReleaseLock)) KeAcquireGuardedMutex(&LpcpLock);
}
/*
 * Returns a key control block to its allocator. Pool-allocated KCBs go
 * straight back to pool; privately allocated ones are put on the free
 * list, and when an entire allocation page becomes free the page itself
 * is released.
 */
VOID
NTAPI
CmpFreeKeyControlBlock(IN PCM_KEY_CONTROL_BLOCK Kcb)
{
    ULONG i;
    PCM_ALLOC_PAGE AllocPage;
    PAGED_CODE();

    /* Sanity checks: no key bodies may still reference this KCB */
    ASSERT(IsListEmpty(&Kcb->KeyBodyListHead) == TRUE);
    for (i = 0; i < 4; i++) ASSERT(Kcb->KeyBodyArray[i] == NULL);

    /* Check if it wasn't privately allocated */
    if (!Kcb->PrivateAlloc)
    {
        /* Free it from the pool directly */
        CmpFree(Kcb, 0);
        return;
    }

    /* Acquire the private allocation lock */
    KeAcquireGuardedMutex(&CmpAllocBucketLock);

    /* Sanity check on hash-entry lock ownership */
    CMP_ASSERT_HASH_ENTRY_LOCK(Kcb->ConvKey);

    /* Add us to the free list */
    InsertTailList(&CmpFreeKCBListHead, &Kcb->FreeListEntry);

    /* Get the allocation page this KCB lives on */
    AllocPage = CmpGetAllocPageFromKcb(Kcb);

    /* Sanity check: the page can't already be fully free */
    ASSERT(AllocPage->FreeCount != CM_KCBS_PER_PAGE);

    /* Bump the free count; reclaim the page once it is entirely free */
    if (++AllocPage->FreeCount == CM_KCBS_PER_PAGE)
    {
        /* Loop all the entries on the page */
        for (i = 0; i < CM_KCBS_PER_PAGE; i++)
        {
            /* Get the i-th KCB on the page (note: reuses the Kcb variable) */
            Kcb = (PVOID)((ULONG_PTR)AllocPage + FIELD_OFFSET(CM_ALLOC_PAGE, AllocPage) + i * sizeof(CM_KEY_CONTROL_BLOCK));

            /* Unlink it from the free list */
            RemoveEntryList(&Kcb->FreeListEntry);
        }

        /* Free the whole page */
        CmpFree(AllocPage, 0);
    }

    /* Release the lock */
    KeReleaseGuardedMutex(&CmpAllocBucketLock);
}
/*
 * Detaches the pending reply/connection message (if any) from
 * CurrentThread. On return *Message and *ConnectMessage describe the
 * detached message (or *Message is NULL), and the function returns the
 * section that was to be mapped for the connection, or NULL.
 */
PVOID
NTAPI
LpcpFreeConMsg(IN OUT PLPCP_MESSAGE *Message,
               IN OUT PLPCP_CONNECTION_MESSAGE *ConnectMessage,
               IN PETHREAD CurrentThread)
{
    PVOID SectionToMap;
    PLPCP_MESSAGE ReplyMessage;

    /* Acquire the LPC lock */
    KeAcquireGuardedMutex(&LpcpLock);

    /* Check if the reply chain is not empty */
    if (!IsListEmpty(&CurrentThread->LpcReplyChain))
    {
        /* Remove this entry and re-initialize it so it is safely unlinked */
        RemoveEntryList(&CurrentThread->LpcReplyChain);
        InitializeListHead(&CurrentThread->LpcReplyChain);
    }

    /* Check if there's a reply message attached to the thread */
    ReplyMessage = LpcpGetMessageFromThread(CurrentThread);
    if (ReplyMessage)
    {
        /* Hand the message back to the caller */
        *Message = ReplyMessage;

        /* If the message is still linked somewhere, unlink it */
        if (!IsListEmpty(&ReplyMessage->Entry))
        {
            /* Clear the list linkage */
            RemoveEntryList(&ReplyMessage->Entry);
            InitializeListHead(&ReplyMessage->Entry);
        }

        /* Clear the thread's message data */
        CurrentThread->LpcReceivedMessageId = 0;
        CurrentThread->LpcReplyMessage = NULL;

        /* The connection message follows the header; take its section */
        *ConnectMessage = (PLPCP_CONNECTION_MESSAGE)(ReplyMessage + 1);
        SectionToMap = (*ConnectMessage)->SectionToMap;
        (*ConnectMessage)->SectionToMap = NULL;
    }
    else
    {
        /* No message to return */
        *Message = NULL;
        SectionToMap = NULL;
    }

    /* Release the lock and return the section */
    KeReleaseGuardedMutex(&LpcpLock);
    return SectionToMap;
}
/*
 * @implemented
 *
 * Truncates the MCB so that it describes no VBNs at or beyond Vbn.
 * Synchronized wrapper over FsRtlTruncateBaseMcb.
 */
VOID
NTAPI
FsRtlTruncateLargeMcb(IN PLARGE_MCB Mcb,
                      IN LONGLONG Vbn)
{
    DPRINT("FsRtlTruncateLargeMcb %p Vbn %I64d\n", Mcb, Vbn);

    /* Truncate the underlying base MCB while holding its mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    FsRtlTruncateBaseMcb(&Mcb->BaseMcb, Vbn);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done\n");
}
//nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si) void nm_os_selrecord(IO_STACK_LOCATION *irpSp, win_SELINFO *ev) { if (irpSp->FileObject->FsContext2 == NULL) { //DbgPrint("%i: nm_selrecord on 0x%p", PsGetCurrentThreadId(), &ev->queue); KeAcquireGuardedMutex(&ev->mutex); irpSp->FileObject->FsContext2 = ev; KeClearEvent(&ev->queue); KeReleaseGuardedMutex(&ev->mutex); } }
/*
 * @implemented
 *
 * Removes all mapping pairs from the MCB. Callers that synchronize the
 * MCB themselves pass SelfSynchronized = TRUE to skip the mutex.
 */
VOID
NTAPI
FsRtlResetLargeMcb(IN PLARGE_MCB Mcb,
                   IN BOOLEAN SelfSynchronized)
{
    BOOLEAN NeedLock = !SelfSynchronized;

    if (NeedLock) KeAcquireGuardedMutex(Mcb->GuardedMutex);

    /* Drop every run from the base MCB */
    FsRtlResetBaseMcb(&Mcb->BaseMcb);

    if (NeedLock) KeReleaseGuardedMutex(Mcb->GuardedMutex);
}
/*
 * @implemented
 *
 * Removes the mapping for SectorCount sectors starting at Vbn.
 * Synchronized wrapper over FsRtlRemoveBaseMcbEntry.
 */
VOID
NTAPI
FsRtlRemoveLargeMcbEntry(IN PLARGE_MCB Mcb,
                         IN LONGLONG Vbn,
                         IN LONGLONG SectorCount)
{
    DPRINT("FsRtlRemoveLargeMcbEntry Mcb %p, Vbn %I64d, SectorCount %I64d\n", Mcb, Vbn, SectorCount);

    /* Remove the run from the base MCB while holding its mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    FsRtlRemoveBaseMcbEntry(&Mcb->BaseMcb, Vbn, SectorCount);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done\n");
}
/*
 * Registers Thread on the shutdown wait list so that shutdown processing
 * waits for it to terminate. Takes a reference on the thread; the
 * reference is dropped either here on failure or by the shutdown path.
 *
 * Returns STATUS_SUCCESS, STATUS_NO_MEMORY if the wait entry could not
 * be allocated, or STATUS_UNSUCCESSFUL once shutdown has begun.
 */
NTSTATUS
NTAPI
PoRequestShutdownWait(
    _In_ PETHREAD Thread)
{
    PPOP_SHUTDOWN_WAIT_ENTRY ShutDownWaitEntry;
    NTSTATUS Status;
    PAGED_CODE();

    /* Allocate a new shutdown wait entry.
       BUGFIX: the size was a hard-coded 8 bytes, which is too small for
       the two-pointer POP_SHUTDOWN_WAIT_ENTRY on 64-bit builds. */
    ShutDownWaitEntry = ExAllocatePoolWithTag(PagedPool,
                                              sizeof(*ShutDownWaitEntry),
                                              'LSoP');
    if (ShutDownWaitEntry == NULL)
    {
        return STATUS_NO_MEMORY;
    }

    /* Reference the thread and save it in the wait entry */
    ObReferenceObject(Thread);
    ShutDownWaitEntry->Thread = Thread;

    /* Acquire the shutdown list lock */
    KeAcquireGuardedMutex(&PopShutdownListMutex);

    /* Check if the list is still available */
    if (PopShutdownListAvailable)
    {
        /* Insert the item at the head of the singly-linked list */
        ShutDownWaitEntry->NextEntry = PopShutdownThreadList;
        PopShutdownThreadList = ShutDownWaitEntry;

        /* We are successful */
        Status = STATUS_SUCCESS;
    }
    else
    {
        /* We cannot proceed, clean up and return failure
           (free with the same tag the entry was allocated with) */
        ObDereferenceObject(Thread);
        ExFreePoolWithTag(ShutDownWaitEntry, 'LSoP');
        Status = STATUS_UNSUCCESSFUL;
    }

    /* Release the list lock */
    KeReleaseGuardedMutex(&PopShutdownListMutex);

    /* Return the status */
    return Status;
}
/*
 * Runs every queued shutdown work item and then waits for every
 * registered shutdown thread to terminate. Called once at shutdown;
 * after this routine closes the lists, no further registration succeeds.
 */
VOID
NTAPI
PopProcessShutDownLists(VOID)
{
    PPOP_SHUTDOWN_WAIT_ENTRY ShutDownWaitEntry;
    PWORK_QUEUE_ITEM WorkItem;
    PLIST_ENTRY ListEntry;

    /* First signal the shutdown event */
    KeSetEvent(&PopShutdownEvent, IO_NO_INCREMENT, FALSE);

    /* Acquire the shutdown list lock */
    KeAcquireGuardedMutex(&PopShutdownListMutex);

    /* Block any further attempts to register a shutdown event */
    PopShutdownListAvailable = FALSE;

    /* Release the list lock, since we are exclusively using the lists now */
    KeReleaseGuardedMutex(&PopShutdownListMutex);

    /* Process the shutdown queue */
    while (!IsListEmpty(&PopShutdownQueue))
    {
        /* Get the head entry */
        ListEntry = RemoveHeadList(&PopShutdownQueue);
        WorkItem = CONTAINING_RECORD(ListEntry, WORK_QUEUE_ITEM, List);

        /* Call the shutdown worker routine */
        WorkItem->WorkerRoutine(WorkItem->Parameter);
    }

    /* Now process the shutdown thread list */
    while (PopShutdownThreadList != NULL)
    {
        /* Get the top entry and remove it from the list */
        ShutDownWaitEntry = PopShutdownThreadList;
        PopShutdownThreadList = PopShutdownThreadList->NextEntry;

        /* Wait (forever, kernel mode, non-alertable) for the thread to
           finish -- was a row of bare zeros -- then drop our reference */
        KeWaitForSingleObject(ShutDownWaitEntry->Thread,
                              Executive,
                              KernelMode,
                              FALSE,
                              NULL);
        ObDereferenceObject(ShutDownWaitEntry->Thread);

        /* Finally free the entry */
        ExFreePoolWithTag(ShutDownWaitEntry, 0);
    }
}
/*
 * Enables or disables debug tracing for a shared cache map. When
 * enabling (DBG builds only), dumps every VACB currently on the map.
 * No-op in non-DBG builds.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    /* Record the new trace state on the map */
    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: ViewLock before the per-map CacheMapLock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk and dump every VACB attached to this cache map */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
/*
 * @implemented
 *
 * Returns the number of runs currently stored in the MCB.
 * Synchronized wrapper over FsRtlNumberOfRunsInBaseMcb.
 */
ULONG
NTAPI
FsRtlNumberOfRunsInLargeMcb(IN PLARGE_MCB Mcb)
{
    ULONG RunCount;

    DPRINT("FsRtlNumberOfRunsInLargeMcb Mcb %p\n", Mcb);

    /* Query the base MCB while holding the MCB's mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    RunCount = FsRtlNumberOfRunsInBaseMcb(&Mcb->BaseMcb);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done %lu\n", RunCount);
    return RunCount;
}
/*
 * Debug helper: walks the active process list and verifies that the pool
 * allocation backing each process object (tag 'corP') is still intact.
 */
VOID
NTAPI
PspCheckProcessList()
{
    PLIST_ENTRY Entry;

    /* Hold the process list lock for the whole walk */
    KeAcquireGuardedMutex(&PspActiveProcessMutex);
    DbgPrint("# checking PsActiveProcessHead @ %p\n", &PsActiveProcessHead);
    for (Entry = PsActiveProcessHead.Flink;
         Entry != &PsActiveProcessHead;
         Entry = Entry->Flink)
    {
        PEPROCESS Process = CONTAINING_RECORD(Entry, EPROCESS, ActiveProcessLinks);
        POBJECT_HEADER Header;
        PVOID Info, HeaderLocation;

        /* Get the header and assume this is what we'll free */
        Header = OBJECT_TO_OBJECT_HEADER(Process);
        HeaderLocation = Header;

        /* To find the real allocation start, walk backwards through the
           optional object-header info blocks in allocation order */
        if ((Info = OBJECT_HEADER_TO_CREATOR_INFO(Header)))
        {
            HeaderLocation = Info;
        }
        if ((Info = OBJECT_HEADER_TO_NAME_INFO(Header)))
        {
            HeaderLocation = Info;
        }
        if ((Info = OBJECT_HEADER_TO_HANDLE_INFO(Header)))
        {
            HeaderLocation = Info;
        }
        if ((Info = OBJECT_HEADER_TO_QUOTA_INFO(Header)))
        {
            HeaderLocation = Info;
        }

        /* Verify the nonpaged pool block that backs this object */
        ExpCheckPoolAllocation(HeaderLocation, NonPagedPool, 'corP');
    }

    KeReleaseGuardedMutex(&PspActiveProcessMutex);
}
/*
 * @implemented
 *
 * Retrieves the last mapping pair in the MCB.
 * Synchronized wrapper over FsRtlLookupLastBaseMcbEntry.
 */
BOOLEAN
NTAPI
FsRtlLookupLastLargeMcbEntry(IN PLARGE_MCB Mcb,
                             OUT PLONGLONG Vbn,
                             OUT PLONGLONG Lbn)
{
    BOOLEAN Found;

    DPRINT("FsRtlLookupLastLargeMcbEntry Mcb %p\n", Mcb);

    /* Query the base MCB while holding the MCB's mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    Found = FsRtlLookupLastBaseMcbEntry(&Mcb->BaseMcb, Vbn, Lbn);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done %u\n", Found);
    return Found;
}
/*
 * @implemented
 *
 * Inserts a hole of Amount sectors at Vbn, shifting later runs up.
 * Synchronized wrapper over FsRtlSplitBaseMcb.
 */
BOOLEAN
NTAPI
FsRtlSplitLargeMcb(IN PLARGE_MCB Mcb,
                   IN LONGLONG Vbn,
                   IN LONGLONG Amount)
{
    BOOLEAN Success;

    DPRINT("FsRtlSplitLargeMcb %p, Vbn %I64d, Amount %I64d\n", Mcb, Vbn, Amount);

    /* Perform the split on the base MCB while holding the MCB's mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    Success = FsRtlSplitBaseMcb(&Mcb->BaseMcb, Vbn, Amount);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done %u\n", Success);
    return Success;
}
/*
 * @implemented
 *
 * Adds a Vbn->Lbn mapping of SectorCount sectors to the MCB.
 * Synchronized wrapper over FsRtlAddBaseMcbEntry.
 */
BOOLEAN
NTAPI
FsRtlAddLargeMcbEntry(IN PLARGE_MCB Mcb,
                      IN LONGLONG Vbn,
                      IN LONGLONG Lbn,
                      IN LONGLONG SectorCount)
{
    BOOLEAN Success;

    DPRINT("Mcb %p Vbn %I64d Lbn %I64d SectorCount %I64d\n", Mcb, Vbn, Lbn, SectorCount);

    /* Add the run to the base MCB while holding the MCB's mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    Success = FsRtlAddBaseMcbEntry(&Mcb->BaseMcb, Vbn, Lbn, SectorCount);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done %u\n", Success);
    return Success;
}
/*
 * Returns a delayed-allocation item to the free list; when its backing
 * allocation page becomes entirely free, the page itself is released.
 */
VOID
NTAPI
CmpFreeDelayItem(PVOID Entry)
{
    PCM_DELAY_ALLOC AllocEntry = (PCM_DELAY_ALLOC)Entry;
    PCM_ALLOC_PAGE AllocPage;
    ULONG i;
    PAGED_CODE();

    /* Lock the table */
    KeAcquireGuardedMutex(&CmpDelayAllocBucketLock);

    /* Add the entry at the end of the free list */
    InsertTailList(&CmpFreeDelayItemsListHead, &AllocEntry->ListEntry);

    /* Get the alloc page the entry lives on */
    AllocPage = CmpGetAllocPageFromDelayAlloc(Entry);

    /* The page can't already be fully free */
    ASSERT(AllocPage->FreeCount != CM_DELAYS_PER_PAGE);

    /* Increase the number of free items; reclaim the page when full */
    if (++AllocPage->FreeCount == CM_DELAYS_PER_PAGE)
    {
        /* Page is totally free now, loop each entry */
        for (i = 0; i < CM_DELAYS_PER_PAGE; i++)
        {
            /* Get the i-th entry on the page and unlink it */
            AllocEntry = (PVOID)((ULONG_PTR)AllocPage + FIELD_OFFSET(CM_ALLOC_PAGE, AllocPage) + i * sizeof(CM_DELAY_ALLOC));
            RemoveEntryList(&AllocEntry->ListEntry);
        }

        /* Now free the page */
        CmpFree(AllocPage, 0);
    }

    /* Release the lock */
    KeReleaseGuardedMutex(&CmpDelayAllocBucketLock);
}
/*
 * @implemented
 *
 * Retrieves the last mapping pair in the MCB together with its run index.
 * Synchronized wrapper over FsRtlLookupLastBaseMcbEntryAndIndex.
 */
BOOLEAN
NTAPI
FsRtlLookupLastLargeMcbEntryAndIndex(IN PLARGE_MCB OpaqueMcb,
                                     OUT PLONGLONG LargeVbn,
                                     OUT PLONGLONG LargeLbn,
                                     OUT PULONG Index)
{
    BOOLEAN Found;

    DPRINT("FsRtlLookupLastLargeMcbEntryAndIndex %p\n", OpaqueMcb);

    /* Query the base MCB while holding the MCB's mutex */
    KeAcquireGuardedMutex(OpaqueMcb->GuardedMutex);
    Found = FsRtlLookupLastBaseMcbEntryAndIndex(&OpaqueMcb->BaseMcb,
                                                LargeVbn,
                                                LargeLbn,
                                                Index);
    KeReleaseGuardedMutex(OpaqueMcb->GuardedMutex);

    DPRINT("Done %u\n", Found);
    return Found;
}
/*
 * @implemented
 *
 * Retrieves the run at RunIndex from the MCB.
 * Synchronized wrapper over FsRtlGetNextBaseMcbEntry.
 */
BOOLEAN
NTAPI
FsRtlGetNextLargeMcbEntry(IN PLARGE_MCB Mcb,
                          IN ULONG RunIndex,
                          OUT PLONGLONG Vbn,
                          OUT PLONGLONG Lbn,
                          OUT PLONGLONG SectorCount)
{
    BOOLEAN Found;

    DPRINT("FsRtlGetNextLargeMcbEntry Mcb %p RunIndex %lu\n", Mcb, RunIndex);

    /* Query the base MCB while holding the MCB's mutex */
    KeAcquireGuardedMutex(Mcb->GuardedMutex);
    Found = FsRtlGetNextBaseMcbEntry(&Mcb->BaseMcb,
                                     RunIndex,
                                     Vbn,
                                     Lbn,
                                     SectorCount);
    KeReleaseGuardedMutex(Mcb->GuardedMutex);

    DPRINT("Done %u\n", Found);
    return Found;
}
ULONG NTAPI MiFreePoolPages(IN PVOID StartingVa) { PMMPTE PointerPte, StartPte; PMMPFN Pfn1, StartPfn; PFN_COUNT FreePages, NumberOfPages; KIRQL OldIrql; PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry; ULONG i, End; ULONG_PTR Offset; // // Handle paged pool // if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd)) { // // Calculate the offset from the beginning of paged pool, and convert it // into pages // Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart; i = (ULONG)(Offset >> PAGE_SHIFT); End = i; // // Now use the end bitmap to scan until we find a set bit, meaning that // this allocation finishes here // while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++; // // Now calculate the total number of pages this allocation spans. If it's // only one page, add it to the S-LIST instead of freeing it // NumberOfPages = End - i + 1; if ((NumberOfPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum)) { InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa); return 1; } /* Delete the actual pages */ PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i; FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL); ASSERT(FreePages == NumberOfPages); // // Acquire the paged pool lock // KeAcquireGuardedMutex(&MmPagedPoolMutex); // // Clear the allocation and free bits // RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End); RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages); // // Update the hint if we need to // if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i; // // Release the lock protecting the bitmaps // KeReleaseGuardedMutex(&MmPagedPoolMutex); // // And finally return the number of pages freed // return NumberOfPages; }
/*
 * Allocates SizeInBytes worth of pool pages from either paged or
 * nonpaged pool, expanding the respective pool when no free run is
 * found. Returns the base virtual address of the allocation, or NULL
 * on failure.
 */
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // No free run: compute how many page tables the expansion needs
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // Nonpaged pool from here on.
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed non paged pool enabled */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation begins at the end of this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN NextEntry and index
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs for the expansion
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
/*
 * One-time initialization of the paged/nonpaged pool low- and high-memory
 * notification events, setting each to match the current free-pool level.
 */
VOID
NTAPI
INIT_FUNCTION
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* Total size of the paged pool minus the allocated size, is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low in free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}
/*
 * @implemented
 *
 * Sends an LPC request on the given port and blocks until a reply arrives,
 * copying the reply back into LpcReply.
 *
 * FIX (review): the status returned by LpcpReplyWait was being discarded, so
 * the "if (Status == STATUS_SUCCESS)" check below tested the stale status of
 * ObReferenceObjectByHandle (always success at that point) instead of the wait
 * result. FIX (review): the function tail (dereference of ConnectionPort/Port
 * and the final return) was missing, leaking both references.
 */
NTSTATUS
NTAPI
NtRequestWaitReplyPort(IN HANDLE PortHandle,
                       IN PPORT_MESSAGE LpcRequest,
                       IN OUT PPORT_MESSAGE LpcReply)
{
    PLPCP_PORT_OBJECT Port, QueuePort, ReplyPort, ConnectionPort = NULL;
    KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
    NTSTATUS Status;
    PLPCP_MESSAGE Message;
    PETHREAD Thread = PsGetCurrentThread();
    BOOLEAN Callback;
    PKSEMAPHORE Semaphore;
    ULONG MessageType;
    PAGED_CODE();
    LPCTRACE(LPC_SEND_DEBUG,
             "Handle: %lx. Messages: %p/%p. Type: %lx\n",
             PortHandle,
             LpcRequest,
             LpcReply,
             LpcpGetMessageType(LpcRequest));

    /* Check if the thread is dying */
    if (Thread->LpcExitThreadCalled) return STATUS_THREAD_IS_TERMINATING;

    /* Check if this is an LPC Request */
    if (LpcpGetMessageType(LpcRequest) == LPC_REQUEST)
    {
        /* Then it's a callback */
        Callback = TRUE;
    }
    else if (LpcpGetMessageType(LpcRequest))
    {
        /* This is a not kernel-mode message */
        return STATUS_INVALID_PARAMETER;
    }
    else
    {
        /* This is a kernel-mode message without a callback */
        LpcRequest->u2.s2.Type |= LPC_REQUEST;
        Callback = FALSE;
    }

    /* Get the message type */
    MessageType = LpcRequest->u2.s2.Type;

    /* Validate the length */
    if (((ULONG)LpcRequest->u1.s1.DataLength + sizeof(PORT_MESSAGE)) >
         (ULONG)LpcRequest->u1.s1.TotalLength)
    {
        /* Fail */
        return STATUS_INVALID_PARAMETER;
    }

    /* Reference the object */
    Status = ObReferenceObjectByHandle(PortHandle,
                                       0,
                                       LpcPortObjectType,
                                       PreviousMode,
                                       (PVOID*)&Port,
                                       NULL);
    if (!NT_SUCCESS(Status)) return Status;

    /* Validate the message length */
    if (((ULONG)LpcRequest->u1.s1.TotalLength > Port->MaxMessageLength) ||
        ((ULONG)LpcRequest->u1.s1.TotalLength <= (ULONG)LpcRequest->u1.s1.DataLength))
    {
        /* Fail */
        ObDereferenceObject(Port);
        return STATUS_PORT_MESSAGE_TOO_LONG;
    }

    /* Allocate a message from the port zone */
    Message = LpcpAllocateFromPortZone();
    if (!Message)
    {
        /* Fail if we couldn't allocate a message */
        ObDereferenceObject(Port);
        return STATUS_NO_MEMORY;
    }

    /* Check if this is a callback */
    if (Callback)
    {
        /* FIXME: TODO */
        Semaphore = NULL; // we'd use the Thread Semaphore here
        ASSERT(FALSE);
    }
    else
    {
        /* No callback, just copy the message */
        _SEH2_TRY
        {
            /* Copy it */
            LpcpMoveMessage(&Message->Request,
                            LpcRequest,
                            LpcRequest + 1,
                            MessageType,
                            &Thread->Cid);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            /* Fail */
            LpcpFreeToPortZone(Message, 0);
            ObDereferenceObject(Port);
            _SEH2_YIELD(return _SEH2_GetExceptionCode());
        }
        _SEH2_END;

        /* Acquire the LPC lock */
        KeAcquireGuardedMutex(&LpcpLock);

        /* Right now clear the port context */
        Message->PortContext = NULL;

        /* Check if this is a not connection port */
        if ((Port->Flags & LPCP_PORT_TYPE_MASK) != LPCP_CONNECTION_PORT)
        {
            /* We want the connected port */
            QueuePort = Port->ConnectedPort;
            if (!QueuePort)
            {
                /* We have no connected port, fail (flag 3 drops the lock too) */
                LpcpFreeToPortZone(Message, 3);
                ObDereferenceObject(Port);
                return STATUS_PORT_DISCONNECTED;
            }

            /* This will be the rundown port */
            ReplyPort = QueuePort;

            /* Check if this is a communication port */
            if ((Port->Flags & LPCP_PORT_TYPE_MASK) == LPCP_CLIENT_PORT)
            {
                /* Copy the port context and use the connection port */
                Message->PortContext = QueuePort->PortContext;
                ConnectionPort = QueuePort = Port->ConnectionPort;
                if (!ConnectionPort)
                {
                    /* Fail */
                    LpcpFreeToPortZone(Message, 3);
                    ObDereferenceObject(Port);
                    return STATUS_PORT_DISCONNECTED;
                }
            }
            else if ((Port->Flags & LPCP_PORT_TYPE_MASK) != LPCP_COMMUNICATION_PORT)
            {
                /* Use the connection port for anything but communication ports */
                ConnectionPort = QueuePort = Port->ConnectionPort;
                if (!ConnectionPort)
                {
                    /* Fail */
                    LpcpFreeToPortZone(Message, 3);
                    ObDereferenceObject(Port);
                    return STATUS_PORT_DISCONNECTED;
                }
            }

            /* Reference the connection port if it exists */
            if (ConnectionPort) ObReferenceObject(ConnectionPort);
        }
        else
        {
            /* Otherwise, for a connection port, use the same port object */
            QueuePort = ReplyPort = Port;
        }

        /* No reply thread */
        Message->RepliedToThread = NULL;
        Message->SenderPort = Port;

        /* Generate the Message ID and set it */
        Message->Request.MessageId = LpcpNextMessageId++;
        if (!LpcpNextMessageId) LpcpNextMessageId = 1;
        Message->Request.CallbackId = 0;

        /* Set the message ID for our thread now */
        Thread->LpcReplyMessageId = Message->Request.MessageId;
        Thread->LpcReplyMessage = NULL;

        /* Insert the message in our chain */
        InsertTailList(&QueuePort->MsgQueue.ReceiveHead, &Message->Entry);
        InsertTailList(&ReplyPort->LpcReplyChainHead, &Thread->LpcReplyChain);
        LpcpSetPortToThread(Thread, Port);

        /* Release the lock and get the semaphore we'll use later */
        KeEnterCriticalRegion();
        KeReleaseGuardedMutex(&LpcpLock);
        Semaphore = QueuePort->MsgQueue.Semaphore;

        /* If this is a waitable port, wake it up */
        if (QueuePort->Flags & LPCP_WAITABLE_PORT)
        {
            /* Wake it */
            KeSetEvent(&QueuePort->WaitEvent, IO_NO_INCREMENT, FALSE);
        }
    }

    /* Now release the semaphore */
    LpcpCompleteWait(Semaphore);
    KeLeaveCriticalRegion();

    /* And let's wait for the reply (FIX: capture the wait status) */
    Status = LpcpReplyWait(&Thread->LpcReplySemaphore, PreviousMode);

    /* Acquire the LPC lock */
    KeAcquireGuardedMutex(&LpcpLock);

    /* Get the LPC Message and clear our thread's reply data */
    Message = LpcpGetMessageFromThread(Thread);
    Thread->LpcReplyMessage = NULL;
    Thread->LpcReplyMessageId = 0;

    /* Check if we have anything on the reply chain */
    if (!IsListEmpty(&Thread->LpcReplyChain))
    {
        /* Remove this thread and reinitialize the list */
        RemoveEntryList(&Thread->LpcReplyChain);
        InitializeListHead(&Thread->LpcReplyChain);
    }

    /* Release the lock */
    KeReleaseGuardedMutex(&LpcpLock);

    /* Check if we got a reply */
    if (Status == STATUS_SUCCESS)
    {
        /* Check if we have a valid message */
        if (Message)
        {
            LPCTRACE(LPC_SEND_DEBUG,
                     "Reply Messages: %p/%p\n",
                     &Message->Request,
                     (&Message->Request) + 1);

            /* Move the message */
            _SEH2_TRY
            {
                LpcpMoveMessage(LpcReply,
                                &Message->Request,
                                (&Message->Request) + 1,
                                0,
                                NULL);
            }
            _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
            {
                Status = _SEH2_GetExceptionCode();
            }
            _SEH2_END;

            /* Check if this is an LPC request with data information */
            if ((LpcpGetMessageType(&Message->Request) == LPC_REQUEST) &&
                (Message->Request.u2.s2.DataInfoOffset))
            {
                /* Save the data information */
                LpcpSaveDataInfoMessage(Port, Message, 0);
            }
            else
            {
                /* Otherwise, just free it */
                LpcpFreeToPortZone(Message, 0);
            }
        }
        else
        {
            /* We don't have a reply */
            Status = STATUS_LPC_REPLY_LOST;
        }
    }

    /* FIX: drop the reference taken on the connection port, if any */
    if (ConnectionPort) ObDereferenceObject(ConnectionPort);

    /* FIX: drop the handle reference on the port and return the wait/copy status */
    ObDereferenceObject(Port);
    return Status;
}
/*
 * @implemented
 *
 * Sends a one-way LPC datagram on the given port: the message is copied into
 * a port-zone message, queued on the peer port's receive queue, and the
 * receiver's semaphore is signaled. No reply is awaited (DataInfoOffset must
 * be zero). Returns STATUS_SUCCESS once the message is queued, or a failure
 * status (invalid lengths, disconnected peer, allocation failure).
 */
NTSTATUS
NTAPI
NtRequestPort(IN HANDLE PortHandle,
              IN PPORT_MESSAGE LpcRequest)
{
    PLPCP_PORT_OBJECT Port, QueuePort, ConnectionPort = NULL;
    KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
    NTSTATUS Status;
    PLPCP_MESSAGE Message;
    PETHREAD Thread = PsGetCurrentThread();
    PKSEMAPHORE Semaphore;
    ULONG MessageType;
    PAGED_CODE();
    LPCTRACE(LPC_SEND_DEBUG,
             "Handle: %lx. Message: %p. Type: %lx\n",
             PortHandle,
             LpcRequest,
             LpcpGetMessageType(LpcRequest));

    /* Get the message type; datagrams are tagged so the receiver knows no reply is expected */
    MessageType = LpcRequest->u2.s2.Type | LPC_DATAGRAM;

    /* Can't have data information on this type of call */
    if (LpcRequest->u2.s2.DataInfoOffset) return STATUS_INVALID_PARAMETER;

    /* Validate the length: the header plus declared data must fit in the total */
    if (((ULONG)LpcRequest->u1.s1.DataLength + sizeof(PORT_MESSAGE)) >
         (ULONG)LpcRequest->u1.s1.TotalLength)
    {
        /* Fail */
        return STATUS_INVALID_PARAMETER;
    }

    /* Reference the object */
    Status = ObReferenceObjectByHandle(PortHandle,
                                       0,
                                       LpcPortObjectType,
                                       PreviousMode,
                                       (PVOID*)&Port,
                                       NULL);
    if (!NT_SUCCESS(Status)) return Status;

    /* Validate the message length against the port's limit */
    if (((ULONG)LpcRequest->u1.s1.TotalLength > Port->MaxMessageLength) ||
        ((ULONG)LpcRequest->u1.s1.TotalLength <= (ULONG)LpcRequest->u1.s1.DataLength))
    {
        /* Fail */
        ObDereferenceObject(Port);
        return STATUS_PORT_MESSAGE_TOO_LONG;
    }

    /* Allocate a message from the port zone */
    Message = LpcpAllocateFromPortZone();
    if (!Message)
    {
        /* Fail if we couldn't allocate a message */
        ObDereferenceObject(Port);
        return STATUS_NO_MEMORY;
    }

    /* No callback, just copy the message; user buffer access is guarded by SEH */
    _SEH2_TRY
    {
        /* Copy it */
        LpcpMoveMessage(&Message->Request,
                        LpcRequest,
                        LpcRequest + 1,
                        MessageType,
                        &Thread->Cid);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* Fail */
        LpcpFreeToPortZone(Message, 0);
        ObDereferenceObject(Port);
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    /* Acquire the LPC lock */
    KeAcquireGuardedMutex(&LpcpLock);

    /* Right now clear the port context */
    Message->PortContext = NULL;

    /* Check if this is a not connection port */
    if ((Port->Flags & LPCP_PORT_TYPE_MASK) != LPCP_CONNECTION_PORT)
    {
        /* We want the connected port */
        QueuePort = Port->ConnectedPort;
        if (!QueuePort)
        {
            /* We have no connected port, fail
               (flag 3 presumably makes LpcpFreeToPortZone release LpcpLock — TODO confirm) */
            LpcpFreeToPortZone(Message, 3);
            ObDereferenceObject(Port);
            return STATUS_PORT_DISCONNECTED;
        }

        /* Check if this is a communication port */
        if ((Port->Flags & LPCP_PORT_TYPE_MASK) == LPCP_CLIENT_PORT)
        {
            /* Copy the port context and use the connection port */
            Message->PortContext = QueuePort->PortContext;
            ConnectionPort = QueuePort = Port->ConnectionPort;
            if (!ConnectionPort)
            {
                /* Fail */
                LpcpFreeToPortZone(Message, 3);
                ObDereferenceObject(Port);
                return STATUS_PORT_DISCONNECTED;
            }
        }
        else if ((Port->Flags & LPCP_PORT_TYPE_MASK) != LPCP_COMMUNICATION_PORT)
        {
            /* Use the connection port for anything but communication ports */
            ConnectionPort = QueuePort = Port->ConnectionPort;
            if (!ConnectionPort)
            {
                /* Fail */
                LpcpFreeToPortZone(Message, 3);
                ObDereferenceObject(Port);
                return STATUS_PORT_DISCONNECTED;
            }
        }

        /* Reference the connection port if it exists, so it outlives the queueing below */
        if (ConnectionPort) ObReferenceObject(ConnectionPort);
    }
    else
    {
        /* Otherwise, for a connection port, use the same port object */
        QueuePort = Port;
    }

    /* Reference QueuePort if we have it; the safe variant fails if the port is being deleted */
    if (QueuePort && ObReferenceObjectSafe(QueuePort))
    {
        /* Set sender's port */
        Message->SenderPort = Port;

        /* Generate the Message ID and set it; IDs wrap around but skip 0 */
        Message->Request.MessageId = LpcpNextMessageId++;
        if (!LpcpNextMessageId) LpcpNextMessageId = 1;
        Message->Request.CallbackId = 0;

        /* No Message ID for the thread: a datagram expects no reply */
        PsGetCurrentThread()->LpcReplyMessageId = 0;

        /* Insert the message in our chain */
        InsertTailList(&QueuePort->MsgQueue.ReceiveHead, &Message->Entry);

        /* Release the lock and get the semaphore we'll use later */
        KeEnterCriticalRegion();
        KeReleaseGuardedMutex(&LpcpLock);

        /* Now release the semaphore to wake a receiver */
        Semaphore = QueuePort->MsgQueue.Semaphore;
        LpcpCompleteWait(Semaphore);

        /* If this is a waitable port, wake it up */
        if (QueuePort->Flags & LPCP_WAITABLE_PORT)
        {
            /* Wake it */
            KeSetEvent(&QueuePort->WaitEvent, IO_NO_INCREMENT, FALSE);
        }
        KeLeaveCriticalRegion();

        /* Dereference objects: message ownership has passed to the receive queue */
        if (ConnectionPort) ObDereferenceObject(ConnectionPort);
        ObDereferenceObject(QueuePort);
        ObDereferenceObject(Port);
        LPCTRACE(LPC_SEND_DEBUG, "Port: %p. Message: %p\n", QueuePort, Message);
        return STATUS_SUCCESS;
    }
    Status = STATUS_PORT_DISCONNECTED;

    /* All done with a failure*/
    LPCTRACE(LPC_SEND_DEBUG, "Port: %p. Status: %p\n", Port, Status);

    /* The wait failed, free the message (flag 3 also drops LpcpLock, still held here) */
    if (Message) LpcpFreeToPortZone(Message, 3);
    ObDereferenceObject(Port);
    if (ConnectionPort) ObDereferenceObject(ConnectionPort);
    return Status;
}