// Execute a non-image region as a test _Use_decl_annotations_ void MmonExecuteDoggyRegion() { PAGED_CODE(); #pragma prefast(suppress : 30030, "Allocating executable POOL_TYPE memory") auto code = reinterpret_cast<UCHAR *>(ExAllocatePoolWithTag( NonPagedPoolExecute, PAGE_SIZE, kHyperPlatformCommonPoolTag)); if (!code) { return; } RtlZeroMemory(code, PAGE_SIZE); HYPERPLATFORM_LOG_DEBUG("PoolCode = %p, Pa = %016llx", code, UtilPaFromVa(code)); code[0] = 0x90; // nop code[1] = 0x90; // nop if (IsX64()) { code[2] = 0xc3; // ret } else { code[2] = 0xc2; code[3] = 0x04; // retn 4 } KeInvalidateAllCaches(); // Runs code on all processors at once auto function = reinterpret_cast<PKIPI_BROADCAST_WORKER>(code); KeIpiGenericCall(function, 0); ExFreePoolWithTag(code, kHyperPlatformCommonPoolTag); }
// Sets a breakpoint to the address _Use_decl_annotations_ static void SbppEmbedBreakpoint(void* address) { static const UCHAR kBreakpoint[1] = { 0xcc, }; UtilForceCopyMemory(address, kBreakpoint, sizeof(kBreakpoint)); KeInvalidateAllCaches(); }
/*
 * @implemented
 *
 * Maps a range of physical memory (typically device/I-O space) into system
 * virtual address space using system PTEs.
 *
 * PhysicalAddress - start of the physical range to map.
 * NumberOfBytes   - size of the range in bytes; must be non-zero.
 * CacheType       - requested MEMORY_CACHING_TYPE; masked to its low byte
 *                   and translated to a platform cache attribute below.
 *
 * Returns the kernel virtual address of the mapping (the byte offset within
 * the first physical page is preserved), or NULL on failure (invalid cache
 * type, or no free system PTEs).
 */
PVOID
NTAPI
MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
             IN SIZE_T NumberOfBytes,
             IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    MMPTE TempPte;
    PMMPFN Pfn1 = NULL;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    BOOLEAN IsIoMapping;

    //
    // Must be called with a non-zero count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Make sure the upper bits are 0 if this system
    // can't describe more than 4 GB of physical memory.
    // FIXME: This doesn't respect PAE, but we currently don't
    // define a PAE build flag since there is no such build.
    //
#if !defined(_M_AMD64)
    ASSERT(PhysicalAddress.HighPart == 0);
#endif

    //
    // Normalize and validate the caching attributes
    //
    CacheType &= 0xFF;
    if (CacheType >= MmMaximumCacheType) return NULL;

    //
    // Calculate page count spanned by the (offset, size) pair
    //
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(PhysicalAddress.LowPart,
                                               NumberOfBytes);

    //
    // Compute the PFN and check if it's a known I/O mapping:
    // no PFN database entry means the frame is not RAM we manage,
    // so treat it as an I/O mapping. Also translate the cache
    // attribute through the platform table.
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    Pfn1 = MiGetPfnEntry(Pfn);
    IsIoMapping = (Pfn1 == NULL) ? TRUE : FALSE;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    //
    // Now allocate system PTEs for the mapping, and get the VA
    //
    PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
    if (!PointerPte) return NULL;
    BaseAddress = MiPteToAddress(PointerPte);

    //
    // Check if this is uncached
    //
    if (CacheAttribute != MiCached)
    {
        //
        // Flush all caches (TB on all processors, then data/instruction
        // caches) before establishing a non-cached mapping
        //
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    //
    // Now compute the VA offset: keep the caller's byte offset within
    // the first page
    //
    BaseAddress = (PVOID)((ULONG_PTR)BaseAddress +
                          BYTE_OFFSET(PhysicalAddress.LowPart));

    //
    // Get the template and configure caching
    //
    TempPte = ValidKernelPte;
    switch (CacheAttribute)
    {
        case MiNonCached:

            //
            // Disable the cache
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiCached:

            //
            // Leave defaults
            //
            break;

        case MiWriteCombined:

            //
            // We don't support write combining yet
            //
            ASSERT(FALSE);
            break;

        default:

            //
            // Should never happen
            //
            ASSERT(FALSE);
            break;
    }

    //
    // Sanity check (the PFN entry must not have changed since the lookup
    // above) and re-flush
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    ASSERT((Pfn1 == MiGetPfnEntry(Pfn)) || (Pfn1 == NULL));
    KeFlushEntireTb(TRUE, TRUE);
    KeInvalidateAllCaches();

    //
    // Do the mapping: fill one valid PTE per page with consecutive PFNs
    //
    do
    {
        //
        // Write the PFN
        //
        TempPte.u.Hard.PageFrameNumber = Pfn++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    //
    // We're done!
    //
    return BaseAddress;
}
//
// Processes one DMA buffer submission: either interprets a software command
// buffer on the CPU, or submits a hardware command buffer to the VC4 GPU.
//
// pDmaBufSubmission - the submission to process; carries start/end offsets
//                     into the DMA buffer described by m_pDmaBufInfo.
//
void RosKmdRapAdapter::ProcessRenderBuffer(
    ROSDMABUFSUBMISSION * pDmaBufSubmission)
{
    ROSDMABUFINFO * pDmaBufInfo = pDmaBufSubmission->m_pDmaBufInfo;

    if (pDmaBufInfo->m_DmaBufState.m_bSwCommandBuffer)
    {
        // Software path: the buffer is an array of GpuCommand records,
        // so the submitted range must be a whole number of commands.
        NT_ASSERT(0 == (pDmaBufSubmission->m_EndOffset -
                        pDmaBufSubmission->m_StartOffset) % sizeof(GpuCommand));

        GpuCommand * pGpuCommand =
            (GpuCommand *)(pDmaBufInfo->m_pDmaBuffer +
                           pDmaBufSubmission->m_StartOffset);
        GpuCommand * pEndofCommand =
            (GpuCommand *)(pDmaBufInfo->m_pDmaBuffer +
                           pDmaBufSubmission->m_EndOffset);

        // Walk and emulate each command on the CPU.
        for (; pGpuCommand < pEndofCommand; pGpuCommand++)
        {
            switch (pGpuCommand->m_commandId)
            {
            case Header:
            case Nop:
                break;

            case ResourceCopy:
            {
                // GPU addresses here are offsets into the video memory
                // aperture, so the copy is a plain CPU memcpy.
                RtlCopyMemory(
                    ((BYTE *)RosKmdGlobal::s_pVideoMemory) +
                        pGpuCommand->m_resourceCopy.m_dstGpuAddress.QuadPart,
                    ((BYTE *)RosKmdGlobal::s_pVideoMemory) +
                        pGpuCommand->m_resourceCopy.m_srcGpuAddress.QuadPart,
                    pGpuCommand->m_resourceCopy.m_sizeBytes);
            }
            break;

            default:
                // Unknown commands are ignored.
                break;
            }
        }
    }
    else
    {
        //
        // Submit HW command buffer to the GPU
        //

#if VC4

        if (m_flags.m_isVC4)
        {
            //
            // TODO[indyz]:
            //
            // 1. Submit the Binning and Rendering Control list simultaneously
            //    and use semaphore for synchronization
            // 2. Enable interrupt to signal end of frame
            //

            //
            // Generate the Rendering Control List
            //
            UINT renderingControlListLength;
            renderingControlListLength = GenerateRenderingControlList(pDmaBufInfo);

#if 1   // TODO[indyz]: Decide the best way to handle the cache

            KeInvalidateAllCaches();

            //
            // Flush the VC4 GPU caches
            //
            // NOTE(review): regL2CACTL/regSLCACTL are declared inside this
            // "#if 1" block but are reused after the closing #endif below;
            // flipping this to "#if 0" would break the build — confirm intent.
            //
            V3D_REG_L2CACTL regL2CACTL = { 0 };

            regL2CACTL.L2CCLR = 1;
            m_pVC4RegFile->V3D_L2CACTL = regL2CACTL.Value;

            V3D_REG_SLCACTL regSLCACTL = { 0 };

            regSLCACTL.ICCS0123 = 0xF;
            regSLCACTL.UCCS0123 = 0xF;
            regSLCACTL.T0CCS0123 = 0xF;
            regSLCACTL.T1CCS0123 = 0xF;

            m_pVC4RegFile->V3D_SLCACTL = regSLCACTL.Value;

#endif

            //
            // Submit the Binning Control List from UMD to the GPU
            //
            NT_ASSERT(pDmaBufInfo->m_DmaBufferPhysicalAddress.HighPart == 0);
            NT_ASSERT(pDmaBufInfo->m_DmaBufferSize <= kPageSize);

            UINT dmaBufBaseAddress;

            // Translate the CPU physical address into a GPU bus address.
            dmaBufBaseAddress = GetAperturePhysicalAddress(
                pDmaBufInfo->m_DmaBufferPhysicalAddress.LowPart);
            dmaBufBaseAddress += m_busAddressOffset;

            // Skip the command buffer header at the beginning
            SubmitControlList(
                true,
                dmaBufBaseAddress + pDmaBufSubmission->m_StartOffset + sizeof(GpuCommand),
                dmaBufBaseAddress + pDmaBufSubmission->m_EndOffset);

            //
            // Submit the Rendering Control List to the GPU
            //
            SubmitControlList(
                false,
                m_renderingControlListPhysicalAddress + m_busAddressOffset,
                m_renderingControlListPhysicalAddress + m_busAddressOffset + renderingControlListLength);

            MoveToNextBinnerRenderMemChunk(renderingControlListLength);

            //
            // Flush the VC4 GPU caches
            //
            m_pVC4RegFile->V3D_L2CACTL = regL2CACTL.Value;
            m_pVC4RegFile->V3D_SLCACTL = regSLCACTL.Value;
        }

#endif  // VC4
    }
}