/*
 * Look up the host PFN that backs the page containing uva_offset inside a
 * user memory descriptor.
 *
 * Returns the PFN on success, INVALID_PFN if the descriptor (or its MDL)
 * is missing, the offset lies beyond the MDL byte count, or the PFN array
 * cannot be obtained.
 */
uint64 hax_get_pfn_user(hax_memdesc_user *memdesc, uint64 uva_offset)
{
    PMDL pmdl;
    PPFN_NUMBER pfn_array;
    uint64 buf_len;

    if (memdesc == NULL) {
        hax_error("%s: memdesc == NULL\n", __func__);
        return INVALID_PFN;
    }
    if (memdesc->pmdl == NULL) {
        hax_error("%s: memdesc->pmdl == NULL\n", __func__);
        return INVALID_PFN;
    }

    pmdl = memdesc->pmdl;
    buf_len = MmGetMdlByteCount(pmdl);
    if (uva_offset >= buf_len) {
        hax_error("The uva_offset 0x%llx exceeds the buffer length 0x%llx.\n",
                  uva_offset, buf_len);
        return INVALID_PFN;
    }

    pfn_array = MmGetMdlPfnArray(pmdl);
    if (pfn_array == NULL) {
        hax_error("Get MDL pfn array failed. uva_offset: 0x%llx.\n",
                  uva_offset);
        return INVALID_PFN;
    }

    /* One PFN per 4KB page: index the array by the page number of the
     * offset. */
    return (uint64)pfn_array[uva_offset >> PG_ORDER_4K];
}
/*
 * Pack a run of physically contiguous pages from pMdl, starting at PFN
 * index iPfn, into a single VBOXCMDVBVA_SYSMEMEL element.
 *
 * The first PFN is split across the element: the low 20 bits go into
 * iPage1 and the remaining high bits into iPage2.  Pages that are
 * physically contiguous with it are represented only by cPagesAfterFirst,
 * capped at VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX pages per element.
 *
 * Returns how many of the cPages pages were NOT consumed by this element
 * (so the caller can build further elements for the remainder).
 */
static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
    PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    uint32_t cStoredPages = 1;  /* the first page is always stored */
    PFN_NUMBER next;

    pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    pEl->iPage2 = (uint32_t)(cur >> 20);
    --cPages;

    /* Greedily extend the element while the next PFN immediately follows
     * the current one physically and the element still has room.  The
     * break fires before the increment clause, so a non-contiguous page is
     * not counted. */
    for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    {
        next = MmGetMdlPfnArray(pMdl)[iPfn + cStoredPages];
        if (next != cur + 1)
            break;
    }

    Assert(cStoredPages);
    pEl->cPagesAfterFirst = cStoredPages - 1;

    return cPages;
}
/*
 * Release the pages described by Mdl back to Windows.  When Check is TRUE,
 * first map the pages and verify each one is really backed by RAM (i.e.
 * the hypervisor actually re-populated it after deflation) before freeing.
 *
 * BUGFIX: the original left the MmMapLockedPagesSpecifyCache mapping in
 * place when calling MmFreePagesFromMdl, which leaks system PTEs and
 * violates the MmFreePagesFromMdl contract (pages must not be mapped when
 * they are freed).  The mapping is now released once the check is done.
 */
static VOID
BalloonFreePagesFromMdl(
    IN  MDL             *Mdl,
    IN  BOOLEAN         Check
    )
{
    volatile UCHAR *Mapping;
    ULONG Index;

    if (!Check)
        goto done;

    // Sanity check:
    //
    // Make sure that things written to the page really do stick.
    // If the page is still ballooned out at the hypervisor level
    // then writes will be discarded and reads will give back
    // all 1s.
    Mapping = MmMapLockedPagesSpecifyCache(Mdl,
                                           KernelMode,
                                           MmCached,
                                           NULL,
                                           FALSE,
                                           LowPagePriority);
    if (Mapping == NULL) {
        // Windows couldn't map the memory. That's kind of sad, but not
        // really an error: it might be that we're very low on kernel
        // virtual address space.
        goto done;
    }

    // Write and read the first byte in each page to make sure it's backed
    // by RAM.
    XM_ASSERT((Mdl->ByteCount & (PAGE_SIZE - 1)) == 0);

    for (Index = 0; Index < Mdl->ByteCount >> PAGE_SHIFT; Index++)
        Mapping[Index << PAGE_SHIFT] = (UCHAR)Index;

    for (Index = 0; Index < Mdl->ByteCount >> PAGE_SHIFT; Index++) {
        if (Mapping[Index << PAGE_SHIFT] != (UCHAR)Index) {
            PFN_NUMBER *Array = MmGetMdlPfnArray(Mdl);

            TraceCritical(("%s: PFN[%d] (%p): read 0x%02x, expected 0x%02x\n",
                           __FUNCTION__, Index, Array[Index],
                           Mapping[Index << PAGE_SHIFT], (UCHAR)Index));
            XM_BUG();
        }
    }

    // The pages must not carry an outstanding system VA mapping when they
    // are handed back; leaving it mapped would also leak system PTEs.
    MmUnmapLockedPages((PVOID)Mapping, Mdl);

done:
    MmFreePagesFromMdl(Mdl);
}
/*
 * Fill pCmd->Data.aPageNumbers with up to cPages page numbers taken from
 * pMdl's PFN array starting at index iPfn, limited by the cbBuffer bytes
 * available for the whole command.
 *
 * *pcPagesWritten receives the number of page entries actually written;
 * the return value is the total number of command bytes used (header plus
 * page-number array).
 */
uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cbInitBuffer = cbBuffer;
    VBOXCMDVBVAPAGEIDX *pDst = pCmd->Data.aPageNumbers;
    PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
    uint32_t cWritten = 0;

    /* Account for the fixed command header preceding the page array. */
    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);

    while (cWritten < cPages && cbBuffer >= sizeof (*pDst))
    {
        pDst[cWritten] = (VBOXCMDVBVAPAGEIDX)paPfns[iPfn + cWritten];
        ++cWritten;
        cbBuffer -= sizeof (*pDst);
    }

    *pcPagesWritten = cWritten;
    Assert(cbInitBuffer - cbBuffer == RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers[cWritten]));
    Assert(cbInitBuffer - cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));
    return cbInitBuffer - cbBuffer;
}
/*
 * Return the PFN of the first (and, for this descriptor type, only
 * relevant) page of a physical memory descriptor.
 *
 * Returns INVALID_PFN if the descriptor or its MDL is missing, or if the
 * MDL's PFN array cannot be obtained.
 */
uint64 hax_get_pfn_phys(hax_memdesc_phys *memdesc)
{
    PPFN_NUMBER pfn_array;

    if (memdesc == NULL) {
        hax_error("%s: memdesc == NULL\n", __func__);
        return INVALID_PFN;
    }
    if (memdesc->pmdl == NULL) {
        hax_error("%s: memdesc->pmdl == NULL\n", __func__);
        return INVALID_PFN;
    }

    pfn_array = MmGetMdlPfnArray(memdesc->pmdl);
    if (pfn_array == NULL) {
        hax_error("%s: MmGetMdlPfnArray() failed\n", __func__);
        return INVALID_PFN;
    }

    return pfn_array[0];
}
/*
 * Deflate the balloon by one chunk: pop the most recent page-list entry,
 * report its PFNs to the host via the deflate virtqueue, and return the
 * pages to Windows.
 *
 * NOTE(review): the caller-supplied 'num' is immediately overwritten with
 * the page count of the popped MDL, so the parameter is effectively
 * ignored — confirm this is intentional with the call sites.
 * NOTE(review): the pages are freed back to Windows *before*
 * BalloonTellHost runs; if the device negotiates must-tell-host semantics
 * this ordering should be verified.
 */
VOID
BalloonLeak(
    IN WDFOBJECT WdfDevice,
    IN size_t num
    )
{
    PDEVICE_CONTEXT ctx = GetDeviceContext(WdfDevice);
    PPAGE_LIST_ENTRY pPageListEntry;
    PMDL pPageMdl;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "--> %s\n", __FUNCTION__);

    /* Take the most recently inflated chunk off the singly-linked list. */
    pPageListEntry = (PPAGE_LIST_ENTRY)PopEntryList(&ctx->PageListHead);
    if (pPageListEntry == NULL)
    {
        TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS, "No list entries.\n");
        return;
    }

    pPageMdl = pPageListEntry->PageMdl;

    /* Deflate by exactly the number of pages this MDL holds. */
    num = MmGetMdlByteCount(pPageMdl) / PAGE_SIZE;
    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_HW_ACCESS,
        "Deflate balloon with %d pages.\n", num);

    ctx->num_pfns = num;
    ctx->num_pages -= ctx->num_pfns;

    /* Copy the PFNs into the table that BalloonTellHost hands to the
     * device. */
    RtlCopyMemory(ctx->pfns_table, MmGetMdlPfnArray(pPageMdl),
        ctx->num_pfns * sizeof(PFN_NUMBER));

    MmFreePagesFromMdl(pPageMdl);
    ExFreePool(pPageMdl);
    ExFreeToNPagedLookasideList(&ctx->LookAsideList, pPageListEntry);

    BalloonTellHost(WdfDevice, ctx->DefVirtQueue);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "<-- %s\n", __FUNCTION__);
}
/*
 * @implemented
 *
 * Unmap a system-space mapping previously created for an MDL (the
 * counterpart of MmMapLockedPages* for kernel mappings).
 *
 * BaseAddress - the mapped VA to tear down.
 * Mdl         - the MDL that was mapped; its flags are updated.
 *
 * Only the kernel-address case is implemented; user-space unmapping hits
 * the UNIMPLEMENTED path below.
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Do the math: the extra PTEs sit immediately BEFORE the
            // mapping, so extend the count and walk the PTE pointer (and
            // the base address) backwards by that many pages.
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}
/*
 * Inflate the balloon by up to 'num' pages, allocating one page at a time.
 * Each allocated page's MDL is kept on the device's page list (so it can
 * be freed on deflate) and its PFN is recorded in pfns_table for the host.
 * Stops early on low-memory pressure or any allocation failure; whatever
 * was gathered so far is reported via BalloonTellHost.
 */
VOID
BalloonFill(
    IN WDFOBJECT WdfDevice,
    IN size_t num)
{
    PMDL pPageMdl;
    PHYSICAL_ADDRESS LowAddress;
    PHYSICAL_ADDRESS HighAddress;
    PPAGE_LIST_ENTRY pNewPageListEntry;
    PDEVICE_CONTEXT devCtx = GetDeviceContext(WdfDevice);
    /* pfns_table is one page; this is how many PFN entries fit in it. */
    ULONG pages_per_request = PAGE_SIZE/sizeof(PFN_NUMBER);

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_HW_ACCESS, "--> %s\n", __FUNCTION__);

    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = (ULONGLONG)-1;

    num = min(num, pages_per_request);
    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_HW_ACCESS, "--> BalloonFill num = %d\n", num);

    /* devCtx->num_pfns doubles as the loop counter AND the count of PFNs
     * staged in pfns_table for BalloonTellHost below. */
    for (devCtx->num_pfns = 0; devCtx->num_pfns < num; devCtx->num_pfns++)
    {
        if(IsLowMemory(WdfDevice))
        {
            TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
                "LowMemoryCondition event was set to signaled,allocations stops, BalPageCount=%d\n",
                devCtx->num_pages);
            break;
        }
        /* NOTE(review): LowAddress (zero) is reused as the SkipBytes
         * argument here; harmless since both are zero, but a dedicated
         * SkipBytes variable would be clearer. */
        pPageMdl = MmAllocatePagesForMdl(
            LowAddress,
            HighAddress,
            LowAddress,
            PAGE_SIZE
            );
        if (pPageMdl == NULL)
        {
            TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
                "Balloon MDL Page Allocation Failed!!!, BalPageCount=%d\n",
                devCtx->num_pages);
            break;
        }
        /* MmAllocatePagesForMdl may return fewer bytes than requested;
         * a partial page is useless here, so give it back. */
        if (MmGetMdlByteCount(pPageMdl) != PAGE_SIZE)
        {
            TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
                "Balloon MDL Page Allocation < PAGE_SIZE =%d, Failed!!!, BalPageCount=%d\n",MmGetMdlByteCount(pPageMdl),
                devCtx->num_pages);
            MmFreePagesFromMdl(pPageMdl);
            ExFreePool(pPageMdl);
            break;
        }
        pNewPageListEntry = (PPAGE_LIST_ENTRY)ExAllocateFromNPagedLookasideList(&devCtx->LookAsideList);
        if (pNewPageListEntry == NULL)
        {
            TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS, "List Entry Allocation Failed!!!\n");
            MmFreePagesFromMdl(pPageMdl);
            ExFreePool(pPageMdl);
            break;
        }
        /* Remember the MDL for deflation and stage its PFN for the host. */
        pNewPageListEntry->PageMdl = pPageMdl;
        pNewPageListEntry->PagePfn = devCtx->pfns_table[devCtx->num_pfns] = *MmGetMdlPfnArray(pPageMdl);
        PushEntryList(&devCtx->PageListHead, &(pNewPageListEntry->SingleListEntry));
        devCtx->num_pages++;
    }

    if (devCtx->num_pfns > 0)
    {
        BalloonTellHost(WdfDevice, devCtx->InfVirtQueue);
    }
}
/*
 * Inflate the balloon by up to 'num' pages with a single bulk allocation.
 * The allocation is all-or-nothing: a partial MDL is returned to Windows
 * and the inflate attempt is abandoned.  On success the MDL is queued on
 * the device page list (for later deflation), its PFNs are copied into
 * pfns_table, and the host is notified via the inflate virtqueue.
 */
VOID
BalloonFill(
    IN WDFOBJECT WdfDevice,
    IN size_t num)
{
    PDEVICE_CONTEXT ctx = GetDeviceContext(WdfDevice);
    PHYSICAL_ADDRESS LowAddress;
    PHYSICAL_ADDRESS HighAddress;
    PHYSICAL_ADDRESS SkipBytes;
    PPAGE_LIST_ENTRY pNewPageListEntry;
    PMDL pPageMdl;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "--> %s\n", __FUNCTION__);

    ctx->num_pfns = 0;

    if (IsLowMemory(WdfDevice))
    {
        TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
            "Low memory. Allocated pages: %d\n", ctx->num_pages);
        return;
    }

    /* Cap the request at the number of PFN entries that fit in the
     * one-page pfns_table handed to the host. */
    num = min(num, PAGE_SIZE / sizeof(PFN_NUMBER));
    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_HW_ACCESS,
        "Inflate balloon with %d pages.\n", num);

    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = (ULONGLONG)-1;
    SkipBytes.QuadPart = 0;

#if (NTDDI_VERSION < NTDDI_WS03SP1)
    pPageMdl = MmAllocatePagesForMdl(LowAddress, HighAddress, SkipBytes,
        num * PAGE_SIZE);
#else
    /* MM_DONT_ZERO_ALLOCATION: the pages are surrendered to the host, so
     * zeroing them would be wasted work. */
    pPageMdl = MmAllocatePagesForMdlEx(LowAddress, HighAddress, SkipBytes,
        num * PAGE_SIZE, MmNonCached, MM_DONT_ZERO_ALLOCATION);
#endif

    if (pPageMdl == NULL)
    {
        TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
            "Failed to allocate pages.\n");
        return;
    }

    /* All-or-nothing: a partial allocation is returned immediately. */
    if (MmGetMdlByteCount(pPageMdl) != (num * PAGE_SIZE))
    {
        TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
            "Not all requested memory was allocated (%d/%d).\n",
            MmGetMdlByteCount(pPageMdl), num * PAGE_SIZE);
        MmFreePagesFromMdl(pPageMdl);
        ExFreePool(pPageMdl);
        return;
    }

    pNewPageListEntry = (PPAGE_LIST_ENTRY)ExAllocateFromNPagedLookasideList(
        &ctx->LookAsideList);
    if (pNewPageListEntry == NULL)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS,
            "Failed to allocate list entry.\n");
        MmFreePagesFromMdl(pPageMdl);
        ExFreePool(pPageMdl);
        return;
    }

    /* Keep the MDL so BalloonLeak can free the pages later. */
    pNewPageListEntry->PageMdl = pPageMdl;
    PushEntryList(&ctx->PageListHead, &(pNewPageListEntry->SingleListEntry));
    ctx->num_pfns = num;
    ctx->num_pages += ctx->num_pfns;

    /* Stage the PFNs for the host and announce the inflation. */
    RtlCopyMemory(ctx->pfns_table, MmGetMdlPfnArray(pPageMdl),
        ctx->num_pfns * sizeof(PFN_NUMBER));

    BalloonTellHost(WdfDevice, ctx->InfVirtQueue);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "<-- %s\n", __FUNCTION__);
}
static VOID XenUsb_EvtIoInternalDeviceControl_PVURB( WDFQUEUE queue, WDFREQUEST request, size_t output_buffer_length, size_t input_buffer_length, ULONG io_control_code) { NTSTATUS status; WDFDEVICE device = WdfIoQueueGetDevice(queue); PXENUSB_DEVICE_DATA xudd = GetXudd(device); WDF_REQUEST_PARAMETERS wrp; pvurb_t *pvurb; partial_pvurb_t *partial_pvurb; KIRQL old_irql; UNREFERENCED_PARAMETER(input_buffer_length); UNREFERENCED_PARAMETER(output_buffer_length); UNREFERENCED_PARAMETER(io_control_code); FUNCTION_ENTER(); ASSERT(io_control_code == IOCTL_INTERNAL_PVUSB_SUBMIT_URB); WDF_REQUEST_PARAMETERS_INIT(&wrp); WdfRequestGetParameters(request, &wrp); pvurb = (pvurb_t *)wrp.Parameters.Others.Arg1; ASSERT(pvurb); RtlZeroMemory(&pvurb->rsp, sizeof(pvurb->rsp)); pvurb->status = STATUS_SUCCESS; pvurb->request = request; pvurb->ref = 1; pvurb->total_length = 0; partial_pvurb = ExAllocatePoolWithTag(NonPagedPool, sizeof(*partial_pvurb), XENUSB_POOL_TAG); /* todo - use lookaside */ if (!partial_pvurb) { WdfRequestComplete(request, STATUS_INSUFFICIENT_RESOURCES); FUNCTION_EXIT(); return; } KeAcquireSpinLock(&xudd->urb_ring_lock, &old_irql); status = WdfRequestMarkCancelableEx(request, XenUsb_EvtRequestCancelPvUrb); if (!NT_SUCCESS(status)) { KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql); FUNCTION_MSG("WdfRequestMarkCancelableEx returned %08x\n", status); WdfRequestComplete(request, STATUS_INSUFFICIENT_RESOURCES); FUNCTION_EXIT(); return; } partial_pvurb->req = pvurb->req; partial_pvurb->mdl = pvurb->mdl; /* 1:1 right now, but may need to split up large pvurb into smaller partial_pvurb's */ partial_pvurb->pvurb = pvurb; partial_pvurb->other_partial_pvurb = NULL; partial_pvurb->on_ring = FALSE; if (!partial_pvurb->mdl) { partial_pvurb->req.nr_buffer_segs = 0; partial_pvurb->req.buffer_length = 0; } else { ULONG remaining = MmGetMdlByteCount(partial_pvurb->mdl); USHORT offset = (USHORT)MmGetMdlByteOffset(partial_pvurb->mdl); int i; partial_pvurb->req.buffer_length = 
(USHORT)MmGetMdlByteCount(partial_pvurb->mdl); partial_pvurb->req.nr_buffer_segs = (USHORT)ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(partial_pvurb->mdl), MmGetMdlByteCount(partial_pvurb->mdl)); for (i = 0; i < partial_pvurb->req.nr_buffer_segs; i++) { partial_pvurb->req.seg[i].gref = XnGrantAccess(xudd->handle, (ULONG)MmGetMdlPfnArray(partial_pvurb->mdl)[i], FALSE, INVALID_GRANT_REF, (ULONG)'XUSB'); partial_pvurb->req.seg[i].offset = (USHORT)offset; partial_pvurb->req.seg[i].length = (USHORT)min((USHORT)remaining, (USHORT)PAGE_SIZE - offset); offset = 0; remaining -= partial_pvurb->req.seg[i].length; FUNCTION_MSG("seg = %d\n", i); FUNCTION_MSG(" gref = %d\n", partial_pvurb->req.seg[i].gref); FUNCTION_MSG(" offset = %d\n", partial_pvurb->req.seg[i].offset); FUNCTION_MSG(" length = %d\n", partial_pvurb->req.seg[i].length); } FUNCTION_MSG("buffer_length = %d\n", partial_pvurb->req.buffer_length); FUNCTION_MSG("nr_buffer_segs = %d\n", partial_pvurb->req.nr_buffer_segs); } InsertTailList(&xudd->partial_pvurb_queue, &partial_pvurb->entry); PutRequestsOnRing(xudd); KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql); FUNCTION_EXIT(); }
//
// Build a paging DMA buffer for the requested paging operation.
//
// BUGFIX: the DXGK_OPERATION_UNMAP_APERTURE_SEGMENT case read the
// operation parameters through pArgs->MapApertureSegment.  The union
// member for that operation is UnmapApertureSegment; relying on the two
// structs overlaying identically is fragile, so the correct member is
// used now.
//
// hAllocation is NULL for operations on the DMA buffer and pages mapped
// into the aperture.  Returns STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER
// when the remaining DMA buffer space is too small for the operation.
//
NTSTATUS
RosKmAdapter::BuildPagingBuffer(
    IN_PDXGKARG_BUILDPAGINGBUFFER   pArgs)
{
    NTSTATUS    Status = STATUS_SUCCESS;
    PBYTE       pDmaBufStart = (PBYTE)pArgs->pDmaBuffer;
    PBYTE       pDmaBufPos = (PBYTE)pArgs->pDmaBuffer;

    switch (pArgs->Operation)
    {
    case DXGK_OPERATION_MAP_APERTURE_SEGMENT:
    {
        if (pArgs->MapApertureSegment.SegmentId == kApertureSegmentId)
        {
            size_t pageIndex = pArgs->MapApertureSegment.OffsetInPages;
            size_t pageCount = pArgs->MapApertureSegment.NumberOfPages;

            NT_ASSERT(pageIndex + pageCount <= kApertureSegmentPageCount);

            size_t mdlPageOffset = pArgs->MapApertureSegment.MdlOffset;
            PMDL pMdl = pArgs->MapApertureSegment.pMdl;

            // Mirror the MDL's PFNs into the aperture page table.
            for (UINT i = 0; i < pageCount; i++)
            {
                m_aperturePageTable[pageIndex + i] = MmGetMdlPfnArray(pMdl)[mdlPageOffset + i];
            }
        }
    }
    break;

    case DXGK_OPERATION_UNMAP_APERTURE_SEGMENT:
    {
        if (pArgs->UnmapApertureSegment.SegmentId == kApertureSegmentId)
        {
            size_t pageIndex = pArgs->UnmapApertureSegment.OffsetInPages;
            size_t pageCount = pArgs->UnmapApertureSegment.NumberOfPages;

            NT_ASSERT(pageIndex + pageCount <= kApertureSegmentPageCount);

            // Clear the aperture page table entries.
            while (pageCount--)
            {
                m_aperturePageTable[pageIndex++] = 0;
            }
        }
    }
    break;

    case DXGK_OPERATION_FILL:
    {
        RosKmdAllocation * pRosKmdAllocation = (RosKmdAllocation *)pArgs->Fill.hAllocation;
        pRosKmdAllocation;

        DbgPrintEx(DPFLTR_IHVVIDEO_ID, DPFLTR_TRACE_LEVEL,
            "Filling at %lx with %lx size %lx\n",
            pArgs->Fill.Destination.SegmentAddress,
            pArgs->Fill.FillPattern,
            pArgs->Fill.FillSize);

        // The whole argument struct is copied into the DMA buffer for the
        // software renderer to interpret later.
        if (pArgs->DmaSize < sizeof(DXGKARG_BUILDPAGINGBUFFER))
        {
            return STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER;
        }
        else
        {
            *((DXGKARG_BUILDPAGINGBUFFER *)pArgs->pDmaBuffer) = *pArgs;

            pDmaBufPos += sizeof(DXGKARG_BUILDPAGINGBUFFER);
        }
    }
    break;

    case DXGK_OPERATION_DISCARD_CONTENT:
    {
        // do nothing
    }
    break;

    case DXGK_OPERATION_TRANSFER:
    {
        if (pArgs->DmaSize < sizeof(DXGKARG_BUILDPAGINGBUFFER))
        {
            return STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER;
        }
        else
        {
            *((DXGKARG_BUILDPAGINGBUFFER *)pArgs->pDmaBuffer) = *pArgs;

            pDmaBufPos += sizeof(DXGKARG_BUILDPAGINGBUFFER);
        }
    }
    break;

    default:
    {
        NT_ASSERT(false);

        m_ErrorHit.m_UnSupportedPagingOp = 1;
        Status = STATUS_SUCCESS;
    }
    break;
    }

    //
    // Update pDmaBuffer to point past the last byte used.
    //
    pArgs->pDmaBuffer = pDmaBufPos;

    // Record DMA buffer information only when it is newly used
    ROSDMABUFINFO * pDmaBufInfo = (ROSDMABUFINFO *)pArgs->pDmaBufferPrivateData;

    if (pDmaBufInfo && (pArgs->DmaSize == ROSD_PAGING_BUFFER_SIZE))
    {
        pDmaBufInfo->m_DmaBufState.m_Value = 0;
        pDmaBufInfo->m_DmaBufState.m_bPaging = 1;

        pDmaBufInfo->m_pDmaBuffer = pDmaBufStart;
        pDmaBufInfo->m_DmaBufferSize = pArgs->DmaSize;
    }

    return Status;
}
/*
 * Common transfer worker for shared-folder reads/writes.  Moves
 * pCtx->cbData bytes between the guest buffer (pCtx->pMdl / pCtx->pBuffer)
 * and the host file pCtx->hFile in chunks of at most
 * VBSF_MAX_READ_WRITE_PAGES pages, preferring the physical-page-list path
 * when VbglR0 supports it and falling back to the linear-address path.
 *
 * On return pCtx->cbData holds the number of bytes actually transferred;
 * a partial transfer after a host error is reported as success.
 */
static int vbsfTransferCommon(VBSFTRANSFERCTX *pCtx)
{
    int rc = VINF_SUCCESS;
    BOOLEAN fProcessed = FALSE;
    uint32_t cbTransferred = 0;
    uint32_t cbToTransfer;
    uint32_t cbIO;

    /* Preferred path: hand the host a list of guest-physical page
     * addresses derived from the MDL's PFN array. */
    if (VbglR0CanUsePhysPageList())
    {
        ULONG offFirstPage = MmGetMdlByteOffset(pCtx->pMdl);
        ULONG cPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pCtx->pMdl), pCtx->cbData);
        ULONG cPagesToTransfer = RT_MIN(cPages, VBSF_MAX_READ_WRITE_PAGES);
        RTGCPHYS64 *paPages = (RTGCPHYS64 *)RTMemTmpAlloc(cPagesToTransfer * sizeof(RTGCPHYS64));

        Log(("VBOXSF: vbsfTransferCommon: using page list: %d pages, offset 0x%03X\n", cPages, offFirstPage));

        if (paPages)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pCtx->pMdl);
            ULONG cPagesTransferred = 0;
            cbTransferred = 0;

            while (cPagesToTransfer != 0)
            {
                ULONG iPage;
                /* Byte size of this chunk; only the first chunk starts at
                 * a non-zero page offset. */
                cbToTransfer = cPagesToTransfer * PAGE_SIZE - offFirstPage;

                if (cbToTransfer > pCtx->cbData - cbTransferred)
                    cbToTransfer = pCtx->cbData - cbTransferred;

                if (cbToTransfer == 0)
                {
                    /* Nothing to transfer. */
                    break;
                }

                cbIO = cbToTransfer;

                Log(("VBOXSF: vbsfTransferCommon: transferring %d pages at %d; %d bytes at %d\n",
                     cPagesToTransfer, cPagesTransferred, cbToTransfer, cbTransferred));

                /* Convert PFNs to guest-physical byte addresses. */
                for (iPage = 0; iPage < cPagesToTransfer; iPage++)
                    paPages[iPage] = (RTGCPHYS64)paPfns[iPage + cPagesTransferred] << PAGE_SHIFT;

                rc = pCtx->pfnTransferPages(pCtx->pClient, pCtx->pMap, pCtx->hFile,
                                            pCtx->offset + cbTransferred, &cbIO,
                                            (uint16_t)offFirstPage, (uint16_t)cPagesToTransfer, paPages);
                if (RT_FAILURE(rc))
                {
                    Log(("VBOXSF: vbsfTransferCommon: pfnTransferPages %Rrc, cbTransferred %d\n", rc, cbTransferred));

                    /* If some data was transferred, then it is no error. */
                    if (cbTransferred > 0)
                        rc = VINF_SUCCESS;

                    break;
                }

                cbTransferred += cbIO;

                if (cbToTransfer < cbIO)
                {
                    /* Transferred less than requested, do not continue with the possibly remaining data. */
                    break;
                }

                cPagesTransferred += cPagesToTransfer;
                offFirstPage = 0;

                cPagesToTransfer = cPages - cPagesTransferred;
                if (cPagesToTransfer > VBSF_MAX_READ_WRITE_PAGES)
                    cPagesToTransfer = VBSF_MAX_READ_WRITE_PAGES;
            }

            RTMemTmpFree(paPages);

            fProcessed = TRUE;
        }
    }

    if (fProcessed != TRUE)
    {
        /* Split large transfers. */
        cbTransferred = 0;
        cbToTransfer = RT_MIN(pCtx->cbData, VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE);

        /* Page list not supported or a fallback. */
        Log(("VBOXSF: vbsfTransferCommon: using linear address\n"));

        while (cbToTransfer != 0)
        {
            cbIO = cbToTransfer;

            Log(("VBOXSF: vbsfTransferCommon: transferring %d bytes at %d\n",
                 cbToTransfer, cbTransferred));

            rc = pCtx->pfnTransferBuffer(pCtx->pClient, pCtx->pMap, pCtx->hFile,
                                         pCtx->offset + cbTransferred, &cbIO,
                                         pCtx->pBuffer + cbTransferred, true /* locked */);

            if (RT_FAILURE(rc))
            {
                Log(("VBOXSF: vbsfTransferCommon: pfnTransferBuffer %Rrc, cbTransferred %d\n", rc, cbTransferred));

                /* If some data was transferred, then it is no error. */
                if (cbTransferred > 0)
                    rc = VINF_SUCCESS;

                break;
            }

            cbTransferred += cbIO;

            if (cbToTransfer < cbIO)
            {
                /* Transferred less than requested, do not continue with the possibly remaining data. */
                break;
            }

            cbToTransfer = pCtx->cbData - cbTransferred;

            if (cbToTransfer > VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE)
                cbToTransfer = VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE;
        }
    }

    /* Report back how much actually moved. */
    pCtx->cbData = cbTransferred;

    return rc;
}
/*
 * Allocate up to 'Requested' pages, sort their PFNs into the global
 * Balloon.PfnArray, and free the MDL wrapper (the pages themselves stay
 * allocated/locked).  *pAllocated receives how many pages were obtained;
 * partial allocations are accepted and counted.
 *
 * Returns TRUE if the allocation was "slow" (rate below MIN_PAGES_PER_S),
 * which callers can use to back off.
 */
static BOOLEAN
BalloonAllocatePfnArray(
    IN  ULONG           Requested,
    OUT PULONG          pAllocated
    )
{
    LARGE_INTEGER Start;
    LARGE_INTEGER End;
    ULONGLONG TimeDelta;
    BOOLEAN Slow;
    MDL *Mdl;
    ULONG Allocated;
    PFN_NUMBER *Array;

    XM_ASSERT(Requested <= BALLOON_PFN_ARRAY_SIZE);

    KeQuerySystemTime(&Start);

    Allocated = 0;
    Mdl = BalloonAllocatePagesForMdl(Requested);
    if (Mdl == NULL) {
        Balloon.AllocateFail++;
        goto done;
    }

    XM_ASSERT(Mdl->ByteOffset == 0);
    XM_ASSERT((Mdl->ByteCount & (PAGE_SIZE - 1)) == 0);
    XM_ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);

    Allocated = Mdl->ByteCount >> PAGE_SHIFT;
    if (Allocated < Requested) {
        TraceNotice(("%s: partial allocation (%d < %d)\n", __FUNCTION__,
                     Allocated, Requested));
        Balloon.PartialAllocate++;
    }

    /* Keep the PFN array sorted before stashing it in the global table. */
    Array = MmGetMdlPfnArray(Mdl);
    BalloonSortPfnArray(Array, Allocated);
    RtlCopyMemory(Balloon.PfnArray, Array, Allocated * sizeof (PFN_NUMBER));

    /* Only the MDL wrapper is freed here; the pages remain allocated and
     * are tracked by PfnArray. */
    ExFreePool(Mdl);

done:
    TraceVerbose(("%s: %d page(s)\n", __FUNCTION__, Allocated));
    KeQuerySystemTime(&End);

    /* System time is in 100ns units; convert the delta to milliseconds. */
    TimeDelta = (End.QuadPart - Start.QuadPart) / 10000ull;

    Slow = FALSE;
    if (TimeDelta != 0) {
        ULONGLONG Rate;

        /* Pages per second. */
        Rate = (ULONGLONG)(Allocated * 1000) / TimeDelta;
        if (Rate < MIN_PAGES_PER_S) {
            TraceWarning(("%s: ran for more than %dms\n", __FUNCTION__,
                          TimeDelta));
            Slow = TRUE;
        }
    }

    *pAllocated = Allocated;
    return Slow;
}
/*
 * Set up the x86 BIOS emulation mapping: build an MDL that describes the
 * low 1MB of physical memory (substituting DEFAULT_PAGE for ranges the
 * loader reports as in use by non-firmware memory), map it into system
 * space, and mark the x86 BIOS support initialized.
 *
 * BUGFIX: on MDL allocation failure the original only ASSERTed; on free
 * builds ASSERT compiles away and the NULL Mdl was dereferenced.  Bail
 * out early instead.
 */
VOID
NTAPI
HalInitializeBios(ULONG Unknown, PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PPFN_NUMBER PfnArray;
    PFN_NUMBER Pfn, Last;
    PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
    PLIST_ENTRY ListEntry;
    PMDL Mdl;

    /* Allocate an MDL for 1MB */
    Mdl = IoAllocateMdl(NULL, 0x100000, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        /* Without the MDL we cannot establish the BIOS mapping; bail out
           rather than dereference a NULL pointer on free builds. */
        ASSERT(FALSE);
        return;
    }

    /* Get pointer to the pfn array */
    PfnArray = MmGetMdlPfnArray(Mdl);

    /* Fill the array with low memory PFNs (identity-mapped 0..0xFF) */
    for (Pfn = 0; Pfn < 0x100; Pfn++)
    {
        PfnArray[Pfn] = Pfn;
    }

    /* Loop the memory descriptors */
    for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
         ListEntry != &LoaderBlock->MemoryDescriptorListHead;
         ListEntry = ListEntry->Flink)
    {
        /* Get the memory descriptor */
        Descriptor = CONTAINING_RECORD(ListEntry,
                                       MEMORY_ALLOCATION_DESCRIPTOR,
                                       ListEntry);

        /* Check if the memory is in the low range */
        if (Descriptor->BasePage < 0x100)
        {
            /* Check if the memory type is firmware */
            if (Descriptor->MemoryType != LoaderFirmwarePermanent &&
                Descriptor->MemoryType != LoaderSpecialMemory)
            {
                /* It's something else, so don't use it! */
                Last = min(Descriptor->BasePage + Descriptor->PageCount, 0x100);
                for (Pfn = Descriptor->BasePage; Pfn < Last; Pfn++)
                {
                    /* Set each page to the default page */
                    PfnArray[Pfn] = DEFAULT_PAGE;
                }
            }
        }
    }

    /* Mark the pages as locked so the mapping call accepts the MDL. */
    Mdl->MdlFlags = MDL_PAGES_LOCKED;

    /* Map the MDL to system space */
    x86BiosMemoryMapping = MmGetSystemAddressForMdlSafe(Mdl, HighPagePriority);
    ASSERT(x86BiosMemoryMapping);

    DPRINT1("memory: %p, %p\n",
            *(PVOID*)x86BiosMemoryMapping,
            *(PVOID*)(x86BiosMemoryMapping + 8));
    //DbgDumpPage(x86BiosMemoryMapping, 0xc351);

    x86BiosIsInitialized = TRUE;

    HalpBiosDisplayReset();
}