/*
 * Static Driver Verifier harness: exercises MmGetMdlByteOffset on an MDL
 * after request completion.  The computed offset is intentionally unused;
 * only the access pattern matters to the verifier.
 */
VOID
SDVTest_wdf_MdlAfterReqCompletionIntIoctlAdd(
    _In_ PMDL Mdl
    )
{
    ULONG mdlOffset;

    mdlOffset = MmGetMdlByteOffset(Mdl);
    (void)mdlOffset;  /* result deliberately discarded */
}
/*
 * MapUserAddressToKernel
 *
 * Probes and locks a user-mode buffer and returns a kernel-mode virtual
 * address for it.  On success *ppMdl receives the MDL describing the locked
 * pages; the caller owns it and must eventually unmap/unlock/free it
 * (presumably via UnmapMappedKernelAddress -- confirm against that helper).
 * Returns NULL on any failure.
 *
 * pUserModeAddress - user-mode base address of the buffer
 * ulSize           - length of the buffer in bytes
 * ppMdl            - receives the MDL on success; must not be NULL
 */
PVOID MapUserAddressToKernel(
    IN PVOID pUserModeAddress,
    IN ULONG ulSize,
    OUT PMDL* ppMdl
    )
{
    PMDL pUserModeMdl = NULL;
    PVOID pMappedKernelAddr = NULL;
    BOOLEAN bLocked = FALSE;  /* tracks whether MmProbeAndLockPages succeeded */

    if (ppMdl == NULL)
        return NULL;

    __try {
        pUserModeMdl = IoAllocateMdl(pUserModeAddress, ulSize, FALSE, FALSE, NULL);
        if (pUserModeMdl != NULL) {
            /* Raises an exception if the user buffer is invalid. */
            MmProbeAndLockPages(pUserModeMdl, KernelMode, IoModifyAccess);
            bLocked = TRUE;

            pMappedKernelAddr = MmMapLockedPages(pUserModeMdl, KernelMode);
            if (pMappedKernelAddr != NULL) {
                /* Re-apply the sub-page offset of the original buffer.
                 * BUGFIX: use ULONG_PTR, not ULONG -- the ULONG cast
                 * truncated the pointer on 64-bit builds. */
                pMappedKernelAddr = (PVOID)
                    (((ULONG_PTR)PAGE_ALIGN(pMappedKernelAddr)) +
                     MmGetMdlByteOffset(pUserModeMdl));
                *ppMdl = pUserModeMdl;
            } else {
                UnmapMappedKernelAddress(pUserModeMdl);
            }
        }
    } __except (EXCEPTION_EXECUTE_HANDLER) {
        if (pUserModeMdl != NULL) {
            /* BUGFIX: if the probe succeeded before the exception, the
             * pages are still locked; freeing a locked MDL bugchecks. */
            if (bLocked)
                MmUnlockPages(pUserModeMdl);
            IoFreeMdl(pUserModeMdl);
        }
        pMappedKernelAddr = NULL;
    }

    return pMappedKernelAddr;
}
co_rc_t co_os_userspace_map(void *address, unsigned int pages, void **user_address_out, void **handle_out) { void *user_address; unsigned long memory_size = ((unsigned long)pages) << CO_ARCH_PAGE_SHIFT; PMDL mdl; mdl = IoAllocateMdl(address, memory_size, FALSE, FALSE, NULL); if (!mdl) return CO_RC(ERROR); MmBuildMdlForNonPagedPool(mdl); user_address = MmMapLockedPagesSpecifyCache(mdl, UserMode, MmCached, NULL, FALSE, HighPagePriority); if (!user_address) { IoFreeMdl(mdl); return CO_RC(ERROR); } *handle_out = (void *)mdl; *user_address_out = PAGE_ALIGN(user_address) + MmGetMdlByteOffset(mdl); return CO_RC(OK); }
/*
 * AllocateSharedMemory
 *
 * Allocates dwSizeRegion bytes of pool, describes it with an MDL, and maps
 * it into the current process's user address space.  On success fills in
 * lpSharedMemory (kernel pointer, user pointer, MDL, size) and returns TRUE.
 * On any failure all intermediate resources are released, *lpSharedMemory
 * is zeroed, and FALSE is returned.
 */
BOOLEAN AllocateSharedMemory(PSHARED_MEMORY lpSharedMemory, POOL_TYPE PoolType, ULONG dwSizeRegion)
{
    if (!_MmIsAddressValid(lpSharedMemory))
        return FALSE;
    if (!dwSizeRegion)
        return FALSE;

    memset(lpSharedMemory, 0, sizeof(SHARED_MEMORY));

#ifndef __MISC_USE_KHEAP
    lpSharedMemory->m_lpKernelMemory = ExAllocatePool(PoolType, dwSizeRegion);
#else
    lpSharedMemory->m_lpKernelMemory = (CHAR*)_AllocatePoolFromKHeap(hKHeapMiscDefault, dwSizeRegion);
#endif //!__MISC_USE_KHEAP
    if (!lpSharedMemory->m_lpKernelMemory)
        return FALSE;

    lpSharedMemory->m_Mdl = IoAllocateMdl(lpSharedMemory->m_lpKernelMemory, dwSizeRegion, FALSE, FALSE, NULL);
    if (!lpSharedMemory->m_Mdl)
        goto fail_free_pool;

    MmBuildMdlForNonPagedPool(lpSharedMemory->m_Mdl);

    /* BUGFIX: MmMapLockedPages into UserMode raises an exception (rather
     * than returning NULL) when the mapping cannot be created, so it must
     * be guarded or the driver crashes on mapping failure. */
    __try {
        lpSharedMemory->m_lpUserPage = MmMapLockedPages(lpSharedMemory->m_Mdl, UserMode);
    } __except (EXCEPTION_EXECUTE_HANDLER) {
        lpSharedMemory->m_lpUserPage = NULL;
    }
    if (!lpSharedMemory->m_lpUserPage)
        goto fail_free_mdl;

    /* Re-apply the sub-page offset.  BUGFIX: ULONG_PTR, not ULONG -- the
     * ULONG cast truncated the pointer on 64-bit builds. */
    lpSharedMemory->m_lpUserMemory = (PVOID)
        (((ULONG_PTR)PAGE_ALIGN(lpSharedMemory->m_lpUserPage)) +
         MmGetMdlByteOffset(lpSharedMemory->m_Mdl));

    if (!_MmIsAddressValid(lpSharedMemory->m_lpUserMemory)) {
        MmUnmapLockedPages(lpSharedMemory->m_lpUserPage, lpSharedMemory->m_Mdl);
        goto fail_free_mdl;
    }

    lpSharedMemory->m_dwSizeRegion = dwSizeRegion;
    return TRUE;

    /* Unified failure cleanup: release in reverse acquisition order. */
fail_free_mdl:
    IoFreeMdl(lpSharedMemory->m_Mdl);
fail_free_pool:
#ifndef __MISC_USE_KHEAP
    ExFreePool(lpSharedMemory->m_lpKernelMemory);
#else
    FreePoolToKHeap(hKHeapMiscDefault, lpSharedMemory->m_lpKernelMemory);
#endif //!__MISC_USE_KHEAP
    memset(lpSharedMemory, 0, sizeof(SHARED_MEMORY));
    return FALSE;
}
static VOID XenUsb_EvtIoInternalDeviceControl_PVURB( WDFQUEUE queue, WDFREQUEST request, size_t output_buffer_length, size_t input_buffer_length, ULONG io_control_code) { NTSTATUS status; WDFDEVICE device = WdfIoQueueGetDevice(queue); PXENUSB_DEVICE_DATA xudd = GetXudd(device); WDF_REQUEST_PARAMETERS wrp; pvurb_t *pvurb; partial_pvurb_t *partial_pvurb; KIRQL old_irql; UNREFERENCED_PARAMETER(input_buffer_length); UNREFERENCED_PARAMETER(output_buffer_length); UNREFERENCED_PARAMETER(io_control_code); FUNCTION_ENTER(); ASSERT(io_control_code == IOCTL_INTERNAL_PVUSB_SUBMIT_URB); WDF_REQUEST_PARAMETERS_INIT(&wrp); WdfRequestGetParameters(request, &wrp); pvurb = (pvurb_t *)wrp.Parameters.Others.Arg1; ASSERT(pvurb); RtlZeroMemory(&pvurb->rsp, sizeof(pvurb->rsp)); pvurb->status = STATUS_SUCCESS; pvurb->request = request; pvurb->ref = 1; pvurb->total_length = 0; partial_pvurb = ExAllocatePoolWithTag(NonPagedPool, sizeof(*partial_pvurb), XENUSB_POOL_TAG); /* todo - use lookaside */ if (!partial_pvurb) { WdfRequestComplete(request, STATUS_INSUFFICIENT_RESOURCES); FUNCTION_EXIT(); return; } KeAcquireSpinLock(&xudd->urb_ring_lock, &old_irql); status = WdfRequestMarkCancelableEx(request, XenUsb_EvtRequestCancelPvUrb); if (!NT_SUCCESS(status)) { KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql); FUNCTION_MSG("WdfRequestMarkCancelableEx returned %08x\n", status); WdfRequestComplete(request, STATUS_INSUFFICIENT_RESOURCES); FUNCTION_EXIT(); return; } partial_pvurb->req = pvurb->req; partial_pvurb->mdl = pvurb->mdl; /* 1:1 right now, but may need to split up large pvurb into smaller partial_pvurb's */ partial_pvurb->pvurb = pvurb; partial_pvurb->other_partial_pvurb = NULL; partial_pvurb->on_ring = FALSE; if (!partial_pvurb->mdl) { partial_pvurb->req.nr_buffer_segs = 0; partial_pvurb->req.buffer_length = 0; } else { ULONG remaining = MmGetMdlByteCount(partial_pvurb->mdl); USHORT offset = (USHORT)MmGetMdlByteOffset(partial_pvurb->mdl); int i; partial_pvurb->req.buffer_length = 
(USHORT)MmGetMdlByteCount(partial_pvurb->mdl); partial_pvurb->req.nr_buffer_segs = (USHORT)ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(partial_pvurb->mdl), MmGetMdlByteCount(partial_pvurb->mdl)); for (i = 0; i < partial_pvurb->req.nr_buffer_segs; i++) { partial_pvurb->req.seg[i].gref = XnGrantAccess(xudd->handle, (ULONG)MmGetMdlPfnArray(partial_pvurb->mdl)[i], FALSE, INVALID_GRANT_REF, (ULONG)'XUSB'); partial_pvurb->req.seg[i].offset = (USHORT)offset; partial_pvurb->req.seg[i].length = (USHORT)min((USHORT)remaining, (USHORT)PAGE_SIZE - offset); offset = 0; remaining -= partial_pvurb->req.seg[i].length; FUNCTION_MSG("seg = %d\n", i); FUNCTION_MSG(" gref = %d\n", partial_pvurb->req.seg[i].gref); FUNCTION_MSG(" offset = %d\n", partial_pvurb->req.seg[i].offset); FUNCTION_MSG(" length = %d\n", partial_pvurb->req.seg[i].length); } FUNCTION_MSG("buffer_length = %d\n", partial_pvurb->req.buffer_length); FUNCTION_MSG("nr_buffer_segs = %d\n", partial_pvurb->req.nr_buffer_segs); } InsertTailList(&xudd->partial_pvurb_queue, &partial_pvurb->entry); PutRequestsOnRing(xudd); KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql); FUNCTION_EXIT(); }
/*
 * vbsfTransferCommon
 *
 * Transfers pCtx->cbData bytes between the host and the buffer described by
 * pCtx, preferring the physical-page-list path (pfnTransferPages) when the
 * VBoxGuest interface supports it and falling back to the linear-address
 * path (pfnTransferBuffer) otherwise.  Large transfers are split into
 * chunks of at most VBSF_MAX_READ_WRITE_PAGES pages.  On return
 * pCtx->cbData holds the number of bytes actually transferred; a partial
 * transfer after some data moved is reported as success.
 */
static int vbsfTransferCommon(VBSFTRANSFERCTX *pCtx)
{
    int rc = VINF_SUCCESS;
    BOOLEAN fProcessed = FALSE;
    uint32_t cbTransferred = 0;
    uint32_t cbToTransfer;
    uint32_t cbIO;

    if (VbglR0CanUsePhysPageList())
    {
        ULONG offFirstPage = MmGetMdlByteOffset(pCtx->pMdl);
        ULONG cPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pCtx->pMdl), pCtx->cbData);
        ULONG cPagesToTransfer = RT_MIN(cPages, VBSF_MAX_READ_WRITE_PAGES);
        RTGCPHYS64 *paPages = (RTGCPHYS64 *)RTMemTmpAlloc(cPagesToTransfer * sizeof(RTGCPHYS64));

        Log(("VBOXSF: vbsfTransferCommon: using page list: %d pages, offset 0x%03X\n", cPages, offFirstPage));

        if (paPages)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pCtx->pMdl);
            ULONG cPagesTransferred = 0;
            cbTransferred = 0;

            while (cPagesToTransfer != 0)
            {
                ULONG iPage;
                cbToTransfer = cPagesToTransfer * PAGE_SIZE - offFirstPage;

                if (cbToTransfer > pCtx->cbData - cbTransferred)
                    cbToTransfer = pCtx->cbData - cbTransferred;

                if (cbToTransfer == 0)
                {
                    /* Nothing to transfer. */
                    break;
                }

                cbIO = cbToTransfer;

                Log(("VBOXSF: vbsfTransferCommon: transferring %d pages at %d; %d bytes at %d\n",
                     cPagesToTransfer, cPagesTransferred, cbToTransfer, cbTransferred));

                for (iPage = 0; iPage < cPagesToTransfer; iPage++)
                    paPages[iPage] = (RTGCPHYS64)paPfns[iPage + cPagesTransferred] << PAGE_SHIFT;

                rc = pCtx->pfnTransferPages(pCtx->pClient, pCtx->pMap, pCtx->hFile,
                                            pCtx->offset + cbTransferred, &cbIO,
                                            (uint16_t)offFirstPage, (uint16_t)cPagesToTransfer, paPages);
                if (RT_FAILURE(rc))
                {
                    Log(("VBOXSF: vbsfTransferCommon: pfnTransferPages %Rrc, cbTransferred %d\n", rc, cbTransferred));

                    /* If some data was transferred, then it is no error. */
                    if (cbTransferred > 0)
                        rc = VINF_SUCCESS;

                    break;
                }

                cbTransferred += cbIO;

                /* BUGFIX: short transfer is cbIO < cbToTransfer; the
                 * original comparison was inverted. */
                if (cbToTransfer > cbIO)
                {
                    /* Transferred less than requested, do not continue with the possibly remaining data. */
                    break;
                }

                cPagesTransferred += cPagesToTransfer;
                offFirstPage = 0;   /* only the first chunk has a sub-page offset */

                cPagesToTransfer = cPages - cPagesTransferred;
                if (cPagesToTransfer > VBSF_MAX_READ_WRITE_PAGES)
                    cPagesToTransfer = VBSF_MAX_READ_WRITE_PAGES;
            }

            RTMemTmpFree(paPages);

            fProcessed = TRUE;
        }
    }

    if (fProcessed != TRUE)
    {
        /* Page list not supported or a fallback: split large transfers and
         * go through the linear-address API. */
        cbTransferred = 0;
        cbToTransfer = RT_MIN(pCtx->cbData, VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE);

        Log(("VBOXSF: vbsfTransferCommon: using linear address\n"));

        while (cbToTransfer != 0)
        {
            cbIO = cbToTransfer;

            Log(("VBOXSF: vbsfTransferCommon: transferring %d bytes at %d\n", cbToTransfer, cbTransferred));

            rc = pCtx->pfnTransferBuffer(pCtx->pClient, pCtx->pMap, pCtx->hFile,
                                         pCtx->offset + cbTransferred, &cbIO,
                                         pCtx->pBuffer + cbTransferred, true /* locked */);
            if (RT_FAILURE(rc))
            {
                Log(("VBOXSF: vbsfTransferCommon: pfnTransferBuffer %Rrc, cbTransferred %d\n", rc, cbTransferred));

                /* If some data was transferred, then it is no error. */
                if (cbTransferred > 0)
                    rc = VINF_SUCCESS;

                break;
            }

            cbTransferred += cbIO;

            /* BUGFIX: short transfer is cbIO < cbToTransfer; the original
             * comparison was inverted. */
            if (cbToTransfer > cbIO)
            {
                /* Transferred less than requested, do not continue with the possibly remaining data. */
                break;
            }

            cbToTransfer = pCtx->cbData - cbTransferred;
            if (cbToTransfer > VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE)
                cbToTransfer = VBSF_MAX_READ_WRITE_PAGES * PAGE_SIZE;
        }
    }

    pCtx->cbData = cbTransferred;
    return rc;
}
/*
 * MapPhysicalMemoryToLinearSpace
 *
 * Maps PhysMemSizeInBytes bytes of physical memory at pPhysAddress into the
 * calling process's user address space and records the mapping in the
 * driver's global allocation list (alloclist/alloccount) so it can be torn
 * down later.  On success *PhysMemLin receives the user-mode address.
 *
 * Returns STATUS_SUCCESS, STATUS_INVALID_PARAMETER (MmMapIoSpace failed),
 * STATUS_INSUFFICIENT_RESOURCES, or the code of a raised exception.
 */
static STDCALL NTSTATUS MapPhysicalMemoryToLinearSpace(PVOID pPhysAddress,
                                                       ULONG PhysMemSizeInBytes,
                                                       PVOID *PhysMemLin)
{
    alloc_priv* alloclisttmp;
    PMDL Mdl = NULL;
    PVOID SystemVirtualAddress = NULL;
    PVOID MappedAddress = NULL;      /* raw base returned by MmMapLockedPages */
    PVOID UserVirtualAddress = NULL; /* MappedAddress + sub-page offset */
    PHYSICAL_ADDRESS pStartPhysAddress;

    OutputDebugString("dhahelper: entering MapPhysicalMemoryToLinearSpace");

#ifdef _WIN64
    pStartPhysAddress.QuadPart = (ULONGLONG)pPhysAddress;
#else
    pStartPhysAddress.QuadPart = (ULONGLONG)(ULONG)pPhysAddress;
#endif
#ifndef NO_SEH
    __try {
#endif
        SystemVirtualAddress = MmMapIoSpace(pStartPhysAddress, PhysMemSizeInBytes, /*MmWriteCombined*/MmNonCached);
        if (!SystemVirtualAddress) {
            OutputDebugString("dhahelper: MmMapIoSpace failed");
            return STATUS_INVALID_PARAMETER;
        }
        OutputDebugString("dhahelper: SystemVirtualAddress 0x%x", SystemVirtualAddress);

        Mdl = IoAllocateMdl(SystemVirtualAddress, PhysMemSizeInBytes, FALSE, FALSE, NULL);
        if (!Mdl) {
            OutputDebugString("dhahelper: IoAllocateMdl failed");
            /* BUGFIX: the I/O-space mapping leaked here in the original. */
            MmUnmapIoSpace(SystemVirtualAddress, PhysMemSizeInBytes);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        OutputDebugString("dhahelper: Mdl 0x%x", Mdl);

        MmBuildMdlForNonPagedPool(Mdl);

        /* BUGFIX: check the raw mapping result.  The original tested the
         * offset-adjusted pointer, which is non-NULL whenever the byte
         * offset is non-zero even if the mapping itself failed. */
        MappedAddress = MmMapLockedPages(Mdl, UserMode);
        if (!MappedAddress) {
            OutputDebugString("dhahelper: MmMapLockedPages failed");
            IoFreeMdl(Mdl);
            MmUnmapIoSpace(SystemVirtualAddress, PhysMemSizeInBytes);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        /* ULONG_PTR works for both 32- and 64-bit builds, replacing the
         * old #ifdef _WIN64 pair. */
        UserVirtualAddress = (PVOID)(((ULONG_PTR)PAGE_ALIGN(MappedAddress)) + MmGetMdlByteOffset(Mdl));
        OutputDebugString("dhahelper: UserVirtualAddress 0x%x", UserVirtualAddress);
#ifndef NO_SEH
    } __except (EXCEPTION_EXECUTE_HANDLER) {
        NTSTATUS ntStatus;
        ntStatus = GetExceptionCode();
        OutputDebugString("dhahelper: MapPhysicalMemoryToLinearSpace failed due to exception 0x%0x\n", ntStatus);
        /* BUGFIX: release whatever was acquired before the exception. */
        if (Mdl)
            IoFreeMdl(Mdl);
        if (SystemVirtualAddress)
            MmUnmapIoSpace(SystemVirtualAddress, PhysMemSizeInBytes);
        return ntStatus;
    }
#endif

    OutputDebugString("dhahelper: adding data to internal allocation list");
    alloclisttmp = MmAllocateNonCachedMemory((alloccount + 1) * sizeof(alloc_priv));
    if (!alloclisttmp) {
        OutputDebugString("dhahelper: not enough memory to create temporary allocation list");
        /* BUGFIX: MmUnmapLockedPages must receive the base address that
         * MmMapLockedPages returned, not the offset-adjusted pointer; the
         * I/O-space mapping also leaked here in the original. */
        MmUnmapLockedPages(MappedAddress, Mdl);
        IoFreeMdl(Mdl);
        MmUnmapIoSpace(SystemVirtualAddress, PhysMemSizeInBytes);
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    if (alloccount) {
        memcpy(alloclisttmp, alloclist, alloccount * sizeof(alloc_priv));
        MmFreeNonCachedMemory(alloclist, alloccount * sizeof(alloc_priv));
    }
    alloclist = alloclisttmp;
    alloclist[alloccount].Mdl = Mdl;
    alloclist[alloccount].SystemVirtualAddress = SystemVirtualAddress;
    /* NOTE(review): the list stores the offset-adjusted user address, as
     * before; the teardown path elsewhere must page-align it before
     * MmUnmapLockedPages -- confirm against UnmapPhysicalMemory. */
    alloclist[alloccount].UserVirtualAddress = UserVirtualAddress;
    alloclist[alloccount].PhysMemSizeInBytes = PhysMemSizeInBytes;
    ++alloccount;
    *PhysMemLin = UserVirtualAddress;
    OutputDebugString("dhahelper: leaving MapPhysicalMemoryToLinearSpace");
    return STATUS_SUCCESS;
}