Ejemplo n.º 1
2
/*
* TsmiHandleMemWrite
*
* Purpose:
*
* Patch vbox dll in memory.
*
* Warning: potential BSOD-generator due to nonstandard way of loading, take care with patch offsets.
*
*/
NTSTATUS TsmiHandleMemWrite(
    _In_ PVOID SrcAddress,
    _In_ PVOID DestAddress,
    _In_ ULONG Size
)
{
    PMDL        mdl;
    NTSTATUS    status = STATUS_SUCCESS;

    PAGED_CODE();

    /* Describe the destination range with an MDL so it can be locked and
       remapped with writable protection. */
    mdl = IoAllocateMdl(DestAddress, Size, FALSE, FALSE, NULL);
    if (mdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* Kernel-range targets must be valid before probing.
       BUGFIX: the original returned here without releasing the MDL,
       leaking the allocation on every invalid address. */
    if (DestAddress >= MmSystemRangeStart) {
        if (!MmIsAddressValid(DestAddress)) {
            IoFreeMdl(mdl);
            return STATUS_ACCESS_VIOLATION;
        }
    }

    /* NOTE(review): MmProbeAndLockPages raises an exception on failure;
       consider wrapping in __try/__except (cf. the SEH-protected variant
       of this routine). */
    MmProbeAndLockPages(mdl, KernelMode, IoReadAccess);

    /* Obtain a system-space view of the locked pages. */
    DestAddress = MmGetSystemAddressForMdlSafe(mdl, HighPagePriority);
    if (DestAddress != NULL) {
        /* Relax page protection so the patch bytes can be written. */
        status = MmProtectMdlSystemAddress(mdl, PAGE_EXECUTE_READWRITE);
        __movsb((PUCHAR)DestAddress, (const UCHAR *)SrcAddress, Size);
        MmUnmapLockedPages(DestAddress, mdl);
        MmUnlockPages(mdl);
    }
    else {
        status = STATUS_ACCESS_VIOLATION;
    }

    IoFreeMdl(mdl);
    return status;
}
Ejemplo n.º 2
0
/// <summary>
/// Unmap memory region, release corresponding MDL, and remove region from list
/// </summary>
/// <param name="pPageEntry">Region data</param>
/// <param name="pFoundEntry">Process data</param>
/// <returns>Status code (always STATUS_SUCCESS)</returns>
NTSTATUS BBUnmapRegionEntry( IN PMAP_ENTRY pPageEntry, IN PPROCESS_MAP_ENTRY pFoundEntry )
{
    NTSTATUS status = STATUS_SUCCESS;

    // pFoundEntry is only read inside DPRINT below, which may compile to
    // nothing in free builds; the macro suppresses the resulting warning.
    UNREFERENCED_PARAMETER( pFoundEntry );

    // MDL is valid
    if (pPageEntry->pMdl)
    {
        // If MDL is mapped, remove the mapping before unlocking/freeing.
        if (pPageEntry->newPtr)
        {
            DPRINT( "BlackBone: %s: Unmapping region at 0x%p from process %u\n", __FUNCTION__, pPageEntry->newPtr, pFoundEntry->target.pid );
            MmUnmapLockedPages( (PVOID)pPageEntry->newPtr, pPageEntry->pMdl );
            pPageEntry->newPtr = 0;   // mark as no longer mapped
        }

        // Pages must be unlocked before the MDL itself is released.
        if (pPageEntry->locked)
            MmUnlockPages( pPageEntry->pMdl );

        IoFreeMdl( pPageEntry->pMdl );
    }

    // Detach the entry from its containing list and free its storage.
    RemoveEntryList( &pPageEntry->link );
    ExFreePoolWithTag( pPageEntry, BB_POOL_TAG );

    return status;
}
Ejemplo n.º 3
0
//------------------------------------------------------------------------------
tOplkError drv_mapPdoMem(UINT8** ppKernelMem_p, UINT8** ppUserMem_p,
                         size_t* pMemSize_p)
{
    tOplkError      ret;

    // Get PDO memory
    ret = pdokcal_getPdoMemRegion((UINT8**)&pdoMemInfo_l.pKernelVa,
                                  &pdoMemInfo_l.memSize);

    if (ret != kErrorOk || pdoMemInfo_l.pKernelVa == NULL)
        return kErrorNoResource;

    // The caller may not request more memory than the kernel region provides.
    if (*pMemSize_p > pdoMemInfo_l.memSize)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Higher Memory requested (Kernel-%d User-%d) !\n",
                              __func__, pdoMemInfo_l.memSize, *pMemSize_p);
        *pMemSize_p = 0;
        return kErrorNoResource;
    }

    // Allocate new MDL pointing to PDO memory
    pdoMemInfo_l.pMdl = IoAllocateMdl(pdoMemInfo_l.pKernelVa, pdoMemInfo_l.memSize, FALSE, FALSE,
                                      NULL);

    if (pdoMemInfo_l.pMdl == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Error allocating MDL !\n", __func__);
        return kErrorNoResource;
    }

    // Update the MDL with physical addresses
    MmBuildMdlForNonPagedPool(pdoMemInfo_l.pMdl);

    // Map the memory in user space and get the address.
    // NOTE(review): MmMapLockedPagesSpecifyCache() with UserMode can raise an
    // exception instead of returning NULL; consider __try/__except here.
    pdoMemInfo_l.pUserVa = MmMapLockedPagesSpecifyCache(pdoMemInfo_l.pMdl,    // MDL
                                                        UserMode,             // Mode
                                                        MmCached,             // Caching
                                                        NULL,                 // Address
                                                        FALSE,                // Bug-check?
                                                        NormalPagePriority);  // Priority

    if (pdoMemInfo_l.pUserVa == NULL)
    {
        // BUGFIX: the original called MmUnmapLockedPages(NULL, pMdl) here;
        // when the mapping itself failed there is nothing to unmap. Free the
        // MDL and clear the pointer so later cleanup cannot touch a stale MDL.
        IoFreeMdl(pdoMemInfo_l.pMdl);
        pdoMemInfo_l.pMdl = NULL;
        DEBUG_LVL_ERROR_TRACE("%s() Error mapping MDL !\n", __func__);
        return kErrorNoResource;
    }

    *ppKernelMem_p = pdoMemInfo_l.pKernelVa;
    *ppUserMem_p = pdoMemInfo_l.pUserVa;
    *pMemSize_p = pdoMemInfo_l.memSize;

    TRACE("Mapped memory info U:%p K:%p size %x", pdoMemInfo_l.pUserVa,
                                                 (UINT8*)pdoMemInfo_l.pKernelVa,
                                                 pdoMemInfo_l.memSize);
    return kErrorOk;
}
Ejemplo n.º 4
0
VOID
PushUnload( DRIVER_OBJECT* DriverObject )
{
    UNICODE_STRING DeviceLinkU;
    NTSTATUS ntStatus;
    PMAPINFO pMapInfo;
    PSINGLE_LIST_ENTRY pLink;

    DbgPrint("[Push] => (PushUnload)");

    RdUnload(DriverObject);

    // Free every mapping left in the list: the user-mode view, its MDL,
    // the kernel I/O-space mapping, then the tracking record itself.
    pLink=PopEntryList(&lstMapInfo);
    while(pLink)
    {
        pMapInfo=CONTAINING_RECORD(pLink, MAPINFO, link);

        MmUnmapLockedPages(pMapInfo->pvu, pMapInfo->pMdl);
        IoFreeMdl(pMapInfo->pMdl);
        MmUnmapIoSpace(pMapInfo->pvk, pMapInfo->memSize);

        ExFreePool(pMapInfo);

        pLink=PopEntryList(&lstMapInfo);
    }

    //
    // restore the call back routine, thus giving chance to the
    // user mode application to unload dynamically the driver
    // (BUGFIX: removed the dead assignment of STATUS_DEVICE_CONFIGURATION_ERROR
    // that was immediately overwritten here)
    //
    ntStatus = PsSetCreateProcessNotifyRoutine(ProcessCallback, TRUE);

    RtlInitUnicodeString(&DeviceLinkU, PUSH_SYMLINK_NAME);

    ntStatus=IoDeleteSymbolicLink(&DeviceLinkU);

    if (!NT_SUCCESS(ntStatus))
    {
        DbgPrint("Error: IoDeleteSymbolicLink failed");
    }

    // BUGFIX: delete the device object exactly once. The original called
    // IoDeleteDevice() unconditionally and then a second time when the
    // symbolic-link deletion succeeded, destroying the same object twice.
    IoDeleteDevice(DriverObject->DeviceObject);

    DbgPrint("[Push] <= (PushUnload)");
}
Ejemplo n.º 5
0
//----------------------------------------------------------------------
//
// RegmonUnmapMem
//
// Unmaps previously mapped memory and releases the MDL that
// described the mapping.
//
//----------------------------------------------------------------------
VOID
RegmonUnmapMem(
    PVOID Pointer,
    PMDL Mdl
    )
{
    MmUnmapLockedPages( Pointer, Mdl );
    // NOTE(review): the MDL is released with ExFreePool rather than
    // IoFreeMdl, which presumes it was allocated directly from pool
    // (e.g. via MmCreateMdl) — confirm against the allocation site.
    ExFreePool( Mdl );
}
Ejemplo n.º 6
0
void co_os_userspace_unmap(void *user_address, void *handle, unsigned int pages)
{
	PMDL mdl = (PMDL)handle;
	
	if (user_address)
		MmUnmapLockedPages(user_address, mdl); 

	IoFreeMdl(mdl);
}
Ejemplo n.º 7
0
/* Copy as much buffered receive data as fits into the caller's buffer array.
 * Returns STATUS_SUCCESS when data was copied (or the FCB cannot be read any
 * more) and STATUS_PENDING when nothing is buffered yet.
 * NOTE(review): the AFD_MAPBUF array appears to live immediately after the
 * buffer array in the same allocation — confirm against the locking helper. */
static NTSTATUS TryToSatisfyRecvRequestFromBuffer( PAFD_FCB FCB,
                                                   PAFD_RECV_INFO RecvReq,
                                                   PUINT TotalBytesCopied ) {
    UINT i, BytesToCopy = 0, FcbBytesCopied = FCB->Recv.BytesUsed,
        BytesAvailable =
        FCB->Recv.Content - FCB->Recv.BytesUsed;
    PAFD_MAPBUF Map;
    *TotalBytesCopied = 0;


    AFD_DbgPrint(MID_TRACE,("Called, BytesAvailable = %u\n", BytesAvailable));

    if( CantReadMore(FCB) ) return STATUS_SUCCESS;
    if( !BytesAvailable ) return STATUS_PENDING;

    /* Map descriptors are stored right after the user buffer descriptors. */
    Map = (PAFD_MAPBUF)(RecvReq->BufferArray + RecvReq->BufferCount);

    AFD_DbgPrint(MID_TRACE,("Buffer Count: %u @ %p\n",
                            RecvReq->BufferCount,
                            RecvReq->BufferArray));
    /* Fill each caller buffer in turn until the buffered data runs out. */
    for( i = 0;
         RecvReq->BufferArray &&
             BytesAvailable &&
             i < RecvReq->BufferCount;
         i++ ) {
        BytesToCopy =
            MIN( RecvReq->BufferArray[i].len, BytesAvailable );

        if( Map[i].Mdl ) {
            /* Temporarily map the user buffer into kernel space for the copy. */
            Map[i].BufferAddress = MmMapLockedPages( Map[i].Mdl, KernelMode );

            AFD_DbgPrint(MID_TRACE,("Buffer %u: %p:%u\n",
                                    i,
                                    Map[i].BufferAddress,
                                    BytesToCopy));

            RtlCopyMemory( Map[i].BufferAddress,
                           FCB->Recv.Window + FcbBytesCopied,
                           BytesToCopy );

            MmUnmapLockedPages( Map[i].BufferAddress, Map[i].Mdl );

            *TotalBytesCopied += BytesToCopy;
            FcbBytesCopied += BytesToCopy;
            BytesAvailable -= BytesToCopy;

            /* A peek leaves the data in the FCB window for the next read. */
            if (!(RecvReq->TdiFlags & TDI_RECEIVE_PEEK))
                FCB->Recv.BytesUsed += BytesToCopy;
        }
    }

    /* Issue another receive IRP to keep the buffer well stocked */
    RefillSocketBuffer(FCB);

    return STATUS_SUCCESS;
}
Ejemplo n.º 8
0
/*
* TsmiHandleMemWrite
*
* Purpose:
*
* Patch vbox dll in memory.
*
* Warning: If compiled not in ReleaseSigned configuration this function is a
* potential BSOD-generator due to nonstandard way of loading, take care with patch offsets.
*
*/
NTSTATUS TsmiHandleMemWrite(
    _In_ PVOID SrcAddress,
    _In_ PVOID DestAddress,
    _In_ ULONG Size
)
{
    PMDL        mdl;
    NTSTATUS    status = STATUS_SUCCESS;

    PAGED_CODE();

    /* Describe the destination range so it can be locked and remapped. */
    mdl = IoAllocateMdl(DestAddress, Size, FALSE, FALSE, NULL);
    if (mdl == NULL) {
#ifdef _DEBUGMSG
        DbgPrint("[TSMI] Failed to create MDL at write\n");
#endif
        return STATUS_INSUFFICIENT_RESOURCES;
    }

#ifdef _SIGNED_BUILD
    __try {
#endif //_SIGNED_BUILD

        if (DestAddress >= MmSystemRangeStart)
            if (!MmIsAddressValid(DestAddress)) {
#ifdef _DEBUGMSG
                DbgPrint("[TSMI] Invalid address\n");
#endif //_DEBUGMSG
                /* BUGFIX: release the MDL before bailing out; the original
                   returned here and leaked the allocation. */
                IoFreeMdl(mdl);
                return STATUS_ACCESS_VIOLATION;
            }
        MmProbeAndLockPages(mdl, KernelMode, IoReadAccess);
        /* Obtain a writable system view of the locked pages and copy. */
        DestAddress = MmGetSystemAddressForMdlSafe(mdl, HighPagePriority);
        if (DestAddress != NULL) {
            status = MmProtectMdlSystemAddress(mdl, PAGE_EXECUTE_READWRITE);
            __movsb((PUCHAR)DestAddress, (const UCHAR *)SrcAddress, Size);
            MmUnmapLockedPages(DestAddress, mdl);
            MmUnlockPages(mdl);
        }
        else {
            status = STATUS_ACCESS_VIOLATION;
        }

#ifdef _SIGNED_BUILD
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        status = STATUS_ACCESS_VIOLATION;
#ifdef _DEBUGMSG
        DbgPrint("[TSMI] MmProbeAndLockPages failed at write DestAddress = %p\n", DestAddress);
#endif //_DEBUGMSG
    }
#endif //_SIGNED_BUILD

    IoFreeMdl(mdl);
    return status;
}
Ejemplo n.º 9
0
//----------------------------------------------------------------------
//
// RegmonUnmapServiceTable
//
// Releases the mapping of the system service table (if one was
// created) together with the MDL that described it.
//
//----------------------------------------------------------------------
VOID
RegmonUnmapServiceTable( 
    PVOID KeServiceTablePointers
    )
{
    if( KeServiceTableMdl ) {

        MmUnmapLockedPages( KeServiceTablePointers, KeServiceTableMdl );
        // NOTE(review): the MDL is freed with ExFreePool rather than
        // IoFreeMdl, which presumes it was allocated directly from pool
        // (e.g. via MmCreateMdl) — confirm against the allocation site.
        ExFreePool( KeServiceTableMdl );
    }
}
Ejemplo n.º 10
0
VOID
Unload(
    IN PDRIVER_OBJECT pDriverObject
    )
{
    // Local variables
	KTIMER            timer; // timer to wait for IRPs to die
	LARGE_INTEGER     timeout; // timeout for the timer
	PDEVICE_EXTENSION pKeyboardDeviceExtension;

	DbgPrint("Begin DriverUnload routine.\n");

	// Get the pointer to the device extension
	pKeyboardDeviceExtension = (PDEVICE_EXTENSION)pDriverObject->DeviceObject->DeviceExtension;

	// Detach from the device underneath that we're hooked to
	IoDetachDevice(pKeyboardDeviceExtension->pKeyboardDevice);

	//***Begin HideProcessHookMDL SSDT hook code***

	// Unhook SSDT call, disable interrupts first
	// NOTE(review): cli/sti only masks interrupts on the current CPU and does
	// not serialize other processors — confirm this is acceptable on MP systems.
	_asm{cli}
	UNHOOK_SYSCALL(ZwWriteFile, OldZwWriteFile, NewZwWriteFile);
	_asm{sti}

	// Unlock and free MDL that provided the writable syscall-table view
	if(g_pmdlSystemCall)
    {
		MmUnmapLockedPages(MappedSystemCallTable, g_pmdlSystemCall);
		IoFreeMdl(g_pmdlSystemCall);
    }
	//*** End HideProcessHookMDL SSDT hook code***

	// Initialize a timer
	// NOTE(review): KeSetTimer treats a positive DueTime as an absolute time;
	// a relative interval is normally expressed as a negative value — confirm
	// the intended wait length here.
	timeout.QuadPart = 1000000;
	KeInitializeTimer(&timer);

    // Wait for pending IRPs to finish (poll-and-sleep loop on a shared counter)
	while (numPendingIrps > 0)
	{
		KeSetTimer(&timer, timeout, NULL);
		KeWaitForSingleObject(&timer, Executive, KernelMode, FALSE, NULL);
	}

	// Delete the device
	IoDeleteDevice(pDriverObject->DeviceObject);

	//TODO: clean up any remaining resources, close any files, etc..

	// Done.
	return;
} // Unload
Ejemplo n.º 11
0
/*
 * Encrypt or decrypt a run of 512-byte sectors described by two MDLs.
 * pSourceMdl is read, pTargetMdl written; 'sector' is the index of the first
 * sector (each sector is keyed by its own index). Returns the first cipher
 * failure status, or STATUS_SUCCESS (also when no cipher is configured).
 */
NTSTATUS Ext_CryptBlocks(PEXTENSION_CONTEXT ExtContext, PMDL pSourceMdl, PMDL pTargetMdl, SIZE_T size, SIZE_T sector, BOOLEAN Encrypt)
{
    NTSTATUS status = STATUS_SUCCESS;
    PVOID pSource = NULL, pTarget = NULL;
    CONST SIZE_T SectorSize = 512;
    SIZE_T SectorOffset = 0;

    if (!ExtContext || !pSourceMdl || !pTargetMdl)
        return STATUS_INVALID_PARAMETER;
    if (!ExtContext->pCipherContext)
        return STATUS_SUCCESS;          // no cipher attached: nothing to do

    // Map both MDLs into system space for byte-wise access.
    pSource = MmGetSystemAddressForMdlSafe(pSourceMdl, NormalPagePriority);
    pTarget = MmGetSystemAddressForMdlSafe(pTargetMdl, NormalPagePriority);

    if (!pSource || !pTarget)
        return STATUS_INSUFFICIENT_RESOURCES;

    LOG_ASSERT(0 == size % SectorSize);

    // BUGFIX: 'size' is SIZE_T; %X expects a 32-bit value, which corrupts the
    // vararg decode on 64-bit builds. Cast explicitly.
    EXTLOG(LL_VERBOSE, "VHD: %s 0x%X bytes\n", Encrypt ? "Encrypting" : "Decrypting", (ULONG)size);

    // Transform one sector at a time, stopping on the first cipher failure.
    for (SectorOffset = 0; SectorOffset < size; SectorOffset += SectorSize)
    {
        PUCHAR pSourceSector = (PUCHAR)pSource + SectorOffset;
        PUCHAR pTargetSector = (PUCHAR)pTarget + SectorOffset;
        status = (Encrypt ? ExtContext->pCipherEngine->pfnEncrypt : ExtContext->pCipherEngine->pfnDecrypt)(
            ExtContext->pCipherContext, pSourceSector, pTargetSector, SectorSize, sector++);
        if (!NT_SUCCESS(status))
            break;
    }

    // Drop the system mappings created above. (The MDL pointers were already
    // validated at entry, so only the mapping flag is checked here.)
    if (0 != (pSourceMdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA))
        MmUnmapLockedPages(pSource, pSourceMdl);
    if (0 != (pTargetMdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA))
        MmUnmapLockedPages(pTarget, pTargetMdl);
    return status;
}
Ejemplo n.º 12
0
/**
*  Restore the hooked ZwOpenProcess service entry.
*
*  Writes the original NtOpenProcess pointer back into the service table,
*  then unmaps and frees the MDL used to obtain a writable table view.
*/
NTSTATUS  sstUnhook_OpenProcess()
{
	// Reset the tracked process id (presumably the hook's PID filter — confirm).
	g_openProcessId = 0;

	UpdateService(SYSCALL_INDEX(ZwOpenProcess),pOriNtOpenProcess );

	// Release the writable service-table mapping, if one was created.
	if(m_MDL)
	{
		MmUnmapLockedPages(m_Mapped,m_MDL);
		IoFreeMdl(m_MDL);
	}

	return STATUS_SUCCESS;
}
Ejemplo n.º 13
0
// Driver unload: remove the ZwOpenProcess hook and release the MDL that
// provided the writable view of the syscall table.
VOID OnUnload(IN PDRIVER_OBJECT DriverObject)
{
   DbgPrint("Descargando driver...");

   // Unhook: restore the original ZwOpenProcess service entry.
   UNHOOK_SYSCALL( ZwOpenProcess, ZwOpenProcessIni, NewZwOpenProcess );

   // Release the MDL (unmap the writable syscall-table view first).
   if(g_pmdlSystemCall)
   {
      MmUnmapLockedPages(MappedSystemCallTable, g_pmdlSystemCall);
      IoFreeMdl(g_pmdlSystemCall);
   }
}
Ejemplo n.º 14
0
// Allocate a kernel buffer of dwSizeRegion bytes and expose it to the current
// process through an MDL mapping. On success the SHARED_MEMORY descriptor
// holds the kernel buffer, the MDL, the raw user page mapping, and the
// offset-adjusted user pointer. Returns FALSE on any failure, leaving the
// descriptor zeroed.
// NOTE(review): the ULONG cast used to compute m_lpUserMemory assumes 32-bit
// pointers, and MmMapLockedPages(..., UserMode) can raise an exception rather
// than return NULL — confirm both against the target platform.
BOOLEAN AllocateSharedMemory(PSHARED_MEMORY lpSharedMemory, POOL_TYPE PoolType, ULONG dwSizeRegion)
{
  if (!_MmIsAddressValid(lpSharedMemory))
    return FALSE;
  if (!dwSizeRegion)
    return FALSE;

  memset(lpSharedMemory, 0, sizeof(SHARED_MEMORY));

  // Allocate the backing kernel buffer from pool or the private heap.
  #ifndef __MISC_USE_KHEAP
  lpSharedMemory->m_lpKernelMemory = ExAllocatePool(PoolType, dwSizeRegion);
  #else
  lpSharedMemory->m_lpKernelMemory = (CHAR*) _AllocatePoolFromKHeap(hKHeapMiscDefault, dwSizeRegion);
  #endif //!__MISC_USE_KHEAP
  if (!lpSharedMemory->m_lpKernelMemory)
    return FALSE;

  lpSharedMemory->m_Mdl = IoAllocateMdl(lpSharedMemory->m_lpKernelMemory, dwSizeRegion, FALSE, FALSE, NULL);
  if (!lpSharedMemory->m_Mdl)
  {
    #ifndef __MISC_USE_KHEAP
    ExFreePool(lpSharedMemory->m_lpKernelMemory);
    #else
    FreePoolToKHeap(hKHeapMiscDefault, lpSharedMemory->m_lpKernelMemory);
    #endif //!__MISC_USE_KHEAP
    memset(lpSharedMemory, 0, sizeof(SHARED_MEMORY));
    return FALSE;
  }

  MmBuildMdlForNonPagedPool(lpSharedMemory->m_Mdl);

  // Map into the current process, then derive the byte-accurate user pointer
  // (page-aligned base plus the buffer's offset within the first page).
  lpSharedMemory->m_lpUserPage = MmMapLockedPages(lpSharedMemory->m_Mdl, UserMode);
  lpSharedMemory->m_lpUserMemory = (PVOID) (((ULONG)PAGE_ALIGN(lpSharedMemory->m_lpUserPage))+MmGetMdlByteOffset(lpSharedMemory->m_Mdl));
  if (!_MmIsAddressValid(lpSharedMemory->m_lpUserMemory))
  {
    // Roll back everything acquired so far, in reverse order.
    MmUnmapLockedPages(lpSharedMemory->m_lpUserPage, lpSharedMemory->m_Mdl);
    IoFreeMdl(lpSharedMemory->m_Mdl);
    #ifndef __MISC_USE_KHEAP
    ExFreePool(lpSharedMemory->m_lpKernelMemory);
    #else
    FreePoolToKHeap(hKHeapMiscDefault, lpSharedMemory->m_lpKernelMemory);
    #endif //!__MISC_USE_KHEAP
    memset(lpSharedMemory, 0, sizeof(SHARED_MEMORY));
    return FALSE;
  }
  lpSharedMemory->m_dwSizeRegion = dwSizeRegion;

  return TRUE;
}
Ejemplo n.º 15
0
// Driver unload: restore the original ZwQuerySystemInformation service entry
// and release the MDL that made the syscall table writable.
VOID OnUnload( IN PDRIVER_OBJECT DriverObject )
{
	DbgPrint("BHWIN: OnUnload called\n");

	// put back the old function pointer
	// NOTE(review): the PLONG/LONG casts assume 32-bit table entries, i.e. a
	// 32-bit kernel — confirm the target architecture.
	InterlockedExchange( (PLONG) &g_MappedSystemCallTable[ SYSCALL_INDEX(ZwQuerySystemInformation) ], 
						 (LONG) OldZwQuerySystemInformation);

	// Unlock and Free MDL
	if(g_MappedSystemCallTableMDL)
	{
		MmUnmapLockedPages(g_MappedSystemCallTable, g_MappedSystemCallTableMDL);
		IoFreeMdl(g_MappedSystemCallTableMDL);
	}
}
//------------------------------------------------------------------------------
void drv_unMapSocMem(void)
{
    // Nothing to do when the MDL was never created or was already released.
    if (socMemInfo_l.pMdl == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() MDL already deleted !\n", __func__);
        return;
    }

    // Remove the user-space view first, if one exists.
    if (socMemInfo_l.pUserVa != NULL)
    {
        MmUnmapLockedPages(socMemInfo_l.pUserVa, socMemInfo_l.pMdl);
    }

    // BUGFIX: the original freed the MDL only when a user mapping existed
    // (leaking it otherwise) and never cleared pMdl, so a second call would
    // operate on a freed MDL despite the guard above.
    IoFreeMdl(socMemInfo_l.pMdl);
    socMemInfo_l.pMdl = NULL;

    socMemInfo_l.pUserVa = NULL;
}
Ejemplo n.º 17
0
/*
 * Intercept a SCSI request before it is issued. For WRITE commands with a
 * cipher configured, the payload is encrypted into a shadow (inner) MDL so
 * the caller's buffer stays untouched; READ commands are only logged here.
 */
NTSTATUS Ext_StartScsiRequest(_In_ PVOID ExtContext, _In_ PEVHD_EXT_SCSI_PACKET pExtPacket)
{
    // NOTE(review): both parameters are marked UNREFERENCED_PARAMETER yet are
    // used below — the macros appear stale.
    UNREFERENCED_PARAMETER(ExtContext);
    UNREFERENCED_PARAMETER(pExtPacket);
    NTSTATUS Status = STATUS_SUCCESS;
    UCHAR opCode = pExtPacket->Srb->Cdb[0];
    PMDL pMdl = pExtPacket->pMdl;

    PEXTENSION_CONTEXT Context = ExtContext;
    // CDB fields are big-endian: sector count at offset 7, start LBA at offset 2.
    USHORT wSectors = RtlUshortByteSwap(*(USHORT *)&(pExtPacket->Srb->Cdb[7]));
    ULONG dwSectorOffset = RtlUlongByteSwap(*(ULONG *)&(pExtPacket->Srb->Cdb[2]));
    switch (opCode)
    {
    case SCSI_OP_CODE_WRITE_6:
    case SCSI_OP_CODE_WRITE_10:
    case SCSI_OP_CODE_WRITE_12:
    case SCSI_OP_CODE_WRITE_16:
        if (Context->pCipherEngine)
        {
            EXTLOG(LL_VERBOSE, "Write request: %X blocks starting from %X\n", wSectors, dwSectorOffset);

            // Switch the packet to a shadow MDL and encrypt into it.
            pExtPacket->pMdl = Ext_AllocateInnerMdl(pMdl);

            Status = Ext_CryptBlocks(Context, pMdl, pExtPacket->pMdl, pExtPacket->Srb->DataTransferLength, dwSectorOffset, TRUE);

            pMdl = pExtPacket->pMdl;

            // Drop any system mapping the crypt step left on the inner MDL.
            if (pMdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
                MmUnmapLockedPages(pMdl->MappedSystemVa, pMdl);
        }
        break;
    case SCSI_OP_CODE_READ_6:
    case SCSI_OP_CODE_READ_10:
    case SCSI_OP_CODE_READ_12:
    case SCSI_OP_CODE_READ_16:
        if (Context->pCipherEngine)
        {
            // Decryption presumably happens on request completion — confirm.
            EXTLOG(LL_VERBOSE, "Read request: %X blocks starting from %X\n", wSectors, dwSectorOffset);
        }
        break;
    }
    return Status;
}
Ejemplo n.º 18
0
//------------------------------------------------------------------------------
void drv_unMapPdoMem(UINT8* pMem_p, size_t memSize_p)
{
    UNUSED_PARAMETER(memSize_p);
    UNUSED_PARAMETER(pMem_p);

    // Nothing to do when the MDL was never created or was already released.
    if (pdoMemInfo_l.pMdl == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() MDL already deleted !\n", __func__);
        return;
    }

    // Remove the user-space view first, if one exists.
    if (pdoMemInfo_l.pUserVa != NULL)
    {
        MmUnmapLockedPages(pdoMemInfo_l.pUserVa, pdoMemInfo_l.pMdl);
    }

    // BUGFIX: the original freed the MDL only when a user mapping existed
    // (leaking it otherwise) and never cleared pMdl, so a second call would
    // operate on a freed MDL despite the guard above.
    IoFreeMdl(pdoMemInfo_l.pMdl);
    pdoMemInfo_l.pMdl = NULL;

    pdoMemInfo_l.pUserVa = NULL;
}
Ejemplo n.º 19
0
static STDCALL NTSTATUS UnmapPhysicalMemory(PVOID UserVirtualAddress){
  unsigned int i;
  unsigned int x=0;
  unsigned int alloccounttmp=alloccount;
  OutputDebugString("dhahelper: entering UnmapPhysicalMemory to unmapp 0x%x",UserVirtualAddress);
  if(!alloccount){
    OutputDebugString("dhahelper: UnmapPhysicalMemory: nothing todo -> leaving...");
    return STATUS_SUCCESS;
  }

  for(i=0;i<alloccount;i++){
    if(alloclist[i].UserVirtualAddress!=UserVirtualAddress){
      if(x!=i){
       alloclist[x].Mdl=alloclist[i].Mdl;
       alloclist[x].SystemVirtualAddress=alloclist[i].SystemVirtualAddress;
       alloclist[x].UserVirtualAddress=alloclist[i].UserVirtualAddress;
       alloclist[x].PhysMemSizeInBytes=alloclist[i].PhysMemSizeInBytes;

      }
      x++;
    }
    else if(alloclist[i].UserVirtualAddress==UserVirtualAddress){
      if(x==i){
#ifndef NO_SEH
        __try {
#endif
          MmUnmapLockedPages(alloclist[x].UserVirtualAddress, alloclist[x].Mdl);
          IoFreeMdl(alloclist[x].Mdl);
          MmUnmapIoSpace(alloclist[x].SystemVirtualAddress,alloclist[x].PhysMemSizeInBytes);
#ifndef NO_SEH
        }__except(EXCEPTION_EXECUTE_HANDLER){
          NTSTATUS           ntStatus;
          ntStatus = GetExceptionCode();
          OutputDebugString("dhahelper: UnmapPhysicalMemory failed due to exception 0x%0x (Mdl 0x%x)\n", ntStatus,alloclist[x].Mdl);
          return ntStatus;
        }
#endif
      }
      alloccounttmp--;
    }

  }
Ejemplo n.º 20
0
// Release a shared-memory region created by AllocateSharedMemory: unmap the
// user view, free the MDL and the backing kernel buffer, and scrub the
// descriptor. Returns FALSE if the descriptor or either view is invalid.
BOOLEAN FreeSharedMemory(PSHARED_MEMORY lpSharedMemory)
{
  if (!_MmIsAddressValid(lpSharedMemory))
    return FALSE;
  if (!_MmIsAddressValid(lpSharedMemory->m_lpUserMemory))
    return FALSE;
  if (!_MmIsAddressValid(lpSharedMemory->m_lpKernelMemory))
    return FALSE;

  // Remove the user-mode view, then release the MDL describing it.
  MmUnmapLockedPages(lpSharedMemory->m_lpUserPage, lpSharedMemory->m_Mdl);
  IoFreeMdl(lpSharedMemory->m_Mdl);
  // Free the backing buffer with whichever allocator produced it.
  #ifndef __MISC_USE_KHEAP
  ExFreePool(lpSharedMemory->m_lpKernelMemory);
  #else
  FreePoolToKHeap(hKHeapMiscDefault, lpSharedMemory->m_lpKernelMemory);
  #endif //!__MISC_USE_KHEAP
  // Zero the descriptor so stale pointers cannot be reused.
  memset(lpSharedMemory, 0, sizeof(SHARED_MEMORY));

  return TRUE;
}
Ejemplo n.º 21
0
NTSTATUS ultimap_continue(PULTIMAPDATAEVENT data)
/*
Called from usermode to signal that the data has been handled
*/
{
	DbgPrint("ultimap_continue\n");
	// Tear down the consumer's view of the block and release its MDL.
	MmUnmapLockedPages((PVOID)data->Address, (PMDL)data->Mdl);
	IoFreeMdl((PMDL)data->Mdl);

	ExFreePool((PVOID)data->KernelAddress); //this memory is not needed anymore


	// Mark the block slot reusable for the producer side.
	if (DataBlock)
		DataBlock[data->Block].Available=TRUE;


	KeReleaseSemaphore(&DataBlockSemaphore, 1, 1, FALSE); //Let the next block go through
	DbgPrint("Released semaphore\n");
	return STATUS_SUCCESS;	
}
Ejemplo n.º 22
0
// Does memcpy safely even if Destination is a read only region.
// Destination/Source/Length follow memcpy semantics. Returns
// STATUS_INSUFFICIENT_RESOURCES when the MDL or the writable mapping cannot
// be obtained, otherwise STATUS_SUCCESS. The MDL is released automatically on
// every path by the unique_resource wrapper.
_Use_decl_annotations_ EXTERN_C NTSTATUS UtilForceMemCpy(void *Destination,
                                                         const void *Source,
                                                         SIZE_T Length) {
  auto mdl = std::experimental::make_unique_resource(
      IoAllocateMdl(Destination, static_cast<ULONG>(Length), FALSE, FALSE,
                    nullptr),
      &IoFreeMdl);
  if (!mdl) {
    return STATUS_INSUFFICIENT_RESOURCES;
  }
  MmBuildMdlForNonPagedPool(mdl.get());

#pragma warning(push)
#pragma warning(disable : 28145)
  //
  // Following MmMapLockedPagesSpecifyCache() call causes bug check in case
  // you are using Driver Verifier. The reason is explained as follows:
  //
  // A driver must not try to create more than one system-address-space
  // mapping for an MDL. Additionally, because an MDL that is built by the
  // MmBuildMdlForNonPagedPool routine is already mapped to the system
  // address space, a driver must not try to map this MDL into the system
  // address space again by using the MmMapLockedPagesSpecifyCache routine.
  // -- MSDN
  //
  // This flag modification hacks Driver Verifier's check and prevent leading
  // bug check.
  //
  mdl.get()->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL;
  mdl.get()->MdlFlags |= MDL_PAGES_LOCKED;
#pragma warning(pop)

  // Create a second, writable system mapping of the destination pages.
  auto writableDest = MmMapLockedPagesSpecifyCache(
      mdl.get(), KernelMode, MmCached, nullptr, FALSE, NormalPagePriority);
  if (!writableDest) {
    return STATUS_INSUFFICIENT_RESOURCES;
  }
  memcpy(writableDest, Source, Length);
  MmUnmapLockedPages(writableDest, mdl.get());
  return STATUS_SUCCESS;
}
Ejemplo n.º 23
0
/*
 * Zero the PE header of the image mapped at ImageBase for the current
 * process, writing through a fresh MDL mapping so copy-on-write applies.
 */
VOID ZeroPEHeader(
	ULONG ImageBase
	)
{
		PIMAGE_DOS_HEADER pDosHeader;
		char *pNtHeader;
		PIMAGE_OPTIONAL_HEADER pOptinalHeader;
		ULONG HeaderSize=0;
		PMDL pHeaderMdl;
		PVOID NewBuffer;
		__try
		{
			/* Walk DOS header -> NT headers to find SizeOfHeaders. */
			pDosHeader=(PIMAGE_DOS_HEADER)ImageBase;
			pNtHeader=(char*)ImageBase+pDosHeader->e_lfanew;
			pOptinalHeader=(PIMAGE_OPTIONAL_HEADER)(pNtHeader+4+sizeof(IMAGE_FILE_HEADER));
			HeaderSize=pOptinalHeader->SizeOfHeaders;
			dprintf("Image Header Size=0x%X\n",HeaderSize);
			pHeaderMdl=IoAllocateMdl((PVOID)ImageBase,HeaderSize,FALSE,FALSE,NULL);
			dprintf("pHeaderMdl=0x%08X\n",pHeaderMdl);
			/* BUGFIX: IoAllocateMdl can fail; the original dereferenced the
			   result unconditionally. */
			if(pHeaderMdl==NULL)
			{
				DbgPrint("Error: IoAllocateMdl failed.\n");
				return ;
			}
			NewBuffer=MmGetSystemAddressForMdl(pHeaderMdl);
			dprintf("NewBuffer=0x%08X\n",NewBuffer);
			/* BUGFIX: the mapping may fail too; free the MDL before bailing. */
			if(NewBuffer==NULL)
			{
				IoFreeMdl(pHeaderMdl);
				return ;
			}
			RtlZeroMemory(NewBuffer,HeaderSize);
			MmUnmapLockedPages(NewBuffer,pHeaderMdl);
			IoFreeMdl(pHeaderMdl);
	
			/* To hit every process instead, the direct write below could be
			   used — note copy-on-write would then no longer apply. */
			/*
			WPOFF();
			RtlZeroMemory((char*)ImageBase,HeaderSize);
			WPON();
			*/
		}
		__except(EXCEPTION_EXECUTE_HANDLER)
		{
			DbgPrint("Error occured while zero pe header.\n");
			return ;
		}
}
Ejemplo n.º 24
0
//-----------------------------------------------------------------------------
// Unload driver: restore the hooked ZwLoadDriver entry, release the MDL that
// made the syscall table writable, and remove any image-load callbacks.
//-----------------------------------------------------------------------------
VOID OnUnload(IN PDRIVER_OBJECT DriverObject)
{
	DbgPrint("---------------- Driver Unloaded\n");

	// Atomically restore the original ZwLoadDriver service pointer.
	// NOTE(review): the unsigned int casts assume 32-bit pointers — confirm.
	InterlockedExchange(
		(unsigned int *) &syscall_tbl[SYSCALL_INDEX(ZwLoadDriver)], 
		(unsigned int) orig_ZwLoadDriver);

	// Drop the writable syscall-table mapping and free its MDL.
	if(mdl_sys_call)
	{
		MmUnmapLockedPages(syscall_tbl, mdl_sys_call);
		IoFreeMdl(mdl_sys_call);
	}

	// remove callback
#if BREAK_POINT
	PsRemoveLoadImageNotifyRoutine(add_one_time_bp);
#endif

#if DATA_MINING
	PsRemoveLoadImageNotifyRoutine(add_hooks_for_data_mining);
#endif
}
Ejemplo n.º 25
0
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_LOW_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
Ejemplo n.º 26
0
/*
 * Release the native NT resources backing a memory object, dispatching on
 * the object's type. Returns VINF_SUCCESS, or VERR_INTERNAL_ERROR for
 * unsupported/unknown types.
 */
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
    case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
        if (pMemNt->fAllocatedPagesForMdl)
        {
            /* Unmap first, then return the pages and the MDL to the system. */
            Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
            pMemNt->Core.pv = NULL;
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }

            MmFreePagesFromMdl(pMemNt->apMdls[0]);
            ExFreePool(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;
        }
#endif
        AssertFailed();
        break;

    case RTR0MEMOBJTYPE_PAGE:
        Assert(pMemNt->Core.pv);
        ExFreePool(pMemNt->Core.pv);
        pMemNt->Core.pv = NULL;

        Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
        IoFreeMdl(pMemNt->apMdls[0]);
        pMemNt->apMdls[0] = NULL;
        pMemNt->cMdls = 0;
        break;

    case RTR0MEMOBJTYPE_CONT:
        Assert(pMemNt->Core.pv);
        MmFreeContiguousMemory(pMemNt->Core.pv);
        pMemNt->Core.pv = NULL;

        Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
        IoFreeMdl(pMemNt->apMdls[0]);
        pMemNt->apMdls[0] = NULL;
        pMemNt->cMdls = 0;
        break;

    case RTR0MEMOBJTYPE_PHYS:
        /* rtR0MemObjNativeEnterPhys? */
        if (!pMemNt->Core.u.Phys.fAllocated)
        {
#ifndef IPRT_TARGET_NT4
            Assert(!pMemNt->fAllocatedPagesForMdl);
#endif
            /* Nothing to do here. */
            break;
        }
    /* fall thru */

    case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
        if (pMemNt->fAllocatedPagesForMdl)
        {
            MmFreePagesFromMdl(pMemNt->apMdls[0]);
            ExFreePool(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;
        }
#endif
        AssertFailed();
        break;

    case RTR0MEMOBJTYPE_LOCK:
        if (pMemNt->pvSecureMem)
        {
            MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
            pMemNt->pvSecureMem = NULL;
        }
        /* Locked objects may span several MDLs; unlock and free each one. */
        for (uint32_t i = 0; i < pMemNt->cMdls; i++)
        {
            MmUnlockPages(pMemNt->apMdls[i]);
            IoFreeMdl(pMemNt->apMdls[i]);
            pMemNt->apMdls[i] = NULL;
        }
        break;

    case RTR0MEMOBJTYPE_RES_VIRT:
        /*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                    {
                    }
                    else
                    {
                    }*/
        AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
        return VERR_INTERNAL_ERROR;
        break;

    case RTR0MEMOBJTYPE_MAPPING:
    {
        /* A mapping borrows its MDL (or I/O space) from its parent object. */
        Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
        PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
        Assert(pMemNtParent);
        if (pMemNtParent->cMdls)
        {
            Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
            Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                        ||   pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
            MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
        }
        else
        {
            Assert(     pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                        &&   !pMemNtParent->Core.u.Phys.fAllocated);
            Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
            MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
        }
        pMemNt->Core.pv = NULL;
        break;
    }

    default:
        AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
        return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
Ejemplo n.º 27
0
/*
 * Copy one queued datagram (payload and, optionally, the remote address)
 * into the locked user buffers described by the receive IRP, fill in the
 * IRP status fields, and release the datagram unless this is a PEEK.
 *
 * FCB              - socket control block owning the receive queue
 * Irp              - receive IRP whose locked buffer array is filled
 * DatagramRecv     - queued datagram used to satisfy the request
 * TotalBytesCopied - out: number of payload bytes actually copied
 *
 * Returns STATUS_SUCCESS when the whole datagram fit into the first user
 * buffer, STATUS_BUFFER_OVERFLOW when only part of it could be copied.
 */
static NTSTATUS NTAPI
SatisfyPacketRecvRequest( PAFD_FCB FCB, PIRP Irp,
                         PAFD_STORED_DATAGRAM DatagramRecv,
                         PUINT TotalBytesCopied ) {
    NTSTATUS Status = STATUS_SUCCESS;
    PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
    PAFD_RECV_INFO RecvReq =
    GetLockedData(Irp, IrpSp);
    UINT BytesToCopy = 0, BytesAvailable = DatagramRecv->Len, AddrLen = 0;
    PAFD_MAPBUF Map;
    BOOLEAN ExtraBuffers = CheckUnlockExtraBuffers(FCB, IrpSp);

    /* Define the out parameter up front: if the data-buffer MDL below is
     * missing, the copy is skipped entirely and the comparison against
     * DatagramRecv->Len would otherwise read an uninitialized value. */
    *TotalBytesCopied = 0;

    /* The MDL map array lives directly behind the user buffer array
     * (plus the extra address/length buffers when those were locked). */
    Map = (PAFD_MAPBUF)(RecvReq->BufferArray +
                        RecvReq->BufferCount +
                        (ExtraBuffers ? EXTRA_LOCK_BUFFERS : 0));

    BytesToCopy = MIN( RecvReq->BufferArray[0].len, BytesAvailable );

    AFD_DbgPrint(MID_TRACE,("BytesToCopy: %u len %u\n", BytesToCopy,
                            RecvReq->BufferArray[0].len));

    if( Map[0].Mdl ) {
        /* Copy the address */
        if( ExtraBuffers && Map[1].Mdl && Map[2].Mdl ) {
            AFD_DbgPrint(MID_TRACE,("Checking TAAddressCount\n"));

            if( DatagramRecv->Address->TAAddressCount != 1 ) {
                AFD_DbgPrint
                (MIN_TRACE,
                 ("Weird address count %d\n",
                  DatagramRecv->Address->TAAddressCount));
            }

            AFD_DbgPrint(MID_TRACE,("Computing addr len\n"));

            /* AddressType (USHORT) plus the address body, clamped to the
             * size of the user's address buffer. */
            AddrLen = MIN(DatagramRecv->Address->Address->AddressLength +
                          sizeof(USHORT),
                          RecvReq->BufferArray[1].len);

            AFD_DbgPrint(MID_TRACE,("Copying %u bytes of address\n", AddrLen));

            Map[1].BufferAddress = MmMapLockedPages( Map[1].Mdl, KernelMode );

            AFD_DbgPrint(MID_TRACE,("Done mapping, copying address\n"));

            RtlCopyMemory( Map[1].BufferAddress,
                          &DatagramRecv->Address->Address->AddressType,
                          AddrLen );

            MmUnmapLockedPages( Map[1].BufferAddress, Map[1].Mdl );

            AFD_DbgPrint(MID_TRACE,("Copying address len\n"));

            /* Third buffer receives the length of the copied address. */
            Map[2].BufferAddress = MmMapLockedPages( Map[2].Mdl, KernelMode );
            *((PINT)Map[2].BufferAddress) = AddrLen;
            MmUnmapLockedPages( Map[2].BufferAddress, Map[2].Mdl );
        }

        AFD_DbgPrint(MID_TRACE,("Mapping data buffer pages\n"));

        Map[0].BufferAddress = MmMapLockedPages( Map[0].Mdl, KernelMode );

        AFD_DbgPrint(MID_TRACE,("Buffer %d: %p:%u\n",
                                0,
                                Map[0].BufferAddress,
                                BytesToCopy));

        RtlCopyMemory( Map[0].BufferAddress,
                      DatagramRecv->Buffer,
                      BytesToCopy );

        MmUnmapLockedPages( Map[0].BufferAddress, Map[0].Mdl );

        *TotalBytesCopied = BytesToCopy;
    }

    if (*TotalBytesCopied == DatagramRecv->Len)
    {
        /* We copied the whole datagram */
        Status = Irp->IoStatus.Status = STATUS_SUCCESS;
    }
    else
    {
        /* We only copied part of the datagram */
        Status = Irp->IoStatus.Status = STATUS_BUFFER_OVERFLOW;
    }

    Irp->IoStatus.Information = *TotalBytesCopied;

    /* A PEEK leaves the datagram queued; otherwise consume and free it. */
    if (!(RecvReq->TdiFlags & TDI_RECEIVE_PEEK))
    {
        FCB->Recv.Content -= DatagramRecv->Len;
        ExFreePool( DatagramRecv->Address );
        ExFreePool( DatagramRecv );
    }

    AFD_DbgPrint(MID_TRACE,("Done\n"));

    return Status;
}
Ejemplo n.º 28
0
/*
 * @implemented
 *
 * Undo MmProbeAndLockPages for the given MDL: tear down any system-space
 * mapping created via MmGetSystemAddressForMdl, dereference every physical
 * page described by the MDL, adjust the owning process' locked-page
 * accounting, and clear MDL_PAGES_LOCKED.
 *
 * For regular (non-I/O-space) MDLs this runs in two passes: the first pass
 * (outside the PFN lock) translates each PFN in the MDL's page array into
 * its PMMPFN entry in place; the second pass (under the PFN lock) drops one
 * reference/lock count per entry. A LIST_HEAD value in the page array acts
 * as an end-of-array sentinel for MDLs describing fewer pages than the span
 * suggests.
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks: the MDL must describe locked pages, must not be
    // nonpaged-pool-backed or partial, and must describe at least one byte
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the process associated and capture the flags which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count (the PFN array starts directly after the MDL header)
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page: I/O-space PFNs may lie outside the PFN database,
        // so each one is looked up and only dereferenced when it is managed
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out (LIST_HEAD is the end-of-array sentinel)
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // First pass (lock-free): convert each PFN in the MDL array into its
    // PMMPFN pointer so the second pass can run quickly under the PFN lock
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer to the start of the (now PMMPFN-valued) array
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
Ejemplo n.º 29
0
/* Tear down the kernel-mode view of a user buffer: km_addr must be the
   address previously returned when the pages described by mdl were mapped. */
__inline VOID
ssh_iodevice_unmap_buffer(PVOID km_addr, PMDL mdl)
{
  MmUnmapLockedPages(km_addr, mdl);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
/**
Map the timesync (SoC) shared memory into the calling process.

Fetches the kernel-side timesync shared memory, builds an MDL over it and
maps the pages into user space with MmMapLockedPagesSpecifyCache. The MDL
and both mappings are cached in socMemInfo_l for the later unmap call.

\param[out] ppUserMem_p  Receives the user-space address of the mapping.
\param[in,out] pMemSize_p  In: size requested by the caller; out: actual
                           size of the shared memory (0 on size mismatch).

\return kErrorOk on success, kErrorNoResource on any failure.
*/
//------------------------------------------------------------------------------
tOplkError drv_mapSocMem(void** ppUserMem_p,
                         size_t* pMemSize_p)
{
    if ((ppUserMem_p == NULL) || (pMemSize_p == NULL))
    {
        DEBUG_LVL_ERROR_TRACE("%s() Invalid pointer !\n", __func__);
        return kErrorNoResource;
    }

    // Get SoC memory
    socMemInfo_l.pKernelVa = timesynckcal_getSharedMemory();
    if (socMemInfo_l.pKernelVa == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Timesync shared memory is NULL !", __func__);
        return kErrorNoResource;
    }

    // Set SoC memory size
    socMemInfo_l.memSize = sizeof(tTimesyncSharedMemory);

    // Reject requests larger than the kernel-side shared memory
    if (*pMemSize_p > socMemInfo_l.memSize)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Higher memory requested (Kernel:%uz User:%uz) !\n",
                              __func__,
                              socMemInfo_l.memSize,
                              *pMemSize_p);
        *pMemSize_p = 0;
        return kErrorNoResource;
    }

    // Allocate new MDL pointing to SoC memory
    socMemInfo_l.pMdl = IoAllocateMdl(socMemInfo_l.pKernelVa,
                                      socMemInfo_l.memSize,
                                      FALSE,
                                      FALSE,
                                      NULL);

    if (socMemInfo_l.pMdl == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Error allocating MDL !\n", __func__);
        return kErrorNoResource;
    }

    // Update the MDL with physical addresses
    MmBuildMdlForNonPagedPool(socMemInfo_l.pMdl);

    // Map the physical pages described by the MDL into user space.
    // With AccessMode == UserMode this call raises an exception on failure
    // instead of returning NULL, so it must be guarded with SEH.
    socMemInfo_l.pUserVa = NULL;
    __try
    {
        socMemInfo_l.pUserVa = MmMapLockedPagesSpecifyCache(socMemInfo_l.pMdl,    // MDL
                                                            UserMode,             // Mode
                                                            MmCached,             // Caching
                                                            NULL,                 // Address
                                                            FALSE,                // Bug-check?
                                                            NormalPagePriority);  // Priority
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        socMemInfo_l.pUserVa = NULL;
    }

    if (socMemInfo_l.pUserVa == NULL)
    {
        // Nothing was mapped, so only the MDL has to be released here
        // (the previous MmUnmapLockedPages(NULL, ...) call was invalid).
        IoFreeMdl(socMemInfo_l.pMdl);
        socMemInfo_l.pMdl = NULL;
        DEBUG_LVL_ERROR_TRACE("%s() Error mapping MDL !\n", __func__);
        return kErrorNoResource;
    }

    *ppUserMem_p = socMemInfo_l.pUserVa;
    *pMemSize_p = socMemInfo_l.memSize;

    DEBUG_LVL_ALWAYS_TRACE("Mapped SoC memory info U:%p K:%p size:%uz\n",
                           socMemInfo_l.pUserVa,
                           socMemInfo_l.pKernelVa,
                           socMemInfo_l.memSize);

    return kErrorOk;
}