Example #1
static NTSTATUS sys_thread(void *null)
{
	VCPU_DEBUG_RAW("waiting a bit\n");
	sleep_ms(2000);

	int m = ksm_hook_page(MmMapLockedPagesSpecifyCache, hk_MmMapLockedPagesSpecifyCache);
	if (m >= 0) {
		VCPU_DEBUG("hooked: %d\n", m);
		/* Call through the hooked function with sentinel arguments; the
		 * hook (hk_MmMapLockedPagesSpecifyCache) presumably recognizes the
		 * 0xdeadbeef MDL and returns 0xbaadf00d instead of really mapping. */
		if (MmMapLockedPagesSpecifyCache((PMDLX)0xdeadbeef,
						 KernelMode,
						 MmNonCached,
						 (PVOID)0x00000000,
						 TRUE,
						 NormalPagePriority) == (PVOID)0xbaadf00d)
			VCPU_DEBUG_RAW("We succeeded\n");
		else
			VCPU_DEBUG_RAW("we failed\n");
		sleep_ms(2000);

		/* Trigger #VE: read through the hooked page and compare the live
		   bytes against the copy stored in the hook record. */
		struct page_hook_info *phi = ksm_find_hook(m);
		u8 *r = (u8 *)(uintptr_t)MmMapLockedPagesSpecifyCache;
		VCPU_DEBUG("Equality: %d\n", memcmp(r, phi->data, phi->size));
		return ksm_unhook_page(m);
	}

	return -m;	/* m < 0 here: hooking failed */
}
Example #2
//-----------------------------------------------------------------------------
// MAIN
//-----------------------------------------------------------------------------
NTSTATUS DriverEntry(PDRIVER_OBJECT driver_object, 
	                 PUNICODE_STRING registry_path)
{
	NTSTATUS ret = STATUS_SUCCESS;	// stays SUCCESS if no load-image callback is registered below

	if (!driver_object)
	{
		DbgPrint("\n!!! ERROR: invalid driver_object in DriverEntry()\n");
		return STATUS_UNSUCCESSFUL;
	}
	driver_object->DriverUnload  = OnUnload;

	DbgPrint("---------------- Driver Loaded\n");

	// allocate a memory descriptor list (MDL) describing the service table
	mdl_sys_call = IoAllocateMdl(KeServiceDescriptorTable.ServiceTableBase,
	                             KeServiceDescriptorTable.NumberOfServices * 4, // 4 == size of a table entry on x86
	                             FALSE, FALSE, NULL);
	if (!mdl_sys_call)
	{
		DbgPrint("\n!!! ERROR: invalid mdl in DriverEntry()\n");
		return STATUS_UNSUCCESSFUL;
	}

	MmBuildMdlForNonPagedPool(mdl_sys_call);

	mdl_sys_call->MdlFlags = mdl_sys_call->MdlFlags | MDL_MAPPED_TO_SYSTEM_VA;

	// map the physical pages
	syscall_tbl = MmMapLockedPagesSpecifyCache(mdl_sys_call, KernelMode,
	                                           MmNonCached, NULL, FALSE,
	                                           HighPagePriority);

	if (!syscall_tbl)
	{
		IoFreeMdl(mdl_sys_call);	// don't leak the MDL on failure
		DbgPrint("\n!!! ERROR: invalid mapped syscall table in DriverEntry()\n");
		return STATUS_UNSUCCESSFUL;
	}

	hook_syscalls();

	debug("register our callback for when our target proc is loaded:\n %ws\n\n",
		     target_file_loc);

#if BREAK_POINT
	// register a callback func that is invoked when our target proc is loaded
	ret = PsSetLoadImageNotifyRoutine(add_one_time_bp);
#endif

#if DATA_MINING
	ret = PsSetLoadImageNotifyRoutine(add_hooks_for_data_mining);
#endif

	if (ret != STATUS_SUCCESS)
		DbgPrint("\n!!! ERROR: PsSetLoadImageNotifyRoutine()\n\n");

	return STATUS_SUCCESS;
}
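
DriverEntry registers OnUnload but the excerpt doesn't show it. A minimal sketch of a matching unload routine, assuming unhook_syscalls() is the inverse of hook_syscalls() (that name is illustrative, not from the original driver):

VOID OnUnload(PDRIVER_OBJECT driver_object)
{
	UNREFERENCED_PARAMETER(driver_object);

	// undo the SSDT hooks before tearing down the writable mapping
	unhook_syscalls();	// hypothetical inverse of hook_syscalls()

	if (syscall_tbl)
		MmUnmapLockedPages(syscall_tbl, mdl_sys_call);
	if (mdl_sys_call)
		IoFreeMdl(mdl_sys_call);

#if BREAK_POINT
	PsRemoveLoadImageNotifyRoutine(add_one_time_bp);
#endif
#if DATA_MINING
	PsRemoveLoadImageNotifyRoutine(add_hooks_for_data_mining);
#endif

	DbgPrint("---------------- Driver Unloaded\n");
}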
Example #3
//------------------------------------------------------------------------------
tOplkError drv_mapPdoMem(UINT8** ppKernelMem_p, UINT8** ppUserMem_p,
                         size_t* pMemSize_p)
{
    tOplkError      ret;

    // Get PDO memory
    ret = pdokcal_getPdoMemRegion((UINT8**)&pdoMemInfo_l.pKernelVa,
                                  &pdoMemInfo_l.memSize);

    if (ret != kErrorOk || pdoMemInfo_l.pKernelVa == NULL)
        return kErrorNoResource;

    if (*pMemSize_p > pdoMemInfo_l.memSize)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Requested size exceeds kernel PDO memory (kernel: %u, user: %u)!\n",
                              __func__, (UINT)pdoMemInfo_l.memSize, (UINT)*pMemSize_p);
        *pMemSize_p = 0;
        return kErrorNoResource;
    }

    // Allocate a new MDL pointing to the PDO memory
    pdoMemInfo_l.pMdl = IoAllocateMdl(pdoMemInfo_l.pKernelVa, (ULONG)pdoMemInfo_l.memSize,
                                      FALSE, FALSE, NULL);

    if (pdoMemInfo_l.pMdl == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Error allocating MDL !\n", __func__);
        return kErrorNoResource;
    }

    // Update the MDL with physical addresses
    MmBuildMdlForNonPagedPool(pdoMemInfo_l.pMdl);

    // Map the memory into user space; a user-mode mapping can raise an
    // exception, so MSDN requires wrapping the call in SEH
    __try
    {
        pdoMemInfo_l.pUserVa = MmMapLockedPagesSpecifyCache(pdoMemInfo_l.pMdl,    // MDL
                                                            UserMode,             // Mode
                                                            MmCached,             // Caching
                                                            NULL,                 // Address
                                                            FALSE,                // Bug-check?
                                                            NormalPagePriority);  // Priority
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        pdoMemInfo_l.pUserVa = NULL;
    }

    if (pdoMemInfo_l.pUserVa == NULL)
    {
        IoFreeMdl(pdoMemInfo_l.pMdl);
        DEBUG_LVL_ERROR_TRACE("%s() Error mapping MDL !\n", __func__);
        return kErrorNoResource;
    }

    *ppKernelMem_p = pdoMemInfo_l.pKernelVa;
    *ppUserMem_p = pdoMemInfo_l.pUserVa;
    *pMemSize_p = pdoMemInfo_l.memSize;

    TRACE("Mapped memory info U:%p K:%p size %x", pdoMemInfo_l.pUserVa,
                                                 (UINT8*)pdoMemInfo_l.pKernelVa,
                                                 pdoMemInfo_l.memSize);
    return kErrorOk;
}
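
The matching teardown is not part of this excerpt; a minimal sketch mirroring the setup order (the name drv_unmapPdoMem is assumed):

//------------------------------------------------------------------------------
void drv_unmapPdoMem(void)
{
    // unmap the user-space view first, then release the MDL
    if (pdoMemInfo_l.pUserVa != NULL)
        MmUnmapLockedPages(pdoMemInfo_l.pUserVa, pdoMemInfo_l.pMdl);

    if (pdoMemInfo_l.pMdl != NULL)
        IoFreeMdl(pdoMemInfo_l.pMdl);

    pdoMemInfo_l.pUserVa = NULL;
    pdoMemInfo_l.pMdl = NULL;
}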
Example #4
/* Returns a kernel-space mapping for an already-built MDL: reuses the
   existing system address when one exists, otherwise creates a new one. */
__inline PVOID
ssh_iodevice_map_buffer(PMDL mdl)
{
  if (mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | MDL_SOURCE_IS_NONPAGED_POOL))
    return (mdl->MappedSystemVa);
  else 
    return (MmMapLockedPagesSpecifyCache(mdl, KernelMode, MmCached,
                                         NULL, FALSE, LowPagePriority));
}
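
The helper hands back an existing system VA when one is present, so only a mapping it actually created may be released. A minimal counterpart sketch (the function name and the caller-tracked 'created' flag are assumptions, not from the original source):

__inline VOID
ssh_iodevice_unmap_buffer(PVOID va, PMDL mdl, BOOLEAN created)
{
  /* 'created' must be recorded by the caller: TRUE only when
     ssh_iodevice_map_buffer fell through to the map call above. */
  if (created && va != NULL)
    MmUnmapLockedPages(va, mdl);
}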
Example #5
static VOID
BalloonFreePagesFromMdl(
    IN  MDL         *Mdl,
    IN  BOOLEAN     Check
    )
{
    volatile UCHAR  *Mapping;
    ULONG           Index;

    if (!Check)
        goto done;

    // Sanity check:
    //
    // Make sure that things written to the page really do stick.
    // If the page is still ballooned out at the hypervisor level
    // then writes will be discarded and reads will give back
    // all 1s.

    Mapping = MmMapLockedPagesSpecifyCache(Mdl,
                                           KernelMode,
                                           MmCached,
                                           NULL,
                                           FALSE,
                                           LowPagePriority);
    if (Mapping == NULL) {
        // Windows couldn't map the memory. That's kind of sad, but not
        // really an error: it might be that we're very low on kernel
        // virtual address space.
        goto done;
    }

    // Write and read the first byte in each page to make sure it's backed
    // by RAM.
    XM_ASSERT((Mdl->ByteCount & (PAGE_SIZE - 1)) == 0);

    for (Index = 0; Index < Mdl->ByteCount >> PAGE_SHIFT; Index++)
        Mapping[Index << PAGE_SHIFT] = (UCHAR)Index;

    for (Index = 0; Index < Mdl->ByteCount >> PAGE_SHIFT; Index++) {
        if (Mapping[Index << PAGE_SHIFT] != (UCHAR)Index) {
            PFN_NUMBER  *Array = MmGetMdlPfnArray(Mdl);

            TraceCritical(("%s: PFN[%d] (%p): read 0x%02x, expected 0x%02x\n",
                           __FUNCTION__, Index, (PVOID)Array[Index],
                           Mapping[Index << PAGE_SHIFT], (UCHAR)Index));
            XM_BUG();
        }
    }

    // Release the transient mapping before handing the pages back;
    // MmFreePagesFromMdl must not be called while the pages are still mapped.
    MmUnmapLockedPages((PVOID)Mapping, Mdl);

done:
    MmFreePagesFromMdl(Mdl);
}
Example #6
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}
Example #7
GENERICAPI PVOID GenericGetSystemAddressForMdl(PMDL mdl)
    {                            // GenericGetSystemAddressForMdl

    if(!mdl)
        return NULL;

    CSHORT oldfail = mdl->MdlFlags & MDL_MAPPING_CAN_FAIL;
    mdl->MdlFlags |= MDL_MAPPING_CAN_FAIL;

    PVOID address = MmMapLockedPagesSpecifyCache(mdl, KernelMode, MmCached, NULL, FALSE, NormalPagePriority);

    if(!oldfail)
        mdl->MdlFlags &= ~MDL_MAPPING_CAN_FAIL;

    return address;
    }                            // GenericGetSystemAddressForMdl
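
This helper mirrors the WDM macro MmGetSystemAddressForMdlSafe. A hedged usage sketch (the IRP context is illustrative):

    PVOID address = GenericGetSystemAddressForMdl(Irp->MdlAddress);
    if (!address)
        return STATUS_INSUFFICIENT_RESOURCES;	// mapping failed instead of bugchecking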
Example #8
//----------------------------------------------------------------------
//
// RegmonMapServiceTable
//
// If we are running on Whistler then we have
// to double map the system service table to get around the 
// fact that the system service table is write-protected on systems
// with > 128MB memory. Since there's no harm in always double mapping,
// we always do it, regardless of whether or not we are on Whistler.
//
//----------------------------------------------------------------------
PVOID *
RegmonMapServiceTable(
    SERVICE_HOOK_DESCRIPTOR **HookDescriptors
    )
{
    //
    // Allocate an array to store original function addresses in. This
    // makes us play well with other hookers.
    //
    *HookDescriptors = (SERVICE_HOOK_DESCRIPTOR *) ExAllocatePool( NonPagedPool,
                          KeServiceDescriptorTable->Limit * sizeof(SERVICE_HOOK_DESCRIPTOR));
    if( !*HookDescriptors ) {

        return NULL;
    }
    memset( *HookDescriptors, 0, 
            KeServiceDescriptorTable->Limit * sizeof(SERVICE_HOOK_DESCRIPTOR));

    //
    // Build an MDL that describes the system service table function 
    // pointers array.
    //
    KdPrint(("Reglib: KeServiceDescriptorTable: %I64x Pointers: %I64x Limit: %d\n",
              KeServiceDescriptorTable, KeServiceDescriptorTable->ServicePointers,
              KeServiceDescriptorTable->Limit ));
    KeServiceTableMdl = MmCreateMdl( NULL, KeServiceDescriptorTable->ServicePointers, 
                                     KeServiceDescriptorTable->Limit * sizeof(PVOID));
    if( !KeServiceTableMdl ) {

        ExFreePool( *HookDescriptors );   // don't leak the descriptor array
        return NULL;
    }

    //
    // Fill in the physical pages and then double-map the description. Note
    // that MmMapLockedPages is obsolete as of Win2K and has been replaced
    // with MmMapLockedPagesSpecifyCache. However, we use the same driver
    // on all NT platforms, so we use it anyway.
    //
    MmBuildMdlForNonPagedPool( KeServiceTableMdl );
    KeServiceTableMdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
#if defined(_M_IA64)
    return MmMapLockedPagesSpecifyCache( KeServiceTableMdl, KernelMode, 
                                         MmCached, NULL, FALSE, NormalPagePriority );
#else
    return MmMapLockedPages( KeServiceTableMdl, KernelMode );
#endif
}
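
A hedged sketch of how the returned double mapping might be used to install a hook (the service index, hook routine, and descriptor field name are illustrative, not from Regmon):

    SERVICE_HOOK_DESCRIPTOR *HookDescriptors;
    PVOID *MappedTable = RegmonMapServiceTable( &HookDescriptors );
    if( MappedTable ) {
        // swap one service pointer through the writable alias
        HookDescriptors[SYSCALL_INDEX].OriginalRoutine = MappedTable[SYSCALL_INDEX];
        InterlockedExchangePointer( &MappedTable[SYSCALL_INDEX], (PVOID) HookRoutine );
    }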
Example #9
// Does memcpy safely even if Destination is a read only region.
_Use_decl_annotations_ EXTERN_C NTSTATUS UtilForceMemCpy(void *Destination,
                                                         const void *Source,
                                                         SIZE_T Length) {
  auto mdl = std::experimental::make_unique_resource(
      IoAllocateMdl(Destination, static_cast<ULONG>(Length), FALSE, FALSE,
                    nullptr),
      &IoFreeMdl);
  if (!mdl) {
    return STATUS_INSUFFICIENT_RESOURCES;
  }
  MmBuildMdlForNonPagedPool(mdl.get());

#pragma warning(push)
#pragma warning(disable : 28145)
  //
  // The following MmMapLockedPagesSpecifyCache() call causes a bug check
  // when Driver Verifier is active. The reason, per MSDN:
  //
  // A driver must not try to create more than one system-address-space
  // mapping for an MDL. Additionally, because an MDL that is built by the
  // MmBuildMdlForNonPagedPool routine is already mapped to the system
  // address space, a driver must not try to map this MDL into the system
  // address space again by using the MmMapLockedPagesSpecifyCache routine.
  // -- MSDN
  //
  // Modifying the flags below sidesteps Driver Verifier's check and
  // prevents that bug check.
  //
  mdl.get()->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL;
  mdl.get()->MdlFlags |= MDL_PAGES_LOCKED;
#pragma warning(pop)

  auto writableDest = MmMapLockedPagesSpecifyCache(
      mdl.get(), KernelMode, MmCached, nullptr, FALSE, NormalPagePriority);
  if (!writableDest) {
    return STATUS_INSUFFICIENT_RESOURCES;
  }
  memcpy(writableDest, Source, Length);
  MmUnmapLockedPages(writableDest, mdl.get());
  return STATUS_SUCCESS;
}
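
A hedged usage sketch; 'readOnlyAddress' stands for any read-only kernel address the driver legitimately needs to patch:

  UCHAR newByte = 0x90;
  auto status = UtilForceMemCpy(readOnlyAddress, &newByte, sizeof(newByte));
  if (!NT_SUCCESS(status)) {
    // STATUS_INSUFFICIENT_RESOURCES: the MDL could not be allocated or mapped
  }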
Example #10
//----------------------------------------------------------------------
//
// RegmonMapMem
//
// Double maps memory for writing.
//
//----------------------------------------------------------------------
PVOID 
RegmonMapMem( 
    PVOID Pointer, 
    ULONG Length, 
    PMDL *MapMdl 
    )
{
    *MapMdl = MmCreateMdl( NULL, Pointer, Length );
    if( !(*MapMdl)) {

        return NULL;
    }

    MmBuildMdlForNonPagedPool( *MapMdl );
    (*MapMdl)->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
#if defined(_M_IA64)
    return MmMapLockedPagesSpecifyCache( *MapMdl, KernelMode, 
                                         MmCached, NULL, FALSE, NormalPagePriority );
#else
    return MmMapLockedPages( *MapMdl, KernelMode );
#endif
}
Example #11
co_rc_t co_os_userspace_map(void *address, unsigned int pages, void **user_address_out, void **handle_out)
{
	void *user_address;
	unsigned long memory_size = ((unsigned long)pages) << CO_ARCH_PAGE_SHIFT;
	PMDL mdl;

	mdl = IoAllocateMdl(address, memory_size, FALSE, FALSE, NULL);
	if (!mdl) 
		return CO_RC(ERROR);
	
	MmBuildMdlForNonPagedPool(mdl);
	/* MSDN recommends wrapping user-mode mappings in SEH (__try/__except);
	   omitted here, as in the original. */
	user_address = MmMapLockedPagesSpecifyCache(mdl, UserMode, MmCached, NULL, FALSE, HighPagePriority);
	if (!user_address) {
		IoFreeMdl(mdl);
		return CO_RC(ERROR);
	}
	
	*handle_out = (void *)mdl;
	*user_address_out = (char *)PAGE_ALIGN(user_address) + MmGetMdlByteOffset(mdl);
	
	return CO_RC(OK);
}
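
A hedged sketch of the matching teardown (the name and shape are assumed; the real coLinux counterpart may differ). 'user_address' must be the value returned by MmMapLockedPagesSpecifyCache, which the handle/MDL pair lets us recover here via the out-parameters saved above:

void co_os_userspace_unmap(void *user_address, void *handle)
{
	PMDL mdl = (PMDL)handle;

	if (mdl) {
		MmUnmapLockedPages(user_address, mdl);
		IoFreeMdl(mdl);
	}
}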
Example #12
struct Send_Packet_Data*
Packet2PacketData(PNDIS_PACKET Packet) {

	UINT buffer_count;
	PNDIS_BUFFER buffer;
	UINT packet_len;
	UINT offset;
	PVOID addr;
	UINT len;
	struct Send_Packet_Data* packet_data;
	packet_data = NULL;
	
	NdisQueryPacket(Packet,
		NULL,
		&buffer_count,
		&buffer,
		&packet_len);
	if (!buffer_count ||
		!buffer)
		goto error_exit;

	if (buffer_count > 1) {
		packet_data = PreparePacketData(buffer_count,
			packet_len);
		if (!packet_data)
			goto error_exit;
		
		offset = 0;
		while(1) {
			NdisQueryBufferSafe(buffer,
				&addr,
				&len,
				NormalPagePriority);
			if (!addr ||
				!len)
				goto error_exit;
		
			RtlCopyMemory(packet_data->m_data+offset, 
				addr, 
				len);
			offset += len;
		
			NdisGetNextBuffer(buffer, 
				&buffer);
			if (!buffer)
				break;
		}
		packet_data->m_ndis_packet = Packet;
		packet_data->m_len = packet_len;
	}
	else {
		packet_data = PreparePacketData(buffer_count,
			0);
		if (!packet_data)
			goto error_exit;

		NdisQueryBufferSafe(buffer,
			&addr,
			&len,
			NormalPagePriority);
		if (!addr ||
			!len)
			goto error_exit;
		
		packet_data->m_ndis_packet = Packet;
		packet_data->m_len = packet_len;		
		packet_data->m_data = addr;
	}

	packet_data->m_mdl = IoAllocateMdl(packet_data->m_data,
		packet_data->m_len,
		FALSE,
		FALSE,
		NULL);
	if (!packet_data->m_mdl)
		goto error_exit;

	__try {
		MmProbeAndLockPages(packet_data->m_mdl,
			KernelMode,
			IoReadAccess);
		packet_data->m_locked = 1;
	}
	__except(EXCEPTION_EXECUTE_HANDLER) {
		packet_data->m_locked = 0;
	}

	/* Mapping an MDL whose pages were never locked is illegal, so bail
	   out if the probe failed. */
	if (!packet_data->m_locked)
		goto error_exit;

	packet_data->m_addr = MmMapLockedPagesSpecifyCache(packet_data->m_mdl,
		UserMode,
		MmCached,
		NULL,
		FALSE,
		NormalPagePriority);
	if (!packet_data->m_addr)
		goto error_exit;

	packet_data->m_map_process = PsGetCurrentProcessId();

	return packet_data;
	
error_exit:
	DbgPrint("Packet2PacketData failed, force to complete the lost packet\n");
	free_send_packet_data(packet_data);
	return NULL;
}
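
free_send_packet_data() is referenced but not shown. A hedged sketch of the cleanup it must perform, mirroring the setup order (unmap, unlock, free the MDL); ownership of m_data depends on PreparePacketData and is omitted:

void free_send_packet_data(struct Send_Packet_Data* packet_data) {
	if (!packet_data)
		return;
	if (packet_data->m_addr)
		MmUnmapLockedPages(packet_data->m_addr, packet_data->m_mdl);
	if (packet_data->m_locked)
		MmUnlockPages(packet_data->m_mdl);
	if (packet_data->m_mdl)
		IoFreeMdl(packet_data->m_mdl);
	ExFreePool(packet_data);	/* assumes PreparePacketData allocates from pool */
}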
Example #13
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_LOW_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /* The excerpt ends here; the original function goes on to fall back on a
       contiguous allocation before finally returning rc. */
    return rc;
}
Example #14
NTSTATUS ultimap_waitForData(ULONG timeout, PULTIMAPDATAEVENT data)
/*
Called from usermode to wait for data
*/
{
	NTSTATUS r;
	LARGE_INTEGER wait;

	PKWAIT_BLOCK waitblock;

	if (DataBlock)
	{		
		waitblock=ExAllocatePool(NonPagedPool, MaxDataBlocks*sizeof(KWAIT_BLOCK));
		if (!waitblock)
			return STATUS_INSUFFICIENT_RESOURCES;

		wait.QuadPart=-10000LL * timeout; //relative timeout in 100 ns units

		//Wait for the events in the list
		//If an event is triggered, find out which one it was, then map that block
		//into usermode address space and return the address and block number
		//That block number is needed to continue afterwards

		if (timeout==0xffffffff) //infinite wait
			r=KeWaitForMultipleObjects(MaxDataBlocks, DataReadyPointerList, WaitAny, UserRequest, UserMode, TRUE, NULL, waitblock);
		else
			r=KeWaitForMultipleObjects(MaxDataBlocks, DataReadyPointerList, WaitAny, UserRequest, UserMode, TRUE, &wait, waitblock);

		ExFreePool(waitblock);

		data->Block=r-STATUS_WAIT_0;

		if (data->Block < MaxDataBlocks) //valid indexes are 0..MaxDataBlocks-1; also filters out timeout/alert results
		{
			//Map this block to usermode
			

			ExAcquireFastMutex(&DataBlockMutex);
			if (DataBlock)
			{
				data->KernelAddress=(UINT64)DataBlock[data->Block].Data;
				data->Mdl=(UINT64)IoAllocateMdl(DataBlock[data->Block].Data, DataBlock[data->Block].DataSize, FALSE, FALSE, NULL); //the original cast-as-lvalue "(PMDL)data->Mdl=..." is not valid C
				if (data->Mdl)
				{
					MmBuildMdlForNonPagedPool((PMDL)data->Mdl);

					data->Address=(UINT_PTR)MmMapLockedPagesSpecifyCache((PMDL)data->Mdl, UserMode, MmCached, NULL, FALSE, NormalPagePriority);
					if (data->Address)
					{
						data->Size=DataBlock[data->Block].DataSize;
						data->CpuID=DataBlock[data->Block].CpuID;
						r=STATUS_SUCCESS;
					}
					else
						r=STATUS_UNSUCCESSFUL;					
				}
				else
					r=STATUS_UNSUCCESSFUL;
			}
			else
				r=STATUS_UNSUCCESSFUL;

			ExReleaseFastMutex(&DataBlockMutex);

			return r;
		}
		else
			return STATUS_UNSUCCESSFUL;
	}
	else
		return STATUS_UNSUCCESSFUL;
}
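
A hedged sketch of the release path that must follow once usermode is done with a block (the name ultimap_continue and the struct reuse are assumptions):

NTSTATUS ultimap_continue(PULTIMAPDATAEVENT data)
{
	//undo the usermode mapping made in ultimap_waitForData
	if (data->Address)
		MmUnmapLockedPages((PVOID)(UINT_PTR)data->Address, (PMDL)data->Mdl);

	if (data->Mdl)
		IoFreeMdl((PMDL)data->Mdl);

	//the block can now be marked writable again / its event re-armed
	return STATUS_SUCCESS;
}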
Example #15
NTSTATUS EVhdExecuteScsiRequestDisk(ParserInstance *parser, SCSI_PACKET *pPacket)
{
	NTSTATUS status = STATUS_SUCCESS;
    STORVSP_REQUEST *pVspRequest = pPacket->pVspRequest;
    STORVSC_REQUEST *pVscRequest = pPacket->pVscRequest;
	PMDL pMdl = pPacket->pMdl;
    SCSI_OP_CODE opCode = (UCHAR)pVscRequest->Sense.Cdb6.OpCode;
    memset(&pVspRequest->Srb, 0, SCSI_REQUEST_BLOCK_SIZE);
	pVspRequest->pContext = parser;
	pVspRequest->Srb.Length = SCSI_REQUEST_BLOCK_SIZE;
    pVspRequest->Srb.SrbStatus = pVscRequest->SrbStatus;
    pVspRequest->Srb.ScsiStatus = pVscRequest->ScsiStatus;
	pVspRequest->Srb.PathId = parser->ScsiPathId;
	pVspRequest->Srb.TargetId = parser->ScsiTargetId;
	pVspRequest->Srb.Lun = parser->ScsiLun;
    pVspRequest->Srb.CdbLength = pVscRequest->CdbLength;
    pVspRequest->Srb.SenseInfoBufferLength = pVscRequest->SenseInfoBufferLength;
    pVspRequest->Srb.SrbFlags = pVscRequest->bDataIn ? SRB_FLAGS_DATA_IN : SRB_FLAGS_DATA_OUT;
    pVspRequest->Srb.DataTransferLength = pVscRequest->DataTransferLength;
    pVspRequest->Srb.SrbFlags |= pVscRequest->Extension.SrbFlags & 0x8000;		  // Non-standard (decimal 8000 in the original excerpt, likely meant as hex)
    pVspRequest->Srb.SrbExtension = pVspRequest + 1;	// variable-length extension right after the inner request block
    pVspRequest->Srb.SenseInfoBuffer = &pVscRequest->Sense;
    memmove(pVspRequest->Srb.Cdb, &pVscRequest->Sense, pVscRequest->CdbLength);
	switch (opCode)
	{
	default:
		if (pMdl)
		{
            pVspRequest->Srb.DataBuffer = pMdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | MDL_SOURCE_IS_NONPAGED_POOL) ?
				pMdl->MappedSystemVa : MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL, FALSE,
				NormalPagePriority | MdlMappingNoExecute);
            if (!pVspRequest->Srb.DataBuffer)
			{
				status = STATUS_INSUFFICIENT_RESOURCES;
				break;
			}
		}
		break;
	case SCSI_OP_CODE_WRITE_6:
	case SCSI_OP_CODE_WRITE_10:
	case SCSI_OP_CODE_WRITE_12:
	case SCSI_OP_CODE_WRITE_16:
	case SCSI_OP_CODE_READ_6:
	case SCSI_OP_CODE_READ_10:
	case SCSI_OP_CODE_READ_12:
	case SCSI_OP_CODE_READ_16:
		break;
	}

	if (NT_SUCCESS(status))
        status = parser->Io.pfnStartIo(parser->Io.pIoInterface, pPacket, pVspRequest, pMdl, pPacket->bUnkFlag,
		pPacket->bUseInternalSenseBuffer ? &pPacket->Sense : NULL);
	else
        pVscRequest->SrbStatus = SRB_STATUS_INTERNAL_ERROR;

	if (STATUS_PENDING != status)
	{
		EvhdPostProcessScsiPacket(pPacket, status);
		status = VstorCompleteScsiRequest(pPacket);
	}

	return status;
}
Example #16
static NTSTATUS
windows_netmap_mmap(PIRP Irp)
{
	PIO_STACK_LOCATION	irpSp;
	struct netmap_priv_d	*priv;
	struct netmap_adapter	*na;
	PMDL mdl;
	NTSTATUS		ret = STATUS_SUCCESS;

	irpSp = IoGetCurrentIrpStackLocation(Irp);
	priv = irpSp->FileObject->FsContext;

	if (priv == NULL) {
		D("no priv");
		return STATUS_DEVICE_DATA_ERROR;
	}
	na = priv->np_na;
	if (na == NULL) {
		D("na not attached");
		return STATUS_DEVICE_DATA_ERROR;
	}
	mb(); /* XXX really ? */

	mdl = win32_build_user_vm_map(na->nm_mem);
	if (mdl == NULL) {
		D("failed building memory map");
		return STATUS_DEVICE_DATA_ERROR;
	}

	__try { // XXX see if we can do without exceptions
		void *UserVirtualAddress;
		PVOID buffer = MmGetSystemAddressForMdlSafe(Irp->MdlAddress, NormalPagePriority);

		if (buffer == NULL) {
			Irp->IoStatus.Information = 0;
			DbgPrint("Netmap.sys: Failed to map the request buffer!");
			IoFreeMdl(mdl); /* don't leak the map MDL on this early exit */
			return STATUS_DEVICE_DATA_ERROR;
		}

		UserVirtualAddress = MmMapLockedPagesSpecifyCache(
			mdl,
			UserMode,
			MmNonCached,
			NULL,
			FALSE,
			NormalPagePriority);
		if (UserVirtualAddress != NULL) {
			MEMORY_ENTRY		returnedValue;

			returnedValue.pUsermodeVirtualAddress = UserVirtualAddress;
			RtlCopyMemory(buffer, &returnedValue, sizeof(PVOID));
			Irp->IoStatus.Information = sizeof(void*);
			DbgPrint("Netmap.sys: Memory allocated to user process");
		} else {
			Irp->IoStatus.Information = 0;
			DbgPrint("Netmap.sys: Failed to map memory into user space!");
			// XXX do we need to free the mdl ?
			ret = STATUS_INSUFFICIENT_RESOURCES;
		}
	} __except(EXCEPTION_EXECUTE_HANDLER) {
		Irp->IoStatus.Information = 0;
		DbgPrint("Netmap.sys: Exception while mapping memory!");
		ret = STATUS_INSUFFICIENT_RESOURCES;
	}
	IoFreeMdl(mdl);
	return ret;
}
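
A hedged sketch of the user-mode side that receives the mapped address (the IOCTL code and device handle are illustrative):

	MEMORY_ENTRY entry;
	DWORD returned = 0;
	void *mapped = NULL;
	if (DeviceIoControl(hNetmapDevice, IOCTL_NETMAP_MMAP /* illustrative */,
			NULL, 0, &entry, sizeof(entry), &returned, NULL))
		mapped = entry.pUsermodeVirtualAddress;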
Example #17
/// <summary>
/// Allocate kernel memory and map it into user space, or free previously allocated memory
/// </summary>
/// <param name="pProcess">Target process object</param>
/// <param name="pAllocFree">Request params.</param>
/// <param name="pResult">Allocated region info.</param>
/// <returns>Status code</returns>
NTSTATUS BBAllocateFreePhysical( IN PEPROCESS pProcess, IN PALLOCATE_FREE_MEMORY pAllocFree, OUT PALLOCATE_FREE_MEMORY_RESULT pResult )
{
    NTSTATUS status = STATUS_SUCCESS;
    PVOID pRegionBase = NULL;
    PMDL pMDL = NULL;

    ASSERT( pProcess != NULL && pResult != NULL );
    if (pProcess == NULL || pResult == NULL)
        return STATUS_INVALID_PARAMETER;

    // MDL doesn't support regions this large
    if (pAllocFree->size > 0xFFFFFFFF)
    {
        DPRINT( "BlackBone: %s: Region size if too big: 0x%p\n", __FUNCTION__, pAllocFree->size );
        return STATUS_INVALID_PARAMETER;
    }

    // Align on page boundaries   
    pAllocFree->base = (ULONGLONG)PAGE_ALIGN( pAllocFree->base );
    pAllocFree->size = ADDRESS_AND_SIZE_TO_SPAN_PAGES( pAllocFree->base, pAllocFree->size ) << PAGE_SHIFT;

    // Allocate
    if (pAllocFree->allocate != FALSE)
    {
        PMMVAD_SHORT pVad = NULL;
        if (pAllocFree->base != 0 && BBFindVAD( pProcess, pAllocFree->base, &pVad ) != STATUS_NOT_FOUND)
            return STATUS_ALREADY_COMMITTED;

        pRegionBase = ExAllocatePoolWithTag( NonPagedPool, pAllocFree->size, BB_POOL_TAG );
        if (!pRegionBase)
            return STATUS_NO_MEMORY;

        // Cleanup buffer before mapping it into UserMode to prevent exposure of kernel data
        RtlZeroMemory( pRegionBase, pAllocFree->size );

        pMDL = IoAllocateMdl( pRegionBase, (ULONG)pAllocFree->size, FALSE, FALSE, NULL );
        if (pMDL == NULL)
        {
            ExFreePoolWithTag( pRegionBase, BB_POOL_TAG );
            return STATUS_NO_MEMORY;
        }

        MmBuildMdlForNonPagedPool( pMDL );

        // Map at the original base; pre-clear the result so a failed or
        // faulting map is detectable below
        pResult->address = 0;
        __try {
            pResult->address = (ULONGLONG)MmMapLockedPagesSpecifyCache( 
                pMDL, UserMode, MmCached, (PVOID)pAllocFree->base, FALSE, NormalPagePriority 
                );
        }
        __except (EXCEPTION_EXECUTE_HANDLER) { }

        // Map at any suitable
        if (pResult->address == 0 && pAllocFree->base != 0)
        {
            __try {
                pResult->address = (ULONGLONG)MmMapLockedPagesSpecifyCache(
                    pMDL, UserMode, MmCached, NULL, FALSE, NormalPagePriority
                    );
            }
            __except (EXCEPTION_EXECUTE_HANDLER) { }
        }

        // The excerpt ends here; a minimal completion consistent with the
        // failure paths above (the original also records the region for later
        // cleanup and handles the 'free' branch of pAllocFree->allocate):
        if (pResult->address == 0)
        {
            IoFreeMdl( pMDL );
            ExFreePoolWithTag( pRegionBase, BB_POOL_TAG );
            return STATUS_NONE_MAPPED;
        }

        pResult->size = pAllocFree->size;
    }

    return status;
}
Example #18
//------------------------------------------------------------------------------
tOplkError drv_mapSocMem(void** ppUserMem_p,
                         size_t* pMemSize_p)
{
    if ((ppUserMem_p == NULL) || (pMemSize_p == NULL))
    {
        DEBUG_LVL_ERROR_TRACE("%s() Invalid pointer !\n", __func__);
        return kErrorNoResource;
    }

    // Get SoC memory
    socMemInfo_l.pKernelVa = timesynckcal_getSharedMemory();
    if (socMemInfo_l.pKernelVa == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Timesync shared memory is NULL !", __func__);
        return kErrorNoResource;
    }

    // Set SoC memory size
    socMemInfo_l.memSize = sizeof(tTimesyncSharedMemory);

    if (*pMemSize_p > socMemInfo_l.memSize)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Higher memory requested (kernel: %u, user: %u)!\n",
                              __func__,
                              (UINT)socMemInfo_l.memSize,
                              (UINT)*pMemSize_p);
        *pMemSize_p = 0;
        return kErrorNoResource;
    }

    // Allocate a new MDL pointing to the SoC memory
    socMemInfo_l.pMdl = IoAllocateMdl(socMemInfo_l.pKernelVa,
                                      (ULONG)socMemInfo_l.memSize,
                                      FALSE,
                                      FALSE,
                                      NULL);

    if (socMemInfo_l.pMdl == NULL)
    {
        DEBUG_LVL_ERROR_TRACE("%s() Error allocating MDL !\n", __func__);
        return kErrorNoResource;
    }

    // Update the MDL with physical addresses
    MmBuildMdlForNonPagedPool(socMemInfo_l.pMdl);

    // Map the physical pages described by the MDL into user space; as in
    // drv_mapPdoMem, the user-mode mapping is wrapped in SEH
    __try
    {
        socMemInfo_l.pUserVa = MmMapLockedPagesSpecifyCache(socMemInfo_l.pMdl,    // MDL
                                                            UserMode,             // Mode
                                                            MmCached,             // Caching
                                                            NULL,                 // Address
                                                            FALSE,                // Bug-check?
                                                            NormalPagePriority);  // Priority
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        socMemInfo_l.pUserVa = NULL;
    }

    if (socMemInfo_l.pUserVa == NULL)
    {
        IoFreeMdl(socMemInfo_l.pMdl);
        DEBUG_LVL_ERROR_TRACE("%s() Error mapping MDL !\n", __func__);
        return kErrorNoResource;
    }

    *ppUserMem_p = socMemInfo_l.pUserVa;
    *pMemSize_p = socMemInfo_l.memSize;

    DEBUG_LVL_ALWAYS_TRACE("Mapped SoC memory info U:%p K:%p size:%uz\n",
                           socMemInfo_l.pUserVa,
                           socMemInfo_l.pKernelVa,
                           socMemInfo_l.memSize);

    return kErrorOk;
}