/**
 * Locks down a range of user (ring-3) memory of the current process (OS/2).
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   R3Ptr       The ring-3 address to lock (assumed page aligned).
 * @param   cb          Number of bytes to lock (assumed a page-size multiple).
 * @param   fAccess     RTMEM_PROT_XXX flags; only RTMEM_PROT_WRITE changes behavior here.
 * @param   R0Process   The process to lock the memory in; must be the calling
 *                      process (only self-locking is supported on this host).
 */
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    /* Only the current process is supported. */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);

    /* Create the object; aPages[] is a flexible tail array, hence the RT_OFFSETOF sizing. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* Lock it.  Request a write lock only when the caller asked for write access. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        /* Normalize the page list returned by KernVMLock (cPagesRet may differ from cPages). */
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        Assert(cb == pMemOs2->Core.cb);
        Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
        pMemOs2->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }

    /* Locking failed: destroy the half-constructed object and translate the OS/2 error. */
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
/**
 * Invoked when the client terminates abnormally or skips its cleanups.
 *
 * Only logs; the base class implementation ends up calling clientClose(),
 * which is where the actual session cleanup happens.
 */
IOReturn org_virtualbox_SupDrvClient::clientDied(void)
{
    LogFlow(("org_virtualbox_SupDrvClient::clientDied([%p]) m_Task=%p R0Process=%p Process=%d\n",
             this, m_Task, RTR0ProcHandleSelf(), RTProcSelf()));

    /* IOUserClient::clientDied() calls clientClose, so we'll just do the work there. */
    IOReturn rcRet = IOUserClient::clientDied();
    return rcRet;
}
/**
 * Initializer invoked when a client opens the service.
 *
 * @returns true on success, false on failure.
 * @param   OwningTask      The mach task of the opening process.
 * @param   pvSecurityId    Opaque security token passed through to the base class.
 * @param   u32Type         Client type passed through to the base class.
 */
bool org_virtualbox_SupDrvClient::initWithTask(task_t OwningTask, void *pvSecurityId, UInt32 u32Type)
{
    LogFlow(("org_virtualbox_SupDrvClient::initWithTask([%p], %#x, %p, %#x) (cur pid=%d proc=%p)\n",
             this, OwningTask, pvSecurityId, u32Type, RTProcSelf(), RTR0ProcHandleSelf()));
    AssertMsg((RTR0PROCESS)OwningTask == RTR0ProcHandleSelf(), ("%p %p\n", OwningTask, RTR0ProcHandleSelf()));

    /* A client without an owning task makes no sense; refuse it. */
    if (!OwningTask)
        return false;

    /* Let the base class do its part first; bail out if it fails. */
    if (!IOUserClient::initWithTask(OwningTask, pvSecurityId, u32Type))
        return false;

    /* Remember the owner and start out with no session or provider. */
    m_Task      = OwningTask;
    m_pSession  = NULL;
    m_pProvider = NULL;
    return true;
}
/**
 * Invoked on the normal client exit path (and, via clientDied, the abnormal one).
 *
 * @returns kIOReturnSuccess.
 */
IOReturn org_virtualbox_SupDrvClient::clientClose(void)
{
    LogFlow(("org_virtualbox_SupDrvClient::clientClose([%p]) (cur pid=%d proc=%p)\n",
             this, RTProcSelf(), RTR0ProcHandleSelf()));
    AssertMsg((RTR0PROCESS)m_Task == RTR0ProcHandleSelf(), ("%p %p\n", m_Task, RTR0ProcHandleSelf()));

    /*
     * Clean up the session if it's still around.
     *
     * We cannot rely 100% on close, and in the case of a dead client
     * we'll end up hanging inside vm_map_remove() if we postpone it.
     */
    if (m_pSession != NULL)
    {
        sessionClose(RTProcSelf());
        Assert(!m_pSession);
    }

    /* Drop the provider reference and tear down the service object. */
    m_pProvider = NULL;
    terminate();

    return kIOReturnSuccess;
}
/**
 * Translates a virtual address to its physical address (Solaris).
 *
 * Kernel addresses are resolved through the kernel address space HAT;
 * anything else is resolved through the current process's HAT.
 *
 * @param   pv  The virtual address to translate.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    uintptr_t const uAddr = (uintptr_t)pv;

    /* Pick the right address-translation context for the address. */
    struct hat *pHat;
    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    /* Look up the page frame for the page-aligned address. */
    pfn_t Pfn = hat_getpfnum(pHat, (caddr_t)(uAddr & PAGEMASK));
    AssertReleaseMsg(Pfn != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));

    /* Recombine the frame number with the in-page offset. */
    return ((uint64_t)Pfn << PAGE_SHIFT) | (uAddr & PAGE_OFFSET_MASK);
}
/**
 * Frees the native backing of a memory object (NT).
 *
 * Tears down mappings, unlocks pages, unsecures memory and releases MDLs as
 * appropriate for the object type, then reports success so the generic code
 * can delete the object itself.
 *
 * @returns VINF_SUCCESS, or VERR_INTERNAL_ERROR for unsupported/unknown types.
 * @param   pMem    The memory object to free the native resources of.
 */
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            /* Low allocs are expected to have been made via MmAllocatePagesForMdl
               (flag set at alloc time); unmap, unsecure, then free pages + MDL. */
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            /* A LOW object without fAllocatedPagesForMdl is unexpected here. */
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            /* Pool allocation with a single describing MDL. */
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            /* Contiguous allocation with a single describing MDL. */
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* rtR0MemObjNativeEnterPhys? */
            if (!pMemNt->Core.u.Phys.fAllocated)
            {
#ifndef IPRT_TARGET_NT4
                Assert(!pMemNt->fAllocatedPagesForMdl);
#endif
                /* Nothing to do here. */
                break;
            }
            /* fall thru */

        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            /* Pages allocated via MmAllocatePagesForMdl: free pages and the MDL. */
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            /* Unsecure first (if secured), then unlock and free each MDL. */
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            /* Not implemented on NT. */
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                /* Mapping of an MDL-backed parent; must be unmapped in the mapping
                   process (or it was a kernel mapping). */
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       ||   pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                /* Kernel mapping of raw physical memory (EnterPhys-style parent). */
                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       && !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
/**
 * close() worker (Solaris).
 *
 * Locates the session for the closing process/minor, detaches it from the
 * driver state (soft state or hash table depending on USE_SESSION_HASH),
 * and releases it.
 *
 * @returns 0 on success, EFAULT if no session can be found.
 */
static int VBoxDrvSolarisClose(dev_t Dev, int flag, int otyp, cred_t *cred)
{
    LogFlowFunc(("VBoxDrvSolarisClose: Dev=%#x\n", Dev));

#ifndef USE_SESSION_HASH
    /*
     * Get the session and free the soft state item.
     */
    vbox_devstate_t *pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, getminor(Dev));
    if (!pState)
    {
        LogRel(("VBoxDrvSolarisClose: no state data for %#x (%d)\n", Dev, getminor(Dev)));
        return EFAULT;
    }

    /* Take ownership of the session before freeing the soft state it lives in. */
    PSUPDRVSESSION pSession = pState->pSession;
    pState->pSession = NULL;
    ddi_soft_state_free(g_pVBoxDrvSolarisState, getminor(Dev));

    if (!pSession)
    {
        LogRel(("VBoxDrvSolarisClose: no session in state data for %#x (%d)\n", Dev, getminor(Dev)));
        return EFAULT;
    }
    LogFlow(("VBoxDrvSolarisClose: Dev=%#x pSession=%p pid=%d r0proc=%p thread=%p\n",
             Dev, pSession, RTProcSelf(), RTR0ProcHandleSelf(), RTThreadNativeSelf() ));

#else
    const RTPROCESS Process = RTProcSelf();
    const unsigned  iHash = SESSION_HASH(Process);
    PSUPDRVSESSION  pSession;

    /*
     * Remove from the hash table (unlinks the entry belonging to this process).
     */
    RTSpinlockAcquire(g_Spinlock);
    pSession = g_apSessionHashTab[iHash];
    if (pSession)
    {
        if (pSession->Process == Process)
        {
            /* Head of the chain. */
            g_apSessionHashTab[iHash] = pSession->pNextHash;
            pSession->pNextHash = NULL;
        }
        else
        {
            /* Somewhere further down the chain. */
            PSUPDRVSESSION pPrev = pSession;
            pSession = pSession->pNextHash;
            while (pSession)
            {
                if (pSession->Process == Process)
                {
                    pPrev->pNextHash = pSession->pNextHash;
                    pSession->pNextHash = NULL;
                    break;
                }

                /* next */
                pPrev = pSession;
                pSession = pSession->pNextHash;
            }
        }
    }
    RTSpinlockRelease(g_Spinlock);

    if (!pSession)
    {
        LogRel(("VBoxDrvSolarisClose: WHAT?!? pSession == NULL! This must be a mistake... pid=%d (close)\n", (int)Process));
        return EFAULT;
    }
#endif

    /*
     * Close the session.
     */
    supdrvSessionRelease(pSession);
    return 0;
}
/**
 * open() worker (Solaris).
 *
 * Validates the minor/type, allocates a per-open soft state item (or hash
 * entry when USE_SESSION_HASH is defined), creates a driver session and
 * clones the device number so each open gets its own minor.
 *
 * @returns 0 on success, a Solaris errno on failure.
 */
static int VBoxDrvSolarisOpen(dev_t *pDev, int fFlag, int fType, cred_t *pCred)
{
    const bool          fUnrestricted = getminor(*pDev) == 0;  /* minor 0 = unrestricted device node */
    PSUPDRVSESSION      pSession;
    int                 rc;

    LogFlowFunc(("VBoxDrvSolarisOpen: pDev=%p:%#x\n", pDev, *pDev));

    /*
     * Validate input
     */
    if (   (getminor(*pDev) != 0 && getminor(*pDev) != 1)
        || fType != OTYP_CHR)
        return EINVAL; /* See mmopen for precedent. */

#ifndef USE_SESSION_HASH
    /*
     * Locate a new device open instance.
     *
     * For each open call we'll allocate an item in the soft state of the device.
     * The item index is stored in the dev_t. I hope this is ok...
     */
    vbox_devstate_t *pState = NULL;
    unsigned iOpenInstance;
    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (    !ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance) /* faster */
            &&  ddi_soft_state_zalloc(g_pVBoxDrvSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        LogRel(("VBoxDrvSolarisOpen: too many open instances.\n"));
        return ENXIO;
    }

    /*
     * Create a new session.
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        /* Hook the session up to the open instance and clone the dev_t. */
        pState->pSession = pSession;
        *pDev = makedevice(getmajor(*pDev), iOpenInstance);
        LogFlow(("VBoxDrvSolarisOpen: Dev=%#x pSession=%p pid=%d r0proc=%p thread=%p\n",
                 *pDev, pSession, RTProcSelf(), RTR0ProcHandleSelf(), RTThreadNativeSelf() ));
        return 0;
    }

    /* failed - clean up */
    ddi_soft_state_free(g_pVBoxDrvSolarisState, iOpenInstance);

#else
    /*
     * Create a new session.
     * Sessions in Solaris driver are mostly useless. It's however needed
     * in VBoxDrvSolarisIOCtlSlow() while calling supdrvIOCtl()
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        unsigned        iHash;

        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        /*
         * Insert it into the hash table.
         * NOTE: this branch is deliberately broken with a compile-time error;
         * the hash scheme only supports one entry per process.
         */
# error "Only one entry per process!"
        iHash = SESSION_HASH(pSession->Process);
        RTSpinlockAcquire(g_Spinlock);
        pSession->pNextHash = g_apSessionHashTab[iHash];
        g_apSessionHashTab[iHash] = pSession;
        RTSpinlockRelease(g_Spinlock);
        LogFlow(("VBoxDrvSolarisOpen success\n"));
    }

    /* Find an existing soft state instance to clone the dev_t from. */
    int instance;
    for (instance = 0; instance < DEVICE_MAXINSTANCES; instance++)
    {
        vbox_devstate_t *pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, instance);
        if (pState)
            break;
    }

    if (instance >= DEVICE_MAXINSTANCES)
    {
        LogRel(("VBoxDrvSolarisOpen: All instances exhausted\n"));
        return ENXIO;
    }

    *pDev = makedevice(getmajor(*pDev), instance);
#endif

    return VBoxSupDrvErr2SolarisErr(rc);
}
/**
 * Maps a memory object into the current user process (FreeBSD).
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new mapping object on success.
 * @param   pMemToMap   The object to map (its backing VM object is reused).
 * @param   R3PtrFixed  Fixed ring-3 address, or (RTR3PTR)-1 to let the kernel pick one.
 * @param   uAlignment  Requested alignment; only up to PAGE_SIZE is supported.
 * @param   fProt       RTMEM_PROT_XXX protection flags for the mapping.
 * @param   R0Process   Target process; must be the calling process.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int                rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc       *pProc            = (struct proc *)R0Process;
    struct vm_map     *pProcMap         = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address: start searching above the data segment limit
       when the caller didn't fix the address. */
    vm_offset_t AddrR3;
    if (R3PtrFixed == (RTR3PTR)-1)
    {
        /** @todo: is this needed?. */
        PROC_LOCK(pProc);
        AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
        PROC_UNLOCK(pProc);
    }
    else
        AddrR3 = (vm_offset_t)R3PtrFixed;

    /* Insert the pObject in the map.  An extra reference is taken for the map;
       it is dropped again on the failure path below. */
    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(pProcMap,                  /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     0,                         /* Start offset in the object */
                     &AddrR3,                   /* Start address IN/OUT */
                     pMemToMap->cb,             /* Size of the mapping */
#if __FreeBSD_version >= 1000055
                     0,                         /* Upper bound of the mapping */
#endif
                     R3PtrFixed == (RTR3PTR)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                                /* Whether a suitable address should be searched for first */
                     ProtectionFlags,           /* protection flags */
                     VM_PROT_ALL,               /* Maximum protection flags */
                     0);                        /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        /* Wire the pages so they stay resident, and make the range shared on fork. */
        rc = vm_map_wire(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        rc = vm_map_inherit(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_INHERIT_SHARE);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD), RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3, pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        /* Object allocation failed: undo the mapping (this also releases the
           object reference taken above). */
        rc = vm_map_remove(pProcMap, AddrR3, AddrR3 + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
/**
 * Start the client service.
 *
 * Creates a driver session for the calling process and inserts it into the
 * global session hash table, enforcing one session per process.
 *
 * @returns true on success, false on any failure (base class failure, wrong
 *          provider, session creation failure, or duplicate session).
 * @param   pProvider   The providing service; must be org_virtualbox_SupDrv.
 */
bool org_virtualbox_SupDrvClient::start(IOService *pProvider)
{
    LogFlow(("org_virtualbox_SupDrvClient::start([%p], %p) (cur pid=%d proc=%p)\n",
             this, pProvider, RTProcSelf(), RTR0ProcHandleSelf() ));
    AssertMsgReturn((RTR0PROCESS)m_Task == RTR0ProcHandleSelf(), ("%p %p\n", m_Task, RTR0ProcHandleSelf()), false);

    if (IOUserClient::start(pProvider))
    {
        m_pProvider = OSDynamicCast(org_virtualbox_SupDrv, pProvider);
        if (m_pProvider)
        {
            Assert(!m_pSession);

            /*
             * Create a new session.
             */
            int rc = supdrvCreateSession(&g_DevExt, true /* fUser */, false /*fUnrestricted*/, &m_pSession);
            if (RT_SUCCESS(rc))
            {
                m_pSession->fOpened = false;
                /* The Uid, Gid and fUnrestricted fields are set on open. */

                /*
                 * Insert it into the hash table, checking that there isn't
                 * already one for this process first. (One session per proc!)
                 */
                unsigned iHash = SESSION_HASH(m_pSession->Process);
                RTSpinlockAcquire(g_Spinlock);

                PSUPDRVSESSION pCur = g_apSessionHashTab[iHash];
                if (pCur && pCur->Process != m_pSession->Process)
                {
                    do pCur = pCur->pNextHash;
                    while (pCur && pCur->Process != m_pSession->Process);
                }
                if (!pCur)
                {
                    m_pSession->pNextHash = g_apSessionHashTab[iHash];
                    g_apSessionHashTab[iHash] = m_pSession;
                    m_pSession->pvSupDrvClient = this;
                    ASMAtomicIncS32(&g_cSessions);
                    rc = VINF_SUCCESS;
                }
                else
                    rc = VERR_ALREADY_LOADED;

                /* Fix: use RTSpinlockRelease to pair with the plain RTSpinlockAcquire
                   above, consistent with every other g_Spinlock release in this file
                   (the NoInts variant belongs with RTSpinlockAcquireNoInts). */
                RTSpinlockRelease(g_Spinlock);

                if (RT_SUCCESS(rc))
                {
                    Log(("org_virtualbox_SupDrvClient::start: created session %p for pid %d\n", m_pSession, (int)RTProcSelf()));
                    return true;
                }

                /* Duplicate session for this process: back out. */
                LogFlow(("org_virtualbox_SupDrvClient::start: already got a session for this process (%p)\n", pCur));
                supdrvCloseSession(&g_DevExt, m_pSession);
            }

            m_pSession = NULL;
            LogFlow(("org_virtualbox_SupDrvClient::start: rc=%Rrc from supdrvCreateSession\n", rc));
        }
        else
            LogFlow(("org_virtualbox_SupDrvClient::start: %p isn't org_virtualbox_SupDrv\n", pProvider));
    }
    return false;
}
/**
 * Helper that converts from a RTR0PROCESS handle to a linux task.
 *
 * Currently only resolves the calling process; any other handle yields NULL.
 *
 * @returns The corresponding Linux task, or NULL.
 * @param   R0Process   IPRT ring-0 process handle.
 */
struct task_struct *rtR0ProcessToLinuxTask(RTR0PROCESS R0Process)
{
    /** @todo fix rtR0ProcessToLinuxTask!! */
    if (R0Process != RTR0ProcHandleSelf())
        return NULL;
    return current;
}
/**
 * Maps a memory object into the current user process (OS/2).
 *
 * Resolves the object's ring-0 mapping and exposes it to the process via
 * RTR0Os2DHVMGlobalToProcess.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new mapping object on success.
 * @param   pMemToMap   The object to map; must have a kernel mapping.
 * @param   R3PtrFixed  Must be (RTR3PTR)-1; fixed addresses are unsupported.
 * @param   uAlignment  Requested alignment; only up to PAGE_SIZE is supported.
 * @param   fProt       RTMEM_PROT_XXX flags; only WRITE affects the OS/2 flags.
 * @param   R0Process   Target process; must be the calling process.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These has kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            /* PHYS objects without a kernel mapping can't be mapped into a process. */
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_5;
            break;

        case RTR0MEMOBJTYPE_LOCK:
            /* Only kernel locks can be re-mapped. */
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        /* NOTE(review): setting VMDHGP_4MB when uAlignment == PAGE_SIZE looks
           suspicious (one would expect a large-alignment condition) — confirm
           against the OS/2 DevHlp documentation. */
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.  No page array is needed, hence the
     * RT_OFFSETOF(..., Lock) sizing.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR3,
                                                           pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }

    /* Out of memory: undo the process mapping. */
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process) { #if 0 /* * Check for unsupported stuff. */ AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED); AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED); if (uAlignment > PAGE_SIZE) return VERR_NOT_SUPPORTED; int rc; PRTR0MEMOBJHAIKU pMemToMapHaiku = (PRTR0MEMOBJHAIKU)pMemToMap; struct proc *pProc = (struct proc *)R0Process; struct vm_map *pProcMap = &pProc->p_vmspace->vm_map; /* calc protection */ vm_prot_t ProtectionFlags = 0; if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE) ProtectionFlags = VM_PROT_NONE; if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ) ProtectionFlags |= VM_PROT_READ; if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE) ProtectionFlags |= VM_PROT_WRITE; if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC) ProtectionFlags |= VM_PROT_EXECUTE; /* calc mapping address */ PROC_LOCK(pProc); vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA)); PROC_UNLOCK(pProc); /* Insert the object in the map. */ rc = vm_map_find(pProcMap, /* Map to insert the object in */ NULL, /* Object to map */ 0, /* Start offset in the object */ &AddrR3, /* Start address IN/OUT */ pMemToMap->cb, /* Size of the mapping */ TRUE, /* Whether a suitable address should be searched for first */ ProtectionFlags, /* protection flags */ VM_PROT_ALL, /* Maximum protection flags */ 0); /* Copy on write */ /* Map the memory page by page into the destination map. 
*/ if (rc == KERN_SUCCESS) { size_t cPages = pMemToMap->cb >> PAGE_SHIFT;; pmap_t pPhysicalMap = pProcMap->pmap; vm_offset_t AddrR3Dst = AddrR3; if ( pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC || pMemToMap->enmType == RTR0MEMOBJTYPE_PAGE) { /* Mapping physical allocations */ Assert(cPages == pMemToMapHaiku->u.Phys.cPages); /* Insert the memory page by page into the mapping. */ for (uint32_t iPage = 0; iPage < cPages; iPage++) { vm_page_t pPage = pMemToMapHaiku->u.Phys.apPages[iPage]; MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE); AddrR3Dst += PAGE_SIZE; } } else { /* Mapping cont or low memory types */ vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv; for (uint32_t iPage = 0; iPage < cPages; iPage++) { vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap)); MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE); AddrR3Dst += PAGE_SIZE; AddrToMap += PAGE_SIZE; } } }