/**
 * Creates a spinlock (NetBSD: backed by a kernel mutex at the appropriate IPL).
 *
 * @returns IPRT status code.
 * @param   pSpinlock   Where to store the new spinlock handle.
 * @param   fFlags      RTSPINLOCK_FLAGS_INTERRUPT_SAFE or _UNSAFE.
 * @param   pszName     Lock name (unused on this platform).
 */
RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
{
    RT_ASSERT_PREEMPTIBLE();
    AssertReturn(   fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE
                 || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE,
                 VERR_INVALID_PARAMETER);

    /*
     * Allocate (zero-initialized).
     */
    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
    PRTSPINLOCKINTERNAL pNew = (PRTSPINLOCKINTERNAL)RTMemAllocZ(sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    /*
     * Initialize the structure and the native mutex, then hand it back.
     * Interrupt-safe locks block up to IPL_BIO; the rest use IPL_NONE.
     */
    pNew->u32Magic  = RTSPINLOCK_MAGIC;
    pNew->fFlags    = fFlags;
    pNew->fIntSaved = 0;
    if (fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
        mutex_init(&pNew->pSpinLock, MUTEX_DEFAULT, IPL_BIO);
    else
        mutex_init(&pNew->pSpinLock, MUTEX_DEFAULT, IPL_NONE);

    *pSpinlock = pNew;
    return VINF_SUCCESS;
}
/**
 * Creates a spinlock (Darwin: backed by an XNU lck_spin_t).
 *
 * @returns IPRT status code.
 * @param   pSpinlock   Where to store the new spinlock handle.
 * @param   fFlags      RTSPINLOCK_FLAGS_INTERRUPT_SAFE or _UNSAFE.
 * @param   pszName     Lock name, kept for diagnostics.
 */
RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
{
    RT_ASSERT_PREEMPTIBLE();
    AssertReturn(   fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE
                 || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE,
                 VERR_INVALID_PARAMETER);

    /*
     * Allocate.
     */
    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
    PRTSPINLOCKINTERNAL pNew = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    /*
     * Initialize all fields and create the underlying kernel spinlock.
     */
    pNew->u32Magic  = RTSPINLOCK_MAGIC;
    pNew->fIntSaved = 0;
    pNew->fFlags    = fFlags;
    pNew->pszName   = pszName;
    Assert(g_pDarwinLockGroup);
    pNew->pSpinLock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
    if (!pNew->pSpinLock)
    {
        /* Native lock creation failed; undo the allocation. */
        RTMemFree(pNew);
        return VERR_NO_MEMORY;
    }

    *pSpinlock = pNew;
    return VINF_SUCCESS;
}
/**
 * Opens a kernel debug-info handle (Solaris: retains the genunix module
 * and its CTF type data).
 *
 * @returns IPRT status code.
 * @param   phKrnlInfo  Where to store the handle; set to NIL on failure.
 * @param   fFlags      Must be zero.
 */
RTR0DECL(int) RTR0DbgKrnlInfoOpen(PRTDBGKRNLINFO phKrnlInfo, uint32_t fFlags)
{
    AssertReturn(fFlags == 0, VERR_INVALID_PARAMETER);
    AssertPtrReturn(phKrnlInfo, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();
    *phKrnlInfo = NIL_RTDBGKRNLINFO;

    PRTDBGKRNLINFOINT pInfo = (PRTDBGKRNLINFOINT)RTMemAllocZ(sizeof(*pInfo));
    if (!pInfo)
        return VERR_NO_MEMORY;

    /* Grab a reference to genunix and its CTF container. */
    char szGenUnixModName[] = "genunix";
    int rc = rtR0DbgKrnlInfoModRetain(szGenUnixModName, &pInfo->pGenUnixMod, &pInfo->pGenUnixCTF);
    if (RT_FAILURE(rc))
    {
        LogRel(("RTR0DbgKrnlInfoOpen: rtR0DbgKrnlInfoModRetain failed rc=%d.\n", rc));
        RTMemFree(pInfo);
        return rc;
    }

    pInfo->u32Magic = RTDBGKRNLINFO_MAGIC;
    pInfo->cRefs    = 1;
    *phKrnlInfo = pInfo;
    return VINF_SUCCESS;
}
/**
 * Creates a multiple-release event semaphore (Darwin).
 *
 * @returns IPRT status code.
 * @param   phEventMultiSem     Where to store the handle.
 * @param   fFlags              RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL or 0.
 * @param   hClass              Lock validator class (unused here).
 * @param   pszNameFmt          Name format string (unused here).
 */
RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
                                    const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
    AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
    AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();

    PRTSEMEVENTMULTIINTERNAL pSem = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pSem));
    if (!pSem)
        return VERR_NO_MEMORY;

    pSem->u32Magic           = RTSEMEVENTMULTI_MAGIC;
    pSem->fStateAndGen       = RTSEMEVENTMULTIDARWIN_STATE_GEN_INIT;
    pSem->cRefs              = 1;
    pSem->fHaveBlockedThreads = false;
    Assert(g_pDarwinLockGroup);
    pSem->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
    if (pSem->pSpinlock)
    {
        *phEventMultiSem = pSem;
        return VINF_SUCCESS;
    }

    /* Native spinlock creation failed; invalidate and free. */
    pSem->u32Magic = 0;
    RTMemFree(pSem);
    return VERR_NO_MEMORY;
}
/**
 * Creates a mutex semaphore (Darwin: spinlock-protected state plus sleep queue).
 *
 * @returns IPRT status code.
 * @param   phMutexSem  Where to store the handle.
 * @param   fFlags      RTSEMMUTEX_FLAGS_NO_LOCK_VAL or 0.
 * @param   hClass      Lock validator class (unused here).
 * @param   uSubClass   Lock validator sub-class (unused here).
 * @param   pszNameFmt  Name format string (unused here).
 */
RTDECL(int) RTSemMutexCreateEx(PRTSEMMUTEX phMutexSem, uint32_t fFlags, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                               const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMMUTEX_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();
    AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));

    int rc = VERR_NO_MEMORY;
    PRTSEMMUTEXINTERNAL pMutex = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pMutex));
    if (pMutex)
    {
        pMutex->u32Magic     = RTSEMMUTEX_MAGIC;
        pMutex->cWaiters     = 0;
        pMutex->cRefs        = 1;
        pMutex->cRecursions  = 0;
        pMutex->hNativeOwner = NIL_RTNATIVETHREAD;
        Assert(g_pDarwinLockGroup);
        pMutex->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
        if (pMutex->pSpinlock)
        {
            *phMutexSem = pMutex;
            rc = VINF_SUCCESS;
        }
        else
            RTMemFree(pMutex); /* native spinlock creation failed */
    }

    IPRT_DARWIN_RESTORE_EFL_AC();
    return rc;
}
RTDECL(bool) RTThreadYield(void) { RT_ASSERT_PREEMPTIBLE(); //FIXME //snooze(0); thread_yield(true); return true; /* this is fishy */ }
/**
 * Forcibly terminates the ring-0 runtime, regardless of user count.
 *
 * Note! Should *not* be exported since it's only for static linking.
 */
RTR0DECL(void) RTR0TermForced(void)
{
    RT_ASSERT_PREEMPTIBLE();

    /* Expect exactly one remaining user; clear the count and tear down. */
    AssertMsg(g_crtR0Users == 1, ("%d\n", g_crtR0Users));
    ASMAtomicWriteS32(&g_crtR0Users, 0);

    rtR0Term();
}
/**
 * Creates a timer (Solaris, cyclic-based).
 *
 * @returns IPRT status code.
 * @param   ppTimer         Where to store the timer; NULL on failure.
 * @param   u64NanoInterval Interval in nanoseconds, 0 for one-shot.
 * @param   fFlags          RTTIMER_FLAGS_XXX.
 * @param   pfnTimer        Callback to invoke on each tick.
 * @param   pvUser          User argument for the callback.
 */
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    RT_ASSERT_PREEMPTIBLE();
    *ppTimer = NULL;

    /*
     * Validate the flag combination.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;
    bool const fAllCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    if (fAllCpus && u64NanoInterval == 0)
        return VERR_NOT_SUPPORTED; /* one-shot omni timers are not supported */

    /*
     * Allocate and initialize the timer handle.
     */
    PRTTIMER pTimer = (PRTTIMER)RTMemAlloc(sizeof(*pTimer));
    if (!pTimer)
        return VERR_NO_MEMORY;

    pTimer->u32Magic   = RTTIMER_MAGIC;
    pTimer->fSuspended = true;
    if (fAllCpus)
    {
        pTimer->fAllCpu      = true;
        pTimer->fSpecificCpu = false;
        pTimer->iCpu         = 255;
    }
    else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
    {
        pTimer->fAllCpu      = false;
        pTimer->fSpecificCpu = true;
        pTimer->iCpu         = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
    }
    else
    {
        pTimer->fAllCpu      = false;
        pTimer->fSpecificCpu = false;
        pTimer->iCpu         = 255;
    }
    pTimer->interval     = u64NanoInterval;
    pTimer->pfnTimer     = pfnTimer;
    pTimer->pvUser       = pvUser;
    pTimer->pSingleTimer = NULL;
    pTimer->pOmniTimer   = NULL;
    pTimer->hCyclicId    = CYCLIC_NONE;

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
/**
 * Releases a fast mutex (Darwin: lck_mtx based).
 *
 * @returns IPRT status code.
 * @param   hFastMtx    The fast mutex to release.
 */
RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
{
    /*
     * Validate the handle before touching the native lock.
     */
    PRTSEMFASTMUTEXINTERNAL pMtx = hFastMtx;
    AssertPtrReturn(pMtx, VERR_INVALID_HANDLE);
    AssertMsgReturn(pMtx->u32Magic == RTSEMFASTMUTEX_MAGIC,
                    ("%p: u32Magic=%RX32\n", pMtx, pMtx->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();

    lck_mtx_unlock(pMtx->pMtx);
    return VINF_SUCCESS;
}
/**
 * Yields the current thread (Darwin).
 *
 * @returns true - always claims another thread was scheduled.
 */
RTDECL(bool) RTThreadYield(void)
{
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    thread_block(THREAD_CONTINUE_NULL);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return true; /* this is fishy - we cannot tell whether a reschedule happened */
}
/**
 * Acquires a fast mutex (NetBSD: rwlock taken in writer mode).
 *
 * @returns IPRT status code.
 * @param   hFastMtx    The fast mutex to acquire.
 */
RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
{
    /*
     * Validate the handle before blocking on the native lock.
     */
    PRTSEMFASTMUTEXINTERNAL pMtx = hFastMtx;
    AssertPtrReturn(pMtx, VERR_INVALID_HANDLE);
    AssertMsgReturn(pMtx->u32Magic == RTSEMFASTMUTEX_MAGIC,
                    ("%p: u32Magic=%RX32\n", pMtx, pMtx->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();

    rw_enter(&pMtx->Mtx, RW_WRITER);
    return VINF_SUCCESS;
}
/**
 * Initializes the ring-0 driver runtime library.
 *
 * @returns iprt status code.
 * @param       fReserved      Flags reserved for the future.
 */
RTR0DECL(int) RTR0Init(unsigned fReserved)
{
    int rc;
    uint32_t cNewUsers;
    Assert(fReserved == 0);
#ifndef RT_OS_SOLARIS /* On Solaris our thread preemption information is only obtained in rtR0InitNative().*/
    RT_ASSERT_PREEMPTIBLE();
#endif

    /*
     * The first user initializes it.
     * We rely on the module loader to ensure that there are no
     * initialization races should two modules share the IPRT.
     */
    cNewUsers = ASMAtomicIncS32(&g_crtR0Users);
    if (cNewUsers != 1)
    {
        /* Already initialized by an earlier user - nothing to do. */
        if (cNewUsers > 1)
            return VINF_SUCCESS;
        /* Count underflowed (more terms than inits); undo and report. */
        ASMAtomicDecS32(&g_crtR0Users);
        return VERR_INTERNAL_ERROR_3;
    }

    /*
     * First user: initialize the subsystems in order, unwinding in
     * reverse order on failure.
     */
    rc = rtR0InitNative();
    if (RT_SUCCESS(rc))
    {
#ifdef RTR0MEM_WITH_EF_APIS
        rtR0MemEfInit();
#endif
        rc = rtThreadInit();
        if (RT_SUCCESS(rc))
        {
#ifndef IN_GUEST /* play safe for now */
            rc = rtR0MpNotificationInit();
            if (RT_SUCCESS(rc))
            {
                rc = rtR0PowerNotificationInit();
                if (RT_SUCCESS(rc))
                    return rc;
                rtR0MpNotificationTerm();
            }
#else
            /* Guest builds skip the MP/power notification subsystems. */
            if (RT_SUCCESS(rc))
                return rc;
#endif
            rtThreadTerm();
        }
#ifdef RTR0MEM_WITH_EF_APIS
        rtR0MemEfTerm();
#endif
        rtR0TermNative();
    }
    return rc;
}
/**
 * Terminates the ring-0 driver runtime library.
 *
 * Only the last user actually tears the runtime down; an underflowed
 * counter (more terms than inits) is corrected in place.
 */
RTR0DECL(void) RTR0Term(void)
{
    RT_ASSERT_PREEMPTIBLE();

    int32_t cNewUsers = ASMAtomicDecS32(&g_crtR0Users);
    Assert(cNewUsers >= 0);
    if (cNewUsers == 0)
        rtR0Term();                         /* last user - full teardown */
    else if (cNewUsers < 0)
        ASMAtomicIncS32(&g_crtR0Users);     /* unbalanced call - undo */
}
/**
 * Common worker for millisecond sleeps (Darwin).
 *
 * @returns VINF_SUCCESS.
 * @param   cMillies    Number of milliseconds to sleep.
 */
static int rtR0ThreadDarwinSleepCommon(RTMSINTERVAL cMillies)
{
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /* Convert the relative interval to an absolute deadline and block. */
    uint64_t uDeadline;
    clock_interval_to_deadline(cMillies, kMillisecondScale, &uDeadline);
    clock_delay_until(uDeadline);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Spawns a native kernel thread (Haiku).
 *
 * @returns IPRT status code.
 * @param   pThreadInt      The IPRT thread structure; passed to the thread main.
 * @param   pNativeThread   Where to store the native thread id on success.
 */
DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
{
    RT_ASSERT_PREEMPTIBLE();

    thread_id idThread = spawn_kernel_thread(rtThreadNativeMain, pThreadInt->szName,
                                             B_NORMAL_PRIORITY, pThreadInt);
    if (idThread >= B_OK)
    {
        /* Threads are created suspended on Haiku; kick it off. */
        resume_thread(idThread);
        *pNativeThread = (RTNATIVETHREAD)idThread;
        return VINF_SUCCESS;
    }
    return RTErrConvertFromHaikuKernReturn(idThread);
}
/**
 * Internal worker for RTSemMutexRequest and RTSemMutexRequestNoResume
 *
 * @returns IPRT status code.
 * @param   hMutexSem           The mutex handle.
 * @param   cMillies            The timeout.
 * @param   fInterruptible      Whether it's interruptible
 *                              (RTSemMutexRequestNoResume) or not
 *                              (RTSemMutexRequest).
 */
DECLINLINE(int) rtR0SemMutexDarwinRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, wait_interrupt_t fInterruptible)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Grab the lock and check out the state.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    int rc = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    /* Recursive call? */
    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        Assert(pThis->cRecursions < 256);
        pThis->cRecursions++;
    }

    /* Is it free and nobody ahead of us in the queue? */
    else if (   pThis->hNativeOwner == NIL_RTNATIVETHREAD
             && pThis->cWaiters == 0)
    {
        pThis->hNativeOwner = hNativeSelf;
        pThis->cRecursions  = 1;
    }

    /* Polling call? */
    else if (cMillies == 0)
        rc = VERR_TIMEOUT;

    /* Yawn, time for a nap... */
    else
    {
        rc = rtR0SemMutexDarwinRequestSleep(pThis, cMillies, fInterruptible, hNativeSelf);
        /* NOTE(review): this path returns without lck_spin_unlock(), so the
           sleep worker is presumably responsible for dropping the spinlock
           itself - confirm against rtR0SemMutexDarwinRequestSleep. */
        IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
        return rc;
    }

    lck_spin_unlock(pThis->pSpinlock);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    return rc;
}
/**
 * Spawns a native kernel thread (Darwin).
 *
 * @returns IPRT status code.
 * @param   pThreadInt      The IPRT thread structure; passed to the thread main.
 * @param   pNativeThread   Where to store the native thread handle on success.
 */
DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
{
    RT_ASSERT_PREEMPTIBLE();

    thread_t      hThread;
    kern_return_t krc = kernel_thread_start(rtThreadNativeMain, pThreadInt, &hThread);
    if (krc != KERN_SUCCESS)
        return RTErrConvertFromMachKernReturn(krc);

    *pNativeThread = (RTNATIVETHREAD)hThread;
    /* Drop the reference kernel_thread_start handed back; we keep the raw handle only. */
    thread_deallocate(hThread);
    return VINF_SUCCESS;
}
/**
 * Frees memory allocated by RTMemContAlloc (Darwin).
 *
 * @param   pv  The allocation to free; NULL is ignored.
 * @param   cb  The size passed to the allocation call.
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    RT_ASSERT_PREEMPTIBLE();
    if (!pv)
        return;

    Assert(cb > 0);
    AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
    IPRT_DARWIN_SAVE_EFL_AC();

    /* The allocation was rounded up to whole pages; mirror that here. */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    IOFreeContiguous(pv, cb);

    IPRT_DARWIN_RESTORE_EFL_AC();
}
/**
 * Creates a fast mutex (NetBSD: backed by a kernel rwlock).
 *
 * @returns IPRT status code.
 * @param   phFastMtx   Where to store the handle.
 */
RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
{
    AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
    AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();

    PRTSEMFASTMUTEXINTERNAL pNew = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    pNew->u32Magic = RTSEMFASTMUTEX_MAGIC;
    rw_init (&pNew->Mtx, "RWLOCK", RW_DRIVER, NULL);
    *phFastMtx = pNew;
    return VINF_SUCCESS;
}
/**
 * Releases a reference to the kernel debug-info handle (Solaris).
 *
 * @returns New reference count; 0 means the object was destroyed.
 *          UINT32_MAX on invalid handle.
 * @param   hKrnlInfo   The handle; NIL is quietly ignored (returns 0).
 */
RTR0DECL(uint32_t) RTR0DbgKrnlInfoRelease(RTDBGKRNLINFO hKrnlInfo)
{
    PRTDBGKRNLINFOINT pInfo = hKrnlInfo;
    if (pInfo == NIL_RTDBGKRNLINFO)
        return 0;
    AssertPtrReturn(pInfo, UINT32_MAX);
    AssertMsgReturn(pInfo->u32Magic == RTDBGKRNLINFO_MAGIC,
                    ("%p: u32Magic=%RX32\n", pInfo, pInfo->u32Magic),
                    UINT32_MAX);
    RT_ASSERT_PREEMPTIBLE();

    uint32_t cRefs = ASMAtomicDecU32(&pInfo->cRefs);
    if (!cRefs)
    {
        /* Last reference: invalidate the magic, drop genunix, free. */
        pInfo->u32Magic = ~RTDBGKRNLINFO_MAGIC;
        rtR0DbgKrnlInfoModRelease(pInfo->pGenUnixMod, pInfo->pGenUnixCTF);
        RTMemFree(pInfo);
    }
    return cRefs;
}
/**
 * Allocates physically contiguous, page-aligned memory below 4GB (Solaris).
 *
 * @returns Virtual address of the allocation, NULL on failure.
 * @param   pPhys   Where to store the physical address.
 * @param   cb      Number of bytes to allocate.
 */
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
{
    AssertPtrReturn(pPhys, NULL);
    AssertReturn(cb > 0, NULL);
    RT_ASSERT_PREEMPTIBLE();

    /* Allocate physically contiguous (< 4GB) page-aligned memory. */
    uint64_t uPhys;
    void *pvMem = rtR0SolMemAlloc((uint64_t)_4G - 1, &uPhys, cb, PAGESIZE, true);
    if (RT_UNLIKELY(!pvMem))
    {
        /* Fix: cb is size_t, so %u was a mismatched format specifier on LP64;
           use the size_t-sized %zu instead. */
        LogRel(("RTMemContAlloc failed to allocate %zu bytes\n", cb));
        return NULL;
    }

    Assert(uPhys < _4G);
    *pPhys = uPhys;
    return pvMem;
}
/* Queries the byte offset of a structure member using genunix CTF data (Solaris).
   NOTE(review): this function body appears truncated in this view - the
   not-found/error paths continue beyond what is visible here. */
RTR0DECL(int) RTR0DbgKrnlInfoQueryMember(RTDBGKRNLINFO hKrnlInfo, const char *pszStructure, const char *pszMember, size_t *poffMember) { PRTDBGKRNLINFOINT pThis = hKrnlInfo; AssertPtrReturn(pThis, VERR_INVALID_HANDLE); AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE); AssertPtrReturn(pszMember, VERR_INVALID_PARAMETER); AssertPtrReturn(pszStructure, VERR_INVALID_PARAMETER); AssertPtrReturn(poffMember, VERR_INVALID_PARAMETER); RT_ASSERT_PREEMPTIBLE(); int rc = VERR_NOT_FOUND; ctf_id_t TypeIdent = ctf_lookup_by_name(pThis->pGenUnixCTF, pszStructure); if (TypeIdent != CTF_ERR) { ctf_membinfo_t MemberInfo; RT_ZERO(MemberInfo); if (ctf_member_info(pThis->pGenUnixCTF, TypeIdent, pszMember, &MemberInfo) != CTF_ERR) { /* ctm_offset is in bits, hence the >> 3 to get a byte offset. */ *poffMember = (MemberInfo.ctm_offset >> 3); return VINF_SUCCESS; }
/* Queries the byte offset of a structure member via CTF data, optionally from
   a named module instead of genunix (Solaris).
   NOTE(review): this function body appears truncated in this view - the
   module-release and return paths continue beyond what is visible here. */
RTR0DECL(int) RTR0DbgKrnlInfoQueryMember(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszStructure, const char *pszMember, size_t *poffMember) { PRTDBGKRNLINFOINT pThis = hKrnlInfo; AssertPtrReturn(pThis, VERR_INVALID_HANDLE); AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE); AssertPtrReturn(pszMember, VERR_INVALID_PARAMETER); AssertPtrReturn(pszStructure, VERR_INVALID_PARAMETER); AssertPtrReturn(poffMember, VERR_INVALID_PARAMETER); /* Preemptibility can only be asserted once init has run on Solaris. */ if (g_frtSolInitDone) RT_ASSERT_PREEMPTIBLE(); ctf_file_t *pCtf = NULL; modctl_t *pMod = NULL; if (!pszModule) { /* Default to the already-retained genunix module. */ pCtf = pThis->pGenUnixCTF; pMod = pThis->pGenUnixMod; } else { /* Retain the requested module and its CTF container. */ int rc2 = rtR0DbgKrnlInfoModRetainEx(pszModule, &pMod, &pCtf); if (RT_FAILURE(rc2)) return rc2; Assert(pMod); Assert(pCtf); } int rc = VERR_NOT_FOUND; ctf_id_t TypeIdent = ctf_lookup_by_name(pCtf, pszStructure); if (TypeIdent != CTF_ERR) { ctf_membinfo_t MemberInfo; RT_ZERO(MemberInfo); if (ctf_member_info(pCtf, TypeIdent, pszMember, &MemberInfo) != CTF_ERR) { /* ctm_offset is in bits, hence the >> 3 to get a byte offset. */ *poffMember = (MemberInfo.ctm_offset >> 3); rc = VINF_SUCCESS; }
/**
 * Creates a fast mutex (Darwin: backed by an XNU lck_mtx_t).
 *
 * @returns IPRT status code.
 * @param   phFastMtx   Where to store the handle.
 */
RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
{
    AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
    AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();

    PRTSEMFASTMUTEXINTERNAL pNew = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    pNew->u32Magic = RTSEMFASTMUTEX_MAGIC;
    Assert(g_pDarwinLockGroup);
    pNew->pMtx = lck_mtx_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
    if (!pNew->pMtx)
    {
        /* Native mutex creation failed; undo the allocation. */
        RTMemFree(pNew);
        return VERR_NO_MEMORY;
    }

    *phFastMtx = pNew;
    return VINF_SUCCESS;
}
/**
 * Creates a multiple-release event semaphore (Solaris: mutex + condvar).
 *
 * @returns IPRT status code.
 * @param   phEventMultiSem     Where to store the handle.
 * @param   fFlags              RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL or 0.
 * @param   hClass              Lock validator class (unused here).
 * @param   pszNameFmt          Name format string (unused here).
 */
RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
                                    const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
    AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();
    AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));

    PRTSEMEVENTMULTIINTERNAL pSem = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pSem));
    if (!pSem)
        return VERR_NO_MEMORY;

    pSem->u32Magic     = RTSEMEVENTMULTI_MAGIC;
    pSem->cRefs        = 1;
    pSem->fStateAndGen = RTSEMEVENTMULTISOL_STATE_GEN_INIT;
    /* Driver mutex raised to DISP_LEVEL, paired with a driver condvar. */
    mutex_init(&pSem->Mtx, "IPRT Multiple Release Event Semaphore", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
    cv_init(&pSem->Cnd, "IPRT CV", CV_DRIVER, NULL);

    *phEventMultiSem = pSem;
    return VINF_SUCCESS;
}
/**
 * Releases a mutex semaphore (Darwin).
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_OWNER if the calling thread does not own the mutex.
 * @param   hMutexSem   The mutex to release.
 */
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Take the lock and do the job.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    int rc = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        if (--pThis->cRecursions == 0)
        {
            /* Fully released: clear ownership and wake one waiter, if any. */
            pThis->hNativeOwner = NIL_RTNATIVETHREAD;
            if (pThis->cWaiters > 0)
                thread_wakeup_prim((event_t)pThis, TRUE /* one_thread */, THREAD_AWAKENED);
        }
    }
    else
        rc = VERR_NOT_OWNER;

    lck_spin_unlock(pThis->pSpinlock);

    AssertRC(rc);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    /* Fix: propagate rc to the caller. The original returned VINF_SUCCESS
       unconditionally, silently masking VERR_NOT_OWNER. */
    return rc;
}
/**
 * Allocates physically contiguous, page-aligned memory below 4GB (Darwin).
 *
 * @returns Virtual address of the allocation, NULL on failure.
 * @param   pPhys   Where to store the physical address.
 * @param   cb      Number of bytes to allocate; rounded up to whole pages.
 */
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
{
    /*
     * validate input.
     */
    AssertPtr(pPhys);
    Assert(cb > 0);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Allocate the memory and ensure that the API is still providing
     * memory that's always below 4GB.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    IOPhysicalAddress PhysAddr;
    void *pv = IOMallocContiguous(cb, PAGE_SIZE, &PhysAddr);
    if (pv)
    {
        if (PhysAddr + (cb - 1) > (IOPhysicalAddress)0xffffffff)
            AssertMsgFailed(("IOMallocContiguous returned high address! PhysAddr=%RX64 cb=%#zx\n", (uint64_t)PhysAddr, cb));
        else if ((uintptr_t)pv & PAGE_OFFSET_MASK)
            AssertMsgFailed(("IOMallocContiguous didn't return a page aligned address - %p!\n", pv));
        else
        {
            /* Both sanity checks passed - hand the block back. */
            *pPhys = PhysAddr;
            IPRT_DARWIN_RESTORE_EFL_AC();
            return pv;
        }
        IOFreeContiguous(pv, cb);
    }

    IPRT_DARWIN_RESTORE_EFL_AC();
    return NULL;
}
/**
 * Spawns a native kernel thread (Solaris).
 *
 * @returns IPRT status code.
 * @param   pThreadInt      The IPRT thread structure; passed to the thread main.
 * @param   pNativeThread   Where to store the native thread handle on success.
 */
DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
{
    RT_ASSERT_PREEMPTIBLE();
    pThreadInt->tid = UINT64_MAX;

    kthread_t *pKThread = thread_create(NULL,               /* Stack, use base */
                                        0,                  /* Stack size */
                                        rtThreadNativeMain, /* Thread function */
                                        pThreadInt,         /* Function data */
                                        0,                  /* Data size */
                                        &p0,                /* Process 0 handle */
                                        TS_RUN,             /* Ready to run */
                                        minclsyspri         /* Priority */
                                        );
    if (RT_UNLIKELY(!pKThread))
        return VERR_OUT_OF_RESOURCES;

    *pNativeThread = (RTNATIVETHREAD)pKThread;
    return VINF_SUCCESS;
}
/**
 * Creates a spinlock (Solaris: adaptive spin mutex at PIL_MAX).
 *
 * @returns IPRT status code.
 * @param   pSpinlock   Where to store the new spinlock handle.
 * @param   fFlags      RTSPINLOCK_FLAGS_INTERRUPT_SAFE or _UNSAFE.
 * @param   pszName     Lock name (unused on this platform).
 */
RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
{
    RT_ASSERT_PREEMPTIBLE();
    AssertReturn(   fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE
                 || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE,
                 VERR_INVALID_PARAMETER);

    /*
     * Allocate.
     */
    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
    PRTSPINLOCKINTERNAL pNew = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    /*
     * Initialize & return.
     */
    pNew->u32Magic  = RTSPINLOCK_MAGIC;
    pNew->fFlags    = fFlags;
    pNew->fIntSaved = 0;
    /** @todo Consider different PIL when not interrupt safe requirement. */
    mutex_init(&pNew->Mtx, "IPRT Spinlock", MUTEX_SPIN, (void *)ipltospl(PIL_MAX));

    *pSpinlock = pNew;
    return VINF_SUCCESS;
}
/**
 * Frees memory allocated by RTMemContAlloc (Solaris).
 *
 * @param   pv  The allocation to free.
 * @param   cb  The size passed to the allocation call.
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    RT_ASSERT_PREEMPTIBLE();
    /* The platform worker does the actual release. */
    rtR0SolMemFree(pv, cb);
}