/**
 * Creates a mutex semaphore (extended version).
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_PARAMETER on bad flags,
 *          VERR_NO_MEMORY if either the structure or the spinlock cannot
 *          be allocated.
 * @param   phMutexSem  Where to store the handle on success.
 * @param   fFlags      RTSEMMUTEX_FLAGS_XXX; only NO_LOCK_VAL is accepted.
 * @param   hClass      Lock validator class - not used in this build.
 * @param   uSubClass   Lock validator sub-class - not used in this build.
 * @param   pszNameFmt  Name format string - not used in this build.
 */
RTDECL(int) RTSemMutexCreateEx(PRTSEMMUTEX phMutexSem, uint32_t fFlags, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                               const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMMUTEX_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /* The structure must be larger than a pointer or the handle would be ambiguous. */
    AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));
    PRTSEMMUTEXINTERNAL pNew = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pNew));
    if (!pNew)
    {
        IPRT_DARWIN_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    pNew->u32Magic     = RTSEMMUTEX_MAGIC;
    pNew->cWaiters     = 0;
    pNew->cRefs        = 1;                     /* the caller's reference */
    pNew->cRecursions  = 0;
    pNew->hNativeOwner = NIL_RTNATIVETHREAD;    /* unowned */

    Assert(g_pDarwinLockGroup);
    pNew->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
    if (!pNew->pSpinlock)
    {
        RTMemFree(pNew);
        IPRT_DARWIN_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    *phMutexSem = pNew;
    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
 * for the RTMpOnAll API.
 *
 * @param   pvArg   Pointer to the RTMPARGS package.
 */
static void rtmpOnAllDarwinWrapper(void *pvArg)
{
    PRTMPARGS pMpArgs = (PRTMPARGS)pvArg;
    IPRT_DARWIN_SAVE_EFL_AC();
    /* Invoked on each CPU by the rendezvous; report the current CPU number. */
    pMpArgs->pfnWorker(cpu_number(), pMpArgs->pvUser1, pMpArgs->pvUser2);
    IPRT_DARWIN_RESTORE_EFL_AC();
}
/**
 * Restores preemption after RTThreadPreemptDisable, releasing the per-CPU
 * spinlock when the recursion count drops to zero.
 *
 * Fix: the CPU-id bound check was annotated RT_UNLIKELY, but every valid CPU
 * id on the system is expected to be below RT_ELEMENTS(g_aPreemptHacks) --
 * the in-range case is the hot, expected path, so the hint is RT_LIKELY.
 *
 * @param   pState  Preemption state from the matching disable call; the
 *                  u32Reserved cookie (42) proves it was initialized.
 */
RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    Assert(pState->u32Reserved == 42);  /* cookie set by RTThreadPreemptDisable */
    pState->u32Reserved = 0;
    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);

    RTCPUID idCpu = RTMpCpuId();
    if (RT_LIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks)))
    {
        Assert(g_aPreemptHacks[idCpu].cRecursion > 0);
        if (--g_aPreemptHacks[idCpu].cRecursion == 0)
        {
            /* Last nested disable: drop the spinlock that kept us pinned. */
            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
            if (pSpinLock)
            {
                IPRT_DARWIN_SAVE_EFL_AC();
                lck_spin_unlock(pSpinLock);
                IPRT_DARWIN_RESTORE_EFL_AC();
            }
            else
                AssertFailed();     /* rtThreadPreemptDarwinInit should have allocated it */
        }
    }
}
/**
 * Destroys a mutex semaphore created by RTSemMutexCreateEx.
 *
 * Invalidates the magic, wakes all waiters with THREAD_RESTART so they can
 * notice the destruction, then drops the caller's reference.  The structure
 * is only freed once the last reference (possibly held by a waiter) is gone.
 *
 * @returns VINF_SUCCESS; VERR_INVALID_PARAMETER for a nil handle,
 *          VERR_INVALID_HANDLE for a bad pointer/magic.
 * @param   hMutexSem   The mutex handle.  NIL is tolerated.
 */
RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    if (pThis == NIL_RTSEMMUTEX)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC,
                    ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Kill it, wake up all waiting threads and release the reference.
     */
    /* Atomically flip the magic; losing the race means someone else destroyed it. */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMMUTEX_MAGIC, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
    lck_spin_lock(pThis->pSpinlock);

    /* FALSE here means wake *all* threads, not just one. */
    if (pThis->cWaiters > 0)
        thread_wakeup_prim((event_t)pThis, FALSE /* one_thread */, THREAD_RESTART);

    /* NOTE: rtSemMutexDarwinFree unlocks pSpinlock itself before destroying it,
       so exactly one unlock happens on either branch. */
    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Creates the native kernel thread backing an IPRT thread.
 *
 * @returns IPRT status code (Mach status converted on failure).
 * @param   pThreadInt      The IPRT thread structure, passed to the thread main.
 * @param   pNativeThread   Where to return the native thread handle.
 */
DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
{
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    thread_t      hKrnlThread;
    kern_return_t krc = kernel_thread_start(rtThreadNativeMain, pThreadInt, &hKrnlThread);
    if (krc != KERN_SUCCESS)
    {
        IPRT_DARWIN_RESTORE_EFL_AC();
        return RTErrConvertFromMachKernReturn(krc);
    }

    *pNativeThread = (RTNATIVETHREAD)hKrnlThread;
    /* Drop the reference kernel_thread_start returned; the thread keeps running. */
    thread_deallocate(hKrnlThread);
    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * OS specific allocation function.
 *
 * Fix: the RTMEMHDR_FLAG_ANY_CTX rejection used to happen *after*
 * IPRT_DARWIN_SAVE_EFL_AC() and returned without the matching restore,
 * unlike every other exit path in this file.  The check is now performed
 * before the save so the early return is balanced.
 *
 * @returns IPRT status code.
 * @param   cb      Number of bytes requested by the caller.
 * @param   fFlags  RTMEMHDR_FLAG_XXX.
 * @param   ppHdr   Where to return the allocation header on success.
 */
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    /* Any-context allocation isn't supported on darwin; reject before saving EFL.AC. */
    if (RT_UNLIKELY(fFlags & RTMEMHDR_FLAG_ANY_CTX))
        return VERR_NOT_SUPPORTED;
    IPRT_DARWIN_SAVE_EFL_AC();

    PRTMEMHDR pHdr;
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        /* Executable memory goes via a memory object so it can be reprotected. */
        RTR0MEMOBJ hMemObj;
        int rc = RTR0MemObjAllocPage(&hMemObj, cb + sizeof(RTMEMDARWINHDREX), true /*fExecutable*/);
        if (RT_FAILURE(rc))
        {
            IPRT_DARWIN_RESTORE_EFL_AC();
            return rc;
        }
        PRTMEMDARWINHDREX pExHdr = (PRTMEMDARWINHDREX)RTR0MemObjAddress(hMemObj);
        pExHdr->hMemObj = hMemObj;  /* stashed so rtR0MemFree can free the object */
        pHdr = &pExHdr->Hdr;
#if 1 /*fExecutable isn't currently honored above. */
        rc = RTR0MemObjProtect(hMemObj, 0, RTR0MemObjSize(hMemObj),
                               RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
        AssertRC(rc);
#endif
    }
    else
    {
        pHdr = (PRTMEMHDR)IOMalloc(cb + sizeof(*pHdr));
        if (RT_UNLIKELY(!pHdr))
        {
            printf("rtR0MemAllocEx(%#zx, %#x) failed\n", cb + sizeof(*pHdr), fFlags);
            IPRT_DARWIN_RESTORE_EFL_AC();
            return VERR_NO_MEMORY;
        }
    }

    /* Initialize the common header; cb == cbReq as no rounding is recorded here. */
    pHdr->u32Magic = RTMEMHDR_MAGIC;
    pHdr->fFlags   = fFlags;
    pHdr->cb       = cb;
    pHdr->cbReq    = cb;
    *ppHdr = pHdr;

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Called when the refcount reaches zero.
 *
 * The caller must hold pThis->pSpinlock; this function releases it, destroys
 * it, and frees the structure.  Nobody else can reach pThis anymore since the
 * last reference is gone and the magic was invalidated by the destroy path.
 *
 * @param   pThis   The mutex to free.  Spinlock held on entry, gone on exit.
 */
static void rtSemMutexDarwinFree(PRTSEMMUTEXINTERNAL pThis)
{
    IPRT_DARWIN_SAVE_EFL_AC();

    /* Must unlock before destroying -- destroying a held lock is invalid. */
    lck_spin_unlock(pThis->pSpinlock);
    lck_spin_destroy(pThis->pSpinlock, g_pDarwinLockGroup);
    RTMemFree(pThis);

    IPRT_DARWIN_RESTORE_EFL_AC();
}
/**
 * Yields the CPU to other runnable threads.
 *
 * @returns true unconditionally -- whether a reschedule actually happened
 *          cannot be determined from thread_block's result here.
 */
RTDECL(bool) RTThreadYield(void)
{
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /* Block with no continuation; the scheduler picks the next runnable thread. */
    thread_block(THREAD_CONTINUE_NULL);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return true; /* this is fishy */
}
/**
 * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
 * for the RTMpOnOthers API.
 *
 * @param   pvArg   Pointer to the RTMPARGS package.
 */
static void rtmpOnOthersDarwinWrapper(void *pvArg)
{
    PRTMPARGS pMpArgs   = (PRTMPARGS)pvArg;
    RTCPUID   idThisCpu = cpu_number();

    /* Skip the CPU that kicked off the rendezvous (recorded in idCpu). */
    if (idThisCpu != pMpArgs->idCpu)
    {
        IPRT_DARWIN_SAVE_EFL_AC();
        pMpArgs->pfnWorker(idThisCpu, pMpArgs->pvUser1, pMpArgs->pvUser2);
        IPRT_DARWIN_RESTORE_EFL_AC();
    }
}
/**
 * Pokes (interrupts) the specified CPU.
 *
 * @returns VINF_SUCCESS, or VERR_NOT_SUPPORTED when the private CPU
 *          interrupt entry point wasn't resolved.
 * @param   idCpu   The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    RT_ASSERT_INTS_ON();
    if (!g_pfnR0DarwinCpuInterrupt)
        return VERR_NOT_SUPPORTED;

    IPRT_DARWIN_SAVE_EFL_AC(); /* paranoia */
    g_pfnR0DarwinCpuInterrupt(idCpu);
    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Common worker for the sleep APIs: blocks the calling thread for cMillies.
 *
 * @returns VINF_SUCCESS always.
 * @param   cMillies    How long to sleep, in milliseconds.
 */
static int rtR0ThreadDarwinSleepCommon(RTMSINTERVAL cMillies)
{
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /* Convert the relative interval to an absolute deadline, then wait for it. */
    uint64_t uDeadline;
    clock_interval_to_deadline(cMillies, kMillisecondScale, &uDeadline);
    clock_delay_until(uDeadline);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
 * for the RTMpOnSpecific API.
 *
 * @param   pvArg   Pointer to the RTMPARGS package.
 */
static void rtmpOnSpecificDarwinWrapper(void *pvArg)
{
    PRTMPARGS pMpArgs   = (PRTMPARGS)pvArg;
    RTCPUID   idThisCpu = cpu_number();

    /* Only the targeted CPU runs the worker; cHits tells the caller it happened. */
    if (idThisCpu == pMpArgs->idCpu)
    {
        IPRT_DARWIN_SAVE_EFL_AC();
        pMpArgs->pfnWorker(idThisCpu, pMpArgs->pvUser1, pMpArgs->pvUser2);
        ASMAtomicIncU32(&pMpArgs->cHits);
        IPRT_DARWIN_RESTORE_EFL_AC();
    }
}
/** * Frees the per-cpu spin locks used to disable preemption. * * Called by rtR0TermNative. */ void rtThreadPreemptDarwinTerm(void) { IPRT_DARWIN_SAVE_EFL_AC(); for (size_t i = 0; i < RT_ELEMENTS(g_aPreemptHacks); i++) if (g_aPreemptHacks[i].pSpinLock) { lck_spin_free(g_aPreemptHacks[i].pSpinLock, g_pDarwinLockGroup); g_aPreemptHacks[i].pSpinLock = NULL; } IPRT_DARWIN_RESTORE_EFL_AC(); }
/** * Allocates the per-cpu spin locks used to disable preemption. * * Called by rtR0InitNative. */ int rtThreadPreemptDarwinInit(void) { Assert(g_pDarwinLockGroup); IPRT_DARWIN_SAVE_EFL_AC(); for (size_t i = 0; i < RT_ELEMENTS(g_aPreemptHacks); i++) { g_aPreemptHacks[i].pSpinLock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL); if (!g_aPreemptHacks[i].pSpinLock) return VERR_NO_MEMORY; /* (The caller will invoke rtThreadPreemptDarwinTerm) */ } IPRT_DARWIN_RESTORE_EFL_AC(); return VINF_SUCCESS; }
/**
 * Allocates page-aligned, physically contiguous memory below 4GB.
 *
 * @returns Pointer to the allocation, or NULL on failure (including when the
 *          API hands back memory above 4GB or misaligned memory).
 * @param   pPhys   Where to return the physical address of the allocation.
 * @param   cb      Number of bytes; rounded up to a whole page count.
 */
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
{
    /*
     * validate input.
     */
    AssertPtr(pPhys);
    Assert(cb > 0);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Allocate the memory and ensure that the API is still providing
     * memory that's always below 4GB.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    IOPhysicalAddress PhysAddr;
    void *pv = IOMallocContiguous(cb, PAGE_SIZE, &PhysAddr);
    if (pv)
    {
        if (PhysAddr + (cb - 1) > (IOPhysicalAddress)0xffffffff)
        {
            /* Above 4GB: callers of this API depend on 32-bit physical addresses. */
            AssertMsgFailed(("IOMallocContiguous returned high address! PhysAddr=%RX64 cb=%#zx\n", (uint64_t)PhysAddr, cb));
            IOFreeContiguous(pv, cb);
        }
        else if ((uintptr_t)pv & PAGE_OFFSET_MASK)
        {
            AssertMsgFailed(("IOMallocContiguous didn't return a page aligned address - %p!\n", pv));
            IOFreeContiguous(pv, cb);
        }
        else
        {
            *pPhys = PhysAddr;
            IPRT_DARWIN_RESTORE_EFL_AC();
            return pv;
        }
    }

    IPRT_DARWIN_RESTORE_EFL_AC();
    return NULL;
}
/**
 * Frees memory allocated by RTMemContAlloc.
 *
 * @param   pv  The allocation to free; NULL is a no-op.
 * @param   cb  The size passed to RTMemContAlloc (re-rounded to pages here).
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    RT_ASSERT_PREEMPTIBLE();
    if (!pv)
        return;

    Assert(cb > 0);
    AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));

    IPRT_DARWIN_SAVE_EFL_AC();
    /* Round up exactly like the allocation path so the sizes agree. */
    IOFreeContiguous(pv, RT_ALIGN_Z(cb, PAGE_SIZE));
    IPRT_DARWIN_RESTORE_EFL_AC();
}
/**
 * Executes pfnWorker on every online CPU except the calling one.
 *
 * @returns VINF_SUCCESS always.
 * @param   pfnWorker   The worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    RTMPARGS MpArgs;
    MpArgs.pfnWorker = pfnWorker;
    MpArgs.pvUser1   = pvUser1;
    MpArgs.pvUser2   = pvUser2;
    MpArgs.idCpu     = RTMpCpuId();  /* the CPU the wrapper must skip */
    MpArgs.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnOthersDarwinWrapper, &MpArgs);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * OS specific free function.
 *
 * @param   pHdr    Header of the allocation to free (from rtR0MemAllocEx).
 */
DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
{
    IPRT_DARWIN_SAVE_EFL_AC();

    /* Spoil the magic to help catch use-after-free. */
    pHdr->u32Magic += 1;

    if (!(pHdr->fFlags & RTMEMHDR_FLAG_EXEC))
        IOFree(pHdr, pHdr->cb + sizeof(*pHdr));     /* mirrors the IOMalloc size */
    else
    {
        /* Executable allocations live in a memory object; free that instead. */
        PRTMEMDARWINHDREX pExHdr = RT_FROM_MEMBER(pHdr, RTMEMDARWINHDREX, Hdr);
        int rc = RTR0MemObjFree(pExHdr->hMemObj, false /*fFreeMappings*/);
        AssertRC(rc);
    }

    IPRT_DARWIN_RESTORE_EFL_AC();
}
/**
 * Lazily initializes g_cMaxCpus by querying the kernel.
 *
 * Fix: the final sysctlbyname argument is 'newlen', a size_t, so passing the
 * pointer constant NULL was a pointer-to-integer type mismatch; pass 0.
 *
 * @returns The CPU count written to g_cMaxCpus (64 fallback on query failure).
 */
static int rtMpDarwinInitMaxCpus(void)
{
    IPRT_DARWIN_SAVE_EFL_AC();

    int32_t cCpus = -1;
    size_t  oldLen = sizeof(cCpus);
    /* Read-only query: newp = NULL, newlen = 0. */
    int rc = sysctlbyname("hw.ncpu", &cCpus, &oldLen, NULL, 0);
    if (rc)
    {
        printf("IPRT: sysctlbyname(hw.ncpu) failed with rc=%d!\n", rc);
        cCpus = 64; /* whatever */
    }

    ASMAtomicWriteS32(&g_cMaxCpus, cCpus);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return cCpus;
}
/**
 * Executes pfnWorker on the specified CPU only.
 *
 * @returns VINF_SUCCESS if exactly one CPU ran the worker,
 *          VERR_CPU_NOT_FOUND otherwise (e.g. the CPU is offline).
 * @param   idCpu       The target CPU.
 * @param   pfnWorker   The worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    RTMPARGS MpArgs;
    MpArgs.pfnWorker = pfnWorker;
    MpArgs.pvUser1   = pvUser1;
    MpArgs.pvUser2   = pvUser2;
    MpArgs.idCpu     = idCpu;   /* only this CPU executes the worker */
    MpArgs.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnSpecificDarwinWrapper, &MpArgs);

    IPRT_DARWIN_RESTORE_EFL_AC();
    /* cHits is bumped by the wrapper on the matching CPU. */
    return MpArgs.cHits == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND;
}
/**
 * Checks whether the mutex is currently owned by some thread.
 *
 * @returns true if owned, false if unowned or the handle is invalid.
 * @param   hMutexSem   The mutex handle.
 */
RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = (RTSEMMUTEXINTERNAL *)hMutexSem;
    AssertPtrReturn(pThis, false);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, false);
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Take the lock and do the check.
     */
    lck_spin_lock(pThis->pSpinlock);
    bool const fOwned = pThis->hNativeOwner != NIL_RTNATIVETHREAD;
    lck_spin_unlock(pThis->pSpinlock);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return fOwned;
}
RTDECL(int) RTFileOpen(PRTFILE phFile, const char *pszFilename, uint64_t fOpen) { RTFILEINT *pThis = (RTFILEINT *)RTMemAllocZ(sizeof(*pThis)); if (!pThis) return VERR_NO_MEMORY; IPRT_DARWIN_SAVE_EFL_AC(); errno_t rc; pThis->u32Magic = RTFILE_MAGIC; pThis->fOpen = fOpen; pThis->hVfsCtx = vfs_context_current(); if (pThis->hVfsCtx != NULL) { int fCMode = (fOpen & RTFILE_O_CREATE_MODE_MASK) ? (fOpen & RTFILE_O_CREATE_MODE_MASK) >> RTFILE_O_CREATE_MODE_SHIFT : RT_FILE_PERMISSION; int fVnFlags = 0; /* VNODE_LOOKUP_XXX */ int fOpenMode = 0; if (fOpen & RTFILE_O_NON_BLOCK) fOpenMode |= O_NONBLOCK; if (fOpen & RTFILE_O_WRITE_THROUGH) fOpenMode |= O_SYNC; /* create/truncate file */ switch (fOpen & RTFILE_O_ACTION_MASK) { case RTFILE_O_OPEN: break; case RTFILE_O_OPEN_CREATE: fOpenMode |= O_CREAT; break; case RTFILE_O_CREATE: fOpenMode |= O_CREAT | O_EXCL; break; case RTFILE_O_CREATE_REPLACE: fOpenMode |= O_CREAT | O_TRUNC; break; /** @todo replacing needs fixing, this is *not* a 1:1 mapping! */ } if (fOpen & RTFILE_O_TRUNCATE) fOpenMode |= O_TRUNC; switch (fOpen & RTFILE_O_ACCESS_MASK) { case RTFILE_O_READ: fOpenMode |= FREAD; break; case RTFILE_O_WRITE: fOpenMode |= fOpen & RTFILE_O_APPEND ? O_APPEND | FWRITE : FWRITE; break; case RTFILE_O_READWRITE: fOpenMode |= fOpen & RTFILE_O_APPEND ? O_APPEND | FWRITE | FREAD : FWRITE | FREAD; break; default: AssertMsgFailed(("RTFileOpen received an invalid RW value, fOpen=%#x\n", fOpen)); IPRT_DARWIN_RESTORE_EFL_AC(); return VERR_INVALID_PARAMETER; } pThis->fOpenMode = fOpenMode; rc = vnode_open(pszFilename, fOpenMode, fCMode, fVnFlags, &pThis->hVnode, pThis->hVfsCtx); if (rc == 0) { *phFile = pThis; IPRT_DARWIN_RESTORE_EFL_AC(); return VINF_SUCCESS; } rc = RTErrConvertFromErrno(rc); }