RTDECL(int) RTSemMutexCreateEx(PRTSEMMUTEX phMutexSem, uint32_t fFlags, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                               const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMMUTEX_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (pThis)
    {
        pThis->u32Magic     = RTSEMMUTEX_MAGIC;
        pThis->cWaiters     = 0;
        pThis->cRefs        = 1;
        pThis->cRecursions  = 0;
        pThis->hNativeOwner = NIL_RTNATIVETHREAD;
        Assert(g_pDarwinLockGroup);
        pThis->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
        if (pThis->pSpinlock)
        {
            *phMutexSem = pThis;
            IPRT_DARWIN_RESTORE_EFL_AC();
            return VINF_SUCCESS;
        }

        RTMemFree(pThis);
    }
    IPRT_DARWIN_RESTORE_EFL_AC();
    return VERR_NO_MEMORY;
}
RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
                                    const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
    AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
    AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();

    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (pThis)
    {
        pThis->u32Magic            = RTSEMEVENTMULTI_MAGIC;
        pThis->fStateAndGen        = RTSEMEVENTMULTIDARWIN_STATE_GEN_INIT;
        pThis->cRefs               = 1;
        pThis->fHaveBlockedThreads = false;
        Assert(g_pDarwinLockGroup);
        pThis->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
        if (pThis->pSpinlock)
        {
            *phEventMultiSem = pThis;
            return VINF_SUCCESS;
        }

        pThis->u32Magic = 0;
        RTMemFree(pThis);
    }
    return VERR_NO_MEMORY;
}
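/*
 * Hedged sketch: both IPRT constructors above pull their interlock from one
 * global lock group, g_pDarwinLockGroup.  Something along these lines must
 * run (e.g. from rtR0InitNative) before any lck_spin_alloc_init() call; the
 * helper name and the "IPRT" group label are illustrative assumptions, not
 * taken from this excerpt.
 */
#include <kern/locks.h>

static lck_grp_t *g_pDarwinLockGroup = NULL;

static int rtR0DarwinInitLockGroup(void) /* hypothetical helper name */
{
    g_pDarwinLockGroup = lck_grp_alloc_init("IPRT", LCK_GRP_ATTR_NULL);
    return g_pDarwinLockGroup ? VINF_SUCCESS : VERR_NO_MEMORY;
}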
RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
{
    RT_ASSERT_PREEMPTIBLE();
    AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE,
                 VERR_INVALID_PARAMETER);

    /*
     * Allocate.
     */
    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;

    /*
     * Initialize & return.
     */
    pThis->u32Magic  = RTSPINLOCK_MAGIC;
    pThis->fIntSaved = 0;
    pThis->fFlags    = fFlags;
    pThis->pszName   = pszName;
    Assert(g_pDarwinLockGroup);
    pThis->pSpinLock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
    if (!pThis->pSpinLock)
    {
        RTMemFree(pThis);
        return VERR_NO_MEMORY;
    }

    *pSpinlock = pThis;
    return VINF_SUCCESS;
}
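/*
 * Hedged sketch of the matching destructor: this is not the file's actual
 * implementation, just the minimal teardown implied by RTSpinlockCreate()
 * above.  lck_spin_free() undoes lck_spin_alloc_init(); the magic is
 * invalidated first so stale handles trip the assertions on other paths.
 */
RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertReturn(pThis->u32Magic == RTSPINLOCK_MAGIC, VERR_INVALID_PARAMETER);

    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSPINLOCK_MAGIC);
    lck_spin_free(pThis->pSpinLock, g_pDarwinLockGroup);
    RTMemFree(pThis);
    return VINF_SUCCESS;
}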
/**
 * Allocates the per-cpu spin locks used to disable preemption.
 *
 * Called by rtR0InitNative.
 */
int rtThreadPreemptDarwinInit(void)
{
    Assert(g_pDarwinLockGroup);

    for (size_t i = 0; i < RT_ELEMENTS(g_aPreemptHacks); i++)
    {
        g_aPreemptHacks[i].pSpinLock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
        if (!g_aPreemptHacks[i].pSpinLock)
            return VERR_NO_MEMORY; /* (The caller will invoke rtThreadPreemptDarwinTerm) */
    }
    return VINF_SUCCESS;
}
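/*
 * Hedged sketch of the term routine named in the comment above.  Its exact
 * body is not in this excerpt, but to balance the lck_spin_alloc_init()
 * calls it plausibly walks the same array and returns each lock to the
 * group.
 */
void rtThreadPreemptDarwinTerm(void)
{
    for (size_t i = 0; i < RT_ELEMENTS(g_aPreemptHacks); i++)
        if (g_aPreemptHacks[i].pSpinLock)
        {
            lck_spin_free(g_aPreemptHacks[i].pSpinLock, g_pDarwinLockGroup);
            g_aPreemptHacks[i].pSpinLock = NULL;
        }
}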
static void
slice_init(slice_t *slice, slice_allocator_t *sa)
{
    list_link_init(&slice->slice_link_node);
    slice->sa = sa;
    slice->alloc_count = 0;

    if (sa->flags & SMALL_ALLOC) {
        slice->free_list.small = 0;
        for (int i = 0; i < slice->sa->num_allocs_per_slice; i++) {
            small_allocatable_row_t *row =
                slice_small_get_row_address(slice, i);
            slice_small_insert_free_row(slice, row);
        }
    } else {
        slice->free_list.large = 0;
#ifdef SLICE_CHECK_THREADS
        slice->semaphore = 0;
#endif /* SLICE_CHECK_THREADS */
#ifdef SLICE_SPINLOCK
        slice->spinlock = lck_spin_alloc_init(bmalloc_lock_group,
            bmalloc_lock_attr);
#endif /* SLICE_SPINLOCK */
        for (int i = 0; i < slice->sa->num_allocs_per_slice; i++) {
            allocatable_row_t *row = slice_get_row_address(slice, i);
            row->navigation.slice = slice;
#ifdef SLICE_CHECK_FREE_SIZE
            row->allocated_bytes = 0;
#endif /* SLICE_CHECK_FREE_SIZE */
#ifdef SLICE_CHECK_ROW_HEADERS
            row->prefix = ROW_HEADER_GUARD;
            row->suffix = ROW_HEADER_GUARD;
#endif /* SLICE_CHECK_ROW_HEADERS */
#ifdef SLICE_POISON_USER_SPACE
            slice_poison_row(slice, row);
#endif /* SLICE_POISON_USER_SPACE */
            slice_insert_free_row(slice, row);
        }
    }
}
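/*
 * Hedged counterpart sketch (hypothetical, not taken from the bmalloc
 * source): a slice initialized above would be torn down by returning the
 * per-slice spin lock to the same group it was allocated from.
 */
static void
slice_fini(slice_t *slice)
{
#ifdef SLICE_SPINLOCK
    /* lck_spin_free() destroys the lock and releases its memory. */
    lck_spin_free(slice->spinlock, bmalloc_lock_group);
#endif /* SLICE_SPINLOCK */
    slice->sa = NULL;
}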
static kern_return_t
register_locks(void)
{
    /* already allocated? */
    if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock)
        return KERN_SUCCESS;

    /* allocate lock group attribute and group */
    if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
        goto nomem_out;

    lck_grp_attr_setstat(ucode_slock_grp_attr);

    if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr)))
        goto nomem_out;

    /* Allocate lock attribute */
    if (!(ucode_slock_attr = lck_attr_alloc_init()))
        goto nomem_out;

    /* Allocate the spin lock */
    /* We keep one global spin-lock. We could have one per update
     * request... but srsly, why would you update microcode like that? */
    if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr)))
        goto nomem_out;

    return KERN_SUCCESS;

nomem_out:
    /* clean up */
    if (ucode_slock)
        lck_spin_free(ucode_slock, ucode_slock_grp);
    if (ucode_slock_attr)
        lck_attr_free(ucode_slock_attr);
    if (ucode_slock_grp)
        lck_grp_free(ucode_slock_grp);
    if (ucode_slock_grp_attr)
        lck_grp_attr_free(ucode_slock_grp_attr);

    return KERN_NO_SPACE;
}
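/*
 * Hypothetical usage sketch: once register_locks() succeeds, the single
 * global lock is taken with lck_spin_lock()/lck_spin_unlock().  The function
 * name and body are illustrative assumptions, not XNU code.
 */
static void
ucode_critical_section(void)
{
    lck_spin_lock(ucode_slock);
    /* ... touch the shared microcode-update state here; no blocking
     * calls are allowed while a spin lock is held ... */
    lck_spin_unlock(ucode_slock);
}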
static void
slice_allocator_init(slice_allocator_t *sa, sa_size_t max_alloc_size)
{
    osif_zero_memory(sa, sizeof (slice_allocator_t));

    /* Select a memory pool allocation size for the allocator. */
#ifndef DEBUG
    if (max_alloc_size <= 512) {
        sa->flags = SMALL_ALLOC;
        sa->slice_size = PAGE_SIZE;
    } else {
        sa->flags = 0;
        sa->slice_size = 4096 + 128*1024;
    }
#else
    sa->flags = 0;
    sa->slice_size = 4096 + 128*1024;
#endif

    sa->spinlock = lck_spin_alloc_init(bmalloc_lock_group, bmalloc_lock_attr);

    /*
     * Create lists for tracking the state of the slices as memory is
     * allocated.
     */
    list_create(&sa->free, sizeof (slice_t),
        offsetof(slice_t, slice_link_node));
    list_create(&sa->partial, sizeof (slice_t),
        offsetof(slice_t, slice_link_node));
#ifdef SLICE_ALLOCATOR_TRACK_FULL_SLABS
    list_create(&sa->full, sizeof (slice_t),
        offsetof(slice_t, slice_link_node));
#endif /* SLICE_ALLOCATOR_TRACK_FULL_SLABS */

    sa->max_alloc_size = max_alloc_size;
    sa->num_allocs_per_slice = (sa->slice_size - sizeof (slice_t)) /
        (sizeof (allocatable_row_t) + max_alloc_size);
}
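/*
 * Illustrative arithmetic for the capacity formula above; every size below
 * is an assumption for the example, not measured from the bmalloc codebase.
 * Runnable in userspace.
 */
#include <stdio.h>

int main(void)
{
    const unsigned slice_size = 4096 + 128 * 1024; /* 135168: large-alloc slice */
    const unsigned slice_hdr  = 192;               /* assumed sizeof (slice_t) */
    const unsigned row_hdr    = 32;                /* assumed sizeof (allocatable_row_t) */
    const unsigned max_alloc  = 2048;              /* example max_alloc_size */

    /* Same formula as slice_allocator_init(): the slice header comes off
     * the top, then each allocation costs a row header plus the payload. */
    unsigned num_allocs_per_slice = (slice_size - slice_hdr) / (row_hdr + max_alloc);
    printf("allocs per slice: %u\n", num_allocs_per_slice); /* prints 64 */
    return 0;
}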
__private_extern__ int
pp_filter_register()
{
    struct sflt_filter pp_filter = {
        0,                    // sf_handle
        SFLT_GLOBAL,          // sf_flags
        0,                    // sf_name
        ppfilter_unregister,  // sf_unregistered
        ppfilter_attach,      // sf_attach
        ppfilter_detach,      // sf_detach
        NULL,                 // sf_notify
        NULL,                 // sf_getpeername
        NULL,                 // sf_getsockname
        NULL,                 // sf_data_in
        NULL,                 // sf_data_out
        ppfilter_connect_in,  // sf_connect_in
        ppfilter_connect_out, // sf_connect_out
        NULL,                 // sf_bind
        NULL,                 // sf_setoption
        NULL,                 // sf_getoption
        NULL,                 // sf_listen
        NULL,                 // sf_ioctl
    };
    int i;
    for (i = 0; i < PP_DYN_ENTRIES_COUNT; ++i)
        pp_dyn_entries[i].addr = INADDR_NONE;

    pp_dynlck = lck_spin_alloc_init(pp_spin_grp, LCK_ATTR_NULL);
    if (!pp_dynlck)
        return (ENOMEM);

    errno_t err;
    pp_filter.sf_handle = PP_FILTER_TCP_HANDLE;
    // data filter is used for PASV FTP support
    pp_filter.sf_data_in = ppfilter_data_in;
    pp_filter.sf_name = "PeerGuardian TCP";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_STREAM, IPPROTO_TCP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        return (err);
    }

    pp_filter.sf_handle = PP_FILTER_TCP6_HANDLE;
    pp_filter.sf_name = "PeerGuardian TCP6";
    if ((err = sflt_register(&pp_filter, AF_INET6, SOCK_STREAM, IPPROTO_TCP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        pp_filter_deregister_handle(PP_FILTER_TCP_HANDLE, &tcpFiltDone);
        goto filter_register_exit;
    }

    pp_filter.sf_data_in = (typeof(pp_filter.sf_data_in))NULL;

    // UDP can "connect", but it can also just send the data, so we need to monitor both
    pp_filter.sf_data_in = ppfilter_data_in_raw;
    pp_filter.sf_data_out = ppfilter_data_out_raw;
    pp_filter.sf_handle = PP_FILTER_UDP_HANDLE;
    pp_filter.sf_name = "PeerGuardian UDP";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_DGRAM, IPPROTO_UDP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        pp_filter_deregister_handle(PP_FILTER_TCP_HANDLE, &tcpFiltDone);
        pp_filter_deregister_handle(PP_FILTER_TCP6_HANDLE, &tcp6FiltDone);
        goto filter_register_exit;
    }

    pp_filter.sf_handle = PP_FILTER_UDP6_HANDLE;
    pp_filter.sf_name = "PeerGuardian UDP6";
    if ((err = sflt_register(&pp_filter, AF_INET6, SOCK_DGRAM, IPPROTO_UDP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        pp_filter_deregister_handle(PP_FILTER_TCP_HANDLE, &tcpFiltDone);
        pp_filter_deregister_handle(PP_FILTER_TCP6_HANDLE, &tcp6FiltDone);
        pp_filter_deregister_handle(PP_FILTER_UDP_HANDLE, &udpFiltDone);
        goto filter_register_exit;
    }

    // RAW sockets don't "connect", they just send/recv data
    pp_filter.sf_connect_in = (typeof(pp_filter.sf_connect_in))NULL;
    pp_filter.sf_connect_out = (typeof(pp_filter.sf_connect_out))NULL;

    // Failures of the following are not fatal
    pp_filter.sf_handle = PP_FILTER_ICMP_HANDLE;
    pp_filter.sf_name = "PeerGuardian ICMP";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_RAW, IPPROTO_ICMP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        icmpFiltDone = -1;
    }

    pp_filter.sf_handle = PP_FILTER_ICMP6_HANDLE;
    pp_filter.sf_name = "PeerGuardian ICMP6";
    if ((err = sflt_register(&pp_filter, AF_INET6, SOCK_RAW, IPPROTO_ICMPV6))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        icmp6FiltDone = -1;
    }

    pp_filter.sf_handle = PP_FILTER_RAW_HANDLE;
    pp_filter.sf_name = "PeerGuardian RAW";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_RAW, IPPROTO_RAW))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        rawFiltDone = -1;
    }
    err = 0;

filter_register_exit:
    if (err && pp_dynlck)
        lck_spin_free(pp_dynlck, pp_spin_grp);

    return (err);
}
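/*
 * Hedged teardown sketch (hypothetical; the real pp_filter_deregister_handle()
 * is not shown in this excerpt): each handle goes through sflt_unregister(),
 * and the dynamic-entry lock allocated in pp_filter_register() is freed.
 * Note that sflt_unregister() completes asynchronously, so real code should
 * wait for the sf_unregistered callbacks before tearing down shared state.
 */
__private_extern__ void
pp_filter_deregister_all(void)
{
    (void)sflt_unregister(PP_FILTER_TCP_HANDLE);
    (void)sflt_unregister(PP_FILTER_TCP6_HANDLE);
    (void)sflt_unregister(PP_FILTER_UDP_HANDLE);
    (void)sflt_unregister(PP_FILTER_UDP6_HANDLE);

    if (pp_dynlck) {
        lck_spin_free(pp_dynlck, pp_spin_grp);
        pp_dynlck = NULL;
    }
}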