static inline void setup_locks() {
  // Create locks. Cannot be done on the stack.
  osquery.lck_grp_attr = lck_grp_attr_alloc_init();
  lck_grp_attr_setstat(osquery.lck_grp_attr);
  osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);
  osquery.lck_attr = lck_attr_alloc_init();

  // MTX is the IOCTL API handling lock.
  // This ensures only one daemon uses the kernel API at a time.
  osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
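The example above only allocates; a minimal teardown sketch, assuming the same global osquery struct, would free the objects in reverse allocation order (the function name below is illustrative, not taken from the original source):

// Sketch only: assumes the osquery globals initialized in setup_locks() above.
static inline void teardown_locks() {
  // Free in reverse order of allocation.
  lck_mtx_free(osquery.mtx, osquery.lck_grp);
  lck_attr_free(osquery.lck_attr);
  lck_grp_free(osquery.lck_grp);
  lck_grp_attr_free(osquery.lck_grp_attr);
}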
rMutex rpal_mutex_create() {
    lck_mtx_t* mutex = NULL;
    lck_grp_attr_t* gattr = NULL;
    lck_attr_t* lattr = NULL;

    // Lazily create the shared lock group the first time a mutex is requested.
    if( 0 == g_lck_group )
    {
        rpal_debug_info( "mutex group not created, creating" );

        gattr = lck_grp_attr_alloc_init();
        if( NULL == gattr )
        {
            rpal_debug_critical( "could not create mutex group" );
            return NULL;
        }

        lck_grp_attr_setstat( gattr );
        g_lck_group = lck_grp_alloc_init( "hcphbs", gattr );

        // The group retains what it needs; the attribute can be freed right away.
        lck_grp_attr_free( gattr );
    }

    if( NULL == g_lck_group )
    {
        return NULL;
    }

    // Allocate the mutex itself; the lock attribute is only needed during creation.
    lattr = lck_attr_alloc_init();
    if( NULL != lattr )
    {
        mutex = lck_mtx_alloc_init( g_lck_group, lattr );
        lck_attr_free( lattr );
    }
    else
    {
        rpal_debug_critical( "could not create mutex attributes" );
    }

    return mutex;
}
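No matching destructor is shown; a hypothetical rpal_mutex_free (name and signature are assumptions, not from the original project) would hand the mutex back to the shared group:

// Hypothetical counterpart, assuming rMutex wraps the lck_mtx_t* returned above.
void rpal_mutex_free( rMutex mutex )
{
    if( NULL != mutex )
    {
        // g_lck_group stays alive so later rpal_mutex_create() calls can reuse it.
        lck_mtx_free( (lck_mtx_t*)mutex, g_lck_group );
    }
}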
static kern_return_t
register_locks(void)
{
    /* already allocated? */
    if (ucode_slock_grp_attr &&
        ucode_slock_grp &&
        ucode_slock_attr &&
        ucode_slock)
        return KERN_SUCCESS;

    /* allocate lock group attribute and group */
    if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
        goto nomem_out;

    lck_grp_attr_setstat(ucode_slock_grp_attr);

    if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock",
        ucode_slock_grp_attr)))
        goto nomem_out;

    /* Allocate lock attribute */
    if (!(ucode_slock_attr = lck_attr_alloc_init()))
        goto nomem_out;

    /* Allocate the spin lock */
    /* We keep one global spin-lock. We could have one per update
     * request... but srsly, why would you update microcode like that? */
    if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp,
        ucode_slock_attr)))
        goto nomem_out;

    return KERN_SUCCESS;

nomem_out:
    /* clean up */
    if (ucode_slock)
        lck_spin_free(ucode_slock, ucode_slock_grp);
    if (ucode_slock_attr)
        lck_attr_free(ucode_slock_attr);
    if (ucode_slock_grp)
        lck_grp_free(ucode_slock_grp);
    if (ucode_slock_grp_attr)
        lck_grp_attr_free(ucode_slock_grp_attr);

    return KERN_NO_SPACE;
}
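Once register_locks() succeeds, the global spin lock is used in the usual way; a small usage sketch (the wrapper function name is illustrative, not from the original file):

/* Usage sketch only: protects a short critical section with the global lock. */
static void
ucode_update_locked(void)
{
    lck_spin_lock(ucode_slock);
    /* ... touch the shared microcode-update state here ... */
    lck_spin_unlock(ucode_slock);
}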
int
union_init(__unused struct vfsconf *vfsp)
{
    int i;

    union_lck_grp_attr = lck_grp_attr_alloc_init();
#if DIAGNOSTIC
    lck_grp_attr_setstat(union_lck_grp_attr);
#endif
    union_lck_grp = lck_grp_alloc_init("union", union_lck_grp_attr);
    union_lck_attr = lck_attr_alloc_init();
#if DIAGNOSTIC
    lck_attr_setdebug(union_lck_attr);
#endif
    union_mtxp = lck_mtx_alloc_init(union_lck_grp, union_lck_attr);

    for (i = 0; i < NHASH; i++)
        LIST_INIT(&unhead[i]);
    bzero((caddr_t) unvplock, sizeof(unvplock));

    /* add the hook for getdirentries */
    union_dircheckp = union_dircheck;

    return (0);
}
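The mutex allocated here guards the union mount hash state; a brief usage sketch (the function below is illustrative, not part of the original file):

/* Usage sketch only: union_mtxp serializes access to the unhead[] lists. */
static void
union_hash_locked_example(void)
{
    lck_mtx_lock(union_mtxp);
    /* ... look up or insert union nodes in unhead[] here ... */
    lck_mtx_unlock(union_mtxp);
}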