static inline void teardown_locks() { // Release locks and their heap memory. lck_mtx_free(osquery.mtx, osquery.lck_grp); lck_attr_free(osquery.lck_attr); lck_grp_free(osquery.lck_grp); lck_grp_attr_free(osquery.lck_grp_attr); }
/*
 * Tear down the osquery locking state: free the mutex, then the lock
 * attribute, the lock group, and the group attribute. The mutex is
 * freed first because lck_mtx_free() needs the group it came from.
 *
 * Fix: parameter list changed from () to (void); the empty list is an
 * old-style non-prototype declaration in pre-C23 C.
 */
static inline void teardown_locks(void) {
  lck_mtx_free(osquery.mtx, osquery.lck_grp);
  lck_attr_free(osquery.lck_attr);
  lck_grp_free(osquery.lck_grp);
  lck_grp_attr_free(osquery.lck_grp_attr);
}
/*
 * Tear down the global ZFS locking context: release the shared lock
 * attribute, the group attribute, each of the three lock groups, and
 * finally the OSMalloc tag used for kernel allocations.  Every freed
 * global is reset to NULL so a stale pointer cannot be reused.
 */
void
zfs_context_fini(void)
{
	lck_attr_free(zfs_lock_attr);
	zfs_lock_attr = NULL;

	lck_grp_attr_free(zfs_group_attr);
	zfs_group_attr = NULL;

	lck_grp_free(zfs_mutex_group);
	zfs_mutex_group = NULL;

	lck_grp_free(zfs_rwlock_group);
	zfs_rwlock_group = NULL;

	lck_grp_free(zfs_spinlock_group);
	zfs_spinlock_group = NULL;

	OSMalloc_Tagfree(zfs_kmem_alloc_tag);
}
/*
 * Release the global mutex and its lock group, if they were allocated.
 * The mutex is freed before the group because lck_mtx_free() needs the
 * group it was allocated from.  Pointers are NULLed so the function is
 * safe to call more than once.
 *
 * Fix: parameter list changed from () to (void) -- the empty list is a
 * non-prototype declaration in pre-C23 C.
 */
static void free_locks(void) {
	if (global_mutex) {
		lck_mtx_free(global_mutex, global_mutex_group);
		global_mutex = NULL;
	}
	if (global_mutex_group) {
		lck_grp_free(global_mutex_group);
		global_mutex_group = NULL;
	}
}
void lpx_datagram_free() { DEBUG_PRINT(DEBUG_MASK_STREAM_TRACE, ("lpx_datagram_free: Entered\n")); // Release Lock. lck_rw_free(lpx_datagram_pcb.lpxp_list_rw, datagram_mtx_grp); lck_grp_attr_free(datagram_mtx_grp_attr); lck_grp_free(datagram_mtx_grp); lck_attr_free(datagram_mtx_attr); }
/*
 * One-time BPF initialization: allocate the global BPF mutex (and its
 * lock group/attribute objects), register the character device switch
 * entry, and create the /dev/bpfN nodes.  On any failure all
 * partially-allocated lock objects are released and the installed flag
 * is cleared so a later call may retry.
 *
 * Fix: the original leaked bpf_mlock_grp_attr / bpf_mlock_grp /
 * bpf_mlock_attr when lck_mtx_alloc_init() failed -- that early-return
 * path now unwinds them like the cdevsw_add() failure path does.
 */
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int i;
	int maj;

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;

		/* Allocate the global BPF mutex and its supporting objects. */
		bpf_mlock_grp_attr = lck_grp_attr_alloc_init();
		bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);
		bpf_mlock_attr = lck_attr_alloc_init();
		bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		if (bpf_mlock == NULL) {
			printf("bpf_init: failed to allocate bpf_mlock\n");
			/* Fix: release the objects allocated above, which the
			 * original code leaked on this path. */
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			return;
		}

		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			/* Unwind: lock before group, group before attribute. */
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);
			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		/* Create the /dev/bpfN device nodes. */
		for (i = 0; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
/*
 * 9P kext unload entry point: deregister the filesystem and release the
 * kext's lock group.  Fails -- leaving all state intact -- when the
 * filesystem cannot be removed (e.g. while still mounted).
 */
__private_extern__ kern_return_t
kext_stop_9p(kmod_info_t * ki, void * d)
{
#pragma unused(ki)
#pragma unused(d)
	TRACE();

	if (vfs_fsremove(vfstable_9p) != 0)
		return KERN_FAILURE;
	vfstable_9p = NULL;

	lck_grp_free(lck_grp_9p);
	return KERN_SUCCESS;
}
/*
 * Destroy an ADT read/write lock, releasing every kernel object behind
 * it, then clear the caller's handle.  A NULL handle pointer or an
 * already-cleared handle is silently ignored.
 */
void
_adt_xnu_rw_lock_destroy(ADT_LOCK *lockp)
{
	ADT_LOCK lock;

	if (lockp == NULL || *lockp == NULL) {
		return;
	}
	lock = *lockp;

	/* The rwlock must be freed before the group it came from. */
	lck_rw_free(lock->rw_lock, lock->rw_lock_grp);
	lck_attr_free(lock->rw_lock_attr);
	lck_grp_free(lock->rw_lock_grp);
	lck_grp_attr_free(lock->rw_lock_grp_attr);

	adt_free(lock);
	*lockp = NULL;
}
// Release resources owned by this work loop -- the lock group and the
// deferred-removal event-source set -- then hand off to the superclass.
// Pointers are cleared after release because free() may run more than
// once during teardown.
void IOFWWorkLoop::free( void )
{
	if ( fLockGroup != NULL )
	{
		lck_grp_free( fLockGroup );
		fLockGroup = NULL;
	}

	if ( fRemoveSourceDeferredSet != NULL )
	{
		fRemoveSourceDeferredSet->release();
		fRemoveSourceDeferredSet = NULL;
	}

	IOWorkLoop::free();
}
void SCSIParallelWorkLoop::free ( void ) { // NOTE: IOWorkLoop::free() gets called multiple times! if ( fLockGroup != NULL ) { lck_grp_free ( fLockGroup ); fLockGroup = NULL; } super::free ( ); }
DECLHIDDEN(void) rtR0TermNative(void)
{
    /* Undo the preemption hacks before tearing down the lock group. */
    rtThreadPreemptDarwinTerm();

    /* Release the global Darwin lock group, if one was created, and
       clear the pointer so it cannot be reused after free. */
    if (g_pDarwinLockGroup != NULL)
    {
        lck_grp_free(g_pDarwinLockGroup);
        g_pDarwinLockGroup = NULL;
    }
}
/**
 * Mount and unmount helper: release the locking group and the locking
 * group attribute owned by a VBoxVFS mount, resetting both pointers to
 * NULL so they cannot be freed twice.
 *
 * @param pMount VBoxVFS global data whose locking group and attribute
 *               are deallocated; asserted non-NULL.
 */
static void
vboxvfs_destroy_locking(vboxvfs_mount_t *pMount)
{
    AssertReturnVoid(pMount);

    if (pMount->pLockGroup != NULL)
    {
        lck_grp_free(pMount->pLockGroup);
        pMount->pLockGroup = NULL;
    }

    if (pMount->pLockGroupAttr != NULL)
    {
        lck_grp_attr_free(pMount->pLockGroupAttr);
        pMount->pLockGroupAttr = NULL;
    }
}
static kern_return_t register_locks(void) { /* already allocated? */ if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock) return KERN_SUCCESS; /* allocate lock group attribute and group */ if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) goto nomem_out; lck_grp_attr_setstat(ucode_slock_grp_attr); if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr))) goto nomem_out; /* Allocate lock attribute */ if (!(ucode_slock_attr = lck_attr_alloc_init())) goto nomem_out; /* Allocate the spin lock */ /* We keep one global spin-lock. We could have one per update * request... but srsly, why would you update microcode like that? */ if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) goto nomem_out; return KERN_SUCCESS; nomem_out: /* clean up */ if (ucode_slock) lck_spin_free(ucode_slock, ucode_slock_grp); if (ucode_slock_attr) lck_attr_free(ucode_slock_attr); if (ucode_slock_grp) lck_grp_free(ucode_slock_grp); if (ucode_slock_grp_attr) lck_grp_attr_free(ucode_slock_grp_attr); return KERN_NO_SPACE; }
int nullfs_uninit() { /* This gets called when the fs is uninstalled, there wasn't an exact * equivalent in vfsops */ lck_mtx_destroy(&null_hashmtx, null_hashlck_grp); FREE(null_node_hashtbl, M_TEMP); if (null_hashlck_grp_attr) { lck_grp_attr_free(null_hashlck_grp_attr); null_hashlck_grp_attr = NULL; } if (null_hashlck_grp) { lck_grp_free(null_hashlck_grp); null_hashlck_grp = NULL; } if (null_hashlck_attr) { lck_attr_free(null_hashlck_attr); null_hashlck_attr = NULL; } return (0); }
/*
 * Initialise cache headers.  Allocates the lock group/attribute
 * objects, initialises the node-hash mutex, and allocates the node
 * hash table.  Returns 0 on success; on any failure every object
 * allocated so far is released and KERN_FAILURE is returned.
 *
 * Fix: the hashinit() result was previously unchecked -- a NULL table
 * would have been dereferenced later by hash-table users.
 */
int nullfs_init(__unused struct vfsconf * vfsp)
{
	NULLFSDEBUG("%s\n", __FUNCTION__);

	/* assuming for now that this happens immediately and by default
	 * after fs installation */
	null_hashlck_grp_attr = lck_grp_attr_alloc_init();
	if (null_hashlck_grp_attr == NULL) {
		goto error;
	}
	null_hashlck_grp = lck_grp_alloc_init("com.apple.filesystems.nullfs", null_hashlck_grp_attr);
	if (null_hashlck_grp == NULL) {
		goto error;
	}
	null_hashlck_attr = lck_attr_alloc_init();
	if (null_hashlck_attr == NULL) {
		goto error;
	}
	lck_mtx_init(&null_hashmtx, null_hashlck_grp, null_hashlck_attr);

	null_node_hashtbl = hashinit(NULL_HASH_SIZE, M_TEMP, &null_hash_mask);
	if (null_node_hashtbl == NULL) {
		/* The mutex is already initialized at this point; destroy
		 * it before the error path frees its lock group. */
		lck_mtx_destroy(&null_hashmtx, null_hashlck_grp);
		goto error;
	}

	NULLFSDEBUG("%s finished\n", __FUNCTION__);
	return (0);

error:
	printf("NULLFS: failed to get lock element\n");
	if (null_hashlck_grp_attr) {
		lck_grp_attr_free(null_hashlck_grp_attr);
		null_hashlck_grp_attr = NULL;
	}
	if (null_hashlck_grp) {
		lck_grp_free(null_hashlck_grp);
		null_hashlck_grp = NULL;
	}
	if (null_hashlck_attr) {
		lck_attr_free(null_hashlck_attr);
		null_hashlck_attr = NULL;
	}
	return KERN_FAILURE;
}
/*
 * Unregister every osxfuse sysctl node and, when MacFUSE compatibility
 * mode is compiled in, tear down the sysctl lock state.  Intended to
 * run once at kext unload.
 */
void fuse_sysctl_stop(void)
{
    int i;

    /* Unregister each OID in the NULL-terminated list, then the root
     * osxfuse node itself. */
    for (i = 0; fuse_sysctl_list[i]; i++) {
        sysctl_unregister_oid(fuse_sysctl_list[i]);
    }

    sysctl_unregister_oid(&sysctl__osxfuse);

#if OSXFUSE_ENABLE_MACFUSE_MODE
    /* Serialize with any in-flight MacFUSE-mode toggle before dropping
     * the worker thread reference and stopping MacFUSE mode. */
    lck_mtx_lock(osxfuse_sysctl_lock);
    thread_deallocate(osxfuse_sysctl_macfuse_thread);
    if (fuse_macfuse_mode) {
        fuse_sysctl_macfuse_stop();
    }
    lck_mtx_unlock(osxfuse_sysctl_lock);

    /* NOTE(review): the mutex is freed immediately after unlock --
     * presumably no other thread can still acquire it during unload;
     * confirm against the kext's stop ordering. */
    lck_mtx_free(osxfuse_sysctl_lock, osxfuse_lock_group);
    lck_grp_free(osxfuse_lock_group);
#endif /* OSXFUSE_ENABLE_MACFUSE_MODE */
}
/*
 * Tear down the SPL rwlock subsystem: release the global rwlock lock
 * group and clear the pointer so it cannot be reused after the free.
 */
void
spl_rwlock_fini(void)
{
	lck_grp_free(zfs_rwlock_group);
	zfs_rwlock_group = NULL;
}