static inline void teardown_locks() { // Release locks and their heap memory. lck_mtx_free(osquery.mtx, osquery.lck_grp); lck_attr_free(osquery.lck_attr); lck_grp_free(osquery.lck_grp); lck_grp_attr_free(osquery.lck_grp_attr); }
/* Free the osquery mutex and the lock group / attribute objects backing it. */
static inline void teardown_locks() {
  /* The mutex references its lock group, so it is freed first. */
  lck_mtx_free(osquery.mtx, osquery.lck_grp);
  /* The attribute and group objects are independent heap allocations. */
  lck_grp_attr_free(osquery.lck_grp_attr);
  lck_grp_free(osquery.lck_grp);
  lck_attr_free(osquery.lck_attr);
}
void lpx_datagram_free() { DEBUG_PRINT(DEBUG_MASK_STREAM_TRACE, ("lpx_datagram_free: Entered\n")); // Release Lock. lck_rw_free(lpx_datagram_pcb.lpxp_list_rw, datagram_mtx_grp); lck_grp_attr_free(datagram_mtx_grp_attr); lck_grp_free(datagram_mtx_grp); lck_attr_free(datagram_mtx_attr); }
/*
 * bpf_init
 *
 * One-time BPF subsystem initialization.  On Apple builds: allocates the
 * global bpf mutex (group + attribute + lock), registers the bpf character
 * device switch, and creates NBPFILTER device nodes.  Any failure unwinds
 * the allocations and clears bpf_devsw_installed so a later call can retry.
 */
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int i;
	int maj;

	if (bpf_devsw_installed == 0) {
		/* Mark installed up front; reset on any failure below. */
		bpf_devsw_installed = 1;

		/* Build the global bpf mutex: group attr -> group -> attr -> lock. */
		bpf_mlock_grp_attr = lck_grp_attr_alloc_init();

		bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);

		bpf_mlock_attr = lck_attr_alloc_init();

		bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		if (bpf_mlock == 0) {
			printf("bpf_init: failed to allocate bpf_mlock\n");
			bpf_devsw_installed = 0;
			return;
		}

		/* Register the character device switch for bpf. */
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			/*
			 * Registration failed: free the mutex before its group,
			 * then the remaining lock objects, and clear the globals
			 * so nothing points at freed memory.
			 */
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);

			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		/* Create the /dev/bpfN nodes under the registered major number. */
		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
/*
 * Allocate and return a new kernel mutex, lazily creating the shared lock
 * group ("hcphbs") on first use.  Returns NULL if the group or the mutex
 * cannot be allocated.  Temporary attribute objects are freed before return.
 */
rMutex
rpal_mutex_create ( )
{
    lck_mtx_t* new_mutex = NULL;
    lck_grp_attr_t* group_attr = NULL;
    lck_attr_t* mutex_attr = NULL;

    /* Lazily create the shared lock group on first call. */
    if( 0 == g_lck_group )
    {
        rpal_debug_info( "mutex group not created, creating" );

        group_attr = lck_grp_attr_alloc_init();
        if( NULL == group_attr )
        {
            rpal_debug_critical( "could not create mutex group" );
            return NULL;
        }

        lck_grp_attr_setstat( group_attr );
        g_lck_group = lck_grp_alloc_init( "hcphbs", group_attr );
        /* The group keeps what it needs; the attr object can go now. */
        lck_grp_attr_free( group_attr );
    }

    if( NULL == g_lck_group )
    {
        return NULL;
    }

    mutex_attr = lck_attr_alloc_init();
    if( NULL == mutex_attr )
    {
        rpal_debug_critical( "could not create mutex attributes" );
        return NULL;
    }

    new_mutex = lck_mtx_alloc_init( g_lck_group, mutex_attr );
    lck_attr_free( mutex_attr );

    return new_mutex;
}
/*
 * Destroy a reader/writer lock wrapper: release the rw lock, its attribute,
 * group and group-attribute objects, then free the wrapper itself.
 * *lockp is cleared so the caller cannot reuse a dangling handle.
 * Safe to call with a NULL pointer or an already-cleared handle.
 */
void _adt_xnu_rw_lock_destroy(ADT_LOCK *lockp)
{
    ADT_LOCK lock;

    if (NULL == lockp || NULL == *lockp) {
        return;
    }

    lock = *lockp;
    *lockp = NULL;

    /* The rw lock references its group, so free it first. */
    lck_rw_free(lock->rw_lock, lock->rw_lock_grp);
    lck_attr_free(lock->rw_lock_attr);
    lck_grp_free(lock->rw_lock_grp);
    lck_grp_attr_free(lock->rw_lock_grp_attr);
    adt_free(lock);
}
static kern_return_t register_locks(void) { /* already allocated? */ if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock) return KERN_SUCCESS; /* allocate lock group attribute and group */ if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) goto nomem_out; lck_grp_attr_setstat(ucode_slock_grp_attr); if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr))) goto nomem_out; /* Allocate lock attribute */ if (!(ucode_slock_attr = lck_attr_alloc_init())) goto nomem_out; /* Allocate the spin lock */ /* We keep one global spin-lock. We could have one per update * request... but srsly, why would you update microcode like that? */ if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) goto nomem_out; return KERN_SUCCESS; nomem_out: /* clean up */ if (ucode_slock) lck_spin_free(ucode_slock, ucode_slock_grp); if (ucode_slock_attr) lck_attr_free(ucode_slock_attr); if (ucode_slock_grp) lck_grp_free(ucode_slock_grp); if (ucode_slock_grp_attr) lck_grp_attr_free(ucode_slock_grp_attr); return KERN_NO_SPACE; }
/*
 * zfs_context_fini
 *
 * Undo zfs_context_init: free the shared lock attribute, the group
 * attribute, the three lock groups (mutex/rwlock/spinlock), and the
 * OSMalloc tag.  Each global is cleared as it is released.
 */
void
zfs_context_fini(void)
{
	lck_attr_free(zfs_lock_attr);
	zfs_lock_attr = NULL;

	lck_grp_attr_free(zfs_group_attr);
	zfs_group_attr = NULL;

	lck_grp_free(zfs_mutex_group);
	zfs_mutex_group = NULL;

	lck_grp_free(zfs_rwlock_group);
	zfs_rwlock_group = NULL;

	lck_grp_free(zfs_spinlock_group);
	zfs_spinlock_group = NULL;

	OSMalloc_Tagfree(zfs_kmem_alloc_tag);
}
int nullfs_uninit() { /* This gets called when the fs is uninstalled, there wasn't an exact * equivalent in vfsops */ lck_mtx_destroy(&null_hashmtx, null_hashlck_grp); FREE(null_node_hashtbl, M_TEMP); if (null_hashlck_grp_attr) { lck_grp_attr_free(null_hashlck_grp_attr); null_hashlck_grp_attr = NULL; } if (null_hashlck_grp) { lck_grp_free(null_hashlck_grp); null_hashlck_grp = NULL; } if (null_hashlck_attr) { lck_attr_free(null_hashlck_attr); null_hashlck_attr = NULL; } return (0); }
/*
 * Initialise cache headers
 *
 * Builds the lock objects (group attr -> group -> attr), initializes the
 * node-hash mutex, and allocates the node hash table.  Assuming for now
 * that this happens immediately and by default after fs installation.
 * Returns 0 on success; on any allocation failure everything allocated so
 * far is released and KERN_FAILURE is returned.
 *
 * Fixed: the hashinit() result was never checked — on failure the function
 * reported success with a NULL null_node_hashtbl, crashing on first lookup.
 */
int
nullfs_init(__unused struct vfsconf * vfsp)
{
	NULLFSDEBUG("%s\n", __FUNCTION__);

	null_hashlck_grp_attr = lck_grp_attr_alloc_init();
	if (null_hashlck_grp_attr == NULL) {
		goto error;
	}
	null_hashlck_grp = lck_grp_alloc_init("com.apple.filesystems.nullfs", null_hashlck_grp_attr);
	if (null_hashlck_grp == NULL) {
		goto error;
	}
	null_hashlck_attr = lck_attr_alloc_init();
	if (null_hashlck_attr == NULL) {
		goto error;
	}

	lck_mtx_init(&null_hashmtx, null_hashlck_grp, null_hashlck_attr);

	null_node_hashtbl = hashinit(NULL_HASH_SIZE, M_TEMP, &null_hash_mask);
	if (null_node_hashtbl == NULL) {
		/* hashinit failed: undo the mutex before tearing down the lock objects. */
		lck_mtx_destroy(&null_hashmtx, null_hashlck_grp);
		goto error;
	}

	NULLFSDEBUG("%s finished\n", __FUNCTION__);
	return (0);

error:
	printf("NULLFS: failed to get lock element\n");
	if (null_hashlck_grp_attr) {
		lck_grp_attr_free(null_hashlck_grp_attr);
		null_hashlck_grp_attr = NULL;
	}
	if (null_hashlck_grp) {
		lck_grp_free(null_hashlck_grp);
		null_hashlck_grp = NULL;
	}
	if (null_hashlck_attr) {
		lck_attr_free(null_hashlck_attr);
		null_hashlck_attr = NULL;
	}
	return KERN_FAILURE;
}
/** * VBoxVFS reclaim callback. * Called when vnode is going to be deallocated. Should release * all the VBoxVFS resources that correspond to current vnode object. * * @param pArgs Operation arguments passed from VFS layer. * * @return 0 on success, BSD error code otherwise. */ static int vboxvfs_vnode_reclaim(struct vnop_reclaim_args *pArgs) { PDEBUG("Releasing vnode resources..."); AssertReturn(pArgs, EINVAL); vnode_t pVnode; vboxvfs_vnode_t *pVnodeData; vboxvfs_mount_t *pMount; mount_t mp; pVnode = pArgs->a_vp; AssertReturn(pVnode, EINVAL); mp = vnode_mount(pVnode); AssertReturn(mp, EINVAL); pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL); pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(pVnode); AssertReturn(pVnodeData, EINVAL); AssertReturn(pVnodeData->pPath, EINVAL); AssertReturn(pVnodeData->pLockAttr, EINVAL); AssertReturn(pVnodeData->pLock, EINVAL); RTMemFree(pVnodeData->pPath); pVnodeData->pPath = NULL; lck_rw_free(pVnodeData->pLock, pMount->pLockGroup); pVnodeData->pLock = NULL; lck_attr_free(pVnodeData->pLockAttr); pVnodeData->pLockAttr = NULL; return 0; }
/**
 * Helper function to create XNU VFS vnode object.
 *
 * @param mp        Mount data structure
 * @param type      vnode type (directory, regular file, etc)
 * @param pParent   Parent vnode object (NULL for VBoxVFS root vnode)
 * @param fIsRoot   Flag that indicates if created vnode object is
 *                  VBoxVFS root vnode (TRUE for VBoxVFS root vnode, FALSE
 *                  for all other vnodes)
 * @param Path      Path within Shared Folder
 * @param ret       Returned newly created vnode
 *
 * @return 0 on success, error code otherwise
 */
int vboxvfs_create_vnode_internal(struct mount *mp, enum vtype type, vnode_t pParent, int fIsRoot, PSHFLSTRING Path, vnode_t *ret)
{
    int rc;
    vnode_t vnode;

    vboxvfs_vnode_t *pVnodeData;
    vboxvfs_mount_t *pMount;

    AssertReturn(mp, EINVAL);

    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);
    AssertReturn(pMount, EINVAL);
    AssertReturn(pMount->pLockGroup, EINVAL);

    AssertReturn(Path, EINVAL);

    pVnodeData = (vboxvfs_vnode_t *)RTMemAllocZ(sizeof(vboxvfs_vnode_t));
    AssertReturn(pVnodeData, ENOMEM);

    /* Initialize private data */
    pVnodeData->pHandle = SHFL_HANDLE_NIL;
    pVnodeData->pPath   = Path;

    pVnodeData->pLockAttr = lck_attr_alloc_init();
    if (pVnodeData->pLockAttr)
    {
        pVnodeData->pLock = lck_rw_alloc_init(pMount->pLockGroup, pVnodeData->pLockAttr);
        if (pVnodeData->pLock)
        {
            struct vnode_fsparam vnode_params;

            vnode_params.vnfs_mp         = mp;
            vnode_params.vnfs_vtype      = type;
            vnode_params.vnfs_str        = NULL;
            vnode_params.vnfs_dvp        = pParent;
            vnode_params.vnfs_fsnode     = pVnodeData;   /** Private data attached per xnu's vnode object */
            vnode_params.vnfs_vops       = g_VBoxVFSVnodeDirOpsVector;
            vnode_params.vnfs_markroot   = fIsRoot;
            vnode_params.vnfs_marksystem = FALSE;
            vnode_params.vnfs_rdev       = 0;
            vnode_params.vnfs_filesize   = 0;
            vnode_params.vnfs_cnp        = NULL;
            vnode_params.vnfs_flags      = VNFS_ADDFSREF | VNFS_NOCACHE;

            rc = vnode_create(VNCREATE_FLAVOR, sizeof(vnode_params), &vnode_params, &vnode);
            if (rc == 0)
            {
                *ret = vnode;
                return 0;
            }

            /* BUG FIX: the original returned 0 (success) here even when
             * vnode_create() failed, leaving *ret unset and leaking the
             * lock, the lock attribute and pVnodeData.  Fall through to
             * the cleanup below and propagate rc instead. */
            lck_rw_free(pVnodeData->pLock, pMount->pLockGroup);
        }
        else
        {
            PDEBUG("Unable to allocate lock");
            rc = ENOMEM;
        }
        lck_attr_free(pVnodeData->pLockAttr);
    }
    else
    {
        PDEBUG("Unable to allocate lock attr");
        rc = ENOMEM;
    }

    /* Failure path: release the private-data block so it is not leaked
     * (the original leaked it on every error path). */
    RTMemFree(pVnodeData);
    return rc;
}