Beispiel #1
0
/**
 * Mount helper: allocate the locking group attribute and the locking group
 * itself, and store both into the VBoxVFS private mount data.
 *
 * @param pMount   VBoxVFS global data; updated with the locking group and
 *                 its attribute on success, left unchanged on failure.
 *
 * @return 0 on success or a BSD error code otherwise.
 */
static int
vboxvfs_prepare_locking(vboxvfs_mount_t *pMount)
{
    lck_grp_attr_t *pGrpAttr;
    lck_grp_t      *pGrp;

    AssertReturn(pMount, EINVAL);

    /* Group attribute first: it is required to create the group. */
    pGrpAttr = lck_grp_attr_alloc_init();
    if (!pGrpAttr)
    {
        PERROR("Unable to allocate locking group attribute");
        return ENOMEM;
    }

    pGrp = lck_grp_alloc_init("VBoxVFS", pGrpAttr);
    if (!pGrp)
    {
        PERROR("Unable to allocate locking group");
        lck_grp_attr_free(pGrpAttr);
        return ENOMEM;
    }

    /* Publish both only when the whole sequence succeeded. */
    pMount->pLockGroupAttr = pGrpAttr;
    pMount->pLockGroup     = pGrp;
    return 0;
}
Beispiel #2
0
/*
 * One-time code-signing setup: read boot-args that tune enforcement
 * behavior and allocate the lock group used by the code-signing code.
 */
void
cs_init(void)
{
#if MACH_ASSERT && __x86_64__
	/* Debug x86_64 kernels default to panicking when a process is
	 * killed for a code-signing violation; boot-arg may override. */
	panic_on_cs_killed = 1;
#endif /* MACH_ASSERT && __x86_64__ */
	PE_parse_boot_argn("panic_on_cs_killed", &panic_on_cs_killed,
			   sizeof (panic_on_cs_killed));
#if !SECURE_KERNEL
	/* Non-secure kernels allow enforcement to be toggled via boot-args. */
	int disable_cs_enforcement = 0;
	PE_parse_boot_argn("cs_enforcement_disable", &disable_cs_enforcement, 
			   sizeof (disable_cs_enforcement));
	if (disable_cs_enforcement) {
		cs_enforcement_enable = 0;
	} else {
		/* cs_enforcement_panic is only honored when enforcement stays on. */
		int panic = 0;
		PE_parse_boot_argn("cs_enforcement_panic", &panic, sizeof(panic));
		cs_enforcement_panic = (panic != 0);
	}

	PE_parse_boot_argn("cs_debug", &cs_debug, sizeof (cs_debug));

#if !CONFIG_ENFORCE_LIBRARY_VALIDATION
	PE_parse_boot_argn("cs_library_val_enable", &cs_library_val_enable,
			   sizeof (cs_library_val_enable));
#endif
#endif /* !SECURE_KERNEL */

	/* The group attribute is only needed while creating the group.
	 * NOTE(review): allocation results are unchecked — presumably
	 * treated as infallible this early in boot; confirm. */
	lck_grp_attr_t *attr = lck_grp_attr_alloc_init();
	cs_lockgrp = lck_grp_alloc_init("KERNCS", attr);
	lck_grp_attr_free(attr);
}
Beispiel #3
0
/* Release the osquery mutex and the lock bookkeeping it was built from. */
static inline void teardown_locks() {
  /* The mutex must go first; it references the group. */
  lck_mtx_free(osquery.mtx, osquery.lck_grp);
  lck_attr_free(osquery.lck_attr);
  lck_grp_free(osquery.lck_grp);
  lck_grp_attr_free(osquery.lck_grp_attr);
}
Beispiel #4
0
/* Tear down the osquery mutex, then its attribute, group, and group
 * attribute, in that order. */
static inline void teardown_locks() {
    lck_mtx_free(osquery.mtx, osquery.lck_grp);     /* mutex references grp */
    lck_attr_free(osquery.lck_attr);
    lck_grp_free(osquery.lck_grp);
    lck_grp_attr_free(osquery.lck_grp_attr);
}
Beispiel #5
0
/* Release the datagram PCB list rw-lock and its lock group/attributes. */
void lpx_datagram_free()
{
    DEBUG_PRINT(DEBUG_MASK_STREAM_TRACE, ("lpx_datagram_free: Entered\n"));

    /* The rw-lock references the group, so it is freed first. */
    lck_rw_free(lpx_datagram_pcb.lpxp_list_rw, datagram_mtx_grp);
    lck_grp_attr_free(datagram_mtx_grp_attr);
    lck_grp_free(datagram_mtx_grp);
    lck_attr_free(datagram_mtx_attr);
}
Beispiel #6
0
/*
 * One-time BPF setup: allocate the global bpf mutex, register the bpf
 * character device major, and create the /dev/bpfN nodes.
 *
 * @param unused   ignored; present to match the init-routine signature.
 */
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int 	i;
	int	maj;

	if (bpf_devsw_installed == 0) {
		/* Set the installed flag up front; rolled back on failure. */
		bpf_devsw_installed = 1;

		/* Build lock group/attr and the global mutex.
		 * NOTE(review): only bpf_mlock is NULL-checked below — the
		 * grp/attr allocations are presumably assumed infallible
		 * at init time; confirm. */
        bpf_mlock_grp_attr = lck_grp_attr_alloc_init();

        bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);

        bpf_mlock_attr = lck_attr_alloc_init();

        bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		if (bpf_mlock == 0) {
			printf("bpf_init: failed to allocate bpf_mlock\n");
			bpf_devsw_installed = 0;
			return;
		}
		
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			/* Undo every lock allocation and clear the globals so a
			 * later retry starts from a clean state. */
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);
			
			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		/* Create the /dev/bpfN device nodes. */
		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
Beispiel #7
0
/**
 * Allocate and return a new kernel mutex, lazily creating the shared
 * lock group on first use.
 *
 * @return the new mutex (caller owns it), or NULL if the lock group or
 *         the lock attributes could not be allocated.
 *
 * NOTE(review): the lazy initialization of g_lck_group is not
 * synchronized — two threads racing here could each allocate a group
 * (leaking one); confirm the first call is serialized by the caller.
 */
rMutex
    rpal_mutex_create
    (

    )
{
    lck_mtx_t* mutex = NULL;
    
    lck_grp_attr_t* gattr = NULL;
    lck_attr_t* lattr = NULL;
    
    if( 0 == g_lck_group )
    {
        rpal_debug_info( "mutex group not created, creating" );
        
        gattr = lck_grp_attr_alloc_init();
        
        if( NULL == gattr )
        {
            rpal_debug_critical( "could not create mutex group" );
            return NULL;
        }
        
        lck_grp_attr_setstat( gattr );
        
        g_lck_group = lck_grp_alloc_init( "hcphbs", gattr );
        
        /* The attribute is only needed while creating the group. */
        lck_grp_attr_free( gattr );
    }
    
    /* Group creation above may itself have failed. */
    if( NULL == g_lck_group )
    {
        return NULL;
    }
    
    lattr = lck_attr_alloc_init();
    
    if( NULL != lattr )
    {
        mutex = lck_mtx_alloc_init( g_lck_group, lattr );
        /* Attribute only needed while creating the mutex. */
        lck_attr_free( lattr );
    }
    else
    {
        rpal_debug_critical( "could not create mutex attributes" );
    }
    
    return mutex;
}
/* Destroy an rw-lock wrapper: free the lock, its attribute/group
 * bookkeeping, the wrapper itself, and NULL the caller's pointer. */
void _adt_xnu_rw_lock_destroy(ADT_LOCK *lockp)
{
    ADT_LOCK lock;

    if (lockp == NULL || *lockp == NULL) {
        return;
    }

    lock = *lockp;
    lck_rw_free(lock->rw_lock, lock->rw_lock_grp);  /* lock references grp */
    lck_attr_free(lock->rw_lock_attr);
    lck_grp_free(lock->rw_lock_grp);
    lck_grp_attr_free(lock->rw_lock_grp_attr);
    adt_free(lock);
    *lockp = NULL;
}
Beispiel #9
0
/**
 * Mount and unmount helper: release the locking group and its attribute.
 *
 * @param pMount   VBoxVFS global data; each freed member is reset to NULL
 *                 so a repeated call is harmless.
 */
static void
vboxvfs_destroy_locking(vboxvfs_mount_t *pMount)
{
    AssertReturnVoid(pMount);

    if (pMount->pLockGroup != NULL)
    {
        lck_grp_free(pMount->pLockGroup);
        pMount->pLockGroup = NULL;
    }

    if (pMount->pLockGroupAttr != NULL)
    {
        lck_grp_attr_free(pMount->pLockGroupAttr);
        pMount->pLockGroupAttr = NULL;
    }
}
Beispiel #10
0
/* This should only be called from the bootstrap thread. */
void
ktrace_init(void)
{
	static lck_grp_attr_t *grp_attr = NULL;
	static lck_grp_t *grp = NULL;
	static boolean_t done = FALSE;

	/* Idempotent: later calls are no-ops. */
	if (done) {
		return;
	}

	/* The attribute is only needed while the group is created. */
	grp_attr = lck_grp_attr_alloc_init();
	grp = lck_grp_alloc_init("ktrace", grp_attr);
	lck_grp_attr_free(grp_attr);

	ktrace_lock = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);
	assert(ktrace_lock);
	done = TRUE;
}
Beispiel #11
0
static kern_return_t
register_locks(void)
{
	/* already allocated? */
	if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock)
		return KERN_SUCCESS;

	/* allocate lock group attribute and group */
	if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
		goto nomem_out;

	lck_grp_attr_setstat(ucode_slock_grp_attr);

	if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr)))
		goto nomem_out;

	/* Allocate lock attribute */
	if (!(ucode_slock_attr = lck_attr_alloc_init()))
		goto nomem_out;

	/* Allocate the spin lock */
	/* We keep one global spin-lock. We could have one per update
	 * request... but srsly, why would you update microcode like that?
	 */
	if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr)))
		goto nomem_out;

	return KERN_SUCCESS;

nomem_out:
	/* clean up */
	if (ucode_slock)
		lck_spin_free(ucode_slock, ucode_slock_grp);
	if (ucode_slock_attr)
		lck_attr_free(ucode_slock_attr);
	if (ucode_slock_grp)
		lck_grp_free(ucode_slock_grp);
	if (ucode_slock_grp_attr)
		lck_grp_attr_free(ucode_slock_grp_attr);

	return KERN_NO_SPACE;
}
Beispiel #12
0
/*
 * Tear down ZFS kernel-context globals: free the shared lock attribute,
 * the group attribute, and the three lock groups, then release the
 * OSMalloc tag.  Each global is NULLed after its free to guard against
 * accidental reuse.
 */
void
zfs_context_fini(void)
{
	lck_attr_free(zfs_lock_attr);
	zfs_lock_attr = NULL;

	lck_grp_attr_free(zfs_group_attr);
	zfs_group_attr = NULL;

	/* The mutex, rwlock and spinlock groups were allocated separately. */
	lck_grp_free(zfs_mutex_group);
	zfs_mutex_group = NULL;

	lck_grp_free(zfs_rwlock_group);
	zfs_rwlock_group = NULL;

	lck_grp_free(zfs_spinlock_group);
	zfs_spinlock_group = NULL;

	OSMalloc_Tagfree(zfs_kmem_alloc_tag);
}
Beispiel #13
0
/*
 * Undo nullfs_init: destroy the hash mutex, free the node hash table and
 * release the lock group/attribute globals (NULLing each).
 *
 * @return 0 always.
 */
int
nullfs_uninit()
{
	/* Called when the fs is uninstalled; vfsops has no exact hook. */
	lck_mtx_destroy(&null_hashmtx, null_hashlck_grp);
	FREE(null_node_hashtbl, M_TEMP);

	if (null_hashlck_grp_attr != NULL) {
		lck_grp_attr_free(null_hashlck_grp_attr);
		null_hashlck_grp_attr = NULL;
	}
	if (null_hashlck_grp != NULL) {
		lck_grp_free(null_hashlck_grp);
		null_hashlck_grp = NULL;
	}
	if (null_hashlck_attr != NULL) {
		lck_attr_free(null_hashlck_attr);
		null_hashlck_attr = NULL;
	}

	return (0);
}
Beispiel #14
0
/*
 * Initialise cache headers: allocate the lock group/attribute globals,
 * the hash mutex and the node hash table.
 *
 * @return 0 on success; KERN_FAILURE if any lock element allocation
 *         failed (partial allocations are released and NULLed).
 *
 * NOTE(review): the failure path returns KERN_FAILURE while success
 * returns a BSD-style 0 — presumably callers only test for non-zero;
 * confirm before relying on the exact error value.
 */
int
nullfs_init(__unused struct vfsconf * vfsp)
{
	NULLFSDEBUG("%s\n", __FUNCTION__);

	/* assuming for now that this happens immediately and by default after fs
	 * installation */
	null_hashlck_grp_attr = lck_grp_attr_alloc_init();
	if (null_hashlck_grp_attr == NULL) {
		goto error;
	}
	null_hashlck_grp = lck_grp_alloc_init("com.apple.filesystems.nullfs", null_hashlck_grp_attr);
	if (null_hashlck_grp == NULL) {
		goto error;
	}
	null_hashlck_attr = lck_attr_alloc_init();
	if (null_hashlck_attr == NULL) {
		goto error;
	}

	lck_mtx_init(&null_hashmtx, null_hashlck_grp, null_hashlck_attr);
	null_node_hashtbl = hashinit(NULL_HASH_SIZE, M_TEMP, &null_hash_mask);
	NULLFSDEBUG("%s finished\n", __FUNCTION__);
	return (0);
error:
	/* Release whichever lock elements were allocated before the failure. */
	printf("NULLFS: failed to get lock element\n");
	if (null_hashlck_grp_attr) {
		lck_grp_attr_free(null_hashlck_grp_attr);
		null_hashlck_grp_attr = NULL;
	}
	if (null_hashlck_grp) {
		lck_grp_free(null_hashlck_grp);
		null_hashlck_grp = NULL;
	}
	if (null_hashlck_attr) {
		lck_attr_free(null_hashlck_attr);
		null_hashlck_attr = NULL;
	}
	return KERN_FAILURE;
}