Пример #1
0
/*
 * Tear down a funnel: hand its mutex(es) back to the funnel lock
 * group, then release the funnel structure itself.
 */
void
funnel_free(funnel_t *fnl)
{
	/* The primary mutex always exists; the old one is optional. */
	lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
	if (fnl->fnl_oldmutex != NULL) {
		lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
	}
	kfree(fnl, sizeof(funnel_t));
}
Пример #2
0
/*
 * gre_ipfilter_dispose(), the opposite to gre_ipfilter_init(), ie clean up
 */
/*
 * gre_ipfilter_dispose(), the opposite to gre_ipfilter_init(), ie clean up
 *
 * Returns 0 on success (including when there is nothing to clean up),
 * -1 if detaching the ip filter failed; in that case the mutex is left
 * allocated so a later dispose attempt can still be made.
 */
errno_t gre_ipfilter_dispose(void)
{
#ifdef DEBUG
    printf("%s ...\n", __FUNCTION__);
#endif

    /* Nothing allocated -> nothing to do; treat as success. */
    if (gre_ipf_mtx == NULL) {
#ifdef DEBUG
        /* fixed: message previously said "gre_ifp_mtx" (transposed) */
        printf("%s: gre_ipf_mtx already freed\n", __FUNCTION__);
#endif
        return 0;
    }

    /* Guard clause: bail out early when the filter refuses to detach. */
    if (gre_ipfilter_detach() != 0) {
#ifdef DEBUG
        printf("%s: error dispose ipfilter\n", __FUNCTION__);
#endif
        return -1;
    }

    /* Filter is detached; now the mutex can safely go away. */
    if (gre_ipf_mtx != NULL) {
        lck_mtx_free(gre_ipf_mtx, gre_lck_grp);
        gre_ipf_mtx = NULL;
    }
#ifdef DEBUG
    printf("%s: done\n", __FUNCTION__);
#endif
    return 0;
}
Пример #3
0
static inline void teardown_locks() {
  // Tear down in reverse order of setup: the mutex goes first, then the
  // attribute/group objects that were created to back it.
  lck_mtx_free(osquery.mtx, osquery.lck_grp);
  lck_attr_free(osquery.lck_attr);
  lck_grp_free(osquery.lck_grp);
  lck_grp_attr_free(osquery.lck_grp_attr);
}
Пример #4
0
/*
 * Release the per-vnode locks owned by a fuse_vnode_data before the
 * structure itself is recycled or freed by the caller.
 */
void
FSNodeScrub(struct fuse_vnode_data *fvdat)
{
    /* No NULL guard here: assumes createlock was always allocated. */
    lck_mtx_free(fvdat->createlock, fuse_lock_group);
#if M_OSXFUSE_ENABLE_TSLOCKING
    /* The rw-locks only exist when TSLOCKING support is compiled in. */
    lck_rw_free(fvdat->nodelock, fuse_lock_group);
    lck_rw_free(fvdat->truncatelock, fuse_lock_group);
#endif
}
Пример #5
0
// Release the global osquery mutex, then dispose of the lock attribute,
// lock group, and group attribute it was built from.
static inline void teardown_locks() {
    lck_mtx_free(osquery.mtx, osquery.lck_grp);
    lck_attr_free(osquery.lck_attr);
    lck_grp_free(osquery.lck_grp);
    lck_grp_attr_free(osquery.lck_grp_attr);
}
Пример #6
0
/*
 * Destructor: return the hash-bucket array to the kernel allocator and
 * release the pool's mutex.  Both members may legitimately be absent.
 */
OSSymbolPool::~OSSymbolPool()
{
    if (buckets) {
        kfree(buckets, nBuckets * sizeof(Bucket));
        // presumably adjusts the pool's allocation accounting downward
        ACCUMSIZE(-(nBuckets * sizeof(Bucket)));
    }

    if (poolGate)
        lck_mtx_free(poolGate, IOLockGroup);
}
Пример #7
0
/*
 * Dispose of a mount_9p structure: release every owned string buffer,
 * the node table, each mutex that was actually created, and finally
 * the mount structure itself.  A NULL argument is silently ignored.
 */
static void
freemount_9p(mount_9p *nmp)
{
	if (nmp == NULL)
		return;

	/* Owned string/buffer fields. */
	free_9p(nmp->version);
	free_9p(nmp->volume);
	free_9p(nmp->uname);
	free_9p(nmp->aname);
	free_9p(nmp->node);

	/* Mutexes may not all have been created; free only what exists. */
	if (nmp->lck != NULL)
		lck_mtx_free(nmp->lck, lck_grp_9p);
	if (nmp->reqlck != NULL)
		lck_mtx_free(nmp->reqlck, lck_grp_9p);
	if (nmp->nodelck != NULL)
		lck_mtx_free(nmp->nodelck, lck_grp_9p);

	free_9p(nmp);
}
Пример #8
0
/*
 * Free a mutex previously allocated from g_lck_group.
 * NULL handles are tolerated and ignored.
 */
void
    rpal_mutex_free
    (
        rMutex mutex
    )
{
    /* Guard clause: nothing to release for a NULL handle. */
    if( NULL == mutex )
    {
        return;
    }

    lck_mtx_free( mutex, g_lck_group );
}
Пример #9
0
/*
 * Release the global mutex and then the lock group that owns it.
 * Each pointer is cleared after freeing so repeated calls are safe.
 */
static void free_locks()
{
    /* The mutex must be freed before its owning group. */
    if (global_mutex != NULL) {
        lck_mtx_free(global_mutex, global_mutex_group);
        global_mutex = NULL;
    }
    if (global_mutex_group != NULL) {
        lck_grp_free(global_mutex_group);
        global_mutex_group = NULL;
    }
}
Пример #10
0
Файл: bpf.c Проект: SbIm/xnu-env
/*
 * One-time BPF subsystem initialization: allocate the global bpf_mlock
 * (with its lock group and attributes) and register the BPF character
 * device.  If the device registration fails, every lock object is torn
 * down and bpf_devsw_installed is reset so a later call may retry.
 */
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int 	i;
	int	maj;

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;

		/* Build the lock group/attributes, then the global mutex. */
        bpf_mlock_grp_attr = lck_grp_attr_alloc_init();

        bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);

        bpf_mlock_attr = lck_attr_alloc_init();

        bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		/* NOTE(review): on this failure path the group/attr objects
		 * allocated above are not freed — possible leak; confirm. */
		if (bpf_mlock == 0) {
			printf("bpf_init: failed to allocate bpf_mlock\n");
			bpf_devsw_installed = 0;
			return;
		}
		
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			/* Roll back every lock allocation, mutex first. */
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);
			
			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		/* Create the /dev nodes for each BPF unit. */
		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
/*
 * Destroy a fast mutex semaphore.  A NIL handle is accepted and treated
 * as success; otherwise the handle is validated via its magic value,
 * marked dead, and the underlying Darwin mutex plus the wrapper
 * structure are freed.
 */
RTDECL(int)  RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
{
    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
    if (pThis == NIL_RTSEMFASTMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    /* Invalidate the magic first — presumably so any stale user of the
     * handle trips the magic check above before the memory is gone. */
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
    Assert(g_pDarwinLockGroup);
    lck_mtx_free(pThis->pMtx, g_pDarwinLockGroup);
    pThis->pMtx = NULL;
    RTMemFree(pThis);

    return VINF_SUCCESS;
}
Пример #12
0
/*
 * Release a lpxpcb and everything hanging off it: the attached stream
 * PCB (including its queued segments), the per-PCB mutex, and the
 * PCB's entry in the head's list.  A NULL argument is ignored.
 */
void Lpx_PCB_dispense(struct lpxpcb *lpxp )
{	
	struct stream_pcb *cb = NULL;

	DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_dispense: Entered.\n"));

	if (lpxp == 0) {
		return;
	}
	
	cb = (struct stream_pcb *)lpxp->lpxp_pcb;
	
	if (cb != 0) {
		
		register struct lpx_stream_q *q;
				
		/* Unlink every queued element.  Stepping back to si_prev
		 * before remque() keeps the iterator valid while the next
		 * node is removed. */
		for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
			q = q->si_prev;
			remque(q->si_next);
		}
		
		m_freem(dtom(cb->s_lpx));
		FREE(cb, M_PCB);
		lpxp->lpxp_pcb = 0;
	}	
	
    // Free Lock.
	if (lpxp->lpxp_mtx != NULL) {
		lck_mtx_free(lpxp->lpxp_mtx, lpxp->lpxp_mtx_grp);  
	}
				
	/* NOTE(review): the PCB mutex is freed above before the PCB is
	 * unlinked from the list here — confirm no other thread can still
	 * reach lpxp through the list at this point. */
	lck_rw_lock_exclusive(lpxp->lpxp_head->lpxp_list_rw);
	remque(lpxp);
	lck_rw_unlock_exclusive(lpxp->lpxp_head->lpxp_list_rw);
		
	FREE(lpxp, M_PCB);		
}
Пример #13
0
/*
 * gre_ipfilter_init(), initialize resources required by ip filter
 */
/*
 * gre_ipfilter_init(), initialize resources required by ip filter
 *
 * Allocates gre_ipf_mtx and attaches the ip filter.  Idempotent: a
 * second call when the mutex already exists is a no-op success.
 * Returns 0 on success, -1 when either the mutex allocation or the
 * filter attach fails; on attach failure the mutex is freed again so
 * the module state stays consistent and init can be retried.
 */
errno_t gre_ipfilter_init(void)
{
#ifdef DEBUG
    printf("%s ...\n", __FUNCTION__);
#endif

    if (gre_ipf_mtx != NULL) {
#ifdef DEBUG
        /* fixed: message previously said "gre_ifp_mtx" (transposed) */
        printf("%s: gre_ipf_mtx already inited\n", __FUNCTION__);
#endif
        goto success;
    }

    gre_ipf_mtx = lck_mtx_alloc_init(gre_lck_grp, NULL);

    if (gre_ipf_mtx == NULL)
        goto failed;

    if (gre_ipfilter_attach()) {/* attach ip filter */
        /* Attach failed: roll back the mutex allocation. */
        lck_mtx_free(gre_ipf_mtx, gre_lck_grp);
        gre_ipf_mtx = NULL;
        goto failed;
    }

success:
#ifdef DEBUG
    printf("%s: done\n", __FUNCTION__);
#endif
    return 0;

failed:
#ifdef DEBUG
    printf("%s: fail\n", __FUNCTION__);
#endif
    return -1;
}
Пример #14
0
/*
 * Unregister all osxfuse sysctl nodes and, when MacFUSE compatibility
 * mode is compiled in, tear down its support thread and release the
 * sysctl lock together with its lock group.
 */
void
fuse_sysctl_stop(void)
{
    int i;

    /* fuse_sysctl_list is terminated by a NULL entry. */
    for (i = 0; fuse_sysctl_list[i]; i++) {
       sysctl_unregister_oid(fuse_sysctl_list[i]);
    }
    sysctl_unregister_oid(&sysctl__osxfuse);

#if OSXFUSE_ENABLE_MACFUSE_MODE
    /* Serialize against any in-flight MacFUSE-mode transition. */
    lck_mtx_lock(osxfuse_sysctl_lock);

    thread_deallocate(osxfuse_sysctl_macfuse_thread);
    if (fuse_macfuse_mode) {
        fuse_sysctl_macfuse_stop();
    }

    lck_mtx_unlock(osxfuse_sysctl_lock);

    /* All users are gone; release the lock, then its group. */
    lck_mtx_free(osxfuse_sysctl_lock, osxfuse_lock_group);
    lck_grp_free(osxfuse_lock_group);
#endif /* OSXFUSE_ENABLE_MACFUSE_MODE */
}
Пример #15
0
/* ARGSUSED */
/*
 * pipe() system call: create a connected pair of pipe endpoints,
 * install both in the process's file table, and return the read
 * descriptor in retval[0] and the write descriptor in retval[1].
 * Both endpoints share the single mutex pmtx.  On any failure every
 * resource acquired so far is released via the freepipes path.
 */
int
pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval)
{
	struct fileproc *rf, *wf;
	struct pipe *rpipe, *wpipe;
	lck_mtx_t   *pmtx;
	int fd, error;

	if ((pmtx = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr)) == NULL)
	        return (ENOMEM);
	
	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
	        error = ENFILE;
		goto freepipes;
	}
        /*
	 * allocate the space for the normal I/O direction up
	 * front... we'll delay the allocation for the other
	 * direction until a write actually occurs (most likely it won't)...
         */
	error = pipespace(rpipe, choose_pipespace(rpipe->pipe_buffer.size, 0));
        if (error)
	        goto freepipes;

	TAILQ_INIT(&rpipe->pipe_evlist);
	TAILQ_INIT(&wpipe->pipe_evlist);

	/* Allocate the read-side file structure and descriptor. */
	error = falloc(p, &rf, &fd, vfs_context_current());
	if (error) {
	        goto freepipes;
	}
	retval[0] = fd;

	/*
	 * for now we'll create half-duplex pipes(refer returns section above). 
	 * this is what we've always supported..
	 */
	rf->f_flag = FREAD;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	/* Allocate the write-side file structure and descriptor; if it
	 * fails, the read-side descriptor must be handed back first. */
	error = falloc(p, &wf, &fd, vfs_context_current());
	if (error) {
		fp_free(p, retval[0], rf);
	        goto freepipes;
	}
	wf->f_flag = FWRITE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	/* both structures share the same mutex */
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx; 

	retval[1] = fd;
#if CONFIG_MACF
	/*
	 * XXXXXXXX SHOULD NOT HOLD FILE_LOCK() XXXXXXXXXXXX
	 *
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result mac_pipe_label_init() and
	 * mac_pipe_label_associate() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_pipe_label_init(rpipe);
	mac_pipe_label_associate(kauth_cred_get(), rpipe);
	wpipe->pipe_label = rpipe->pipe_label;
#endif
	/* Publish both descriptors to the process atomically. */
	proc_fdlock_spin(p);
	procfdtbl_releasefd(p, retval[0], NULL);
	procfdtbl_releasefd(p, retval[1], NULL);
	fp_drop(p, retval[0], rf, 1);
	fp_drop(p, retval[1], wf, 1);
	proc_fdunlock(p);


	return (0);

freepipes:
	/* Error path: close whatever endpoints exist and free the mutex. */
	pipeclose(rpipe); 
	pipeclose(wpipe); 
	lck_mtx_free(pmtx, pipe_mtx_grp);

	return (error);
}
Пример #16
0
/*
 * Compatibility shim: free a mutex via the legacy mutex_* interface.
 * All such locks are accounted to the shared LockCompatGroup.
 */
void
mutex_free_EXT(
	lck_mtx_t		*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);	
}