Example #1
void
flowadv_init(void)
{
	STAILQ_INIT(&fadv_list);

	/* Setup lock group and attribute for fadv_lock */
	fadv_lock_grp_attr = lck_grp_attr_alloc_init();
	fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr);
	lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL);

	fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry),
	    sizeof (u_int64_t));
	fadv_zone = zinit(fadv_zone_size,
	    FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME);
	if (fadv_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fadv_zone, Z_EXPAND, TRUE);
	zone_change(fadv_zone, Z_CALLERACCT, FALSE);

	if (kernel_thread_start(flowadv_thread_func, NULL, &fadv_thread) !=
	    KERN_SUCCESS) {
		panic("%s: couldn't create flow event advisory thread",
		    __func__);
		/* NOTREACHED */
	}
	thread_deallocate(fadv_thread);
}
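All the examples in this listing revolve around the same XNU idiom: kernel_thread_start() hands back the new thread with an extra reference, which the caller drops with thread_deallocate() once it no longer needs the thread_t handle. A minimal sketch of that pattern (my_worker and my_start are hypothetical names, not taken from any example):

/* sketch only: my_worker/my_start are hypothetical */
static void
my_worker(void *param, wait_result_t wr)
{
	/* ... do the thread's work ... */
	thread_terminate(current_thread());	/* a continuation must never return */
}

static kern_return_t
my_start(void)
{
	thread_t thread;
	kern_return_t kr = kernel_thread_start(my_worker, NULL, &thread);
	if (kr != KERN_SUCCESS)
		return kr;
	thread_deallocate(thread);	/* drop the reference kernel_thread_start added */
	return KERN_SUCCESS;
}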
Example #2
/* keep the pet thread around while we run */
int
kperf_pet_init(void)
{
	kern_return_t rc;
	thread_t t;

	if( pet_thread != NULL )
		return 0;

	/* make the sync point */
	pet_lock = IOLockAlloc();
	if( pet_lock == NULL )
		return ENOMEM;

	/* create the thread */
	BUF_INFO1(PERF_PET_THREAD, 0);
	rc = kernel_thread_start( pet_thread_loop, NULL, &t );
	if( rc != KERN_SUCCESS )
	{
		IOLockFree( pet_lock );
		pet_lock = NULL;
		return ENOMEM;
	}

	/* OK! */
	return 0;
}
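Note that the thread_t returned in t is neither stored nor released here; presumably pet_thread_loop publishes itself into pet_thread from inside the thread. A variant that keeps the reference accounting local might look like this (a sketch, not the actual kperf source):

	rc = kernel_thread_start( pet_thread_loop, NULL, &t );
	if( rc != KERN_SUCCESS )
	{
		IOLockFree( pet_lock );
		pet_lock = NULL;
		return ENOMEM;
	}
	pet_thread = t;		/* publish the handle ourselves... */
	thread_deallocate( t );	/* ...and drop the extra reference */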
Example #3
void BrcmPatchRAM::processWorkQueue(IOInterruptEventSource*, int)
{
    IOLockLock(mWorkLock);

    // start firmware loading process in a non-workloop thread
    if (mWorkPending & kWorkLoadFirmware)
    {
        DebugLog("_workPending kWorkLoadFirmare\n");
        mWorkPending &= ~kWorkLoadFirmware;
        retain();
        kern_return_t result = kernel_thread_start(&BrcmPatchRAM::uploadFirmwareThread, this, &mWorker);
        if (KERN_SUCCESS == result)
            DebugLog("Success creating firmware uploader thread\n");
        else
        {
            AlwaysLog("ERROR creating firmware uploader thread.\n");
            release();
        }
    }

    // firmware loading thread is finished
    if (mWorkPending & kWorkFinished)
    {
        DebugLog("_workPending kWorkFinished\n");
        mWorkPending &= ~kWorkFinished;
        thread_deallocate(mWorker);
        mWorker = 0;
        release();  // matching retain when thread created successfully
    }

    IOLockUnlock(mWorkLock);
}
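The retain()/release() pair brackets the worker thread's lifetime: retain() before kernel_thread_start(), and release() either immediately on failure or in the kWorkFinished branch once the thread has signalled completion. The worker side is not shown above; a plausible shape, with uploadFirmware() and scheduleWork() as hypothetical helpers, would be:

// Hypothetical worker-side sketch, not the actual BrcmPatchRAM source
void BrcmPatchRAM::uploadFirmwareThread(void *arg, wait_result_t)
{
    BrcmPatchRAM *me = static_cast<BrcmPatchRAM*>(arg);
    me->uploadFirmware();            // hypothetical: perform the actual upload
    me->scheduleWork(kWorkFinished); // hypothetical: re-runs processWorkQueue()
    thread_terminate(current_thread());
}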
Example #4
/* subscribe to memory pressure notifications */
kern_return_t
hv_set_mp_notify(void) {
	kern_return_t kr;

	lck_mtx_lock(hv_support_lck_mtx);
	if (hv_callbacks_enabled == 0) {
		lck_mtx_unlock(hv_support_lck_mtx);
		return KERN_FAILURE;
	}

	if (hv_mp_notify_enabled == 1) {
		hv_mp_notify_destroy = 0;
		lck_mtx_unlock(hv_support_lck_mtx);
		return KERN_SUCCESS;
	}

	kr = kernel_thread_start((thread_continue_t) &hv_mp_notify, NULL,
		&hv_mp_notify_thread);

	if (kr == KERN_SUCCESS) {
		hv_mp_notify_enabled = 1;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}
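The enable path here is a "start once" check under a mutex: bail out if callbacks are disabled, treat an already-running notify thread as success, and set the enabled flag only if kernel_thread_start() succeeds. Stripped of the hv specifics, the pattern generalizes (a sketch, not XNU source):

static lck_mtx_t *g_lck;	/* assumed initialized elsewhere */
static int g_enabled;
static thread_t g_thread;

static kern_return_t
enable_once(thread_continue_t fn)
{
	kern_return_t kr = KERN_SUCCESS;

	lck_mtx_lock(g_lck);
	if (!g_enabled) {
		kr = kernel_thread_start(fn, NULL, &g_thread);
		if (kr == KERN_SUCCESS)
			g_enabled = 1;	/* flag success only once the thread exists */
	}
	lck_mtx_unlock(g_lck);
	return kr;
}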
Example #5
static thread_t thr_new(thr_callback_t f, void *p, int a) {
	thread_t thr;
	if (kernel_thread_start(f, p, &thr) != KERN_SUCCESS)
		return NULL;
	thread_deallocate(thr);
	
	return thr;
}
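Because thr_new() calls thread_deallocate() before returning, the thread_t it hands back is a weak handle: usable for identification, but carrying no reference of its own. A caller sketch, assuming thr_callback_t is compatible with thread_continue_t (the int argument a is unused above):

/* hypothetical callback and caller */
static void my_cb(void *p, wait_result_t wr) {
	/* ... */
	thread_terminate(current_thread());
}

void example(void) {
	thread_t t = thr_new((thr_callback_t)my_cb, NULL, 0);
	if (t == NULL) {
		/* creation failed */
	}
	/* do not pass t to thread_deallocate(); its reference is already gone */
}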
Example #6
inline kern_return_t	myCreateThread(
									   THREAD_FUNCTION	continuation,
									   void*				parameter,
									   THREAD_T*			new_thread
									   )
{
	return kernel_thread_start(continuation, parameter, new_thread);
}
Example #7
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
	if (result != KERN_SUCCESS)
		return (NULL);

	thread_deallocate(thread);

	return (thread);
}
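A corresponding caller, using the IOThreadFunc signature that IOCreateThread() expects (my_io_worker and ctx are hypothetical):

/* sketch only */
static void my_io_worker(void *arg)
{
	/* ... runs until it calls IOExitThread() ... */
}

void start_worker(void *ctx)
{
	IOThread t = IOCreateThread(my_io_worker, ctx);
	if (t == NULL) {
		/* thread creation failed */
	}
}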
Example #8
/* -----------------------------------------------------------------------------
Called when we need to add the L2TP protocol to the domain
Typically, l2tp_add is called by ppp_domain when we add the domain,
but we can add the protocol anytime later, if the domain is present
----------------------------------------------------------------------------- */
int l2tp_add(struct domain *domain)
{
    int 	 err;
    thread_t l2tp_timer_thread = NULL;

    bzero(&l2tp_usr, sizeof(struct pr_usrreqs));
    l2tp_usr.pru_abort 		= pru_abort_notsupp;
    l2tp_usr.pru_accept 	= pru_accept_notsupp;
    l2tp_usr.pru_attach 	= l2tp_attach;
    l2tp_usr.pru_bind 		= pru_bind_notsupp;
    l2tp_usr.pru_connect 	= pru_connect_notsupp;
    l2tp_usr.pru_connect2 	= pru_connect2_notsupp;
    l2tp_usr.pru_control 	= l2tp_control;
    l2tp_usr.pru_detach 	= l2tp_detach;
    l2tp_usr.pru_disconnect	= pru_disconnect_notsupp;
    l2tp_usr.pru_listen 	= pru_listen_notsupp;
    l2tp_usr.pru_peeraddr 	= pru_peeraddr_notsupp;
    l2tp_usr.pru_rcvd 		= pru_rcvd_notsupp;
    l2tp_usr.pru_rcvoob 	= pru_rcvoob_notsupp;
    l2tp_usr.pru_send 		= (int	(*)(struct socket *, int, struct mbuf *, 
				 struct sockaddr *, struct mbuf *, struct proc *))l2tp_send;
    l2tp_usr.pru_sense 		= pru_sense_null;
    l2tp_usr.pru_shutdown 	= pru_shutdown_notsupp;
    l2tp_usr.pru_sockaddr 	= pru_sockaddr_notsupp;
    l2tp_usr.pru_sosend 	= sosend;
    l2tp_usr.pru_soreceive 	= soreceive;
    l2tp_usr.pru_sopoll 	= pru_sopoll_notsupp;


    bzero(&l2tp, sizeof(struct protosw));
    l2tp.pr_type		= SOCK_DGRAM;
    l2tp.pr_domain		= domain;
    l2tp.pr_protocol 	= PPPPROTO_L2TP;
    l2tp.pr_flags		= PR_ATOMIC | PR_ADDR | PR_PROTOLOCK;
    l2tp.pr_ctloutput 	= l2tp_ctloutput;
    l2tp.pr_init		= l2tp_init;

    l2tp.pr_usrreqs 	= &l2tp_usr;

    /* Start timer thread */
    l2tp_timer_thread_is_dying = 0;
    if (kernel_thread_start((thread_continue_t)l2tp_timer, NULL, &l2tp_timer_thread) == KERN_SUCCESS) {
        thread_deallocate(l2tp_timer_thread);
    }
    
    err = net_add_proto(&l2tp, domain);
    if (err)
        return err;

    return KERN_SUCCESS;
}
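One quirk worth noting: if net_add_proto() fails, l2tp_add() returns with the timer thread still running. A more defensive tail (a sketch, assuming l2tp_timer polls l2tp_timer_thread_is_dying, as the flag's name suggests) would be:

    err = net_add_proto(&l2tp, domain);
    if (err) {
        l2tp_timer_thread_is_dying = 1;	/* ask the timer thread to exit */
        return err;
    }

    return KERN_SUCCESS;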
Example #9
void
ux_handler_init(void)
{
	thread_t	thread = THREAD_NULL;

	ux_exception_port = MACH_PORT_NULL;
	(void) kernel_thread_start((thread_continue_t)ux_handler, NULL, &thread);
	thread_deallocate(thread);
	proc_list_lock();
	if (ux_exception_port == MACH_PORT_NULL)  {
		(void)msleep(&ux_exception_port, proc_list_mlock, 0, "ux_handler_wait", 0);
	}
	proc_list_unlock();
}
Example #10
DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
{
    RT_ASSERT_PREEMPTIBLE();

    thread_t NativeThread;
    kern_return_t kr = kernel_thread_start(rtThreadNativeMain, pThreadInt, &NativeThread);
    if (kr == KERN_SUCCESS)
    {
        *pNativeThread = (RTNATIVETHREAD)NativeThread;
        thread_deallocate(NativeThread);
        return VINF_SUCCESS;
    }
    return RTErrConvertFromMachKernReturn(kr);
}
Example #11
bool IOWorkLoop::init()
{
    // The super init and gateLock allocation MUST be done first
    if ( !super::init() )
        return false;
	
    if ( gateLock == NULL ) {
        if ( !( gateLock = IORecursiveLockAlloc()) )
            return false;
    }
	
    if ( workToDoLock == NULL ) {
        if ( !(workToDoLock = IOSimpleLockAlloc()) )
            return false;
        IOSimpleLockInit(workToDoLock);
        workToDo = false;
    }

    if ( controlG == NULL ) {
        controlG = IOCommandGate::commandGate(
            this,
            OSMemberFunctionCast(
                IOCommandGate::Action,
                this,
                &IOWorkLoop::_maintRequest));

        if ( !controlG )
            return false;
        // Point the controlGate at the workLoop.  Usually addEventSource
        // does this automatically.  The problem is in this case addEventSource
        // uses the control gate and it has to be bootstrapped.
        controlG->setWorkLoop(this);
        if (addEventSource(controlG) != kIOReturnSuccess)
            return false;
    }

    if ( workThread == NULL ) {
        thread_continue_t cptr = OSMemberFunctionCast(
            thread_continue_t,
            this,
            &IOWorkLoop::threadMain);
        if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread))
            return false;
    }

    return true;
}
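Callers rarely invoke init() directly; the usual entry point is the IOWorkLoop::workLoop() factory, which allocates the object and runs this initialization, starting workThread as a side effect:

// Typical client-side use: the factory calls init() shown above
IOWorkLoop *wl = IOWorkLoop::workLoop();
if (!wl) {
    // allocation or init() failure (e.g. kernel_thread_start failed)
}
// ... add event sources, run work, then wl->release() when done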
Example #12
// IOFireWireIRMAllocation::handleBusReset
//
//
void IOFireWireIRMAllocation::handleBusReset(UInt32 generation)
{
	// Take the lock
	IORecursiveLockLock(fLock);

	if (!isAllocated)
	{
		IORecursiveLockUnlock(fLock);
		return;
	}
	
	if (fAllocationGeneration == generation)
	{
		IORecursiveLockUnlock(fLock);
		return;
	}
	
	// Spawn a thread to do the reallocation
	IRMAllocationThreadInfo * threadInfo = (IRMAllocationThreadInfo *)IOMalloc( sizeof(IRMAllocationThreadInfo) );
	if( threadInfo ) 
	{
		threadInfo->fGeneration = generation;
		threadInfo->fIRMAllocation = this;
		threadInfo->fControl = fControl;
		threadInfo->fLock = fLock;
		threadInfo->fIsochChannel = fIsochChannel; 
		threadInfo->fBandwidthUnits = fBandwidthUnits;

		retain();	// retain ourself for the thread to use

		thread_t		thread;
		if( kernel_thread_start((thread_continue_t)threadFunc, threadInfo, &thread ) == KERN_SUCCESS )
		{
			thread_deallocate(thread);
		}
	}
	
	// Unlock the lock
	IORecursiveLockUnlock(fLock);
}
Example #13
void foo( void ) {
  thread_t thread;
  /* kernel_thread_start() needs the continuation, its argument, and a
     thread_t out-parameter; the returned references are leaked here. */
  kernel_thread_start(&_IOConfigThread::main, NULL, &thread);
  kernel_thread_start((thread_continue_t)&_IOConfigThread::main, NULL, &thread);
  kernel_thread_start(&pure_c, NULL, &thread);
}
Example #14
bool IOWorkLoop::init()
{
    // The super init and gateLock allocation MUST be done first.
    if ( !super::init() )
        return false;
	
	// Allocate our ExpansionData if it hasn't been allocated already.
	if ( !reserved )
	{
		reserved = IONew(ExpansionData,1);
		if ( !reserved )
			return false;
		
		bzero(reserved,sizeof(ExpansionData));
	}
	
#if DEBUG
	OSBacktrace ( reserved->allocationBacktrace, sizeof ( reserved->allocationBacktrace ) / sizeof ( reserved->allocationBacktrace[0] ) );
#endif
	
    if ( gateLock == NULL ) {
        if ( !( gateLock = IORecursiveLockAlloc()) )
            return false;
    }
	
    if ( workToDoLock == NULL ) {
        if ( !(workToDoLock = IOSimpleLockAlloc()) )
            return false;
        IOSimpleLockInit(workToDoLock);
        workToDo = false;
    }

    if (!reserved) {
        reserved = IONew(ExpansionData, 1);
        if (!reserved)
            return false;
        reserved->options = 0;
    }
	
    IOStatisticsRegisterCounter();

    if ( controlG == NULL ) {
        controlG = IOCommandGate::commandGate(
            this,
            OSMemberFunctionCast(
                IOCommandGate::Action,
                this,
                &IOWorkLoop::_maintRequest));

        if ( !controlG )
            return false;
        // Point the controlGate at the workLoop.  Usually addEventSource
        // does this automatically.  The problem is in this case addEventSource
        // uses the control gate and it has to be bootstrapped.
        controlG->setWorkLoop(this);
        if (addEventSource(controlG) != kIOReturnSuccess)
            return false;
    }

    if ( workThread == NULL ) {
        thread_continue_t cptr = OSMemberFunctionCast(
            thread_continue_t,
            this,
            &IOWorkLoop::threadMain);
        if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread))
            return false;
    }

    (void) thread_set_tag(workThread, THREAD_TAG_IOWORKLOOP);
    return true;
}
Example #15
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err      = 0;
    int mntopts  = 0;
    bool mounted = false;

    uint32_t drandom  = 0;
    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t      fdev = NULL;
    struct fuse_data  *data = NULL;
    fuse_mount_args    fusefs_args;
    struct vfsstatfs  *vfsstatfsp = vfs_statfs(mp);

    kern_return_t kr;
    thread_t      init_thread;

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_t    *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

#if M_OSXFUSE_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (fusefs_args.altflags & FUSE_MOPT_FSTYPENAME) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if ((typenamelen == 0) || (typenamelen > FUSE_FSTYPENAME_MAXLEN)) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 OSXFUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SLOW_STATFS) {
        mntopts |= FSESS_SLOW_STATFS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_BROWSE) {
        vfs_setflags(mp, MNT_DONTBROWSE);
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) == 0) && is_member) {
            mntopts |= FSESS_ALLOW_ROOT;
        } else {
            IOLog("OSXFUSE: caller not a member of OSXFUSE admin group (%d)\n",
                  fuse_admin_group);
            return EPERM;
        }
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_LOCALCACHES) {
        mntopts |= FSESS_NO_ATTRCACHE;
        mntopts |= FSESS_NO_READAHEAD;
        mntopts |= FSESS_NO_UBC;
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        mntopts |= FSESS_LOCALVOL;
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        return EINVAL;
    }

    fuse_device_lock(fdev);

    drandom = fuse_device_get_random(fdev);
    if (fusefs_args.random != drandom) {
        fuse_device_unlock(fdev);
        IOLog("OSXFUSE: failing mount because of mismatched random\n");
        return EINVAL;
    }

    data = fuse_device_get_mpdata(fdev);

    if (!data) {
        fuse_device_unlock(fdev);
        return ENXIO;
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->mount_state != FM_NOTMOUNTED) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_device_unlock(fdev);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_device_unlock(fdev);
        err = ENXIO;
        goto out;
    }

    data->mount_state = FM_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_device_unlock(fdev);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("OSXFUSE: daemon found but identity unknown");
    }

    if (!fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_device_unlock(fdev);
        err = EPERM;
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec =  fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = (struct timespec *)0;
    }

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_device_unlock(fdev);

    /* Send a handshake message to the daemon. */
    kr = kernel_thread_start(fuse_internal_init, data, &init_thread);
    if (kr != KERN_SUCCESS) {
        IOLog("OSXFUSE: could not start init thread\n");
        err = ENOTCONN;
    } else {
        thread_deallocate(init_thread);
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_device_lock(fdev);
        data = fuse_device_get_mpdata(fdev); /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->mount_state = FM_NOTMOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_device_unlock(fdev);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        /*
         * Even though fuse_rootvp will not be reclaimed when calling
         * vnode_put (we incremented its usecount via vnode_ref), release
         * biglock just to be safe.
         */
        fuse_biglock_unlock(biglock);
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */
        (void)vnode_put(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_lock(biglock);
#endif
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = data->iosize;
            ioattr.io_segreadcnt = ioattr.io_segwritecnt = data->iosize / PAGE_SIZE;
            ioattr.io_maxsegreadsize = ioattr.io_maxsegwritesize = data->iosize;
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_device_lock(fdev);
    data = fuse_device_get_mpdata(fdev); /* ...and again */
    if(data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_device_unlock(fdev);
#endif

    return err;
}