Example #1
static int
vnop_close_9p(struct vnop_close_args *ap)
{
	openfid_9p *op;
	node_9p *np;
	int e;

	TRACE();
	e = 0;
	np = NTO9P(ap->a_vp);
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	op = ofidget(np, ap->a_fflag);
	if (op->fid == NOFID) {
		e = EBADF;
		goto error;
	}
	if (OSDecrementAtomic(&op->ref) == 1) {
		if (ISSET(np->flags, NODE_MMAPPED))
			ubc_msync(np->vp, 0, ubc_getsize(np->vp), NULL, UBC_PUSHDIRTY|UBC_SYNC);
		else
			cluster_push(np->vp, IO_CLOSE);

		/* root gets clunk in vfs_unmount_9p() */
		if (!ISSET(np->nmp->flags, F_UNMOUNTING))
			e = clunk_9p(np->nmp, op->fid);
		op->fid = NOFID;
	}
error:
	nunlock_9p(np);
	return e;
}
Example #2
void
telemetry_task_ctl_locked(task_t task, uint32_t reasons, int enable_disable)
{
	uint32_t origflags;

	assert((reasons != 0) && ((reasons | TF_TELEMETRY) == TF_TELEMETRY));

	task_lock_assert_owned(task);

	origflags = task->t_flags;

	if (enable_disable == 1) {
		task->t_flags |= reasons;
		if ((origflags & TF_TELEMETRY) == 0) {
			OSIncrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG			
			printf("%s: telemetry OFF -> ON (%d active)\n", proc_name_address(task->bsd_info), telemetry_active_tasks);
#endif			
		}
	} else {
		task->t_flags &= ~reasons;
		if (((origflags & TF_TELEMETRY) != 0) && ((task->t_flags & TF_TELEMETRY) == 0)) {
			/*
			 * If this task went from having at least one telemetry bit to having none,
			 * the net change was to disable telemetry for the task.
			 */
			OSDecrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG
			printf("%s: telemetry ON -> OFF (%d active)\n", proc_name_address(task->bsd_info), telemetry_active_tasks);
#endif
		}
	}
}
Example #3
void OSMetaClass::instanceDestructed() const
{
    if ((1 == OSDecrementAtomic((SInt32 *) &instanceCount)) && superClassLink)
	superClassLink->instanceDestructed();

    if( ((int) instanceCount) < 0)
	printf("%s: bad retain(%d)", getClassName(), instanceCount);
}
Example #4
/*
 * Free struct sackhole.
 */
static void
tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
{
	zfree(sack_hole_zone, hole);

	tp->snd_numholes--;
	OSDecrementAtomic(&tcp_sack_globalholes);
}
Example #5
static void
utun_detached(
	ifnet_t	interface)
{
	struct utun_pcb	*pcb = ifnet_softc(interface);
	
	utun_free(pcb);
	
	OSDecrementAtomic(&utun_ifcount);
}
Example #6
void
rw_exit(krwlock_t *rwlp)
{
    if (rwlp->rw_owner == current_thread()) {
        /* exclusive (writer) hold: clear ownership before unlocking */
        rwlp->rw_owner = NULL;
        lck_rw_unlock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
    } else {
        /* shared (reader) hold: drop our reader count before unlocking */
        OSDecrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
        lck_rw_unlock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
    }
}
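rw_exit() above undoes what the matching acquire path sets up: a shared acquire bumps rw_readers with OSIncrementAtomic() while an exclusive acquire records rw_owner. The following is only a plausible sketch of that counterpart, written against the Solaris-style krwlock_t shim implied by rw_exit(); the krw_t/RW_WRITER names are assumed, and this is not taken from the original port.

void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
    if (rw == RW_WRITER) {
        /* exclusive: take the lock, then record ourselves as owner */
        lck_rw_lock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
        rwlp->rw_owner = current_thread();
    } else {
        /* shared: take the lock, then count ourselves as a reader */
        lck_rw_lock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
        OSIncrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
    }
}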
Example #7
static void
ipsec_detached(
			   ifnet_t	interface)
{
	struct ipsec_pcb	*pcb = ifnet_softc(interface);
    
	ifnet_release(pcb->ipsec_ifp);
	ipsec_free(pcb);
    
	OSDecrementAtomic(&ipsec_ifcount);
}
Example #8
/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int 	size;

	if (1 == OSDecrementAtomic(&((lock_set)->ref_count))) {
		ipc_port_dealloc_kernel(lock_set->port);
		size = (int)(sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1)));
		kfree(lock_set, size);
	}
}
Example #9
void com_VFSFilter0::putClient()
{
    //
    // do not reorder this call or add any condition before OSDecrementAtomic, as it must always be executed!
    //
    if( 0x1 == OSDecrementAtomic( &this->clientInvocations ) && NULL == this->userClient ){
        
        //
        // this was the last invocation
        //
        wakeup( &this->clientInvocations );
    }
}
Example #10
void KernelPatcher::onKextSummariesUpdated() {
	if (that) {
		// macOS 10.12 generates an interrupt during this call, but unlike 10.11 and below
		// it never stops handling interrupts and hangs forever inside hndl_allintrs.
		// This happens even with cpus=1, and the reason is not fully understood.
		//
		// For this reason on 10.12 and above the outer function is routed, and so far it
		// seems to cause fewer issues. Regarding syncing:
		//  - the only place modifying gLoadedKextSummaries is updateLoadedKextSummaries;
		//  - updateLoadedKextSummaries is called from load/unload separately;
		//  - sKextSummariesLock is not exported or visible.
		// As a result no syncing should be necessary but there are guards for future
		// changes and in case of any misunderstanding.
		
		if (getKernelVersion() >= KernelVersion::Sierra) {
			if (OSIncrementAtomic(&updateSummariesEntryCount) != 0) {
				panic("onKextSummariesUpdated entered another time");
			}
			
			that->orgUpdateLoadedKextSummaries();
		}

		DBGLOG("patcher @ invoked at kext loading/unloading");
		
		if (that->khandlers.size() > 0 && that->loadedKextSummaries) {
			auto num = (*that->loadedKextSummaries)->numSummaries;
			if (num > 0) {
				OSKextLoadedKextSummary &last = (*that->loadedKextSummaries)->summaries[num-1];
				DBGLOG("patcher @ last kext is %llX and its name is %.*s", last.address, KMOD_MAX_NAME, last.name);
				// We may add khandlers items inside the handler
				for (size_t i = 0; i < that->khandlers.size(); i++) {
					if (!strncmp(that->khandlers[i]->id, last.name, KMOD_MAX_NAME)) {
						DBGLOG("patcher @ caught the right kext at %llX, invoking handler", last.address);
						that->khandlers[i]->address = last.address;
						that->khandlers[i]->size = last.size;
						that->khandlers[i]->handler(that->khandlers[i]);
						// Remove the item
						that->khandlers.erase(i);
						break;
					}
				}
			} else {
				SYSLOG("patcher @ no kext is currently loaded, this should not happen");
			}
		}
		
		if (getKernelVersion() >= KernelVersion::Sierra && OSDecrementAtomic(&updateSummariesEntryCount) != 1) {
			panic("onKextSummariesUpdated left another time");
		}
	}
}
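The Sierra-only guard above is a compact non-reentrancy assertion: OSIncrementAtomic() and OSDecrementAtomic() both return the value held before the update, so a nested or concurrent entry trips the first panic and an unbalanced exit trips the second. A minimal standalone sketch of the same pattern, with illustrative names:

static SInt32 gEntryCount;	/* illustrative counter, starts at 0 */

static void
guarded_entry(void)
{
	if (OSIncrementAtomic(&gEntryCount) != 0)
		panic("guarded_entry entered another time");

	/* ... work that must never run reentrantly or concurrently ... */

	if (OSDecrementAtomic(&gEntryCount) != 1)
		panic("guarded_entry left another time");
}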
Example #11
void
OSMetaClass::instanceDestructed() const
{
    if ((1 == OSDecrementAtomic(&instanceCount)) && superClassLink) {
        superClassLink->instanceDestructed();
    }

    if (((int)instanceCount) < 0) {
        OSKext * myKext = (OSKext *)reserved;

        OSKextLog(myKext, kOSMetaClassLogSpec,
            // xxx - this phrasing is rather cryptic
            "OSMetaClass: Class %s - bad retain (%d)",
            getClassName(), instanceCount);
    }
}
Example #12
VFSFilter0UserClient* com_VFSFilter0::getClient()
/*
 if non-NULL is returned, putClient() must be called
 */
{
    VFSFilter0UserClient*  currentClient;
    
    //
    // if there is no user client, then nobody calls in for logging
    //
    if( NULL == this->userClient || this->pendingUnregistration )
        return NULL;
    
    OSIncrementAtomic( &this->clientInvocations );
    
    currentClient = (VFSFilter0UserClient*)this->userClient;
    
    //
    // if the current client is NULL or can't be atomically exchanged
    // with the same value then the unregistration is in progress,
    // the call to OSCompareAndSwapPtr( NULL, NULL, &this->userClient )
    // checks this->userClient for NULL atomically
    //
    if( !currentClient ||
       !OSCompareAndSwapPtr( currentClient, currentClient, &this->userClient ) ||
       OSCompareAndSwapPtr( NULL, NULL, &this->userClient ) ){
        
        //
        // the unregistration is in the progress and waiting for all
        // invocations to return
        //
        assert( this->pendingUnregistration );
        if( 0x1 == OSDecrementAtomic( &this->clientInvocations ) ){
            
            //
            // this was the last invocation
            //
            wakeup( &this->clientInvocations );
        }
        
        return NULL;
    }
    
    return currentClient;
}
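Taken together with putClient() from Example #9, every use of the returned client is bracketed by a getClient()/putClient() pair, which is what allows unregistration to wait for clientInvocations to drain to zero. A hypothetical caller could look like the sketch below; logMessage() and sendToClient() are invented names used only to show the bracketing.

void com_VFSFilter0::logMessage(const char *msg)
{
    VFSFilter0UserClient* client = getClient();
    if (!client) {
        /* no user client registered, or unregistration is in progress */
        return;
    }

    sendToClient(client, msg);   /* any work that touches the client goes here */

    putClient();                 /* must balance the successful getClient() */
}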
Example #13
void throttle_lowpri_io(boolean_t ok_to_sleep)
{
	int i = 0;	/* initialized: the trailing KERNEL_DEBUG reads i even when the sleep loop never runs */
	int max_try_num;
	struct uthread *ut;
	struct _throttle_io_info_t *info;

	ut = get_bsdthread_info(current_thread());

	if ((ut->uu_lowpri_window == 0) || (ut->uu_throttle_info == NULL))
		goto done;

	info = ut->uu_throttle_info;
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START,
		     ut->uu_lowpri_window, ok_to_sleep, 0, 0, 0);

	if (ok_to_sleep == TRUE) {
		max_try_num = lowpri_max_waiting_msecs / LOWPRI_SLEEP_INTERVAL * MAX(1, info->numthreads_throttling);

		for (i=0; i<max_try_num; i++) {
			if (throttle_io_will_be_throttled_internal(ut->uu_lowpri_window, info)) {
				IOSleep(LOWPRI_SLEEP_INTERVAL);
    				DEBUG_ALLOC_THROTTLE_INFO("sleeping because of info = %p\n", info, info );
			} else {
				break;
			}
		}
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END,
		     ut->uu_lowpri_window, i*5, 0, 0, 0);
	SInt32 oldValue;
	oldValue = OSDecrementAtomic(&info->numthreads_throttling);

	if (oldValue <= 0) {
		panic("%s: numthreads negative", __func__);
	}
done:
	ut->uu_lowpri_window = 0;
	if (ut->uu_throttle_info)
		throttle_info_rel(ut->uu_throttle_info);
	ut->uu_throttle_info = NULL;
}
Example #14
__private_extern__ void
devtimer_release(devtimer_ref timer)
{
    UInt32	old_retain_count;

    old_retain_count = OSDecrementAtomic(&timer->dt_retain_count);
    switch (old_retain_count) {
    case 0:
        panic("devtimer_release: retain count is 0\n");
        break;
    case 1:
        devtimer_invalidate(timer);
        FREE(timer, M_DEVTIMER);
        _devtimer_printf("devtimer: timer released\n");
        break;
    default:
        break;
    }
    return;
}
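devtimer_release() spells out the semantics relied on throughout these examples: OSDecrementAtomic() returns the counter's value before the decrement, so an old value of 1 means the caller just dropped the last reference and 0 means the object was already over-released. A minimal generic sketch of that idiom, with invented names (my_object_destroy() stands in for whatever teardown the object needs):

#include <libkern/OSAtomic.h>

typedef struct my_object {
    SInt32 refcount;    /* manipulated only via OSIncrementAtomic/OSDecrementAtomic */
    /* ... payload ... */
} my_object_t;

static void
my_object_release(my_object_t *obj)
{
    SInt32 old = OSDecrementAtomic(&obj->refcount);

    if (old == 0)
        panic("my_object_release: reference count went negative");
    if (old == 1)
        my_object_destroy(obj);    /* assumed teardown helper; last reference is gone */
}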
Example #15
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type)
{
    struct fuse_dispatcher  fdi;
    struct fuse_release_in *fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh  = NULL;
    int err = 0;    /* initialized so the early "goto out" path returns a defined value */

    fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
                      vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("fuse4x: filehandle_put called on a valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    int op = vnode_isdir(vp) ? FUSE_RELEASEDIR : FUSE_RELEASE;
    fuse_dispatcher_init(&fdi, sizeof(*fri));
    fuse_dispatcher_make_vp(&fdi, op, vp, context);
    fri = fdi.indata;
    fri->fh = fufh->fh_id;
    fri->flags = fufh->open_flags;

    err = fuse_dispatcher_wait_answer(&fdi);
    if (!err) {
        fuse_ticket_drop(fdi.ticket);
    }

out:
    OSDecrementAtomic((SInt32 *)&fuse_fh_current);
    fuse_invalidate_attr(vp);

    return err;
}
Example #16
/*
 * Release the reference and if the item was allocated and this is the last
 * reference then free it.
 *
 * This routine always returns the old value.
 */
static int
throttle_info_rel(struct _throttle_io_info_t *info)
{
	SInt32 oldValue = OSDecrementAtomic(&info->refcnt);

	DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", 
		info, (int)(oldValue -1), info );

	/* The reference count just went negative, very bad */
	if (oldValue == 0)
		panic("throttle info ref cnt went negative!");

	/* 
	 * Once reference count is zero, no one else should be able to take a 
	 * reference 
	 */
	if ((info->refcnt == 0) && (info->alloc)) {
		DEBUG_ALLOC_THROTTLE_INFO("Freeing info = %p\n", info, info );
		FREE(info, M_TEMP); 
	}
	return oldValue;
}
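throttle_info_rel() is documented as always returning the old value; its natural counterpart takes a reference with OSIncrementAtomic(). The snippet below is only a plausible reconstruction for illustration, reusing the refcnt and alloc fields visible above; it is not guaranteed to match the actual xnu implementation.

static SInt32
throttle_info_ref(struct _throttle_io_info_t *info)
{
	SInt32 oldValue = OSIncrementAtomic(&info->refcnt);

	/* an allocated info block should never be referenced while its count is zero */
	if (info->alloc && (oldValue == 0))
		panic("taking a reference on throttle info that was never created!");

	return oldValue;
}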
Example #17
/*ARGSUSED*/
static int
zfs_vfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);	
	objset_t *os = zfsvfs->z_os;
	znode_t	*zp, *nextzp;
	int ret, i;
	int flags;
	
	/*XXX NOEL: delegation admin stuffs, add back if we use delg. admin */
#if 0
	ret = 0; /* UNDEFINED: secpolicy_fs_unmount(cr, vfsp); */
	if (ret) {
		ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
		    ZFS_DELEG_PERM_MOUNT, cr);
		if (ret)
			return (ret);
	}

	/*
	 * We purge the parent filesystem's vfsp as the parent filesystem
	 * and all of its snapshots have their vnode's v_vfsp set to the
	 * parent's filesystem's vfsp.  Note, 'z_parent' is self
	 * referential for non-snapshots.
	 */
	(void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
#endif

	/*
	 * Unmount any snapshots mounted under .zfs before unmounting the
	 * dataset itself.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL &&
	    (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) {
		return (ret);
#endif
	flags = SKIPSYSTEM;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	ret = vflush(mp, NULLVP, flags);

	/*
	 * Mac OS X needs a file system modify time
	 *
	 * We use the mtime of the "com.apple.system.mtime" 
	 * extended attribute, which is associated with the
	 * file system root directory.
	 *
	 * Here we need to release the ref we took on z_mtime_vp during mount.
	 */
	if ((ret == 0) || (mntflags & MNT_FORCE)) {
		if (zfsvfs->z_mtime_vp != NULL) {
			struct vnode *mvp;

			mvp = zfsvfs->z_mtime_vp;
			zfsvfs->z_mtime_vp = NULL;

			if (vnode_get(mvp) == 0) {
				vnode_rele(mvp);
				vnode_recycle(mvp);
				vnode_put(mvp);
			}
		}
	}

	if (!(mntflags & MNT_FORCE)) {
		/*
		 * Check the number of active vnodes in the file system.
		 * Our count is maintained in the vfs structure, but the
		 * number is off by 1 to indicate a hold on the vfs
		 * structure itself.
		 *
		 * The '.zfs' directory maintains a reference of its
		 * own, and any active references underneath are
		 * reflected in the vnode count.
		 */
		
		if (ret)
			return (EBUSY);
#if 0
		if (zfsvfs->z_ctldir == NULL) {
			if (vfsp->vfs_count > 1)
				return (EBUSY);
		} else {
			if (vfsp->vfs_count > 2 ||
			    zfsvfs->z_ctldir->v_count > 1) {
				return (EBUSY);
			}
		}
#endif
	}

	rw_enter(&zfsvfs->z_unmount_lock, RW_WRITER);
	rw_enter(&zfsvfs->z_unmount_inactive_lock, RW_WRITER);

	/*
	 * At this point there are no vops active, and any new vops will
	 * fail with EIO since we have z_unmount_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 * Note, the dmu can still callback via znode_pageout_func()
	 * which can zfs_znode_free() the znode.  So we lock
	 * z_all_znodes; search the list for a held dbuf; drop the lock
	 * (we know zp can't disappear if we hold a dbuf lock) then
	 * regrab the lock and restart.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
		nextzp = list_next(&zfsvfs->z_all_znodes, zp);
		if (zp->z_dbuf_held) {
			/* dbufs should only be held when force unmounting */
			zp->z_dbuf_held = 0;
			mutex_exit(&zfsvfs->z_znodes_lock);
			dmu_buf_rele(zp->z_dbuf, NULL);
			/* Start again */
			mutex_enter(&zfsvfs->z_znodes_lock);
			nextzp = list_head(&zfsvfs->z_all_znodes);
		}
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	/*
	 * Set the unmounted flag and let new vops unblock.
	 * zfs_inactive will have the unmounted behavior, and all other
	 * vops will fail with EIO.
	 */
	zfsvfs->z_unmounted = B_TRUE;
	rw_exit(&zfsvfs->z_unmount_lock);
	rw_exit(&zfsvfs->z_unmount_inactive_lock);

	/*
	 * Unregister properties.
	 */
#ifndef __APPLE__
	if (!dmu_objset_is_snapshot(os))
		zfs_unregister_callbacks(zfsvfs);
#endif
	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zfsvfs->z_log) {
		zil_close(zfsvfs->z_log);
		zfsvfs->z_log = NULL;
	}

	/*
	 * Evict all dbufs so that cached znodes will be freed
	 */
	if (dmu_objset_evict_dbufs(os, B_TRUE)) {
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
		(void) dmu_objset_evict_dbufs(os, B_FALSE);
	}

	/*
	 * Finally close the objset
	 */
	dmu_objset_close(os);

	/*
	 * We can now safely destroy the '.zfs' directory node.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL)
		zfsctl_destroy(zfsvfs);
#endif

	/*
	 * Note that this work is normally done in zfs_freevfs, but since
	 * there is no VOP_FREEVFS in OSX, we free VFS items here
	 */
	OSDecrementAtomic((SInt32 *)&zfs_active_fs_count);
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_destroy(&zfsvfs->z_hold_mtx[i]);
 
 	mutex_destroy(&zfsvfs->z_znodes_lock);
 	list_destroy(&zfsvfs->z_all_znodes);
 	rw_destroy(&zfsvfs->z_unmount_lock);
 	rw_destroy(&zfsvfs->z_unmount_inactive_lock);

	return (0);
}


 
struct vnode* vnode_getparent(struct vnode *vp);  /* sys/vnode_internal.h */

static int
zfs_vget_internal(zfsvfs_t *zfsvfs, ino64_t ino, struct vnode **vpp)
{
	struct vnode	*vp;
	struct vnode	*dvp = NULL;
	znode_t		*zp;
	int		error;

	*vpp = NULL;
	
	/*
	 * On Mac OS X we always export the root directory id as 2
	 * and its parent as 1
	 */
	if (ino == 2 || ino == 1)
		ino = zfsvfs->z_root;
	
	if ((error = zfs_zget(zfsvfs, ino, &zp)))
		goto out;

	/* Don't expose EA objects! */
	if (zp->z_phys->zp_flags & ZFS_XATTR) {
		vnode_put(ZTOV(zp));
		error = ENOENT;
		goto out;
	}

	*vpp = vp = ZTOV(zp);

	if (vnode_isvroot(vp))
		goto out;

	/*
	 * If this znode didn't just come from the cache then
	 * it won't have a valid identity (parent and name).
	 *
	 * Manually fix its identity here (normally done by namei lookup).
	 */
	if ((dvp = vnode_getparent(vp)) == NULL) {
		if (zp->z_phys->zp_parent != 0 &&
		    zfs_vget_internal(zfsvfs, zp->z_phys->zp_parent, &dvp)) {
			goto out;
		}
		if ( vnode_isdir(dvp) ) {
			char objname[ZAP_MAXNAMELEN];  /* 256 bytes */
			int flags = VNODE_UPDATE_PARENT;

			/* Look for znode's name in its parent's zap */
			if ( zap_value_search(zfsvfs->z_os,
			                      zp->z_phys->zp_parent, 
			                      zp->z_id,
			                      ZFS_DIRENT_OBJ(-1ULL),
			                      objname) == 0 ) {
				flags |= VNODE_UPDATE_NAME;
			}

			/* Update the znode's parent and name */
			vnode_update_identity(vp, dvp, objname, 0, 0, flags);
		}
	}
	/* All done with znode's parent */
	vnode_put(dvp);
out:
	return (error);
}

/*
 * Get a vnode from a file id (ignoring the generation)
 *
 * Use by NFS Server (readdirplus) and VFS (build_path)
 */
static int
zfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	int error;

	ZFS_ENTER(zfsvfs);

	/*
	 * On Mac OS X we always export the root directory id as 2.
	 * So we don't expect to see the real root directory id
	 * from zfs_vfs_vget KPI (unless of course the real id was
	 * already 2).
	 */
	if ((ino == zfsvfs->z_root) && (zfsvfs->z_root != 2)) {
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}
	error = zfs_vget_internal(zfsvfs, ino, vpp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
Example #18
static int tcp_cubic_cleanup(struct tcpcb *tp)
{
#pragma unused(tp)
	OSDecrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets);
	return (0);
}
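tcp_cubic_cleanup() (like tcp_newreno_cleanup() in Example #21) only drops the per-algorithm socket count; presumably the matching init callback increments the same counter when a socket adopts the module. A hedged sketch of just that counter side (the real callback would also have to set up any per-connection congestion-control state):

static int tcp_cubic_init(struct tcpcb *tp)
{
#pragma unused(tp)
	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets);
	return (0);
}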
Example #19
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err      = 0;
    int mntopts  = 0;
    bool mounted = false;

    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t      fdev = NULL;
    struct fuse_data  *data = NULL;
    fuse_mount_args    fusefs_args;
    struct vfsstatfs  *vfsstatfsp = vfs_statfs(mp);

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    err = ENOTSUP;

    vfs_setlocklocal(mp);

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname)
        return EINVAL;

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                  "Either add user (id=%d) to group (id=%d), "
                  "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                  kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                      "Either add user (id=%d) to group (id=%d), "
                      "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                      kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'\n");
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }
    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;

    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

    if (data->mounted) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!data->opened) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->mounted = true;
    OSIncrementAtomic((SInt32 *)&fuse_mount_count);
    mounted = true;

    if (data->dead) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running by user_id=%d does not have privileges to mount on directory %s owned by user_id=%d\n",
              kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname, kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec =  fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize  = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree  = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused  = vfs_attr.f_bused;
        vfsstatfsp->f_files  = vfs_attr.f_files;
        vfsstatfsp->f_ffree  = vfs_attr.f_ffree;
        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner  = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags  = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mnfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }
    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }
    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSDecrementAtomic((SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->mounted = false;
            if (!data->opened) {
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            ioattr.io_maxsegreadsize = ioattr.io_maxsegwritesize =
              ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = data->iosize;
            ioattr.io_segreadcnt = ioattr.io_segwritecnt = data->iosize / PAGE_SIZE;
            vfs_setioattr(mp, &ioattr);
        }
    }

    return err;
}
Example #20
static errno_t
fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context)
{
    int err = 0;
    bool force = false;

    fuse_device_t          fdev;
    struct fuse_data      *data;
    struct fuse_dispatcher fdi;

    vnode_t fuse_rootvp = NULLVP;

    fuse_trace_printf_vfsop();

    if (mntflags & MNT_FORCE) {
        force = true;
    }

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("fuse4x: no mount private data in vfs_unmount");
    }

    fdev = data->fdev;

    if (data->dead) {

        /*
         * If the file system daemon is dead, it's pointless to try to do
         * any unmount-time operations that go out to user space. Therefore,
         * we pretend that this is a force unmount. However, this isn't of much
         * use. That's because if any non-root vnode is in use, the vflush()
         * that the kernel does before calling our VFS_UNMOUNT will fail
         * if the original unmount wasn't forcible already. That earlier
         * vflush is called with SKIPROOT though, so it wouldn't bail out
         * on the root vnode being in use.
         *
         * If we want, we could set FORCECLOSE here so that a non-forced
         * unmount will be "upgraded" to a forced unmount if the root vnode
         * is busy (you are cd'd to the mount point, for example). It's not
         * quite pure to do that though.
         *
         *    flags |= FORCECLOSE;
         *    log("fuse4x: forcing unmount on a dead file system\n");
         */

    } else if (!data->inited) {
        force = true;
        log("fuse4x: forcing unmount on not-yet-alive file system\n");
        fuse_data_kill(data);
    }

    fuse_rootvp = data->rootvp;

    fuse_trace_printf("%s: Calling vflush(mp, fuse_rootvp, flags=0x%X);\n", __FUNCTION__, force ? FORCECLOSE : 0);
    err = vflush(mp, fuse_rootvp, force ? FORCECLOSE : 0);
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);
    if (err) {
        return err;
    }

    if (vnode_isinuse(fuse_rootvp, 1) && !force) {
        return EBUSY;
    }

    fuse_trace_printf("%s: Calling vnode_rele(fuse_rootp);\n", __FUNCTION__);
    vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount(). */
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    data->rootvp = NULLVP;

    fuse_trace_printf("%s: Calling vflush(mp, NULLVP, FORCECLOSE);\n", __FUNCTION__);
    (void)vflush(mp, NULLVP, FORCECLOSE);
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    if (!data->dead) {
        fuse_dispatcher_init(&fdi, 0 /* no data to send along */);
        fuse_dispatcher_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context);

        fuse_trace_printf("%s: Waiting for reply from FUSE_DESTROY.\n", __FUNCTION__);
        err = fuse_dispatcher_wait_answer(&fdi);
        fuse_trace_printf("%s:   Reply received.\n", __FUNCTION__);
        if (!err) {
            fuse_ticket_drop(fdi.ticket);
        }
    }

    fuse_data_kill(data);

    fuse_lck_mtx_lock(fdev->mtx);

    vfs_setfsprivate(mp, NULL);
    data->mounted = false;
    OSDecrementAtomic((SInt32 *)&fuse_mount_count);

    if (!data->opened) {

        /* fdev->data was left for us to clean up */

        fuse_device_close_final(fdev);

        /* fdev->data is gone now */
    }

    fuse_lck_mtx_unlock(fdev->mtx);

    return 0;
}
Example #21
int tcp_newreno_cleanup(struct tcpcb *tp) {
#pragma unused(tp)
	OSDecrementAtomic((volatile SInt32 *)&tcp_cc_newreno.num_sockets);
	return 0;
}
Example #22
void throttle_info_update(void *throttle_info, int flags)
{
	struct _throttle_io_info_t *info = throttle_info;
	struct uthread	*ut;
	int policy;
	int is_throttleable_io = 0;
	int is_passive_io = 0;
	SInt32 oldValue;

	if (!lowpri_IO_initial_window_msecs || (info == NULL))
		return;
	policy = throttle_get_io_policy(&ut);

	switch (policy) {
	case IOPOL_DEFAULT:
	case IOPOL_NORMAL:
		break;
	case IOPOL_THROTTLE:
		is_throttleable_io = 1;
		break;
	case IOPOL_PASSIVE:
		is_passive_io = 1;
		break;
	default:
		printf("unknown I/O policy %d", policy);
		break;
	}

	if (!is_throttleable_io && ISSET(flags, B_PASSIVE))
		is_passive_io |= 1;

	if (!is_throttleable_io) {
		if (!is_passive_io){
			microuptime(&info->last_normal_IO_timestamp);
		}
	} else if (ut) {
		/*
		 * I'd really like to do the IOSleep here, but
		 * we may be holding all kinds of filesystem related locks
		 * and the pages for this I/O marked 'busy'...
		 * we don't want to cause a normal task to block on
		 * one of these locks while we're throttling a task marked
		 * for low priority I/O... we'll mark the uthread and
		 * do the delay just before we return from the system
		 * call that triggered this I/O or from vnode_pagein
		 */
		if (ut->uu_lowpri_window == 0) {
			ut->uu_throttle_info = info;
			throttle_info_ref(ut->uu_throttle_info);
			DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info );

			oldValue = OSIncrementAtomic(&info->numthreads_throttling);
			if (oldValue < 0) {
				panic("%s: numthreads negative", __func__);
			}
			ut->uu_lowpri_window = lowpri_IO_initial_window_msecs;
			ut->uu_lowpri_window += lowpri_IO_window_msecs_inc * oldValue;
		} else {
			/* The thread sends I/Os to different devices within the same system call */
			if (ut->uu_throttle_info != info) {
    				struct _throttle_io_info_t *old_info = ut->uu_throttle_info;

				// keep track of the numthreads in the right device
				OSDecrementAtomic(&old_info->numthreads_throttling);
				OSIncrementAtomic(&info->numthreads_throttling);

    				DEBUG_ALLOC_THROTTLE_INFO("switching from info = %p\n", old_info, old_info );
    				DEBUG_ALLOC_THROTTLE_INFO("switching to info = %p\n", info, info );
				/* This thread no longer needs a reference on that throttle info */
				throttle_info_rel(ut->uu_throttle_info);
				ut->uu_throttle_info = info;
				/* Need to take a reference on this throttle info */
				throttle_info_ref(ut->uu_throttle_info);
			}
			int numthreads = MAX(1, info->numthreads_throttling);
			ut->uu_lowpri_window += lowpri_IO_window_msecs_inc * numthreads;
			if (ut->uu_lowpri_window > lowpri_max_window_msecs * numthreads)
				ut->uu_lowpri_window = lowpri_max_window_msecs * numthreads;
		}
	}
}
Example #23
// Called at hardware interrupt time
bool
AppleUSBUHCI::FilterInterrupt(void)
{
	UInt16						activeInterrupts;
    Boolean						needSignal = false;
	UInt64						currentFrame;
	uint64_t					timeStamp;
	
	// we leave all interrupts enabled, so see which ones are active
	activeInterrupts = ioRead16(kUHCI_STS) & kUHCI_STS_INTR_MASK;
	
	if (activeInterrupts != 0) 
	{
		USBTrace( kUSBTUHCIInterrupts, kTPUHCIInterruptsFilterInterrupt , (uintptr_t)this, activeInterrupts, 0, 3 );

		if (activeInterrupts & kUHCI_STS_HCPE)
		{
			// Host Controller Process Error - usually a bad data structure on the list
			_hostControllerProcessInterrupt = kUHCI_STS_HCPE;
			ioWrite16(kUHCI_STS, kUHCI_STS_HCPE);
			needSignal = true;
			//USBLog(1, "AppleUSBUHCI[%p]::FilterInterrupt - HCPE error - legacy reg = %p", this, (void*)_device->configRead16(kUHCI_PCI_LEGKEY));
			USBTrace( kUSBTUHCIInterrupts,  kTPUHCIInterruptsFilterInterrupt, (uintptr_t)this, _hostControllerProcessInterrupt, _device->configRead16(kUHCI_PCI_LEGKEY), 1 );
		}
		if (activeInterrupts & kUHCI_STS_HSE)
		{
			// Host System Error - usually a PCI issue
			_hostSystemErrorInterrupt = kUHCI_STS_HSE;
			ioWrite16(kUHCI_STS, kUHCI_STS_HSE);
			needSignal = true;
			//USBLog(1, "AppleUSBUHCI[%p]::FilterInterrupt - HSE error - legacy reg = %p", this, (void*)_device->configRead16(kUHCI_PCI_LEGKEY));
			USBTrace( kUSBTUHCIInterrupts,  kTPUHCIInterruptsFilterInterrupt, (uintptr_t)this, _hostSystemErrorInterrupt, _device->configRead16(kUHCI_PCI_LEGKEY), 2 );
		}
		if (activeInterrupts & kUHCI_STS_RD)
		{
			// Resume Detect - remote wakeup
			_resumeDetectInterrupt = kUHCI_STS_RD;
			ioWrite16(kUHCI_STS, kUHCI_STS_RD);
			needSignal = true;
		}
		if (activeInterrupts & kUHCI_STS_EI)
		{
			// USB Error Interrupt - transaction error (CRC, timeout, etc)
			_usbErrorInterrupt = kUHCI_STS_EI;
			ioWrite16(kUHCI_STS, kUHCI_STS_EI);
			needSignal = true;
		}
		if (activeInterrupts & kUHCI_STS_INT)
		{
			// Normal IOC interrupt - we need to check out low latency Isoch as well
			timeStamp = mach_absolute_time();
			ioWrite16(kUHCI_STS, kUHCI_STS_INT);
			needSignal = true;
						
			// This function will give us the current frame number and check for rollover at the same time
			// since we are calling from filterInterrupts we will not be preempted, it will also update the 
			// cached copy of the frame_number_with_time 
			GetFrameNumberInternal();
			
			// we need to check the periodic list to see if there are any Isoch TDs which need to come off
			// and potentially have their frame lists updated (for Low Latency) we will place them in reverse
			// order on a "done queue" which will be looked at by the isoch scavanger
			// only do this if the periodic schedule is enabled
			if (!_inAbortIsochEP  && (_outSlot < kUHCI_NVFRAMES))
			{
				AppleUHCIIsochTransferDescriptor	*cachedHead;
				UInt32								cachedProducer;
				UInt16								curSlot, testSlot, nextSlot;
				
				curSlot = (ReadFrameNumber() & kUHCI_NVFRAMES_MASK);
				
				cachedHead = (AppleUHCIIsochTransferDescriptor*)_savedDoneQueueHead;
				cachedProducer = _producerCount;
				testSlot = _outSlot;
				
				while (testSlot != curSlot)
				{
					IOUSBControllerListElement				*thing, *nextThing;
					AppleUHCIIsochTransferDescriptor		*isochTD;
					
					nextSlot = (testSlot+1) & kUHCI_NVFRAMES_MASK;
					thing = _logicalFrameList[testSlot];
					while (thing != NULL)
					{
						nextThing = thing->_logicalNext;
						isochTD = OSDynamicCast(AppleUHCIIsochTransferDescriptor, thing);
						
						if (!isochTD)
							break;						// only care about Isoch in this list - if we get here we are at the interrupt TDs
												
						// need to unlink this TD
						_logicalFrameList[testSlot] = nextThing;
						_frameList[testSlot] = HostToUSBLong(thing->GetPhysicalLink());
						
						if (isochTD->_lowLatency)
							isochTD->frStatus = isochTD->UpdateFrameList(*(AbsoluteTime*)&timeStamp);
						// place this guy on the backward done queue
						// the reason that we do not use the _logicalNext link is that the done queue is not a null terminated list
						// and the element linked "last" in the list might not be a true link - trust me
						isochTD->_doneQueueLink = cachedHead;
						cachedHead = isochTD;
						cachedProducer++;
						if (isochTD->_pEndpoint)
						{
							isochTD->_pEndpoint->onProducerQ++;
							OSDecrementAtomic( &(isochTD->_pEndpoint->scheduledTDs));
						}
						
						thing = nextThing;
					}
					testSlot = nextSlot;
					_outSlot = testSlot;
				}
				IOSimpleLockLock( _wdhLock );
				
				_savedDoneQueueHead = cachedHead;	// updates the shadow head
				_producerCount = cachedProducer;	// Validates _producerCount;
				
				IOSimpleLockUnlock( _wdhLock );

			}
		
			// 8394970:  Make sure we set the flag AFTER we have incremented our producer count.
			_usbCompletionInterrupt = kUHCI_STS_INT;
		}
	}
	
	// We will return false from this filter routine,
	// but will indicate that the action routine should be called
	// by calling _interruptSource->signalInterrupt().
	// This is needed because IOKit will disable interrupts for a level interrupt
	// after the filter interrupt is run, until the action interrupt is called.
	// We want to be able to have our filter interrupt routine called
	// before the action routine runs, if needed.  That is what will enable
	// low latency isoch transfers to work, as when the
	// system is under heavy load, the action routine can be delayed for tens of ms.
	//
	if (needSignal)
		_interruptSource->signalInterrupt();
	
	return false;
}