Example #1
void
telemetry_task_ctl_locked(task_t task, uint32_t reasons, int enable_disable)
{
	uint32_t origflags;

	assert((reasons != 0) && ((reasons | TF_TELEMETRY) == TF_TELEMETRY));

	task_lock_assert_owned(task);

	origflags = task->t_flags;

	if (enable_disable == 1) {
		task->t_flags |= reasons;
		if ((origflags & TF_TELEMETRY) == 0) {
			OSIncrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG			
			printf("%s: telemetry OFF -> ON (%d active)\n", proc_name_address(task->bsd_info), telemetry_active_tasks);
#endif			
		}
	} else {
		task->t_flags &= ~reasons;
		if (((origflags & TF_TELEMETRY) != 0) && ((task->t_flags & TF_TELEMETRY) == 0)) {
			/*
			 * If this task went from having at least one telemetry bit to having none,
			 * the net change was to disable telemetry for the task.
			 */
			OSDecrementAtomic(&telemetry_active_tasks);
#if TELEMETRY_DEBUG
			printf("%s: telemetry ON -> OFF (%d active)\n", proc_name_address(task->bsd_info), telemetry_active_tasks);
#endif
		}
	}
}
Example #2
void
rw_downgrade(krwlock_t *rwlp)
{
    rwlp->rw_owner = NULL;
    lck_rw_lock_exclusive_to_shared((lck_rw_t *)&rwlp->rw_lock[0]);
    OSIncrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
}
Example #3
static int tcp_cubic_init(struct tcpcb *tp)
{
	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets);

	VERIFY(tp->t_ccstate != NULL);
	tcp_cubic_clear_state(tp);
	return (0);
}
Example #4
void
OSMetaClass::instanceConstructed() const
{
    // if ((0 == OSIncrementAtomic(&(((OSMetaClass *) this)->instanceCount))) && superClassLink)
    if ((0 == OSIncrementAtomic(&instanceCount)) && superClassLink) {
        superClassLink->instanceConstructed();
    }
}
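OSIncrementAtomic returns the value the counter held before the increment, which is what lets instanceConstructed() detect the 0 -> 1 transition and walk up to the superclass exactly once. A minimal sketch of the same first-transition idiom with an illustrative counter (the names below are hypothetical, not part of OSMetaClass):

#include <libkern/OSAtomic.h>

static volatile SInt32 gActiveInstances = 0;

/* Hypothetical helper: nonzero only for the caller that performed the
 * 0 -> 1 transition, i.e. the very first construction. */
static int
note_instance_constructed(void)
{
	/* OSIncrementAtomic returns the counter's previous value. */
	return (OSIncrementAtomic(&gActiveInstances) == 0);
}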
Example #5
/*
 * When switching from a different CC it is better for Cubic to start 
 * fresh. The state required for Cubic calculation might be stale and it
 * might not represent the current state of the network. If it starts as
 * a new connection it will probe and learn the existing network conditions.
 */
static void
tcp_cubic_switch_cc(struct tcpcb *tp, uint16_t old_cc_index)
{
#pragma unused(old_cc_index)
	tcp_cubic_cwnd_init_or_reset(tp);
	/* Start counting bytes for RFC 3465 again */
	tp->t_bytes_acked = 0;

	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets);
}
Example #6
/*
 * Just take a reference on the throttle info structure.
 *
 * This routine always returns the old value.
 */
static SInt32
throttle_info_ref(struct _throttle_io_info_t *info)
{
	SInt32 oldValue = OSIncrementAtomic(&info->refcnt);

	DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", 
		info, (int)(oldValue -1), info );
	/* Allocated items should never have a reference of zero */
	if (info->alloc && (oldValue == 0))
		panic("Taking a reference without calling create throttle info!\n");

	return oldValue;
}
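throttle_info_ref() always returns the old value, as its comment notes; the release side of the pattern mirrors it with OSDecrementAtomic, whose previous-value result of 1 means the caller just dropped the last reference. A hedged sketch of that shape (hypothetical names, not xnu's actual throttle_info_rel):

#include <libkern/OSAtomic.h>

/* Hypothetical release counterpart: drops one reference and reports
 * whether it was the last one.  OSDecrementAtomic also returns the
 * value held before the decrement, so 1 means the count hit zero. */
static int
example_info_rel(volatile SInt32 *refcnt)
{
	SInt32 oldValue = OSDecrementAtomic(refcnt);

	/* A reference that was never taken cannot be released */
	if (oldValue <= 0)
		panic("releasing a reference that was never taken");

	return (oldValue == 1);	/* true: caller may free the object */
}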
Example #7
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
    if (rw == RW_READER) {
        lck_rw_lock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
        OSIncrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
    } else {
        if (rwlp->rw_owner == current_thread())
            panic("rw_enter: locking against myself!");
        lck_rw_lock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
        rwlp->rw_owner = current_thread();
    }
}
Example #8
void KernelPatcher::onKextSummariesUpdated() {
	if (that) {
		// macOS 10.12 generates an interrupt during this call but, unlike 10.11 and below,
		// it never stops handling interrupts, hanging forever inside hndl_allintrs.
		// This happens even with cpus=1, and the reason is not fully understood.
		//
		// For this reason on 10.12 and above the outer function is routed, and so far it
		// seems to cause fewer issues. Regarding syncing:
		//  - the only place modifying gLoadedKextSummaries is updateLoadedKextSummaries;
		//  - updateLoadedKextSummaries is called from load/unload separately;
		//  - sKextSummariesLock is not exported or visible.
		// As a result no syncing should be necessary but there are guards for future
		// changes and in case of any misunderstanding.
		
		if (getKernelVersion() >= KernelVersion::Sierra) {
			if (OSIncrementAtomic(&updateSummariesEntryCount) != 0) {
				panic("onKextSummariesUpdated entered another time");
			}
			
			that->orgUpdateLoadedKextSummaries();
		}

		DBGLOG("patcher @ invoked at kext loading/unloading");
		
		if (that->khandlers.size() > 0 && that->loadedKextSummaries) {
			auto num = (*that->loadedKextSummaries)->numSummaries;
			if (num > 0) {
				OSKextLoadedKextSummary &last = (*that->loadedKextSummaries)->summaries[num-1];
				DBGLOG("patcher @ last kext is %llX and its name is %.*s", last.address, KMOD_MAX_NAME, last.name);
				// We may add khandlers items inside the handler
				for (size_t i = 0; i < that->khandlers.size(); i++) {
					if (!strncmp(that->khandlers[i]->id, last.name, KMOD_MAX_NAME)) {
						DBGLOG("patcher @ caught the right kext at %llX, invoking handler", last.address);
						that->khandlers[i]->address = last.address;
						that->khandlers[i]->size = last.size;
						that->khandlers[i]->handler(that->khandlers[i]);
						// Remove the item
						that->khandlers.erase(i);
						break;
					}
				}
			} else {
				SYSLOG("patcher @ no kext is currently loaded, this should not happen");
			}
		}
		
		if (getKernelVersion() >= KernelVersion::Sierra && OSDecrementAtomic(&updateSummariesEntryCount) != 1) {
			panic("onKextSummariesUpdated left another time");
		}
	}
}
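The Sierra-specific guard above boils down to an atomic entry counter: the previous value returned by OSIncrementAtomic must be 0 on the way in, and the previous value returned by OSDecrementAtomic must be 1 on the way out. A stripped-down sketch of the same single-entry guard around an arbitrary routine (illustrative names only):

#include <libkern/OSAtomic.h>

static volatile SInt32 gEntryCount = 0;

static void
guarded_call(void (*fn)(void))
{
	/* previous value must be 0, otherwise somebody is already inside */
	if (OSIncrementAtomic(&gEntryCount) != 0)
		panic("guarded_call entered another time");

	fn();

	/* previous value must be 1, otherwise the bookkeeping is broken */
	if (OSDecrementAtomic(&gEntryCount) != 1)
		panic("guarded_call left another time");
}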
Example #9
File: cpu.c Project: argp/xnu
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu;

	cpu = OSIncrementAtomic((SInt32*)&real_ncpus);
	if (real_ncpus > MAX_CPUS) {
		return KERN_FAILURE;
	}

	cpu_data_ptr->cpu_number = cpu;
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys( (vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}
Example #10
/*
 * KPI routine
 *
 * Create and take a reference on a throttle info structure and return a
 * pointer for the file system to use when calling throttle_info_update.
 * Calling file system must have a matching release for every create.
 */
void *
throttle_info_create(void)
{
	struct _throttle_io_info_t *info; 

	MALLOC(info, struct _throttle_io_info_t *, sizeof(*info), M_TEMP, M_ZERO | M_WAITOK);
	/* Should never happen but just in case */
	if (info == NULL)
		return NULL;
	/* Mark that this one was allocated and needs to be freed */
	DEBUG_ALLOC_THROTTLE_INFO("Creating info = %p\n", info, info );
	info->alloc = TRUE;
	/* Take a reference */
	OSIncrementAtomic(&info->refcnt);
	return info;
}
Example #11
/* Switch to newreno from a different CC. If the connection is in
 * congestion avoidance state, it can continue to use the current
 * congestion window because it is going to be conservative. But
 * if the connection is in slow-start, we will halve the congestion
 * window and let newreno work from there. 
 */
void
tcp_newreno_switch_cc(struct tcpcb *tp, uint16_t old_index) {
#pragma unused(old_index)

	uint32_t cwnd = min(tp->snd_wnd, tp->snd_cwnd);
	if (tp->snd_cwnd >= tp->snd_ssthresh) {
		cwnd = cwnd / tp->t_maxseg;
	} else { 
		cwnd = cwnd / 2 / tp->t_maxseg;
	}
	tp->snd_cwnd = max(TCP_CC_CWND_INIT_BYTES, cwnd * tp->t_maxseg);

	/* Start counting bytes for RFC 3465 again */
	tp->t_bytes_acked = 0;

	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_newreno.num_sockets);
}
Example #12
VFSFilter0UserClient* com_VFSFilter0::getClient()
/*
 if a non-NULL value is returned, putClient() must be called
 */
{
    VFSFilter0UserClient*  currentClient;
    
    //
    // if there is no user client, then nobody calls for logging
    //
    if( NULL == this->userClient || this->pendingUnregistration )
        return NULL;
    
    OSIncrementAtomic( &this->clientInvocations );
    
    currentClient = (VFSFilter0UserClient*)this->userClient;
    
    //
    // if the current client is NULL or can't be atomically exchanged
    // with the same value, then the unregistration is in progress;
    // the call to OSCompareAndSwapPtr( NULL, NULL, &this->userClient )
    // checks this->userClient for NULL atomically
    //
    if( !currentClient ||
       !OSCompareAndSwapPtr( currentClient, currentClient, &this->userClient ) ||
       OSCompareAndSwapPtr( NULL, NULL, &this->userClient ) ){
        
        //
        // the unregistration is in progress and is waiting for all
        // invocations to return
        //
        assert( this->pendingUnregistration );
        if( 0x1 == OSDecrementAtomic( &this->clientInvocations ) ){
            
            //
            // this was the last invocation
            //
            wakeup( &this->clientInvocations );
        }
        
        return NULL;
    }
    
    return currentClient;
}
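getClient() pairs every successful return with a mandatory putClient(). Given the wakeup logic above, the release side is typically just the decrement plus the same last-invocation check; a hedged sketch of that shape (putClient's real body is not shown in this example):

void com_VFSFilter0::putClient()
{
    //
    // drop the invocation taken in getClient(); OSDecrementAtomic
    // returns the previous value, so 0x1 means this was the last
    // outstanding invocation and a pending unregistration may proceed
    //
    if( 0x1 == OSDecrementAtomic( &this->clientInvocations ) ){

        //
        // wake the thread waiting for all invocations to return
        //
        wakeup( &this->clientInvocations );
    }
}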
Example #13
bool IOFWWorkLoop::init( void )
{
	bool success = true;
	
	if( success )
	{
		// create a unique lock group for this instance of the FireWire workloop
		// this helps elucidate lock statistics
		
		SInt32	count = OSIncrementAtomic( &sLockGroupCount );
		char	name[64];
		
		snprintf( name, sizeof(name), "FireWire %d", (int)count );
		fLockGroup = lck_grp_alloc_init( name, LCK_GRP_ATTR_NULL );
		if( !fLockGroup )
		{
			success = false;
		}
	}
	
	if( success )
	{
		gateLock = IORecursiveLockAllocWithLockGroup( fLockGroup );
	}
	
	if( success )
	{
		fRemoveSourceDeferredSet = OSSet::withCapacity( 1 );
		if( fRemoveSourceDeferredSet == NULL  )
		{
			success = false;
		}
	}

	if( success )
	{
		success = IOWorkLoop::init();
	}
	
	return success;
}
Example #14
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
    int held = 0;

    if (rw == RW_READER) {
        held = lck_rw_try_lock((lck_rw_t *)&rwlp->rw_lock[0],
                               LCK_RW_TYPE_SHARED);
        if (held)
            OSIncrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
    } else {
        if (rwlp->rw_owner == current_thread())
            panic("rw_tryenter: locking against myself!");
        held = lck_rw_try_lock((lck_rw_t *)&rwlp->rw_lock[0],
                               LCK_RW_TYPE_EXCLUSIVE);
        if (held)
            rwlp->rw_owner = current_thread();
    }

    return (held);
}
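These rw_* wrappers keep a shadow rw_readers count and an rw_owner field so the exit path can tell whether the lock is held shared or exclusive. A hedged sketch of the matching rw_exit under that assumption (not necessarily the porting layer's exact implementation):

void
rw_exit(krwlock_t *rwlp)
{
    if (rwlp->rw_owner == current_thread()) {
        /* exclusive: clear ownership before dropping the lock */
        rwlp->rw_owner = NULL;
        lck_rw_unlock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
    } else {
        /* shared: drop this thread's contribution to the reader count */
        OSDecrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
        lck_rw_unlock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
    }
}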
Example #15
/* Change a connection to use ledbat. First, lower the bg_ssthresh value
 * if needed.
 */
void
tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) {
#pragma unused(old_cc_index)
	uint32_t cwnd;

	if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh)
		tp->bg_ssthresh = tp->snd_ssthresh;

	cwnd = min(tp->snd_wnd, tp->snd_cwnd);

	if (tp->snd_cwnd > tp->bg_ssthresh)
		cwnd = cwnd / tp->t_maxseg;
	else
		cwnd = cwnd / 2 / tp->t_maxseg;

	if (cwnd < bg_ss_fltsz)
		cwnd = bg_ss_fltsz;

	tp->snd_cwnd = cwnd * tp->t_maxseg;
	tp->t_bytes_acked = 0;

	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
}
Example #16
static pp_dyn_entry_t
pp_dyn_entry_get(void)
{
    int i;
    pp_dyn_entry_t e = NULL;
    u_int32_t now, now_nsec;
    clock_get_calendar_nanotime(&now, &now_nsec);

    for(i=0; i < PP_DYN_ENTRIES_COUNT; ++i) {
        // expire entries older than 5 seconds
        if ((pp_dyn_entries[i].ts + 5) < now)
            pp_dyn_entries[i].addr = INADDR_NONE;

        if (INADDR_NONE == pp_dyn_entries[i].addr) {
            e = &pp_dyn_entries[i];
            e->addr = 0;
            e->ts = now;
            OSIncrementAtomic(&pp_dyn_entry_used);
            break;
        }
    }
    return (e);
}
Example #17
/*
 * Allocate struct sackhole.
 */
static struct sackhole *
tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
	struct sackhole *hole;

	if (tp->snd_numholes >= tcp_sack_maxholes ||
	    tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
		tcpstat.tcps_sack_sboverflow++;
		return NULL;
	}

	hole = (struct sackhole *)zalloc(sack_hole_zone);
	if (hole == NULL)
		return NULL;

	hole->start = start;
	hole->end = end;
	hole->rxmit = start;

	tp->snd_numholes++;
	OSIncrementAtomic(&tcp_sack_globalholes);

	return hole;
}
Example #18
static int
zfs_domount(struct mount *mp, dev_t mount_dev, char *osname, vfs_context_t ctx)
{
	uint64_t readonly;
	int error = 0;
	int mode;
	zfsvfs_t *zfsvfs;
	znode_t *zp = NULL;
	struct timeval tv;

	ASSERT(mp);
	ASSERT(osname);

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_vfs = mp;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_assign = TXG_NOWAIT;
	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rw_init(&zfsvfs->z_unmount_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_unmount_inactive_lock, NULL, RW_DEFAULT, NULL);
#ifndef __APPLE__
	/* Initialize the generic filesystem structure. */
	vfsp->vfs_bcount = 0;
	vfsp->vfs_data = NULL;

	if (zfs_create_unique_device(&mount_dev) == -1) {
		error = ENODEV;
		goto out;
	}
	ASSERT(vfs_devismounted(mount_dev) == 0);
#endif

	vfs_setfsprivate(mp, zfsvfs);

	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
		goto out;

	if (readonly) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_RDONLY));
	} else {
		mode = DS_MODE_PRIMARY;
	}
	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (error == EROFS) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
		    &zfsvfs->z_os);
	}

	if (error)
		goto out;

	if (error = zfs_init_fs(zfsvfs, &zp, (cred_t *) vfs_context_ucred(ctx)))
		goto out;

	/* The call to zfs_init_fs leaves the vnode held, release it here. */
	vnode_put(ZTOV(zp));

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t xattr;

		ASSERT(mode & DS_MODE_READONLY);
#if 0
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if (error = dsl_prop_get_integer(osname, "xattr", &xattr, NULL))
			goto out;
		xattr_changed_cb(zfsvfs, xattr);
#endif
		zfsvfs->z_issnap = B_TRUE;
	} else {
		
		if (!vfs_isrdonly(mp))
			zfs_unlinked_drain(zfsvfs);

#ifndef __APPLE__
		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain().  (Further note: ziltest doesn't
		 * use readonly mounts, where zfs_unlinked_drain() isn't
		 * called.)  This is because ziltest causes spa_sync()
		 * to think it's committed, but actually it is not, so
		 * the intent log contains many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated in
		 * a yet later txg.  This would write a "create object
		 * N" record to the intent log.  Normally, this would be
		 * fine because the spa_sync() would have written out
		 * the fact that object N is free, before we could write
		 * the "create object N" intent log record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
		    zfs_replay_vector);

		if (!zil_disable)
			zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
#endif
	}

#if 0
	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);
#endif

	/*
	 * Record the mount time (for Spotlight)
	 */
	microtime(&tv);
	zfsvfs->z_mount_time = tv.tv_sec;
	
out:
	if (error) {
		if (zfsvfs->z_os)
			dmu_objset_close(zfsvfs->z_os);
		mutex_destroy(&zfsvfs->z_znodes_lock);
		list_destroy(&zfsvfs->z_all_znodes);
		rw_destroy(&zfsvfs->z_unmount_lock);
		rw_destroy(&zfsvfs->z_unmount_inactive_lock);
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
	} else {
		OSIncrementAtomic(&zfs_active_fs_count);
		(void) copystr(osname, vfs_statfs(mp)->f_mntfromname, MNAMELEN - 1, 0);
		vfs_getnewfsid(mp);
	}

	return (error);
}
Example #19
__private_extern__ void
devtimer_retain(devtimer_ref timer)
{
    OSIncrementAtomic(&timer->dt_retain_count);
    return;
}
Example #20
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err      = 0;
    int mntopts  = 0;
    bool mounted = false;

    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t      fdev = NULL;
    struct fuse_data  *data = NULL;
    fuse_mount_args    fusefs_args;
    struct vfsstatfs  *vfsstatfsp = vfs_statfs(mp);

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    err = ENOTSUP;

    vfs_setlocklocal(mp);

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname)
        return EINVAL;

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                  "Either add user (id=%d) to group (id=%d), "
                  "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                  kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                      "Either add user (id=%d) to group (id=%d), "
                      "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                      kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'\n");
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }
    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;

    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

    if (data->mounted) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!data->opened) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->mounted = true;
    OSIncrementAtomic((SInt32 *)&fuse_mount_count);
    mounted = true;

    if (data->dead) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running by user_id=%d does not have privileges to mount on directory %s owned by user_id=%d\n",
              kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname, kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec =  fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize  = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree  = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused  = vfs_attr.f_bused;
        vfsstatfsp->f_files  = vfs_attr.f_files;
        vfsstatfsp->f_ffree  = vfs_attr.f_ffree;
        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner  = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags  = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mnfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }
    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }
    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSDecrementAtomic((SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->mounted = false;
            if (!data->opened) {
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            ioattr.io_maxsegreadsize = ioattr.io_maxsegwritesize =
              ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = data->iosize;
            ioattr.io_segreadcnt = ioattr.io_segwritecnt = data->iosize / PAGE_SIZE;
            vfs_setioattr(mp, &ioattr);
        }
    }

    return err;
}
Example #21
static int
ncreate_9p(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, char *target)
{
	openfid_9p *op;
	mount_9p *nmp;
	node_9p *dnp, *np;
	uint32_t perm, iounit;
	uint8_t mode;
	fid_9p fid, openfid;
	qid_9p qid;
	char *ext, buf[64];
	int e;

	dnp = NTO9P(dvp);
	nmp = dnp->nmp;
	fid = NOFID;
	openfid = NOFID;
	*vpp = NULL;

	if (vnode_vfsisrdonly(dvp))
		return EROFS;

	if (!ISSET(nmp->flags, F_DOTU) && vap->va_type!=VREG && vap->va_type!=VDIR)
		return ENOTSUP;

	if (!ISSET(nmp->flags, FLAG_DSSTORE) &&
		strncmp(".DS_Store", cnp->cn_nameptr, cnp->cn_namelen)==0)
		return EINVAL;

	ext = "";
	mode = ORDWR;
	perm = MAKEIMODE(vap->va_type, vap->va_mode) & 0777;
	switch (vap->va_type) {
	case VREG:
		break;

	case VDIR:
		mode = OREAD;
		SET(perm, DMDIR);
		break;

	case VBLK:
	case VCHR:
		SET(perm, DMDEVICE);
		snprintf(buf, sizeof(buf), "%c %d %d", vap->va_type==VBLK?'b':'c', vap->va_rdev>>20, vap->va_rdev&((1<<20) - 1));
		ext = buf;
		break;

	case VFIFO:
		SET(perm, DMNAMEDPIPE);
		break;

	case VSOCK:
		SET(perm, DMSOCKET);
		break;

	case VLNK:
		SET(perm, DMSYMLINK);
		ext = target;
		break;

	default:
		return EINVAL;
	}
	
	if (ISSET(vap->va_vaflags, VA_EXCLUSIVE))
		SET(mode, OEXCL);

	
	nlock_9p(dnp, NODE_LCK_EXCLUSIVE);
	if ((e=walk_9p(nmp, dnp->fid, NULL, 0, &openfid, &qid)))
		goto error;
	if ((e=create_9p(nmp, openfid, cnp->cn_nameptr, cnp->cn_namelen, mode, perm, ext, &qid, &iounit)))
		goto error;
	if ((e=walk_9p(nmp, dnp->fid, cnp->cn_nameptr, cnp->cn_namelen, &fid, &qid)))
		goto error;
	if ((e=nget_9p(nmp, fid, qid, dvp, vpp, cnp, ctx)))
		goto error;

	cache_purge_negatives(dvp);
	np = NTO9P(*vpp);
	np->iounit = iounit;
	op = &np->openfid[vap->va_type==VDIR? OREAD: ORDWR];
	op->fid = openfid;
	OSIncrementAtomic(&op->ref);
	nunlock_9p(np);
	nunlock_9p(dnp);
	return 0;

error:
	clunk_9p(nmp, openfid);
	clunk_9p(nmp, fid);
	nunlock_9p(dnp);
	return e;
}
Example #22
int tcp_newreno_init(struct tcpcb *tp) {
#pragma unused(tp)
	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_newreno.num_sockets);
	return 0;
}
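Each congestion-control module counts its attached sockets in num_sockets; the init and switch_cc hooks increment it, and the module's cleanup hook is expected to undo that. A hedged sketch of the matching cleanup (same shape as the init above, decrement instead of increment):

int tcp_newreno_cleanup(struct tcpcb *tp) {
#pragma unused(tp)
	OSDecrementAtomic((volatile SInt32 *)&tcp_cc_newreno.num_sockets);
	return 0;
}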
Example #23
static errno_t
utun_ctl_connect(
	kern_ctl_ref		kctlref,
	struct sockaddr_ctl	*sac, 
	void				**unitinfo)
{
	struct ifnet_init_eparams	utun_init;
	struct utun_pcb				*pcb;
	errno_t						result;
	struct ifnet_stats_param 	stats;
	
	/* kernel control allocates, interface frees */
	pcb = utun_alloc(sizeof(*pcb));
	if (pcb == NULL)
		return ENOMEM;
	
	/* Setup the protocol control block */
	bzero(pcb, sizeof(*pcb));
	*unitinfo = pcb;
	pcb->utun_ctlref = kctlref;
	pcb->utun_unit = sac->sc_unit;
	
	printf("utun_ctl_connect: creating interface utun%d\n", pcb->utun_unit - 1);

	/* Create the interface */
	bzero(&utun_init, sizeof(utun_init));
	utun_init.ver = IFNET_INIT_CURRENT_VERSION;
	utun_init.len = sizeof (utun_init);
	utun_init.flags = IFNET_INIT_LEGACY;
	utun_init.name = "utun";
	utun_init.unit = pcb->utun_unit - 1;
	utun_init.family = utun_family;
	utun_init.type = IFT_OTHER;
	utun_init.output = utun_output;
	utun_init.demux = utun_demux;
	utun_init.framer_extended = utun_framer;
	utun_init.add_proto = utun_add_proto;
	utun_init.del_proto = utun_del_proto;
	utun_init.softc = pcb;
	utun_init.ioctl = utun_ioctl;
	utun_init.detach = utun_detached;
	
	result = ifnet_allocate_extended(&utun_init, &pcb->utun_ifp);
	if (result != 0) {
		printf("utun_ctl_connect - ifnet_allocate failed: %d\n", result);
		utun_free(pcb);
		return result;
	}
	OSIncrementAtomic(&utun_ifcount);
	
	/* Set flags and additional information. */
	ifnet_set_mtu(pcb->utun_ifp, 1500);
	ifnet_set_flags(pcb->utun_ifp, IFF_UP | IFF_MULTICAST | IFF_POINTOPOINT, 0xffff);

	/* The interface must generate its own IPv6 link-local address,
	 * if possible following the RFC 2472 recommendation of using the 64-bit interface ID
	 */
	ifnet_set_eflags(pcb->utun_ifp, IFEF_NOAUTOIPV6LL, IFEF_NOAUTOIPV6LL);
	
	/* Reset the stats in case as the interface may have been recycled */
	bzero(&stats, sizeof(struct ifnet_stats_param));
	ifnet_set_stat(pcb->utun_ifp, &stats);

	/* Attach the interface */
	result = ifnet_attach(pcb->utun_ifp, NULL);
	if (result != 0) {
		printf("utun_ctl_connect - ifnet_allocate failed: %d\n", result);
		ifnet_release(pcb->utun_ifp);
		utun_free(pcb);
	}
	
	/* Attach to bpf */
	if (result == 0)
		bpfattach(pcb->utun_ifp, DLT_NULL, 4);
	
	/* The interface's resources are allocated, mark it as running */
	if (result == 0)
		ifnet_set_flags(pcb->utun_ifp, IFF_RUNNING, IFF_RUNNING);
	
	return result;
}
Example #24
ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	return OSIncrementAtomic(&ipc_port_timestamp_data);
}
Example #25
/*
 * Because of the vagaries of how a filehandle can be used, we try not to
 * be too smart in here (we try to be smart elsewhere). It is required that
 * you come in here only if you really do not have the said filehandle--else
 * we panic.
 *
 * This function should be called with fufh_mtx mutex locked.
 */
int
fuse_filehandle_get(vnode_t       vp,
                    vfs_context_t context,
                    fufh_type_t   fufh_type,
                    int           mode)
{
    struct fuse_dispatcher  fdi;
    struct fuse_open_in    *foi;
    struct fuse_open_out   *foo;
    struct fuse_filehandle *fufh;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);

    int err    = 0;
    int oflags = 0;
    int op     = FUSE_OPEN;

    fuse_trace_printf("fuse_filehandle_get(vp=%p, fufh_type=%d, mode=%x)\n",
                      vp, fufh_type, mode);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("fuse4x: filehandle_get called despite valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    /*
     * Note that this means we are effectively FILTERING OUT open() flags.
     */
    oflags = fuse_filehandle_xlate_to_oflags(fufh_type);

    if (vnode_isdir(vp)) {
        op = FUSE_OPENDIR;
        if (fufh_type != FUFH_RDONLY) {
            log("fuse4x: non-rdonly fufh requested for directory\n");
            fufh_type = FUFH_RDONLY;
        }
    }

    fuse_dispatcher_init(&fdi, sizeof(*foi));
    fuse_dispatcher_make_vp(&fdi, op, vp, context);

    if (vnode_islnk(vp) && (mode & O_SYMLINK)) {
        oflags |= O_SYMLINK;
    }

    foi = fdi.indata;
    foi->flags = oflags;

    OSIncrementAtomic((SInt32 *)&fuse_fh_upcall_count);
    if ((err = fuse_dispatcher_wait_answer(&fdi))) {
        const char *vname = vnode_getname(vp);
        if (err == ENOENT) {
            /*
             * See comment in fuse_vnop_reclaim().
             */
            cache_purge(vp);
        }
        log("fuse4x: filehandle_get: failed for %s "
              "(type=%d, err=%d, caller=%p)\n",
              (vname) ? vname : "?", fufh_type, err,
               __builtin_return_address(0));
        if (vname) {
            vnode_putname(vname);
        }
        if (err == ENOENT) {
            fuse_vncache_purge(vp);
        }
        return err;
    }
    OSIncrementAtomic((SInt32 *)&fuse_fh_current);

    foo = fdi.answer;

    fufh->fh_id = foo->fh;
    fufh->open_count = 1;
    fufh->open_flags = oflags;
    fufh->fuse_open_flags = foo->open_flags;

    fuse_ticket_drop(fdi.ticket);

    return 0;
}
Example #26
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	OSIncrementAtomic(&((lock_set)->ref_count));
}
Example #27
static int
vnop_open_9p(struct vnop_open_args *ap)
{
	openfid_9p *op;
	node_9p *np;
	fid_9p fid;
	qid_9p qid;
	uint32_t iounit;
	int e, flags, mode;

	TRACE();
	flags = 0;
	if (ap->a_mode)
		flags = OFLAGS(ap->a_mode);

	mode = flags & O_ACCMODE;
	CLR(flags, O_ACCMODE);
    
	CLR(flags, O_DIRECTORY|O_NONBLOCK|O_NOFOLLOW);
	CLR(flags, O_APPEND);

	/* locks implemented on the vfs layer */
	CLR(flags, O_EXLOCK|O_SHLOCK);
    
	if (ISSET(flags, O_TRUNC)) {
		SET(mode, OTRUNC);
		CLR(flags, O_TRUNC);
	}

	if (ISSET(flags, O_CLOEXEC)) {
		SET(mode, OCEXEC);
		CLR(flags, O_CLOEXEC);
	}

	if (ISSET(flags, O_EXCL)) {
		SET(mode, OEXCL);
		CLR(flags, O_EXCL);
	}

	/* vnop_creat just called */
	CLR(flags, O_CREAT);

	if (ISSET(flags, O_EVTONLY))
		CLR(flags, O_EVTONLY);
	if (ISSET(flags, FNOCACHE))
		CLR(flags, FNOCACHE);
	if (ISSET(flags, FNORDAHEAD))
		CLR(flags, FNORDAHEAD);

	if (flags) {
		DEBUG("unexpected open mode %x", flags);
		return ENOTSUP;
	}

	np = NTO9P(ap->a_vp);
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	op = ofidget(np, ap->a_mode);
	if (op->fid == NOFID) {
		if ((e=walk_9p(np->nmp, np->fid, NULL, 0, &fid, &qid)))
			goto error;	
		if ((e=open_9p(np->nmp, fid, mode, &qid, &iounit)))
			goto error;

		np->iounit = iounit;
		op->fid = fid;
	}

	/* no cache for dirs, .u or synthetic files */
	if (!vnode_isreg(np->vp) || np->dir.qid.vers==0) {
		vnode_setnocache(np->vp);
		vnode_setnoreadahead(np->vp);
	}

	OSIncrementAtomic(&op->ref);
	nunlock_9p(np);
	return 0;

error:
	clunk_9p(np->nmp, fid);
	nunlock_9p(np);
	return e;
}
Example #28
void throttle_info_update(void *throttle_info, int flags)
{
	struct _throttle_io_info_t *info = throttle_info;
	struct uthread	*ut;
	int policy;
	int is_throttleable_io = 0;
	int is_passive_io = 0;
	SInt32 oldValue;

	if (!lowpri_IO_initial_window_msecs || (info == NULL))
		return;
	policy = throttle_get_io_policy(&ut);

	switch (policy) {
	case IOPOL_DEFAULT:
	case IOPOL_NORMAL:
		break;
	case IOPOL_THROTTLE:
		is_throttleable_io = 1;
		break;
	case IOPOL_PASSIVE:
		is_passive_io = 1;
		break;
	default:
		printf("unknown I/O policy %d", policy);
		break;
	}

	if (!is_throttleable_io && ISSET(flags, B_PASSIVE))
		is_passive_io |= 1;

	if (!is_throttleable_io) {
		if (!is_passive_io){
			microuptime(&info->last_normal_IO_timestamp);
		}
	} else if (ut) {
		/*
		 * I'd really like to do the IOSleep here, but
		 * we may be holding all kinds of filesystem related locks
		 * and the pages for this I/O marked 'busy'...
		 * we don't want to cause a normal task to block on
		 * one of these locks while we're throttling a task marked
		 * for low priority I/O... we'll mark the uthread and
		 * do the delay just before we return from the system
		 * call that triggered this I/O or from vnode_pagein
		 */
		if (ut->uu_lowpri_window == 0) {
			ut->uu_throttle_info = info;
			throttle_info_ref(ut->uu_throttle_info);
			DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info );

			oldValue = OSIncrementAtomic(&info->numthreads_throttling);
			if (oldValue < 0) {
				panic("%s: numthreads negative", __func__);
			}
			ut->uu_lowpri_window = lowpri_IO_initial_window_msecs;
			ut->uu_lowpri_window += lowpri_IO_window_msecs_inc * oldValue;
		} else {
			/* The thread sends I/Os to different devices within the same system call */
			if (ut->uu_throttle_info != info) {
    				struct _throttle_io_info_t *old_info = ut->uu_throttle_info;

				// keep track of the numthreads in the right device
				OSDecrementAtomic(&old_info->numthreads_throttling);
				OSIncrementAtomic(&info->numthreads_throttling);

    				DEBUG_ALLOC_THROTTLE_INFO("switching from info = %p\n", old_info, old_info );
    				DEBUG_ALLOC_THROTTLE_INFO("switching to info = %p\n", info, info );
				/* This thread no longer needs a reference on that throttle info */
				throttle_info_rel(ut->uu_throttle_info);
				ut->uu_throttle_info = info;
				/* Need to take a reference on this throttle info */
				throttle_info_ref(ut->uu_throttle_info);
			}
			int numthreads = MAX(1, info->numthreads_throttling);
			ut->uu_lowpri_window += lowpri_IO_window_msecs_inc * numthreads;
			if (ut->uu_lowpri_window > lowpri_max_window_msecs * numthreads)
				ut->uu_lowpri_window = lowpri_max_window_msecs * numthreads;
		}
	}
}
Example #29
IOReturn DldIOShadowFile::write( __in void* data, __in off_t length, __in_opt off_t offsetForShadowFile )
{
    /*!
     @function vn_rdwr
     @abstract Read from or write to a file.
     @discussion vn_rdwr() abstracts the details of constructing a uio and picking a vnode operation to allow
     simple in-kernel file I/O.
     @param rw UIO_READ for a read, UIO_WRITE for a write.
     @param vp The vnode on which to perform I/O.
     @param base Start of buffer into which to read or from which to write data.
     @param len Length of buffer.
     @param offset Offset within the file at which to start I/O.
     @param segflg What kind of address "base" is.   See uio_seg definition in sys/uio.h.  UIO_SYSSPACE for kernelspace, UIO_USERSPACE for userspace.
     UIO_USERSPACE32 and UIO_USERSPACE64 are in general preferred, but vn_rdwr will make sure that has the correct address sizes.
     @param ioflg Defined in vnode.h, e.g. IO_NOAUTH, IO_NOCACHE.
     @param cred Credential to pass down to filesystem for authentication.
     @param aresid Destination for amount of requested I/O which was not completed, as with uio_resid().
     @param p Process requesting I/O.
     @return 0 for success; errors from filesystem, and EIO if did not perform all requested I/O and the "aresid" parameter is NULL.
     */
    
    assert( preemption_enabled() );
    assert( length < 0x0FFFFFFF00000000ll );
    
    off_t  offset = offsetForShadowFile;
    
    if( DLD_IGNR_FSIZE == offsetForShadowFile && !this->reserveOffset( length, &offset ) ){
        
        DBG_PRINT_ERROR(("A quota has been exceeded for the %s file, the quota is %llu \n", this->path, this->maxSize ));
        return kIOReturnNoResources;
    }
    
#if defined( DBG )
    static UInt32 gWriteCount = 0x0;
    UInt32 writeCount = OSIncrementAtomic( &gWriteCount ) + 0x1;
#endif//DBG
    
    int   error = 0x0;
    off_t bytesWritten = 0x0;
    
    while( !error && length != bytesWritten ){
        
        unsigned int bytesToWrite;
        
#if defined( DBG )
        //
        // write by 32MB chunks for each 2nd function invocation, and 1GB for others
        //
        if( (length - bytesWritten) > ((writeCount%2)? 0x40000000ll : 0x2000000ll) )
            bytesToWrite = (writeCount%2)? 0x40000000ll : 0x2000000ll;
        else
            bytesToWrite = (int)(length - bytesWritten);
#else
        //
        // write by 1GB chunks
        //
        if( (length - bytesWritten) > 0x40000000ll )
            bytesToWrite = 0x40000000;
        else
            bytesToWrite = (int)(length - bytesWritten);
#endif//!DBG
        
        
        error = vn_rdwr( UIO_WRITE,
                         this->vnode,
                         (char*)data,
                         bytesToWrite,
                         offset + bytesWritten,
                         UIO_SYSSPACE,
                         IO_NOAUTH | IO_SYNC,//IO_UNIT,
                         kauth_cred_get(),
                         NULL,
                         current_proc() );
        
        if( !error )
            bytesWritten = bytesWritten + bytesToWrite;
        else {
            DBG_PRINT_ERROR(("vn_rdwr( %s, %u ) failed with the 0x%X error\n", this->path, bytesToWrite, error));
        }

        
    }
    
    return error;
}
Example #30
void	pmap_pcid_configure(void) {
	int ccpu = cpu_number();
	uintptr_t cr4 = get_cr4();
	boolean_t pcid_present = FALSE;

	pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu);
	pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() !=0);
	pmap_assert(cpu_mode_is64bit());

	if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled, sizeof (pmap_pcid_disabled))) {
		pmap_pcid_log("PMAP: PCID feature disabled\n");
		printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled);
		kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled);
	}
	 /* no_shared_cr3+PCID is currently unsupported */
#if	DEBUG
	if (pmap_pcid_disabled == FALSE)
		no_shared_cr3 = FALSE;
	else
		no_shared_cr3 = TRUE;
#else
	if (no_shared_cr3)
		pmap_pcid_disabled = TRUE;
#endif
	if (pmap_pcid_disabled || no_shared_cr3) {
		unsigned i;
		/* Reset PCID status, as we may have picked up
		 * strays if discovered prior to platform
		 * expert initialization.
		 */
		for (i = 0; i < real_ncpus; i++) {
			if (cpu_datap(i)) {
				cpu_datap(i)->cpu_pmap_pcid_enabled = FALSE;
			}
			pmap_pcid_ncpus = 0;
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		return;
	}
	/* DRKTODO: assert if features haven't been discovered yet. Redundant
	 * invocation of cpu_mode_init and descendants masks this for now.
	 */
	if ((cpuid_features() & CPUID_FEATURE_PCID))
		pcid_present = TRUE;
	else {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu);
		return;
	}
	if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE|CR4_PGE)) {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;
		pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu);
		return;
	}
	if (pcid_present == TRUE) {
		pmap_pcid_log("Pre-PCID:CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, cr4);

		if (cpu_number() >= PMAP_PCID_MAX_CPUS) {
			panic("PMAP_PCID_MAX_CPUS %d\n", cpu_number());
		}
		if ((get_cr4() & CR4_PGE) == 0) {
			set_cr4(get_cr4() | CR4_PGE);
			pmap_pcid_log("Toggled PGE ON (CPU: %d\n", ccpu);
		}
		set_cr4(get_cr4() | CR4_PCIDE);
		pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, get_cr4());
		tlb_flush_global();
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;

		if (OSIncrementAtomic(&pmap_pcid_ncpus) == machine_info.max_cpus) {
			pmap_pcid_log("All PCIDs enabled: real_ncpus: %d, pmap_pcid_ncpus: %d\n", real_ncpus, pmap_pcid_ncpus);
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_coherentp =
		    cpu_datap(ccpu)->cpu_pmap_pcid_coherentp_kernel =
		    &(kernel_pmap->pmap_pcid_coherency_vector[ccpu]);
		cpu_datap(ccpu)->cpu_pcid_refcounts[0] = 1;
	}
}