Code example #1
IOWorkLoop *
IOFWUserLocalIsochPort::createRealtimeThread()
{
	IOWorkLoop * workloop = IOWorkLoop::workLoop() ;
	if ( workloop )
	{
		// Boost isoc workloop into realtime range
		thread_time_constraint_policy_data_t	constraints;
		AbsoluteTime							time;
		
		nanoseconds_to_absolutetime(625000, &time);
		constraints.period = AbsoluteTime_to_scalar(&time);
		nanoseconds_to_absolutetime(60000, &time);
		constraints.computation = AbsoluteTime_to_scalar(&time);
		nanoseconds_to_absolutetime(1250000, &time);
		constraints.constraint = AbsoluteTime_to_scalar(&time);

		constraints.preemptible = TRUE;

		{
			IOThread thread;
			thread = workloop->getThread();
			thread_policy_set( thread, THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t) & constraints, THREAD_TIME_CONSTRAINT_POLICY_COUNT );			
		}
	}
	
	return workloop ;
}
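Nearly every example on this page follows the same two-step idiom: convert a nanosecond interval into Mach absolute-time (timebase) units with nanoseconds_to_absolutetime(), then add the result to mach_absolute_time(), or pass it to clock_absolutetime_interval_to_deadline(), to form a deadline. A minimal sketch of that idiom, using an arbitrary 1 ms interval that is not taken from any of the sources shown here:

	uint64_t interval_abs, deadline;

	/* convert 1 ms from nanoseconds into absolute-time units */
	nanoseconds_to_absolutetime(1 * NSEC_PER_MSEC, &interval_abs);

	/* a deadline is simply "now" plus the converted interval */
	deadline = mach_absolute_time() + interval_abs;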
Code example #2
File: clock.c Project: Prajna/xnu
/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}
Code example #3
File: clock.c Project: Prajna/xnu
static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
		if (delta < 0) {
			clock_calend.offset -= clock_calend.adjoffset;

			calend_adjtotal -= delta;
			if (delta < calend_adjtotal) {
				clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

				nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
				clock_calend.adjoffset = (uint32_t)t64;
			}

			if (clock_calend.adjdelta != 0)
				clock_calend.adjstart = now;
		}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
Code example #4
cyclic_id_t
cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
{
	uint64_t now;

	wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
	if (NULL == wrapTC)
		return CYCLIC_NONE;

	wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
	wrapTC->hdlr = *handler;
	wrapTC->when = *when;

	ASSERT(when->cyt_when == 0);
	ASSERT(when->cyt_interval < WAKEUP_REAPER);

	nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);

	now = mach_absolute_time();
	wrapTC->deadline = now;

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
	(void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );

	return (cyclic_id_t)wrapTC;
}
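A hypothetical registration against cyclic_add() above; the handler name, the 10 ms period, and the field assignments are illustrative assumptions rather than code from this project (note that cyt_when must be 0 to satisfy the ASSERT):

	static void my_tick(void *arg) { /* hypothetical periodic work */ }

	cyc_handler_t h = { 0 };
	cyc_time_t    t = { 0 };

	h.cyh_func     = my_tick;
	h.cyh_arg      = NULL;
	t.cyt_when     = 0;                   /* required by the ASSERT above */
	t.cyt_interval = 10 * NSEC_PER_MSEC;  /* 10 ms, in ns; converted internally */

	cyclic_id_t id = cyclic_add(&h, &t);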
Code example #5
File: clock.c Project: Prajna/xnu
/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t			secs,
	clock_usec_t		microsecs)
{
	clock_sec_t			sys;
	clock_usec_t		microsys;
	clock_sec_t			newsecs;
	spl_t				s;

	newsecs = (microsecs < 500*USEC_PER_SEC)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	
#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
Code example #6
File: timetrigger.c Project: Apple-FOSS-Mirror/xnu
/* program the timer from the pet thread */
int
kperf_timer_pet_set( unsigned timer, uint64_t elapsed_ticks )
{
	static uint64_t pet_min_ticks = 0;

	uint64_t now;
	struct time_trigger *trigger = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/* compute ns -> ticks */
	if( pet_min_ticks == 0 )
		nanoseconds_to_absolutetime(MIN_PET_TIMER_NS, &pet_min_ticks);

	if( timer != pet_timer )
		panic( "PET setting with bogus ID\n" );

	if( timer >= timerc )
		return EINVAL;

	if( kperf_sampling_status() == KPERF_SAMPLING_OFF ) {
		BUF_INFO1(PERF_PET_END, SAMPLE_OFF);
		return 0;
	}

	// don't reprogram the timer if it's been shut down
	if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN ) {
		BUF_INFO1(PERF_PET_END, SAMPLE_SHUTDOWN);
		return 0;
	}

	/* CHECKME: we probably took so damn long in the PET thread,
	 * it makes sense to take the time again.
	 */
	now = mach_absolute_time();
	trigger = &timerv[timer];

	/* if we re-programmed the timer to zero, just drop it */
	if( !trigger->period )
		return 0;

	/* subtract the time the pet sample took being careful not to underflow */
	if ( trigger->period > elapsed_ticks )
		period = trigger->period - elapsed_ticks;

	/* make sure we don't set the next PET sample to happen too soon */
	if ( period < pet_min_ticks )
		period = pet_min_ticks;

	/* calculate deadline */
	deadline = now + period;

	BUF_INFO(PERF_PET_SCHED, trigger->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter( &trigger->tcall, deadline, TIMER_CALL_SYS_CRITICAL);

	return 0;
}
Code example #7
File: pktsched.c Project: Algozjb/xnu
u_int64_t
pktsched_nsecs_to_abstime(u_int64_t nsecs)
{
	u_int64_t abstime;

	nanoseconds_to_absolutetime(nsecs, &abstime);
	return (abstime);
}
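Because the wrapper above is a straight pass-through, an illustrative call site is a one-liner (the 500 µs value is an assumption):

	u_int64_t qdelay_abs = pktsched_nsecs_to_abstime(500 * NSEC_PER_USEC);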
Code example #8
File: sync_sema.c Project: JackieXie168/xnu
static __inline__ uint64_t
semaphore_deadline(
	unsigned int		sec,
	clock_res_t			nsec)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime((uint64_t)sec *	NSEC_PER_SEC + nsec, &abstime);
	clock_absolutetime_interval_to_deadline(abstime, &abstime);

	return (abstime);
}
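An illustrative caller of semaphore_deadline() above, requesting a deadline 2.5 seconds from now (the values are assumptions, not from sync_sema.c):

	uint64_t deadline = semaphore_deadline(2, (clock_res_t)(500 * NSEC_PER_MSEC));	/* 2 s + 500 ms */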
Code example #9
File: clock.c Project: Prajna/xnu
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t			sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t 		microsys, microsecs = 0;
	spl_t				s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 *	 Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	
#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
Code example #10
/*
 *	Set a timeout.
 *
 *	fcn:		function to call
 *	param:		parameter to pass to function
 *	ts:		timeout interval, in timespec
 */
void
bsd_timeout(
	timeout_fcn_t			fcn,
	void					*param,
	struct timespec         *ts)
{
	uint64_t		deadline = 0;

	if (ts && (ts->tv_sec || ts->tv_nsec)) {
		nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec,  &deadline );
		clock_absolutetime_interval_to_deadline( deadline, &deadline );
	}
	thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
}
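A hedged usage sketch for bsd_timeout(); the callback and the 100 ms interval are hypothetical:

	static void my_expire(void *param) { /* hypothetical timeout handler */ }

	struct timespec ts = { 0, 100 * 1000 * 1000 };	/* 0 s, 100,000,000 ns = 100 ms */
	bsd_timeout(my_expire, NULL, &ts);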
Code example #11
File: timer_call.c Project: JackieXie168/xnu
static void
timer_call_init_abstime(void)
{
	int i;
	uint64_t result;
	timer_coalescing_priority_params_ns_t * tcoal_prio_params_init = timer_call_get_priority_params();
	nanoseconds_to_absolutetime(PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
	nanoseconds_to_absolutetime(tcoal_prio_params_init->idle_entry_timer_processing_hdeadline_threshold_ns, &result);
	tcoal_prio_params.idle_entry_timer_processing_hdeadline_threshold_abstime = (uint32_t)result;
	nanoseconds_to_absolutetime(tcoal_prio_params_init->interrupt_timer_coalescing_ilat_threshold_ns, &result);
	tcoal_prio_params.interrupt_timer_coalescing_ilat_threshold_abstime = (uint32_t)result;
	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_resort_threshold_ns, &result);
	tcoal_prio_params.timer_resort_threshold_abstime = (uint32_t)result;
	tcoal_prio_params.timer_coalesce_rt_shift = tcoal_prio_params_init->timer_coalesce_rt_shift;
	tcoal_prio_params.timer_coalesce_bg_shift = tcoal_prio_params_init->timer_coalesce_bg_shift;
	tcoal_prio_params.timer_coalesce_kt_shift = tcoal_prio_params_init->timer_coalesce_kt_shift;
	tcoal_prio_params.timer_coalesce_fp_shift = tcoal_prio_params_init->timer_coalesce_fp_shift;
	tcoal_prio_params.timer_coalesce_ts_shift = tcoal_prio_params_init->timer_coalesce_ts_shift;

	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_rt_ns_max,
	    &tcoal_prio_params.timer_coalesce_rt_abstime_max);
	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_bg_ns_max,
	    &tcoal_prio_params.timer_coalesce_bg_abstime_max);
	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_kt_ns_max,
	    &tcoal_prio_params.timer_coalesce_kt_abstime_max);
	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_fp_ns_max,
	    &tcoal_prio_params.timer_coalesce_fp_abstime_max);
	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_ts_ns_max,
	    &tcoal_prio_params.timer_coalesce_ts_abstime_max);

	for (i = 0; i < NUM_LATENCY_QOS_TIERS; i++) {
		tcoal_prio_params.latency_qos_scale[i] = tcoal_prio_params_init->latency_qos_scale[i];
		nanoseconds_to_absolutetime(tcoal_prio_params_init->latency_qos_ns_max[i],
		    &tcoal_prio_params.latency_qos_abstime_max[i]);
		tcoal_prio_params.latency_tier_rate_limited[i] = tcoal_prio_params_init->latency_tier_rate_limited[i];
	}
}
Code example #12
File: sysctl.c Project: Andromeda-OS/Kernel
static int
panic_set_restart_timeout(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value = 0, old_value = 0, changed = 0, error;
	uint64_t nstime;

	if (panic_restart_timeout) {
		absolutetime_to_nanoseconds(panic_restart_timeout, &nstime);
		old_value = nstime / NSEC_PER_SEC;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
	if (error == 0 && changed) {
		nanoseconds_to_absolutetime(((uint64_t)new_value) * NSEC_PER_SEC, &panic_restart_timeout);
	}
	return error;
}
Code example #13
thread_call_t
dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos)
{
#pragma unused(arg)
	thread_call_t call = thread_call_allocate(func, NULL);

	nanoseconds_to_absolutetime(nanos, &nanos);

	/*
	 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot,
	 * and clock drift on later invocations is not a worry.
	 */
	uint64_t deadline = mach_absolute_time() + nanos;

	thread_call_enter_delayed(call, deadline);

	return call;
}
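An assumed one-shot arming of dtrace_timeout() above; the handler and the 50 ms delay are mine, with the handler taking two pointers to match the thread_call signature the helper expects:

	static void my_once(void *p0, void *p1) { /* hypothetical one-shot work */ }

	thread_call_t tc = dtrace_timeout(my_once, NULL, 50 * NSEC_PER_MSEC);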
Code example #14
static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
	uint64_t now;

	timer_call_setup( &(wrapTC->call),  _timer_call_apply_cyclic, NULL );
	wrapTC->hdlr = *handler;
	wrapTC->when = *when;

	nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );

	now = mach_absolute_time();
	wrapTC->deadline = now;

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
	timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );

	return (cyclic_id_t)wrapTC;
}
Code example #15
File: timetrigger.c Project: Apple-FOSS-Mirror/xnu
int
kperf_timer_set_period( unsigned timer, uint64_t period )
{
	static uint64_t min_timer_ticks = 0;

	if( timer >= timerc )
		return EINVAL;

	/* compute ns -> ticks */
	if( min_timer_ticks == 0 )
		nanoseconds_to_absolutetime(MIN_TIMER_NS, &min_timer_ticks);

	/* check actual timer */
	if( period && (period < min_timer_ticks) )
		period = min_timer_ticks;

	timerv[timer].period = period;

	/* FIXME: re-program running timers? */

	return 0;
}
Code example #16
File: kperf_timer.c Project: wzw19890321/xnu-1
extern int
kperf_timer_set_count(unsigned int count)
{
	struct kperf_timer *new_timerv = NULL, *old_timerv = NULL;
	unsigned int old_count;

	if (min_period_abstime == 0) {
		nanoseconds_to_absolutetime(MIN_PERIOD_NS, &min_period_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_BG_NS, &min_period_bg_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_NS, &min_period_pet_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_BG_NS,
			&min_period_pet_bg_abstime);
		assert(min_period_abstime > 0);
	}

	if (count == kperf_timerc) {
		return 0;
	}
	if (count > TIMER_MAX) {
		return EINVAL;
	}

	/* TODO: allow shrinking? */
	if (count < kperf_timerc) {
		return EINVAL;
	}

	/*
	 * Make sure kperf is initialized when creating the array for the first
	 * time.
	 */
	if (kperf_timerc == 0) {
		int r;

		/* main kperf */
		if ((r = kperf_init())) {
			return r;
		}
	}

	/*
	 * Shut down any running timers since we will be messing with the timer
	 * call structures.
	 */
	kperf_timer_stop();

	/* create a new array */
	new_timerv = kalloc_tag(count * sizeof(struct kperf_timer),
		VM_KERN_MEMORY_DIAG);
	if (new_timerv == NULL) {
		return ENOMEM;
	}
	old_timerv = kperf_timerv;
	old_count = kperf_timerc;

	if (old_timerv != NULL) {
		bcopy(kperf_timerv, new_timerv,
			kperf_timerc * sizeof(struct kperf_timer));
	}

	/* zero the new entries */
	bzero(&(new_timerv[kperf_timerc]),
		(count - old_count) * sizeof(struct kperf_timer));

	/* (re-)setup the timer call info for all entries */
	for (unsigned int i = 0; i < count; i++) {
		timer_call_setup(&(new_timerv[i].tcall), kperf_timer_handler, &(new_timerv[i]));
	}

	kperf_timerv = new_timerv;
	kperf_timerc = count;

	if (old_timerv != NULL) {
		kfree(old_timerv, old_count * sizeof(struct kperf_timer));
	}

	return 0;
}
Code example #17
File: thread.c Project: CptFrazz/xnu
int
thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns)
{
	thread_t	thread = current_thread(); 
	ledger_t	l;
	uint64_t 	limittime = 0;
	uint64_t	abstime = 0;

	assert(percentage <= 100);

	if (percentage == 0) {
		/*
		 * Remove CPU limit, if any exists.
		 */
		if (thread->t_threadledger != LEDGER_NULL) {
			/*
			 * The only way to get a per-thread ledger is via CPU limits.
			 */
			assert(thread->options & (TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT));
			ledger_dereference(thread->t_threadledger);
			thread->t_threadledger = LEDGER_NULL;
			thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT);
		}

		return (0);
	}

 	l = thread->t_threadledger;
	if (l == LEDGER_NULL) {
		/*
		 * This thread doesn't yet have a per-thread ledger; so create one with the CPU time entry active.
		 */
		if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL)
			return (KERN_RESOURCE_SHORTAGE);

		/*
		 * We are the first to create this thread's ledger, so only activate our entry.
		 */
		ledger_entry_setactive(l, thread_ledgers.cpu_time);
		thread->t_threadledger = l;
	}

	/*
	 * The limit is specified as a percentage of CPU over an interval in nanoseconds.
	 * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit.
	 */
	limittime = (interval_ns * percentage) / 100;
	nanoseconds_to_absolutetime(limittime, &abstime); 
	ledger_set_limit(l, thread_ledgers.cpu_time, abstime);
	/*
	 * Refill the thread's allotted CPU time every interval_ns nanoseconds.
	 */
	ledger_set_period(l, thread_ledgers.cpu_time, interval_ns);

	/*
	 * Ledgers supports multiple actions for one ledger entry, so we do too.
	 */
	if (action == THREAD_CPULIMIT_EXCEPTION) {
		thread->options |= TH_OPT_PROC_CPULIMIT;
		ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_EXCEPTION);
	}

	if (action == THREAD_CPULIMIT_BLOCK) {
		thread->options |= TH_OPT_PRVT_CPULIMIT;
		/* The per-thread ledger template by default has a callback for CPU time */
		ledger_disable_callback(l, thread_ledgers.cpu_time);
		ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
	}

	thread->t_threadledger = l;
	return (0);
}
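An illustrative call of thread_set_cpulimit() above, with assumed values: block the current thread once it has consumed 25% of each 10 ms window, i.e. the ledger limit works out to 2.5 ms of CPU time per refill period:

	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 25, 10 * NSEC_PER_MSEC);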
Code example #18
File: HoRNDIS.cpp Project: burnsra/HoRNDIS
UInt32 HoRNDIS::outputPacket(mbuf_t packet, void *param) {
	mbuf_t m;
	size_t pktlen = 0;
	IOReturn ior = kIOReturnSuccess;
	UInt32 poolIndx;
	int i;

	LOG(V_DEBUG, "");
	
	/* Count the total size of this packet */
	m = packet;
	while (m) {
		pktlen += mbuf_len(m);
		m = mbuf_next(m);
	}
	
	LOG(V_DEBUG, "%ld bytes", pktlen);
	
	if (pktlen > (mtu + 14)) {
		LOG(V_ERROR, "packet too large (%ld bytes, but I told you you could have %d!)", pktlen, mtu);
		fpNetStats->outputErrors++;
		return false;
	}
	
	/* Find an output buffer in the pool */
	IOLockLock(outbuf_lock);
	for (i = 0; i < OUT_BUF_MAX_TRIES; i++) {
		AbsoluteTime ivl, deadl;
		
		for (poolIndx = 0; poolIndx < N_OUT_BUFS; poolIndx++)
			if (!outbufs[poolIndx].inuse) {
				outbufs[poolIndx].inuse = true;
				break;
			}
		if (poolIndx != N_OUT_BUFS)
			break;
		
		/* "while", not "if".  See Symphony X's seminal work on this topic, /Paradise Lost/ (2007). */
		nanoseconds_to_absolutetime(OUT_BUF_WAIT_TIME, &ivl);
		clock_absolutetime_interval_to_deadline(ivl, &deadl);
		LOG(V_NOTE, "waiting for buffer...");
		IOLockSleepDeadline(outbuf_lock, outbufs, deadl, THREAD_INTERRUPTIBLE);
	}
	IOLockUnlock(outbuf_lock);
	
	if (poolIndx == N_OUT_BUFS) {
		LOG(V_ERROR, "timed out waiting for buffer");
		return kIOReturnTimeout;
	}
	
	/* Start filling in the send buffer */
	struct rndis_data_hdr *hdr;
	hdr = (struct rndis_data_hdr *)outbufs[poolIndx].buf;
	
	outbufs[poolIndx].inuse = true;
	
	outbufs[poolIndx].mdp->setLength(pktlen + sizeof *hdr);
	
	memset(hdr, 0, sizeof *hdr);
	hdr->msg_type = RNDIS_MSG_PACKET;
	hdr->msg_len = cpu_to_le32(pktlen + sizeof *hdr);
	hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
	hdr->data_len = cpu_to_le32(pktlen);
	mbuf_copydata(packet, 0, pktlen, hdr + 1);
	
	freePacket(packet);
	
	/* Now, fire it off! */
	outbufs[poolIndx].comp.target    = this;
	outbufs[poolIndx].comp.parameter = (void *)poolIndx;
	outbufs[poolIndx].comp.action    = dataWriteComplete;
	
	ior = fOutPipe->Write(outbufs[poolIndx].mdp, &outbufs[poolIndx].comp);
	if (ior != kIOReturnSuccess) {
		LOG(V_ERROR, "write failed");
		if (ior == kIOUSBPipeStalled) {
			fOutPipe->Reset();
			ior = fOutPipe->Write(outbufs[poolIndx].mdp, &outbufs[poolIndx].comp);
			if (ior != kIOReturnSuccess) {
				LOG(V_ERROR, "write really failed");
				fpNetStats->outputErrors++;
				return ior;
			}
		}
	}
	fpNetStats->outputPackets++;
	
	return kIOReturnOutputSuccess;
}
Code example #19
/**
 * Internal worker for the sleep scenario.
 *
 * Called owning the spinlock, returns without it.
 *
 * @returns IPRT status code.
 * @param   pThis               The mutex instance.
 * @param   cMillies            The timeout.
 * @param   fInterruptible      Whether it's interruptible
 *                              (RTSemMutexRequestNoResume) or not
 *                              (RTSemMutexRequest).
 * @param   hNativeSelf         The thread handle of the caller.
 */
static int rtR0SemMutexDarwinRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                          wait_interrupt_t fInterruptible, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Grab a reference and indicate that we're waiting.
     */
    pThis->cWaiters++;
    ASMAtomicIncU32(&pThis->cRefs);

    /*
     * Go to sleep, use the address of the mutex instance as sleep/blocking/event id.
     */
    wait_result_t rcWait;
    if (cMillies == RT_INDEFINITE_WAIT)
        rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
    else
    {
        uint64_t u64AbsTime;
        nanoseconds_to_absolutetime(cMillies * UINT64_C(1000000), &u64AbsTime);
        u64AbsTime += mach_absolute_time();

        rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                         (event_t)pThis, fInterruptible, u64AbsTime);
    }

    /*
     * Translate the rc.
     */
    int rc;
    switch (rcWait)
    {
        case THREAD_AWAKENED:
            if (RT_LIKELY(pThis->u32Magic == RTSEMMUTEX_MAGIC))
            {
                if (RT_LIKELY(   pThis->cRecursions  == 0
                              && pThis->hNativeOwner == NIL_RTNATIVETHREAD))
                {
                    pThis->cRecursions  = 1;
                    pThis->hNativeOwner = hNativeSelf;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    Assert(pThis->cRecursions  == 0);
                    Assert(pThis->hNativeOwner == NIL_RTNATIVETHREAD);
                    rc = VERR_INTERNAL_ERROR_3;
                }
            }
            else
                rc = VERR_SEM_DESTROYED;
            break;

        case THREAD_TIMED_OUT:
            Assert(cMillies != RT_INDEFINITE_WAIT);
            rc = VERR_TIMEOUT;
            break;

        case THREAD_INTERRUPTED:
            Assert(fInterruptible);
            rc = VERR_INTERRUPTED;
            break;

        case THREAD_RESTART:
            Assert(pThis->u32Magic == ~RTSEMMUTEX_MAGIC);
            rc = VERR_SEM_DESTROYED;
            break;

        default:
            AssertMsgFailed(("rcWait=%d\n", rcWait));
            rc = VERR_GENERAL_FAILURE;
            break;
    }

    /*
     * Dereference it and quit the lock.
     */
    Assert(pThis->cWaiters > 0);
    pThis->cWaiters--;

    Assert(pThis->cRefs > 0);
    if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);
    return rc;
}
Code example #20
File: SC101Device.cpp Project: mastrogb/sc101-iokit
bool net_habitue_device_SC101::init(OSDictionary *properties)
{
  KINFO("init");

  gSC101DeviceIDKey = OSSymbol::withCString(kSC101DeviceIDKey);
  gSC101DeviceIOMaxReadSizeKey = OSSymbol::withCString(kSC101DeviceIOMaxReadSizeKey);
  gSC101DeviceIOMaxWriteSizeKey = OSSymbol::withCString(kSC101DeviceIOMaxWriteSizeKey);
  gSC101DevicePartitionAddressKey = OSSymbol::withCString(kSC101DevicePartitionAddressKey);
  gSC101DeviceRootAddressKey = OSSymbol::withCString(kSC101DeviceRootAddressKey);
  gSC101DevicePartNumberKey = OSSymbol::withCString(kSC101DevicePartNumberKey);
  gSC101DeviceVersionKey = OSSymbol::withCString(kSC101DeviceVersionKey);
  gSC101DeviceLabelKey = OSSymbol::withCString(kSC101DeviceLabelKey);
  gSC101DeviceSizeKey = OSSymbol::withCString(kSC101DeviceSizeKey);
  
  OSString *id = OSDynamicCast(OSString, properties->getObject(gSC101DeviceIDKey));
  if (!id)
    return false;
  
  if (!super::init(properties))
    return false;
  
  OSNumber *ioMaxReadSize = OSDynamicCast(OSNumber, properties->getObject(gSC101DeviceIOMaxReadSizeKey));
  
  if (!ioMaxReadSize ||
      ioMaxReadSize->unsigned64BitValue() < SECTOR_SIZE ||
      ioMaxReadSize->unsigned64BitValue() > MAX_IO_READ_SIZE ||
      ioMaxReadSize->unsigned64BitValue() & (ioMaxReadSize->unsigned64BitValue() - 1))
  {
    ioMaxReadSize = OSNumber::withNumber(DEFAULT_IO_READ_SIZE, 64);
    
    if (ioMaxReadSize)
    {
      setProperty(gSC101DeviceIOMaxReadSizeKey, ioMaxReadSize);
      ioMaxReadSize->release();
    }
  }
  
  OSNumber *ioMaxWriteSize = OSDynamicCast(OSNumber, properties->getObject(gSC101DeviceIOMaxWriteSizeKey));
  
  if (!ioMaxWriteSize ||
      ioMaxWriteSize->unsigned64BitValue() < SECTOR_SIZE ||
      ioMaxWriteSize->unsigned64BitValue() > MAX_IO_WRITE_SIZE ||
      ioMaxWriteSize->unsigned64BitValue() & (ioMaxWriteSize->unsigned64BitValue() - 1))
  {
    ioMaxWriteSize = OSNumber::withNumber(DEFAULT_IO_WRITE_SIZE, 64);
    
    if (ioMaxWriteSize)
    {
      setProperty(gSC101DeviceIOMaxWriteSizeKey, ioMaxWriteSize);
      ioMaxWriteSize->release();
    }
  }
  
  nanoseconds_to_absolutetime(1000000000ULL * 60, &_resolveInterval);
  
  _mediaStateAttached = false;
  _mediaStateChanged = true;
  
  STAILQ_INIT(&_pendingHead);
  _pendingCount = 0;
  STAILQ_INIT(&_outstandingHead);
  _outstandingCount = 0;

  return true;
}
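The resolve interval set in init() above is one minute written out in raw nanoseconds; with the standard constant the same call would read as follows (equivalent, shown only as a readability note):

	nanoseconds_to_absolutetime(60 * NSEC_PER_SEC, &_resolveInterval);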
Code example #21
IOReturn IOHIKeyboard::setParamProperties( OSDictionary * dict )
{
    OSData *		data	= NULL;
    OSNumber *		number	= NULL;
    IOReturn		err 	= kIOReturnSuccess;
    IOReturn		err2	= kIOReturnSuccess;
    unsigned char *	map	= NULL;
    IOHIKeyboardMapper * oldMap	= NULL;
    bool		updated = false;
    UInt64		nano;

    if( dict->getObject(kIOHIDResetKeyboardKey))
		resetKeyboard();

    IOLockLock( _deviceLock);

    if ((number = OSDynamicCast(OSNumber,
                              dict->getObject(kIOHIDKeyRepeatKey))) ||
        (data = OSDynamicCast(OSData,
                              dict->getObject(kIOHIDKeyRepeatKey))))
    {
        nano = (number) ? number->unsigned64BitValue() : *((UInt64 *) (data->getBytesNoCopy()));

        if( nano < EV_MINKEYREPEAT)
            nano = EV_MINKEYREPEAT;
        nanoseconds_to_absolutetime(nano, &_keyRepeat);
        updated = true;
    }

    if ((number = OSDynamicCast(OSNumber,
                              dict->getObject(kIOHIDInitialKeyRepeatKey))) ||
        (data = OSDynamicCast(OSData,
                              dict->getObject(kIOHIDInitialKeyRepeatKey))))
    {
        nano = (number) ? number->unsigned64BitValue() : *((UInt64 *) (data->getBytesNoCopy()));

        if( nano < EV_MINKEYREPEAT)
            nano = EV_MINKEYREPEAT;
        nanoseconds_to_absolutetime(nano, &_initialKeyRepeat);
        updated = true;
    }

    if( (data = OSDynamicCast( OSData, dict->getObject(kIOHIDKeyMappingKey))))
	{
	
		map = (unsigned char *)IOMalloc( data->getLength() );
		bcopy( data->getBytesNoCopy(), map, data->getLength() );
		oldMap = _keyMap;
		_keyMap = IOHIKeyboardMapper::keyboardMapper(this, map, data->getLength(), true);

		if (_keyMap)
		{
			// point the new keymap to the IOHIDSystem, so it can set properties in it
			_keyMap->setKeyboardTarget((IOService *) _keyboardEventTarget);
	
			if (oldMap)
				oldMap->release();
			updated = true;
		}
		else
		{
			_keyMap = oldMap;
			err = kIOReturnBadArgument;
		} 
    }
    if (NULL != (number = OSDynamicCast(OSNumber, dict->getObject(kIOHIDSubinterfaceIDKey))))
    {
        _deviceType = number->unsigned32BitValue();
        updated = true;
    }
		
	// give the keymap a chance to update to new properties
	if (_keyMap)
		err2 = _keyMap->setParamProperties(dict);

    IOLockUnlock( _deviceLock);
	
    if( updated )
        updateProperties();

	// we can only return one error
	if (err == kIOReturnSuccess)
		err = err2;
	
    return( err == kIOReturnSuccess ) ? super::setParamProperties(dict) : err;
}
Code example #22
File: clock.c Project: Prajna/xnu
static uint32_t
calend_set_adjustment(
	long			*secs,
	int				*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/* 
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	/* 
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/* 
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/* 
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big' 
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/* 
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If 
			 * greater than the preset 'big' threshold, slew at a faster 
			 * rate, capping if necessary.
			 */
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/* 
			 * Save the current absolute time. Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure 
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/* 
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/* 
		 * Store the total adjustment time in ns. 
		 */
		calend_adjtotal = total;
		
		/* 
		 * Store the delta for this adjustment period in ns. 
		 */
		clock_calend.adjdelta = delta;

		/* 
		 * Set the interval in absolute time for later return. 
		 */
		interval = calend_adjinterval;
	}
	else {
		/* 
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/* 
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it. 
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / NSEC_PER_SEC);
		*microsecs = (int)((ototal % NSEC_PER_SEC) / NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
	
	return (interval);
}
Code example #23
bool com_reidburke_air_IntelEnhancedSpeedStep::init(OSDictionary* dict) {
	bool res = super::init(dict);
	info("Initializing xnu-speedstep-air\n");
  
  // read data
  cpuid_update_generic_info();
  
	/* Allocate our spinlock for later use */
	Lock = IOSimpleLockAlloc();
	/* Check for a patched kernel which properly implements rtc_clock_stepped() */
	uint64_t magic = -1; // means autodetect
	
	OSBoolean* debugMsgs = (OSBoolean*) dict->getObject("DebugMessages");
	if (debugMsgs != 0)
		DebugOn = debugMsgs->getValue();
	else
		DebugOn = false;
	
	OSNumber* kernelFeatures = (OSNumber*) dict->getObject("KernelFeatures");
	if (kernelFeatures != 0)
		magic = kernelFeatures->unsigned8BitValue();
	
	if (magic == 255) nanoseconds_to_absolutetime(~(0), &magic); //255uint = -1 int
	
	if (magic == 1) {
		RtcFixKernel = true;
		Below1Ghz	= false;
	} else if (magic == 2) {
		RtcFixKernel = true;
		Below1Ghz	= true;
	} else if (magic == 3) {
		RtcFixKernel = false;
		Below1Ghz = true;
	} else {
		RtcFixKernel = false;
		Below1Ghz	= false;
	}
	
	checkForNby2Ratio(); // check and store in global variable before loading pstate override
	if (getFSB() == false)
		return false;
	
	OSArray* overrideTable = (OSArray*) dict->getObject("PStateTable");
	if (overrideTable != 0 )
		loadPStateOverride(overrideTable);
	
	OSNumber* defaultState = (OSNumber*) dict->getObject("DefaultPState");
	if (defaultState != 0)
		DefaultPState = defaultState->unsigned8BitValue();
	else
		DefaultPState = -1; // indicate no default state

	OSNumber* maxLatency = (OSNumber*) dict->getObject("Latency");
	if (maxLatency != 0)
		MaxLatency = maxLatency->unsigned32BitValue();
	else
		MaxLatency = 0;
	
 	
  /* Make preliminary check */
	if ( (strcmp(cpuid_info()->cpuid_vendor, CPUID_VID_INTEL) == 0) // Check it's actually Intel
      && ( cpuid_info()->cpuid_features & CPUID_FEATURE_EST) ) { // Check it supports EST
	 
    autostart = (OSNumber*) dict->getObject("AutoStart");
    if ( autostart != 0 ) // guard: the AutoStart key may be absent
      info( "do autostart %d \n", autostart->unsigned8BitValue() );
    
    if ( autostart != 0 && autostart->unsigned8BitValue() == 1 ) {
      Throttler = new AutoThrottler;
      if (Throttler) {
        dbg("Throttler instantiated.\n");
        OSNumber* targetload = (OSNumber*) dict->getObject("TargetCPULoad");
        if (targetload != 0)
          Throttler->targetCPULoad = (targetload->unsigned16BitValue()) * 10;
        else
          Throttler->targetCPULoad = 700;
      }
    }
	}
  
	totalThrottles = 0;
	frequencyUsage[0] = '\0';
	
	/* Return whatever the superclass returned */
	return res;
}
Code example #24
// Class probe
IOService * 
VoodooPState::probe(IOService * provider,
				   SInt32 * score)
{
	Ready = false;

	// Probe the superclass
	if (IOService::probe(provider, score) != this) return NULL;
	
	// Read our own values from the property list
	OSDictionary * dictionary = OSDynamicCast(OSDictionary, getProperty(keyPowerControl));
	if (!dictionary) return NULL;
	UseEfiFsb			= getPlistValue(dictionary, keyUseEfiFsb);
	VoltageOverride		= getPlistValue(dictionary, keyVoltageOverride);
	VoltageProbe		= getPlistValue(dictionary, keyVoltageProbe);
	UserVoltageMax		= getPlistValue(dictionary, keyUserVoltageMax);
	UserVoltageMin		= getPlistValue(dictionary, keyUserVoltageMin);
	ColdStart			= getPlistValue(dictionary, keyColdStart);
	TimerInterval		= getPlistValue(dictionary, keyTimerInterval);
	UseACPI				= getPlistValue(dictionary, keyUseACPI);
	if(TimerInterval < 50){
		TimerInterval = 50;
	}
	
	// Get CPU's from I/O Kit
	CpuCount = getCpuCount();
	
	// No CPU's found -> bailout
	if (CpuCount == 0) return NULL;
	
	// Get FSB from /efi/platform
	CpuFSB = gPEClockFrequencyInfo.bus_frequency_max_hz >> 2;
	if (UseEfiFsb) {
		IORegistryEntry * entry = fromPath(keyEfiPlatform, gIODTPlane);
		if (entry) {
			OSObject * object = entry->getProperty(keyEfiFsbFrequency);
			if (object && (OSTypeIDInst(object) == OSTypeID(OSData))) {
				OSData * data = OSDynamicCast(OSData, object);
				if (data) {
					CpuFSB = * (UInt32 *) data->getBytesNoCopy();
					gPEClockFrequencyInfo.bus_frequency_max_hz = CpuFSB << 2;
				}
			}
		}		
	}
	CpuFSB = (CpuFSB+Mega/2) / Mega;	// Mega is enough

#if	SUPPORT_VOODOO_KERNEL
	{
		UInt64 magic;
		nanoseconds_to_absolutetime(~(0), &magic);
		VoodooKernel = (magic == 2);
	}
#endif
	// Enumerate CPU's
	CpuCoreTech = Unknown;
	{
		uint32_t data[4];

		do_cpuid(0, data);
		((uint32_t*)vendor)[0] = data[1];
		((uint32_t*)vendor)[1] = data[3];
		((uint32_t*)vendor)[2] = data[2];
		vendor[15] = 0;

		do_cpuid(1, data);
		CpuSignature = data[0];

		// Features
		((uint32_t*)&Features)[0] = data[3];
		((uint32_t*)&Features)[1] = data[2];

		for( int i = 0; i < 3; i++ ){
			do_cpuid(0x80000002+i, data);
			memcpy( &brand_string[i*16], data, 16 );
		}
		brand_string[16*3] = 0;
	}

	// Find core technology and cross core vendor specifics
	// Intel
	if (!strncmp(vendor, CPUID_VID_INTEL, sizeof(CPUID_VID_INTEL))) {
		if(!intel_probe(this)) return NULL;
	}
	// AMD
	else if (!strncmp(vendor, CPUID_VID_AMD, sizeof(CPUID_VID_AMD))) {
		if(!amd_probe(this)) return NULL;
	}
	// Unknown CPU or core technology
	else {
		ErrorLog("CPU: Core Technology Unknown - Signature %x (%s)(%s)",
				 (unsigned int)CpuSignature,
				 vendor,
				 brand_string);
		return NULL;
	}
	
	return this;
}
Code example #25
File: hfs_fsinfo.c Project: Andromeda-OS/Kernel
/* 
 * Function to traverse all the records of a btree and then call caller-provided 
 * callback function for every record found.  The type of btree is chosen based 
 * on the fileID provided by the caller.  This function grabs the correct locks
 * depending on the type of btree it will be traversing and flags provided 
 * by the caller.
 *
 * Note: It might drop and reacquire the locks during execution.
 */
static errno_t
traverse_btree(struct hfsmount *hfsmp, uint32_t btree_fileID, traverse_btree_flag_t flags,
			   void *fsinfo, int (*callback)(struct hfsmount *, HFSPlusKey *, HFSPlusRecord *, void *))
{
	int error = 0;
	int lockflags = 0;
	int ret_lockflags = 0;
	FCB *fcb;
	struct BTreeIterator *iterator = NULL;
	struct FSBufferDescriptor btdata;
	int btree_operation;
	HFSPlusRecord record;
	HFSPlusKey *key;
	uint64_t start, timeout_abs;

	switch(btree_fileID) {
		case kHFSExtentsFileID: 
			fcb = VTOF(hfsmp->hfs_extents_vp);
			lockflags = SFL_EXTENTS;
			break;
		case kHFSCatalogFileID:
			fcb = VTOF(hfsmp->hfs_catalog_vp);
			lockflags = SFL_CATALOG;
			break;
		case kHFSAttributesFileID:
			// Attributes file doesn't exist; there are no records to iterate.
			if (hfsmp->hfs_attribute_vp == NULL)
				return error;
			fcb = VTOF(hfsmp->hfs_attribute_vp);
			lockflags = SFL_ATTRIBUTE;
			break;

		default:
			return EINVAL;
	}

	MALLOC(iterator, struct BTreeIterator *, sizeof(struct BTreeIterator), M_TEMP, M_WAITOK | M_ZERO);

	/* The key is initialized to zero because we are traversing entire btree */
	key = (HFSPlusKey *)&iterator->key;

	if (flags & TRAVERSE_BTREE_EXTENTS) {
		lockflags |= SFL_EXTENTS;
	}

	btdata.bufferAddress = &record;
	btdata.itemSize = sizeof(HFSPlusRecord);
	btdata.itemCount = 1;

	/* Lock btree for duration of traversal */
	ret_lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_SHARED_LOCK);
	btree_operation = kBTreeFirstRecord;

	nanoseconds_to_absolutetime(HFS_FSINFO_MAX_LOCKHELD_TIME, &timeout_abs);
	start = mach_absolute_time();

	while (1) {

		if (msleep(NULL, NULL, PINOD | PCATCH,
				   "hfs_fsinfo", NULL) == EINTR) {
			error = EINTR;
			break;
		}

		error = BTIterateRecord(fcb, btree_operation, iterator, &btdata, NULL);
		if (error != 0) {
			if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) {
				error = 0;
			}
			break;
		}
		/* Lookup next btree record on next call to BTIterateRecord() */
		btree_operation = kBTreeNextRecord;

		/* Call our callback function and stop iteration if there are any errors */
		error = callback(hfsmp, key, &record, fsinfo);
		if (error) {
			break;
		}

		/* let someone else use the tree after we've processed over HFS_FSINFO_MAX_LOCKHELD_TIME */
		if ((mach_absolute_time() - start) >= timeout_abs) {

			/* release b-tree locks and let someone else get the lock */
			hfs_systemfile_unlock (hfsmp, ret_lockflags);

			/* add tsleep here to force context switch and fairness */
			tsleep((caddr_t)hfsmp, PRIBIO, "hfs_fsinfo", 1);

			/*
			 * re-acquire the locks in the same way that we wanted them originally.
			 * note: it is subtle but worth pointing out that in between the time that we
			 * released and now want to re-acquire these locks that the b-trees may have shifted
			 * slightly but significantly. For example, the catalog or other b-tree could have grown
			 * past 8 extents and now requires the extents lock to be held in order to be safely
			 * manipulated. We can't be sure of the state of the b-tree from where we last left off.
			 */

			ret_lockflags = hfs_systemfile_lock (hfsmp, lockflags, HFS_SHARED_LOCK);

			/*
			 * It's highly likely that the search key we stashed away before dropping lock
			 * no longer points to an existing item.  Iterator's IterateRecord is able to
			 * re-position itself and process the next record correctly.  With lock dropped,
			 * there might be records missed for statistic gathering, which is ok. The
			 * point is to get aggregate values.
			 */

			start = mach_absolute_time();

			/* loop back around and get another record */
		}
	}

	hfs_systemfile_unlock(hfsmp, ret_lockflags);
	FREE (iterator, M_TEMP);
	return MacToVFSError(error);
}
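The lock-hold budget pattern from traverse_btree() above, reduced to a standalone sketch; MY_BUDGET_NS and the three helper functions are hypothetical names, not HFS API:

	uint64_t budget_abs, start;

	nanoseconds_to_absolutetime(MY_BUDGET_NS, &budget_abs);
	start = mach_absolute_time();

	while (more_work()) {				/* hypothetical work loop */
		do_one_unit();
		if ((mach_absolute_time() - start) >= budget_abs) {
			yield_and_retake_locks();	/* hypothetical: let other lock waiters in */
			start = mach_absolute_time();
		}
	}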
Code example #26
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the signal semaphore.
 */
kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t		*wait_timep,
	void 			(*caller_cont)(kern_return_t))
{
	boolean_t			nonblocking;
	int					wait_result;
	spl_t				spl_level;
	kern_return_t		kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	/*
	 * Decide if we really have to wait.
	 */
	nonblocking = (wait_timep != (mach_timespec_t *)0) ?
		      (wait_timep->tv_sec == 0 && wait_timep->tv_nsec == 0) :
		      FALSE;

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (nonblocking) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		uint64_t	abstime;
		thread_t	self = current_thread();

		wait_semaphore->count = -1;  /* we don't keep an actual count */
		thread_lock(self);
		
		/*
		 * If it is a timed wait, calculate the wake up deadline.
		 */
		if (wait_timep != (mach_timespec_t *)0) {
			nanoseconds_to_absolutetime((uint64_t)wait_timep->tv_sec *
											NSEC_PER_SEC + wait_timep->tv_nsec, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		}
		else
			abstime = 0;

		(void)wait_queue_assert_wait64_locked(
					&wait_semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE, abstime,
					self);
		thread_unlock(self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/* 
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}
	
	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}
Code example #27
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis           The event semaphore.
 * @param   fFlags          See RTSemEventMultiWaitEx.
 * @param   uTimeout        See RTSemEventMultiWaitEx.
 * @param   pSrcPos         The source code position of the wait.
 */
static int rtR0SemEventMultiDarwinWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    if (uTimeout != 0 || (fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        RT_ASSERT_PREEMPTIBLE();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Is the event already signalled or do we have to wait?
     */
    int rc;
    uint32_t const fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    if (fOrgStateAndGen & RTSEMEVENTMULTIDARWIN_STATE_MASK)
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait. So, we'll need to convert the timeout and figure
         * out if it's indefinite or not.
         */
        uint64_t uNsAbsTimeout = 1;
        if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        {
            if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
                uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                         ? uTimeout * UINT32_C(1000000)
                         : UINT64_MAX;
            if (uTimeout == UINT64_MAX)
                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
            else
            {
                uint64_t u64Now;
                if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                {
                    if (uTimeout != 0)
                    {
                        u64Now = RTTimeSystemNanoTS();
                        uNsAbsTimeout = u64Now + uTimeout;
                        if (uNsAbsTimeout < u64Now) /* overflow */
                            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
                    }
                }
                else
                {
                    uNsAbsTimeout = uTimeout;
                    u64Now        = RTTimeSystemNanoTS();
                    uTimeout      = u64Now < uTimeout ? uTimeout - u64Now : 0;
                }
            }
        }

        if (   !(fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
            && uTimeout == 0)
        {
            /*
             * Poll call, we already checked the condition above so no need to
             * wait for anything.
             */
            rc = VERR_TIMEOUT;
        }
        else
        {
            for (;;)
            {
                /*
                 * Do the actual waiting.
                 */
                ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, true);
                wait_interrupt_t fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE ? THREAD_ABORTSAFE : THREAD_UNINT;
                wait_result_t    rcWait;
                if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
                    rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
                else
                {
                    uint64_t u64AbsTime;
                    nanoseconds_to_absolutetime(uNsAbsTimeout, &u64AbsTime);
                    rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                                     (event_t)pThis, fInterruptible, u64AbsTime);
                }

                /*
                 * Deal with the wait result.
                 */
                if (RT_LIKELY(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC))
                {
                    switch (rcWait)
                    {
                        case THREAD_AWAKENED:
                            if (RT_LIKELY(ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen))
                                rc = VINF_SUCCESS;
                            else if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
                                rc = VERR_INTERRUPTED;
                            else
                                continue; /* Seen this happen after fork/exec/something. */
                            break;

                        case THREAD_TIMED_OUT:
                            Assert(!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE));
                            rc = VERR_TIMEOUT;
                            break;

                        case THREAD_INTERRUPTED:
                            Assert(fInterruptible != THREAD_UNINT);
                            rc = VERR_INTERRUPTED;
                            break;

                        case THREAD_RESTART:
                            AssertMsg(pThis->u32Magic == ~RTSEMEVENTMULTI_MAGIC, ("%#x\n", pThis->u32Magic));
                            rc = VERR_SEM_DESTROYED;
                            break;

                        default:
                            AssertMsgFailed(("rcWait=%d\n", rcWait));
                            rc = VERR_INTERNAL_ERROR_3;
                            break;
                    }
                }
                else
                    rc = VERR_SEM_DESTROYED;
                break;
            }
        }
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);
    return rc;
}