Example #1
/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
thread_depress_abstime(
	uint64_t				interval)
{
	register thread_t		self = current_thread();
	uint64_t				deadline;
	spl_t					s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
		processor_t		myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_flags |= TH_SFLAG_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}
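Every example on this page follows the same idiom: express a relative
interval in Mach absolute-time units, then anchor it to the current
time with clock_absolutetime_interval_to_deadline().  A minimal sketch
of that conversion; the helper name and the millisecond parameter are
illustrative, not taken from the sources above:

#include <kern/clock.h>

/* Sketch: compute an absolute deadline `ms` milliseconds from now. */
static uint64_t
deadline_after_ms(uint64_t ms)
{
	uint64_t	interval, deadline;

	/* relative nanoseconds -> abstime ticks */
	nanoseconds_to_absolutetime(ms * NSEC_PER_MSEC, &interval);
	/* abstime interval -> deadline (now + interval) */
	clock_absolutetime_interval_to_deadline(interval, &deadline);
	return deadline;
}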
Example #2
void IOHIKeyboard::scheduleAutoRepeat()
// Description:	Schedule a procedure to be called when a timeout has expired
//		so that we can generate a repeated key.
// Preconditions:
// *	_deviceLock should be held on entry
{
    KeyboardReserved *tempReservedStruct = GetKeyboardReservedStructEventForService(this); 
          
    if ( _calloutPending == true )
    {        
        if (tempReservedStruct) {
            thread_call_cancel(tempReservedStruct->repeat_thread_call);
        }
        _calloutPending = false;
    }
    if ( AbsoluteTime_to_scalar(&_downRepeatTime) )
    {
        AbsoluteTime deadline;
        clock_absolutetime_interval_to_deadline(_downRepeatTime, &deadline);
        if (tempReservedStruct) {
            thread_call_enter_delayed(tempReservedStruct->repeat_thread_call, deadline);
        }
        _calloutPending = true;
    }
}
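The same two-call sequence drives any one-shot thread_call.  A sketch,
assuming `call` was created earlier with thread_call_allocate(); the
helper itself is illustrative, not part of IOHIKeyboard:

/* Sketch: fire `call` once, `ns` nanoseconds from now. */
static void
schedule_once(thread_call_t call, uint64_t ns)
{
	uint64_t	interval, deadline;

	nanoseconds_to_absolutetime(ns, &interval);
	clock_absolutetime_interval_to_deadline(interval, &deadline);
	thread_call_enter_delayed(call, deadline);
}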
Example #3
File: cpu.c  Project: argp/xnu
/*
 *	Routine:	cpu_idle_exit
 *	Function:	Exit the idle loop: restore the active thread's
 *			pmap, notify the idle handler, and re-arm the idle timer.
 */
void
cpu_idle_exit(boolean_t from_reset __unused)
{
	uint64_t	new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t     *cpu_data_ptr = getCpuDatap();

#if KPC
	kpc_idle_exit();
#endif

	pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread());

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}
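Note the convention in the idle-timer branch: a tick count of zero
returned by the notify callback disarms the timer, anything else is
re-anchored to the current time.  If you needed to inspect the pending
deadline, absolutetime_to_nanoseconds() is the inverse conversion; a
hypothetical snippet, not from cpu.c:

	/* Sketch: report how far away the idle-timer deadline is. */
	uint64_t	now = mach_absolute_time();

	if (cpu_data_ptr->idle_timer_deadline > now) {
		uint64_t	ns;

		absolutetime_to_nanoseconds(cpu_data_ptr->idle_timer_deadline - now, &ns);
		kprintf("idle timer fires in %llu ns\n", ns);
	}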
Example #4
static __inline__ uint64_t
semaphore_deadline(
	unsigned int		sec,
	clock_res_t			nsec)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime);
	clock_absolutetime_interval_to_deadline(abstime, &abstime);

	return (abstime);
}
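Usage is straightforward; for instance, a deadline 1.5 seconds from
now:

	uint64_t deadline = semaphore_deadline(1, 500000000);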
Example #5
/*
 *	Set a timeout.
 *
 *	fcn:		function to call
 *	param:		parameter to pass to function
 *	ts:		timeout interval, in timespec
 */
void
bsd_timeout(
	timeout_fcn_t			fcn,
	void					*param,
	struct timespec         *ts)
{
	uint64_t		deadline = 0;

	if (ts && (ts->tv_sec || ts->tv_nsec)) {
		nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &deadline);
		clock_absolutetime_interval_to_deadline(deadline, &deadline);
	}
	thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
}
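A usage sketch, where my_fcn and arg are placeholders rather than names
from this page:

	struct timespec ts = { .tv_sec = 0, .tv_nsec = 250 * NSEC_PER_MSEC };

	bsd_timeout(my_fcn, arg, &ts);	/* runs my_fcn(arg) ~250 ms from now */

A NULL or zero timespec leaves `deadline` at 0, so the call is
scheduled to run as soon as possible.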
Example #6
File: cpu.c  Project: argp/xnu
void
cpu_idle(void)
{
	cpu_data_t     *cpu_data_ptr = getCpuDatap();
	uint64_t	new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif

	platform_cache_idle_enter();
	cpu_idle_wfi((boolean_t) wfi_fast);
	platform_cache_idle_exit();

	ClearIdlePop(TRUE);
	cpu_idle_exit(FALSE);
}
Example #7
UInt32 HoRNDIS::outputPacket(mbuf_t packet, void *param) {
	mbuf_t m;
	size_t pktlen = 0;
	IOReturn ior = kIOReturnSuccess;
	UInt32 poolIndx;
	int i;

	LOG(V_DEBUG, "");
	
	/* Count the total size of this packet */
	m = packet;
	while (m) {
		pktlen += mbuf_len(m);
		m = mbuf_next(m);
	}
	
	LOG(V_DEBUG, "%ld bytes", pktlen);
	
	if (pktlen > (mtu + 14)) {
		LOG(V_ERROR, "packet too large (%ld bytes, but I told you you could have %d!)", pktlen, mtu);
		fpNetStats->outputErrors++;
		freePacket(packet);	/* we own the mbuf; free it before dropping */
		return kIOReturnOutputDropped;
	}
	
	/* Find an output buffer in the pool */
	IOLockLock(outbuf_lock);
	for (i = 0; i < OUT_BUF_MAX_TRIES; i++) {
		AbsoluteTime ivl, deadl;
		
		for (poolIndx = 0; poolIndx < N_OUT_BUFS; poolIndx++)
			if (!outbufs[poolIndx].inuse) {
				outbufs[poolIndx].inuse = true;
				break;
			}
		if (poolIndx != N_OUT_BUFS)
			break;
		
		/* "while", not "if".  See Symphony X's seminal work on this topic, /Paradise Lost/ (2007). */
		nanoseconds_to_absolutetime(OUT_BUF_WAIT_TIME, &ivl);
		clock_absolutetime_interval_to_deadline(ivl, &deadl);
		LOG(V_NOTE, "waiting for buffer...");
		IOLockSleepDeadline(outbuf_lock, outbufs, deadl, THREAD_INTERRUPTIBLE);
	}
	IOLockUnlock(outbuf_lock);
	
	if (poolIndx == N_OUT_BUFS) {
		LOG(V_ERROR, "timed out waiting for buffer");
		freePacket(packet);	/* drop the packet; kIOReturnTimeout is not an output code */
		return kIOReturnOutputDropped;
	}
	
	/* Start filling in the send buffer */
	struct rndis_data_hdr *hdr;
	hdr = (struct rndis_data_hdr *)outbufs[poolIndx].buf;
	
	outbufs[poolIndx].inuse = true;
	
	outbufs[poolIndx].mdp->setLength(pktlen + sizeof *hdr);
	
	memset(hdr, 0, sizeof *hdr);
	hdr->msg_type = RNDIS_MSG_PACKET;
	hdr->msg_len = cpu_to_le32(pktlen + sizeof *hdr);
	hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
	hdr->data_len = cpu_to_le32(pktlen);
	mbuf_copydata(packet, 0, pktlen, hdr + 1);
	
	freePacket(packet);
	
	/* Now, fire it off! */
	outbufs[poolIndx].comp.target    = this;
	outbufs[poolIndx].comp.parameter = (void *)(uintptr_t)poolIndx;
	outbufs[poolIndx].comp.action    = dataWriteComplete;
	
	ior = fOutPipe->Write(outbufs[poolIndx].mdp, &outbufs[poolIndx].comp);
	if (ior != kIOReturnSuccess) {
		LOG(V_ERROR, "write failed");
		if (ior == kIOUSBPipeStalled) {
			fOutPipe->Reset();
			ior = fOutPipe->Write(outbufs[poolIndx].mdp, &outbufs[poolIndx].comp);
			if (ior != kIOReturnSuccess) {
				LOG(V_ERROR, "write really failed");
				fpNetStats->outputErrors++;
				return ior;
			}
		}
	}
	fpNetStats->outputPackets++;
	
	return kIOReturnOutputSuccess;
}
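The bounded wait inside the buffer loop above is a reusable idiom.  A
minimal sketch with placeholder names, assuming the caller already
holds `lock` (IOLockSleepDeadline() drops it, sleeps, and reacquires it
before returning):

#include <IOKit/IOLocks.h>

/* Sketch: sleep on `event` until woken or until `ns` nanoseconds pass. */
static int
sleep_with_timeout(IOLock *lock, void *event, uint64_t ns)
{
	AbsoluteTime	interval, deadline;

	nanoseconds_to_absolutetime(ns, &interval);
	clock_absolutetime_interval_to_deadline(interval, &deadline);
	return IOLockSleepDeadline(lock, event, deadline, THREAD_UNINT);
}

The return value is THREAD_TIMED_OUT when the deadline passes before a
wakeup arrives.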
Example #8
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the signal semaphore.
 */
kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t		*wait_timep,
	void 			(*caller_cont)(kern_return_t))
{
	boolean_t			nonblocking;
	int					wait_result;
	spl_t				spl_level;
	kern_return_t		kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	/*
	 * Decide if we really have to wait.
	 */
	nonblocking = (wait_timep != (mach_timespec_t *)0) ?
		      (wait_timep->tv_sec == 0 && wait_timep->tv_nsec == 0) :
		      FALSE;

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (nonblocking) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		uint64_t	abstime;
		thread_t	self = current_thread();

		wait_semaphore->count = -1;  /* we don't keep an actual count */
		thread_lock(self);
		
		/*
		 * If it is a timed wait, calculate the wake up deadline.
		 */
		if (wait_timep != (mach_timespec_t *)0) {
			nanoseconds_to_absolutetime((uint64_t)wait_timep->tv_sec *
											NSEC_PER_SEC + wait_timep->tv_nsec, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		}
		else
			abstime = 0;

		(void)wait_queue_assert_wait64_locked(
					&wait_semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE, abstime,
					self);
		thread_unlock(self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/* 
			 * Uh oh!  The semaphore we were supposed to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}
	
	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of their own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}
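From user space, the timed branch above is reached through
semaphore_timedwait().  A sketch, assuming `sem` is a valid
semaphore_t owned by the caller:

	#include <mach/mach.h>
	#include <mach/semaphore.h>

	mach_timespec_t	ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	kern_return_t	kr = semaphore_timedwait(sem, ts);

	if (kr == KERN_OPERATION_TIMED_OUT) {
		/* the deadline computed in the kernel expired first */
	}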