Example #1
/*
 * Advise a kernel process to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kthread_suspend(struct thread *td, int timo)
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * td_pflags should not be read by any thread other than
	 * curthread, but as long as this flag is invariant during the
	 * thread's lifetime, it is OK to check its state.
	 */
	if ((td->td_pflags & TDP_KTHREAD) == 0)
		return (EINVAL);

	/*
	 * The caller of the primitive should have already checked that the
	 * thread is up and running, thus not being blocked by other
	 * conditions.
	 */
	PROC_LOCK(p);
	thread_lock(td);
	td->td_flags |= TDF_KTH_SUSP;
	thread_unlock(td);
	return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
	    timo));
}
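For context, a cooperating kthread typically polls for this request in its main loop. A minimal sketch of the voluntary side, using FreeBSD's kthread_suspend_check() (which sleeps while TDF_KTH_SUSP is set and wakes the suspender); the thread body itself is hypothetical:

/*
 * Hypothetical kthread main loop illustrating voluntary suspension.
 */
static void
my_kthread_main(void *arg __unused)
{

	for (;;) {
		kthread_suspend_check();	/* parks here while suspended */
		/* ... perform one unit of work ... */
	}
}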
Example #2
int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;
	struct proc *p2;
	struct thread *td2;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

	if ((error = fork1(td, RFFDG | RFPROC | RFSTOPPED, 0, &p2, NULL, 0))
	    != 0)
		return (error);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;

	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	td2 = FIRST_THREAD_IN_PROC(p2);

	/*
	 * Make this runnable after we are finished with it.
	 */
	thread_lock(td2);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	thread_unlock(td2);

	return (0);
}
Example #3
void svc_sem_post(struct semaphore *sem)
{
	struct thread *thread;

	thread_lock(state);

	sem->count++;

	/* Clamp the count at the semaphore's configured maximum. */
	if (sem->count > sem->value)
		sem->count = sem->value;

	/* A non-positive count means at least one thread is still waiting. */
	if (sem->count <= 0 && !list_is_empty(&sem->waiting_threads)) {
		sem->waiting--;

		thread = list_peek_head_type(&sem->waiting_threads, struct thread, event_node);
		thread->state = THREAD_RUNNABLE;

		debug_printk("waking up thread: %d\n", thread->pid);

		remove_waiting_thread(sem, thread);
		insert_runnable_thread(thread);
		arch_system_call(SVC_THREAD_SWITCH, NULL, NULL, NULL);
	}

	thread_unlock(state);
}
Example #4
/*
 *	thread_doswapin:
 *
 *	Swap in the specified thread and, if it is runnable, put it
 *	on a run queue.  No locks should be held on entry, as it is
 *	likely that this routine will sleep (waiting for stack allocation).
 */
kern_return_t thread_doswapin(thread_t thread)
{
	kern_return_t kr;
	spl_t	s;

	/*
	 *	Allocate the kernel stack.
	 */

	kr = stack_alloc(thread, thread_continue);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 *	Place on run queue.  
	 */

	s = splsched();
	thread_lock(thread);
	thread->state &= ~(TH_SWAPPED | TH_SW_COMING_IN);
	if (thread->state & TH_RUN)
		thread_setrun(thread, TRUE);
	thread_unlock(thread);
	(void) splx(s);
	return KERN_SUCCESS;
}
Example #5
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it. */
	if (!wait_queue_is_valid(wq))
		return (cur_thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
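For reference, callers normally pair the assert with a block. A sketch of the usual sequence, assuming this codebase's primitives:

	wait_result_t wr;

	wr = wait_queue_assert_wait64(wq, event, THREAD_UNINT);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);	/* actually sleep */
	/* wr is now THREAD_AWAKENED, THREAD_TIMED_OUT, and so on */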
Example #6
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;
		
	thread_mtx_unlock(thread);

	return (result);
}
Example #7
static void
act_set_ast(
	    thread_t	thread,
	    ast_t ast)
{
	spl_t		s = splsched();
	
	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if ( processor != PROCESSOR_NULL            &&
		     processor->state == PROCESSOR_RUNNING  &&
		     processor->active_thread == thread	     )
			cause_ast_check(processor);
		thread_unlock(thread);
	}
	
	splx(s);
}
Example #8
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	thread_t				thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	s = splsched();
	thread_lock(thread);
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
			thread_mtx_unlock(thread);
			thread_block((thread_continue_t)special_handler_continue);
			/*NOTREACHED*/
		}
	}
	else {
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
Example #9
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * switch to a new thread.
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_swtch(kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	lwp->lwp_asleep = 1;			/* /proc */
	lwp->lwp_sysabort = 0;			/* /proc */
	lwp->lwp_ru.nvcsw++;
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	(void) new_mstate(curthread, LMS_SLEEP);
	disp_lock_exit_high(&shuttle_lock);
	mutex_exit(l);
	if (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread))
		setrun(curthread);
	swtch();
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
Example #10
int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	struct fork_req fr;
	int error;
	struct proc *p2;
	struct thread *td2;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFMEM | RFPPWAIT | RFSTOPPED;
	fr.fr_procp = &p2;
	if ((error = fork1(td, &fr)) != 0)
		return (error);

	td2 = FIRST_THREAD_IN_PROC(p2);

	linux_proc_init(td, td2, 0);

	td->td_retval[0] = p2->p_pid;

	/*
	 * Make this runnable after we are finished with it.
	 */
	thread_lock(td2);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	thread_unlock(td2);

	return (0);
}
Example #11
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
    int iPriority;

    switch (enmType)
    {
        case RTTHREADTYPE_INFREQUENT_POLLER:    iPriority = PZERO + 8;      break;
        case RTTHREADTYPE_EMULATION:            iPriority = PZERO + 4;      break;
        case RTTHREADTYPE_DEFAULT:              iPriority = PZERO;          break;
        case RTTHREADTYPE_MSG_PUMP:             iPriority = PZERO - 4;      break;
        case RTTHREADTYPE_IO:                   iPriority = PRIBIO;         break;
        case RTTHREADTYPE_TIMER:                iPriority = PRI_MIN_KERN;   break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            return VERR_INVALID_PARAMETER;
    }

#if __FreeBSD_version < 700000
    /* Do like they're doing in subr_ntoskrnl.c... */
    mtx_lock_spin(&sched_lock);
#else
    thread_lock(curthread);
#endif
    sched_prio(curthread, iPriority);
#if __FreeBSD_version < 600000
    curthread->td_base_pri = iPriority;
#endif
#if __FreeBSD_version < 700000
    mtx_unlock_spin(&sched_lock);
#else
    thread_unlock(curthread);
#endif

    return VINF_SUCCESS;
}
Example #12
/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(thread_t thread)
{
    spl_t	s;

    if (thread == THREAD_NULL)
	return(KERN_INVALID_ARGUMENT);

    s = splsched();
    thread_lock(thread);

    /*
     *	Only restore priority if thread is depressed.
     */
    if (thread->depress_priority >= 0) {
	reset_timeout_check(&thread->depress_timer);
	thread->priority = thread->depress_priority;
	thread->depress_priority = -1;
	compute_priority(thread, FALSE);
    }

    thread_unlock(thread);
    (void) splx(s);
    return(KERN_SUCCESS);
}
Example #13
/*
 *	thread_depress_priority
 *
 *	Depress the thread's priority to the lowest possible value for the
 *	specified period.  Intended for use when a thread wants a lock but
 *	doesn't know which other thread is holding it.  As with thread_switch,
 *	fixed-priority threads get exactly what they asked for.  Users access
 *	this via the SWITCH_OPTION_DEPRESS option to thread_switch.  A time
 *	of zero results in no timeout being scheduled.
 */
void
thread_depress_priority(
	thread_t 		thread,
	mach_msg_timeout_t 	depress_time)
{
    unsigned int ticks;
    spl_t	s;

    /* convert from milliseconds to ticks */
    ticks = convert_ipc_timeout_to_ticks(depress_time);

    s = splsched();
    thread_lock(thread);

    /*
     *	If thread is already depressed, override previous depression.
     */
    reset_timeout_check(&thread->depress_timer);

    /*
     *	Save current priority, then set priority and
     *	sched_pri to their lowest possible values.
     */
    thread->depress_priority = thread->priority;
    thread->priority = 31;
    thread->sched_pri = 31;
    if (ticks != 0)
	set_timeout(&thread->depress_timer, ticks);

    thread_unlock(thread);
    (void) splx(s);
}
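As the comment above notes, user programs reach this path via the SWITCH_OPTION_DEPRESS option; a minimal user-space sketch of the Mach trap (timeout in milliseconds):

#include <mach/mach.h>
#include <mach/thread_switch.h>

/* Yield and depress priority for ~10 ms while a lock is contended. */
void
lock_backoff(void)
{
	(void) thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
}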
Example #14
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
		thread->sched_mode &= ~TH_MODE_REALTIME;

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread->sched_mode |= TH_MODE_TIMESHARE;

			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else {
		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
Example #15
static int
test_modinit(void)
{
	struct thread *td;
	int i, error, pri_range, pri_off;

	pri_range = PRI_MIN_TIMESHARE - PRI_MIN_REALTIME;
	test_epoch = epoch_alloc(EPOCH_PREEMPT);
	for (i = 0; i < mp_ncpus*2; i++) {
		etilist[i].threadid = i;
		error = kthread_add(testloop, &etilist[i], NULL, &testthreads[i],
							0, 0, "epoch_test_%d", i);
		if (error) {
			printf("%s: kthread_add(epoch_test): error %d", __func__,
				   error);
		} else {
			pri_off = (i*4)%pri_range;
			td = testthreads[i];
			thread_lock(td);
			sched_prio(td, PRI_MIN_REALTIME + pri_off);
			thread_unlock(td);
		}
	}
	inited = 1;
	return (0);
}
Example #16
/*
 * lck_rw_clear_promotion: Undo priority promotions when the last RW
 * lock is released by a thread (if a promotion was active)
 */
void lck_rw_clear_promotion(thread_t thread)
{
	assert(thread->rwlock_count == 0);

	/* Cancel any promotions if the thread had actually blocked while holding a RW lock */
	spl_t s = splsched();

	thread_lock(thread);

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		thread->sched_flags &= ~TH_SFLAG_RW_PROMOTED;

		if (thread->sched_flags & TH_SFLAG_PROMOTED) {
			/* Thread still has a mutex promotion */
		} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
							      thread->sched_pri, DEPRESSPRI, 0, 0, 0);
			
			set_sched_pri(thread, DEPRESSPRI);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
								  thread->sched_pri, thread->base_pri, 0, 0, 0);
			
			thread_recompute_sched_pri(thread, FALSE);
		}
	}

	thread_unlock(thread);
	splx(s);
}
Example #17
/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
thread_depress_abstime(
	uint64_t				interval)
{
	register thread_t		self = current_thread();
	uint64_t				deadline;
	spl_t					s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
		processor_t		myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_flags |= TH_SFLAG_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}
Example #18
static int
read_frame(void)
{
    struct v4l2_buffer buf;

    CLEAR (buf);

    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;

    if (-1 == _ioctl (video.fd, VIDIOC_DQBUF, &buf)) {
        switch (errno) {
            case EAGAIN:
                return 0;

            case EIO:
                /* Could ignore EIO, see spec. */
                /* fall through */

            default:
                assert(0, "failure on ioctl.VIDIOC_DQBUF");
        }
    }

    assert(buf.index < n_buffers, "non-fatal assert");

    thread_lock(video.array);
    video.array.val = buffers[buf.index].start;
    thread_cond_broadcast(&video.array_new);
    thread_unlock(video.array);

    assert_fatal(-1 != _ioctl (video.fd, VIDIOC_QBUF, &buf),
            "failure on ioctl.VIDIOC_QBUF");

    return 1;
}
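A hypothetical consumer of the frame published above; it assumes the same thread_lock()/thread_unlock() wrappers and a thread_cond_wait() counterpart to the broadcast, neither of which is shown in this codebase:

static void *
wait_for_frame(void)
{
    void *frame;

    thread_lock(video.array);
    thread_cond_wait(&video.array_new);	/* assumed counterpart to broadcast */
    frame = video.array.val;
    thread_unlock(video.array);

    return frame;
}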
Example #19
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * whether there have been any new suspensions.  If so, it installs
 * the special handler again.  Otherwise, it checks whether the current
 * depression needs to be reinstated (it may have been temporarily
 * removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		install_special_handler(thread);
	else {
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
Example #20
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
Example #21
static void
dr_start_user_threads(void)
{
	kthread_id_t tp;

	mutex_enter(&pidlock);

	/* walk all threads and release them */
	for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
		proc_t *p = ttoproc(tp);

		/* skip kernel threads */
		if (ttoproc(tp)->p_as == &kas)
			continue;

		mutex_enter(&p->p_lock);
		tp->t_proc_flag &= ~TP_CHKPT;
		mutex_exit(&p->p_lock);

		thread_lock(tp);
		if (CPR_ISTOPPED(tp)) {
			/* back on the runq */
			tp->t_schedflag |= TS_RESUME;
			setrun_locked(tp);
		}
		thread_unlock(tp);
	}

	mutex_exit(&pidlock);
}
Example #22
/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}
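For comparison, userland applies such a mask through cpuset_setaffinity(2); a short sketch pinning the calling thread (id -1) to CPU 0:

#include <sys/param.h>
#include <sys/cpuset.h>

int
pin_self_to_cpu0(void)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask));
}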
Example #23
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {
			
			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
Example #24
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}
Example #25
kern_return_t
thread_abort_safely(
	thread_act_t	act)
{
	thread_t		thread;
	kern_return_t	ret;
	spl_t			s;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	s = splsched();
	thread_lock(thread);
	if (!thread->at_safe_point ||
		clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
		if (!(thread->state & TH_ABORT)) {
			thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
			install_special_handler_locked(act);
		}
	}
	thread_unlock(thread);
	splx(s);
		
	act_unlock_thread(act);

	return (KERN_SUCCESS);
}
Example #26
/*
 * Clean up scheduler activations state associated with an exiting
 * (or execing) lwp.  t is always the current thread.
 */
void
schedctl_lwp_cleanup(kthread_t *t)
{
	sc_shared_t	*ssp = t->t_schedctl;
	proc_t		*p = ttoproc(t);
	sc_page_ctl_t	*pagep;
	index_t		index;

	ASSERT(MUTEX_NOT_HELD(&p->p_lock));

	thread_lock(t);		/* protect against ts_tick and ts_update */
	t->t_schedctl = NULL;
	t->t_sc_uaddr = 0;
	thread_unlock(t);

	/*
	 * Remove the context op to avoid the final call to
	 * schedctl_save when switching away from this lwp.
	 */
	(void) removectx(t, ssp, schedctl_save, schedctl_restore,
	    schedctl_fork, NULL, NULL, NULL);

	/*
	 * Do not unmap the shared page until the process exits.
	 * User-level library code relies on this for adaptive mutex locking.
	 */
	mutex_enter(&p->p_sc_lock);
	ssp->sc_state = SC_FREE;
	pagep = schedctl_page_lookup(ssp);
	index = (index_t)(ssp - pagep->spc_base);
	BT_CLEAR(pagep->spc_map, index);
	pagep->spc_space += sizeof (sc_shared_t);
	mutex_exit(&p->p_sc_lock);
}
Example #27
/* serve the clients */
void *unix_loop (void *arg)
{
	fd_set fds;
	int fd, cfd;
	socklen_t len;
	struct timeval tv;
	struct sockaddr_un s;

	fd = server_fd;

	for (;;) {
		FD_ZERO (&fds);
		FD_SET (fd, &fds);
		tv.tv_sec = 1; tv.tv_usec = 0;

		if (select (fd + 1, &fds, NULL, NULL, &tv)) {
			thread_lock ();

			len = sizeof (s);
			memset (&s, 0, sizeof (s));
			if ((cfd = accept (fd, (struct sockaddr *) &s, &len)) < 0)
				perror ("accept"), exit (1);

		/* handle the commands */
			unix_client (cfd);

			shutdown (cfd, 2);
			close (cfd);

			thread_unlock ();
		}
	}

	return NULL;
}
Example #28
/*
 * System call interface to scheduler activations.
 * This always operates on the current lwp.
 */
caddr_t
schedctl(void)
{
	kthread_t	*t = curthread;
	sc_shared_t	*ssp;
	uintptr_t	uaddr;
	int		error;

	if (t->t_schedctl == NULL) {
		/*
		 * Allocate and initialize the shared structure.
		 */
		if ((error = schedctl_shared_alloc(&ssp, &uaddr)) != 0)
			return ((caddr_t)(uintptr_t)set_errno(error));
		bzero(ssp, sizeof (*ssp));

		installctx(t, ssp, schedctl_save, schedctl_restore,
		    schedctl_fork, NULL, NULL, NULL);

		thread_lock(t);	/* protect against ts_tick and ts_update */
		t->t_schedctl = ssp;
		t->t_sc_uaddr = uaddr;
		ssp->sc_cid = t->t_cid;
		ssp->sc_cpri = t->t_cpri;
		ssp->sc_priority = DISP_PRIO(t);
		thread_unlock(t);
	}

	return ((caddr_t)t->t_sc_uaddr);
}
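The userland half of this interface is the schedctl(3C) family; a sketch of typical use around a short critical section, with sc obtained once per LWP from schedctl_init():

#include <schedctl.h>
#include <synch.h>

void
short_critical_section(mutex_t *mp, schedctl_t *sc)
{
	schedctl_start(sc);		/* hint: avoid preempting this LWP */
	mutex_lock(mp);
	/* ... brief work ... */
	mutex_unlock(mp);
	schedctl_stop(sc);		/* drop the hint */
}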
Example #29
/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);
		
		(void)splsched();
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}
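The producer side hands a stackless thread to the daemon and wakes it; a sketch, assuming the queue and lock used above:

void
thread_stack_enqueue(thread_t thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}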
Example #30
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
    int iPriority;
    disp_lock_t **ppDispLock;
    switch (enmType)
    {
        case RTTHREADTYPE_INFREQUENT_POLLER:    iPriority = 60;             break;
        case RTTHREADTYPE_EMULATION:            iPriority = 66;             break;
        case RTTHREADTYPE_DEFAULT:              iPriority = 72;             break;
        case RTTHREADTYPE_MSG_PUMP:             iPriority = 78;             break;
        case RTTHREADTYPE_IO:                   iPriority = 84;             break;
        case RTTHREADTYPE_TIMER:                iPriority = 99;             break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            return VERR_INVALID_PARAMETER;
    }

    Assert(curthread);
    thread_lock(curthread);
    thread_change_pri(curthread, iPriority, 0);

    /*
     * thread_unlock() is a macro calling disp_lock_exit() with the thread's dispatcher lock.
     * We need to dereference the offset manually here (for S10, S11 compatibility) rather than
     * using the macro.
     */
    ppDispLock = SOL_THREAD_LOCKP_PTR;
    disp_lock_exit(*ppDispLock);

    return VINF_SUCCESS;
}