Example #1
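/*
 * Idle-time device-polling loop: lower this kernel thread to idle
 * priority via rtp_to_pri(), then loop forever, either running
 * ether_poll() and voluntarily yielding the CPU, or sleeping while
 * polling is disabled or no poll handlers are registered.
 */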
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}
Example #2
/*
 * XXX The priority and scheduler modifications should
 *     be moved into published interfaces in kern/kern_sync.
 *
 * The permissions to modify process p were checked in "p31b_proc()".
 *
 */
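/*
 * Map a POSIX.1b scheduling policy and priority (SCHED_FIFO, SCHED_RR,
 * SCHED_OTHER) onto an rtprio and apply it to the thread with
 * rtp_to_pri(), rejecting out-of-range priorities.
 */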
int
ksched_setscheduler(struct ksched *ksched,
    struct thread *td, int policy, const struct sched_param *param)
{
	int e = 0;
	struct rtprio rtp;

	switch (policy) {
	case SCHED_RR:
	case SCHED_FIFO:
		if (param->sched_priority >= P1B_PRIO_MIN &&
		    param->sched_priority <= P1B_PRIO_MAX) {
			rtp.prio = p4prio_to_rtpprio(param->sched_priority);
			rtp.type = (policy == SCHED_FIFO)
			    ? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;
			rtp_to_pri(&rtp, td);
		} else
			e = EPERM;
		break;
	case SCHED_OTHER:
		if (param->sched_priority >= 0 &&
		    param->sched_priority <= (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE)) {
			rtp.type = RTP_PRIO_NORMAL;
			rtp.prio = p4prio_to_tsprio(param->sched_priority);
			rtp_to_pri(&rtp, td);
		} else
			e = EINVAL;
		break;
	default:
		e = EINVAL;
		break;
	}

	return e;
}
Example #3
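/*
 * Page-zeroing idle kthread: lower itself to idle priority via
 * rtp_to_pri(), then zero free pages whenever vm_page_zero_check()
 * reports work, voluntarily yielding the CPU between bursts and
 * sleeping on zero_state when there is nothing to do.
 */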
static void
vm_pagezero(void __unused *arg)
{
	struct rtprio rtp;
	struct thread *td;
	int pages, pri;

	td = curthread;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	pages = 0;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	idlezero_enable = idlezero_enable_default;

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
#ifndef PREEMPTION
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				mi_switch(SW_VOL, NULL);
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
#endif
		} else {
			vm_page_lock_queues();
			wakeup_needed = TRUE;
			msleep(&zero_state, &vm_page_queue_mtx, PDROP | pri,
			    "pgzero", hz * 300);
			pages = 0;
		}
	}
}
Example #4
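/*
 * Earlier variant of the page-zeroing idle kthread: set idle priority
 * with rtp_to_pri(), mark the process P_NOLOAD so it is excluded from
 * the load average, then loop zeroing free pages, yielding when it has
 * done enough work and sleeping when no pages need zeroing.
 */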
static void
vm_pagezero(void)
{
	struct thread *td;
	struct proc *p;
	struct rtprio rtp;
	int pages = 0;
	int pri;

	td = curthread;
	p = td->td_proc;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				td->td_proc->p_stats->p_ru.ru_nvcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
		} else {
			tsleep(&zero_state, pri, "pgzero", hz * 300);
			pages = 0;
		}
	}
}
Example #5
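/*
 * Create a new thread in the current process: validate any requested
 * scheduling class, allocate and initialize the thread, copy the new
 * tid out to the parent and child, link the thread into the process,
 * apply a non-timesharing rtprio with rtp_to_pri(), and make the
 * thread runnable.
 */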
static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* Have race condition but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	PROC_LOCK(td->td_proc);
	error = racct_add(p, RACCT_NTHR, 1);
	PROC_UNLOCK(td->td_proc);
	if (error != 0)
		return (EPROCLIM);
#endif

	/* Initialize our td */
	newtd = thread_alloc(0);
	if (newtd == NULL) {
		error = ENOMEM;
		goto fail;
	}

	cpu_set_upcall(newtd, td);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * The tid is copied out to two places, one for the child and one
	 * for the parent, because pthread can create a detached thread;
	 * if the parent wants to access the child tid safely, it has to
	 * provide its own storage, because the child thread may exit
	 * quickly and its memory be freed before the parent can access it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		error = EFAULT;
		goto fail;
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Setup user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	thread_link(newtd, p); 
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	PROC_LOCK(p);
	racct_sub(p, RACCT_NTHR, 1);
	PROC_UNLOCK(p);
#endif
	return (error);
}
Example #6
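/*
 * Generic in-kernel thread creation helper: the caller supplies an
 * initialize_thread() callback to set up the machine context.  The
 * requested scheduling class is validated up front and, unless it is
 * plain timesharing, applied to the new thread with rtp_to_pri()
 * before it is added to the run queue.
 */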
int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_set_upcall(newtd, td);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	thread_cow_get(newtd, td);

	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	newtd->td_pax = p->p_pax;
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_flag2 & P2_LWP_EVENTS)
		newtd->td_dbgflags |= TDB_BORN;

	/*
	 * Copy the existing thread VM policy into the new thread.
	 */
	vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
	    &td->td_vm_dom_policy);

	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}
Example #7
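/*
 * Another revision of the same helper: in addition to the steps above,
 * it reports thread creation to hwpmc(4) and marks the new thread
 * TDB_BORN when ptrace(2) LWP events are enabled, before applying any
 * requested rtprio with rtp_to_pri() and scheduling the thread.
 */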
int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_copy_thread(newtd, td);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_rb_list = newtd->td_rbp_list = newtd->td_rb_inact = 0;
	thread_cow_get(newtd, td);

	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_ptevents & PTRACE_LWP)
		newtd->td_dbgflags |= TDB_BORN;

	PROC_UNLOCK(p);
#ifdef	HWPMC_HOOKS
	if (PMC_PROC_IS_USING_PMCS(p))
		PMC_CALL_HOOK(newtd, PMC_FN_THR_CREATE, NULL);
	else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(newtd, PMC_FN_THR_CREATE_LOG, NULL);
#endif

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}