Example #1
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}
Example #2
static void
maybe_preempt_in_ksegrp(struct thread *td)
#if  !defined(SMP)
{
	struct thread *running_thread;

#ifndef FULL_PREEMPTION
	int pri;
	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif
	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority > running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_pflags |= TDP_OWEPREEMPT;
	else
		mi_switch(SW_INVOL, NULL);
#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}
#endif /* !SMP */
Example #3
/*
 * Yield the cpu to another process, and go to sleep, on "sleep
 * address" ADDR. Subsequent calls to thread_wakeup with the same
 * value of ADDR will make the thread runnable again. The address is
 * not interpreted. Typically it's the address of a synchronization
 * primitive or data structure.
 *
 * Note that (1) interrupts must be off (if they aren't, you can
 * end up sleeping forever), and (2) you cannot sleep in an 
 * interrupt handler.
 */
void
thread_sleep(const void *addr)
{
	// may not sleep in an interrupt handler
	assert(in_interrupt==0);
	
	curthread->t_sleepaddr = addr;
	mi_switch(S_SLEEP);
	curthread->t_sleepaddr = NULL;
}
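The comment above notes that the sleep address is typically that of a synchronization primitive. As a hedged illustration, here is a minimal OS/161-style sketch of how a counting semaphore might pair thread_sleep() with thread_wakeup(); the struct semaphore, its count field, and the P()/V() names are assumptions, not taken from the examples above.

/* Hedged sketch: a counting semaphore built on thread_sleep()/thread_wakeup(). */
void
P(struct semaphore *sem)
{
	int spl;

	assert(in_interrupt == 0);	/* cannot block in an interrupt handler */

	spl = splhigh();		/* interrupts off across the check-and-sleep */
	while (sem->count == 0) {
		thread_sleep(sem);	/* sleep on the semaphore's address */
	}
	assert(sem->count > 0);
	sem->count--;
	splx(spl);
}

void
V(struct semaphore *sem)
{
	int spl;

	spl = splhigh();
	sem->count++;
	thread_wakeup(sem);		/* wake every thread sleeping on this address */
	splx(spl);
}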
Example #4
/*
 * Yield the cpu to another process, but stay runnable.
 */
void
thread_yield(void)
{
	int spl = splhigh();

	/* Check sleepers just in case we get here after shutdown */
	assert(sleepers != NULL);

	mi_switch(S_READY);
	splx(spl);
}
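As a usage note, a thread that wants to wait without blocking can stay runnable and give up the CPU between checks of a shared flag. A minimal sketch, assuming a hypothetical work_ready flag set by another thread:

/* Hypothetical sketch: cooperative polling that yields between checks. */
static volatile int work_ready;	/* assumed to be set by another thread */

static void
wait_for_work(void)
{
	while (!work_ready) {
		thread_yield();	/* stay runnable; let other threads run */
	}
}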
Example #5
/*
 * Cause the current thread to exit.
 *
 * We clean up the parts of the thread structure we don't actually
 * need to run right away. The rest has to wait until thread_destroy
 * gets called from exorcise().
 */
void
thread_exit(int exitcode)
{
	DEBUG(DB_THREADS,"Thread %s exiting with %d",
               curthread->t_name, exitcode);

	/* ASST1: Let the pid management system know this thread is done. */
	pid_setexited(curthread->t_pid, exitcode);

	if (curthread->t_stack != NULL) {
		/*
		 * Check the magic number we put on the bottom end of
		 * the stack in thread_fork. If these assertions go
		 * off, it most likely means you overflowed your stack
		 * at some point, which can cause all kinds of
		 * mysterious other things to happen.
		 */
		assert(curthread->t_stack[0] == (char)0xae);
		assert(curthread->t_stack[1] == (char)0x11);
		assert(curthread->t_stack[2] == (char)0xda);
		assert(curthread->t_stack[3] == (char)0x33);
	}

	splhigh();

	if (curthread->t_vmspace) {
		/*
		 * Do this carefully to avoid race condition with
		 * context switch code.
		 */
		struct addrspace *as = curthread->t_vmspace;
		curthread->t_vmspace = NULL;
		as_destroy(as);
	}

	if (curthread->t_cwd) {
		VOP_DECREF(curthread->t_cwd);
		curthread->t_cwd = NULL;
	}

	/* A3 SETUP */
	if (curthread->t_filetable) {
		filetable_destroy(curthread->t_filetable);
		/* set it back to NULL so it can be initialized again */
		curthread->t_filetable = NULL;
	}
	/* END A3 SETUP */

	assert(numthreads>0);
	numthreads--;
	mi_switch(S_ZOMB);

	panic("Thread came back from the dead!\n");
}
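The magic bytes asserted in thread_exit() above are presumably written when the stack is allocated. A minimal sketch of the corresponding step in thread_fork(), assuming the new thread is called newguy and its stack is STACK_SIZE bytes (both names are assumptions):

/* Hedged sketch: stamping the stack-overflow canary checked by thread_exit(). */
newguy->t_stack = kmalloc(STACK_SIZE);
if (newguy->t_stack == NULL) {
	/* allocation failure handling omitted */
}
/* Low end of the stack; thread_exit() asserts these bytes are intact. */
newguy->t_stack[0] = (char)0xae;
newguy->t_stack[1] = (char)0x11;
newguy->t_stack[2] = (char)0xda;
newguy->t_stack[3] = (char)0x33;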
Example #6
/*
 * Cause the current thread to exit.
 *
 * We clean up the parts of the thread structure we don't actually
 * need to run right away. The rest has to wait until thread_destroy
 * gets called from exorcise().
 */
void
thread_exit(void)
{
	if (curthread->t_stack != NULL) {
		/*
		 * Check the magic number we put on the bottom end of
		 * the stack in thread_fork. If these assertions go
		 * off, it most likely means you overflowed your stack
		 * at some point, which can cause all kinds of
		 * mysterious other things to happen.
		 */
		assert(curthread->t_stack[0] == (char)0xae);
		assert(curthread->t_stack[1] == (char)0x11);
		assert(curthread->t_stack[2] == (char)0xda);
		assert(curthread->t_stack[3] == (char)0x33);
	}
#if OPT_A2
	vfs_close(curthread->ft[0]->file);
	vfs_close(curthread->ft[1]->file);
	vfs_close(curthread->ft[2]->file);
#endif
	splhigh();

	if (curthread->t_vmspace) {
		/*
		 * Do this carefully to avoid race condition with
		 * context switch code.
		 */
		struct addrspace *as = curthread->t_vmspace;
		curthread->t_vmspace = NULL;
		as_destroy(as);
	}

	if (curthread->t_cwd) {
		VOP_DECREF(curthread->t_cwd);
		curthread->t_cwd = NULL;
	}

	assert(numthreads>0);

	numthreads--;
	/* #if OPT_A2
	struct process *current = p_table[curthread->pid];
	if (current->ppid != -1) {
		struct process *parent = p_table[current->ppid];
		if (parent->waiting)
			cv_broadcast(parent->exit, parent->exitlock);
	}
	#endif
	*/
	mi_switch(S_ZOMB);

	panic("Thread came back from the dead!\n");
}
Example #7
void
uio_yield(void)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
#ifndef __rtems__
	thread_lock(td);
	sched_prio(td, td->td_user_pri);
	mi_switch(SW_INVOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
#else /* __rtems__ */
	rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
#endif /* __rtems__ */
	PICKUP_GIANT();
}
Example #8
/*
 * Cause the current thread to exit.
 *
 * We clean up the parts of the thread structure we don't actually
 * need to run right away. The rest has to wait until thread_destroy
 * gets called from exorcise().
 */
void
thread_exit(void)
{
	if (curthread->t_stack != NULL) {
		/*
		 * Check the magic number we put on the bottom end of
		 * the stack in thread_fork. If these assertions go
		 * off, it most likely means you overflowed your stack
		 * at some point, which can cause all kinds of
		 * mysterious other things to happen.
		 */
		assert(curthread->t_stack[0] == (char)0xae);
		assert(curthread->t_stack[1] == (char)0x11);
		assert(curthread->t_stack[2] == (char)0xda);
		assert(curthread->t_stack[3] == (char)0x33);
	}

	splhigh();

	if (curthread->t_vmspace) {
		/*
		 * Do this carefully to avoid race condition with
		 * context switch code.
		 */
		struct addrspace *as = curthread->t_vmspace;
		curthread->t_vmspace = NULL;
		as_destroy(as);
	}

	if (curthread->t_cwd) {
		VOP_DECREF(curthread->t_cwd);
		curthread->t_cwd = NULL;
	}

	if (curthread->ft) {
		delete_process_filetable(curthread->ft);
		curthread->ft = NULL;
	}

	assert(numthreads>0);
	numthreads--;
	mi_switch(S_ZOMB);

	panic("Thread came back from the dead!\n");
}
Example #9
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 */
static inline void
userret(struct proc *p, int pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;
	if (want_ast) {
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we put ourselves on the run queue
		 * but before we switched, we might not be on the queue
		 * indicated by our priority.
		 */
		(void) splstatclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		(void) spl0();
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - oticks));

	curpriority = p->p_priority;
}
Example #10
static void
vm_pagezero(void __unused *arg)
{
	struct rtprio rtp;
	struct thread *td;
	int pages, pri;

	td = curthread;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	pages = 0;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	idlezero_enable = idlezero_enable_default;

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
#ifndef PREEMPTION
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				mi_switch(SW_VOL, NULL);
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
#endif
		} else {
			vm_page_lock_queues();
			wakeup_needed = TRUE;
			msleep(&zero_state, &vm_page_queue_mtx, PDROP | pri,
			    "pgzero", hz * 300);
			pages = 0;
		}
	}
}
Example #11
static void
vm_pagezero(void)
{
	struct thread *td;
	struct proc *p;
	struct rtprio rtp;
	int pages = 0;
	int pri;

	td = curthread;
	p = td->td_proc;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				td->td_proc->p_stats->p_ru.ru_nvcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
		} else {
			tsleep(&zero_state, pri, "pgzero", hz * 300);
			pages = 0;
		}
	}
}
Example #12
static void
vm_pagezero(void __unused *arg)
{

	idlezero_enable = idlezero_enable_default;

	mtx_lock(&vm_page_queue_free_mtx);
	for (;;) {
		if (vm_page_zero_check()) {
			vm_page_zero_idle();
#ifndef PREEMPTION
			if (sched_runnable()) {
				thread_lock(curthread);
				mi_switch(SW_VOL | SWT_IDLE, NULL);
				thread_unlock(curthread);
			}
#endif
		} else {
			wakeup_needed = TRUE;
			msleep(&zero_state, &vm_page_queue_free_mtx, 0,
			    "pgzero", hz * 300);
		}
	}
}
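The msleep() above parks the zeroing thread until someone notices that wakeup_needed is set. A hedged sketch of what the wakeup side might look like, called from the page-free path with vm_page_queue_free_mtx held (the exact body varies between FreeBSD versions):

/* Hedged sketch of the producer-side wakeup for the msleep() above. */
void
vm_page_zero_idle_wakeup(void)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (wakeup_needed && vm_page_zero_check()) {
		wakeup_needed = FALSE;
		wakeup(&zero_state);
	}
}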
Example #13
/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	int flags;
	int sig;

	td = curthread;
	p = td->td_proc;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	td->td_frame = framep;
	td->td_pticks = 0;

	/*
	 * This updates td_flags for the checks below in one
	 * "atomic" operation, together with clearing the astpending flag.
	 * If another AST is triggered while we are handling the
	 * AST's saved in flags, the astpending flag will be set and
	 * ast() will be called again.
	 */
	thread_lock(td);
	flags = td->td_flags;
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
	    TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND);
	thread_unlock(td);
	PCPU_INC(cnt.v_trap);

	if (td->td_ucred != p->p_ucred) 
		cred_update_thread(td);
	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
		td->td_profil_ticks = 0;
		td->td_pflags &= ~TDP_OWEUPC;
	}
#ifdef HWPMC_HOOKS
	/* Handle Software PMC callchain capture. */
	if (PMC_IS_PENDING_CALLCHAIN(td))
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT, (void *) framep);
#endif
	if (flags & TDF_ALRMPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
	if (flags & TDF_PROFPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
#ifdef MAC
	if (flags & TDF_MACPEND)
		mac_thread_userret(td);
#endif
	if (flags & TDF_NEEDRESCHED) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(1, 1, __func__);
#endif
		thread_lock(td);
		sched_prio(td, td->td_user_pri);
		mi_switch(SW_INVOL | SWT_NEEDRESCHED, NULL);
		thread_unlock(td);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(0, 1, __func__);
#endif
	}

	/*
	 * Check for signals. Unlocked reads of p_pendingcnt or
	 * p_siglist might cause a process-directed signal to be handled
	 * later.
	 */
	if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
	}
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	if (flags & TDF_NEEDSUSPCHK) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_OLDMASK) {
		td->td_pflags &= ~TDP_OLDMASK;
		kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
	}

	userret(td, framep);
}
Example #14
/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int first_buf_printf = 1;

#if defined(SMP)
	/*
	 * Bind us to CPU 0 so that all shutdown code runs there.  Some
	 * systems don't shutdown properly (i.e., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/* 
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy, pbusy;
#ifndef PREEMPTION
		int subiter;
#endif

		waittime = 0;

		wdog_kern_pat(WD_LASTVAL);
		sys_sync(curthread, NULL);

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if (isbufbusy(bp))
					nbusy++;
			if (nbusy == 0) {
				if (first_buf_printf)
					printf("All buffers synced.");
				break;
			}
			if (first_buf_printf) {
				printf("Syncing disks, buffers remaining... ");
				first_buf_printf = 0;
			}
			printf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;

			wdog_kern_pat(WD_LASTVAL);
			sys_sync(curthread, NULL);

#ifdef PREEMPTION
			/*
			 * Drop Giant and spin for a while to allow
			 * interrupt threads to run.
			 */
			DROP_GIANT();
			DELAY(50000 * iter);
			PICKUP_GIANT();
#else
			/*
			 * Drop Giant and context switch several times to
			 * allow interrupt threads to run.
			 */
			DROP_GIANT();
			for (subiter = 0; subiter < 50 * iter; subiter++) {
				thread_lock(curthread);
				mi_switch(SW_VOL, NULL);
				thread_unlock(curthread);
				DELAY(1000);
			}
			PICKUP_GIANT();
#endif
		}
		printf("\n");
		/*
		 * Count only busy local buffers to prevent forcing 
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if (isbufbusy(bp)) {
#if 0
/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
				if (bp->b_dev == NULL) {
					TAILQ_REMOVE(&mountlist,
					    bp->b_vp->v_mount, mnt_list);
					continue;
				}
#endif
				nbusy++;
				if (show_busybufs > 0) {
					printf(
	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
					    nbusy, bp, bp->b_vp, bp->b_flags,
					    (intmax_t)bp->b_blkno,
					    (intmax_t)bp->b_lblkno);
					BUF_LOCKPRINTINFO(bp);
					if (show_busybufs > 1)
						vn_printf(bp->b_vp,
						    "vnode content: ");
				}
			}
		}
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			printf("Giving up on %d buffers\n", nbusy);
			DELAY(5000000);	/* 5 seconds */
		} else {
			if (!first_buf_printf)
				printf("Final sync complete\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == 0)
				vfs_unmountall();
		}
		swapoff_all();
		DELAY(100000);		/* wait for console output to finish */
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping) 
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
Example #15
t_Handle
bman_portal_setup(struct bman_softc *bsc)
{
    struct dpaa_portals_softc *sc;
    t_BmPortalParam bpp;
    t_Handle portal;
    unsigned int cpu, p;

    /* Return NULL if we're not ready or we are detaching */
    if (bp_sc == NULL)
        return (NULL);

    sc = bp_sc;

    sched_pin();
    portal = NULL;
    cpu = PCPU_GET(cpuid);

    /* Check if portal is ready */
    while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                                0, -1) == 0) {
        p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

        /* Return if portal is already initialized */
        if (p != 0 && p != -1) {
            sched_unpin();
            return ((t_Handle)p);
        }

        /* Not initialized and "owned" by another thread */
        thread_lock(curthread);
        mi_switch(SW_VOL, NULL);
        thread_unlock(curthread);
    }

    /* Map portal registers */
    dpaa_portal_map_registers(sc);

    /* Configure and initialize portal */
    bpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
    bpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
    bpp.h_Bm = bsc->sc_bh;
    bpp.swPortalId = cpu;
    bpp.irq = (int)sc->sc_dp[cpu].dp_ires;

    portal = BM_PORTAL_Config(&bpp);
    if (portal == NULL)
        goto err;

    if (BM_PORTAL_Init(portal) != E_OK)
        goto err;

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                        (uint32_t)portal);

    sched_unpin();

    return (portal);

err:
    if (portal != NULL)
        BM_PORTAL_Free(portal);

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
    sched_unpin();

    return (NULL);
}
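The while loop above is a small claim/publish protocol on dp_ph: 0 means the portal slot is free, -1 means another thread is initializing it, and any other value is a published handle. A reduced sketch of the same handshake, with an illustrative claim_or_wait() helper that is not part of the driver:

/*
 * Hedged sketch: claim a slot (0 -> -1), or wait for whoever claimed it
 * to publish a handle. Returns the published handle, or NULL if the
 * caller now owns the claim and must initialize and publish itself.
 */
static t_Handle
claim_or_wait(uint32_t *slot)
{
    uint32_t v;

    while (atomic_cmpset_acq_32(slot, 0, -1) == 0) {
        v = atomic_load_acq_32(slot);
        if (v != 0 && v != (uint32_t)-1)
            return ((t_Handle)v);       /* already published */

        /* Claimed by another thread; yield the CPU and retry. */
        thread_lock(curthread);
        mi_switch(SW_VOL, NULL);
        thread_unlock(curthread);
    }

    /*
     * We hold the claim. The caller should initialize the portal and
     * then atomic_store_rel_32(slot, (uint32_t)handle), or store 0
     * again on failure.
     */
    return (NULL);
}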
Example #16
/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the new thread should immediately preempt the current
 * thread.  If so, it switches to the new thread and eventually returns true.
 * If not, it returns false so that the caller may place the thread on an
 * appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}
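For context, maybe_preempt() is consulted from the scheduler's enqueue path before the thread actually reaches a run queue. A hedged sketch of such a caller; the function name, the runq variable, and the runq_add() usage are illustrative and differ from the real sched_4bsd/sched_ule code:

/* Hedged sketch: an enqueue path consulting maybe_preempt() first. */
static void
example_sched_add(struct thread *td, int flags)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	TD_SET_RUNQ(td);		/* maybe_preempt() expects TD_ON_RUNQ(td) */
	if (maybe_preempt(td))
		return;			/* already switched to td; it never hits the queue */
	runq_add(&runq, td->td_sched, flags);	/* illustrative enqueue */
}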
Example #17
t_Handle
qman_portal_setup(struct qman_softc *qsc)
{
    struct dpaa_portals_softc *sc;
    t_QmPortalParam qpp;
    unsigned int cpu, p;
    t_Handle portal;

    /* Return NULL if we're not ready or we are detaching */
    if (qp_sc == NULL)
        return (NULL);

    sc = qp_sc;

    sched_pin();
    portal = NULL;
    cpu = PCPU_GET(cpuid);

    /* Check if portal is ready */
    while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                                0, -1) == 0) {
        p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

        /* Return if portal is already initialized */
        if (p != 0 && p != -1) {
            sched_unpin();
            return ((t_Handle)p);
        }

        /* Not initialized and "owned" by another thread */
        thread_lock(curthread);
        mi_switch(SW_VOL, NULL);
        thread_unlock(curthread);
    }

    /* Map portal registers */
    dpaa_portal_map_registers(sc);

    /* Configure and initialize portal */
    qpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
    qpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
    qpp.h_Qm = qsc->sc_qh;
    qpp.swPortalId = cpu;
    qpp.irq = (int)sc->sc_dp[cpu].dp_ires;
    qpp.fdLiodnOffset = 0;
    qpp.f_DfltFrame = qman_received_frame_callback;
    qpp.f_RejectedFrame = qman_rejected_frame_callback;
    qpp.h_App = qsc;

    portal = QM_PORTAL_Config(&qpp);
    if (portal == NULL)
        goto err;

    if (QM_PORTAL_Init(portal) != E_OK)
        goto err;

    if (QM_PORTAL_AddPoolChannel(portal, QMAN_COMMON_POOL_CHANNEL) != E_OK)
        goto err;

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                        (uint32_t)portal);
    sched_unpin();

    return (portal);

err:
    if (portal != NULL)
        QM_PORTAL_Free(portal);

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
    sched_unpin();

    return (NULL);
}