Example #1
void manager_jenkins()
{
#ifdef CONFIG_KERNEL_TESTING
    printk("<-- BEGIN_KERNEL_TESTS -->\n");
    run_registered_ktest_suites();
    printk("<-- END_KERNEL_TESTS -->\n");
#endif

    // Run userspace tests (from config specified path).
#ifdef CONFIG_USERSPACE_TESTING
    if (strlen(CONFIG_USERSPACE_TESTING_SCRIPT) != 0) {
        char exec[] = "/bin/ash";
        char *p_argv[] = {exec, CONFIG_USERSPACE_TESTING_SCRIPT, 0};

        struct file *program = do_file_open(exec, 0, 0);
        struct proc *p = proc_create(program, p_argv, NULL);
        proc_wakeup(p);
        proc_decref(p); /* let go of the reference created in proc_create() */
        kref_put(&program->f_kref);
        run_scheduler();
        // Need a way to wait for p to finish
    } else {
        printk("No user-space launcher file specified.\n");
    }
#endif
    smp_idle();
    assert(0);
}
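
The test hooks above assume some registry that run_registered_ktest_suites() walks. For reference, here is a minimal sketch of what such a registry could look like; struct ktest_suite, register_ktest_suite(), and the list layout are hypothetical illustrations, not the actual API behind this example.

/* Minimal sketch of a test-suite registry (hypothetical types and names). */
int printk(const char *fmt, ...);	/* assumed kernel print routine */

struct ktest_suite {
	const char *name;
	void (*run)(void);		/* runs every test in the suite */
	struct ktest_suite *next;
};

static struct ktest_suite *ktest_head;	/* singly linked list of suites */

void register_ktest_suite(struct ktest_suite *suite)
{
	suite->next = ktest_head;	/* push onto the head of the list */
	ktest_head = suite;
}

void run_registered_ktest_suites(void)
{
	for (struct ktest_suite *s = ktest_head; s; s = s->next) {
		printk("Running ktest suite: %s\n", s->name);
		s->run();
	}
}
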
Example #2
/* Wake up one process waiting for this lock. */
void
lock_wakeup (lock_t *lockp)
{
    struct proc *i_proc = (struct proc *) CURRENT_PROC();

    do {
        if (i_proc->lock != lockp) {
            i_proc = (struct proc *) i_proc->next;
            continue;
        }

        /* Clear lock pointer in process. */
        i_proc->lock = NULL;

        /* Wakeup process. */
        proc_wakeup (i_proc);

        break;
    } while (i_proc != CURRENT_PROC());
}
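
The scan above only works if the blocking side records the lock it sleeps on in proc->lock. Here is a sketch of what that side could look like, assuming a held flag in lock_t, a proc_sleep() that blocks the caller until proc_wakeup(), and interrupts disabled around this path; all of these are assumptions, not taken from the example.

/* Hypothetical acquire path that pairs with lock_wakeup() above. */
void lock_acquire (lock_t *lockp)
{
    struct proc *self = (struct proc *) CURRENT_PROC();

    while (lockp->held) {
        /* Record the lock we block on so lock_wakeup() can find us. */
        self->lock = lockp;
        proc_sleep (self);      /* blocks until proc_wakeup() runs us */
    }
    lockp->held = 1;
}
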
Example #3
INLINE void __sig_signal(Process *proc, sigmask_t sigs, bool wakeup)
{
	cpu_flags_t flags;

	IRQ_SAVE_DISABLE(flags);

	/* Set the signals */
	proc->sig_recv |= sigs;

	/* Check if process needs to be awoken */
	if (proc->sig_recv & proc->sig_wait)
	{
		ASSERT(proc != current_process);

		proc->sig_wait = 0;
		if (wakeup)
			proc_wakeup(proc);
		else
			SCHED_ENQUEUE_HEAD(proc);
	}
	IRQ_RESTORE(flags);
}
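
The wakeup above pairs with a wait primitive that publishes the mask it sleeps on. A sketch of what that receive side could look like under the same IRQ_SAVE_DISABLE discipline; proc_sleep() and the exact consume semantics are assumptions for illustration.

/* Hypothetical receive side: block until one of sigs is delivered. */
INLINE sigmask_t sig_wait(sigmask_t sigs)
{
	sigmask_t res;
	cpu_flags_t flags;

	IRQ_SAVE_DISABLE(flags);
	while (!(current_process->sig_recv & sigs))
	{
		/* Publish the mask we wait for; __sig_signal() clears
		 * sig_wait and wakes us when a matching signal arrives. */
		current_process->sig_wait = sigs;
		proc_sleep(current_process);	/* assumed scheduler call */
	}
	res = current_process->sig_recv & sigs;
	current_process->sig_recv &= ~res;	/* consume what we return */
	IRQ_RESTORE(flags);
	return res;
}
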
Example #4
/* Send an event to ev_q, based on the parameters in ev_q's flag.  We don't
 * accept null ev_qs, since the caller ought to be checking before bothering to
 * make a msg and send it to the event_q.  Vcoreid is who the kernel thinks the
 * message ought to go to (for IPIs).  Appropriate for things like
 * EV_PREEMPT_PENDING, where we tell the affected vcore.  To have the message go
 * where the kernel suggests, set EVENT_VCORE_APPRO(priate). */
void send_event(struct proc *p, struct event_queue *ev_q, struct event_msg *msg,
                uint32_t vcoreid)
{
	struct proc *old_proc;
	struct event_mbox *ev_mbox = 0;
	assert(p);
	printd("[kernel] sending msg to proc %p, ev_q %p\n", p, ev_q);
	if (!ev_q) {
		warn("[kernel] Null ev_q - kernel code should check before sending!");
		return;
	}
	if (!is_user_rwaddr(ev_q, sizeof(struct event_queue))) {
		/* Ought to kill them, just warn for now */
		printk("[kernel] Illegal addr for ev_q\n");
		return;
	}
	/* This should be caught by "future technology" that can tell when the
	 * kernel PFs on the user's behalf.  For now, we catch common userspace bugs
	 * (had this happen a few times). */
	if (!PTE_ADDR(ev_q)) {
		printk("[kernel] Bad addr %p for ev_q\n", ev_q);
		return;
	}
	/* ev_q is a user pointer, so we need to make sure we're in the right
	 * address space */
	old_proc = switch_to(p);
	/* If we're an _S, just spam vcore0, and wake up if necessary. */
	if (!__proc_is_mcp(p)) {
		spam_vcore(p, 0, msg, ev_q->ev_flags);
		wrmb();	/* don't let the notif_pending write pass the state read */
		/* using the same pattern as in spam_public (which can have multiple
		 * unblock callbacks) */
		if (p->state == PROC_WAITING)
			proc_wakeup(p);
		goto out;
	}
	/* Get the vcoreid that we'll message (if appropriate).  For INDIR and
	 * SPAMMING, this is the first choice of a vcore, but other vcores might get
	 * it.  Common case is !APPRO and !ROUNDROBIN.  Note we are clobbering the
	 * vcoreid parameter. */
	if (!(ev_q->ev_flags & EVENT_VCORE_APPRO))
		vcoreid = ev_q->ev_vcore;	/* use the ev_q's vcoreid */
	/* Note that RR overwrites APPRO */
	if (ev_q->ev_flags & EVENT_ROUNDROBIN) {
		/* Pick a vcore, round-robin style.  Assuming ev_vcore was the previous
		 * one used.  Note that round-robin overrides the passed-in vcoreid.
		 * Also note this may be 'wrong' if num_vcores changes. */
		vcoreid = (ev_q->ev_vcore + 1) % p->procinfo->num_vcores;
		ev_q->ev_vcore = vcoreid;
	}
	if (!vcoreid_is_safe(vcoreid)) {
		/* Ought to kill them, just warn for now */
		printk("[kernel] Vcoreid %d unsafe! (too big?)\n", vcoreid);
		goto out;
	}
	/* If we're a SPAM_PUBLIC, they just want us to spam the message.  Note we
	 * don't care about the mbox, since it'll go to VCPD public mboxes, and
	 * we'll prefer to send it to whatever vcoreid we determined at this point
	 * (via APPRO or whatever). */
	if (ev_q->ev_flags & EVENT_SPAM_PUBLIC) {
		spam_public_msg(p, msg, vcoreid, ev_q->ev_flags & EVENT_SPAM_FLAGS);
		goto out;
	}
	/* We aren't spamming and we know the default vcore, and now we need to
	 * figure out which mbox to use.  If they provided an mbox, we'll use it.
	 * If not, we'll use a VCPD mbox (public or private, depending on the
	 * flags). */
	ev_mbox = ev_q->ev_mbox;
	if (!ev_mbox)
		ev_mbox = get_vcpd_mbox(vcoreid, ev_q->ev_flags);
	/* At this point, we ought to have the right mbox to send the msg to, and
	 * which vcore to alert (IPI/INDIR) (if applicable).  The mbox could be the
	 * vcore's vcpd ev_mbox. */
	if (!ev_mbox) {
		/* This shouldn't happen any more; this is more for sanity's sake */
		warn("[kernel] ought to have an mbox by now!");
		goto out;
	}
	/* Even if we're using an mbox in procdata (VCPD), we want a user pointer */
	if (!is_user_rwaddr(ev_mbox, sizeof(struct event_mbox))) {
		/* Ought to kill them, just warn for now */
		printk("[kernel] Illegal addr for ev_mbox\n");
		goto out;
	}
	/* We used to support no msgs, but quit being lazy and send a 'msg'.  If the
	 * ev_q is a NOMSG, we won't actually memcpy or anything, it'll just be a
	 * vehicle for sending the ev_type. */
	assert(msg);
	post_ev_msg(p, ev_mbox, msg, ev_q->ev_flags);
	wmb();	/* ensure ev_msg write is before alerting the vcore */
	/* Prod/alert a vcore with an IPI or INDIR, if desired.  INDIR will also
	 * call try_notify (IPI) later */
	if (ev_q->ev_flags & EVENT_INDIR) {
		send_indir(p, ev_q, vcoreid);
	} else {
		/* they may want an IPI despite not wanting an INDIR */
		try_notify(p, vcoreid, ev_q->ev_flags);
	}
	/* Fall through */
out:
	/* Return to the old address space. */
	switch_back(p, old_proc);
}
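
To see the flag handling above from the caller's side, here is an illustrative sender built only from names that appear in this example (EV_PREEMPT_PENDING, EVENT_VCORE_APPRO, send_event); the surrounding function and the ev_arg2 payload slot are assumptions for illustration.

/* Hypothetical caller: tell the affected vcore a preemption is pending. */
static void notify_preempt_pending(struct proc *p, struct event_queue *ev_q,
                                   uint32_t vcoreid)
{
	struct event_msg msg = {0};

	msg.ev_type = EV_PREEMPT_PENDING;
	msg.ev_arg2 = vcoreid;		/* assumed payload slot */
	/* With EVENT_VCORE_APPRO set in ev_q->ev_flags, send_event() will
	 * deliver to the vcoreid we pass; otherwise it uses ev_q->ev_vcore. */
	send_event(p, ev_q, &msg, vcoreid);
}
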
Example #5
/* This makes sure ev_msg is sent to some vcore, preferring vcoreid.
 *
 * One of the goals of FALLBACK (and this func) is to allow processes to yield
 * cores without fear of losing messages.  Even when yielding and getting
 * preempted, if your message is spammed, it will get to some vcore.  If
 * MUST_RUN is set, it'll get to a running vcore.  Messages that you send like
 * this must be able to handle spurious reads, since more than one vcore is
 * likely to get the message and handle it.
 *
 * We try the desired vcore, using VC_CAN_RCV_MSG.  Failing that, we'll search
 * the online and then the bulk_preempted lists.  These lists serve as a way to
 * find likely messageable vcores.  spam_list_member() helps us with them,
 * failing if anything seems to go wrong, at which point we just lock and try
 * to deal with things.  In that scenario, we most likely would need to lock
 * anyway to wake up the process (it was WAITING).
 *
 * One tricky thing with sending to the bulk_preempt list is that we may want to
 * send a message about a (bulk) preemption to someone on that list.  This works
 * since a given vcore that was preempted will be removed from that list before
 * we try to send_event() (in theory, there isn't code that can send that event
 * yet).  Someone else will get the event and wake up the preempted vcore. */
static void spam_public_msg(struct proc *p, struct event_msg *ev_msg,
							uint32_t vcoreid, int ev_flags)
{
	struct vcore *vc;
	/* First, try posting to the desired vcore (so long as we don't have to send
	 * it to a vcore that will run, like we do for preempt messages). */
	if (!(ev_flags & EVENT_VCORE_MUST_RUN) &&
	   (try_spam_vcore(p, vcoreid, ev_msg, ev_flags)))
		return;
	/* If the process is WAITING, let's just jump to the fallback */
	if (p->state == PROC_WAITING)
		goto ultimate_fallback;
	/* If we're here, the desired vcore is unreachable, but the process is
	 * probably RUNNING_M (online_vs) or RUNNABLE_M (bulk preempted or recently
	 * woken up), so we'll need to find another vcore. */
	if (spam_list_member(&p->online_vcs, p, ev_msg, ev_flags))
		return;
	if (spam_list_member(&p->bulk_preempted_vcs, p, ev_msg, ev_flags))
		return;
	/* Last chance, let's check the head of the inactives.  It might be
	 * alertable (the kernel set it earlier due to an event, or it was a
	 * bulk_preempt that didn't restart), and we can avoid grabbing the
	 * proc_lock. */
	vc = TAILQ_FIRST(&p->inactive_vcs);
	if (vc) {	/* might be none in rare circumstances */
		if (try_spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags)) {
			/* Need to ensure the proc wakes up, but only if it was WAITING.
			 * One way for this to happen is if a normal vcore was preempted
			 * right as another vcore was yielding, and the preempt
			 * message was sent after the last vcore yielded (which caused
			 * us to be WAITING). */
			if (p->state == PROC_WAITING)
				proc_wakeup(p);	/* internally, this double-checks WAITING */
			return;
		}
	}
ultimate_fallback:
	/* At this point, we can't find one.  This could be due to a (hopefully
	 * rare) weird yield/request storm, or more commonly because the lists were
	 * empty and the process is simply WAITING (yielded all of its vcores and is
	 * waiting on an event).  Time for the ultimate fallback: locking.  Note
	 * that when we __alert_vcore(), there is a chance we need to mmap, which
	 * grabs the mm_lock. */
	spin_lock(&p->proc_lock);
	if (p->state != PROC_WAITING) {
		/* We need to check the online and bulk_preempt lists again, now
		 * that we are sure no one is messing with them.  If we're WAITING,
		 * we can skip these (or assert they are empty!). */
		vc = TAILQ_FIRST(&p->online_vcs);
		if (vc) {
			/* there's an online vcore, so just alert it (we know it isn't going
			 * anywhere), and return */
			spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags);
			spin_unlock(&p->proc_lock);
			return;
		}
		vc = TAILQ_FIRST(&p->bulk_preempted_vcs);
		if (vc) {
			/* the process is bulk preempted, similar deal to above */
			spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags);
			spin_unlock(&p->proc_lock);
			return;
		}
	}
	/* At this point, we're sure all vcores are yielded, though we might not be
	 * WAITING.  Post to the first on the inactive list (which is the one that
	 * will definitely be woken up) */
	vc = TAILQ_FIRST(&p->inactive_vcs);
	assert(vc);
	spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags);
	/* Set the vcore's alertable flag, to short circuit our last ditch effort
	 * above */
	set_vcore_msgable(vcore2vcoreid(p, vc));
	/* The first event to catch the process with no online/bp vcores will need
	 * to wake it up.  (We could be RUNNABLE_M here if another event already
	 * woke us and we didn't get lucky with the penultimate fallback.)
	 * proc_wakeup (and __proc_wakeup()) will check for WAITING. */
	spin_unlock(&p->proc_lock);
	proc_wakeup(p);
	return;
}
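
The comments above lean on try_spam_vcore() refusing unreachable vcores. A sketch of the check it could perform, assuming the VC_CAN_RCV_MSG flag lives in per-vcore preempt data; the vcpd lookup and flag access here are illustrative assumptions, not the real helper.

/* Hypothetical helper: post to a vcore only if it can receive messages. */
static bool try_spam_vcore(struct proc *p, uint32_t vcoreid,
                           struct event_msg *ev_msg, int ev_flags)
{
	struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[vcoreid];

	/* This is the flag set_vcore_msgable() turns on for the head of the
	 * inactive list, short-circuiting the locked fallback next time. */
	if (!(atomic_read(&vcpd->flags) & VC_CAN_RCV_MSG))
		return FALSE;
	spam_vcore(p, vcoreid, ev_msg, ev_flags);
	return TRUE;
}
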