Example 1
/*
 * Unconditionally stop or restart a cpu.
 *
 * Note: cpu_mask_all_signals() masks all signals except SIGXCPU.
 * SIGXCPU itself is blocked on entry to stopsig() by normal signal
 * dispatch.
 *
 * WARNING: Signals are not physically disabled here, so we have to enter
 * our critical section before bumping gd_intr_nesting_level or another
 * interrupt can come along and get really confused.
 */
static
void
stopsig(int nada, siginfo_t *info, void *ctxp)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	sigset_t ss;

	sigemptyset(&ss);
	sigaddset(&ss, SIGALRM);
	sigaddset(&ss, SIGIO);
	sigaddset(&ss, SIGQUIT);
	sigaddset(&ss, SIGUSR1);
	sigaddset(&ss, SIGUSR2);
	sigaddset(&ss, SIGTERM);
	sigaddset(&ss, SIGWINCH);

	++td->td_critcount;
	++gd->gd_intr_nesting_level;
	while (CPUMASK_TESTMASK(stopped_cpus, gd->gd_cpumask)) {
		sigsuspend(&ss);
	}
	--gd->gd_intr_nesting_level;
	--td->td_critcount;
}
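
For context, a handler with this three-argument shape is normally installed with sigaction() and SA_SIGINFO; delivery of the signal then blocks it for the duration of the handler, which is what the note about SIGXCPU above refers to. A minimal userland sketch of such a registration, assuming SIGXCPU as the stop signal; the names stop_handler and install_stop_handler are illustrative and not taken from the example:

#include <signal.h>
#include <string.h>

/* Illustrative handler with the same three-argument SA_SIGINFO shape. */
static void
stop_handler(int signo, siginfo_t *info, void *ctxp)
{
	/* handler body omitted; see stopsig() above */
	(void)signo;
	(void)info;
	(void)ctxp;
}

/*
 * Hypothetical registration helper.  With SA_SIGINFO set the handler
 * receives siginfo_t and context pointers, and since SA_NODEFER is not
 * set the signal itself stays blocked while the handler runs.
 */
static int
install_stop_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = stop_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGXCPU, &sa, NULL);
}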
Example 2
/*
 * Unconditionally stop or restart a cpu.
 *
 * Note: cpu_mask_all_signals() masks all signals except SIGXCPU.
 * SIGXCPU itself is blocked on entry to stopsig() by normal signal
 * dispatch.
 *
 * WARNING: Signals are not physically disabled here, so we have to enter
 * our critical section before bumping gd_intr_nesting_level or another
 * interrupt can come along and get really confused.
 */
static
void
stopsig(int nada, siginfo_t *info, void *ctxp)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	sigset_t ss;
	int save;

	save = errno;
	sigemptyset(&ss);
	sigaddset(&ss, SIGALRM);
	sigaddset(&ss, SIGIO);
	sigaddset(&ss, SIGURG);
	sigaddset(&ss, SIGQUIT);
	sigaddset(&ss, SIGUSR1);
	sigaddset(&ss, SIGUSR2);
	sigaddset(&ss, SIGTERM);
	sigaddset(&ss, SIGWINCH);

	crit_enter_raw(td);
	++gd->gd_intr_nesting_level;
	while (CPUMASK_TESTMASK(stopped_cpus, gd->gd_cpumask)) {
		sigsuspend(&ss);
	}
	--gd->gd_intr_nesting_level;
	crit_exit_raw(td);

	errno = save;
}
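
Example 2 differs from Example 1 in three ways: it adds SIGURG to the wait mask, uses the crit_enter_raw()/crit_exit_raw() helpers instead of touching td_critcount directly, and saves and restores errno. The errno handling matters because sigsuspend() always returns -1 with errno set to EINTR, so without the save the handler would clobber errno in whatever code it interrupted. A generic sketch of that save/restore pattern; generic_handler is an illustrative name:

#include <errno.h>

static void
generic_handler(int signo)
{
	int save = errno;	/* preserve the interrupted context's errno */

	(void)signo;
	/* ... work that may clobber errno, e.g. sigsuspend() or write() ... */

	errno = save;		/* restore before returning to interrupted code */
}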
Example 3
int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
    int error = 0;
    cpulock_t olock;
    cpulock_t nlock;
    cpumask_t mask;
    long val;
    struct proc *p = curproc;

    if (p->p_vmm == NULL)
        return ENOSYS;

    crit_enter_id("vmm_inval");

    /*
     * Acquire CPULOCK_EXCL, spinning while we wait.  This forces any
     * other cpu trying to use related VMMs to wait for us.
     */
    KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
    for (;;) {
        olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
        cpu_ccfence();
        nlock = olock | CPULOCK_EXCL;
        if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
            break;
        lwkt_process_ipiq();
        cpu_pause();
    }

    /*
     * Wait for other cpus to exit VMM mode (for this vkernel).  No
     * new cpus will enter VMM mode while we hold the lock.  New waiters
     * may turn up, though, so the wakeup() later on has to be
     * unconditional.
     *
     * We must test on p_vmm_cpulock's counter, not the mask, because
     * VMM entries will set the mask bit unconditionally first
     * (interlocking our IPI below) and then conditionally bump the
     * counter.
     */
    if (olock & CPULOCK_CNTMASK) {
        mask = p->p_vmm_cpumask;
        CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
        lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
        while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
            lwkt_process_ipiq();
            cpu_pause();
        }
    }

#ifndef _KERNEL_VIRTUAL
    /*
     * Ensure that any new entries into VMM mode using
     * VMMs managed under this process will issue an
     * INVEPT before resuming.
     */
    atomic_add_acq_long(&p->p_vmspace->vm_pmap.pm_invgen, 1);
#endif

    /*
     * Make the requested modification and wake up any waiters.
     */
     */
    if (uap->srcaddr) {
        copyin(uap->srcaddr, &val, sizeof(long));
        copyout(&val, uap->dstaddr, sizeof(long));
    }

    /*
     * VMMs on remote cpus will not be re-entered until we
     * clear the lock.
     */
    atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
#if 0
    wakeup(&p->p_vmm_cpulock);
#endif

    crit_exit_id("vmm_inval");

    return error;
}
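
The lock acquisition at the top of sys_vmm_guest_sync_addr() is a compare-and-swap retry loop: read the lock word with the exclusive bit assumed clear, try to install the same value with the bit set, and keep retrying (while still processing incoming IPIs so remote cpus are not deadlocked) until the swap succeeds. A standalone sketch of the same acquire/release pattern using C11 atomics; excl_lock, LOCK_EXCL and do_pending_work are illustrative names, not the kernel's API:

#include <stdatomic.h>
#include <stdint.h>

#define LOCK_EXCL	0x80000000U	/* illustrative exclusive bit */

static _Atomic uint32_t excl_lock;	/* waiter count in low bits, EXCL in top bit */

static void
do_pending_work(void)
{
    /* stand-in for lwkt_process_ipiq()/cpu_pause() in the kernel code */
}

static void
lock_excl(void)
{
    uint32_t olock, nlock;

    for (;;) {
        /* Expect the exclusive bit to be clear... */
        olock = atomic_load(&excl_lock) & ~LOCK_EXCL;
        nlock = olock | LOCK_EXCL;
        /* ...and try to set it without disturbing the waiter count. */
        if (atomic_compare_exchange_strong(&excl_lock, &olock, nlock))
            break;
        do_pending_work();	/* keep servicing work while spinning */
    }
}

static void
unlock_excl(void)
{
    atomic_fetch_and(&excl_lock, ~LOCK_EXCL);	/* clear only the EXCL bit */
}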