Example #1
static void
pmap_inval_done(pmap_t pmap)
{
    if (pmap != &kernel_pmap) {
        atomic_add_acq_long(&pmap->pm_invgen, 1);
        atomic_clear_int(&pmap->pm_active_lock, CPULOCK_EXCL);
    }
    crit_exit_id("inval");
}
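
Example #1 is the release half of a per-pmap invalidation interlock: it bumps pm_invgen and drops CPULOCK_EXCL before leaving the "inval" critical section. For context, here is a minimal sketch of the acquire side it presumably pairs with, modeled on the CAS loop in Example #4; the name pmap_inval_interlock and the exact ordering are assumptions, not taken from the source:

static void
pmap_inval_interlock(pmap_t pmap)       /* name assumed for illustration */
{
    cpulock_t olock;
    cpulock_t nlock;

    crit_enter_id("inval");
    if (pmap != &kernel_pmap) {
        /* Spin until CPULOCK_EXCL is free, then take it atomically. */
        for (;;) {
            olock = pmap->pm_active_lock & ~CPULOCK_EXCL;
            cpu_ccfence();
            nlock = olock | CPULOCK_EXCL;
            if (atomic_cmpset_int(&pmap->pm_active_lock, olock, nlock))
                break;
            lwkt_process_ipiq();
            cpu_pause();
        }
    }
}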
Example #2
void
cothread_unlock(cothread_t cotd, int is_cotd)
{
	if (is_cotd) {
		pthread_mutex_unlock(&cotd->mutex);
	} else {
		pthread_mutex_unlock(&cotd->mutex);
		crit_exit_id("cothread");
	}
}
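
The asymmetric unlock above only reads cleanly next to its lock counterpart: on the non-cothread path the lock side presumably enters the "cothread" critical section before taking the pthread mutex, so the vkernel thread cannot be preempted while holding it, and the unlock above releases both in the opposite order. A hedged sketch of that counterpart (cothread_lock is assumed here, not shown in the source):

void
cothread_lock(cothread_t cotd, int is_cotd)
{
	if (is_cotd) {
		/* Cothread side: a plain pthread mutex is enough. */
		pthread_mutex_lock(&cotd->mutex);
	} else {
		/* Vkernel side: hold a critical section across the mutex. */
		crit_enter_id("cothread");
		pthread_mutex_lock(&cotd->mutex);
	}
}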
Example #3
void
pmap_inval_done(pmap_inval_info_t info)
{
    KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0);
    crit_exit_id("inval");
}
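
Example #3 asserts that no cpu-sync operation is still pending (PIRF_CPUSYNC clear) and then leaves the "inval" critical section. The matching init presumably clears pir_flags and enters that same section; a minimal hedged sketch (pmap_inval_init is inferred from the naming, not shown in the source):

void
pmap_inval_init(pmap_inval_info_t info)
{
    info->pir_flags = 0;        /* nothing pending; PIRF_CPUSYNC must be clear by done time */
    crit_enter_id("inval");
}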
Example #4
int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
    int error = 0;
    cpulock_t olock;
    cpulock_t nlock;
    cpumask_t mask;
    long val;
    struct proc *p = curproc;

    if (p->p_vmm == NULL)
        return ENOSYS;

    crit_enter_id("vmm_inval");

    /*
     * Acquire CPULOCK_EXCL, spinning while we wait.  This forces any
     * other cpu trying to use related VMMs to wait for us.
     */
    KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
    for (;;) {
        olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
        cpu_ccfence();
        nlock = olock | CPULOCK_EXCL;
        if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
            break;
        lwkt_process_ipiq();
        cpu_pause();
    }

    /*
     * Wait for other cpus to exit VMM mode (for this vkernel).  No
     * new cpus will enter VMM mode while we hold the lock.  New waiters
     * may turn up though, so the wakeup() later on has to be
     * unconditional.
     *
     * We must test on p_vmm_cpulock's counter, not the mask, because
     * VMM entries will set the mask bit unconditionally first
     * (interlocking our IPI below) and then conditionally bump the
     * counter.
     */
    if (olock & CPULOCK_CNTMASK) {
        mask = p->p_vmm_cpumask;
        CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
        lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
        while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
            lwkt_process_ipiq();
            cpu_pause();
        }
    }

#ifndef _KERNEL_VIRTUAL
    /*
     * Ensure that any new entries into VMM mode using
     * vmms managed under this process will issue an
     * INVEPT before resuming.
     */
    atomic_add_acq_long(&p->p_vmspace->vm_pmap.pm_invgen, 1);
#endif

    /*
     * Make the requested modification, wakeup any waiters.
     */
    if (uap->srcaddr) {
        error = copyin(uap->srcaddr, &val, sizeof(long));
        if (error == 0)
            error = copyout(&val, uap->dstaddr, sizeof(long));
    }

    /*
     * VMMs on remote cpus will not be re-entered until we
     * clear the lock.
     */
    atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
#if 0
    wakeup(&p->p_vmm_cpulock);
#endif

    crit_exit_id("vmm_inval");

    return error;
}
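
The comments in Example #4 describe the other half of the protocol: a cpu entering VMM mode sets its bit in p_vmm_cpumask first (so the syncer's IPI covers it) and only then bumps the CPULOCK counter, spinning instead while CPULOCK_EXCL is held. A hypothetical sketch of that entry-side interlock, using the fields and primitives visible above; vmm_enter_interlock and CPULOCK_INCR are assumptions, not taken from the source:

static void
vmm_enter_interlock(struct proc *p)     /* name assumed for illustration */
{
    cpulock_t olock;
    cpulock_t nlock;

    /* Set our mask bit first so the IPI in Example #4 covers this cpu. */
    ATOMIC_CPUMASK_ORBIT(p->p_vmm_cpumask, mycpu->gd_cpuid);

    /* Conditionally bump the counter; spin while CPULOCK_EXCL is held. */
    for (;;) {
        olock = p->p_vmm_cpulock;
        cpu_ccfence();
        if (olock & CPULOCK_EXCL) {
            lwkt_process_ipiq();        /* service the syncer's IPI */
            cpu_pause();
            continue;
        }
        nlock = olock + CPULOCK_INCR;   /* CPULOCK_INCR: assumed counter unit */
        if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
            break;
    }
}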