/*
 * Typically called by kernel thread or cothread
 *
 * These must be a matched pair.  We will acquire a critical
 * section in cothread_lock() and release it in cothread_unlock().
 *
 * We do this to simplify cothread operation to prevent an
 * interrupt (e.g. vkd_io_intr()) from preempting a vkd_strategy()
 * call and creating a recursion in the pthread.
 */
void
cothread_lock(cothread_t cotd, int is_cotd)
{
	if (is_cotd) {
		pthread_mutex_lock(&cotd->mutex);
	} else {
		crit_enter_id("cothread");
		pthread_mutex_lock(&cotd->mutex);
	}
}
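/*
 * For reference, a minimal sketch of the matching unlock path.  The
 * body of cothread_unlock() does not appear in this excerpt; this
 * assumes it simply mirrors cothread_lock(), releasing the mutex and
 * then dropping the critical section that was entered on the
 * kernel-thread (non-cothread) side.
 */
void
cothread_unlock(cothread_t cotd, int is_cotd)
{
	if (is_cotd) {
		pthread_mutex_unlock(&cotd->mutex);
	} else {
		pthread_mutex_unlock(&cotd->mutex);
		crit_exit_id("cothread");
	}
}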
static void
pmap_inval_init(pmap_t pmap)
{
	cpulock_t olock;
	cpulock_t nlock;

	crit_enter_id("inval");

	if (pmap != &kernel_pmap) {
		/*
		 * Acquire CPULOCK_EXCL on the pmap, spinning and
		 * processing IPIs while another cpu holds it.  The
		 * cmpset is attempted only when EXCL was clear in
		 * olock (i.e. olock != nlock).
		 */
		for (;;) {
			olock = pmap->pm_active_lock;
			cpu_ccfence();
			nlock = olock | CPULOCK_EXCL;
			if (olock != nlock &&
			    atomic_cmpset_int(&pmap->pm_active_lock,
					      olock, nlock)) {
				break;
			}
			lwkt_process_ipiq();
			cpu_pause();
		}

		/*
		 * Bump the invalidation generation so other cpus can
		 * detect that an invalidation pass has begun.
		 */
		atomic_add_acq_long(&pmap->pm_invgen, 1);
	}
}
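/*
 * Hypothetical release counterpart, shown only to illustrate the
 * protocol above: the CPULOCK_EXCL bit set in pmap_inval_init() must
 * eventually be cleared and the critical section exited.  The name
 * pmap_inval_done() and the second pm_invgen bump are assumptions,
 * not code from this excerpt.
 */
static void
pmap_inval_done(pmap_t pmap)
{
	if (pmap != &kernel_pmap) {
		atomic_add_acq_long(&pmap->pm_invgen, 1);
		atomic_clear_int(&pmap->pm_active_lock, CPULOCK_EXCL);
	}
	crit_exit_id("inval");
}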
/*
 * Initialize for add or flush
 *
 * The critical section is required to prevent preemption, allowing us to
 * set CPUMASK_LOCK on the pmap.  The critical section is also assumed
 * when lwkt_process_ipiq() is called.
 */
void
pmap_inval_init(pmap_inval_info_t info)
{
	info->pir_flags = 0;
	crit_enter_id("inval");
}
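/*
 * Matching teardown, sketched under the assumption that pir_flags
 * records whether a cpusync interlock was established between init
 * and done.  PIRF_CPUSYNC, pir_cpusync, and the
 * lwkt_cpusync_deinterlock() call are illustrative assumptions, not
 * taken from this excerpt.
 */
void
pmap_inval_done(pmap_inval_info_t info)
{
	if (info->pir_flags & PIRF_CPUSYNC)
		lwkt_cpusync_deinterlock(&info->pir_cpusync);
	info->pir_flags = 0;
	crit_exit_id("inval");
}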
int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
	int error = 0;
	cpulock_t olock;
	cpulock_t nlock;
	cpumask_t mask;
	long val;
	struct proc *p = curproc;

	if (p->p_vmm == NULL)
		return ENOSYS;

	crit_enter_id("vmm_inval");

	/*
	 * Acquire CPULOCK_EXCL, spinning while we wait.  This forces
	 * any other cpu trying to use related VMMs to wait for us.
	 */
	KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
	for (;;) {
		olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
		cpu_ccfence();
		nlock = olock | CPULOCK_EXCL;
		if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
			break;
		lwkt_process_ipiq();
		cpu_pause();
	}

	/*
	 * Wait for other cpus to exit VMM mode (for this vkernel).  No
	 * new cpus will enter VMM mode while we hold the lock.  New
	 * waiters may turn up though, so the wakeup() later on has to
	 * be unconditional.
	 *
	 * We must test p_vmm_cpulock's counter, not the mask, because
	 * VMM entries will set the mask bit unconditionally first
	 * (interlocking our IPI below) and then conditionally bump the
	 * counter.
	 */
	if (olock & CPULOCK_CNTMASK) {
		mask = p->p_vmm_cpumask;
		CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
		lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
		while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
			lwkt_process_ipiq();
			cpu_pause();
		}
	}

#ifndef _KERNEL_VIRTUAL
	/*
	 * Ensure that any new entries into VMM mode using
	 * vmm's managed under this process will issue an
	 * INVEPT before resuming.
	 */
	atomic_add_acq_long(&p->p_vmspace->vm_pmap.pm_invgen, 1);
#endif

	/*
	 * Make the requested modification, wakeup any waiters.
	 * Propagate copyin/copyout failures to the caller.
	 */
	if (uap->srcaddr) {
		error = copyin(uap->srcaddr, &val, sizeof(long));
		if (error == 0)
			error = copyout(&val, uap->dstaddr, sizeof(long));
	}

	/*
	 * VMMs on remote cpus will not be re-entered until we
	 * clear the lock.
	 */
	atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
#if 0
	wakeup(&p->p_vmm_cpulock);
#endif

	crit_exit_id("vmm_inval");

	return error;
}
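/*
 * Illustrative sketch of the entry-side interlock described in the
 * comment above, NOT the actual vmx entry code: a cpu entering VMM
 * mode first sets its mask bit unconditionally (so the IPI above can
 * reach it), then bumps the CPULOCK_CNTMASK counter only while
 * CPULOCK_EXCL is clear; otherwise it spins until the syscall above
 * releases the lock.  The function name vmm_entry_interlock() is
 * hypothetical, and the use of ATOMIC_CPUMASK_ORMASK() and
 * CPULOCK_INCR here is an assumption about the counter encoding.
 */
static void
vmm_entry_interlock(struct proc *p)
{
	cpulock_t olock;

	ATOMIC_CPUMASK_ORMASK(p->p_vmm_cpumask, mycpu->gd_cpumask);
	for (;;) {
		olock = p->p_vmm_cpulock;
		cpu_ccfence();
		if (olock & CPULOCK_EXCL) {
			/* lock holder may be IPIing us, stay responsive */
			lwkt_process_ipiq();
			cpu_pause();
			continue;
		}
		if (atomic_cmpset_int(&p->p_vmm_cpulock,
				      olock, olock + CPULOCK_INCR)) {
			/* counter bumped, safe to enter VMM mode */
			break;
		}
	}
}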