Example no. 1
/*
 * Add a (pmap, va) pair to the invalidation list and protect access
 * as appropriate.
 *
 * CPUMASK_LOCK is used to interlock thread switchins, otherwise another
 * cpu can switch in a pmap that we are unaware of and interfere with our
 * pte operation.
 */
void
pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
{
    cpumask_t oactive;
#ifdef SMP
    cpumask_t nactive;

    DEBUG_PUSH_INFO("pmap_inval_interlock");
    for (;;) {
	oactive = pmap->pm_active;
	cpu_ccfence();
	nactive = oactive | CPUMASK_LOCK;
	if ((oactive & CPUMASK_LOCK) == 0 &&
	    atomic_cmpset_cpumask(&pmap->pm_active, oactive, nactive)) {
		break;
	}
	lwkt_process_ipiq();
	cpu_pause();
    }
    DEBUG_POP_INFO();
#else
    oactive = pmap->pm_active & ~CPUMASK_LOCK;
#endif
    KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0);
    info->pir_va = va;
    info->pir_flags = PIRF_CPUSYNC;
    lwkt_cpusync_init(&info->pir_cpusync, oactive, pmap_inval_callback, info);
    lwkt_cpusync_interlock(&info->pir_cpusync);
}
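
For readers outside the kernel, here is a minimal user-space sketch of the
spin-interlock pattern the first version implements, using C11 atomics.
All names here (mask_t, MASK_LOCK, interlock) are hypothetical; the kernel's
atomic_cmpset_cpumask() is modeled with atomic_compare_exchange_weak(), and
the lwkt_process_ipiq()/cpu_pause() retry path is reduced to a comment.

#include <stdatomic.h>
#include <stdint.h>

typedef uint64_t mask_t;
#define MASK_LOCK ((mask_t)1 << 63)	/* high bit doubles as the lock bit */

static void
interlock(_Atomic mask_t *active)
{
    mask_t oactive, nactive;

    for (;;) {
	oactive = atomic_load(active);
	nactive = oactive | MASK_LOCK;
	/* Attempt the swap only if nobody else holds the lock bit. */
	if ((oactive & MASK_LOCK) == 0 &&
	    atomic_compare_exchange_weak(active, &oactive, nactive))
		break;
	/*
	 * A real kernel would drain pending IPIs here (the code above
	 * calls lwkt_process_ipiq()) and pause briefly before retrying.
	 */
    }
}
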
/*
 * Add a (pmap, va) pair to the invalidation list and protect access
 * as appropriate.
 *
 * CPULOCK_EXCL is used to interlock thread switchins, otherwise another
 * cpu can switch in a pmap that we are unaware of and interfere with our
 * pte operation.
 */
void
pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
{
    cpulock_t olock;
    cpulock_t nlock;

    DEBUG_PUSH_INFO("pmap_inval_interlock");
    for (;;) {
	olock = pmap->pm_active_lock & ~CPULOCK_EXCL;
	nlock = olock | CPULOCK_EXCL;
	if (atomic_cmpset_int(&pmap->pm_active_lock, olock, nlock))
		break;
	lwkt_process_ipiq();
	cpu_pause();
    }
    DEBUG_POP_INFO();
    KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0);
    info->pir_va = va;
    info->pir_flags = PIRF_CPUSYNC;
    lwkt_cpusync_init(&info->pir_cpusync, pmap->pm_active,
		      pmap_inval_callback, info);
    lwkt_cpusync_interlock(&info->pir_cpusync);
}
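
The second revision moves the lock bit out of the pm_active cpumask into a
dedicated 32-bit pm_active_lock word, so the interlock becomes a plain
atomic_cmpset_int() and the SMP/non-SMP split disappears. Below is a
hypothetical sketch of the matching release side, again in C11 atomics; the
constant LOCK_EXCL and the function deinterlock are assumptions for
illustration, not the kernel's actual deinterlock path.

#include <stdatomic.h>
#include <stdint.h>

#define LOCK_EXCL 0x80000000u	/* hypothetical exclusive-lock bit */

static void
deinterlock(_Atomic uint32_t *active_lock)
{
    /* Clearing the bit lets the next contender's cmpset succeed. */
    atomic_fetch_and(active_lock, ~LOCK_EXCL);
}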