/*
 * IPI callback helper - invalidate the TLB entry for the command's va,
 * or the entire TLB if the wildcard va is specified.
 */
static void
pmap_inval_callback(void *arg)
{
	pmap_inval_info_t *info = arg;

	if (info->va == (vm_offset_t)-1)
		cpu_invltlb();
	else
		cpu_invlpg((void *)info->va);
}
/*
 * API function - invalidate the pte at (va) and replace *ptep with npte
 * atomically only if *ptep equals opte, across the pmap's active cpus.
 *
 * Returns 1 on success, 0 on failure (caller typically retries).
 */
int
pmap_inval_smp_cmpset(pmap_t pmap, vm_offset_t va, pt_entry_t *ptep,
		      pt_entry_t opte, pt_entry_t npte)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	int success;
	int cpu = gd->gd_cpuid;
	cpumask_t tmpmask;
	unsigned long rflags;

	/*
	 * Initialize invalidation for pmap and enter critical section.
	 */
	if (pmap == NULL)
		pmap = &kernel_pmap;
	pmap_inval_init(pmap);

	/*
	 * Shortcut single-cpu case if possible.
	 */
	if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
		if (atomic_cmpset_long(ptep, opte, npte)) {
			if (va == (vm_offset_t)-1)
				cpu_invltlb();
			else
				cpu_invlpg((void *)va);
			pmap_inval_done(pmap);
			return 1;
		} else {
			pmap_inval_done(pmap);
			return 0;
		}
	}

	/*
	 * We need a critical section to prevent getting preempted while
	 * we set up our command.  A preemption might execute its own
	 * pmap_inval*() command and create confusion below.
	 */
	info = &invinfo[cpu];
	info->tsc_target = rdtsc() + (tsc_frequency * LOOPRECOVER_TIMEOUT1);

	/*
	 * We must wait for other cpus which may still be finishing
	 * up a prior operation.
	 */
	while (CPUMASK_TESTNZERO(info->done)) {
#ifdef LOOPRECOVER
		if (loopwdog(info)) {
			info->failed = 1;
			loopdebug("B", info);
			/* XXX recover from possible bug */
			CPUMASK_ASSZERO(info->done);
		}
#endif
		cpu_pause();
	}
	KKASSERT(info->mode == INVDONE);

	/*
	 * Must set our cpu in the invalidation scan mask before
	 * any possibility of [partial] execution (remember, XINVLTLB
	 * can interrupt a critical section).
	 */
	ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);

	info->va = va;
	info->npgs = 1;			/* unused */
	info->ptep = ptep;
	info->npte = npte;
	info->opte = opte;
#ifdef LOOPRECOVER
	info->failed = 0;
#endif
	info->mode = INVCMPSET;
	info->success = 0;

	tmpmask = pmap->pm_active;	/* volatile */
	cpu_ccfence();
	CPUMASK_ANDMASK(tmpmask, smp_active_mask);
	CPUMASK_ORBIT(tmpmask, cpu);
	info->mask = tmpmask;

	/*
	 * Command may start executing the moment 'done' is initialized,
	 * disable current cpu interrupt to prevent 'done' field from
	 * changing (other cpus can't clear done bits until the originating
	 * cpu clears its mask bit).
	 */
#ifdef LOOPRECOVER
	info->sigmask = tmpmask;
	CHECKSIGMASK(info);
#endif
	cpu_sfence();
	rflags = read_rflags();
	cpu_disable_intr();

	ATOMIC_CPUMASK_COPY(info->done, tmpmask);

	/*
	 * Pass our copy of the done bits (so they don't change out from
	 * under us) to generate the Xinvltlb interrupt on the targets.
	 */
	smp_invlpg(&tmpmask);
	success = info->success;
	KKASSERT(info->mode == INVDONE);

	ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
	write_rflags(rflags);
	pmap_inval_done(pmap);

	return success;
}
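/*
 * Illustrative usage sketch (an editor's example, not part of the
 * original file): per the comment above, callers typically recompute
 * and retry on failure.  A hypothetical caller demoting a pte to
 * read-only might look like the following; pmap_pte() and the
 * PG_RW_IDX bit index are assumed helpers for the sake of the example.
 *
 *	pt_entry_t *ptep = pmap_pte(pmap, va);
 *	pt_entry_t opte;
 *	pt_entry_t npte;
 *
 *	do {
 *		opte = *ptep;
 *		npte = opte & ~pmap->pmap_bits[PG_RW_IDX];
 *	} while (pmap_inval_smp_cmpset(pmap, va, ptep, opte, npte) == 0);
 */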
/*
 * Called with a critical section held and interrupts enabled.
 */
int
pmap_inval_intr(cpumask_t *cpumaskp, int toolong)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	int loopme = 0;
	int cpu;
	cpumask_t cpumask;

	/*
	 * Check all cpus for invalidations we may need to service.
	 */
	cpu_ccfence();
	cpu = gd->gd_cpuid;
	cpumask = *cpumaskp;

	while (CPUMASK_TESTNZERO(cpumask)) {
		int n = BSFCPUMASK(cpumask);

#ifdef LOOPRECOVER
		KKASSERT(n >= 0 && n < MAXCPU);
#endif

		CPUMASK_NANDBIT(cpumask, n);
		info = &invinfo[n];

		/*
		 * Due to interrupts/races we can catch a new operation
		 * in an older interrupt.  A fence is needed once we detect
		 * the (not) done bit.
		 */
		if (!CPUMASK_TESTBIT(info->done, cpu))
			continue;
		cpu_lfence();
#ifdef LOOPRECOVER
		if (toolong) {
			kprintf("pminvl %d->%d %08jx %08jx mode=%d\n",
				cpu, n, info->done.ary[0], info->mask.ary[0],
				info->mode);
		}
#endif

		/*
		 * info->mask and info->done always contain the originating
		 * cpu until the originator is done.  Targets may still be
		 * present in info->done after the originator is done (they
		 * will be finishing up their loops).
		 *
		 * Clear info->mask bits on other cpus to indicate that they
		 * have quiesced (entered the loop).  Once the other mask
		 * bits are clear we can execute the operation on the
		 * originator, then clear the mask and done bits on the
		 * originator.  The targets will then finish up their side
		 * and clear their done bits.
		 *
		 * The command is considered 100% done when all done bits
		 * have been cleared.
		 */
		if (n != cpu) {
			/*
			 * Command state machine for 'other' cpus.
			 */
			if (CPUMASK_TESTBIT(info->mask, cpu)) {
				/*
				 * Other cpus indicate to the originator
				 * that they have quiesced.
				 */
				ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
				loopme = 1;
			} else if (info->ptep &&
				   CPUMASK_TESTBIT(info->mask, n)) {
				/*
				 * Other cpu must wait for the originator
				 * (n) to complete its command if ptep is
				 * not NULL.
				 */
				loopme = 1;
			} else {
				/*
				 * Other cpu detects that the originator has
				 * completed its command, or there was no
				 * command.
				 *
				 * Now that the page table entry has changed,
				 * we can follow up with our own
				 * invalidation.
				 */
				vm_offset_t va = info->va;
				int npgs;

				if (va == (vm_offset_t)-1 ||
				    info->npgs > MAX_INVAL_PAGES) {
					cpu_invltlb();
				} else {
					for (npgs = info->npgs; npgs; --npgs) {
						cpu_invlpg((void *)va);
						va += PAGE_SIZE;
					}
				}
				ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
				/* info invalid now */
				/* loopme left alone */
			}
		} else if (CPUMASK_TESTBIT(info->mask, cpu)) {
			/*
			 * Originator is waiting for other cpus
			 */
			if (CPUMASK_CMPMASKNEQ(info->mask, gd->gd_cpumask)) {
				/*
				 * Originator waits for other cpus to enter
				 * their loop (aka quiesce).
				 *
				 * If this bugs out the IPI may have been
				 * lost, try to reissue by resetting our own
				 * reentrancy bit and clearing the smurf mask
				 * for the cpus that did not respond, then
				 * reissuing the IPI.
				 */
				loopme = 1;
#ifdef LOOPRECOVER
				if (loopwdog(info)) {
					info->failed = 1;
					loopdebug("C", info);
					/* XXX recover from possible bug */
					mdcpu->gd_xinvaltlb = 0;
					ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
								info->mask);
					cpu_disable_intr();
					smp_invlpg(&smp_active_mask);

					/*
					 * Force outer-loop retest of
					 * Xinvltlb requests (see
					 * mp_machdep.c).
					 */
					mdcpu->gd_xinvaltlb = 2;
					cpu_enable_intr();
				}
#endif
			} else {
				/*
				 * Originator executes the operation and
				 * clears its mask bit to allow other cpus
				 * to finish.
				 */
				KKASSERT(info->mode != INVDONE);
				if (info->mode == INVSTORE) {
					if (info->ptep)
						info->opte = atomic_swap_long(info->ptep, info->npte);
					CHECKSIGMASK(info);
					ATOMIC_CPUMASK_NANDBIT(info->mask,
							       cpu);
					CHECKSIGMASK(info);
				} else {
					if (atomic_cmpset_long(info->ptep,
							       info->opte,
							       info->npte)) {
						info->success = 1;
					} else {
						info->success = 0;
					}
					CHECKSIGMASK(info);
					ATOMIC_CPUMASK_NANDBIT(info->mask,
							       cpu);
					CHECKSIGMASK(info);
				}
				loopme = 1;
			}
		} else {
			/*
			 * Originator does not have to wait for the other
			 * cpus to finish.  It clears its done bit.  A new
			 * command will not be initiated by the originator
			 * until the other cpus have cleared their done bits
			 * (asynchronously).
			 */
			vm_offset_t va = info->va;
			int npgs;

			if (va == (vm_offset_t)-1 ||
			    info->npgs > MAX_INVAL_PAGES) {
				cpu_invltlb();
			} else {
				for (npgs = info->npgs; npgs; --npgs) {
					cpu_invlpg((void *)va);
					va += PAGE_SIZE;
				}
			}

			/* leave loopme alone */
			/* other cpus may still be finishing up */
			/* can't race originator since that's us */
			info->mode = INVDONE;
			ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
		}
	}
	return loopme;
}
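/*
 * Illustrative walk-through of the handshake implemented above (a
 * restatement of the code for a two-cpu INVSTORE, cpu 0 originating
 * against target cpu 1):
 *
 *	1. cpu 0 sets bits 0 and 1 in info->mask and info->done and
 *	   sends the Xinvltlb IPI.
 *	2. cpu 1 enters pmap_inval_intr(), sees its bit in info->mask,
 *	   and clears it to signal that it has quiesced.
 *	3. cpu 0, seeing only its own bit remaining in info->mask,
 *	   performs the atomic_swap_long() and clears its own mask bit.
 *	4. cpu 1, seeing the originator's mask bit clear, runs its TLB
 *	   invalidation and clears its own done bit.
 *	5. cpu 0 runs its own invalidation, sets INVDONE, and clears its
 *	   done bit; the command is fully retired once info->done reaches
 *	   zero.
 */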
/*
 * Invalidate the specified va across all cpus associated with the pmap.
 * If va == (vm_offset_t)-1, we invltlb() instead of invlpg().  The
 * operation will be done fully synchronously with storing npte into
 * *ptep and returning opte.
 *
 * If ptep is NULL the operation will execute semi-synchronously.
 * ptep must be NULL if npgs > 1.
 */
pt_entry_t
pmap_inval_smp(pmap_t pmap, vm_offset_t va, int npgs,
	       pt_entry_t *ptep, pt_entry_t npte)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	pt_entry_t opte = 0;
	int cpu = gd->gd_cpuid;
	cpumask_t tmpmask;
	unsigned long rflags;

	/*
	 * Initialize invalidation for pmap and enter critical section.
	 */
	if (pmap == NULL)
		pmap = &kernel_pmap;
	pmap_inval_init(pmap);

	/*
	 * Shortcut single-cpu case if possible.
	 */
	if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
		/*
		 * Convert to invltlb if there are too many pages to
		 * invlpg on.
		 */
		if (npgs > MAX_INVAL_PAGES) {
			npgs = 0;
			va = (vm_offset_t)-1;
		}

		/*
		 * Invalidate the specified pages, handle invltlb if
		 * requested.
		 */
		while (npgs) {
			--npgs;
			if (ptep) {
				opte = atomic_swap_long(ptep, npte);
				++ptep;
			}
			if (va == (vm_offset_t)-1)
				break;
			cpu_invlpg((void *)va);
			va += PAGE_SIZE;
		}
		if (va == (vm_offset_t)-1)
			cpu_invltlb();
		pmap_inval_done(pmap);

		return opte;
	}

	/*
	 * We need a critical section to prevent getting preempted while
	 * we set up our command.  A preemption might execute its own
	 * pmap_inval*() command and create confusion below.
	 *
	 * tsc_target is our watchdog timeout that will attempt to recover
	 * from a lost IPI.  Set to 1/16 second for now.
	 */
	info = &invinfo[cpu];
	info->tsc_target = rdtsc() + (tsc_frequency * LOOPRECOVER_TIMEOUT1);

	/*
	 * We must wait for other cpus which may still be finishing up a
	 * prior operation that we requested.
	 *
	 * We do not have to disable interrupts here.  An Xinvltlb can occur
	 * at any time (even within a critical section), but it will not
	 * act on our command until we set our done bits.
	 */
	while (CPUMASK_TESTNZERO(info->done)) {
#ifdef LOOPRECOVER
		if (loopwdog(info)) {
			info->failed = 1;
			loopdebug("A", info);
			/* XXX recover from possible bug */
			CPUMASK_ASSZERO(info->done);
		}
#endif
		cpu_pause();
	}
	KKASSERT(info->mode == INVDONE);

	/*
	 * Must set our cpu in the invalidation scan mask before
	 * any possibility of [partial] execution (remember, XINVLTLB
	 * can interrupt a critical section).
	 */
	ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);

	info->va = va;
	info->npgs = npgs;
	info->ptep = ptep;
	info->npte = npte;
	info->opte = 0;
#ifdef LOOPRECOVER
	info->failed = 0;
#endif
	info->mode = INVSTORE;

	tmpmask = pmap->pm_active;	/* volatile (bits may be cleared) */
	cpu_ccfence();
	CPUMASK_ANDMASK(tmpmask, smp_active_mask);

	/*
	 * If ptep is NULL the operation can be semi-synchronous, which
	 * means we can improve performance by flagging and removing idle
	 * cpus (see the idleinvlclr function in mp_machdep.c).
	 *
	 * Typically kernel page table operations are semi-synchronous.
	 */
	if (ptep == NULL)
		smp_smurf_idleinvlclr(&tmpmask);
	CPUMASK_ORBIT(tmpmask, cpu);
	info->mask = tmpmask;

	/*
	 * Command may start executing the moment 'done' is initialized,
	 * disable current cpu interrupt to prevent 'done' field from
	 * changing (other cpus can't clear done bits until the originating
	 * cpu clears its mask bit, but other cpus CAN start clearing their
	 * mask bits).
	 */
#ifdef LOOPRECOVER
	info->sigmask = tmpmask;
	CHECKSIGMASK(info);
#endif
	cpu_sfence();
	rflags = read_rflags();
	cpu_disable_intr();

	ATOMIC_CPUMASK_COPY(info->done, tmpmask);
	/* execution can begin here due to races */

	/*
	 * Pass our copy of the done bits (so they don't change out from
	 * under us) to generate the Xinvltlb interrupt on the targets.
	 */
	smp_invlpg(&tmpmask);
	opte = info->opte;
	KKASSERT(info->mode == INVDONE);

	/*
	 * Target cpus will be in their loop exiting concurrently with our
	 * cleanup.  They will not lose the bitmask they obtained before so
	 * we can safely clear this bit.
	 */
	ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
	write_rflags(rflags);
	pmap_inval_done(pmap);

	return opte;
}
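/*
 * Illustrative usage sketch (an editor's example, not part of the
 * original file): a hypothetical removal path zeroing a single pte
 * fully synchronously and then inspecting the returned contents; the
 * 'm' vm_page and the PG_M_IDX bit index are assumptions for the
 * example.
 *
 *	pt_entry_t opte;
 *
 *	opte = pmap_inval_smp(pmap, va, 1, ptep, 0);
 *	if (opte & pmap->pmap_bits[PG_M_IDX])
 *		vm_page_dirty(m);
 *
 * Passing ptep == NULL instead requests the semi-synchronous variant,
 * e.g. pmap_inval_smp(pmap, va, 1, NULL, 0) performs the cross-cpu
 * invalidation without storing to the page table.
 */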