Example #1
/*
 * Invalidate a pte in a pmap and synchronize with target cpus
 * as required.  Throw away the modified and access bits.  Use
 * pmap_clean_pte() to do the same thing but also get an interlocked
 * modified/access status.
 *
 * Clearing the field first (basically clearing VPTE_V) prevents any
 * new races from occurring while we invalidate the TLB (i.e. the pmap
 * on the real cpu), then clear it again to clean out any race that
 * might have occurred before the invalidation completed.
 */
void
pmap_inval_pte(volatile vpte_t *ptep, struct pmap *pmap, vm_offset_t va)
{
	*ptep = 0;
	pmap_inval_cpu(pmap, va, PAGE_SIZE);
	*ptep = 0;
}
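
The comment above references pmap_clean_pte(), which is not shown in these
examples.  A minimal sketch of what such an interlocked variant might look
like, assuming it mirrors pmap_clean_pde() from Example #4 at page
granularity (the body below is inferred from that pattern, not taken from
the source):

vpte_t
pmap_clean_pte(volatile vpte_t *ptep, struct pmap *pmap, vm_offset_t va)
{
	vpte_t pte;

	pte = *ptep;
	if (pte & VPTE_V) {
		/* revoke write access first to interlock VPTE_M races */
		atomic_clear_long(ptep, VPTE_RW);
		pmap_inval_cpu(pmap, va, PAGE_SIZE);
		/*
		 * Re-read to capture any modified bit that raced in before
		 * the invalidation completed, then clean both bits out.
		 */
		pte = *ptep;
		atomic_clear_long(ptep, VPTE_RW|VPTE_M);
	}
	return(pte);
}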
Example #2
/*
 * Invalidating page directory entries requires some additional
 * sophistication.  The cachemask must be cleared so the kernel
 * resynchronizes its temporary page table mappings cache.
 */
void
pmap_inval_pde(volatile vpte_t *ptep, struct pmap *pmap, vm_offset_t va)
{
	*ptep = 0;
	pmap_inval_cpu(pmap, va, SEG_SIZE);
	*ptep = 0;
	pmap->pm_cpucachemask = 0;
}
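
A hedged sketch of the consumer side of pm_cpucachemask: a cpu whose bit
has been cleared re-establishes its temporary page table mappings before
trusting them, then re-sets its bit.  The helper name pmap_cache_resync()
and the per-cpu bit arithmetic are hypothetical, not from the source:

static void
pmap_cache_resync(struct pmap *pmap)
{
	cpumask_t mask = 1UL << mycpu->gd_cpuid; /* hypothetical per-cpu bit */

	if ((pmap->pm_cpucachemask & mask) == 0) {
		/* ... re-enter temporary page table mappings here ... */
		atomic_set_long(&pmap->pm_cpucachemask, mask);
	}
}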
Example #3
/*
 * This is an odd case and I'm not sure whether it even occurs in normal
 * operation.  Turn off write access to the page, clean out the tlb
 * (the real cpu's pmap), and deal with any VPTE_M race that may have
 * occurred.  VPTE_M is not cleared.
 */
vpte_t
pmap_setro_pte(volatile vpte_t *ptep, struct pmap *pmap, vm_offset_t va)
{
	vpte_t pte;

	pte = *ptep;
	if (pte & VPTE_V) {
		pte = *ptep;
		atomic_clear_long(ptep, VPTE_RW);
		pmap_inval_cpu(pmap, va, PAGE_SIZE);
		pte |= *ptep & VPTE_M;
	}
	return(pte);
}
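
A hypothetical caller-side sketch showing why the returned pte matters:
any VPTE_M state captured across the write revocation is propagated to
the backing vm_page.  The wrapper name and the vm_page_dirty() usage are
assumptions for illustration:

static void
pmap_page_protect_ro(volatile vpte_t *ptep, struct pmap *pmap,
		     vm_offset_t va, vm_page_t m)
{
	vpte_t pte;

	pte = pmap_setro_pte(ptep, pmap, va);
	if (pte & VPTE_M)
		vm_page_dirty(m);	/* propagate captured modified state */
}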
Example #4
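/*
 * Clean a page directory entry.  Revoke write access, invalidate the
 * tlb (the real cpu's pmap) across the segment, then re-read the pte
 * so any VPTE_M race that occurred before the invalidation completed
 * is captured in the returned value before the bits are cleared.
 */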
vpte_t
pmap_clean_pde(volatile vpte_t *ptep, struct pmap *pmap, vm_offset_t va)
{
	vpte_t pte;

	pte = *ptep;
	if (pte & VPTE_V) {
		atomic_clear_long(ptep, VPTE_RW);
		pmap_inval_cpu(pmap, va, SEG_SIZE);
		pte = *ptep;
		atomic_clear_long(ptep, VPTE_RW|VPTE_M);
	}
	return(pte);
}
Example #5
/*
 * This is a combination of pmap_inval_pte() and pmap_clean_pte().
 * First prevent races with the 'A' and 'M' bits, then clean out
 * the tlb (the real cpu's pmap), then incorporate any races that
 * may have occurred in the meantime, and finally zero out the pte.
 */
vpte_t
pmap_inval_loadandclear(volatile vpte_t *ptep, struct pmap *pmap,
			vm_offset_t va)
{
	vpte_t pte;

	pte = *ptep;
	if (pte & VPTE_V) {
		pte = *ptep;
		atomic_clear_long(ptep, VPTE_RW);
		pmap_inval_cpu(pmap, va, PAGE_SIZE);
		pte |= *ptep & (VPTE_A | VPTE_M);
	}
	*ptep = 0;
	return(pte);
}
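
A hypothetical teardown sketch consuming the bits this function captures.
The wrapper name and the vm_page_dirty()/vm_page_flag_set() accounting
are assumptions, not part of the source:

static void
pmap_remove_pte(volatile vpte_t *ptep, struct pmap *pmap,
		vm_offset_t va, vm_page_t m)
{
	vpte_t pte;

	pte = pmap_inval_loadandclear(ptep, pmap, va);
	if (pte & VPTE_M)
		vm_page_dirty(m);	/* page was modified while mapped */
	if (pte & VPTE_A)
		vm_page_flag_set(m, PG_REFERENCED); /* page was accessed */
}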