/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap.
 *
 * Works a page at a time: the destination PTE takes over the source
 * page's mapping and the source PTE is cleared.  On Xen the PTE words
 * are updated through their machine addresses with pte_atomic_update().
 * Stale TLB entries for both VAs are shot down (deferred into a CPU
 * mask on MULTIPROCESSOR, flushed per-page otherwise; a plain 386 has
 * no INVLPG, so it gets one tlbflush() at the end instead).
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	pt_entry_t *fpte, *tpte, *mafpte, *matpte;
	pt_entry_t ofpte, otpte;
#ifdef MULTIPROCESSOR
	u_int32_t cpumask = 0;	/* CPUs owing a TLB shootdown */
#endif

#ifdef DIAGNOSTIC
	/* Size must be an exact multiple of the page size. */
	if ((size & PAGE_MASK) != 0)
		panic("pagemove");
#endif
	fpte = kvtopte((vaddr_t)from);
	tpte = kvtopte((vaddr_t)to);
	while (size > 0) {
		/* Machine addresses of the two PTE slots (Xen). */
		mafpte = (pt_entry_t *)vtomach((vaddr_t)fpte);
		matpte = (pt_entry_t *)vtomach((vaddr_t)tpte);
		/*
		 * Point the destination PTE at the source page, then
		 * clear the source PTE; each call returns the previous
		 * PTE value so we know whether a TLB entry may exist.
		 */
		otpte = pte_atomic_update(tpte, matpte, *fpte);
		ofpte = pte_atomic_update(fpte, mafpte, 0);
		tpte++;
		fpte++;
#if defined(I386_CPU) && !defined(MULTIPROCESSOR)
		/* 80386 lacks INVLPG; it takes the tlbflush() below. */
		if (cpu_class != CPUCLASS_386)
#endif
		{
			/* Only valid old mappings can be TLB-cached. */
			if (otpte & PG_V)
#ifdef MULTIPROCESSOR
				pmap_tlb_shootdown(pmap_kernel(),
				    (vaddr_t)to, otpte, &cpumask);
#else
				pmap_update_pg((vaddr_t)to);
#endif
			if (ofpte & PG_V)
#ifdef MULTIPROCESSOR
				pmap_tlb_shootdown(pmap_kernel(),
				    (vaddr_t)from, ofpte, &cpumask);
#else
				pmap_update_pg((vaddr_t)from);
#endif
		}
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#ifdef MULTIPROCESSOR
	/* Deliver all deferred shootdowns in one go. */
	pmap_tlb_shootnow(cpumask);
#else
#if defined(I386_CPU)
	/* No INVLPG on a 386: flush the whole TLB once. */
	if (cpu_class == CPUCLASS_386)
		tlbflush();
#endif
#endif
}
static inline void pmap_tlb_invalidate(const pmap_tlb_packet_t *tp) { int i; /* Find out what we need to invalidate. */ if (tp->tp_count == (uint16_t)-1) { u_int egen = uvm_emap_gen_return(); if (tp->tp_pte & PG_G) { /* Invalidating user and kernel TLB entries. */ tlbflushg(); } else { /* Invalidating user TLB entries only. */ tlbflush(); } uvm_emap_update(egen); } else { /* Invalidating a single page or a range of pages. */ for (i = tp->tp_count - 1; i >= 0; i--) { pmap_update_pg(tp->tp_va[i]); } } }
/*
 * Write bytes somewhere in the kernel text.  Make the text
 * pages writable temporarily.
 *
 * Fixes over the previous revision:
 *  - `addr' is refreshed from the running write pointer each
 *    iteration, so a write crossing a page boundary modifies the
 *    correct page's PTE instead of the first page's.
 *  - The PTE is modified BEFORE pmap_update_pg(): flushing first
 *    would leave the stale read-only entry cached on this CPU.
 *  - The restore step clears only the temporary PG_RW bit (when the
 *    page was not writable originally) instead of clearing every
 *    saved bit — which would have wiped PG_V and unmapped the page.
 */
static void
db_write_text(vaddr_t addr, size_t size, char *data)
{
	vaddr_t pgva;
	size_t limit;
	uint32_t bits;
	char *dst;

	if (size == 0)
		return;

	dst = (char *)addr;

	do {
		/* Keep `addr' tracking the page currently written. */
		addr = (vaddr_t)dst;

		/*
		 * Get the PTE for the page.
		 */
		bits = pmap_pte_bits(addr);
		if ((bits & PG_V) == 0) {
			printf(" address %p not a valid page\n", dst);
			return;
		}

		/*
		 * Get the VA for the page (large-page aware; PAE large
		 * pages use a different frame mask).
		 */
		if (bits & PG_PS) {
			if (cpu_pae)
				pgva = (vaddr_t)dst & PG_LGFRAME_PAE;
			else
				pgva = (vaddr_t)dst & PG_LGFRAME;
		} else
			pgva = trunc_page((vaddr_t)dst);

		/*
		 * Compute number of bytes that can be written
		 * with this mapping and subtract it from the
		 * total size.
		 */
#ifdef NBPD_L2
		if (bits & PG_PS)
			limit = NBPD_L2 - ((vaddr_t)dst & (NBPD_L2 - 1));
		else
#endif
			limit = PAGE_SIZE - ((vaddr_t)dst & PGOFSET);
		if (limit > size)
			limit = size;
		size -= limit;

		/*
		 * Make the page writable, then invalidate the stale
		 * TLB entry so the new permission takes effect.
		 */
		pmap_pte_setbits(addr, PG_RW, 0);
		pmap_update_pg(pgva);

		/*
		 * Page is now writable.  Do as much access as we
		 * can in this page.
		 */
		for (; limit > 0; limit--)
			*dst++ = *data++;

		/*
		 * Restore the old protection: drop PG_RW again unless
		 * the page was writable to begin with, then flush.
		 */
		pmap_pte_setbits(addr, 0, PG_RW & ~bits);
		pmap_update_pg(pgva);
	} while (size != 0);
}
/*
 * Write bytes somewhere in the kernel text.  Make the text
 * pages writable temporarily.
 *
 * Fix: `addr' is refreshed from the running write pointer each
 * iteration; previously it stayed at the caller's start address, so a
 * write crossing a page boundary kept looking up (and toggling) the
 * first page's PTE while the copy had already moved to the next page.
 */
static void
db_write_text(vaddr_t addr, size_t size, const char *data)
{
	pt_entry_t *pte, oldpte, tmppte;
	vaddr_t pgva;
	size_t limit;
	char *dst;

	if (size == 0)
		return;

	dst = (char *)addr;

	do {
		/* Keep `addr' tracking the page currently written. */
		addr = (vaddr_t)dst;

		/*
		 * Get the PTE for the page.
		 */
		pte = kvtopte(addr);
		oldpte = *pte;
		if ((oldpte & PG_V) == 0) {
			printf(" address %p not a valid page\n", dst);
			return;
		}

		/*
		 * Get the VA for the page (large-page aware).
		 */
		if (oldpte & PG_PS)
			pgva = (vaddr_t)dst & PG_LGFRAME;
		else
			pgva = x86_trunc_page(dst);

		/*
		 * Compute number of bytes that can be written
		 * with this mapping and subtract it from the
		 * total size.
		 */
		if (oldpte & PG_PS)
			limit = NBPD_L2 - ((vaddr_t)dst & (NBPD_L2 - 1));
		else
			limit = PAGE_SIZE - ((vaddr_t)dst & PGOFSET);
		if (limit > size)
			limit = size;
		size -= limit;

		/* Temporarily grant kernel write access. */
		tmppte = (oldpte & ~PG_KR) | PG_KW;
#ifdef XEN
		/* PTEs are updated via the hypervisor under Xen. */
		xpmap_update(pte, tmppte);
#else
		*pte = tmppte;
#endif
		pmap_update_pg(pgva);

		/*
		 * Page is now writable.  Do as much access as we
		 * can in this page.
		 */
		for (; limit > 0; limit--)
			*dst++ = *data++;

		/*
		 * Restore the old PTE.
		 */
#ifdef XEN
		xpmap_update(pte, oldpte);
#else
		*pte = oldpte;
#endif
		pmap_update_pg(pgva);
	} while (size != 0);
}
/*
 * Write bytes somewhere in the kernel text.  Make the text
 * pages writable temporarily.
 *
 * MULTIPROCESSOR: no TLB shootdown is required when toggling the
 * protection — the PTE keeps mapping the same page, other CPUs never
 * need the transient write access, and they are expected to be paused
 * (CPUF_PAUSE) while the debugger writes.
 */
static void
db_write_text(vaddr_t addr, size_t size, const char *data)
{
	pt_entry_t *ptep, entry;
	size_t chunk;
	char *p;

	if (size == 0)
		return;

	p = (char *)addr;

	while (size != 0) {
		addr = (vaddr_t)p;

		/* Look up the PTE mapping the page being written. */
		ptep = kvtopte(addr);
		entry = *ptep;
		if ((entry & PG_V) == 0) {
			printf(" address %p not a valid page\n", p);
			return;
		}

		/*
		 * Bytes writable through this mapping: up to the end
		 * of the (possibly large) page, capped by the request.
		 */
		chunk = (entry & PG_PS) ?
		    NBPD_L2 - (addr & (NBPD_L2 - 1)) :
		    PAGE_SIZE - (addr & PGOFSET);
		if (chunk > size)
			chunk = size;
		size -= chunk;

		/* Grant kernel write access and drop the stale entry. */
		pmap_pte_clearbits(ptep, PG_KR);
		pmap_pte_setbits(ptep, PG_KW);
		pmap_update_pg(addr);

		/* Copy as much as this mapping allows. */
		while (chunk-- > 0)
			*p++ = *data++;

		/* Turn the page back to read-only and flush again. */
		pmap_pte_clearbits(ptep, PG_KW);
		pmap_pte_setbits(ptep, PG_KR);
		pmap_update_pg(addr);
	}
}