/*
 * Point the CPU's LDT at [base, base + entries descriptors).
 *
 * Xen refuses to install a descriptor table whose backing pages are
 * guest-writable, so write permission is revoked from every page
 * spanned by the table before the set-LDT request is queued.
 */
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va, end;
	pt_entry_t *pte;
	int spl;

	/* One descriptor is 8 bytes on amd64, sizeof(union descriptor) on i386. */
#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	/* Make each page backing the LDT read-only. */
	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		pte = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, pte));
		pmap_pte_clearbits(pte, PG_RW);
	}

	/* Block interrupts while the MMU-op queue is manipulated. */
	spl = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(spl);
}
/* * Load appropriate gdt descriptor; we better be running on *ci * (for the most part, this is how a CPU knows who it is). */ void gdt_init_cpu(struct cpu_info *ci) { #ifndef XEN struct region_descriptor region; size_t max_len; max_len = MAXGDTSIZ * sizeof(gdt[0]); setregion(®ion, ci->ci_gdt, max_len - 1); lgdt(®ion); #else size_t len = gdt_size[0] * sizeof(gdt[0]); unsigned long frames[len >> PAGE_SHIFT]; vaddr_t va; pt_entry_t *ptp; int f; for (va = (vaddr_t)ci->ci_gdt, f = 0; va < (vaddr_t)ci->ci_gdt + len; va += PAGE_SIZE, f++) { KASSERT(va >= VM_MIN_KERNEL_ADDRESS); ptp = kvtopte(va); frames[f] = *ptp >> PAGE_SHIFT; pmap_pte_clearbits(ptp, PG_RW); } /* printk("loading gdt %x, %d entries, %d pages", */ /* frames[0] << PAGE_SHIFT, gdt_size[0], len >> PAGE_SHIFT); */ if (HYPERVISOR_set_gdt(frames, gdt_size[0])) panic("HYPERVISOR_set_gdt failed!\n"); lgdt_finish(); #endif }
/*
 * Write bytes somewhere in the kernel text.  Make the text
 * pages writable temporarily.
 *
 * addr: kernel virtual address of the first byte to patch.
 * size: number of bytes to write; zero is a no-op.
 * data: source bytes copied into the text.
 *
 * Walks the destination one mapping at a time, handling both 4K pages
 * and (PG_PS) large pages, restoring read-only protection after each
 * chunk.  Bails out with a diagnostic if a destination page is unmapped.
 */
static void
db_write_text(vaddr_t addr, size_t size, const char *data)
{
	pt_entry_t *ppte, pte;
	size_t limit;
	char *dst;

	if (size == 0)
		return;

	dst = (char *)addr;

	do {
		addr = (vaddr_t)dst;

		/*
		 * Get the PTE for the page.
		 */
		ppte = kvtopte(addr);
		pte = *ppte;

		if ((pte & PG_V) == 0) {
			printf(" address %p not a valid page\n", dst);
			return;
		}

		/*
		 * Compute number of bytes that can be written
		 * with this mapping and subtract it from the
		 * total size.
		 */
		/* PG_PS: a 2MB/4MB large page; otherwise a normal page. */
		if (pte & PG_PS)
			limit = NBPD_L2 - (addr & (NBPD_L2 - 1));
		else
			limit = PAGE_SIZE - (addr & PGOFSET);
		if (limit > size)
			limit = size;
		size -= limit;

		/*
		 * Make the kernel text page writable.
		 */
		pmap_pte_clearbits(ppte, PG_KR);
		pmap_pte_setbits(ppte, PG_KW);
		pmap_update_pg(addr);

		/*
		 * MULTIPROCESSOR: no shootdown required as the PTE continues to
		 * map the same page and other CPUs do not need write access.
		 */

		/*
		 * Page is now writable.  Do as much access as we
		 * can in this page.
		 */
		for (; limit > 0; limit--)
			*dst++ = *data++;

		/*
		 * Turn the page back to read-only.
		 */
		pmap_pte_clearbits(ppte, PG_KW);
		pmap_pte_setbits(ppte, PG_KR);
		pmap_update_pg(addr);

		/*
		 * MULTIPROCESSOR: no shootdown required as all other CPUs
		 * should be in CPUF_PAUSE state and will not cache the PTE
		 * with the write access set.
		 */
	} while (size != 0);
}