/*
 * Bolt the kernel addr space into the HPT
 *
 * For every page in [saddr, eaddr) either marks the existing HPTE as
 * bolted (and fixes its pp bits) or inserts a fresh bolted HPTE.
 *
 * BUG FIX: the original code did "mode_rw |= HW_NO_EXEC" on the shared
 * loop-invariant variable, so after the first non-kernel-text page was
 * seen, ALL subsequent pages -- including kernel text -- were mapped
 * non-executable.  Use a per-iteration mode instead (same idiom as
 * htab_bolt_mapping()).
 */
static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{
	unsigned long pa;
	unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
	HPTE hpte;

	for (pa = saddr; pa < eaddr; pa += PAGE_SIZE) {
		unsigned long ea = (unsigned long)__va(pa);
		unsigned long vsid = get_kernel_vsid(ea);
		unsigned long va = (vsid << 28) | (pa & 0xfffffff);
		unsigned long vpn = va >> PAGE_SHIFT;
		unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
		unsigned long tmp_mode = mode_rw;

		/* Make non-kernel text non-executable (this page only) */
		if (!in_kernel_text(ea))
			tmp_mode |= HW_NO_EXEC;

		if (hpte.dw0.dw0.v) {
			/* HPTE exists, so just bolt it */
			HvCallHpt_setSwBits(slot, 0x10, 0);
			/* And make sure the pp bits are correct */
			HvCallHpt_setPp(slot, PP_RWXX);
		} else
			/* No HPTE exists, so create a new bolted one */
			iSeries_make_pte(va, phys_to_abs(pa), tmp_mode);
	}
}
/* Return a pt_regs pointer for a valid fault handler frame */ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) { const char *fault = NULL; /* happy compiler */ char fault_buf[64]; VirtualAddress sp = kbt->it.sp; struct pt_regs *p; if (!in_kernel_stack(kbt, sp)) return NULL; if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) return NULL; p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE); if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN) fault = "syscall"; else { if (kbt->verbose) { /* else we aren't going to use it */ snprintf(fault_buf, sizeof(fault_buf), "interrupt %ld", p->faultnum); fault = fault_buf; } } if (EX1_PL(p->ex1) == KERNEL_PL && in_kernel_text(p->pc) && in_kernel_stack(kbt, p->sp) && p->sp >= sp) { if (kbt->verbose) pr_err(" <%s while in kernel mode>\n", fault); } else if (EX1_PL(p->ex1) == USER_PL && p->pc < PAGE_OFFSET && p->sp < PAGE_OFFSET) { if (kbt->verbose) pr_err(" <%s while in user mode>\n", fault); } else if (kbt->verbose) { pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", p->pc, p->sp, p->ex1); p = NULL; } if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) return p; return NULL; }
/*
 * Callback for backtracer; basically a glorified memcpy.
 *
 * Validates the address first: kernel text is always readable,
 * other kernel-space addresses must be on this task's kernel stack,
 * and user-space addresses must pass valid_address().  The copy
 * itself is done with page faults disabled so it cannot sleep.
 * Returns true on a fully successful copy.
 */
static bool read_memory_func(void *result, VirtualAddress address,
			     unsigned int size, void *vkbt)
{
	struct KBacktraceIterator *kbt = vkbt;
	int rc;

	if (!in_kernel_text(address)) {
		if (address >= PAGE_OFFSET) {
			/* Only tolerate kernel-space reads of this task's stack */
			if (!in_kernel_stack(kbt, address))
				return 0;
		} else if (!valid_address(kbt, address)) {
			/* invalid user-space address */
			return 0;
		}
	}

	pagefault_disable();
	rc = __copy_from_user_inatomic(result,
				       (void __user __force *)address, size);
	pagefault_enable();

	return rc == 0;
}
/*
 * Insert a single bolted 4K HPTE mapping the linear (__va) address of
 * @paddr.  Returns the HPT slot on success, or a negative value (after
 * logging at KERN_EMERG) if the hash group has no bolted entries left.
 *
 * Cleanup: the original computed "vpn = va >> shift" but never used it
 * (hpt_hash() takes "va" directly) -- the dead computation and the
 * gratuitous inner block are removed.
 */
static long map_to_linear(ulong paddr)
{
	unsigned long vaddr;
	int psize;
	unsigned long mode;
	int slot;
	uint shift;
	unsigned long tmp_mode;
	unsigned long hash, hpteg;
	unsigned long vsid, va;

	psize = MMU_PAGE_4K;
	shift = mmu_psize_defs[psize].shift;
	mode = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
	vaddr = (ulong)__va(paddr);

	vsid = get_kernel_vsid(vaddr);
	va = (vsid << 28) | (vaddr & 0x0fffffff);

	tmp_mode = mode;

	/* Make non-kernel text non-executable */
	if (!in_kernel_text(vaddr))
		tmp_mode = mode | HPTE_R_N;

	hash = hpt_hash(va, shift);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	BUG_ON(!ppc_md.hpte_insert);
	slot = ppc_md.hpte_insert(hpteg, va, paddr, tmp_mode,
				  HPTE_V_BOLTED, psize);
	if (slot < 0)
		printk(KERN_EMERG "%s: no more bolted entries "
		       "HTAB[0x%lx]: 0x%lx\n", __func__, hpteg, paddr);

	return slot;
}
int htab_bolt_mapping(unsigned long vstart, unsigned long vend, unsigned long pstart, unsigned long mode, int psize) { unsigned long vaddr, paddr; unsigned int step, shift; unsigned long tmp_mode; int ret = 0; shift = mmu_psize_defs[psize].shift; step = 1 << shift; for (vaddr = vstart, paddr = pstart; vaddr < vend; vaddr += step, paddr += step) { unsigned long vpn, hash, hpteg; unsigned long vsid = get_kernel_vsid(vaddr); unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff); vpn = va >> shift; tmp_mode = mode; /* Make non-kernel text non-executable */ if (!in_kernel_text(vaddr)) tmp_mode = mode | HPTE_R_N; hash = hpt_hash(va, shift); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert); BUG_ON(!ppc_md.hpte_insert); ret = ppc_md.hpte_insert(hpteg, va, paddr, tmp_mode, HPTE_V_BOLTED, psize); if (ret < 0) break; } return ret < 0 ? ret : 0; }