/*
 * Resume execution at the current PC.  In the counting step modes the
 * instruction about to run (and, with SOFTWARE_SSTEP, its delay slot)
 * is added to the instruction/load/store counters; then either
 * single-stepping or the break/watchpoints are armed, depending on
 * db_run_mode.
 */
void
db_restart_at_pc(db_regs_t *regs, bool watchpt)
{
	db_addr_t cur = PC_REGS(regs);

	switch (db_run_mode) {
	case STEP_COUNT:
	case STEP_RETURN:
	case STEP_CALLT: {
		db_expr_t insn __unused;

		/*
		 * We are about to execute this instruction,
		 * so count it now.
		 */
		insn = db_get_value(cur, sizeof(int), false);
		db_inst_count++;
		db_load_count += inst_load(insn);
		db_store_count += inst_store(insn);

#ifdef SOFTWARE_SSTEP
		/*
		 * Account for instructions in delay slots.
		 */
		db_addr_t slot = next_instr_address(cur, true);
		if (slot != cur &&
		    (inst_branch(insn) || inst_call(insn) ||
		     inst_return(insn))) {
			insn = db_get_value(slot, sizeof(int), false);
			db_inst_count++;
			db_load_count += inst_load(insn);
			db_store_count += inst_store(insn);
		}
#endif
		break;
	}
	default:
		break;
	}

	if (db_run_mode != STEP_CONTINUE) {
		db_set_single_step(regs);
	} else if (watchpt || db_find_breakpoint_here(cur)) {
		/*
		 * Step over breakpoint/watchpoint.
		 */
		db_run_mode = STEP_INVISIBLE;
		db_set_single_step(regs);
	} else {
		db_set_breakpoints();
		db_set_watchpoints();
	}
}
/*
 * Restart execution at the saved PC.  For the counting step modes the
 * pending instruction is folded into the instruction/load/store
 * counters first; then single-stepping or breakpoints/watchpoints are
 * armed according to db_run_mode.
 */
void
db_restart_at_pc(boolean_t watchpt)
{
	db_addr_t pc = PC_REGS(DDB_REGS);
	boolean_t counting =
	    db_run_mode == STEP_COUNT ||
	    db_run_mode == STEP_RETURN ||
	    db_run_mode == STEP_CALLT;

	if (counting) {
		db_expr_t ins __unused; /* seems used but gcc thinks not */

		/*
		 * We are about to execute this instruction,
		 * so count it now.
		 */
		ins = db_get_value(pc, sizeof(int), FALSE);
		db_inst_count++;
		db_load_count += inst_load(ins);
		db_store_count += inst_store(ins);
#ifdef SOFTWARE_SSTEP
		/* XXX works on mips, but... */
		if (inst_branch(ins) || inst_call(ins)) {
			ins = db_get_value(next_instr_address(pc,1),
			    sizeof(int), FALSE);
			db_inst_count++;
			db_load_count += inst_load(ins);
			db_store_count += inst_store(ins);
		}
#endif /* SOFTWARE_SSTEP */
	}

	if (db_run_mode != STEP_CONTINUE) {
		db_set_single_step(DDB_REGS);
		return;
	}

	if (watchpt || db_find_breakpoint_here(pc)) {
		/*
		 * Step over breakpoint/watchpoint.
		 */
		db_run_mode = STEP_INVISIBLE;
		db_set_single_step(DDB_REGS);
	} else {
		db_set_breakpoints();
		db_set_watchpoints();
	}
}
/*
 * DEC -- decrement the addressed byte by one, setting N and Z from the
 * result.  A read-modify-write instruction: fetch the operand, modify
 * it, write it back.
 */
static int
inst_DEC(struct OPCodeDesc *op, struct machine *m)
{
	/*
	 * Fetch the operand into dbb before modifying it.  The original
	 * code skipped this load and decremented whatever stale value
	 * the previous instruction left in dbb; inst_INC and the memory
	 * path of inst_ASL both load first.
	 */
	inst_load(m);

	m->cpu.dbb--;

	/* N: bit 7 of the result. */
	if (m->cpu.dbb & BYTE_SIGN_MASK)
		SR_N_SET(m->cpu.reg.sr);
	else
		SR_N_CLR(m->cpu.reg.sr);

	/* Z: result is zero. */
	if (m->cpu.dbb == 0)
		SR_Z_SET(m->cpu.reg.sr);
	else
		SR_Z_CLR(m->cpu.reg.sr);

	inst_store(m);
	return 1;
}
/*
 * INC -- increment the addressed byte by one, setting N and Z from the
 * result.  Read-modify-write: load the operand, bump it, store it back.
 */
static int
inst_INC(struct OPCodeDesc *op, struct machine *m)
{
	inst_load(m);
	m->cpu.dbb++;

	/* N: bit 7 of the result. */
	if (m->cpu.dbb & BYTE_SIGN_MASK)
		SR_N_SET(m->cpu.reg.sr);
	else
		SR_N_CLR(m->cpu.reg.sr);

	/* Z: result is zero. */
	if (m->cpu.dbb == 0)
		SR_Z_SET(m->cpu.reg.sr);
	else
		SR_Z_CLR(m->cpu.reg.sr);

	inst_store(m);
	return 1;
}
/*
 * asl_shift -- shift one byte left by one bit, updating the status
 * register: C takes the old bit 7, N takes bit 7 of the result, Z is
 * set iff the result is zero.  Returns the shifted byte (masked to
 * 8 bits).  Shared by the accumulator and memory forms of ASL, which
 * previously duplicated this logic verbatim.
 */
static unsigned int
asl_shift(struct machine *m, unsigned int v)
{
	/* C: the bit shifted out (old bit 7). */
	if (v & BYTE_SIGN_MASK)
		SR_C_SET(m->cpu.reg.sr);
	else
		SR_C_CLR(m->cpu.reg.sr);

	v <<= 1;
	v &= 0xFF;

	/* N: bit 7 of the result. */
	if (v & BYTE_SIGN_MASK)
		SR_N_SET(m->cpu.reg.sr);
	else
		SR_N_CLR(m->cpu.reg.sr);

	/* Z: result is zero. */
	if (v == 0)
		SR_Z_SET(m->cpu.reg.sr);
	else
		SR_Z_CLR(m->cpu.reg.sr);

	return v;
}

/*
 * ASL -- arithmetic shift left.  Operates on the accumulator when the
 * addressing mode is ACC, otherwise performs a read-modify-write on
 * the addressed byte.
 */
static int
inst_ASL(struct OPCodeDesc *op, struct machine *m)
{
	if (op->am->mode == ACC) {
		m->cpu.reg.ac = asl_shift(m, m->cpu.reg.ac);
	} else {
		inst_load(m);
		m->cpu.dbb = asl_shift(m, m->cpu.dbb);
		inst_store(m);
	}
	return 1;
}
/*
 * trap --
 *	hppa kernel/user trap dispatcher.  Decodes the trap type,
 *	resolves the faulting space/address, and either repairs the
 *	fault (TLB miss / page fault via uvm_fault), reflects it to
 *	the process as a signal, or drops into the kernel debugger /
 *	panics.  "type" carries T_USER when the trap came from user
 *	mode; "frame" is the saved register state, which is also
 *	written back to for fixups (onfault recovery, rctr tracing).
 */
void
trap(int type, struct trapframe *frame)
{
	struct lwp *l;
	struct proc *p;
	struct pcb *pcbp;
	vaddr_t va;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t vftype;
	pa_space_t space;
	u_int opcode;
	int ret;
	const char *tts;
	int type_raw;
#ifdef DIAGNOSTIC
	extern int emergency_stack_start, emergency_stack_end;
#endif

	type_raw = type & ~T_USER;
	opcode = frame->tf_iir;

	/*
	 * Pick the faulting virtual address and space ID: instruction-side
	 * TLB misses fault on the instruction queue head, everything else
	 * on the data-side offset/space registers.
	 */
	if (type_raw == T_ITLBMISS || type_raw == T_ITLBMISSNA) {
		va = frame->tf_iioq_head;
		space = frame->tf_iisq_head;
		vftype = VM_PROT_READ; /* XXX VM_PROT_EXECUTE ??? */
	} else {
		va = frame->tf_ior;
		space = frame->tf_isr;
		/* A faulting store wants write access, anything else read. */
		vftype = inst_store(opcode) ? VM_PROT_WRITE : VM_PROT_READ;
	}

	if ((l = curlwp) == NULL)
		l = &lwp0;
	p = l->l_proc;

#ifdef DIAGNOSTIC
	/*
	 * If we are on the emergency stack, then we either got
	 * a fault on the kernel stack, or we're just handling
	 * a trap for the machine check handler (which also
	 * runs on the emergency stack).
	 *
	 * We *very crudely* differentiate between the two cases
	 * by checking the faulting instruction: if it is the
	 * function prologue instruction that stores the old
	 * frame pointer and updates the stack pointer, we assume
	 * that we faulted on the kernel stack.
	 *
	 * In this case, not completing that instruction will
	 * probably confuse backtraces in kgdb/ddb.  Completing
	 * it would be difficult, because we already faulted on
	 * that part of the stack, so instead we fix up the
	 * frame as if the function called has just returned.
	 * This has peculiar knowledge about what values are in
	 * what registers during the "normal gcc -g" prologue.
	 */
	if (&type >= &emergency_stack_start &&
	    &type < &emergency_stack_end &&
	    type != T_IBREAK && STWM_R1_D_SR0_SP(opcode)) {
		/* Restore the caller's frame pointer. */
		frame->tf_r3 = frame->tf_r1;
		/* Restore the caller's instruction offsets. */
		frame->tf_iioq_head = frame->tf_rp;
		frame->tf_iioq_tail = frame->tf_iioq_head + 4;
		goto dead_end;
	}
#endif /* DIAGNOSTIC */

#ifdef DEBUG
	frame_sanity_check(frame, l);
#endif /* DEBUG */

	/* If this is a trap, not an interrupt, reenable interrupts. */
	if (type_raw != T_INTERRUPT)
		mtctl(frame->tf_eiem, CR_EIEM);

	if (frame->tf_flags & TFF_LAST)
		l->l_md.md_regs = frame;

	/* Trap-name string; used only in diagnostics below. */
	if ((type & ~T_USER) > trap_types)
		tts = "reserved";
	else
		tts = trap_type[type & ~T_USER];

#ifdef TRAPDEBUG
	if (type_raw != T_INTERRUPT && type_raw != T_IBREAK)
		printf("trap: %d, %s for %x:%x at %x:%x, fp=%p, rp=%x\n",
		    type, tts, space, (u_int)va, frame->tf_iisq_head,
		    frame->tf_iioq_head, frame, frame->tf_rp);
	else if (type_raw == T_IBREAK)
		printf("trap: break instruction %x:%x at %x:%x, fp=%p\n",
		    break5(opcode), break13(opcode),
		    frame->tf_iisq_head, frame->tf_iioq_head, frame);

	{
		extern int etext;
		if (frame < (struct trapframe *)&etext) {
			printf("trap: bogus frame ptr %p\n", frame);
			goto dead_end;
		}
	}
#endif
	switch (type) {
	case T_NONEXIST:
	case T_NONEXIST|T_USER:
#if !defined(DDB) && !defined(KGDB)
		/* we've got screwed up by the central scrutinizer */
		panic ("trap: elvis has just left the building!");
		break;
#else
		goto dead_end;
#endif
	case T_RECOVERY|T_USER:
#ifdef USERTRACE
		/*
		 * Recovery-counter trace of a user process: log the PC
		 * stream and manually step past nullified instructions.
		 */
		for(;;) {
			if (frame->tf_iioq_head != rctr_next_iioq)
				printf("-%08x\nr %08x",
				    rctr_next_iioq - 4, frame->tf_iioq_head);
			rctr_next_iioq = frame->tf_iioq_head + 4;
			if (frame->tf_ipsw & PSW_N) {
				/* Advance the program counter. */
				frame->tf_iioq_head = frame->tf_iioq_tail;
				frame->tf_iioq_tail = frame->tf_iioq_head + 4;
				/* Clear flags. */
				frame->tf_ipsw &= ~(PSW_N|PSW_X|PSW_Y|PSW_Z|PSW_B|PSW_T|PSW_H|PSW_L);
				/* Simulate another trap. */
				continue;
			}
			break;
		}
		frame->tf_rctr = 0;
		break;
#endif /* USERTRACE */
	case T_RECOVERY:
#if !defined(DDB) && !defined(KGDB)
		/* XXX will implement later */
		printf ("trap: handicapped");
		break;
#else
		goto dead_end;
#endif

	case T_EMULATION | T_USER:
#ifdef FPEMUL
		hppa_fpu_emulate(frame, l);
#else /* !FPEMUL */
		/*
		 * We don't have FPU emulation, so signal the
		 * process with a SIGFPE.
		 */
		hppa_trapsignal_hack(l, SIGFPE, frame->tf_iioq_head);
#endif /* !FPEMUL */
		break;

#ifdef DIAGNOSTIC
	case T_EXCEPTION:
		panic("FPU/SFU emulation botch");

		/* these just can't happen ever */
	case T_PRIV_OP:
	case T_PRIV_REG:
		/* these just can't make it to the trap() ever */
	case T_HPMC:
	case T_HPMC | T_USER:
	case T_EMULATION:
#endif
	case T_IBREAK:
	case T_DATALIGN:
	case T_DBREAK:
	dead_end:
		/*
		 * Shared bail-out path: user-mode gets SIGILL, kernel
		 * mode tries the debugger and panics otherwise.
		 */
		if (type & T_USER) {
#ifdef DEBUG
			user_backtrace(frame, l, type);
#endif
			hppa_trapsignal_hack(l, SIGILL, frame->tf_iioq_head);
			break;
		}
		if (trap_kdebug(type, va, frame))
			return;
		else if (type == T_DATALIGN)
			panic ("trap: %s at 0x%x", tts, (u_int) va);
		else
			panic ("trap: no debugger for \"%s\" (%d)", tts, type);
		break;

	case T_IBREAK | T_USER:
	case T_DBREAK | T_USER:
		/* pass to user debugger */
		break;

	case T_EXCEPTION | T_USER: /* co-proc assist trap */
		hppa_trapsignal_hack(l, SIGFPE, va);
		break;

	case T_OVERFLOW | T_USER:
		hppa_trapsignal_hack(l, SIGFPE, va);
		break;

	case T_CONDITION | T_USER:
		break;

	case T_ILLEGAL | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGILL, va);
		break;

	case T_PRIV_OP | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGILL, va);
		break;

	case T_PRIV_REG | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGILL, va);
		break;

		/* these should never got here */
	case T_HIGHERPL | T_USER:
	case T_LOWERPL | T_USER:
		hppa_trapsignal_hack(l, SIGSEGV, va);
		break;

	case T_IPROT | T_USER:
	case T_DPROT | T_USER:
		hppa_trapsignal_hack(l, SIGSEGV, va);
		break;

	case T_DATACC:
	case T_USER | T_DATACC:
	case T_ITLBMISS:
	case T_USER | T_ITLBMISS:
	case T_DTLBMISS:
	case T_USER | T_DTLBMISS:
	case T_ITLBMISSNA:
	case T_USER | T_ITLBMISSNA:
	case T_DTLBMISSNA:
	case T_USER | T_DTLBMISSNA:
	case T_TLB_DIRTY:
	case T_USER | T_TLB_DIRTY:
		/* Page-fault path: resolve through UVM. */
		vm = p->p_vmspace;

		if (!vm) {
#ifdef TRAPDEBUG
			printf("trap: no vm, p=%p\n", p);
#endif
			goto dead_end;
		}

		/*
		 * it could be a kernel map for exec_map faults
		 */
		if (!(type & T_USER) && space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			map = &vm->vm_map;
			if (l->l_flag & L_SA) {
				l->l_savp->savp_faultaddr = va;
				l->l_flag |= L_SA_PAGEFAULT;
			}
		}

		va = hppa_trunc_page(va);

		if (map->pmap->pmap_space != space) {
#ifdef TRAPDEBUG
			printf("trap: space missmatch %d != %d\n",
			    space, map->pmap->pmap_space);
#endif
			/* actually dump the user, crap the kernel */
			goto dead_end;
		}

		/* Never call uvm_fault in interrupt context. */
		KASSERT(hppa_intr_depth == 0);

		ret = uvm_fault(map, va, 0, vftype);

#ifdef TRAPDEBUG
		printf("uvm_fault(%p, %x, %d, %d)=%d\n",
		    map, (u_int)va, 0, vftype, ret);
#endif

		if (map != kernel_map)
			l->l_flag &= ~L_SA_PAGEFAULT;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if uvm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (va >= (vaddr_t)vm->vm_maxsaddr + vm->vm_ssize) {
			if (ret == 0) {
				vsize_t nss = btoc(va - USRSTACK + PAGE_SIZE);
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (ret == EACCES)
				ret = EFAULT;
		}

		if (ret != 0) {
			if (type & T_USER) {
				printf("trapsignal: uvm_fault(%p, %x, %d, %d)=%d\n",
				    map, (u_int)va, 0, vftype, ret);
#ifdef DEBUG
				user_backtrace(frame, l, type);
#endif
				hppa_trapsignal_hack(l, SIGSEGV, frame->tf_ior);
			} else {
				/*
				 * Kernel fault with an onfault handler
				 * registered (copyin/copyout): redirect
				 * execution to the recovery address.
				 */
				if (l && l->l_addr->u_pcb.pcb_onfault) {
#ifdef PMAPDEBUG
					printf("trap: copyin/out %d\n",ret);
#endif
					pcbp = &l->l_addr->u_pcb;
					frame->tf_iioq_tail = 4 +
					    (frame->tf_iioq_head =
						pcbp->pcb_onfault);
					pcbp->pcb_onfault = 0;
					break;
				}
#if 1
				if (trap_kdebug (type, va, frame))
					return;
#else
				panic("trap: uvm_fault(%p, %x, %d, %d): %d",
				    map, va, 0, vftype, ret);
#endif
			}
		}
		break;

	case T_DATALIGN | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGBUS, va);
		break;

	case T_INTERRUPT:
	case T_INTERRUPT|T_USER:
		hppa_intr(frame);
		mtctl(frame->tf_eiem, CR_EIEM);
#if 0
		if (trap_kdebug (type, va, frame))
			return;
#endif
		break;

	case T_LOWERPL:
	case T_DPROT:
	case T_IPROT:
	case T_OVERFLOW:
	case T_CONDITION:
	case T_ILLEGAL:
	case T_HIGHERPL:
	case T_TAKENBR:
	case T_POWERFAIL:
	case T_LPMC:
	case T_PAGEREF:
	case T_DATAPID:
	case T_DATAPID | T_USER:
		if (0 /* T-chip */) {
			break;
		}
		/* FALLTHROUGH to unimplemented */
	default:
#if 1
		if (trap_kdebug (type, va, frame))
			return;
#endif
		panic ("trap: unimplemented \'%s\' (%d)", tts, type);
	}

	/* Returning to user mode: deliver pending signals etc. */
	if (type & T_USER)
		userret(l, l->l_md.md_regs->tf_iioq_head, 0);

#ifdef DEBUG
	frame_sanity_check(frame, l);
	if (frame->tf_flags & TFF_LAST && curlwp != NULL)
		frame_sanity_check(curlwp->l_md.md_regs, curlwp);
#endif /* DEBUG */
}