int
suulong(user_addr_t addr, uint64_t uword)
{
	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);
	} else {
		return(suiword(addr, (uint32_t)uword));
	}
}
/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
int
sulong(user_addr_t addr, int64_t word)
{
	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);
	} else {
		return(suiword(addr, (long)word));
	}
}
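/*
 * Illustrative sketch, not taken from the source above: sulong()/suulong()
 * let machine-independent code store a pointer-sized value into a user task
 * without knowing whether that task uses the 32-bit or the 64-bit ABI.  The
 * stand-alone program below mocks the kernel pieces (copyout() and the
 * IS_64BIT_PROCESS() test) with hypothetical names -- copyout_mock(),
 * store_user_long(), target_is_64bit -- to show the same width-dispatch
 * pattern in user space.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int target_is_64bit = 1;		/* plays the IS_64BIT_PROCESS() role */

/* Mock copyout(): the "user address" is just a local buffer here. */
static int
copyout_mock(const void *ksrc, void *udst, size_t len)
{
	memcpy(udst, ksrc, len);
	return 0;			/* 0 == success, as with copyout() */
}

/*
 * Same shape as sulong()/suulong(): write the value with the width the
 * target ABI expects, truncating to 32 bits for a 32-bit task.
 */
static int
store_user_long(void *udst, int64_t word)
{
	if (target_is_64bit)
		return copyout_mock(&word, udst, sizeof(word)) == 0 ? 0 : -1;
	uint32_t narrow = (uint32_t)word;
	return copyout_mock(&narrow, udst, sizeof(narrow)) == 0 ? 0 : -1;
}

int
main(void)
{
	unsigned char ubuf[8] = { 0 };

	store_user_long(ubuf, 0x1122334455667788LL);
	printf("first byte of 'user' buffer: 0x%02x\n", ubuf[0]);
	return 0;
}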
/*
 * Trap is called from locore to handle most types of processor traps.
 */
void
trap(unsigned int status, unsigned int cause, vaddr_t vaddr, vaddr_t opc,
    struct trapframe *frame)
{
	int type;
	struct lwp *l = curlwp;
	struct proc *p = curproc;
	vm_prot_t ftype;
	ksiginfo_t ksi;
	struct frame *fp;
	extern void fswintrberr(void);

	KSI_INIT_TRAP(&ksi);

	uvmexp.traps++;

	if ((type = TRAPTYPE(cause)) >= LENGTH(trap_type))
		panic("trap: unknown trap type %d", type);

	if (USERMODE(status)) {
		type |= T_USER;
		LWP_CACHE_CREDS(l, p);
	}

	/* Enable interrupts just as they were before the trap. */
	_splset(status & AVR32_STATUS_IMx);

	switch (type) {
	default:
	dopanic:
		(void)splhigh();
		printf("trap: %s in %s mode\n",
		    trap_type[TRAPTYPE(cause)],
		    USERMODE(status) ? "user" : "kernel");
		printf("status=0x%x, cause=0x%x, epc=%#lx, vaddr=%#lx\n",
		    status, cause, opc, vaddr);
		if (curlwp != NULL) {
			fp = (struct frame *)l->l_md.md_regs;
			printf("pid=%d cmd=%s usp=0x%x ",
			    p->p_pid, p->p_comm, (int)fp->f_regs[_R_SP]);
		} else
			printf("curlwp == NULL ");
		printf("ksp=%p\n", &status);
#if defined(DDB)
		kdb_trap(type, (mips_reg_t *) frame);
		/* XXX force halt XXX */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern mips_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (mips_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (kgdb_trap(type, &ddb_regs)) {
				((mips_reg_t *)frame)[21] = f->f_regs[_R_PC];
				return;
			}
		}
#else
		panic("trap: dopanic: notyet");
#endif
		/*NOTREACHED*/
	case T_TLB_MOD:
		panic("trap: T_TLB_MOD: notyet");
#if notyet
		if (KERNLAND(vaddr)) {
			pt_entry_t *pte;
			unsigned entry;
			paddr_t pa;

			pte = kvtopte(vaddr);
			entry = pte->pt_entry;
			if (!avr32_pte_v(entry) /*|| (entry & mips_pg_m_bit())*/) {
				panic("ktlbmod: invalid pte");
			}
			if (entry & avr32_pte_ropage_bit()) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernelfault;
			}
			entry |= mips_pg_m_bit();	/* XXXAVR32 Do it on tlbarlo/ tlbarhi? */
			pte->pt_entry = entry;
			vaddr &= ~PGOFSET;
			MachTLBUpdate(vaddr, entry);
			pa = avr32_tlbpfn_to_paddr(entry);
			if (!IS_VM_PHYSADDR(pa)) {
				printf("ktlbmod: va %#lx pa %#llx\n",
				    vaddr, (long long)pa);
				panic("ktlbmod: unmanaged page");
			}
			pmap_set_modified(pa);
			return;	/* KERN */
		}
		/*FALLTHROUGH*/
#endif
	case T_TLB_MOD+T_USER:
		panic("trap: T_TLB_MOD+T_USER: notyet");
#if notyet
	    {
		pt_entry_t *pte;
		unsigned entry;
		paddr_t pa;
		pmap_t pmap;

		pmap = p->p_vmspace->vm_map.pmap;
		if (!(pte = pmap_segmap(pmap, vaddr)))
			panic("utlbmod: invalid segmap");
		pte += (vaddr >> PGSHIFT) & (NPTEPG - 1);
		entry = pte->pt_entry;
		if (!avr32_pte_v(entry))
			panic("utlbmod: invalid pte");
		if (entry & avr32_pte_ropage_bit()) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto pagefault;
		}
		/* entry |= mips_pg_m_bit();  XXXAVR32 Do it on tlbarlo/ tlbarhi? */
		pte->pt_entry = entry;
		vaddr = (vaddr & ~PGOFSET) |
		    (pmap->pm_asid << AVR32_TLB_PID_SHIFT);
		MachTLBUpdate(vaddr, entry);
		pa = avr32_tlbpfn_to_paddr(entry);
		if (!IS_VM_PHYSADDR(pa)) {
			printf("utlbmod: va %#lx pa %#llx\n",
			    vaddr, (long long)pa);
			panic("utlbmod: unmanaged page");
		}
		pmap_set_modified(pa);
		if (type & T_USER)
			userret(l);
		return;	/* GEN */
	    }
#endif
	case T_TLB_LD_MISS:
		panic("trap: T_TLB_LD_MISS: notyet");
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE;
		if (KERNLAND(vaddr))
			goto kernelfault;
		panic("trap: T_TLB_ST_MISS: notyet");
#if notyet
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (l->l_addr->u_pcb.pcb_onfault == (void *)fswintrberr) {
			frame->tf_regs[TF_EPC] = (int)fswintrberr;
			return;	/* KERN */
		}
		goto pagefault;
#endif
	case T_TLB_LD_MISS+T_USER:
		panic("trap: T_TLB_LD_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_READ;
		goto pagefault;
#endif
	case T_TLB_ST_MISS+T_USER:
		panic("trap: T_TLB_ST_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_WRITE;
#endif
	pagefault: ;
	    {
		vaddr_t va;
		struct vmspace *vm;
		struct vm_map *map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page(vaddr);

		if ((l->l_flag & LW_SA) && (~l->l_pflag & LP_SA_NOBLOCK)) {
			l->l_savp->savp_faultaddr = (vaddr_t)vaddr;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}

		if (p->p_emul->e_fault)
			rv = (*p->p_emul->e_fault)(p, va, ftype);
		else
			rv = uvm_fault(map, va, ftype);
#ifdef VMFAULT_TRACE
		printf(
		    "uvm_fault(%p (pmap %p), %lx (0x%x), %d) -> %d at pc %p\n",
		    map, vm->vm_map.pmap, va, vaddr, ftype, rv, (void *)opc);
#endif
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((void *)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		if (rv == 0) {
			if (type & T_USER) {
				userret(l);
			}
			return;	/* GEN */
		}
		if ((type & T_USER) == 0)
			goto copyfault;
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : (uid_t)-1);
			ksi.ksi_signo = SIGKILL;
			ksi.ksi_code = 0;
		} else {
			if (rv == EACCES) {
				ksi.ksi_signo = SIGBUS;
				ksi.ksi_code = BUS_OBJERR;
			} else {
				ksi.ksi_signo = SIGSEGV;
				ksi.ksi_code = SEGV_MAPERR;
			}
		}
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_addr = (void *)vaddr;
		break;	/* SIGNAL */
	    }
	kernelfault: ;
	    {
		vaddr_t va;
		int rv;

		va = trunc_page(vaddr);
		rv = uvm_fault(kernel_map, va, ftype);
		if (rv == 0)
			return;	/* KERN */
		/*FALLTHROUGH*/
	    }
	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to CPU */
	copyfault:
		panic("trap: copyfault: notyet");
#if notyet
		if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		frame->tf_regs[TF_EPC] =
		    (intptr_t)l->l_addr->u_pcb.pcb_onfault;
		return;	/* KERN */
#endif
#if notyet
	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to CPU */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to CPU */
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGSEGV; /* XXX */
		ksi.ksi_addr = (void *)vaddr;
		ksi.ksi_code = SEGV_MAPERR; /* XXX */
		break;	/* SIGNAL */

	case T_BREAK:
		panic("trap: T_BREAK: notyet");
#if defined(DDB)
		kdb_trap(type, (avr32_reg_t *) frame);
		return;	/* KERN */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern avr32_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (avr32_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (!kgdb_trap(type, &ddb_regs))
				printf("kgdb: ignored %s\n",
				    trap_type[TRAPTYPE(cause)]);
			else
				((avr32_reg_t *)frame)[21] = f->f_regs[_R_PC];
			return;
		}
#else
		goto dopanic;
#endif
	case T_BREAK+T_USER:
	    {
		vaddr_t va;
		uint32_t instr;
		int rv;

		/* compute address of break instruction */
		va = (DELAYBRANCH(cause)) ? opc + sizeof(int) : opc;

		/* read break instruction */
		instr = fuiword((void *)va);

		if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) {
			ksi.ksi_trap = type & ~T_USER;
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_addr = (void *)va;
			ksi.ksi_code = TRAP_TRACE;
			break;
		}
		/*
		 * Restore original instruction and clear BP
		 */
		rv = suiword((void *)va, l->l_md.md_ss_instr);
		if (rv < 0) {
			vaddr_t sa, ea;
			sa = trunc_page(va);
			ea = round_page(va + sizeof(int) - 1);
			rv = uvm_map_protect(&p->p_vmspace->vm_map,
			    sa, ea, VM_PROT_ALL, false);
			if (rv == 0) {
				rv = suiword((void *)va, l->l_md.md_ss_instr);
				(void)uvm_map_protect(&p->p_vmspace->vm_map,
				    sa, ea, VM_PROT_READ|VM_PROT_EXECUTE,
				    false);
			}
		}
		mips_icache_sync_all();		/* XXXJRT -- necessary? */
		mips_dcache_wbinv_all();	/* XXXJRT -- necessary? */
		if (rv < 0)
			printf("Warning: can't restore instruction at 0x%lx: 0x%x\n",
			    l->l_md.md_ss_addr, l->l_md.md_ss_instr);
		l->l_md.md_ss_addr = 0;
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_addr = (void *)va;
		ksi.ksi_code = TRAP_BRKPT;
		break;	/* SIGNAL */
	    }
	case T_RES_INST+T_USER:
	case T_COP_UNUSABLE+T_USER:
#if !defined(SOFTFLOAT) && !defined(NOFPU)
		if ((cause & MIPS_CR_COP_ERR) == 0x10000000) {
			struct frame *f;

			f = (struct frame *)l->l_md.md_regs;
			savefpregs(fpcurlwp);		/* yield FPA */
			loadfpregs(l);			/* load FPA */
			fpcurlwp = l;
			l->l_md.md_flags |= MDP_FPUSED;
			f->f_regs[_R_SR] |= MIPS_SR_COP_1_BIT;
		} else
#endif
		{
			MachEmulateInst(status, cause, opc, l->l_md.md_regs);
		}
		userret(l);
		return;	/* GEN */
	case T_FPE+T_USER:
		panic("trap: T_FPE+T_USER: notyet");
#if defined(SOFTFLOAT)
		MachEmulateInst(status, cause, opc, l->l_md.md_regs);
#elif !defined(NOFPU)
		MachFPTrap(status, cause, opc, l->l_md.md_regs);
#endif
		userret(l);
		return;	/* GEN */
	case T_OVFLOW+T_USER:
	case T_TRAP+T_USER:
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGFPE;
		fp = (struct frame *)l->l_md.md_regs;
		ksi.ksi_addr = (void *)fp->f_regs[_R_PC];
		ksi.ksi_code = FPE_FLTOVF; /* XXX */
		break;	/* SIGNAL */
#endif
	}
	panic("trap: post-switch: notyet");
#if notyet
	fp = (struct frame *)l->l_md.md_regs;
	fp->f_regs[_R_CAUSE] = cause;
	fp->f_regs[_R_BADVADDR] = vaddr;
	(*p->p_emul->e_trapsignal)(l, &ksi);
	if ((type & T_USER) == 0)
		panic("trapsignal");
	userret(l);
#endif
	return;
}
/*
 * Code that the child process
 * executes to implement the command
 * of the parent process in tracing.
 */
procxmt()
{
	register int i;
	register *p;
	register struct text *xp;

	if (ipc.ip_lock != u.u_procp->p_pid)
		return (0);
	u.u_procp->p_slptime = 0;
	i = ipc.ip_req;
	ipc.ip_req = 0;
	switch (i) {

	case PT_READ_I:			/* read the child's text space */
		if (!useracc((caddr_t)ipc.ip_addr, 4, B_READ))
			goto error;
		ipc.ip_data = fuiword((caddr_t)ipc.ip_addr);
		break;

	case PT_READ_D:			/* read the child's data space */
		if (!useracc((caddr_t)ipc.ip_addr, 4, B_READ))
			goto error;
		ipc.ip_data = fuword((caddr_t)ipc.ip_addr);
		break;

	case PT_READ_U:			/* read the child's u. */
		i = (int)ipc.ip_addr;
		if (i<0 || i >= ctob(UPAGES))
			goto error;
		ipc.ip_data = *(int *)PHYSOFF(&u, i);
		break;

	case PT_WRITE_I:		/* write the child's text space */
		/*
		 * If text, must assure exclusive use
		 */
		if (xp = u.u_procp->p_textp) {
			if (xp->x_count!=1 || xp->x_iptr->i_mode&ISVTX)
				goto error;
			xp->x_flag |= XTRC;
		}
		i = -1;
		if ((i = suiword((caddr_t)ipc.ip_addr, ipc.ip_data)) < 0) {
			if (chgprot((caddr_t)ipc.ip_addr, RW) &&
			    chgprot((caddr_t)ipc.ip_addr+(sizeof(int)-1), RW))
				i = suiword((caddr_t)ipc.ip_addr, ipc.ip_data);
			(void) chgprot((caddr_t)ipc.ip_addr, RO);
			(void) chgprot((caddr_t)ipc.ip_addr+(sizeof(int)-1), RO);
		}
		if (i < 0)
			goto error;
		if (xp)
			xp->x_flag |= XWRIT;
		break;

	case PT_WRITE_D:		/* write the child's data space */
		if (suword((caddr_t)ipc.ip_addr, 0) < 0)
			goto error;
		(void) suword((caddr_t)ipc.ip_addr, ipc.ip_data);
		break;

	case PT_WRITE_U:		/* write the child's u. */
		i = (int)ipc.ip_addr;
		p = (int *)PHYSOFF(&u, i);
		for (i=0; i<NIPCREG; i++)
			if (p == &u.u_ar0[ipcreg[i]])
				goto ok;
		if (p == &u.u_ar0[PS]) {
			ipc.ip_data |= PSL_USERSET;
			ipc.ip_data &= ~PSL_USERCLR;
			goto ok;
		}
		goto error;

	ok:
		*p = ipc.ip_data;
		break;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		if ((int)ipc.ip_addr != 1)
			u.u_ar0[PC] = (int)ipc.ip_addr;
		if ((unsigned)ipc.ip_data > NSIG)
			goto error;
		u.u_procp->p_cursig = ipc.ip_data;	/* see issig */
		if (i == PT_STEP)
			u.u_ar0[PS] |= PSL_T;
		wakeup((caddr_t)&ipc);
		return (1);

	case PT_KILL:			/* kill the child process */
		wakeup((caddr_t)&ipc);
		exit(u.u_procp->p_cursig);

	default:
	error:
		ipc.ip_req = -1;
	}
	wakeup((caddr_t)&ipc);
	return (0);
}
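/*
 * Illustrative sketch, not taken from the source above: procxmt() is the
 * child-side half of the ptrace() rendezvous -- the parent records a request
 * in the shared ipc structure and sleeps, the stopped child runs procxmt()
 * to carry the request out in its own address space, then wakes the parent
 * with the result.  The parent-side sketch below uses the classic BSD
 * <sys/ptrace.h> request names; the address passed to PT_READ_D is purely
 * illustrative, error handling is omitted, and exact prototypes and
 * semantics vary between historical and modern systems.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: ask to be traced, then stop for the parent. */
		ptrace(PT_TRACE_ME, 0, 0, 0);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(pid, &status, 0);	/* child is now stopped */

	/*
	 * PT_READ_D: in the old scheme this becomes ipc.ip_req in the child,
	 * which services it with fuword() and returns the word via ip_data.
	 */
	long word = ptrace(PT_READ_D, pid, (caddr_t)0x1000, 0);
	printf("word read from the child: %ld\n", word);

	/* PT_CONTINUE with addr == 1 resumes at the stopped PC, signal 0. */
	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
	waitpid(pid, &status, 0);
	return 0;
}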