/*
 * vmcmd_readvn: execute an exec vmcmd by reading the file contents
 * described by the command (cmd->ev_vp at cmd->ev_offset, cmd->ev_len
 * bytes) directly into the process' address space at cmd->ev_addr via
 * vn_rdwr(), then tightening the mapping's protection down to the
 * protection the vmcmd actually asked for (cmd->ev_prot).
 *
 * NOTE(review): this assumes the destination region was pre-mapped
 * writable (VM_PROT_ALL) by an earlier vmcmd — the comment below says
 * as much, but the mapping itself is not visible here.
 *
 * Returns 0 on success, or an errno from vn_rdwr() or
 * uvm_map_protect().
 */
int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	/* Copy the file data into userspace; IO_UNIT makes the
	 * transfer atomic with respect to other vnode I/O. */
	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
	/* Let PaX restrict the requested and maximum protections. */
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it. however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection. ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		/* Lower the maximum protection first (last arg true
		 * selects set-max mode in uvm_map_protect()). */
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    maxprot, true);
		if (error)
			return (error);
	}

	if (prot != maxprot) {
		/* Then set the current protection (set-max false);
		 * must not exceed the max established above. */
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    prot, false);
		if (error)
			return (error);
	}

	return 0;
}
/*
 * process_domem: transfer data between a tracing lwp (curl) and the
 * address space of a traced lwp (l), as described by `uio' (direction
 * in uio->uio_rw, target address in uio->uio_offset, length in
 * uio->uio_resid).  Used by the ptrace/procfs memory-access paths.
 *
 * NOTE(review): `curl' is not referenced in this body — presumably the
 * caller has already performed any permission checks against the
 * tracer; confirm at the call sites.
 *
 * Returns 0 on success, EFAULT if the traced lwp is exiting or its
 * vmspace is already dead, or an errno from uvm_io().
 */
int
process_domem(struct lwp *curl /*tracer*/, struct lwp *l /*traced*/,
    struct uio *uio)
{
	struct proc *p = l->l_proc;	/* traced */
	struct vmspace *vm;
	int error;
	size_t len;
#ifdef PMAP_NEED_PROCWR
	vaddr_t addr;
#endif

	error = 0;
	len = uio->uio_resid;

	/* Nothing to transfer. */
	if (len == 0)
		return (0);

#ifdef PMAP_NEED_PROCWR
	/* Remember the start address now: uvm_io() will consume the
	 * uio, so uio_offset will have advanced afterwards. */
	addr = uio->uio_offset;
#endif

	vm = p->p_vmspace;

	/*
	 * Under misc_lock, verify the target is still usable and take
	 * a reference on the vmspace so it cannot be torn down while
	 * uvm_io() runs.  The bare vm_refcnt++ (instead of a proper
	 * addref routine) is flagged XXX in the original.
	 */
	mutex_enter(&vm->vm_map.misc_lock);
	if ((l->l_flag & LW_WEXIT) || vm->vm_refcnt < 1)
		error = EFAULT;
	if (error == 0)
		p->p_vmspace->vm_refcnt++;  /* XXX */
	mutex_exit(&vm->vm_map.misc_lock);
	if (error != 0)
		return (error);

	/* Do the actual copy, then drop the reference taken above. */
	error = uvm_io(&vm->vm_map, uio);
	uvmspace_free(vm);

#ifdef PMAP_NEED_PROCWR
	/*
	 * We wrote into the traced process; sync the written range
	 * with the instruction cache in case it contains code (e.g.
	 * breakpoints planted by the debugger).
	 */
	if (error == 0 && uio->uio_rw == UIO_WRITE)
		pmap_procwr(p, addr, len);
#endif
	return (error);
}