/*
 * Weak default implementation of vm_get_mem for the JitCpu wrapper.
 *
 * Python signature: vm_get_mem(addr, len) -> str
 * Reads `len` bytes of guest memory starting at `addr` and returns
 * them as a Python string.  Raises RuntimeError when the range is not
 * mapped in the VM memory manager.
 */
PyObject* __attribute__((weak)) vm_get_mem(JitCpu *self, PyObject* args)
{
	PyObject *arg_addr;
	PyObject *arg_len;
	uint64_t start;
	uint64_t length;
	char *buffer;
	PyObject *result;
	int status;

	if (!PyArg_ParseTuple(args, "OO", &arg_addr, &arg_len))
		return NULL;

	/* PyGetInt converts the Python object into the named integer
	 * variable (macro defined elsewhere; presumably bails out of the
	 * function on conversion failure — see its definition). */
	PyGetInt(arg_addr, start);
	PyGetInt(arg_len, length);

	status = vm_read_mem(&(((VmMngr*)self->pyvm)->vm_mngr), start, &buffer, length);
	if (status < 0) {
		PyErr_SetString(PyExc_RuntimeError, "cannot find address");
		return NULL;
	}

	/* vm_read_mem allocated `buffer`; hand a copy of the bytes to
	 * Python, then release the temporary. */
	result = PyString_FromStringAndSize(buffer, length);
	free(buffer);
	return result;
}
/*
 * vm_get_u64(addr) -> Python int
 *
 * Read 8 bytes of guest memory at `addr` and return them as an
 * unsigned 64-bit integer, byte-swapped by set_endian64() to match the
 * VM's configured endianness.  Raises TypeError on bad arguments and
 * RuntimeError when the address range is unmapped.
 */
PyObject* vm_get_u64(VmMngr* self, PyObject* args)
{
	PyObject *py_addr;
	uint64_t addr;
	PyObject *obj_out;
	char * buf_out;
	int ret;
	uint64_t value;

	if (!PyArg_ParseTuple(args, "O", &py_addr))
		RAISE(PyExc_TypeError,"Cannot parse arguments");
	PyGetInt_uint64_t(py_addr, addr);

	ret = vm_read_mem(&self->vm_mngr, addr, &buf_out, 8);
	if (ret < 0) {
		RAISE(PyExc_RuntimeError,"Cannot find address");
	}

	/* buf_out is a raw char buffer: copy the bytes out instead of
	 * dereferencing it through a uint64_t* — that cast violates
	 * strict aliasing and can fault on alignment-strict targets.
	 * (<string.h> is guaranteed to be pulled in by Python.h.) */
	memcpy(&value, buf_out, sizeof(value));
	value = set_endian64(&self->vm_mngr, value);

	obj_out = PyLong_FromUnsignedLongLong(value);
	free(buf_out);
	return obj_out;
}
/*
 * vm_get_mem(addr, len) -> bytes
 *
 * Read `len` bytes of guest memory starting at `addr` and return them
 * as a Python bytes object.  Raises TypeError on bad arguments,
 * ValueError when the requested size does not fit in size_t, and
 * RuntimeError when the address range is unmapped.
 */
PyObject* vm_get_mem(VmMngr* self, PyObject* args)
{
	PyObject *py_addr;
	PyObject *py_len;
	uint64_t addr;
	uint64_t size;
	size_t size_st;
	PyObject *obj_out;
	char * buf_out;
	int ret;

	if (!PyArg_ParseTuple(args, "OO", &py_addr, &py_len))
		RAISE(PyExc_TypeError,"Cannot parse arguments");
	PyGetInt_uint64_t(py_addr, addr);
	PyGetInt_uint64_t(py_len, size);

	/* Reject sizes that would be truncated by the size_t cast (only
	 * possible on hosts where size_t is narrower than 64 bits).  The
	 * previous check, `size > SIZE_MAX`, was tautologically false on
	 * LP64 and — worse — called exit(EXIT_FAILURE) on failure, which
	 * would kill the entire embedding interpreter; raise a normal
	 * Python exception instead. */
	size_st = (size_t) size;
	if ((uint64_t) size_st != size)
		RAISE(PyExc_ValueError, "Size too big");

	ret = vm_read_mem(&self->vm_mngr, addr, &buf_out, size_st);
	if (ret < 0) {
		RAISE(PyExc_RuntimeError,"Cannot find address");
	}

	/* Hand the bytes to Python, then free the temporary buffer
	 * allocated by vm_read_mem. */
	obj_out = PyBytes_FromStringAndSize(buf_out, size_st);
	free(buf_out);
	return obj_out;
}
/*
 * Emulate an intercepted descriptor-table instruction
 * (SGDT/SIDT/LGDT/LIDT) on VM-exit.
 *
 * Rebuilds the instruction's effective memory operand address from the
 * VMX instruction-information field (segment base + base register +
 * scaled index + displacement, truncated to the address-size mask),
 * then dispatches to the matching store/load helper and transfers the
 * {limit, base} blob to/from guest memory.  Returns VM_FAIL on error,
 * otherwise the result of emulate_done(), which advances guest RIP.
 */
int vmx_vmexit_resolve_dt()
{
   vmcs_exit_info_insn_dt_t *dt_insn;
   offset_t dt_addr;
   dt_reg_t dt_reg;
   raw64_t disp;
   uint64_t addr_msk, op_msk;
   int rc, sz, mode;

   /* This handler only emulates DT accesses for real-mode guests. */
   if(!__rmode()) {
      debug(VMX_DT, "DT intercept only while in real mode\n");
      return VM_FAIL;
   }

   vmcs_read(vm_exit_info.insn_info);
   vmcs_read(vm_exit_info.qualification);

   dt_insn = &vm_exit_info.insn_info.dt;
   dt_addr = 0;
   /* The exit qualification carries the operand displacement. */
   disp.sraw = vm_exit_info.qualification.sraw;
   /* addr encodes the address size: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit.
    * NOTE(review): for addr == 2 this evaluates 1ULL<<64, which is
    * undefined behavior in C — presumably unreachable because of the
    * real-mode check above, but confirm. */
   addr_msk = (1ULL<<(16*(1<<dt_insn->addr))) - 1;

   /* Effective address starts at the base of the operand's segment. */
   switch(dt_insn->seg) {
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_ES:
      vmcs_read(vm_state.es.base);
      dt_addr += vm_state.es.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_CS:
      vmcs_read(vm_state.cs.base);
      dt_addr += vm_state.cs.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_SS:
      vmcs_read(vm_state.ss.base);
      dt_addr += vm_state.ss.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_DS:
      vmcs_read(vm_state.ds.base);
      dt_addr += vm_state.ds.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_FS:
      vmcs_read(vm_state.fs.base);
      dt_addr += vm_state.fs.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_GS:
      vmcs_read(vm_state.gs.base);
      dt_addr += vm_state.gs.base.raw;
      break;
   }

   /* XXX: compute offset alone and check against segment limit */
   /* Add the base register, if the encoding says one is present. */
   if(!dt_insn->no_base) {
      int reg = GPR64_RAX - (dt_insn->base & GPR64_RAX);
      dt_addr += info->vm.cpu.gpr->raw[reg].raw & addr_msk;
   }

   /* Add the (optionally scaled) index register, if present. */
   if(!dt_insn->no_idx) {
      int reg = GPR64_RAX - (dt_insn->idx & GPR64_RAX);
      uint64_t val = info->vm.cpu.gpr->raw[reg].raw & addr_msk;
      if(dt_insn->scale)
         val *= (1ULL<<dt_insn->scale);
      dt_addr += val;
   }

   dt_addr += (disp.sraw & addr_msk);

   /* Memory operand size: 10 bytes (2-byte limit + 8-byte base) in
    * 64-bit mode, else 6 bytes (2 + 4); with a 16-bit operand size
    * only 24 bits of the base are significant. */
   mode = cpu_addr_sz();
   if(mode == 64) {
      op_msk = -1ULL;
      sz = 10;
   } else if(dt_insn->op == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_OP_SZ_16) {
      op_msk = (1ULL<<24) - 1;
      sz = 6;
   } else {
      op_msk = (1ULL<<32) - 1;
      sz = 6;
   }

   /* NOTE(review): "%X" truncates a 64-bit dt_addr in this debug
    * trace (diagnostics only) — verify debug()'s format handling. */
   debug(VMX_DT, "dt op @ 0x%X\n", dt_addr);

   if(dt_insn->type <
VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT) {
      /* SGDT/SIDT: store the masked register image into guest memory. */
      if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_SGDT)
         rc = __vmx_vmexit_sgdt(&dt_reg);
      else
         rc = __vmx_vmexit_sidt(&dt_reg);

      dt_reg.base.raw &= op_msk;

      if(!vm_write_mem(dt_addr, (uint8_t*)&dt_reg, sz)) {
         debug(VMX_DT, "could not write vm mem @0x%X\n", dt_addr);
         return VM_FAIL;
      }
   } else {
      /* LGDT/LIDT: load the descriptor-table register from guest memory. */
      if(!vm_read_mem(dt_addr, (uint8_t*)&dt_reg, sz)) {
         debug(VMX_DT, "could not read vm mem @0x%X\n", dt_addr);
         return VM_FAIL;
      }

      dt_reg.base.raw &= op_msk;

      if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT)
         rc = __vmx_vmexit_lgdt(&dt_reg);
      else
         rc = __vmx_vmexit_lidt(&dt_reg);
   }

   /* Advance guest RIP past the emulated instruction. */
   vmcs_read(vm_exit_info.insn_len);
   return emulate_done(rc, vm_exit_info.insn_len.raw);
}
/*
 * Read `len' bytes of tracee memory at `raddr' into the local buffer
 * `laddr' via process_vm_readv(2).  Returns the byte count read or -1
 * with errno set.  On ENOSYS the global flag
 * process_vm_readv_not_supported is latched so callers fall back to
 * the PTRACE_PEEKDATA path.
 */
static ssize_t vm_read_mem(const pid_t pid, void *const laddr,
			   const kernel_ulong_t raddr, const size_t len)
{
	const unsigned long truncated_raddr = raddr;

#if SIZEOF_LONG < SIZEOF_KERNEL_LONG_T
	/* Refuse remote addresses that do not fit in the host long. */
	if (raddr != (kernel_ulong_t) truncated_raddr) {
		errno = EIO;
		return -1;
	}
#endif

	const struct iovec local = {
		.iov_base = laddr,
		.iov_len = len
	};
	const struct iovec remote = {
		.iov_base = (void *) truncated_raddr,
		.iov_len = len
	};

	const ssize_t rc = process_vm_readv(pid, &local, 1, &remote, 1, 0);
	if (rc < 0 && errno == ENOSYS)
		process_vm_readv_not_supported = true;

	return rc;
}

/* Return true when `addr' cannot be a valid address given the
 * tracee's (possibly narrower) word size. */
static bool tracee_addr_is_invalid(kernel_ulong_t addr)
{
	return
#if ANY_WORDSIZE_LESS_THAN_KERNEL_LONG
		current_wordsize < sizeof(addr) && addr & ~(kernel_ulong_t) -1U;
#else
		false;
#endif
}

/* legacy method of copying from tracee */
static int umoven_peekdata(const int pid, kernel_ulong_t addr,
			   unsigned int len, void *laddr)
{
	unsigned int nread = 0;
	/* Byte offset of `addr' within its aligned machine word. */
	unsigned int residue = addr & (sizeof(long) - 1);

	while (len) {
		addr &= -sizeof(long);		/* aligned address */

		errno = 0;
		/* Fetch one whole word; the union exposes its bytes. */
		union {
			long val;
			char x[sizeof(long)];
		} u = { .val = ptrace(PTRACE_PEEKDATA, pid, addr, 0) };

		switch (errno) {
			case 0:
				break;
			case ESRCH: case EINVAL:
				/* these could be seen if the process is gone */
				return -1;
			case EFAULT: case EIO: case EPERM:
				/* address space is inaccessible */
				if (nread) {
					perror_msg("umoven: short read (%u < %u) @0x%" PRI_klx,
						   nread, nread + len, addr - nread);
				}
				return -1;
			default:
				/* all the rest is strange and should be reported */
				perror_msg("umoven: PTRACE_PEEKDATA pid:%d @0x%" PRI_klx,
					   pid, addr);
				return -1;
		}

		/* Copy the useful part of the word just fetched. */
		unsigned int m = MIN(sizeof(long) - residue, len);
		memcpy(laddr, &u.x[residue], m);

		residue = 0;	/* subsequent reads are word-aligned */
		addr += sizeof(long);
		laddr += m;	/* NOTE(review): void* arithmetic — GCC extension */
		nread += m;
		len -= m;
	}
	return 0;
}

/*
 * Copy `len' bytes of data from process `pid'
 * at address `addr' to our space at `our_addr'.
*/ int umoven(struct tcb *const tcp, kernel_ulong_t addr, unsigned int len, void *const our_addr) { if (tracee_addr_is_invalid(addr)) return -1; const int pid = tcp->pid; if (process_vm_readv_not_supported) return umoven_peekdata(pid, addr, len, our_addr); int r = vm_read_mem(pid, our_addr, addr, len); if ((unsigned int) r == len) return 0; if (r >= 0) { error_msg("umoven: short read (%u < %u) @0x%" PRI_klx, (unsigned int) r, len, addr); return -1; } switch (errno) { case ENOSYS: case EPERM: /* try PTRACE_PEEKDATA */ return umoven_peekdata(pid, addr, len, our_addr); case ESRCH: /* the process is gone */ return -1; case EFAULT: case EIO: /* address space is inaccessible */ return -1; default: /* all the rest is strange and should be reported */ perror_msg("process_vm_readv: pid:%d @0x%" PRI_klx, pid, addr); return -1; } } /* * Like umoven_peekdata but make the additional effort of looking * for a terminating zero byte. */ static int umovestr_peekdata(const int pid, kernel_ulong_t addr, unsigned int len, void *laddr) { unsigned int nread = 0; unsigned int residue = addr & (sizeof(long) - 1); void *const orig_addr = laddr; while (len) { addr &= -sizeof(long); /* aligned address */ errno = 0; union { unsigned long val; char x[sizeof(long)]; } u = { .val = ptrace(PTRACE_PEEKDATA, pid, addr, 0) }; switch (errno) { case 0: break; case ESRCH: case EINVAL: /* these could be seen if the process is gone */ return -1; case EFAULT: case EIO: case EPERM: /* address space is inaccessible */ if (nread) { perror_msg("umovestr: short read (%d < %d) @0x%" PRI_klx, nread, nread + len, addr - nread); } return -1; default: /* all the rest is strange and should be reported */ perror_msg("umovestr: PTRACE_PEEKDATA pid:%d @0x%" PRI_klx, pid, addr); return -1; } unsigned int m = MIN(sizeof(long) - residue, len); memcpy(laddr, &u.x[residue], m); while (residue < sizeof(long)) if (u.x[residue++] == '\0') return (laddr - orig_addr) + residue; residue = 0; addr += sizeof(long); laddr += m; 
nread += m; len -= m; } return 0; } /* * Like `umove' but make the additional effort of looking * for a terminating zero byte. * * Returns < 0 on error, strlen + 1 if NUL was seen, * else 0 if len bytes were read but no NUL byte seen. * * Note: there is no guarantee we won't overwrite some bytes * in laddr[] _after_ terminating NUL (but, of course, * we never write past laddr[len-1]). */ int umovestr(struct tcb *const tcp, kernel_ulong_t addr, unsigned int len, char *laddr) { if (tracee_addr_is_invalid(addr)) return -1; const int pid = tcp->pid; if (process_vm_readv_not_supported) return umovestr_peekdata(pid, addr, len, laddr); const size_t page_size = get_pagesize(); const size_t page_mask = page_size - 1; unsigned int nread = 0; while (len) { /* * Don't cross pages, otherwise we can get EFAULT * and fail to notice that terminating NUL lies * in the existing (first) page. */ unsigned int chunk_len = len > page_size ? page_size : len; unsigned int end_in_page = (addr + chunk_len) & page_mask; if (chunk_len > end_in_page) /* crosses to the next page */ chunk_len -= end_in_page; int r = vm_read_mem(pid, laddr, addr, chunk_len); if (r > 0) { char *nul_addr = memchr(laddr, '\0', r); if (nul_addr) return (nul_addr - laddr) + 1; addr += r; laddr += r; nread += r; len -= r; continue; } switch (errno) { case ENOSYS: case EPERM: /* try PTRACE_PEEKDATA */ if (!nread) return umovestr_peekdata(pid, addr, len, laddr); ATTRIBUTE_FALLTHROUGH; case EFAULT: case EIO: /* address space is inaccessible */ if (nread) perror_msg("umovestr: short read (%d < %d) @0x%" PRI_klx, nread, nread + len, addr - nread); return -1; case ESRCH: /* the process is gone */ return -1; default: /* all the rest is strange and should be reported */ perror_msg("process_vm_readv: pid:%d @0x%" PRI_klx, pid, addr); return -1; } } return 0; }