/*
 * Single-copy RDMA "get" using Linux cross-memory attach (CMA).
 *
 * Copies min(local, remote segment length) bytes from the peer process
 * (identified by endpoint->seg_ds.seg_cpid) directly into the local
 * destination segment via process_vm_readv(), then completes the fragment.
 *
 * Returns OPAL_SUCCESS on a full transfer, OPAL_ERROR otherwise.
 *
 * NOTE(review): a short-but-successful read (0 < ret < size) is treated as
 * a hard error here rather than retried; process_vm_readv() may legally
 * return a partial count at iovec granularity — confirm a single-iovec
 * transfer can never be split before relying on this.
 */
int mca_btl_vader_get (struct mca_btl_base_module_t *btl, struct mca_btl_base_endpoint_t *endpoint,
                       struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_base_segment_t *src = des->des_remote;
    mca_btl_base_segment_t *dst = des->des_local;
    /* Never read more than either side has room for. */
    const size_t size = min(dst->seg_len, src->seg_len);
    struct iovec src_iov = {.iov_base = src->seg_addr.pval, .iov_len = size};
    struct iovec dst_iov = {.iov_base = dst->seg_addr.pval, .iov_len = size};
    ssize_t ret;

    /* Pull the remote segment straight into local memory; seg_cpid is the
     * peer's pid as published in its shared-memory segment descriptor. */
    ret = process_vm_readv (endpoint->seg_ds.seg_cpid, &dst_iov, 1, &src_iov, 1, 0);
    if (ret != (ssize_t)size) {
        opal_output(0, "Read %ld, expected %lu, errno = %d\n", (long)ret, (unsigned long)size, errno);
        return OPAL_ERROR;
    }

    /* always call the callback function */
    frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
    frag->endpoint = endpoint;
    mca_btl_vader_frag_complete (frag);

    return OPAL_SUCCESS;
}
/**
 * Return the value of the word at the given @address in the @tracee's
 * memory space.  The caller must test errno to check if an error
 * occured.
 *
 * Fast path: process_vm_readv() (no ptrace-stop required).  Slow path:
 * PTRACE_PEEKDATA, one word at a time.
 */
word_t peek_mem(const Tracee *tracee, word_t address)
{
	word_t result = 0;

#if defined(HAVE_PROCESS_VM)
	int status;
	struct iovec local;
	struct iovec remote;

	local.iov_base = &result;
	local.iov_len = sizeof_word(tracee);

	remote.iov_base = (void *)address;
	remote.iov_len = sizeof_word(tracee);

	errno = 0;
	status = process_vm_readv(tracee->pid, &local, 1, &remote, 1, 0);
	/* NOTE(review): status > 0 is accepted even if fewer than
	 * sizeof_word(tracee) bytes were copied — confirm a single-iovec
	 * read cannot be partial here. */
	if (status > 0)
		return result;
	/* Fallback to ptrace if something went wrong. */
#endif
	errno = 0;
	result = (word_t) ptrace(PTRACE_PEEKDATA, tracee->pid, address, NULL);

	/* Use only the 32 LSB when running a 32-bit process on a
	 * 64-bit kernel.  */
	if (is_32on64_mode(tracee))
		result &= 0xFFFFFFFF;

	return result;
}
/**
 * Copy @size bytes to the buffer @dest_tracer from the address
 * @src_tracee within the memory space of the @tracee process.  It
 * returns -errno if an error occured, otherwise 0.
 *
 * Fast path: one process_vm_readv() call.  Slow path: word-by-word
 * PTRACE_PEEKDATA, copying the trailing partial word byte-by-byte so
 * nothing beyond @dest_tracer is overwritten.
 */
int read_data(const Tracee *tracee, void *dest_tracer, word_t src_tracee, word_t size)
{
	word_t *src  = (word_t *)src_tracee;
	word_t *dest = (word_t *)dest_tracer;

	word_t nb_trailing_bytes;
	word_t nb_full_words;
	word_t word, i, j;

	uint8_t *last_src_word;
	uint8_t *last_dest_word;

#if defined(HAVE_PROCESS_VM)
	long status;
	struct iovec local;
	struct iovec remote;

	local.iov_base = dest;
	local.iov_len  = size;

	remote.iov_base = src;
	remote.iov_len  = size;

	status = process_vm_readv(tracee->pid, &local, 1, &remote, 1, 0);
	if (status == size)
		return 0;
	/* Fallback to ptrace if something went wrong. */
#endif /* HAVE_PROCESS_VM */

	nb_trailing_bytes = size % sizeof(word_t);
	nb_full_words = (size - nb_trailing_bytes) / sizeof(word_t);

	/* Copy one word by one word, except for the last one.  */
	for (i = 0; i < nb_full_words; i++) {
		/* Fix: PTRACE_PEEKDATA can legitimately return -1, so errno
		 * must be cleared before the call — otherwise a stale errno
		 * (e.g. from the process_vm_readv fallback above) is
		 * misreported as an EFAULT. */
		errno = 0;
		word = ptrace(PTRACE_PEEKDATA, tracee->pid, src + i, NULL);
		if (errno != 0) {
			notice(tracee, WARNING, SYSTEM, "ptrace(PEEKDATA)");
			return -EFAULT;
		}
		store_word(&dest[i], word);
	}

	/* Fix: when @size is word-aligned there is no trailing partial word;
	 * the extra PEEKDATA below would read one word past the requested
	 * range and could fault spuriously at the end of a mapping. */
	if (nb_trailing_bytes == 0)
		return 0;

	/* Copy the bytes from the last word carefully since we have
	 * to not overwrite the bytes lying beyond @dest_tracer.  */
	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, tracee->pid, src + i, NULL);
	if (errno != 0) {
		notice(tracee, WARNING, SYSTEM, "ptrace(PEEKDATA)");
		return -EFAULT;
	}

	last_dest_word = (uint8_t *)&dest[i];
	last_src_word  = (uint8_t *)&word;

	for (j = 0; j < nb_trailing_bytes; j++)
		last_dest_word[j] = last_src_word[j];

	return 0;
}
/*
 * Copy @count bytes from @source inside the address space of process
 * @tracee into the local buffer @dest using process_vm_readv().
 * On failure prints "Vm_read" via perror() and exits the program.
 */
void peek_data_cross (pid_t tracee, const void * source, size_t count, void * dest){
	struct iovec here  = { .iov_base = dest,            .iov_len = count };
	struct iovec there = { .iov_base = (void *) source, .iov_len = count };

	if (process_vm_readv(tracee, &here, 1, &there, 1, 0) < 0) {
		perror("Vm_read");
		exit(1);
	}
}
static int smr_fetch_result(struct smr_ep *ep, struct smr_region *peer_smr, struct iovec *iov, size_t iov_count, const struct fi_rma_ioc *rma_ioc, size_t rma_count, enum fi_datatype datatype, size_t total_len) { int ret, i; struct iovec rma_iov[SMR_IOV_LIMIT]; for (i = 0; i < rma_count; i++) { rma_iov[i].iov_base = (void *) rma_ioc[i].addr; rma_iov[i].iov_len = rma_ioc[i].count * ofi_datatype_size(datatype); } ret = process_vm_readv(peer_smr->pid, iov, iov_count, rma_iov, rma_count, 0); if (ret != total_len) { if (ret < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "CMA write error\n"); return -errno; } else { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "partial read occurred\n"); return -FI_EIO; } } return 0; }
/*
 * Minimal process_vm_readv() demo: attempts to read 4 bytes from a
 * hard-coded address (0x10) in a hard-coded pid (24617).  Prints "pass"
 * on success, "fail" plus the return value otherwise.
 */
int main(void)
{
    struct iovec local[1];
    struct iovec remote[1];
    char buf1[4];
    ssize_t nread;

    local[0].iov_base = buf1;
    local[0].iov_len = 4;
    /* remote[0].iov_base = (void *) 0xbffce818; */
    remote[0].iov_base = (void *) 0x10;
    remote[0].iov_len = 4;

    nread = process_vm_readv(24617, local, 1, remote, 1, 0);
    /* Fix: only 4 bytes are requested, so a successful read returns 4,
     * not 8 — the original comparison could never report success. */
    if (nread != 4) {
        perror("\n fail\n");
        /* Fix: %zd is the correct conversion for ssize_t (was %d). */
        printf("%zd", nread);
        return 1;
    } else {
        printf("\n pass \n");
        return 0;
    }
}
/// Read @p length bytes of the inferior's memory at @p address into
/// @p data.  Tries process_vm_readv() first for large reads, then falls
/// back to the ptrace-based super::readMemory().  On the fast path a
/// short read (ret < length) is still reported as kSuccess with the
/// actual byte count stored in *count (when count != nullptr).
ErrorCode Process::readMemory(Address const &address, void *data, size_t length, size_t *count) {
#if defined(HAVE_PROCESS_VM_READV)
  // Using process_vm_readv() is faster than using ptrace() because we can do
  // bigger reads that ptrace() (which can only read a word at a time); the
  // drawback is that process_vm_readv() cannot bypass page-level permissions
  // like ptrace() can.
  // This is why for reads smaller than word-size we go straight to the
  // fallback so we reduce the number of possible process_vm_readv() failures.
  // The most common occurence of this is when writing breakpoints.
  if (length > sizeof(uintptr_t)) {
    struct iovec local_iov = {data, length};
    struct iovec remote_iov = {reinterpret_cast<void *>(address.value()), length};
    // Prefer the currently selected thread's tid; fall back to the
    // process pid when no thread is selected.
    auto id = _currentThread == nullptr ? _pid : _currentThread->tid();
    ssize_t ret = process_vm_readv(id, &local_iov, 1, &remote_iov, 1, 0);
    if (ret >= 0) {
      if (count != nullptr) {
        *count = ret;
      }
      return kSuccess;
    }
  }
#endif
  // Fallback to super::readMemory, which uses ptrace(2).
  return super::readMemory(address, data, length, count);
}
/** Read from the memory, avoid using `ptrace` (libunwind method)
 *
 * libunwind address-space accessor: reads one word of the target
 * process's memory into *valp.  Writes are rejected (UNW_EINVAL).
 * Three strategies are tried in order:
 *   1. process_vm_readv()      — Linux >= 3.2, no ptrace-stop needed;
 *   2. pread() on /proc/pid/mem — may require ptrace on older kernels;
 *   3. _UPT_access_mem()        — libunwind's ptrace fallback.
 */
static int access_mem(const unw_addr_space_t as, const unw_word_t addr, unw_word_t* const valp, const int write, void* const arg)
{
  if (write)
    return - UNW_EINVAL;
  pid_t pid = _UPT_getpid(arg);
  size_t size = sizeof(unw_word_t);

#ifdef HAVE_PROCESS_VM_READV
  // process_vm_read implementation.
  // This is only available since Linux 3.2.
  struct iovec local = { valp, size };
  struct iovec remote = { (void*) addr, size };
  ssize_t s = process_vm_readv(pid, &local, 1, &remote, 1, 0);
  if (s >= 0) {
    // A partial word is useless to the unwinder — treat it as an error.
    if ((size_t) s != size)
      return - UNW_EINVAL;
    else
      return 0;
  }
  // ENOSYS means the syscall is unavailable; any other errno is fatal.
  if (s < 0 && errno != ENOSYS)
    return - UNW_EINVAL;
#endif

  // /proc/${pid}/mem implementation.
  // On recent kernels, we do not need to ptrace the target process.
  // On older kernels, it is necessary to ptrace the target process.
  size_t count = size;
  off_t off = (off_t) addr;
  char* buf = (char*) valp;
  int fd = simgrid::mc::open_vm(pid, O_RDONLY);
  if (fd < 0)
    return - UNW_EINVAL;
  // Loop because pread() may return short counts for a multi-byte word.
  while (1) {
    ssize_t s = pread(fd, buf, count, off);
    if (s == 0) {
      close(fd);
      return - UNW_EINVAL;
    }
    if (s == -1)
      break;   // fall through to the ptrace fallback below
    count -= s;
    buf += s;
    off += s;
    if (count == 0) {
      close(fd);
      return 0;
    }
  }
  close(fd);

  // ptrace implementation.
  // We need to have PTRACE_ATTACH-ed it before.
  return _UPT_access_mem(as, addr, valp, write, arg);
}
/*
 * Test harness: forks a child that PTRACE_TRACEME's itself and stops on
 * SIGTRAP; the parent waits for the stop, copies the child's copy of
 * known_memory via process_vm_readv(), verifies it, then detaches and
 * reaps the child.
 */
int main(void)
{
    pid_t child;

    child = fork();
    if (child == -1) {
        /* Fix: statement was missing its terminating semicolon. */
        die("Unable to fork: %s", strerror(errno));
    }

    if (child) {
        /* Parent: trace and read the child. */
        ssize_t bytes_read;
        int status;
        uint32_t buffer[sizeof(known_memory) / sizeof(uint32_t)];
        struct iovec local;
        struct iovec remote;

        local.iov_base = buffer;
        local.iov_len = sizeof(buffer);
        remote.iov_base = known_memory;   /* same address in the child, thanks to fork() */
        remote.iov_len = sizeof(known_memory);

        /* Wait for the child's SIGTRAP stop; a normal exit means it
         * never reached raise(). */
        waitpid(child, &status, 0);
        if (WIFEXITED(status)) {
            die("child died early");
        }

        bytes_read = process_vm_readv(child, &local, 1, &remote, 1, 0);
        if (bytes_read == -1) {
            die("process_vm_readv failed: %s", strerror(errno));
        }

        /* NOTE(review): only the first 4 bytes are compared even though
         * the whole region was read — confirm whether a full-size
         * memcmp(known_memory, buffer, sizeof(known_memory)) was intended. */
        if (memcmp(known_memory, buffer, 4) != 0) {
            die("copied memory is incorrect");
        }

        ptrace(PTRACE_DETACH, child, 0, 0);
        waitpid(child, &status, 0);
    } else {
        /* Child: arrange to be traced, then stop. */
        int status;

        status = ptrace(PTRACE_TRACEME, 0, 0, 0);
        if (status == -1) {
            die("unable to set up ptrace: %s", strerror(errno));
        }
        raise(SIGTRAP);
        exit(0);
    }

    return 0;
}
/**
 * Return the value of the word at the given @address in the @tracee's
 * memory space.  The caller must test errno to check if an error
 * occured.
 *
 * Variant of peek_mem() that additionally rejects addresses inside the
 * tracee's pre-allocated heap and normalizes ptrace's EIO to EFAULT.
 */
word_t peek_word(const Tracee *tracee, word_t address)
{
	word_t result = 0;

	/* Reads from the pre-allocated heap region are forbidden. */
	if (belongs_to_heap_prealloc(tracee, address)) {
		errno = EFAULT;
		return 0;
	}

#if defined(HAVE_PROCESS_VM)
	int status;
	struct iovec local;
	struct iovec remote;

	local.iov_base = &result;
	local.iov_len = sizeof_word(tracee);

	remote.iov_base = (void *)address;
	remote.iov_len = sizeof_word(tracee);

	errno = 0;
	status = process_vm_readv(tracee->pid, &local, 1, &remote, 1, 0);
	if (status > 0)
		return result;
	/* Fallback to ptrace if something went wrong. */
#endif
	errno = 0;
	result = (word_t) ptrace(PTRACE_PEEKDATA, tracee->pid, address, NULL);

	/* From ptrace(2) manual: "Unfortunately, under Linux,
	 * different variations of this fault will return EIO or
	 * EFAULT more or less arbitrarily." */
	if (errno == EIO)
		errno = EFAULT;

	/* Use only the 32 LSB when running a 32-bit process on a
	 * 64-bit kernel.  */
	if (is_32on64_mode(tracee))
		result &= 0xFFFFFFFF;

	return result;
}
/*
 * Demo: the parent repeatedly writes its own copy of `i` into the child
 * (process_vm_writev) and reads it back (process_vm_readv), then prints it.
 * Because the child is a fork of the parent, `&i` is the same virtual
 * address in both processes.  Both loops run forever by design.
 */
int main()
{
	int i = 0;
	pid_t pid;
	void *addr = &i;

	pid = fork();
	if (pid < 0) {
		/* NOTE(review): on fork failure this only prints and then
		 * falls through to return 0 — no child exists to target. */
		perror("fork");
	} else if (pid == 0) {
		/* Child: just stay alive as the remote target. */
		while (1) {
			sleep(1);
		}
	} else {
		while (1) {
/* CMA syscalls need Linux >= 3.2 and glibc >= 2.15 wrappers. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) && __GLIBC_PREREQ(2,15)
			struct iovec local, remote;
			ssize_t ret;

			/* Same address on both sides (fork'd layout). */
			local.iov_base = addr;
			local.iov_len = sizeof(int);
			remote.iov_base = addr;
			remote.iov_len = sizeof(int);

			/* Push our current value of i into the child... */
			ret = process_vm_writev(pid, &local, 1, &remote, 1, 0);
			assert(ret == sizeof(int));
			/* ...and pull it straight back. */
			ret = process_vm_readv(pid, &local, 1, &remote, 1, 0);
			assert(ret == sizeof(int));
#endif
			printf("%d ", i);
			fflush(stdout);
			sleep(1);
			i++;
		}
	}
	return 0;
}
int stress_vm_parent(context_t *ctxt) { /* Parent */ int status; uint8_t val = 0; uint8_t *localbuf; addr_msg_t msg_rd, msg_wr; setpgid(ctxt->pid, pgrp); localbuf = mmap(NULL, ctxt->sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (localbuf == MAP_FAILED) { (void)close(ctxt->pipe_wr[0]); (void)close(ctxt->pipe_wr[1]); (void)close(ctxt->pipe_rd[0]); (void)close(ctxt->pipe_rd[1]); pr_fail_dbg(ctxt->name, "mmap"); return EXIT_FAILURE; } /* Close unwanted ends */ (void)close(ctxt->pipe_wr[1]); (void)close(ctxt->pipe_rd[0]); do { struct iovec local[1], remote[1]; uint8_t *ptr, *end = localbuf + ctxt->sz; int ret; /* Wait for address of child's buffer */ redo_rd2: if (!opt_do_run) break; ret = read(ctxt->pipe_wr[0], &msg_rd, sizeof(msg_rd)); if (ret < 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto redo_rd2; pr_fail_dbg(ctxt->name, "read"); break; } if (ret == 0) break; if (ret != sizeof(msg_rd)) { pr_fail_dbg(ctxt->name, "read"); break; } /* Child telling us it's terminating? 
*/ if (!msg_rd.addr) break; /* Perform read from child's memory */ local[0].iov_base = localbuf; local[0].iov_len = ctxt->sz; remote[0].iov_base = msg_rd.addr; remote[0].iov_len = ctxt->sz; if (process_vm_readv(ctxt->pid, local, 1, remote, 1, 0) < 0) { pr_fail_dbg(ctxt->name, "process_vm_readv"); break; } if (opt_flags & OPT_FLAGS_VERIFY) { /* Check data is sane */ for (ptr = localbuf; ptr < end; ptr += ctxt->page_size) { if (*ptr) { pr_fail(stderr, "%s: memory at %p: %d vs %d\n", ctxt->name, ptr, *ptr, msg_rd.val); goto fail; } *ptr = 0; } /* Set memory */ for (ptr = localbuf; ptr < end; ptr += ctxt->page_size) *ptr = val; } /* Write to child's memory */ msg_wr = msg_rd; local[0].iov_base = localbuf; local[0].iov_len = ctxt->sz; remote[0].iov_base = msg_rd.addr; remote[0].iov_len = ctxt->sz; if (process_vm_writev(ctxt->pid, local, 1, remote, 1, 0) < 0) { pr_fail_dbg(ctxt->name, "process_vm_writev"); break; } msg_wr.val = val; val++; redo_wr2: if (!opt_do_run) break; /* Inform child that memory has been changed */ ret = write(ctxt->pipe_rd[1], &msg_wr, sizeof(msg_wr)); if (ret < 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto redo_wr2; if (errno != EBADF) pr_fail_dbg(ctxt->name, "write"); break; } (*ctxt->counter)++; } while (opt_do_run && (!ctxt->max_ops || *ctxt->counter < ctxt->max_ops)); fail: /* Tell child we're done */ msg_wr.addr = NULL; msg_wr.val = 0; if (write(ctxt->pipe_wr[0], &msg_wr, sizeof(msg_wr)) < 0) { if (errno != EBADF) pr_dbg(stderr, "%s: failed to write " "termination message " "over pipe: errno=%d (%s)\n", ctxt->name, errno, strerror(errno)); } (void)close(ctxt->pipe_wr[0]); (void)close(ctxt->pipe_rd[1]); (void)kill(ctxt->pid, SIGKILL); (void)waitpid(ctxt->pid, &status, 0); (void)munmap(localbuf, ctxt->sz); return EXIT_SUCCESS; }
/*
 * Pipe + cross-memory-attach demo:
 *  - the child sends the address of its "hello world (child)" string to
 *    the parent over a pipe;
 *  - the parent reads that address and uses process_vm_readv() to copy
 *    the child's string over its own buffer, printing before and after.
 */
int main()
{
	int i, nbytes;               /* nbytes counts bytes received over the pipe */
	int fd[2];                   /* pipe file descriptors */
	int pid1;
	int ppid;
	char string[80] = "hello world\n";
	char *readbuffer;
	char read1[80];
	struct iovec local[1];
	struct iovec remote[1];
	ssize_t nread;

	ppid = getpid();
	(void)ppid;

	pipe(fd);
	pid1 = fork();
	if (pid1 == -1) {
		perror("fork");
		return 1;
	}

	if (pid1 == 0) {
		/* Child: publish the address of our string to the parent. */
		strcpy(string, "hello world (child)\n");
		readbuffer = string;
		printf("(child)the string : %s \n", string);
		close(fd[0]);
		/* Send the POINTER VALUE (the child-side address of the
		 * string), not the string itself. */
		write(fd[1], &readbuffer, sizeof(readbuffer));
		/* Busy-wait so the child is still alive while the parent
		 * performs the cross-memory read. */
		for (i = 0; i < 100000; i++)
			;
	} else {
		/* Parent: receive the child's address, then CMA-read it. */
		char *child_addr = NULL;

		strcpy(read1, "hello world (parent)\n");
		printf("(Parent)the string: %s \n", read1);
		close(fd[1]);

		/* Fix: read the transmitted pointer into a pointer variable.
		 * The original read it into the char array `string` and then
		 * used the ARRAY's own address as the remote address, which
		 * only worked because fork() gives both processes the same
		 * layout. */
		nbytes = read(fd[0], &child_addr, sizeof(child_addr));
		(void)nbytes;

		/* Set up the iovecs for the cross memory attach. */
		local[0].iov_base = read1;
		local[0].iov_len = strlen(read1) + 1;
		remote[0].iov_base = child_addr;
		remote[0].iov_len = 22;

		nread = process_vm_readv(pid1, local, 1, remote, 1, 0);
		int err1 = errno;
		/* Fix: %zd for ssize_t (was %d). */
		printf("nread : %zd ; err no: %d, %s\n\n", nread, err1, strerror(err1));
		printf("(Parent)the string: %s \n", read1);
	}
	return 0;
}
/*
 * Read @len bytes from @remote_addr in process @pid into @local_addr.
 * Returns the number of bytes transferred, or -1 with errno set.
 */
ssize_t remote_read_bytes(pid_t pid, void *local_addr, void *remote_addr, size_t len)
{
	iovec_t here  = {local_addr, len};
	iovec_t there = {remote_addr, len};

	return process_vm_readv(pid, &here, 1, &there, 1, 0);
}
/*
 *  stress_vm_rw
 *	stress vm_read_v/vm_write_v
 *
 *  Forks a child that mmaps a buffer and sends its address to the parent;
 *  the parent bulk-reads/writes that buffer with process_vm_readv()/
 *  process_vm_writev() and tells the child when the data changed.  Two
 *  pipes: pipe_wr carries child->parent messages, pipe_rd parent->child.
 */
int stress_vm_rw(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid;
	int pipe_wr[2], pipe_rd[2];
	const size_t page_size = stress_get_pagesize();
	size_t sz;

	(void)instance;

	if (!set_vm_rw_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_vm_rw_bytes = MAX_VM_RW_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_vm_rw_bytes = MIN_VM_RW_BYTES;
	}
	/* Round the working size down to a whole number of pages. */
	sz = opt_vm_rw_bytes & ~(page_size - 1);

	if (pipe(pipe_wr) < 0) {
		pr_failed_dbg(name, "pipe");
		return EXIT_FAILURE;
	}
	if (pipe(pipe_rd) < 0) {
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		pr_failed_dbg(name, "pipe");
		return EXIT_FAILURE;
	}

	pid = fork();
	if (pid < 0) {
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		/* Child */
		uint8_t *buf;
		int ret = EXIT_SUCCESS;
		addr_msg_t msg_rd, msg_wr;

		/* Close unwanted ends */
		(void)close(pipe_wr[0]);
		(void)close(pipe_rd[1]);

		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (buf == MAP_FAILED) {
			pr_failed_dbg(name, "mmap");
			ret = EXIT_FAILURE;
			goto cleanup;
		}

		for (;;) {
			uint8_t *ptr, *end = buf + sz;
			/* NOTE(review): this inner `ret` shadows the outer
			 * `ret` used by exit(ret) at cleanup. */
			int ret;

			memset(&msg_wr, 0, sizeof(msg_wr));
			msg_wr.addr = buf;
			msg_wr.val = 0;

			/* Send address of buffer to parent */
redo_wr1:
			ret = write(pipe_wr[1], &msg_wr, sizeof(msg_wr));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_wr1;
				if (errno != EBADF)
					pr_failed_dbg(name, "write");
				break;
			}
redo_rd1:
			/* Wait for parent to populate data */
			ret = read(pipe_rd[0], &msg_rd, sizeof(msg_rd));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_rd1;
				pr_failed_dbg(name, "read");
				break;
			}
			if (ret == 0)
				break;
			if (ret != sizeof(msg_rd)) {
				pr_failed_dbg(name, "read");
				break;
			}

			if (opt_flags & OPT_FLAGS_VERIFY) {
				/* Check memory altered by parent is sane */
				for (ptr = buf; ptr < end; ptr += page_size) {
					if (*ptr != msg_rd.val) {
						pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
							name, ptr, *ptr, msg_rd.val);
						goto cleanup;
					}
					*ptr = 0;
				}
			}
		}
cleanup:
		/* Tell parent we're done */
		msg_wr.addr = 0;
		msg_wr.val = 0;
		if (write(pipe_wr[1], &msg_wr, sizeof(msg_wr)) <= 0) {
			if (errno != EBADF)
				pr_dbg(stderr, "%s: failed to write termination message "
					"over pipe: errno=%d (%s)\n",
					name, errno, strerror(errno));
		}
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		(void)munmap(buf, sz);
		exit(ret);
	} else {
		/* Parent */
		int status;
		uint8_t val = 0;
		uint8_t *localbuf;
		addr_msg_t msg_rd, msg_wr;

		localbuf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (localbuf == MAP_FAILED) {
			(void)close(pipe_wr[0]);
			(void)close(pipe_wr[1]);
			(void)close(pipe_rd[0]);
			(void)close(pipe_rd[1]);
			pr_failed_dbg(name, "mmap");
			exit(EXIT_FAILURE);
		}

		/* Close unwanted ends */
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);

		do {
			struct iovec local[1], remote[1];
			uint8_t *ptr, *end = localbuf + sz;
			int ret;

			/* Wait for address of child's buffer */
redo_rd2:
			if (!opt_do_run)
				break;
			ret = read(pipe_wr[0], &msg_rd, sizeof(msg_rd));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_rd2;
				pr_failed_dbg(name, "read");
				break;
			}
			if (ret == 0)
				break;
			if (ret != sizeof(msg_rd)) {
				pr_failed_dbg(name, "read");
				break;
			}
			/* Child telling us it's terminating? */
			if (!msg_rd.addr)
				break;

			/* Perform read from child's memory */
			local[0].iov_base = localbuf;
			local[0].iov_len = sz;
			remote[0].iov_base = msg_rd.addr;
			remote[0].iov_len = sz;
			if (process_vm_readv(pid, local, 1, remote, 1, 0) < 0) {
				pr_failed_dbg(name, "process_vm_readv");
				break;
			}

			if (opt_flags & OPT_FLAGS_VERIFY) {
				/* Check data is sane */
				for (ptr = localbuf; ptr < end; ptr += page_size) {
					if (*ptr) {
						pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
							name, ptr, *ptr, msg_rd.val);
						goto fail;
					}
					*ptr = 0;
				}
				/* Set memory */
				for (ptr = localbuf; ptr < end; ptr += page_size)
					*ptr = val;
			}

			/* Write to child's memory */
			msg_wr = msg_rd;
			local[0].iov_base = localbuf;
			local[0].iov_len = sz;
			remote[0].iov_base = msg_rd.addr;
			remote[0].iov_len = sz;
			if (process_vm_writev(pid, local, 1, remote, 1, 0) < 0) {
				pr_failed_dbg(name, "process_vm_writev");
				break;
			}
			msg_wr.val = val;
			val++;
redo_wr2:
			if (!opt_do_run)
				break;
			/* Inform child that memory has been changed */
			ret = write(pipe_rd[1], &msg_wr, sizeof(msg_wr));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_wr2;
				if (errno != EBADF)
					pr_failed_dbg(name, "write");
				break;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
fail:
		/* Tell child we're done */
		/* NOTE(review): pipe_wr[0] is the READ end of the
		 * child->parent pipe; this write will fail and is ignored
		 * via the EBADF check — the SIGKILL below actually stops
		 * the child.  Confirm whether pipe_rd[1] was intended. */
		msg_wr.addr = NULL;
		msg_wr.val = 0;
		if (write(pipe_wr[0], &msg_wr, sizeof(msg_wr)) < 0) {
			if (errno != EBADF)
				pr_dbg(stderr, "%s: failed to write termination message "
					"over pipe: errno=%d (%s)\n",
					name, errno, strerror(errno));
		}
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
		(void)munmap(localbuf, sz);
	}
	return EXIT_SUCCESS;
}
/*
 * RDMA get via Linux cross-memory attach: copy @size bytes from
 * @remote_address in the peer process into @local_address, looping on
 * partial reads (see the long comment below), then invoke @cbfunc.
 */
int mca_btl_vader_get_cma (mca_btl_base_module_t *btl, mca_btl_base_endpoint_t *endpoint, void *local_address,
                           uint64_t remote_address, mca_btl_base_registration_handle_t *local_handle,
                           mca_btl_base_registration_handle_t *remote_handle, size_t size, int flags,
                           int order, mca_btl_base_rdma_completion_fn_t cbfunc, void *cbcontext, void *cbdata)
{
    struct iovec src_iov = {.iov_base = (void *)(intptr_t) remote_address, .iov_len = size};
    struct iovec dst_iov = {.iov_base = local_address, .iov_len = size};
    ssize_t ret;

    /*
     * According to the man page :
     * "On success, process_vm_readv() returns the number of bytes read and
     * process_vm_writev() returns the number of bytes written.  This return
     * value may be less than the total number of requested bytes, if a
     * partial read/write occurred.  (Partial transfers apply at the
     * granularity of iovec elements.  These system calls won't perform a
     * partial transfer that splits a single iovec element.)".
     * So since we use a single iovec element, the returned size should either
     * be 0 or size, and the do loop should not be needed here.
     * We tried on various Linux kernels with size > 2 GB, and surprisingly,
     * the returned value is always 0x7ffff000 (fwiw, it happens to be the size
     * of the larger number of pages that fits a signed 32 bits integer).
     * We do not know whether this is a bug from the kernel, the libc or even
     * the man page, but for the time being, we do as is process_vm_readv() could
     * return any value.
     *
     * NOTE(review): if process_vm_readv() ever returned 0 repeatedly (e.g.
     * peer gone), this loop would spin — confirm a 0 return is impossible
     * while iov_len > 0.
     */
    do {
        ret = process_vm_readv (endpoint->segment_data.other.seg_ds->seg_cpid, &dst_iov, 1, &src_iov, 1, 0);
        if (0 > ret) {
            opal_output(0, "Read %ld, expected %lu, errno = %d\n", (long)ret, (unsigned long)size, errno);
            return OPAL_ERROR;
        }
        /* Advance both iovecs past the bytes already transferred. */
        src_iov.iov_base = (void *)((char *)src_iov.iov_base + ret);
        src_iov.iov_len -= ret;
        dst_iov.iov_base = (void *)((char *)dst_iov.iov_base + ret);
        dst_iov.iov_len -= ret;
    } while (0 < src_iov.iov_len);

    /* always call the callback function */
    cbfunc (btl, endpoint, local_address, local_handle, cbcontext, cbdata, OPAL_SUCCESS);

    return OPAL_SUCCESS;
}
#endif

#if OPAL_BTL_VADER_HAVE_KNEM
/*
 * RDMA get via the knem kernel module: a single synchronous
 * KNEM_CMD_INLINE_COPY ioctl copies @size bytes from the remote region
 * (identified by the peer's knem cookie) into @local_address.
 */
int mca_btl_vader_get_knem (mca_btl_base_module_t *btl, mca_btl_base_endpoint_t *endpoint, void *local_address,
                            uint64_t remote_address, mca_btl_base_registration_handle_t *local_handle,
                            mca_btl_base_registration_handle_t *remote_handle, size_t size, int flags,
                            int order, mca_btl_base_rdma_completion_fn_t cbfunc, void *cbcontext, void *cbdata)
{
    struct knem_cmd_param_iovec recv_iovec;
    struct knem_cmd_inline_copy icopy;

    /* Fill in the ioctl data fields.  There's no async completion, so
       we don't need to worry about getting a slot, etc. */
    recv_iovec.base = (uintptr_t) local_address;
    recv_iovec.len = size;
    icopy.local_iovec_array = (uintptr_t) &recv_iovec;
    icopy.local_iovec_nr = 1;
    icopy.remote_cookie = remote_handle->cookie;
    icopy.remote_offset = remote_address - remote_handle->base_addr;
    icopy.write = 0;
    icopy.flags = 0;

    /* Use the DMA flag if knem supports it *and* the segment length
     * is greater than the cutoff.  Not that if DMA is not supported
     * or the user specified 0 for knem_dma_min the knem_dma_min was
     * set to UINT_MAX in mca_btl_vader_knem_init. */
    if (mca_btl_vader_component.knem_dma_min <= size) {
        icopy.flags = KNEM_FLAG_DMA;
    }
    /* synchronous flags only, no need to specify icopy.async_status_index */

    /* When the ioctl returns, the transfer is done and we can invoke
       the btl callback and return the frag */
    if (OPAL_UNLIKELY(0 != ioctl (mca_btl_vader.knem_fd, KNEM_CMD_INLINE_COPY, &icopy))) {
        return OPAL_ERROR;
    }

    if (KNEM_STATUS_FAILED == icopy.current_status) {
        return OPAL_ERROR;
    }

    /* always call the callback function */
    cbfunc (btl, endpoint, local_address, local_handle, cbcontext, cbdata, OPAL_SUCCESS);

    return OPAL_SUCCESS;
}
#endif

/*
 * Completion handler for the send/recv-emulated get path: copies the
 * payload that arrived after the emulation header into the destination
 * buffer, returns the fragment, then fires the user's RDMA callback.
 */
static void mca_btl_vader_sc_emu_get_complete (mca_btl_base_module_t *btl, mca_btl_base_endpoint_t *endpoint,
                                               mca_btl_base_descriptor_t *desc, int status)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) desc;
    mca_btl_vader_sc_emu_hdr_t *hdr;
    void *local_address = frag->rdma.local_address;
    /* Payload length = segment length minus the emulation header. */
    size_t len = frag->segments[0].seg_len - sizeof (*hdr);
    void *context = frag->rdma.context;
    void *cbdata = frag->rdma.cbdata;
    mca_btl_base_rdma_completion_fn_t cbfunc = frag->rdma.cbfunc;
    void *data;

    hdr = (mca_btl_vader_sc_emu_hdr_t *) frag->segments[0].seg_addr.pval;
    data = (void *) (hdr + 1);

    memcpy (local_address, data, len);

    /* return the fragment before calling the callback */
    MCA_BTL_VADER_FRAG_RETURN(frag);

    cbfunc (btl, endpoint, local_address, NULL, context, cbdata, status);
}
/*
 * Configure-time probe: merely references process_vm_readv() and
 * process_vm_writev() so the link step fails when the CMA syscall
 * wrappers are unavailable.  The return value is meaningless.
 */
int main(void)
{
	ssize_t rd = process_vm_readv(0, NULL, 0, NULL, 0, 0);
	ssize_t wr = process_vm_writev(0, NULL, 0, NULL, 0, 0);

	return rd + wr;
}
/*
 * rr regression test for process_vm_readv/process_vm_writev iovec
 * handling around an unmapped page.  A two-page mapping has its second
 * page unmapped; reads/writes with iovecs that cross into the hole are
 * issued against our own pid (getpid()) and the partial-transfer results
 * are checked.  Kernels differ on where a partial transfer stops, so
 * several branches accept two outcomes (including a known kernel bug,
 * bugzilla #113541, where extra bytes are written).
 *
 * clear() is assumed to refill the first page with the pattern
 * p[i] == (i & 0xff), which the expected values below rely on.
 */
int main(void)
{
	unsigned char* p = (unsigned char*)mmap(NULL, PAGE_SIZE * 2, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	struct iovec in_iov[2];
	struct iovec out_iov[2];
	int ret;

	test_assert(p != MAP_FAILED);
	/* Punch out the second page so accesses beyond p+PAGE_SIZE fault. */
	test_assert(0 == munmap(p + PAGE_SIZE, PAGE_SIZE));

	in_iov[0].iov_base = p;
	in_iov[0].iov_len = 2;
	in_iov[1].iov_base = p + 3;
	in_iov[1].iov_len = 3;
	out_iov[0].iov_base = p + PAGE_SIZE - 6;
	out_iov[0].iov_len = 3;
	/* out_iov[1] straddles the end of the mapped page. */
	out_iov[1].iov_base = p + PAGE_SIZE - 1;
	out_iov[1].iov_len = 2;

	clear(p);
	/* Read stops after 4 bytes: 3 into out_iov[0], 1 into out_iov[1]. */
	test_assert(4 == process_vm_readv(getpid(), out_iov, 2, in_iov, 2, 0));
	test_assert(out_iov[1].iov_len == 2);
	test_assert(p[PAGE_SIZE - 7] == ((PAGE_SIZE - 7) & 0xff));
	test_assert(p[PAGE_SIZE - 6] == 0);
	test_assert(p[PAGE_SIZE - 5] == 1);
	test_assert(p[PAGE_SIZE - 4] == 3);
	test_assert(p[PAGE_SIZE - 3] == ((PAGE_SIZE - 3) & 0xff));
	test_assert(p[PAGE_SIZE - 2] == ((PAGE_SIZE - 2) & 0xff));
	test_assert(p[PAGE_SIZE - 1] == 4);

	clear(p);
	ret = process_vm_writev(getpid(), in_iov, 2, out_iov, 2, 0);
	/* Kernel-dependent: write may stop at 3 (whole iovecs only) or 4. */
	if (3 == ret) {
		test_assert(out_iov[1].iov_len == 2);
		test_assert(p[PAGE_SIZE - 7] == ((PAGE_SIZE - 7) & 0xff));
		test_assert(p[PAGE_SIZE - 6] == 0);
		test_assert(p[PAGE_SIZE - 5] == 1);
		test_assert(p[PAGE_SIZE - 4] == 3);
		test_assert(p[PAGE_SIZE - 3] == ((PAGE_SIZE - 3) & 0xff));
		test_assert(p[PAGE_SIZE - 2] == ((PAGE_SIZE - 2) & 0xff));
		test_assert(p[PAGE_SIZE - 1] == ((PAGE_SIZE - 1) & 0xff));
	} else {
		test_assert(4 == ret);
		test_assert(out_iov[1].iov_len == 2);
		test_assert(p[PAGE_SIZE - 7] == ((PAGE_SIZE - 7) & 0xff));
		test_assert(p[PAGE_SIZE - 6] == 0);
		test_assert(p[PAGE_SIZE - 5] == 1);
		test_assert(p[PAGE_SIZE - 4] == 3);
		test_assert(p[PAGE_SIZE - 3] == ((PAGE_SIZE - 3) & 0xff));
		test_assert(p[PAGE_SIZE - 2] == ((PAGE_SIZE - 2) & 0xff));
		test_assert(p[PAGE_SIZE - 1] == 4);
	}

	/* Now keep out_iov[1] fully inside the mapped page. */
	out_iov[1].iov_base = p + PAGE_SIZE - 2;
	out_iov[1].iov_len = 3;
	clear(p);
	test_assert(5 == process_vm_readv(getpid(), out_iov, 2, in_iov, 2, 0));
	test_assert(p[PAGE_SIZE - 7] == ((PAGE_SIZE - 7) & 0xff));
	test_assert(p[PAGE_SIZE - 6] == 0);
	test_assert(p[PAGE_SIZE - 5] == 1);
	test_assert(p[PAGE_SIZE - 4] == 3);
	test_assert(p[PAGE_SIZE - 3] == ((PAGE_SIZE - 3) & 0xff));
	test_assert(p[PAGE_SIZE - 2] == 4);
	test_assert(p[PAGE_SIZE - 1] == 5);

	clear(p);
	ret = process_vm_writev(getpid(), in_iov, 2, out_iov, 2, 0);
	if (3 == ret) {
		test_assert(p[PAGE_SIZE - 7] == ((PAGE_SIZE - 7) & 0xff));
		test_assert(p[PAGE_SIZE - 6] == 0);
		test_assert(p[PAGE_SIZE - 5] == 1);
		test_assert(p[PAGE_SIZE - 4] == 3);
		test_assert(p[PAGE_SIZE - 3] == ((PAGE_SIZE - 3) & 0xff));
		test_assert(p[PAGE_SIZE - 2] == ((PAGE_SIZE - 2) & 0xff));
		test_assert(p[PAGE_SIZE - 1] == ((PAGE_SIZE - 1) & 0xff));
	} else {
		test_assert(5 == ret);
		test_assert(p[PAGE_SIZE - 7] == ((PAGE_SIZE - 7) & 0xff));
		test_assert(p[PAGE_SIZE - 6] == 0);
		test_assert(p[PAGE_SIZE - 5] == 1);
		test_assert(p[PAGE_SIZE - 4] == 3);
		test_assert(p[PAGE_SIZE - 3] == ((PAGE_SIZE - 3) & 0xff));
		test_assert(p[PAGE_SIZE - 2] == 4);
		test_assert(p[PAGE_SIZE - 1] == 5);
	}

	/* Source straddles the hole: kernels either fail with EFAULT or
	 * transfer the single readable byte. */
	in_iov[0].iov_base = p + PAGE_SIZE - 1;
	in_iov[0].iov_len = 2;
	out_iov[0].iov_base = p;
	out_iov[0].iov_len = 3;
	clear(p);
	ret = process_vm_readv(getpid(), out_iov, 1, in_iov, 1, 0);
	if (ret == -1 && errno == EFAULT) {
		test_assert(p[0] == 0);
		test_assert(p[1] == 1);
	} else {
		test_assert(1 == ret);
		test_assert(p[0] == ((PAGE_SIZE - 1) & 0xff));
		test_assert(p[1] == 1);
	}

	clear(p);
	test_assert(1 == process_vm_writev(getpid(), in_iov, 1, out_iov, 1, 0));
	test_assert(p[0] == ((PAGE_SIZE - 1) & 0xff));
	/* Linux kernel bug: should be 1, but sometimes is zero --- extra data
	   written.
	   https://bugzilla.kernel.org/show_bug.cgi?id=113541 */
	if (p[1] == 0) {
		atomic_puts("Kernel bug detected!");
	}
	test_assert(p[1] == 1 || p[1] == 0);

	/* Two source iovecs, second crossing the hole. */
	in_iov[0].iov_base = p + PAGE_SIZE - 4;
	in_iov[0].iov_len = 2;
	in_iov[1].iov_base = p + PAGE_SIZE - 2;
	in_iov[1].iov_len = 3;
	out_iov[0].iov_base = p;
	out_iov[0].iov_len = 1;
	out_iov[1].iov_base = p + 2;
	out_iov[1].iov_len = 4;
	clear(p);
	ret = process_vm_readv(getpid(), out_iov, 2, in_iov, 2, 0);
	if (2 == ret) {
		test_assert(p[0] == ((PAGE_SIZE - 4) & 0xff));
		test_assert(p[1] == 1);
		test_assert(p[2] == ((PAGE_SIZE - 3) & 0xff));
		test_assert(p[3] == 3);
		test_assert(p[4] == 4);
		test_assert(p[5] == 5);
	} else {
		test_assert(4 == ret);
		test_assert(p[0] == ((PAGE_SIZE - 4) & 0xff));
		test_assert(p[1] == 1);
		test_assert(p[2] == ((PAGE_SIZE - 3) & 0xff));
		test_assert(p[3] == ((PAGE_SIZE - 2) & 0xff));
		test_assert(p[4] == ((PAGE_SIZE - 1) & 0xff));
		test_assert(p[5] == 5);
	}

	clear(p);
	test_assert(4 == process_vm_writev(getpid(), in_iov, 2, out_iov, 2, 0));
	test_assert(p[0] == ((PAGE_SIZE - 4) & 0xff));
	test_assert(p[1] == 1);
	test_assert(p[2] == ((PAGE_SIZE - 3) & 0xff));
	test_assert(p[3] == ((PAGE_SIZE - 2) & 0xff));
	test_assert(p[4] == ((PAGE_SIZE - 1) & 0xff));
	if (p[5] == 0) {
		atomic_puts("Kernel bug detected!");
	}
	test_assert(p[5] == 5 || p[5] == 0);

	atomic_puts("EXIT-SUCCESS");
	return 0;
}
static size_t arch_getProcMem(pid_t pid, uint8_t * buf, size_t len, REG_TYPE pc) { /* * Let's try process_vm_readv first */ const struct iovec local_iov = { .iov_base = buf, .iov_len = len, }; const struct iovec remote_iov = { .iov_base = (void *)(uintptr_t) pc, .iov_len = len, }; if (process_vm_readv(pid, &local_iov, 1, &remote_iov, 1, 0) == (ssize_t) len) { return len; } // Debug if failed since it shouldn't happen very often PLOG_D("process_vm_readv() failed"); /* * Ok, let's do it via ptrace() then. * len must be aligned to the sizeof(long) */ int cnt = len / sizeof(long); size_t memsz = 0; for (int x = 0; x < cnt; x++) { uint8_t *addr = (uint8_t *) (uintptr_t) pc + (int)(x * sizeof(long)); long ret = ptrace(PTRACE_PEEKDATA, pid, addr, NULL); if (errno != 0) { PLOG_W("Couldn't PT_READ_D on pid %d, addr: %p", pid, addr); break; } memsz += sizeof(long); memcpy(&buf[x * sizeof(long)], &ret, sizeof(long)); } return memsz; } void arch_ptraceGetCustomPerf(honggfuzz_t * hfuzz, pid_t pid, uint64_t * cnt UNUSED) { if ((hfuzz->dynFileMethod & _HF_DYNFILE_CUSTOM) == 0) { return; } if (hfuzz->persistent) { ptrace(PTRACE_INTERRUPT, pid, 0, 0); arch_ptraceWaitForPidStop(pid); } defer { if (hfuzz->persistent) { ptrace(PTRACE_CONT, pid, 0, 0); } }; #if defined(__x86_64__) struct user_regs_struct_64 regs; if (ptrace(PTRACE_GETREGS, pid, 0, ®s) != -1) { *cnt = regs.gs_base; return; } #endif /* defined(__x86_64__) */ *cnt = 0ULL; } void arch_ptraceSetCustomPerf(honggfuzz_t * hfuzz, pid_t pid, uint64_t cnt UNUSED) { if ((hfuzz->dynFileMethod & _HF_DYNFILE_CUSTOM) == 0) { return; } if (hfuzz->persistent) { ptrace(PTRACE_INTERRUPT, pid, 0, 0); arch_ptraceWaitForPidStop(pid); } defer { if (hfuzz->persistent) { ptrace(PTRACE_CONT, pid, 0, 0); } }; #if defined(__x86_64__) struct user_regs_struct_64 regs; if (ptrace(PTRACE_GETREGS, pid, 0, ®s) == -1) { return; } regs.gs_base = cnt; if (ptrace(PTRACE_SETREGS, pid, 0, ®s) == -1) { return; } #endif /* defined(__x86_64__) */ } static size_t 
arch_getPC(pid_t pid, REG_TYPE * pc, REG_TYPE * status_reg UNUSED) { /* * Some old ARM android kernels are failing with PTRACE_GETREGS to extract * the correct register values if struct size is bigger than expected. As such the * 32/64-bit multiplexing trick is not working for them in case PTRACE_GETREGSET * fails or is not implemented. To cover such cases we explicitly define * the struct size to 32bit version for arm CPU. */ #if defined(__arm__) struct user_regs_struct_32 regs; #else HEADERS_STRUCT regs; #endif struct iovec pt_iov = { .iov_base = ®s, .iov_len = sizeof(regs), }; if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &pt_iov) == -1L) { PLOG_D("ptrace(PTRACE_GETREGSET) failed"); // If PTRACE_GETREGSET fails, try PTRACE_GETREGS if available #if PTRACE_GETREGS_AVAILABLE if (ptrace(PTRACE_GETREGS, pid, 0, ®s)) { PLOG_D("ptrace(PTRACE_GETREGS) failed"); LOG_W("ptrace PTRACE_GETREGSET & PTRACE_GETREGS failed to extract target registers"); return 0; } #else return 0; #endif } #if defined(__i386__) || defined(__x86_64__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; *pc = r32->eip; *status_reg = r32->eflags; return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->ip; *status_reg = r64->flags; return pt_iov.iov_len; } LOG_W("Unknown registers structure size: '%zd'", pt_iov.iov_len); return 0; #endif /* defined(__i386__) || defined(__x86_64__) */ #if defined(__arm__) || defined(__aarch64__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; #ifdef __ANDROID__ *pc = r32->ARM_pc; *status_reg = r32->ARM_cpsr; #else *pc = r32->uregs[ARM_pc]; *status_reg = r32->uregs[ARM_cpsr]; #endif return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct 
user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->pc; *status_reg = r64->pstate; return pt_iov.iov_len; } LOG_W("Unknown registers structure size: '%zd'", pt_iov.iov_len); return 0; #endif /* defined(__arm__) || defined(__aarch64__) */ #if defined(__powerpc64__) || defined(__powerpc__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; *pc = r32->nip; return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->nip; return pt_iov.iov_len; } LOG_W("Unknown registers structure size: '%zd'", pt_iov.iov_len); return 0; #endif /* defined(__powerpc64__) || defined(__powerpc__) */ LOG_D("Unknown/unsupported CPU architecture"); return 0; } static void arch_getInstrStr(pid_t pid, REG_TYPE * pc, char *instr) { /* * We need a value aligned to 8 * which is sizeof(long) on 64bit CPU archs (on most of them, I hope;) */ uint8_t buf[MAX_INSTR_SZ]; size_t memsz; REG_TYPE status_reg = 0; snprintf(instr, _HF_INSTR_SZ, "%s", "[UNKNOWN]"); size_t pcRegSz = arch_getPC(pid, pc, &status_reg); if (!pcRegSz) { LOG_W("Current architecture not supported for disassembly"); return; } if ((memsz = arch_getProcMem(pid, buf, sizeof(buf), *pc)) == 0) { snprintf(instr, _HF_INSTR_SZ, "%s", "[NOT_MMAPED]"); return; } #if !defined(__ANDROID__) arch_bfdDisasm(pid, buf, memsz, instr); #else cs_arch arch; cs_mode mode; #if defined(__arm__) || defined(__aarch64__) arch = (pcRegSz == sizeof(struct user_regs_struct_64)) ? CS_ARCH_ARM64 : CS_ARCH_ARM; if (arch == CS_ARCH_ARM) { mode = (status_reg & 0x20) ? CS_MODE_THUMB : CS_MODE_ARM; } else { mode = CS_MODE_ARM; } #elif defined(__i386__) || defined(__x86_64__) arch = CS_ARCH_X86; mode = (pcRegSz == sizeof(struct user_regs_struct_64)) ? 
CS_MODE_64 : CS_MODE_32; #else LOG_E("Unknown/Unsupported Android CPU architecture"); #endif csh handle; cs_err err = cs_open(arch, mode, &handle); if (err != CS_ERR_OK) { LOG_W("Capstone initialization failed: '%s'", cs_strerror(err)); return; } cs_insn *insn; size_t count = cs_disasm(handle, buf, sizeof(buf), *pc, 0, &insn); if (count < 1) { LOG_W("Couldn't disassemble the assembler instructions' stream: '%s'", cs_strerror(cs_errno(handle))); cs_close(&handle); return; } snprintf(instr, _HF_INSTR_SZ, "%s %s", insn[0].mnemonic, insn[0].op_str); cs_free(insn, count); cs_close(&handle); #endif /* defined(__ANDROID__) */ for (int x = 0; instr[x] && x < _HF_INSTR_SZ; x++) { if (instr[x] == '/' || instr[x] == '\\' || isspace(instr[x]) || !isprint(instr[x])) { instr[x] = '_'; } } return; } static void arch_hashCallstack(honggfuzz_t * hfuzz, fuzzer_t * fuzzer, funcs_t * funcs, size_t funcCnt, bool enableMasking) { uint64_t hash = 0; for (size_t i = 0; i < funcCnt && i < hfuzz->linux.numMajorFrames; i++) { /* * Convert PC to char array to be compatible with hash function */ char pcStr[REGSIZEINCHAR] = { 0 }; snprintf(pcStr, REGSIZEINCHAR, REG_PD REG_PM, (REG_TYPE) (long)funcs[i].pc); /* * Hash the last three nibbles */ hash ^= util_hash(&pcStr[strlen(pcStr) - 3], 3); } /* * If only one frame, hash is not safe to be used for uniqueness. We mask it * here with a constant prefix, so analyzers can pick it up and create filenames * accordingly. 'enableMasking' is controlling masking for cases where it should * not be enabled (e.g. fuzzer worker is from verifier). */ if (enableMasking && funcCnt == 1) { hash |= _HF_SINGLE_FRAME_MASK; } fuzzer->backtrace = hash; }
static size_t arch_getProcMem(pid_t pid, uint8_t * buf, size_t len, REG_TYPE pc) { /* * Let's try process_vm_readv first */ const struct iovec local_iov = { .iov_base = buf, .iov_len = len, }; const struct iovec remote_iov = { .iov_base = (void *)(uintptr_t) pc, .iov_len = len, }; if (process_vm_readv(pid, &local_iov, 1, &remote_iov, 1, 0) == (ssize_t) len) { return len; } // Debug if failed since it shouldn't happen very often LOGMSG_P(l_DEBUG, "process_vm_readv() failed"); /* * Ok, let's do it via ptrace() then. * len must be aligned to the sizeof(long) */ int cnt = len / sizeof(long); size_t memsz = 0; for (int x = 0; x < cnt; x++) { uint8_t *addr = (uint8_t *) (uintptr_t) pc + (int)(x * sizeof(long)); long ret = ptrace(PT_READ_D, pid, addr, NULL); if (errno != 0) { LOGMSG_P(l_WARN, "Couldn't PT_READ_D on pid %d, addr: %p", pid, addr); break; } memsz += sizeof(long); memcpy(&buf[x * sizeof(long)], &ret, sizeof(long)); } return memsz; } // Non i386 / x86_64 ISA fail build due to unused pid argument #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" uint64_t arch_ptraceGetCustomPerf(honggfuzz_t * hfuzz, pid_t pid) { if ((hfuzz->dynFileMethod & _HF_DYNFILE_CUSTOM) == 0) { return 0ULL; } #if defined(__i386__) || defined(__x86_64__) HEADERS_STRUCT regs; struct iovec pt_iov = { .iov_base = ®s, .iov_len = sizeof(regs), }; if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &pt_iov) == -1L) { LOGMSG_P(l_DEBUG, "ptrace(PTRACE_GETREGSET) failed"); // If PTRACE_GETREGSET fails, try PTRACE_GETREGS if available #if PTRACE_GETREGS_AVAILABLE if (ptrace(PTRACE_GETREGS, pid, 0, ®s)) { LOGMSG_P(l_DEBUG, "ptrace(PTRACE_GETREGS) failed"); LOGMSG(l_WARN, "ptrace PTRACE_GETREGSET & PTRACE_GETREGS failed to" " extract target registers"); return 0ULL; } #else return 0ULL; #endif } /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; return (uint64_t) r32->gs; } /* * 64-bit 
*/ if (pt_iov.iov_len == sizeof(struct user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; return (uint64_t) r64->gs_base; } LOGMSG(l_WARN, "Unknown registers structure size: '%d'", pt_iov.iov_len); #endif /* defined(__i386__) || defined(__x86_64__) */ return 0ULL; } #pragma GCC diagnostic pop /* ignored "-Wunused-parameter" */ static size_t arch_getPC(pid_t pid, REG_TYPE * pc, REG_TYPE * status_reg) { HEADERS_STRUCT regs; struct iovec pt_iov = { .iov_base = ®s, .iov_len = sizeof(regs), }; if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &pt_iov) == -1L) { LOGMSG_P(l_DEBUG, "ptrace(PTRACE_GETREGSET) failed"); // If PTRACE_GETREGSET fails, try PTRACE_GETREGS if available #if PTRACE_GETREGS_AVAILABLE if (ptrace(PTRACE_GETREGS, pid, 0, ®s)) { LOGMSG_P(l_DEBUG, "ptrace(PTRACE_GETREGS) failed"); LOGMSG(l_WARN, "ptrace PTRACE_GETREGSET & PTRACE_GETREGS failed to" " extract target registers"); return 0; } #else return 0; #endif } #if defined(__i386__) || defined(__x86_64__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; *pc = r32->eip; *status_reg = r32->eflags; return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->ip; *status_reg = r64->flags; return pt_iov.iov_len; } LOGMSG(l_WARN, "Unknown registers structure size: '%d'", pt_iov.iov_len); return 0; #endif /* defined(__i386__) || defined(__x86_64__) */ #if defined(__arm__) || defined(__aarch64__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; #ifdef __ANDROID__ *pc = r32->ARM_pc; *status_reg = r32->ARM_cpsr; #else *pc = r32->uregs[ARM_pc]; *status_reg = r32->uregs[ARM_cpsr]; #endif return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct 
user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->pc; *status_reg = r64->pstate; return pt_iov.iov_len; } LOGMSG(l_WARN, "Unknown registers structure size: '%d'", pt_iov.iov_len); return 0; #endif /* defined(__arm__) || defined(__aarch64__) */ #if defined(__powerpc64__) || defined(__powerpc__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; *pc = r32->nip; return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->nip; return pt_iov.iov_len; } LOGMSG(l_WARN, "Unknown registers structure size: '%d'", pt_iov.iov_len); return 0; #endif /* defined(__powerpc64__) || defined(__powerpc__) */ LOGMSG(l_DEBUG, "Unknown/unsupported CPU architecture"); return 0; } static void arch_getInstrStr(pid_t pid, REG_TYPE * pc, char *instr) { /* * We need a value aligned to 8 * which is sizeof(long) on 64bit CPU archs (on most of them, I hope;) */ uint8_t buf[MAX_INSTR_SZ]; size_t memsz; REG_TYPE status_reg = 0; snprintf(instr, _HF_INSTR_SZ, "%s", "[UNKNOWN]"); size_t pcRegSz = arch_getPC(pid, pc, &status_reg); if (!pcRegSz) { LOGMSG(l_WARN, "Current architecture not supported for disassembly"); return; } if ((memsz = arch_getProcMem(pid, buf, sizeof(buf), *pc)) == 0) { snprintf(instr, _HF_INSTR_SZ, "%s", "[NOT_MMAPED]"); return; } #if !defined(__ANDROID__) arch_bfdDisasm(pid, buf, memsz, instr); #else cs_arch arch; cs_mode mode; #if defined(__arm__) || defined(__aarch64__) arch = (pcRegSz == sizeof(struct user_regs_struct_64)) ? CS_ARCH_ARM64 : CS_ARCH_ARM; if (arch == CS_ARCH_ARM) { mode = (status_reg & 0x20) ? CS_MODE_THUMB : CS_MODE_ARM; } else { mode = CS_MODE_ARM; } #elif defined(__i386__) || defined(__x86_64__) arch = CS_ARCH_X86; mode = (pcRegSz == sizeof(struct user_regs_struct_64)) ? 
CS_MODE_64 : CS_MODE_32; #else LOGMSG(l_ERROR, "Unknown/unsupported Android CPU architecture"); #endif csh handle; cs_err err = cs_open(arch, mode, &handle); if (err != CS_ERR_OK) { LOGMSG(l_WARN, "Capstone initialization failed: '%s'", cs_strerror(err)); return; } cs_insn *insn; size_t count = cs_disasm(handle, buf, sizeof(buf), *pc, 0, &insn); if (count < 1) { LOGMSG(l_WARN, "Couldn't disassemble the assembler instructions' stream: '%s'", cs_strerror(cs_errno(handle))); cs_close(&handle); return; } snprintf(instr, _HF_INSTR_SZ, "%s %s", insn[0].mnemonic, insn[0].op_str); cs_free(insn, count); cs_close(&handle); #endif for (int x = 0; instr[x] && x < _HF_INSTR_SZ; x++) { if (instr[x] == '/' || instr[x] == '\\' || isspace(instr[x]) || !isprint(instr[x])) { instr[x] = '_'; } } return; } static void arch_ptraceGenerateReport(pid_t pid, fuzzer_t * fuzzer, funcs_t * funcs, size_t funcCnt, siginfo_t * si, const char *instr) { fuzzer->report[0] = '\0'; util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "ORIG_FNAME: %s\n", fuzzer->origFileName); util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "FUZZ_FNAME: %s\n", fuzzer->fileName); util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "PID: %d\n", pid); util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "SIGNAL: %s (%d)\n", arch_sigs[si->si_signo].descr, si->si_signo); util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "FAULT ADDRESS: %p\n", si->si_addr); util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "INSTRUCTION: %s\n", instr); util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "STACK:\n"); for (size_t i = 0; i < funcCnt; i++) { #ifdef __HF_USE_CAPSTONE__ util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), " <" REG_PD REG_PM "> ", (REG_TYPE) (long)funcs[i].pc, funcs[i].func, funcs[i].line); if (funcs[i].func[0] != '\0') util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), "[%s + 0x%x]\n", funcs[i].func, funcs[i].line); else util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), 
"[]\n"); #else util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), " <" REG_PD REG_PM "> [%s():%u]\n", (REG_TYPE) (long)funcs[i].pc, funcs[i].func, funcs[i].line); #endif } // libunwind is not working for 32bit targets in 64bit systems #if defined(__aarch64__) if (funcCnt == 0) { util_ssnprintf(fuzzer->report, sizeof(fuzzer->report), " !ERROR: If 32bit fuzz target" " in aarch64 system, try ARM 32bit build\n"); } #endif return; }
/*
 * Read @len bytes at remote address @raddr of process @pid into local
 * buffer @laddr using process_vm_readv().  Returns the number of bytes
 * transferred, or -1 with errno set.  As a side effect, records in
 * process_vm_readv_not_supported whether the syscall exists at all
 * (ENOSYS), so later calls can go straight to the ptrace fallback.
 */
static ssize_t
vm_read_mem(const pid_t pid, void *const laddr,
	    const kernel_ulong_t raddr, const size_t len)
{
	const unsigned long truncated_raddr = raddr;

#if SIZEOF_LONG < SIZEOF_KERNEL_LONG_T
	/* A remote address that does not fit in an unsigned long cannot be
	 * expressed in the iovec; report it as an I/O error. */
	if (raddr != (kernel_ulong_t) truncated_raddr) {
		errno = EIO;
		return -1;
	}
#endif

	const struct iovec local = {
		.iov_base = laddr,
		.iov_len = len
	};
	const struct iovec remote = {
		.iov_base = (void *) truncated_raddr,
		.iov_len = len
	};

	const ssize_t rc = process_vm_readv(pid, &local, 1, &remote, 1, 0);
	if (rc < 0 && errno == ENOSYS)
		process_vm_readv_not_supported = true;

	return rc;
}

/*
 * Return true iff @addr cannot be a valid tracee address: for a 32-bit
 * tracee on a kernel with wider addresses, any bit set above the low
 * 32 bits makes the address invalid.
 */
static bool
tracee_addr_is_invalid(kernel_ulong_t addr)
{
	return
#if ANY_WORDSIZE_LESS_THAN_KERNEL_LONG
		current_wordsize < sizeof(addr) && addr & ~(kernel_ulong_t) -1U;
#else
		false;
#endif
}

/* legacy method of copying from tracee */
/*
 * Word-by-word PTRACE_PEEKDATA copy of @len bytes at @addr into @laddr.
 * Handles an unaligned start address via @residue (offset of @addr within
 * its containing long).  Returns 0 on success, -1 on failure.
 */
static int
umoven_peekdata(const int pid, kernel_ulong_t addr, unsigned int len,
		void *laddr)
{
	unsigned int nread = 0;
	unsigned int residue = addr & (sizeof(long) - 1);

	while (len) {
		addr &= -sizeof(long);		/* aligned address */

		/* PEEKDATA returns the word in-band; errno is the only
		 * error channel, so it must be cleared before the call. */
		errno = 0;
		union {
			long val;
			char x[sizeof(long)];
		} u = { .val = ptrace(PTRACE_PEEKDATA, pid, addr, 0) };

		switch (errno) {
			case 0:
				break;
			case ESRCH: case EINVAL:
				/* these could be seen if the process is gone */
				return -1;
			case EFAULT: case EIO: case EPERM:
				/* address space is inaccessible */
				if (nread) {
					perror_msg("umoven: short read (%u < %u)"
						   " @0x%" PRI_klx,
						   nread, nread + len,
						   addr - nread);
				}
				return -1;
			default:
				/* all the rest is strange and should be reported */
				perror_msg("umoven: PTRACE_PEEKDATA pid:%d @0x%" PRI_klx,
					   pid, addr);
				return -1;
		}

		/* Copy only the part of this word that overlaps the request. */
		unsigned int m = MIN(sizeof(long) - residue, len);
		memcpy(laddr, &u.x[residue], m);

		residue = 0;
		addr += sizeof(long);
		laddr += m;
		nread += m;
		len -= m;
	}

	return 0;
}

/*
 * Copy `len' bytes of data from process `pid'
 * at address `addr' to our space at `our_addr'.
 *
 * Returns 0 on success, -1 on failure.  Prefers process_vm_readv() and
 * falls back to umoven_peekdata() when the syscall is unavailable or
 * denied (ENOSYS/EPERM).
 */
int
umoven(struct tcb *const tcp, kernel_ulong_t addr, unsigned int len,
       void *const our_addr)
{
	if (tracee_addr_is_invalid(addr))
		return -1;

	const int pid = tcp->pid;

	if (process_vm_readv_not_supported)
		return umoven_peekdata(pid, addr, len, our_addr);

	int r = vm_read_mem(pid, our_addr, addr, len);
	if ((unsigned int) r == len)
		return 0;
	if (r >= 0) {
		error_msg("umoven: short read (%u < %u) @0x%" PRI_klx,
			  (unsigned int) r, len, addr);
		return -1;
	}
	switch (errno) {
		case ENOSYS:
		case EPERM:
			/* try PTRACE_PEEKDATA */
			return umoven_peekdata(pid, addr, len, our_addr);
		case ESRCH:
			/* the process is gone */
			return -1;
		case EFAULT: case EIO:
			/* address space is inaccessible */
			return -1;
		default:
			/* all the rest is strange and should be reported */
			perror_msg("process_vm_readv: pid:%d @0x%" PRI_klx,
				   pid, addr);
			return -1;
	}
}

/*
 * Like umoven_peekdata but make the additional effort of looking
 * for a terminating zero byte.
 *
 * Returns strlen + 1 if a NUL was found, 0 if @len bytes were copied
 * without seeing one, -1 on error.
 */
static int
umovestr_peekdata(const int pid, kernel_ulong_t addr, unsigned int len,
		  void *laddr)
{
	unsigned int nread = 0;
	unsigned int residue = addr & (sizeof(long) - 1);
	void *const orig_addr = laddr;

	while (len) {
		addr &= -sizeof(long);		/* aligned address */

		errno = 0;
		union {
			unsigned long val;
			char x[sizeof(long)];
		} u = { .val = ptrace(PTRACE_PEEKDATA, pid, addr, 0) };

		switch (errno) {
			case 0:
				break;
			case ESRCH: case EINVAL:
				/* these could be seen if the process is gone */
				return -1;
			case EFAULT: case EIO: case EPERM:
				/* address space is inaccessible */
				if (nread) {
					perror_msg("umovestr: short read (%d < %d)"
						   " @0x%" PRI_klx,
						   nread, nread + len,
						   addr - nread);
				}
				return -1;
			default:
				/* all the rest is strange and should be reported */
				perror_msg("umovestr: PTRACE_PEEKDATA pid:%d @0x%" PRI_klx,
					   pid, addr);
				return -1;
		}

		unsigned int m = MIN(sizeof(long) - residue, len);
		memcpy(laddr, &u.x[residue], m);

		/* Scan the bytes just read for the string terminator. */
		while (residue < sizeof(long))
			if (u.x[residue++] == '\0')
				return (laddr - orig_addr) + residue;

		residue = 0;
		addr += sizeof(long);
		laddr += m;
		nread += m;
		len -= m;
	}

	return 0;
}

/*
 * Like `umove' but make the additional effort of looking
 * for a terminating zero byte.
 *
 * Returns < 0 on error, strlen + 1 if NUL was seen,
 * else 0 if len bytes were read but no NUL byte seen.
 *
 * Note: there is no guarantee we won't overwrite some bytes
 * in laddr[] _after_ terminating NUL (but, of course,
 * we never write past laddr[len-1]).
 */
int
umovestr(struct tcb *const tcp, kernel_ulong_t addr, unsigned int len,
	 char *laddr)
{
	if (tracee_addr_is_invalid(addr))
		return -1;

	const int pid = tcp->pid;

	if (process_vm_readv_not_supported)
		return umovestr_peekdata(pid, addr, len, laddr);

	const size_t page_size = get_pagesize();
	const size_t page_mask = page_size - 1;
	unsigned int nread = 0;

	while (len) {
		/*
		 * Don't cross pages, otherwise we can get EFAULT
		 * and fail to notice that terminating NUL lies
		 * in the existing (first) page.
		 */
		unsigned int chunk_len = len > page_size ? page_size : len;
		unsigned int end_in_page = (addr + chunk_len) & page_mask;
		if (chunk_len > end_in_page)	/* crosses to the next page */
			chunk_len -= end_in_page;

		int r = vm_read_mem(pid, laddr, addr, chunk_len);
		if (r > 0) {
			char *nul_addr = memchr(laddr, '\0', r);

			if (nul_addr)
				return (nul_addr - laddr) + 1;
			addr += r;
			laddr += r;
			nread += r;
			len -= r;
			continue;
		}
		switch (errno) {
			case ENOSYS:
			case EPERM:
				/* try PTRACE_PEEKDATA */
				if (!nread)
					return umovestr_peekdata(pid, addr,
								 len, laddr);
				ATTRIBUTE_FALLTHROUGH;
			case EFAULT: case EIO:
				/* address space is inaccessible */
				if (nread)
					perror_msg("umovestr: short read (%d < %d)"
						   " @0x%" PRI_klx,
						   nread, nread + len,
						   addr - nread);
				return -1;
			case ESRCH:
				/* the process is gone */
				return -1;
			default:
				/* all the rest is strange and should be reported */
				perror_msg("process_vm_readv: pid:%d @0x%" PRI_klx,
					   pid, addr);
				return -1;
		}
	}

	return 0;
}
/**
 * Copy to @dest_tracer at most @max_size bytes from the string
 * pointed to by @src_tracee within the memory space of the @tracee
 * process.  This function returns -errno on error, otherwise
 * it returns the number in bytes of the string, including the
 * end-of-string terminator.
 */
int read_string(const Tracee *tracee, char *dest_tracer, word_t src_tracee, word_t max_size)
{
	word_t *src  = (word_t *)src_tracee;
	word_t *dest = (word_t *)dest_tracer;

	word_t nb_trailing_bytes;
	word_t nb_full_words;
	word_t word, i, j;

	uint8_t *src_word;
	uint8_t *dest_word;

	if (belongs_to_heap_prealloc(tracee, src_tracee))
		return -EFAULT;

#if defined(HAVE_PROCESS_VM)
	/* [process_vm] system calls do not check the memory regions
	 * in the remote process until just before doing the
	 * read/write.  Consequently, a partial read/write [1] may
	 * result if one of the remote_iov elements points to an
	 * invalid memory region in the remote process.  No further
	 * reads/writes will be attempted beyond that point.  Keep
	 * this in mind when attempting to read data of unknown length
	 * (such as C strings that are null-terminated) from a remote
	 * process, by avoiding spanning memory pages (typically 4KiB)
	 * in a single remote iovec element.  (Instead, split the
	 * remote read into two remote_iov elements and have them
	 * merge back into a single write local_iov entry.  The first
	 * read entry goes up to the page boundary, while the second
	 * starts on the next page boundary.).
	 *
	 * [1] Partial transfers apply at the granularity of iovec
	 * elements.  These system calls won't perform a partial
	 * transfer that splits a single iovec element.
	 *
	 * -- man 2 process_vm_readv
	 */
	long status;
	size_t size;
	size_t offset;
	struct iovec local;
	struct iovec remote;

	static size_t chunk_size = 0;
	static uintptr_t chunk_mask;

	/* A chunk shall not cross a page boundary.  Computed once and
	 * cached in static storage. */
	if (chunk_size == 0) {
		chunk_size = sysconf(_SC_PAGESIZE);
		chunk_size = (chunk_size > 0 && chunk_size < 1024 ? chunk_size : 1024);
		chunk_mask = ~(chunk_size - 1);
	}

	/* Read the string by chunk. */
	offset = 0;
	do {
		uintptr_t current_chunk = (src_tracee + offset) & chunk_mask;
		uintptr_t next_chunk    = current_chunk + chunk_size;

		/* Compute the number of bytes available up to the
		 * next chunk or up to max_size.  */
		size = next_chunk - (src_tracee + offset);
		size = (size < max_size - offset ? size : max_size - offset);

		local.iov_base = (uint8_t *)dest + offset;
		local.iov_len  = size;

		remote.iov_base = (uint8_t *)src + offset;
		remote.iov_len  = size;

		status = process_vm_readv(tracee->pid, &local, 1, &remote, 1, 0);
		if ((size_t) status != size)
			goto fallback;

		status = strnlen(local.iov_base, size);
		if ((size_t) status < size) {
			/* Terminator found within this chunk: return the
			 * string length including the terminator. */
			size = offset + status + 1;
			assert(size <= max_size);
			return size;
		}

		offset += size;
	} while (offset < max_size);
	assert(offset == max_size);

	/* Fallback to ptrace if something went wrong. */
fallback:
#endif /* HAVE_PROCESS_VM */

	nb_trailing_bytes = max_size % sizeof(word_t);
	nb_full_words = (max_size - nb_trailing_bytes) / sizeof(word_t);

	/* Copy one word by one word, except for the last one. */
	for (i = 0; i < nb_full_words; i++) {
		/* PTRACE_PEEKDATA returns the data in-band, so errors are
		 * only visible through errno; it must be cleared before the
		 * call.  It is typically non-zero here, since this path is
		 * reached right after a failed process_vm_readv(), and a
		 * successful ptrace() does not reset it.  */
		errno = 0;
		word = ptrace(PTRACE_PEEKDATA, tracee->pid, src + i, NULL);
		if (errno != 0)
			return -EFAULT;

		store_word(&dest[i], word);

		/* Stop once an end-of-string is detected. */
		src_word = (uint8_t *)&word;
		for (j = 0; j < sizeof(word_t); j++)
			if (src_word[j] == '\0')
				return i * sizeof(word_t) + j + 1;
	}

	/* Copy the bytes from the last word carefully since we have
	 * to not overwrite the bytes lying beyond @dest_tracer. */
	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, tracee->pid, src + i, NULL);
	if (errno != 0)
		return -EFAULT;

	dest_word = (uint8_t *)&dest[i];
	src_word  = (uint8_t *)&word;

	for (j = 0; j < nb_trailing_bytes; j++) {
		dest_word[j] = src_word[j];
		if (src_word[j] == '\0')
			break;
	}

	return i * sizeof(word_t) + j + 1;
}