/*
 * Detach the ptrace tracer from every thread of 'pid'.
 * A no-op if the process has already disappeared.
 */
void arch_ptraceDetach(pid_t pid)
{
    /* Probe liveness first: nothing to detach from if the target is gone */
    if (syscall(__NR_kill, pid, 0) == -1 && errno == ESRCH) {
        LOG_D("PID: %d no longer exists", pid);
        return;
    }

    int tasks[MAX_THREAD_IN_TASK + 1] = { 0 };
    if (!arch_listThreads(tasks, MAX_THREAD_IN_TASK, pid)) {
        LOG_E("Couldn't read thread list for pid '%d'", pid);
        return;
    }

    /* For each thread: stop it, wait until it is actually stopped, detach */
    for (int idx = 0; idx < MAX_THREAD_IN_TASK && tasks[idx]; idx++) {
        ptrace(PTRACE_INTERRUPT, tasks[idx], NULL, NULL);
        arch_ptraceWaitForPidStop(tasks[idx]);
        ptrace(PTRACE_DETACH, tasks[idx], NULL, NULL);
    }
}
static size_t arch_getProcMem(pid_t pid, uint8_t * buf, size_t len, REG_TYPE pc) { /* * Let's try process_vm_readv first */ const struct iovec local_iov = { .iov_base = buf, .iov_len = len, }; const struct iovec remote_iov = { .iov_base = (void *)(uintptr_t) pc, .iov_len = len, }; if (process_vm_readv(pid, &local_iov, 1, &remote_iov, 1, 0) == (ssize_t) len) { return len; } // Debug if failed since it shouldn't happen very often PLOG_D("process_vm_readv() failed"); /* * Ok, let's do it via ptrace() then. * len must be aligned to the sizeof(long) */ int cnt = len / sizeof(long); size_t memsz = 0; for (int x = 0; x < cnt; x++) { uint8_t *addr = (uint8_t *) (uintptr_t) pc + (int)(x * sizeof(long)); long ret = ptrace(PTRACE_PEEKDATA, pid, addr, NULL); if (errno != 0) { PLOG_W("Couldn't PT_READ_D on pid %d, addr: %p", pid, addr); break; } memsz += sizeof(long); memcpy(&buf[x * sizeof(long)], &ret, sizeof(long)); } return memsz; } void arch_ptraceGetCustomPerf(honggfuzz_t * hfuzz, pid_t pid, uint64_t * cnt UNUSED) { if ((hfuzz->dynFileMethod & _HF_DYNFILE_CUSTOM) == 0) { return; } if (hfuzz->persistent) { ptrace(PTRACE_INTERRUPT, pid, 0, 0); arch_ptraceWaitForPidStop(pid); } defer { if (hfuzz->persistent) { ptrace(PTRACE_CONT, pid, 0, 0); } }; #if defined(__x86_64__) struct user_regs_struct_64 regs; if (ptrace(PTRACE_GETREGS, pid, 0, ®s) != -1) { *cnt = regs.gs_base; return; } #endif /* defined(__x86_64__) */ *cnt = 0ULL; } void arch_ptraceSetCustomPerf(honggfuzz_t * hfuzz, pid_t pid, uint64_t cnt UNUSED) { if ((hfuzz->dynFileMethod & _HF_DYNFILE_CUSTOM) == 0) { return; } if (hfuzz->persistent) { ptrace(PTRACE_INTERRUPT, pid, 0, 0); arch_ptraceWaitForPidStop(pid); } defer { if (hfuzz->persistent) { ptrace(PTRACE_CONT, pid, 0, 0); } }; #if defined(__x86_64__) struct user_regs_struct_64 regs; if (ptrace(PTRACE_GETREGS, pid, 0, ®s) == -1) { return; } regs.gs_base = cnt; if (ptrace(PTRACE_SETREGS, pid, 0, ®s) == -1) { return; } #endif /* defined(__x86_64__) */ } static size_t 
arch_getPC(pid_t pid, REG_TYPE * pc, REG_TYPE * status_reg UNUSED) { /* * Some old ARM android kernels are failing with PTRACE_GETREGS to extract * the correct register values if struct size is bigger than expected. As such the * 32/64-bit multiplexing trick is not working for them in case PTRACE_GETREGSET * fails or is not implemented. To cover such cases we explicitly define * the struct size to 32bit version for arm CPU. */ #if defined(__arm__) struct user_regs_struct_32 regs; #else HEADERS_STRUCT regs; #endif struct iovec pt_iov = { .iov_base = ®s, .iov_len = sizeof(regs), }; if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &pt_iov) == -1L) { PLOG_D("ptrace(PTRACE_GETREGSET) failed"); // If PTRACE_GETREGSET fails, try PTRACE_GETREGS if available #if PTRACE_GETREGS_AVAILABLE if (ptrace(PTRACE_GETREGS, pid, 0, ®s)) { PLOG_D("ptrace(PTRACE_GETREGS) failed"); LOG_W("ptrace PTRACE_GETREGSET & PTRACE_GETREGS failed to extract target registers"); return 0; } #else return 0; #endif } #if defined(__i386__) || defined(__x86_64__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; *pc = r32->eip; *status_reg = r32->eflags; return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->ip; *status_reg = r64->flags; return pt_iov.iov_len; } LOG_W("Unknown registers structure size: '%zd'", pt_iov.iov_len); return 0; #endif /* defined(__i386__) || defined(__x86_64__) */ #if defined(__arm__) || defined(__aarch64__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; #ifdef __ANDROID__ *pc = r32->ARM_pc; *status_reg = r32->ARM_cpsr; #else *pc = r32->uregs[ARM_pc]; *status_reg = r32->uregs[ARM_cpsr]; #endif return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct 
user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->pc; *status_reg = r64->pstate; return pt_iov.iov_len; } LOG_W("Unknown registers structure size: '%zd'", pt_iov.iov_len); return 0; #endif /* defined(__arm__) || defined(__aarch64__) */ #if defined(__powerpc64__) || defined(__powerpc__) /* * 32-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_32)) { struct user_regs_struct_32 *r32 = (struct user_regs_struct_32 *)®s; *pc = r32->nip; return pt_iov.iov_len; } /* * 64-bit */ if (pt_iov.iov_len == sizeof(struct user_regs_struct_64)) { struct user_regs_struct_64 *r64 = (struct user_regs_struct_64 *)®s; *pc = r64->nip; return pt_iov.iov_len; } LOG_W("Unknown registers structure size: '%zd'", pt_iov.iov_len); return 0; #endif /* defined(__powerpc64__) || defined(__powerpc__) */ LOG_D("Unknown/unsupported CPU architecture"); return 0; } static void arch_getInstrStr(pid_t pid, REG_TYPE * pc, char *instr) { /* * We need a value aligned to 8 * which is sizeof(long) on 64bit CPU archs (on most of them, I hope;) */ uint8_t buf[MAX_INSTR_SZ]; size_t memsz; REG_TYPE status_reg = 0; snprintf(instr, _HF_INSTR_SZ, "%s", "[UNKNOWN]"); size_t pcRegSz = arch_getPC(pid, pc, &status_reg); if (!pcRegSz) { LOG_W("Current architecture not supported for disassembly"); return; } if ((memsz = arch_getProcMem(pid, buf, sizeof(buf), *pc)) == 0) { snprintf(instr, _HF_INSTR_SZ, "%s", "[NOT_MMAPED]"); return; } #if !defined(__ANDROID__) arch_bfdDisasm(pid, buf, memsz, instr); #else cs_arch arch; cs_mode mode; #if defined(__arm__) || defined(__aarch64__) arch = (pcRegSz == sizeof(struct user_regs_struct_64)) ? CS_ARCH_ARM64 : CS_ARCH_ARM; if (arch == CS_ARCH_ARM) { mode = (status_reg & 0x20) ? CS_MODE_THUMB : CS_MODE_ARM; } else { mode = CS_MODE_ARM; } #elif defined(__i386__) || defined(__x86_64__) arch = CS_ARCH_X86; mode = (pcRegSz == sizeof(struct user_regs_struct_64)) ? 
CS_MODE_64 : CS_MODE_32; #else LOG_E("Unknown/Unsupported Android CPU architecture"); #endif csh handle; cs_err err = cs_open(arch, mode, &handle); if (err != CS_ERR_OK) { LOG_W("Capstone initialization failed: '%s'", cs_strerror(err)); return; } cs_insn *insn; size_t count = cs_disasm(handle, buf, sizeof(buf), *pc, 0, &insn); if (count < 1) { LOG_W("Couldn't disassemble the assembler instructions' stream: '%s'", cs_strerror(cs_errno(handle))); cs_close(&handle); return; } snprintf(instr, _HF_INSTR_SZ, "%s %s", insn[0].mnemonic, insn[0].op_str); cs_free(insn, count); cs_close(&handle); #endif /* defined(__ANDROID__) */ for (int x = 0; instr[x] && x < _HF_INSTR_SZ; x++) { if (instr[x] == '/' || instr[x] == '\\' || isspace(instr[x]) || !isprint(instr[x])) { instr[x] = '_'; } } return; } static void arch_hashCallstack(honggfuzz_t * hfuzz, fuzzer_t * fuzzer, funcs_t * funcs, size_t funcCnt, bool enableMasking) { uint64_t hash = 0; for (size_t i = 0; i < funcCnt && i < hfuzz->linux.numMajorFrames; i++) { /* * Convert PC to char array to be compatible with hash function */ char pcStr[REGSIZEINCHAR] = { 0 }; snprintf(pcStr, REGSIZEINCHAR, REG_PD REG_PM, (REG_TYPE) (long)funcs[i].pc); /* * Hash the last three nibbles */ hash ^= util_hash(&pcStr[strlen(pcStr) - 3], 3); } /* * If only one frame, hash is not safe to be used for uniqueness. We mask it * here with a constant prefix, so analyzers can pick it up and create filenames * accordingly. 'enableMasking' is controlling masking for cases where it should * not be enabled (e.g. fuzzer worker is from verifier). */ if (enableMasking && funcCnt == 1) { hash |= _HF_SINGLE_FRAME_MASK; } fuzzer->backtrace = hash; }
/*
 * Main tracing loop for one fuzzing iteration: attach (once) to the target,
 * enable perf counters, resume the child, then reap and analyze every wait4()
 * event until the fuzzed process exits or no tracked children remain.
 */
void arch_reapChild(honggfuzz_t * hfuzz, fuzzer_t * fuzzer)
{
    /* With an external pid (-p) we trace that long-lived process, not our child */
    pid_t ptracePid = (hfuzz->pid > 0) ? hfuzz->pid : fuzzer->pid;
    pid_t childPid = fuzzer->pid;

    timer_t timerid;
    if (arch_setTimer(&timerid) == false) {
        LOG_F("Couldn't set timer");
    }

    /* The child is expected to be sitting in a stopped state before we begin */
    if (arch_ptraceWaitForPidStop(childPid) == false) {
        LOG_F("PID %d not in a stopped state", childPid);
    }
    LOG_D("PID: %d is in a stopped state now", childPid);

    /* NOTE: static flag — attach state is shared across invocations */
    static bool ptraceAttached = false;
    if (ptraceAttached == false) {
        if (arch_ptraceAttach(ptracePid) == false) {
            LOG_F("arch_ptraceAttach(pid=%d) failed", ptracePid);
        }
        /* In case we fuzz a long-lived process (-p pid) we attach to it once only */
        if (ptracePid != childPid) {
            ptraceAttached = true;
        }
    }
    /* A long-lived processed could have already exited, and we wouldn't know */
    if (kill(ptracePid, 0) == -1) {
        PLOG_F("Liveness of %d questioned", ptracePid);
    }

    perfFd_t perfFds;
    if (arch_perfEnable(ptracePid, hfuzz, &perfFds) == false) {
        LOG_F("Couldn't enable perf counters for pid %d", ptracePid);
    }
    /* Kick the stopped child into execution */
    if (kill(childPid, SIGCONT) == -1) {
        PLOG_F("Restarting PID: %d failed", childPid);
    }

    for (;;) {
        int status;
        pid_t pid = wait4(-1, &status, __WALL | __WNOTHREAD, NULL);
        if (pid == -1 && errno == EINTR) {
            /* Presumably our timer signal interrupted wait4() — enforce the
             * time limit if one was configured (TODO confirm signal source) */
            if (hfuzz->tmOut) {
                arch_checkTimeLimit(hfuzz, fuzzer);
            }
            continue;
        }
        if (pid == -1 && errno == ECHILD) {
            LOG_D("No more processes to track");
            break;
        }
        if (pid == -1) {
            PLOG_F("wait4() failed");
        }
        LOG_D("PID '%d' returned with status '%d'", pid, status);

        /* Refresh the custom counter on every traced event */
        arch_ptraceGetCustomPerf(hfuzz, ptracePid, &fuzzer->hwCnts.customCnt);

        /* Direct-child mode: every event belongs to the traced process */
        if (ptracePid == childPid) {
            arch_ptraceAnalyze(hfuzz, status, pid, fuzzer);
            continue;
        }
        /* External-pid mode: the wrapper child exiting ends this iteration */
        if (pid == childPid && (WIFEXITED(status) || WIFSIGNALED(status))) {
            break;
        }
        /* Other (non-exit) events from the wrapper child are ignored */
        if (pid == childPid) {
            continue;
        }
        arch_ptraceAnalyze(hfuzz, status, pid, fuzzer);
    }

    arch_removeTimer(&timerid);
    arch_perfAnalyze(hfuzz, fuzzer, &perfFds);
    return;
}