// Virtualizes timeout-taking syscalls (futex/epoll_wait/epoll_pwait/poll) so
// that simulated time, not host time, decides when they fire.
// Returns the post-patch function to run when the syscall returns.
PostPatchFn PatchTimeoutSyscall(PrePatchArgs args) {
    if (SkipTimeoutVirt(args)) return NullPostPatch;

    int syscall = PIN_GetSyscallNumber(args.ctxt, args.std);
    assert_msg(syscall == SYS_futex || syscall == SYS_epoll_wait || syscall == SYS_epoll_pwait || syscall == SYS_poll,
               "Invalid timeout syscall %d", syscall);

    // Futexes need extra pre/post bookkeeping (wait/wake accounting).
    FutexInfo fi = {0, 0};
    if (syscall == SYS_futex) fi = PrePatchFutex(args.tid, args.ctxt, args.std);

    if (PrePatchTimeoutSyscall(args.tid, args.ctxt, args.std, syscall)) {
        // Timeout was faked: capture the syscall PC and the original timeout
        // argument so the post-patch can rewind/retry or restore state.
        ADDRINT prevIp = PIN_GetContextReg(args.ctxt, REG_INST_PTR);
        ADDRINT timeoutArgVal = PIN_GetSyscallArgument(args.ctxt, args.std, getTimeoutArg(syscall));
        return [syscall, prevIp, timeoutArgVal, fi](PostPatchArgs args) {
            if (PostPatchTimeoutSyscall(args.tid, args.ctxt, args.std, syscall, prevIp, timeoutArgVal)) {
                return PPA_USE_NOP_PTRS; // retry
            } else {
                if (syscall == SYS_futex) PostPatchFutex(args.tid, fi, args.ctxt, args.std);
                return PPA_USE_JOIN_PTRS; // finish
            }
        };
    } else {
        // Not virtualized; futexes still need their post-syscall accounting.
        if (syscall == SYS_futex) {
            return [fi](PostPatchArgs args) {
                PostPatchFutex(args.tid, fi, args.ctxt, args.std);
                return PPA_NOTHING;
            };
        } else {
            return NullPostPatch;
        }
    }
}
// Syscall-entry callback that rewrites selected syscalls before the kernel
// sees them:
//  - open("does-not-exist1"...) -> getpid (harmless no-op)
//  - sigaction(SIGUSR1, ...)    -> getpid
//  - open("does-not-exist2"...) -> exit(0)
VOID OnSyscallEntry(THREADID threadIndex, CONTEXT *ctxt, SYSCALL_STANDARD std, VOID *v)
{
    ADDRINT sysnum = PIN_GetSyscallNumber(ctxt, std);
    ADDRINT arg0 = PIN_GetSyscallArgument(ctxt, std, 0);
#ifdef TARGET_BSD
    // SYS___syscall is the indirect dispatcher: the real syscall number
    // travels in arg0 and the remaining arguments shift by one slot.
    if (sysnum == SYS___syscall) {
        sysnum = arg0;
        arg0 = PIN_GetSyscallArgument(ctxt, std, 1);
    }
#endif
    const bool isOpen = (sysnum == SYS_open);
    // NOTE(review): for open, arg0 is read directly as an application pointer
    // (no PIN_SafeCopy) -- assumes the path string is mapped and readable.
    const char *path = reinterpret_cast<char *>(arg0);

    if (isOpen && strncmp(path, "does-not-exist1", sizeof("does-not-exist1") - 1) == 0)
        PIN_SetSyscallNumber(ctxt, std, SYS_getpid);

    if (IsSigaction(sysnum) && arg0 == SIGUSR1)
        PIN_SetSyscallNumber(ctxt, std, SYS_getpid);

    if (isOpen && strncmp(path, "does-not-exist2", sizeof("does-not-exist2") - 1) == 0) {
        PIN_SetSyscallNumber(ctxt, std, SYS_exit);
        PIN_SetSyscallArgument(ctxt, std, 0, 0);
    }
}
// Logs sysarch(2) calls. For SET_{FS,GS}BASE it reads the new base value from
// application memory; for the GET variants it stashes the destination address
// in REG_INST_G0 so it can be filled in later.
static VOID SyscallEntry(THREADID threadIndex, CONTEXT *ctxt, SYSCALL_STANDARD std, VOID *v)
{
    if (PIN_GetSyscallNumber(ctxt, std) != SYS_sysarch)
        return;

    const ADDRINT pc = PIN_GetContextReg(ctxt, REG_INST_PTR);
    const ADDRINT op = PIN_GetSyscallArgument(ctxt, std, 0);
    const ADDRINT addr = PIN_GetSyscallArgument(ctxt, std, 1);

    ADDRINT value = 0;
    if (op == AMD64_SET_FSBASE || op == AMD64_SET_GSBASE) {
        // SET_*: addr points at the new TLS base; fetch it from app memory.
        if (PIN_SafeCopy(&value, Addrint2VoidStar(addr), sizeof(ADDRINT)) != sizeof(ADDRINT))
            Out << Header(threadIndex, pc) << "Failed to read actual TLS pointer" << endl;
    } else {
        // Remember the location where to write the segment register in REG_INST_G0
        PIN_SetContextReg(ctxt, REG_INST_G0, addr);
        value = addr;
    }

    Out << Header(threadIndex, pc) << "sysarch(" << SysArchFunc(op) << ", 0x"
        << std::hex << value << ")" << std::endl;
}
// Syscall allowlist: after the first SYS_alarm arms the check, only
// read/write/exit/exit_group are permitted. A second alarm, or any other
// syscall while armed, aborts the process.
VOID SyscallEntry(THREADID threadIndex, CONTEXT *ctxt, SYSCALL_STANDARD std, VOID *v)
{
    const ADDRINT nr = PIN_GetSyscallNumber(ctxt, std);

    // Always-permitted syscalls.
    if (nr == SYS_read || nr == SYS_write || nr == SYS_exit || nr == SYS_exit_group)
        return;

    if (nr == SYS_alarm) {
        if (activated)  // a second alarm counts as a violation
            exit(-1);
        activated = 1;
        return;
    }

    if (activated) {
        fprintf(stderr, "Invalid syscall : %lu\n", nr);
        exit(-1);
    }
}
// Emit a syscall-entry event record into the trace buffer.
void sys_enter (THREADID tid, CONTEXT *ctxt, SYSCALL_STANDARD std, void *v)
{
    struct event_sysenter ev;
    ev.comm.type = ET_SYSENTER;
    ev.comm.tid = tid;
    ev.standard = std;
    ev.sysnum = PIN_GetSyscallNumber(ctxt, std);
    tb_write((event_common *)&ev, sizeof(ev));
}
// Forward the syscall number and all six argument slots to sysargs().
VOID SyscallEntry(THREADID threadIndex, CONTEXT *ctxt, SYSCALL_STANDARD std, VOID *v)
{
    ADDRINT a[6];
    for (int i = 0; i < 6; ++i)
        a[i] = PIN_GetSyscallArgument(ctxt, std, i);
    sysargs(PIN_GetSyscallNumber(ctxt, std), a[0], a[1], a[2], a[3], a[4], a[5]);
}
// Capture the call-site PC, the syscall number, and all six argument slots,
// then hand everything to SysBefore() for logging.
VOID SyscallEntry(THREADID threadIndex, CONTEXT *ctxt, SYSCALL_STANDARD std, VOID *v)
{
    const ADDRINT ip = PIN_GetContextReg(ctxt, REG_INST_PTR);
    const ADDRINT nr = PIN_GetSyscallNumber(ctxt, std);
    SysBefore(ip, nr,
              PIN_GetSyscallArgument(ctxt, std, 0),
              PIN_GetSyscallArgument(ctxt, std, 1),
              PIN_GetSyscallArgument(ctxt, std, 2),
              PIN_GetSyscallArgument(ctxt, std, 3),
              PIN_GetSyscallArgument(ctxt, std, 4),
              PIN_GetSyscallArgument(ctxt, std, 5));
}
// Runs after a virtualized timeout syscall returns. Decides whether the
// fake-timeout syscall must be retried; if so, rewinds the thread's PC to the
// syscall instruction and re-arms the syscall number. Otherwise restores the
// original timeout argument and leaves fake-timeout mode.
// Returns true iff the syscall will be retried.
static bool PostPatchTimeoutSyscall(uint32_t tid, CONTEXT* ctxt, SYSCALL_STANDARD std, int syscall, ADDRINT prevIp, ADDRINT timeoutArgVal) {
    assert(inFakeTimeoutMode[tid]);
    // After the syscall, the syscall-number register holds the return value.
    int res = (int)PIN_GetSyscallNumber(ctxt, std);

    // Decide if it timed out
    bool timedOut;
    if (syscall == SYS_futex) {
        timedOut = (res == -ETIMEDOUT);
    } else {
        // epoll_wait/epoll_pwait/poll return 0 on timeout (no fds ready).
        timedOut = (res == 0);
    }

    bool isSleeping = zinfo->sched->isSleeping(procIdx, tid);

    // Decide whether to retry
    bool retrySyscall;
    if (!timedOut) {
        // Real completion: stop any simulated sleep and finish the syscall.
        if (isSleeping) zinfo->sched->notifySleepEnd(procIdx, tid);
        retrySyscall = false;
    } else {
        // Fake timeout fired; keep retrying while the scheduler still
        // considers this thread asleep in simulated time.
        retrySyscall = isSleeping;
    }

    if (retrySyscall && zinfo->procArray[procIdx]->isInFastForward()) {
        warn("[%d] Fast-forwarding started, not retrying timeout syscall (%s)", tid, GetSyscallName(syscall));
        retrySyscall = false;
        assert(isSleeping);
        zinfo->sched->notifySleepEnd(procIdx, tid);
    }

    if (retrySyscall) {
        // ADDRINT curIp = PIN_GetContextReg(ctxt, REG_INST_PTR);
        //info("[%d] post-patch, retrying, IP: 0x%lx -> 0x%lx", tid, curIp, prevIp);
        // Rewind to the syscall instruction and re-arm the syscall number so
        // the same syscall executes again.
        PIN_SetContextReg(ctxt, REG_INST_PTR, prevIp);
        PIN_SetSyscallNumber(ctxt, std, syscall);
    } else {
        // Restore timeout arg
        PIN_SetSyscallArgument(ctxt, std, getTimeoutArg(syscall), timeoutArgVal);
        inFakeTimeoutMode[tid] = false;
        // Restore arg? I don't think we need this!
        /*if (syscall == SYS_futex) {
            PIN_SetSyscallNumber(ctxt, std, -ETIMEDOUT);
        } else {
            assert(syscall == SYS_epoll_wait || syscall == SYS_epoll_pwait || syscall == SYS_poll);
            PIN_SetSyscallNumber(ctxt, std, 0); //no events returned
        }*/
    }
    //info("[%d] post-patch %s (%d), timedOut %d, sleeping (orig) %d, retrying %d, orig res %d, patched res %d", tid, GetSyscallName(syscall), syscall, timedOut, isSleeping, retrySyscall, res, (int)PIN_GetSyscallNumber(ctxt, std));
    return retrySyscall;
}
// python binding: getSyscallNumber(std) -> int
// Reads the syscall number from the current Pin context for the given
// SYSCALL_STANDARD id; raises TypeError for a non-integer argument.
static PyObject *Triton_getSyscallNumber(PyObject *self, PyObject *std) {
  if (!PyLong_Check(std) && !PyInt_Check(std))
    return PyErr_Format(PyExc_TypeError, "getSyscallNumber(): expected an id (integer) as argument");

  LEVEL_CORE::SYSCALL_STANDARD standard =
      static_cast<LEVEL_CORE::SYSCALL_STANDARD>(PyLong_AsLong(std));
  CONTEXT *ctx = static_cast<CONTEXT *>(ap.getCurrentCtxH()->getCtx());
  uint64 syscallNumber = PIN_GetSyscallNumber(ctx, standard);
  return PyLong_FromLong(syscallNumber);
}
/* Taint from Syscalls */
// On read(2), record [buf, buf + count) as a tainted byte range.
VOID Syscall_entry(THREADID thread_id, CONTEXT *ctx, SYSCALL_STANDARD std, void *v)
{
    /* Taint from read */
    if (PIN_GetSyscallNumber(ctx, std) != __NR_read)
        return;

    TRICKS();

    struct range taint;
    taint.start = static_cast<UINT64>(PIN_GetSyscallArgument(ctx, std, 1));             /* buf */
    taint.end = taint.start + static_cast<UINT64>(PIN_GetSyscallArgument(ctx, std, 2)); /* + count */
    bytesTainted.push_back(taint);

    std::cout << "[TAINT]\t\t\tbytes tainted from " << std::hex << "0x" << taint.start
              << " to 0x" << taint.end << " (via read)" << std::endl;
}
// On read(2), mark every byte address of the destination buffer as tainted.
//
// BUG FIX: the loop counter was `unsigned int` while the bound `size` is
// UINT64; a count above UINT_MAX would wrap the counter and loop forever.
// The counter is now UINT64 to match the bound.
VOID Syscall_entry(THREADID thread_id, CONTEXT *ctx, SYSCALL_STANDARD std, void *v)
{
    if (PIN_GetSyscallNumber(ctx, std) != __NR_read)
        return;

    TRICKS(); /* tricks to ignore the first open */

    const UINT64 start = static_cast<UINT64>(PIN_GetSyscallArgument(ctx, std, 1)); /* buf */
    const UINT64 size = static_cast<UINT64>(PIN_GetSyscallArgument(ctx, std, 2));  /* count */

    for (UINT64 i = 0; i < size; i++)
        addressTainted.push_back(start + i);

    std::cout << "[TAINT]\t\t\tbytes tainted from " << std::hex << "0x" << start
              << " to 0x" << start + size << " (via read)" << std::endl;
}
void PostPatchFutex(uint32_t tid, FutexInfo fi, CONTEXT* ctxt, SYSCALL_STANDARD std) { int res = (int) PIN_GetSyscallNumber(ctxt, std); if (isFutexWaitOp(fi.op) && res == 0) { zinfo->sched->notifyFutexWaitWoken(procIdx, tid); } else if (isFutexWakeOp(fi.op) && res >= 0) { /* In contrast to the futex manpage, from the kernel's futex.c * (do_futex), WAKE and WAKE_OP return the number of threads woken up, * but the REQUEUE and CMP_REQUEUE and REQUEUE_PI ops return the number * of threads woken up + requeued. However, these variants * (futex_requeue) first try to wake the specified threads, then * requeue as many other threads as they can. * * Therefore, this wokenUp expression should be correct for all variants * of SYS_futex that wake up threads (WAKE, REQUEUE, CMP_REQUEUE, ...) */ uint32_t wokenUp = std::min(res, fi.val); zinfo->sched->notifyFutexWakeEnd(procIdx, tid, wokenUp); } }
// mtrace syscall-entry hook: flushes pending memory-write records, logs
// execve immediately, and captures mmap/munmap arguments so the syscall-exit
// hook can log the mapping once the return value is known.
VOID SyscallEntry(THREADID threadIndex, CONTEXT *ctxt, SYSCALL_STANDARD std, VOID *v) {
    flush_mwrite(threadIndex);
    ADDRINT num = PIN_GetSyscallNumber(ctxt, std);
    if(num == SYS_execve) {
        // Log execve at entry: on success the call never returns here.
        PrintTime(NULL);
        output_dummy_stacktrace();
        mprintf("mtrace_execve(%ld, %u, %u, %u, %u) = 0\n", syscall(SYS_gettid), PIN_GetTid(), getpid(), threadIndex, PIN_ThreadId());
        m_dump_bytes(NULL, 0);
        return;
    }
    if (num != SYS_mmap && num != SYS_munmap) {
        // ip == 0 marks "nothing pending" for the syscall-exit hook.
        thread_to_syscall[threadIndex].ip = 0;
        return;
    }
    thread_to_syscall[threadIndex].num = num;
    thread_to_syscall[threadIndex].ip = PIN_GetContextReg(ctxt, REG_INST_PTR);
    if(num == SYS_mmap) {
        for(int i = 0; i < 6; i++) {
#if defined(TARGET_LINUX) && defined(TARGET_IA32)
            // IA-32 old_mmap passes a single pointer (arg 0) to a 6-slot
            // argument block in application memory.
            thread_to_syscall[threadIndex].args[i] = (reinterpret_cast<ADDRINT *>(PIN_GetSyscallArgument(ctxt, std, 0)))[i];
#else
            thread_to_syscall[threadIndex].args[i] = PIN_GetSyscallArgument(ctxt, std, i);
#endif
        }
    } else {
        // munmap: only addr and length are captured.
        for(int i = 0; i < 2; i++) {
            thread_to_syscall[threadIndex].args[i] = PIN_GetSyscallArgument(ctxt, std, i);
        }
    }
}
/*
 * syscall enter notification (analysis function)
 *
 * save the system call context and invoke the pre-syscall callback
 * function (if registered)
 *
 * @tid: thread id
 * @ctx: CPU context
 * @std: syscall standard (e.g., Linux IA-32, IA-64, etc)
 * @v: callback value
 */
static void sysenter_save(THREADID tid, CONTEXT *ctx, SYSCALL_STANDARD std, VOID *v)
{
    /* get the thread context */
    thread_ctx_t *thread_ctx = (thread_ctx_t *) PIN_GetContextReg(ctx, thread_ctx_ptr);

    /* get the syscall number */
    size_t syscall_nr = PIN_GetSyscallNumber(ctx, std);

    /* unknown syscall; optimized branch */
    if (unlikely(syscall_nr >= SYSCALL_MAX)) {
        //LOG(string(__FUNCTION__) + ": unknown syscall (num=" +
        //  decstr(syscall_nr) + ")\n");
        /* syscall number is set to -1; hint for the sysexit_save() */
        thread_ctx->syscall_ctx.nr = -1;
        /* no context save and no pre-syscall callback invocation */
        return;
    }

    /* pass the system call number to sysexit_save() */
    thread_ctx->syscall_ctx.nr = syscall_nr;

    /*
     * check if we need to save the arguments for that syscall
     *
     * we save only when we have a callback registered or the syscall
     * returns a value in the arguments
     *
     * NOTE: bitwise `|` (not `||`) on the two flag fields -- equivalent
     * result for flags, without a second branch.
     */
    if (syscall_desc[syscall_nr].save_args | syscall_desc[syscall_nr].retval_args) {
        /*
         * dump only the appropriate number of arguments
         * or yet another lame way to avoid a loop (vpk)
         *
         * each case deliberately falls through so that all
         * lower-numbered arguments are saved as well
         */
        switch (syscall_desc[syscall_nr].nargs) {
            /* 6 */
            case SYSCALL_ARG5 + 1:
                thread_ctx->syscall_ctx.arg[SYSCALL_ARG5] = PIN_GetSyscallArgument(ctx, std, SYSCALL_ARG5);
            /* 5 */
            case SYSCALL_ARG4 + 1:
                thread_ctx->syscall_ctx.arg[SYSCALL_ARG4] = PIN_GetSyscallArgument(ctx, std, SYSCALL_ARG4);
            /* 4 */
            case SYSCALL_ARG3 + 1:
                thread_ctx->syscall_ctx.arg[SYSCALL_ARG3] = PIN_GetSyscallArgument(ctx, std, SYSCALL_ARG3);
            /* 3 */
            case SYSCALL_ARG2 + 1:
                thread_ctx->syscall_ctx.arg[SYSCALL_ARG2] = PIN_GetSyscallArgument(ctx, std, SYSCALL_ARG2);
            /* 2 */
            case SYSCALL_ARG1 + 1:
                thread_ctx->syscall_ctx.arg[SYSCALL_ARG1] = PIN_GetSyscallArgument(ctx, std, SYSCALL_ARG1);
            /* 1 */
            case SYSCALL_ARG0 + 1:
                thread_ctx->syscall_ctx.arg[SYSCALL_ARG0] = PIN_GetSyscallArgument(ctx, std, SYSCALL_ARG0);
            /* default */
            default:
                /* nothing to do */
                break;
        }

        /*
         * dump the architectural state of the processor;
         * saved as "auxiliary" data
         */
        thread_ctx->syscall_ctx.aux = ctx;

        /* call the pre-syscall callback (if any) */
        if (syscall_desc[syscall_nr].pre != NULL)
            syscall_desc[syscall_nr].pre(&thread_ctx->syscall_ctx);
    }
}
// Syscall-entry callback: report the call site and the syscall number.
//
// BUG FIX: the original passed PIN_GetSyscallReturn() as the first argument.
// Per the Pin API, the syscall return value is only valid inside a
// syscall-EXIT callback; reading it at entry yields an undefined value.
// Pass the current PC instead, matching the SysBefore(ip, num, ...)
// convention used by the sibling tools in this file.
VOID SyscallEntry(THREADID threadIndex, CONTEXT*ctxt, SYSCALL_STANDARD std, VOID*v)
{
    SysBefore(PIN_GetContextReg(ctxt, REG_INST_PTR),
              PIN_GetSyscallNumber(ctxt, std));
}
/* ========================================================================== */
// xiosim feeder syscall-entry hook: records the arguments of memory-layout
// syscalls (brk/mmap/munmap/mremap/mprotect) for the exit hook, virtualizes
// gettimeofday and sched_setaffinity, and hands the core back to the
// scheduler ("give up" handshake) before potentially-blocking syscalls.
VOID SyscallEntry(THREADID threadIndex, CONTEXT* ictxt, SYSCALL_STANDARD std, VOID* v) {
    /* Kill speculative feeder before reaching a syscall.
     * This guarantees speculative processes don't have side effects. */
    if (speculation_mode) {
        FinishSpeculation(get_tls(threadIndex));
        return;
    }

    lk_lock(&syscall_lock, threadIndex + 1);
    ADDRINT syscall_num = PIN_GetSyscallNumber(ictxt, std);
    ADDRINT arg1 = PIN_GetSyscallArgument(ictxt, std, 0);
    ADDRINT arg2;
    ADDRINT arg3;
    mmap_arg_struct mmap_arg;

    thread_state_t* tstate = get_tls(threadIndex);
    // Saved so the syscall-exit hook knows which call is completing.
    tstate->last_syscall_number = syscall_num;

#ifdef SYSCALL_DEBUG
    stringstream log;
    log << tstate->tid << ": ";
#endif

    switch (syscall_num) {
    case __NR_brk:
#ifdef SYSCALL_DEBUG
        log << "Syscall brk(" << dec << syscall_num << ") addr: 0x" << hex << arg1 << dec << endl;
#endif
        tstate->last_syscall_arg1 = arg1;
        break;

    case __NR_munmap:
        arg2 = PIN_GetSyscallArgument(ictxt, std, 1);
#ifdef SYSCALL_DEBUG
        log << "Syscall munmap(" << dec << syscall_num << ") addr: 0x" << hex << arg1
            << " length: " << arg2 << dec << endl;
#endif
        tstate->last_syscall_arg1 = arg1;
        tstate->last_syscall_arg2 = arg2;
        break;

    case __NR_mmap:  // oldmmap
#ifndef _LP64
        // ia32 old_mmap: arg1 points to a struct holding all six arguments.
        memcpy(&mmap_arg, (void*)arg1, sizeof(mmap_arg_struct));
#else
        mmap_arg.addr = arg1;
        mmap_arg.len = PIN_GetSyscallArgument(ictxt, std, 1);
#endif
        tstate->last_syscall_arg1 = mmap_arg.len;
#ifdef SYSCALL_DEBUG
        log << "Syscall oldmmap(" << dec << syscall_num << ") addr: 0x" << hex << mmap_arg.addr
            << " length: " << mmap_arg.len << dec << endl;
#endif
        break;

#ifndef _LP64
    case __NR_mmap2:  // ia32-only
        arg2 = PIN_GetSyscallArgument(ictxt, std, 1);
#ifdef SYSCALL_DEBUG
        log << "Syscall mmap2(" << dec << syscall_num << ") addr: 0x" << hex << arg1
            << " length: " << arg2 << dec << endl;
#endif
        tstate->last_syscall_arg1 = arg2;
        break;
#endif  // _LP64

    case __NR_mremap:
        arg2 = PIN_GetSyscallArgument(ictxt, std, 1);
        arg3 = PIN_GetSyscallArgument(ictxt, std, 2);
#ifdef SYSCALL_DEBUG
        log << "Syscall mremap(" << dec << syscall_num << ") old_addr: 0x" << hex << arg1
            << " old_length: " << arg2 << " new_length: " << arg3 << dec << endl;
#endif
        tstate->last_syscall_arg1 = arg1;
        tstate->last_syscall_arg2 = arg2;
        tstate->last_syscall_arg3 = arg3;
        break;

    case __NR_gettimeofday:
#ifdef SYSCALL_DEBUG
        log << "Syscall gettimeofday(" << dec << syscall_num << ")" << endl;
#endif
        tstate->last_syscall_arg1 = arg1;
        // Virtualize time: hand the timeval pointer to the simulator.
        BeforeGettimeofday(threadIndex, arg1);
        break;

    case __NR_mprotect:
        arg2 = PIN_GetSyscallArgument(ictxt, std, 1);
        arg3 = PIN_GetSyscallArgument(ictxt, std, 2);
#ifdef SYSCALL_DEBUG
        log << "Syscall mprotect(" << dec << syscall_num << ") addr: " << hex << arg1 << dec
            << " length: " << arg2 << " prot: " << hex << arg3 << dec << endl;
#endif
        tstate->last_syscall_arg1 = arg1;
        tstate->last_syscall_arg2 = arg2;
        tstate->last_syscall_arg3 = arg3;
        break;

    case __NR_futex: {
        {
            std::lock_guard<XIOSIM_LOCK> l(tstate->lock);
            if (tstate->ignore)
                break;
        }
        arg2 = PIN_GetSyscallArgument(ictxt, std, 1);
        tstate->last_syscall_arg1 = arg1;
        tstate->last_syscall_arg2 = arg2;
#ifdef SYSCALL_DEBUG
        log << "Syscall futex(" << hex << arg1 << dec << ", " << arg2 << ")" << endl;
#endif
        // Only WAIT-style futex ops can block; give up the core for those.
        int futex_op = FUTEX_CMD_MASK & arg2;
        if (futex_op == FUTEX_WAIT || futex_op == FUTEX_WAIT_BITSET) {
            AddGiveUpHandshake(threadIndex, false, true);
        }
    } break;

    case __NR_epoll_wait:
    case __NR_epoll_pwait:
#ifdef SYSCALL_DEBUG
        log << "Syscall epoll_wait(*)" << endl;
#endif
        AddGiveUpHandshake(threadIndex, false, true);
        break;

    case __NR_poll:
    case __NR_ppoll:
#ifdef SYSCALL_DEBUG
        log << "Syscall poll(*)" << endl;
#endif
        AddGiveUpHandshake(threadIndex, false, true);
        break;

    case __NR_select:
    case __NR_pselect6:
#ifdef SYSCALL_DEBUG
        log << "Syscall select(*)" << endl;
#endif
        AddGiveUpHandshake(threadIndex, false, true);
        break;

    case __NR_nanosleep:
#ifdef SYSCALL_DEBUG
        log << "Syscall nanosleep(*)" << endl;
#endif
        AddGiveUpHandshake(threadIndex, false, true);
        break;

    case __NR_pause:
#ifdef SYSCALL_DEBUG
        log << "Syscall pause(*)" << endl;
#endif
        AddGiveUpHandshake(threadIndex, false, true);
        break;

#ifdef SYSCALL_DEBUG
    case __NR_open:
        log << "Syscall open (" << dec << syscall_num << ") path: " << (char*)arg1 << endl;
        break;
#endif

#ifdef SYSCALL_DEBUG
    case __NR_exit:
        log << "Syscall exit (" << dec << syscall_num << ") code: " << arg1 << endl;
        break;
#endif

    case __NR_sched_setaffinity: {
        arg2 = PIN_GetSyscallArgument(ictxt, std, 1);
        arg3 = PIN_GetSyscallArgument(ictxt, std, 2);
#ifdef SYSCALL_DEBUG
        log << "Syscall sched_setaffinity(" << arg1 << ", " << arg2 << ")";
#endif
        size_t mask_size = (size_t) arg2;
        cpu_set_t* mask = (cpu_set_t*) arg3;
        // Only single-core masks are virtualized.
        if (CPU_COUNT(mask) > 1) {
#ifdef SYSCALL_DEBUG
            log << endl;
#endif
            cerr << "We don't virtualize sched_setaffinity with a mask > 1." << endl;
            break;
        }
        // Find the first (and only) set CPU in the mask.
        // NOTE(review): the loop scans mask_size *bits*, but arg2 is the mask
        // size in bytes per sched_setaffinity(2) -- verify this bound.
        int coreID = xiosim::INVALID_CORE;
        for (size_t i = 0; i < mask_size; i++) {
            if (CPU_ISSET(i, mask)) {
                coreID = static_cast<int>(i);
                break;
            }
        }
#ifdef SYSCALL_DEBUG
        log << " cpu " << coreID << endl;
#endif
        AddAffinityHandshake(threadIndex, coreID);
    } break;

    /*
    case __NR_sysconf:
#ifdef SYSCALL_DEBUG
        log << "Syscall sysconf (" << dec << syscall_num << ") arg: " << arg1 << endl;
#endif
        tstate->last_syscall_arg1 = arg1;
        break;
    */

    default:
#ifdef SYSCALL_DEBUG
        log << "Syscall " << dec << syscall_num << endl;
#endif
        break;
    }

#ifdef SYSCALL_DEBUG
    cerr << log.str();
#endif
    lk_unlock(&syscall_lock);
}