/* EXPORTED */
/* Create an rt signal frame for thread 'tid' just below sp_top_of_frame,
   copy 'siginfo' into it, and set up the guest registers so the thread
   resumes in 'handler' with the AArch64 handler arguments in place.
   Returning from the handler goes through 'restorer' (if SA_RESTORER)
   or Valgrind's own rt_sigreturn substitute. */
void VG_(sigframe_create)( ThreadId tid,
                           Bool on_altstack,
                           Addr sp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           const struct vki_ucontext *siguc,
                           void *handler,
                           UInt flags,
                           const vki_sigset_t *mask,
                           void *restorer )
{
   ThreadState *tst;
   Addr  sp    = sp_top_of_frame;
   Int   sigNo = siginfo->si_signo;
   UInt  size;

   tst = VG_(get_ThreadState)(tid);

   /* Reserve the frame and keep SP 16-byte aligned (AArch64 requirement). */
   size = sizeof(struct rt_sigframe);
   sp -= size;
   sp = VG_ROUNDDN(sp, 16);

   if (! ML_(sf_maybe_extend_stack)(tst, sp, size, flags))
      return;   // Give up.  No idea if this is correct

   struct rt_sigframe *rsf = (struct rt_sigframe *)sp;

   /* Track our writes to siginfo */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid,  /* VVVVV */
             "signal handler siginfo", (Addr)rsf,
             offsetof(struct rt_sigframe, sig));

   VG_(memcpy)(&rsf->info, siginfo, sizeof(vki_siginfo_t));

   if (sigNo == VKI_SIGILL && siginfo->si_code > 0) {
      /* For SIGILL, report the faulting instruction address in si_addr. */
      rsf->info._sifields._sigfault._addr
         = (Addr*)(tst)->arch.vex.guest_PC;
   }
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, /* ^^^^^ */
             (Addr)rsf, offsetof(struct rt_sigframe, sig));

   build_sigframe(tst, &rsf->sig, siginfo, siguc,
                  handler, flags, mask, restorer);

   /* Handler arguments: X0 = signo (set below), X1 = &siginfo,
      X2 = &ucontext. */
   tst->arch.vex.guest_X1 = (Addr)&rsf->info;
   tst->arch.vex.guest_X2 = (Addr)&rsf->sig.uc;

   VG_(set_SP)(tid, sp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR,
             sizeof(Addr));
   tst->arch.vex.guest_X0 = sigNo;

   /* X30 (LR) is where the handler will "return" to. */
   if (flags & VKI_SA_RESTORER)
      tst->arch.vex.guest_X30 = (Addr)restorer;
   else
      tst->arch.vex.guest_X30
         = (Addr)&VG_(arm64_linux_SUBST_FOR_rt_sigreturn);

   tst->arch.vex.guest_PC = (Addr)handler;
}
// Scan a block of memory between [start, start+len). This range may // be bogus, inaccessable, or otherwise strange; we deal with it. For each // valid aligned word we assume it's a pointer to a chunk a push the chunk // onto the mark stack if so. static void lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique) { Addr ptr = VG_ROUNDUP(start, sizeof(Addr)); Addr end = VG_ROUNDDN(start+len, sizeof(Addr)); vki_sigset_t sigmask; if (VG_DEBUG_LEAKCHECK) VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len); VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask); VG_(set_fault_catcher)(scan_all_valid_memory_catcher); // We might be in the middle of a page. Do a cheap check to see if // it's valid; if not, skip onto the next page. if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) ptr = VG_PGROUNDUP(ptr+1); // First page is bad - skip it. while (ptr < end) { Addr addr; // Skip invalid chunks. if ( ! MC_(is_within_valid_secondary)(ptr) ) { ptr = VG_ROUNDUP(ptr+1, SM_SIZE); continue; } // Look to see if this page seems reasonable. if ((ptr % VKI_PAGE_SIZE) == 0) { if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) { ptr += VKI_PAGE_SIZE; // Bad page - skip it. continue; } } if (__builtin_setjmp(memscan_jmpbuf) == 0) { if ( MC_(is_valid_aligned_word)(ptr) ) { lc_scanned_szB += sizeof(Addr); addr = *(Addr *)ptr; // If we get here, the scanned word is in valid memory. Now // let's see if its contents point to a chunk. lc_push_if_a_chunk_ptr(addr, clique, is_prior_definite); } else if (0 && VG_DEBUG_LEAKCHECK) { VG_(printf)("%#lx not valid\n", ptr); } ptr += sizeof(Addr); } else { // We need to restore the signal mask, because we were // longjmped out of a signal handler. VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL); ptr = VG_PGROUNDUP(ptr+1); // Bad page - skip it. } } VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL); VG_(set_fault_catcher)(NULL); }
/* Lay out a classic (non-RT) x86 signal frame below esp_top_of_frame and
   return its (16-byte aligned) address.  If the stack cannot be extended,
   esp_top_of_frame is returned unchanged and nothing is written. */
static Addr build_sigframe(ThreadState *tst, Addr esp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           const struct vki_ucontext *siguc,
                           UInt flags, const vki_sigset_t *mask,
                           void *restorer)
{
   struct vki_ucontext uctx;
   Int   signo  = siginfo->si_signo;
   Addr  sp     = VG_ROUNDDN(esp_top_of_frame - sizeof(struct sigframe), 16);
   struct sigframe *sf = (struct sigframe *)sp;
   UWord trapno = siguc ? siguc->uc_mcontext.trapno : 0;
   UWord err    = siguc ? siguc->uc_mcontext.err    : 0;

   vg_assert((flags & VKI_SA_SIGINFO) == 0);

   if (!extend(tst, sp, sizeof(*sf)))
      return esp_top_of_frame;

   /* retaddr, sigNo, siguContext fields are to be written */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
             sp, offsetof(struct sigframe, vg) );

   sf->sigNo   = signo;
   /* Return either through the app's restorer or Valgrind's substitute. */
   sf->retaddr = (flags & VKI_SA_RESTORER)
                    ? (Addr)restorer
                    : (Addr)&VG_(x86_linux_SUBST_FOR_sigreturn);

   synth_ucontext(tst->tid, siginfo, trapno, err, mask, &uctx, &sf->fpstate);
   VG_(memcpy)(&sf->sigContext, &uctx.uc_mcontext,
               sizeof(struct vki_sigcontext));
   sf->sigContext.oldmask = mask->sig[0];

   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             sp, offsetof(struct sigframe, vg) );

   build_vg_sigframe(&sf->vg, tst, flags, signo);

   return sp;
}
/* Lay out an amd64 rt signal frame below rsp_top_of_frame and return its
   address.  If the stack cannot be extended, rsp_top_of_frame is returned
   unchanged and nothing is written. */
static Addr build_rt_sigframe(ThreadState *tst, Addr rsp_top_of_frame,
                              const vki_siginfo_t *siginfo,
                              const struct vki_ucontext *siguc,
                              void *handler, UInt flags,
                              const vki_sigset_t *mask,
                              void *restorer)
{
   Int   signo = siginfo->si_signo;
   /* Align to 16 then bias by -8 so the frame looks like the result of a
      "call": (sp+8) is 16-aligned on entry to the handler. */
   Addr  sp    = VG_ROUNDDN(rsp_top_of_frame - sizeof(struct rt_sigframe), 16)
                 - 8;
   struct rt_sigframe *sf = (struct rt_sigframe *)sp;
   UWord trapno = siguc ? siguc->uc_mcontext.trapno : 0;
   UWord err    = siguc ? siguc->uc_mcontext.err    : 0;

   if (!extend(tst, sp, sizeof(*sf)))
      return rsp_top_of_frame;

   /* retaddr, siginfo, uContext fields are to be written */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid,
             "rt signal handler frame", sp,
             offsetof(struct rt_sigframe, vg) );

   sf->retaddr = (flags & VKI_SA_RESTORER)
                    ? (Addr)restorer
                    : (Addr)&VG_(amd64_linux_SUBST_FOR_rt_sigreturn);

   VG_(memcpy)(&sf->sigInfo, siginfo, sizeof(vki_siginfo_t));

   /* SIGILL defines addr to be the faulting address */
   if (signo == VKI_SIGILL && siginfo->si_code > 0)
      sf->sigInfo._sifields._sigfault._addr
         = (void*)tst->arch.vex.guest_RIP;

   synth_ucontext(tst->tid, siginfo, trapno, err, mask,
                  &sf->uContext, &sf->fpstate);

   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             sp, offsetof(struct rt_sigframe, vg) );

   build_vg_sigframe(&sf->vg, tst, mask, flags, signo);

   return sp;
}
/* Lay out an amd64-FreeBSD signal frame below rsp_top_of_frame and return
   its (16-byte aligned) address.  If the stack cannot be extended,
   rsp_top_of_frame is returned unchanged and nothing is written. */
static Addr build_sigframe(ThreadState *tst, Addr rsp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           const struct vki_ucontext *siguc,
                           void *handler, UInt flags,
                           const vki_sigset_t *mask, void *restorer)
{
   Int   signo  = siginfo->si_signo;
   Addr  sp     = VG_ROUNDDN(rsp_top_of_frame - sizeof(struct sigframe), 16);
   struct sigframe *sf = (struct sigframe *)sp;
   UWord trapno = (siguc != NULL) ? siguc->uc_mcontext.trapno : 0;
   UWord err    = (siguc != NULL) ? siguc->uc_mcontext.err    : 0;

   if (!extend(tst, sp, sizeof(*sf)))
      return rsp_top_of_frame;

   /* retaddr, siginfo, uContext fields are to be written */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
             sp, offsetof(struct sigframe, vg) );

   sf->sigNo   = signo;
   sf->retaddr = (Addr)&VG_(amd64_freebsd_SUBST_FOR_sigreturn);

   /* Without SA_SIGINFO, the handler receives the si_code value in place
      of a siginfo pointer. */
   sf->psigInfo = ((flags & VKI_SA_SIGINFO) == 0)
                     ? (Addr)siginfo->si_code
                     : (Addr)&sf->sigInfo;

   VG_(memcpy)(&sf->sigInfo, siginfo, sizeof(vki_siginfo_t));

   synth_ucontext(tst->tid, siginfo, trapno, err, mask,
                  &sf->uContext, &sf->fpstate);

   /* For SIGILL, report the faulting instruction address. */
   if (signo == VKI_SIGILL && siginfo->si_code > 0)
      sf->sigInfo.si_addr = (void*)tst->arch.vex.guest_RIP;

   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             sp, offsetof(struct sigframe, vg) );

   build_vg_sigframe(&sf->vg, tst, mask, flags, signo);

   return sp;
}
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessable, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start,     sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, start+len);
   /* Save the signal mask: a fault during the scan longjmps us out of a
      signal handler, after which we must restore the mask by hand. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   /* NB: counts the whole word range up front, even though parts of it
      may be skipped below. */
   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = VG_ROUNDUP(ptr+1, SECONDARY_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonble */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(is_client_addr)(ptr) ||
             !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
         /* NOTE(review): no "continue" here; if the advanced ptr also
            lands on a bad page, the read below is recovered through the
            fault catcher / longjmp path instead. */
      }

      /* Read the word under the fault catcher; a SEGV longjmps to the
         else-branch below. */
      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            addr = *(Addr *)ptr;
            _lc_markstack_push(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%p not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1); /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
/* EXPORTED */ void VG_(sigframe_create)( ThreadId tid, Addr sp_top_of_frame, const vki_siginfo_t *siginfo, const struct vki_ucontext *siguc, void *handler, UInt flags, const vki_sigset_t *mask, void *restorer ) { // struct vg_sig_private *priv; Addr sp = sp_top_of_frame; ThreadState *tst; Int sigNo = siginfo->si_signo; // Addr faultaddr; UInt size; tst = VG_(get_ThreadState)(tid); size = flags & VKI_SA_SIGINFO ? sizeof(struct rt_sigframe) : sizeof(struct sigframe); sp -= size; sp = VG_ROUNDDN(sp, 16); if(!extend(tst, sp, size)) I_die_here; // XXX Incorrect behavior if (flags & VKI_SA_SIGINFO){ struct rt_sigframe *rsf = (struct rt_sigframe *)sp; /* Track our writes to siginfo */ VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, /* VVVVV */ "signal handler siginfo", (Addr)rsf, offsetof(struct rt_sigframe, sig)); VG_(memcpy)(&rsf->info, siginfo, sizeof(vki_siginfo_t)); if(sigNo == VKI_SIGILL && siginfo->si_code > 0) { rsf->info._sifields._sigfault._addr = (Addr *) (tst)->arch.vex.guest_R12; /* IP */ } VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, /* ^^^^^ */ (Addr)rsf, offsetof(struct rt_sigframe, sig)); build_sigframe(tst, &rsf->sig, siginfo, siguc, handler, flags, mask, restorer); tst->arch.vex.guest_R1 = (Addr)&rsf->info; tst->arch.vex.guest_R2 = (Addr)&rsf->sig.uc; } else {
/* Report how Valgrind describes the addresses around the thread-stack
   guard page: the faulting address itself, the bytes just above / at the
   top of the guard page, and the guard page's base. */
static void describe_many(void)
{
   const int page_sz = guess_pagesize();
   void *const above_guard = (void*) VG_ROUNDUP(lowest_j, page_sz);
   void *const guard_top   = (void*) (VG_ROUNDUP(lowest_j, page_sz) - 1);
   void *const guard_base  = (void*) VG_ROUNDDN(lowest_j, page_sz);

   describe ("discovered address giving SEGV in thread stack",
             (void*)lowest_j);
   describe ("byte just above highest guardpage byte", above_guard);
   describe ("highest guardpage byte", guard_top);
   describe ("lowest guardpage byte", guard_base);
   /* Cannot test the next byte, as we cannot predict how
      this byte will be described. */
}
/* Lay out an x86 rt signal frame below esp_top_of_frame and return its
   (16-byte aligned) address.  If the stack cannot be extended,
   esp_top_of_frame is returned unchanged and nothing is written. */
static Addr build_rt_sigframe(ThreadState *tst, Addr esp_top_of_frame,
                              const vki_siginfo_t *siginfo,
                              void *handler, UInt flags,
                              const vki_sigset_t *mask,
                              void *restorer)
{
   Int  signo = siginfo->si_signo;
   Addr sp    = VG_ROUNDDN(esp_top_of_frame - sizeof(struct rt_sigframe), 16);
   struct rt_sigframe *sf = (struct rt_sigframe *)sp;

   vg_assert((flags & VKI_SA_SIGINFO) != 0);

   if (!extend(tst, sp, sizeof(*sf)))
      return esp_top_of_frame;

   /* retaddr, sigNo, pSiginfo, puContext fields are to be written */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid,
             "rt signal handler frame", sp,
             offsetof(struct rt_sigframe, vg) );

   sf->sigNo   = signo;
   /* Return either through the app's restorer or the trampoline's
      rt_sigreturn stub. */
   sf->retaddr = (flags & VKI_SA_RESTORER)
                    ? (Addr)restorer
                    : VG_(client_trampoline_code)
                      + VG_(tramp_rt_sigreturn_offset);
   sf->psigInfo  = (Addr)&sf->sigInfo;
   sf->puContext = (Addr)&sf->uContext;
   VG_(memcpy)(&sf->sigInfo, siginfo, sizeof(vki_siginfo_t));

   /* SIGILL defines addr to be the faulting address */
   if (signo == VKI_SIGILL && siginfo->si_code > 0)
      sf->sigInfo._sifields._sigfault._addr
         = (void*)tst->arch.vex.guest_EIP;

   synth_ucontext(tst->tid, siginfo, mask, &sf->uContext, &sf->fpstate);

   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             sp, offsetof(struct rt_sigframe, vg) );

   build_vg_sigframe(&sf->vg, tst, mask, flags, signo);

   return sp;
}
/* Lay out a classic (non-RT) x86 signal frame below esp_top_of_frame and
   return its (16-byte aligned) address.  If the stack cannot be extended,
   esp_top_of_frame is returned unchanged and nothing is written. */
static Addr build_sigframe(ThreadState *tst, Addr esp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           void *handler, UInt flags,
                           const vki_sigset_t *mask,
                           void *restorer)
{
   struct vki_ucontext uctx;
   Int  signo = siginfo->si_signo;
   Addr sp    = VG_ROUNDDN(esp_top_of_frame - sizeof(struct sigframe), 16);
   struct sigframe *sf = (struct sigframe *)sp;

   vg_assert((flags & VKI_SA_SIGINFO) == 0);

   if (!extend(tst, sp, sizeof(*sf)))
      return esp_top_of_frame;

   /* retaddr, sigNo, siguContext fields are to be written */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
             sp, offsetof(struct sigframe, vg) );

   sf->sigNo   = signo;
   /* Return either through the app's restorer or the trampoline's
      sigreturn stub. */
   sf->retaddr = (flags & VKI_SA_RESTORER)
                    ? (Addr)restorer
                    : VG_(client_trampoline_code)
                      + VG_(tramp_sigreturn_offset);

   synth_ucontext(tst->tid, siginfo, mask, &uctx, &sf->fpstate);
   VG_(memcpy)(&sf->sigContext, &uctx.uc_mcontext,
               sizeof(struct vki_sigcontext));
   sf->sigContext.oldmask = mask->sig[0];

   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             sp, offsetof(struct sigframe, vg) );

   build_vg_sigframe(&sf->vg, tst, mask, flags, signo);

   return sp;
}
/* Thread body: grow the stack, provoke accesses below SP, then (after the
   SEGV handler longjmps back via 'goback') describe the addresses around
   the guard page.  Optionally also exercises bogus stack registrations. */
static void* child_fn_0 ( void* arg )
{
   grow_the_stack();
   bad_things_below_sp();

   if (setjmp(goback)) {
      /* We get here by longjmp from the SEGV handler. */
      describe_many();
   } else
      bad_things_till_guard_page();

   if (shake_with_wrong_registration) {
      // Do whatever stupid things we could imagine
      // with stack registration and see no explosion happens
      // Note: this is executed only if an arg is given to the program.
      //
      const int pgsz = guess_pagesize();
      int stackid;

      fprintf(stderr, "\n\nShaking after unregistering stack\n");
      // Assuming our first stack was automatically registered as nr 1
      VALGRIND_STACK_DEREGISTER(1);
      // Test with no stack registered
      describe_many();

      fprintf(stderr, "\n\nShaking with small stack\n");
      stackid = VALGRIND_STACK_REGISTER((void*) VG_ROUNDDN(&stackid, pgsz),
                                        (void*) VG_ROUNDUP(&stackid, pgsz));
      describe_many();
      VALGRIND_STACK_DEREGISTER(stackid);

      fprintf(stderr, "\n\nShaking with huge stack\n");
      stackid = VALGRIND_STACK_REGISTER((void*) 0x0,
                                        (void*) VG_ROUNDUP(&stackid, 2<<20));
      describe_many();
      VALGRIND_STACK_DEREGISTER(stackid);

   }
   return NULL;
}
/* Exhaustive sanity checks for the rounding macros: VG_ROUNDDN/VG_ROUNDUP
   over inputs 0..7 with alignments 1, 2, 4 and 8, then
   VG_PGROUNDDN/VG_PGROUNDUP at and around 0 and VKI_PAGE_SIZE. */
void test_VG_ROUND_et_al()
{
   /* Alignment 1: both directions are the identity. */
   CHECK( 0 == VG_ROUNDDN(0, 1) );  CHECK( 1 == VG_ROUNDDN(1, 1) );
   CHECK( 2 == VG_ROUNDDN(2, 1) );  CHECK( 3 == VG_ROUNDDN(3, 1) );
   CHECK( 4 == VG_ROUNDDN(4, 1) );  CHECK( 5 == VG_ROUNDDN(5, 1) );
   CHECK( 6 == VG_ROUNDDN(6, 1) );  CHECK( 7 == VG_ROUNDDN(7, 1) );

   CHECK( 0 == VG_ROUNDUP(0, 1) );  CHECK( 1 == VG_ROUNDUP(1, 1) );
   CHECK( 2 == VG_ROUNDUP(2, 1) );  CHECK( 3 == VG_ROUNDUP(3, 1) );
   CHECK( 4 == VG_ROUNDUP(4, 1) );  CHECK( 5 == VG_ROUNDUP(5, 1) );
   CHECK( 6 == VG_ROUNDUP(6, 1) );  CHECK( 7 == VG_ROUNDUP(7, 1) );

   /* Alignment 2. */
   CHECK( 0 == VG_ROUNDDN(0, 2) );  CHECK( 0 == VG_ROUNDDN(1, 2) );
   CHECK( 2 == VG_ROUNDDN(2, 2) );  CHECK( 2 == VG_ROUNDDN(3, 2) );
   CHECK( 4 == VG_ROUNDDN(4, 2) );  CHECK( 4 == VG_ROUNDDN(5, 2) );
   CHECK( 6 == VG_ROUNDDN(6, 2) );  CHECK( 6 == VG_ROUNDDN(7, 2) );

   CHECK( 0 == VG_ROUNDUP(0, 2) );  CHECK( 2 == VG_ROUNDUP(1, 2) );
   CHECK( 2 == VG_ROUNDUP(2, 2) );  CHECK( 4 == VG_ROUNDUP(3, 2) );
   CHECK( 4 == VG_ROUNDUP(4, 2) );  CHECK( 6 == VG_ROUNDUP(5, 2) );
   CHECK( 6 == VG_ROUNDUP(6, 2) );  CHECK( 8 == VG_ROUNDUP(7, 2) );

   /* Alignment 4. */
   CHECK( 0 == VG_ROUNDDN(0, 4) );  CHECK( 0 == VG_ROUNDDN(1, 4) );
   CHECK( 0 == VG_ROUNDDN(2, 4) );  CHECK( 0 == VG_ROUNDDN(3, 4) );
   CHECK( 4 == VG_ROUNDDN(4, 4) );  CHECK( 4 == VG_ROUNDDN(5, 4) );
   CHECK( 4 == VG_ROUNDDN(6, 4) );  CHECK( 4 == VG_ROUNDDN(7, 4) );

   CHECK( 0 == VG_ROUNDUP(0, 4) );  CHECK( 4 == VG_ROUNDUP(1, 4) );
   CHECK( 4 == VG_ROUNDUP(2, 4) );  CHECK( 4 == VG_ROUNDUP(3, 4) );
   CHECK( 4 == VG_ROUNDUP(4, 4) );  CHECK( 8 == VG_ROUNDUP(5, 4) );
   CHECK( 8 == VG_ROUNDUP(6, 4) );  CHECK( 8 == VG_ROUNDUP(7, 4) );

   /* Alignment 8. */
   CHECK( 0 == VG_ROUNDDN(0, 8) );  CHECK( 0 == VG_ROUNDDN(1, 8) );
   CHECK( 0 == VG_ROUNDDN(2, 8) );  CHECK( 0 == VG_ROUNDDN(3, 8) );
   CHECK( 0 == VG_ROUNDDN(4, 8) );  CHECK( 0 == VG_ROUNDDN(5, 8) );
   CHECK( 0 == VG_ROUNDDN(6, 8) );  CHECK( 0 == VG_ROUNDDN(7, 8) );

   CHECK( 0 == VG_ROUNDUP(0, 8) );  CHECK( 8 == VG_ROUNDUP(1, 8) );
   CHECK( 8 == VG_ROUNDUP(2, 8) );  CHECK( 8 == VG_ROUNDUP(3, 8) );
   CHECK( 8 == VG_ROUNDUP(4, 8) );  CHECK( 8 == VG_ROUNDUP(5, 8) );
   CHECK( 8 == VG_ROUNDUP(6, 8) );  CHECK( 8 == VG_ROUNDUP(7, 8) );

   /* Page rounding, down... */
   CHECK( 0 == VG_PGROUNDDN(0) );
   CHECK( 0 == VG_PGROUNDDN(1) );
   CHECK( 0 == VG_PGROUNDDN(2) );
   CHECK( 0 == VG_PGROUNDDN(3) );
   CHECK( 0 == VG_PGROUNDDN(4) );
   CHECK( 0 == VG_PGROUNDDN(VKI_PAGE_SIZE-1) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDDN(VKI_PAGE_SIZE  ) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDDN(VKI_PAGE_SIZE+1) );

   /* ...and up. */
   CHECK( 0 == VG_PGROUNDUP(0) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDUP(1) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDUP(2) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDUP(3) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDUP(4) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDUP(VKI_PAGE_SIZE-1) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDUP(VKI_PAGE_SIZE  ) );
   CHECK( VKI_PAGE_SIZE*2 == VG_PGROUNDUP(VKI_PAGE_SIZE+1) );
}
static Addr build_rt_sigframe(ThreadState *tst, Addr esp_top_of_frame, const vki_siginfo_t *siginfo, const struct vki_ucontext *siguc, UInt flags, const vki_sigset_t *mask, void *restorer) { struct rt_sigframe *frame; Addr esp = esp_top_of_frame; Int sigNo = siginfo->si_signo; UWord trapno; UWord err; vg_assert((flags & VKI_SA_SIGINFO) != 0); esp -= sizeof(*frame); esp = VG_ROUNDDN(esp, 16); frame = (struct rt_sigframe *)esp; if (!extend(tst, esp, sizeof(*frame))) return esp_top_of_frame; /* retaddr, sigNo, pSiginfo, puContext fields are to be written */ VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "rt signal handler frame", esp, offsetof(struct rt_sigframe, vg) ); frame->sigNo = sigNo; if (flags & VKI_SA_RESTORER) frame->retaddr = (Addr)restorer; else frame->retaddr = (Addr)&VG_(x86_linux_SUBST_FOR_rt_sigreturn); if (siguc) { trapno = siguc->uc_mcontext.trapno; err = siguc->uc_mcontext.err; } else { trapno = 0; err = 0; } frame->psigInfo = (Addr)&frame->sigInfo; frame->puContext = (Addr)&frame->uContext; VG_(memcpy)(&frame->sigInfo, siginfo, sizeof(vki_siginfo_t)); if (sigNo == VKI_SIGILL && siginfo->si_code > 0) frame->sigInfo._sifields._sigfault._addr = (void*)tst->arch.vex.guest_EIP; synth_ucontext(tst->tid, siginfo, trapno, err, mask, &frame->uContext, &frame->fpstate); VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, esp, offsetof(struct rt_sigframe, vg) ); build_vg_sigframe(&frame->vg, tst, flags, sigNo); return esp; }
/* Build the client's initial stack (Darwin): compute the sizes of argv,
   envp, aux words and the string table, then lay them out at the top of
   the (ume-loader-allocated) client stack and return the initial SP. */
static Addr setup_client_stack( void*  init_sp,
                                char** orig_envp,
                                const ExeInfo* info,
                                Addr   clstack_end,
                                SizeT  clstack_max_size )
{
   char **cpp;
   char *strtab;              /* string table */
   char *stringbase;
   Addr *ptr;
   unsigned stringsize;       /* total size of strings in bytes */
   unsigned auxsize;          /* total size of auxv in bytes */
   Int argc;                  /* total argc */
   Int envc;                  /* total number of env vars */
   unsigned stacksize;        /* total client stack size */
   Addr client_SP;            /* client stack base (initial SP) */
   Addr clstack_start;
   Int i;
   Bool have_exename;

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert( VG_(args_for_client) );

   /* ==================== compute sizes ==================== */

   /* first of all, work out how big the client stack will be */
   stringsize   = 0;
   auxsize = 0;
   have_exename = VG_(args_the_exename) != NULL;

   /* paste on the extra args if the loader needs them (ie, the #!
      interpreter and its argument) */
   argc = 0;
   if (info->interp_name != NULL) {
      argc++;
      stringsize += VG_(strlen)(info->interp_name) + 1;
   }
   if (info->interp_args != NULL) {
      argc++;
      stringsize += VG_(strlen)(info->interp_args) + 1;
   }

   /* now scan the args we're given... */
   if (have_exename)
      stringsize += VG_(strlen)( VG_(args_the_exename) ) + 1;

   for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
      argc++;
      stringsize += VG_(strlen)( * (HChar**)
                                 VG_(indexXA)( VG_(args_for_client), i ))
                    + 1;
   }

   /* ...and the environment */
   envc = 0;
   for (cpp = orig_envp; cpp && *cpp; cpp++) {
      envc++;
      stringsize += VG_(strlen)(*cpp) + 1;
   }

   /* Darwin executable_path + NULL */
   auxsize += 2 * sizeof(Word);
   if (info->executable_path) {
      stringsize += 1 + VG_(strlen)(info->executable_path);
   }

   /* Darwin mach_header */
   if (info->dynamic) auxsize += sizeof(Word);

   /* OK, now we know how big the client stack is */
   stacksize =
      sizeof(Word) +                          /* argc */
      (have_exename ? sizeof(char **) : 0) +  /* argc[0] == exename */
      sizeof(char **)*argc +                  /* argv */
      sizeof(char **) +                       /* terminal NULL */
      sizeof(char **)*envc +                  /* envp */
      sizeof(char **) +                       /* terminal NULL */
      auxsize +                               /* auxv */
      VG_ROUNDUP(stringsize, sizeof(Word));   /* strings (aligned) */

   if (0) VG_(printf)("stacksize = %d\n", stacksize);

   /* client_SP is the client's stack pointer */
   client_SP = clstack_end - stacksize;
   client_SP = VG_ROUNDDN(client_SP, 32); /* make stack 32 byte aligned */

   /* base of the string table (aligned) */
   stringbase = strtab = (char *)clstack_end
                         - VG_ROUNDUP(stringsize, sizeof(int));

   /* The max stack size */
   clstack_max_size = VG_PGROUNDUP(clstack_max_size);

   /* Darwin stack is chosen by the ume loader */
   clstack_start = clstack_end - clstack_max_size;

   /* Record stack extent -- needed for stack-change code. */
   /* GrP fixme really? */
   VG_(clstk_base) = clstack_start;
   VG_(clstk_end) = clstack_end;

   if (0)
      VG_(printf)("stringsize=%d auxsize=%d stacksize=%d maxsize=0x%x\n"
                  "clstack_start %p\n"
                  "clstack_end %p\n",
                  stringsize, auxsize, stacksize, (Int)clstack_max_size,
                  (void*)clstack_start, (void*)clstack_end);

   /* ==================== allocate space ==================== */
   /* Stack was allocated by the ume loader. */

   /* ==================== create client stack ==================== */

   ptr = (Addr*)client_SP;

   /* --- mach_header --- */
   if (info->dynamic) *ptr++ = info->text;

   /* --- client argc --- */
   *ptr++ = (Addr)(argc + (have_exename ? 1 : 0));

   /* --- client argv --- */
   if (info->interp_name) {
      *ptr++ = (Addr)copy_str(&strtab, info->interp_name);
      VG_(free)(info->interp_name);
   }
   if (info->interp_args) {
      *ptr++ = (Addr)copy_str(&strtab, info->interp_args);
      VG_(free)(info->interp_args);
   }

   if (have_exename)
      *ptr++ = (Addr)copy_str(&strtab, VG_(args_the_exename));

   for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
      *ptr++ = (Addr)copy_str(
                  &strtab,
                  * (HChar**) VG_(indexXA)( VG_(args_for_client), i ) );
   }
   *ptr++ = 0;

   /* --- envp --- */
   VG_(client_envp) = (Char **)ptr;
   for (cpp = orig_envp; cpp && *cpp; ptr++, cpp++)
      *ptr = (Addr)copy_str(&strtab, *cpp);
   *ptr++ = 0;

   /* --- executable_path + NULL --- */
   if (info->executable_path)
      *ptr++ = (Addr)copy_str(&strtab, info->executable_path);
   else
      *ptr++ = 0;
   *ptr++ = 0;

   vg_assert((strtab-stringbase) == stringsize);

   /* client_SP is pointing at client's argc/argv */

   if (0) VG_(printf)("startup SP = %#lx\n", client_SP);
   return client_SP;
}
static void main2(void) { int err, padfile; struct exeinfo info; extern char _end; int *esp; char buf[strlen(valgrind_lib) + sizeof(stage2) + 16]; info.exe_end = VG_PGROUNDDN(init_sp); // rounding down info.exe_base = KICKSTART_BASE; printf("info.exe_end = %p\n", info.exe_end); #ifdef HAVE_PIE info.exe_base = VG_ROUNDDN(info.exe_end - 0x02000000, 0x10000000); assert(info.exe_base >= VG_PGROUNDUP(&_end)); info.map_base = info.exe_base + 0x01000000 ; #else // If this system doesn't have PIE (position-independent executables), // we have to choose a hardwired location for stage2. // info.exe_base = VG_PGROUNDUP(&_end); printf("info.exe_base = %p\n", info.exe_base); info.map_base = KICKSTART_BASE + 0x01000000 ; printf("info.map_base = %p\n", info.map_base); #endif info.argv = NULL; snprintf(buf, sizeof(buf), "%s/%s", valgrind_lib, stage2); printf("valgrind_lib = %s\n",valgrind_lib); err = do_exec(buf, &info); if (err != 0) { fprintf(stderr, "valgrind: failed to load %s: %s\n", buf, strerror(err)); exit(1); } /* Make sure stage2's dynamic linker can't tromp on the lower part of the address space. */ padfile = as_openpadfile(); as_pad(0, (void *)info.map_base, padfile); // map base is the start of our stuff printf("init sp : %x\n", init_sp); esp = fix_auxv(init_sp, &info, padfile); printf("after fix_auxb\n"); if (1) { printf("---------- launch stage 2 ----------\n"); printf("eip=%p esp=%p\n", (void *)info.init_eip, esp); foreach_map(prmap, /*dummy*/NULL); } VG_(debugLog)(1, "stage1", "main2(): starting stage2\n"); printf("jumping to stage 2 \n"); printf("esp : %x \n eip : %x\n",esp, info.init_eip); jump_and_switch_stacks( (Addr) esp, /* stack */ (Addr) info.init_eip /* Where to jump */ ); /*NOTREACHED*/ assert(0); }