// Scan a block of memory between [start, start+len). This range may // be bogus, inaccessable, or otherwise strange; we deal with it. For each // valid aligned word we assume it's a pointer to a chunk a push the chunk // onto the mark stack if so. static void lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique) { Addr ptr = VG_ROUNDUP(start, sizeof(Addr)); Addr end = VG_ROUNDDN(start+len, sizeof(Addr)); vki_sigset_t sigmask; if (VG_DEBUG_LEAKCHECK) VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len); VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask); VG_(set_fault_catcher)(scan_all_valid_memory_catcher); // We might be in the middle of a page. Do a cheap check to see if // it's valid; if not, skip onto the next page. if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) ptr = VG_PGROUNDUP(ptr+1); // First page is bad - skip it. while (ptr < end) { Addr addr; // Skip invalid chunks. if ( ! MC_(is_within_valid_secondary)(ptr) ) { ptr = VG_ROUNDUP(ptr+1, SM_SIZE); continue; } // Look to see if this page seems reasonable. if ((ptr % VKI_PAGE_SIZE) == 0) { if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) { ptr += VKI_PAGE_SIZE; // Bad page - skip it. continue; } } if (__builtin_setjmp(memscan_jmpbuf) == 0) { if ( MC_(is_valid_aligned_word)(ptr) ) { lc_scanned_szB += sizeof(Addr); addr = *(Addr *)ptr; // If we get here, the scanned word is in valid memory. Now // let's see if its contents point to a chunk. lc_push_if_a_chunk_ptr(addr, clique, is_prior_definite); } else if (0 && VG_DEBUG_LEAKCHECK) { VG_(printf)("%#lx not valid\n", ptr); } ptr += sizeof(Addr); } else { // We need to restore the signal mask, because we were // longjmped out of a signal handler. VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL); ptr = VG_PGROUNDUP(ptr+1); // Bad page - skip it. } } VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL); VG_(set_fault_catcher)(NULL); }
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader.

   Faults during the probe are handled by the installed fault catcher,
   which longjmps back to memscan_jmpbuf; we then restore the saved
   signal mask and skip the rest of the faulting page. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
   /* Only whole aligned words inside [start, start+len) are scanned. */
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, start+len);

   /* Save the current mask so it can be restored after a longjmp out
      of the fault handler, and again on exit. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = VG_ROUNDUP(ptr+1, SECONDARY_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(is_client_addr)(ptr) ||
             !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ)) {
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
            /* BUGFIX: restart the loop so the secondary-map and
               page-validity checks are re-run on the new page, instead
               of blindly probing it below.  This matches the newer
               lc_scan_memory. */
            continue;
         }
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            /* The scanned word is in valid memory; see if its contents
               point at a chunk. */
            addr = *(Addr *)ptr;
            _lc_markstack_push(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%p not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1); /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
/* Describe the addresses of interest around the guard page implied by
   lowest_j (the address found to SEGV in the thread stack). */
static void describe_many(void)
{
   const int pgsz = guess_pagesize();

   describe ("discovered address giving SEGV in thread stack",
             (void*)lowest_j);
   describe ("byte just above highest guardpage byte",
             (void*) VG_ROUNDUP(lowest_j, pgsz));
   describe ("highest guardpage byte",
             (void*) VG_ROUNDUP(lowest_j, pgsz)-1);
   describe ("lowest guardpage byte",
             (void*) VG_ROUNDDN(lowest_j, pgsz));
   /* The byte below the guard page is deliberately not described:
      there is no way to predict how it would be reported. */
}
/* Map a deduped element (a pointer into the current pool) back to its
   1-based element number. */
static __inline__ UInt elt2nr (DedupPoolAlloc *ddpa, const void *dedup_elt)
{
   const UChar *pool_base = (const UChar *)ddpa->curpool;
   const UChar *elt       = (const UChar *)dedup_elt;

   /* The element must lie inside the used part of the pool. */
   vg_assert (dedup_elt >= (const void *)ddpa->curpool
              && dedup_elt < (const void *)ddpa->curpool_free);

   /* Elements are laid out at a fixed, alignment-padded stride. */
   return 1 + (elt - pool_base)
              / VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);
}
/* Return the number of (fixed-size) elements currently stored in the
   dedup pool, or 0 if no pool has been allocated yet. */
UInt VG_(sizeDedupPA) (DedupPoolAlloc *ddpa)
{
   if (ddpa->curpool == NULL)
      return 0;

   /* Only meaningful for fixed-size pools. */
   vg_assert (ddpa->fixedSzb);

   /* Used bytes divided by the alignment-padded element stride. */
   return (ddpa->curpool_free - ddpa_align(ddpa, ddpa->curpool))
          / VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);
}
/* Thread body: provoke accesses below SP and around the guard page,
   then (optionally) stress the stack-registration client requests. */
static void* child_fn_0 ( void* arg )
{
   grow_the_stack();
   bad_things_below_sp();

   /* bad_things_till_guard_page() longjmps back here when it finally
      hits the guard page; we then describe what was found. */
   if (!setjmp(goback))
      bad_things_till_guard_page();
   else
      describe_many();

   if (shake_with_wrong_registration) {
      /* Do whatever stupid things we could imagine with stack
         registration and check that no explosion happens.
         Note: this is executed only if an arg is given to the
         program. */
      const int pgsz = guess_pagesize();
      int sid;

      fprintf(stderr, "\n\nShaking after unregistering stack\n");
      /* Assuming our first stack was automatically registered as nr 1. */
      VALGRIND_STACK_DEREGISTER(1);
      /* Test with no stack registered at all. */
      describe_many();

      fprintf(stderr, "\n\nShaking with small stack\n");
      /* Register a (tiny) stack covering just the page holding sid. */
      sid = VALGRIND_STACK_REGISTER((void*) VG_ROUNDDN(&sid, pgsz),
                                    (void*) VG_ROUNDUP(&sid, pgsz));
      describe_many();
      VALGRIND_STACK_DEREGISTER(sid);

      fprintf(stderr, "\n\nShaking with huge stack\n");
      /* Register an absurdly large stack starting at address 0. */
      sid = VALGRIND_STACK_REGISTER((void*) 0x0,
                                    (void*) VG_ROUNDUP(&sid, 2<<20));
      describe_many();
      VALGRIND_STACK_DEREGISTER(sid);
   }
   return NULL;
}
/* Return the address of element number eltNr (1-based) in the dedup
   pool; the inverse of elt2nr. */
void* VG_(indexEltNumber) (DedupPoolAlloc *ddpa, UInt eltNr)
{
   /* Elements sit at a fixed, alignment-padded stride from the pool
      base. */
   UChar *elt = ddpa->curpool
                + (eltNr - 1) * VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);

   /* Sanity: the computed address must be inside the used pool. */
   vg_assert (elt >= ddpa->curpool && elt < ddpa->curpool_free);

   return elt;
}
/* Based on ML_(generic_PRE_sys_mmap) from syswrap-generic.c.
   If we are trying to do mmap with VKI_MAP_SHARED flag we need to align the
   start address on VKI_SHMLBA like we did in
   VG_(am_mmap_file_float_valgrind_flags).

   Validates the client's mmap arguments, asks aspacem for a placement,
   performs the mmap (retrying at any address if a hinted placement is
   refused by the kernel), and on success notifies aspacem, the
   debuginfo reader and the tool. */
static SysRes mips_PRE_sys_mmap(ThreadId tid,
                                UWord arg1, UWord arg2, UWord arg3,
                                UWord arg4, UWord arg5, Off64T arg6)
{
   Addr       advised;
   SysRes     sres;
   MapRequest mreq;
   Bool       mreq_ok;

   if (arg2 == 0) {
      /* SuSV3 says: If len is zero, mmap() shall fail and no mapping
         shall be established. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg1)) {
      /* zap any misaligned addresses. */
      /* SuSV3 says misaligned addresses only cause the MAP_FIXED case
         to fail.   Here, we catch them all. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg6)) {
      /* zap any misaligned offsets. */
      /* SuSV3 says: The off argument is constrained to be aligned and
         sized according to the value returned by sysconf() when
         passed _SC_PAGESIZE or _SC_PAGE_SIZE. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Figure out what kind of allocation constraints there are
      (fixed/hint/any), and ask aspacem what we should do. */
   mreq.start = arg1;
   mreq.len   = arg2;
   if (arg4 & VKI_MAP_FIXED) {
      mreq.rkind = MFixed;
   } else if (arg1 != 0) {
      mreq.rkind = MHint;
   } else {
      mreq.rkind = MAny;
   }

   /* For non-fixed shared mappings, ask for extra slack so the advised
      address can later be rounded up to an SHMLBA boundary while still
      leaving room for the requested length. */
   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
       && !(VKI_MAP_FIXED & arg4))
      mreq.len = arg2 + VKI_SHMLBA - VKI_PAGE_SIZE;

   /* Enquire ... */
   advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );

   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
       && !(VKI_MAP_FIXED & arg4))
      advised = VG_ROUNDUP(advised, VKI_SHMLBA);

   if (!mreq_ok) {
      /* Our request was bounced, so we'd better fail. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Otherwise we're OK (so far).  Install aspacem's choice of
      address, and let the mmap go through.  */
   sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                    arg4 | VKI_MAP_FIXED,
                                    arg5, arg6);

   /* A refinement: it may be that the kernel refused aspacem's choice
      of address.  If we were originally asked for a hinted mapping,
      there is still a last chance: try again at any address.  Hence: */
   if (mreq.rkind == MHint && sr_isError(sres)) {
      mreq.start = 0;
      mreq.len   = arg2;
      mreq.rkind = MAny;
      advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
      if (!mreq_ok) {
         /* Our request was bounced, so we'd better fail. */
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      /* and try again with the kernel */
      sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                       arg4 | VKI_MAP_FIXED,
                                       arg5, arg6);
   }

   if (!sr_isError(sres)) {
      ULong di_handle;
      /* Notify aspacem. */
      notify_core_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2,               /* length */
         arg3,               /* prot */
         arg4,               /* the original flags value */
         arg5,               /* fd */
         arg6                /* offset */
      );
      /* Load symbols? */
      di_handle = VG_(di_notify_mmap)( (Addr)sr_Res(sres),
                                       False/*allow_SkFileV*/, (Int)arg5 );
      /* Notify the tool. */
      notify_tool_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2,               /* length */
         arg3,               /* prot */
         di_handle           /* so the tool can refer to the read
                                debuginfo later, if it wants. */
      );
   }

   /* Stay sane */
   if (!sr_isError(sres) && (arg4 & VKI_MAP_FIXED))
      vg_assert(sr_Res(sres) == arg1);

   return sres;
}
/* Exercise VG_ROUNDDN/VG_ROUNDUP for alignments 1, 2, 4 and 8 over the
   values 0..7, comparing against the arithmetic definition of rounding
   to a multiple, then spot-check VG_PGROUNDDN/VG_PGROUNDUP around zero
   and around the page boundary. */
void test_VG_ROUND_et_al()
{
   unsigned a, v;

   for (a = 1; a <= 8; a *= 2) {
      for (v = 0; v <= 7; v++) {
         unsigned dn = v - (v % a);            /* largest multiple <= v  */
         unsigned up = (dn == v) ? v : dn + a; /* smallest multiple >= v */
         CHECK( dn == VG_ROUNDDN(v, a) );
         CHECK( up == VG_ROUNDUP(v, a) );
      }
   }

   /* Page rounding, downwards. */
   for (v = 0; v <= 4; v++)
      CHECK( 0 == VG_PGROUNDDN(v) );
   CHECK( 0             == VG_PGROUNDDN(VKI_PAGE_SIZE-1) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDDN(VKI_PAGE_SIZE  ) );
   CHECK( VKI_PAGE_SIZE == VG_PGROUNDDN(VKI_PAGE_SIZE+1) );

   /* Page rounding, upwards. */
   CHECK( 0 == VG_PGROUNDUP(0) );
   for (v = 1; v <= 4; v++)
      CHECK( VKI_PAGE_SIZE == VG_PGROUNDUP(v) );
   CHECK( VKI_PAGE_SIZE   == VG_PGROUNDUP(VKI_PAGE_SIZE-1) );
   CHECK( VKI_PAGE_SIZE   == VG_PGROUNDUP(VKI_PAGE_SIZE  ) );
   CHECK( VKI_PAGE_SIZE*2 == VG_PGROUNDUP(VKI_PAGE_SIZE+1) );
}
/* Lay out the client's initial stack (Darwin flavour): compute the
   total size needed for argc/argv/envp/auxv plus their strings, then
   populate the already-allocated stack top-down and return the initial
   client SP.  The size computation and the population pass below must
   agree exactly (checked by the strtab assertion at the end).
   NOTE(review): the stack memory itself is assumed to have been
   allocated by the ume loader — see the "allocate space" section. */
static Addr setup_client_stack( void*  init_sp,
                                char** orig_envp,
                                const ExeInfo* info,
                                Addr   clstack_end,
                                SizeT  clstack_max_size )
{
   char **cpp;
   char *strtab;              /* string table */
   char *stringbase;
   Addr *ptr;
   unsigned stringsize;       /* total size of strings in bytes */
   unsigned auxsize;          /* total size of auxv in bytes */
   Int argc;                  /* total argc */
   Int envc;                  /* total number of env vars */
   unsigned stacksize;        /* total client stack size */
   Addr client_SP;            /* client stack base (initial SP) */
   Addr clstack_start;
   Int i;
   Bool have_exename;

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert( VG_(args_for_client) );

   /* ==================== compute sizes ==================== */

   /* first of all, work out how big the client stack will be */
   stringsize   = 0;
   auxsize = 0;
   have_exename = VG_(args_the_exename) != NULL;

   /* paste on the extra args if the loader needs them (ie, the #!
      interpreter and its argument) */
   argc = 0;
   if (info->interp_name != NULL) {
      argc++;
      stringsize += VG_(strlen)(info->interp_name) + 1;
   }
   if (info->interp_args != NULL) {
      argc++;
      stringsize += VG_(strlen)(info->interp_args) + 1;
   }

   /* now scan the args we're given... */
   if (have_exename)
      stringsize += VG_(strlen)( VG_(args_the_exename) ) + 1;

   for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
      argc++;
      stringsize += VG_(strlen)( * (HChar**)
                                 VG_(indexXA)( VG_(args_for_client), i )) + 1;
   }

   /* ...and the environment */
   envc = 0;
   for (cpp = orig_envp; cpp && *cpp; cpp++) {
      envc++;
      stringsize += VG_(strlen)(*cpp) + 1;
   }

   /* Darwin executable_path + NULL */
   auxsize += 2 * sizeof(Word);
   if (info->executable_path) {
      stringsize += 1 + VG_(strlen)(info->executable_path);
   }

   /* Darwin mach_header */
   if (info->dynamic) auxsize += sizeof(Word);

   /* OK, now we know how big the client stack is */
   stacksize =
      sizeof(Word) +                            /* argc */
      (have_exename ? sizeof(char **) : 0) +    /* argc[0] == exename */
      sizeof(char **)*argc +                    /* argv */
      sizeof(char **) +                         /* terminal NULL */
      sizeof(char **)*envc +                    /* envp */
      sizeof(char **) +                         /* terminal NULL */
      auxsize +                                 /* auxv */
      VG_ROUNDUP(stringsize, sizeof(Word));     /* strings (aligned) */

   if (0) VG_(printf)("stacksize = %d\n", stacksize);

   /* client_SP is the client's stack pointer */
   client_SP = clstack_end - stacksize;
   client_SP = VG_ROUNDDN(client_SP, 32); /* make stack 32 byte aligned */

   /* base of the string table (aligned) */
   /* NOTE(review): strings are sized with VG_ROUNDUP(stringsize,
      sizeof(Word)) above but placed with sizeof(int) alignment here —
      presumably intentional slack; confirm before changing either. */
   stringbase = strtab = (char *)clstack_end
                         - VG_ROUNDUP(stringsize, sizeof(int));

   /* The max stack size */
   clstack_max_size = VG_PGROUNDUP(clstack_max_size);

   /* Darwin stack is chosen by the ume loader */
   clstack_start = clstack_end - clstack_max_size;

   /* Record stack extent -- needed for stack-change code. */
   /* GrP fixme really? */
   VG_(clstk_base) = clstack_start;
   VG_(clstk_end) = clstack_end;

   if (0)
      VG_(printf)("stringsize=%d auxsize=%d stacksize=%d maxsize=0x%x\n"
                  "clstack_start %p\n"
                  "clstack_end %p\n",
                  stringsize, auxsize, stacksize, (Int)clstack_max_size,
                  (void*)clstack_start, (void*)clstack_end);

   /* ==================== allocate space ==================== */

   /* Stack was allocated by the ume loader. */

   /* ==================== create client stack ==================== */

   ptr = (Addr*)client_SP;

   /* --- mach_header --- */
   if (info->dynamic) *ptr++ = info->text;

   /* --- client argc --- */
   *ptr++ = (Addr)(argc + (have_exename ? 1 : 0));

   /* --- client argv --- */
   /* copy_str appends each string to the string table (advancing
      strtab) and returns its address there. */
   if (info->interp_name) {
      *ptr++ = (Addr)copy_str(&strtab, info->interp_name);
      VG_(free)(info->interp_name);
   }
   if (info->interp_args) {
      *ptr++ = (Addr)copy_str(&strtab, info->interp_args);
      VG_(free)(info->interp_args);
   }

   if (have_exename)
      *ptr++ = (Addr)copy_str(&strtab, VG_(args_the_exename));

   for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
      *ptr++ = (Addr)copy_str(
                  &strtab,
                  * (HChar**) VG_(indexXA)( VG_(args_for_client), i ) );
   }
   *ptr++ = 0;

   /* --- envp --- */
   VG_(client_envp) = (Char **)ptr;
   for (cpp = orig_envp; cpp && *cpp; ptr++, cpp++)
      *ptr = (Addr)copy_str(&strtab, *cpp);
   *ptr++ = 0;

   /* --- executable_path + NULL --- */
   if (info->executable_path)
      *ptr++ = (Addr)copy_str(&strtab, info->executable_path);
   else
      *ptr++ = 0;
   *ptr++ = 0;

   /* The population pass must have consumed exactly the string space
      computed in the sizing pass. */
   vg_assert((strtab-stringbase) == stringsize);

   /* client_SP is pointing at client's argc/argv */

   if (0) VG_(printf)("startup SP = %#lx\n", client_SP);
   return client_SP;
}
/* Round c up to the pool's element alignment. */
static __inline__ UChar* ddpa_align ( DedupPoolAlloc* ddpa, UChar *c )
{
   Addr aligned = VG_ROUNDUP(c, ddpa->eltAlign);
   return (UChar*)aligned;
}