TEST(UNISTD_TEST, brk) {
  void* original_break = get_brk();

  // Ask for a break just one byte past the current one. The kernel rounds
  // the break up to a page boundary, so after a successful call the actual
  // break must be at least the address we requested.
  void* requested = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(original_break) + 1);
  ASSERT_EQ(0, brk(requested));
  ASSERT_GE(get_brk(), requested);

  // A request that is already page-aligned should be honored exactly.
  requested = page_align(reinterpret_cast<uintptr_t>(original_break) + sysconf(_SC_PAGE_SIZE));
  ASSERT_EQ(0, brk(requested));
  ASSERT_EQ(get_brk(), requested);
}
void notify_mmap(int asid, md_addr_t addr, size_t length, bool mod_brk) { std::lock_guard<XIOSIM_LOCK> l(memory_lock); md_addr_t page_addr = page_round_down(addr); size_t page_length = page_round_up(length); mem_newmap(asid, page_addr, page_length); md_addr_t curr_brk = get_brk(asid); if(mod_brk && page_addr > curr_brk) set_brk(asid, page_addr + page_length); }
/* Returns the start address of the application heap (the region that brk
 * grows), computing it lazily on first call and caching the result in a
 * function-local static. Returns NULL if the heap region cannot be located.
 * NOTE(review): not thread-safe for the very first call if invoked
 * concurrently — assumed to be first called single-threaded; confirm.
 */
app_pc get_heap_start(void) {
    static app_pc heap_start; /* cached value; NULL until first lookup */
    if (heap_start == NULL) {
        /* Query the current break as established before we took over. */
        app_pc cur_brk = get_brk(true/*pre-us*/);
        dr_mem_info_t info;
        module_data_t *data;
        /* Locate the heap: probe the byte just below the break so we land
         * inside the heap region (if any) rather than at its end. */
        if (!dr_query_memory_ex(cur_brk - 1, &info)) {
            ASSERT(false, "cannot find heap region");
            return NULL;
        }
        if (info.type == DR_MEMTYPE_FREE || info.type == DR_MEMTYPE_IMAGE ||
            !TEST(DR_MEMPROT_WRITE, info.prot)) {
            /* Heap is empty: nothing writable below the break, so the heap
             * starts exactly at the break. */
            heap_start = cur_brk;
        } else {
            ASSERT(!dr_memory_is_dr_internal(info.base_pc), "heap location error");
            /* we no longer assert that these are equal b/c -replace_malloc
             * has extended the brk already */
            ASSERT(info.base_pc + info.size >= cur_brk, "heap location error");
            heap_start = info.base_pc;
            /* workaround for PR 618178 where /proc/maps is wrong on suse
             * and lists last 2 pages of executable as heap! */
            /* On some old Linux kernel, the heap might be right after the bss
             * segment. DR's map iterator used by dr_query_memory_ex cannot
             * split bss out of heap.
             * We use dr_lookup_module to find the right bounds of bss so that
             * we can check whether the base is bss, existing heap, or merge of
             * the two. */
            /* XXX: we still cannot handle the case that the application creates
             * memory right before the heap. */
            data = dr_lookup_module(info.base_pc);
            if (data != NULL) {
                /* The queried base falls inside a module: the "heap" start we
                 * found is really bss, so advance past the module's end. */
                if (data->start < heap_start && data->end > heap_start) {
                    heap_start = (byte *) ALIGN_FORWARD(data->end, PAGE_SIZE);
                    LOG(1, "WARNING: workaround for invalid heap_start "PFX" => "PFX"\n",
                        info.base_pc, heap_start);
                }
                dr_free_module_data(data);
            }
        }
    }
    return heap_start;
}
void update_brk(int asid, md_addr_t brk_end, bool do_mmap) { assert(brk_end != 0); if(do_mmap) { md_addr_t old_brk_end = get_brk(asid); if(brk_end > old_brk_end) notify_mmap(asid, page_round_up(old_brk_end), page_round_up(brk_end - old_brk_end), false); else if(brk_end < old_brk_end) notify_munmap(asid, page_round_up(brk_end), page_round_up(old_brk_end - brk_end), false); } { std::lock_guard<XIOSIM_LOCK> l(memory_lock); set_brk(asid, brk_end); } }
/* Verifies that sbrk() fails with ENOMEM (returns (void*)-1) when the
 * requested increment would overflow the break pointer, in both the
 * positive and negative direction. */
TEST(UNISTD_TEST, sbrk_ENOMEM) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // There is no way to guarantee that all overflow conditions can be tested
  // without manipulating the underlying values of the current break.
  extern void* __bionic_brk;

  // RAII guard: saves bionic's cached break on entry and restores it on
  // scope exit, so the manipulations below don't leak out of the test.
  class ScopedBrk {
  public:
    ScopedBrk() : saved_brk_(__bionic_brk) {}
    virtual ~ScopedBrk() { __bionic_brk = saved_brk_; }

  private:
    void* saved_brk_;
  };

  ScopedBrk scope_brk;

  // Set the current break to a point that will cause an overflow.
  __bionic_brk = reinterpret_cast<void*>(static_cast<uintptr_t>(PTRDIFF_MAX) + 2);

  // Can't increase by so much that we'd overflow.
  ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(PTRDIFF_MAX));
  ASSERT_EQ(ENOMEM, errno);

  // Set the current break to a point that will cause an overflow
  // (decrementing by PTRDIFF_MIN from here wraps below zero).
  __bionic_brk = reinterpret_cast<void*>(static_cast<uintptr_t>(PTRDIFF_MAX));

  ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(PTRDIFF_MIN));
  ASSERT_EQ(ENOMEM, errno);

  __bionic_brk = reinterpret_cast<void*>(static_cast<uintptr_t>(PTRDIFF_MAX) - 1);

  ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(PTRDIFF_MIN + 1));
  ASSERT_EQ(ENOMEM, errno);
#else
  // Generic path: we can't poke libc internals, so probe only the overflow
  // cases that are reachable from the process's actual current break.
  // RAII guard restoring the real break on scope exit via brk().
  class ScopedBrk {
  public:
    ScopedBrk() : saved_brk_(get_brk()) {}
    virtual ~ScopedBrk() { brk(saved_brk_); }

  private:
    void* saved_brk_;
  };

  ScopedBrk scope_brk;

  uintptr_t cur_brk = reinterpret_cast<uintptr_t>(get_brk());
  if (cur_brk < static_cast<uintptr_t>(-(SBRK_MIN+1))) {
    // Do the overflow test for a max negative increment.
    ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(SBRK_MIN));
#if defined(__BIONIC__)
    // GLIBC does not set errno in overflow case.
    ASSERT_EQ(ENOMEM, errno);
#endif
  }

  uintptr_t overflow_brk = static_cast<uintptr_t>(SBRK_MAX) + 2;
  if (cur_brk < overflow_brk) {
    // Try and move the value to PTRDIFF_MAX + 2.
    cur_brk = reinterpret_cast<uintptr_t>(sbrk(overflow_brk));
  }
  if (cur_brk >= overflow_brk) {
    // From here, adding SBRK_MAX must overflow.
    ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(SBRK_MAX));
#if defined(__BIONIC__)
    // GLIBC does not set errno in overflow case.
    ASSERT_EQ(ENOMEM, errno);
#endif
  }
#endif
}
// Capture the current program break so it can be restored later
// (presumably by the destructor of the enclosing class — not visible here).
ScopedBrk() : saved_brk_(get_brk()) {}