void *__wrap_malloc(size_t size)
{
    void *p;

    if ((SYS_GetArena1Lo() >= MAX_MEM1_ARENA_LO) || size >= MEM2_PRIORITY_SIZE)
    {
        p = MEM2_alloc(size);
        return p != 0 ? p : __real_malloc(size);
    }
    p = __real_malloc(size);
    return p != 0 ? p : MEM2_alloc(size);
}
void *__wrap_malloc(size_t size)
{
    void *p;

    if (size >= MEM2_PRIORITY_SIZE)
    {
        p = g_mem2gp.allocate(size);
        if (p != 0)
            return p;
        return __real_malloc(size);
    }
    p = __real_malloc(size);
    if (p != 0)
        return p;
    return g_mem2gp.allocate(size);
}
/* This function wraps the real malloc */
void *__wrap_malloc(size_t size)
{
    void *lptr = __real_malloc(size);

    mem_alloced += size;
    printf("malloc: %zu bytes @%p\n", size, lptr);
    return lptr;
}
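/*
 * All of the wrappers on this page take effect through the GNU linker's
 * --wrap option: linking with -Wl,--wrap=malloc redirects every undefined
 * reference to malloc into __wrap_malloc, while __real_malloc resolves to
 * the original allocator. A minimal, self-contained sketch of that wiring
 * (the file and program names here are illustrative):
 *
 *   gcc wrap_demo.c -Wl,--wrap=malloc -o wrap_demo
 */
#include <stdio.h>
#include <stdlib.h>

void *__real_malloc(size_t size);   /* resolved by ld --wrap to the real malloc */

void *__wrap_malloc(size_t size)
{
    void *p = __real_malloc(size);
    fprintf(stderr, "malloc(%zu) -> %p\n", size, p);
    return p;
}

int main(void)
{
    char *buf = malloc(32);          /* this call is routed through __wrap_malloc */
    free(buf);
    return 0;
}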
void *operator new[](std::size_t size) throw (std::bad_alloc)
{
    void *lptr = __real_malloc(size);

    if (lptr == 0)
        throw std::bad_alloc();
    printf("@ %s:[%p] + %p %#zx\n",
           program_invocation_short_name,
           __builtin_return_address(0),
           lptr,
           size);
    return lptr;
}
void *__wrap_realloc(void *p, size_t size)
{
    void *n;

    // ptr from mem2
    if (((u32)p & 0x10000000) != 0 || (p == 0 && size > MEM2_PRIORITY_SIZE))
    {
        n = g_mem2gp.reallocate(p, size);
        if (n != 0)
            return n;
        n = __real_malloc(size);
        if (n == 0)
            return 0;
        if (p != 0)
        {
            memcpy(n, p, MEM2_usableSize(p) < size ? MEM2_usableSize(p) : size);
            g_mem2gp.release(p);
        }
        return n;
    }
    // ptr from malloc
    n = __real_realloc(p, size);
    if (n != 0)
        return n;
    n = g_mem2gp.allocate(size);
    if (n == 0)
        return 0;
    if (p != 0)
    {
        memcpy(n, p, __real_malloc_usable_size(p) < size ? __real_malloc_usable_size(p) : size);
        __real_free(p);
    }
    return n;
}
void *__wrap_malloc(size_t size)
{
    if (mem_usage + size + 16 > mem_limit) {
        printf("\ntrying to allocate %lld bytes when usage already %lld bytes",
               (long long)(size + 16), mem_usage.load());
        printf("\nmemory limit hit!\n\n");
        return nullptr;
    }
    mem_usage += size + 16;
    total_mem_usage += size + 16;
    // if (peak_mem_usage < mem_usage) {
    //     peak_mem_usage = mem_usage;
    // }
    allocs++;
    // if (peak_allocs < allocs) peak_allocs = allocs;
    total_allocs++;
    void *p = __real_malloc(size + 16);
    if (p == nullptr) {
        // roll back the accounting if the underlying allocation fails
        mem_usage -= size + 16;
        allocs--;
        return nullptr;
    }
    *(size_t *)p = size;  // stash the request size in the 16-byte header
#ifdef DEBUG_MALLOC
    printf("MALLOC: %p : %lld malloc\n", (char *)p + 16, (long long)size);
#endif
    return (char *)p + 16;
}
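/*
 * The accounting above only balances if free is wrapped the same way.
 * A hedged sketch of the matching __wrap_free, assuming the program is also
 * linked with -Wl,--wrap=free and uses the same mem_usage/allocs counters:
 * it recovers the size stashed in the 16-byte header and reverses the
 * accounting done by __wrap_malloc.
 */
void __wrap_free(void *p)
{
    if (p == nullptr)
        return;
    char *base = (char *)p - 16;      // start of the real allocation
    size_t size = *(size_t *)base;    // request size stored by __wrap_malloc
    mem_usage -= size + 16;
    allocs--;
#ifdef DEBUG_MALLOC
    printf("FREE:   %p : %zu free\n", p, size);
#endif
    __real_free(base);
}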
/**
 * Returns a descriptor page from the descriptor page
 * pool or allocates a new descriptor page if the
 * descriptor page pool is empty.
 */
static descriptor_page_t *new_descriptor_page()
{
    descriptor_page_t *new_page = NULL;

    if (descriptor_root->number_of_pooled_descriptor_pages > 0) {
        descriptor_root->number_of_pooled_descriptor_pages--;
        new_page = descriptor_root->descriptor_page_pool
                   [descriptor_root->number_of_pooled_descriptor_pages];
#ifdef SCM_RECORD_MEMORY_USAGE
        dec_pooled_mem(sizeof(descriptor_page_t));
#endif
    } else {
        new_page = __real_malloc(SCM_DESCRIPTOR_PAGE_SIZE);
        if (!new_page) {
#ifdef SCM_DEBUG
            printf("Allocation of new descriptor page failed.\n");
#endif
            return NULL;
        }
#ifdef SCM_RECORD_MEMORY_USAGE
        inc_overhead(__real_malloc_usable_size(new_page));
#endif
    }
#ifdef SCM_RECORD_MEMORY_USAGE
    inc_allocated_mem(__real_malloc_usable_size(new_page));
#endif
    new_page->number_of_descriptors = 0;
    new_page->next = NULL;
    return new_page;
}
void * __wrap_malloc(size_t size) { printf("wrapped malloc call for size[%zu]\n", size); void * ret = __real_malloc(size); printf("returning [%p]\n", ret); return ret; }
extern "C" void* mallocWithTag(size_t size, unsigned int tag) { void* p = __real_malloc(size + sizeof(tag)); if (!p && __smoothieHeapBase) return p; setTag(p, tag); return p; }
void *__wrap_malloc(size_t size)
{
    void *p;

    if ((SYS_GetArena1Lo() > MAX_MEM1_ARENA_LO) || (g_bigGoesToMem2 && size > MEM2_PRIORITY_SIZE))
    {
        p = g_mem2gp.allocate(size);
        if (p != 0)
        {
            return p;
        }
        return __real_malloc(size);
    }
    p = __real_malloc(size);
    if (p != 0)
    {
        return p;
    }
    return g_mem2gp.allocate(size);
}
void *__wrap_malloc(size_t size)
{
    if (EarlyMallocEnabled) {
        return early_malloc(size);
    }
    enter_kernel();
    void *mem = __real_malloc(size);
    leave_kernel();
    return mem;
}
void *__wrap_malloc(size_t size)
{
    void *tmp = __real_malloc(size);
    char error[512];

    if (tmp == NULL) {
        strerror_r(errno, error, sizeof(error));
        log_itf(LOG_ERROR, "malloc() failed: (%d) %s", errno, error);
        exit(EXIT_FAILURE);
    }
    return tmp;
}
/*
 * __wrap_malloc - malloc wrapper function
 */
void *__wrap_malloc(size_t size)
{
    void *lptr = __real_malloc(size);

    printf("@ %s:[%p] + %p %#zx\n",
           program_invocation_short_name,
           __builtin_return_address(0),
           lptr,
           size);
    return lptr;
}
extern "C" void* wrap(malloc)(size_t sz) { init_lib(); void* p1 = __real_malloc(FULL_SIZE(sz)); if (p1) { memset(p1, 0, STUB_SIZE); fibjs::MemPool::global().add(p1, sz); } return MEM_PTR(p1); }
void *__wrap_malloc(size_t s)
{
    void *alloc;

    _hpcrun_in_malloc = 1;
    alloc = __real_malloc(s);
    if (hpcrun_need_more) {
        assert(0); /* alloc more space here */
        hpcrun_need_more = 0;
    }
    _hpcrun_in_malloc = 0;
    return alloc;
}
void *operator new[](size_t sz)
{
    init_lib();

    void *p1 = __real_malloc(FULL_SIZE(sz));
    if (p1) {
        memset(p1, 0, STUB_SIZE);
        fibjs::MemPool::global().add(p1, sz);
    }
    return MEM_PTR(p1);
}
void *__wrap_malloc(size_t size)
{
    LockAlloc();
    void *ptr = __real_malloc(size);
    if (ptr == NULL && size != 0 && IsSingleLocked() && g_bMemoryAssert) {
        TempLog("malloc failed: %u bytes\r\n", size);
        MemoryError();
    }
    UnlockAlloc();
    return ptr;
}
extern "C" void* wrap(realloc)(void* p, size_t sz) { init_lib(); fibjs::MemPool& mp = fibjs::MemPool::global(); if (p == 0) { void* p1 = __real_malloc(FULL_SIZE(sz)); if (p1) { memset(p1, 0, STUB_SIZE); mp.add(p1, sz); } return MEM_PTR(p1); } if (sz == 0) { void* p1 = STUB_PTR(p); if (p1) mp.remove(p1); __real_free(p1); return 0; } void* p1 = STUB_PTR(p); mp.remove(p1); void* p2 = __real_realloc(p1, FULL_SIZE(sz)); if (p2) { memset(p2, 0, STUB_SIZE); mp.add(p2, sz); } else mp.add(p1); return MEM_PTR(p2); }
void *__wrap_malloc(int size)
{
    void *ret;

    if (!kmalloc_ok)
        return __real_malloc(size);
    else if (size <= UM_KERN_PAGE_SIZE)
        /* finding contiguous pages can be hard */
        ret = uml_kmalloc(size, UM_GFP_KERNEL);
    else
        ret = vmalloc(size);

    /*
     * glibc people insist that if malloc fails, errno should be
     * set by malloc as well. So we do.
     */
    if (ret == NULL)
        errno = ENOMEM;

    return ret;
}
void *__wrap_malloc(size_t size)
{
    u32 lr = (u32)__builtin_return_address(0);

    lock(&lck);
    size += PRE_GUARD;
    size += POST_GUARD;
    u8 *p = __real_malloc(size);
    if (p == NULL) {
        unlock(&lck);
        return NULL;
    }
    memset(p, FILL, size);
    /* Header stored just below the returned pointer:
     * guard magic, guard size, total size and the caller's return address. */
    *(u32 *)&p[PRE_GUARD - 4] = MAGIC;
    *(size_t *)&p[PRE_GUARD - 8] = PRE_GUARD;
    *(size_t *)&p[PRE_GUARD - 12] = size;
    *(u32 *)&p[PRE_GUARD - 16] = lr;
    unlock(&lck);
    return &p[PRE_GUARD];
}
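/*
 * The guard layout above only pays off if the matching free checks it.
 * A hedged sketch of such a check, assuming the same lck, MAGIC, FILL and
 * guard-size constants; report_corruption is a hypothetical diagnostic hook,
 * not part of the original code.
 */
void __wrap_free(void *ptr)
{
    if (ptr == NULL)
        return;
    lock(&lck);
    u8 *p = (u8 *)ptr - PRE_GUARD;                    /* raw allocation */
    size_t size = *(size_t *)&p[PRE_GUARD - 12];      /* total size incl. guards */

    /* the trailing POST_GUARD bytes should still hold the FILL pattern */
    int overrun = 0;
    for (size_t i = size - POST_GUARD; i < size && !overrun; i++)
        if (p[i] != FILL)
            overrun = 1;

    if (*(u32 *)&p[PRE_GUARD - 4] != MAGIC || overrun)
        report_corruption(ptr, *(u32 *)&p[PRE_GUARD - 16]);  /* pass the stored caller LR */

    __real_free(p);
    unlock(&lck);
}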
void dump()
{
    Item *items;
    Item *p;
    int32_t n = 0;
    int32_t i;
    char fname[32];

    init_lib();

    m_lock.lock();
    items = (Item *)__real_malloc(sizeof(Item) * m_list.count());
    if (items == NULL) {
        m_lock.unlock();
        return;
    }
    p = m_list.head();
    while (p) {
        memcpy(items + n++, p, sizeof(Item));
        p = m_list.next(p);
    }
    m_lock.unlock();

    caller root(0);
    for (i = 0; i < n; i++)
        root.put(items[i].m_sz, items[i].m_frames, items[i].m_frame_count);

    sprintf(fname, "fibjs.%d.heap", getpid());
    FILE *fp = fopen(fname, "w");
    if (fp) {
        fprintf(fp, "\nfound %d times, total ", root.m_times);
        out_size(fp, root.m_sz);
        root.dumpSubs(fp);
        fclose(fp);
    }

    __real_free(items);
}
void *__wrap_malloc(size_t size) { __real_printf("calling into my malloc with sz=%d\n", size); void *p = (void *)__real_malloc(size); return p; }
void *__wrap_malloc(size_t size)
{
    void *lptr = __real_malloc(size);
    /* mark the returned pointer value as tainted for taint tracking */
    TNT_MAKE_MEM_TAINTED(&lptr, sizeof(lptr));
    return lptr;
}
void *MEM1_alloc(unsigned int s) { return __real_malloc(s); }
extern "C" void* __wrap_malloc(size_t size) { breakOnHeapOpFromInterruptHandler(); return __real_malloc(size); }
/* Memory allocation services */
void *__wrap_malloc(size_t size)
{
    assert_nrt();
    return __real_malloc(size);
}