/*! \brief Main function of the example memory manager. * * This example shows how memory can be allocated from different * memory spaces. * The default allocation will get memory from the internal SRAM. * By using the "memory space" functionality of the memory manager * it is possible to use other memory spaces as resources like an * attached SDRAM. */ int main(void) { void *some_space; void *some_more_space; void *some_space_in_sdram; mspace sdram_msp; // Switch to external oscillator 0. pcl_switch_to_osc(PCL_OSC0, FOSC0, OSC0_STARTUP); sdramc_init(FHSB_HZ); // default allocation from internal SRAM some_space = dlmalloc(512); some_more_space = dlmalloc(64); // Create a new memory space the covers the SDRAM sdram_msp = create_mspace_with_base((void*) SDRAM_START_ADDRESS, MEM_SPACE_SIZE, 0); // allocate memory from the created memroy space some_space_in_sdram = mspace_malloc(sdram_msp, 512); while (true) { } }
/* Allocate 'bytes' according to the requested traits.
 * NEVER_FREE requests are served from the krbs (sbrk-style) region;
 * otherwise the request goes to dlcalloc/dlmalloc. Unknown reserved
 * flag bits are rejected. Returns NULL on failure. */
void * mbed_ualloc(size_t bytes, UAllocTraits_t traits)
{
    void * result = NULL;
    void * call_site = (void*) caller_addr();

    if (UALLOC_TEST_TRAITS(traits.flags, UALLOC_TRAITS_NEVER_FREE)) {
        result = mbed_krbs(bytes);
        /* krbs follows sbrk semantics, so a -1 result means failure;
         * normalize it to NULL. */
        if (result == (void*)-1) {
            result = NULL;
        }
        if ((result != NULL) && UALLOC_TEST_TRAITS(traits.flags, UALLOC_TRAITS_ZERO_FILL)) {
            memset(result, 0, bytes);
        }
    } else if (UALLOC_TEST_TRAITS(traits.flags, UALLOC_TRAITS_ZERO_FILL)) {
        result = dlcalloc(1, bytes);
    } else if (!(traits.flags & ~UALLOC_TRAITS_BITMASK)) {
        result = dlmalloc(bytes);
    } else if (traits.flags & UALLOC_RESERVED_MASK) {
        /* Caller set reserved flag bits we do not understand. */
        ualloc_debug(UALLOC_DEBUG_ERROR, "ua c:%p reserved: %lx\n",
                     call_site, traits.flags & UALLOC_RESERVED_MASK);
    }

    if (result == NULL) {
        ualloc_debug(UALLOC_DEBUG_WARNING, "ua c:%p fail\n", call_site);
    } else {
        ualloc_debug(UALLOC_DEBUG_LOG, "ua c:%p m:%p\n", call_site, result);
    }
    return result;
}
/* This routine serves as entry point for 'malloc'. * Primary responsibility of this routine is to allocate requested number of * bytes (plus prefix, and suffix guards), and report allocation to the * emulator. */ void* qemu_instrumented_malloc(size_t bytes) { MallocDesc desc; /* Initialize block descriptor and allocate memory. Note that dlmalloc * returns a valid pointer on zero allocation. Lets mimic this behavior. */ desc.prefix_size = DEFAULT_PREFIX_SIZE; desc.requested_bytes = bytes; desc.suffix_size = DEFAULT_SUFFIX_SIZE; desc.ptr = dlmalloc(mallocdesc_alloc_size(&desc)); if (desc.ptr == NULL) { qemu_error_log("<libc_pid=%03u, pid=%03u> malloc(%u): dlmalloc(%u) failed.", malloc_pid, getpid(), bytes, mallocdesc_alloc_size(&desc)); return NULL; } // Fire up event in the emulator. if (notify_qemu_malloc(&desc)) { log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: malloc: notify_malloc failed for ", malloc_pid, getpid()); dlfree(desc.ptr); return NULL; } else { #if TEST_ACCESS_VIOLATIONS test_access_violation(&desc); #endif // TEST_ACCESS_VIOLATIONS log_mdesc(info, &desc, "+++ <libc_pid=%03u, pid=%03u> malloc(%u) -> ", malloc_pid, getpid(), bytes); return mallocdesc_user_ptr(&desc); } }
/* Thread-safe malloc: serialize every call into dlmalloc behind a
 * spin lock. */
void *malloc(size_t size)
{
    void *block;

    spin_lock(&malloc_lock);
    block = dlmalloc(size);
    spin_unlock(&malloc_lock);

    return block;
}
// O(1) for small objects pointer allocate( size_type count, std::allocator<void>::const_pointer /*hint*/ = 0 ) const { POOL_MUTEX_LOCK; assert(ms_pool); ms_totalAllocs++; ms_allocCount++; pointer ret = 0; size_type to_alloc = count*sizeof(T); ret = (pointer)ms_pool->allocate(to_alloc); if (ret == 0) { #ifdef USE_DLMALLOC ret = reinterpret_cast<T*>( dlmalloc( to_alloc ) ); #else ret = reinterpret_cast<T*>( malloc(to_alloc) ); #endif } POOL_MUTEX_UNLOCK; return ret; }
void * ::operator new(size_t nSize) // throw(std::bad_alloc) { void * p = dlmalloc(nSize); if (!p) { throw std::bad_alloc(); } return p; }
/* Duplicate a NUL-terminated string into dlmalloc-managed storage. //maks
 * Returns NULL if 'str' is NULL or the allocation fails; otherwise the
 * caller owns the returned copy. */
char *strDup(const char *str)
{
	char *newstr = NULL;

	/* Guard against NULL input: the original called strlen(NULL), which
	 * is undefined behavior. */
	if ( str != NULL )
	{
		size_t len = strlen(str) + 1;   /* include the terminating NUL */
		newstr = (char *)dlmalloc(len);
		if ( newstr != NULL )
		{
			/* Length is already known, so memcpy avoids strcpy's
			 * second scan of the source. */
			memcpy(newstr, str, len);
		}
	}
	return(newstr);
}
/* Internal symmetric-heap allocation, serialized by the allocation
 * mutex. No barrier is performed here (see shmem_malloc). */
void* shmem_internal_shmalloc(size_t size)
{
    void *block;

    SHMEM_MUTEX_LOCK(shmem_internal_mutex_alloc);
    block = dlmalloc(size);
    SHMEM_MUTEX_UNLOCK(shmem_internal_mutex_alloc);

    return block;
}
extern "C" void* chk_malloc(size_t size) { // log_message("%s: %s\n", __FILE__, __FUNCTION__); hdr_t* hdr = static_cast<hdr_t*>(dlmalloc(sizeof(hdr_t) + size + sizeof(ftr_t))); if (hdr) { hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); add(hdr, size); return user(hdr); } return NULL; }
/** Allocates a portion of memory in the system and returns a pointer on it
 * @param[in] _u32Size   size of the memory to allocate
 * @param[in] _eMemType  Memory zone where data will be allocated
 * @return returns a pointer on the memory allocated, or orxNULL if an error has occurred
 */
void *orxFASTCALL orxMemory_Allocate(orxU32 _u32Size, orxMEMORY_TYPE _eMemType)
{
  void *pResult;

  /* Checks: module must be initialized and the memory type valid */
  orxASSERT((sstMemory.u32Flags & orxMEMORY_KU32_STATIC_FLAG_READY) == orxMEMORY_KU32_STATIC_FLAG_READY);
  orxASSERT(_eMemType < orxMEMORY_TYPE_NUMBER);

#ifdef __orxPROFILER__

  /* Allocates memory, with room for a leading orxMEMORY_TYPE tag used by
   * the tracker */
  pResult = dlmalloc((size_t)(_u32Size + sizeof(orxMEMORY_TYPE)));

  /* Success? */
  if(pResult != NULL)
  {
    size_t uMemoryChunkSize;

    /* Tags memory chunk with its zone so the matching free can attribute it */
    *(orxMEMORY_TYPE *)pResult = _eMemType;

    /* Gets memory chunk size (actual usable size, which may exceed the
     * requested size) */
    uMemoryChunkSize = dlmalloc_usable_size(pResult);

    /* Updates memory tracker with the usable size minus the tag overhead */
    orxMemory_Track(_eMemType, (orxU32)(uMemoryChunkSize - sizeof(orxMEMORY_TYPE)), orxTRUE);

    /* Updates result: callers receive the address just past the tag */
    pResult = (orxU8 *)pResult + sizeof(orxMEMORY_TYPE);
  }

#else /* __orxPROFILER__ */

  /* Allocates memory (no tag/tracking outside profiler builds) */
  pResult = dlmalloc((size_t)_u32Size);

#endif /* __orxPROFILER__ */

  /* Done! */
  return pResult;
}
// Allocate nSize user bytes preceded by a MemoryHead tag copied from
// m_info (with its high bit set). Returns the address just past the
// header, or NULL when dlmalloc fails.
void* DLAllocator::TMalloc(int32 nSize)
{
	int32 nTotalSize = nSize + sizeof(MemoryHead);

	void* pBlock = dlmalloc(nTotalSize);
	//void* pBlock = malloc(nTotalSize);
	if (pBlock == NULL)
		return NULL;

	MemoryHead* pHead = (MemoryHead*)pBlock;
	*pHead = m_info;
	pHead->MemInfo |= 0x80000000;

	return (void*)((char*)pBlock + sizeof(MemoryHead));
}
void* chk_malloc(size_t size) { struct hdr *hdr; // log_message("%s: %s\n", __FILE__, __FUNCTION__); hdr = dlmalloc(sizeof(struct hdr) + size + sizeof(struct ftr)); if (hdr) { hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); add(hdr, size); return user(hdr); } return NULL; }
/* Public shmem_malloc: allocate from the symmetric heap under the
 * allocation mutex, then synchronize all PEs with a barrier before
 * returning. */
void * shmem_malloc(size_t size)
{
    void *block;

    SHMEM_ERR_CHECK_INITIALIZED();

    SHMEM_MUTEX_LOCK(shmem_internal_mutex_alloc);
    block = dlmalloc(size);
    SHMEM_MUTEX_UNLOCK(shmem_internal_mutex_alloc);

    shmem_internal_barrier_all();

    return block;
}
/* malloc override: small requests made on the main thread are served by
 * the custom allocator; everything else (other threads, zero or large
 * sizes, custom-allocator exhaustion) falls through to dlmalloc under a
 * mutex. */
void* malloc(size_t size)
{
    void* block;

    if (pthread_equal(pthread_self(), main_thread)
            && size > 0 && size <= MAX_CHUNK_SIZE) {
        /* Round up first; note the rounded size is also what dlmalloc
         * receives if the custom allocator cannot satisfy the request. */
        size = ROUNDUP_SIZE(size);
        block = allocate_memory(size);
        if (block != NULL) {
            return block;
        }
    }

    pthread_mutex_lock(&dlmalloc_mutex);
    block = dlmalloc(size);
    pthread_mutex_unlock(&dlmalloc_mutex);
    return block;
}
void init_custom_malloc() { main_thread = pthread_self(); // allocate the memory -- allocate an extra block at the end, so that // if the address we get back isn't block-aligned, we can advance // the pointer until it is. void* alloc = dlmalloc(CUSTOM_MEMORY_SIZE + BLOCK_SIZE); assert(alloc); begin_superblock = (uint8_t*)alloc; while(((uintptr_t)begin_superblock)%BLOCK_SIZE) { ++begin_superblock; } end_superblock = begin_superblock + CUSTOM_MEMORY_SIZE; begin_superblock_range = begin_superblock; }
/* Populate 'message' from the Lua table at stack index 'pos': each array
 * element becomes one line of text. Raises a Lua error if the argument is
 * not a table or the line array cannot be allocated.
 * NOTE(review): the strings are popped from the stack after their pointers
 * are stored; they presumably stay referenced via the table at 'pos' —
 * confirm the table outlives 'message'. */
static void fill_text_message(lua_State *L, struct text_message * message,
                              int pos)
{
    int i;
    luaL_checktype(L, pos, LUA_TTABLE);
    int n = luaL_getn(L, pos);

    const char **lines = (const char**) dlmalloc(n * sizeof(const char*));
    /* BUG FIX: 'n * sizeof(const char*)' has type size_t, but the format
     * uses %d; pass an int explicitly so the vararg matches the format
     * specifier (mismatched varargs are undefined behavior). */
    if(lines == NULL)
        luaL_error(L, "Can't allocate %d bytes!", (int)(n * sizeof(const char*)));

    /* Lua arrays are 1-based; store into the 0-based C array. */
    for(i=1; i<=n; i++)
    {
        lua_rawgeti(L, pos, i);
        lines[i-1] = luaL_checkstring(L, -1);
        lua_pop(L, 1);
    }

    message->message_lines = lines;
    message->nb_lines = n;
}
/* Allocate a chunk of memory with the given size.  Returns a pointer
   to the writable address, and sets *CODE to the executable
   corresponding virtual address.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  /* A closure is unusable without its executable alias, so refuse the
     call outright if the caller gave us nowhere to store it.  */
  if (!code)
    return NULL;

  ptr = dlmalloc (size);

  if (ptr)
    {
      /* Find the dlmalloc segment holding the new block, then translate
         the writable address into the corresponding address in that
         segment's executable mapping.  NOTE(review): seg is not checked
         for NULL before use — presumably segment_holding always succeeds
         for a block dlmalloc just returned; confirm.  */
      msegmentptr seg = segment_holding (gm, ptr);

      *code = add_segment_exec_offset (ptr, seg);
    }

  return ptr;
}
// Initialize the substring matcher with pattern 'subs' of length 'len';
// 'icase' selects case-insensitive matching. Builds a bad-character skip
// table (_shf) covering the character range [_from, _to] that appears in
// the pattern.
void substring::set( const char* subs, uints len, bool icase )
{
    // Single-character patterns use the dedicated overload.
    if(len == 1)
        return set(*subs, icase);

    _icase = icase;
    _subs = (const uchar*)subs;
    _len = len;

    //create uninitialized distance array, compute range and fill the distances
    // the dist array stores how many characters remain until the end of the substring
    // from the last occurence of each of the characters in the substring
    //note value 0 means that the character isn't there and it's safe to skip
    // whole substring length as the substring cannot be there
    // NOTE(review): the first pattern character is recorded without the
    // ::tolower applied to subsequent characters when icase is set —
    // confirm this asymmetry is intended.
    _from = _to = *subs++;
    uints dist[256];
    dist[_to] = len;

    // Scan the remaining characters, widening [_from, _to] as needed and
    // zero-filling any newly exposed cells before recording distances.
    for( uints i=1; i<len; ++i,++subs )
    {
        uchar c = *subs;
        if(_icase)
            c = ::tolower(c);

        if( c < _from ) {
            ::memset( dist+c+1, 0, (_from-c-1)*sizeof(uints) );
            _from = c;
        }
        if( c > _to ) {
            ::memset( dist+_to+1, 0, (c-_to-1)*sizeof(uints) );
            _to = c;
        }
        dist[c] = len - i;
    }

    // Replace any previous skip table with a copy of the covered range.
    // NOTE(review): the old table is released with delete[] but the new one
    // comes from dlmalloc — if _shf was last set by this function, that is
    // a mismatched allocator/deallocator pair; verify how _shf is first
    // allocated and freed elsewhere.
    if(_shf)
        delete[] _shf;
    uints n = (uints)_to+1 - (uints)_from;
    _shf = (uints*)dlmalloc(n * sizeof(uints));//new uints[n];
    ::memcpy(_shf, dist+_from, n*sizeof(uints));
}
// Parse one line of a memory-map listing into a freshly allocated mapinfo.
// Returns 0 for lines that are too short, not executable, or when
// allocation fails.
// NOTE(review): the fixed offsets (column 20 for the 'x' permission bit,
// line+9 for the end address, line+49 for the name, len-47 for the name
// length) assume the fixed-width /proc/<pid>/maps layout of a 32-bit
// system — confirm against the producing kernel format.
static mapinfo* parse_maps_line(char* line)
{
    int len = strlen(line);

    if (len < 1)
        return 0;
    line[--len] = 0;        // strip the trailing newline

    if (len < 50)
        return 0;           // too short to contain a name field
    if (line[20] != 'x')
        return 0;           // keep only executable mappings

    // Allocate the struct plus room for the name tail (len - 47 bytes).
    mapinfo* mi = static_cast<mapinfo*>(dlmalloc(sizeof(mapinfo) + (len - 47)));
    if (mi == 0)
        return 0;

    mi->start = strtoul(line, 0, 16);
    mi->end = strtoul(line + 9, 0, 16);
    /* To be filled in parse_elf_info if the mapped section starts with
     * elf_header
     */
    mi->next = 0;
    strcpy(mi->name, line + 49);
    return mi;
}
/* Fortran SHPALLOC binding: allocate (*length) 32-bit words from the
 * symmetric heap into *addr. On success *errcode is 0 and all PEs
 * synchronize with a barrier. On failure *errcode is -1 (bad length) or
 * -2 (allocation failure) when *want_abort is 0; otherwise the job is
 * aborted via RAISE_ERROR. */
void FC_SHPALLOC(void **addr, fortran_integer_t *length,
                 fortran_integer_t *errcode, fortran_integer_t *want_abort)
{
    size_t nbytes;

    SHMEM_ERR_CHECK_INITIALIZED();

    nbytes = ((size_t) *length) * 4;    /* length is number of 32 bit words */
    *errcode = 0;

    if (0 == nbytes) {
        if (0 == *want_abort) {
            *errcode = -1;
            return;
        }
        else {
            fprintf(stderr,
                    "[%03d] ERROR: shpalloc failure (invalid length). Aborting job.\n",
                    shmem_internal_my_pe);
            RAISE_ERROR(1);
        }
    }

    SHMEM_MUTEX_LOCK(shmem_internal_mutex_alloc);
    *addr = dlmalloc(nbytes);
    SHMEM_MUTEX_UNLOCK(shmem_internal_mutex_alloc);

    if (NULL == *addr) {
        if (0 == *want_abort) {
            *errcode = -2;
            return;
        }
        else {
            fprintf(stderr,
                    "[%03d] ERROR: shpalloc failure. Aborting job.\n",
                    shmem_internal_my_pe);
            RAISE_ERROR(1);
        }
    }

    shmem_internal_barrier_all();
}
// Debug memalign: over-allocate so the tracked user pointer can be slid
// forward to the requested alignment; hdr->base remembers the raw block
// for the matching free.
extern "C" void* chk_memalign(size_t alignment, size_t bytes)
{
    // dlmalloc already guarantees MALLOC_ALIGNMENT, so small requests are
    // plain chk_malloc calls.
    if (alignment <= MALLOC_ALIGNMENT) {
        return chk_malloc(bytes);
    }

    // Make the alignment a power of two.
    // NOTE(review): 1L << (31 - clz) rounds DOWN to the previous power of
    // two (e.g. 48 -> 32), despite the comment's wording — confirm this
    // weakening of the caller's alignment request is intended.
    if (alignment & (alignment-1)) {
        alignment = 1L << (31 - __builtin_clz(alignment));
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = dlmalloc(sizeof(hdr_t) + size + sizeof(ftr_t));
    if (base != NULL) {
        // Check that the actual pointer that will be returned is aligned
        // properly.
        uintptr_t ptr = reinterpret_cast<uintptr_t>(user(reinterpret_cast<hdr_t*>(base)));
        if ((ptr % alignment) != 0) {
            // Align the pointer.
            ptr += ((-ptr) % alignment);
        }

        // The header sits immediately before the (possibly slid) user
        // pointer; record the raw block so free can release it.
        hdr_t* hdr = meta(reinterpret_cast<void*>(ptr));
        hdr->base = base;
        hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH);
        add(hdr, bytes);
        return user(hdr);
    }
    return base;
}
/* * Retrieve native heap information. * * "*info" is set to a buffer we allocate * "*overallSize" is set to the size of the "info" buffer * "*infoSize" is set to the size of a single entry * "*totalMemory" is set to the sum of all allocations we're tracking; does * not include heap overhead * "*backtraceSize" is set to the maximum number of entries in the back trace */ void get_malloc_leak_info(uint8_t** info, size_t* overallSize, size_t* infoSize, size_t* totalMemory, size_t* backtraceSize) { // don't do anything if we have invalid arguments if (info == NULL || overallSize == NULL || infoSize == NULL || totalMemory == NULL || backtraceSize == NULL) { return; } *totalMemory = 0; pthread_mutex_lock(&gAllocationsMutex); if (gHashTable.count == 0) { *info = NULL; *overallSize = 0; *infoSize = 0; *backtraceSize = 0; goto done; } void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count); // get the entries into an array to be sorted int index = 0; int i; for (i = 0 ; i < HASHTABLE_SIZE ; i++) { HashEntry* entry = gHashTable.slots[i]; while (entry != NULL) { list[index] = entry; *totalMemory = *totalMemory + ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations); index++; entry = entry->next; } } // XXX: the protocol doesn't allow variable size for the stack trace (yet) *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE); *overallSize = *infoSize * gHashTable.count; *backtraceSize = BACKTRACE_SIZE; // now get A byte array big enough for this *info = (uint8_t*)dlmalloc(*overallSize); if (*info == NULL) { *overallSize = 0; goto out_nomem_info; } qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare); uint8_t* head = *info; const int count = gHashTable.count; for (i = 0 ; i < count ; i++) { HashEntry* entry = list[i]; size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries); if (entrySize < *infoSize) { /* we're writing less than a full entry, clear out the rest */ memset(head + entrySize, 0, *infoSize - 
entrySize); } else { /* make sure the amount we're copying doesn't exceed the limit */ entrySize = *infoSize; } memcpy(head, &(entry->size), entrySize); head += *infoSize; } out_nomem_info: dlfree(list); done: pthread_mutex_unlock(&gAllocationsMutex); }
// Only PCM!!!!, no any check
// Open a RIFF/WAVE file via the Windows multimedia I/O API, read its
// 'fmt ' and 'data' chunks into dlmalloc'd buffers, and return a heap
// copy of the populated pcm_s handle. Returns NULL on any failure, after
// freeing whatever was allocated.
// NOTE(review): the non-Windows branch (#else) falls off the end with no
// return value — undefined behavior if ever compiled; confirm this path
// is unreachable in practice.
__rttype_ struct pcm_s *_open_wave (const char *name)
{
#ifdef __WINDOWS_
  HMMIO mmioHandle;
  MMCKINFO mmckinfoParent;
  MMCKINFO mmckinfoSubChunk;
  struct pcm_s waveHandle;

  mmioHandle = mmioOpenA((LPSTR)name, NULL, MMIO_ALLOCBUF | MMIO_READ);
  if (!mmioHandle)
    return NULL;
  /* Zero-fill so the cleanup code below can test infos/snd_buf safely. */
  memset (&mmckinfoParent, 0, sizeof(MMCKINFO));
  memset (&mmckinfoSubChunk, 0, sizeof(MMCKINFO));
  memset (&waveHandle, 0, sizeof(waveHandle));
  mmckinfoParent.fccType = mmioFOURCC('W', 'A', 'V', 'E');
  /* Descend into the top-level RIFF 'WAVE' chunk. */
  if (MMSYSERR_NOERROR == mmioDescend(mmioHandle, (LPMMCKINFO) & mmckinfoParent, NULL, MMIO_FINDRIFF))
  {
    mmckinfoSubChunk.ckid = mmioFOURCC('f', 'm', 't', ' ');
    /* Find 'fmt ' chunk */
    if (MMSYSERR_NOERROR == mmioDescend(mmioHandle, &mmckinfoSubChunk, &mmckinfoParent, MMIO_FINDCHUNK))
    {
      /* Read the whole format chunk into a fresh buffer. */
      waveHandle.infos = dlmalloc (mmckinfoSubChunk.cksize);
      waveHandle.wf_size = mmckinfoSubChunk.cksize;
      if (NULL != waveHandle.infos)
      {
        if (mmckinfoSubChunk.cksize == mmioRead(mmioHandle, (HPSTR)waveHandle.infos, mmckinfoSubChunk.cksize))
        {
          /* Step back out of 'fmt ' so we can search for 'data'. */
          if (MMSYSERR_NOERROR == mmioAscend(mmioHandle, &mmckinfoSubChunk, 0))
          {
            /* Find 'data' chunk */
            mmckinfoSubChunk.ckid = mmioFOURCC('d', 'a', 't', 'a');
            if (MMSYSERR_NOERROR == mmioDescend(mmioHandle, &mmckinfoSubChunk, &mmckinfoParent, MMIO_FINDCHUNK))
            {
              if (0 != mmckinfoSubChunk.cksize)
              {
                /* NOTE(review): the extra 176400 bytes of slack are
                 * unexplained here — presumably one second of 44.1 kHz
                 * 16-bit stereo padding; confirm against the consumer. */
                waveHandle.snd_buf = dlmalloc (mmckinfoSubChunk.cksize + 176400);
                if (NULL != waveHandle.snd_buf)
                {
                  if (-1 != mmioSeek(mmioHandle, mmckinfoSubChunk.dwDataOffset, SEEK_SET))
                  {
                    if (-1 != mmioRead(mmioHandle, waveHandle.snd_buf, mmckinfoSubChunk.cksize))
                    {
                      /* Read success !!! Compute duration bookkeeping and
                       * hand back a heap copy of the handle. */
                      struct pcm_s *p = dlmalloc (sizeof(struct pcm_s));
                      if (NULL != p)
                      {
                        mmioClose (mmioHandle, 0);
                        waveHandle.mb_size = mmckinfoSubChunk.cksize;
                        waveHandle.fquad_ = ((float)mmckinfoSubChunk.cksize) / (float)waveHandle.infos->average_rate;
                        waveHandle.quad_ = mmckinfoSubChunk.cksize / waveHandle.infos->average_rate;
                        waveHandle.fsmall_ = fmod((float)mmckinfoSubChunk.cksize, (float)waveHandle.infos->average_rate) / (float)waveHandle.infos->average_rate;
                        waveHandle.small_ = (int)(waveHandle.fsmall_ * (float)60.000);
                        memcpy (p, &waveHandle, sizeof (struct pcm_s));
                        return p;
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* Failure path: release any partially constructed buffers. */
  if (NULL != waveHandle.infos)
    dlfree (waveHandle.infos);
  if (NULL != waveHandle.snd_buf)
    dlfree (waveHandle.snd_buf);
  mmioClose (mmioHandle, 0);
  return NULL;
#else
#endif
}
extern "C" void* chk_realloc(void* ptr, size_t size) { // log_message("%s: %s\n", __FILE__, __FUNCTION__); if (!ptr) { return chk_malloc(size); } #ifdef REALLOC_ZERO_BYTES_FREE if (!size) { chk_free(ptr); return NULL; } #endif hdr_t* hdr = meta(ptr); if (del(hdr) < 0) { uintptr_t bt[MAX_BACKTRACE_DEPTH]; int depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH); if (hdr->tag == BACKLOG_TAG) { log_message("+++ REALLOCATION %p SIZE %d OF FREED MEMORY!\n", user(hdr), size, hdr->size); log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n", user(hdr), hdr->size); log_backtrace(hdr->bt, hdr->bt_depth); /* hdr->freed_bt_depth should be nonzero here */ log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n", user(hdr), hdr->size); log_backtrace(hdr->freed_bt, hdr->freed_bt_depth); log_message("+++ ALLOCATION %p SIZE %d NOW BEING REALLOCATED HERE:\n", user(hdr), hdr->size); log_backtrace(bt, depth); /* We take the memory out of the backlog and fall through so the * reallocation below succeeds. Since we didn't really free it, we * can default to this behavior. */ del_from_backlog(hdr); } else { log_message("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n", user(hdr), size); log_backtrace(bt, depth); // just get a whole new allocation and leak the old one return dlrealloc(0, size); // return dlrealloc(user(hdr), size); // assuming it was allocated externally } } if (hdr->base != hdr) { // An allocation from memalign, so create another allocation and // copy the data out. void* newMem = dlmalloc(sizeof(hdr_t) + size + sizeof(ftr_t)); if (newMem) { memcpy(newMem, hdr, sizeof(hdr_t) + hdr->size); dlfree(hdr->base); hdr = static_cast<hdr_t*>(newMem); } else { dlfree(hdr->base); hdr = NULL; } } else { hdr = static_cast<hdr_t*>(dlrealloc(hdr, sizeof(hdr_t) + size + sizeof(ftr_t))); } if (hdr) { hdr->base = hdr; hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); add(hdr, size); return user(hdr); } return NULL; }
// Global array operator new backed by dlmalloc.
// NOTE(review): unlike a conforming operator new[], this returns NULL on
// failure instead of throwing std::bad_alloc (there is no failure check
// here) — callers must tolerate a null result.
void* operator new[](size_t size)
{
    return dlmalloc(size);
}
// as published by the Free Software Foundation; either version 3 // of the License, or (at your option) any later version. // // The complete GNU General Public Licence Notice can be found as the // `COPYING' file in the root directory. // #if defined(__slc_os_sim__) && (defined(__slc_arch_mtsparc__) || defined(__slc_arch_mtalpha__)) #include <stdlib.h> #include "mtmalloc.h" sl_def(t_dlmalloc,,sl_shparm(void*, szret)) { size_t sz = (size_t)sl_getp(szret); sl_setp(szret, (void*)dlmalloc(sz)); } sl_enddef sl_def(t_dlfree,,sl_glparm(void*, ptr)) { dlfree(sl_getp(ptr)); } sl_enddef sl_def(t_dlcalloc,,sl_glparm(size_t, cnt), sl_shparm(void*, szret)) { size_t sz = (size_t)sl_getp(szret); sl_setp(szret, (void*)dlcalloc(sl_getp(cnt), sz)); } sl_enddef
/* This routine serves as entry point for 'realloc'.
 * This routine behaves similarly to qemu_instrumented_free +
 * qemu_instrumented_malloc. Note that this modifies behavior of "shrinking" an
 * allocation, but overall it doesn't seem to matter, as caller of realloc
 * should not expect that pointer returned after shrinking will remain the same.
 */
void* qemu_instrumented_realloc(void* mem, size_t bytes) {
    MallocDesc new_desc;
    MallocDesc cur_desc;
    size_t to_copy;
    void* ret;

    if (mem == NULL) {
        // Nothing to realloc. just do regular malloc.
        qemu_info_log("::: <libc_pid=%03u, pid=%03u>: realloc(%p, %u) redir to malloc",
                      malloc_pid, getpid(), mem, bytes);
        return qemu_instrumented_malloc(bytes);
    }

    if (bytes == 0) {
        // This is a "free" condition.
        qemu_info_log("::: <libc_pid=%03u, pid=%03u>: realloc(%p, %u) redir to free and malloc",
                      malloc_pid, getpid(), mem, bytes);
        qemu_instrumented_free(mem);

        // This is what dlrealloc does for a "free" realloc.
        return NULL;
    }

    // Query emulator for the reallocating block information.
    // NOTE(review): the literal '2' is presumably a routine identifier for
    // the emulator's query protocol — confirm against its definition.
    if (query_qemu_malloc_info(mem, &cur_desc, 2)) {
        // Note that this violation should be already caught in the emulator.
        error_log("<libc_pid=%03u, pid=%03u>: realloc(%p, %u) query_info failed.",
                  malloc_pid, getpid(), mem, bytes);
        return NULL;
    }

#if TEST_ACCESS_VIOLATIONS
    test_access_violation(&cur_desc);
#endif  // TEST_ACCESS_VIOLATIONS

    /* Make sure that reallocating pointer value is what we would expect
     * for this memory block. Note that this violation should be already caught
     * in the emulator.*/
    if (mem != mallocdesc_user_ptr(&cur_desc)) {
        log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u) is invalid for ",
                  malloc_pid, getpid(), mem, bytes);
        return NULL;
    }

    /* TODO: We're a bit inefficient here, always allocating new block from
     * the heap. If this realloc shrinks current buffer, we can just do the
     * shrinking "in place", adjusting suffix_size in the allocation descriptor
     * for this block that is stored in the emulator.
     */

    // Initialize descriptor for the new block.
    new_desc.prefix_size = DEFAULT_PREFIX_SIZE;
    new_desc.requested_bytes = bytes;
    new_desc.suffix_size = DEFAULT_SUFFIX_SIZE;
    new_desc.ptr = dlmalloc(mallocdesc_alloc_size(&new_desc));
    if (new_desc.ptr == NULL) {
        log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u): dlmalloc(%u) failed on ",
                  malloc_pid, getpid(), mem, bytes,
                  mallocdesc_alloc_size(&new_desc));
        return NULL;
    }
    ret = mallocdesc_user_ptr(&new_desc);

    // Copy user data from old block to the new one (the smaller of the old
    // requested size and the new size).
    to_copy = bytes < cur_desc.requested_bytes ? bytes : cur_desc.requested_bytes;
    if (to_copy != 0) {
        memcpy(ret, mallocdesc_user_ptr(&cur_desc), to_copy);
    }

    // Register new block with emulator.
    if (notify_qemu_malloc(&new_desc)) {
        log_mdesc(error, &new_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u) notify_malloc failed -> ",
                  malloc_pid, getpid(), mem, bytes);
        log_mdesc(error, &cur_desc, " <- ");
        dlfree(new_desc.ptr);
        return NULL;
    }

#if TEST_ACCESS_VIOLATIONS
    test_access_violation(&new_desc);
#endif  // TEST_ACCESS_VIOLATIONS

    // Free old block.
    if (notify_qemu_free(mem)) {
        log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u): notify_free failed for ",
                  malloc_pid, getpid(), mem, bytes);
        /* Since we registered new descriptor with the emulator, we need
         * to unregister it before freeing newly allocated block. */
        notify_qemu_free(mallocdesc_user_ptr(&new_desc));
        dlfree(new_desc.ptr);
        return NULL;
    }
    dlfree(cur_desc.ptr);

    log_mdesc(info, &new_desc, "=== <libc_pid=%03u, pid=%03u>: realloc(%p, %u) -> ",
              malloc_pid, getpid(), mem, bytes);
    log_mdesc(info, &cur_desc, " <- ");

    return ret;
}
/*!
    Allocates \a size bytes and returns a pointer to the allocated memory.
    The memory is not cleared.  Returns 0 if the memory could not be
    allocated.
*/
void *QMallocPool::malloc(size_t size)
{
    Q_ASSERT(d && "Cannot operate on a null malloc pool");
    // QMallocPtr presumably routes the dlmalloc call below to this pool's
    // state for the duration of the call — confirm in QMallocPtr.
    QMallocPtr p(d);

    return dlmalloc(size);
}
// Redirect all allocator calls to our dlmalloc.
// Reentrant newlib hook; the reent structure is not needed here.
void* _malloc_r( struct _reent* r, size_t size )
{
    (void)r;
    return dlmalloc( size );
}
/* Ruby method: resolve a symbol in an open dl handle and wrap it as a DL
 * symbol object. Takes the symbol name (or nil for RTLD_NEXT where
 * available) and an optional type signature string. Raises RuntimeError
 * for a closed handle or an unresolvable symbol. */
VALUE
rb_dlhandle_sym(int argc, VALUE argv[], VALUE self)
{
    VALUE sym, type;
    void (*func)();
    VALUE val;
    struct dl_handle *dlhandle;
    void *handle;
    const char *name, *stype;
    const char *err;

    rb_secure(2);
    /* "11" = one required arg (sym) and one optional arg (type). */
    if (rb_scan_args(argc, argv, "11", &sym, &type) == 2) {
	SafeStringValue(type);
	stype = StringValuePtr(type);
    }
    else{
	stype = NULL;
    }

    if (sym == Qnil) {
#if defined(RTLD_NEXT)
	/* NOTE(review): RTLD_NEXT is a pseudo-handle value, not a string;
	 * storing it in a const char* relies on dlsym treating it as an
	 * opaque token, and strlen(name) below would be invalid on this
	 * path — confirm the fallback branch is unreachable for nil sym. */
	name = RTLD_NEXT;
#else
	name = NULL;
#endif
    }
    else{
	SafeStringValue(sym);
	name = StringValuePtr(sym);
    }

    Data_Get_Struct(self, struct dl_handle, dlhandle);
    if (!dlhandle->open) {
	rb_raise(rb_eRuntimeError, "closed handle");
    }
    handle = dlhandle->ptr;

    func = dlsym(handle, name);
#if defined(HAVE_DLERROR)
    if (!func && (err = dlerror()))
#else
    if (!func)
#endif
    {
#if defined(__CYGWIN__) || defined(WIN32) || defined(__MINGW32__)
	{
	    /* Windows ANSI convention: retry with an 'A' suffix appended
	     * to the symbol name.
	     * NOTE(review): the dlmalloc result is used unchecked — an
	     * allocation failure here would crash in strcpy; confirm. */
	    int len = strlen(name);
	    char *name_a = (char*)dlmalloc(len+2);
	    strcpy(name_a, name);
	    name_a[len] = 'A';
	    name_a[len+1] = '\0';
	    func = dlsym(handle, name_a);
	    dlfree(name_a);
#if defined(HAVE_DLERROR)
	    if (!func && (err = dlerror()))
#else
	    if (!func)
#endif
	    {
		rb_raise(rb_eRuntimeError, "unknown symbol \"%sA\"", name);
	    }
	}
#else
	rb_raise(rb_eRuntimeError, "unknown symbol \"%s\"", name);
#endif
    }
    val = rb_dlsym_new(func, name, stype);

    return val;
}