static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);        /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))              /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  (void) mutex_lock (&main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    {
      (void) mutex_unlock (&main_arena.mutex);

      malloc_printerr (check_action, "free(): invalid pointer", mem);
      return;
    }
  if (chunk_is_mmapped (p))
    {
      (void) mutex_unlock (&main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  (void) mutex_unlock (&main_arena.mutex);
}
static void *
internal_function
mem2mem_check (void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t i;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  for (i = chunksize (p) - (chunk_is_mmapped (p) ? 2 * SIZE_SZ + 1 :
                            SIZE_SZ + 1);
       i > sz;
       i -= 0xFF)
    {
      if (i - sz < 0x100)
        {
          m_ptr[i] = (unsigned char) (i - sz);
          break;
        }
      m_ptr[i] = 0xFF;
    }
  m_ptr[sz] = MAGICBYTE (p);
  return (void *) m_ptr;
}
static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char *brk, *new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static void
free_atfork (void *mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk (mem);       /* do not bother to replicate free_check here */

  if (chunk_is_mmapped (p))             /* release mmapped memory. */
    {
      munmap_chunk (p);
      return;
    }

  ar_ptr = arena_for_chunk (p);
  tsd_getspecific (arena_key, vptr);
  _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
/* Visualize the chunk as being partitioned into blocks of 256 bytes from the
   highest address of the chunk, downwards.  The beginning of each block tells
   us the size of the previous block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size(mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = MAGICBYTE(p);

  assert(using_malloc_checking == 1);

  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
       (c = ((unsigned char*)p)[size]) != magic;
       size -= c) {
    if(c<=0 || size<(c+2*SIZE_SZ)) {
      malloc_printerr(check_action,
                      "malloc_check_get_size: memory corruption",
                      chunk2mem(p));
      return 0;
    }
  }

  /* chunk2mem size.  */
  return size - 2*SIZE_SZ;
}
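/* The following is a minimal, standalone sketch of the trailer scheme that
   mem2mem_check (the writer) and malloc_check_get_size (the reader) above
   implement.  It operates on a flat byte buffer instead of a real malloc
   chunk, and MAGIC, write_trailer and read_trailer are illustrative names,
   not glibc's.  */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAGIC 0x42                     /* stand-in for MAGICBYTE(p) */

/* Writer: starting from the last usable byte, record at each step how far
   we still are from the requested end, in hops of at most 0xFF.  */
static void write_trailer(unsigned char *buf, size_t cap, size_t req)
{
  for (size_t i = cap; i > req; i -= 0xFF) {
    if (i - req < 0x100) {
      buf[i] = (unsigned char) (i - req);
      break;
    }
    buf[i] = 0xFF;
  }
  buf[req] = MAGIC;
}

/* Reader: follow the recorded distances downwards until the magic byte is
   reached; the index where it sits is the originally requested size.  */
static size_t read_trailer(const unsigned char *buf, size_t cap)
{
  size_t i = cap;
  unsigned char c;

  while ((c = buf[i]) != MAGIC) {
    assert(c != 0 && i >= c);          /* otherwise: memory corruption */
    i -= c;
  }
  return i;
}

int main(void)
{
  unsigned char buf[1024] = { 0 };

  write_trailer(buf, sizeof buf - 1, 300);
  printf("recovered size: %zu\n", read_trailer(buf, sizeof buf - 1));
  return 0;
}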
/* ------------------------------ realloc ------------------------------ */

void* ulibc_realloc(void* oldmem, size_t bytes)
{
  mstate av;

  size_t nb;                      /* padded request size */

  mchunkptr oldp;                 /* chunk corresponding to oldmem */
  size_t oldsize;                 /* its size */

  mchunkptr newp;                 /* chunk to return */
  size_t newsize;                 /* its size */
  void* newmem;                   /* corresponding user mem */

  mchunkptr next;                 /* next contiguous chunk after oldp */

  mchunkptr remainder;            /* extra space at end of newp */
  unsigned long remainder_size;   /* its size */

  mchunkptr bck;                  /* misc temp for linking */
  mchunkptr fwd;                  /* misc temp for linking */

  unsigned long copysize;         /* bytes to copy */
  unsigned int ncopies;           /* size_t words to copy */
  size_t* s;                      /* copy source */
  size_t* d;                      /* copy destination */

  void *retval;

  /* Check for special cases.  */
  if (! oldmem)
    return ulibc_malloc(bytes);
  if (! bytes) {
    ulibc_free (oldmem);
    return NULL;
  }

  av = get_malloc_state();
  checked_request2size(bytes, nb);

  oldp    = mem2chunk(oldmem);
  oldsize = chunksize(oldp);

  check_inuse_chunk(oldp);

  if (!chunk_is_mmapped(oldp)) {

    if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
      /* already big enough; split below */
      newp = oldp;
      newsize = oldsize;
    }

    else {
      next = chunk_at_offset(oldp, oldsize);

      /* Try to expand forward into top */
      if (next == av->top &&
          (unsigned long)(newsize = oldsize + chunksize(next)) >=
          (unsigned long)(nb + MINSIZE)) {
        set_head_size(oldp, nb);
        av->top = chunk_at_offset(oldp, nb);
        set_head(av->top, (newsize - nb) | PREV_INUSE);
        retval = chunk2mem(oldp);
        goto DONE;
      }

      /* Try to expand forward into next chunk;
         split off remainder below */
      else if (next != av->top &&
               !inuse(next) &&
               (unsigned long)(newsize = oldsize + chunksize(next)) >=
               (unsigned long)(nb)) {
        newp = oldp;
        unlink(next, bck, fwd);
      }

      /* allocate, copy, free */
      else {
        newmem = malloc(nb - MALLOC_ALIGN_MASK);
        if (newmem == 0) {
          retval = 0;               /* propagate failure */
          goto DONE;
        }

        newp = mem2chunk(newmem);
        newsize = chunksize(newp);

        /* Avoid copy if newp is next chunk after oldp. */
        if (newp == next) {
          newsize += oldsize;
          newp = oldp;
        }
        else {
          /*
             Unroll copy of <= 36 bytes (72 if 8byte sizes)
             We know that contents have an odd number of
             size_t-sized words; minimally 3.
             */

          copysize = oldsize - (sizeof(size_t));
          s = (size_t*)(oldmem);
          d = (size_t*)(newmem);
          ncopies = copysize / sizeof(size_t);
          assert(ncopies >= 3);

          if (ncopies > 9)
            memcpy(d, s, copysize);

          else {
            *(d+0) = *(s+0);
            *(d+1) = *(s+1);
            *(d+2) = *(s+2);
            if (ncopies > 4) {
              *(d+3) = *(s+3);
              *(d+4) = *(s+4);
              if (ncopies > 6) {
                *(d+5) = *(s+5);
                *(d+6) = *(s+6);
                if (ncopies > 8) {
                  *(d+7) = *(s+7);
                  *(d+8) = *(s+8);
                }
              }
            }
          }

          ulibc_free(oldmem);
          check_inuse_chunk(newp);
          retval = chunk2mem(newp);
          goto DONE;
        }
      }
    }

    /* If possible, free extra space in old or extended chunk */

    assert((unsigned long)(newsize) >= (unsigned long)(nb));

    remainder_size = newsize - nb;

    if (remainder_size < MINSIZE) {   /* not enough extra to split off */
      set_head_size(newp, newsize);
      set_inuse_bit_at_offset(newp, newsize);
    }
    else {                            /* split remainder */
      remainder = chunk_at_offset(newp, nb);
      set_head_size(newp, nb);
      set_head(remainder, remainder_size | PREV_INUSE);
      /* Mark remainder as inuse so free() won't complain */
      set_inuse_bit_at_offset(remainder, remainder_size);
      ulibc_free(chunk2mem(remainder));
    }

    check_inuse_chunk(newp);
    retval = chunk2mem(newp);
    goto DONE;
  }

  /*
     Handle mmap cases
     */

  else {
    size_t offset = oldp->prev_size;
    size_t pagemask = av->pagesize - 1;
    char *cp;
    unsigned long sum;

    /* Note the extra (sizeof(size_t)) overhead */
    newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

    /* don't need to remap if still within same page */
    if (oldsize == newsize - offset) {
      retval = oldmem;
      goto DONE;
    }

    cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

    if (cp != (char*)MORECORE_FAILURE) {

      newp = (mchunkptr)(cp + offset);
      set_head(newp, (newsize - offset)|IS_MMAPPED);

      assert(aligned_OK(chunk2mem(newp)));
      assert((newp->prev_size == offset));

      /* update statistics */
      sum = av->mmapped_mem += newsize - oldsize;
      if (sum > (unsigned long)(av->max_mmapped_mem))
        av->max_mmapped_mem = sum;
      sum += av->sbrked_mem;
      if (sum > (unsigned long)(av->max_total_mem))
        av->max_total_mem = sum;

      retval = chunk2mem(newp);
      goto DONE;
    }

    /* Note the extra (sizeof(size_t)) overhead. */
    if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
      newmem = oldmem;                /* do nothing */
    else {
      /* Must alloc, copy, free. */
      newmem = malloc(nb - MALLOC_ALIGN_MASK);
      if (newmem != 0) {
        memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
        ulibc_free(oldmem);
      }
    }
    retval = newmem;
  }

DONE:
  return retval;
}
/* ------------------------------ calloc ------------------------------ */

void* calloc(size_t n_elements, size_t elem_size)
{
  mchunkptr p;
  unsigned long clearsize;
  unsigned long nclears;
  size_t size, *d;
  void* mem;

  /* guard vs integer overflow, but allow nmemb
   * to fall through and call malloc(0) */
  size = n_elements * elem_size;
  if (n_elements && elem_size != (size / n_elements)) {
    __set_errno(ENOMEM);
    return NULL;
  }

  __MALLOC_LOCK;
  mem = malloc(size);
  if (mem != 0) {
    p = mem2chunk(mem);

    if (!chunk_is_mmapped(p)) {
      /*
         Unroll clear of <= 36 bytes (72 if 8byte sizes)
         We know that contents have an odd number of
         size_t-sized words; minimally 3.
         */

      d = (size_t*)mem;
      clearsize = chunksize(p) - (sizeof(size_t));
      nclears = clearsize / sizeof(size_t);
      assert(nclears >= 3);

      if (nclears > 9)
        memset(d, 0, clearsize);

      else {
        *(d+0) = 0;
        *(d+1) = 0;
        *(d+2) = 0;
        if (nclears > 4) {
          *(d+3) = 0;
          *(d+4) = 0;
          if (nclears > 6) {
            *(d+5) = 0;
            *(d+6) = 0;
            if (nclears > 8) {
              *(d+7) = 0;
              *(d+8) = 0;
            }
          }
        }
      }
    }
#if 0
    else {
      /* Standard unix mmap using /dev/zero clears memory so calloc
       * doesn't need to actually zero anything....
       */
      d = (size_t*)mem;
      /* Note the additional (sizeof(size_t)) */
      clearsize = chunksize(p) - 2*(sizeof(size_t));
      memset(d, 0, clearsize);
    }
#endif
  }
  __MALLOC_UNLOCK;
  return mem;
}
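/* A minimal sketch of the overflow guard used by calloc above: if the
   product n * elem wraps around in size_t arithmetic, dividing the wrapped
   product back by n can no longer recover elem.  mul_would_overflow is an
   illustrative helper, not part of the allocator.  */
#include <stddef.h>
#include <stdio.h>

static int mul_would_overflow(size_t n, size_t elem)
{
  size_t bytes = n * elem;             /* may wrap modulo SIZE_MAX + 1 */

  /* Same test as calloc: a wrapped product fails the division check.  */
  return n != 0 && elem != bytes / n;
}

int main(void)
{
  printf("%d\n", mul_would_overflow(4, 16));           /* 0: fits    */
  printf("%d\n", mul_would_overflow((size_t) -1, 16)); /* 1: wraps   */
  return 0;
}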
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem;               /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
/* ------------------------------ free ------------------------------ */

void free(void* mem)
{
  mstate av;

  mchunkptr p;                  /* chunk corresponding to mem */
  size_t size;                  /* its size */
  mfastbinptr* fb;              /* associated fastbin */
  mchunkptr nextchunk;          /* next contiguous chunk */
  size_t nextsize;              /* its size */
  int nextinuse;                /* true if nextchunk is used */
  size_t prevsize;              /* size of previous contiguous chunk */
  mchunkptr bck;                /* misc temp for linking */
  mchunkptr fwd;                /* misc temp for linking */

  /* free(0) has no effect */
  if (mem == NULL)
    return;

  __MALLOC_LOCK;
  av = get_malloc_state();
  p = mem2chunk(mem);
  size = chunksize(p);

  check_inuse_chunk(p);

  /*
     If eligible, place chunk on a fastbin so it can be found
     and used quickly in malloc.
     */

  if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
      /* If TRIM_FASTBINS set, don't place chunks
         bordering top into fastbins */
      && (chunk_at_offset(p, size) != av->top)
#endif
     ) {

    set_fastchunks(av);
    fb = &(av->fastbins[fastbin_index(size)]);
    p->fd = *fb;
    *fb = p;
  }

  /*
     Consolidate other non-mmapped chunks as they arrive.
     */

  else if (!chunk_is_mmapped(p)) {
    set_anychunks(av);

    nextchunk = chunk_at_offset(p, size);
    nextsize = chunksize(nextchunk);

    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
      set_head(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
        unlink(nextchunk, bck, fwd);
        size += nextsize;
      }

      /*
         Place the chunk in unsorted chunk list. Chunks are
         not placed into regular bins until after they have
         been given one chance to be used in malloc.
         */

      bck = unsorted_chunks(av);
      fwd = bck->fd;
      p->bk = bck;
      p->fd = fwd;
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(p);
    }

    /*
       If the chunk borders the current high end of memory,
       consolidate into top
       */

    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(p);
    }

    /*
       If freeing a large space, consolidate possibly-surrounding
       chunks. Then, if the total unused topmost memory exceeds trim
       threshold, ask malloc_trim to reduce top.

       Unless max_fast is 0, we don't know if there are fastbins
       bordering top, so we cannot tell for sure whether threshold
       has been reached unless fastbins are consolidated.  But we
       don't want to consolidate on each free.  As a compromise,
       consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
       is reached.
       */

    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
      if (have_fastchunks(av))
        __malloc_consolidate(av);

      if ((unsigned long)(chunksize(av->top)) >=
          (unsigned long)(av->trim_threshold))
        __malloc_trim(av->top_pad, av);
    }

  }
  /*
     If the chunk was allocated via mmap, release via munmap()
     Note that if HAVE_MMAP is false but chunk_is_mmapped is
     true, then user must have overwritten memory. There's nothing
     we can do to catch this error unless DEBUG is set, in which case
     check_inuse_chunk (above) will have triggered error.
     */

  else {
    size_t offset = p->prev_size;
    av->n_mmaps--;
    av->mmapped_mem -= (size + offset);
    munmap((char*)p - offset, size + offset);
  }
  __MALLOC_UNLOCK;
}
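/* For reference, a minimal model of the fastbin path taken above
   (p->fd = *fb; *fb = p): each fastbin is a singly linked LIFO list
   threaded through the fd field, so a free is two pointer writes and the
   matching malloc simply pops the head.  struct node and the helpers are
   illustrative, not the allocator's own types.  */
#include <stddef.h>
#include <stdio.h>

struct node { struct node *fd; };

static void fastbin_push(struct node **bin, struct node *p)
{
  p->fd = *bin;                 /* old head becomes the new chunk's successor */
  *bin = p;                     /* new chunk becomes the head */
}

static struct node *fastbin_pop(struct node **bin)
{
  struct node *p = *bin;
  if (p)
    *bin = p->fd;
  return p;
}

int main(void)
{
  struct node a, b, *bin = NULL;

  fastbin_push(&bin, &a);
  fastbin_push(&bin, &b);
  printf("%s\n", fastbin_pop(&bin) == &b ? "LIFO" : "FIFO");
  return 0;
}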
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  ...
  else if (!chunk_is_mmapped(p)) {
    ...
    nextchunk = chunk_at_offset(p, size);
    ...
    nextsize = chunksize(nextchunk);
    ...
    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
        unlink(nextchunk, bck, fwd);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset(nextchunk, 0);

      /*
         Place the chunk in unsorted chunk list. Chunks are
         not placed into regular bins until after they have
         been given one chance to be used in malloc.
         */

      bck = unsorted_chunks(av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
        {
          errstr = "free(): corrupted unsorted chunks";
          goto errout;
        }
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
        {
          p->fd_nextsize = NULL;
          p->bk_nextsize = NULL;
        }
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(av, p);
    }

    /*
       If the chunk borders the current high end of memory,
       consolidate into top
       */

    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }
  ...
}
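/* A minimal model of the unsorted-bin insertion above, assuming a circular
   doubly linked list with a sentinel head: new chunks go right after the
   head, and the fwd->bk != bck test rejects a head whose forward link no
   longer points back at it (the "free(): corrupted unsorted chunks" case).
   struct chunk and unsorted_insert are illustrative names only.  */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct chunk { struct chunk *fd, *bk; };

/* Insert p right after the sentinel head, refusing a corrupted list.  */
static bool unsorted_insert(struct chunk *head, struct chunk *p)
{
  struct chunk *bck = head;
  struct chunk *fwd = bck->fd;

  if (fwd->bk != bck)                  /* integrity check before linking */
    return false;

  p->fd = fwd;
  p->bk = bck;
  bck->fd = p;
  fwd->bk = p;
  return true;
}

int main(void)
{
  struct chunk head = { &head, &head };   /* empty bin: sentinel only */
  struct chunk c;

  printf("%s\n", unsorted_insert(&head, &c) ? "inserted" : "corrupted");
  return 0;
}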