static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned. */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
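The scan loops in mem2chunk_check only make sense together with the trailer that the MALLOC_CHECK_ allocation path writes behind each request: the byte at the user-visible end of the chunk either is the per-chunk magic byte or records how far to hop backwards toward it. Below is a minimal standalone sketch of that encoding; write_trailer, check_trailer, and the 0xA5 magic are illustrative names for this example, not glibc's.

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* After a request of `req` usable bytes inside an area of `avail`
   bytes, byte req holds the magic and every later byte holds the
   distance of the next backwards hop (0xFF for a full-size hop). */
static void write_trailer(unsigned char *mem, size_t req, size_t avail,
                          unsigned char magic)
{
    for (size_t i = avail - 1; i > req; i -= 0xFF) {
        if (i - req < 0x100) {
            mem[i] = (unsigned char) (i - req);   /* final short hop */
            break;
        }
        mem[i] = 0xFF;                            /* full-size hop */
    }
    mem[req] = magic;
}

/* Mirrors the scan loop in mem2chunk_check: follow the hops until the
   magic is found; a zero or oversized hop means the trailer is gone. */
static int check_trailer(const unsigned char *mem, size_t avail,
                         unsigned char magic)
{
    size_t i = avail - 1;
    unsigned char c;
    while ((c = mem[i]) != magic) {
        if (c == 0 || i < c)
            return 0;                             /* corrupt */
        i -= c;
    }
    return 1;
}

int main(void)
{
    unsigned char buf[1024];
    memset(buf, 0, sizeof buf);
    write_trailer(buf, 100, sizeof buf, 0xA5);
    assert(check_trailer(buf, sizeof buf, 0xA5));
    memset(buf + 100, 'X', 50);        /* simulate writing past the request */
    assert(!check_trailer(buf, sizeof buf, 0xA5));
    return 0;
}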
static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
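When the top chunk is corrupt, top_check rebuilds one by asking MORECORE for enough space to cover the alignment fix-up, the configured top pad, and a page-aligned end. A worked sketch of that arithmetic follows, with assumed values (16-byte MALLOC_ALIGNMENT, 4 KiB pages, LP64 sizes); it is illustrative, not the glibc code path itself.

#include <assert.h>

int main(void)
{
    unsigned long pagesz = 4096;
    unsigned long MALLOC_ALIGNMENT_ = 16;     /* assumption for x86-64 */
    unsigned long ALIGN_MASK = MALLOC_ALIGNMENT_ - 1;
    unsigned long top_pad = 131072, MINSIZE_ = 32;

    unsigned long brk = 0x602008;             /* MORECORE(0), say */
    unsigned long mem = brk + 16;             /* chunk2mem(brk): +2*SIZE_SZ */
    unsigned long front_misalign = mem & ALIGN_MASK;        /* 8 */
    if (front_misalign > 0)
        front_misalign = MALLOC_ALIGNMENT_ - front_misalign; /* 8 */

    unsigned long sbrk_size = front_misalign + top_pad + MINSIZE_;
    /* round so that brk + sbrk_size ends exactly on a page boundary */
    sbrk_size += pagesz - ((brk + sbrk_size) & (pagesz - 1));
    assert(((brk + sbrk_size) & (pagesz - 1)) == 0);
    return 0;
}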
//static void
void
pos_int_free(char *name, mstate av, mchunkptr p, int flag)
{
    INTERNAL_SIZE_T size;
    mfastbinptr* fb;
    mchunkptr prevchunk;
    INTERNAL_SIZE_T prevsize;
    mchunkptr nextchunk;
    INTERNAL_SIZE_T nextsize;
    int nextinuse;
    mchunkptr bck;
    mchunkptr fwd;

    //const char *errstr = NULL;

    size = chunksize(p);
    /*if ((uintptr_t) p > (uintptr_t) -size || misaligned_chunk (p)) {
        errstr = "free(): invalid pointer";
errout:
        //malloc_printerr (check_action, errstr, chunk2mem(p));
        return;
    }*/
    /*if (size < MINSIZE) {
        errstr = "free(): invalid size";
        goto errout;
    }*/

    //check_inuse_chunk(av, p);

    // fastbin
    if (flag==1 && (unsigned long)(size) <= (unsigned long)(get_max_fast ())) {
        /*if (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
            || chunksize (chunk_at_offset (p, size)) >= av->system_mem) {
            errstr = "free(): invalid next size (fast)";
            goto errout;
        }*/

#if CONSISTENCY == 1
        set_fastchunks_log(name, av);
#else
        set_fastchunks(av);
#endif
        fb = &fastbin(av, fastbin_index(size));
        if (*fb == p) {
            //errstr = "double free or corruption (fasttop)";
            //goto errout;
            return;
        }
#if CONSISTENCY == 1
        POS_WRITE_VAUE(name, (unsigned long *)&p->fd, (unsigned long)*fb);
        POS_WRITE_VAUE(name, (unsigned long *)fb, (unsigned long)p);
#else
        p->fd = *fb;
        *fb = p;
#endif
        return;
    }

    // 1. First chunk
    if (chunk_is_first(p)) {
        nextchunk = next_chunk(p);
        nextsize = chunksize(nextchunk);

        // 1-1. (free F), free L
        if (chunk_is_last(nextchunk) && !inuse(nextchunk)) {
            //if (av < p && p < (char *)(av+PAGESIZE)){
            if ((char*)av+sizeof(struct malloc_state) == (char*)p) {
#if CONSISTENCY == 1
                insert_to_unsorted_log(name, av, p, bck, fwd, size);
                set_foot_log(name, p, size);
                clear_inuse_bit_at_offset_log(name, p, size);
#else
                insert_to_unsorted(av, p, bck, fwd, size);
                set_foot(p, size);
                clear_inuse_bit_at_offset(p, size);
#endif
                goto out;
            }
            else {
#if CONSISTENCY == 1
                unlink_log(name, nextchunk, bck, fwd);
                size = size + nextsize + 2*SIZE_SZ;
                pos_log_insert_malloc_free(name, (unsigned long)p, size);
                //pos_seg_free(name, (void *)p, size); // Delayed pos_seg_free
                POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem,
                               (unsigned long)(av->system_mem-size));
#else
                unlink(nextchunk, bck, fwd);
                size = size + nextsize + 2*SIZE_SZ;
                /*if (size%PAGESIZE != 0) {
                    errstr = "free(): unmmap size is not page size";
                    goto errout;
                }*/
                //FREE((char*)p, size);
                pos_seg_free(name, (void *)p, size);
                av->system_mem -= size;
#endif
                goto out;
            }
        }
        // 1-3. (free F), free M
        else if (!inuse(nextchunk)) {
#if CONSISTENCY == 1
            unlink_log(name, nextchunk, bck, fwd);
            size += nextsize;
            insert_to_unsorted_log(name, av, p, bck, fwd, size);
            set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
            set_foot_log(name, p, size);
#else
            unlink(nextchunk, bck, fwd);
            size += nextsize;
            insert_to_unsorted(av, p, bck, fwd, size);
            set_head(p, size | FIRST_CHUNK | PREV_INUSE);
            set_foot(p, size);
#endif
            goto out;
        }
        // 1-2. (free F), inuse L & 1-4. (free F), inuse M
        else {
#if CONSISTENCY == 1
            insert_to_unsorted_log(name, av, p, bck, fwd, size);
            set_foot_log(name, p, size);
            clear_inuse_bit_at_offset_log(name, p, size);
#else
            insert_to_unsorted(av, p, bck, fwd, size);
            set_foot(p, size);
            clear_inuse_bit_at_offset(p, size);
#endif
            goto out;
        }
    }

    // 2. Last chunk
    else if (chunk_is_last(p)) {
        if (!prev_inuse(p)) {
            prevchunk = prev_chunk(p);
            prevsize = chunksize(prevchunk);

            // 2-1. free F, (free L)
            if (chunk_is_first(prevchunk)) {
                //if (av < prevchunk && prevchunk < av+PAGESIZE){
                if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
                    insert_to_unsorted_log(name, av, p, bck, fwd, size);
                    set_foot_log(name, p, size);
                    clear_inuse_bit_at_offset_log(name, p, size);
#else
                    insert_to_unsorted(av, p, bck, fwd, size);
                    set_foot(p, size);
                    clear_inuse_bit_at_offset(p, size);
#endif
                    goto out;
                }
                else {
#if CONSISTENCY == 1
                    unlink_log(name, prevchunk, bck, fwd);
                    size = prevsize+size+2*SIZE_SZ;
                    //pos_seg_free(name, (void *)p, size);
                    pos_log_insert_malloc_free(name, (unsigned long)p, size);
                    POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem,
                                   (unsigned long)(av->system_mem-size));
#else
                    unlink(prevchunk, bck, fwd);
                    size = prevsize+size+2*SIZE_SZ;
                    /*if (size%PAGESIZE != 0) {
                        errstr = "free(): unmmap size is not page size";
                        goto errout;
                    }*/
                    //FREE((char*)p, size);
                    pos_seg_free(name, (void *)p, size);
                    av->system_mem -= size;
#endif
                    goto out;
                }
            }
            // 2-3. free M, (free L)
            else {
#if CONSISTENCY == 1
                unlink_log(name, prevchunk, bck, fwd);
                size += prevsize;
                p = chunk_at_offset(p, -((long) prevsize));
                insert_to_unsorted_log(name, av, p, bck, fwd, size);
                set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
                set_foot_log(name, p, size);
                clear_inuse_bit_at_offset_log(name, p, size);
#else
                unlink(prevchunk, bck, fwd);
                size += prevsize;
                p = chunk_at_offset(p, -((long) prevsize));
                insert_to_unsorted(av, p, bck, fwd, size);
                set_head(p, size | LAST_CHUNK | PREV_INUSE);
                set_foot(p, size);
                clear_inuse_bit_at_offset(p, size);
#endif
                goto out;
            }
        }
        // 2-2. inuse F, (free L) & 2-4. inuse M, (free L)
        else {
#if CONSISTENCY == 1
            insert_to_unsorted_log(name, av, p, bck, fwd, size);
            set_foot_log(name, p, size);
            clear_inuse_bit_at_offset_log(name, p, size);
#else
            insert_to_unsorted(av, p, bck, fwd, size);
            set_foot(p, size);
            clear_inuse_bit_at_offset(p, size);
#endif
            goto out;
        }
    }

    // 3. Middle chunk
    else {
        nextchunk = next_chunk(p);
        nextsize = chunksize(nextchunk);

        if (!prev_inuse(p)) {
            prevchunk = prev_chunk(p);
            prevsize = chunksize(prevchunk);

            // 3-1. free F, (free M), free L
            if (chunk_is_first(prevchunk) && chunk_is_last(nextchunk) &&
                !inuse(nextchunk) ) {
                //if (av < prevchunk && prevchunk < av+PAGESIZE){
                if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
                    unlink_log(name, prevchunk, bck, fwd);
                    size += prevsize;
                    p = chunk_at_offset(p, -((long) prevsize));
                    insert_to_unsorted_log(name, av, p, bck, fwd, size);
                    set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
                    set_foot_log(name, p, size);
                    clear_inuse_bit_at_offset_log(name, p, size);
#else
                    unlink(prevchunk, bck, fwd);
                    size += prevsize;
                    p = chunk_at_offset(p, -((long) prevsize));
                    insert_to_unsorted(av, p, bck, fwd, size);
                    set_head(p, size | FIRST_CHUNK | PREV_INUSE);
                    set_foot(p, size);
                    clear_inuse_bit_at_offset(p, size);
#endif
                    goto out;
                }
                else {
#if CONSISTENCY == 1
                    unlink_log(name, prevchunk, bck, fwd);
                    unlink_log(name, nextchunk, bck, fwd);
                    p = chunk_at_offset(p, -((long) prevsize));
                    size = prevsize+size+nextsize+2*SIZE_SZ;
                    pos_log_insert_malloc_free(name, (unsigned long)p, size);
                    //pos_seg_free(name, (void *)p, size);
                    POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem,
                                   (unsigned long)(av->system_mem-size));
#else
                    unlink(prevchunk, bck, fwd);
                    unlink(nextchunk, bck, fwd);
                    p = chunk_at_offset(p, -((long) prevsize));
                    size = prevsize+size+nextsize+2*SIZE_SZ;
                    /*if (size%PAGESIZE != 0) {
                        errstr = "free(): unmmap size is not page size";
                        goto errout;
                    }*/
                    //FREE((char*)p, size);
                    pos_seg_free(name, (void *)p, size);
                    av->system_mem -= size;
#endif
                    goto out;
                }
            }

#if CONSISTENCY == 1
            unlink_log(name, prevchunk, bck, fwd);
#else
            unlink(prevchunk, bck, fwd);
#endif
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            if (chunk_is_first(prevchunk)) {
#if CONSISTENCY == 1
                set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
#else
                set_head(p, size | FIRST_CHUNK | PREV_INUSE);
                //set_foot(p, size);
                //clear_inuse_bit_at_offset(p, size);
#endif
            }
        }

        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
        if (!nextinuse) {
#if CONSISTENCY == 1
            unlink_log(name, nextchunk, bck, fwd);
#else
            unlink(nextchunk, bck, fwd);
#endif
            size += nextsize;
        }

#if CONSISTENCY == 1
        insert_to_unsorted_log(name, av, p, bck, fwd, size);
        if (chunk_is_first(p)) {
            set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
        } else if (chunk_is_last(nextchunk)&&!nextinuse) {
            set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
        } else {
            set_head_log(name, p, size | PREV_INUSE);
        }
        set_foot_log(name, p, size);
        clear_inuse_bit_at_offset_log(name, p, size);
#else
        //else
            //clear_inuse_bit_at_offset(nextchunk, 0);
        insert_to_unsorted(av, p, bck, fwd, size);
        if (chunk_is_first(p)) {
            set_head(p, size | FIRST_CHUNK | PREV_INUSE);
        } else if (chunk_is_last(nextchunk)&&!nextinuse) {
            set_head(p, size | LAST_CHUNK | PREV_INUSE);
        } else {
            set_head(p, size | PREV_INUSE);
        }
        set_foot(p, size);
        clear_inuse_bit_at_offset(p, size);
        //check_free_chunk(av, p);
#endif
    }

out:
    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD &&
        have_fastchunks(av)) {
        pos_malloc_consolidate(name, av);
    }
}
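For reference, the fastbin path at the top of pos_int_free is an ordinary LIFO push guarded by the "fasttop" double-free test it keeps from glibc. A minimal self-contained sketch with illustrative types (struct fchunk stands in for malloc_chunk):

#include <assert.h>
#include <stddef.h>

struct fchunk { struct fchunk *fd; };

static int fastbin_push(struct fchunk **fb, struct fchunk *p)
{
    if (*fb == p)            /* freeing the chunk already on top: */
        return -1;           /* "double free or corruption (fasttop)" */
    p->fd = *fb;             /* link old top behind p */
    *fb = p;                 /* p becomes the new top */
    return 0;
}

int main(void)
{
    struct fchunk *bin = NULL, a, b;
    assert(fastbin_push(&bin, &a) == 0);
    assert(fastbin_push(&bin, &b) == 0);
    assert(fastbin_push(&bin, &b) == -1);  /* immediate double free caught */
    assert(bin == &b && b.fd == &a && a.fd == NULL);
    return 0;
}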
/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t          size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t          nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t          prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
        return;

    __MALLOC_LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
       If eligible, place chunk on a fastbin so it can be found
       and used quickly in malloc.
    */
    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
            /* If TRIM_FASTBINS set, don't place chunks
               bordering top into fastbins */
            && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

        set_fastchunks(av);
        fb = &(av->fastbins[fastbin_index(size)]);
        p->fd = *fb;
        *fb = p;
    }

    /*
       Consolidate other non-mmapped chunks as they arrive.
    */
    else if (!chunk_is_mmapped(p)) {
        set_anychunks(av);

        nextchunk = chunk_at_offset(p, size);
        nextsize = chunksize(nextchunk);

        /* consolidate backward */
        if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
        }

        if (nextchunk != av->top) {
            /* get and clear inuse bit */
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            /* consolidate forward */
            if (!nextinuse) {
                unlink(nextchunk, bck, fwd);
                size += nextsize;
            }

            /*
               Place the chunk in unsorted chunk list. Chunks are
               not placed into regular bins until after they have
               been given one chance to be used in malloc.
            */
            bck = unsorted_chunks(av);
            fwd = bck->fd;
            p->bk = bck;
            p->fd = fwd;
            bck->fd = p;
            fwd->bk = p;

            set_head(p, size | PREV_INUSE);
            set_foot(p, size);

            check_free_chunk(p);
        }

        /*
           If the chunk borders the current high end of memory,
           consolidate into top
        */
        else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
            check_chunk(p);
        }

        /*
           If freeing a large space, consolidate possibly-surrounding
           chunks. Then, if the total unused topmost memory exceeds trim
           threshold, ask malloc_trim to reduce top.

           Unless max_fast is 0, we don't know if there are fastbins
           bordering top, so we cannot tell for sure whether threshold
           has been reached unless fastbins are consolidated.  But we
           don't want to consolidate on each free.  As a compromise,
           consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
           is reached.
        */
        if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
            if (have_fastchunks(av))
                __malloc_consolidate(av);

            if ((unsigned long)(chunksize(av->top)) >=
                    (unsigned long)(av->trim_threshold))
                __malloc_trim(av->top_pad, av);
        }
    }
    /*
       If the chunk was allocated via mmap, release via munmap().
       Note that if HAVE_MMAP is false but chunk_is_mmapped is true, then
       user must have overwritten memory. There's nothing we can do to
       catch this error unless DEBUG is set, in which case
       check_inuse_chunk (above) will have triggered error.
    */
    else {
        size_t offset = p->prev_size;
        av->n_mmaps--;
        av->mmapped_mem -= (size + offset);
        munmap((char*)p - offset, size + offset);
    }
    __MALLOC_UNLOCK;
}
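When free() cannot use a fastbin, the chunk goes to the head of the circular doubly-linked unsorted list; the four pointer writes above are the standard sentinel-based insertion. A self-contained sketch of just that pointer dance, with illustrative types (struct node stands in for malloc_chunk, bin for the unsorted_chunks(av) header):

#include <assert.h>
#include <stddef.h>

struct node { struct node *fd, *bk; };

/* An empty bin's sentinel points at itself in both directions. */
static void bin_init(struct node *bin) { bin->fd = bin->bk = bin; }

/* Same pointer dance as in free(): new chunks go in at the head
   (bin->fd side), so the oldest chunk sits at bin->bk. */
static void insert_at_head(struct node *bin, struct node *p)
{
    struct node *bck = bin;
    struct node *fwd = bin->fd;
    p->bk = bck;
    p->fd = fwd;
    bck->fd = p;
    fwd->bk = p;
}

int main(void)
{
    struct node bin, a, b;
    bin_init(&bin);
    insert_at_head(&bin, &a);
    insert_at_head(&bin, &b);
    assert(bin.fd == &b && b.fd == &a && a.fd == &bin);  /* head to tail */
    assert(bin.bk == &a && a.bk == &b && b.bk == &bin);  /* tail to head */
    return 0;
}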
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t          size;
    size_t          nextsize;
    size_t          prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
    */
    if (av->max_fast != 0) {
        clear_fastchunks(av);

        unsorted_bin = unsorted_chunks(av);

        /*
           Remove each chunk from fast bin and consolidate it, placing it
           then in unsorted bin. Among other reasons for doing this,
           placing in unsorted bin avoids needing to calculate actual bins
           until malloc is sure that chunks aren't immediately going to be
           reused anyway.
        */
        maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
        fb = &(av->fastbins[0]);
        do {
            if ( (p = *fb) != 0) {
                *fb = 0;
                do {
                    check_inuse_chunk(p);
                    nextp = p->fd;

                    /* Slightly streamlined version of consolidation code in free() */
                    size = p->size & ~PREV_INUSE;
                    nextchunk = chunk_at_offset(p, size);
                    nextsize = chunksize(nextchunk);

                    if (!prev_inuse(p)) {
                        prevsize = p->prev_size;
                        size += prevsize;
                        p = chunk_at_offset(p, -((long) prevsize));
                        unlink(p, bck, fwd);
                    }

                    if (nextchunk != av->top) {
                        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                        set_head(nextchunk, nextsize);

                        if (!nextinuse) {
                            size += nextsize;
                            unlink(nextchunk, bck, fwd);
                        }

                        first_unsorted = unsorted_bin->fd;
                        unsorted_bin->fd = p;
                        first_unsorted->bk = p;

                        set_head(p, size | PREV_INUSE);
                        p->bk = unsorted_bin;
                        p->fd = first_unsorted;
                        set_foot(p, size);
                    }
                    else {
                        size += nextsize;
                        set_head(p, size | PREV_INUSE);
                        av->top = p;
                    }
                } while ( (p = nextp) != 0);
            }
        } while (fb++ != maxfb);
    }
    else {
        malloc_init_state(av);
        check_malloc_state();
    }
}
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */
    ustate          unit;               /* unit state owning the chunk */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t          size;
    size_t          nextsize;
    size_t          prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
    */
    if (av->max_fast != 0) {
        clear_fastchunks(av);

        unsorted_bin = unsorted_chunks(av);

        /*
           Remove each chunk from fast bin and consolidate it, placing it
           then in unsorted bin. Among other reasons for doing this,
           placing in unsorted bin avoids needing to calculate actual bins
           until malloc is sure that chunks aren't immediately going to be
           reused anyway.
        */
        maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
        fb = &(av->fastbins[0]);
        do {
            if ( (p = *fb) != 0) {
                *fb = 0;
                do {
                    check_inuse_chunk(p);
                    nextp = p->fd;

                    /* Slightly streamlined version of consolidation code in free() */
                    size = p->size & ~PREV_INUSE;
                    nextchunk = chunk_at_offset(p, size);
                    nextsize = chunksize(nextchunk);

                    if (!prev_inuse(p)) {
                        prevsize = p->prev_size;
                        size += prevsize;
                        p = chunk_at_offset(p, -((long) prevsize));
                        unlink(p, bck, fwd);
                    }

                    unit = lookup_ustate_by_mem((void*)p);
                    if (nextchunk != unit->unit_top) {
                        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                        set_head(nextchunk, nextsize);

                        if (!nextinuse) {
                            size += nextsize;
                            unlink(nextchunk, bck, fwd);
                        }

                        first_unsorted = unsorted_bin->fd;
                        unsorted_bin->fd = p;
                        first_unsorted->bk = p;

                        set_head(p, size | PREV_INUSE);
                        p->bk = unsorted_bin;
                        p->fd = first_unsorted;
                        set_foot(p, size);
                    }
                    else {
                        size += nextsize;
                        set_head(p, size | PREV_INUSE);
                        unit->unit_top = p;
                    }
                } while ( (p = nextp) != 0);
            }
        } while (fb++ != maxfb);
    }
    else {
        if (get_abstate()->mstate_list.num == 0) {
            //initialize abheap state
            init_linked_list(&(get_abstate()->mstate_list));
            init_linked_list(&(get_abstate()->ustate_list));
            init_linked_list(&(get_abstate()->mmapped_ustate_list));
            get_abstate()->ab_top = (mchunkptr)(CHANNEL_ADDR);
            //allocate channel heap space
            mmap((void *) CHANNEL_ADDR, CHANNEL_SIZE, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_FIXED|MAP_SHARED, -1, 0);
            touch_mem((void *)CHANNEL_ADDR, CHANNEL_SIZE);
        }
        malloc_init_state(av);
        check_malloc_state();
    }
}
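touch_mem is not shown in this listing; a plausible sketch is given below, under the assumption that it merely pre-faults the freshly mapped channel region one page at a time so later accesses do not take page faults. The name touch_mem_sketch and the 4 KiB page size are assumptions for illustration, not the project's code.

#include <stddef.h>

/* Pre-fault each page of a mapped region by writing one byte per page. */
static void touch_mem_sketch(void *addr, size_t len)
{
    volatile char *p = (volatile char *) addr;
    const size_t page = 4096;              /* assume 4 KiB pages */
    for (size_t off = 0; off < len; off += page)
        p[off] = 0;
}

int main(void)
{
    static char region[1 << 16];           /* stand-in for the mmap'ed channel */
    touch_mem_sketch(region, sizeof region);
    return 0;
}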
static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      arena_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     by preserving the top pad and at least a page.  */
  top_size = chunksize (top_chunk);
  top_area = top_size - MINSIZE - 1;
  if (top_area <= pad)
    return 0;

  extra = ALIGN_DOWN(top_area - pad, pagesz);
  if ((unsigned long) extra < mp_.trim_threshold)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
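The tail of this version computes how much of the top chunk can be returned: keep pad plus at least MINSIZE, round down to whole pages, and only proceed when the result also clears mp_.trim_threshold. A worked example with assumed values (4 KiB pages, 32-byte MINSIZE, 128 KiB pad); the numbers are illustrative only:

#include <assert.h>
#include <stdio.h>

#define ALIGN_DOWN(n, a)  ((n) & ~((a) - 1))   /* a must be a power of two */

int main(void)
{
    long pagesz   = 4096;
    long MINSIZE_ = 32;                 /* assumption for the example */
    long top_size = 150 * 1024;         /* 153600 bytes of free top */
    long pad      = 128 * 1024;         /* mp_.top_pad, say 128 KiB */

    long top_area = top_size - MINSIZE_ - 1;         /* 153567 */
    assert(top_area > pad);
    long extra = ALIGN_DOWN(top_area - pad, pagesz); /* 22495 -> 20480 */
    /* heap_trim would now compare extra against mp_.trim_threshold
       before calling shrink_heap(heap, extra). */
    printf("release %ld bytes (%ld pages)\n", extra, extra / pagesz);
    return 0;
}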
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  ...
  else if (!chunk_is_mmapped(p)) {
    ...
    nextchunk = chunk_at_offset(p, size);
    ...
    nextsize = chunksize(nextchunk);
    ...
    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
        unlink(nextchunk, bck, fwd);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset(nextchunk, 0);

      /*
        Place the chunk in unsorted chunk list. Chunks are
        not placed into regular bins until after they have
        been given one chance to be used in malloc.
      */
      bck = unsorted_chunks(av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
        {
          errstr = "free(): corrupted unsorted chunks";
          goto errout;
        }
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
        {
          p->fd_nextsize = NULL;
          p->bk_nextsize = NULL;
        }
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(av, p);
    }

    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */
    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }
    ...
  }
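The __glibc_unlikely (fwd->bk != bck) test is the integrity check this excerpt adds over the older free() shown earlier: before linking, verify that the first unsorted chunk still points back at the bin header, so a tampered list head aborts the insert. A minimal sketch with illustrative types (struct node stands in for malloc_chunk; insert_checked is not a glibc name):

#include <assert.h>
#include <stddef.h>

struct node { struct node *fd, *bk; };

static int insert_checked(struct node *bin, struct node *p)
{
    struct node *bck = bin;
    struct node *fwd = bin->fd;
    if (fwd->bk != bck)      /* same test as __glibc_unlikely(fwd->bk != bck) */
        return -1;           /* list head was tampered with: refuse to link */
    p->fd = fwd;
    p->bk = bck;
    bck->fd = p;
    fwd->bk = p;
    return 0;
}

int main(void)
{
    struct node bin = { &bin, &bin }, a, b, evil;
    assert(insert_checked(&bin, &a) == 0);   /* healthy list: insert works */
    bin.fd->bk = &evil;                      /* simulate corrupting a.bk */
    assert(insert_checked(&bin, &b) == -1);  /* check fires, insert refused */
    return 0;
}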