void AllocPool::DoCheckFreeChunk(AllocChunkPtr p)
{
	size_t size = p->Size();
#ifndef NDEBUG
	AllocChunkPtr next = p->ChunkAtOffset(size);
#endif

	DoCheckChunk(p);

	/* Check whether it claims to be free ... */
	assert(!p->InUse());

	/* Unless an end marker, must have OK fields */
	if (size >= kMinAllocSize) {
		assert((size & kAlignMask) == 0);
		assert(aligned_OK(p->ToPtr()));
		/* ... and is fully consolidated */
		assert(p->PrevInUse());
		assert(next->InUse());
		/* ... and has minimally sane links */
		assert(p->Next()->Prev() == p);
		assert(p->Prev()->Next() == p);
	} else /* end markers are always of size 0 */
		assert(size == 0);
}
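/* A minimal boundary-tag sketch of what DoCheckFreeChunk enforces: a free
   chunk must be fully coalesced (both neighbours in use, or free() would
   already have merged them) and must sit on a well-formed circular doubly
   linked free list. The layout below is illustrative only, not AllocPool's
   real chunk encoding. */
#include <assert.h>
#include <stddef.h>

typedef struct sketch_chunk {
	size_t size;                      /* byte distance to the next header */
	int    in_use;
	struct sketch_chunk *next, *prev; /* free-list links, valid while free */
} sketch_chunk;

static sketch_chunk* sketch_chunk_at_offset(sketch_chunk *p, size_t off)
{
	return (sketch_chunk*)((char*)p + off);
}

static void sketch_check_free_chunk(sketch_chunk *p, sketch_chunk *before)
{
	assert(!p->in_use);                                 /* claims to be free */
	assert(before->in_use);                             /* fully coalesced:  */
	assert(sketch_chunk_at_offset(p, p->size)->in_use); /* both sides in use */
	assert(p->next->prev == p && p->prev->next == p);   /* sane list links   */
}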
static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK(mem))
    return NULL;

  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if ((contig &&
         ((char*)p < mp_.sbrk_base ||
          ((char*)p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
        sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse(p) ||
        (!prev_inuse(p) && (p->prev_size & MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p) < mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p)) != p)))
      return NULL;

    magic = MAGICBYTE(p);
    for (sz += SIZE_SZ - 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if (c <= 0 || sz < (c + 2*SIZE_SZ))
        return NULL;
    }
  }
  else {
    unsigned long offset, page_mask = GLRO(dl_pagesize) - 1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this first. */
    offset = (unsigned long)mem & page_mask;
    if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
         offset != 0x20 && offset != 0x40 && offset != 0x80 &&
         offset != 0x100 && offset != 0x200 && offset != 0x400 &&
         offset != 0x800 && offset != 0x1000 && offset < 0x2000) ||
        !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
        (((unsigned long)p - p->prev_size) & page_mask) != 0 ||
        ((sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0))
      return NULL;

    magic = MAGICBYTE(p);
    for (sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if (c <= 0 || sz < (c + 2*SIZE_SZ))
        return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
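/* A self-contained sketch of the trailing-magic encoding that the loops in
   mem2chunk_check walk backwards through. glibc derives the magic byte from
   the chunk address (MAGICBYTE) and also reserves header space in its bound
   check; here a fixed magic value and a plain byte buffer stand in for that,
   so this is a simplified model, not glibc's exact scheme. */
#include <assert.h>
#include <string.h>
#include <stddef.h>

#define SKETCH_MAGIC 0x42

/* Write the trailer: every marked byte after the user's `sz` bytes holds the
   distance back toward the magic byte at buf[sz], capped at 0xFF per hop. */
static void write_trailer(unsigned char *buf, size_t total, size_t sz)
{
	for (size_t i = total - 1; i > sz; i -= 0xFF) {
		if (i - sz < 0x100) { buf[i] = (unsigned char)(i - sz); break; }
		buf[i] = 0xFF;
	}
	buf[sz] = SKETCH_MAGIC;
}

/* Check it the way mem2chunk_check does: hop back by each byte's value until
   the magic byte is found. Returns the user size, or -1 on corruption. */
static long check_trailer(const unsigned char *buf, size_t total)
{
	size_t sz = total - 1;
	unsigned char c;
	while ((c = buf[sz]) != SKETCH_MAGIC) {
		if (c == 0 || sz < c) return -1;  /* bad hop: trailer clobbered */
		sz -= c;
	}
	return (long)sz;
}

int main(void)
{
	unsigned char buf[1024];
	memset(buf, 0xAA, sizeof buf);      /* non-magic filler */
	write_trailer(buf, sizeof buf, 100);
	assert(check_trailer(buf, sizeof buf) == 100);
	return 0;
}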
void AllocPool::DoCheckAllocedChunk(AllocChunkPtr p, size_t s)
{
#ifndef NDEBUG
	size_t size = p->Size();
	long room = size - s;
#endif

	DoCheckInUseChunk(p);

	/* Legal size ... */
	assert(size >= kMinAllocSize);
	assert((size & kAlignMask) == 0);
	assert(room >= 0);
	assert(room < kMinAllocSize);
	/* ... and alignment */
	assert(aligned_OK(p->ToPtr()));
	/* ... and was allocated at front of an available chunk */
	assert(p->PrevInUse()); // huh?? - jmc
}
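/* Why "room < kMinAllocSize" can be asserted at all: a sketch of the split
   decision an allocator makes when serving an (already padded) request from
   a larger free chunk. If the leftover is big enough to stand alone as a
   chunk it gets split off, so the slack left in the returned chunk is always
   below the minimum chunk size. The constant here is an assumption. */
#include <stddef.h>

enum { kSketchMinAllocSize = 16 };

static size_t sketch_carve(size_t chunk_size, size_t request)
{
	size_t room = chunk_size - request;
	if (room >= kSketchMinAllocSize)
		return request;    /* split: remainder becomes a new free chunk */
	return chunk_size;     /* too small to split: keep the slack        */
}

/* e.g. sketch_carve(40, 24) == 24 (remainder of 16 is split off), while
   sketch_carve(32, 24) == 32 (slack of 8 stays, so room == 8 < 16). */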
/* ------------------------------ realloc ------------------------------ */
void* ulibc_realloc(void* oldmem, size_t bytes)
{
    mstate av;

    size_t nb;                    /* padded request size */

    mchunkptr oldp;               /* chunk corresponding to oldmem */
    size_t oldsize;               /* its size */

    mchunkptr newp;               /* chunk to return */
    size_t newsize;               /* its size */
    void* newmem;                 /* corresponding user mem */

    mchunkptr next;               /* next contiguous chunk after oldp */

    mchunkptr remainder;          /* extra space at end of newp */
    unsigned long remainder_size; /* its size */

    mchunkptr bck;                /* misc temp for linking */
    mchunkptr fwd;                /* misc temp for linking */

    unsigned long copysize;       /* bytes to copy */
    unsigned int ncopies;         /* size_t words to copy */
    size_t* s;                    /* copy source */
    size_t* d;                    /* copy destination */

    void *retval;

    /* Check for special cases. */
    if (!oldmem)
        return ulibc_malloc(bytes);
    if (!bytes) {
        ulibc_free(oldmem);
        return NULL;
    }

    av = get_malloc_state();
    checked_request2size(bytes, nb);

    oldp = mem2chunk(oldmem);
    oldsize = chunksize(oldp);

    check_inuse_chunk(oldp);

    if (!chunk_is_mmapped(oldp)) {

        if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
            /* already big enough; split below */
            newp = oldp;
            newsize = oldsize;
        }

        else {
            next = chunk_at_offset(oldp, oldsize);

            /* Try to expand forward into top */
            if (next == av->top &&
                    (unsigned long)(newsize = oldsize + chunksize(next)) >=
                    (unsigned long)(nb + MINSIZE)) {
                set_head_size(oldp, nb);
                av->top = chunk_at_offset(oldp, nb);
                set_head(av->top, (newsize - nb) | PREV_INUSE);
                retval = chunk2mem(oldp);
                goto DONE;
            }

            /* Try to expand forward into next chunk; split off remainder below */
            else if (next != av->top &&
                    !inuse(next) &&
                    (unsigned long)(newsize = oldsize + chunksize(next)) >=
                    (unsigned long)(nb)) {
                newp = oldp;
                unlink(next, bck, fwd);
            }

            /* allocate, copy, free */
            else {
                newmem = malloc(nb - MALLOC_ALIGN_MASK);
                if (newmem == 0) {
                    retval = 0; /* propagate failure */
                    goto DONE;
                }

                newp = mem2chunk(newmem);
                newsize = chunksize(newp);

                /* Avoid copy if newp is next chunk after oldp. */
                if (newp == next) {
                    newsize += oldsize;
                    newp = oldp;
                }
                else {
                    /*
                       Unroll copy of <= 36 bytes (72 if 8byte sizes)
                       We know that contents have an odd number of
                       size_t-sized words; minimally 3.
                    */

                    copysize = oldsize - (sizeof(size_t));
                    s = (size_t*)(oldmem);
                    d = (size_t*)(newmem);
                    ncopies = copysize / sizeof(size_t);
                    assert(ncopies >= 3);

                    if (ncopies > 9)
                        memcpy(d, s, copysize);

                    else {
                        *(d+0) = *(s+0);
                        *(d+1) = *(s+1);
                        *(d+2) = *(s+2);
                        if (ncopies > 4) {
                            *(d+3) = *(s+3);
                            *(d+4) = *(s+4);
                            if (ncopies > 6) {
                                *(d+5) = *(s+5);
                                *(d+6) = *(s+6);
                                if (ncopies > 8) {
                                    *(d+7) = *(s+7);
                                    *(d+8) = *(s+8);
                                }
                            }
                        }
                    }

                    ulibc_free(oldmem);
                    check_inuse_chunk(newp);
                    retval = chunk2mem(newp);
                    goto DONE;
                }
            }
        }

        /* If possible, free extra space in old or extended chunk */

        assert((unsigned long)(newsize) >= (unsigned long)(nb));

        remainder_size = newsize - nb;

        if (remainder_size < MINSIZE) { /* not enough extra to split off */
            set_head_size(newp, newsize);
            set_inuse_bit_at_offset(newp, newsize);
        }
        else { /* split remainder */
            remainder = chunk_at_offset(newp, nb);
            set_head_size(newp, nb);
            set_head(remainder, remainder_size | PREV_INUSE);
            /* Mark remainder as inuse so free() won't complain */
            set_inuse_bit_at_offset(remainder, remainder_size);
            ulibc_free(chunk2mem(remainder));
        }

        check_inuse_chunk(newp);
        retval = chunk2mem(newp);
        goto DONE;
    }

    /* Handle mmap cases */
    else {
        size_t offset = oldp->prev_size;
        size_t pagemask = av->pagesize - 1;
        char *cp;
        unsigned long sum;

        /* Note the extra (sizeof(size_t)) overhead */
        newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

        /* don't need to remap if still within same page */
        if (oldsize == newsize - offset) {
            retval = oldmem;
            goto DONE;
        }

        cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

        if (cp != (char*)MORECORE_FAILURE) {

            newp = (mchunkptr)(cp + offset);
            set_head(newp, (newsize - offset) | IS_MMAPPED);

            assert(aligned_OK(chunk2mem(newp)));
            assert(newp->prev_size == offset);

            /* update statistics */
            sum = av->mmapped_mem += newsize - oldsize;
            if (sum > (unsigned long)(av->max_mmapped_mem))
                av->max_mmapped_mem = sum;
            sum += av->sbrked_mem;
            if (sum > (unsigned long)(av->max_total_mem))
                av->max_total_mem = sum;

            retval = chunk2mem(newp);
            goto DONE;
        }

        /* Note the extra (sizeof(size_t)) overhead. */
        if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
            newmem = oldmem; /* do nothing */
        else {
            /* Must alloc, copy, free. */
            newmem = malloc(nb - MALLOC_ALIGN_MASK);
            if (newmem != 0) {
                memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
                ulibc_free(oldmem);
            }
        }
        retval = newmem;
    }

DONE:
    return retval;
}
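/* A quick check of the "odd number of size_t words, minimally 3" claim that
   the assert above relies on. It assumes SIZE_SZ == sizeof(size_t), chunk
   sizes that are multiples of 2*SIZE_SZ, and a minimum chunk of 4 words;
   those match the usual dlmalloc-style layout but are stated here as
   assumptions. ncopies = oldsize/SIZE_SZ - 1 is then always odd and >= 3. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t size_sz = sizeof(size_t);
    size_t minsize = 4 * size_sz;              /* smallest chunk, assumed */
    for (size_t oldsize = minsize; oldsize <= 10 * size_sz;
         oldsize += 2 * size_sz) {             /* chunks grow by 2*SIZE_SZ */
        size_t ncopies = (oldsize - size_sz) / size_sz;
        assert(ncopies >= 3 && ncopies % 2 == 1);
    }
    return 0;
}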
static mchunkptr malloc_from_sys(unsigned nb)
{
  mchunkptr p;
  size_t sbrk_size;
  int* ip;

  /* Minimally, we need to pad with enough space */
  /* to place dummy size/use fields to ends if needed */

  sbrk_size = ((nb + SBRK_UNIT - 1 + SIZE_SZ + SIZE_SZ) / SBRK_UNIT)
              * SBRK_UNIT;

  ip = (int*)(sbrk(sbrk_size));
  if ((char*)ip == (char*)(-1)) /* sbrk returns -1 on failure */
    return 0;

  UPDATE_STATS((++n_sbrks, sbrked_mem += sbrk_size));

  if (last_sbrk_end != &ip[-1])
  {
    /* It's either first time through or someone else called sbrk. */
    /* Arrange end-markers at front & back */

    /* Shouldn't be necessary, but better to be safe */
    while (!aligned_OK(ip)) { ++ip; sbrk_size -= SIZE_SZ; }

    /* Mark the front as in use to prevent merging. */
    /* Note we can get away with only 1 word, not MINSIZE overhead here */

    *ip++ = SIZE_SZ | INUSE;

    p = (mchunkptr)ip;
    set_size(p, sbrk_size - (SIZE_SZ + SIZE_SZ));
  }
  else
  {
    mchunkptr l;

    /* We can safely make the header start at end of prev sbrked chunk. */
    /* We will still have space left at the end from a previous call */
    /* to place the end marker, below */

    p = (mchunkptr)(last_sbrk_end);
    set_size(p, sbrk_size);

    /* Even better, maybe we can merge with last fragment: */

    l = prev_chunk(p);
    if (!inuse(l))
    {
      unlink(l);
      set_size(l, p->size + l->size);
      p = l;
    }
  }

  /* mark the end of sbrked space as in use to prevent merging */

  last_sbrk_end = (int*)((char*)p + p->size);
  *last_sbrk_end = SIZE_SZ | INUSE;

  UPDATE_STATS((++n_avail, ++n_malloc_chunks));

  /* make it safe to unlink in malloc */
  UPDATE_STATS(++n_avail);
  p->fd = p->bk = p;

  return p;
}
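/* A standalone sketch of the padding arithmetic above: the request plus two
   sentinel words (the front and back end-markers that stop coalescing from
   running off the sbrk'd region) is rounded up to a multiple of SBRK_UNIT.
   The SBRK_UNIT value is an assumption for illustration. */
#include <stdio.h>
#include <stddef.h>

#define SKETCH_SIZE_SZ   sizeof(size_t)
#define SKETCH_SBRK_UNIT 8192

int main(void)
{
    unsigned nb = 5000;
    size_t sbrk_size =
        ((nb + SKETCH_SBRK_UNIT - 1 + SKETCH_SIZE_SZ + SKETCH_SIZE_SZ)
         / SKETCH_SBRK_UNIT) * SKETCH_SBRK_UNIT;
    /* prints: request 5000 -> sbrk 8192 (room for request + 2 markers) */
    printf("request %u -> sbrk %zu\n", nb, sbrk_size);
    return 0;
}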