Example #1
1
/* ------------------------- malloc_trim -------------------------
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it is not guaranteed to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int malloc_trim(size_t pad)
{
  int r;
  __MALLOC_LOCK;
  mstate av = get_malloc_state();
  __malloc_consolidate(av);
  r = __malloc_trim(pad, av);
  __MALLOC_UNLOCK;
  return r;
}
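
A minimal usage sketch for the interface documented above, assuming malloc_trim() is exposed through <malloc.h> as on glibc/uClibc; the return-value handling follows the comment (1 if memory was released, 0 otherwise):

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>   /* assumed to declare malloc_trim() */

int main(void)
{
    /* Allocate and free a large block, then try to return the now-unused
       top of the heap to the system, keeping 64 KiB of trailing space
       for future allocations. */
    void *big = malloc(4 * 1024 * 1024);
    free(big);

    if (malloc_trim(64 * 1024))
        puts("memory returned to the system");
    else
        puts("nothing trimmed (locked chunks or no negative sbrk)");
    return 0;
}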
Example #2
0
/* libc_hidden_proto(mallinfo) */
struct mallinfo mallinfo(void)
{
    mstate av;
    struct mallinfo mi;
    unsigned int i;
    mbinptr b;
    mchunkptr p;
    size_t avail;
    size_t fastavail;
    int nblocks;
    int nfastblocks;

    __MALLOC_LOCK;
    av = get_malloc_state();
    /* Ensure initialization */
    if (av->top == 0)  {
	__malloc_consolidate(av);
    }

    check_malloc_state();

    /* Account for top */
    avail = chunksize(av->top);
    nblocks = 1;  /* top always exists */

    /* traverse fastbins */
    nfastblocks = 0;
    fastavail = 0;

    for (i = 0; i < NFASTBINS; ++i) {
	for (p = av->fastbins[i]; p != 0; p = p->fd) {
	    ++nfastblocks;
	    fastavail += chunksize(p);
	}
    }

    avail += fastavail;

    /* traverse regular bins */
    for (i = 1; i < NBINS; ++i) {
	b = bin_at(av, i);
	for (p = last(b); p != b; p = p->bk) {
	    ++nblocks;
	    avail += chunksize(p);
	}
    }

    mi.smblks = nfastblocks;
    mi.ordblks = nblocks;
    mi.fordblks = avail;
    mi.uordblks = av->sbrked_mem - avail;
    mi.arena = av->sbrked_mem;
    mi.hblks = av->n_mmaps;
    mi.hblkhd = av->mmapped_mem;
    mi.fsmblks = fastavail;
    mi.keepcost = chunksize(av->top);
    mi.usmblks = av->max_total_mem;
    __MALLOC_UNLOCK;
    return mi;
}
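
A hedged usage sketch for the statistics assembled above, assuming the traditional SVID struct mallinfo layout with int fields (arena, ordblks, smblks, hblks, hblkhd, fsmblks, uordblks, fordblks, keepcost) as declared in <malloc.h>:

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>   /* assumed to declare struct mallinfo and mallinfo() */

int main(void)
{
    void *p = malloc(100 * 1024);
    struct mallinfo mi = mallinfo();

    printf("arena (sbrked bytes) : %d\n", mi.arena);
    printf("free chunks          : %d (%d bytes)\n", mi.ordblks, mi.fordblks);
    printf("fastbin chunks       : %d (%d bytes)\n", mi.smblks, mi.fsmblks);
    printf("mmapped regions      : %d (%d bytes)\n", mi.hblks, mi.hblkhd);
    printf("bytes in use         : %d\n", mi.uordblks);
    printf("trimmable top        : %d\n", mi.keepcost);

    free(p);
    return 0;
}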
Example #3
0
/* We need the heap_size argument because the heap size is not up to date when
 * we call this function from constrained_sbrk() or constrained_mmap().
 */
static void show_mem_info(int heap_size)
{
	if ( g_show_mem_info == 0 )
		return;

	if ( g_show_mem_info == -1)
	{
		g_show_mem_info = getenv("OPERA_SHOW_MEM_INFO") ? 1 : 0;
		if ( g_show_mem_info == 0 )
			return;
	}

	mstate av = get_malloc_state();
	if ( heap_size == 0 )
	{
		heap_size = av->mmapped_mem + av->sbrked_mem;
	}

	fprintf(stderr, "Heap size is %d\n",  heap_size);
	fprintf(stderr, "Opera allocated memory is %d\n", g_constrain_allocated_mem);

#ifdef STACK_TRACE_SUPPORT

	if ( g_show_mem_info_stack == 0 )
		return;

	if (g_show_mem_info_stack == -1)
	{
		g_show_mem_info_stack = getenv("OPERA_SHOW_MEM_INFO_STACK") ? 1 : 0;
		if ( g_show_mem_info_stack == 0 )
			return;
	}

	if ( heap_size > get_heap_limit() )
	{
		fprintf(stderr,"Exceeded heap limit at:\n");
		show_stack_trace(15,0);
	}
	if ( heap_size == 0 && g_constrain_allocated_mem > get_alloc_limit() )
	{
		fprintf(stderr,"Exceeded alloc limit at:\n");
		show_stack_trace(15,0);
	}
#endif
}
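
The lazy tri-state check above (-1 = not yet probed, then 0/1 cached from the environment) can be isolated into a small sketch; the variable name OPERA_SHOW_MEM_INFO comes from the sample, everything else is illustrative:

#include <stdio.h>
#include <stdlib.h>

/* -1: not probed yet, 0: disabled, 1: enabled (cached after first call). */
static int g_show_mem_info = -1;

static int mem_info_enabled(void)
{
    if (g_show_mem_info == -1)
        g_show_mem_info = getenv("OPERA_SHOW_MEM_INFO") ? 1 : 0;
    return g_show_mem_info;
}

int main(void)
{
    if (mem_info_enabled())
        fprintf(stderr, "memory reporting enabled\n");
    return 0;
}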
Example #4
0
static void  *constrained_mmap(void *addr, size_t size, int prot , int flags, int fd, off_t offset)
{
	int heap_increase_allowed = !g_constrained_allocation || (get_heap_limit() >= (get_heap_size() + size));

	if ( heap_increase_allowed )
	{
#ifdef SHOW_MEM_INFO
		if ( g_show_mem_info )
		{
			mstate av = get_malloc_state();
			int new_total = av->mmapped_mem + av->sbrked_mem + size;
			if ( av->max_total_mem < new_total )
				show_mem_info(new_total);
		}
#endif
		return mmap(addr, size, prot, flags, fd, offset);
	}

	return (void*) MORECORE_FAILURE;
}
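
A hedged, self-contained sketch of the limit predicate used above; get_heap_limit(), get_heap_size() and the flag are given hypothetical stand-in values here, since their real implementations are not part of this sample:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the port's limit/size accessors and flag. */
static size_t get_heap_limit(void) { return 8u * 1024 * 1024; }
static size_t get_heap_size(void)  { return 7u * 1024 * 1024; }
static int    g_constrained_allocation = 1;

/* Same predicate as in constrained_mmap(): a request is allowed when
   constraints are off or the new heap size stays within the limit. */
static int heap_increase_allowed(size_t size)
{
    return !g_constrained_allocation
        || get_heap_limit() >= get_heap_size() + size;
}

int main(void)
{
    printf("512 KiB mapping allowed: %d\n", heap_increase_allowed(512u * 1024));
    printf("2 MiB mapping allowed:   %d\n", heap_increase_allowed(2u * 1024 * 1024));
    return 0;
}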
Example #5
0
static void *constrained_sbrk(intptr_t increment)
{
	/* increment == 0 is used to find the current break, so we must always let
	 * it through, and it should of course always be possible to decrease the
	 * heap size...
	 */
	int heap_increase_allowed = !g_constrained_allocation || increment <= 0 || (get_heap_limit() >= (get_heap_size() + increment));

	if ( heap_increase_allowed )
	{
#ifdef SHOW_MEM_INFO
		if ( g_show_mem_info )
		{
			mstate av = get_malloc_state();
			int new_total = av->mmapped_mem + av->sbrked_mem + increment;
			if ( av->max_total_mem < new_total )
				show_mem_info(new_total);
		}
#endif
		return sbrk(increment);
	}
	return (void*) MORECORE_FAILURE;
}
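
dlmalloc-derived allocators normally reach sbrk and mmap through configuration macros (MORECORE defaults to sbrk, with a companion mmap wrapper). A hedged sketch of how these constrained wrappers could be wired in; the exact macro names used by this port are assumed, not visible in the sample:

/* Assumed configuration in the spirit of dlmalloc's MORECORE hook. */
#define MORECORE(increment)  constrained_sbrk(increment)
#define MMAP(addr, size, prot, flags) \
    constrained_mmap((addr), (size), (prot), (flags), -1, 0)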
Example #6
0
/* ------------------------------ realloc ------------------------------ */
void* ulibc_realloc(void* oldmem, size_t bytes)
{
    mstate av;

    size_t  nb;              /* padded request size */

    mchunkptr        oldp;            /* chunk corresponding to oldmem */
    size_t  oldsize;         /* its size */

    mchunkptr        newp;            /* chunk to return */
    size_t  newsize;         /* its size */
    void*          newmem;          /* corresponding user mem */

    mchunkptr        next;            /* next contiguous chunk after oldp */

    mchunkptr        remainder;       /* extra space at end of newp */
    unsigned long     remainder_size;  /* its size */

    mchunkptr        bck;             /* misc temp for linking */
    mchunkptr        fwd;             /* misc temp for linking */

    unsigned long     copysize;        /* bytes to copy */
    unsigned int     ncopies;         /* size_t words to copy */
    size_t* s;               /* copy source */
    size_t* d;               /* copy destination */

    void *retval;

    /* Check for special cases.  */
    if (! oldmem)
	return ulibc_malloc(bytes);
    if (! bytes) {
	ulibc_free (oldmem);
	return NULL;
    }

    av = get_malloc_state();
    checked_request2size(bytes, nb);

    oldp    = mem2chunk(oldmem);
    oldsize = chunksize(oldp);

    check_inuse_chunk(oldp);

    if (!chunk_is_mmapped(oldp)) {

	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
	    /* already big enough; split below */
	    newp = oldp;
	    newsize = oldsize;
	}

	else {
	    next = chunk_at_offset(oldp, oldsize);

	    /* Try to expand forward into top */
	    if (next == av->top &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb + MINSIZE)) {
		set_head_size(oldp, nb);
		av->top = chunk_at_offset(oldp, nb);
		set_head(av->top, (newsize - nb) | PREV_INUSE);
		retval = chunk2mem(oldp);
		goto DONE;
	    }

	    /* Try to expand forward into next chunk;  split off remainder below */
	    else if (next != av->top &&
		    !inuse(next) &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb)) {
		newp = oldp;
		unlink(next, bck, fwd);
	    }

	    /* allocate, copy, free */
	    else {
		newmem = malloc(nb - MALLOC_ALIGN_MASK);
		if (newmem == 0) {
		    retval = 0; /* propagate failure */
		    goto DONE;
		}

		newp = mem2chunk(newmem);
		newsize = chunksize(newp);

		/*
		   Avoid copy if newp is next chunk after oldp.
		   */
		if (newp == next) {
		    newsize += oldsize;
		    newp = oldp;
		}
		else {
		    /*
		       Unroll copy of <= 36 bytes (72 if 8byte sizes)
		       We know that contents have an odd number of
		       size_t-sized words; minimally 3.
		       */

		    copysize = oldsize - (sizeof(size_t));
		    s = (size_t*)(oldmem);
		    d = (size_t*)(newmem);
		    ncopies = copysize / sizeof(size_t);
		    assert(ncopies >= 3);

		    if (ncopies > 9)
			memcpy(d, s, copysize);

		    else {
			*(d+0) = *(s+0);
			*(d+1) = *(s+1);
			*(d+2) = *(s+2);
			if (ncopies > 4) {
			    *(d+3) = *(s+3);
			    *(d+4) = *(s+4);
			    if (ncopies > 6) {
				*(d+5) = *(s+5);
				*(d+6) = *(s+6);
				if (ncopies > 8) {
				    *(d+7) = *(s+7);
				    *(d+8) = *(s+8);
				}
			    }
			}
		    }

		    ulibc_free(oldmem);
		    check_inuse_chunk(newp);
		    retval = chunk2mem(newp);
		    goto DONE;
		}
	    }
	}

	/* If possible, free extra space in old or extended chunk */

	assert((unsigned long)(newsize) >= (unsigned long)(nb));

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
	    set_head_size(newp, newsize);
	    set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
	    remainder = chunk_at_offset(newp, nb);
	    set_head_size(newp, nb);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    /* Mark remainder as inuse so free() won't complain */
	    set_inuse_bit_at_offset(remainder, remainder_size);
	    ulibc_free(chunk2mem(remainder));
	}

	check_inuse_chunk(newp);
	retval = chunk2mem(newp);
	goto DONE;
    }

    /*
       Handle mmap cases
       */

    else {
	size_t offset = oldp->prev_size;
	size_t pagemask = av->pagesize - 1;
	char *cp;
	unsigned long  sum;

	/* Note the extra (sizeof(size_t)) overhead */
	newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

	/* don't need to remap if still within same page */
	if (oldsize == newsize - offset) {
	    retval = oldmem;
	    goto DONE;
	}

	cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

	if (cp != (char*)MORECORE_FAILURE) {

	    newp = (mchunkptr)(cp + offset);
	    set_head(newp, (newsize - offset)|IS_MMAPPED);

	    assert(aligned_OK(chunk2mem(newp)));
	    assert((newp->prev_size == offset));

	    /* update statistics */
	    sum = av->mmapped_mem += newsize - oldsize;
	    if (sum > (unsigned long)(av->max_mmapped_mem))
		av->max_mmapped_mem = sum;
	    sum += av->sbrked_mem;
	    if (sum > (unsigned long)(av->max_total_mem))
		av->max_total_mem = sum;

	    retval = chunk2mem(newp);
	    goto DONE;
	}

	/* Note the extra (sizeof(size_t)) overhead. */
	if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
	    newmem = oldmem; /* do nothing */
	else {
	    /* Must alloc, copy, free. */
	    newmem = malloc(nb - MALLOC_ALIGN_MASK);
	    if (newmem != 0) {
		memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
		ulibc_free(oldmem);
	    }
	}
	retval = newmem;
    }

 DONE:
    return retval;
}
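
The copy path above relies on the chunk payload being an odd number of size_t words, at least 3, and unrolls small copies inline. A stripped-down, self-contained sketch of the same unrolling (the helper name is illustrative, not from the allocator):

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Same word-by-word unrolling as the realloc copy path: up to 9 size_t
   words are copied inline, larger payloads fall back to memcpy. The
   caller guarantees an odd count of at least 3, as realloc does. */
static void copy_words(size_t *d, const size_t *s, unsigned ncopies)
{
    assert(ncopies >= 3);

    if (ncopies > 9) {
        memcpy(d, s, ncopies * sizeof(size_t));
        return;
    }
    d[0] = s[0]; d[1] = s[1]; d[2] = s[2];
    if (ncopies > 4) {
        d[3] = s[3]; d[4] = s[4];
        if (ncopies > 6) {
            d[5] = s[5]; d[6] = s[6];
            if (ncopies > 8) {
                d[7] = s[7]; d[8] = s[8];
            }
        }
    }
}

int main(void)
{
    size_t src[5] = {1, 2, 3, 4, 5}, dst[5] = {0};
    copy_words(dst, src, 5);
    printf("%zu %zu %zu %zu %zu\n", dst[0], dst[1], dst[2], dst[3], dst[4]);
    return 0;
}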
Example #7
0
/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
	return;

    __MALLOC_LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
       If eligible, place chunk on a fastbin so it can be found
       and used quickly in malloc.
       */

    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
	    /* If TRIM_FASTBINS set, don't place chunks
	       bordering top into fastbins */
	    && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

	set_fastchunks(av);
	fb = &(av->fastbins[fastbin_index(size)]);
	p->fd = *fb;
	*fb = p;
    }

    /*
       Consolidate other non-mmapped chunks as they arrive.
       */

    else if (!chunk_is_mmapped(p)) {
	set_anychunks(av);

	nextchunk = chunk_at_offset(p, size);
	nextsize = chunksize(nextchunk);

	/* consolidate backward */
	if (!prev_inuse(p)) {
	    prevsize = p->prev_size;
	    size += prevsize;
	    p = chunk_at_offset(p, -((long) prevsize));
	    unlink(p, bck, fwd);
	}

	if (nextchunk != av->top) {
	    /* get and clear inuse bit */
	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
	    set_head(nextchunk, nextsize);

	    /* consolidate forward */
	    if (!nextinuse) {
		unlink(nextchunk, bck, fwd);
		size += nextsize;
	    }

	    /*
	       Place the chunk in unsorted chunk list. Chunks are
	       not placed into regular bins until after they have
	       been given one chance to be used in malloc.
	       */

	    bck = unsorted_chunks(av);
	    fwd = bck->fd;
	    p->bk = bck;
	    p->fd = fwd;
	    bck->fd = p;
	    fwd->bk = p;

	    set_head(p, size | PREV_INUSE);
	    set_foot(p, size);

	    check_free_chunk(p);
	}

	/*
	   If the chunk borders the current high end of memory,
	   consolidate into top
	   */

	else {
	    size += nextsize;
	    set_head(p, size | PREV_INUSE);
	    av->top = p;
	    check_chunk(p);
	}

	/*
	   If freeing a large space, consolidate possibly-surrounding
	   chunks. Then, if the total unused topmost memory exceeds trim
	   threshold, ask malloc_trim to reduce top.

	   Unless max_fast is 0, we don't know if there are fastbins
	   bordering top, so we cannot tell for sure whether threshold
	   has been reached unless fastbins are consolidated.  But we
	   don't want to consolidate on each free.  As a compromise,
	   consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
	   is reached.
	   */

	if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
	    if (have_fastchunks(av))
		__malloc_consolidate(av);

	    if ((unsigned long)(chunksize(av->top)) >=
		    (unsigned long)(av->trim_threshold))
		__malloc_trim(av->top_pad, av);
	}

    }
    /*
       If the chunk was allocated via mmap, release via munmap()
       Note that if HAVE_MMAP is false but chunk_is_mmapped is
       true, then user must have overwritten memory. There's nothing
       we can do to catch this error unless DEBUG is set, in which case
       check_inuse_chunk (above) will have triggered error.
       */

    else {
	size_t offset = p->prev_size;
	av->n_mmaps--;
	av->mmapped_mem -= (size + offset);
	munmap((char*)p - offset, size + offset);
    }
    __MALLOC_UNLOCK;
}
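
The fastbin branch above is a plain LIFO push into a singly linked list selected by size class. A minimal, self-contained sketch of that push; the types and names here are illustrative, not the allocator's real mchunkptr/mfastbinptr definitions:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the allocator's chunk type. */
struct chunk {
    size_t size;
    struct chunk *fd;   /* next chunk in the same fastbin */
};

/* LIFO push, as in the fastbin branch of free(): the freed chunk becomes
   the new head so a subsequent malloc of the same size class can hand it
   straight back without consolidation. */
static void fastbin_push(struct chunk **fb, struct chunk *p)
{
    p->fd = *fb;
    *fb = p;
}

int main(void)
{
    struct chunk a = {32, NULL}, b = {32, NULL};
    struct chunk *bin = NULL;

    fastbin_push(&bin, &a);
    fastbin_push(&bin, &b);          /* b is now the head, b->fd == &a */
    printf("head size: %zu\n", bin->size);
    return 0;
}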
Example #8
0
/* ------------------------- malloc_trim -------------------------
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it is not guaranteed to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int malloc_trim(size_t pad)
{
  mstate av = get_malloc_state(); /* FIXME: no locking here, unlike the locked variant in Example #1 */
  __malloc_consolidate(av);
  return __malloc_trim(pad, av);
}