Example #1
void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If less alignment is needed than we give anyway, just relay to malloc */
  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment <  MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr,
	    bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
}
Example #2
void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;

  mchunkptr oldp;             /* chunk corresponding to oldmem */

  void* newp;             /* chunk to return */

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  /* realloc of NULL is supposed to be the same as malloc */
  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp    = mem2chunk(oldmem);
  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp); /* FIXME: use mmap_resize */
  else
    ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);

  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(newp)));
  return newp;
}
Example #3
void*
public_cALLOc(size_t n_elements, size_t elem_size)
{
  struct malloc_arena* ar_ptr;
  size_t bytes, sz;
  void* mem;
  void * (*hook) (size_t, const void *) = __malloc_hook;

  /* size_t is unsigned so the behavior on overflow is defined.  */
  bytes = n_elements * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((size_t) 1) << (8 * sizeof (size_t) / 2))
  if (__builtin_expect ((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n_elements) {
      /*MALLOC_FAILURE_ACTION;*/
      return 0;
    }
  }

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  mem = mspace_calloc(arena_to_mspace(ar_ptr), bytes, 1);

  if (mem && ar_ptr != &main_arena)
    set_non_main_arena(mem, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  
  assert(!mem || is_mmapped(mem2chunk(mem)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(mem)));

  return mem;
}
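The overflow guard in public_cALLOc above uses a fast path: if both n_elements and elem_size are below 2^(bits/2), their product cannot overflow size_t, so the exact division check is only performed when at least one operand is large. A minimal, self-contained sketch of that test (illustrative only, not part of the original source; mul_overflows is a hypothetical name):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical helper mirroring the check in public_cALLOc above. */
static int mul_overflows(size_t n, size_t size)
{
  /* 2^(half the bits of size_t): if both factors are below this,
     their product fits in size_t. */
  const size_t half = (size_t)1 << (8 * sizeof(size_t) / 2);
  if ((n | size) >= half)                        /* at least one large operand */
    return size != 0 && (n * size) / size != n;  /* exact check via division */
  return 0;                                      /* both small: cannot overflow */
}

int main(void)
{
  printf("%d\n", mul_overflows((size_t)-1, 2)); /* prints 1: overflows */
  printf("%d\n", mul_overflows(1000, 1000));    /* prints 0: fits */
  return 0;
}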
Example #4
static void *
internal_function
mem2mem_check (void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t i;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  for (i = chunksize (p) - (chunk_is_mmapped (p) ? 2 * SIZE_SZ + 1 : SIZE_SZ + 1);
       i > sz;
       i -= 0xFF)
    {
      if (i - sz < 0x100)
        {
          m_ptr[i] = (unsigned char) (i - sz);
          break;
        }
      m_ptr[i] = 0xFF;
    }
  m_ptr[sz] = MAGICBYTE (p);
  return (void *) m_ptr;
}
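mem2mem_check() writes a trailer after the requested size: a run of 0xFF bytes, a final byte holding the remaining distance, and a magic byte at the user-size boundary. mem2chunk_check() (Example #16 below) walks this trailer backwards to recover the size. A standalone sketch of the scheme, with MAGIC standing in for MAGICBYTE(p) and plain array indices standing in for chunk offsets (illustrative assumptions, not glibc code):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAGIC 0xA5  /* placeholder for MAGICBYTE(p) */

/* Write the trailer: same loop shape as mem2mem_check() above. */
static void write_trailer(unsigned char *buf, size_t end, size_t sz)
{
  size_t i;
  for (i = end; i > sz; i -= 0xFF) {
    if (i - sz < 0x100) {
      buf[i] = (unsigned char) (i - sz);  /* final hop: distance down to sz */
      break;
    }
    buf[i] = 0xFF;                        /* full-size hop */
  }
  buf[sz] = MAGIC;                        /* marks the requested size */
}

/* Walk the trailer back from the end, as mem2chunk_check() does. */
static size_t read_trailer(const unsigned char *buf, size_t end)
{
  size_t i = end;
  while (buf[i] != MAGIC)
    i -= buf[i];                          /* hop back by the stored distance */
  return i;                               /* index of MAGIC == requested size */
}

int main(void)
{
  unsigned char buf[1024];
  memset(buf, 0, sizeof buf);
  write_trailer(buf, 1000, 37);           /* pretend chunk end 1000, user size 37 */
  assert(read_trailer(buf, 1000) == 37);
  printf("recovered size: %zu\n", read_trailer(buf, 1000));
  return 0;
}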
Example #5
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  struct malloc_arena *ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                      /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  mspace_free(arena_to_mspace(ar_ptr), mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
Example #6
void
pos_public_fREe(char *name, Void_t *mem)
{
	struct malloc_state * ar_ptr;
	mchunkptr p;


	/*if (pos_is_mapped(name) == 0) {
		//printf("Not mapped\n");
		return ;
	}*/

	if (mem == (Void_t *)0)
		return;

	p = mem2chunk(mem);

	ar_ptr = (struct malloc_state *)pos_lookup_mstate(name);
	if (ar_ptr == NULL) {
		return;
	}

	(void)mutex_lock(&ar_ptr->mutex);
	pos_int_free(name, ar_ptr, p, 1);
	(void)mutex_unlock(&ar_ptr->mutex);
}
Example #7
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
Example #8
/* A direct copy of dlmalloc_usable_size(),
 * which isn't compiled in when ONLY_MSPACES is set.
 * The mspace parameter isn't actually necessary,
 * but we include it to be consistent with the
 * rest of the mspace_*() functions.
 */
size_t mspace_usable_size(mspace _unused, const void* mem) {
  if (mem != 0) {
    const mchunkptr p = mem2chunk(mem);
    if (cinuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}
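A short usage sketch for the function above, assuming dlmalloc built with MSPACES so that create_mspace(), mspace_malloc(), mspace_free() and destroy_mspace() are available (names from the standard dlmalloc mspace API; the header name is an assumption):

#include <stdio.h>
#include "dlmalloc.h"   /* assumed header exposing the mspace_* API */

int main(void)
{
  mspace msp = create_mspace(0, 0);        /* default capacity, no locking */
  void *block = mspace_malloc(msp, 100);   /* request 100 bytes */
  if (block != NULL)
    /* usable size >= 100: the chunk is rounded up, minus bookkeeping overhead */
    printf("usable: %zu\n", mspace_usable_size(msp, block));
  mspace_free(msp, block);
  destroy_mspace(msp);
  return 0;
}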
Example #9
void free(void* mem)
{
  if (mem != 0)
  {
    mchunkptr p = mem2chunk(mem);
    UPDATE_STATS(do_free_stats(p));
    frontlink(p);
  }
}
Example #10
size_t extmem_get_mem_size(unsigned long pgoff)
{
    void * va = (void *)get_virt_from_mspace(pgoff << PAGE_SHIFT);
    mchunkptr p  = mem2chunk(va);
    size_t psize = chunksize(p) - TWO_SIZE_T_SIZES;

    extmem_printk("[EXT_MEM] %s size: 0x%zx\n", __FUNCTION__, psize);
    return psize;
}
Example #11
size_t
public_mUSABLe(void* mem)
{
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    if (cinuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}
Example #12
void* realloc(void* oldmem, size_t bytes)
{
    void *retval;
    int oldsize;
    jmalloc_header_t *jhead;
    void *ra = __builtin_return_address(0);

    if (oldmem)
    {
	jhead = oldmem;
	jhead--;
	if (jhead->magic == JMALLOC_MAGIC)
	    oldsize = jhead->len;
	else
	{
	    if (jhead->magic == JFREE_MAGIC)
	    {
		malloc_printf("MEMORY: %d about to realloc from %p memory %p "
		    "(%d->%d bytes), allocated from %p and already released "
		    "from %p\n", getpid(), ra, oldmem, jhead->len, bytes,
		    jhead->alloc_ra, jhead->free_ra);
		__display_chunk(mem2chunk(jhead));
	    }
	    else
	    {
		malloc_printf("MEMORY: %d about to realloc garbage %p from %p: "
		    "alloc_ra %p, free_ra %p, len %d, magic %#x\n",
		    getpid(), oldmem, ra, jhead->alloc_ra, jhead->free_ra,
		    jhead->len, jhead->magic);
	    }
#ifdef __CONFIG_RG_DBG_ULIBC_MALLOC_CRASH__
	    /* Give the printout a chance to complete */
	    sleep(2);
	    *((int *)0) = 0xfaceface;
#endif
	    return NULL;
	}
    }
    else
	oldsize = 0;

    retval = malloc(bytes);
    if (retval && oldsize && bytes)
	memcpy(retval, oldmem, bytes < oldsize? bytes : oldsize);

    if (retval)
    {
	jhead = retval;
	jhead--;
	jhead->alloc_ra = ra;
    }

    free(oldmem);
    return retval;
}
Example #13
// unsafe(Volatile) -> safe(Non-Volatile)
Void_t* pos_unsafe_region_relocate(char *name, mstate av, Void_t *p)
{
    mchunkptr chunk_ptr;
    mchunkptr next_chunk_ptr;
    Void_t *new_addr;
    Void_t *mem_ptr;
    Void_t *next_mem_ptr;
    int i,j;

    chunk_ptr = mem2chunk(p);
    new_addr = pos_malloc(name, av->node_obj.size);
    memcpy(new_addr, p, av->node_obj.size);
    printf(" --->     NodePtr : %p", p);
    printf(" --->     NewPtr : %p", new_addr);

    for (i = 0; i < 50; i++)
    {
        if (av->node_obj.ptr_offset[i] == 0)
            break;

        mem_ptr = lookup_pointer(mem2chunk(new_addr), av->node_obj.ptr_offset[i]);
        if (mem_ptr == NULL)
            break;

        for (j = 0; j < 50; j++)
        {
            if (av->node_obj.ptr_offset[j] == 0)
                break;

            next_mem_ptr = lookup_pointer(mem2chunk(mem_ptr), av->node_obj.ptr_offset[j]);
            if (next_mem_ptr == NULL)
                break;

            if (next_mem_ptr == p) {
                chunk_change_pointer(mem2chunk(mem_ptr), new_addr, av->node_obj.ptr_offset[j]);
            }
        }
    }
    return new_addr;
}
Example #14
static void
free_starter(void* mem, const void *caller)
{
  if (mem) {
    mchunkptr p = mem2chunk(mem);
    void *msp = arena_to_mspace(&main_arena);
    if (is_mmapped(p))
      munmap_chunk(msp, p);
    else
      mspace_free(msp, mem);
  }
  THREAD_STAT(++main_arena.stat_starter);
}
Example #15
/** clears the constrain tag and adjusts the allocation count */
static void clear_constrain_tag(void* m)
{
	if ( m == NULL )
		return;

	mchunkptr p = mem2chunk(m);

	if ( p->size & CONSTRAIN_TAG )
	{
		p->size &= ~CONSTRAIN_TAG;
		g_constrain_allocated_mem -= chunksize(p);
	}
}
Example #16
static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
	((char*)p<mp_.sbrk_base ||
	 ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
			    (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
			    next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
	offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
	offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
	offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
Example #17
void*
public_mALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *victim;

  void * (*hook) (size_t, const void *) = __malloc_hook;
  if (hook != NULL)
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if (!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  victim = mspace_malloc(arena_to_mspace(ar_ptr), bytes);
  if (victim && ar_ptr != &main_arena)
    set_non_main_arena(victim, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || is_mmapped(mem2chunk(victim)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
  return victim;
}
Example #18
void* realloc(void* mem, size_t       bytes)
{
  if (mem == 0) 
    return malloc(bytes);
  else
  {
    size_t       nb      = request2size(bytes);
    mchunkptr    p       = mem2chunk(mem);
    size_t       oldsize = p->size;
    int          room;
    mchunkptr    nxt;

    UPDATE_STATS((++n_reallocs, requested_mem += bytes-oldsize));
    
    /* try to expand (even if already big enough), to clean up chunk */

    while (!inuse(nxt = next_chunk(p)))
    {
      UPDATE_STATS ((malloced_mem += nxt->size, ++n_consol));
      unlink(nxt);
      set_size(p, p->size + nxt->size);
    }

    room = p->size - nb;
    if (room >= 0)
    {
      split(p, nb);
      UPDATE_STATS(malloced_mem -= room);
      return chunk2mem(p);
    }
    else /* do the obvious */
    {
      void* newmem;
      set_inuse(p);    /* don't let malloc consolidate us yet! */
      newmem = malloc(nb);
      if (newmem == 0) return 0;  /* propagate allocation failure */
      bcopy(mem, newmem, oldsize - SIZE_SZ);
      free(mem);
      UPDATE_STATS(++n_reallocs_with_copy);
      return newmem;
    }
  }
}
Example #19
/** tags the allocated memory and adjusts the allocation count */
static void constrain_tag_allocation(void *m)
{
	if ( m == NULL )
		return;

	mchunkptr p = mem2chunk(m);

	assert(!(p->size & CONSTRAIN_TAG));
	p->size |= CONSTRAIN_TAG;

	g_constrain_allocated_mem += chunksize(p);

#ifdef SHOW_MEM_INFO
	if ( g_show_mem_info && (g_constrain_allocated_mem > g_max_constrain_allocated_mem) )
	{
		show_mem_info(0);
		g_max_constrain_allocated_mem = g_constrain_allocated_mem;
	}
#endif

}
Example #20
static void
free_atfork (void *mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk (mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
    {
      munmap_chunk (p);
      return;
    }

  ar_ptr = arena_for_chunk (p);
  tsd_getspecific (arena_key, vptr);
  _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
Example #21
void
public_fREe(void* mem)
{
  struct malloc_arena* ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                      /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  mspace_free(arena_to_mspace(ar_ptr), mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}
Example #22
Void_t*
pos_public_rEALLOc(char *name, Void_t *oldmem, unsigned long _bytes)
{
	mstate ar_ptr;
	INTERNAL_SIZE_T nb;      /* padded request size */

	Void_t* newp;             /* chunk to return */

	size_t bytes = _bytes;

	/*if (bytes == 0 && oldmem != NULL) {
		pos_public_fREe(name, oldmem);
		return NULL;
	}*/

	/* realloc of NULL is supposed to be the same as malloc */
	if (oldmem == 0)
		return pos_public_mALLOc(name, bytes);


	/* chunk corresponding to oldmem */
	const mchunkptr oldp = mem2chunk(oldmem);
	/* its size */
	const INTERNAL_SIZE_T oldsize = chunksize(oldp);

	/* Little security check which won't hurt performance: the
	    allocator never wraps around at the end of the address space.
	    Therefore we can exclude some size values which might appear
	    here by accident or by "design" from some intruder. */
	/*if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
		|| __builtin_expect (misaligned_chunk (oldp), 0))
	{
		malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
		return NULL;
	}*/

	checked_request2size(bytes, nb);

	ar_ptr = (struct malloc_state *)pos_lookup_mstate(name);
	if (ar_ptr == NULL) {
		return NULL;
	}

	(void)mutex_lock(&ar_ptr->mutex);
	newp = pos_int_realloc(name, ar_ptr, oldp, oldsize, nb);
	(void)mutex_unlock(&ar_ptr->mutex);

	if (newp == NULL) {
		/* Try harder to allocate memory in other arenas.  */
		newp = pos_public_mALLOc(name, bytes);
		if (newp != NULL) {
			memcpy (newp, oldmem, oldsize - SIZE_SZ);
			
			(void)mutex_lock(&ar_ptr->mutex);
			pos_int_free(name, ar_ptr, oldp, 1);
			(void)mutex_unlock(&ar_ptr->mutex);
		}
	}

	return newp;
}
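The commented-out security check above rejects a chunk whose end address would run past the top of the address space: for a nonzero size, (uintptr_t) oldp > (uintptr_t) -oldsize holds exactly when oldp + oldsize exceeds the top of the address space. A small illustrative sketch of that arithmetic (not part of the original code; chunk_wraps is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

/* For size > 0: true when p + size would run past the top of the address
   space, i.e. the modular sum wraps. */
static int chunk_wraps(uintptr_t p, uintptr_t size)
{
  return p > (uintptr_t) -size;   /* -size == 2^N - size (mod 2^N) */
}

int main(void)
{
  printf("%d\n", chunk_wraps(UINTPTR_MAX - 100, 200)); /* 1: would wrap */
  printf("%d\n", chunk_wraps(0x1000, 0x100));          /* 0: fine */
  return 0;
}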
Example #23
////////////////////////////////////////
// WARNING: pos_realloc has errors. FIX UP!
////////////////////////////////////////
Void_t*
pos_int_realloc(char *name, mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
	     INTERNAL_SIZE_T nb)
{
	mchunkptr newp;				/* chunk to return */
	INTERNAL_SIZE_T newsize;		/* its size */
	Void_t* newmem;				/* corresponding user mem */

	mchunkptr next;				/* next contiguous chunk after oldp */

	mchunkptr remainder;			/* extra space at end of newp */
	unsigned long remainder_size;	/* its size */

	mchunkptr bck;				/* misc temp for linking */
	mchunkptr fwd;				/* misc temp for linking */

	unsigned long copysize;		/* bytes to copy */
	unsigned int ncopies;			/* INTERNAL_SIZE_T words to copy */
	INTERNAL_SIZE_T* s;			/* copy source */
	INTERNAL_SIZE_T* d;			/* copy destination */

	const char *errstr = NULL;


	/* oldmem size */
	/*if (oldp->size <= 2 * SIZE_SZ || oldsize >= av->system_mem) {
		errstr = "realloc(): invalid old size";
errout:
		malloc_printerr (check_action, errstr, chunk2mem(oldp));
		return NULL;
	}*/

	next = chunk_at_offset(oldp, oldsize);
	INTERNAL_SIZE_T nextsize = chunksize(next);
	/*if (next->size <= 2 * SIZE_SZ || nextsize >= av->system_mem) {
		errstr = "realloc(): invalid next size";
			goto errout;
	}*/

	// Request is no larger than the old size
	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
		/* already big enough; split below */
		newp = oldp;
		newsize = oldsize;
	}

	// Request is larger than the old size
	else {

		/* Try to expand forward into next chunk;  split off remainder below */
		if (!inuse(next) &&
			(unsigned long)(newsize = oldsize + nextsize) >= (unsigned long)(nb)) {
			newp = oldp;
			unlink(next, bck, fwd);
		}

		/* allocate, copy, free */
		else {
			newmem = pos_int_malloc(name, av, nb - MALLOC_ALIGN_MASK);
			if (newmem == 0)
				return 0; /* propagate failure */

			newp = mem2chunk(newmem);
			newsize = chunksize(newp);

			/*
			  Avoid copy if newp is next chunk after oldp.
			*/
			if (newp == next) {
				newsize += oldsize;
				newp = oldp;
			}
			else {
				/*
				  Unroll copy of <= 36 bytes (72 if 8byte sizes)
				  We know that contents have an odd number of
				  INTERNAL_SIZE_T-sized words; minimally 3.
				*/

				copysize = oldsize - SIZE_SZ;
				s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
				d = (INTERNAL_SIZE_T*)(newmem);
				ncopies = copysize / sizeof(INTERNAL_SIZE_T);

				if (ncopies > 9)
					memcpy(d, s, copysize);
				else {
					*(d+0) = *(s+0);
					*(d+1) = *(s+1);
					*(d+2) = *(s+2);
					if (ncopies > 4) {
						*(d+3) = *(s+3);
						*(d+4) = *(s+4);
						if (ncopies > 6) {
							*(d+5) = *(s+5);
							*(d+6) = *(s+6);
							if (ncopies > 8) {
								*(d+7) = *(s+7);
								*(d+8) = *(s+8);
							}
						}
					}
				}

				pos_int_free(name, av, oldp, 1);

				return chunk2mem(newp);
			}
		}
	}

	/* If possible, free extra space in old or extended chunk */

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
		set_head_size(newp, newsize);
		set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
		remainder = chunk_at_offset(newp, nb);

		if (chunk_is_last(newp))
			set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
		else
			set_head(remainder, remainder_size | PREV_INUSE);

		// set PREV_INUSE flag..
		if (chunk_is_first(newp))
			set_head(newp, nb | FIRST_CHUNK | PREV_INUSE);
		else
			set_head(newp, nb | PREV_INUSE);
		
		//set_head_size(newp, nb);
		//set_head(remainder, remainder_size | PREV_INUSE |(av != &main_arena ? NON_MAIN_ARENA : 0));

		/* Mark remainder as inuse so free() won't complain */
		set_inuse_bit_at_offset(remainder, remainder_size);
		pos_int_free(name, av, remainder, 1);
	}

	return chunk2mem(newp);
}
Example #24
/* ------------------------------ realloc ------------------------------ */
void* ulibc_realloc(void* oldmem, size_t bytes)
{
    mstate av;

    size_t  nb;              /* padded request size */

    mchunkptr        oldp;            /* chunk corresponding to oldmem */
    size_t  oldsize;         /* its size */

    mchunkptr        newp;            /* chunk to return */
    size_t  newsize;         /* its size */
    void*          newmem;          /* corresponding user mem */

    mchunkptr        next;            /* next contiguous chunk after oldp */

    mchunkptr        remainder;       /* extra space at end of newp */
    unsigned long     remainder_size;  /* its size */

    mchunkptr        bck;             /* misc temp for linking */
    mchunkptr        fwd;             /* misc temp for linking */

    unsigned long     copysize;        /* bytes to copy */
    unsigned int     ncopies;         /* size_t words to copy */
    size_t* s;               /* copy source */
    size_t* d;               /* copy destination */

    void *retval;

    /* Check for special cases.  */
    if (! oldmem)
	return ulibc_malloc(bytes);
    if (! bytes) {
	ulibc_free (oldmem);
	return NULL;
    }

    av = get_malloc_state();
    checked_request2size(bytes, nb);

    oldp    = mem2chunk(oldmem);
    oldsize = chunksize(oldp);

    check_inuse_chunk(oldp);

    if (!chunk_is_mmapped(oldp)) {

	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
	    /* already big enough; split below */
	    newp = oldp;
	    newsize = oldsize;
	}

	else {
	    next = chunk_at_offset(oldp, oldsize);

	    /* Try to expand forward into top */
	    if (next == av->top &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb + MINSIZE)) {
		set_head_size(oldp, nb);
		av->top = chunk_at_offset(oldp, nb);
		set_head(av->top, (newsize - nb) | PREV_INUSE);
		retval = chunk2mem(oldp);
		goto DONE;
	    }

	    /* Try to expand forward into next chunk;  split off remainder below */
	    else if (next != av->top &&
		    !inuse(next) &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb)) {
		newp = oldp;
		unlink(next, bck, fwd);
	    }

	    /* allocate, copy, free */
	    else {
		newmem = malloc(nb - MALLOC_ALIGN_MASK);
		if (newmem == 0) {
		    retval = 0; /* propagate failure */
		    goto DONE;
		}

		newp = mem2chunk(newmem);
		newsize = chunksize(newp);

		/*
		   Avoid copy if newp is next chunk after oldp.
		   */
		if (newp == next) {
		    newsize += oldsize;
		    newp = oldp;
		}
		else {
		    /*
		       Unroll copy of <= 36 bytes (72 if 8byte sizes)
		       We know that contents have an odd number of
		       size_t-sized words; minimally 3.
		       */

		    copysize = oldsize - (sizeof(size_t));
		    s = (size_t*)(oldmem);
		    d = (size_t*)(newmem);
		    ncopies = copysize / sizeof(size_t);
		    assert(ncopies >= 3);

		    if (ncopies > 9)
			memcpy(d, s, copysize);

		    else {
			*(d+0) = *(s+0);
			*(d+1) = *(s+1);
			*(d+2) = *(s+2);
			if (ncopies > 4) {
			    *(d+3) = *(s+3);
			    *(d+4) = *(s+4);
			    if (ncopies > 6) {
				*(d+5) = *(s+5);
				*(d+6) = *(s+6);
				if (ncopies > 8) {
				    *(d+7) = *(s+7);
				    *(d+8) = *(s+8);
				}
			    }
			}
		    }

		    ulibc_free(oldmem);
		    check_inuse_chunk(newp);
		    retval = chunk2mem(newp);
		    goto DONE;
		}
	    }
	}

	/* If possible, free extra space in old or extended chunk */

	assert((unsigned long)(newsize) >= (unsigned long)(nb));

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
	    set_head_size(newp, newsize);
	    set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
	    remainder = chunk_at_offset(newp, nb);
	    set_head_size(newp, nb);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    /* Mark remainder as inuse so free() won't complain */
	    set_inuse_bit_at_offset(remainder, remainder_size);
	    ulibc_free(chunk2mem(remainder));
	}

	check_inuse_chunk(newp);
	retval = chunk2mem(newp);
	goto DONE;
    }

    /*
       Handle mmap cases
       */

    else {
	size_t offset = oldp->prev_size;
	size_t pagemask = av->pagesize - 1;
	char *cp;
	unsigned long  sum;

	/* Note the extra (sizeof(size_t)) overhead */
	newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

	/* don't need to remap if still within same page */
	if (oldsize == newsize - offset) {
	    retval = oldmem;
	    goto DONE;
	}

	cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

	if (cp != (char*)MORECORE_FAILURE) {

	    newp = (mchunkptr)(cp + offset);
	    set_head(newp, (newsize - offset)|IS_MMAPPED);

	    assert(aligned_OK(chunk2mem(newp)));
	    assert((newp->prev_size == offset));

	    /* update statistics */
	    sum = av->mmapped_mem += newsize - oldsize;
	    if (sum > (unsigned long)(av->max_mmapped_mem))
		av->max_mmapped_mem = sum;
	    sum += av->sbrked_mem;
	    if (sum > (unsigned long)(av->max_total_mem))
		av->max_total_mem = sum;

	    retval = chunk2mem(newp);
	    goto DONE;
	}

	/* Note the extra (sizeof(size_t)) overhead. */
	if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
	    newmem = oldmem; /* do nothing */
	else {
	    /* Must alloc, copy, free. */
	    newmem = malloc(nb - MALLOC_ALIGN_MASK);
	    if (newmem != 0) {
		memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
		ulibc_free(oldmem);
	    }
	}
	retval = newmem;
    }

 DONE:
    return retval;
}
Example #25
/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
	return;

    __MALLOC_LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
       If eligible, place chunk on a fastbin so it can be found
       and used quickly in malloc.
       */

    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
	    /* If TRIM_FASTBINS set, don't place chunks
	       bordering top into fastbins */
	    && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

	set_fastchunks(av);
	fb = &(av->fastbins[fastbin_index(size)]);
	p->fd = *fb;
	*fb = p;
    }

    /*
       Consolidate other non-mmapped chunks as they arrive.
       */

    else if (!chunk_is_mmapped(p)) {
	set_anychunks(av);

	nextchunk = chunk_at_offset(p, size);
	nextsize = chunksize(nextchunk);

	/* consolidate backward */
	if (!prev_inuse(p)) {
	    prevsize = p->prev_size;
	    size += prevsize;
	    p = chunk_at_offset(p, -((long) prevsize));
	    unlink(p, bck, fwd);
	}

	if (nextchunk != av->top) {
	    /* get and clear inuse bit */
	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
	    set_head(nextchunk, nextsize);

	    /* consolidate forward */
	    if (!nextinuse) {
		unlink(nextchunk, bck, fwd);
		size += nextsize;
	    }

	    /*
	       Place the chunk in unsorted chunk list. Chunks are
	       not placed into regular bins until after they have
	       been given one chance to be used in malloc.
	       */

	    bck = unsorted_chunks(av);
	    fwd = bck->fd;
	    p->bk = bck;
	    p->fd = fwd;
	    bck->fd = p;
	    fwd->bk = p;

	    set_head(p, size | PREV_INUSE);
	    set_foot(p, size);

	    check_free_chunk(p);
	}

	/*
	   If the chunk borders the current high end of memory,
	   consolidate into top
	   */

	else {
	    size += nextsize;
	    set_head(p, size | PREV_INUSE);
	    av->top = p;
	    check_chunk(p);
	}

	/*
	   If freeing a large space, consolidate possibly-surrounding
	   chunks. Then, if the total unused topmost memory exceeds trim
	   threshold, ask malloc_trim to reduce top.

	   Unless max_fast is 0, we don't know if there are fastbins
	   bordering top, so we cannot tell for sure whether threshold
	   has been reached unless fastbins are consolidated.  But we
	   don't want to consolidate on each free.  As a compromise,
	   consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
	   is reached.
	   */

	if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
	    if (have_fastchunks(av))
		__malloc_consolidate(av);

	    if ((unsigned long)(chunksize(av->top)) >=
		    (unsigned long)(av->trim_threshold))
		__malloc_trim(av->top_pad, av);
	}

    }
    /*
       If the chunk was allocated via mmap, release via munmap()
       Note that if HAVE_MMAP is false but chunk_is_mmapped is
       true, then user must have overwritten memory. There's nothing
       we can do to catch this error unless DEBUG is set, in which case
       check_inuse_chunk (above) will have triggered error.
       */

    else {
	size_t offset = p->prev_size;
	av->n_mmaps--;
	av->mmapped_mem -= (size + offset);
	munmap((char*)p - offset, size + offset);
    }
    __MALLOC_UNLOCK;
}
Example #26
// This function is identical to mspace_free,
// with PREACTION replaced by 0 and POSTACTION removed.
static void mspace_free_lockless(mspace msp, void* mem)
{
  if (mem != 0) {
    mchunkptr p  = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    msp = msp; /* placate people compiling -Wunused */
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!0) { /* was: if (!PREACTION(fm)) { */
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        s_allocated_memory -= psize;
        if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {  /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      ;//POSTACTION(fm);
    }
  }
}
Example #27
void* memalign(size_t       alignment, size_t       bytes)
{
  mchunkptr p;
  size_t       nb = request2size(bytes);

  /* find an alignment that both we and the user can live with: */
  /* least common multiple guarantees mutual happiness */
  size_t       align = lcm(alignment, MALLOC_MIN_OVERHEAD);
  size_t       mask = align - 1;

  /* call malloc with worst case padding to hit alignment; */
  /* we will give back extra */

  size_t       req = nb + align + MINSIZE;
  void* m = malloc(req);

  if (m == 0) return m;

  p = mem2chunk(m);

  /* keep statistics on track */

  UPDATE_STATS(--n_mallocs);
  UPDATE_STATS(malloced_mem -= p->size);
  UPDATE_STATS(requested_mem -= req);
  UPDATE_STATS(requested_mem += bytes);

  if (((int)(m) & (mask)) != 0) /* misaligned */
  {

    /* find an aligned spot inside chunk */

    mchunkptr ap = (mchunkptr)(( ((int)(m) + mask) & -align) - SIZE_SZ);

    size_t       gap = (size_t      )(ap) - (size_t      )(p);
    size_t       room;

    /* we need to give back leading space in a chunk of at least MINSIZE */

    if (gap < MINSIZE)
    {
      /* This works since align >= MINSIZE */
      /* and we've malloc'd enough total room */

      ap = (mchunkptr)( (int)(ap) + align );
      gap += align;    
    }

    if (gap + nb > p->size) /* can't happen unless chunk sizes corrupted */
      malloc_user_error();

    room = p->size - gap;

    /* give back leader */
    set_size(p, gap);
    consollink(p);

    /* use the rest */
    p = ap;
    set_size(p, room);
  }

  /* also give back spare room at the end */

  split(p, nb); 
  UPDATE_STATS(do_malloc_stats(p));
  return chunk2mem(p);

}
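The memalign above uses the classic trick of over-allocating and then carving an aligned chunk out of the middle, returning the leading and trailing slack to the free lists. A minimal standalone illustration of the pointer rounding involved (assumptions only; a real memalign must also keep track of the original block so it can be freed, which the allocator above does by turning the leading gap into a separate free chunk):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  size_t align = 64, bytes = 100;   /* align must be a power of two here */

  /* Over-allocate with worst-case padding so an aligned spot must exist. */
  void *raw = malloc(bytes + align - 1);
  if (raw == NULL)
    return 1;

  /* Round the address up to the next multiple of 'align'. */
  uintptr_t aligned = ((uintptr_t) raw + align - 1) & ~(uintptr_t) (align - 1);

  printf("raw=%p aligned=%p gap=%zu\n",
         raw, (void *) aligned, (size_t) (aligned - (uintptr_t) raw));

  free(raw);   /* only the original pointer may be passed to free() */
  return 0;
}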
Example #28
/* ------------------------------ calloc ------------------------------ */
void* calloc(size_t n_elements, size_t elem_size)
{
    mchunkptr p;
    unsigned long  clearsize;
    unsigned long  nclears;
    size_t size, *d;
    void* mem;


    /* guard vs integer overflow, but allow nmemb
     * to fall through and call malloc(0) */
    size = n_elements * elem_size;
    if (n_elements && elem_size != (size / n_elements)) {
	__set_errno(ENOMEM);
	return NULL;
    }

    __MALLOC_LOCK;
    mem = malloc(size);
    if (mem != 0) {
	p = mem2chunk(mem);

	if (!chunk_is_mmapped(p))
	{
	    /*
	       Unroll clear of <= 36 bytes (72 if 8byte sizes)
	       We know that contents have an odd number of
	       size_t-sized words; minimally 3.
	       */

	    d = (size_t*)mem;
	    clearsize = chunksize(p) - (sizeof(size_t));
	    nclears = clearsize / sizeof(size_t);
	    assert(nclears >= 3);

	    if (nclears > 9)
		memset(d, 0, clearsize);

	    else {
		*(d+0) = 0;
		*(d+1) = 0;
		*(d+2) = 0;
		if (nclears > 4) {
		    *(d+3) = 0;
		    *(d+4) = 0;
		    if (nclears > 6) {
			*(d+5) = 0;
			*(d+6) = 0;
			if (nclears > 8) {
			    *(d+7) = 0;
			    *(d+8) = 0;
			}
		    }
		}
	    }
	}
#if 0
	else
	{
	/* Standard unix mmap using /dev/zero clears memory so calloc
	 * doesn't need to actually zero anything....
	 */
	    d = (size_t*)mem;
	    /* Note the additional (sizeof(size_t)) */
	    clearsize = chunksize(p) - 2*(sizeof(size_t));
	    memset(d, 0, clearsize);
	}
#endif
    }
    __MALLOC_UNLOCK;
    return mem;
}
Example #29
/*!
  Returns the allocated size of \a mem, assuming \a mem was previously returned
  by malloc(), calloc() or realloc().
  */
size_t QMallocPool::size_of(void *mem)
{
    return chunksize(mem2chunk(mem)) - sizeof(mchunkptr);
}