Example #1
static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  /* ptmalloc_lock_all tags the forking thread's arena TSD with
     ATFORK_ARENA_PTR while fork() is in progress.  */
  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}
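A note on the else branch: ptmalloc's fork handler (ptmalloc_lock_all) holds list_lock for the entire duration of fork() and tags the forking thread's arena TSD with ATFORK_ARENA_PTR, so any other thread that allocates meanwhile lands here and parks itself with a lock-then-unlock pair, resuming only once the handlers have released list_lock and restored the hooks. Below is a minimal standalone sketch of that lock/unlock barrier idiom; phase_lock and waiter are illustrative names, not glibc's.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t phase_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
waiter(void *arg)
{
  (void)arg;
  /* Block until the critical phase drops the lock, then proceed;
     no work is done while the lock is held.  */
  pthread_mutex_lock(&phase_lock);
  pthread_mutex_unlock(&phase_lock);
  puts("phase finished, resuming");
  return NULL;
}

int
main(void)
{
  pthread_t t;
  pthread_mutex_lock(&phase_lock);   /* enter the critical phase */
  pthread_create(&t, NULL, waiter, NULL);
  sleep(1);                          /* simulate the phase's work */
  pthread_mutex_unlock(&phase_lock); /* phase over; waiters resume */
  pthread_join(t, NULL);
  return 0;
}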
Example #2
static void*
malloc_check(size_t sz, const void *caller)
{
  void *victim;

  /* Guard against overflow: sz == SIZE_MAX would wrap the sz+1
     request below to zero.  */
  if (sz+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  /* Ask for one extra byte to hold the trailing magic byte that
     mem2mem_check installs.  */
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
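The sz+1 == 0 test is an overflow guard: size_t arithmetic is modular, so the sum wraps to zero exactly when sz == SIZE_MAX, the one request size that leaves no room for the extra canary byte that mem2mem_check appends (see the sketch after Example #5). A tiny standalone illustration of the guard, not glibc code:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
  size_t sz = SIZE_MAX;
  printf("sz + 1 wraps to %zu\n", sz + 1);   /* prints 0 */
  printf("guard fires: %d\n", sz + 1 == 0);  /* prints 1 */
  return 0;
}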
Example #3
static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
{
  void* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
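This is an older revision of memalign_check: the bytes+1 == 0 test alone does not catch a request whose size, once padded for alignment inside _int_memalign, wraps past SIZE_MAX. Example #4 below is the hardened revision that adds the missing guards. A standalone sketch of the wraparound it closes off; MINSIZE_DEMO stands in for glibc's MINSIZE, and the arithmetic is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define MINSIZE_DEMO 32

int
main(void)
{
  size_t alignment = (size_t)1 << 62;
  size_t bytes = SIZE_MAX - alignment;              /* huge request */
  size_t padded = bytes + alignment + MINSIZE_DEMO; /* wraps around */
  printf("padded request wraps to %zu\n", padded);  /* tiny value */
  printf("guard fires: %d\n",
         bytes > SIZE_MAX - alignment - MINSIZE_DEMO); /* prints 1 */
  return 0;
}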
Example #4
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  (void) mutex_lock (&main_arena.mutex);
  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
        NULL;
  (void) mutex_unlock (&main_arena.mutex);
  return mem2mem_check (mem, bytes);
}
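Two of the added checks are worth spelling out: alignments above SIZE_MAX / 2 + 1 are rejected outright because no power of two that large fits in a size_t (the rounding loop would otherwise shift a past the top bit and never terminate), and non-power-of-two alignments are rounded up rather than rejected, preserving compatibility with sloppy callers. A standalone sketch of that rounding; powerof2 is reimplemented here with the usual bit trick (glibc takes it from <sys/param.h>), and MALLOC_ALIGNMENT_DEMO is an assumed typical value:

#include <stdio.h>
#include <stddef.h>

#define MALLOC_ALIGNMENT_DEMO 16   /* assumption: common 64-bit value */

static int
powerof2(size_t x)
{
  return x != 0 && (x & (x - 1)) == 0;
}

/* Round a requested alignment up to a power of two, as above. */
static size_t
round_alignment(size_t alignment)
{
  size_t a;
  if (powerof2(alignment))
    return alignment;
  a = MALLOC_ALIGNMENT_DEMO * 2;
  while (a < alignment)
    a <<= 1;
  return a;
}

int
main(void)
{
  printf("%zu -> %zu\n", (size_t)24, round_alignment(24));   /* 32 */
  printf("%zu -> %zu\n", (size_t)100, round_alignment(100)); /* 128 */
  printf("%zu -> %zu\n", (size_t)64, round_alignment(64));   /* 64 */
  return 0;
}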
Example #5
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
	newmem = oldmem; /* do nothing */
      else {
	/* Must alloc, copy, free. */
	if (top_check() >= 0)
	  newmem = _int_malloc(&main_arena, bytes+1);
	if (newmem) {
	  MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
	  munmap_chunk(oldp);
	}
      }
    }
  } else {
    /* nb was already computed from bytes + 1 above.  */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
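All of these wrappers funnel through mem2mem_check and mem2chunk_check, which keep a single magic byte just past the user's requested bytes (hence every sz + 1 and bytes + 1 request above). mem2chunk_check flips that byte when it validates a pointer, so a failed realloc must flip it back with *magic_p ^= 0xFF before the old block is handed back to the caller. Here is a standalone sketch of the scheme; MAGIC() is an illustrative stand-in for glibc's address-based hash, not the real computation:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Illustrative magic byte derived from the block address. */
#define MAGIC(p) ((unsigned char)(((uintptr_t)(p) >> 3) & 0xFF))

/* Allocate sz usable bytes plus one trailing canary byte. */
static void *
checked_alloc(size_t sz)
{
  unsigned char *mem = malloc(sz + 1);
  if (mem != NULL)
    mem[sz] = MAGIC(mem);
  return mem;
}

/* Verify the canary and invalidate it, as mem2chunk_check does;
   returns 0 on corruption or reuse of an already-claimed block. */
static int
claim(unsigned char *mem, size_t sz)
{
  if (mem[sz] != MAGIC(mem))
    return 0;
  mem[sz] ^= 0xFF;    /* mark as claimed */
  return 1;
}

int
main(void)
{
  unsigned char *p = checked_alloc(16);
  if (p == NULL)
    return 1;
  printf("claim ok: %d\n", claim(p, 16));       /* prints 1 */
  /* Simulate a failed realloc: the old block stays live, so undo
     the invalidation exactly as `*magic_p ^= 0xFF' does above. */
  p[16] ^= 0xFF;
  printf("claim again ok: %d\n", claim(p, 16)); /* prints 1 again */
  free(p);
  return 0;
}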