static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}
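
For context, this hook only takes effect because the fork `prepare' handler installs it and tags the forking thread. The following is a hedged sketch of that handler, loosely following the glibc ptmalloc sources; `save_arena', `save_free_hook' and `free_atfork' are assumptions taken from that codebase, not from the example above.

static void
ptmalloc_lock_all(void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  /* Take every arena mutex so no other thread holds allocator state
     across the fork(). */
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  /* Redirect the hooks so that all allocation funnels through
     malloc_atfork()/free_atfork() until fork() completes. */
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Mark this thread as the fork()ing thread; malloc_atfork() keys
     off ATFORK_ARENA_PTR in the thread-specific data. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}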
Example #2
void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If the caller needs less alignment than we provide anyway,
     just relay to malloc */
  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment <  MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr,
	    bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
}
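
A small usage sketch of the contract this entry point implements, via the standard memalign() wrapper that resolves to it on glibc; the alignment and size values are arbitrary:

#include <malloc.h>    /* memalign */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>    /* free */

int main(void)
{
  /* Request 100 bytes on a 64-byte boundary; an alignment at or below
     MALLOC_ALIGNMENT would simply be relayed to malloc() above. */
  void *p = memalign(64, 100);
  if (p == NULL)
    return 1;
  printf("aligned: %d\n", (uintptr_t)p % 64 == 0);
  free(p);
  return 0;
}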
Example #3
static void*
malloc_hook_ini(size_t sz, const void * caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}
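
The pattern here is lazy initialization through the hook: __malloc_hook starts out pointing at malloc_hook_ini, so the very first malloc() call runs ptmalloc_init() exactly once, and clearing the hook before the retry prevents recursion. A self-contained model of the pattern (all names are illustrative, not the glibc symbols):

#include <stdio.h>
#include <stdlib.h>

static void *my_malloc(size_t sz);
static void *my_hook_ini(size_t sz);

/* The hook initially points at the one-time initializer. */
static void *(*my_hook)(size_t) = my_hook_ini;

static void my_init(void)
{
  puts("one-time allocator init");
}

static void *my_hook_ini(size_t sz)
{
  my_hook = NULL;       /* clear the hook first so the retry below
                           takes the normal path instead of recursing */
  my_init();
  return my_malloc(sz); /* retry through the public entry point */
}

static void *my_malloc(size_t sz)
{
  void *(*hook)(size_t) = my_hook;
  if (hook != NULL)
    return (*hook)(sz);
  return malloc(sz);    /* stand-in for the real allocation path */
}

int main(void)
{
  void *a = my_malloc(16);  /* first call: runs init, then allocates */
  void *b = my_malloc(16);  /* later calls: straight to allocation */
  free(a);
  free(b);
  return 0;
}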
Example #4
void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  mchunkptr oldp;             /* chunk corresponding to oldmem */
  void* newp;                 /* memory to return */

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  /* realloc of NULL is supposed to be the same as malloc */
  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp    = mem2chunk(oldmem);
  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp); /* FIXME: use mmap_resize */
  else
    ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);

  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(newp)));
  return newp;
}
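
A brief caller-side sketch of the edge cases handled above, using the standard realloc() (nothing here is specific to this allocator):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  /* realloc(NULL, n) behaves like malloc(n), per the relay above. */
  char *p = realloc(NULL, 8);
  if (p == NULL)
    return 1;
  strcpy(p, "abc");

  /* Growing may move the block, so keep the old pointer until the
     call succeeds; on success the allocator disposes of the old
     chunk itself. */
  char *q = realloc(p, 4096);
  if (q == NULL) {
    free(p);
    return 1;
  }
  p = q;
  puts(p);
  free(p);
  return 0;
}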
Example #5
static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    return mspace_malloc(arena_to_mspace(&main_arena), sz);
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}
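
The lock/unlock pair on list_lock works as a barrier only because the fork `prepare' handler holds list_lock for the duration of fork(). A hedged sketch of the matching parent-side handler for this mspace-based variant, again loosely following the glibc ptmalloc sources; `save_arena', the saved hook variables and the arena `next' link are assumptions from that codebase:

static void
ptmalloc_unlock_all(void)
{
  struct malloc_arena* ar_ptr;

  if(__malloc_initialized < 1)
    return;
  /* Restore the forking thread's arena and the saved hooks first... */
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  /* ...then release every arena mutex and finally list_lock, which
     unblocks threads parked in malloc_atfork() above. */
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}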