Example #1
void* hbw_calloc(size_t nmemb, size_t size)
{
    if (myhbwmalloc_mspace == NULL) {
        if (!myhbwmalloc_hardfail) {
            fprintf(stderr, "hbwmalloc: mspace invalid - allocating from default heap\n");
            return calloc(nmemb, size);
        } else {
            fprintf(stderr, "hbwmalloc: mspace invalid - cannot allocate from hbw heap\n");
            abort();
        }
    }
    return mspace_calloc(myhbwmalloc_mspace, nmemb, size);
}
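The example above assumes myhbwmalloc_mspace was created elsewhere. A minimal sketch of such an initializer, assuming the high-bandwidth region is modeled with a plain anonymous mmap (the function name myhbwmalloc_init and the mmap source are illustrative; a real hbwmalloc would obtain MCDRAM/NUMA memory instead):

#include <stdio.h>
#include <sys/mman.h>
#include "dlmalloc.h"  /* create_mspace_with_base, mspace_calloc (requires MSPACES) */

static mspace myhbwmalloc_mspace = NULL;

/* Hypothetical initializer: carve a dlmalloc mspace out of a mapped region. */
static void myhbwmalloc_init(size_t capacity)
{
    void* base = mmap(NULL, capacity, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        return;  /* hbw_calloc will then fall back or abort, as above */
    }
    /* The final argument enables per-mspace locking for thread safety. */
    myhbwmalloc_mspace = create_mspace_with_base(base, capacity, 1);
}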
Example #2
void* memory_calloc(size_t num, size_t elem_size)
{
    void* mem = NULL;
    int i = 0;
    size_t bytes = num * elem_size;

    if (!use_allocator)
    {
        memory_check_limits(bytes, 0);
        return calloc(num, elem_size);
    }

    while (mem == NULL)
    {
        memory_t* memptr = &memory_table[i];
        /* First try to allocate in one of the existing chunks */
        if (memptr->in_use == 1)
        {
            if ((mem = mspace_calloc(memptr->shm_mspace, num, elem_size)) != NULL)
                break;
        }

        /* Create a new chunk if we are already past the last valid chunk */
        if (i > memory_table_last)
        {
            memptr = memory_expand(bytes);
            if ((mem = mspace_calloc(memptr->shm_mspace, num, elem_size)) != NULL)
                break;
            else
                ERROR("EPLIB calloc failed to allocate %ld bytes\n", bytes);
        }
        i++;
    }

    DEBUG_ASSERT(mem);
    MAKE_BOUNDS(mem, bytes);

    return mem;
}
Example #3
void*
public_cALLOc(size_t n_elements, size_t elem_size)
{
  struct malloc_arena* ar_ptr;
  size_t bytes, sz;
  void* mem;
  void * (*hook) (size_t, const void *) = __malloc_hook;

  /* size_t is unsigned so the behavior on overflow is defined.  */
  bytes = n_elements * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((size_t) 1) << (8 * sizeof (size_t) / 2))
  if (__builtin_expect ((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n_elements) {
      /*MALLOC_FAILURE_ACTION;*/
      return 0;
    }
  }

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  mem = mspace_calloc(arena_to_mspace(ar_ptr), bytes, 1);

  if (mem && ar_ptr != &main_arena)
    set_non_main_arena(mem, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!mem || is_mmapped(mem2chunk(mem)) ||
         ar_ptr == arena_for_chunk(mem2chunk(mem)));

  return mem;
}
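The HALF_INTERNAL_SIZE_T test above is a fast-path overflow check: the division is only needed when at least one operand has a bit set in the upper half of size_t. The same technique as a standalone sketch (calloc_checked is an illustrative name, not part of the code above):

#include <stddef.h>
#include <stdlib.h>

static void* calloc_checked(size_t n, size_t size)
{
    /* If both operands fit in half the bits of size_t, n * size cannot
     * overflow, so the division below is skipped on the common path. */
    const size_t half = (size_t)1 << (8 * sizeof(size_t) / 2);
    size_t bytes = n * size;
    if ((n | size) >= half) {
        if (size != 0 && bytes / size != n)
            return NULL;  /* n * size overflowed */
    }
    return calloc(n, size);
}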
Example #4
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
Example #5
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr;
    if (gDvm.lowMemoryMode) {
        /* This is only necessary because mspace_calloc always memsets the
         * allocated memory to 0. This is bad for memory usage, since it
         * leads to dirty zero pages. If low memory mode is enabled, we use
         * mspace_malloc, which doesn't memset the allocated memory, and
         * madvise the page-aligned region back to the kernel.
         */
        ptr = mspace_malloc(heap->msp, n);
        if (ptr == NULL) {
            return NULL;
        }
        uintptr_t zero_begin = (uintptr_t)ptr;
        uintptr_t zero_end = (uintptr_t)ptr + n;
        /* Calculate the page aligned region.
         */
        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
        uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
        /* If our allocation spans more than one page, we attempt to madvise.
         */
        if (begin < end) {
            /* madvise the page-aligned region back to the kernel.
             */
            madvise((void*)begin, end - begin, MADV_DONTNEED);
            /* Zero the region after the page aligned region.
             */
            memset((void*)end, 0, zero_end - end);
            /* Zero out the region before the page aligned region.
             */
            zero_end = begin;
        }
        memset((void*)zero_begin, 0, zero_end - zero_begin);
    } else {
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr == NULL) {
            return NULL;
        }
    }

    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
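The low-memory branch above relies on a Linux property of private anonymous mappings: after madvise(MADV_DONTNEED) the pages read back as zeros on the next fault, so only the unaligned edges need an explicit memset. The zeroing step as a standalone sketch (zero_region is an illustrative helper; it assumes the buffer is backed by an anonymous mapping, as dlmalloc chunks typically are):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

/* Zero [ptr, ptr + n) without dirtying whole pages: return the
 * page-aligned interior to the kernel and memset only the edges. */
static void zero_region(void* ptr, size_t n)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    uintptr_t zero_begin = (uintptr_t)ptr;
    uintptr_t zero_end = zero_begin + n;
    uintptr_t begin = (zero_begin + page - 1) & ~(page - 1);  /* round up */
    uintptr_t end = zero_end & ~(page - 1);                   /* round down */

    if (begin < end) {
        madvise((void*)begin, end - begin, MADV_DONTNEED);
        memset((void*)end, 0, zero_end - end);  /* trailing partial page */
        zero_end = begin;                       /* leading edge handled below */
    }
    memset((void*)zero_begin, 0, zero_end - zero_begin);
}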
Example #6
void* calloc(size_t n_elements, size_t elem_size) {
    if(unlikely(sm_mspace == NULL)) __sm_init();

    return mspace_calloc(sm_mspace, n_elements, elem_size);
}
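Example #6 leaves __sm_init to the rest of the library. A minimal sketch of what such a lazy initializer might do (the capacity and locking choices are assumptions, not taken from the example):

#include "dlmalloc.h"  /* create_mspace (requires MSPACES) */

static mspace sm_mspace = NULL;

/* Hypothetical lazy initializer: a capacity of 0 creates a growable
 * mspace; the second argument enables locking for thread safety. */
static void __sm_init(void)
{
    sm_mspace = create_mspace(0, 1);
}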
Example #7
void *dlmalloc_mspace_alloc_zeroed(void *space, ulen size) { // substitute
	return mspace_calloc(space, size, 1);
}
Example #8
void * NONNULL(1) MALLOC
mm_private_space_calloc(struct mm_private_space *space, size_t count, size_t size)
{
    return mspace_calloc(space->space.opaque, count, size);
}