Example #1
void *dt_realloc(void *ptr, size_t size)
{
#if CONFIG_MEMALIGN_HACK
    int diff;
#endif

    /* let's disallow possibly ambiguous cases */
    if (size > (max_alloc_size - 32)) {
        return NULL;
    }

#if CONFIG_MEMALIGN_HACK
    //FIXME this isn't aligned correctly, though it probably isn't needed
    if (!ptr) {
        return dt_malloc(size);
    }
    diff = ((char *)ptr)[-1];
    dt_assert0(diff > 0 && diff <= ALIGN);
    ptr = realloc((char *)ptr - diff, size + diff);
    if (ptr) {
        ptr = (char *)ptr + diff;
    }
    return ptr;
#elif HAVE_ALIGNED_MALLOC
    return _aligned_realloc(ptr, size + !size, ALIGN);
#else
    return realloc(ptr, size + !size);
#endif
}
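Under CONFIG_MEMALIGN_HACK, dt_realloc reads the padding that the allocator stored in the byte just before the returned pointer, so any matching free routine must step back over the same offset before calling free. The examples here do not show that routine; the sketch below is a hypothetical counterpart (the name dt_free is an assumption), mirroring the same #if branches.

/* Hypothetical counterpart, not shown in the examples above: frees a block
 * obtained from dt_malloc/dt_realloc by undoing the stored alignment offset
 * when the alignment hack is in use. */
void dt_free(void *ptr)
{
#if CONFIG_MEMALIGN_HACK
    if (ptr) {
        int diff = ((char *)ptr)[-1];   /* padding recorded by dt_malloc  */
        free((char *)ptr - diff);       /* original pointer from malloc() */
    }
#elif HAVE_ALIGNED_MALLOC
    _aligned_free(ptr);
#else
    free(ptr);
#endif
}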
Example #2
void *dt_mallocz(size_t size)
{
    void *ptr = dt_malloc(size);
    if (ptr) {
        memset(ptr, 0, size);
    }
    return ptr;
}
Example #3
/*
 * dt_malloc_w_debug
 *
 * Wrapper for the dt_malloc function to be used when in debug mode so that we
 * can print out where each allocation came from.
 *
 * Prints a line to the standard debugger.
 *
 * Parameters: size - The number of bytes to allocate.
 *             file - The value of __FILE__ in whatever file called this func.
 *             line - The line number in that file.
 *
 * Returns: A pointer to the newly created memory.
 */
void *dt_malloc_w_debug(size_t size, char *file, int line)
{
    /*
     * Local Variables.
     */
    void *temp_obj = dt_malloc(size);

    DT_MEM_LOG("Allocated %i bytes: %s(%i)\n", size, file, line);

    return(temp_obj);
}
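As the comment block notes, callers are expected to pass __FILE__ and __LINE__ themselves. A common way to make that automatic is a small call-site macro; the sketch below is an assumption (the DT_MALLOC name and the DEBUG guard are hypothetical, not taken from these examples).

/* Hypothetical convenience macro: routes allocations through the debug wrapper
 * when DEBUG is defined so every call site reports its own file and line. */
#ifdef DEBUG
#define DT_MALLOC(size) dt_malloc_w_debug((size), __FILE__, __LINE__)
#else
#define DT_MALLOC(size) dt_malloc(size)
#endif

/* usage: int *values = DT_MALLOC(16 * sizeof(*values)); */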
Example #4
void *dt_memdup(const void *p, size_t size)
{
    void *ptr = NULL;
    if (p) {
        ptr = dt_malloc(size);
        if (ptr) {
            memcpy(ptr, p, size);
        }
    }
    return ptr;
}
Example #5
void *dt_malloc(size_t size)
{
    void *ptr = NULL;
#if CONFIG_MEMALIGN_HACK
    long diff;
#endif

    /* let's disallow possibly ambiguous cases */
    if (size > (max_alloc_size - 32)) {
        return NULL;
    }

#if CONFIG_MEMALIGN_HACK
    ptr = malloc(size + ALIGN);
    if (!ptr) {
        return ptr;
    }
    diff              = ((~(long)ptr) & (ALIGN - 1)) + 1;
    ptr               = (char *)ptr + diff;
    ((char *)ptr)[-1] = diff;
#elif HAVE_POSIX_MEMALIGN
    if (size) //OS X on SDK 10.6 has a broken posix_memalign implementation
        if (posix_memalign(&ptr, ALIGN, size)) {
            ptr = NULL;
        }
#elif HAVE_ALIGNED_MALLOC
    ptr = _aligned_malloc(size, ALIGN);
#elif HAVE_MEMALIGN
#ifndef __DJGPP__
    ptr = memalign(ALIGN, size);
#else
    ptr = memalign(size, ALIGN);
#endif
    /* Why 64?
     * Indeed, we should align it:
     *   on  4 for 386
     *   on 16 for 486
     *   on 32 for 586, PPro - K6-III
     *   on 64 for K7 (maybe for P3 too).
     * Because L1 and L2 caches are aligned on those values.
     * But I don't want to code such logic here!
     */
    /* Why 32?
     * For AVX ASM. SSE / NEON needs only 16.
     * Why not larger? Because I did not see a difference in benchmarks ...
     */
    /* benchmarks with P3
     * memalign(64) + 1          3071, 3051, 3032
     * memalign(64) + 2          3051, 3032, 3041
     * memalign(64) + 4          2911, 2896, 2915
     * memalign(64) + 8          2545, 2554, 2550
     * memalign(64) + 16         2543, 2572, 2563
     * memalign(64) + 32         2546, 2545, 2571
     * memalign(64) + 64         2570, 2533, 2558
     *
     * BTW, malloc seems to do 8-byte alignment by default here.
     */
#else
    ptr = malloc(size);
#endif
    if (!ptr && !size) {
        size = 1;
        ptr = dt_malloc(1);
    }
#if CONFIG_MEMORY_POISONING
    if (ptr) {
        memset(ptr, FF_MEMORY_POISON, size);
    }
#endif
    return ptr;
}
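The CONFIG_MEMALIGN_HACK branch over-allocates by ALIGN bytes, rounds the pointer up to the next ALIGN boundary, and records how far it moved in the byte just below the returned address, which is what dt_realloc (and any matching free) later reads back. A minimal standalone sketch of that arithmetic, assuming ALIGN is 32 as the comments above suggest, is:

/* Standalone demo of the alignment-offset bookkeeping; not part of the library. */
#include <stdio.h>
#include <stdlib.h>

#define ALIGN 32

int main(void)
{
    void *raw = malloc(100 + ALIGN);                  /* over-allocate by ALIGN     */
    if (!raw)
        return 1;
    long diff = ((~(long)raw) & (ALIGN - 1)) + 1;     /* padding, always 1..ALIGN   */
    char *ptr = (char *)raw + diff;                   /* (long)ptr % ALIGN == 0 now */
    ptr[-1]   = (char)diff;                           /* remember how far we moved  */

    printf("raw=%p aligned=%p diff=%ld\n", raw, (void *)ptr, diff);

    free(ptr - ptr[-1]);                              /* recover the malloc pointer */
    return 0;
}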