static void *
wmem_block_alloc(void *private_data, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) {
        return wmem_block_alloc_jumbo(allocator, size);
    }

    if (allocator->recycler_head &&
            WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) {

        /* If we can serve it from the recycler, do so. */
        chunk = allocator->recycler_head;
    }
    else {
        if (allocator->master_head &&
                WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) {

            /* Recycle the head of the master list if necessary. */
            chunk = allocator->master_head;
            wmem_block_pop_master(allocator);
            wmem_block_add_to_recycler(allocator, chunk);
        }

        if (!allocator->master_head) {
            /* Allocate a new block if necessary. */
            wmem_block_new_block(allocator);
        }

        chunk = allocator->master_head;
    }

    /* if our chunk is used, something is wrong */
    g_assert(! chunk->used);
    /* if we still don't have the space at this point, something is wrong */
    g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));

    /* Split our chunk into two to preserve any trailing free space */
    wmem_block_split_free_chunk(allocator, chunk, size);

    /* if our split reduced our size too much, something went wrong */
    g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));
    /* the resulting chunk should not be in either free list */
    g_assert(chunk != allocator->master_head);
    g_assert(chunk != allocator->recycler_head);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);

    /* mark it as used */
    chunk->used = TRUE;

    /* and return the user's pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}
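For context, this function is not called directly: it sits in the allocator's function-pointer table and is reached through the public wmem API. The following is a minimal usage sketch, assuming the standard wmem entry points (wmem_allocator_new, wmem_alloc, wmem_free, wmem_destroy_allocator) and assuming 9 MB exceeds WMEM_BLOCK_MAX_ALLOC_SIZE; the header path varies between Wireshark versions.

#include <wsutil/wmem/wmem.h>    /* epan/wmem/wmem.h in older trees */

int
main(void)
{
    /* wmem_alloc() dispatches to wmem_block_alloc() through the allocator's
     * function pointers once a block allocator has been created. */
    wmem_allocator_t *pool = wmem_allocator_new(WMEM_ALLOCATOR_BLOCK);

    void *small = wmem_alloc(pool, 64);              /* normal chunk path */
    void *big   = wmem_alloc(pool, 9 * 1024 * 1024); /* assumed jumbo path */

    wmem_free(pool, small);
    wmem_free(pool, big);
    wmem_destroy_allocator(pool);   /* releases every underlying block */
    return 0;
}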
/* Takes a used chunk and a size, and splits it into two chunks if possible.
 * The first chunk can hold at least `size` bytes of data, while the second gets
 * whatever's left over. The second is marked as unused and is added to the
 * recycler. */
static void
wmem_block_split_used_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    size_t aligned_size, available;
    gboolean last;

    g_assert(chunk->used);
    g_assert(WMEM_CHUNK_DATA_LEN(chunk) >= size);

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (aligned_size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* in this case we don't have enough space to really split it, so
         * it's basically a no-op */
        return;
    }
    /* otherwise, we have room to split it, though the remaining free chunk
     * may still not be usefully large */

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (guint32) aligned_size;
    chunk->last = FALSE;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* set the values for the new free chunk */
    extra->len   = (guint32) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = FALSE;
    extra->jumbo = FALSE;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }

    /* Merge it to its right if possible (it can't be merged left, obviously).
     * This also adds it to the recycler. */
    wmem_block_merge_free(allocator, extra);
}
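The alignment arithmetic above is the subtle part: the first chunk must keep the aligned payload plus a header for the new trailing chunk. Here is a self-contained worked example, using hypothetical stand-ins for WMEM_ALIGN_SIZE and WMEM_CHUNK_HEADER_SIZE (the real values are build-dependent; 8 is for illustration only):

#include <stdio.h>
#include <stddef.h>

#define ALIGN_AMOUNT      8
#define ALIGN_SIZE(s)     (((s) + ALIGN_AMOUNT - 1) & ~(size_t)(ALIGN_AMOUNT - 1))
#define CHUNK_HEADER_SIZE 8

int
main(void)
{
    size_t chunk_len = 64;  /* total length of the used chunk, header included */
    size_t data_len  = chunk_len - CHUNK_HEADER_SIZE;  /* 56 usable bytes */
    size_t size      = 20;  /* the caller shrinks the allocation to 20 bytes */

    /* Same test as in wmem_block_split_used_chunk: the first chunk keeps the
     * aligned payload plus a header for the new trailing chunk. */
    size_t aligned_size = ALIGN_SIZE(size) + CHUNK_HEADER_SIZE;  /* 24 + 8 */

    if (aligned_size > data_len) {
        printf("no room to split: the shrink is a no-op\n");
    } else {
        printf("first chunk: %zu bytes total, trailing free chunk: %zu\n",
               aligned_size, chunk_len - aligned_size);  /* 32 and 32 */
    }
    return 0;
}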
Example #3
/* Adds a chunk to the recycler. */
static void
wmem_block_add_to_recycler(wmem_block_allocator_t *allocator,
                           wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    if (WMEM_CHUNK_DATA_LEN(chunk) < sizeof(wmem_block_free_t)) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (! allocator->recycler_head) {
        /* First one */
        free_chunk->next         = chunk;
        free_chunk->prev         = chunk;
        allocator->recycler_head = chunk;
    }
    else {
        free_chunk->next = allocator->recycler_head;
        free_chunk->prev = WMEM_GET_FREE(allocator->recycler_head)->prev;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;

        if (chunk->len > allocator->recycler_head->len) {
            allocator->recycler_head = chunk;
        }
    }
}
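The recycler is a circular doubly-linked list threaded through the data areas of free chunks, and a newly added chunk replaces the head if it is bigger. The following shows the same insertion pattern in isolation; node_t and recycler_insert are hypothetical names, not part of wmem:

#include <stdio.h>
#include <stddef.h>

typedef struct node {
    size_t       len;
    struct node *next, *prev;
} node_t;

static node_t *
recycler_insert(node_t *head, node_t *n)
{
    if (!head) {
        n->next = n->prev = n;              /* first entry points at itself */
        return n;
    }

    /* Splice n in just behind the head... */
    n->next = head;
    n->prev = head->prev;
    n->next->prev = n;
    n->prev->next = n;

    /* ...and promote it if it is bigger than the current head. */
    return (n->len > head->len) ? n : head;
}

int
main(void)
{
    node_t a = { 32, NULL, NULL }, b = { 128, NULL, NULL };
    node_t *head = recycler_insert(NULL, &a);

    head = recycler_insert(head, &b);
    printf("head len: %zu\n", head->len);   /* prints 128 */
    return 0;
}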
Example #4
/* DEBUG AND TEST */
static int
wmem_block_verify_block(wmem_block_hdr_t *block)
{
    int                 total_free_space = 0;
    guint32             total_len;
    wmem_block_chunk_t *chunk;

    chunk     = WMEM_BLOCK_TO_CHUNK(block);
    total_len = WMEM_BLOCK_HEADER_SIZE;

    if (chunk->jumbo) {
        /* We can tell nothing else about jumbo chunks except that they are
         * always used. */
        return 0;
    }

    g_assert(chunk->prev == 0);

    do {
        total_len += chunk->len;

        g_assert(chunk->len >= WMEM_CHUNK_HEADER_SIZE);
        g_assert(!chunk->jumbo);

        if (WMEM_CHUNK_NEXT(chunk)) {
            g_assert(chunk->len == WMEM_CHUNK_NEXT(chunk)->prev);
        }

        if (!chunk->used &&
                WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {

            total_free_space += chunk->len;

            if (!chunk->last) {
                g_assert(WMEM_GET_FREE(chunk)->next);
                g_assert(WMEM_GET_FREE(chunk)->prev);
            }
        }

        chunk = WMEM_CHUNK_NEXT(chunk);
    } while (chunk);

    g_assert(total_len == WMEM_BLOCK_SIZE);

    return total_free_space;
}
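This verifies a single block. In the real allocator a wrapper walks every block; a hypothetical driver in that spirit, assuming the allocator's block_list field and the next pointer of wmem_block_hdr_t from wmem_allocator_block.c, might look like:

static int
wmem_block_verify_all(wmem_block_allocator_t *allocator)
{
    wmem_block_hdr_t *block = allocator->block_list;
    int total_free_space = 0;

    /* Verify each block in turn, accumulating the free space found. */
    while (block) {
        total_free_space += wmem_block_verify_block(block);
        block = block->next;
    }

    return total_free_space;
}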
Example #5
static void *
wmem_block_realloc(void *private_data, void *ptr, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        return wmem_block_realloc_jumbo(allocator, chunk, size);
    }

    if (size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* grow */
        wmem_block_chunk_t *tmp;

        tmp = WMEM_CHUNK_NEXT(chunk);

        if (tmp && (!tmp->used) &&
            (size < WMEM_CHUNK_DATA_LEN(chunk) + tmp->len)) {
            /* the next chunk is free and has enough extra, so just grab
             * from that */
            size_t split_size;

            /* we ask for the next chunk to be split, but we don't end up
             * using the split chunk header (it just gets merged into this one),
             * so we want the split to be of (size - curdatalen - header_size).
             * However, this can underflow by header_size, so we do a quick
             * check here and floor the value to 0. */
            split_size = size - WMEM_CHUNK_DATA_LEN(chunk);

            if (split_size < WMEM_CHUNK_HEADER_SIZE) {
                split_size = 0;
            }
            else {
                split_size -= WMEM_CHUNK_HEADER_SIZE;
            }

            wmem_block_split_free_chunk(allocator, tmp, split_size);

            /* Now do a 'quickie' merge between the current block and the left-
             * hand side of the split. Simply calling wmem_block_merge_free
             * might confuse things, since we may temporarily have two blocks
             * to our right that are both free (and it isn't guaranteed to
             * handle that case). Update our 'next' count and last flag, and
             * our (new) successor's 'prev' count */
            chunk->len += tmp->len;
            chunk->last = tmp->last;
            tmp = WMEM_CHUNK_NEXT(chunk);
            if (tmp) {
                tmp->prev = chunk->len;
            }

            /* Now cycle the recycler */
            wmem_block_cycle_recycler(allocator);

            /* And return the same old pointer */
            return ptr;
        }
        else {
            /* no room to grow, need to alloc, copy, free */
            void *newptr;

            newptr = wmem_block_alloc(private_data, size);
            memcpy(newptr, ptr, WMEM_CHUNK_DATA_LEN(chunk));
            wmem_block_free(private_data, ptr);

            /* No need to cycle the recycler, alloc and free both did that
             * already */

            return newptr;
        }
    }
    else if (size < WMEM_CHUNK_DATA_LEN(chunk)) {
        /* shrink */
        wmem_block_split_used_chunk(allocator, chunk, size);

        /* Now cycle the recycler */
        wmem_block_cycle_recycler(allocator);

        return ptr;
    }

    /* no-op */
    return ptr;
}
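Seen from the public API, the three branches correspond to growing, shrinking, and same-size calls. A hedged sketch, assuming wmem_alloc, wmem_realloc and wmem_free from the wmem core (sizes are arbitrary):

#include <wsutil/wmem/wmem.h>    /* header path varies by Wireshark version */

static void
realloc_paths_demo(wmem_allocator_t *pool)
{
    char *p = (char *)wmem_alloc(pool, 16);

    p = (char *)wmem_realloc(pool, p, 200);  /* grow: in place if the next
                                                chunk is free and big enough,
                                                else alloc + copy + free */
    p = (char *)wmem_realloc(pool, p, 8);    /* shrink: splits the used chunk */
    p = (char *)wmem_realloc(pool, p, 8);    /* same size: returned unchanged */

    wmem_free(pool, p);
}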
Example #6
/* Takes an unused chunk and a size, and splits it into two chunks if possible.
 * The first chunk (at the same address as the input chunk) is guaranteed to
 * hold at least `size` bytes of data, and to not be in either the master or
 * recycler lists.
 *
 * The second chunk gets whatever data is left over. It is marked unused and
 * replaces the input chunk in whichever list it originally inhabited. */
static void
wmem_block_split_free_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    wmem_block_free_t  *old_blk, *new_blk;
    size_t aligned_size, available;
    gboolean last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size + sizeof(wmem_block_free_t)) {
        /* If the available space is not enough to store all of
         * (hdr + requested size + alignment padding + hdr + free-header) then
         * just remove the current chunk from the free list and return, since we
         * can't usefully split it. */
        if (chunk == allocator->master_head) {
            wmem_block_pop_master(allocator);
        }
        else if (WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {
            wmem_block_remove_from_recycler(allocator, chunk);
        }
        return;
    }

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (guint32) aligned_size;
    chunk->last = FALSE;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* Now we move the free chunk to its new address ('extra') without
     * changing its position in whichever free list it is in.
     *
     * Note that the new chunk header 'extra' may overlap the old free header,
     * so we have to copy the free header before we write anything to extra.
     */
    old_blk = WMEM_GET_FREE(chunk);
    new_blk = WMEM_GET_FREE(extra);

    if (allocator->master_head == chunk) {
        new_blk->prev = old_blk->prev;
        new_blk->next = old_blk->next;

        if (old_blk->next) {
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        allocator->master_head = extra;
    }
    else {
        if (old_blk->prev == chunk) {
            new_blk->prev = extra;
            new_blk->next = extra;
        }
        else {
            new_blk->prev = old_blk->prev;
            new_blk->next = old_blk->next;

            WMEM_GET_FREE(old_blk->prev)->next = extra;
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = extra;
        }
    }

    /* Now that we've copied over the free-list stuff (which may have overlapped
     * with our new chunk header) we can safely write our new chunk header. */
    extra->len   = (guint32) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = FALSE;
    extra->jumbo = FALSE;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }
}
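The copy-before-write warning in the comments deserves a concrete number: the old free header starts at chunk + WMEM_CHUNK_HEADER_SIZE, while 'extra' is written at chunk + aligned_size, so for small requests the two ranges overlap. A standalone sketch with hypothetical sizes (8-byte chunk header, pointer-pair free header; the real values depend on the build):

#include <stdio.h>
#include <stddef.h>

#define CHUNK_HEADER_SIZE 8
#define FREE_HEADER_SIZE  (2 * sizeof(void *))   /* next + prev pointers */

int
main(void)
{
    /* For a small request where WMEM_ALIGN_SIZE(size) == 8: */
    size_t aligned_size = 8 + CHUNK_HEADER_SIZE;  /* offset of 'extra' */

    /* The old free header occupies
     * [CHUNK_HEADER_SIZE, CHUNK_HEADER_SIZE + FREE_HEADER_SIZE) relative to
     * chunk; 'extra' is written at offset aligned_size. */
    if (aligned_size < CHUNK_HEADER_SIZE + FREE_HEADER_SIZE) {
        printf("overlap: copy the free header before writing extra\n");
    } else {
        printf("no overlap\n");
    }
    return 0;
}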
Example #7
/* Takes a free chunk and checks the chunks to its immediate right and left in
 * the block. If they are also free, the contiguous free chunks are merged into
 * a single free chunk. The resulting chunk ends up in either the master list or
 * the recycler, depending on where the merged chunks were originally.
 */
static void
wmem_block_merge_free(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_chunk_t *tmp;
    wmem_block_chunk_t *left_free  = NULL;
    wmem_block_chunk_t *right_free = NULL;

    /* Check the chunk to our right. If it is free, merge it into our current
     * chunk. If it is big enough to hold a free-header, save it for later (we
     * need to know about the left chunk before we decide what goes where). */
    tmp = WMEM_CHUNK_NEXT(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            right_free = tmp;
        }
        chunk->len += tmp->len;
        chunk->last = tmp->last;
    }

    /* Check the chunk to our left. If it is free, merge our current chunk into
     * it (thus chunk = tmp). As before, save it if it has enough space to
     * hold a free-header. */
    tmp = WMEM_CHUNK_PREV(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            left_free = tmp;
        }
        tmp->len += chunk->len;
        tmp->last = chunk->last;
        chunk = tmp;
    }

    /* The length of our chunk may have changed. If we have a chunk following,
     * update its 'prev' count. */
    if (!chunk->last) {
        WMEM_CHUNK_NEXT(chunk)->prev = chunk->len;
    }

    /* Now that the chunk headers are merged and consistent, we need to figure
     * out what goes where in which free list. */
    if (right_free && right_free == allocator->master_head) {
        /* If we merged right, and that chunk was the head of the master list,
         * then we leave the resulting chunk at the head of the master list. */
        wmem_block_free_t *moved;
        if (left_free) {
            wmem_block_remove_from_recycler(allocator, left_free);
        }
        moved = WMEM_GET_FREE(chunk);
        moved->prev = NULL;
        moved->next = WMEM_GET_FREE(right_free)->next;
        allocator->master_head = chunk;
        if (moved->next) {
            WMEM_GET_FREE(moved->next)->prev = chunk;
        }
    }
    else {
        /* Otherwise, we remove the right-merged chunk (if there was one) from
         * the recycler. Then, if we merged left we have nothing to do, since
         * that recycler entry is still valid. If not, we add the chunk. */
        if (right_free) {
            wmem_block_remove_from_recycler(allocator, right_free);
        }
        if (!left_free) {
            wmem_block_add_to_recycler(allocator, chunk);
        }
    }
}
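Everything above rests on boundary tags: chunk->prev holds the previous chunk's total length and chunk->len this chunk's, so neighbours are plain pointer arithmetic and merging is addition. A self-contained miniature of a right-merge, with hypothetical chunk_t, NEXT and PREV mirroring WMEM_CHUNK_NEXT and WMEM_CHUNK_PREV:

#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint32_t prev;   /* total length of the previous chunk, 0 if first */
    uint32_t len;    /* total length of this chunk, header included */
    uint8_t  used;
    uint8_t  last;
} chunk_t;

#define NEXT(c) ((c)->last ? NULL : (chunk_t *)((uint8_t *)(c) + (c)->len))
#define PREV(c) ((c)->prev ? (chunk_t *)((uint8_t *)(c) - (c)->prev) : NULL)

int
main(void)
{
    uint64_t backing[12] = { 0 };         /* 96 aligned bytes of "block" */
    uint8_t *block = (uint8_t *)backing;
    chunk_t *a, *b, *c;

    /* Lay out three adjacent 32-byte chunks: free, free, used + last. */
    a = (chunk_t *)block;  *a = (chunk_t){  0, 32, 0, 0 };
    b = NEXT(a);           *b = (chunk_t){ 32, 32, 0, 0 };
    c = NEXT(b);           *c = (chunk_t){ 32, 32, 1, 1 };

    /* Right-merge b into a, as wmem_block_merge_free does when the chunk to
     * the right is free: grow the left chunk, inherit its new 'last' flag,
     * and fix the successor's back-pointer. */
    a->len += b->len;
    a->last = b->last;
    if (!a->last) {
        NEXT(a)->prev = a->len;
    }

    printf("merged len: %u, successor's prev: %u\n", a->len, NEXT(a)->prev);
    return 0;
}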