static int
wmem_block_verify_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->recycler_head;
    if (!cur) {
        return 0;
    }

    do {
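        /* The recycler is a circular doubly-linked list of free chunks, so
         * every entry must be unused and must have prev/next pointers that
         * point back at it. */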
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert(! cur->used);

        g_assert(cur_free->prev);
        g_assert(cur_free->next);

        g_assert(WMEM_GET_FREE(cur_free->prev)->next == cur);
        g_assert(WMEM_GET_FREE(cur_free->next)->prev == cur);

        cur = cur_free->next;
    } while (cur != allocator->recycler_head);

    return free_space;
}
/* Adds a chunk to the recycler. */
static void
wmem_block_add_to_recycler(wmem_block_allocator_t *allocator,
                           wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    if (WMEM_CHUNK_DATA_LEN(chunk) < sizeof(wmem_block_free_t)) {
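        /* Chunks too small to hold a free-list header cannot be tracked, so
         * they are left out of the recycler. They can still be reclaimed
         * later when an adjacent chunk is freed and merged. */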
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (! allocator->recycler_head) {
        /* First one */
        free_chunk->next         = chunk;
        free_chunk->prev         = chunk;
        allocator->recycler_head = chunk;
    }
    else {
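        /* Insert the chunk just before the current head (the 'tail' of the
         * circular list), then promote it to head if it is larger, so bigger
         * chunks tend to sit at the front of the recycler. */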
        free_chunk->next = allocator->recycler_head;
        free_chunk->prev = WMEM_GET_FREE(allocator->recycler_head)->prev;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;

        if (chunk->len > allocator->recycler_head->len) {
            allocator->recycler_head = chunk;
        }
    }
}
static int
wmem_block_verify_master_list(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->master_head;
    if (!cur) {
        return 0;
    }

    g_assert(WMEM_GET_FREE(cur)->prev == NULL);

    while (cur) {
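        /* Every entry in the master list must be an unused chunk, and every
         * entry other than the head must still be a completely untouched
         * block. */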
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert(! cur->used);

        if (cur_free->next) {
            g_assert(WMEM_GET_FREE(cur_free->next)->prev == cur);
        }

        if (cur != allocator->master_head) {
            g_assert(cur->len == WMEM_BLOCK_SIZE);
        }

        cur = cur_free->next;
    }

    return free_space;
}
/* Removes a chunk from the recycler. */
static void
wmem_block_remove_from_recycler(wmem_block_allocator_t *allocator,
                                wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    g_assert (! chunk->used);

    free_chunk = WMEM_GET_FREE(chunk);

    g_assert(free_chunk->prev && free_chunk->next);

    if (free_chunk->prev == chunk && free_chunk->next == chunk) {
        /* Only one item in recycler, just empty it. */
        g_assert(allocator->recycler_head == chunk);
        allocator->recycler_head = NULL;
    }
    else {
        /* Two or more items, usual doubly-linked-list removal. It's circular
         * so we don't need to worry about null-checking anything, which is
         * nice. */
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = free_chunk->next;
        }
    }
}
/* Pushes a chunk onto the master stack. */
static void
wmem_block_push_master(wmem_block_allocator_t *allocator,
                       wmem_block_chunk_t *chunk)
{
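    /* The master list is a plain NULL-terminated stack: link the new chunk in
     * at the front and fix up the old head's back-pointer if there is one. */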
    wmem_block_free_t *free_chunk;

    free_chunk = WMEM_GET_FREE(chunk);
    free_chunk->prev = NULL;
    free_chunk->next = allocator->master_head;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
    }
    allocator->master_head = chunk;
}
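/* Returns entirely-unused blocks to the OS and rebuilds the block list from
 * the rest. */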
static void
wmem_block_gc(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_hdr_t   *cur, *next;
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    /* Walk through the blocks, adding used blocks to the new list and
     * completely destroying unused blocks. */
    cur = allocator->block_list;
    allocator->block_list = NULL;

    while (cur) {
        chunk = WMEM_BLOCK_TO_CHUNK(cur);
        next  = cur->next;

        if (!chunk->jumbo && !chunk->used && chunk->last) {
            /* If the first chunk is also the last, and is unused, then
             * the block as a whole is entirely unused, so return it to
             * the OS and remove it from whatever lists it is in. */
            free_chunk = WMEM_GET_FREE(chunk);
            if (free_chunk->next) {
                WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
            }
            if (free_chunk->prev) {
                WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
            }
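            /* If this chunk was the head of one of the free lists, advance
             * the head. The recycler is circular, so a chunk whose next
             * pointer is itself was the only entry and the recycler becomes
             * empty. */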
            if (allocator->recycler_head == chunk) {
                if (free_chunk->next == chunk) {
                    allocator->recycler_head = NULL;
                }
                else {
                    allocator->recycler_head = free_chunk->next;
                }
            }
            else if (allocator->master_head == chunk) {
                allocator->master_head = free_chunk->next;
            }
            wmem_free(NULL, cur);
        }
        else {
            /* part of this block is used, so add it to the new block list */
            wmem_block_add_to_block_list(allocator, cur);
        }

        cur = next;
    }
}
/* Removes the top chunk from the master stack. */
static void
wmem_block_pop_master(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->master_head;

    free_chunk = WMEM_GET_FREE(chunk);

    allocator->master_head = free_chunk->next;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = NULL;
    }
}
/* DEBUG AND TEST */
static int
wmem_block_verify_block(wmem_block_hdr_t *block)
{
    int                 total_free_space = 0;
    guint32             total_len;
    wmem_block_chunk_t *chunk;

    chunk     = WMEM_BLOCK_TO_CHUNK(block);
    total_len = WMEM_BLOCK_HEADER_SIZE;

    if (chunk->jumbo) {
        /* We can tell nothing else about jumbo chunks except that they are
         * always used. */
        return 0;
    }

    g_assert(chunk->prev == 0);

    do {
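        /* Each chunk's length must cover at least its header, adjacent
         * chunks' len/prev fields must agree, and the chunk lengths plus the
         * block header must add up to exactly WMEM_BLOCK_SIZE. */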
        total_len += chunk->len;

        g_assert(chunk->len >= WMEM_CHUNK_HEADER_SIZE);
        g_assert(!chunk->jumbo);

        if (WMEM_CHUNK_NEXT(chunk)) {
            g_assert(chunk->len == WMEM_CHUNK_NEXT(chunk)->prev);
        }

        if (!chunk->used &&
                WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {

            total_free_space += chunk->len;

            if (!chunk->last) {
                g_assert(WMEM_GET_FREE(chunk)->next);
                g_assert(WMEM_GET_FREE(chunk)->prev);
            }
        }

        chunk = WMEM_CHUNK_NEXT(chunk);
    } while (chunk);

    g_assert(total_len == WMEM_BLOCK_SIZE);

    return total_free_space;
}
/* Cycles the recycler. See the design notes at the top of this file for more
 * details. */
static void
wmem_block_cycle_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->recycler_head;

    if (chunk == NULL) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->next->len < chunk->len) {
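        /* The chunk after the head is smaller, so swap the two within the
         * ring while leaving the head pointer where it is. For example (chunk
         * lengths in parentheses):
         *
         *     ... <-> P <-> C(64) <-> N(32) <-> ...
         *
         * becomes
         *
         *     ... <-> P <-> N(32) <-> C(64) <-> ...
         *
         * with C still at recycler_head, so the head stays a comparatively
         * large chunk. */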
        /* Hold the current head fixed during rotation. */
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;

        free_chunk->prev = free_chunk->next;
        free_chunk->next = WMEM_GET_FREE(free_chunk->next)->next;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;
    }
    else {
        /* Just rotate everything. */
        allocator->recycler_head = free_chunk->next;
    }
}
/* Takes an unused chunk and a size, and splits it into two chunks if possible.
 * The first chunk (at the same address as the input chunk) is guaranteed to
 * hold at least `size` bytes of data, and to not be in either the master or
 * recycler lists.
 *
 * The second chunk gets whatever data is left over. It is marked unused and
 * replaces the input chunk in whichever list it originally inhabited. */
static void
wmem_block_split_free_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    wmem_block_free_t  *old_blk, *new_blk;
    size_t aligned_size, available;
    gboolean last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;
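    /* aligned_size is now the total length of the chunk we will carve off for
     * the caller: the chunk header plus the requested size rounded up to the
     * alignment boundary. */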

    if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size + sizeof(wmem_block_free_t)) {
        /* If the available space is not enough to store all of
         * (hdr + requested size + alignment padding + hdr + free-header) then
         * just remove the current chunk from the free list and return, since we
         * can't usefully split it. */
        if (chunk == allocator->master_head) {
            wmem_block_pop_master(allocator);
        }
        else if (WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {
            wmem_block_remove_from_recycler(allocator, chunk);
        }
        return;
    }

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (guint32) aligned_size;
    chunk->last = FALSE;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* Now we relocate the free-list entry to the new chunk without changing
     * its position in whichever list it is in.
     *
     * Note that the new chunk header 'extra' may overlap the old free header,
     * so we have to copy the free header before we write anything to extra.
     */
    old_blk = WMEM_GET_FREE(chunk);
    new_blk = WMEM_GET_FREE(extra);

    if (allocator->master_head == chunk) {
        new_blk->prev = old_blk->prev;
        new_blk->next = old_blk->next;

        if (old_blk->next) {
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        allocator->master_head = extra;
    }
    else {
        if (old_blk->prev == chunk) {
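            /* The chunk being split was the only entry in the recycler, so
             * the relocated free header just points back at the new chunk
             * itself. */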
            new_blk->prev = extra;
            new_blk->next = extra;
        }
        else {
            new_blk->prev = old_blk->prev;
            new_blk->next = old_blk->next;

            WMEM_GET_FREE(old_blk->prev)->next = extra;
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = extra;
        }
    }

    /* Now that we've copied over the free-list stuff (which may have overlapped
     * with our new chunk header) we can safely write our new chunk header. */
    extra->len   = (guint32) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = FALSE;
    extra->jumbo = FALSE;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }
}
/* Takes a free chunk and checks the chunks to its immediate right and left in
 * the block. If they are also free, the contiguous free chunks are merged into
 * a single free chunk. The resulting chunk ends up in either the master list or
 * the recycler, depending on where the merged chunks were originally.
 */
static void
wmem_block_merge_free(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_chunk_t *tmp;
    wmem_block_chunk_t *left_free  = NULL;
    wmem_block_chunk_t *right_free = NULL;

    /* Check the chunk to our right. If it is free, merge it into our current
     * chunk. If it is big enough to hold a free-header, save it for later (we
     * need to know about the left chunk before we decide what goes where). */
    tmp = WMEM_CHUNK_NEXT(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            right_free = tmp;
        }
        chunk->len += tmp->len;
        chunk->last = tmp->last;
    }

    /* Check the chunk to our left. If it is free, merge our current chunk into
     * it (thus chunk = tmp). As before, save it if it has enough space to
     * hold a free-header. */
    tmp = WMEM_CHUNK_PREV(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            left_free = tmp;
        }
        tmp->len += chunk->len;
        tmp->last = chunk->last;
        chunk = tmp;
    }

    /* The length of our chunk may have changed. If we have a chunk following,
     * update its 'prev' count. */
    if (!chunk->last) {
        WMEM_CHUNK_NEXT(chunk)->prev = chunk->len;
    }

    /* Now that the chunk headers are merged and consistent, we need to figure
     * out what goes where in which free list. */
    if (right_free && right_free == allocator->master_head) {
        /* If we merged right, and that chunk was the head of the master list,
         * then we leave the resulting chunk at the head of the master list. */
        wmem_block_free_t *moved;
        if (left_free) {
            wmem_block_remove_from_recycler(allocator, left_free);
        }
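        /* Splice the merged chunk into the master list in place of the old
         * head. */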
        moved = WMEM_GET_FREE(chunk);
        moved->prev = NULL;
        moved->next = WMEM_GET_FREE(right_free)->next;
        allocator->master_head = chunk;
        if (moved->next) {
            WMEM_GET_FREE(moved->next)->prev = chunk;
        }
    }
    else {
        /* Otherwise, we remove the right-merged chunk (if there was one) from
         * the recycler. Then, if we merged left we have nothing to do, since
         * that recycler entry is still valid. If not, we add the chunk. */
        if (right_free) {
            wmem_block_remove_from_recycler(allocator, right_free);
        }
        if (!left_free) {
            wmem_block_add_to_recycler(allocator, chunk);
        }
    }
}