Example #1
/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = false;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = false;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
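The key pattern above is publishing a relocated buffer with a single atomic pointer store: the payload is copied completely first, and only then does the store make the new location visible to readers. A minimal standalone sketch in C11, assuming pa_atomic_ptr_load()/pa_atomic_ptr_store() behave roughly like atomic loads and stores on a plain pointer (the names below are illustrative stand-ins, not PulseAudio API):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

/* Sketch only: stand-in for pa_atomic_ptr_t and its accessors. */
typedef struct { _Atomic(void *) v; } atomic_ptr;

static void *atomic_ptr_load(atomic_ptr *p) { return atomic_load(&p->v); }
static void atomic_ptr_store(atomic_ptr *p, void *x) { atomic_store(&p->v, x); }

/* Relocate a buffer the way memblock_make_local() does: fill the new
 * location completely, then publish it with one atomic store, so a
 * concurrent reader sees either the old or the new pointer, never a
 * half-copied buffer. */
static int relocate(atomic_ptr *data, size_t length) {
    void *new_data = malloc(length);
    if (!new_data)
        return -1;
    memcpy(new_data, atomic_ptr_load(data), length);
    atomic_ptr_store(data, new_data);
    return 0;
}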
Example #2
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
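Acquiring increments n_acquired before loading the data pointer, so code like memblock_make_local() can tell when a block is pinned and must not be relocated. A hedged usage sketch; pa_memblock_release() is the real counterpart in PulseAudio's API, while copy_out() and its parameters are illustrative:

#include <string.h>

/* Illustrative only: copy a block's payload out while it is pinned.
 * Between acquire and release the data pointer stays stable, which is
 * exactly what the n_acquired counter protects. */
static void copy_out(pa_memblock *b, void *dst, size_t n) {
    void *src = pa_memblock_acquire(b);
    memcpy(dst, src, n);
    pa_memblock_release(b);
}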
Example #3
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
Example #4
/* This is a copy of the function in mutex-posix.c */
pa_mutex* pa_static_mutex_get(pa_static_mutex *s, bool recursive, bool inherit_priority) {
    pa_mutex *m;

    pa_assert(s);

    /* First, check if already initialized and short-circuit */
    if ((m = pa_atomic_ptr_load(&s->ptr)))
        return m;

    /* OK, not initialized, so let's allocate, and fill in */
    m = pa_mutex_new(recursive, inherit_priority);
    if ((pa_atomic_ptr_cmpxchg(&s->ptr, NULL, m)))
        return m;

    pa_mutex_free(m);

    /* Hmm, filling in failed, so someone else must have filled it in
     * already */
    pa_assert_se(m = pa_atomic_ptr_load(&s->ptr));
    return m;
}
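This is the classic lock-free initialize-once idiom: speculatively allocate, try to install the pointer with compare-and-swap, and discard the allocation if another thread won the race. The same pattern in portable C11, as a sketch (all names below are illustrative, not PulseAudio API):

#include <stdatomic.h>

static _Atomic(void *) singleton;

void *get_once(void *(*make)(void), void (*destroy)(void *)) {
    void *m = atomic_load(&singleton);
    if (m)
        return m;                      /* fast path: already filled in */

    m = make();                        /* speculative allocation */
    void *expected = NULL;
    if (atomic_compare_exchange_strong(&singleton, &expected, m))
        return m;                      /* our pointer won the race */

    destroy(m);                        /* lost: free ours, use the winner */
    return expected;                   /* on failure, cmpxchg wrote the winner here */
}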
Example #5
void* pa_flist_pop(pa_flist *l) {
    unsigned idx, n;
    pa_atomic_ptr_t *cells;
#ifdef PROFILE
    unsigned len;
#endif

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    n = (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;

#ifdef PROFILE
    len = n;
#endif

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        void *p;

        _Y;
        p = pa_atomic_ptr_load(&cells[idx]);

        if (p) {

            _Y;
            if (!pa_atomic_ptr_cmpxchg(&cells[idx], p, NULL))
                continue;

            _Y;
            pa_atomic_inc(&l->read_idx);

            _Y;
            pa_atomic_dec(&l->length);

            return p;
        }

        _Y;
        idx = reduce(l, idx+1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find used cell after %u iterations.", len);
#endif

    return NULL;
}
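Two consumers may spot the same non-NULL cell, so ownership is decided by the compare-and-swap from p to NULL: only the thread whose cmpxchg succeeds returns the pointer, everyone else moves on. A reduced C11 sketch of just that claim step (illustrative names):

#include <stdatomic.h>

/* Returns the cell's pointer if this thread claimed it, NULL if the
 * cell was empty or another thread's cmpxchg got there first. */
void *try_claim(_Atomic(void *) *cell) {
    void *p = atomic_load(cell);
    if (!p)
        return NULL;
    if (!atomic_compare_exchange_strong(cell, &p, NULL))
        return NULL;                   /* lost the race; caller retries */
    return p;
}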
Example #6
void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    pa_assert(l);
    pa_assert(l->name);

    if (free_cb) {
        pa_flist_elem *elem;
        while ((elem = stack_pop(l, &l->stored)))
            free_cb(pa_atomic_ptr_load(&elem->ptr));
    }

    pa_xfree(l->name);
    pa_xfree(l);
}
Example #7
void* pa_flist_pop(pa_flist *l) {
    pa_flist_elem *elem;
    void *ptr;
    pa_assert(l);

    elem = stack_pop(l, &l->stored);
    if (elem == NULL)
        return NULL;

    ptr = pa_atomic_ptr_load(&elem->ptr);

    stack_push(l, &l->empty, elem);

    return ptr;
}
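A typical consumer of this API is an object cache: pop a recycled object if the free list has one, otherwise allocate fresh, mirroring how memblock_free() pushes blocks onto unused_memblocks. A hedged sketch of that pattern; thing and unused_things are hypothetical, while PA_STATIC_FLIST_DECLARE/PA_STATIC_FLIST_GET, pa_flist_pop() and pa_flist_push() are the real macros and functions:

/* Hypothetical example type; the flist machinery is the real API. */
typedef struct thing { int value; } thing;

PA_STATIC_FLIST_DECLARE(unused_things, 0, pa_xfree);

static thing *thing_new(void) {
    thing *t;
    /* Reuse a cached object if available, otherwise allocate fresh. */
    if (!(t = pa_flist_pop(PA_STATIC_FLIST_GET(unused_things))))
        t = pa_xnew(thing, 1);
    return t;
}

static void thing_free(thing *t) {
    /* Push back into the cache; fall back to freeing if the list is full. */
    if (pa_flist_push(PA_STATIC_FLIST_GET(unused_things), t) < 0)
        pa_xfree(t);
}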
Example #8
void* pa_shmasyncq_pop(pa_shmasyncq *l, int wait) {
    int idx;
    void *ret;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_SHMASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->read_idx);

    if (!(ret = pa_atomic_ptr_load(&cells[idx]))) {

        if (!wait)
            return NULL;

/*         pa_log("sleeping on pop"); */

        do {
            pa_fdsem_wait(l->write_fdsem);
        } while (!(ret = pa_atomic_ptr_load(&cells[idx])));
    }

    pa_assert(ret);

    /* Guaranteed to succeed if we only have a single reader */
    pa_assert_se(pa_atomic_ptr_cmpxchg(&cells[idx], ret, NULL));

    _Y;
    l->read_idx++;

    pa_fdsem_post(l->read_fdsem);

    return ret;
}
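Because there is exactly one reader, nothing can empty the cell between the load and the cmpxchg, which is why the cmpxchg is wrapped in pa_assert_se(). Stripped of the queue bookkeeping, the blocking path reduces to this generic single-consumer skeleton (C11 plus a POSIX semaphore; all names illustrative):

#include <stdatomic.h>
#include <semaphore.h>

/* Single-consumer blocking pop: sleep until a writer posts, then take
 * the value. With only one reader, no other thread can clear the cell
 * under us, so a plain store suffices to mark it empty. */
void *blocking_pop(_Atomic(void *) *cell, sem_t *write_signal) {
    void *ret;
    while (!(ret = atomic_load(cell)))
        sem_wait(write_signal);
    atomic_store(cell, NULL);
    return ret;
}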
Example #9
int pa_asyncq_read_before_poll(pa_asyncq *l) {
    unsigned idx;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_ASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->read_idx);

    for (;;) {
        if (pa_atomic_ptr_load(&cells[idx]))
            return -1;

        if (pa_fdsem_before_poll(l->write_fdsem) >= 0)
            return 0;
    }
}
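The loop closes the classic sleep/wakeup race: after arming the fdsem via pa_fdsem_before_poll(), it re-checks the cell, because an item may have been pushed in between. A hedged sketch of how an event loop might drive this; handle() is a placeholder, and the pa_asyncq_pop() signature is from memory and may differ between versions:

#include <poll.h>

extern void handle(void *item);        /* placeholder consumer */

static void consume(pa_asyncq *q) {
    struct pollfd pfd = { .fd = pa_asyncq_read_fd(q), .events = POLLIN };
    void *item;

    for (;;) {
        /* Arm the fd; returns < 0 while data is already waiting. */
        while (pa_asyncq_read_before_poll(q) < 0)
            while ((item = pa_asyncq_pop(q, false)))
                handle(item);

        poll(&pfd, 1, -1);             /* sleep until a writer signals */
        pa_asyncq_read_after_poll(q);
    }
}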
Example #10
void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    pa_assert(l);

    if (free_cb) {
        pa_atomic_ptr_t *cells;
        unsigned idx;

        cells = PA_FLIST_CELLS(l);

        for (idx = 0; idx < l->size; idx++) {
            void *p;

            if ((p = pa_atomic_ptr_load(&cells[idx])))
                free_cb(p);
        }
    }

    pa_xfree(l);
}
Example #11
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            bool call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
Example #12
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}