Example #1
static void thread_func(void *data) {
    char *s = data;
    int n = 0;
    int b = 1;

    while (!quit) {
        char *text;

        /* Allocate some memory, if possible take it from the flist */
        if (b && (text = pa_flist_pop(flist)))
            pa_log("%s: popped '%s'", s, text);
        else {
            text = pa_sprintf_malloc("Block %i, allocated by %s", n++, s);
            pa_log("%s: allocated '%s'", s, text);
        }

        b = !b;

        spin();

        /* Give it back to the flist if possible */
        if (pa_flist_push(flist, text) < 0) {
            pa_log("%s: failed to push back '%s'", s, text);
            pa_xfree(text);
        } else
            pa_log("%s: pushed", s);

        spin();
    }

    if (pa_flist_push(flist, s) < 0)
        pa_xfree(s);
}
Example #2
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

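    /* Unlink the item from the doubly-linked list of blocks, fixing up the head/tail pointers */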
    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

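    /* Make sure the read/write pointers no longer point at the removed item */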
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

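    /* Recycle the list item through the static free list, or free it if the push fails */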
    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
Example #3
void* pa_prioq_remove(pa_prioq *q, pa_prioq_item *i) {
    void *p;

    pa_assert(q);
    pa_assert(i);
    pa_assert(q->n_items >= 1);

    p = i->value;

    if (q->n_items-1 == i->idx) {
        /* We are the last entry, so just remove it and we're done */
        q->n_items--;

    } else {

        /* We are not the last entry, so we need to replace
         * ourselves with the last node and reshuffle */

        q->items[i->idx] = q->items[q->n_items-1];
        q->items[i->idx]->idx = i->idx;
        q->n_items--;

        shuffle_down(q, i->idx);
    }

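    /* Give the item structure back to the static free list, or free it if the push fails */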
    if (pa_flist_push(PA_STATIC_FLIST_GET(items), i) < 0)
        pa_xfree(i);

    return p;
}
Example #4
void pa_asyncq_free(pa_asyncq *l, pa_free_cb_t free_cb) {
    struct localq *q;
    pa_assert(l);

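    /* If a free callback was supplied, drain the queue and free each remaining item */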
    if (free_cb) {
        void *p;

        while ((p = pa_asyncq_pop(l, 0)))
            free_cb(p);
    }

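    /* Also flush the local queue of items that were never pushed into the asyncq proper */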
    while ((q = l->localq)) {
        if (free_cb)
            free_cb(q->data);

        PA_LLIST_REMOVE(struct localq, l->localq, q);

        if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
            pa_xfree(q);
    }

    pa_fdsem_free(l->read_fdsem);
    pa_fdsem_free(l->write_fdsem);
    pa_xfree(l);
}
Example #5
static void asyncmsgq_free(pa_asyncmsgq *a) {
    struct asyncmsgq_item *i;
    pa_assert(a);

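    /* Drop every message still in the queue, releasing its object, memchunk and userdata */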
    while ((i = pa_asyncq_pop(a->asyncq, false))) {

        pa_assert(!i->semaphore);

        if (i->object)
            pa_msgobject_unref(i->object);

        if (i->memchunk.memblock)
            pa_memblock_unref(i->memchunk.memblock);

        if (i->free_cb)
            i->free_cb(i->userdata);

        if (pa_flist_push(PA_STATIC_FLIST_GET(asyncmsgq), i) < 0)
            pa_xfree(i);
    }

    pa_asyncq_free(a->asyncq, NULL);
    pa_mutex_free(a->mutex);
    pa_xfree(a);
}
Example #6
void pa_asyncmsgq_done(pa_asyncmsgq *a, int ret) {
    pa_assert(a);
    pa_assert(PA_REFCNT_VALUE(a) > 0);
    pa_assert(a->current);

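    /* Synchronous senders wait on a semaphore, so post it; otherwise clean up the item ourselves */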
    if (a->current->semaphore) {
        a->current->ret = ret;
        pa_semaphore_post(a->current->semaphore);
    } else {

        if (a->current->free_cb)
            a->current->free_cb(a->current->userdata);

        if (a->current->object)
            pa_msgobject_unref(a->current->object);

        if (a->current->memchunk.memblock)
            pa_memblock_unref(a->current->memchunk.memblock);

        if (pa_flist_push(PA_STATIC_FLIST_GET(asyncmsgq), a->current) < 0)
            pa_xfree(a->current);
    }

    a->current = NULL;
}
Example #7
int pa_asyncmsgq_send(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk) {
    struct asyncmsgq_item i;
    pa_assert(PA_REFCNT_VALUE(a) > 0);

    i.code = code;
    i.object = object;
    i.userdata = (void*) userdata;
    i.free_cb = NULL;
    i.ret = -1;
    i.offset = offset;
    if (chunk) {
        pa_assert(chunk->memblock);
        i.memchunk = *chunk;
    } else
        pa_memchunk_reset(&i.memchunk);

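    /* Reuse a semaphore from the static free list if possible, otherwise create a new one */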
    if (!(i.semaphore = pa_flist_pop(PA_STATIC_FLIST_GET(semaphores))))
        i.semaphore = pa_semaphore_new(0);

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    pa_assert_se(pa_asyncq_push(a->asyncq, &i, true) == 0);
    pa_mutex_unlock(a->mutex);

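    /* Wait until the receiving side has processed the message and posted the semaphore */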
    pa_semaphore_wait(i.semaphore);

    if (pa_flist_push(PA_STATIC_FLIST_GET(semaphores), i.semaphore) < 0)
        pa_semaphore_free(i.semaphore);

    return i.ret;
}
Example #8
void* pa_queue_pop(pa_queue *q) {
    void *p;
    struct queue_entry *e;

    pa_assert(q);

    if (!(e = q->front))
        return NULL;

    q->front = e->next;

    if (q->back == e) {
        pa_assert(!e->next);
        q->back = NULL;
    }

    p = e->data;

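    /* Recycle the entry container via the static free list, or free it if the push fails */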
    if (pa_flist_push(PA_STATIC_FLIST_GET(entries), e) < 0)
        pa_xfree(e);

    q->length--;

    return p;
}
Example #9
void pa_tagstruct_free(pa_tagstruct*t) {
    pa_assert(t);

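    /* Free a dynamically allocated buffer, then recycle (or free) the tagstruct itself */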
    if (t->type == PA_TAGSTRUCT_DYNAMIC)
        pa_xfree(t->data);
    if (pa_flist_push(PA_STATIC_FLIST_GET(tagstructs), t) < 0)
        pa_xfree(t);
}
Example #10
void pa_packet_unref(pa_packet *p) {
    pa_assert(p);
    pa_assert(PA_REFCNT_VALUE(p) >= 1);

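    /* Release the packet once the last reference is gone */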
    if (PA_REFCNT_DEC(p) <= 0) {
        if (p->type == PA_PACKET_DYNAMIC)
            pa_xfree(p->data);
        if (pa_flist_push(PA_STATIC_FLIST_GET(packets), p) < 0)
            pa_xfree(p);
    }
}
Example #11
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

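    /* Move all free slots to a temporary list, punch holes into the underlying memory, then return them to the pool's free list */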
    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}
Example #12
static pa_bool_t flush_postq(pa_asyncq *l, pa_bool_t wait_op) {
    struct localq *q;

    pa_assert(l);

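    /* Push everything from the locally buffered queue into the asyncq proper */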
    while ((q = l->last_localq)) {

        if (push(l, q->data, wait_op) < 0)
            return FALSE;

        l->last_localq = q->prev;

        PA_LLIST_REMOVE(struct localq, l->localq, q);

        if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
            pa_xfree(q);
    }

    return TRUE;
}
Example #13
static void remove_entry(pa_idxset *s, struct idxset_entry *e) {
    pa_assert(s);
    pa_assert(e);

    /* Remove from iteration linked list */
    if (e->iterate_next)
        e->iterate_next->iterate_previous = e->iterate_previous;
    else
        s->iterate_list_tail = e->iterate_previous;

    if (e->iterate_previous)
        e->iterate_previous->iterate_next = e->iterate_next;
    else
        s->iterate_list_head = e->iterate_next;

    /* Remove from data hash table */
    if (e->data_next)
        e->data_next->data_previous = e->data_previous;

    if (e->data_previous)
        e->data_previous->data_next = e->data_next;
    else {
        unsigned hash = s->hash_func(e->data) % NBUCKETS;
        BY_DATA(s)[hash] = e->data_next;
    }

    /* Remove from index hash table */
    if (e->index_next)
        e->index_next->index_previous = e->index_previous;

    if (e->index_previous)
        e->index_previous->index_next = e->index_next;
    else
        BY_INDEX(s)[e->idx % NBUCKETS] = e->index_next;

    if (pa_flist_push(PA_STATIC_FLIST_GET(entries), e) < 0)
        pa_xfree(e);

    pa_assert(s->n_entries >= 1);
    s->n_entries--;
}
Example #14
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

/*         PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
Example #15
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            bool call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
Example #16
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}