Example #1
static void thread_func(void *data) {
    char *s = data;
    int n = 0;
    int b = 1;

    while (!quit) {
        char *text;

        /* Allocate some memory, if possible take it from the flist */
        if (b && (text = pa_flist_pop(flist)))
            pa_log("%s: popped '%s'", s, text);
        else {
            text = pa_sprintf_malloc("Block %i, allocated by %s", n++, s);
            pa_log("%s: allocated '%s'", s, text);
        }

        b = !b;

        spin();

        /* Give it back to the flist if possible */
        if (pa_flist_push(flist, text) < 0) {
            pa_log("%s: failed to push back '%s'", s, text);
            pa_xfree(text);
        } else
            pa_log("%s: pushed", s);

        spin();
    }

    if (pa_flist_push(flist, s) < 0)
        pa_xfree(s);
}
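Example #1 above assumes a global "flist" and a "quit" flag that are set up elsewhere in the test. Distilled into a self-contained sketch (not taken from PulseAudio itself; the helper name flist_sketch and its count parameter are invented for illustration), the same pop-or-allocate / push-or-free idiom, including creating and destroying the free list, looks roughly like this:

#include <pulse/xmalloc.h>
#include <pulsecore/core-util.h>
#include <pulsecore/flist.h>

/* Hypothetical helper: recycle heap-allocated strings through a freshly
 * created flist, mirroring the pattern used by thread_func() above. */
static void flist_sketch(unsigned count) {
    pa_flist *l = pa_flist_new(count);
    unsigned i;

    for (i = 0; i < count; i++) {
        char *text;

        /* Take a recycled buffer if one is available, otherwise allocate */
        if (!(text = pa_flist_pop(l)))
            text = pa_sprintf_malloc("Block %u", i);

        /* ... use text ... */

        /* Try to hand it back; if the flist is full we must free it ourselves */
        if (pa_flist_push(l, text) < 0)
            pa_xfree(text);
    }

    /* Free the list together with anything still stored in it */
    pa_flist_free(l, pa_xfree);
}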
Example #2
int pa_asyncmsgq_send(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk) {
    struct asyncmsgq_item i;
    pa_assert(PA_REFCNT_VALUE(a) > 0);

    i.code = code;
    i.object = object;
    i.userdata = (void*) userdata;
    i.free_cb = NULL;
    i.ret = -1;
    i.offset = offset;
    if (chunk) {
        pa_assert(chunk->memblock);
        i.memchunk = *chunk;
    } else
        pa_memchunk_reset(&i.memchunk);

    if (!(i.semaphore = pa_flist_pop(PA_STATIC_FLIST_GET(semaphores))))
        i.semaphore = pa_semaphore_new(0);

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    pa_assert_se(pa_asyncq_push(a->asyncq, &i, true) == 0);
    pa_mutex_unlock(a->mutex);

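    /* Wait until the dispatching side has processed the message and filled in i.ret */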
    pa_semaphore_wait(i.semaphore);

    if (pa_flist_push(PA_STATIC_FLIST_GET(semaphores), i.semaphore) < 0)
        pa_semaphore_free(i.semaphore);

    return i.ret;
}
Example #3
void pa_asyncmsgq_post(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk, pa_free_cb_t free_cb) {
    struct asyncmsgq_item *i;
    pa_assert(PA_REFCNT_VALUE(a) > 0);

    if (!(i = pa_flist_pop(PA_STATIC_FLIST_GET(asyncmsgq))))
        i = pa_xnew(struct asyncmsgq_item, 1);

    i->code = code;
    i->object = object ? pa_msgobject_ref(object) : NULL;
    i->userdata = (void*) userdata;
    i->free_cb = free_cb;
    i->offset = offset;
    if (chunk) {
        pa_assert(chunk->memblock);
        i->memchunk = *chunk;
        pa_memblock_ref(i->memchunk.memblock);
    } else
        pa_memchunk_reset(&i->memchunk);
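    /* Unlike pa_asyncmsgq_send(), posting is fire-and-forget, so no reply semaphore is needed */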
    i->semaphore = NULL;

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    pa_asyncq_post(a->asyncq, i);
    pa_mutex_unlock(a->mutex);
}
Example #4
void pa_asyncq_post(pa_asyncq*l, void *p) {
    struct localq *q;

    pa_assert(l);
    pa_assert(p);

    if (flush_postq(l, FALSE))
        if (pa_asyncq_push(l, p, FALSE) >= 0)
            return;

    /* OK, we couldn't push anything in the queue. So let's queue it
     * locally and push it later */

    if (pa_log_ratelimit())
        pa_log_warn("q overrun, queuing locally");

    if (!(q = pa_flist_pop(PA_STATIC_FLIST_GET(localq))))
        q = pa_xnew(struct localq, 1);

    q->data = p;
    PA_LLIST_PREPEND(struct localq, l->localq, q);

    if (!l->last_localq)
        l->last_localq = q;

    return;
}
Example #5
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
Example #6
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, bool read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
Example #7
int pa_idxset_put(pa_idxset*s, void *p, uint32_t *idx) {
    unsigned hash;
    struct idxset_entry *e;

    pa_assert(s);

    hash = s->hash_func(p) % NBUCKETS;

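    /* If the value is already in the set, just report its existing index and fail */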
    if ((e = data_scan(s, hash, p))) {
        if (idx)
            *idx = e->idx;

        return -1;
    }

    if (!(e = pa_flist_pop(PA_STATIC_FLIST_GET(entries))))
        e = pa_xnew(struct idxset_entry, 1);

    e->data = p;
    e->idx = s->current_index++;

    /* Insert into data hash table */
    e->data_next = BY_DATA(s)[hash];
    e->data_previous = NULL;
    if (BY_DATA(s)[hash])
        BY_DATA(s)[hash]->data_previous = e;
    BY_DATA(s)[hash] = e;

    hash = e->idx % NBUCKETS;

    /* Insert into index hash table */
    e->index_next = BY_INDEX(s)[hash];
    e->index_previous = NULL;
    if (BY_INDEX(s)[hash])
        BY_INDEX(s)[hash]->index_previous = e;
    BY_INDEX(s)[hash] = e;

    /* Insert into iteration list */
    e->iterate_previous = s->iterate_list_tail;
    e->iterate_next = NULL;
    if (s->iterate_list_tail) {
        pa_assert(s->iterate_list_head);
        s->iterate_list_tail->iterate_next = e;
    } else {
        pa_assert(!s->iterate_list_head);
        s->iterate_list_head = e;
    }
    s->iterate_list_tail = e;

    s->n_entries++;
    pa_assert(s->n_entries >= 1);

    if (idx)
        *idx = e->idx;

    return 0;
}
Example #8
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

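    /* If both the memblock header and the data fit into a single slot, place
     * the data right after the header; otherwise allocate a separate header
     * and give the whole slot to the data */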
    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = false;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
Example #9
pa_tagstruct *pa_tagstruct_copy(pa_tagstruct*t) {
    pa_tagstruct*tc;

    if (!(tc = pa_flist_pop(PA_STATIC_FLIST_GET(tagstructs))))
        tc = pa_xnew(pa_tagstruct, 1);
    tc->data = pa_xmemdup(t->data, t->length);
    tc->allocated = tc->length = t->length;
    tc->rindex = 0;
    tc->type = PA_TAGSTRUCT_DYNAMIC;

    return tc;
}
Example #10
pa_tagstruct *pa_tagstruct_new(void) {
    pa_tagstruct*t;

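    /* Recycle a tagstruct from the static free list if possible, otherwise allocate
     * a new one; the initial payload buffer is the space appended to the struct itself */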
    if (!(t = pa_flist_pop(PA_STATIC_FLIST_GET(tagstructs))))
        t = pa_xnew(pa_tagstruct, 1);
    t->data = t->per_type.appended;
    t->allocated = MAX_APPENDED_SIZE;
    t->length = t->rindex = 0;
    t->type = PA_TAGSTRUCT_APPENDED;

    return t;
}
Example #11
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

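    /* Drain all free slots into a temporary list, punch the corresponding
     * pages out of the shared memory segment, then put the slots back */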
    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}
Example #12
pa_tagstruct *pa_tagstruct_new_fixed(const uint8_t* data, size_t length) {
    pa_tagstruct*t;

    pa_assert(data && length);

    if (!(t = pa_flist_pop(PA_STATIC_FLIST_GET(tagstructs))))
        t = pa_xnew(pa_tagstruct, 1);
    t->data = (uint8_t*) data;
    t->allocated = t->length = length;
    t->rindex = 0;
    t->type = PA_TAGSTRUCT_FIXED;

    return t;
}
Example #13
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

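    /* Look up the SHM segment; attach it first if we haven't seen this shm_id yet */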
    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = true;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
Example #14
pa_packet* pa_packet_new_dynamic(void* data, size_t length) {
    pa_packet *p;

    pa_assert(data);
    pa_assert(length > 0);

    if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(packets))))
        p = pa_xnew(pa_packet, 1);
    PA_REFCNT_INIT(p);
    p->length = length;
    p->data = data;
    p->type = PA_PACKET_DYNAMIC;

    return p;
}
Example #15
pa_packet* pa_packet_new(size_t length) {
    pa_packet *p;

    pa_assert(length > 0);

    if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(packets))))
        p = pa_xnew(pa_packet, 1);
    PA_REFCNT_INIT(p);
    p->length = length;
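    /* Small packets use the buffer appended to the struct itself; larger ones get a separate heap allocation */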
    if (length > MAX_APPENDED_SIZE) {
        p->data = pa_xmalloc(length);
        p->type = PA_PACKET_DYNAMIC;
    } else {
        p->data = p->per_type.appended;
        p->type = PA_PACKET_APPENDED;
    }

    return p;
}
Example #16
pa_prioq_item* pa_prioq_put(pa_prioq *q, void *p) {
    pa_prioq_item *i;

    pa_assert(q);

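    /* Grow the item pointer array (at least doubling it) if it is full */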
    if (q->n_items >= q->n_allocated) {
        q->n_allocated = PA_MAX(q->n_items+1, q->n_allocated)*2;
        q->items = pa_xrealloc(q->items, sizeof(pa_prioq_item*) * q->n_allocated);
    }

    if (!(i = pa_flist_pop(PA_STATIC_FLIST_GET(items))))
        i = pa_xnew(pa_prioq_item, 1);

    i->value = p;
    i->idx = q->n_items++;

    shuffle_up(q, i);

    return i;
}
Example #17
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
Example #18
void pa_queue_push(pa_queue *q, void *p) {
    struct queue_entry *e;

    pa_assert(q);
    pa_assert(p);

    if (!(e = pa_flist_pop(PA_STATIC_FLIST_GET(entries))))
        e = pa_xnew(struct queue_entry, 1);

    e->data = p;
    e->next = NULL;

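    /* Append the new entry at the tail of the queue */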
    if (q->back) {
        pa_assert(q->front);
        q->back->next = e;
    } else {
        pa_assert(!q->front);
        q->front = e;
    }

    q->back = e;
    q->length++;
}
Example #19
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

    return slot;
}
Example #20
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

/*         PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
Example #21
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    pa_assert(uchunk->length % bq->base == 0);
    pa_assert(uchunk->index % bq->base == 0);

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }