Example #1
int pa_asyncmsgq_send(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk) {
    struct asyncmsgq_item i;
    pa_assert(PA_REFCNT_VALUE(a) > 0);

    i.code = code;
    i.object = object;
    i.userdata = (void*) userdata;
    i.free_cb = NULL;
    i.ret = -1;
    i.offset = offset;
    if (chunk) {
        pa_assert(chunk->memblock);
        i.memchunk = *chunk;
    } else
        pa_memchunk_reset(&i.memchunk);

    if (!(i.semaphore = pa_flist_pop(PA_STATIC_FLIST_GET(semaphores))))
        i.semaphore = pa_semaphore_new(0);

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    pa_assert_se(pa_asyncq_push(a->asyncq, &i, true) == 0);
    pa_mutex_unlock(a->mutex);

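    /* Block until the reading side has dispatched this item and called
     * pa_asyncmsgq_done(), which fills in i.ret and posts the semaphore. */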
    pa_semaphore_wait(i.semaphore);

    if (pa_flist_push(PA_STATIC_FLIST_GET(semaphores), i.semaphore) < 0)
        pa_semaphore_free(i.semaphore);

    return i.ret;
}
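To show how this blocking send pairs with the reading side, here is a minimal caller-side sketch. It is an assumption built on the surrounding pulsecore API, not part of the example above: the dispatcher thread, the queue length of 128 and the MESSAGE_QUIT code are made up for illustration, while pa_asyncmsgq_get() and pa_asyncmsgq_done() are the real counterpart calls from pulsecore/asyncmsgq.h.

/* Sketch only: assumes the in-tree pulsecore headers and a POSIX thread
 * acting as the dispatcher. */
#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>

#include <pulsecore/asyncmsgq.h>

enum { MESSAGE_QUIT };   /* illustrative message code */

static void *dispatcher(void *userdata) {
    pa_asyncmsgq *q = userdata;
    pa_msgobject *object;
    int code;
    void *data;
    int64_t offset;
    pa_memchunk chunk;

    /* Wait for the next item, handle it, then hand a return value back to
     * the sender: pa_asyncmsgq_done() is what fills in i.ret and posts the
     * per-item semaphore that pa_asyncmsgq_send() is blocked on. */
    while (pa_asyncmsgq_get(q, &object, &code, &data, &offset, &chunk, true) == 0) {
        pa_asyncmsgq_done(q, 0);

        if (code == MESSAGE_QUIT)
            break;
    }

    return NULL;
}

int main(void) {
    pthread_t t;
    pa_asyncmsgq *q = pa_asyncmsgq_new(128);

    pthread_create(&t, NULL, dispatcher, q);

    /* Returns only after the dispatcher has processed this item. */
    pa_asyncmsgq_send(q, NULL, MESSAGE_QUIT, NULL, 0, NULL);

    pthread_join(t, NULL);
    pa_asyncmsgq_unref(q);
    return 0;
}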
Example #2
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
/*         raise(SIGTRAP);  */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
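For context, here is a sketch of the pool lifecycle that this destructor ends. It is an assumption rather than code from the example: in particular, the pa_mempool_new() arguments vary between PulseAudio releases (the two-argument shared/size form is assumed here, matching the era of pa_mempool_free()).

#include <stdbool.h>

#include <pulsecore/memblock.h>

int main(void) {
    /* Sketch: a private (non-shared) pool; size 0 is assumed to select the
     * default pool size. */
    pa_mempool *pool = pa_mempool_new(false, 0);
    pa_memblock *b = pa_memblock_new(pool, 1024);

    /* ... fill and use the block ... */

    /* Every block has to be released before the pool is destroyed, otherwise
     * pa_mempool_free() logs "Memory pool destroyed but not all memory
     * blocks freed!". */
    pa_memblock_unref(b);
    pa_mempool_free(pool);

    return 0;
}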
Example #3
void pa_aupdate_free(pa_aupdate *a) {
    pa_assert(a);

    pa_mutex_free(a->write_lock);
    pa_semaphore_free(a->semaphore);

    pa_xfree(a);
}
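For context, a sketch of the read/write protocol whose bookkeeping this destructor releases. The two-copy data array and the writer()/reader() helpers are illustrative assumptions; the pa_aupdate_*() calls themselves come from pulsecore/aupdate.h.

#include <pulsecore/aupdate.h>

/* Illustrative shared state: two copies that pa_aupdate switches between. */
static int data[2];

static void writer(pa_aupdate *a, int value) {
    /* Index of the copy that no reader is currently using. */
    unsigned j = pa_aupdate_write_begin(a);
    data[j] = value;

    /* Make the updated copy visible and get the index of the other copy,
     * so both copies end up carrying the same data. */
    j = pa_aupdate_write_swap(a);
    data[j] = value;

    pa_aupdate_write_end(a);
}

static int reader(pa_aupdate *a) {
    unsigned j = pa_aupdate_read_begin(a);
    int value = data[j];
    pa_aupdate_read_end(a);
    return value;
}

int main(void) {
    pa_aupdate *a = pa_aupdate_new();

    writer(a, 42);
    (void) reader(a);

    /* Only safe once no reader or writer is active any more. */
    pa_aupdate_free(a);
    return 0;
}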
Example #4
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

/*         PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
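To see the diagnostic above fire, here is a sketch that deliberately leaks one block; as in the earlier pool sketch, the pa_mempool_new() arguments are assumptions and differ between releases.

#include <stdbool.h>

#include <pulsecore/memblock.h>

int main(void) {
    pa_mempool *pool = pa_mempool_new(false, 0);
    pa_memblock *leaked = pa_memblock_new(pool, 1024);

    /* Intentionally skip pa_memblock_unref(leaked): n_allocated stays at 1,
     * so pa_mempool_free() logs "Memory pool destroyed but not all memory
     * blocks freed!", and a build with DEBUG_REF defined additionally tries
     * to report the leaked block's address. */
    (void) leaked;
    pa_mempool_free(pool);

    return 0;
}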