Example #1
/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
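The counterpart on the release path decrements only the current-usage counters; the n_accumulated/accumulated_size counters are cumulative totals and are never decremented. A sketch of that counterpart, modeled on stat_remove() from the same file (memblock.c):

/* No lock necessary -- sketch of the matching release path */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    /* Note: the accumulated counters are deliberately left untouched */
    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}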
Example #2
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = false;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
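For context, a minimal hypothetical caller: the returned block is reference counted, so it is released with pa_memblock_unref() rather than freed directly, and a NULL return must be handled since the pool may be full, disabled, or too small for the request.

/* Hypothetical usage sketch; `pool` is assumed to be a valid pa_mempool* */
pa_memblock *b = pa_memblock_new_pool(pool, 1024);

if (b) {
    void *data = pa_memblock_acquire(b);   /* pin the data pointer */
    /* ... read/write up to pa_memblock_length(b) bytes ... */
    pa_memblock_release(b);                /* matching release */
    pa_memblock_unref(b);                  /* drop our reference */
}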
Example #3
void* pa_flist_pop(pa_flist*l) {
    unsigned idx, n;
    pa_atomic_ptr_t *cells;
#ifdef PROFILE
    unsigned len;
#endif

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    n = (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;

#ifdef PROFILE
    len = n;
#endif

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->read_idx));

    for (; n > 0 ; n--) {
        void *p;

        _Y;
        p = pa_atomic_ptr_load(&cells[idx]);

        if (p) {

            _Y;
            if (!pa_atomic_ptr_cmpxchg(&cells[idx], p, NULL))
                continue;

            _Y;
            pa_atomic_inc(&l->read_idx);

            _Y;
            pa_atomic_dec(&l->length);

            return p;
        }

        _Y;
        idx = reduce(l, idx+1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find used cell after %u iterations.", len);
#endif

    return NULL;
}
Example #4
/* Lock free push to linked list stack */
static void stack_push(pa_flist *flist, pa_atomic_t *list, pa_flist_elem *new_elem) {
    int tag, newindex, next;
    pa_assert(list);

    tag = pa_atomic_inc(&flist->current_tag);
    newindex = new_elem - flist->table;
    pa_assert(newindex >= 0 && newindex < (int) flist->size);
    newindex |= (tag << flist->tag_shift) & flist->tag_mask;

    do {
        next = pa_atomic_load(list);
        pa_atomic_store(&new_elem->next, next);
    } while (!pa_atomic_cmpxchg(list, next, newindex));
}
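The tag packed into the upper bits is what protects the pop side against the ABA problem: even if the same element is pushed again between a reader's load and its cmpxchg, the fresh tag makes the compare fail. A matching pop could look like this (a sketch; the index_mask field for stripping the tag bits and the negative-index empty marker are assumptions about the same structure):

/* Lock free pop from linked list stack -- sketch */
static pa_flist_elem *stack_pop(pa_flist *flist, pa_atomic_t *list) {
    pa_flist_elem *popped;
    int idx;
    pa_assert(list);

    do {
        idx = pa_atomic_load(list);
        if (idx < 0)
            return NULL; /* assumed: empty stack is marked by a negative index */

        /* Strip the tag bits to recover the table index */
        popped = &flist->table[idx & flist->index_mask];

        /* Swap in the successor; the tag embedded in idx makes this
         * fail if the head changed in the meantime (ABA protection) */
    } while (!pa_atomic_cmpxchg(list, idx, pa_atomic_load(&popped->next)));

    return popped;
}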
Example #5
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

    return slot;
}
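The return path is the mirror image: when a block is freed, its slot is pushed back onto the same lock-free free list, so the n_init high-water mark only grows until the pool is fully initialized. A sketch of that step (in PulseAudio this happens on the memblock free path):

/* Sketch: returning a slot to the pool. The free list is dimensioned
 * to hold every slot, so a failing push should not happen; retry
 * anyway, as the real free path does. */
while (pa_flist_push(p->free_slots, slot) < 0)
    ;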
Example #6
unsigned pa_aupdate_read_begin(pa_aupdate *a) {
    unsigned n;

    pa_assert(a);

    /* Increase the lock counter */
    n = (unsigned) pa_atomic_inc(&a->read_lock);

    /* When n is 0 we have about 2^31 threads running that all try to
     * access the data at the same time, oh my! */
    pa_assert(COUNTER(n)+1 > 0);

    /* The uppermost bit tells us which data to look at */
    return WHICH(n);
}
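A typical reader brackets its access with read_begin/read_end; the returned index selects which of the two data copies is currently valid for readers. A hypothetical usage sketch:

/* Hypothetical reader, assuming `data[2]` holds the two copies the
 * writer swaps between */
unsigned j = pa_aupdate_read_begin(a);

/* ... read from data[j]; the writer will not touch this copy while
 * our read lock is held ... */

pa_aupdate_read_end(a);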
Example #7
/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
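The other half of this handshake lives on the release path: the last thread to release the block checks please_signal and posts the semaphore, which is what unblocks the loop above. Roughly, as a sketch following pa_memblock_release() from the same file:

/* Sketch of the signalling side */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1); /* pa_atomic_dec() returns the previous value */

    /* If we were the last user and someone is waiting, wake them up */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}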
Example #8
int pa_fdsem_before_poll(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return -1;

    pa_atomic_inc(&f->data->waiting);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0)) {
        pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);
        return -1;
    }
    return 0;
}
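Its counterpart runs after poll() returns: it drops the waiting count again and reports whether the fdsem was signalled in the meantime. Roughly, a sketch of pa_fdsem_after_poll() from the same file:

/* Sketch of the matching post-poll step */
int pa_fdsem_after_poll(pa_fdsem *f) {
    pa_assert(f);

    pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);

    flush(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return 1; /* event consumed */

    return 0;
}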
Example #9
void pa_fdsem_wait(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return;

    pa_atomic_inc(&f->data->waiting);

    while (!pa_atomic_cmpxchg(&f->data->signalled, 1, 0)) {
        char x[10];
        ssize_t r;

#ifdef HAVE_SYS_EVENTFD_H
        if (f->efd >= 0) {
            uint64_t u;

            if ((r = pa_read(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

            r = (ssize_t) u;
        } else
#endif

            if ((r = pa_read(f->fds[0], &x, sizeof(x), NULL)) <= 0) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

        pa_atomic_sub(&f->data->in_pipe, (int) r);
    }

    pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);
}
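A minimal usage sketch: one thread parks in pa_fdsem_wait() while another wakes it with pa_fdsem_post(), shown next as Example #10.

/* Hypothetical worker thread */
static void *worker(void *userdata) {
    pa_fdsem *f = userdata;

    pa_fdsem_wait(f);  /* blocks until another thread calls pa_fdsem_post(f) */
    /* ... handle the wakeup ... */

    return NULL;
}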
Example #10
void pa_fdsem_post(pa_fdsem *f) {
    pa_assert(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 0, 1)) {

        if (pa_atomic_load(&f->data->waiting)) {
            ssize_t r;
            char x = 'x';

            pa_atomic_inc(&f->data->in_pipe);

            for (;;) {

#ifdef HAVE_SYS_EVENTFD_H
                if (f->efd >= 0) {
                    uint64_t u = 1;

                    if ((r = pa_write(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {
                        if (r >= 0 || errno != EINTR) {
                            pa_log_error("Invalid write to eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                            pa_assert_not_reached();
                        }

                        continue;
                    }
                } else
#endif

                    if ((r = pa_write(f->fds[1], &x, 1, NULL)) != 1) {
                        if (r >= 0 || errno != EINTR) {
                            pa_log_error("Invalid write to pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                            pa_assert_not_reached();
                        }

                        continue;
                    }

                break;
            }
        }
    }
}
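Together with pa_fdsem_before_poll()/pa_fdsem_after_poll() from Example #8, this lets the fdsem be driven from an ordinary poll() loop. A hypothetical integration sketch, assuming pa_fdsem_get() returns the file descriptor to poll on:

/* Hypothetical poll() integration sketch */
struct pollfd pfd;

pfd.fd = pa_fdsem_get(f);
pfd.events = POLLIN;

if (pa_fdsem_before_poll(f) < 0) {
    /* Already signalled: handle the event without polling */
} else {
    poll(&pfd, 1, -1);

    if (pa_fdsem_after_poll(f) > 0) {
        /* Signalled while we were polling: handle the event */
    }
}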