Example #1
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
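
The accounting pattern in memblock_replace_import() above (assert via pa_atomic_load() that the counters cannot go negative, then decrement with pa_atomic_dec()/pa_atomic_sub()) recurs throughout these examples. A minimal stand-alone sketch of the same idea, using C11 <stdatomic.h> instead of PulseAudio's internal pa_atomic_t; the struct and function names here are hypothetical:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical import statistics, loosely modelled on the pool stats above. */
typedef struct pool_stat {
    atomic_int n_imported;     /* number of imported blocks */
    atomic_int imported_size;  /* total imported bytes */
} pool_stat;

/* Remove one imported block of `length` bytes from the statistics,
 * mirroring the assert-then-subtract pattern above. */
static void stat_drop_import(pool_stat *s, size_t length) {
    assert(atomic_load(&s->n_imported) > 0);
    assert(atomic_load(&s->imported_size) >= (int) length);

    atomic_fetch_sub(&s->n_imported, 1);
    atomic_fetch_sub(&s->imported_size, (int) length);
}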
Example #2
/* Returns true only if the buffer was completely full before the drop. */
static bool pa_ringbuffer_drop(pa_ringbuffer *r, int count) {
    bool b = pa_atomic_sub(r->count, count) >= r->capacity;

    r->readindex += count;
    r->readindex %= r->capacity;

    return b;
}
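
The one-line test in pa_ringbuffer_drop() above relies on pa_atomic_sub() returning the value the counter held before the subtraction (pulsecore's implementations document it as returning the previously set value), so comparing that old value against the capacity tells whether the buffer was completely full before the drop. The same idea with C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical fixed-capacity ring buffer; `count` is the current fill level in bytes. */
typedef struct ring {
    atomic_int count;
    int capacity;
    int readindex;
} ring;

/* Returns true only if the ring was completely full before dropping `n` bytes. */
static bool ring_drop(ring *r, int n) {
    /* atomic_fetch_sub() returns the old value, i.e. the fill level before the drop. */
    bool was_full = atomic_fetch_sub(&r->count, n) >= r->capacity;

    r->readindex = (r->readindex + n) % r->capacity;
    return was_full;
}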
static int do_read(connection *c) {
    pa_memchunk chunk;
    ssize_t r;
    size_t l;
    void *p;
    size_t space = 0;

    connection_assert_ref(c);

    if (!c->sink_input || (l = (size_t) pa_atomic_load(&c->playback.missing)) <= 0)
        return 0;

    if (c->playback.current_memblock) {

        space = pa_memblock_get_length(c->playback.current_memblock) - c->playback.memblock_index;

        if (space <= 0) {
            pa_memblock_unref(c->playback.current_memblock);
            c->playback.current_memblock = NULL;
        }
    }

    if (!c->playback.current_memblock) {
        pa_assert_se(c->playback.current_memblock = pa_memblock_new(c->protocol->core->mempool, (size_t) -1));
        c->playback.memblock_index = 0;

        space = pa_memblock_get_length(c->playback.current_memblock);
    }

    if (l > space)
        l = space;

    p = pa_memblock_acquire(c->playback.current_memblock);
    r = pa_iochannel_read(c->io, (uint8_t*) p + c->playback.memblock_index, l);
    pa_memblock_release(c->playback.current_memblock);

    if (r <= 0) {

        if (r < 0 && (errno == EINTR || errno == EAGAIN))
            return 0;

        pa_log_debug("read(): %s", r == 0 ? "EOF" : pa_cstrerror(errno));
        return -1;
    }

    chunk.memblock = c->playback.current_memblock;
    chunk.index = c->playback.memblock_index;
    chunk.length = (size_t) r;

    c->playback.memblock_index += (size_t) r;

    pa_asyncmsgq_post(c->sink_input->sink->asyncmsgq, PA_MSGOBJECT(c->sink_input), SINK_INPUT_MESSAGE_POST_DATA, NULL, 0, &chunk, NULL);
    pa_atomic_sub(&c->playback.missing, (int) r);

    return 0;
}
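
In do_read(), c->playback.missing acts as a byte credit: the connection peeks at it with pa_atomic_load() to see how much the sink side still wants, clamps that to the space left in the current memblock, reads at most that much from the socket, posts the chunk, and finally subtracts what was actually delivered with pa_atomic_sub(). A reduced sketch of that credit flow with C11 atomics and plain read(); all names are hypothetical and the hand-off to the consumer is elided:

#include <stdatomic.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical credit counter: the consumer adds to it when it wants more
 * data, the socket reader subtracts what it actually delivered. */
static atomic_int missing;

/* Read at most the outstanding credit from `fd` into `buf` (of `bufsize`
 * bytes). Returns bytes read, 0 if there is nothing to do, -1 on error. */
static ssize_t feed_from_fd(int fd, void *buf, size_t bufsize) {
    size_t want = (size_t) atomic_load(&missing);   /* peek the credit */

    if (want == 0)
        return 0;
    if (want > bufsize)
        want = bufsize;                             /* clamp to the buffer */

    ssize_t r = read(fd, buf, want);
    if (r <= 0)
        return r;                                   /* EOF or error */

    /* ... hand the data to the consumer here ... */

    atomic_fetch_sub(&missing, (int) r);            /* burn the used credit */
    return r;
}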
Example #4
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
Example #5
void pa_fdsem_wait(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return;

    pa_atomic_inc(&f->data->waiting);

    while (!pa_atomic_cmpxchg(&f->data->signalled, 1, 0)) {
        char x[10];
        ssize_t r;

#ifdef HAVE_SYS_EVENTFD_H
        if (f->efd >= 0) {
            uint64_t u;

            if ((r = pa_read(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

            r = (ssize_t) u;
        } else
#endif

            if ((r = pa_read(f->fds[0], &x, sizeof(x), NULL)) <= 0) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

        pa_atomic_sub(&f->data->in_pipe, (int) r);
    }

    pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);
}
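
pa_fdsem_wait() combines a lock-free fast path (pa_atomic_cmpxchg() flipping the signalled flag from 1 to 0) with a blocking slow path that sleeps in a read() on an eventfd or pipe, and it keeps the in_pipe byte counter honest by subtracting whatever the read actually returned. A much-reduced sketch of the wait side with C11 atomics and a Linux eventfd; the names are hypothetical, and the in_pipe bookkeeping and error logging are omitted:

#include <stdatomic.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

typedef struct fdsem {
    int efd;               /* from eventfd(0, 0) */
    atomic_int signalled;  /* set to 1 by the posting side */
    atomic_int waiting;    /* threads currently blocked in wait */
} fdsem;

static void fdsem_wait(fdsem *f) {
    int one = 1;

    /* Fast path: consume the signal without touching the fd. */
    if (atomic_compare_exchange_strong(&f->signalled, &one, 0))
        return;

    atomic_fetch_add(&f->waiting, 1);

    for (;;) {
        one = 1;    /* cmpxchg overwrites `one` on failure, reset it */
        if (atomic_compare_exchange_strong(&f->signalled, &one, 0))
            break;

        uint64_t u;
        /* Blocks until the posting side writes to the eventfd. */
        if (read(f->efd, &u, sizeof(u)) != sizeof(u))
            continue;  /* short read or EINTR: just retry */
    }

    atomic_fetch_sub(&f->waiting, 1);
}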
Example #6
static void flush(pa_fdsem *f) {
    ssize_t r;
    pa_assert(f);

    if (pa_atomic_load(&f->data->in_pipe) <= 0)
        return;

    do {
        char x[10];

#ifdef HAVE_SYS_EVENTFD_H
        if (f->efd >= 0) {
            uint64_t u;

            if ((r = pa_read(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }
            r = (ssize_t) u;
        } else
#endif

            if ((r = pa_read(f->fds[0], &x, sizeof(x), NULL)) <= 0) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

    } while (pa_atomic_sub(&f->data->in_pipe, (int) r) > (int) r);
}
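
The loop condition is the subtle part of flush(): assuming pa_atomic_sub() returns the counter's previous value, pa_atomic_sub(&f->data->in_pipe, (int) r) > (int) r keeps looping exactly while in_pipe is still positive after subtracting the r bytes just read, i.e. while wake-up bytes remain to be drained. With C11 atomics the same condition would look like this (hypothetical helper):

#include <stdatomic.h>

/* atomic_fetch_sub() returns the old value, so "old > r" is equivalent to
 * "in_pipe is still greater than zero after subtracting r". */
static int still_draining(atomic_int *in_pipe, int r) {
    return atomic_fetch_sub(in_pipe, r) > r;
}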
Example #7
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
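
The locking pattern in pa_memexport_process_release() is also worth noting: the mutex only protects the slot table and the used/free lists; the block pointer is taken out under the lock, and the atomic statistics update plus pa_memblock_unref() happen after pa_mutex_unlock(), keeping the critical section short. A generic sketch of that shape with pthreads and C11 atomics; all names are hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

#define N_SLOTS 16

typedef struct exporter {
    pthread_mutex_t mutex;     /* protects `slots` only */
    void *slots[N_SLOTS];      /* hypothetical slot table */
    atomic_int n_exported;     /* updated outside the lock */
} exporter;

/* Release slot `id`: detach the entry under the lock, then do the
 * (potentially slower) cleanup after unlocking. Returns 0 on success. */
static int release_slot(exporter *e, unsigned id, void (*destroy)(void *)) {
    void *taken = NULL;

    pthread_mutex_lock(&e->mutex);
    if (id < N_SLOTS && e->slots[id]) {
        taken = e->slots[id];
        e->slots[id] = NULL;
    }
    pthread_mutex_unlock(&e->mutex);

    if (!taken)
        return -1;

    atomic_fetch_sub(&e->n_exported, 1);  /* stats are atomic, no lock needed */
    destroy(taken);                       /* heavy work outside the lock */
    return 0;
}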