Example #1
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (--segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
Example #2
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty; we have to allocate a new entry */

        /* pa_atomic_inc() returns the previous value, so this claims
         * the next uninitialized slot index. If the pool is already
         * exhausted, take the increment back; the transient over-count
         * is harmless. */
        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
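For contrast, the release path is lock-free as well: a slot simply goes back onto the same free list it was popped from. A minimal sketch, with the helper name mempool_release_slot invented here for illustration:

/* Hypothetical counterpart to mempool_allocate_slot(). No lock
 * necessary; pa_flist_push() is lock-free. */
static void mempool_release_slot(pa_mempool *p, struct mempool_slot *slot) {
    pa_assert(p);
    pa_assert(slot);

    /* The free list is dimensioned to hold every slot, so a failed
     * push should be transient; just retry. */
    while (pa_flist_push(p->free_slots, slot) < 0)
        ;
}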
Example #3
/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = false;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = false;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
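The mempool_slot_data() helper used above maps a slot to its payload address. A minimal sketch, assuming slots carry no header so the payload starts at the slot itself, consistent with the pointer arithmetic in mempool_allocate_slot():

/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}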
Example #4
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
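stat_remove() implies a counterpart that bumps the same counters upward on allocation. A hedged sketch of what that looks like, mirroring only the fields touched above:

/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
}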
Example #5
/* No lock necessary; in corner cases it takes locks of its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
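The release pairs with an acquire that increments the same counter before handing out the data pointer; a thread blocked in memblock_wait() (Example #9) is signalled once n_acquired drops back to zero. A sketch of the plausible counterpart:

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}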
Example #6
int pa_fdsem_after_poll(pa_fdsem *f) {
    pa_assert(f);

    pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);

    flush(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return 1;

    return 0;
}
Example #7
void* pa_flist_pop(pa_flist *l) {
    unsigned idx, n;
    pa_atomic_ptr_t *cells;
#ifdef PROFILE
    unsigned len;
#endif

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    n = (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;

#ifdef PROFILE
    len = n;
#endif

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        void *p;

        _Y;
        p = pa_atomic_ptr_load(&cells[idx]);

        if (p) {

            _Y;
            if (!pa_atomic_ptr_cmpxchg(&cells[idx], p, NULL))
                continue;

            _Y;
            pa_atomic_inc(&l->read_idx);

            _Y;
            pa_atomic_dec(&l->length);

            return p;
        }

        _Y;
        idx = reduce(l, idx+1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find used cell after %u iterations.", len);
#endif

    return NULL;
}
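The push side mirrors this scan, claiming an empty cell with a NULL -> p cmpxchg instead. A simplified sketch with the _Y yield points and PROFILE bookkeeping stripped, assuming l->size holds the list capacity:

int pa_flist_push(pa_flist *l, void *p) {
    unsigned idx, n;
    pa_atomic_ptr_t *cells;

    pa_assert(l);
    pa_assert(p);

    cells = PA_FLIST_CELLS(l);

    /* Scan at most the number of presumably free cells, plus slack */
    n = l->size - (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->write_idx));

    for (; n > 0; n--) {
        /* Claim the first empty cell we find */
        if (pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {
            pa_atomic_inc(&l->write_idx);
            pa_atomic_inc(&l->length);
            return 0;
        }

        idx = reduce(l, idx + 1);
    }

    return -1; /* no empty cell found */
}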
Example #8
void pa_aupdate_read_end(pa_aupdate *a) {
    unsigned PA_UNUSED n;

    pa_assert(a);

    /* Decrease the lock counter */
    n = (unsigned) pa_atomic_dec(&a->read_lock);

    /* Make sure the counter was valid */
    pa_assert(COUNTER(n) > 0);

    /* Post the semaphore */
    pa_semaphore_post(a->semaphore);
}
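In use, this closes a read bracket opened by pa_aupdate_read_begin(), which returns the index of the copy that is currently safe to read. A hypothetical reader over double-buffered state (data and use_data are placeholders):

    unsigned j = pa_aupdate_read_begin(a);
    use_data(data[j]);      /* read-only access to the j-th copy */
    pa_aupdate_read_end(a);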
Example #9
/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
Example #10
int pa_fdsem_before_poll(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return -1;

    pa_atomic_inc(&f->data->waiting);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0)) {
        pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);
        return -1;
    }
    return 0;
}
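Together with pa_fdsem_after_poll() from Example #6, this brackets a poll() on the semaphore's wake-up fd. A hypothetical event-loop integration, assuming pa_fdsem_get() returns that fd and handle_event() stands in for the caller's work (error handling elided):

    struct pollfd pfd = { .fd = pa_fdsem_get(f), .events = POLLIN };

    if (pa_fdsem_before_poll(f) < 0) {
        handle_event();             /* already signalled, no need to poll */
    } else {
        poll(&pfd, 1, -1);
        if (pa_fdsem_after_poll(f))
            handle_event();         /* signalled while we were polling */
    }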
Example #11
void pa_fdsem_wait(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return;

    pa_atomic_inc(&f->data->waiting);

    while (!pa_atomic_cmpxchg(&f->data->signalled, 1, 0)) {
        char x[10];
        ssize_t r;

#ifdef HAVE_SYS_EVENTFD_H
        if (f->efd >= 0) {
            uint64_t u;

            if ((r = pa_read(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

            r = (ssize_t) u;
        } else
#endif

            if ((r = pa_read(f->fds[0], &x, sizeof(x), NULL)) <= 0) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

        pa_atomic_sub(&f->data->in_pipe, (int) r);
    }

    pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);
}
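The wait only blocks in the kernel when a post actually wrote to the pipe or eventfd. A simplified sketch of the posting side, showing how the 0 -> 1 cmpxchg collapses repeated posts into a single wake-up (eventfd branch and EINTR retries elided):

void pa_fdsem_post(pa_fdsem *f) {
    pa_assert(f);

    /* Only the post that flips signalled from 0 to 1 may need to wake anyone */
    if (pa_atomic_cmpxchg(&f->data->signalled, 0, 1)) {

        /* ... and only if a waiter has registered itself */
        if (pa_atomic_load(&f->data->waiting)) {
            char x = 'x';

            pa_atomic_inc(&f->data->in_pipe);
            pa_assert_se(pa_write(f->fds[1], &x, 1, NULL) == 1);
        }
    }
}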
Example #12
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}