Example #1
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
Example #2
static void print_stats(pa_mempool *p, const char *text) {
    const pa_mempool_stat*s = pa_mempool_get_stat(p);

    printf("%s = {\n"
           "n_allocated = %u\n"
           "n_accumulated = %u\n"
           "n_imported = %u\n"
           "n_exported = %u\n"
           "allocated_size = %u\n"
           "accumulated_size = %u\n"
           "imported_size = %u\n"
           "exported_size = %u\n"
           "n_too_large_for_pool = %u\n"
           "n_pool_full = %u\n"
           "}\n",
           text,
           (unsigned) pa_atomic_load(&s->n_allocated),
           (unsigned) pa_atomic_load(&s->n_accumulated),
           (unsigned) pa_atomic_load(&s->n_imported),
           (unsigned) pa_atomic_load(&s->n_exported),
           (unsigned) pa_atomic_load(&s->allocated_size),
           (unsigned) pa_atomic_load(&s->accumulated_size),
           (unsigned) pa_atomic_load(&s->imported_size),
           (unsigned) pa_atomic_load(&s->exported_size),
           (unsigned) pa_atomic_load(&s->n_too_large_for_pool),
           (unsigned) pa_atomic_load(&s->n_pool_full));
}
Example #3
void* pa_flist_pop(pa_flist*l) {
    unsigned idx, n;
    pa_atomic_ptr_t *cells;
#ifdef PROFILE
    unsigned len;
#endif

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    n = (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;

#ifdef PROFILE
    len = n;
#endif

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->read_idx));

    for (; n > 0 ; n--) {
        void *p;

        _Y;
        p = pa_atomic_ptr_load(&cells[idx]);

        if (p) {

            _Y;
            if (!pa_atomic_ptr_cmpxchg(&cells[idx], p, NULL))
                continue;

            _Y;
            pa_atomic_inc(&l->read_idx);

            _Y;
            pa_atomic_dec(&l->length);

            return p;
        }

        _Y;
        idx = reduce(l, idx+1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find used cell after %u iterations.", len);
#endif

    return NULL;
}
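
This function relies on two helpers that are not shown in the excerpt: the `_Y` macro (a no-op in normal builds, plausibly a scheduler-yield hook in PROFILE builds to widen race windows during testing) and `reduce()`, which maps the free-running atomic counters `read_idx`/`write_idx` onto a valid cell index. A minimal sketch of what `reduce()` has to do, assuming `l->size` holds the cell count (hedged, not the project's verbatim helper):

/* Hedged sketch: the indices only ever increase, so fold them back
 * into the cell array. A plain modulo always works; if l->size is
 * kept a power of two the compiler can turn it into a mask. */
static unsigned reduce(pa_flist *l, unsigned value) {
    return value % l->size;
}
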
Example #4
/* Lock free pop from linked list stack */
static pa_flist_elem *stack_pop(pa_flist *flist, pa_atomic_t *list) {
    pa_flist_elem *popped;
    int idx;
    pa_assert(list);

    do {
        idx = pa_atomic_load(list);
        if (idx < 0)
            return NULL;
        popped = &flist->table[idx & flist->index_mask];
    } while (!pa_atomic_cmpxchg(list, idx, pa_atomic_load(&popped->next)));

    return popped;
}
Example #5
/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
Example #6
File: memblock.c Project: thewb/mokoiax
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
/*         raise(SIGTRAP);  */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
Example #7
static void *pa_ringbuffer_begin_write(pa_ringbuffer *r, int *count) {
    int c = pa_atomic_load(r->count);

    *count = PA_MIN(r->capacity - r->writeindex, r->capacity - c);

    return r->memory + r->writeindex;
}
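
begin_write only reserves space; a commit step has to advance the write index and publish the new fill level. A hedged sketch of that counterpart using the same struct fields (not necessarily the project's verbatim code):

/* Hedged sketch of the commit step: advance the write pointer with
 * wraparound and atomically grow the fill count so the reader side
 * (pa_ringbuffer_peek, Example #10) observes the new data. */
static void pa_ringbuffer_end_write(pa_ringbuffer *r, int count) {
    r->writeindex = (r->writeindex + count) % r->capacity;
    pa_atomic_add(r->count, count);
}
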
Example #8
int pa_flist_push(pa_flist*l, void *p) {
    unsigned idx, n;
    pa_atomic_ptr_t*cells;
#ifdef PROFILE
    unsigned len;
#endif

    pa_assert(l);
    pa_assert(p);

    cells = PA_FLIST_CELLS(l);

    n = l->size + N_EXTRA_SCAN - (unsigned) pa_atomic_load(&l->length);

#ifdef PROFILE
    len = n;
#endif

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->write_idx));

    for (; n > 0 ; n--) {

        _Y;

        if (pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {

            _Y;
            pa_atomic_inc(&l->write_idx);

            _Y;
            pa_atomic_inc(&l->length);

            return 0;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't  find free cell after %u iterations.", len);
#endif

    return -1;
}
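
Push and pop together make pa_flist usable as a lock-free object cache. A hypothetical usage sketch (`struct job` and the allocation fallback are illustrative, not from the source):

/* Hypothetical usage: recycle objects through the free list instead of
 * hitting the allocator on every request. */
pa_flist *cache = pa_flist_new(128);
struct job *j;

if (!(j = pa_flist_pop(cache)))   /* reuse a cached object... */
    j = pa_xnew(struct job, 1);   /* ...or fall back to a fresh allocation */

/* ... use j ... */

if (pa_flist_push(cache, j) < 0)  /* hand it back... */
    pa_xfree(j);                  /* ...or free it if the list is full */
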
Example #9
static int do_read(connection *c) {
    pa_memchunk chunk;
    ssize_t r;
    size_t l;
    void *p;
    size_t space = 0;

    connection_assert_ref(c);

    if (!c->sink_input || (l = (size_t) pa_atomic_load(&c->playback.missing)) <= 0)
        return 0;

    if (c->playback.current_memblock) {

        space = pa_memblock_get_length(c->playback.current_memblock) - c->playback.memblock_index;

        if (space <= 0) {
            pa_memblock_unref(c->playback.current_memblock);
            c->playback.current_memblock = NULL;
        }
    }

    if (!c->playback.current_memblock) {
        pa_assert_se(c->playback.current_memblock = pa_memblock_new(c->protocol->core->mempool, (size_t) -1));
        c->playback.memblock_index = 0;

        space = pa_memblock_get_length(c->playback.current_memblock);
    }

    if (l > space)
        l = space;

    p = pa_memblock_acquire(c->playback.current_memblock);
    r = pa_iochannel_read(c->io, (uint8_t*) p + c->playback.memblock_index, l);
    pa_memblock_release(c->playback.current_memblock);

    if (r <= 0) {

        if (r < 0 && (errno == EINTR || errno == EAGAIN))
            return 0;

        pa_log_debug("read(): %s", r == 0 ? "EOF" : pa_cstrerror(errno));
        return -1;
    }

    chunk.memblock = c->playback.current_memblock;
    chunk.index = c->playback.memblock_index;
    chunk.length = (size_t) r;

    c->playback.memblock_index += (size_t) r;

    pa_asyncmsgq_post(c->sink_input->sink->asyncmsgq, PA_MSGOBJECT(c->sink_input), SINK_INPUT_MESSAGE_POST_DATA, NULL, 0, &chunk, NULL);
    pa_atomic_sub(&c->playback.missing, (int) r);

    return 0;
}
Example #10
static void *pa_ringbuffer_peek(pa_ringbuffer *r, int *count) {
    int c = pa_atomic_load(r->count);

    if (r->readindex + c > r->capacity)
        *count = r->capacity - r->readindex;
    else
        *count = c;

    return r->memory + r->readindex;
}
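
Note that peek limits *count to the contiguous run before the wraparound point; the caller consumes the data and then commits. A hedged sketch of that commit, mirroring the write side after Example #7:

/* Hedged sketch: retire 'count' consumed bytes, with wraparound. The
 * atomic decrement is what frees space for pa_ringbuffer_begin_write. */
static void pa_ringbuffer_end_read(pa_ringbuffer *r, int count) {
    r->readindex = (r->readindex + count) % r->capacity;
    pa_atomic_sub(r->count, count);
}
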
Example #11
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
Example #12
/* No lock necessary, in corner cases locks by its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
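
Release pairs with an acquire that bumps n_acquired before handing out the data pointer; that counter is what memblock_wait() in Example #5 spins on. A sketch of the acquire side consistent with the semantics used here (hedged, not necessarily the verbatim source):

/* Hedged sketch: count the caller as a reader of the block's data, then
 * return the pointer. The matching pa_memblock_release() above drops the
 * count and posts the pool semaphore for memblock_wait(). */
void *pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
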
Example #13
unsigned pa_aupdate_write_begin(pa_aupdate *a) {
    unsigned n;

    pa_assert(a);

    pa_mutex_lock(a->write_lock);

    n = (unsigned) pa_atomic_load(&a->read_lock);

    a->swapped = false;

    return !WHICH(n);
}
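
WHICH() and COUNTER() are not shown in this excerpt. Definitions consistent with how read_lock is used here and in Example #17 (a plausible reconstruction, not quoted from the source):

/* One atomic word encodes both pieces of reader state: the most
 * significant bit selects which of the two data copies readers use,
 * the remaining bits count readers currently inside a read section. */
#define MSB        (1U << (sizeof(unsigned) * 8U - 1))
#define WHICH(n)   (!!((n) & MSB))
#define COUNTER(n) ((n) & ~MSB)
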
Example #14
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
Example #15
/* Lock free push to linked list stack */
static void stack_push(pa_flist *flist, pa_atomic_t *list, pa_flist_elem *new_elem) {
    int tag, newindex, next;
    pa_assert(list);

    tag = pa_atomic_inc(&flist->current_tag);
    newindex = new_elem - flist->table;
    pa_assert(newindex >= 0 && newindex < (int) flist->size);
    newindex |= (tag << flist->tag_shift) & flist->tag_mask;

    do {
        next = pa_atomic_load(list);
        pa_atomic_store(&new_elem->next, next);
    } while (!pa_atomic_cmpxchg(list, next, newindex));
}
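
The tag arithmetic is what protects stack_pop() in Example #4 from the ABA problem: an index popped and re-pushed by another thread carries a new generation tag, so the stale cmpxchg fails and the loop retries. A hypothetical mask setup consistent with both functions, assuming a power-of-two table size (field names match the code above; the helper itself is illustrative):

#include <limits.h>

/* Hypothetical initialization: low bits address the table, the
 * remaining non-sign bits hold the generation tag. The sign bit stays
 * clear because a negative list head means "empty" in stack_pop(). */
static void flist_masks_init(pa_flist *flist, unsigned size) {
    flist->size = size;
    flist->index_mask = (int) size - 1;

    flist->tag_shift = 0;
    while ((1U << flist->tag_shift) < size)
        flist->tag_shift++;

    flist->tag_mask = INT_MAX & ~flist->index_mask;
}
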
Example #16
/* Generic source state change logic. Used by raw_source and voice_source */
int voice_source_set_state(pa_source *s, pa_source *other, pa_source_state_t state) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);
    if (!other) {
        pa_log_debug("other source not initialized or freed");
        return 0;
    }
    pa_source_assert_ref(other);

    if (u->hw_source_output) {
        if (pa_source_output_get_state(u->hw_source_output) == PA_SOURCE_OUTPUT_RUNNING) {
            if (state == PA_SOURCE_SUSPENDED &&
                pa_source_get_state(other) == PA_SOURCE_SUSPENDED &&
                pa_atomic_load(&u->cmt_connection.ul_state) != CMT_UL_ACTIVE) {
                pa_source_output_cork(u->hw_source_output, TRUE);
                pa_log_debug("hw_source_output corked");
            }
        }
        else if (pa_source_output_get_state(u->hw_source_output) == PA_SOURCE_OUTPUT_CORKED) {
            if (PA_SOURCE_IS_OPENED(state) ||
                PA_SOURCE_IS_OPENED(pa_source_get_state(other)) ||
                pa_atomic_load(&u->cmt_connection.ul_state) == CMT_UL_ACTIVE) {
                pa_source_output_cork(u->hw_source_output, FALSE);
                pa_log_debug("hw_source_output uncorked");
            }
        }
    }
    if (pa_atomic_load(&u->cmt_connection.ul_state) != CMT_UL_ACTIVE && 
        !PA_SOURCE_IS_OPENED(pa_source_get_state(u->voip_source)))
    {
        voice_aep_ear_ref_loop_reset(u);
    }
    return 0;
}
Example #17
unsigned pa_aupdate_write_swap(pa_aupdate *a) {
    unsigned n;

    pa_assert(a);

    for (;;) {
        n = (unsigned) pa_atomic_load(&a->read_lock);

        /* If the read counter is > 0 wait; if it is 0 try to swap the lists */
        if (COUNTER(n) > 0)
            pa_semaphore_wait(a->semaphore);
        else if (pa_atomic_cmpxchg(&a->read_lock, (int) n, (int) (n ^ MSB)))
            break;
    }

    a->swapped = true;

    return WHICH(n);
}
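
The reader side is what makes the MSB/COUNTER encoding pay off: readers touch only one atomic word and never block. A hedged sketch of the read protocol implied by the two writer functions above:

/* Hedged sketch: entering a read section increments the counter part
 * and tells the caller which copy to use; leaving decrements it and
 * posts the semaphore so a writer blocked in pa_aupdate_write_swap()
 * can re-check the counter. */
unsigned pa_aupdate_read_begin(pa_aupdate *a) {
    pa_assert(a);

    return WHICH((unsigned) pa_atomic_inc(&a->read_lock));
}

void pa_aupdate_read_end(pa_aupdate *a) {
    pa_assert(a);

    pa_atomic_dec(&a->read_lock);
    pa_semaphore_post(a->semaphore);
}
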
Example #18
void pa_fdsem_post(pa_fdsem *f) {
    pa_assert(f);

    if (pa_atomic_cmpxchg(&f->data->signalled, 0, 1)) {

        if (pa_atomic_load(&f->data->waiting)) {
            ssize_t r;
            char x = 'x';

            pa_atomic_inc(&f->data->in_pipe);

            for (;;) {

#ifdef HAVE_SYS_EVENTFD_H
                if (f->efd >= 0) {
                    uint64_t u = 1;

                    if ((r = pa_write(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {
                        if (r >= 0 || errno != EINTR) {
                            pa_log_error("Invalid write to eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                            pa_assert_not_reached();
                        }

                        continue;
                    }
                } else
#endif

                    if ((r = pa_write(f->fds[1], &x, 1, NULL)) != 1) {
                        if (r >= 0 || errno != EINTR) {
                            pa_log_error("Invalid write to pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                            pa_assert_not_reached();
                        }

                        continue;
                    }

                break;
            }
        }
    }
}
Example #19
static void flush(pa_fdsem *f) {
    ssize_t r;
    pa_assert(f);

    if (pa_atomic_load(&f->data->in_pipe) <= 0)
        return;

    do {
        char x[10];

#ifdef HAVE_SYS_EVENTFD_H
        if (f->efd >= 0) {
            uint64_t u;

            if ((r = pa_read(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }
            r = (ssize_t) u;
        } else
#endif

            if ((r = pa_read(f->fds[0], &x, sizeof(x), NULL)) <= 0) {

                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

    } while (pa_atomic_sub(&f->data->in_pipe, (int) r) > (int) r);
}
Example #20
File: shm.c Project: Elemecca/pulseaudio
int pa_shm_cleanup(void) {

#ifdef HAVE_SHM_OPEN
#ifdef SHM_PATH
    DIR *d;
    struct dirent *de;

    if (!(d = opendir(SHM_PATH))) {
        pa_log_warn("Failed to read "SHM_PATH": %s", pa_cstrerror(errno));
        return -1;
    }

    while ((de = readdir(d))) {
        pa_shm seg;
        unsigned id;
        pid_t pid;
        char fn[128];
        struct shm_marker *m;

#if defined(__sun)
        if (strncmp(de->d_name, ".SHMDpulse-shm-", SHM_ID_LEN))
#else
        if (strncmp(de->d_name, "pulse-shm-", SHM_ID_LEN))
#endif
            continue;

        if (pa_atou(de->d_name + SHM_ID_LEN, &id) < 0)
            continue;

        if (pa_shm_attach(&seg, id, false) < 0)
            continue;

        if (seg.size < SHM_MARKER_SIZE) {
            pa_shm_free(&seg);
            continue;
        }

        m = (struct shm_marker*) ((uint8_t*) seg.ptr + seg.size - SHM_MARKER_SIZE);

        if (pa_atomic_load(&m->marker) != SHM_MARKER) {
            pa_shm_free(&seg);
            continue;
        }

        if (!(pid = (pid_t) pa_atomic_load(&m->pid))) {
            pa_shm_free(&seg);
            continue;
        }

        if (kill(pid, 0) == 0 || errno != ESRCH) {
            pa_shm_free(&seg);
            continue;
        }

        pa_shm_free(&seg);

        /* Ok, the owner of this shm segment is dead, so let's remove the segment */
        segment_name(fn, sizeof(fn), id);

        if (shm_unlink(fn) < 0 && errno != EACCES && errno != ENOENT)
            pa_log_warn("Failed to remove SHM segment %s: %s\n", fn, pa_cstrerror(errno));
    }

    closedir(d);
#endif /* SHM_PATH */
#endif /* HAVE_SHM_OPEN */

    return 0;
}
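
The liveness test hinges on the marker appended at the end of every segment. A hypothetical layout consistent with the checks above (the field names and the exact layout behind SHM_MARKER_SIZE are assumptions):

/* Hypothetical marker written by the segment's creator into the last
 * SHM_MARKER_SIZE bytes: a magic constant proves the segment is ours,
 * the PID lets pa_shm_cleanup() probe the owner with kill(pid, 0). */
struct shm_marker {
    pa_atomic_t marker; /* must read back as SHM_MARKER */
    pa_atomic_t pid;    /* creator; ESRCH from kill() means it died */
};
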
Example #21
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

/*         PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
Example #22
File: memblock.c Project: thewb/mokoiax
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
Example #23
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            bool call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
Example #24
/* Generic sink state change logic. Used by raw_sink and voip_sink */
int voice_sink_set_state(pa_sink *s, pa_sink *other, pa_sink_state_t state) {
    struct userdata *u;
    pa_sink *om_sink;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);
    if (!other) {
        pa_log_debug("other sink not initialized or freed");
        return 0;
    }
    pa_sink_assert_ref(other);
    om_sink = u->master_sink;

    if (u->hw_sink_input && PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->hw_sink_input))) {
        if (pa_sink_input_get_state(u->hw_sink_input) == PA_SINK_INPUT_CORKED) {
            if (PA_SINK_IS_OPENED(state) ||
                PA_SINK_IS_OPENED(pa_sink_get_state(other)) ||
                pa_atomic_load(&u->cmt_connection.dl_state) == CMT_DL_ACTIVE) {
                pa_sink_input_cork(u->hw_sink_input, FALSE);
                pa_log_debug("hw_sink_input uncorked");
            }
        }
        else {
            if (state == PA_SINK_SUSPENDED &&
                pa_sink_get_state(other) == PA_SINK_SUSPENDED &&
                pa_atomic_load(&u->cmt_connection.dl_state) != CMT_DL_ACTIVE) {
                pa_sink_input_cork(u->hw_sink_input, TRUE);
                pa_log_debug("hw_sink_input corked");
            }
        }
    }

    if (om_sink == NULL) {
        pa_log_info("No master sink, assuming primary mixer tuning.\n");
        pa_atomic_store(&u->mixer_state, PROP_MIXER_TUNING_PRI);
    }
    else if (pa_atomic_load(&u->cmt_connection.dl_state) == CMT_DL_ACTIVE ||
            (pa_sink_get_state(u->voip_sink) <= PA_SINK_SUSPENDED &&
             voice_voip_sink_used_by(u))) {
        if (pa_atomic_load(&u->mixer_state) == PROP_MIXER_TUNING_PRI) {
             pa_proplist *p = pa_proplist_new();
             pa_assert(p);
             pa_proplist_sets(p, PROP_MIXER_TUNING_MODE, PROP_MIXER_TUNING_ALT_S);
             pa_sink_update_proplist(om_sink, PA_UPDATE_REPLACE, p);
             pa_atomic_store(&u->mixer_state, PROP_MIXER_TUNING_ALT);
             pa_proplist_free(p);
             if (u->sidetone_enable)
                 voice_enable_sidetone(u,1);
        }
    }
    else {
        if (pa_atomic_load(&u->mixer_state) == PROP_MIXER_TUNING_ALT) {
            pa_proplist *p = pa_proplist_new();
            pa_assert(p);
            pa_proplist_sets(p, PROP_MIXER_TUNING_MODE, PROP_MIXER_TUNING_PRI_S);
            pa_sink_update_proplist(om_sink, PA_UPDATE_REPLACE, p);
            pa_atomic_store(&u->mixer_state, PROP_MIXER_TUNING_PRI);
            pa_proplist_free(p);
            voice_enable_sidetone(u,0);

        }
    }

    return 0;
}