/* Post a message onto the queue and block until the dispatching side has
 * processed it; returns the result code the dispatcher stored in the item.
 *
 * Writer side only: a mutex serializes concurrent senders, and the reply is
 * signalled through a per-call semaphore that is recycled via a static free
 * list to avoid churn. */
int pa_asyncmsgq_send(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk) {
    struct asyncmsgq_item item;

    pa_assert(PA_REFCNT_VALUE(a) > 0);

    item.code = code;
    item.object = object;
    item.userdata = (void*) userdata;
    item.free_cb = NULL;
    item.ret = -1;
    item.offset = offset;

    if (!chunk)
        pa_memchunk_reset(&item.memchunk);
    else {
        pa_assert(chunk->memblock);
        item.memchunk = *chunk;
    }

    /* Reuse a cached semaphore when one is available, otherwise allocate. */
    item.semaphore = pa_flist_pop(PA_STATIC_FLIST_GET(semaphores));
    if (!item.semaphore)
        item.semaphore = pa_semaphore_new(0);

    /* This mutex makes the queue multiple-writer safe. This lock is only
     * used on the writing side. */
    pa_mutex_lock(a->mutex);
    pa_assert_se(pa_asyncq_push(a->asyncq, &item, true) == 0);
    pa_mutex_unlock(a->mutex);

    /* Block here until the dispatcher posts the semaphore. */
    pa_semaphore_wait(item.semaphore);

    /* Hand the semaphore back to the cache; destroy it if the cache is full. */
    if (pa_flist_push(PA_STATIC_FLIST_GET(semaphores), item.semaphore) < 0)
        pa_semaphore_free(item.semaphore);

    return item.ret;
}
pa_mempool* pa_mempool_new(int shared) { pa_mempool *p; p = pa_xnew(pa_mempool, 1); p->mutex = pa_mutex_new(TRUE, TRUE); p->semaphore = pa_semaphore_new(0); p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE); if (p->block_size < PA_PAGE_SIZE) p->block_size = PA_PAGE_SIZE; p->n_blocks = PA_MEMPOOL_SLOTS_MAX; pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot))); if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) { pa_xfree(p); return NULL; } memset(&p->stat, 0, sizeof(p->stat)); pa_atomic_store(&p->n_init, 0); PA_LLIST_HEAD_INIT(pa_memimport, p->imports); PA_LLIST_HEAD_INIT(pa_memexport, p->exports); p->free_slots = pa_flist_new(p->n_blocks*2); return p; }
/* Allocate and initialize a fresh pa_aupdate object: the atomic reader
 * counter starts at zero, the writer lock is a plain (non-recursive,
 * non-inheriting) mutex, and the semaphore starts unsignalled. */
pa_aupdate *pa_aupdate_new(void) {
    pa_aupdate *u = pa_xnew(pa_aupdate, 1);

    pa_atomic_store(&u->read_lock, 0);
    u->write_lock = pa_mutex_new(false, false);
    u->semaphore = pa_semaphore_new(0);

    return u;
}
pa_mempool* pa_mempool_new(bool shared, size_t size) { pa_mempool *p; char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX]; p = pa_xnew(pa_mempool, 1); p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE); if (p->block_size < PA_PAGE_SIZE) p->block_size = PA_PAGE_SIZE; if (size <= 0) p->n_blocks = PA_MEMPOOL_SLOTS_MAX; else { p->n_blocks = (unsigned) (size / p->block_size); if (p->n_blocks < 2) p->n_blocks = 2; } if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) { pa_xfree(p); return NULL; } pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu", p->memory.shared ? "shared" : "private", p->n_blocks, pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size), pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)), (unsigned long) pa_mempool_block_size_max(p)); memset(&p->stat, 0, sizeof(p->stat)); pa_atomic_store(&p->n_init, 0); PA_LLIST_HEAD_INIT(pa_memimport, p->imports); PA_LLIST_HEAD_INIT(pa_memexport, p->exports); p->mutex = pa_mutex_new(true, true); p->semaphore = pa_semaphore_new(0); p->free_slots = pa_flist_new(p->n_blocks); return p; }