pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    /* Recursive, priority-inheriting mutex */
    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    /* Slot size, rounded up to at least one page */
    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot)));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
static int ref(void) {

    /* Already referenced: just bump the count */
    if (n_ref > 0) {
        pa_assert(pipe_fd[0] >= 0);
        pa_assert(pipe_fd[1] >= 0);
        pa_assert(lock_fd_mutex);

        n_ref++;
        return 0;
    }

    /* First reference: everything must still be torn down */
    pa_assert(!lock_fd_mutex);
    pa_assert(state == STATE_IDLE);
    pa_assert(lock_fd < 0);
    pa_assert(!thread);
    pa_assert(pipe_fd[0] < 0);
    pa_assert(pipe_fd[1] < 0);

    if (pa_pipe_cloexec(pipe_fd) < 0)
        return -1;

    pa_make_fd_nonblock(pipe_fd[1]);
    pa_make_fd_nonblock(pipe_fd[0]);

    /* Non-recursive mutex without priority inheritance */
    lock_fd_mutex = pa_mutex_new(FALSE, FALSE);

    n_ref = 1;
    return 0;
}
pa_aupdate *pa_aupdate_new(void) {
    pa_aupdate *a;

    a = pa_xnew(pa_aupdate, 1);
    pa_atomic_store(&a->read_lock, 0);
    a->write_lock = pa_mutex_new(false, false);
    a->semaphore = pa_semaphore_new(0);

    return a;
}
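/* A minimal usage sketch (not from the original source) of the
 * double-buffer protocol this object implements, per pulsecore/aupdate.h:
 * readers dereference whichever of two copies is active; the writer
 * updates the inactive copy, swaps, then mirrors the change. The "data"
 * array and both helper functions below are hypothetical. */
#include <pulsecore/aupdate.h>

static int data[2];          /* hypothetical double-buffered state */
static pa_aupdate *aupdate;  /* assumed set up via pa_aupdate_new() */

static int read_value(void) {
    unsigned j;
    int v;

    j = pa_aupdate_read_begin(aupdate);  /* index of the active copy */
    v = data[j];
    pa_aupdate_read_end(aupdate);

    return v;
}

static void write_value(int v) {
    unsigned j;

    j = pa_aupdate_write_begin(aupdate); /* index readers are NOT using */
    data[j] = v;
    j = pa_aupdate_write_swap(aupdate);  /* publish; returns the other copy */
    data[j] = v;                         /* keep both copies in sync */
    pa_aupdate_write_end(aupdate);
}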
pa_asyncmsgq *pa_asyncmsgq_new(unsigned size) {
    pa_asyncq *asyncq;
    pa_asyncmsgq *a;

    asyncq = pa_asyncq_new(size);
    if (!asyncq)
        return NULL;

    a = pa_xnew(pa_asyncmsgq, 1);
    PA_REFCNT_INIT(a);
    a->asyncq = asyncq;
    pa_assert_se(a->mutex = pa_mutex_new(false, true));
    a->current = NULL;

    return a;
}
pa_mempool* pa_mempool_new(bool shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    /* Slot size, rounded up to at least one page */
    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    /* A size of 0 selects the default number of slots */
    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    /* Recursive, priority-inheriting mutex */
    p->mutex = pa_mutex_new(true, true);
    p->semaphore = pa_semaphore_new(0);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
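/* A minimal usage sketch (not from the original source), assuming the
 * same-era pa_memblock API from pulsecore/memblock.h: create a private
 * pool, carve one block out of it, release both. pa_mempool_free() is
 * assumed to be the matching destructor in this version. */
#include <pulsecore/memblock.h>

static void mempool_example(void) {
    pa_mempool *pool;
    pa_memblock *block;

    /* false = private memory, 0 = default pool size */
    pool = pa_mempool_new(false, 0);
    if (!pool)
        return;

    /* Served from a pool slot; oversized requests fall back to the heap */
    block = pa_memblock_new(pool, 4096);

    pa_memblock_unref(block);
    pa_mempool_free(pool);
}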
pa_threaded_mainloop *pa_threaded_mainloop_new(void) {
    pa_threaded_mainloop *m;

    pa_init_i18n();

    m = pa_xnew0(pa_threaded_mainloop, 1);

    if (!(m->real_mainloop = pa_mainloop_new())) {
        pa_xfree(m);
        return NULL;
    }

    m->mutex = pa_mutex_new(true, true);
    m->cond = pa_cond_new();
    m->accept_cond = pa_cond_new();

    pa_mainloop_set_poll_func(m->real_mainloop, poll_func, m->mutex);

    return m;
}
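/* A minimal usage sketch (not from the original source) of the public
 * pa_threaded_mainloop API this constructor backs: the mutex created above
 * is the one pa_threaded_mainloop_lock()/unlock() operate on, and the one
 * poll_func releases while polling. */
#include <assert.h>
#include <pulse/thread-mainloop.h>

static void mainloop_example(void) {
    pa_threaded_mainloop *m;
    int r;

    m = pa_threaded_mainloop_new();
    assert(m);

    r = pa_threaded_mainloop_start(m);  /* spawns the event loop thread */
    assert(r == 0);

    /* Any interaction with objects owned by the loop thread must happen
     * with the lock held */
    pa_threaded_mainloop_lock(m);
    /* ... issue operations here; pa_threaded_mainloop_wait(m) blocks until
     * a callback calls pa_threaded_mainloop_signal() ... */
    pa_threaded_mainloop_unlock(m);

    pa_threaded_mainloop_stop(m);
    pa_threaded_mainloop_free(m);
}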
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(true, true);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
/* This is a copy of the function in mutex-posix.c */
pa_mutex* pa_static_mutex_get(pa_static_mutex *s, bool recursive, bool inherit_priority) {
    pa_mutex *m;

    pa_assert(s);

    /* First, check if already initialized and short cut */
    if ((m = pa_atomic_ptr_load(&s->ptr)))
        return m;

    /* OK, not initialized, so let's allocate, and fill in */
    m = pa_mutex_new(recursive, inherit_priority);
    if ((pa_atomic_ptr_cmpxchg(&s->ptr, NULL, m)))
        return m;

    pa_mutex_free(m);

    /* Hmm, filling in failed, so someone else must have filled in
     * already */
    pa_assert_se(m = pa_atomic_ptr_load(&s->ptr));
    return m;
}
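/* A minimal usage sketch (not from the original source): lazy global lock
 * initialization with PA_STATIC_MUTEX_INIT from pulsecore/mutex.h. The
 * cmpxchg above makes a lost race harmless: the loser frees its redundant
 * mutex and adopts the winner's. */
#include <pulsecore/mutex.h>

static pa_static_mutex example_mutex = PA_STATIC_MUTEX_INIT;

static void critical_section(void) {
    pa_mutex *m;

    /* Non-recursive, no priority inheritance */
    m = pa_static_mutex_get(&example_mutex, false, false);

    pa_mutex_lock(m);
    /* ... protected work ... */
    pa_mutex_unlock(m);
}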
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(true, true);
    e->pool = p;

    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;

    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}