/* Self-locked. Not multiple-caller safe */
/* Tear down a memimport: detach every imported block from the foreign
 * memory, revoke any re-exports of those blocks, unlink from the owning
 * pool and free all bookkeeping. */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    /* Replace each block still backed by this import with a local copy,
     * so nothing references the foreign memory after we are gone. */
    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    /* Replacing all imported blocks must have released every segment. */
    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    /* NOTE(review): this takes i->pool->mutex; if a caller (e.g. pool
     * teardown) already holds the pool mutex this deadlocks unless that
     * mutex is recursive — confirm against pa_mutex_new() usage. */
    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks);
    pa_hashmap_free(i->segments);

    pa_mutex_free(i->mutex);
    pa_xfree(i);
}
/* Destroy an async message queue, releasing every item still queued
 * (file-scope helper; called once the last reference is gone). */
static void asyncmsgq_free(pa_asyncmsgq *a) {
    struct asyncmsgq_item *item;

    pa_assert(a);

    /* Drain the queue, dropping the references each pending item holds. */
    for (;;) {
        item = pa_asyncq_pop(a->asyncq, false);
        if (!item)
            break;

        /* A live semaphore would mean a sender is still blocked waiting
         * on this item; that must never be the case at destruction time. */
        pa_assert(!item->semaphore);

        if (item->object)
            pa_msgobject_unref(item->object);

        if (item->memchunk.memblock)
            pa_memblock_unref(item->memchunk.memblock);

        if (item->free_cb)
            item->free_cb(item->userdata);

        /* Recycle the item into the static free list; if the list is
         * full, fall back to freeing it outright. */
        if (pa_flist_push(PA_STATIC_FLIST_GET(asyncmsgq), item) < 0)
            pa_xfree(item);
    }

    pa_asyncq_free(a->asyncq, NULL);
    pa_mutex_free(a->mutex);
    pa_xfree(a);
}
/* Destroy a memory pool: tear down all imports and exports, release the
 * shared memory segment and free all bookkeeping. Warns if memory blocks
 * are still allocated at this point. */
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    /* NOTE(review): pa_memimport_free()/pa_memexport_free() take
     * pool->mutex internally — this only works if p->mutex is recursive
     * or these lists are already empty here; confirm. */
    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    /* Somebody is still holding a memblock reference — report it. */
    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* Destroy an atomic-update helper, releasing its synchronization
 * primitives and the object itself. */
void pa_aupdate_free(pa_aupdate *a) {
    pa_assert(a);

    pa_mutex_free(a->write_lock);
    pa_semaphore_free(a->semaphore);

    pa_xfree(a);
}
/* Drop one reference on the module-global autospawn state. When the last
 * reference goes away: stop the helper thread, release the autospawn lock
 * file (or merely close its fd when called after fork(), since the parent
 * still owns the lock), and close the wake-up pipe. */
static void unref(pa_bool_t after_fork) {

    pa_assert(n_ref > 0);
    pa_assert(pipe_fd[0] >= 0);
    pa_assert(pipe_fd[1] >= 0);
    pa_assert(lock_fd_mutex);

    n_ref--;

    if (n_ref > 0)
        return;

    /* Last reference gone — shut everything down. */
    if (thread) {
        pa_thread_free(thread);
        thread = NULL;
    }

    pa_mutex_lock(lock_fd_mutex);

    /* STATE_TAKEN would mean somebody else currently owns the fd. */
    pa_assert(state != STATE_TAKEN);

    if (state == STATE_OWNING) {

        pa_assert(lock_fd >= 0);

        if (after_fork)
            /* After fork() we must not unlock the lock file (the parent
             * still holds it); just close our copy of the fd. */
            pa_close(lock_fd);
        else {
            char *lf;

            if (!(lf = pa_runtime_path(AUTOSPAWN_LOCK)))
                pa_log_warn(_("Cannot access autospawn lock."));

            /* NOTE(review): lf may be NULL here and is still passed to
             * pa_unlock_lockfile() — confirm that function tolerates a
             * NULL path. */
            pa_unlock_lockfile(lf, lock_fd);
            pa_xfree(lf);
        }
    }

    lock_fd = -1;
    state = STATE_IDLE;

    pa_mutex_unlock(lock_fd_mutex);

    pa_mutex_free(lock_fd_mutex);
    lock_fd_mutex = NULL;

    pa_close(pipe_fd[0]);
    pa_close(pipe_fd[1]);
    pipe_fd[0] = pipe_fd[1] = -1;
}
/* Tear down a memexport: release every slot still in use, detach it from
 * the owning pool's export list and free all resources. */
void pa_memexport_free(pa_memexport *e) {
    pa_mempool *pool;

    pa_assert(e);

    pool = e->pool;

    /* Give back every slot that is still exported; the slot index is
     * recovered from the pointer offset into the slots array. */
    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    /* Unlink ourselves from the pool's export list. */
    pa_mutex_lock(pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, pool->exports, e);
    pa_mutex_unlock(pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Destroy a threaded mainloop: stop the worker thread, then free the
 * wrapped mainloop and all synchronization primitives. */
void pa_threaded_mainloop_free(pa_threaded_mainloop* m) {
    pa_assert(m);

    /* Guard against being invoked from the worker thread itself: either
     * the thread object exists but has already stopped running, or we
     * are not currently executing inside the worker. */
    pa_assert((m->thread && !pa_thread_is_running(m->thread)) || !in_worker(m));

    /* Make sure the event loop thread is stopped before dismantling
     * anything it might touch. */
    pa_threaded_mainloop_stop(m);

    if (m->thread)
        pa_thread_free(m->thread);

    pa_mainloop_free(m->real_mainloop);

    pa_mutex_free(m->mutex);
    pa_cond_free(m->cond);
    pa_cond_free(m->accept_cond);

    pa_xfree(m->name);
    pa_xfree(m);
}
/* This is a copy of the function in mutex-posix.c */
/* Lazily initialize a statically-declared mutex without holding any lock:
 * the winner of a compare-and-swap race installs its freshly allocated
 * mutex; any loser frees its own allocation and adopts the winner's. */
pa_mutex* pa_static_mutex_get(pa_static_mutex *s, bool recursive, bool inherit_priority) {
    pa_mutex *m;

    pa_assert(s);

    /* First, check if already initialized and short cut */
    if ((m = pa_atomic_ptr_load(&s->ptr)))
        return m;

    /* OK, not initialized, so let's allocate, and fill in */
    m = pa_mutex_new(recursive, inherit_priority);
    if ((pa_atomic_ptr_cmpxchg(&s->ptr, NULL, m)))
        return m;

    /* The CAS lost the race: another thread installed its mutex first,
     * so discard ours and use theirs. */
    pa_mutex_free(m);

    /* Hmm, filling in failed, so someone else must have filled in
     * already */
    pa_assert_se(m = pa_atomic_ptr_load(&s->ptr));
    return m;
}
/* Destroy a memory pool: tear down all imports and exports, release the
 * shared memory segment and free all bookkeeping. If memory blocks are
 * still allocated, logs an error and (with DEBUG_REF) tries to pinpoint
 * at least one leaked block. */
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    /* NOTE(review): pa_memimport_free()/pa_memexport_free() take
     * pool->mutex internally — this only works if p->mutex is recursive
     * or these lists are already empty here; confirm. */
    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        /* Walk every initialized slot and check whether it is present on
         * the free list; a slot missing from the free list leaked. */
        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            /* Pop free-list entries onto a temporary list (so the free
             * list can be rebuilt below), stopping if we find this slot. */
            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            /* Exhausted the free list without finding the slot => leak. */
            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            /* Restore the entries we parked back onto the free list. */
            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

        /* PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}