/* Post a message to the queue and block until the dispatching side has
 * finished it via pa_asyncmsgq_done(); returns the result code set there.
 * The item lives on this thread's stack: no references are taken on
 * object/chunk because the caller stays alive (blocked) for the whole
 * round trip. */
int pa_asyncmsgq_send(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk) {
    struct asyncmsgq_item i;
    pa_assert(PA_REFCNT_VALUE(a) > 0);

    i.code = code;
    i.object = object;
    i.userdata = (void*) userdata;
    i.free_cb = NULL;
    i.ret = -1;
    i.offset = offset;
    if (chunk) {
        pa_assert(chunk->memblock);
        i.memchunk = *chunk;
    } else
        pa_memchunk_reset(&i.memchunk);

    /* Recycle a semaphore from the static free list when possible. */
    if (!(i.semaphore = pa_flist_pop(PA_STATIC_FLIST_GET(semaphores))))
        i.semaphore = pa_semaphore_new(0);

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    /* The push must succeed: we pass wait=true, so it may block but not fail. */
    pa_assert_se(pa_asyncq_push(a->asyncq, &i, true) == 0);
    pa_mutex_unlock(a->mutex);

    /* Block until pa_asyncmsgq_done() posts this semaphore. */
    pa_semaphore_wait(i.semaphore);

    /* Return the semaphore to the free list for reuse, or destroy it. */
    if (pa_flist_push(PA_STATIC_FLIST_GET(semaphores), i.semaphore) < 0)
        pa_semaphore_free(i.semaphore);

    return i.ret;
}
/* Destroy the async queue. If free_cb is given, it is applied both to
 * items still pending in the queue and to items buffered locally after
 * an overrun. */
void pa_asyncq_free(pa_asyncq *l, pa_free_cb_t free_cb) {
    struct localq *overflow;

    pa_assert(l);

    /* Drain whatever the reader side never consumed. */
    if (free_cb) {
        void *item;

        while ((item = pa_asyncq_pop(l, 0)))
            free_cb(item);
    }

    /* Dispose of the locally buffered overflow entries as well. */
    while ((overflow = l->localq)) {
        if (free_cb)
            free_cb(overflow->data);

        PA_LLIST_REMOVE(struct localq, l->localq, overflow);

        if (pa_flist_push(PA_STATIC_FLIST_GET(localq), overflow) < 0)
            pa_xfree(overflow);
    }

    pa_fdsem_free(l->read_fdsem);
    pa_fdsem_free(l->write_fdsem);
    pa_xfree(l);
}
/* Unlink q from bq's block list, keep the read/write cursors valid,
 * release its memblock and recycle the node. */
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);
    pa_assert(bq->n_blocks >= 1);

    /* Detach from the doubly linked list, fixing head/tail as needed. */
    if (!q->prev) {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    } else
        q->prev->next = q->next;

    if (!q->next) {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    } else
        q->next->prev = q->prev;

    /* Don't leave the cursors dangling on the removed node. */
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
/* No lock necessary */
/* Wrap caller-supplied memory d in a memblock; free_cb is invoked on
 * the data when the block is finally released. */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, bool read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    /* Reuse a spare header from the free list, or allocate a new one. */
    b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks));
    if (!b)
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
/* Enqueue p, never failing: if the lock-free queue is full the item is
 * buffered in a local overflow list and pushed on a later call. */
void pa_asyncq_post(pa_asyncq *l, void *p) {
    struct localq *entry;

    pa_assert(l);
    pa_assert(p);

    /* Fast path: flush any backlog, then push directly. */
    if (flush_postq(l, FALSE) && pa_asyncq_push(l, p, FALSE) >= 0)
        return;

    /* OK, we couldn't push anything in the queue. So let's queue it
     * locally and push it later */
    if (pa_log_ratelimit())
        pa_log_warn("q overrun, queuing locally");

    entry = pa_flist_pop(PA_STATIC_FLIST_GET(localq));
    if (!entry)
        entry = pa_xnew(struct localq, 1);

    entry->data = p;
    PA_LLIST_PREPEND(struct localq, l->localq, entry);

    if (!l->last_localq)
        l->last_localq = entry;
}
/* Tear down an asyncmsgq, releasing every still-pending posted item.
 * No blocking sender may remain (those would own a semaphore). */
static void asyncmsgq_free(pa_asyncmsgq *a) {
    struct asyncmsgq_item *item;

    pa_assert(a);

    while ((item = pa_asyncq_pop(a->asyncq, false))) {

        /* Synchronous senders would still be blocked on this item. */
        pa_assert(!item->semaphore);

        if (item->object)
            pa_msgobject_unref(item->object);

        if (item->memchunk.memblock)
            pa_memblock_unref(item->memchunk.memblock);

        if (item->free_cb)
            item->free_cb(item->userdata);

        if (pa_flist_push(PA_STATIC_FLIST_GET(asyncmsgq), item) < 0)
            pa_xfree(item);
    }

    pa_asyncq_free(a->asyncq, NULL);
    pa_mutex_free(a->mutex);
    pa_xfree(a);
}
/* Finish processing of the message currently being dispatched. For a
 * synchronous (send) item, hand back the return code and wake the
 * blocked sender, which owns the item's memory; for a posted item,
 * release the references the poster transferred and recycle the item. */
void pa_asyncmsgq_done(pa_asyncmsgq *a, int ret) {
    /* FIX: assert a before PA_REFCNT_VALUE(a), which dereferences it —
     * the original order made the NULL check unreachable/useless. */
    pa_assert(a);
    pa_assert(PA_REFCNT_VALUE(a) > 0);
    pa_assert(a->current);

    if (a->current->semaphore) {
        /* Wake the sender blocked in pa_asyncmsgq_send(). */
        a->current->ret = ret;
        pa_semaphore_post(a->current->semaphore);
    } else {
        /* Fire-and-forget post: drop everything the poster handed over. */
        if (a->current->free_cb)
            a->current->free_cb(a->current->userdata);

        if (a->current->object)
            pa_msgobject_unref(a->current->object);

        if (a->current->memchunk.memblock)
            pa_memblock_unref(a->current->memchunk.memblock);

        if (pa_flist_push(PA_STATIC_FLIST_GET(asyncmsgq), a->current) < 0)
            pa_xfree(a->current);
    }

    a->current = NULL;
}
/* Asynchronously post a message. Ownership of userdata passes to the
 * queue (freed via free_cb when the item is done); object and chunk
 * get their own references. */
void pa_asyncmsgq_post(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk, pa_free_cb_t free_cb) {
    struct asyncmsgq_item *item;

    pa_assert(PA_REFCNT_VALUE(a) > 0);

    item = pa_flist_pop(PA_STATIC_FLIST_GET(asyncmsgq));
    if (!item)
        item = pa_xnew(struct asyncmsgq_item, 1);

    item->code = code;
    item->object = object ? pa_msgobject_ref(object) : NULL;
    item->userdata = (void*) userdata;
    item->free_cb = free_cb;
    item->offset = offset;
    item->semaphore = NULL;

    if (!chunk)
        pa_memchunk_reset(&item->memchunk);
    else {
        pa_assert(chunk->memblock);
        item->memchunk = *chunk;
        pa_memblock_ref(item->memchunk.memblock);
    }

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    pa_asyncq_post(a->asyncq, item);
    pa_mutex_unlock(a->mutex);
}
/* Pop the front item, or NULL if the queue is empty. The entry node
 * is recycled through the static free list. */
void* pa_queue_pop(pa_queue *q) {
    struct queue_entry *head;
    void *data;

    pa_assert(q);

    head = q->front;
    if (!head)
        return NULL;

    q->front = head->next;

    if (q->back == head) {
        /* That was the only entry; the queue is now empty. */
        pa_assert(!head->next);
        q->back = NULL;
    }

    data = head->data;

    if (pa_flist_push(PA_STATIC_FLIST_GET(entries), head) < 0)
        pa_xfree(head);

    q->length--;

    return data;
}
/* Remove item i from the priority queue and return its value. The
 * item struct is recycled through the static free list. */
void* pa_prioq_remove(pa_prioq *q, pa_prioq_item *i) {
    void *value;

    pa_assert(q);
    pa_assert(i);
    pa_assert(q->n_items >= 1);

    value = i->value;

    if (i->idx == q->n_items-1)
        /* Removing the final slot needs no reshuffling. */
        q->n_items--;
    else {
        /* Move the last element into the vacated slot and restore
         * the heap property below it. */
        q->items[i->idx] = q->items[q->n_items-1];
        q->items[i->idx]->idx = i->idx;
        q->n_items--;
        shuffle_down(q, i->idx);
    }

    if (pa_flist_push(PA_STATIC_FLIST_GET(items), i) < 0)
        pa_xfree(i);

    return value;
}
/* Release a tagstruct. Only dynamically grown payloads own their data
 * buffer; the struct itself is recycled via the free list if possible. */
void pa_tagstruct_free(pa_tagstruct *t) {
    pa_assert(t);

    if (t->type == PA_TAGSTRUCT_DYNAMIC)
        pa_xfree(t->data);

    if (pa_flist_push(PA_STATIC_FLIST_GET(tagstructs), t) < 0)
        pa_xfree(t);
}
/* Insert p into the idxset, assigning it a fresh monotonically
 * increasing index (reported via *idx if non-NULL). Returns 0 on
 * success, or -1 if an equal entry already exists — in that case *idx
 * is set to the existing entry's index. The entry is linked into three
 * structures at once: a data hash table, an index hash table and the
 * insertion-ordered iteration list. */
int pa_idxset_put(pa_idxset*s, void *p, uint32_t *idx) {
    unsigned hash;
    struct idxset_entry *e;

    pa_assert(s);

    hash = s->hash_func(p) % NBUCKETS;

    /* Refuse duplicates; report the existing index instead. */
    if ((e = data_scan(s, hash, p))) {
        if (idx)
            *idx = e->idx;
        return -1;
    }

    if (!(e = pa_flist_pop(PA_STATIC_FLIST_GET(entries))))
        e = pa_xnew(struct idxset_entry, 1);

    e->data = p;
    e->idx = s->current_index++;

    /* Insert into data hash table (prepend to the bucket chain) */
    e->data_next = BY_DATA(s)[hash];
    e->data_previous = NULL;
    if (BY_DATA(s)[hash])
        BY_DATA(s)[hash]->data_previous = e;
    BY_DATA(s)[hash] = e;

    /* hash is reused: from here on it addresses the index table. */
    hash = e->idx % NBUCKETS;

    /* Insert into index hash table */
    e->index_next = BY_INDEX(s)[hash];
    e->index_previous = NULL;
    if (BY_INDEX(s)[hash])
        BY_INDEX(s)[hash]->index_previous = e;
    BY_INDEX(s)[hash] = e;

    /* Insert into iteration list — appended at the tail so iteration
     * preserves insertion order. */
    e->iterate_previous = s->iterate_list_tail;
    e->iterate_next = NULL;
    if (s->iterate_list_tail) {
        pa_assert(s->iterate_list_head);
        s->iterate_list_tail->iterate_next = e;
    } else {
        pa_assert(!s->iterate_list_head);
        s->iterate_list_head = e;
    }
    s->iterate_list_tail = e;

    s->n_entries++;
    pa_assert(s->n_entries >= 1);

    if (idx)
        *idx = e->idx;

    return 0;
}
/* No lock necessary */
/* Allocate a memblock backed by a mempool slot. Returns NULL when the
 * pool is disabled via $PULSE_MEMPOOL_DISABLE, a slot cannot be
 * allocated, or the requested length does not fit a slot — callers
 * then fall back to another allocation strategy. */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    /* Cached tri-state: 0 = not checked yet, 1 = disabled, -1 = enabled.
     * NOTE(review): plain static, not atomic — concurrent first calls
     * race on it, though they'd write the same value; confirm callers. */
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */
    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        /* Header and payload both fit in one slot: place the
         * pa_memblock at the slot start, the data right behind it. */
        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        /* Only the payload fits in a slot: allocate the header
         * separately (recycled from the free list if possible). */
        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        /* Too large for the pool entirely; count it and bail out. */
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = false;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* Drop one reference; on the last one, free a heap payload and recycle
 * the packet header through the free list. */
void pa_packet_unref(pa_packet *p) {
    pa_assert(p);
    pa_assert(PA_REFCNT_VALUE(p) >= 1);

    if (PA_REFCNT_DEC(p) > 0)
        return;

    if (p->type == PA_PACKET_DYNAMIC)
        pa_xfree(p->data);

    if (pa_flist_push(PA_STATIC_FLIST_GET(packets), p) < 0)
        pa_xfree(p);
}
/* Deep-copy a tagstruct: the payload is duplicated on the heap and the
 * copy is always PA_TAGSTRUCT_DYNAMIC, regardless of t's type. */
pa_tagstruct *pa_tagstruct_copy(pa_tagstruct*t) {
    pa_tagstruct*tc;

    pa_assert(t); /* consistent with the other constructors */

    if (!(tc = pa_flist_pop(PA_STATIC_FLIST_GET(tagstructs))))
        tc = pa_xnew(pa_tagstruct, 1);
    tc->data = pa_xmemdup(t->data, t->length);
    /* FIX: tc->length was never initialized — a recycled struct kept a
     * stale length, a fresh one had garbage. Mirror the source length. */
    tc->allocated = tc->length = t->length;
    tc->rindex = 0;
    tc->type = PA_TAGSTRUCT_DYNAMIC;

    return tc;
}
/* Create an empty tagstruct for writing. It starts out using the
 * inline (appended) buffer and switches to a heap buffer only when
 * that space runs out. */
pa_tagstruct *pa_tagstruct_new(void) {
    pa_tagstruct *t;

    t = pa_flist_pop(PA_STATIC_FLIST_GET(tagstructs));
    if (!t)
        t = pa_xnew(pa_tagstruct, 1);

    t->data = t->per_type.appended;
    t->allocated = MAX_APPENDED_SIZE;
    t->length = t->rindex = 0;
    t->type = PA_TAGSTRUCT_APPENDED;

    return t;
}
pa_tagstruct *pa_tagstruct_new_fixed(const uint8_t* data, size_t length) { pa_tagstruct*t; pa_assert(data && length); if (!(t = pa_flist_pop(PA_STATIC_FLIST_GET(tagstructs)))) t = pa_xnew(pa_tagstruct, 1); t->data = (uint8_t*) data; t->allocated = t->length = length; t->rindex = 0; t->type = PA_TAGSTRUCT_FIXED; return t; }
/* Self-locked */
/* Look up or create a memblock for an imported SHM block. Returns a
 * new reference on success, or NULL when the import slot table is
 * full, the segment cannot be attached, or offset/size fall outside
 * the segment. */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    /* Already imported? Just take another reference. */
    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    /* Attach the SHM segment on first use. */
    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    /* Reject ranges that don't fit inside the attached segment. */
    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = true;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
/* Create a packet that takes ownership of the caller-allocated buffer
 * data; it will be freed with pa_xfree() on the last unref. */
pa_packet* pa_packet_new_dynamic(void* data, size_t length) {
    pa_packet *p;

    pa_assert(data);
    pa_assert(length > 0);

    p = pa_flist_pop(PA_STATIC_FLIST_GET(packets));
    if (!p)
        p = pa_xnew(pa_packet, 1);

    PA_REFCNT_INIT(p);
    p->length = length;
    p->data = data;
    p->type = PA_PACKET_DYNAMIC;

    return p;
}
pa_packet* pa_packet_new(size_t length) { pa_packet *p; pa_assert(length > 0); if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(packets)))) p = pa_xnew(pa_packet, 1); PA_REFCNT_INIT(p); p->length = length; if (length > MAX_APPENDED_SIZE) { p->data = pa_xmalloc(length); p->type = PA_PACKET_DYNAMIC; } else { p->data = p->per_type.appended; p->type = PA_PACKET_APPENDED; } return p; }
/* Try to move the locally buffered overflow items into the lock-free
 * queue, oldest first. Returns TRUE when the backlog is fully drained,
 * FALSE as soon as a push fails. */
static pa_bool_t flush_postq(pa_asyncq *l, pa_bool_t wait_op) {
    struct localq *u;

    pa_assert(l);

    while ((u = l->last_localq)) {

        if (push(l, u->data, wait_op) < 0)
            return FALSE;

        /* The list is prepended on overflow, so the oldest entry sits
         * at last_localq and we walk towards the head. */
        l->last_localq = u->prev;

        PA_LLIST_REMOVE(struct localq, l->localq, u);

        if (pa_flist_push(PA_STATIC_FLIST_GET(localq), u) < 0)
            pa_xfree(u);
    }

    return TRUE;
}
/* Add p to the priority queue; returns the item handle used for later
 * removal. */
pa_prioq_item* pa_prioq_put(pa_prioq *q, void *p) {
    pa_prioq_item *item;

    pa_assert(q);

    /* Grow the backing array geometrically when it is full. */
    if (q->n_items >= q->n_allocated) {
        q->n_allocated = PA_MAX(q->n_items+1, q->n_allocated)*2;
        q->items = pa_xrealloc(q->items, sizeof(pa_prioq_item*) * q->n_allocated);
    }

    item = pa_flist_pop(PA_STATIC_FLIST_GET(items));
    if (!item)
        item = pa_xnew(pa_prioq_item, 1);

    item->value = p;
    item->idx = q->n_items++;

    /* Restore the heap invariant from the new leaf upwards. */
    shuffle_up(q, item);

    return item;
}
/* Unlink entry e from all three structures an idxset keeps it in —
 * the iteration list, the data hash chain and the index hash chain —
 * then recycle the node through the free list. */
static void remove_entry(pa_idxset *s, struct idxset_entry *e) {
    pa_assert(s);
    pa_assert(e);

    /* Remove from iteration linked list */
    if (e->iterate_next)
        e->iterate_next->iterate_previous = e->iterate_previous;
    else
        s->iterate_list_tail = e->iterate_previous;

    if (e->iterate_previous)
        e->iterate_previous->iterate_next = e->iterate_next;
    else
        s->iterate_list_head = e->iterate_next;

    /* Remove from data hash table */
    if (e->data_next)
        e->data_next->data_previous = e->data_previous;

    if (e->data_previous)
        e->data_previous->data_next = e->data_next;
    else {
        /* e was the chain head: recompute its bucket to unhook it. */
        unsigned hash = s->hash_func(e->data) % NBUCKETS;
        BY_DATA(s)[hash] = e->data_next;
    }

    /* Remove from index hash table */
    if (e->index_next)
        e->index_next->index_previous = e->index_previous;

    if (e->index_previous)
        e->index_previous->index_next = e->index_next;
    else
        BY_INDEX(s)[e->idx % NBUCKETS] = e->index_next;

    if (pa_flist_push(PA_STATIC_FLIST_GET(entries), e) < 0)
        pa_xfree(e);

    pa_assert(s->n_entries >= 1);
    s->n_entries--;
}
/* No lock necessary */
/* Wrap caller-owned memory d in a memblock without taking ownership;
 * the caller must keep d alive for the block's lifetime. */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    /* FIX: is_silence was left uninitialized here, unlike the other
     * constructors (pa_memblock_new_user/_pool/pa_memimport_get) which
     * all clear it — a recycled header could carry a stale flag. */
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* Append p at the tail of the queue. Entry nodes are recycled through
 * the static free list. */
void pa_queue_push(pa_queue *q, void *p) {
    struct queue_entry *e;

    pa_assert(q);
    pa_assert(p);

    e = pa_flist_pop(PA_STATIC_FLIST_GET(entries));
    if (!e)
        e = pa_xnew(struct queue_entry, 1);

    e->data = p;
    e->next = NULL;

    if (!q->back) {
        /* Empty queue: this entry becomes both head and tail. */
        pa_assert(!q->front);
        q->front = e;
    } else {
        pa_assert(q->front);
        q->back->next = e;
    }

    q->back = e;
    q->length++;
}
/* Release a memblock whose refcount hit zero: return the backing
 * memory to its owner depending on the block type (user callback,
 * pool slot, import segment) and recycle the header where worthwhile. */
static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            /* Header can be reused; the data is not ours to free. */
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;

        case PA_MEMBLOCK_APPENDED:
            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            /* Notify the exporting peer that the block is released. */
            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            bool call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            /* Only POOL_EXTERNAL headers live outside the slot and
             * need separate disposal. */
            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* Insert uchunk into the memblockq at the current write index,
 * overwriting any overlapping data already queued. Returns -1 if the
 * chunk does not pass can_push(). chunk alignment to bq->base is
 * asserted, not checked.
 * NOTE(review): this definition appears truncated in this excerpt —
 * the code after the placement loop (actually linking in the new
 * chunk and returning) is not visible here; confirm against the
 * complete file. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    pa_assert(uchunk->length % bq->base == 0);
    pa_assert(uchunk->index % bq->base == 0);

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */
    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                /* Truncated to zero length: drop the entry entirely. */
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }
/* Release a memblock whose refcount hit zero: return the backing
 * memory to its owner per block type and recycle the header.
 * NOTE(review): a second, differing definition of memblock_free also
 * appears earlier in this file (the other one frees APPENDED blocks
 * outright and uses stricter asserts) — only one can compile; confirm
 * which revision is intended. */
static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            /* Header (and, for APPENDED, its inline data) is recycled. */
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);
            pa_mutex_unlock(import->mutex);

            /* Notify the exporting peer that the block is released. */
            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            /* Only POOL_EXTERNAL headers live outside the slot. */
            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}