/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
static void entry_remove_and_free(pa_autoload_entry *e) {
    pa_assert(e);
    pa_assert(e->core);

    pa_idxset_remove_by_data(e->core->autoload_idxset, e, NULL);
    pa_hashmap_remove(e->core->autoload_hashmap, e->name);

    entry_free(e);
}
static void service_free(struct service *s) {
    pa_assert(s);

    pa_hashmap_remove(s->userdata->services, s->device);

    if (s->service)
        DNSServiceRefDeallocate(s->service);

    pa_xfree(s->service_name);
    pa_xfree(s);
}
static pa_hook_result_t device_unlink_hook_cb(pa_core *c, pa_object *o, struct userdata *u) {
    struct device_info *d;

    pa_assert(c);
    pa_object_assert_ref(o);
    pa_assert(u);

    if ((d = pa_hashmap_remove(u->device_infos, o)))
        device_info_free(d);

    return PA_HOOK_OK;
}
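/* Hedged sketch, not part of the original file: how a hook callback like the
 * one above is typically registered. pa_hook_connect(), PA_HOOK_NORMAL and
 * the PA_CORE_HOOK_* constants are standard PulseAudio core API, but which
 * hooks this particular module connects, the connect_hooks() wrapper and the
 * *_slot fields in struct userdata are assumptions for illustration. */
static void connect_hooks(pa_module *m, struct userdata *u) {
    /* The same callback can serve both hooks because it takes a pa_object* */
    u->sink_unlink_slot =
        pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK],
                        PA_HOOK_NORMAL, (pa_hook_cb_t) device_unlink_hook_cb, u);
    u->source_unlink_slot =
        pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK],
                        PA_HOOK_NORMAL, (pa_hook_cb_t) device_unlink_hook_cb, u);
}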
int pa_property_remove(pa_core *c, const char *name) {
    pa_property *p;

    pa_assert(c);
    pa_assert(name);
    pa_assert(c->properties);

    if (!(p = pa_hashmap_remove(c->properties, name)))
        return -1;

    property_free(p);
    return 0;
}
int pa_shared_remove(pa_core *c, const char *name) {
    pa_shared *p;

    pa_assert(c);
    pa_assert(name);
    pa_assert(c->shared);

    if (!(p = pa_hashmap_remove(c->shared, name)))
        return -1;

    shared_free(p);
    return 0;
}
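/* Usage sketch (assumed caller code): pa_shared_remove() pairs with
 * pa_shared_set()/pa_shared_get(), its real counterparts; the key string and
 * the drop_shared_state() wrapper are made up for illustration. */
static void drop_shared_state(pa_core *c) {
    if (pa_shared_remove(c, "my-module-state") < 0)
        pa_log_warn("Shared property \"my-module-state\" was never set.");
}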
static void service_free(struct service *s) {
    pa_assert(s);

    pa_hashmap_remove(s->userdata->services, s->device);

    if (s->entry_group) {
        pa_log_debug("Removing entry group for %s.", s->service_name);
        avahi_entry_group_free(s->entry_group);
    }

    pa_xfree(s->service_name);
    pa_xfree(s);
}
static void remove_card(struct userdata *u, struct udev_device *dev) {
    struct device *d;

    pa_assert(u);
    pa_assert(dev);

    if (!(d = pa_hashmap_remove(u->devices, udev_device_get_devpath(dev))))
        return;

    pa_log_info("Card %s removed.", d->path);

    if (d->module != PA_INVALID_INDEX)
        pa_module_unload_request_by_index(u->core, d->module, true);

    device_free(d);
}
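/* Hedged sketch, assumed context: remove_card() would be driven from a udev
 * monitor event handler roughly like this. udev_device_get_action() is the
 * real libudev accessor; process_device() is a hypothetical name. */
static void process_device(struct userdata *u, struct udev_device *dev) {
    const char *action;

    pa_assert(u);
    pa_assert(dev);

    if (!(action = udev_device_get_action(dev)))
        return;

    if (pa_streq(action, "remove"))
        remove_card(u, dev);

    /* "add"/"change" events would be handled here */
}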
static void browser_cb(
        AvahiServiceBrowser *b,
        AvahiIfIndex interface, AvahiProtocol protocol,
        AvahiBrowserEvent event,
        const char *name, const char *type, const char *domain,
        AvahiLookupResultFlags flags,
        void *userdata) {

    struct userdata *u = userdata;
    struct tunnel *t;

    pa_assert(u);

    if (flags & AVAHI_LOOKUP_RESULT_LOCAL)
        return;

    t = tunnel_new(interface, protocol, name, type, domain);

    if (event == AVAHI_BROWSER_NEW) {

        if (!pa_hashmap_get(u->tunnels, t))
            if (!(avahi_service_resolver_new(u->client, interface, protocol, name, type, domain, AVAHI_PROTO_UNSPEC, 0, resolver_cb, u)))
                pa_log("avahi_service_resolver_new() failed: %s", avahi_strerror(avahi_client_errno(u->client)));

        /* We ignore the returned resolver object here, since we don't
         * need to attach any special data to it, and we can still
         * destroy it from the callback */

    } else if (event == AVAHI_BROWSER_REMOVE) {
        struct tunnel *t2;

        if ((t2 = pa_hashmap_get(u->tunnels, t))) {
            pa_module_unload_request_by_index(u->core, t2->module_index, TRUE);
            pa_hashmap_remove(u->tunnels, t2);
            tunnel_free(t2);
        }
    }

    tunnel_free(t);
}
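/* For context, a hedged sketch of how browser_cb() gets attached.
 * avahi_service_browser_new() is the real Avahi call; treating
 * "_pulse-sink._tcp" as the browsed service type and the start_browsing()
 * wrapper are assumptions for illustration. */
static AvahiServiceBrowser* start_browsing(struct userdata *u) {
    return avahi_service_browser_new(
            u->client,
            AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
            "_pulse-sink._tcp", NULL,
            0, browser_cb, u);
}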
int pa_cond_wait(pa_cond *c, pa_mutex *m) {
    HANDLE event;

    assert(c);
    assert(m);

    event = CreateEvent(NULL, FALSE, FALSE, NULL);
    assert(event);

    pa_hashmap_put(c->wait_events, event, event);

    pa_mutex_unlock(m);

    WaitForSingleObject(event, INFINITE);

    pa_mutex_lock(m);

    pa_hashmap_remove(c->wait_events, event);
    CloseHandle(event);

    return 0;
}
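/* Usage sketch (assumed caller code, not part of this file): the portable
 * condition-variable contract requires re-checking the predicate in a loop
 * around pa_cond_wait(). struct queue, wait_for_item(), queue_has_data() and
 * queue_pop() are hypothetical helpers. */
static void* wait_for_item(pa_cond *c, pa_mutex *m, struct queue *q) {
    void *item;

    pa_mutex_lock(m);
    while (!queue_has_data(q))   /* re-check after every wakeup */
        pa_cond_wait(c, m);
    item = queue_pop(q);
    pa_mutex_unlock(m);

    return item;
}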
static pa_hook_result_t process(struct userdata *u, pa_sink_input *i, pa_bool_t create) {
    pa_bool_t cork = FALSE;
    const char *role;

    pa_assert(u);
    pa_sink_input_assert_ref(i);

    if (!create)
        pa_hashmap_remove(u->cork_state, i);

    if (!(role = pa_proplist_gets(i->proplist, PA_PROP_MEDIA_ROLE)))
        return PA_HOOK_OK;

    if (!pa_streq(role, "phone") &&
        !pa_streq(role, "music") &&
        !pa_streq(role, "video"))
        return PA_HOOK_OK;

    cork = shall_cork(i->sink, create ? NULL : i);
    apply_cork(u, i->sink, create ? NULL : i, cork);

    return PA_HOOK_OK;
}
static void apply_cork(struct userdata *u, pa_sink *s, pa_sink_input *ignore, pa_bool_t cork) {
    pa_sink_input *j;
    uint32_t idx;

    pa_assert(u);
    pa_sink_assert_ref(s);

    for (j = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); j; j = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
        pa_bool_t corked;
        const char *role;

        if (j == ignore)
            continue;

        if (!(role = pa_proplist_gets(j->proplist, PA_PROP_MEDIA_ROLE)))
            continue;

        if (!pa_streq(role, "video") &&
            !pa_streq(role, "music"))
            continue;

        corked = !!pa_hashmap_get(u->cork_state, j);

        if (cork && !corked) {
            pa_hashmap_put(u->cork_state, j, PA_INT_TO_PTR(1));
            pa_sink_input_set_mute(j, TRUE, FALSE);
            pa_sink_input_send_event(j, PA_STREAM_EVENT_REQUEST_CORK, NULL);
        } else if (!cork) {
            pa_hashmap_remove(u->cork_state, j);

            if (corked) {
                pa_sink_input_set_mute(j, FALSE, FALSE);
                pa_sink_input_send_event(j, PA_STREAM_EVENT_REQUEST_UNCORK, NULL);
            }
        }
    }
}
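/* Hedged sketch of how process() above is typically driven: callbacks on the
 * sink-input put/unlink hooks forwarding to it. The wiring shown is an
 * assumption about the surrounding module; the hook callback shape matches
 * standard PulseAudio core hooks. */
static pa_hook_result_t sink_input_put_cb(pa_core *c, pa_sink_input *i, struct userdata *u) {
    pa_core_assert_ref(c);
    pa_sink_input_assert_ref(i);

    return process(u, i, TRUE);
}

static pa_hook_result_t sink_input_unlink_cb(pa_core *c, pa_sink_input *i, struct userdata *u) {
    pa_core_assert_ref(c);
    pa_sink_input_assert_ref(i);

    return process(u, i, FALSE);
}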
static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable amount of memory */
            pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            bool call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}