/**
 * Check whether the first waiter in the channel's queue is parked
 * with the given status, i.e. whether a reader or a writer is
 * currently blocked on the channel.
 */
bool
fiber_channel_has_waiter(struct fiber_channel *ch,
			 enum fiber_channel_wait_status status)
{
	if (rlist_empty(&ch->waiters))
		return false;
	struct fiber *f = rlist_first_entry(&ch->waiters, struct fiber, state);
	return f->wait_pad->status == status;
}
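The typical consumer of this check is the channel's put path: if a reader is already parked on the channel, a message can be handed to it directly instead of going through the buffer. Below is a minimal sketch of such a fast path; that `wait_pad` carries a `msg` pointer and that `FIBER_CHANNEL_WAIT_DONE` marks a completed rendezvous are assumptions, not something shown in the code above.

/*
 * Sketch of a direct-handoff fast path. wait_pad->msg and
 * FIBER_CHANNEL_WAIT_DONE are assumed; fiber_channel_waiter_wakeup()
 * is the same helper used by fiber_channel_close() below.
 */
static bool
fiber_channel_try_handoff(struct fiber_channel *ch, struct ipc_msg *msg)
{
	if (!fiber_channel_has_waiter(ch, FIBER_CHANNEL_WAIT_READER))
		return false;
	struct fiber *f = rlist_first_entry(&ch->waiters, struct fiber, state);
	f->wait_pad->msg = msg;		/* place the message on the pad */
	fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_DONE);
	return true;
}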
/**
 * Close the channel: destroy all buffered messages and wake up every
 * waiter with FIBER_CHANNEL_WAIT_CLOSED. Closing an already closed
 * channel is a no-op.
 */
void
fiber_channel_close(struct fiber_channel *ch)
{
	if (ch->is_closed)
		return;

	while (ch->count) {
		struct ipc_msg *msg = fiber_channel_buffer_pop(ch);
		msg->destroy(msg);
	}

	struct fiber *f;
	while (!rlist_empty(&ch->waiters)) {
		f = rlist_first_entry(&ch->waiters, struct fiber, state);
		fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_CLOSED);
	}
	ch->is_closed = true;
}
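On the receiving side, a closed channel shows up as a failed get. A consumer loop might look like the sketch below; that `fiber_channel_get_msg_timeout()` returns -1 once the channel is closed and drained is an assumption about the surrounding API, and `process()` is a hypothetical handler.

/* Sketch: a consumer fiber draining a channel until it is closed. */
static int
consumer_f(va_list ap)
{
	struct fiber_channel *ch = va_arg(ap, struct fiber_channel *);
	struct ipc_msg *msg;
	while (fiber_channel_get_msg_timeout(ch, &msg, TIMEOUT_INFINITY) == 0) {
		process(msg);		/* hypothetical handler */
		msg->destroy(msg);
	}
	/* fiber_channel_close() woke us with FIBER_CHANNEL_WAIT_CLOSED. */
	return 0;
}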
/**
 * Join the last @a size bytes allocated on the region into one
 * contiguous chunk and return a pointer to it; the joined copy is
 * itself allocated on the region. Returns NULL on OOM.
 */
void *
region_join_nothrow(struct region *region, size_t size)
{
	if (rlist_empty(&region->slabs.slabs)) {
		assert(size == 0);
		return region_alloc_nothrow(region, 0);
	}
	struct rslab *slab = rlist_first_entry(&region->slabs.slabs,
					       struct rslab,
					       slab.next_in_list);
	if (slab->used >= size) {
		/* Don't move stuff if it's in a single chunk. */
		return (char *) rslab_data(slab) + slab->used - size;
	}
	/*
	 * Use region_reserve() to ensure slab->size is not
	 * changed when the joined region is in the same slab
	 * as the final chunk.
	 */
	char *ptr = region_reserve_nothrow(region, size);
	size_t offset = size;
	if (ptr == NULL)
		return NULL;
	/*
	 * Copy data from the last chunk to the first, i.e. in
	 * reverse order.
	 */
	while (offset > 0 && slab->used <= offset) {
		memcpy(ptr + offset - slab->used, rslab_data(slab),
		       slab->used);
		offset -= slab->used;
		slab = rlist_next_entry(slab, slab.next_in_list);
	}
	if (offset > 0)
		memcpy(ptr, rslab_data(slab) + slab->used - offset, offset);
	region_alloc_nothrow(region, size);
	return ptr;
}
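The point of the join is that a caller can build an object piecewise, with each piece possibly landing in a different slab, and still end up with one contiguous buffer. A sketch under the API shown here, given an existing `struct region *region` (NULL checks elided for brevity):

/* Sketch: build a string in two chunks, then make it contiguous. */
char *a = region_alloc_nothrow(region, 5);
memcpy(a, "hello", 5);
char *b = region_alloc_nothrow(region, 6);	/* may start a new slab */
memcpy(b, " world", 6);
/* Copy the last 11 allocated bytes into one contiguous chunk. */
char *joined = region_join_nothrow(region, 11);	/* "hello world" */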
/**
 * Release all memory down to @a used; @a used has to be previously
 * obtained by calling region_used().
 */
void
region_truncate(struct region *region, size_t used)
{
	ssize_t cut_size = region_used(region) - used;
	assert(cut_size >= 0);

	while (!rlist_empty(&region->slabs.slabs)) {
		struct rslab *slab = rlist_first_entry(&region->slabs.slabs,
						       struct rslab,
						       slab.next_in_list);
		if (slab->used > (size_t) cut_size) {
			/* This is the last slab to trim. */
			slab->used -= cut_size;
			cut_size = 0;
			break;
		}
		cut_size -= slab->used;
		/* Remove the entire slab. */
		slab_list_del(&region->slabs, &slab->slab, next_in_list);
		slab_put(region->cache, &slab->slab);
	}
	assert(cut_size == 0);
	region->slabs.stats.used = used;
}
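This pairs with region_used() in the canonical savepoint pattern: record the watermark, allocate freely, and truncate back when done. A minimal sketch, using only the functions shown here:

/* Sketch: scoped temporary allocations on a region. */
size_t svp = region_used(region);	/* savepoint */
char *tmp = region_alloc_nothrow(region, 1024);
if (tmp != NULL) {
	/* ... use tmp as scratch space ... */
}
region_truncate(region, svp);	/* frees everything allocated after svp */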
static struct space *
blackhole_engine_create_space(struct engine *engine, struct space_def *def,
			      struct rlist *key_list)
{
	/* Blackhole spaces can't have indexes. */
	if (!rlist_empty(key_list)) {
		diag_set(ClientError, ER_UNSUPPORTED, "Blackhole", "indexes");
		return NULL;
	}
	struct space *space = (struct space *)calloc(1, sizeof(*space));
	if (space == NULL) {
		diag_set(OutOfMemory, sizeof(*space), "calloc",
			 "struct space");
		return NULL;
	}
	/* Allocate tuples on the runtime arena, but check the space format. */
	struct tuple_format *format;
	format = tuple_format_new(&tuple_format_runtime->vtab, NULL, NULL, 0,
				  def->fields, def->field_count,
				  def->exact_field_count, def->dict,
				  false, false);
	if (format == NULL) {
		free(space);
		return NULL;
	}
	tuple_format_ref(format);

	if (space_create(space, engine, &blackhole_space_vtab,
			 def, key_list, format) != 0) {
		tuple_format_unref(format);
		free(space);
		return NULL;
	}
	return space;
}
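For context, this function is an engine callback; the sketch below shows how it is presumably wired into the engine's virtual table. The member name follows the engine_vtab convention seen elsewhere and is an assumption here; all other callbacks are elided.

/* Sketch: wiring create_space into the engine's virtual table. */
static const struct engine_vtab blackhole_engine_vtab = {
	/* ... other callbacks elided ... */
	.create_space = blackhole_engine_create_space,
};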