/* * CTL_READ_HANDLER(desc) -- reads the information about allocation class */ static int CTL_READ_HANDLER(desc)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; uint8_t id; struct ctl_index *idx = SLIST_FIRST(indexes); ASSERTeq(strcmp(idx->name, "class_id"), 0); if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) { ERR("class id outside of the allowed range"); errno = ERANGE; return -1; } id = (uint8_t)idx->value; struct alloc_class *c = alloc_class_by_id( heap_alloc_classes(&pop->heap), id); if (c == NULL) { ERR("class with the given id does not exist"); errno = ENOENT; return -1; } enum pobj_header_type user_htype = MAX_POBJ_HEADER_TYPES; switch (c->header_type) { case HEADER_LEGACY: user_htype = POBJ_HEADER_LEGACY; break; case HEADER_COMPACT: user_htype = POBJ_HEADER_COMPACT; break; case HEADER_NONE: user_htype = POBJ_HEADER_NONE; break; default: ASSERT(0); /* unreachable */ break; } struct pobj_alloc_class_desc *p = arg; p->units_per_block = c->type == CLASS_HUGE ? 0 : c->run.nallocs; p->header_type = user_htype; p->unit_size = c->unit_size; p->class_id = c->id; p->alignment = c->flags & CHUNK_FLAG_ALIGNED ? c->run.alignment : 0; return 0; }
/*
 * palloc_reservation_create -- creates a volatile reservation of a
 *	memory block.
 *
 * The first step in the allocation of a new block is reserving it in
 * the transient heap - which is represented by the bucket abstraction.
 *
 * To provide optimal scaling for multi-threaded applications and reduce
 * fragmentation the appropriate bucket is chosen depending on the
 * current thread context and to which allocation class the requested
 * size falls into.
 *
 * Once the bucket is selected, just enough memory is reserved for the
 * requested size. The underlying block allocation algorithm
 * (best-fit, next-fit, ...) varies depending on the bucket container.
 *
 * On success returns 0 and fills out 'out'; on failure returns -1 and
 * sets errno (EINVAL for an unsatisfiable size/class, ECANCELED when
 * the constructor aborted the reservation, or the bucket error).
 */
static int
palloc_reservation_create(struct palloc_heap *heap, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct pobj_action_internal *out)
{
	int err = 0;

	struct memory_block *new_block = &out->m;

	ASSERT(class_id < UINT8_MAX);

	/* class_id == 0 means "pick the best class for this size" */
	struct alloc_class *c = class_id == 0 ?
		heap_get_best_class(heap, size) :
		alloc_class_by_id(heap_alloc_classes(heap),
			(uint8_t)class_id);

	if (c == NULL) {
		/* %zu: 'size' is a size_t, %lu is wrong on LLP64 targets */
		ERR("no allocation class for size %zu bytes", size);
		errno = EINVAL;
		return -1;
	}

	/*
	 * The caller provided size in bytes, but buckets operate in
	 * 'size indexes' which are multiples of the block size in the
	 * bucket.
	 *
	 * For example, to allocate 500 bytes from a bucket that
	 * provides 256 byte blocks two memory 'units' are required.
	 */
	ssize_t size_idx = alloc_class_calc_size_idx(c, size);
	if (size_idx < 0) {
		ERR("allocation class not suitable for size %zu bytes",
			size);
		errno = EINVAL;
		return -1;
	}
	ASSERT(size_idx <= UINT32_MAX);
	new_block->size_idx = (uint32_t)size_idx;

	struct bucket *b = heap_bucket_acquire(heap, c);

	err = heap_get_bestfit_block(heap, b, new_block);
	if (err != 0)
		goto out;

	if (alloc_prep_block(heap, new_block, constructor, arg,
		extra_field, object_flags, &out->offset) != 0) {
		/*
		 * Constructor returned non-zero value which means
		 * the memory block reservation has to be rolled back.
		 */
		if (new_block->type == MEMORY_BLOCK_HUGE) {
			bucket_insert_block(b, new_block);
		}
		err = ECANCELED;
		goto out;
	}

	/*
	 * Each as of yet unfulfilled reservation needs to be tracked in the
	 * runtime state.
	 * The memory block cannot be put back into the global state unless
	 * there are no active reservations.
	 */
	if ((out->resvp = bucket_current_resvp(b)) != NULL)
		util_fetch_and_add64(out->resvp, 1);

	out->lock = new_block->m_ops->get_lock(new_block);
	out->new_state = MEMBLOCK_ALLOCATED;

out:
	heap_bucket_release(heap, b);

	if (err == 0)
		return 0;

	errno = err;
	return -1;
}
/* * CTL_WRITE_HANDLER(proto) -- creates a new allocation class */ static int CTL_WRITE_HANDLER(desc)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; uint8_t id; struct alloc_class_collection *ac = heap_alloc_classes(&pop->heap); struct pobj_alloc_class_desc *p = arg; if (p->unit_size <= 0 || p->unit_size > PMEMOBJ_MAX_ALLOC_SIZE || p->units_per_block <= 0) { errno = EINVAL; return -1; } if (p->alignment != 0 && p->unit_size % p->alignment != 0) { ERR("unit size must be evenly divisible by alignment"); errno = EINVAL; return -1; } if (p->alignment > (MEGABYTE * 2)) { ERR("alignment cannot be larger than 2 megabytes"); errno = EINVAL; return -1; } enum header_type lib_htype = MAX_HEADER_TYPES; switch (p->header_type) { case POBJ_HEADER_LEGACY: lib_htype = HEADER_LEGACY; break; case POBJ_HEADER_COMPACT: lib_htype = HEADER_COMPACT; break; case POBJ_HEADER_NONE: lib_htype = HEADER_NONE; break; case MAX_POBJ_HEADER_TYPES: default: ERR("invalid header type"); errno = EINVAL; return -1; } if (SLIST_EMPTY(indexes)) { if (alloc_class_find_first_free_slot(ac, &id) != 0) { ERR("no available free allocation class identifier"); errno = EINVAL; return -1; } } else { struct ctl_index *idx = SLIST_FIRST(indexes); ASSERTeq(strcmp(idx->name, "class_id"), 0); if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) { ERR("class id outside of the allowed range"); errno = ERANGE; return -1; } id = (uint8_t)idx->value; if (alloc_class_reserve(ac, id) != 0) { ERR("attempted to overwrite an allocation class"); errno = EEXIST; return -1; } } size_t runsize_bytes = CHUNK_ALIGN_UP((p->units_per_block * p->unit_size) + RUN_BASE_METADATA_SIZE); /* aligning the buffer might require up-to to 'alignment' bytes */ if (p->alignment != 0) runsize_bytes += p->alignment; uint32_t size_idx = (uint32_t)(runsize_bytes / CHUNKSIZE); if (size_idx > UINT16_MAX) size_idx = UINT16_MAX; struct alloc_class *c = alloc_class_new(id, 
heap_alloc_classes(&pop->heap), CLASS_RUN, lib_htype, p->unit_size, p->alignment, size_idx); if (c == NULL) { errno = EINVAL; return -1; } if (heap_create_alloc_class_buckets(&pop->heap, c) != 0) { alloc_class_delete(ac, c); return -1; } p->class_id = c->id; p->units_per_block = c->run.nallocs; return 0; }