/* initialization before config loading */
void init_objects(void)
{
	aatree_init(&user_tree, user_node_cmp, NULL);
	user_cache = slab_create("user_cache", sizeof(PgUser), 0, NULL, USUAL_ALLOC);
	db_cache = slab_create("db_cache", sizeof(PgDatabase), 0, NULL, USUAL_ALLOC);
	pool_cache = slab_create("pool_cache", sizeof(PgPool), 0, NULL, USUAL_ALLOC);

	if (!user_cache || !db_cache || !pool_cache)
		fatal("cannot create initial caches");
}
/* initialization before config loading */
void init_objects(void)
{
	aatree_init(&user_tree, user_node_cmp, NULL);
	user_cache = slab_create("user_cache", sizeof(PgUser), 0, NULL, USUAL_ALLOC);
	db_cache = slab_create("db_cache", sizeof(PgDatabase), 0, NULL, USUAL_ALLOC);
	pool_cache = slab_create("pool_cache", sizeof(PgPool), 0, NULL, USUAL_ALLOC);
	cluster_cache = slab_create("cluster_cache", sizeof(PgCluster), 0, NULL, USUAL_ALLOC);

	/* matches commands of the form: VERB 'argument'; rest-of-line */
	if (regcomp(&sharding_command_regex, "^([a-zA-Z]+)[ ]+'([^']+)';(.*)", REG_EXTENDED))
		fatal("could not compile sharding command regex");

	if (!user_cache || !db_cache || !pool_cache || !cluster_cache)
		fatal("cannot create initial caches");
}
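/*
 * Illustrative stand-alone sketch (not part of the original source) showing
 * what the sharding regex above accepts: an alphabetic verb, a single-quoted
 * argument, and the remainder of the line. The sample command string and the
 * main() harness are assumptions for demonstration only.
 */
#include <regex.h>
#include <stdio.h>

int main(void)
{
	regex_t re;
	regmatch_t m[4];
	const char *cmd = "SHARD 'customer_42'; SELECT 1;";

	if (regcomp(&re, "^([a-zA-Z]+)[ ]+'([^']+)';(.*)", REG_EXTENDED) != 0)
		return 1;
	if (regexec(&re, cmd, 4, m, 0) == 0) {
		/* group 1 = verb, group 2 = quoted key, group 3 = trailing text */
		printf("verb=%.*s key=%.*s\n",
		       (int)(m[1].rm_eo - m[1].rm_so), cmd + m[1].rm_so,
		       (int)(m[2].rm_eo - m[2].rm_so), cmd + m[2].rm_so);
	}
	regfree(&re);
	return 0;
}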
static void init_hft(void)
{
	hash_init(&htab_player_list, 256, delete_dbref);
	player_dbref_slab = slab_create("player list dbrefs", sizeof(dbref));
	hft_initialized = 1;
}
struct slab_pool *
slab_pool_create(struct memface *mc, size_t nitem)
{
	assert_nitem(nitem);
	assert_invar();

	struct slab_pool *so =
		(struct slab_pool *)slab_create(mc, nitem, sizeof(struct slab));
	return so;
}
static lock_list *
next_free_lock(const void *hint)
{
	/* lazily create the lock slab on first use */
	if (lock_slab == NULL) {
		lock_slab = slab_create("locks", sizeof(lock_list));
		slab_set_opt(lock_slab, SLAB_ALLOC_BEST_FIT, 1);
	}
	return slab_malloc(lock_slab, hint);
}
/*
 * Determines the chunk sizes and initializes the slab class descriptors
 * accordingly.
 */
mem_cache_ptr
mem_cache_create(size_t base_chunk_size, double factor, size_t power_largest,
		 size_t power_block, size_t mem_limit, int pre_alloc)
{
	mem_cache_ptr mem_cache;
	slab_ptr *slabclass;
	size_t i;
	size_t chunk_size;

	if (base_chunk_size <= CHUNK_ALIGN_BYTES || power_largest == 0 ||
	    power_block <= CHUNK_ALIGN_BYTES || power_block < base_chunk_size ||
	    mem_limit <= CHUNK_ALIGN_BYTES)
		return NULL;

	mem_cache = (mem_cache_ptr)calloc(1, sizeof(*mem_cache));
	if (NULL == mem_cache)
		return NULL;

	slabclass = (slab_ptr *)calloc(power_largest, sizeof(slab_ptr));
	if (NULL == slabclass) {
		free(mem_cache);
		return NULL;
	}

	mem_cache->mem_limit = mem_limit;
	mem_cache->mem_malloced = 0;
	mem_cache->factor = factor;
	mem_cache->power_block = power_block;
	mem_cache->base_chunk_size = base_chunk_size;
	mem_cache->power_smallest = 1;
	mem_cache->slabclass = slabclass;

	/* grow chunk sizes geometrically until they exceed one slab page */
	for (i = 1, chunk_size = base_chunk_size; i < power_largest; ++i) {
		if (chunk_size > power_block)
			break;
		slabclass[i] = slab_create(chunk_size, power_block, pre_alloc);
		LOG_DEBUG_F3("slab class %3lu: chunk size %6lu perslab %5u\n",
			     i, slabclass[i]->chunk_size,
			     slabclass[i]->chunk_number_per_page);
		chunk_size *= factor;
	}
	mem_cache->power_largest = i - 1;
	mem_cache->mem_malloced = mem_cache->power_largest;
	return mem_cache;
}
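/*
 * A minimal usage sketch for mem_cache_create() (the parameter values are
 * hypothetical, chosen only to satisfy the checks above): with a 64-byte base
 * chunk and a 1.25 growth factor, chunk sizes grow geometrically
 * (64, 80, 100, ...) until they exceed the 1 MB slab page.
 */
void example_mem_cache(void)
{
	mem_cache_ptr cache;

	cache = mem_cache_create(64,               /* base_chunk_size */
				 1.25,             /* factor */
				 200,              /* power_largest */
				 1024 * 1024,      /* power_block */
				 64 * 1024 * 1024, /* mem_limit */
				 0);               /* no pre_alloc */
	if (cache == NULL)
		return; /* bad parameters or out of memory */
}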
/** Allocate and initialize a new integer map. */
intmap *
im_new(void)
{
	intmap *im;

	im = mush_malloc(sizeof *im, "int_map");
	if (!intmap_slab)
		intmap_slab = slab_create("patricia tree nodes", sizeof(struct patricia));
	im->count = 0;
	im->root = NULL;
	return im;
}
void
create_tree(tree_t *tree, slab_t *slab, size_t entry_size,
	    int (*compare)(const int a, const int b))
{
	/* size the slab so one PAGE_SIZE block backs entries of entry_size bytes */
	slab->unit_size = entry_size;
	slab->block_size = PAGE_SIZE;
	slab->min_block = 1;
	slab->max_block = tree_max_block(65536, PAGE_SIZE, entry_size);
	slab->palloc = palloc;
	slab->pfree = pfree;
	slab_create(slab);

	tree_create(tree, compare);
}
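/*
 * Hypothetical caller for create_tree(): the integer comparison callback and
 * the static tree/slab instances are illustrative assumptions, not part of
 * the original code.
 */
static int int_cmp(const int a, const int b)
{
	return (a > b) - (a < b);
}

static void example_create_tree(void)
{
	static tree_t tree;
	static slab_t slab;

	/* entries are plain ints; the slab hands out PAGE_SIZE blocks */
	create_tree(&tree, &slab, sizeof(int), int_cmp);
}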
void thread_init(void)
{
	slab_create(&thread_cache, sizeof(thread_t), page_alloc(1));

	init_thread = slab_alloc(&thread_cache);
	active_thread = init_thread;
	active_switch_stack = init_thread->stack;

	init_thread->status = THREAD_SCHEDULED;
	init_thread->user = 0;
	init_thread->parent = 0;
	init_thread->children = 0;
	init_thread->next = 0;
}
void *
slab_cache_alloc(slab_cache_t *cache)
{
	slab_t *slab = cache->slabs_free;

	/* no free slab cached: try to grow the cache by one slab */
	if (!slab) {
		slab = slab_create(cache);
		if (slab == NULL)
			return NULL;
	}
	return slab_alloc(slab);
}
/*
 * Determines the chunk sizes and initializes the slab class descriptors
 * accordingly.
 */
mem_cache_ptr
mem_cache_create(size_t base_chunk_size, double factor, size_t power_largest,
		 size_t power_block, size_t mem_limit, int pre_alloc)
{
	mem_cache_ptr mem_cache;
	slab_ptr *slabclass;
	size_t i;
	size_t chunk_size;

	if (base_chunk_size <= CHUNK_ALIGN_BYTES || power_largest == 0 ||
	    power_block <= CHUNK_ALIGN_BYTES || power_block < base_chunk_size ||
	    mem_limit <= CHUNK_ALIGN_BYTES)
		return NULL;

	mem_cache = (mem_cache_ptr)calloc(1, sizeof(*mem_cache));
	if (NULL == mem_cache)
		return NULL;

	slabclass = (slab_ptr *)calloc(power_largest, sizeof(slab_ptr));
	if (NULL == slabclass) {
		free(mem_cache);
		return NULL;
	}

	mem_cache->mem_limit = mem_limit;
	mem_cache->mem_malloced = 0;
	mem_cache->factor = factor;
	mem_cache->power_block = power_block;
	mem_cache->base_chunk_size = base_chunk_size;
	mem_cache->power_smallest = 1;
	mem_cache->slabclass = slabclass;
	mem_cache->magic_number = MEMCACHE_MAGIC_NUMBER;

	/* grow chunk sizes geometrically until they exceed one slab page */
	for (i = 1, chunk_size = base_chunk_size; i < power_largest; ++i) {
		if (chunk_size > power_block)
			break;
		slabclass[i] = slab_create(chunk_size, power_block, pre_alloc);
		chunk_size *= factor;
	}
	mem_cache->power_largest = i - 1;
	mem_cache->mem_malloced = mem_cache->power_largest;
	return mem_cache;
}
/* Allocate a new reference count struct.
 *
 * Consider moving away from slabs; the struct size is pretty big with the
 * skip list fields added. But see comment for memcheck_slab's declaration. */
static MEM *
alloc_memcheck_node(const char *ref)
{
	MEM *newcheck;

	if (!memcheck_slab)
		memcheck_slab = slab_create("mem check references", sizeof(MEM));
	newcheck = slab_malloc(memcheck_slab, NULL);
	memset(newcheck, 0, sizeof *newcheck);
	mush_strncpy(newcheck->ref_name, ref, REF_NAME_LEN);
	newcheck->link_count = pick_link_count(MAX_LINKS);
	newcheck->ref_count = 1;
	return newcheck;
}
/**
 * Allocate frames for slab space and initialize it.
 */
static slab_t *
slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	unsigned int i;
	u32_t p;

	DBG("%s order %d\n", __FUNCTION__, cache->order);

	data = (void *)PA2KA(frame_alloc(1 << cache->order));
	if (!data)
		return NULL;

	slab = (slab_t *)slab_create();
	if (!slab) {
		frame_free(KA2PA(data));
		return NULL;
	}

	/* Fill in slab structures */
	for (i = 0; i < ((u32_t)1 << cache->order); i++)
		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = (void *)data;
	slab->cache = cache;

	/* thread an embedded free list through the slab: each free object's
	   first word points at the next object */
	for (i = 0, p = (u32_t)slab->start; i < cache->objects; i++) {
		*(addr_t *)p = p + cache->size;
		p = p + cache->size;
	}

	atomic_inc(&cache->allocated_slabs);
	return slab;
}
/* initialization after config loading */
void init_caches(void)
{
	server_cache = slab_create("server_cache", sizeof(PgSocket), 0,
				   construct_server, USUAL_ALLOC);
	client_cache = slab_create("client_cache", sizeof(PgSocket), 0,
				   construct_client, USUAL_ALLOC);
	iobuf_cache = slab_create("iobuf_cache", IOBUF_SIZE, 0,
				  do_iobuf_reset, USUAL_ALLOC);
}
static struct pipe_context *
ilo_context_create(struct pipe_screen *screen, void *priv, unsigned flags)
{
	struct ilo_screen *is = ilo_screen(screen);
	struct ilo_context *ilo;

	ilo = CALLOC_STRUCT(ilo_context);
	if (!ilo)
		return NULL;

	ilo->winsys = is->dev.winsys;
	ilo->dev = &is->dev;

	/*
	 * initialize first, otherwise it may not be safe to call
	 * ilo_context_destroy() on errors
	 */
	slab_create(&ilo->transfer_mempool, sizeof(struct ilo_transfer), 64);

	ilo->shader_cache = ilo_shader_cache_create();
	ilo->cp = ilo_cp_create(ilo->dev, ilo->winsys, ilo->shader_cache);
	if (ilo->cp)
		ilo->render = ilo_render_create(&ilo->cp->builder);

	if (!ilo->cp || !ilo->shader_cache || !ilo->render) {
		ilo_context_destroy(&ilo->base);
		return NULL;
	}

	ilo_cp_set_submit_callback(ilo->cp, ilo_context_cp_submitted,
				   (void *)ilo);

	ilo->base.screen = screen;
	ilo->base.priv = priv;
	ilo->base.destroy = ilo_context_destroy;
	ilo->base.flush = ilo_flush;
	ilo->base.render_condition = ilo_render_condition;

	ilo_init_draw_functions(ilo);
	ilo_init_query_functions(ilo);
	ilo_init_state_functions(ilo);
	ilo_init_blit_functions(ilo);
	ilo_init_transfer_functions(ilo);
	ilo_init_video_functions(ilo);
	ilo_init_gpgpu_functions(ilo);

	ilo_init_draw(ilo);
	ilo_state_vector_init(ilo->dev, &ilo->state_vector);

	/*
	 * These must be called last as u_upload/u_blitter are clients of the
	 * pipe context.
	 */
	ilo->uploader = u_upload_create(&ilo->base, 1024 * 1024,
					PIPE_BIND_CONSTANT_BUFFER |
					PIPE_BIND_INDEX_BUFFER,
					PIPE_USAGE_STREAM);
	if (!ilo->uploader) {
		ilo_context_destroy(&ilo->base);
		return NULL;
	}

	ilo->blitter = ilo_blitter_create(ilo);
	if (!ilo->blitter) {
		ilo_context_destroy(&ilo->base);
		return NULL;
	}

	return &ilo->base;
}