/* * test_load_factor -- calculates the average load factor of the hash table * when inserting <0, 1M> elements in random order. * * The factor itself isn't really that important because the implementation * is optimized for lookup speed, but it should be reasonable. */ static void test_load_factor() { struct cuckoo *c = cuckoo_new(); UT_ASSERT(c != NULL); /* * The seed is intentionally constant so that the test result is * consistent (at least on the same platform). */ srand(INITIAL_SEED); float avg_load = 0.f; rand64(); int inserted = 0; for (int i = 0; ; ++i) { if (cuckoo_insert(c, rand64() % NVALUES, TEST_VALUE) == 0) { inserted++; avg_load += (float)inserted / cuckoo_get_size(c); if (inserted == NVALUES) break; } } avg_load /= inserted; UT_ASSERT(avg_load >= 0.4f); cuckoo_delete(c); }
/*
 * lane_info_create -- (internal) constructor for thread shared data
 */
static inline void
lane_info_create()
{
	/* abort the process if the hash table cannot be allocated */
	Lane_info_ht = cuckoo_new();
	if (!Lane_info_ht)
		FATAL("cuckoo_new");
}
/*
 * test_cuckoo_new_delete -- exercises creation failure paths (injected
 * allocation errors) followed by a successful create/delete cycle.
 */
static void
test_cuckoo_new_delete()
{
	struct cuckoo *ht;

	/* cuckoo malloc fail */
	ht = cuckoo_new();
	UT_ASSERT(ht == NULL);

	/* tab malloc fail */
	ht = cuckoo_new();
	UT_ASSERT(ht == NULL);

	/* all ok */
	ht = cuckoo_new();
	UT_ASSERT(ht != NULL);

	cuckoo_delete(ht);
}
/*
 * obj_init -- initialization of obj
 *
 * Called by constructor.
 */
void
obj_init(void)
{
	LOG(3, NULL);

	/* the pool header layout is fixed at exactly 8192 bytes */
	COMPILE_ERROR_ON(sizeof(struct pmemobjpool) != 8192);

	pools = cuckoo_new();
	if (!pools)
		FATAL("!cuckoo_new");
}
/* * obj_init -- initialization of obj * * Called by constructor. */ void obj_init(void) { LOG(3, NULL); COMPILE_ERROR_ON(sizeof (struct pmemobjpool) != 8192); #ifdef USE_COW_ENV char *env = getenv("PMEMOBJ_COW"); if (env) Open_cow = atoi(env); #endif pools_ht = cuckoo_new(); if (pools_ht == NULL) FATAL("!cuckoo_new"); pools_tree = ctree_new(); if (pools_tree == NULL) FATAL("!ctree_new"); }
/*
 * test_insert_get_remove -- inserts TEST_INSERTS entries, verifies lookups,
 * removes everything and verifies the table is empty afterwards.
 */
static void
test_insert_get_remove()
{
	struct cuckoo *ht = cuckoo_new();
	ASSERT(ht != NULL);

	/* populate the table */
	for (int k = 0; k < TEST_INSERTS; ++k)
		ASSERT(cuckoo_insert(ht, k, TEST_VAL(k)) == 0);

	/* every inserted key must resolve to its value */
	for (int k = 0; k < TEST_INSERTS; ++k)
		ASSERT(cuckoo_get(ht, k) == TEST_VAL(k));

	/* removal hands back the stored value */
	for (int k = 0; k < TEST_INSERTS; ++k)
		ASSERT(cuckoo_remove(ht, k) == TEST_VAL(k));

	/* removing again yields nothing */
	for (int k = 0; k < TEST_INSERTS; ++k)
		ASSERT(cuckoo_remove(ht, k) == NULL);

	/* and lookups no longer find anything */
	for (int k = 0; k < TEST_INSERTS; ++k)
		ASSERT(cuckoo_get(ht, k) == NULL);

	cuckoo_delete(ht);
}
/*
 * alloc_class_collection_new -- creates a new collection of allocation classes
 *
 * Allocates the collection, creates the predefined huge and minimal run
 * classes, then generates a set of run classes from the category table.
 * Returns NULL on any allocation/creation failure (all partially built
 * state is released via alloc_class_collection_delete).
 */
struct alloc_class_collection *
alloc_class_collection_new()
{
	LOG(10, NULL);

	struct alloc_class_collection *ac = Zalloc(sizeof(*ac));
	if (ac == NULL)
		return NULL;

	memset(ac->aclasses, 0, sizeof(ac->aclasses));

	ac->granularity = ALLOC_BLOCK_SIZE;
	ac->last_run_max_size = MAX_RUN_SIZE;
	ac->fail_on_missing_class = 0;
	ac->autogenerate_on_missing_class = 1;

	/* one map slot per granularity-sized step up to MAX_RUN_SIZE */
	size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1;

	if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL)
		goto error;
	if ((ac->class_map_by_unit_size = cuckoo_new()) == NULL)
		goto error;

	/* 0xFF marks a map slot with no assigned class id yet */
	memset(ac->class_map_by_alloc_size, 0xFF, maps_size);

	/* the huge class handles allocations served by whole chunks */
	if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT,
		CHUNKSIZE, 0, 1) == NULL)
		goto error;

	/* smallest run class, covers the first generated-size range */
	struct alloc_class *predefined_class =
		alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT,
		MIN_RUN_SIZE, 0, 1);
	if (predefined_class == NULL)
		goto error;

	/* map all sizes below the first generated class to the minimal run */
	for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity;
		++i) {
		ac->class_map_by_alloc_size[i] = predefined_class->id;
	}

	/*
	 * Based on the defined categories, a set of allocation classes is
	 * created. The unit size of those classes is depended on the category
	 * initial size and step.
	 */
	size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1;
	for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) {
		/* start just past the previous category's upper bound */
		size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN;
		do {
			if (alloc_class_find_or_create(ac, n) == NULL)
				goto error;

			/*
			 * Grow n by the category's relative step, rounded up
			 * to a whole unit (FLT_EPSILON guards against float
			 * truncation of an exact integer result), then align
			 * the increment up to the generation block size.
			 */
			float stepf = (float)n * categories[c].step;
			size_t stepi = (size_t)stepf;
			stepi = (stepf - (float)stepi < FLT_EPSILON) ?
				stepi : stepi + 1;

			n += (stepi + (granularity_mask)) & ~granularity_mask;
		} while (n <= categories[c].size);
	}

	/*
	 * Find the largest alloc class and use its unit size as run allocation
	 * threshold.
	 */
	uint8_t largest_aclass_slot;
	for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1;
		largest_aclass_slot > 0 &&
		ac->aclasses[largest_aclass_slot] == NULL;
		--largest_aclass_slot) {
		/* intentional NOP */
	}

	struct alloc_class *c = ac->aclasses[largest_aclass_slot];

	/*
	 * The actual run might contain less unit blocks than the theoretical
	 * unit max variable. This may be the case for very large unit sizes.
	 */
	size_t real_unit_max = c->run.bitmap_nallocs < RUN_UNIT_MAX_ALLOC ?
		c->run.bitmap_nallocs : RUN_UNIT_MAX_ALLOC;

	size_t theoretical_run_max_size = c->unit_size * real_unit_max;

	/* cap the run threshold at MAX_RUN_SIZE */
	ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ?
		theoretical_run_max_size : MAX_RUN_SIZE;

#ifdef DEBUG
	/*
	 * Verify that each bucket's unit size points back to the bucket by the
	 * bucket map. This must be true for the default allocation classes,
	 * otherwise duplicate buckets will be created.
	 */
	for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
		struct alloc_class *c = ac->aclasses[i];

		if (c != NULL && c->type == CLASS_RUN) {
			ASSERTeq(i, c->id);
			ASSERTeq(alloc_class_by_run(ac, c->unit_size,
				c->flags, c->run.size_idx), c);
		}
	}
#endif

	return ac;

error:
	/* releases everything attached to ac, including ac itself */
	alloc_class_collection_delete(ac);

	return NULL;
}