static void test_ctree_insert() { struct ctree *t = ctree_new(); UT_ASSERT(t != NULL); Rcounter_malloc = TEST_INSERT; UT_ASSERT(ctree_is_empty(t)); /* leaf Malloc fail */ UT_ASSERT(ctree_insert(t, TEST_VAL_A, 0) != 0); /* all OK root */ UT_ASSERT(ctree_insert(t, TEST_VAL_B, 0) == 0); /* insert +2 mallocs */ /* accessor Malloc fail */ UT_ASSERT(ctree_insert(t, TEST_VAL_A, 0) != 0); /* insert duplicate */ UT_ASSERT(ctree_insert(t, TEST_VAL_B, 0) != 0); /* all OK second */ UT_ASSERT(ctree_insert(t, TEST_VAL_A, 0) == 0); UT_ASSERT(!ctree_is_empty(t)); ctree_delete(t); }
static void test_ctree_remove() { struct ctree *t = ctree_new(); UT_ASSERT(t != NULL); Rcounter_malloc = TEST_REMOVE; /* remove from empty tree */ UT_ASSERT(ctree_remove(t, TEST_VAL_A, 0) == 0); /* insert 2 valid values */ UT_ASSERT(ctree_insert(t, TEST_VAL_A, 0) == 0); UT_ASSERT(ctree_insert(t, TEST_VAL_B, 0) == 0); /* fail to remove equal greater */ UT_ASSERT(ctree_remove(t, TEST_VAL_C, 0) == 0); /* remove accessor */ UT_ASSERT(ctree_remove(t, TEST_VAL_A, 1) == TEST_VAL_A); /* remove root */ UT_ASSERT(ctree_remove(t, TEST_VAL_B, 1) == TEST_VAL_B); ctree_delete(t); }
/*
 * check_all_ranges -- (internal) re-insert every range [i, i+INC-1] and
 * verify each one reports a conflict, i.e. is already present in the tree
 */
static void
check_all_ranges(ctree_t *ctree)
{
	uint8_t *i;

	for (i = 0; i <= (uint8_t*) MAX; i += INC) {
		printf("----- Checking [%10p, %10p] -----\n", i, i + INC - 1);
		int conflict = ctree_insert(ctree, i, i + INC - 1);
		if (!conflict)
			printf("Error, no conflict inserting %p\n", i);
	}
}

/*
 * main -- inserts the address ranges [0, MAX] in INC-sized chunks, first
 * in ascending then in descending order, and verifies every chunk
 * conflicts on re-insertion; returns 0 (errors are reported on stdout)
 */
int
main(int argc, char **argv)
{
	(void) argc;
	(void) argv;

	uint8_t *i;
	ctree_t ctree = CTREE_EMPTY;

	printf("========== FORWARD INSERT CHECK ==========\n");

	for (i = 0; i <= (uint8_t*) MAX; i += INC) {
		printf("----- Inserting [%10p, %10p] -----\n", i, i + INC - 1);
		int conflict = ctree_insert(&ctree, i, i + INC - 1);
		ctree_print(ctree);
		if (conflict)
			printf("Error, conflict inserting %p\n", i);
	}

	check_all_ranges(&ctree);
	ctree_destroy(&ctree);

	printf("========== REVERSE INSERT CHECK ==========\n");

	/* the i-INC <= MAX clause stops the loop once i-INC wraps around */
	for (i = (uint8_t*) MAX + INC;
	    i - INC >= (uint8_t*) 0 && i - INC <= (uint8_t*) MAX;
	    i -= INC) {
		printf("----- Inserting [%10p, %10p] -----\n", i - INC, i - 1);
		int conflict = ctree_insert(&ctree, i - INC, i - 1);
		ctree_print(ctree);
		if (conflict)
			/* BUGFIX: the inserted base is i-INC, not i */
			printf("Error, conflict inserting %p\n", i - INC);
	}

	check_all_ranges(&ctree);
	ctree_destroy(&ctree);

	return 0;
}
/*
 * main -- inserts NELT distinct single-byte ranges in random order,
 * verifies each re-insertion conflicts, and exits nonzero on any failure
 */
int
main(int argc, char **argv)
{
	int idx;
	ctree_t ctree = CTREE_EMPTY;

	srand(time(NULL));

	/* fill with the distinct addresses NULL+0 .. NULL+NELT-1 */
	for (idx = 0; idx < NELT; idx++)
		data[idx] = ((uint8_t*) NULL) + idx;

	// Perform NELT random swaps
	for (idx = 0; idx < NELT; idx++) {
		int a = rand() % NELT;
		int b = rand() % NELT;
		uint8_t *swap = data[a];
		data[a] = data[b];
		data[b] = swap;
	}

	for (idx = 0; idx < NELT; idx++) {
		printf(" + Inserting [%p, %p]\n", data[idx], data[idx]);
		int conflict = ctree_insert(&ctree, data[idx], data[idx]);
		if (conflict) {
			printf("*** Error, conflict inserting %p\n", data[idx]);
			ctree_print(ctree);
			exit(1);
		}
	}

	printf("\n");
	ctree_print(ctree);
	printf("\n");

	/* every element is now present, so each re-insert must conflict */
	for (idx = 0; idx < NELT; idx++) {
		printf(" + Checking [%p, %p]\n", data[idx], data[idx]);
		int conflict = ctree_insert(&ctree, data[idx], data[idx]);
		if (!conflict) {
			printf("*** Error, no conflict inserting %p\n", data[idx]);
			ctree_print(ctree);
			exit(1);
		}
	}

	ctree_destroy(&ctree);
	return 0;
}
/* * bucket_tree_insert_block -- (internal) inserts a new memory block * into the container */ static int bucket_tree_insert_block(struct block_container *bc, struct palloc_heap *heap, struct memory_block m) { /* * Even though the memory block representation of an object uses * relatively large types in practise the entire memory block structure * needs to fit in a single 64 bit value - the type of the key in the * container tree. * Given those limitations a reasonable idea might be to make the * memory_block structure be the size of single uint64_t, which would * work for now, but if someday someone decides there's a need for * larger objects the current implementation would allow them to simply * replace this container instead of making little changes all over * the heap code. */ ASSERT(m.chunk_id < MAX_CHUNK); ASSERT(m.zone_id < UINT16_MAX); ASSERTne(m.size_idx, 0); struct block_container_ctree *c = (struct block_container_ctree *)bc; #ifdef USE_VG_MEMCHECK bucket_vg_mark_noaccess(heap, bc, m); #endif uint64_t key = CHUNK_KEY_PACK(m.zone_id, m.chunk_id, m.block_off, m.size_idx); return ctree_insert(c->tree, key, 0); }
/*
 * pmemobj_runtime_init -- (internal) initialize runtime part of the pool header
 *
 * Sets up the volatile, per-open state of the pool: replication hooks,
 * a fresh run_id, runtime pointers (lanes, store, uuid_lo), and -- when
 * 'boot' is nonzero -- the boot sequence plus registration of the pool
 * in the global cuckoo hash and address tree. Returns 0 on success, or
 * -1 with errno set.
 */
static int
pmemobj_runtime_init(PMEMobjpool *pop, int rdonly, int boot)
{
	LOG(3, "pop %p rdonly %d boot %d", pop, rdonly, boot);

	if (pop->replica != NULL) {
		/* switch to functions that replicate data */
		pop->persist = obj_rep_persist;
		pop->flush = obj_rep_flush;
		pop->drain = obj_rep_drain;
		pop->memcpy_persist = obj_rep_memcpy_persist;
		pop->memset_persist = obj_rep_memset_persist;
	}

	/*
	 * run_id is made unique by incrementing the previous value.
	 * The second += 2 skips over a wrapped-around zero value.
	 */
	pop->run_id += 2;
	if (pop->run_id == 0)
		pop->run_id += 2;
	/* persist the new run_id before any state keyed on it is built */
	pop->persist(pop, &pop->run_id, sizeof (pop->run_id));

	/*
	 * Use some of the memory pool area for run-time info.  This
	 * run-time state is never loaded from the file, it is always
	 * created here, so no need to worry about byte-order.
	 */
	pop->rdonly = rdonly;
	pop->lanes = NULL;

	pop->uuid_lo = pmemobj_get_uuid_lo(pop);

	pop->store = (struct object_store *)
			((uintptr_t)pop + pop->obj_store_offset);

	if (boot) {
		if ((errno = pmemobj_boot(pop)) != 0)
			return -1;

		/* register the pool for lookup by uuid... */
		if ((errno = cuckoo_insert(pools_ht, pop->uuid_lo, pop)) != 0) {
			ERR("!cuckoo_insert");
			return -1;
		}

		/* ...and by address range */
		if ((errno = ctree_insert(pools_tree, (uint64_t)pop, pop->size))
				!= 0) {
			ERR("!ctree_insert");
			return -1;
		}
	}

	/*
	 * If possible, turn off all permissions on the pool header page.
	 *
	 * The prototype PMFS doesn't allow this when large pages are in
	 * use. It is not considered an error if this fails.
	 */
	util_range_none(pop->addr, sizeof (struct pool_hdr));

	return 0;
}
/*
 * bucket_insert_block -- inserts a new memory block into the container
 */
int
bucket_insert_block(struct bucket *b, struct memory_block m)
{
	/* sanity-check the block coordinates before packing them */
	ASSERT(m.chunk_id < MAX_CHUNK);
	ASSERT(m.zone_id < UINT16_MAX);
	ASSERT(m.size_idx != 0);

	/* pack the block coordinates into the 64-bit tree key */
	uint64_t packed = CHUNK_KEY_PACK(m.zone_id, m.chunk_id, m.block_off,
		m.size_idx);

	return ctree_insert(b->tree, packed, 0);
}
/** Check an I/O vector operation's buffers for overlap.
  *
  * @param[in] ptrs  Array of 'count' buffer base pointers.
  * @param[in] count Number of buffers in 'ptrs'.
  * @param[in] size  Size in bytes of each buffer.
  * @return          Logical true when regions overlap, 0 otherwise.
  */
int ARMCII_Iov_check_overlap(void **ptrs, int count, int size) {
#ifndef NO_CHECK_OVERLAP
#ifdef NO_USE_CTREE
  /* O(count^2) pairwise comparison of all buffer intervals */
  int i, j;

  if (ARMCII_GLOBAL_STATE.iov_checks_disabled) return 0;

  for (i = 0; i < count; i++) {
    for (j = i+1; j < count; j++) {
      const uint8_t *ptr_1_lo = ptrs[i];
      const uint8_t *ptr_1_hi = ((uint8_t*)ptrs[i]) + size - 1;
      const uint8_t *ptr_2_lo = ptrs[j];
      const uint8_t *ptr_2_hi = ((uint8_t*)ptrs[j]) + size - 1;

      /*
       * Overlap iff: region 1's low end falls inside region 2, or
       * region 1's high end falls inside region 2, or region 1
       * strictly contains region 2.
       */
      if (   (ptr_1_lo >= ptr_2_lo && ptr_1_lo <= ptr_2_hi)
          || (ptr_1_hi >= ptr_2_lo && ptr_1_hi <= ptr_2_hi)
          || (ptr_1_lo <  ptr_2_lo && ptr_1_hi >  ptr_2_hi)) {
        ARMCII_Dbg_print(DEBUG_CAT_IOV, "IOV regions overlap: [%p, %p] - [%p, %p]\n",
            ptr_1_lo, ptr_1_hi, ptr_2_lo, ptr_2_hi);
        return 1;
      }
    }
  }
#else
  /* interval-tree variant: insert each [lo, hi]; a conflict means overlap */
  int i;
  ctree_t ctree = CTREE_EMPTY;

  if (ARMCII_GLOBAL_STATE.iov_checks_disabled) return 0;

  for (i = 0; i < count; i++) {
    int conflict = ctree_insert(&ctree, ptrs[i], ((uint8_t*)ptrs[i]) + size - 1);

    if (conflict) {
      /* look up the already-inserted region we collided with, for the log */
      ctree_t cnode = ctree_locate(ctree, ptrs[i], ((uint8_t*)ptrs[i]) + size - 1);

      ARMCII_Dbg_print(DEBUG_CAT_IOV, "IOV regions overlap: [%p, %p] - [%p, %p]\n",
          ptrs[i], ((uint8_t*)ptrs[i]) + size - 1, cnode->lo, cnode->hi);

      ctree_destroy(&ctree);
      return 1;
    }
  }

  ctree_destroy(&ctree);
#endif /* NO_USE_CTREE */
#endif /* NO_CHECK_OVERLAP */

  return 0;
}
/*
 * test_ctree_find -- exercises ctree_find_le on an empty tree and on a
 * tree holding two elements
 */
static void
test_ctree_find()
{
	struct ctree *tree = ctree_new();
	UT_ASSERT(tree != NULL);

	/* search empty tree */
	uint64_t key = TEST_VAL_A;
	UT_ASSERT(ctree_find_le(tree, &key) == 0);

	/* insert 2 valid elements */
	UT_ASSERT(ctree_insert(tree, TEST_VAL_A, TEST_VAL_A) == 0);
	UT_ASSERT(ctree_insert(tree, TEST_VAL_B, TEST_VAL_B) == 0);

	/* search for values */
	key = 0;
	UT_ASSERT(ctree_find_le(tree, &key) == 0);
	key = TEST_VAL_A;
	UT_ASSERT(ctree_find_le(tree, &key) == TEST_VAL_A);
	key = TEST_VAL_B;
	UT_ASSERT(ctree_find_le(tree, &key) == TEST_VAL_B);

	ctree_delete(tree);
}
/*
 * bucket_insert_block -- inserts a new memory block into the container
 */
int
bucket_insert_block(PMEMobjpool *pop, struct bucket *b, struct memory_block m)
{
	/* sanity-check the block coordinates before packing them */
	ASSERT(m.chunk_id < MAX_CHUNK);
	ASSERT(m.zone_id < UINT16_MAX);
	ASSERT(m.size_idx != 0);

#ifdef USE_VG_MEMCHECK
	/* mark the block's data area as inaccessible to valgrind */
	if (On_valgrind) {
		size_t block_size = m.size_idx * bucket_unit_size(b);
		void *block_data = heap_get_block_data(pop, m);
		VALGRIND_DO_MAKE_MEM_NOACCESS(pop, block_data, block_size);
	}
#endif

	/* pack the block coordinates into the 64-bit tree key */
	uint64_t packed = CHUNK_KEY_PACK(m.zone_id, m.chunk_id, m.block_off,
		m.size_idx);

	return ctree_insert(b->tree, packed, 0);
}