int main(int argc, char *argv[]) { int expect_custom_alloc = 0; START(argc, argv, "vmem_custom_alloc"); if (argc < 2 || argc > 3 || strlen(argv[1]) != 1) UT_FATAL("usage: %s (0-2) [directory]", argv[0]); switch (argv[1][0]) { case '0': { /* use default allocator */ expect_custom_alloc = 0; expect_malloc = 1; break; } case '1': { /* error in custom malloc function */ expect_custom_alloc = 1; expect_malloc = 0; vmem_set_funcs(malloc_null, free_custom, realloc_custom, strdup_custom, NULL); break; } case '2': { /* use custom alloc functions */ expect_custom_alloc = 1; expect_malloc = 1; vmem_set_funcs(malloc_custom, free_custom, realloc_custom, strdup_custom, NULL); break; } default: { UT_FATAL("usage: %s (0-2) [directory]", argv[0]); break; } } if (argc == 3) { pool_test(argv[2]); } else { int i; /* repeat create pool */ for (i = 0; i < TEST_REPEAT_CREATE_POOLS; ++i) pool_test(NULL); } /* check memory leak in custom allocator */ UT_ASSERTeq(custom_allocs, 0); if (expect_custom_alloc == 0) { UT_ASSERTeq(custom_alloc_calls, 0); } else { UT_ASSERTne(custom_alloc_calls, 0); } DONE(NULL); }
/*
 * clnt_wait_disconnect -- wait for disconnection
 */
void
clnt_wait_disconnect(struct rpmem_ssh *ssh)
{
	/* a blocking monitor call returns anything but 1 on disconnect */
	int monitor_ret = rpmem_ssh_monitor(ssh, 0);
	UT_ASSERTne(monitor_ret, 1);
}
/*
 * timed_check_worker -- (internal) check consistency with mutex
 *
 * Each worker picks one of two mutexes based on its argument: the mutex
 * the main thread keeps permanently locked (LOCKED_MUTEX) or the free one.
 * Attempts on the locked mutex must time out after at least TIMEOUT ns;
 * attempts on the free mutex either succeed (and unlock again) or may
 * also time out under contention.
 */
static void *
timed_check_worker(void *arg)
{
	for (unsigned run = 0; run < WORKER_RUNS; run++) {
		/* worker index parity selects which mutex is exercised */
		int mutex_id = (int)(uintptr_t)arg % 2;
		PMEMmutex *mtx = mutex_id == LOCKED_MUTEX ?
			&Test_obj->mutex_locked : &Test_obj->mutex;

		struct timespec t1, t2, t_diff, abs_time;
		os_clock_gettime(CLOCK_REALTIME, &t1);
		abs_time = t1;
		abs_time.tv_nsec += TIMEOUT;
		/* normalize tv_nsec back into [0, NANO_PER_ONE) */
		if (abs_time.tv_nsec >= NANO_PER_ONE) {
			abs_time.tv_sec += abs_time.tv_nsec / NANO_PER_ONE;
			abs_time.tv_nsec %= NANO_PER_ONE;
		}

		int ret = pmemobj_mutex_timedlock(&Mock_pop, mtx, &abs_time);
		os_clock_gettime(CLOCK_REALTIME, &t2);

		if (mutex_id == LOCKED_MUTEX) {
			/* the permanently-locked mutex must time out */
			UT_ASSERTeq(ret, ETIMEDOUT);
			/* verify at least TIMEOUT ns actually elapsed */
			t_diff.tv_sec = t2.tv_sec - t1.tv_sec;
			t_diff.tv_nsec = t2.tv_nsec - t1.tv_nsec;
			if (t_diff.tv_nsec < 0) {
				--t_diff.tv_sec;
				t_diff.tv_nsec += NANO_PER_ONE;
			}
			UT_ASSERT(t_diff.tv_sec * NANO_PER_ONE +
				t_diff.tv_nsec >= TIMEOUT);
			/* a single timed-out attempt is enough for this case */
			return NULL;
		}

		if (ret == 0) {
			/* lock acquired -- release it again */
			UT_ASSERTne(mutex_id, LOCKED_MUTEX);
			pmemobj_mutex_unlock(&Mock_pop, mtx);
		} else if (ret == ETIMEDOUT) {
			/* contention timeout -- still must honor TIMEOUT */
			t_diff.tv_sec = t2.tv_sec - t1.tv_sec;
			t_diff.tv_nsec = t2.tv_nsec - t1.tv_nsec;
			if (t_diff.tv_nsec < 0) {
				--t_diff.tv_sec;
				t_diff.tv_nsec += NANO_PER_ONE;
			}
			UT_ASSERT(t_diff.tv_sec * NANO_PER_ONE +
				t_diff.tv_nsec >= TIMEOUT);
		} else {
			/* any other error code is a test failure */
			errno = ret;
			UT_ERR("!pmemobj_mutex_timedlock");
		}
	}

	return NULL;
}
/*
 * test_mmap_shared -- test shared mappings
 */
static void
test_mmap_shared(int fd)
{
	/* map the whole file read/write as a shared mapping */
	char *map = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
			MAP_SHARED, fd, 0);
	UT_ASSERTne(map, MAP_FAILED);
	check_mapping(fd, map, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
}
/* * client_set_attr -- perform set attributes request */ int client_set_attr(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <addr>[:<port>]", tc->name); char *target = argv[0]; int ret; struct rpmem_obc *rpc; struct rpmem_target_info *info; const struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); rpc = rpmem_obc_init(); UT_ASSERTne(rpc, NULL); ret = rpmem_obc_connect(rpc, info); UT_ASSERTeq(ret, 0); rpmem_target_free(info); ret = rpmem_obc_monitor(rpc, 1); UT_ASSERTeq(ret, 1); ret = rpmem_obc_set_attr(rpc, &pool_attr); UT_ASSERTeq(ret, 0); ret = rpmem_obc_monitor(rpc, 1); UT_ASSERTeq(ret, 1); ret = rpmem_obc_close(rpc); UT_ASSERTeq(ret, 0); ret = rpmem_obc_disconnect(rpc); UT_ASSERTeq(ret, 0); rpmem_obc_fini(rpc); return 1; }
/*
 * server_econnreset -- test case for closing connection when operation on
 * server is in progress - server side
 */
int
server_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	struct rpmemd_obc *obc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO);
	UT_ASSERTne(obc, NULL);

	int ret = rpmemd_obc_status(obc, 0);
	UT_ASSERTeq(ret, 0);

	/* the peer drops the connection mid-operation, so processing fails */
	ret = rpmemd_obc_process(obc, &REQ_CB, NULL);
	UT_ASSERTne(ret, 0);

	rpmemd_obc_fini(obc);

	return 0;
}
/*
 * server_bad_msg -- process a message specified number of times and expect
 * error returned from rpmemd_obc_client_process function
 */
void
server_bad_msg(struct rpmemd_obc *rpdc, int count)
{
	for (int n = 0; n < count; n++) {
		struct rpmemd_obc_client *client = rpmemd_obc_accept(rpdc);
		UT_ASSERTne(client, NULL);

		/* a malformed message must make processing fail */
		int ret = rpmemd_obc_client_process(client, &REQ_CB, NULL);
		UT_ASSERTne(ret, 0);

		ret = rpmemd_obc_client_close(client);
		UT_ASSERTeq(ret, 0);

		rpmemd_obc_client_fini(client);
	}
}
/*
 * req_close -- process close request
 */
static int
req_close(struct rpmemd_obc *obc, void *arg)
{
	UT_ASSERTne(arg, NULL);

	/* mark the connection as shutting down for the caller */
	((struct req_arg *)arg)->closing = 1;

	return rpmemd_obc_close_resp(obc, 0);
}
/* * req_set_attr -- process set attributes request */ static int req_set_attr(struct rpmemd_obc *obc, void *arg, const struct rpmem_pool_attr *pool_attr) { struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_ALT; UT_ASSERTne(arg, NULL); UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0); return rpmemd_obc_set_attr_resp(obc, 0); }
static void test_string_config(struct pool *pop) { UT_ASSERTne(pop, NULL); int ret; test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ""); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ";;"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ";=;"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "="); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo="); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "=b"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo=111=222"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo=333;debug.test_rw=444;"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 2); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_config="TEST_CONFIG_VALUE";"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 1); }
/*
 * test_file_config -- check parsing of file-based ctl configuration
 *
 * Each call passes a config payload plus the expected load return value
 * and expected number of successful ctl writes.
 */
static void
test_file_config(struct pool *pop)
{
	/* a single valid entry performs one write */
	create_and_test_file_config(pop,
		"debug.test_config="TEST_CONFIG_VALUE";", 0, 1);
	/* the same entry repeated -- two writes */
	create_and_test_file_config(pop,
		"debug.test_config="TEST_CONFIG_VALUE";"
		"debug.test_config="TEST_CONFIG_VALUE";", 0, 2);
	/* whole-line comments are skipped */
	create_and_test_file_config(pop,
		"#this is a comment\n"
		"debug.test_config="TEST_CONFIG_VALUE";", 0, 1);
	/* newline-terminated comments may interrupt a query mid-token */
	create_and_test_file_config(pop,
		"debug.#this is a comment\n"
		"test_config#this is a comment\n"
		"="TEST_CONFIG_VALUE";", 0, 1);
	/* trailing comment after a complete entry is fine */
	create_and_test_file_config(pop,
		"debug.test_config="TEST_CONFIG_VALUE";#this is a comment",
		0, 1);
	/* whitespace inside queries is ignored */
	create_and_test_file_config(pop,
		"\n\n\ndebug\n.\ntest\t_\tconfig="TEST_CONFIG_VALUE";\n",
		0, 1);
	create_and_test_file_config(pop,
		" d e b u g . t e s t _ c o n f i g = "TEST_CONFIG_VALUE";",
		0, 1);
	/* fully commented-out entry performs no writes */
	create_and_test_file_config(pop,
		"#debug.test_config="TEST_CONFIG_VALUE";", 0, 0);
	/* a comment not terminated by newline makes parsing fail */
	create_and_test_file_config(pop,
		"debug.#this is a comment\n"
		"test_config#this is a not properly terminated comment"
		"="TEST_CONFIG_VALUE";", -1, 0);
	/* garbage input must fail */
	create_and_test_file_config(pop, "invalid", -1, 0);
	/* empty file is a valid no-op */
	create_and_test_file_config(pop, "", 0, 0);
	/* complex (comma-separated) argument validation */
	create_and_test_file_config(pop,
		"debug.test_config_complex_arg=;", -1, 0);
	create_and_test_file_config(pop,
		"debug.test_config_complex_arg=1,2,3;", -1, 0);
	create_and_test_file_config(pop,
		"debug.test_config_complex_arg=12345,abcd,,1;", -1, 0);
	create_and_test_file_config(pop,
		"debug.test_config_complex_arg=12345,abcd,3147483647,1;",
		0, 1);
	/* global (pool-independent) queries work with a NULL pool */
	create_and_test_file_config(NULL,
		"global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1);
	/*
	 * NOTE(review): the expected (0, 1) suggests unknown namespaces
	 * are skipped while the rest of the file is still processed --
	 * confirm against ctl parser semantics.
	 */
	create_and_test_file_config(NULL,
		"private.missing.query=1;"
		"global_debug.gtest_config="TEST_CONFIG_VALUE";",
		0, 1);

	test_too_large_file(pop);

	/* loading from a nonexistent file must fail */
	int ret = ctl_load_config_from_file(pop->ctl, pop,
		"does_not_exist");
	UT_ASSERTne(ret, 0);
}
/* * client_create_errno -- perform create request operation and expect * specified errno, repeat the operation specified number of times. * If ex_errno is zero expect certain values in res struct. */ static void client_create_errno(char *target, int ex_errno, int count) { struct rpmem_req_attr req = { .pool_size = POOL_SIZE, .nlanes = NLANES, .provider = PROVIDER, .pool_desc = POOL_DESC, }; struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT; struct rpmem_resp_attr res; int ret; for (int i = 0; i < count; i++) { struct rpmem_obc *rpc = rpmem_obc_init(); UT_ASSERTne(rpc, NULL); client_connect_wait(rpc, target); ret = rpmem_obc_create(rpc, &req, &res, &pool_attr); if (ex_errno) { UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ex_errno); } else { UT_ASSERTeq(ret, 0); UT_ASSERTeq(res.port, CREATE_RESP.ibc.port); UT_ASSERTeq(res.rkey, CREATE_RESP.ibc.rkey); UT_ASSERTeq(res.raddr, CREATE_RESP.ibc.raddr); UT_ASSERTeq(res.persist_method, CREATE_RESP.ibc.persist_method); UT_ASSERTeq(res.nlanes, CREATE_RESP.ibc.nlanes); } rpmem_obc_disconnect(rpc); rpmem_obc_fini(rpc); } }
/*
 * do_fault_injection_register -- check that an injected allocation failure
 * inside util_range_register surfaces as an ENOMEM error
 */
static void
do_fault_injection_register(void *addr, size_t len, enum pmem_map_type type)
{
	/* nothing to verify unless the build supports fault injection */
	if (!pmem_fault_injection_enabled())
		return;

	pmem_inject_fault_at(PMEM_MALLOC, 1, "util_range_register");

	int r = util_range_register(addr, len, "", type);
	UT_ASSERTne(r, 0);
	UT_ASSERTeq(errno, ENOMEM);
}
int main(int argc, char *argv[]) { START(argc, argv, "obj_fragmentation2"); if (argc < 3) UT_FATAL("usage: %s filename workload [seed]", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, DEFAULT_FILE_SIZE, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); int w = atoi(argv[2]); if (argc > 3) seed = (unsigned)atoi(argv[3]); else seed = time(NULL); objects = ZALLOC(sizeof(uint64_t) * MAX_OBJECTS); UT_ASSERTne(objects, NULL); workloads[w - 1](pop); PMEMoid oid; size_t remaining = 0; size_t chunk = 100; /* calc at chunk level */ while (pmemobj_alloc(pop, &oid, chunk, 0, NULL, NULL) == 0) remaining += pmemobj_alloc_usable_size(oid) + 16; size_t allocated_sum = 0; oid = pmemobj_root(pop, 1); for (size_t n = 0; n < nobjects; ++n) { if (objects[n] == 0) continue; oid.off = objects[n]; allocated_sum += pmemobj_alloc_usable_size(oid) + 16; } size_t used = DEFAULT_FILE_SIZE - remaining; float frag = ((float)used / allocated_sum) - 1.f; UT_ASSERT(frag <= workloads_target[w - 1]); pmemobj_close(pop); FREE(objects); DONE(NULL); }
/*
 * do_fault_injection_split -- check that an injected allocation failure
 * inside util_range_split surfaces as an ENOMEM error from unregister
 */
static void
do_fault_injection_split(void *addr, size_t len)
{
	/* nothing to verify unless the build supports fault injection */
	if (!pmem_fault_injection_enabled())
		return;

	pmem_inject_fault_at(PMEM_MALLOC, 1, "util_range_split");

	int r = util_range_unregister(addr, len);
	UT_ASSERTne(r, 0);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * req_cb_remove -- callback for remove request operation
 *
 * This function behaves according to arguments specified via
 * struct req_cb_arg.
 */
static int
req_cb_remove(struct rpmemd_obc_client *client, void *arg,
	const char *pool_desc)
{
	UT_ASSERTne(arg, NULL);
	UT_ASSERTne(pool_desc, NULL);
	UT_ASSERTeq(strcmp(pool_desc, POOL_DESC), 0);

	struct req_cb_arg *args = arg;

	/* record that a remove message has been seen */
	args->types |= (1 << RPMEM_MSG_TYPE_REMOVE);

	int ret = args->ret;
	if (args->resp) {
		int resp_ret = rpmemd_obc_client_remove_resp(client,
			args->status);
		/* force_ret overrides whatever the response call returned */
		if (!args->force_ret)
			ret = resp_ret;
	}

	return ret;
}
static void * thread_func_create(void *arg) { unsigned start_idx = *(unsigned *)arg; size_t len = strlen(Dir) + 50; /* reserve some space for pool id */ char *filename = MALLOC(sizeof(*filename) * len); for (int repeat = 0; repeat < NREPEATS; ++repeat) { for (unsigned idx = 0; idx < Npools; ++idx) { unsigned pool_id = start_idx + idx; snprintf(filename, len, "%s" OS_DIR_SEP_STR "pool%d", Dir, pool_id); UT_OUT("%s", filename); /* delete old pool with the same id if exists */ if (Pools[pool_id] != NULL) { pmemcto_close(Pools[pool_id]); Pools[pool_id] = NULL; UNLINK(filename); } Pools[pool_id] = pmemcto_create(filename, "test", PMEMCTO_MIN_POOL, 0600); UT_ASSERTne(Pools[pool_id], NULL); void *ptr = pmemcto_malloc(Pools[pool_id], sizeof(int)); UT_ASSERTne(ptr, NULL); pmemcto_free(Pools[pool_id], ptr); } } FREE(filename); return NULL; }
/*
 * main -- entry point for the vmem_calloc unit test: create a volatile
 * memory pool (anonymous region or file-backed) and verify vmem_calloc
 * returns usable, zeroed memory
 */
int
main(int argc, char *argv[])
{
	const int test_value = 123456;
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_calloc");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		UT_FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create_in_region");
	} else {
		/* file-backed pool in the given directory */
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create");
	}

	int *test = vmem_calloc(vmp, 1, sizeof(int));
	UT_ASSERTne(test, NULL);

	/* pool_calloc should return zeroed memory */
	UT_ASSERTeq(*test, 0);

	/* the memory must also be writable and hold its value */
	*test = test_value;
	UT_ASSERTeq(*test, test_value);

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		UT_ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	vmem_free(vmp, test);

	vmem_delete(vmp);

	DONE(NULL);
}
/*
 * test_open -- in the open test we should be able to allocate exactly
 * one object.
 */
static void
test_open(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, LAYOUT_NAME);
	if (pop == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* the first allocation must fit... */
	int ret = pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	/* ...and the second one must not */
	ret = pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL);
	UT_ASSERTne(ret, 0);

	pmemobj_close(pop);
}
/*
 * test_mmap_hint -- test hint address
 *
 * Verifies the (non-MAP_FIXED) hint-address semantics of mmap: an aligned
 * free hint is honored, a non-aligned hint is ignored, and a busy hint
 * results in a mapping somewhere else.
 */
static void
test_mmap_hint(int fd)
{
	char *ptr1;
	char *ptr2;

	/* map entire file first to get unused address */
	ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	check_mapping(fd, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE,
			CHECK_PRIV, 0);

	/* now try to map a part of it at specified address */
	ptr2 = mmap(ptr1 + MMAP_ALIGN, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	/* the aligned, free hint must be honored exactly */
	UT_ASSERTeq(ptr2, ptr1 + MMAP_ALIGN);
	check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE,
			CHECK_PRIV, 0);

	/* non-aligned hint address - should be ignored */
	ptr2 = mmap(ptr1 + 100, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	UT_ASSERTne(ptr2, MAP_FAILED);
	UT_ASSERTne(ptr2, ptr1 + 100);
	check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE,
			CHECK_PRIV, 0);

	/* hint address is busy */
	ptr1 = mmap(NULL, FILE_SIZE / 2, PROT_READ|PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	ptr2 = mmap(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	UT_ASSERTne(ptr2, MAP_FAILED);
	/* the kernel must have placed it outside the busy region */
	UT_ASSERT(ptr2 < ptr1 || ptr2 >= ptr1 + FILE_SIZE / 2);
	munmap(ptr1, FILE_SIZE / 2);
	check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE,
			CHECK_PRIV, 0);
}
/*
 * req_cb_create -- callback for create request operation
 *
 * This function behaves according to arguments specified via
 * struct req_cb_arg: it records the message type, optionally sends a
 * create response with fixed attributes, and optionally forces the
 * return value.
 */
static int
req_cb_create(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req,
	const struct rpmem_pool_attr *pool_attr)
{
	UT_ASSERTne(arg, NULL);
	UT_ASSERTne(req, NULL);
	UT_ASSERTne(pool_attr, NULL);

	/* verify the request carries the expected attributes */
	req_cb_check_req(req);
	req_cb_check_pool_attr(pool_attr);

	struct req_cb_arg *args = arg;

	/* record that a create message has been seen */
	args->types |= (1 << RPMEM_MSG_TYPE_CREATE);

	int ret = args->ret;

	if (args->resp) {
		/* respond with the canned resource attributes */
		struct rpmem_resp_attr resp = {
			.port = PORT,
			.rkey = RKEY,
			.raddr = RADDR,
			.persist_method = PERSIST_METHOD,
			.nlanes = NLANES_RESP,
		};

		ret = rpmemd_obc_create_resp(obc, args->status, &resp);
	}

	/* force_ret overrides whatever the response call returned */
	if (args->force_ret)
		ret = args->ret;

	return ret;
}
/*
 * client_open -- perform open request
 *
 * Connects to the target, performs an open request and verifies that
 * the returned response attributes and pool attributes match the
 * expected canned values, then tears the connection down.
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_req_attr req = REQ_ATTR_INIT;
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
	struct rpmem_pool_attr pool_attr;
	struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
	struct rpmem_resp_attr res;

	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);

	ret = rpmem_obc_connect(rpc, target);
	UT_ASSERTeq(ret, 0);

	/* connection must still be alive before the open request */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);

	ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
	UT_ASSERTeq(ret, 0);

	/* the response must match the expected canned attributes */
	UT_ASSERTeq(ex_res.port, res.port);
	UT_ASSERTeq(ex_res.rkey, res.rkey);
	UT_ASSERTeq(ex_res.raddr, res.raddr);
	UT_ASSERTeq(ex_res.persist_method, res.persist_method);
	UT_ASSERTeq(ex_res.nlanes, res.nlanes);
	UT_ASSERTeq(memcmp(&ex_pool_attr, &pool_attr,
		sizeof(ex_pool_attr)), 0);

	/* connection must still be alive after the open request */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);

	ret = rpmem_obc_close(rpc);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);

	rpmem_obc_fini(rpc);

	return 1;
}
/* * req_open -- process open request */ static int req_open(struct rpmemd_obc *obc, void *arg, const struct rpmem_req_attr *req) { struct rpmem_req_attr ex_req = REQ_ATTR_INIT; UT_ASSERTne(arg, NULL); UT_ASSERTeq(ex_req.provider, req->provider); UT_ASSERTeq(ex_req.pool_size, req->pool_size); UT_ASSERTeq(ex_req.nlanes, req->nlanes); UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0); struct req_arg *args = arg; return rpmemd_obc_open_resp(obc, 0, &args->resp, &args->pool_attr); }
/* * req_create -- process create request */ static int req_create(struct rpmemd_obc *obc, void *arg, const struct rpmem_req_attr *req, const struct rpmem_pool_attr *pool_attr) { struct rpmem_req_attr ex_req = REQ_ATTR_INIT; struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT; UT_ASSERTne(arg, NULL); UT_ASSERTeq(ex_req.provider, req->provider); UT_ASSERTeq(ex_req.pool_size, req->pool_size); UT_ASSERTeq(ex_req.nlanes, req->nlanes); UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0); UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0); struct req_arg *args = arg; return rpmemd_obc_create_resp(obc, 0, &args->resp); }
/*
 * test_worker -- (internal) verify pmemobj_direct() before and after the
 * main thread closes the pool, using two condvar handshakes for ordering
 */
static void *
test_worker(void *arg)
{
	/* check before pool is closed, then let main continue */
	UT_ASSERTne(pmemobj_direct(thread_oid), NULL);
	pthread_mutex_lock(&lock1);
	cond1 = 1;
	pthread_cond_signal(&sync_cond1);
	pthread_mutex_unlock(&lock1);

	/* wait for main thread to free & close, then check */
	pthread_mutex_lock(&lock2);
	/* loop guards against spurious wakeups */
	while (!cond2)
		pthread_cond_wait(&sync_cond2, &lock2);
	pthread_mutex_unlock(&lock2);

	/* with the pool closed, the oid can no longer be resolved */
	UT_ASSERTeq(pmemobj_direct(thread_oid), NULL);

	return NULL;
}
/*
 * req_cb_close -- callback for close request operation
 *
 * This function behaves according to arguments specified via
 * struct req_cb_arg.
 */
static int
req_cb_close(struct rpmemd_obc *obc, void *arg)
{
	UT_ASSERTne(arg, NULL);

	struct req_cb_arg *args = arg;

	/* record that a close message has been seen */
	args->types |= (1 << RPMEM_MSG_TYPE_CLOSE);

	int ret = args->ret;
	if (args->resp) {
		int resp_ret = rpmemd_obc_close_resp(obc, args->status);
		/* force_ret overrides whatever the response call returned */
		if (!args->force_ret)
			ret = resp_ret;
	}

	return ret;
}
/* * init_pool -- map local pool file or allocate memory region */ static void init_pool(struct pool_entry *pool, const char *pool_path, const char *pool_size) { int ret = util_parse_size(pool_size, &pool->size); UT_ASSERTeq(ret, 0); if (strcmp(pool_path, "mem") == 0) { pool->pool = MALLOC(pool->size); pool->is_mem = 1; } else { pool->pool = pmem_map_file(pool_path, pool->size, PMEM_FILE_CREATE | PMEM_FILE_EXCL, 0666, &pool->size, NULL); UT_ASSERTne(pool->pool, NULL); pool->is_mem = 0; unlink(pool_path); } }
/*
 * test_close -- test case for closing remote pool
 */
static int
test_close(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_close <id>");

	/* translate the id argument into a pool slot */
	int id = atoi(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];

	/* the pool must have been opened/created by a previous test case */
	UT_ASSERTne(pool->rpp, NULL);

	int ret = rpmem_close(pool->rpp);
	UT_ASSERTeq(ret, 0);

	free_pool(pool);

	return 1;
}
/*
 * get_provider -- get provider for given target
 *
 * prov_name may be a concrete provider name or "any", in which case the
 * first provider that probes successfully is chosen (verbs preferred).
 * May reduce *nlanes for the sockets provider.
 */
static enum rpmem_provider
get_provider(const char *target, const char *prov_name, unsigned *nlanes)
{
	struct rpmem_fip_probe probe;
	int ret;

	/* "any" means: pick whichever provider probes successfully */
	int any = 0;
	if (strcmp(prov_name, "any") == 0)
		any = 1;

	ret = rpmem_fip_probe_get(target, &probe);
	UT_ASSERTeq(ret, 0);
	/* at least one provider must be available on the target */
	UT_ASSERT(rpmem_fip_probe_any(probe));

	enum rpmem_provider provider;
	if (any) {
		/* return verbs in first place */
		if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS))
			provider = RPMEM_PROV_LIBFABRIC_VERBS;
		else if (rpmem_fip_probe(probe,
				RPMEM_PROV_LIBFABRIC_SOCKETS))
			provider = RPMEM_PROV_LIBFABRIC_SOCKETS;
		else
			UT_ASSERT(0);
	} else {
		/* the explicitly requested provider must be available */
		provider = rpmem_provider_from_str(prov_name);
		UT_ASSERTne(provider, RPMEM_PROV_UNKNOWN);
		UT_ASSERT(rpmem_fip_probe(probe, provider));
	}

	/*
	 * Decrease number of lanes for socket provider because
	 * the test may be too long.
	 */
	if (provider == RPMEM_PROV_LIBFABRIC_SOCKETS)
		*nlanes = SOCK_NLANES;

	return provider;
}
static void * thread_func(void *arg) { int start_idx = *(int *)arg; for (int repeat = 0; repeat < TEST_REPEAT_CREATE_POOLS; ++repeat) { for (int idx = 0; idx < npools; ++idx) { int pool_id = start_idx + idx; /* delete old pool with the same id if exist */ if (pools[pool_id] != NULL) { vmem_delete(pools[pool_id]); pools[pool_id] = NULL; } if (pool_id % 2 == 0) { /* for even pool_id, create in region */ pools[pool_id] = vmem_create_in_region( mem_pools[pool_id / 2], VMEM_MIN_POOL); if (pools[pool_id] == NULL) UT_FATAL("!vmem_create_in_region"); } else { /* for odd pool_id, create in file */ pools[pool_id] = vmem_create(dir, VMEM_MIN_POOL); if (pools[pool_id] == NULL) UT_FATAL("!vmem_create"); } void *test = vmem_malloc(pools[pool_id], sizeof(void *)); UT_ASSERTne(test, NULL); vmem_free(pools[pool_id], test); } } return NULL; }