int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_mt"); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); if ((pop = pmemobj_create(argv[1], "mt", PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); int i = 0; long ncpus = sysconf(_SC_NPROCESSORS_ONLN); pthread_t *threads = MALLOC(2 * ncpus * sizeof(threads[0])); for (int j = 0; j < ncpus; ++j) { PTHREAD_CREATE(&threads[i++], NULL, tx_alloc_free, NULL); PTHREAD_CREATE(&threads[i++], NULL, tx_snap, NULL); } while (i > 0) PTHREAD_JOIN(threads[--i], NULL); pmemobj_close(pop); FREE(threads); DONE(NULL); }
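/*
 * tx_alloc_free and tx_snap are defined elsewhere in this test. A minimal
 * sketch of what the allocating worker could look like, assuming the public
 * libpmemobj transaction API (TX_BEGIN/pmemobj_tx_alloc/pmemobj_tx_free);
 * the loop count is a hypothetical placeholder:
 */
static void *
tx_alloc_free_sketch(void *arg)
{
	for (int i = 0; i < 1000; ++i) {
		/* allocate and immediately free inside one transaction */
		TX_BEGIN(pop) {
			PMEMoid oid = pmemobj_tx_alloc(64, 0);
			pmemobj_tx_free(oid);
		} TX_END
	}

	return NULL;
}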
static void
test_open(unsigned nthreads)
{
	size_t len = strlen(Dir) + 50; /* reserve some space for pool id */
	char *filename = MALLOC(sizeof(*filename) * len);

	/* create all the pools */
	for (unsigned pool_id = 0; pool_id < Npools * nthreads; ++pool_id) {
		snprintf(filename, len, "%s" OS_DIR_SEP_STR "pool%u",
				Dir, pool_id);
		UT_OUT("%s", filename);

		Pools[pool_id] = pmemcto_create(filename, "test",
				PMEMCTO_MIN_POOL, 0600);
		UT_ASSERTne(Pools[pool_id], NULL);
	}

	for (unsigned pool_id = 0; pool_id < Npools * nthreads; ++pool_id)
		pmemcto_close(Pools[pool_id]);

	for (unsigned t = 0; t < nthreads; t++) {
		Pool_idx[t] = Npools * t;
		PTHREAD_CREATE(&Threads[t], NULL, thread_func_open,
				&Pool_idx[t]);
	}

	for (unsigned t = 0; t < nthreads; t++)
		PTHREAD_JOIN(&Threads[t], NULL);

	FREE(filename);
}
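/*
 * thread_func_open is defined elsewhere. A minimal sketch, assuming each
 * thread reopens and closes its own range of Npools pools by index; the
 * filename construction mirrors test_open() above, and pmemcto_open() is
 * the documented libpmemcto call:
 */
static void *
thread_func_open_sketch(void *arg)
{
	unsigned start_idx = *(unsigned *)arg;
	size_t len = strlen(Dir) + 50;
	char *filename = MALLOC(sizeof(*filename) * len);

	for (unsigned i = start_idx; i < start_idx + Npools; ++i) {
		snprintf(filename, len, "%s" OS_DIR_SEP_STR "pool%u", Dir, i);

		Pools[i] = pmemcto_open(filename, "test");
		UT_ASSERTne(Pools[i], NULL);

		pmemcto_close(Pools[i]);
	}

	FREE(filename);
	return NULL;
}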
int
main(int argc, char *argv[])
{
	START(argc, argv, "vmem_multiple_pools");

	if (argc < 4)
		UT_FATAL("usage: %s directory npools nthreads", argv[0]);

	dir = argv[1];
	npools = atoi(argv[2]);
	int nthreads = atoi(argv[3]);

	UT_OUT("create %d pools in %d thread(s)", npools, nthreads);

	const unsigned mem_pools_size = (npools / 2 + npools % 2) * nthreads;

	mem_pools = MALLOC(mem_pools_size * sizeof(char *));
	pools = CALLOC(npools * nthreads, sizeof(VMEM *));

	os_thread_t *threads = CALLOC(nthreads, sizeof(os_thread_t));
	UT_ASSERTne(threads, NULL);

	int *pool_idx = CALLOC(nthreads, sizeof(int));
	UT_ASSERTne(pool_idx, NULL);

	for (unsigned pool_id = 0; pool_id < mem_pools_size; ++pool_id) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pools[pool_id] = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20);
	}

	/* create and destroy pools multiple times */
	for (int t = 0; t < nthreads; t++) {
		pool_idx[t] = npools * t;
		PTHREAD_CREATE(&threads[t], NULL, thread_func, &pool_idx[t]);
	}

	for (int t = 0; t < nthreads; t++)
		PTHREAD_JOIN(&threads[t], NULL);

	for (int pool_id = 0; pool_id < npools * nthreads; ++pool_id) {
		if (pools[pool_id] != NULL) {
			vmem_delete(pools[pool_id]);
			pools[pool_id] = NULL;
		}
	}

	FREE(mem_pools);
	FREE(pools);
	FREE(threads);
	FREE(pool_idx);

	DONE(NULL);
}
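/*
 * thread_func is defined elsewhere. A minimal sketch, assuming each thread
 * creates and deletes its share of pools, placing even-indexed pools in the
 * pre-mapped anonymous regions; the exact partitioning of mem_pools between
 * threads is an assumption, while vmem_create_in_region() and vmem_create()
 * are the documented libvmem calls:
 */
static void *
thread_func_sketch(void *arg)
{
	int start_idx = *(int *)arg;

	for (int i = 0; i < npools; ++i) {
		int pool_id = start_idx + i;
		if (pool_id % 2 == 0) {
			/* even ids: create the pool in a pre-mapped region */
			pools[pool_id] = vmem_create_in_region(
					mem_pools[pool_id / 2], VMEM_MIN_POOL);
		} else {
			/* odd ids: let libvmem back the pool with a file */
			pools[pool_id] = vmem_create(dir, VMEM_MIN_POOL);
		}
		UT_ASSERTne(pools[pool_id], NULL);

		vmem_delete(pools[pool_id]);
		pools[pool_id] = NULL;
	}

	return NULL;
}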
static void
run_mt_test(void *(*worker)(void *))
{
	pthread_t thread[NUM_THREADS];
	int ver[NUM_THREADS];

	for (int i = 0; i < NUM_THREADS; ++i) {
		ver[i] = 10000 + i;
		PTHREAD_CREATE(&thread[i], NULL, worker, &ver[i]);
	}

	for (int i = 0; i < NUM_THREADS; ++i) {
		PTHREAD_JOIN(thread[i], NULL);
	}
}
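/*
 * Workers passed to run_mt_test() receive a pointer to a per-thread version
 * number. A minimal sketch of such a worker; the body is a hypothetical
 * placeholder (the real workers drive the API under test with the given
 * version):
 */
static void *
worker_sketch(void *arg)
{
	int ver = *(int *)arg;

	/* a real worker would exercise the tested API with this version */
	UT_OUT("worker got version %d", ver);

	return NULL;
}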
/*
 * test_persist -- test case for persist operation
 */
static int
test_persist(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 4)
		UT_FATAL("usage: test_persist <id> <seed> <nthreads> <nops>");

	int id = atoi(argv[0]);
	UT_ASSERT(id >= 0 && id < MAX_IDS);
	struct pool_entry *pool = &pools[id];

	srand(atoi(argv[1]));

	int nthreads = atoi(argv[2]);
	int nops = atoi(argv[3]);

	uint8_t *buff = (uint8_t *)pool->pool;
	size_t buff_size = pool->size;

	for (size_t i = 0; i < buff_size; i++)
		buff[i] = rand();

	pthread_t *threads = MALLOC(nthreads * sizeof(*threads));
	struct thread_arg *args = MALLOC(nthreads * sizeof(*args));
	size_t size_per_thread = buff_size / nthreads;
	UT_ASSERTeq(buff_size % nthreads, 0);

	for (int i = 0; i < nthreads; i++) {
		args[i].rpp = pool->rpp;
		args[i].nops = nops;
		args[i].lane = (unsigned)i;
		args[i].off = i * size_per_thread;
		size_t size_left = buff_size - size_per_thread * i;
		args[i].size = size_left < size_per_thread ?
				size_left : size_per_thread;

		PTHREAD_CREATE(&threads[i], NULL, persist_thread, &args[i]);
	}

	for (int i = 0; i < nthreads; i++)
		PTHREAD_JOIN(threads[i], NULL);

	FREE(args);
	FREE(threads);

	return 4;
}
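/*
 * persist_thread is defined elsewhere. A minimal sketch, assuming it issues
 * nops random-length rpmem_persist() calls within its slice of the pool;
 * the 4-argument rpmem_persist() matches librpmem before the flags argument
 * was added, and the random length logic is illustrative:
 */
static void *
persist_thread_sketch(void *arg)
{
	struct thread_arg *a = arg;

	for (int i = 0; i < a->nops; i++) {
		/* pick a random offset and length inside this slice */
		size_t off = a->off + (size_t)rand() % a->size;
		size_t left = a->size - (off - a->off);
		size_t len = (size_t)rand() % left + 1;

		int ret = rpmem_persist(a->rpp, off, len, a->lane);
		UT_ASSERTeq(ret, 0);
	}

	return NULL;
}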
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_is_pmem");

	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s file [env]", argv[0]);

	if (argc == 3)
		UT_ASSERTeq(setenv("PMEM_IS_PMEM_FORCE", argv[2], 1), 0);

	int fd = OPEN(argv[1], O_RDWR);
	ut_util_stat_t stbuf;
	FSTAT(fd, &stbuf);

	Size = stbuf.st_size;
	Addr = MMAP(0, stbuf.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	CLOSE(fd);

	pthread_t threads[NTHREAD];
	int ret[NTHREAD];

	/* kick off NTHREAD threads */
	for (int i = 0; i < NTHREAD; i++)
		PTHREAD_CREATE(&threads[i], NULL, worker, &ret[i]);

	/* wait for all the threads to complete */
	for (int i = 0; i < NTHREAD; i++)
		PTHREAD_JOIN(threads[i], NULL);

	/* verify that all the threads return the same value */
	for (int i = 1; i < NTHREAD; i++)
		UT_ASSERTeq(ret[0], ret[i]);

	UT_OUT("%d", ret[0]);

	UT_ASSERTeq(unsetenv("PMEM_IS_PMEM_FORCE"), 0);

	UT_OUT("%d", pmem_is_pmem(Addr, Size));

	DONE(NULL);
}
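/*
 * worker is defined elsewhere. A minimal sketch, assuming each thread simply
 * queries pmem_is_pmem() on the shared mapping and reports the result back
 * through its slot in ret[] (main() then asserts all slots agree):
 */
static void *
worker_sketch(void *arg)
{
	int *ret = arg;

	*ret = pmem_is_pmem(Addr, Size);

	return NULL;
}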
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_rw_mt");

	if (argc != 6)
		FATAL("usage: %s bsize file seed nthread nops", argv[0]);

	Bsize = strtoul(argv[1], NULL, 0);

	const char *path = argv[2];

	if ((Handle = pmemblk_pool_open(path, Bsize)) == NULL)
		FATAL("!%s: pmemblk_pool_open", path);

	if (Nblock == 0)
		Nblock = pmemblk_nblock(Handle);
	Seed = strtoul(argv[3], NULL, 0);
	Nthread = strtoul(argv[4], NULL, 0);
	Nops = strtoul(argv[5], NULL, 0);

	OUT("%s block size %zu usable blocks %zu", argv[1], Bsize, Nblock);

	pthread_t threads[Nthread];

	/* kick off nthread threads */
	for (int i = 0; i < Nthread; i++)
		PTHREAD_CREATE(&threads[i], NULL, worker, (void *)(long)i);

	/* wait for all the threads to complete */
	for (int i = 0; i < Nthread; i++)
		PTHREAD_JOIN(threads[i], NULL);

	pmemblk_pool_close(Handle);

	/* XXX not ready to pass this part of the test yet */
	int result = pmemblk_pool_check(path);
	if (result < 0)
		OUT("!%s: pmemblk_pool_check", path);
	else if (result == 0)
		OUT("%s: pmemblk_pool_check: not consistent", path);

	DONE(NULL);
}
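/*
 * worker is defined elsewhere. A minimal sketch, assuming each thread does
 * Nops random block reads and writes with a per-thread seed; the read/write
 * mix is illustrative, while pmemblk_read()/pmemblk_write() are the
 * documented libpmemblk calls:
 */
static void *
worker_sketch(void *arg)
{
	long mytid = (long)arg;
	unsigned myseed = Seed + (unsigned)mytid;
	unsigned char buf[Bsize];

	for (unsigned i = 0; i < Nops; i++) {
		size_t lba = rand_r(&myseed) % Nblock;

		if (rand_r(&myseed) % 2) {
			/* write a thread-specific pattern */
			memset(buf, (int)mytid, Bsize);
			if (pmemblk_write(Handle, buf, lba) < 0)
				OUT("!write lba %zu", lba);
		} else {
			if (pmemblk_read(Handle, buf, lba) < 0)
				OUT("!read lba %zu", lba);
		}
	}

	return NULL;
}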
static void
test_create(unsigned nthreads)
{
	/* create and destroy pools multiple times */
	for (unsigned t = 0; t < nthreads; t++) {
		Pool_idx[t] = Npools * t;
		PTHREAD_CREATE(&Threads[t], NULL, thread_func_create,
				&Pool_idx[t]);
	}

	for (unsigned t = 0; t < nthreads; t++)
		PTHREAD_JOIN(&Threads[t], NULL);

	for (unsigned i = 0; i < Npools * nthreads; ++i) {
		if (Pools[i] != NULL) {
			pmemcto_close(Pools[i]);
			Pools[i] = NULL;
		}
	}
}
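/*
 * thread_func_create is defined elsewhere. A minimal sketch, assuming each
 * thread creates its own range of Npools pools and leaves them open for
 * test_create() to close after the join; it also assumes a fresh pool file
 * per index (the filename scheme mirrors test_open() above):
 */
static void *
thread_func_create_sketch(void *arg)
{
	unsigned start_idx = *(unsigned *)arg;
	size_t len = strlen(Dir) + 50;
	char *filename = MALLOC(sizeof(*filename) * len);

	for (unsigned i = start_idx; i < start_idx + Npools; ++i) {
		snprintf(filename, len, "%s" OS_DIR_SEP_STR "pool%u", Dir, i);

		Pools[i] = pmemcto_create(filename, "test",
				PMEMCTO_MIN_POOL, 0600);
		UT_ASSERTne(Pools[i], NULL);
	}

	FREE(filename);
	return NULL;
}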
int
main(int argc, char *argv[])
{
	if (argc < 2) {
		fprintf(stderr, "Supply data set file.\n");
		return 1;
	}

	signal(SIGINT, Exit);
	signal(SIGTERM, Exit);

#ifdef SW
	init_pipe_handler();
	PTHREAD_DECL(bestFit);
	PTHREAD_DECL(qrsDet);
	PTHREAD_CREATE(bestFit);
	PTHREAD_CREATE(qrsDet);
#endif

	PTHREAD_DECL(Sender);
	PTHREAD_DECL(Receiver);
	PTHREAD_CREATE_WITH_ARG(Sender, argv[1]);
	PTHREAD_CREATE(Receiver);

	PTHREAD_JOIN(Sender);
	PTHREAD_CANCEL(Receiver);

#ifdef SW
	PTHREAD_CANCEL(qrsDet);
	PTHREAD_CANCEL(bestFit);
	close_pipe_handler();
#endif

	return 0;
}
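/*
 * PTHREAD_DECL/PTHREAD_CREATE/PTHREAD_CREATE_WITH_ARG/PTHREAD_JOIN/
 * PTHREAD_CANCEL above are project-local wrappers whose definitions are not
 * shown here. A plausible minimal sketch of such macros, keyed on the thread
 * function's name (the real header may differ):
 */
#define PTHREAD_DECL(name)	pthread_t name##_tid
#define PTHREAD_CREATE(name) \
	pthread_create(&name##_tid, NULL, name, NULL)
#define PTHREAD_CREATE_WITH_ARG(name, arg) \
	pthread_create(&name##_tid, NULL, name, (void *)(arg))
#define PTHREAD_JOIN(name)	pthread_join(name##_tid, NULL)
#define PTHREAD_CANCEL(name)	pthread_cancel(name##_tid)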
int
main(int argc, char *argv[])
{
	if (argc == 4 && argv[3][0] == 't') {
		exit(0);
	}

	START(argc, argv, "vmmalloc_fork");

	if (argc < 4)
		FATAL("usage: %s [c|e] <nfork> <nthread>", argv[0]);

	int nfork = atoi(argv[2]);
	int nthread = atoi(argv[3]);
	ASSERT(nfork >= 0);
	ASSERT(nthread >= 0);

	pthread_t thread[nthread];
	int first_child = 0;

	int **bufs = malloc(nfork * NBUFS * sizeof (void *));
	ASSERTne(bufs, NULL);

	size_t *sizes = malloc(nfork * NBUFS * sizeof (size_t));
	ASSERTne(sizes, NULL);

	pid_t *pids1 = malloc(nfork * sizeof (pid_t));
	ASSERTne(pids1, NULL);

	pid_t *pids2 = malloc(nfork * sizeof (pid_t));
	ASSERTne(pids2, NULL);

	for (int i = 0; i < nfork; i++) {
		for (int j = 0; j < NBUFS; j++) {
			int idx = i * NBUFS + j;
			sizes[idx] = sizeof (int) + 64 * (rand() % 100);
			bufs[idx] = malloc(sizes[idx]);
			ASSERTne(bufs[idx], NULL);
			ASSERT(malloc_usable_size(bufs[idx]) >= sizes[idx]);
		}

		for (int t = 0; t < nthread; ++t) {
			PTHREAD_CREATE(&thread[t], NULL, do_test, NULL);
		}

		pids1[i] = fork();
		if (pids1[i] == -1)
			OUT("fork failed");
		ASSERTne(pids1[i], -1);

		if (pids1[i] == 0 && argv[1][0] == 'e' && i == nfork - 1) {
			int fd = open("/dev/null", O_RDWR, S_IWUSR);
			int res = dup2(fd, 1);
			ASSERTne(res, -1);
			close(fd);
			execl("/bin/echo", "/bin/echo", "Hello world!",
					(char *)NULL);
		}

		pids2[i] = getpid();

		for (int j = 0; j < NBUFS; j++) {
			*bufs[i * NBUFS + j] = ((unsigned)pids2[i] << 16) + j;
		}

		if (pids1[i]) {
			/* parent */
			for (int t = 0; t < nthread; ++t) {
				PTHREAD_JOIN(thread[t], NULL);
			}
		} else {
			/* child */
			first_child = i + 1;
		}

		for (int ii = 0; ii < i; ii++) {
			for (int j = 0; j < NBUFS; j++) {
				ASSERTeq(*bufs[ii * NBUFS + j],
					((unsigned)pids2[ii] << 16) + j);
			}
		}
	}

	for (int i = first_child; i < nfork; i++) {
		int status;
		waitpid(pids1[i], &status, 0);
		ASSERT(WIFEXITED(status));
		ASSERTeq(WEXITSTATUS(status), 0);
	}

	free(pids1);
	free(pids2);

	for (int i = 0; i < nfork; i++) {
		for (int j = 0; j < NBUFS; j++) {
			int idx = i * NBUFS + j;
			ASSERT(malloc_usable_size(bufs[idx]) >= sizes[idx]);
			free(bufs[idx]);
		}
	}

	free(bufs);
	free(sizes);

	if (first_child == 0) {
		DONE(NULL);
	}
}
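/*
 * do_test is defined elsewhere. A minimal sketch, assuming each thread just
 * hammers malloc/free concurrently with the fork; under libvmmalloc these
 * calls are serviced from the volatile pool, which is what this test
 * stresses. The counts and sizes are illustrative:
 */
static void *
do_test_sketch(void *arg)
{
	void *p[100];

	for (int i = 0; i < 100; i++) {
		p[i] = malloc(64 + rand() % 1024);
		ASSERTne(p[i], NULL);
	}

	for (int i = 0; i < 100; i++)
		free(p[i]);

	return NULL;
}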
/**
 * Make sure the DB is current and then go into FAM-monitored mode
 * updating the DB all the time in the background. Exits after a
 * signal (e.g. SIGHUP/SIGINT) is received.
 */
static int
build(const char *libraries,
      const char *dbName,
      size_t mem_limit,
      const char *log,
      int argc,
      char *argv[])
{
	int i;
	unsigned int ret;
	DIC cls;
	char *ename;
	FILE *logfile;
	PTHREAD_T workerThread;
	void *unused;

	cls.argc = argc;
	cls.argv = argv;
	cls.deferredCount = 0;
	cls.deferredTruncations = NULL;

	logfile = NULL;
	if (log != NULL) {
		logfile = fopen(log, "w+");
		if (logfile == NULL)
			my_log(stderr,
			       DOODLE_LOG_CRITICAL,
			       _("Could not open '%s' for logging: %s.\n"),
			       log,
			       strerror(errno));
	}
	cls.logContext = logfile;
	cls.log = &my_log;

	if (dbName == NULL) {
		my_log(logfile,
		       DOODLE_LOG_CRITICAL,
		       _("No database specified. Aborting.\n"));
		return -1;
	}
	for (i = strlen(dbName); i >= 0; i--) {
		if (dbName[i] == ':') {
			my_log(logfile,
			       DOODLE_LOG_CRITICAL,
			       _("'%s' is an invalid database filename (has a colon) for building database (option '%s').\n"),
			       dbName,
			       "-b");
			return -1;
		}
	}
	ename = expandFileName(dbName);
	if (ename == NULL)
		return -1;
	cls.ename = ename;
	cls.tree = DOODLE_tree_create(&my_log,
				      logfile,
				      ename);
	cls.treePresent = 1;
	if (cls.tree == NULL)
		return -1;
	if (mem_limit != 0)
		DOODLE_tree_set_memory_limit(cls.tree, mem_limit);
	cls.elist = forkExtractor(do_default,
				  libraries,
				  &my_log,
				  logfile);
	if (cls.elist == NULL) {
		DOODLE_tree_destroy(cls.tree);
		return -1;
	}

	if (0 != FAMOpen2(&cls.fc, "doodled")) {
		my_log(logfile,
		       DOODLE_LOG_CRITICAL,
		       _("Failed to connect to fam. Aborting.\n"));
		DOODLE_tree_destroy(cls.tree);
		return -1;
	}
	cls.fr = NULL;
	cls.frPos = 0;
	cls.frSize = 0;
	GROW(cls.fr, cls.frSize, 128);
	cls.frNames = NULL;
	ret = 0;
	GROW(cls.frNames, ret, 128);
	ret = 0;

	MUTEX_CREATE(&cls.lock);
	if (0 != PTHREAD_CREATE(&workerThread,
				&worker,
				&cls,
				64 * 1024)) {
		my_log(logfile,
		       DOODLE_LOG_CRITICAL,
		       _("Failed to create worker thread: %s"),
		       strerror(errno));
		ret = -1;
	} else {
		wait_for_shutdown();
		cls.continueRunning = 0;
		SEMAPHORE_UP(cls.signal);
		PTHREAD_JOIN(&workerThread, &unused);
	}
	MUTEX_DESTROY(&cls.lock);

	my_log(logfile,
	       DOODLE_LOG_VERBOSE,
	       _("doodled is shutting down.\n"));
	if (cls.frPos == 0) {
		my_log(logfile,
		       DOODLE_LOG_CRITICAL,
		       _("No files exist that doodled would monitor for changes. Exiting.\n"));
	}
	for (i = 0; i < cls.frSize; i++) {
		if (cls.frNames[i] != NULL) {
			my_log(logfile,
			       DOODLE_LOG_VERBOSE,
			       _("Cancelling fam monitor '%s'.\n"),
			       cls.frNames[i]);
			free(cls.frNames[i]);
		}
	}

	for (i = cls.deferredCount - 1; i >= 0; i--)
		free(cls.deferredTruncations[i]);
	GROW(cls.deferredTruncations, cls.deferredCount, 0);

	i = cls.frSize;
	GROW(cls.fr, cls.frSize, 0);
	cls.frSize = i;
	GROW(cls.frNames, cls.frSize, 0);

	my_log(logfile,
	       DOODLE_LOG_VERBOSE,
	       _("Unloading libextractor plugins.\n"));
	joinExtractor(cls.elist);
	free(ename);
	if (logfile != NULL)
		fclose(logfile);
	return ret;
}
/**
 * Main worker thread. Registers FAM events and processes them.
 */
static void *
worker(void *arg)
{
	DIC *cls = arg;
	int i;
	int more;
	int wasMore;
	void *unused;
	char *fn;
	PTHREAD_T helperThread;

	cls->log(cls->logContext,
		 DOODLE_LOG_VERY_VERBOSE,
		 _("Main worker thread created.\n"));
	cls->eventCount = 0;
	cls->continueRunning = 1;
	cls->events = NULL;
	cls->signal = SEMAPHORE_NEW(0);
	if (0 != PTHREAD_CREATE(&helperThread,
				&processEvents,
				cls,
				64 * 1024)) {
		cls->log(cls->logContext,
			 DOODLE_LOG_CRITICAL,
			 _("Failed to spawn event processing thread.\n"));
		run_shutdown(0);
		return NULL;
	}

	cls->log(cls->logContext,
		 DOODLE_LOG_VERBOSE,
		 _("Registering with FAM for file system events.\n"));
	for (i = 0; i < cls->argc; i++) {
		char *exp;

		cls->log(cls->logContext,
			 DOODLE_LOG_VERY_VERBOSE,
			 _("Indexing '%s'\n"),
			 cls->argv[i]);
		exp = expandFileName(cls->argv[i]);
		if (-1 == do_index(exp, cls)) {
			free(exp);
			break;
		}
		free(exp);
	}
	DOODLE_tree_destroy(cls->tree);
	cls->treePresent = 0;
	cls->tree = NULL;

	cls->log(cls->logContext,
		 DOODLE_LOG_VERBOSE,
		 _("doodled startup complete. Now waiting for FAM events.\n"));
	wasMore = 0;
	while ((cls->continueRunning) &&
	       (0 == testShutdown())) {
		SEMAPHORE_DOWN(cls->signal);
		cls->log(cls->logContext,
			 DOODLE_LOG_INSANELY_VERBOSE,
			 "Received signal to process fam event.\n");
		MUTEX_LOCK(&cls->lock);
		if (cls->eventCount > 0) {
			fn = cls->events[cls->eventCount - 1];
			GROW(cls->events,
			     cls->eventCount,
			     cls->eventCount - 1);
			more = cls->eventCount > 0;
			cls->log(cls->logContext,
				 DOODLE_LOG_INSANELY_VERBOSE,
				 "Processing fam event '%s'.\n",
				 fn);
		} else {
			fn = NULL;
			more = 0;
		}
		if (!wasMore) {
			cls->treePresent++;
			if (cls->treePresent == 1)
				cls->tree = DOODLE_tree_create((DOODLE_Logger) cls->log,
							       cls->logContext,
							       cls->ename);
		}
		MUTEX_UNLOCK(&cls->lock);
		if (fn != NULL) {
			do_index(fn, cls);
			free(fn);
		}
		MUTEX_LOCK(&cls->lock);
		if (!more) {
			cls->treePresent--;
			if (cls->treePresent == 0)
				DOODLE_tree_destroy(cls->tree);
		}
		MUTEX_UNLOCK(&cls->lock);
		wasMore = more;
	} /* forever (until signal) */
	cls->continueRunning = 0;
	if (0 != FAMClose(&cls->fc)) {
		cls->log(cls->logContext,
			 DOODLE_LOG_CRITICAL,
			 _("Error disconnecting from fam.\n"));
	}
	PTHREAD_KILL(&helperThread, SIGTERM);
	PTHREAD_JOIN(&helperThread, &unused);
	SEMAPHORE_FREE(cls->signal);
	if (cls->treePresent > 0)
		DOODLE_tree_destroy(cls->tree);
	return NULL;
}
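/*
 * processEvents is defined elsewhere. A minimal sketch, assuming it drains
 * FAM events from the connection and queues the affected filenames for the
 * main worker loop; GROW, MUTEX_*, and SEMAPHORE_UP are the project's own
 * helpers, and the event filtering is simplified:
 */
static void *
processEvents_sketch(void *arg)
{
	DIC *cls = arg;
	FAMEvent ev;

	while (cls->continueRunning) {
		if (-1 == FAMNextEvent(&cls->fc, &ev))
			break;
		MUTEX_LOCK(&cls->lock);
		GROW(cls->events,
		     cls->eventCount,
		     cls->eventCount + 1);
		cls->events[cls->eventCount - 1] = strdup(ev.filename);
		MUTEX_UNLOCK(&cls->lock);
		SEMAPHORE_UP(cls->signal); /* wake the main worker */
	}
	return NULL;
}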
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_direct");

	if (argc != 3)
		UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);

	int npools = atoi(argv[2]);
	const char *dir = argv[1];
	int r;

	pthread_mutex_init(&lock1, NULL);
	pthread_mutex_init(&lock2, NULL);
	pthread_cond_init(&sync_cond1, NULL);
	pthread_cond_init(&sync_cond2, NULL);
	cond1 = cond2 = 0;

	PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *));
	UT_ASSERTne(pops, NULL);

	char path[MAX_PATH_LEN];
	for (int i = 0; i < npools; ++i) {
		snprintf(path, MAX_PATH_LEN, "%s/testfile%d", dir, i);
		pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
				S_IWUSR | S_IRUSR);

		if (pops[i] == NULL)
			UT_FATAL("!pmemobj_create");
	}

	PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(oids, NULL);
	PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(tmpoids, NULL);

	oids[0] = OID_NULL;
	UT_ASSERTeq(pmemobj_direct(oids[0]), NULL);

	for (int i = 0; i < npools; ++i) {
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, 0};
		UT_ASSERTeq(pmemobj_direct(oids[i]), NULL);

		uint64_t off = pops[i]->heap_offset;
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
		UT_ASSERTeq((char *)pmemobj_direct(oids[i]) - off,
				(char *)pops[i]);

		r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
		UT_ASSERTeq(r, 0);
	}

	r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
	UT_ASSERTeq(r, 0);
	UT_ASSERTne(pmemobj_direct(thread_oid), NULL);

	pthread_t t;
	PTHREAD_CREATE(&t, NULL, test_worker, NULL);

	/* wait for the worker thread to perform the first check */
	pthread_mutex_lock(&lock1);
	while (!cond1)
		pthread_cond_wait(&sync_cond1, &lock1);
	pthread_mutex_unlock(&lock1);

	for (int i = 0; i < npools; ++i) {
		UT_ASSERTne(pmemobj_direct(tmpoids[i]), NULL);

		pmemobj_free(&tmpoids[i]);

		UT_ASSERTeq(pmemobj_direct(tmpoids[i]), NULL);
		pmemobj_close(pops[i]);
		UT_ASSERTeq(pmemobj_direct(oids[i]), NULL);
	}

	/* signal the worker that we're free and closed */
	pthread_mutex_lock(&lock2);
	cond2 = 1;
	pthread_cond_signal(&sync_cond2);
	pthread_mutex_unlock(&lock2);

	PTHREAD_JOIN(t, NULL);

	pthread_cond_destroy(&sync_cond1);
	pthread_cond_destroy(&sync_cond2);
	pthread_mutex_destroy(&lock1);
	pthread_mutex_destroy(&lock2);

	FREE(pops);
	FREE(tmpoids);
	FREE(oids);

	DONE(NULL);
}
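/*
 * test_worker is defined elsewhere. A minimal sketch reconstructed from the
 * handshake in main() above: one check while the pools are still open, a
 * second check after main() has freed the object and closed the pools:
 */
static void *
test_worker_sketch(void *arg)
{
	/* check the object is still reachable, then let main() continue */
	UT_ASSERTne(pmemobj_direct(thread_oid), NULL);
	pthread_mutex_lock(&lock1);
	cond1 = 1;
	pthread_cond_signal(&sync_cond1);
	pthread_mutex_unlock(&lock1);

	/* wait until main() has freed the objects and closed the pools */
	pthread_mutex_lock(&lock2);
	while (!cond2)
		pthread_cond_wait(&sync_cond2, &lock2);
	pthread_mutex_unlock(&lock2);

	UT_ASSERTeq(pmemobj_direct(thread_oid), NULL);

	return NULL;
}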
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_sync");
	util_init();

	if (argc < 4)
		FATAL_USAGE();

	worker writer;
	worker checker;

	char test_type = argv[1][0];
	switch (test_type) {
		case 'm':
			writer = mutex_write_worker;
			checker = mutex_check_worker;
			break;
		case 'r':
			writer = rwlock_write_worker;
			checker = rwlock_check_worker;
			break;
		case 'c':
			writer = cond_write_worker;
			checker = cond_check_worker;
			break;
		case 't':
			writer = timed_write_worker;
			checker = timed_check_worker;
			break;
		default:
			FATAL_USAGE();
	}

	unsigned long num_threads = strtoul(argv[2], NULL, 10);
	if (num_threads > MAX_THREAD_NUM)
		UT_FATAL("Do not use more than %d threads.\n", MAX_THREAD_NUM);

	unsigned long opens = strtoul(argv[3], NULL, 10);
	if (opens > MAX_OPENS)
		UT_FATAL("Do not use more than %d runs.\n", MAX_OPENS);

	os_thread_t *write_threads =
		(os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
	os_thread_t *check_threads =
		(os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));

	/* first pool open */
	mock_open_pool(&Mock_pop);
	Mock_pop.p_ops.persist = obj_sync_persist;
	Mock_pop.p_ops.base = &Mock_pop;
	Test_obj = (struct mock_obj *)MALLOC(sizeof(struct mock_obj));

	/* zero-initialize the test object */
	pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex);
	pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex_locked);
	pmemobj_cond_zero(&Mock_pop, &Test_obj->cond);
	pmemobj_rwlock_zero(&Mock_pop, &Test_obj->rwlock);
	Test_obj->check_data = 0;
	memset(&Test_obj->data, 0, DATA_SIZE);

	for (unsigned long run = 0; run < opens; run++) {
		if (test_type == 't') {
			pmemobj_mutex_lock(&Mock_pop,
					&Test_obj->mutex_locked);
		}

		for (unsigned i = 0; i < num_threads; i++) {
			PTHREAD_CREATE(&write_threads[i], NULL, writer,
					(void *)(uintptr_t)i);
			PTHREAD_CREATE(&check_threads[i], NULL, checker,
					(void *)(uintptr_t)i);
		}

		for (unsigned i = 0; i < num_threads; i++) {
			PTHREAD_JOIN(&write_threads[i], NULL);
			PTHREAD_JOIN(&check_threads[i], NULL);
		}

		if (test_type == 't') {
			pmemobj_mutex_unlock(&Mock_pop,
					&Test_obj->mutex_locked);
		}

		/* up the run_id counter and cleanup */
		mock_open_pool(&Mock_pop);
		cleanup(test_type);
	}

	FREE(check_threads);
	FREE(write_threads);
	FREE(Test_obj);

	DONE(NULL);
}
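/*
 * The writer/checker pairs are defined elsewhere. A minimal sketch of the
 * mutex pair, assuming Test_obj->data is a byte array of DATA_SIZE; the
 * pattern written is illustrative, while pmemobj_mutex_lock()/unlock() are
 * the documented libpmemobj calls:
 */
static void *
mutex_write_worker_sketch(void *arg)
{
	if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
		return NULL;

	/* fill the data with a per-thread byte pattern */
	memset(&Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);

	pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
	return NULL;
}

static void *
mutex_check_worker_sketch(void *arg)
{
	if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
		return NULL;

	/* under the lock, all bytes must carry the same value */
	for (int i = 1; i < DATA_SIZE; i++)
		UT_ASSERTeq(Test_obj->data[i], Test_obj->data[0]);

	pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
	return NULL;
}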
/*
 * client_init -- test case for client initialization
 */
int
client_init(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);

	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];

	set_rpmem_cmd("server_init %s", persist_method);

	char fip_service[NI_MAXSERV];

	struct rpmem_target_info *info;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);

	unsigned nlanes;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);

	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, NLANES, provider, &resp);

	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};

	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);

	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);

	client_close_begin(client);
	client_close_end(client);

	rpmem_fip_fini(fip);

	rpmem_target_free(info);

	return 3;
}

/*
 * server_init -- test case for server initialization
 */
int
server_init(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <persist method>", tc->name);

	enum rpmem_persist_method persist_method = get_persist_method(argv[0]);

	unsigned nlanes;
	enum rpmem_provider provider;
	char *addr = NULL;
	server_exchange_begin(&nlanes, &provider, &addr);
	UT_ASSERTne(addr, NULL);

	struct rpmemd_fip_attr attr = {
		.addr = rpool,
		.size = POOL_SIZE,
		.nlanes = nlanes,
		.provider = provider,
		.persist_method = persist_method,
		.persist = pmem_persist,
		.nthreads = NTHREADS,
	};

	struct rpmem_resp_attr resp;
	struct rpmemd_fip *fip;
	enum rpmem_err err;

	fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
	UT_ASSERTne(fip, NULL);

	server_exchange_end(resp);

	server_close_begin();
	server_close_end();

	rpmemd_fip_fini(fip);

	FREE(addr);

	return 1;
}

/*
 * client_connect -- test case for establishing connection - client side
 */
int
client_connect(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);

	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];

	set_rpmem_cmd("server_connect %s", persist_method);

	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	int ret;

	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);

	unsigned nlanes;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);

	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, NLANES, provider, &resp);

	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};

	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);

	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);

	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);

	client_close_begin(client);

	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);

	client_close_end(client);

	rpmem_fip_fini(fip);

	rpmem_target_free(info);

	return 3;
}

/*
 * server_connect -- test case for establishing connection - server side
 */
int
server_connect(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <persist method>", tc->name);

	enum rpmem_persist_method persist_method = get_persist_method(argv[0]);

	unsigned nlanes;
	enum rpmem_provider provider;
	char *addr = NULL;
	server_exchange_begin(&nlanes, &provider, &addr);
	UT_ASSERTne(addr, NULL);

	struct rpmemd_fip_attr attr = {
		.addr = rpool,
		.size = POOL_SIZE,
		.nlanes = nlanes,
		.provider = provider,
		.persist_method = persist_method,
		.persist = pmem_persist,
		.nthreads = NTHREADS,
	};

	int ret;
	struct rpmem_resp_attr resp;
	struct rpmemd_fip *fip;
	enum rpmem_err err;

	fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
	UT_ASSERTne(fip, NULL);

	server_exchange_end(resp);

	ret = rpmemd_fip_accept(fip, -1);
	UT_ASSERTeq(ret, 0);

	server_close_begin();
	server_close_end();

	ret = rpmemd_fip_wait_close(fip, -1);
	UT_ASSERTeq(ret, 0);

	ret = rpmemd_fip_close(fip);
	UT_ASSERTeq(ret, 0);

	rpmemd_fip_fini(fip);

	FREE(addr);

	return 1;
}

/*
 * server_process -- test case for processing data on server side
 */
int
server_process(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <persist method>", tc->name);

	enum rpmem_persist_method persist_method = get_persist_method(argv[0]);

	set_pool_data(rpool, 1);

	unsigned nlanes;
	enum rpmem_provider provider;
	char *addr = NULL;
	server_exchange_begin(&nlanes, &provider, &addr);
	UT_ASSERTne(addr, NULL);

	struct rpmemd_fip_attr attr = {
		.addr = rpool,
		.size = POOL_SIZE,
		.nlanes = nlanes,
		.provider = provider,
		.persist_method = persist_method,
		.persist = pmem_persist,
		.nthreads = NTHREADS,
	};

	int ret;
	struct rpmem_resp_attr resp;
	struct rpmemd_fip *fip;
	enum rpmem_err err;

	fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
	UT_ASSERTne(fip, NULL);

	server_exchange_end(resp);

	ret = rpmemd_fip_accept(fip, -1);
	UT_ASSERTeq(ret, 0);

	ret = rpmemd_fip_process_start(fip);
	UT_ASSERTeq(ret, 0);

	server_close_begin();

	ret = rpmemd_fip_process_stop(fip);
	UT_ASSERTeq(ret, 0);

	server_close_end();

	ret = rpmemd_fip_wait_close(fip, -1);
	UT_ASSERTeq(ret, 0);

	ret = rpmemd_fip_close(fip);
	UT_ASSERTeq(ret, 0);

	rpmemd_fip_fini(fip);

	FREE(addr);

	return 1;
}

/*
 * client_persist -- test case for single-threaded persist operation
 */
int
client_persist(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);

	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];

	set_rpmem_cmd("server_process %s", persist_method);

	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);

	int ret;

	set_pool_data(lpool, 1);
	set_pool_data(rpool, 1);

	unsigned nlanes;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);

	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, NLANES, provider, &resp);

	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};

	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);

	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);

	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_fip_process_start(fip);
	UT_ASSERTeq(ret, 0);

	struct persist_arg arg = {
		.fip = fip,
		.lane = 0,
	};

	client_persist_thread(&arg);

	ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_fip_process_stop(fip);
	UT_ASSERTeq(ret, 0);

	client_close_begin(client);

	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);

	client_close_end(client);

	rpmem_fip_fini(fip);

	ret = memcmp(rpool, lpool, POOL_SIZE);
	UT_ASSERTeq(ret, 0);

	rpmem_target_free(info);

	return 3;
}

/*
 * client_persist_mt -- test case for multi-threaded persist operation
 */
int
client_persist_mt(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);

	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];

	set_rpmem_cmd("server_process %s", persist_method);

	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	int ret;

	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);

	set_pool_data(lpool, 1);
	set_pool_data(rpool, 1);

	unsigned nlanes;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);

	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, NLANES, provider, &resp);

	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};

	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);

	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);

	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_fip_process_start(fip);
	UT_ASSERTeq(ret, 0);

	pthread_t *persist_thread = MALLOC(resp.nlanes * sizeof(pthread_t));
	struct persist_arg *args = MALLOC(resp.nlanes *
			sizeof(struct persist_arg));

	for (unsigned i = 0; i < nlanes; i++) {
		args[i].fip = fip;
		args[i].lane = i;
		PTHREAD_CREATE(&persist_thread[i], NULL,
				client_persist_thread, &args[i]);
	}

	for (unsigned i = 0; i < nlanes; i++)
		PTHREAD_JOIN(persist_thread[i], NULL);

	ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_fip_process_stop(fip);
	UT_ASSERTeq(ret, 0);

	client_close_begin(client);

	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);

	client_close_end(client);

	rpmem_fip_fini(fip);

	FREE(persist_thread);
	FREE(args);

	ret = memcmp(rpool, lpool, POOL_SIZE);
	UT_ASSERTeq(ret, 0);

	rpmem_target_free(info);

	return 3;
}

/*
 * client_read -- test case for read operation
 */
int
client_read(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL("usage: %s <target> <provider> <persist method>",
				tc->name);

	char *target = argv[0];
	char *prov_name = argv[1];
	char *persist_method = argv[2];

	set_rpmem_cmd("server_process %s", persist_method);

	char fip_service[NI_MAXSERV];
	struct rpmem_target_info *info;
	int ret;

	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);

	set_pool_data(lpool, 0);
	set_pool_data(rpool, 1);

	unsigned nlanes;
	enum rpmem_provider provider = get_provider(info->node,
			prov_name, &nlanes);

	client_t *client;
	struct rpmem_resp_attr resp;
	client = client_exchange(info, NLANES, provider, &resp);

	struct rpmem_fip_attr attr = {
		.provider = provider,
		.persist_method = resp.persist_method,
		.laddr = lpool,
		.size = POOL_SIZE,
		.nlanes = resp.nlanes,
		.raddr = (void *)resp.raddr,
		.rkey = resp.rkey,
	};

	ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
	UT_ASSERT(sret > 0);

	struct rpmem_fip *fip;
	fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
	UT_ASSERTne(fip, NULL);

	ret = rpmem_fip_connect(fip);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_fip_process_start(fip);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_fip_read(fip, lpool, POOL_SIZE, 0);
	UT_ASSERTeq(ret, 0);

	ret = rpmem_fip_process_stop(fip);
	UT_ASSERTeq(ret, 0);

	client_close_begin(client);

	ret = rpmem_fip_close(fip);
	UT_ASSERTeq(ret, 0);

	client_close_end(client);

	rpmem_fip_fini(fip);

	ret = memcmp(rpool, lpool, POOL_SIZE);
	UT_ASSERTeq(ret, 0);

	rpmem_target_free(info);

	return 3;
}

/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(client_init),
	TEST_CASE(server_init),
	TEST_CASE(client_connect),
	TEST_CASE(server_connect),
	TEST_CASE(client_persist),
	TEST_CASE(client_persist_mt),
	TEST_CASE(server_process),
	TEST_CASE(client_read),
};

#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))

int
main(int argc, char *argv[])
{
	/* workaround for files left open by libfabric */
	rpmem_fip_probe_get("localhost", NULL);

	START(argc, argv, "rpmem_fip");

	common_init("rpmem_fip",
		"RPMEM_LOG_LEVEL",
		"RPMEM_LOG_FILE", 0, 0);
	rpmem_util_cmds_init();

	rpmemd_log_init("rpmemd", getenv("RPMEMD_LOG_FILE"), 0);
	rpmemd_log_level = rpmemd_log_level_from_str(
			getenv("RPMEMD_LOG_LEVEL"));

	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);

	common_fini();
	rpmemd_log_close();
	rpmem_util_cmds_fini();

	DONE(NULL);
}
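/*
 * client_persist_thread is defined elsewhere. A minimal sketch, assuming it
 * pushes its lane's slice of the local pool to the remote node; the
 * rpmem_fip_persist() signature shown is an assumption (this internal call
 * changed between librpmem versions), and the per-lane slicing is
 * illustrative:
 */
static void *
client_persist_thread_sketch(void *arg)
{
	struct persist_arg *args = arg;
	size_t len = POOL_SIZE / NLANES;
	size_t off = (size_t)args->lane * len;

	/* push this lane's slice of the local pool to the remote node */
	int ret = rpmem_fip_persist(args->fip, off, len, args->lane);
	UT_ASSERTeq(ret, 0);

	return NULL;
}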