void cli_disconnect(cli_info_t *cli)
{
    if( CLI_DISCONN != cli->next_state[cli->state] ){
        TEST_ERROR(("rank %d: bad client next state: expect %d have %d!",
                    cli_rank(cli), CLI_DISCONN, cli->next_state[cli->state]));
        test_abort = true;
    }

    if( 0 > cli->sd ){
        TEST_ERROR(("Bad sd = %d of rank = %d", cli->sd, cli_rank(cli)));
        test_abort = true;
    } else {
        TEST_VERBOSE(("close sd = %d for rank = %d", cli->sd, cli_rank(cli)));
        close(cli->sd);
        cli->sd = -1;
    }

    if( NULL == cli->ev ){
        TEST_ERROR(("Bad ev = NULL of rank = %d", cli_rank(cli)));
        test_abort = true;
    } else {
        TEST_VERBOSE(("remove event of rank %d from event queue", cli_rank(cli)));
        event_del(cli->ev);
        event_free(cli->ev);
        cli->ev = NULL;
    }

    TEST_VERBOSE(("Destruct modex list for the rank %d", cli_rank(cli)));
    PMIX_LIST_DESTRUCT(&(cli->modex));

    cli->state = CLI_DISCONN;
}
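/*
 * Aside (a minimal standalone sketch, not part of the test harness): the
 * libevent lifecycle that cli_connect()/cli_disconnect() rely on -- create a
 * persistent read event for a socket, and tear it down with event_del()/
 * event_free() before closing the descriptor. The callback name and helper
 * below are illustrative only.
 */
#include <event2/event.h>
#include <unistd.h>

static void demo_read_cb(evutil_socket_t sd, short flags, void *arg)
{
    (void)flags; (void)arg;
    char buf[64];
    (void)read(sd, buf, sizeof(buf));   /* drain the socket */
}

static void demo_event_lifecycle(struct event_base *base, int sd)
{
    /* registration: mirrors cli_connect() */
    struct event *ev = event_new(base, sd, EV_READ|EV_PERSIST, demo_read_cb, NULL);
    event_add(ev, NULL);

    /* ... the event loop runs elsewhere, e.g. event_base_dispatch(base) ... */

    /* teardown: mirrors cli_disconnect() -- remove, free, then close */
    event_del(ev);
    event_free(ev);
    close(sd);
}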
int test_connect_disconnect(char *my_nspace, int my_rank)
{
    int rc;
    pmix_proc_t proc;
    char nspace[PMIX_MAX_NSLEN+1];
    pmix_rank_t newrank;
    cd_cbdata cbdata;

    (void)strncpy(proc.nspace, my_nspace, PMIX_MAX_NSLEN);
    proc.rank = PMIX_RANK_WILDCARD;

    rc = PMIx_Connect(&proc, 1, NULL, 0, nspace, &newrank);
    if (PMIX_SUCCESS != rc) {
        TEST_ERROR(("%s:%d: Connect blocking test failed.", my_nspace, my_rank));
        return PMIX_ERROR;
    }
    TEST_VERBOSE(("%s:%d: Connect blocking test succeeded to nspace %s.", my_nspace, my_rank, nspace));

    rc = PMIx_Disconnect(nspace, NULL, 0);
    if (PMIX_SUCCESS != rc) {
        TEST_ERROR(("%s:%d: Disconnect blocking test failed.", my_nspace, my_rank));
        return PMIX_ERROR;
    }
    TEST_VERBOSE(("%s:%d: Disconnect blocking test succeeded.", my_nspace, my_rank));

    cbdata.in_progress = 1;
    rc = PMIx_Connect_nb(&proc, 1, NULL, 0, cnct_cb, &cbdata);
    if (PMIX_SUCCESS == rc) {
        PMIX_WAIT_FOR_COMPLETION(cbdata.in_progress);
        rc = cbdata.status;
    }
    if (PMIX_SUCCESS != rc) {
        TEST_ERROR(("%s:%d: Connect non-blocking test failed.", my_nspace, my_rank));
        return PMIX_ERROR;
    }
    TEST_VERBOSE(("%s:%d: Connect non-blocking test succeeded.", my_nspace, my_rank));

    cbdata.in_progress = 1;
    rc = PMIx_Disconnect_nb(nspace, NULL, 0, cd_cb, &cbdata);
    if (PMIX_SUCCESS == rc) {
        PMIX_WAIT_FOR_COMPLETION(cbdata.in_progress);
        rc = cbdata.status;
    }
    if (PMIX_SUCCESS != rc) {
        TEST_ERROR(("%s:%d: Disconnect non-blocking test failed.", my_nspace, my_rank));
        return PMIX_ERROR;
    }
    TEST_VERBOSE(("%s:%d: Disconnect non-blocking test succeeded.", my_nspace, my_rank));

    return PMIX_SUCCESS;
}
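/*
 * Aside (illustrative, not from the test suite): the non-blocking calls above
 * follow a common "completion flag + callback" pattern -- the caller sets a
 * volatile in_progress flag, passes a callback that records the status and
 * clears the flag, then spins until the callback fires (roughly what
 * PMIX_WAIT_FOR_COMPLETION does). async_request() below is a hypothetical
 * stand-in for any callback-based API; here it simply completes immediately.
 */
#include <time.h>

typedef struct {
    volatile int in_progress;
    int status;
} demo_cbdata_t;

static void demo_cb(int status, void *cbdata)
{
    demo_cbdata_t *cb = (demo_cbdata_t *)cbdata;
    cb->status = status;    /* record the result first... */
    cb->in_progress = 0;    /* ...then release the waiter */
}

/* hypothetical async API: starts work, then invokes cb(status, cbdata) */
static int async_request(void (*cb)(int, void *), void *cbdata)
{
    cb(0, cbdata);          /* a real API would complete later, from a progress thread */
    return 0;
}

static int demo_blocking_wrapper(void)
{
    demo_cbdata_t cbdata = { 1, 0 };
    int rc = async_request(demo_cb, &cbdata);
    if (0 == rc) {
        while (cbdata.in_progress) {             /* wait for the callback */
            struct timespec ts = { 0, 100000 };  /* back off for 0.1 ms   */
            nanosleep(&ts, NULL);
        }
        rc = cbdata.status;
    }
    return rc;
}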
void cli_wait_all(double timeout)
{
    struct timeval tv;
    double start_time, cur_time;

    gettimeofday(&tv, NULL);
    start_time = tv.tv_sec + 1E-6*tv.tv_usec;
    cur_time = start_time;

    //TEST_VERBOSE(("Wait for all children to terminate"))

    /* Wait for all children to clean up after the test. */
    while( !test_terminated() && ( timeout >= (cur_time - start_time) ) ){
        struct timespec ts;
        int status, i;
        pid_t pid;
        while( 0 < (pid = waitpid(-1, &status, WNOHANG) ) ){
            TEST_VERBOSE(("waitpid = %d", pid));
            for(i=0; i < cli_info_cnt; i++){
                if( cli_info[i].pid == pid ){
                    TEST_VERBOSE(("the child with pid = %d has rank = %d\n"
                                  "\t\texited = %d, signalled = %d", pid, i,
                                  WIFEXITED(status), WIFSIGNALED(status) ));
                    if( WIFEXITED(status) || WIFSIGNALED(status) ){
                        cli_cleanup(&cli_info[i]);
                    }
                }
            }
        }
        if( pid < 0 ){
            if( errno == ECHILD ){
                TEST_VERBOSE(("No more children to wait for. This happens on the last cli_wait_all call,"
                              " which is used to ensure that all children have terminated.\n"));
                break;
            } else {
                TEST_ERROR(("waitpid(): %d : %s", errno, strerror(errno)));
                exit(0);
            }
        }
        ts.tv_sec = 0;
        ts.tv_nsec = 100000;
        nanosleep(&ts, NULL);
        /* calculate the current timestamp */
        gettimeofday(&tv, NULL);
        cur_time = tv.tv_sec + 1E-6*tv.tv_usec;
    }
}
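/*
 * Aside (standalone POSIX sketch, not part of the harness): the core of
 * cli_wait_all() is a non-blocking reap loop -- waitpid(-1, ..., WNOHANG)
 * returns a pid while exited children remain, 0 while they are still running,
 * and -1 with errno == ECHILD once there is nothing left to wait for. The
 * helper below shows that pattern in isolation; the names and the timeout
 * handling are illustrative.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <errno.h>
#include <stdbool.h>
#include <time.h>

static double demo_now(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1E-6 * tv.tv_usec;
}

/* Reap exited children until none remain or the timeout expires.
 * Returns true if every child was reaped. */
static bool demo_reap_children(double timeout)
{
    double start = demo_now();
    while (demo_now() - start < timeout) {
        int status;
        pid_t pid = waitpid(-1, &status, WNOHANG);
        if (pid > 0) {
            continue;                        /* reaped one child, look for more */
        }
        if (pid < 0 && ECHILD == errno) {
            return true;                     /* no children left at all */
        }
        struct timespec ts = { 0, 100000 };  /* back off for 0.1 ms */
        nanosleep(&ts, NULL);
    }
    return false;                            /* timed out with children still running */
}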
int test_spawn(char *my_nspace, int my_rank)
{
    int rc;
    rc = test_spawn_common(my_nspace, my_rank, 1);
    if (PMIX_SUCCESS != rc) {
        TEST_ERROR(("%s:%d: Spawn blocking test failed.", my_nspace, my_rank));
        return PMIX_ERROR;
    }
    TEST_VERBOSE(("%s:%d: Spawn blocking test succeeded.", my_nspace, my_rank));
    rc = test_spawn_common(my_nspace, my_rank, 0);
    if (PMIX_SUCCESS != rc) {
        TEST_ERROR(("%s:%d: Spawn non-blocking test failed.", my_nspace, my_rank));
        return PMIX_ERROR;
    }
    TEST_VERBOSE(("%s:%d: Spawn non-blocking test succeeded.", my_nspace, my_rank));
    return PMIX_SUCCESS;
}
int deregevents_fn(const pmix_info_t info[], size_t ninfo,
                   pmix_op_cbfunc_t cbfunc, void *cbdata)
{
    TEST_VERBOSE(("pmix host server deregevents_fn called"));
    if (NULL != cbfunc) {
        cbfunc(PMIX_SUCCESS, cbdata);
    }
    return PMIX_SUCCESS;
}
void cli_kill_all(void)
{
    int i;
    for(i = 0; i < cli_info_cnt; i++){
        if( CLI_UNINIT == cli_info[i].state ){
            TEST_ERROR(("Skip rank %d as it wasn't ever initialized (shouldn't happen)", i));
            continue;
        } else if( CLI_TERM <= cli_info[i].state ){
            TEST_VERBOSE(("Skip rank %d as it was already terminated.", i));
            continue;
        }
        TEST_VERBOSE(("Kill rank %d (pid = %d).", i, cli_info[i].pid));
        kill(cli_info[i].pid, SIGKILL);
        cli_cleanup(&cli_info[i]);
    }
}
static void errhandler_reg_callbk1(pmix_status_t status, size_t errhandler_ref, void *cbdata)
{
    size_t *ref = (size_t*) cbdata;
    *ref = errhandler_ref;
    TEST_VERBOSE(("PMIX client ERRHANDLER REGISTRATION CALLED WITH STATUS %d, ref=%lu",
                  status, (unsigned long)errhandler_ref));
}
int abort_fn(const pmix_proc_t *proc, void *server_object,
             int status, const char msg[],
             pmix_proc_t procs[], size_t nprocs,
             pmix_op_cbfunc_t cbfunc, void *cbdata)
{
    if (NULL != cbfunc) {
        cbfunc(PMIX_SUCCESS, cbdata);
    }
    TEST_VERBOSE(("Abort is called with status = %d, msg = %s", status, msg));
    test_abort = true;
    return PMIX_SUCCESS;
}
int test_error(char *my_nspace, int my_rank, test_params params)
{
    size_t errhandler_refs[MAX_ERR_HANDLERS];
    struct timespec ts;
    pmix_status_t status;
    pmix_proc_t source;

    TEST_VERBOSE(("test-error: running error handling test cases"));

    /* register specific client error handlers and test their invocation
     * by triggering events from the server side */
    status = PMIX_ERR_TIMEOUT;
    PMIx_Register_event_handler(&status, 1, NULL, 0,
                                timeout_errhandler, errhandler_reg_callbk1, &errhandler_refs[0]);

    /* register a handler for comm errors */
    status = PMIX_ERR_LOST_PEER_CONNECTION;
    PMIx_Register_event_handler(&status, 1, NULL, 0,
                                comfail_errhandler, errhandler_reg_callbk1, &errhandler_refs[1]);

    /* inject error from client */
    done = false;
    (void)strncpy(source.nspace, my_nspace, PMIX_MAX_NSLEN);
    source.rank = my_rank;
    /* change error value to test other error notifications */
    PMIx_Notify_event(TEST_NOTIFY, &source, PMIX_RANGE_NAMESPACE, NULL, 0, op1_callbk, NULL);
    while(!done) {
        ts.tv_sec = 0;
        ts.tv_nsec = 100000;
        nanosleep(&ts, NULL);
    }
    done = false;

    /* deregister all handlers */
    PMIx_Deregister_event_handler(errhandler_refs[0], op1_callbk, NULL);
    /* loop until we get the callback */
    while(!done) {
        ts.tv_sec = 0;
        ts.tv_nsec = 100000;
        nanosleep(&ts, NULL);
    }
    done = false;
    PMIx_Deregister_event_handler(errhandler_refs[1], op1_callbk, NULL);
    /* loop until we get the callback */
    while(!done) {
        ts.tv_sec = 0;
        ts.tv_nsec = 100000;
        nanosleep(&ts, NULL);
    }
    return PMIX_SUCCESS;
}
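/*
 * Aside (illustrative refactoring, not in the original test): the three
 * identical "spin until done" loops above could share one helper. Adding a
 * bound would also keep the test from hanging forever if a callback is never
 * delivered. The helper name and the bound are assumptions for this sketch;
 * it expects the flag to be declared volatile, as the busy-wait requires.
 */
#include <stdbool.h>
#include <time.h>

/* Sleep-poll a completion flag; return false if max_sec elapses first. */
static bool demo_wait_for_flag(volatile bool *flag, double max_sec)
{
    double waited = 0.0;
    while (!*flag && waited < max_sec) {
        struct timespec ts = { 0, 100000 };   /* 0.1 ms, as in the loops above */
        nanosleep(&ts, NULL);
        waited += 1e-4;
    }
    return *flag;
}

/* usage sketch:
 *   done = false;
 *   PMIx_Deregister_event_handler(errhandler_refs[0], op1_callbk, NULL);
 *   if (!demo_wait_for_flag(&done, 10.0)) { TEST_ERROR(("no callback received")); }
 */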
void cli_terminate(cli_info_t *cli)
{
    if( CLI_TERM != cli->next_state[cli->state] ){
        TEST_ERROR(("rank %d: bad client next state: expect %d have %d!",
                    cli_rank(cli), CLI_TERM, cli->next_state[cli->state]));
        test_abort = true;
    }
    cli->pid = -1;
    TEST_VERBOSE(("Client rank = %d terminated", cli_rank(cli)));
    cli->state = CLI_TERM;
    if (NULL != cli->ns) {
        free(cli->ns);
        cli->ns = NULL;
    }
}
int dmodex_fn(const pmix_proc_t *proc, const pmix_info_t info[], size_t ninfo,
              pmix_modex_cbfunc_t cbfunc, void *cbdata)
{
    TEST_VERBOSE(("Getting data for %s:%d", proc->nspace, proc->rank));

    /* In a perfect world, we should call another server
     * to get the data for one of its clients. We don't
     * have multi-server capability yet, so we'll just
     * respond right away */
    if (NULL != cbfunc) {
        cbfunc(PMIX_ERR_NOT_FOUND, NULL, 0, cbdata, NULL, NULL);
    }
    return PMIX_SUCCESS;
}
void cli_connect(cli_info_t *cli, int sd, struct event_base *ebase, event_callback_fn callback)
{
    if( CLI_CONNECTED != cli->next_state[cli->state] ){
        TEST_ERROR(("Rank %d has bad next state: expect %d have %d!",
                    cli_rank(cli), CLI_CONNECTED, cli->next_state[cli->state]));
        test_abort = true;
        return;
    }

    cli->sd = sd;
    cli->ev = event_new(ebase, sd, EV_READ|EV_PERSIST, callback, cli);
    event_add(cli->ev, NULL);
    pmix_usock_set_nonblocking(sd);
    TEST_VERBOSE(("Connection accepted from rank %d", cli_rank(cli)));
    cli->state = CLI_CONNECTED;
}
int fencenb_fn(const pmix_proc_t procs[], size_t nprocs,
               const pmix_info_t info[], size_t ninfo,
               char *data, size_t ndata,
               pmix_modex_cbfunc_t cbfunc, void *cbdata)
{
    TEST_VERBOSE(("Getting data for %s:%d", procs[0].nspace, procs[0].rank));

    /* In a perfect world, we should wait until
     * the test servers from all involved procs
     * respond. We don't have multi-server capability
     * yet, so we'll just respond right away and
     * return what we were given */
    if (NULL != cbfunc) {
        cbfunc(PMIX_SUCCESS, data, ndata, cbdata, NULL, NULL);
    }
    return PMIX_SUCCESS;
}
int finalized(const pmix_proc_t *proc, void *server_object,
              pmix_op_cbfunc_t cbfunc, void *cbdata)
{
    if( CLI_TERM <= cli_info[proc->rank].state ){
        TEST_ERROR(("double termination of rank %d", proc->rank));
        return PMIX_SUCCESS;
    }
    TEST_VERBOSE(("Rank %d terminated", proc->rank));
    cli_finalize(&cli_info[proc->rank]);
    finalized_count++;
    if (finalized_count == cli_info_cnt) {
        if (NULL != pmix_test_published_list) {
            PMIX_LIST_RELEASE(pmix_test_published_list);
        }
    }
    if (NULL != cbfunc) {
        cbfunc(PMIX_SUCCESS, cbdata);
    }
    return PMIX_SUCCESS;
}
int main(int argc, char **argv)
{
    char **client_env = NULL;
    char **client_argv = NULL;
    int rc;
    struct stat stat_buf;
    struct timeval tv;
    double test_start;
    test_params params;
    INIT_TEST_PARAMS(params);
    int test_fail = 0;
    char *tmp;
    int ns_nprocs;

    gettimeofday(&tv, NULL);
    test_start = tv.tv_sec + 1E-6*tv.tv_usec;

    /* smoke test */
    if (PMIX_SUCCESS != 0) {
        TEST_ERROR(("ERROR IN COMPUTING CONSTANTS: PMIX_SUCCESS = %d", PMIX_SUCCESS));
        exit(1);
    }

    TEST_VERBOSE(("Testing version %s", PMIx_Get_version()));

    parse_cmd(argc, argv, &params);
    TEST_VERBOSE(("Start PMIx_lite smoke test (timeout is %d)", params.timeout));

    /* set common argv and env */
    client_env = pmix_argv_copy(environ);
    set_client_argv(&params, &client_argv);
    tmp = pmix_argv_join(client_argv, ' ');
    TEST_VERBOSE(("Executing test: %s", tmp));
    free(tmp);

    /* verify executable */
    if( 0 > ( rc = stat(params.binary, &stat_buf) ) ){
        TEST_ERROR(("Cannot stat() executable \"%s\": %d: %s",
                    params.binary, errno, strerror(errno)));
        FREE_TEST_PARAMS(params);
        return 0;
    } else if( !S_ISREG(stat_buf.st_mode) ){
        TEST_ERROR(("Client executable \"%s\": is not a regular file", params.binary));
        FREE_TEST_PARAMS(params);
        return 0;
    } else if( !(stat_buf.st_mode & S_IXUSR) ){
        TEST_ERROR(("Client executable \"%s\": has no executable flag", params.binary));
        FREE_TEST_PARAMS(params);
        return 0;
    }

    if (PMIX_SUCCESS != (rc = server_init(&params))) {
        FREE_TEST_PARAMS(params);
        return rc;
    }

    cli_init(params.lsize);

    int launched = 0;
    /* set namespaces and fork clients */
    if (NULL == params.ns_dist) {
        uint32_t i;
        int base_rank = 0;

        /* compute my start counter */
        for(i = 0; i < (uint32_t)my_server_id; i++) {
            base_rank += (params.nprocs % params.nservers) > (uint32_t)i ?
                         params.nprocs / params.nservers + 1 :
                         params.nprocs / params.nservers;
        }
        /* we have a single namespace for all clients */
        ns_nprocs = params.nprocs;
        launched += server_launch_clients(params.lsize, params.nprocs, base_rank,
                                          &params, &client_env, &client_argv);
    } else {
        char *pch;
        pch = strtok(params.ns_dist, ":");
        while (NULL != pch) {
            ns_nprocs = (int)strtol(pch, NULL, 10);
            if (params.nprocs < (uint32_t)(launched+ns_nprocs)) {
                TEST_ERROR(("Total number of processes doesn't correspond to the number specified by the ns_dist parameter."));
                FREE_TEST_PARAMS(params);
                return PMIX_ERROR;
            }
            if (0 < ns_nprocs) {
                launched += server_launch_clients(ns_nprocs, ns_nprocs, 0, &params,
                                                  &client_env, &client_argv);
            }
            pch = strtok(NULL, ":");
        }
    }
    if (params.lsize != (uint32_t)launched) {
        TEST_ERROR(("Total number of processes doesn't correspond to the number specified by the ns_dist parameter."));
        cli_kill_all();
        test_fail = 1;
    }

    /* hang around until the client(s) finalize */
    while (!test_terminated()) {
        /* To avoid a test hang we want to interrupt the loop every 0.1s */
        double test_current;

        /* check if we exceed the max time */
        gettimeofday(&tv, NULL);
        test_current = tv.tv_sec + 1E-6*tv.tv_usec;
        if( (test_current - test_start) > params.timeout ){
            break;
        }
        cli_wait_all(0);
    }

    if( !test_terminated() ){
        TEST_ERROR(("Test exited by a timeout!"));
        cli_kill_all();
        test_fail = 1;
    }

    if( test_abort ){
        TEST_ERROR(("Test was aborted!"));
        /* do not simply kill the clients as that generates
         * event notifications which these tests then print
         * out, flooding the log */
        // cli_kill_all();
        test_fail = 1;
    }

    if (0 != params.test_spawn) {
        PMIX_WAIT_FOR_COMPLETION(spawn_wait);
    }

    /* deregister the errhandler */
    PMIx_Deregister_event_handler(0, op_callbk, NULL);

    cli_wait_all(1.0);

    test_fail += server_finalize(&params);

    FREE_TEST_PARAMS(params);
    pmix_argv_free(client_argv);
    pmix_argv_free(client_env);

    return test_fail;
}
int main(int _argc, const char** _argv) {
    // the only init needed for etk:
    etk::init(_argc, _argv);
    std::string inputName = "";
    bool performance = false;
    bool perf = false;
    int64_t sampleRate = 48000;
    for (int32_t iii=0; iii<_argc ; ++iii) {
        std::string data = _argv[iii];
        if (etk::start_with(data,"--in=")) {
            inputName = &data[5];
        } else if (data == "--performance") {
            performance = true;
        } else if (data == "--perf") {
            perf = true;
        } else if (etk::start_with(data,"--sample-rate=")) {
            data = &data[14];
            sampleRate = etk::string_to_int32_t(data);
        } else if (    data == "-h"
                    || data == "--help") {
            TEST_PRINT("Help:");
            TEST_PRINT("    ./xxx --fb=file.raw --mic=file.raw");
            TEST_PRINT("        --in=YYY.raw        input file");
            TEST_PRINT("        --performance       Generate a signal that forces the algo to its maximum processing time");
            TEST_PRINT("        --perf              Enable the performance test (a little slower, but a real performance measurement)");
            TEST_PRINT("        --sample-rate=XXXX  Signal sample rate (default 48000)");
            exit(0);
        }
    }
    // PERFORMANCE test only ....
    if (performance == true) {
        performanceCompressor();
        performanceLimiter();
        performanceGate();
        return 0;
    }
    if (inputName == "") {
        TEST_ERROR("Cannot process: missing parameters ...");
        exit(-1);
    }
    TEST_INFO("Read input:");
    std::vector<double> inputData = convert(etk::FSNodeReadAllDataType<int16_t>(inputName));
    TEST_INFO("    " << inputData.size() << " samples");
    // resize output:
    std::vector<double> output;
    output.resize(inputData.size(), 0);
    // process in chunks of 256 samples
    int32_t blockSize = 256;
    Performance perfo;
    /*
    audio::algo::chunkware::Compressor algo;
    algo.setThreshold(-10);
    algo.setRatio(-5);
    int32_t lastPourcent = -1;
    for (int32_t iii=0; iii<output.size()/blockSize; ++iii) {
        if (lastPourcent != 100*iii / (output.size()/blockSize)) {
            lastPourcent = 100*iii / (output.size()/blockSize);
            TEST_INFO("Process : " << iii*blockSize << "/" << int32_t(output.size()/blockSize)*blockSize << " " << lastPourcent << "/100");
        } else {
            TEST_VERBOSE("Process : " << iii*blockSize << "/" << int32_t(output.size()/blockSize)*blockSize);
        }
        perfo.tic();
        algo.process(audio::format_double, &output[iii*blockSize], &inputData[iii*blockSize], blockSize, 1);
        if (perf == true) {
            perfo.toc();
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
    }
    */
    audio::algo::chunkware::Limiter algo;
    algo.setSampleRate(sampleRate);
    algo.setThreshold(0);
    algo.setAttack(0.1);
    algo.setRelease(2);
    algo.init(1);
    int32_t lastPourcent = -1;
    for (int32_t iii=0; iii<output.size()/blockSize; ++iii) {
        if (lastPourcent != 100*iii / (output.size()/blockSize)) {
            lastPourcent = 100*iii / (output.size()/blockSize);
            TEST_INFO("Process : " << iii*blockSize << "/" << int32_t(output.size()/blockSize)*blockSize << " " << lastPourcent << "/100");
        } else {
            TEST_VERBOSE("Process : " << iii*blockSize << "/" << int32_t(output.size()/blockSize)*blockSize);
        }
        perfo.tic();
        algo.process(&output[iii*blockSize], &inputData[iii*blockSize], blockSize, 1, audio::format_double);
        if (perf == true) {
            perfo.toc();
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
    }
    if (perf == true) {
        TEST_INFO("Performance Result:");
        TEST_INFO("    blockSize=" << blockSize << " samples");
        TEST_INFO("    min=" << perfo.getMinProcessing().count() << " ns");
        TEST_INFO("    max=" << perfo.getMaxProcessing().count() << " ns");
        TEST_INFO("    avg=" << perfo.getTotalTimeProcessing().count()/perfo.getTotalIteration() << " ns");
        TEST_INFO("    min=" << (float((perfo.getMinProcessing().count()*sampleRate)/blockSize)/1000000000.0)*100.0 << " %");
        TEST_INFO("    max=" << (float((perfo.getMaxProcessing().count()*sampleRate)/blockSize)/1000000000.0)*100.0 << " %");
        TEST_INFO("    avg=" << (float(((perfo.getTotalTimeProcessing().count()/perfo.getTotalIteration())*sampleRate)/blockSize)/1000000000.0)*100.0 << " %");
    }
    etk::FSNodeWriteAllDataType<int16_t>("output.raw", convert(output));
    return 0;
}
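/*
 * Aside (standalone C sketch, not from the project above): the Performance
 * tic()/toc() pair used in the loop boils down to taking a monotonic
 * timestamp around each processed block and accumulating min/max/total.
 * A per-block cost of (time_ns * sampleRate / blockSize) / 1e9 gives the CPU
 * share needed for real-time processing, which is what the "%" lines report.
 * All names below are illustrative.
 */
#include <stdint.h>
#include <time.h>

typedef struct {
    int64_t min_ns, max_ns, total_ns;
    int64_t iterations;
    struct timespec t0;
} demo_perf_t;

static void demo_perf_tic(demo_perf_t *p)
{
    clock_gettime(CLOCK_MONOTONIC, &p->t0);
}

static void demo_perf_toc(demo_perf_t *p)
{
    struct timespec t1;
    clock_gettime(CLOCK_MONOTONIC, &t1);
    int64_t ns = (int64_t)(t1.tv_sec - p->t0.tv_sec) * 1000000000LL
               + (t1.tv_nsec - p->t0.tv_nsec);
    if (0 == p->iterations || ns < p->min_ns) p->min_ns = ns;
    if (0 == p->iterations || ns > p->max_ns) p->max_ns = ns;
    p->total_ns += ns;
    p->iterations++;
}

/* Estimated real-time CPU load in percent for one block of audio:
 * processing time divided by the block's duration (blockSize / sampleRate). */
static double demo_perf_load_percent(int64_t block_ns, int64_t sample_rate, int64_t block_size)
{
    return 100.0 * ((double)block_ns * (double)sample_rate) / ((double)block_size * 1e9);
}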
int launch_clients(int num_procs, char *binary, char ***client_env, char ***base_argv)
{
    int n;
    uid_t myuid;
    gid_t mygid;
    char *ranks = NULL;
    char digit[MAX_DIGIT_LEN];
    int rc;
    static int counter = 0;
    static int num_ns = 0;
    pmix_proc_t proc;

    TEST_VERBOSE(("Setting job info"));
    fill_seq_ranks_array(num_procs, counter, &ranks);
    if (NULL == ranks) {
        PMIx_server_finalize();
        TEST_ERROR(("fill_seq_ranks_array failed"));
        return PMIX_ERROR;
    }
    (void)snprintf(proc.nspace, PMIX_MAX_NSLEN, "%s-%d", TEST_NAMESPACE, num_ns);
    set_namespace(num_procs, ranks, proc.nspace);
    if (NULL != ranks) {
        free(ranks);
    }

    myuid = getuid();
    mygid = getgid();

    /* fork/exec the test */
    for (n = 0; n < num_procs; n++) {
        proc.rank = counter;
        if (PMIX_SUCCESS != (rc = PMIx_server_setup_fork(&proc, client_env))) {
            TEST_ERROR(("Server fork setup failed with error %d", rc));
            PMIx_server_finalize();
            cli_kill_all();
            return rc;
        }
        if (PMIX_SUCCESS != (rc = PMIx_server_register_client(&proc, myuid, mygid, NULL, NULL, NULL))) {
            TEST_ERROR(("Server client registration failed with error %d", rc));
            PMIx_server_finalize();
            cli_kill_all();
            return rc;
        }

        cli_info[counter].pid = fork();
        if (cli_info[counter].pid < 0) {
            TEST_ERROR(("Fork failed"));
            PMIx_server_finalize();
            cli_kill_all();
            return -1;
        }
        cli_info[counter].rank = counter;
        cli_info[counter].ns = strdup(proc.nspace);

        char **client_argv = pmix_argv_copy(*base_argv);

        /* append the rank and the other per-client arguments */
        sprintf(digit, "%d", counter);
        pmix_argv_append_nosize(&client_argv, "-r");
        pmix_argv_append_nosize(&client_argv, digit);
        pmix_argv_append_nosize(&client_argv, "-s");
        pmix_argv_append_nosize(&client_argv, proc.nspace);
        sprintf(digit, "%d", num_procs);
        pmix_argv_append_nosize(&client_argv, "--ns-size");
        pmix_argv_append_nosize(&client_argv, digit);
        sprintf(digit, "%d", num_ns);
        pmix_argv_append_nosize(&client_argv, "--ns-id");
        pmix_argv_append_nosize(&client_argv, digit);
        sprintf(digit, "%d", (counter-n));
        pmix_argv_append_nosize(&client_argv, "--base-rank");
        pmix_argv_append_nosize(&client_argv, digit);

        if (cli_info[counter].pid == 0) {
            if( !TEST_VERBOSE_GET() ){
                /* Hide the client's stdout.
                 * TODO: on some systems stdout is a constant, address this */
                fclose(stdout);
                stdout = fopen("/dev/null", "w");
            }
            execve(binary, client_argv, *client_env);
            /* Does not return */
            exit(0);
        }
        cli_info[counter].state = CLI_FORKED;

        pmix_argv_free(client_argv);

        counter++;
    }
    num_ns++;
    return PMIX_SUCCESS;
}
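/*
 * Aside (standalone POSIX sketch): the fork/execve pattern used above, reduced
 * to its essentials. Two details worth noting: the argv/envp arrays passed to
 * execve() must be NULL-terminated, and if execve() returns at all it failed,
 * so the child should report the error and _exit() with a non-zero code
 * (the launcher above exits with 0, which hides exec failures). Names here
 * are illustrative.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t demo_launch(const char *binary, char *const argv[], char *const envp[])
{
    pid_t pid = fork();
    if (pid < 0) {
        return -1;                       /* fork failed in the parent */
    }
    if (0 == pid) {
        execve(binary, argv, envp);      /* only returns on failure */
        fprintf(stderr, "execve(%s): %s\n", binary, strerror(errno));
        _exit(127);
    }
    return pid;                          /* parent: the child's pid */
}

/* usage sketch:
 *   char *argv[] = { "client", "-r", "0", NULL };
 *   char *envp[] = { NULL };
 *   pid_t child = demo_launch("./client", argv, envp);
 */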
static void op1_callbk(pmix_status_t status, void *cbdata)
{
    TEST_VERBOSE(("op1_callbk CALLED WITH STATUS %d", status));
    done = true;
}
int main(int argc, char **argv)
{
    char **client_env = NULL;
    char **client_argv = NULL;
    int rc;
    struct stat stat_buf;
    struct timeval tv;
    double test_start;
    cli_state_t order[CLI_TERM+1];
    test_params params;
    INIT_TEST_PARAMS(params);
    int test_fail = 0;
    char *tmp;
    int ns_nprocs;

    gettimeofday(&tv, NULL);
    test_start = tv.tv_sec + 1E-6*tv.tv_usec;

    /* smoke test */
    if (PMIX_SUCCESS != 0) {
        TEST_ERROR(("ERROR IN COMPUTING CONSTANTS: PMIX_SUCCESS = %d", PMIX_SUCCESS));
        exit(1);
    }

    TEST_VERBOSE(("Testing version %s", PMIx_Get_version()));

    parse_cmd(argc, argv, &params);
    TEST_VERBOSE(("Start PMIx_lite smoke test (timeout is %d)", params.timeout));

    /* verify executable */
    if( 0 > ( rc = stat(params.binary, &stat_buf) ) ){
        TEST_ERROR(("Cannot stat() executable \"%s\": %d: %s",
                    params.binary, errno, strerror(errno)));
        FREE_TEST_PARAMS(params);
        return 0;
    } else if( !S_ISREG(stat_buf.st_mode) ){
        TEST_ERROR(("Client executable \"%s\": is not a regular file", params.binary));
        FREE_TEST_PARAMS(params);
        return 0;
    } else if( !(stat_buf.st_mode & S_IXUSR) ){
        TEST_ERROR(("Client executable \"%s\": has no executable flag", params.binary));
        FREE_TEST_PARAMS(params);
        return 0;
    }

    /* setup the server library */
    pmix_info_t info[1];
    (void)strncpy(info[0].key, PMIX_SOCKET_MODE, PMIX_MAX_KEYLEN);
    info[0].value.type = PMIX_UINT32;
    info[0].value.data.uint32 = 0666;

    if (PMIX_SUCCESS != (rc = PMIx_server_init(&mymodule, info, 1))) {
        TEST_ERROR(("Init failed with error %d", rc));
        FREE_TEST_PARAMS(params);
        return rc;
    }

    /* register the errhandler */
    PMIx_Register_event_handler(NULL, 0, NULL, 0,
                                errhandler, errhandler_reg_callbk, NULL);

    order[CLI_UNINIT] = CLI_FORKED;
    order[CLI_FORKED] = CLI_FIN;
    order[CLI_CONNECTED] = CLI_UNDEF;
    order[CLI_FIN] = CLI_TERM;
    order[CLI_DISCONN] = CLI_UNDEF;
    order[CLI_TERM] = CLI_UNDEF;
    cli_init(params.nprocs, order);

    /* set common argv and env */
    client_env = pmix_argv_copy(environ);
    set_client_argv(&params, &client_argv);
    tmp = pmix_argv_join(client_argv, ' ');
    TEST_VERBOSE(("Executing test: %s", tmp));
    free(tmp);

    int launched = 0;
    /* set namespaces and fork clients */
    if (NULL == params.ns_dist) {
        /* we have a single namespace for all clients */
        ns_nprocs = params.nprocs;
        rc = launch_clients(ns_nprocs, params.binary, &client_env, &client_argv);
        if (PMIX_SUCCESS != rc) {
            FREE_TEST_PARAMS(params);
            return rc;
        }
        launched += ns_nprocs;
    } else {
        char *pch;
        pch = strtok(params.ns_dist, ":");
        while (NULL != pch) {
            ns_nprocs = (int)strtol(pch, NULL, 10);
            if (params.nprocs < (uint32_t)(launched+ns_nprocs)) {
                TEST_ERROR(("Total number of processes doesn't correspond to the number specified by the ns_dist parameter."));
                FREE_TEST_PARAMS(params);
                return PMIX_ERROR;
            }
            if (0 < ns_nprocs) {
                rc = launch_clients(ns_nprocs, params.binary, &client_env, &client_argv);
                if (PMIX_SUCCESS != rc) {
                    FREE_TEST_PARAMS(params);
                    return rc;
                }
            }
            pch = strtok(NULL, ":");
            launched += ns_nprocs;
        }
    }
    if (params.nprocs != (uint32_t)launched) {
        TEST_ERROR(("Total number of processes doesn't correspond to the number specified by the ns_dist parameter."));
        cli_kill_all();
        test_fail = 1;
    }

    /* hang around until the client(s) finalize */
    while (!test_terminated()) {
        /* To avoid a test hang we want to interrupt the loop every 0.1s */
        double test_current;

        /* check if we exceed the max time */
        gettimeofday(&tv, NULL);
        test_current = tv.tv_sec + 1E-6*tv.tv_usec;
        if( (test_current - test_start) > params.timeout ){
            break;
        }
        cli_wait_all(0);
    }

    if( !test_terminated() ){
        TEST_ERROR(("Test exited by a timeout!"));
        cli_kill_all();
        test_fail = 1;
    }

    if( test_abort ){
        TEST_ERROR(("Test was aborted!"));
        /* do not simply kill the clients as that generates
         * event notifications which these tests then print
         * out, flooding the log */
        // cli_kill_all();
        test_fail = 1;
    }

    if (0 != params.test_spawn) {
        PMIX_WAIT_FOR_COMPLETION(spawn_wait);
    }

    pmix_argv_free(client_argv);
    pmix_argv_free(client_env);

    /* deregister the errhandler */
    PMIx_Deregister_event_handler(0, op_callbk, NULL);

    cli_wait_all(1.0);

    /* finalize the server library */
    if (PMIX_SUCCESS != (rc = PMIx_server_finalize())) {
        TEST_ERROR(("Finalize failed with error %d", rc));
    }

    FREE_TEST_PARAMS(params);

    if (0 == test_fail) {
        TEST_OUTPUT(("Test finished OK!"));
    }

    return test_fail;
}
int test_resolve_peers(char *my_nspace, int my_rank, test_params params)
{
    int rc, n;
    int ns_num;
    char nspace[PMIX_MAX_NSLEN+1];
    pmix_proc_t procs[2];

    /* first resolve peers from our own namespace. */
    rc = resolve_nspace(my_nspace, params, my_nspace, my_rank);
    if (PMIX_SUCCESS == rc) {
        TEST_VERBOSE(("%s:%d: Resolve peers succeeded for own namespace\n", my_nspace, my_rank));
    } else {
        TEST_ERROR(("%s:%d: Resolve peers failed for own namespace\n", my_nspace, my_rank));
        return PMIX_ERROR;
    }

    /* then get the number of namespaces and try to resolve peers from them. */
    ns_num = get_total_ns_number(params);
    if (0 >= ns_num) {
        TEST_ERROR(("%s:%d: get_total_ns_number function failed", my_nspace, my_rank));
        return PMIX_ERROR;
    }
    for (n = 0; n < ns_num; n++) {
        /* then connect to processes from different namespaces and resolve peers. */
        (void)snprintf(nspace, PMIX_MAX_NSLEN, "%s-%d", TEST_NAMESPACE, n);
        if (0 == strncmp(my_nspace, nspace, strlen(nspace)+1)) {
            continue;
        }

        /* add to the procs array all processes from our own namespace and all processes
         * from this namespace. Make sure that processes are placed in the same order. */
        if (0 < strncmp(nspace, my_nspace, PMIX_MAX_NSLEN)) {
            (void)strncpy(procs[0].nspace, nspace, PMIX_MAX_NSLEN);
            (void)strncpy(procs[1].nspace, my_nspace, PMIX_MAX_NSLEN);
        } else {
            (void)strncpy(procs[1].nspace, nspace, PMIX_MAX_NSLEN);
            (void)strncpy(procs[0].nspace, my_nspace, PMIX_MAX_NSLEN);
        }
        procs[0].rank = PMIX_RANK_WILDCARD;
        procs[1].rank = PMIX_RANK_WILDCARD;

        /* make a connection between processes from our own namespace and processes from this namespace. */
        rc = test_cd_common(procs, 2, 1, 0);
        if (PMIX_SUCCESS == rc) {
            TEST_VERBOSE(("%s:%d: Connect to %s succeeded.", my_nspace, my_rank, nspace));
        } else {
            TEST_ERROR(("%s:%d: Connect to %s failed.", my_nspace, my_rank, nspace));
            return PMIX_ERROR;
        }

        /* then resolve peers from this namespace. */
        rc = resolve_nspace(nspace, params, my_nspace, my_rank);
        if (PMIX_SUCCESS == rc) {
            TEST_VERBOSE(("%s:%d: Resolve peers succeeded for ns %s\n", my_nspace, my_rank, nspace));
        } else {
            test_cd_common(procs, 2, 1, 1);
            break;
        }

        /* disconnect from the processes of this namespace. */
        rc = test_cd_common(procs, 2, 1, 0);
        if (PMIX_SUCCESS == rc) {
            TEST_VERBOSE(("%s:%d: Disconnect from %s succeeded.", my_nspace, my_rank, nspace));
        } else {
            TEST_ERROR(("%s:%d: Disconnect from %s failed.", my_nspace, my_rank, nspace));
            return PMIX_ERROR;
        }
    }
    if (PMIX_SUCCESS == rc) {
        TEST_VERBOSE(("%s:%d: Resolve peers test succeeded.", my_nspace, my_rank));
    }
    return rc;
}
void callbackConstInt(const int32_t& _a) { TEST_VERBOSE("event a=" << _a); }
void changeCount(size_t _a) { TEST_VERBOSE("connection number : " << _a); m_count = _a; }