/* Exercise the zmq watcher: create a PAIR of inproc sockets, attach a
 * nonblocking reader and writer watcher, and run the reactor until the
 * writer-side callback stops it.
 */
static void test_zmq (flux_reactor_t *reactor)
{
    zctx_t *zctx;
    void *sock[2];
    flux_watcher_t *reader, *writer;

    ok ((zctx = zctx_new ()) != NULL,
        "zmq: created zmq context");
    sock[0] = zsocket_new (zctx, ZMQ_PAIR);
    sock[1] = zsocket_new (zctx, ZMQ_PAIR);
    ok (sock[0] && sock[1]
            && zsocket_bind (sock[0], "inproc://test_zmq") == 0
            && zsocket_connect (sock[1], "inproc://test_zmq") == 0,
        "zmq: connected ZMQ_PAIR sockets over inproc");

    reader = flux_zmq_watcher_create (reactor, sock[0], FLUX_POLLIN,
                                      zmqreader, NULL);
    writer = flux_zmq_watcher_create (reactor, sock[1], FLUX_POLLOUT,
                                      zmqwriter, NULL);
    ok (reader != NULL && writer != NULL,
        "zmq: nonblocking reader and writer created");

    flux_watcher_start (reader);
    flux_watcher_start (writer);
    ok (flux_reactor_run (reactor, 0) == 0,
        "zmq: reactor ran to completion after %d messages",
        zmqwriter_msgcount);

    flux_watcher_stop (reader);
    flux_watcher_stop (writer);
    flux_watcher_destroy (reader);
    flux_watcher_destroy (writer);
    zsocket_destroy (zctx, sock[0]);
    zsocket_destroy (zctx, sock[1]);
    zctx_destroy (&zctx);
}
/* Seed KEY with -1, commit, then install a watch whose callback
 * (presumably) modifies the key it is watching; run the reactor until
 * the callback stops it.
 */
void test_selfmod (int argc, char **argv)
{
    flux_t *h;
    char *key;

    if (argc != 1) {
        fprintf (stderr, "Usage: selfmod key\n");
        exit (1);
    }
    key = argv[0];

    if (!(h = flux_open (NULL, 0)))
        log_err_exit ("flux_open");
    if (kvs_put_int (h, key, -1) < 0)
        log_err_exit ("kvs_put_int");
    if (kvs_commit (h) < 0)
        log_err_exit ("kvs_commit");
    if (kvs_watch_int (h, key, selfmod_watch_cb, h) < 0)
        log_err_exit ("kvs_watch_int");

    log_msg ("reactor: start");
    flux_reactor_run (flux_get_reactor (h), 0);
    log_msg ("reactor: end");

    flux_close (h);
}
/* Module entry point: subscribe to barrier events, register message
 * handlers, and run the reactor until the module is unloaded.
 * Returns 0 on success, -1 on failure.
 */
int mod_main (flux_t *h, int argc, char **argv)
{
    int retval = -1;
    ctx_t *ctx = getctx (h);

    if (!ctx)
        goto out;
    if (flux_event_subscribe (h, "barrier.") < 0) {
        flux_log_error (h, "flux_event_subscribe");
        goto out;
    }
    if (flux_msg_handler_addvec (h, htab, ctx) < 0) {
        flux_log_error (h, "flux_msghandler_add");
        goto out;
    }
    if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
        flux_log_error (h, "flux_reactor_run");
        goto out_unreg;
    }
    retval = 0;
out_unreg:
    /* Handlers are only registered once addvec succeeds, so delvec is
     * reached only on that path. */
    flux_msg_handler_delvec (htab);
out:
    return retval;
}
void *thread (void *arg) { thd_t *t = arg; if (!(t->h = flux_open (NULL, 0))) { log_err ("%d: flux_open", t->n); goto done; } signal_ready (); /* The first kvs.watch reply is handled synchronously, then other kvs.watch * replies will arrive asynchronously and be handled by the reactor. */ if (kvs_watch_int (t->h, key, mt_watch_cb, t) < 0) { log_err ("%d: kvs_watch_int", t->n); goto done; } if (kvs_watch_int (t->h, "nonexistent-key", mt_watchnil_cb, t) < 0) { log_err ("%d: kvs_watch_int", t->n); goto done; } if (kvs_watch_int (t->h, key_stable, mt_watchstable_cb, t) < 0) { log_err ("%d: kvs_watch_int", t->n); goto done; } if (flux_reactor_run (flux_get_reactor (t->h), 0) < 0) { log_err ("%d: flux_reactor_run", t->n); goto done; } done: if (t->h) flux_close (t->h); return NULL; }
/* Module entry point for the sqlite content backing store.
 * Subscribes to shutdown events, registers handlers and the backing
 * store service, then runs the reactor.
 * Returns 0 on success, -1 on failure.
 *
 * Fix: the original returned 0 unconditionally, so initialization or
 * reactor failures were silently reported as success to the module
 * loader.  Track an rc and propagate -1 on the error paths.
 */
int mod_main (flux_t *h, int argc, char **argv)
{
    flux_msg_handler_t **handlers = NULL;
    int rc = -1;
    sqlite_ctx_t *ctx = getctx (h);
    if (!ctx)
        goto done;
    if (flux_event_subscribe (h, "shutdown") < 0) {
        flux_log_error (h, "flux_event_subscribe");
        goto done;
    }
    if (flux_msg_handler_addvec (h, htab, ctx, &handlers) < 0) {
        flux_log_error (h, "flux_msg_handler_addvec");
        goto done;
    }
    if (register_backing_store (h, true, "content-sqlite") < 0) {
        flux_log_error (h, "registering backing store");
        goto done;
    }
    if (register_content_backing_service (h) < 0) {
        flux_log_error (h, "service.add: content-backing");
        goto done;
    }
    if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
        flux_log_error (h, "flux_reactor_run");
        goto done;
    }
    rc = 0;
done:
    /* delvec tolerates handlers == NULL (error before addvec). */
    flux_msg_handler_delvec (handlers);
    return rc;
}
/* Timer pops every 1 ms, writing a new value to key. * After 10 calls, it calls kvs_unwatch(). * After 20 calls, it calls flux_reactor_stop(). * The kvs_unwatch_cb() counts the number of times it is called, should be 10. */ void test_unwatch (int argc, char **argv) { struct timer_ctx ctx; flux_reactor_t *r; int count = 0; flux_watcher_t *timer; if (argc != 1) { fprintf (stderr, "Usage: unwatch key\n"); exit (1); } ctx.key = argv[0]; if (!(ctx.h = flux_open (NULL, 0))) log_err_exit ("flux_open"); r = flux_get_reactor (ctx.h); if (kvs_watch_int (ctx.h, ctx.key, unwatch_watch_cb, &count) < 0) log_err_exit ("kvs_watch_int %s", ctx.key); if (!(timer = flux_timer_watcher_create (r, 0.001, 0.001, unwatch_timer_cb, &ctx))) log_err_exit ("flux_timer_watcher_create"); flux_watcher_start (timer); if (flux_reactor_run (r, 0) < 0) log_err_exit ("flux_reactor_run"); if (count != 10) log_msg_exit ("watch called %d times (should be 10)", count); flux_watcher_destroy (timer); flux_close (ctx.h); }
/* Register a JSC status-change callback and run the reactor until the
 * job completes (waitjob_cb presumably stops the reactor — defined
 * elsewhere in this file).  Touches the optional "start"/"complete"
 * sentinel files used by the test harness for synchronization.
 * Returns 0 on success, -1 on failure.
 */
static int wait_job_complete (flux_t h)
{
    int rc = -1;
    sig_flux_h = h;  /* stash handle for the SIGINT handler */
    wjctx_t *ctx = getctx (h);
    if (signal (SIGINT, sig_handler) == SIG_ERR)
        goto done;
    if (jsc_notify_status (h, waitjob_cb, (void *)h) != 0) {
        /* NOTE(review): registration failure is logged but execution
         * continues — confirm this best-effort behavior is intentional. */
        flux_log (h, LOG_ERR, "failed to register a waitjob CB");
    }
    /* once jsc_notify_status is returned, all of JSC events
     * will be queued and delivered. It is safe to signal
     * readiness. */
    if (ctx->start)
        touch_outfile (ctx->start);
    if (complete_job (ctx)) {
        /* Job already complete — record it, but still enter the reactor
         * below (waitjob_cb is expected to terminate it). */
        if (ctx->complete)
            touch_outfile (ctx->complete);
        flux_log (ctx->h, LOG_INFO, "wait_job_complete: completion detected");
    }
    if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
        flux_log (h, LOG_ERR, "error in flux_reactor_run");
        goto done;
    }
    rc = 0;
done:
    return rc;
}
/* Server thread: build a private reactor, watch the server end of the
 * socketpair for input, and run until s_io_cb stops the reactor.
 * ctx->exit_rc records whether the reactor exited cleanly.
 */
void *server_thread (void *arg)
{
    struct context *ctx = arg;
    flux_reactor_t *reactor = NULL;
    flux_watcher_t *iow = NULL;

    if (!(reactor = flux_reactor_create (0)))
        goto done;
    iow = flux_fd_watcher_create (reactor, ctx->fds[1], FLUX_POLLIN,
                                  s_io_cb, ctx);
    if (!iow)
        goto done;
    flux_watcher_start (iow);

    ctx->exit_rc = -1;
    if (flux_reactor_run (reactor, 0) < 0)
        goto done;
    ctx->exit_rc = 0;
done:
    if (iow)
        flux_watcher_destroy (iow);
    if (reactor)
        flux_reactor_destroy (reactor);
    return NULL;
}
/* Module entry point: optionally abort on --init-failure (test hook),
 * create the module hash, register handlers, and run the reactor.
 * Returns 0 on success; -1 with errno set on failure.
 */
int mod_main (flux_t *h, int argc, char **argv)
{
    int errnum;
    flux_msg_handler_t **handlers = NULL;

    if (argc == 1 && !strcmp (argv[0], "--init-failure")) {
        flux_log (h, LOG_INFO, "aborting during init per test request");
        errno = EIO;
        goto error;
    }
    if (!(modules = zhash_new ())) {
        errno = ENOMEM;
        goto error;
    }
    if (flux_get_rank (h, &rank) < 0)
        goto error;
    if (flux_msg_handler_addvec (h, htab, NULL, &handlers) < 0)
        goto error;
    if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
        flux_log_error (h, "flux_reactor_run");
        goto error;
    }
    zhash_destroy (&modules);
    return 0;
error:
    /* Preserve errno across cleanup calls that may clobber it. */
    errnum = errno;
    flux_msg_handler_delvec (handlers);
    zhash_destroy (&modules);
    errno = errnum;
    return -1;
}
/* Module entry point: must run on rank 0 only.  Subscribes to sim.start
 * events, registers handlers, announces itself, and runs the reactor.
 * Returns 0 on success, -1 on failure.
 */
int mod_main (flux_t *h, int argc, char **argv)
{
    ctx_t *ctx = getctx (h);
    uint32_t rank;
    flux_msg_handler_t **msg_handlers = NULL;
    int retval = -1;

    if (flux_get_rank (h, &rank) < 0)
        return -1;
    if (rank != 0) {
        flux_log (h, LOG_ERR, "this module must only run on rank 0");
        return -1;
    }
    flux_log (h, LOG_INFO, "module starting");

    if (flux_event_subscribe (h, "sim.start") < 0) {
        flux_log (h, LOG_ERR, "subscribing to event: %s", strerror (errno));
        return -1;
    }
    if (flux_msg_handler_addvec (h, htab, ctx, &msg_handlers) < 0) {
        flux_log (h, LOG_ERR, "flux_msg_handler_add: %s", strerror (errno));
        return -1;
    }
    send_alive_request (h, module_name);

    if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
        flux_log (h, LOG_ERR, "flux_reactor_run: %s", strerror (errno));
        goto cleanup;
    }
    retval = 0;
cleanup:
    flux_msg_handler_delvec (msg_handlers);
    return retval;
}
int main (int argc, char **argv) { flux_t h; heartbeat_t *hb; flux_msg_handler_t *w; plan (18); check_codec (); (void)setenv ("FLUX_CONNECTOR_PATH", CONNECTOR_PATH, 0); ok ((h = flux_open ("loop://", 0)) != NULL, "opened loop connector"); if (!h) BAIL_OUT ("can't continue without loop handle"); flux_fatal_set (h, fatal_err, NULL); ok ((hb = heartbeat_create ()) != NULL, "heartbeat_create works"); heartbeat_set_flux (hb, h); ok (heartbeat_get_rate (hb) == 2., "heartbeat_get_rate returns default of 2s"); errno = 0; ok (heartbeat_set_rate (hb, -1) < 1 && errno == EINVAL, "heartbeat_set_rate -1 fails with EINVAL"); errno = 0; ok (heartbeat_set_rate (hb, 1000000) < 1 && errno == EINVAL, "heartbeat_set_rate 1000000 fails with EINVAL"); ok (heartbeat_set_ratestr (hb, "250ms") == 0, "heartbeat_set_ratestr 250ms works"); ok (heartbeat_get_rate (hb) == 0.250, "heartbeat_get_rate returns what was set"); ok (heartbeat_set_rate (hb, 0.1) == 0, "heartbeat_set_rate 0.1 works"); ok (heartbeat_get_rate (hb) == 0.1, "heartbeat_get_rate returns what was set"); ok (heartbeat_get_epoch (hb) == 0, "heartbeat_get_epoch works, default is zero"); w = flux_msg_handler_create (h, FLUX_MATCH_EVENT, heartbeat_event_cb, hb); ok (w != NULL, "created event watcher"); flux_msg_handler_start (w); ok (heartbeat_start (hb) == 0, "heartbeat_start works"); ok (flux_reactor_run (flux_get_reactor (h), 0) == 0, "flux reactor exited normally"); heartbeat_destroy (hb); flux_msg_handler_destroy (w); flux_close (h); done_testing (); return 0; }
/* Module entry point (legacy non-pointer flux_t handle): allocate
 * context, register handlers, run reactor.
 * Returns 0 on success; -1 with errno set on failure.
 */
int mod_main (flux_t h, int argc, char **argv)
{
    int errnum;
    ctx_t *ctx = getctx (h);

    if (!ctx) {
        errnum = errno;
        flux_log_error (h, "error allocating context");
        goto error;
    }
    if (flux_msg_handler_addvec (h, htab, ctx) < 0) {
        errnum = errno;
        flux_log_error (h, "flux_msg_handler_addvec");
        goto error;
    }
    if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
        errnum = errno;
        flux_log_error (h, "flux_reactor_run");
        flux_msg_handler_delvec (htab);
        goto error;
    }
    flux_msg_handler_delvec (htab);
    return 0;
error:
    /* Restore errno captured before logging, which may clobber it. */
    errno = errnum;
    return -1;
}
/* Verify dispatch among overlapping message handlers: with handlers for
 * both "foo.*" and "foo.bar" installed, a message matches only the most
 * recently added handler that matches its topic.
 */
static void test_multmatch (flux_t h)
{
    flux_msg_handler_t *glob_w, *exact_w;
    struct flux_match m1 = FLUX_MATCH_ANY;
    struct flux_match m2 = FLUX_MATCH_ANY;

    m1.topic_glob = "foo.*";
    m2.topic_glob = "foo.bar";

    /* test #1: verify multiple match behaves as documented, that is,
     * a message is matched (only) by the most recently added watcher
     */
    ok ((glob_w = flux_msg_handler_create (h, m1, multmatch1, NULL)) != NULL,
        "multmatch: first added handler for foo.*");
    ok ((exact_w = flux_msg_handler_create (h, m2, multmatch2, NULL)) != NULL,
        "multmatch: next added handler for foo.bar");
    flux_msg_handler_start (glob_w);
    flux_msg_handler_start (exact_w);
    ok (send_request (h, "foo.bar") == 0,
        "multmatch: send foo.bar msg");
    ok (send_request (h, "foo.baz") == 0,
        "multmatch: send foo.baz msg");
    ok (flux_reactor_run (flux_get_reactor (h), 0) == 0
            && multmatch_count == 2,
        "multmatch: last added watcher handled foo.bar");
    flux_msg_handler_destroy (glob_w);
    flux_msg_handler_destroy (exact_w);
}
/* Driver for the reactor unit tests.  Opens a loop connector, runs the
 * reactor with no watchers, then dispatches to the per-watcher-type
 * test helpers (expected subtest counts noted inline to match plan()).
 * NOTE(review): this function continues beyond the visible chunk;
 * only comments are added here.
 */
int main (int argc, char *argv[])
{
    flux_t h;
    flux_reactor_t *reactor;

    /* 4 general + 11 timer + 3 fd + 4 zmq + 3 msg + 5 multmatch + 2 misc */
    plan (4+11+3+4+3+5+2);

    (void)setenv ("FLUX_CONNECTOR_PATH", CONNECTOR_PATH, 0);
    ok ((h = flux_open ("loop://", 0)) != NULL,
        "opened loop connector");
    if (!h)
        BAIL_OUT ("can't continue without loop handle");
    flux_fatal_set (h, fatal_err, NULL);
    ok ((reactor = flux_get_reactor (h)) != NULL,
        "obtained reactor");
    if (!reactor)
        BAIL_OUT ("can't continue without reactor");

    /* A reactor with no active watchers returns immediately. */
    ok (flux_reactor_run (reactor, 0) == 0,
        "general: reactor ran to completion (no watchers)");
    errno = 0;
    ok (flux_sleep_on (h, FLUX_MATCH_ANY) < 0 && errno == EINVAL,
        "general: flux_sleep_on outside coproc fails with EINVAL");

    test_timer (reactor); // 11
    test_fd (reactor); // 3
    test_zmq (reactor); // 4
    test_msg (h); // 3
    test_multmatch (h); // 5

    /* Misc */
    lives_ok ({ reactor_destroy_early ();},
        "destroying reactor then watcher doesn't segfault");
/* Exercise timer watchers: argument validation, a oneshot timer (runs
 * once, does not re-fire after expiry), error passthru via
 * flux_reactor_stop_error, and a repeating timer that stops itself.
 */
static void test_timer (flux_reactor_t *reactor)
{
    flux_watcher_t *w;

    /* Negative initial timeout and negative repeat are both rejected. */
    errno = 0;
    ok (!flux_timer_watcher_create (reactor, -1, 0, oneshot, NULL)
        && errno == EINVAL,
        "timer: creating negative timeout fails with EINVAL");
    ok (!flux_timer_watcher_create (reactor, 0, -1, oneshot, NULL)
        && errno == EINVAL,
        "timer: creating negative repeat fails with EINVAL");
    ok ((w = flux_timer_watcher_create (reactor, 0, 0, oneshot, NULL)) != NULL,
        "timer: creating zero timeout works");
    flux_watcher_start (w);
    ok (flux_reactor_run (reactor, 0) == 0,
        "timer: reactor ran to completion (single oneshot)");
    ok (oneshot_ran == true,
        "timer: oneshot was executed");
    /* An expired oneshot must not fire again on a second reactor pass. */
    oneshot_ran = false;
    ok (flux_reactor_run (reactor, 0) == 0,
        "timer: reactor ran to completion (expired oneshot)");
    ok (oneshot_ran == false,
        "timer: expired oneshot was not re-executed");

    /* oneshot_errno makes the callback stop the reactor with an error;
     * verify the errno propagates out of flux_reactor_run. */
    errno = 0;
    oneshot_errno = ESRCH;
    flux_watcher_start (w);
    ok (flux_reactor_run (reactor, 0) < 0 && errno == ESRCH,
        "general: reactor stop_error worked with errno passthru");
    flux_watcher_stop (w);
    flux_watcher_destroy (w);

    /* NOTE(review): 0.01s is 10ms, but the TAP message below says "1ms"
     * — message text left unchanged to avoid breaking output matching;
     * confirm and fix upstream. */
    ok ((w = flux_timer_watcher_create (reactor, 0.01, 0.01, repeat, NULL))
        != NULL,
        "timer: creating 1ms timeout with 1ms repeat works");
    flux_watcher_start (w);
    ok (flux_reactor_run (reactor, 0) == 0,
        "timer: reactor ran to completion (single repeat)");
    ok (repeat_countdown == 0,
        "timer: repeat timer stopped itself after countdown");
    flux_watcher_stop (w);
    flux_watcher_destroy (w);
}
/* Watcher thread: unlink the key so stale values can't skew the test,
 * install a kvs watch plus a prepare watcher and a timeout timer, then
 * run the reactor (stopped elsewhere via those callbacks).
 */
void *watchthread (void *arg)
{
    thd_t *t = arg;
    watch_count_t wc;
    flux_kvs_txn_t *txn;
    flux_future_t *f;
    flux_reactor_t *r;
    flux_watcher_t *pw = NULL;
    flux_watcher_t *tw = NULL;

    if (!(t->h = flux_open (NULL, 0)))
        log_err_exit ("flux_open");

    /* Make sure key doesn't already exist, initial value may affect
     * test by chance (i.e. initial value = 0, commit 0 and thus no
     * change) */
    if (!(txn = flux_kvs_txn_create ()))
        log_err_exit ("flux_kvs_txn_create");
    if (flux_kvs_txn_unlink (txn, 0, key) < 0)
        log_err_exit ("flux_kvs_txn_unlink");
    /* Synchronous commit: block until the unlink is applied. */
    if (!(f = flux_kvs_commit (t->h, 0, txn)) || flux_future_get (f, NULL) < 0)
        log_err_exit ("flux_kvs_commit");
    flux_future_destroy (f);
    flux_kvs_txn_destroy (txn);

    r = flux_get_reactor (t->h);

    if (flux_kvs_watch (t->h, key, watch_count_cb, t) < 0)
        log_err_exit ("flux_kvs_watch %s", key);

    pw = flux_prepare_watcher_create (r, watch_prepare_cb, NULL);

    /* wc must be fully initialized before tw can fire. */
    wc.t = t;
    wc.lastcount = -1;

    /* So test won't hang if there's a bug */
    tw = flux_timer_watcher_create (r,
                                    WATCH_TIMEOUT,
                                    WATCH_TIMEOUT,
                                    watch_timeout_cb,
                                    &wc);

    flux_watcher_start (pw);
    flux_watcher_start (tw);

    if (flux_reactor_run (r, 0) < 0)
        log_err_exit ("flux_reactor_run");

    flux_watcher_destroy (pw);
    flux_watcher_destroy (tw);
    flux_close (t->h);
    return NULL;
}
/* Sim module entry point: must run on rank 0 only.  Parses the
 * exit-on-complete module argument, subscribes to rdl.update events,
 * registers handlers, sends the start event, and runs the reactor.
 * Returns 0 on success, -1 on failure.
 *
 * Fix: the zhash created by zhash_fromargv() was never destroyed on
 * any path (memory leak); destroy it as soon as argument parsing is
 * done, and on the early-return paths before it.
 */
int mod_main (flux_t *h, int argc, char **argv)
{
    zhash_t *args = zhash_fromargv (argc, argv);
    ctx_t *ctx;
    char *eoc_str;
    bool exit_on_complete;
    uint32_t rank;

    if (flux_get_rank (h, &rank) < 0) {
        zhash_destroy (&args);
        return -1;
    }
    if (rank != 0) {
        flux_log (h, LOG_ERR, "sim module must only run on rank 0");
        zhash_destroy (&args);
        return -1;
    }
    flux_log (h, LOG_INFO, "sim comms module starting");

    if (!(eoc_str = zhash_lookup (args, "exit-on-complete"))) {
        flux_log (h,
                  LOG_ERR,
                  "exit-on-complete argument is not set, defaulting to false");
        exit_on_complete = false;
    } else {
        exit_on_complete =
            (!strcmp (eoc_str, "true") || !strcmp (eoc_str, "True"));
    }
    /* eoc_str pointed into args; args is no longer needed. */
    zhash_destroy (&args);

    ctx = getctx (h, exit_on_complete);

    if (flux_event_subscribe (h, "rdl.update") < 0) {
        flux_log (h, LOG_ERR, "subscribing to event: %s", strerror (errno));
        return -1;
    }
    if (flux_msg_handler_addvec (h, htab, ctx) < 0) {
        flux_log (h, LOG_ERR, "flux_msg_handler_add: %s", strerror (errno));
        return -1;
    }
    if (send_start_event (h) < 0) {
        flux_log (h, LOG_ERR, "sim failed to send start event");
        return -1;
    }
    flux_log (h, LOG_DEBUG, "sim sent start event");

    if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
        flux_log (h, LOG_ERR, "flux_reactor_run: %s", strerror (errno));
        return -1;
    }
    return 0;
}
/* RPC coproc test driver (legacy flux_msghandler_addvec API).  Opens a
 * coproc loop connector, verifies fatal-error hooks, registers
 * handlers, then kicks the test off by sending rpctest.begin; most of
 * the test runs inside the handler callbacks while the reactor runs.
 */
int main (int argc, char *argv[])
{
    flux_msg_t *msg;
    flux_t h;
    flux_reactor_t *reactor;

    plan (35);

    (void)setenv ("FLUX_CONNECTOR_PATH", CONNECTOR_PATH, 0);
    ok ((h = flux_open ("loop://", FLUX_O_COPROC)) != NULL,
        "opened loop connector");
    if (!h)
        BAIL_OUT ("can't continue without loop handle");
    ok ((reactor = flux_get_reactor (h)) != NULL,
        "obtained reactor");
    if (!reactor)
        BAIL_OUT ("can't continue without reactor");
    flux_fatal_set (h, fatal_err, NULL);
    /* Trigger a fatal error deliberately to prove the hook is invoked. */
    flux_fatal_error (h, __FUNCTION__, "Foo");
    ok (fatal_tested == true,
        "flux_fatal function is called on fatal error");

    /* create nodeset for last _then test */
    ok ((then_ns = nodeset_create ()) != NULL,
        "nodeset created ok");

    ok (flux_msghandler_addvec (h, htab, htablen, NULL) == 0,
        "registered message handlers");
    /* test continues in rpctest_begin_cb() so that rpc calls
     * can sleep while we answer them
     */
    ok ((msg = flux_request_encode ("rpctest.begin", NULL)) != NULL
        && flux_send (h, msg, 0) == 0,
        "sent message to initiate test");
    ok (flux_reactor_run (reactor, 0) == 0,
        "reactor completed normally");
    flux_msg_destroy (msg);

    /* Check result of last _then test */
    ok (nodeset_count (then_ns) == 128,
        "then callback worked with correct nodemap");
    nodeset_destroy (then_ns);
    flux_rpc_destroy (then_r);

    flux_close (h);
    done_testing();
    return (0);
}
/* Subprocess manager test: create a manager wired to a reactor with
 * SIGCHLD handling, fork/exec a short shell command, and run the
 * reactor until the exit handler (registered below) stops it.
 */
int main (int ac, char **av)
{
    int rc;
    struct subprocess_manager *sm;
    struct subprocess *p;
    flux_reactor_t *r;

    /* Let the reactor own SIGCHLD rather than czmq's default handlers. */
    zsys_handler_set (NULL);

    plan (NO_PLAN);

    if (!(sm = subprocess_manager_create ()))
        BAIL_OUT ("Failed to create subprocess manager");
    ok (sm != NULL, "create subprocess manager");

    if (!(r = flux_reactor_create (FLUX_REACTOR_SIGCHLD)))
        BAIL_OUT ("Failed to create a reactor");

    rc = subprocess_manager_set (sm, SM_REACTOR, r);
    ok (rc == 0,
        "set subprocess manager reactor (rc=%d, %s)", rc, strerror (errno));

    if (!(p = subprocess_create (sm)))
        BAIL_OUT ("Failed to create a subprocess object");
    /* exit_handler receives the reactor so it can stop it on completion. */
    ok (subprocess_set_callback (p, exit_handler, r) >= 0,
        "set subprocess exit handler");
    ok (subprocess_set_io_callback (p, io_cb) >= 0,
        "set subprocess io callback");

    ok (subprocess_set_command (p, "sleep 0.5 && /bin/echo -n 'hello\nworld\n'") >= 0,
        "set subprocess command");
    ok (subprocess_set_environ (p, environ) >= 0,
        "set subprocess environ");

    ok (subprocess_fork (p) >= 0, "subprocess_fork");
    ok (subprocess_exec (p) >= 0, "subprocess_exec");

    ok (flux_reactor_run (r, 0) == 0,
        "reactor returned normally");
    subprocess_manager_destroy (sm);
    flux_reactor_destroy (r);
    done_testing ();
}
/* Exercise a catch-all message handler: queue msgwatcher_count requests,
 * then run the reactor until msgreader has consumed them and stopped it.
 */
static void test_msg (flux_t h)
{
    flux_msg_handler_t *mh;
    int nsent;

    ok ((mh = flux_msg_handler_create (h, FLUX_MATCH_ANY, msgreader, NULL))
        != NULL,
        "msg: created handler for any message");
    flux_msg_handler_start (mh);

    for (nsent = 0; nsent < msgwatcher_count; nsent++) {
        if (send_request (h, "foo") < 0)
            break;
    }
    ok (nsent == msgwatcher_count,
        "msg: sent %d requests", nsent);

    ok (flux_reactor_run (flux_get_reactor (h), 0) == 0,
        "msg: reactor ran to completion after %d requests", msgwatcher_count);

    flux_msg_handler_stop (mh);
    flux_msg_handler_destroy (mh);
}
/* Register a JSC status-change callback whose output goes to OFN (or
 * stdout when OFN is NULL), then run the reactor.
 * Returns 0 on success, -1 on setup failure.
 */
static int handle_notify_req (flux_t h, const char *ofn)
{
    jstatctx_t *ctx = NULL;

    sig_flux_h = h;  /* stash handle for the SIGINT handler */
    if (signal (SIGINT, sig_handler) == SIG_ERR)
        return -1;
    ctx = getctx (h);
    if (ofn)
        ctx->op = open_test_outfile (ofn);
    else
        ctx->op = stdout;
    if (jsc_notify_status (h, job_status_cb, (void *)h) != 0) {
        flux_log (h, LOG_ERR, "failed to reg a job status change CB");
        return -1;
    }
    if (flux_reactor_run (flux_get_reactor (h), 0) < 0)
        flux_log (h, LOG_ERR, "error in flux_reactor_run");
    return 0;
}
/* "flux content spam" builtin: issue COUNT content.store RPCs, keeping
 * at most spam_max_inflight outstanding; store_completion (elsewhere in
 * this file) decrements spam_cur_inflight and stops the reactor so the
 * outer loop can refill the window.
 * Usage: spam COUNT [MAX_INFLIGHT]
 */
static int internal_content_spam (optparse_t *p, int ac, char *av[])
{
    int i, count;
    flux_rpc_t *rpc;
    flux_t *h;
    flux_reactor_t *r;
    char data[256];
    int size = 256;

    if (ac != 2 && ac != 3) {
        optparse_print_usage (p);
        exit (1);
    }
    count = strtoul (av[1], NULL, 10);
    if (ac == 3)
        spam_max_inflight = strtoul (av[2], NULL, 10);
    else
        spam_max_inflight = 1;

    if (!(h = builtin_get_flux_handle (p)))
        log_err_exit ("flux_open");
    if (!(r = flux_get_reactor (h)))
        log_err_exit ("flux_get_reactor");

    spam_cur_inflight = 0;
    i = 0;
    /* Outer loop runs until all RPCs are sent and all responses drained;
     * inner loop tops the in-flight window up to spam_max_inflight. */
    while (i < count || spam_cur_inflight > 0) {
        while (i < count && spam_cur_inflight < spam_max_inflight) {
            snprintf (data, size, "spam-o-matic pid=%d seq=%d", getpid(), i);
            if (!(rpc = flux_rpc_raw (h, "content.store", data, size, 0, 0)))
                log_err_exit ("content.store(%d)", i);
            if (flux_rpc_then (rpc, store_completion, r) < 0)
                log_err_exit ("flux_rpc_then(%d)", i);
            spam_cur_inflight++;
            i++;
        }
        /* Reactor runs until a completion stops it (window has room). */
        if (flux_reactor_run (r, 0) < 0)
            log_err ("flux_reactor_run");
    }
    return (0);
}
/* Exercise fd watchers: nonblocking socketpair with a POLLIN reader on
 * one end and a POLLOUT writer on the other; run the reactor until the
 * callbacks stop it.
 */
static void test_fd (flux_reactor_t *reactor)
{
    int fd[2];
    flux_watcher_t *reader, *writer;

    ok (socketpair (PF_LOCAL, SOCK_STREAM, 0, fd) == 0
            && set_nonblock (fd[0]) == 0
            && set_nonblock (fd[1]) == 0,
        "fd: successfully created non-blocking socketpair");
    reader = flux_fd_watcher_create (reactor, fd[0], FLUX_POLLIN,
                                     fdreader, NULL);
    writer = flux_fd_watcher_create (reactor, fd[1], FLUX_POLLOUT,
                                     fdwriter, NULL);
    ok (reader != NULL && writer != NULL,
        "fd: reader and writer created");
    flux_watcher_start (reader);
    flux_watcher_start (writer);
    ok (flux_reactor_run (reactor, 0) == 0,
        "fd: reactor ran to completion after %lu bytes", fdwriter_bufsize);
    flux_watcher_stop (reader);
    flux_watcher_stop (writer);
    flux_watcher_destroy (reader);
    flux_watcher_destroy (writer);
    close (fd[0]);
    close (fd[1]);
}
/* commit_order test driver: stream --count puts to KEY, keeping at most
 * --fanout commits in flight (via prep/check/idle watchers), while a
 * FLUX_KVS_WATCH lookup verifies values arrive in commit order.
 */
int main (int argc, char *argv[])
{
    flux_t *h;
    flux_reactor_t *r;
    int last = -1;
    int ch;
    flux_future_t *f;

    log_init ("commit_order");

    while ((ch = getopt_long (argc, argv, OPTIONS, longopts, NULL)) != -1) {
        switch (ch) {
            case 'h': /* --help */
                usage ();
                break;
            case 'v': /* --verbose */
                verbose = true;
                break;
            case 'c': /* --count N */
                totcount = strtoul (optarg, NULL, 10);
                break;
            case 'f': /* --fanout N */
                max_queue_depth = strtoul (optarg, NULL, 10);
                break;
            case 'n': /* --namespace=NAME */
                if (!(ns = strdup (optarg)))
                    log_err_exit ("out of memory");
                break;
            default:
                usage ();
                break;
        }
    }
    if (optind != argc - 1)
        usage ();
    key = argv[optind++];
    if (totcount < 1 || max_queue_depth < 1)
        usage ();

    if (!(h = flux_open (NULL, 0)))
        log_err_exit ("flux_open");
    if (!(r = flux_get_reactor (h)))
        log_err_exit ("flux_get_reactor");

    /* One synchronous put before watch request, so that
     * watch request doesn't fail with ENOENT.
     */
    f = commit_int (h, key, txcount++);
    commit_continuation (f, NULL); // destroys f, increments rxcount

    /* Configure watcher
     * Wait for one response before unleashing async puts, to ensure
     * that first value is captured.
     */
    if (!(f = flux_kvs_lookup (h, ns, FLUX_KVS_WATCH, key)))
        log_err_exit ("flux_kvs_lookup");
    watch_continuation (f, &last); // resets f, increments wrxcount
    if (flux_future_then (f, -1., watch_continuation, &last) < 0)
        log_err_exit ("flux_future_then");

    /* Configure mechanism to keep max_queue_depth (--fanout) put RPCs
     * outstanding until totcount (--count) reached.
     */
    if (!(w_prep = flux_prepare_watcher_create (r, prep, NULL)))
        log_err_exit ("flux_prepare_watcher_create");
    if (!(w_check = flux_check_watcher_create (r, check, h)))
        log_err_exit ("flux_check_watcher_create");
    if (!(w_idle = flux_idle_watcher_create (r, NULL, NULL)))
        log_err_exit ("flux_idle_watcher_create");
    flux_watcher_start (w_prep);
    flux_watcher_start (w_check);

    /* Run until work is exhausted.
     */
    if (flux_reactor_run (r, 0) < 0)
        log_err_exit ("flux_reactor_run");

    flux_watcher_destroy (w_prep);
    flux_watcher_destroy (w_check);
    flux_watcher_destroy (w_idle);

    free (ns);
    flux_close (h);
    log_fini ();
    return 0;
}
/* Attach the calling terminal to a running program's stdio streams
 * rooted at KEY in the kvs: local stdin is forwarded to KEY.stdin via a
 * fd watcher, and KEY.stdout / KEY.stderr are followed via kz ready
 * callbacks.  The reactor runs until both readers reach EOF.
 */
static void attach (flux_t *h, const char *key, bool rawtty, int kzoutflags,
                    int blocksize)
{
    t_kzutil_ctx_t *ctx = xzmalloc (sizeof (*ctx));
    char *name;
    int fdin = dup (STDIN_FILENO);
    struct termios saved_tio;
    flux_reactor_t *r = flux_get_reactor (h);
    flux_watcher_t *w = NULL;

    log_msg ("process attached to %s", key);

    ctx->h = h;
    ctx->blocksize = blocksize;

    /* FIXME: need a ~. style escape sequence to terminate stdin
     * in raw mode.
     */
    if (rawtty) {
        if (fd_set_raw (fdin, &saved_tio, true) < 0)
            log_err_exit ("fd_set_raw stdin");
    }
    if (fd_set_nonblocking (fdin, true) < 0)
        log_err_exit ("fd_set_nonblocking stdin");

    if (asprintf (&name, "%s.stdin", key) < 0)
        oom ();
    /* NOTE(review): brace-less nested if/else below — the first `else`
     * binds to the EEXIST test and the second to the kz_open test
     * (dangling-else rule).  Parse is correct but fragile; consider
     * adding braces upstream.  EEXIST means another writer owns stdin,
     * which is tolerated by disabling local stdin forwarding. */
    if (!(ctx->kz[0] = kz_open (h, name, kzoutflags)))
        if (errno == EEXIST)
            log_err ("disabling stdin");
        else
            log_err_exit ("%s", name);
    else {
        if (!(w = flux_fd_watcher_create (r, fdin, FLUX_POLLIN,
                                          attach_stdin_ready_cb, ctx)))
            log_err_exit ("flux_fd_watcher_create %s", name);
        flux_watcher_start (w);
    }
    free (name);

    if (asprintf (&name, "%s.stdout", key) < 0)
        oom ();
    if (!(ctx->kz[1] = kz_open (h, name, KZ_FLAGS_READ | KZ_FLAGS_NONBLOCK)))
        log_err_exit ("kz_open %s", name);
    if (kz_set_ready_cb (ctx->kz[1], attach_stdout_ready_cb, ctx) < 0)
        log_err_exit ("kz_set_ready_cb %s", name);
    free (name);
    ctx->readers++;

    if (asprintf (&name, "%s.stderr", key) < 0)
        oom ();
    if (!(ctx->kz[2] = kz_open (h, name, KZ_FLAGS_READ | KZ_FLAGS_NONBLOCK)))
        log_err_exit ("kz_open %s", name);
    if (kz_set_ready_cb (ctx->kz[2], attach_stderr_ready_cb, ctx) < 0)
        log_err_exit ("kz_set_ready_cb %s", name);
    free (name);
    ctx->readers++;

    /* Reactor terminates when ctx->readers reaches zero, i.e.
     * when EOF is read from remote stdout and stderr.
     * (Note: if they are already at eof, we will have already terminated
     * before the reactor is started, since kvs_watch callbacks make one
     * call to the callback in the context of the caller).
     */
    if (ctx->readers > 0) {
        if (flux_reactor_run (r, 0) < 0)
            log_err_exit ("flux_reactor_run");
    }

    (void)kz_close (ctx->kz[1]);
    (void)kz_close (ctx->kz[2]);

    /* FIXME: tty state needs to be restored on all exit paths.
     */
    if (rawtty) {
        if (fd_set_raw (fdin, &saved_tio, false) < 0)
            log_err_exit ("fd_set_raw stdin");
    }

    flux_watcher_destroy (w);
    free (ctx);
}
int main (int argc, char *argv[]) { flux_msg_t *msg; flux_t *h; flux_reactor_t *reactor; plan (NO_PLAN); (void)setenv ("FLUX_CONNECTOR_PATH", flux_conf_get ("connector_path", CONF_FLAG_INTREE), 0); ok ((h = flux_open ("loop://", FLUX_O_COPROC)) != NULL, "opened loop connector"); if (!h) BAIL_OUT ("can't continue without loop handle"); flux_fatal_set (h, fatal_err, NULL); ok ((reactor = flux_get_reactor(h)) != NULL, "obtained reactor"); if (!h) BAIL_OUT ("can't continue without reactor"); ok (flux_msg_handler_addvec (h, htab, NULL) == 0, "registered message handlers"); /* test continues in rpctest_begin_cb() so that rpc calls * can sleep while we answer them */ ok ((msg = flux_request_encode ("rpctest.begin", NULL)) != NULL, "encoded rpctest.begin request OK"); ok (flux_send (h, msg, 0) == 0, "sent rpctest.begin request"); ok (flux_reactor_run (reactor, 0) == 0, "reactor completed normally"); flux_msg_destroy (msg); /* test _then: Slightly tricky. * Send request. We're not in a coproc ctx here in main(), so there * will be no response, therefore, check will be false. Register * continuation, start reactor. Response will be received, continuation * will be invoked. Continuation stops the reactor. 
*/ flux_rpc_t *r; ok ((r = flux_rpc (h, "rpctest.echo", "{}", FLUX_NODEID_ANY, 0)) != NULL, "flux_rpc with payload when payload is expected works"); ok (flux_rpc_check (r) == false, "flux_rpc_check says get would block"); /* reg/unreg _then a couple times for fun */ ok (flux_rpc_then (r, NULL, 0) == 0, "flux_rpc_then with NULL cb works"); ok (flux_rpc_then (r, then_cb, h) == 0, "flux_rpc_then works after NULL"); ok (flux_rpc_then (r, NULL, 0) == 0, "flux_rpc_then with NULL cb after non-NULL works"); ok (flux_rpc_then (r, then_cb, h) == 0, "flux_rpc_then works"); /* enough of that */ ok (flux_reactor_run (reactor, 0) == 0, "reactor completed normally"); flux_rpc_destroy (r); /* Test a _then corner case: * If _check() is called before _then(), a message may have been cached * in the flux_rpc_t. rpctest_thenbug_cb creates this condition. * Next, _then continuation is installed, but will reactor call it? * This will hang if rpc implementation doesn't return a cached message * back to the handle in _then(). Else, continuation will stop reactor. */ ok ((thenbug_r = flux_rpc (h, "rpctest.echo", "{}", FLUX_NODEID_ANY, 0)) != NULL, "thenbug: sent echo request"); do { if (!(msg = flux_request_encode ("rpctest.thenbug", NULL)) || flux_send (h, msg, 0) < 0 || flux_reactor_run (reactor, 0) < 0) { flux_msg_destroy (msg); break; } flux_msg_destroy (msg); } while (!flux_rpc_check (thenbug_r)); ok (true, "thenbug: check says message ready"); ok (flux_rpc_then (thenbug_r, then_cb, h) == 0, "thenbug: registered then - hangs on failure"); ok (flux_reactor_run (reactor, 0) == 0, "reactor completed normally"); flux_rpc_destroy (thenbug_r); flux_msg_handler_delvec (htab); flux_close (h); done_testing(); return (0); }
/* Exercise flux_reduce with FLUX_REDUCE_TIMEDFLUSH: items appended to a
 * batch are reduced at each append and sinked in a single timed flush;
 * a late append to an already-flushed batch is sinked immediately.
 */
void test_timed (flux_t *h)
{
    flux_reduce_t *r;
    int i, errors;
    double timeout;

    clear_counts ();

    ok ((r = flux_reduce_create (h, reduce_ops, 0.1, NULL,
                                 FLUX_REDUCE_TIMEDFLUSH)) != NULL,
        "timed: flux_reduce_create works");
    if (!r)
        BAIL_OUT();
    ok (flux_reduce_opt_get (r, FLUX_REDUCE_OPT_TIMEOUT, &timeout,
                             sizeof (timeout)) == 0 && timeout == 0.1,
        "timed: flux_reduce_opt_get TIMEOUT returned timeout");

    /* Append 100 items in batch 0 before starting reactor.
     * Reduction occurs at each append.
     * Nothing should be sinked.
     */
    errors = 0;
    for (i = 0; i < 100; i++) {
        if (flux_reduce_append (r, xstrdup ("hi"), 0) < 0)
            errors++;
    }
    ok (errors == 0,
        "timed.0: flux_reduce_append added 100 items");
    cmp_ok (reduce_calls, "==", 99,
        "timed.0: op.reduce called 99 times");
    cmp_ok (sink_calls, "==", 0,
        "timed.0: op.sink called 0 times");

    /* Start reactor so timeout handler can run.
     * It should fire once and sink all items in one sink call.
     */
    ok (flux_reactor_run (flux_get_reactor (h), 0) == 0,
        "timed.0: reactor completed normally");
    cmp_ok (sink_calls, "==", 1,
        "timed.0: op.sink called 1 time");
    cmp_ok (sink_items, "==", 100,
        "timed.0: op.sink processed 100 items");

    clear_counts ();

    /* Now append one more item to batch 0.
     * It should be immediately flushed.
     */
    ok (flux_reduce_append (r, xstrdup ("hi"), 0) == 0,
        "timed.0: flux_reduce_append added 1 more item");
    cmp_ok (reduce_calls, "==", 0,
        "timed.0: op.reduce not called");
    cmp_ok (sink_calls, "==", 1,
        "timed.0: op.sink called 1 time");
    cmp_ok (sink_items, "==", 1,
        "timed.0: op.sink processed 1 items");

    clear_counts ();

    /* Append 100 items to batch 1.
     * It should behave like the first batch.
     */
    errors = 0;
    for (i = 0; i < 100; i++) {
        if (flux_reduce_append (r, xstrdup ("hi"), 1) < 0)
            errors++;
    }
    ok (errors == 0,
        "timed.1: flux_reduce_append added 100 items");
    cmp_ok (reduce_calls, "==", 99,
        "timed.1: op.reduce called 99 times");
    cmp_ok (sink_calls, "==", 0,
        "timed.1: op.sink called 0 times");

    /* Start reactor so timeout handler can run.
     * It should fire once and sink all items in one sink call.
     */
    ok (flux_reactor_run (flux_get_reactor (h), 0) == 0,
        "timed.1: reactor completed normally");
    cmp_ok (sink_calls, "==", 1,
        "timed.1: op.sink called 1 time");
    cmp_ok (sink_items, "==", 100,
        "timed.1: op.sink processed 100 items");

    flux_reduce_destroy (r);
}
/* zio unit tests: pipe reader (data + EOF delivered via send callback)
 * and pipe writer (data read back via fd watcher), with fd-leak checks
 * bracketing each case.
 */
int main (int argc, char **argv)
{
    zio_t *zio;
    int init_fds;
    const char *name;
    struct counts c;
    int fd;
    flux_reactor_t *r;
    flux_watcher_t *w;

    memset (&c, 0, sizeof (c));

    plan (NO_PLAN);

    test_encode ();

    ok ((r = flux_reactor_create (0)) != NULL,
        "flux reactor created");

    /* Baseline open-fd count for the leak checks below. */
    init_fds = fdcount ();
    diag ("initial fd count: %d", init_fds);

    /* simple reader tests */
    ok ((zio = zio_pipe_reader_create ("test1", &c)) != NULL,
        "reader: zio_pipe_reader_create works");
    ok ((name = zio_name (zio)) != NULL
        && !strcmp (name, "test1"),
        "reader: zio_name returns correct name");
    ok (zio_set_close_cb (zio, close_reader) == 0,
        "reader: zio_set_close_cb works");
    ok (zio_set_send_cb (zio, send_reader) == 0,
        "reader: zio_set_send_cb works");
    ok (zio_reactor_attach (zio, r) == 0,
        "reader: zio_reactor_attach works");
    ok ((fd = zio_dst_fd (zio)) >= 0,
        "reader: zio_dst_fd returned valid file descriptor");
    ok (write (fd, "narf!", 5) == 5,
        "reader: wrote narf! to reader pipe");
    ok (zio_close_dst_fd (zio) == 0,
        "reader: zio_close_dst_fd succeeded");
    ok (flux_reactor_run (r, 0) == 0,
        "reader: reactor completed successfully");
    ok (c.send_reader == 1,
        "reader: send function called once for EOF + incomplete line");
    errno = 0;
    zio_destroy (zio);
    ok (init_fds == fdcount (),
        "reader: zio_destroy leaks no file descriptors");

    /* simple writer tests */
    ok ((zio = zio_pipe_writer_create ("test2", &c)) != NULL,
        "writer: zio_pipe_writer_create works");
    ok ((name = zio_name (zio)) != NULL
        && !strcmp (name, "test2"),
        "writer: zio_name returns correct name");
    ok (zio_set_close_cb (zio, close_writer) == 0,
        "writer: zio_set_close_cb works");
    ok ((fd = zio_src_fd (zio)) >= 0,
        "writer: zio_src_fd returned valid file descriptor");
    /* Watch the read end ourselves to verify what the writer emits. */
    w = flux_fd_watcher_create (r, fd, FLUX_POLLIN, fd_read, &c);
    ok (w != NULL,
        "writer: created fd watcher");
    flux_watcher_start (w);

    ok (zio_write (zio, "narf!", 5) == 5,
        "writer: zio_write narf! works");
    ok (zio_write_eof (zio) == 0,
        "writer: zio_write_eof works");
    ok (flux_reactor_run (r, 0) == 0,
        "writer: reactor completed successfully");
    ok (c.fd_read_errors == 0 && c.fd_read_data == 5 && c.fd_read_eof == 1,
        "writer: read narf + EOF on read end of pipe");
    ok (c.close_writer == 1,
        "writer: close callback invoked");
    zio_destroy (zio);
    ok (init_fds == fdcount (),
        "writer: zio_destroy leaks no file descriptors");

    flux_watcher_destroy (w);
    flux_reactor_destroy (r);

    done_testing ();
}
/* flux-ping main: parse options, resolve the TARGET spec (optional
 * "rank!" prefix, nodeset string, or "all"), then either send all pings
 * up-front (--batch) or drive them from a periodic timer; responses are
 * processed by the reactor.
 */
int main (int argc, char *argv[])
{
    int ch;
    int pad_bytes = 0;
    char *target;
    flux_watcher_t *tw = NULL;
    struct ping_ctx ctx = {
        .period = 1.0,
        .rank = NULL,
        .nodeid = FLUX_NODEID_ANY,
        .topic = NULL,
        .pad = NULL,
        .count = -1,
        .send_count = 0,
        .batch = false,
    };

    log_init ("flux-ping");

    while ((ch = getopt_long (argc, argv, OPTIONS, longopts, NULL)) != -1) {
        switch (ch) {
            case 'h': /* --help */
                usage ();
                break;
            case 'p': /* --pad bytes */
                pad_bytes = strtoul (optarg, NULL, 10);
                break;
            case 'd': /* --delay seconds */
                ctx.period = strtod (optarg, NULL);
                if (ctx.period < 0)
                    usage ();
                break;
            case 'r': /* --rank NODESET */
                ctx.rank = optarg;
                break;
            case 'c': /* --count N */
                ctx.count = strtoul (optarg, NULL, 10);
                break;
            case 'b': /* --batch-request */
                ctx.batch = true;
                break;
            default:
                usage ();
                break;
        }
    }
    if (optind != argc - 1)
        usage ();
    if (ctx.batch && ctx.count == -1)
        log_msg_exit ("--batch should only be used with --count");
    target = argv[optind++];

    /* Create null terminated pad string for reuse in each message.
     * By default it's the empty string.
     */
    ctx.pad = xzmalloc (pad_bytes + 1);
    memset (ctx.pad, 'p', pad_bytes);

    /* If "rank!" is prepended to the target, and there is no --rank
     * argument, snip it off and set the rank.  If it's just the bare
     * rank, assume the target is "cmb".
     */
    if (ctx.rank == NULL) {
        char *p;
        nodeset_t *ns = NULL;
        if ((p = strchr (target, '!'))) {
            *p++ = '\0';
            ctx.rank = target;
            target = p;
        } else if ((ns = nodeset_create_string (target)) != NULL) {
            /* target parses as a nodeset, e.g. "0-3" — treat as ranks */
            ctx.rank = target;
            target = "cmb";
            nodeset_destroy (ns);
        } else if (!strcmp (target, "all")) {
            ctx.rank = target;
            target = "cmb";
        }
    }

    /* Use singleton rpc if there's only one nodeid */
    if (ctx.rank != NULL) {
        nodeset_t *ns = nodeset_create_string (ctx.rank);
        if (ns) {
            if (nodeset_count (ns) == 1) {
                ctx.nodeid = nodeset_min (ns);
                ctx.rank = NULL;
            }
            nodeset_destroy (ns);
        }
    }

    ctx.topic = xasprintf ("%s.ping", target);

    if (!(ctx.h = flux_open (NULL, 0)))
        log_err_exit ("flux_open");
    if (!(ctx.reactor = flux_get_reactor (ctx.h)))
        log_err_exit ("flux_get_reactor");

    /* In batch mode, requests are sent before reactor is started
     * to process responses.  o/w requests are set in a timer watcher.
     */
    if (ctx.batch) {
        while (ctx.send_count < ctx.count) {
            send_ping (&ctx);
            usleep ((useconds_t)(ctx.period * 1E6));
        }
    } else {
        tw = flux_timer_watcher_create (ctx.reactor, ctx.period, ctx.period,
                                        timer_cb, &ctx);
        if (!tw)
            log_err_exit ("error creating watchers");
        flux_watcher_start (tw);
    }

    if (flux_reactor_run (ctx.reactor, 0) < 0)
        log_err_exit ("flux_reactor_run");

    /* Clean up.
     */
    flux_watcher_destroy (tw);
    free (ctx.topic);
    free (ctx.pad);

    flux_close (ctx.h);
    log_fini ();

    return 0;
}