/* Publish a job state-change event for job 'id' on 'topic', then block
 * until the event is observed (workaround for asynchronous publish).
 * Errors are logged; the function returns no status.
 */
static void send_create_event (flux_t h, int64_t id, char *topic)
{
    flux_msg_t *msg;
    char *json = NULL;

    /* Use %jd with an intmax_t cast so the format matches int64_t on
     * every platform (%ld is wrong where long is 32 bits).
     */
    if (asprintf (&json, "{\"lwj\":%jd}", (intmax_t)id) < 0) {
        errno = ENOMEM;
        flux_log_error (h, "failed to create state change event");
        goto out;
    }
    if ((msg = flux_event_encode (topic, json)) == NULL) {
        flux_log_error (h, "failed to create state change event");
        goto out;
    }
    if (flux_send (h, msg, 0) < 0)
        flux_log_error (h, "reserved event failed");
    flux_msg_destroy (msg);
    /* Workaround -- wait for our own event to be published with a
     * blocking recv. XXX: Remove when publish is synchronous. */
    wait_for_event (h, id, topic);
out:
    free (json);
}
/* Encode and send request 'n' of an rpc batch: stamp the message with
 * matchtag base + n, resolve FLUX_NODEID_UPSTREAM to this rank with the
 * upstream flag set, then send.  Returns 0 on success, -1 on error.
 */
static int rpc_request_send (flux_rpc_t *rpc, int n, const char *topic,
                             const char *json_str, uint32_t nodeid)
{
    flux_msg_t *msg = NULL;     /* init NULL so cleanup is unconditional */
    int flags = 0;
    int rc = -1;

    if (!(msg = flux_request_encode (topic, json_str)))
        goto done;
    if (flux_msg_set_matchtag (msg, rpc->m.matchtag + n) < 0)
        goto done;
    if (nodeid == FLUX_NODEID_UPSTREAM) {
        /* "upstream" is relative: address the message to our own rank
         * with the UPSTREAM flag so the broker routes it to our parent. */
        flags |= FLUX_MSGFLAG_UPSTREAM;
        if (flux_get_rank (rpc->h, &nodeid) < 0)
            goto done;
    }
    if (flux_msg_set_nodeid (msg, nodeid, flags) < 0)
        goto done;
    if (flux_send (rpc->h, msg, 0) < 0)
        goto done;
    rc = 0;
done:
    flux_msg_destroy (msg);     /* NULL-safe, matches sibling helpers */
    return rc;
}
/* Publish a jsc.state.<name> event carrying job id 'j'.
 * Returns 0 once the event was encoded (a failed send is logged but not
 * treated as fatal, preserving original behavior); -1 on encode failure.
 */
static int send_state_event (flux_t *h, job_state_t st, int64_t j)
{
    flux_msg_t *msg;
    char *topic = NULL;
    int rc = -1;

    if (asprintf (&topic, "jsc.state.%s", jsc_job_num2state (st)) < 0) {
        errno = ENOMEM;
        flux_log_error (h, "create state change event: %s",
                        jsc_job_num2state (st));
        goto done;
    }
    if ((msg = flux_event_pack (topic, "{ s:I }", "lwj", j)) == NULL) {
        flux_log_error (h, "flux_event_pack");
        goto done;
    }
    if (flux_send (h, msg, 0) < 0)
        flux_log_error (h, "flux_send event");
    flux_msg_destroy (msg);
    rc = 0;
done:
    /* removed dead local 'json' (was never assigned, only freed) */
    free (topic);
    return rc;
}
/* Check watcher callback: the reactor loop just unblocked, so emit a
 * keepalive message telling the broker this module is still running.
 * Send failures are logged and otherwise ignored.
 */
static void check_cb (flux_reactor_t *r, flux_watcher_t *w,
                      int revents, void *arg)
{
    ctx_t *ctx = arg;
    flux_msg_t *keepalive;

    keepalive = flux_keepalive_encode (0, FLUX_MODSTATE_RUNNING);
    if (keepalive == NULL || flux_send (ctx->h, keepalive, 0) < 0)
        flux_log_error (ctx->h, "error sending keepalive");
    flux_msg_destroy (keepalive);
}
/* "dropcache-all" subcommand: broadcast a kvs.dropcache event so every
 * rank drops its KVS cache.  Exits on any usage or send error.
 */
void cmd_dropcache_all (flux_t h, int argc, char **argv)
{
    flux_msg_t *event;

    if (argc != 0)
        msg_exit ("dropcache-all: takes no arguments");
    event = flux_event_encode ("kvs.dropcache", NULL);
    if (event == NULL || flux_send (h, event, 0) < 0)
        err_exit ("flux_send");
    flux_msg_destroy (event);
}
/* Encode and send a hello message for 'rank'.
 * Returns 0 on success, -1 on encode or send failure.
 */
static int hello_sendmsg (hello_t *hello, uint32_t rank)
{
    int result = -1;
    flux_msg_t *msg = hello_encode (rank);

    if (msg != NULL && flux_send (hello->h, msg, 0) == 0)
        result = 0;
    flux_msg_destroy (msg);     /* NULL-safe */
    return result;
}
/* Encode and send a request with topic 'topic' and no payload.
 * Returns 0 on success, -1 on failure (diagnostic printed to stderr).
 */
static int send_request (flux_t h, const char *topic)
{
    int rc = -1;
    flux_msg_t *msg = flux_request_encode (topic, NULL);

    if (!msg || flux_send (h, msg, 0) < 0) {
        /* added missing '\n' so diagnostics don't run together */
        fprintf (stderr, "%s: flux_send failed: %s\n",
                 __FUNCTION__, strerror (errno));
        goto done;
    }
    rc = 0;
done:
    flux_msg_destroy (msg);
    return rc;
}
/* TAP test driver (35 assertions): exercises the loop connector, fatal
 * error hooks, message handlers, and rpc "then" continuations.  Most of
 * the test logic runs inside rpctest_begin_cb() once the reactor starts.
 */
int main (int argc, char *argv[])
{
    flux_msg_t *msg;
    flux_t h;
    flux_reactor_t *reactor;

    plan (35);

    /* Point the connector loader at the build tree (no-overwrite). */
    (void)setenv ("FLUX_CONNECTOR_PATH", CONNECTOR_PATH, 0);
    ok ((h = flux_open ("loop://", FLUX_O_COPROC)) != NULL,
        "opened loop connector");
    if (!h)
        BAIL_OUT ("can't continue without loop handle");
    ok ((reactor = flux_get_reactor (h)) != NULL,
        "obtained reactor");
    if (!reactor)
        BAIL_OUT ("can't continue without reactor");
    /* Install a fatal-error hook and trigger it to prove it fires. */
    flux_fatal_set (h, fatal_err, NULL);
    flux_fatal_error (h, __FUNCTION__, "Foo");
    ok (fatal_tested == true,
        "flux_fatal function is called on fatal error");
    /* create nodeset for last _then test */
    ok ((then_ns = nodeset_create ()) != NULL,
        "nodeset created ok");
    ok (flux_msghandler_addvec (h, htab, htablen, NULL) == 0,
        "registered message handlers");
    /* test continues in rpctest_begin_cb() so that rpc calls
     * can sleep while we answer them */
    ok ((msg = flux_request_encode ("rpctest.begin", NULL)) != NULL
        && flux_send (h, msg, 0) == 0,
        "sent message to initiate test");
    ok (flux_reactor_run (reactor, 0) == 0,
        "reactor completed normally");
    flux_msg_destroy (msg);
    /* Check result of last _then test */
    ok (nodeset_count (then_ns) == 128,
        "then callback worked with correct nodemap");
    nodeset_destroy (then_ns);
    flux_rpc_destroy (then_r);
    flux_close (h);
    done_testing();
    return (0);
}
/* Minimal event publisher: connect to the local broker, publish a
 * "snack.bar.closing" event with no payload, and exit.  Any failure
 * terminates with a logged error.
 */
int main (int argc, char **argv)
{
    flux_t h;
    flux_msg_t *msg;

    h = flux_open (NULL, 0);
    if (h == NULL)
        log_err_exit ("flux_open");
    msg = flux_event_encode ("snack.bar.closing", NULL);
    if (msg == NULL)
        log_err_exit ("flux_event_encode");
    if (flux_send (h, msg, 0) < 0)
        log_err_exit ("flux_send");
    flux_msg_destroy (msg);
    flux_close (h);
    return (0);
}
/* Send sched.free request for job.
 * Update flags.
 * Returns 0 on success, -1 on encode/pack/send failure.
 */
int free_request (struct alloc_ctx *ctx, struct job *job)
{
    int rc = -1;
    flux_msg_t *msg = flux_request_encode ("sched.free", NULL);

    if (msg == NULL)
        return -1;
    if (flux_msg_pack (msg, "{s:I}", "id", job->id) == 0
            && flux_send (ctx->h, msg, 0) == 0)
        rc = 0;
    flux_msg_destroy (msg);
    return rc;
}
/* Heartbeat timer callback: encode and broadcast the next heartbeat
 * epoch.  The epoch counter advances even if encoding or sending fails
 * (same as original); errors are logged and otherwise ignored.
 */
static void timer_cb (flux_reactor_t *r, flux_watcher_t *w,
                      int revents, void *arg)
{
    heartbeat_t *hb = arg;
    flux_msg_t *hb_msg = flux_heartbeat_encode (hb->send_epoch++);

    if (hb_msg == NULL) {
        log_err ("heartbeat_encode");
    } else if (flux_send (hb->h, hb_msg, 0) < 0) {
        log_err ("flux_send");
    }
    flux_msg_destroy (hb_msg);
}
/* Encode a raw-payload request, prepare it for rpc routing (matchtag,
 * nodeid), and send it.  Returns 0 on success, -1 on any failure.
 */
static int rpc_request_send_raw (flux_rpc_t *rpc, const char *topic,
                                 uint32_t nodeid, const void *data, int len)
{
    int rc = -1;
    flux_msg_t *msg = flux_request_encode_raw (topic, data, len);

    if (msg != NULL
            && rpc_request_prepare (rpc, msg, nodeid) == 0
            && flux_send (rpc->h, msg, 0) == 0)
        rc = 0;
    flux_msg_destroy (msg);
    return rc;
}
/* Encode a JSON-payload request, prepare it for rpc routing (matchtag,
 * nodeid), and send it.  Returns 0 on success, -1 on any failure.
 */
static int rpc_request_send (flux_rpc_t *rpc, const char *topic,
                             uint32_t nodeid, const char *json_str)
{
    int rc = -1;
    flux_msg_t *msg = flux_request_encode (topic, json_str);

    if (msg != NULL
            && rpc_request_prepare (rpc, msg, nodeid) == 0
            && flux_send (rpc->h, msg, 0) == 0)
        rc = 0;
    flux_msg_destroy (msg);
    return rc;
}
/* Publish a barrier.exit event carrying the barrier 'name' and its
 * completion 'errnum'.  Returns 0 on success, -1 on failure.
 */
static int exit_event_send (flux_t *h, const char *name, int errnum)
{
    int rc = -1;
    json_object *o = Jnew ();
    flux_msg_t *msg;

    Jadd_str (o, "name", name);
    Jadd_int (o, "errnum", errnum);
    msg = flux_event_encode ("barrier.exit", Jtostr (o));
    if (msg != NULL && flux_send (h, msg, 0) == 0)
        rc = 0;
    Jput (o);
    flux_msg_destroy (msg);
    return rc;
}
/* Encode a request whose JSON payload is built from printf-style
 * 'fmt'/'ap', prepare it for rpc routing, and send it.
 * Returns 0 on success, -1 on any failure.
 */
static int rpc_request_vsendf (flux_rpc_t *rpc, const char *topic,
                               uint32_t nodeid, const char *fmt, va_list ap)
{
    int rc = -1;
    flux_msg_t *msg = flux_request_encode (topic, NULL);

    if (msg != NULL
            && flux_msg_vset_jsonf (msg, fmt, ap) == 0
            && rpc_request_prepare (rpc, msg, nodeid) == 0
            && flux_send (rpc->h, msg, 0) == 0)
        rc = 0;
    flux_msg_destroy (msg);
    return rc;
}
/* Broadcast a shutdown announcement (grace period, exit code, rank, and
 * a printf-style reason) unless one is already pending (s->timer set --
 * presumably armed by a prior call; verify against callers).
 * Returns 0 on success or no-op, -1 on encode/send failure.
 */
int shutdown_arm (shutdown_t *s, double grace, int exitcode,
                  const char *fmt, ...)
{
    int rc = -1;
    flux_msg_t *msg = NULL;

    if (s->timer) {
        rc = 0;                 /* already armed: nothing to send */
    } else {
        va_list ap;
        va_start (ap, fmt);
        msg = shutdown_vencode (grace, exitcode, s->myrank, fmt, ap);
        va_end (ap);
        if (msg != NULL && flux_send (s->h, msg, 0) == 0)
            rc = 0;
    }
    flux_msg_destroy (msg);     /* NULL-safe */
    return rc;
}
/* Build a disconnect request on behalf of client 'c' (routed with the
 * client's uuid so services can clean up per-client state) and send it,
 * then free the notification record.  Send errors are ignored.
 */
static void disconnect_destroy (client_t *c, struct disconnect_notify *d)
{
    flux_msg_t *msg = flux_msg_create (FLUX_MSGTYPE_REQUEST);

    if (msg != NULL
            && flux_msg_set_topic (msg, d->topic) == 0
            && flux_msg_enable_route (msg) == 0
            && flux_msg_push_route (msg, zuuid_str (c->uuid)) == 0
            && flux_msg_set_nodeid (msg, d->nodeid, d->flags) == 0)
        (void)flux_send (c->ctx->h, msg, 0);
    flux_msg_destroy (msg);
    free (d->topic);
    free (d);
}
/* Send sched.alloc request for job. * Update flags. */ int alloc_request (struct alloc_ctx *ctx, struct job *job) { flux_msg_t *msg; if (!(msg = flux_request_encode ("sched.alloc", NULL))) return -1; if (flux_msg_pack (msg, "{s:I s:i s:i s:f}", "id", job->id, "priority", job->priority, "userid", job->userid, "t_submit", job->t_submit) < 0) goto error; if (flux_send (ctx->h, msg, 0) < 0) goto error; flux_msg_destroy (msg); return 0; error: flux_msg_destroy (msg); return -1; }
// Send out a call to all modules that the simulation is starting // and that they should join int send_start_event (flux_t *h) { int rc = 0; flux_msg_t *msg = NULL; uint32_t rank; if (flux_get_rank (h, &rank) < 0) return -1; if (!(msg = flux_event_pack ("sim.start", "{ s:s s:i s:i }", "mod_name", "sim", "rank", rank, "sim_time", 0)) || flux_send (h, msg, 0) < 0) { rc = -1; } flux_msg_destroy (msg); return rc; }
/* TAP test driver (33 assertions): exercises the loop connector, rpc
 * check/then continuations, and a "thenbug" corner case where a response
 * is cached in the flux_rpc_t before _then() is registered.
 */
int main (int argc, char *argv[])
{
    flux_msg_t *msg;
    flux_t h;

    plan (33);

    /* Point the connector loader at the build tree (no-overwrite). */
    (void)setenv ("FLUX_CONNECTOR_PATH", CONNECTOR_PATH, 0);
    ok ((h = flux_open ("loop://", FLUX_O_COPROC)) != NULL,
        "opened loop connector");
    if (!h)
        BAIL_OUT ("can't continue without loop handle");
    flux_fatal_set (h, fatal_err, NULL);
    ok (flux_msg_watcher_addvec (h, htab, NULL) == 0,
        "registered message handlers");
    /* test continues in rpctest_begin_cb() so that rpc calls
     * can sleep while we answer them */
    ok ((msg = flux_request_encode ("rpctest.begin", NULL)) != NULL,
        "encoded rpctest.begin request OK");
    ok (flux_send (h, msg, 0) == 0,
        "sent rpctest.begin request");
    ok (flux_reactor_start (h) == 0,
        "reactor completed normally");
    flux_msg_destroy (msg);
    /* test _then: Slightly tricky.
     * Send request. We're not in a coproc ctx here in main(), so there
     * will be no response, therefore, check will be false.  Register
     * continuation, start reactor.  Response will be received, continuation
     * will be invoked.  Continuation stops the reactor.
     */
    flux_rpc_t *r;
    ok ((r = flux_rpc (h, "rpctest.echo", "xxx", FLUX_NODEID_ANY, 0)) != NULL,
        "flux_rpc with payload when payload is expected works");
    ok (flux_rpc_check (r) == false,
        "flux_rpc_check says get would block");
    /* reg/unreg _then a couple times for fun */
    ok (flux_rpc_then (r, NULL, 0) == 0,
        "flux_rpc_then with NULL cb works");
    ok (flux_rpc_then (r, then_cb, h) == 0,
        "flux_rpc_then works after NULL");
    ok (flux_rpc_then (r, NULL, 0) == 0,
        "flux_rpc_then with NULL cb after non-NULL works");
    ok (flux_rpc_then (r, then_cb, h) == 0,
        "flux_rpc_then works");
    /* enough of that */
    ok (flux_reactor_start (h) == 0,
        "reactor completed normally");
    flux_rpc_destroy (r);
    /* Test a _then corner case:
     * If _check() is called before _then(), a message may have been cached
     * in the flux_rpc_t.  rpctest_thenbug_cb creates this condition.
     * Next, _then continuation is installed, but will reactor call it?
     * This will hang if rpc implementation doesn't return a cached message
     * back to the handle in _then().  Else, continuation will stop reactor.
     */
    ok ((thenbug_r = flux_rpc (h, "rpctest.echo", "xxx",
        FLUX_NODEID_ANY, 0)) != NULL,
        "thenbug: sent echo request");
    /* Loop until the response is cached in thenbug_r by _check(). */
    do {
        if (!(msg = flux_request_encode ("rpctest.thenbug", NULL))
            || flux_send (h, msg, 0) < 0
            || flux_reactor_start (h) < 0) {
            flux_msg_destroy (msg);
            break;
        }
        flux_msg_destroy (msg);
    } while (!flux_rpc_check (thenbug_r));
    ok (true,
        "thenbug: check says message ready");
    ok (flux_rpc_then (thenbug_r, then_cb, h) == 0,
        "thenbug: registered then - hangs on failure");
    ok (flux_reactor_start (h) == 0,
        "reactor completed normally");
    flux_rpc_destroy (thenbug_r);
    flux_msg_watcher_delvec (h, htab);
    flux_close (h);
    done_testing();
    return (0);
}
/* fd watcher callback: read one message from a connected API client and
 * dispatch it.  Requests are tagged with the client's route and forwarded
 * to the broker (subscribe/unsubscribe requests are handled locally);
 * events are forwarded as-is.  Protocol errors or EOF tear the client down.
 */
static void client_read_cb (flux_t h, flux_fd_watcher_t *w, int fd,
                            int revents, void *arg)
{
    client_t *c = arg;
    flux_msg_t *msg = NULL;
    int type;

    if (revents & FLUX_POLLERR)
        goto disconnect;
    if (!(revents & FLUX_POLLIN))
        return;
    /* EPROTO, ECONNRESET are normal disconnect errors
     * EWOULDBLOCK, EAGAIN stores state in c->inbuf for continuation
     */
    //flux_log (h, LOG_DEBUG, "recv: client ready");
    if (!(msg = flux_msg_recvfd (c->fd, &c->inbuf))) {
        if (errno == EWOULDBLOCK || errno == EAGAIN) {
            /* partial read: state is saved in c->inbuf, retry next POLLIN */
            //flux_log (h, LOG_DEBUG, "recv: client not ready");
            return;
        }
        if (errno != ECONNRESET && errno != EPROTO)
            flux_log (h, LOG_ERR, "flux_msg_recvfd: %s", strerror (errno));
        goto disconnect;
    }
    if (flux_msg_get_type (msg, &type) < 0) {
        flux_log (h, LOG_ERR, "flux_msg_get_type: %s", strerror (errno));
        goto disconnect;
    }
    switch (type) {
        /* declarations here are scoped to the switch body (no initializers
         * would run; these have none) */
        const char *name;
        subscription_t *sub;
        case FLUX_MSGTYPE_REQUEST:
            if (match_substr (msg, "api.event.subscribe.", &name)) {
                /* local: record the client's event subscription */
                sub = subscription_create (h, FLUX_MSGTYPE_EVENT, name);
                if (zlist_append (c->subscriptions, sub) < 0)
                    oom ();
            } else if (match_substr (msg, "api.event.unsubscribe.", &name)) {
                /* local: drop a matching subscription if one exists */
                if ((sub = subscription_lookup (c, FLUX_MSGTYPE_EVENT, name)))
                    zlist_remove (c->subscriptions, sub);
            } else {
                /* insert disconnect notifier before forwarding request */
                if (c->disconnect_notify && disconnect_update (c, msg) < 0) {
                    flux_log (h, LOG_ERR, "disconnect_update: %s",
                              strerror (errno));
                    goto disconnect;
                }
                /* stamp route with client uuid so the reply finds its way
                 * back to this client */
                if (flux_msg_push_route (msg, zuuid_str (c->uuid)) < 0)
                    oom (); /* FIXME */
                if (flux_send (h, msg, 0) < 0)
                    err ("%s: flux_send", __FUNCTION__);
            }
            break;
        case FLUX_MSGTYPE_EVENT:
            /* events pass through unmodified */
            if (flux_send (h, msg, 0) < 0)
                err ("%s: flux_send", __FUNCTION__);
            break;
        default:
            flux_log (h, LOG_ERR, "drop unexpected %s",
                      flux_msg_typestr (type));
            break;
    }
    flux_msg_destroy (msg);
    return;
disconnect:
    /* fatal client error: destroy message and tear down the client */
    flux_msg_destroy (msg);
    zlist_remove (c->ctx->clients, c);
    client_destroy (c);
}