/* N.B. if a new message arrives with an unconsumed one in the rpc handle,
 * push the new one back to the receive queue so it will trigger another
 * reactor callback and handle the cached one now.
 * The reactor will repeatedly call the continuation (level-triggered)
 * until all received responses are consumed.
 */
static void rpc_cb (flux_t h, flux_msg_handler_t *w, const flux_msg_t *msg,
                    void *arg)
{
    flux_rpc_t *rpc = arg;
    /* A continuation must have been registered via flux_rpc_then(). */
    assert (rpc->then_cb != NULL);

    /* Hold a reference across the continuation in case it drops the rpc. */
    flux_rpc_usecount_incr (rpc);
    if (rpc->rx_msg) {
        /* A previous response is still cached and unconsumed: push the
         * new message back so the reactor redelivers it later. */
        if (flux_requeue (rpc->h, msg, FLUX_RQ_HEAD) < 0)
            goto done;
    }
    else {
        /* Cache a copy of this response for the continuation to consume. */
        if (!(rpc->rx_msg = flux_msg_copy (msg, true)))
            goto done;
    }
    rpc->then_cb (rpc, rpc->then_arg);
    if (rpc->rx_msg) {
        /* Continuation did not consume the cached response: requeue it so
         * this callback fires again (level-triggered delivery). */
        if (flux_requeue (rpc->h, rpc->rx_msg, FLUX_RQ_HEAD) < 0)
            goto done;
        rpc->rx_msg = NULL;
    }
done: /* no good way to report flux_requeue() errors */
    if (flux_rpc_completed (rpc))
        flux_msg_handler_stop (rpc->w);
    flux_rpc_usecount_decr (rpc);
}
/* Register (cb != NULL) or clear (cb == NULL) a continuation for this rpc.
 * Returns 0 on success, -1 with errno set on failure.
 * Fails with EINVAL once all expected responses have been received.
 */
int flux_rpc_then (flux_rpc_t *rpc, flux_then_f cb, void *arg)
{
    int rc = -1;

    assert (rpc->magic == RPC_MAGIC);
    if (rpc->rx_count >= rpc->rx_expected) {
        /* Nothing left to wait for: a continuation would never fire. */
        errno = EINVAL;
        goto done;
    }
    if (cb && !rpc->then_cb) {
        /* Installing a continuation: lazily create the message handler,
         * then start it so the reactor dispatches matching responses. */
        if (!rpc->w) {
            if (!(rpc->w = flux_msg_handler_create (rpc->h, rpc->m,
                                                    rpc_cb, rpc)))
                goto done;
        }
        flux_msg_handler_start (rpc->w);
        /* A response (or an error) may already have been received
         * synchronously.  Requeue any cached message so the reactor
         * redelivers it, and advance the rpc state. */
        if (rpc->rx_msg || rpc->rx_errnum) {
            if (rpc->rx_msg)
                if (flux_requeue (rpc->h, rpc->rx_msg, FLUX_RQ_HEAD) < 0)
                    goto done;
            (void)flux_rpc_next (rpc);
        }
    }
    else if (!cb && rpc->then_cb) {
        /* Clearing a previously installed continuation. */
        flux_msg_handler_stop (rpc->w);
    }
    rpc->then_cb = cb;
    rpc->then_arg = arg;
    rc = 0;
done:
    return rc;
}
/* Install (cb != NULL) or remove (cb == NULL) a response continuation.
 * Returns 0 on success, -1 with errno set on failure.
 * A oneway rpc has no responses, so registering on one fails with EINVAL.
 */
int flux_rpc_then (flux_rpc_t *rpc, flux_then_f cb, void *arg)
{
    if (rpc->oneway) {
        errno = EINVAL;
        return -1;
    }
    if (cb != NULL && rpc->then_cb == NULL) {
        /* Transition: no continuation -> continuation installed.
         * Create the message handler on first use, then start it. */
        if (rpc->w == NULL) {
            rpc->w = flux_msg_handler_create (rpc->h, rpc->m, rpc_cb, rpc);
            if (rpc->w == NULL)
                return -1;
        }
        flux_msg_handler_start (rpc->w);
        /* A response may already be cached from a synchronous receive;
         * push it back on the handle so the reactor hands it to the new
         * continuation. */
        if (rpc->rx_msg != NULL) {
            if (flux_requeue (rpc->h, rpc->rx_msg, FLUX_RQ_HEAD) < 0)
                return -1;
            rpc->rx_msg = NULL;
        }
    }
    else if (cb == NULL && rpc->then_cb != NULL) {
        /* Transition: continuation installed -> removed. */
        flux_msg_handler_stop (rpc->w);
    }
    rpc->then_cb = cb;
    rpc->then_arg = arg;
    return 0;
}
/* This test is to make sure that deferred responses are handled in order.
 * Arrange for module to source 10K sequenced responses.  Messages 5000-5499
 * are "put back" on the handle using flux_requeue().  We ensure that
 * the 10K messages are nonetheless received in order.
 * N.B. nodeid is unused here; the request targets FLUX_NODEID_ANY.
 */
void test_putmsg (flux_t *h, uint32_t nodeid)
{
    flux_future_t *f;
    const char *json_str;
    const int count = 10000;        /* total sequenced responses */
    const int defer_start = 5000;   /* first seq diverted to defer list */
    const int defer_count = 500;    /* number of diverted messages */
    json_object *in = Jnew ();
    json_object *out = NULL;
    int seq, myseq = 0;             /* myseq = next expected sequence */
    zlist_t *defer = zlist_new ();
    bool popped = false;            /* true once defer list was requeued */
    flux_msg_t *z;

    if (!defer)
        oom ();
    /* Ask the module to stream 'count' responses; the request itself
     * expects no direct response. */
    Jadd_int (in, "count", count);
    if (!(f = flux_rpc (h, "req.nsrc", Jtostr (in), FLUX_NODEID_ANY,
                        FLUX_RPC_NORESPONSE)))
        log_err_exit ("%s", __FUNCTION__);
    flux_future_destroy (f);
    do {
        flux_msg_t *msg = flux_recv (h, FLUX_MATCH_ANY, 0);
        if (!msg)
            log_err_exit ("%s", __FUNCTION__);
        if (flux_response_decode (msg, NULL, &json_str) < 0)
            log_msg_exit ("%s: decode", __FUNCTION__);
        if (!json_str
                || !(out = Jfromstr (json_str))
                || !Jget_int (out, "seq", &seq))
            log_msg_exit ("%s: decode - payload", __FUNCTION__);
        Jput (out);
        /* Divert messages in [defer_start, defer_start+defer_count) into
         * the defer list.  When the last one arrives, requeue them all at
         * the tail so they are delivered again in their original order. */
        if (seq >= defer_start && seq < defer_start + defer_count && !popped) {
            if (zlist_append (defer, msg) < 0)
                oom ();
            if (seq == defer_start + defer_count - 1) {
                while ((z = zlist_pop (defer))) {
                    if (flux_requeue (h, z, FLUX_RQ_TAIL) < 0)
                        log_err_exit ("%s: flux_requeue", __FUNCTION__);
                    /* NOTE(review): assumes flux_requeue() copies the
                     * message (sibling code here does the same) — verify. */
                    flux_msg_destroy (z);
                }
                popped = true;
            }
            continue;
        }
        if (seq != myseq)
            log_msg_exit ("%s: expected %d got %d",
                          __FUNCTION__, myseq, seq);
        myseq++;
        flux_msg_destroy (msg);
    } while (myseq < count);
    zlist_destroy (&defer);
    Jput (in);
}
/* Drain the deferred-message list *l, pushing each message back onto
 * handle h at the tail of the receive queue.
 * Returns 0 on success; on the first requeue failure, returns -1
 * immediately (remaining entries stay on the list).
 */
static int defer_requeue (zlist_t **l, flux_t h)
{
    if (*l == NULL)
        return 0;
    flux_msg_t *msg;
    while ((msg = zlist_pop (*l)) != NULL) {
        int status = flux_requeue (h, msg, FLUX_RQ_TAIL);
        /* Our reference is no longer needed whether or not the
         * requeue succeeded. */
        flux_msg_destroy (msg);
        if (status < 0)
            return -1;
    }
    return 0;
}
/* This test is to make sure that deferred responses are handled in order.
 * Arrange for module to source 10K sequenced responses.  Messages 5000-5499
 * are "put back" on the handle using flux_requeue().  We ensure that
 * the 10K messages are nonetheless received in order.
 * N.B. nodeid is unused here; the request targets FLUX_NODEID_ANY.
 */
void test_putmsg (flux_t *h, uint32_t nodeid)
{
    flux_future_t *f;
    const char *json_str;
    const int count = 10000;        /* total sequenced responses */
    const int defer_start = 5000;   /* first seq diverted to defer list */
    const int defer_count = 500;    /* number of diverted messages */
    int seq, myseq = 0;             /* myseq = next expected sequence */
    zlist_t *defer = zlist_new ();
    bool popped = false;            /* true once defer list was requeued */
    flux_msg_t *z;
    json_t *o;

    if (!defer)
        oom ();
    /* Ask the module to stream 'count' responses; the request itself
     * expects no direct response. */
    if (!(f = flux_rpc_pack (h, "req.nsrc", FLUX_NODEID_ANY,
                             FLUX_RPC_NORESPONSE,
                             "{s:i}", "count", count)))
        log_err_exit ("%s", __FUNCTION__);
    flux_future_destroy (f);
    do {
        flux_msg_t *msg = flux_recv (h, FLUX_MATCH_ANY, 0);
        if (!msg)
            log_err_exit ("%s", __FUNCTION__);
        if (flux_response_decode (msg, NULL, &json_str) < 0)
            log_msg_exit ("%s: decode", __FUNCTION__);
        if (!json_str
                || !(o = json_loads (json_str, 0, NULL))
                || json_unpack (o, "{s:i}", "seq", &seq) < 0)
            log_msg_exit ("%s: decode - payload", __FUNCTION__);
        json_decref (o);
        /* Divert messages in [defer_start, defer_start+defer_count) into
         * the defer list.  When the last one arrives, requeue them all at
         * the tail so they are delivered again in their original order. */
        if (seq >= defer_start && seq < defer_start + defer_count && !popped) {
            if (zlist_append (defer, msg) < 0)
                oom ();
            if (seq == defer_start + defer_count - 1) {
                while ((z = zlist_pop (defer))) {
                    if (flux_requeue (h, z, FLUX_RQ_TAIL) < 0)
                        log_err_exit ("%s: flux_requeue", __FUNCTION__);
                    /* NOTE(review): assumes flux_requeue() copies the
                     * message (sibling code here does the same) — verify. */
                    flux_msg_destroy (z);
                }
                popped = true;
            }
            continue;
        }
        if (seq != myseq)
            log_msg_exit ("%s: expected %d got %d",
                          __FUNCTION__, myseq, seq);
        myseq++;
        flux_msg_destroy (msg);
    } while (myseq < count);
    zlist_destroy (&defer);
}
/* Requeue all backlogged messages for handler w onto its handle's
 * receive queue (tail), draining the backlog even if some requeues fail.
 * Returns 0 on success; on failure returns -1 with errno set to the
 * largest errno value observed across failed requeues.
 *
 * Fix: flux_msg_destroy() was previously called only on the requeue
 * failure path, leaking every successfully requeued message.  Requeue
 * does not consume the caller's reference (defer_requeue() and
 * handle_cb() in this codebase destroy the message after a successful
 * flux_requeue()), so destroy it unconditionally.
 */
static int backlog_flush (flux_msg_handler_t *w)
{
    int errnum = 0;
    int rc = 0;

    if (w->backlog) {
        flux_msg_t *msg;
        while ((msg = zlist_pop (w->backlog))) {
            if (flux_requeue (w->d->h, msg, FLUX_RQ_TAIL) < 0) {
                /* Keep flushing; remember the worst errno seen. */
                if (errnum < errno) {
                    errnum = errno;
                    rc = -1;
                }
            }
            flux_msg_destroy (msg);
        }
    }
    if (errnum > 0)
        errno = errnum;
    return rc;
}
/* Handle-watcher reactor callback: receive one message (nonblocking) from
 * the dispatch handle and route it to a waiting coproc, a registered
 * handler, or a default disposition (ENOSYS response / trace log).
 * On unrecoverable errors the reactor is stopped and the handle is marked
 * fatal.
 */
static void handle_cb (flux_reactor_t *r, flux_watcher_t *hw,
                       int revents, void *arg)
{
    struct dispatch *d = arg;
    flux_msg_handler_t *w;
    flux_msg_t *msg = NULL;
    int type;

    if (revents & FLUX_POLLERR)
        goto fatal;
    if (!(msg = flux_recv (d->h, FLUX_MATCH_ANY, FLUX_O_NONBLOCK))) {
        /* EAGAIN/EWOULDBLOCK is a spurious wakeup — nothing to do. */
        if (errno != EAGAIN && errno != EWOULDBLOCK)
            goto fatal;
        else
            goto done;
    }
    if (flux_msg_get_type (msg, &type) < 0)
        goto done;
    /* Message matches a coproc that yielded.
     * Resume, arranging for msg to be returned next by flux_recv().
     */
    if ((w = find_waiting_handler (d, msg))) {
        if (flux_requeue (d->h, msg, FLUX_RQ_HEAD) < 0)
            goto fatal;
        zlist_remove (d->waiters, w);
        if (resume_coproc (w) < 0)
            goto fatal;
    /* Message matches a handler.
     * If coproc already running, queue message as backlog.
     * Else if FLUX_O_COPROC, start coproc.
     * If coprocs not enabled, call handler directly.
     */
    }
    else if ((w = find_handler (d, msg))) {
        if (w->coproc && coproc_started (w->coproc)) {
            if (backlog_append (w, &msg) < 0) /* msg now property of backlog */
                goto fatal;
        }
        else if ((flux_flags_get (d->h) & FLUX_O_COPROC)) {
            /* Requeue so the coproc's first flux_recv() sees this msg. */
            if (flux_requeue (d->h, msg, FLUX_RQ_HEAD) < 0)
                goto fatal;
            if (start_coproc (w) < 0)
                goto fatal;
        }
        else {
            w->fn (d->h, w, msg, w->arg);
        }
    /* Message matched nothing.
     * Respond with ENOSYS if it was a request.
     * Else log it if FLUX_O_TRACE
     */
    }
    else {
        if (type == FLUX_MSGTYPE_REQUEST) {
            if (flux_respond (d->h, msg, ENOSYS, NULL))
                goto done;
        }
        else if (flux_flags_get (d->h) & FLUX_O_TRACE) {
            const char *topic = NULL;
            (void)flux_msg_get_topic (msg, &topic);
            fprintf (stderr, "nomatch: %s '%s'\n",
                     flux_msg_typestr (type),
                     topic ? topic : "");
        }
    }
done:
    /* flux_msg_destroy(NULL) is a no-op; safe on the no-message path. */
    flux_msg_destroy (msg);
    return;
fatal:
    flux_msg_destroy (msg);
    flux_reactor_stop_error (r);
    FLUX_FATAL (d->h);
}