/* Create the alloc context: install message handlers and the
 * prep/check/idle reactor watchers used to pace alloc requests.
 * 'h', 'queue', and 'event_ctx' are borrowed references; the caller
 * retains ownership.  The new ctx owns 'inqueue', the watchers, and
 * the registered handlers, all released by alloc_ctx_destroy ().
 * Returns ctx on success, or NULL with errno set on failure.
 */
struct alloc_ctx *alloc_ctx_create (flux_t *h, struct queue *queue,
                                    struct event_ctx *event_ctx)
{
    struct alloc_ctx *ctx;
    flux_reactor_t *r = flux_get_reactor (h);
    int saved_errno;

    if (!(ctx = calloc (1, sizeof (*ctx))))
        return NULL;
    ctx->h = h;
    ctx->queue = queue;
    ctx->event_ctx = event_ctx;
    if (!(ctx->inqueue = queue_create (false)))
        goto error;
    if (flux_msg_handler_addvec (h, htab, ctx, &ctx->handlers) < 0)
        goto error;
    ctx->prep = flux_prepare_watcher_create (r, prep_cb, ctx);
    ctx->check = flux_check_watcher_create (r, check_cb, ctx);
    ctx->idle = flux_idle_watcher_create (r, NULL, NULL);
    if (!ctx->prep || !ctx->check || !ctx->idle) {
        errno = ENOMEM;
        goto error;
    }
    /* idle watcher is started on demand by prep_cb/check_cb, so only
     * prep and check are started here.
     */
    flux_watcher_start (ctx->prep);
    flux_watcher_start (ctx->check);
    event_ctx_set_alloc_ctx (event_ctx, ctx);
    return ctx;
error:
    /* Preserve errno across cleanup: alloc_ctx_destroy () makes library
     * calls that may overwrite it, and callers rely on errno to report
     * why creation failed.
     */
    saved_errno = errno;
    alloc_ctx_destroy (ctx);
    errno = saved_errno;
    return NULL;
}
/* Register the standard built-in module services (shutdown, ping,
 * stats.get/clear, rusage) and the prep/check watchers that drive
 * deferred service work.  Watcher creation failure is fatal
 * (log_err_exit).
 *
 * Fix: take 'flux_t *h' rather than 'flux_t h' — flux_t is an opaque
 * struct in the current API and is passed by pointer everywhere else
 * in this code.
 */
void modservice_register (flux_t *h, module_t *p)
{
    ctx_t *ctx = getctx (h, p);
    flux_reactor_t *r = flux_get_reactor (h);

    register_request (ctx, "shutdown", shutdown_cb);
    register_request (ctx, "ping", ping_cb);
    register_request (ctx, "stats.get", stats_get_cb);
    register_request (ctx, "stats.clear", stats_clear_request_cb);
    register_request (ctx, "rusage", rusage_cb);

    register_event (ctx, "stats.clear", stats_clear_event_cb);

    if (!(ctx->w_prepare = flux_prepare_watcher_create (r, prepare_cb, ctx)))
        log_err_exit ("flux_prepare_watcher_create");
    if (!(ctx->w_check = flux_check_watcher_create (r, check_cb, ctx)))
        log_err_exit ("flux_check_watcher_create");
    flux_watcher_start (ctx->w_prepare);
    flux_watcher_start (ctx->w_check);
}
/* commit_order test driver: perform --count sequential KVS commits to KEY
 * while watching KEY, keeping at most --fanout put RPCs in flight, so that
 * watch responses can be checked for ordering by the continuations.
 */
int main (int argc, char *argv[])
{
    flux_t *h;
    flux_reactor_t *r;
    int last = -1;          /* sequence number of last watch response seen */
    int ch;
    flux_future_t *f;

    log_init ("commit_order");

    while ((ch = getopt_long (argc, argv, OPTIONS, longopts, NULL)) != -1) {
        switch (ch) {
            case 'h': /* --help */
                usage ();
                break;
            case 'v': /* --verbose */
                verbose = true;
                break;
            case 'c': /* --count N */
                /* NOTE(review): strtoul () without endptr/errno checks —
                 * non-numeric input yields 0 (rejected below), but trailing
                 * garbage like "5x" parses as 5.
                 */
                totcount = strtoul (optarg, NULL, 10);
                break;
            case 'f': /* --fanout N */
                max_queue_depth = strtoul (optarg, NULL, 10);
                break;
            case 'n': /* --namespace=NAME */
                if (!(ns = strdup (optarg)))
                    log_err_exit ("out of memory");
                break;
            default:
                usage ();
                break;
        }
    }
    /* Exactly one free argument is required: the key to commit/watch. */
    if (optind != argc - 1)
        usage ();
    key = argv[optind++];
    if (totcount < 1 || max_queue_depth < 1)
        usage ();

    if (!(h = flux_open (NULL, 0)))
        log_err_exit ("flux_open");
    if (!(r = flux_get_reactor (h)))
        log_err_exit ("flux_get_reactor");

    /* One synchronous put before watch request, so that
     * watch request doesn't fail with ENOENT.
     */
    f = commit_int (h, key, txcount++);
    commit_continuation (f, NULL); // destroys f, increments rxcount

    /* Configure watcher
     * Wait for one response before unleashing async puts, to ensure
     * that first value is captured.
     */
    if (!(f = flux_kvs_lookup (h, ns, FLUX_KVS_WATCH, key)))
        log_err_exit ("flux_kvs_lookup");
    watch_continuation (f, &last); // resets f, increments wrxcount
    /* Remaining watch responses are handled asynchronously on the same
     * (reset) future; -1. disables the continuation timeout.
     */
    if (flux_future_then (f, -1., watch_continuation, &last) < 0)
        log_err_exit ("flux_future_then");

    /* Configure mechanism to keep max_queue_depth (--fanout) put RPCs
     * outstanding until totcount (--count) reached.
     */
    if (!(w_prep = flux_prepare_watcher_create (r, prep, NULL)))
        log_err_exit ("flux_prepare_watcher_create");
    if (!(w_check = flux_check_watcher_create (r, check, h)))
        log_err_exit ("flux_check_watcher_create");
    if (!(w_idle = flux_idle_watcher_create (r, NULL, NULL)))
        log_err_exit ("flux_idle_watcher_create");
    flux_watcher_start (w_prep);
    flux_watcher_start (w_check);

    /* Run until work is exhausted.
     */
    if (flux_reactor_run (r, 0) < 0)
        log_err_exit ("flux_reactor_run");

    flux_watcher_destroy (w_prep);
    flux_watcher_destroy (w_check);
    flux_watcher_destroy (w_idle);

    free (ns);
    flux_close (h);
    log_fini ();

    return 0;
}