static int server_on_request(struct xio_session *session, struct xio_msg *xio_req, int last_in_rxq, void *cb_user_conext) { struct client_info *ci = (struct client_info *)cb_user_conext; struct sd_req *hdr; struct request *req; struct xio_iovec_ex *sglist = vmsg_sglist(&xio_req->in); int nents = vmsg_sglist_nents(&xio_req->in); sd_debug("on request: %p, %p, nents: %d", session, xio_req, nents); hdr = xio_req->in.header.iov_base; req = alloc_request(ci, hdr->data_length); memcpy(&req->rq, hdr, sizeof(req->rq)); if (hdr->data_length && hdr->flags & SD_FLAG_CMD_WRITE) { sd_assert(nents == 1); req->data = sglist[0].iov_base; } xio_req->in.header.iov_base = NULL; xio_req->in.header.iov_len = 0; vmsg_sglist_set_nents(&xio_req->in, 0); ci->xio_req = xio_req; queue_request(req); xio_context_stop_loop(xio_get_main_ctx()); return 0; }
/* check:
 * Runs right after reactor calls poll(2).
 * Stop idle watcher, and send next alloc request, if available.
 */
static void check_cb (flux_reactor_t *r, flux_watcher_t *w, int revents, void *arg)
{
    struct alloc_ctx *ctx = arg;
    struct job *job;

    /* The idle watcher exists only to keep the reactor from blocking in
     * poll(2); stop it now that this check watcher has fired. */
    flux_watcher_stop (ctx->idle);

    /* Do nothing until ready — presumably set once the scheduler has
     * registered/announced itself; verify against the rest of the module. */
    if (!ctx->ready)
        return;
    /* In single mode, at most one alloc request may be outstanding. */
    if (ctx->mode == SCHED_SINGLE && ctx->active_alloc_count > 0)
        return;

    if ((job = queue_first (ctx->inqueue))) {
        if (alloc_request (ctx, job) < 0) {
            /* Treat a failed send as fatal: log and stop the reactor with
             * an error, leaving the job in the inqueue. */
            flux_log_error (ctx->h, "alloc_request fatal error");
            flux_reactor_stop_error (flux_get_reactor (ctx->h));
            return;
        }
        /* Dequeue only after the request was successfully sent, so the job
         * is not lost on the failure path above. */
        queue_delete (ctx->inqueue, job, job->aux_queue_handle);
        job->aux_queue_handle = NULL;
        /* Transition: no longer queued, now awaiting an alloc response. */
        job->alloc_pending = 1;
        job->alloc_queued = 0;
        ctx->active_alloc_count++;
        /* Optionally post a debug event for jobs submitted with debug. */
        if ((job->flags & FLUX_JOB_DEBUG))
            (void)event_job_post_pack (ctx->event_ctx, job,
                                       "debug.alloc-request", NULL);
    }
}