/*
 * Progress a flush request on a UCP endpoint: try to start a UCT flush on
 * every lane still marked in req->send.flush.lanes, updating the completion
 * counter as lanes finish or become in-progress.
 *
 * Per-lane outcomes of uct_ep_flush():
 *  - NULL uct_ep        : nothing to flush; drop lane bit and counter.
 *  - UCS_OK             : flushed synchronously, no callback will fire;
 *                         drop lane bit and counter.
 *  - UCS_INPROGRESS     : flush started; the uct_comp callback decrements
 *                         the counter later, so only the lane bit is cleared.
 *  - UCS_ERR_NO_RESOURCE: queue the request on this lane's pending queue.
 *                         The request can sit on at most one pending queue
 *                         at a time, tracked by req->send.lane.
 *  - anything else      : fatal flush error.
 */
static void ucp_ep_flush_progress(ucp_request_t *req)
{
    ucp_ep_h ep = req->send.ep;
    ucp_lane_index_t lane;
    ucs_status_t status;
    uct_ep_h uct_ep;

    ucs_trace("ep %p: progress flush req %p, lanes 0x%x count %d", ep, req,
              req->send.flush.lanes, req->send.uct_comp.count);

    while (req->send.flush.lanes) {

        /* Search for next lane to start flush */
        lane   = ucs_ffs64(req->send.flush.lanes);
        uct_ep = ep->uct_eps[lane];
        if (uct_ep == NULL) {
            /* Lane has no transport endpoint - treat as already flushed */
            req->send.flush.lanes &= ~UCS_BIT(lane);
            --req->send.uct_comp.count;
            continue;
        }

        /* Start flush operation on UCT endpoint */
        status = uct_ep_flush(uct_ep, 0, &req->send.uct_comp);
        ucs_trace("flushing ep %p lane[%d]: %s", ep, lane,
                  ucs_status_string(status));
        if (status == UCS_OK) {
            /* Completed synchronously - no completion callback will run */
            req->send.flush.lanes &= ~UCS_BIT(lane);
            --req->send.uct_comp.count;
        } else if (status == UCS_INPROGRESS) {
            /* Started; counter is decremented by the completion callback */
            req->send.flush.lanes &= ~UCS_BIT(lane);
        } else if (status == UCS_ERR_NO_RESOURCE) {
            if (req->send.lane != UCP_NULL_LANE) {
                /* Already queued on another lane's pending queue - stop here;
                 * remaining lanes are retried when that pending request runs
                 * (see ucp_ep_flush_progress_pending). */
                ucs_trace("ep %p: not adding pending flush %p on lane %d, "
                          "because it's already pending on lane %d",
                          ep, req, lane, req->send.lane);
                break;
            }

            req->send.lane = lane;
            status = uct_ep_pending_add(uct_ep, &req->send.uct);
            ucs_trace("adding pending flush on ep %p lane[%d]: %s", ep, lane,
                      ucs_status_string(status));
            if (status == UCS_OK) {
                req->send.flush.lanes &= ~UCS_BIT(lane);
            } else if (status != UCS_ERR_BUSY) {
                ucp_ep_flush_error(req, status);
            }
            /* NOTE(review): on UCS_ERR_BUSY (resources freed up, retry flush)
             * req->send.lane is left set to this lane; if the retried flush
             * returns NO_RESOURCE again, the branch above breaks out of the
             * loop - confirm this interplay with the slow-path resume is
             * intended. */
        } else {
            ucp_ep_flush_error(req, status);
        }
    }
}
/*
 * Tear down the wireup state of an endpoint: synchronously flush and destroy
 * the auxiliary wireup endpoints (if any), then remove the endpoint from the
 * worker's hash table under the async lock.
 *
 * Note: the flush loops spin (driving worker progress) until the transport
 * reports UCS_OK; any non-OK status, including real errors, keeps retrying.
 */
void ucp_ep_wireup_stop(ucp_ep_h ep)
{
    ucp_worker_h worker = ep->worker;

    ucs_trace_func("ep=%p", ep);

    if (ep->uct.next_ep != NULL) {
        /* Fixed: pass flags and completion explicitly, matching the
         * uct_ep_flush(ep, flags, comp) signature used everywhere else in
         * this file (the old one-argument call does not match it). */
        while (uct_ep_flush(ep->uct.next_ep, 0, NULL) != UCS_OK) {
            ucp_worker_progress(ep->worker);
        }
        uct_ep_destroy(ep->uct.next_ep);
    }

    if (ep->wireup_ep != NULL) {
        while (uct_ep_flush(ep->wireup_ep, 0, NULL) != UCS_OK) {
            ucp_worker_progress(ep->worker);
        }
        uct_ep_destroy(ep->wireup_ep);
    }

    /* Hash-table removal must not race with async progress */
    UCS_ASYNC_BLOCK(&worker->async);
    sglib_hashed_ucp_ep_t_delete(worker->ep_hash, ep);
    UCS_ASYNC_UNBLOCK(&worker->async);
}
/*
 * Blocking flush of all lanes of a UCP endpoint.
 *
 * Drives worker progress while each lane's UCT flush is in progress or out
 * of resources; returns the first fatal error, or UCS_OK when every lane is
 * flushed.
 *
 * @param ep  Endpoint to flush.
 * @return UCS_OK on success, otherwise the first non-retryable flush status.
 */
ucs_status_t ucp_ep_flush(ucp_ep_h ep)
{
    ucp_lane_index_t lane;
    ucs_status_t status;

    for (lane = 0; lane < ucp_ep_num_lanes(ep); ++lane) {
        /* Fixed: skip lanes without a transport endpoint - the progress path
         * (ucp_ep_flush_progress) shows uct_eps[lane] may be NULL, and
         * uct_ep_flush must not be called on a NULL endpoint. */
        if (ep->uct_eps[lane] == NULL) {
            continue;
        }

        for (;;) {
            status = uct_ep_flush(ep->uct_eps[lane], 0, NULL);
            if (status == UCS_OK) {
                break;
            } else if ((status != UCS_INPROGRESS) &&
                       (status != UCS_ERR_NO_RESOURCE)) {
                return status;
            }
            /* Retryable: drive progress and try this lane again */
            ucp_worker_progress(ep->worker);
        }
    }
    return UCS_OK;
}
/*
 * Pending-queue callback: resume a flush request that was previously queued
 * on a lane (req->send.lane) because the transport had no resources.
 *
 * Retries the flush on that lane, then re-runs ucp_ep_flush_progress() for
 * the remaining lanes and checks for overall completion. If the request is
 * still incomplete, registers a slow-path progress callback to resume later.
 *
 * @return UCS_OK to remove the request from the pending queue, or
 *         UCS_ERR_NO_RESOURCE to keep it queued and be called again.
 */
static ucs_status_t ucp_ep_flush_progress_pending(uct_pending_req_t *self)
{
    ucp_request_t *req   = ucs_container_of(self, ucp_request_t, send.uct);
    ucp_lane_index_t lane = req->send.lane;
    ucp_ep_h ep          = req->send.ep;
    ucs_status_t status;
    int completed;

    ucs_assert(!(req->flags & UCP_REQUEST_FLAG_COMPLETED));

    status = uct_ep_flush(ep->uct_eps[lane], 0, &req->send.uct_comp);
    ucs_trace("flushing ep %p lane[%d]: %s", ep, lane,
              ucs_status_string(status));
    if (status == UCS_OK) {
        --req->send.uct_comp.count; /* UCT endpoint is flushed */
    }

    /* Since req->send.lane is still set (not UCP_NULL_LANE), the progress
     * function will not add this request to any pending queue. */
    ucp_ep_flush_progress(req);
    completed = ucp_flush_check_completion(req);

    /* If the operation has not completed, add slow-path progress to resume */
    if (!completed && req->send.flush.lanes && !req->send.flush.cbq_elem_on) {
        ucs_trace("ep %p: adding slow-path callback to resume flush", ep);
        req->send.flush.cbq_elem.cb = ucp_ep_flush_resume_slow_path_callback;
        req->send.flush.cbq_elem_on = 1;
        uct_worker_slowpath_progress_register(ep->worker->uct,
                                              &req->send.flush.cbq_elem);
    }

    if ((status == UCS_OK) || (status == UCS_INPROGRESS)) {
        /* Lane no longer on a pending queue - allow re-queueing elsewhere */
        req->send.lane = UCP_NULL_LANE;
        return UCS_OK;
    } else if (status == UCS_ERR_NO_RESOURCE) {
        /* Stay on the pending queue; we will be called again */
        return UCS_ERR_NO_RESOURCE;
    } else {
        ucp_ep_flush_error(req, status);
        return UCS_OK;
    }
}