/*
 * Block on a PSM wait object until an event arrives or 'timeout' (ms)
 * elapses.  Returns 0 on success, -FI_ETIMEDOUT on timeout, or a
 * negative error code from the underlying wait primitive.
 */
int psmx_wait_wait(struct fid_wait *wait, int timeout)
{
	struct psmx_fid_wait *wait_priv =
		container_of(wait, struct psmx_fid_wait, wait.fid);
	int ret = 0;

	switch (wait_priv->type) {
	case FI_WAIT_FD:
		/* fi_poll_fd: >0 means ready, 0 means timed out,
		 * <0 is already a negative fabric error code. */
		ret = fi_poll_fd(wait_priv->fd[0], timeout);
		if (ret > 0)
			ret = 0;
		else if (ret == 0)
			ret = -FI_ETIMEDOUT;
		break;
	case FI_WAIT_MUTEX_COND:
		ret = fi_wait_cond(&wait_priv->cond, &wait_priv->mutex, timeout);
		break;
	case FI_WAIT_UNSPEC:
		/* TODO: optimized custom wait */
		/* fallthrough */
	default:
		break;
	}

	return ret;
}
/*
 * Wait until the counter reaches 'threshold' or 'timeout' (ms) expires.
 * A negative timeout waits indefinitely.  Only one waiter is allowed at
 * a time (-FI_EBUSY otherwise); -FI_EAVAIL is returned if the counter
 * has a pending error.
 */
static int sock_cntr_wait(struct fid_cntr *cntr, uint64_t threshold, int timeout)
{
	int ret = 0;
	uint64_t start_ms = 0, end_ms = 0;
	struct sock_cntr *_cntr;

	_cntr = container_of(cntr, struct sock_cntr, cntr_fid);
	pthread_mutex_lock(&_cntr->mut);

	/* Fast-path exits, all taken with the mutex held. */
	if (_cntr->err_flag) {
		ret = -FI_EAVAIL;
		goto out;
	}
	if (atomic_get(&_cntr->value) >= threshold) {
		ret = 0;
		goto out;
	}
	if (_cntr->is_waiting) {
		/* single-waiter design: a second waiter is rejected */
		ret = -FI_EBUSY;
		goto out;
	}

	_cntr->is_waiting = 1;
	/* Publish the threshold so the progress path knows when to signal. */
	atomic_set(&_cntr->threshold, threshold);

	if (_cntr->domain->progress_mode == FI_PROGRESS_MANUAL) {
		/* Manual progress: drop the lock and spin, driving progress
		 * ourselves until the counter reaches the threshold or the
		 * deadline passes. */
		pthread_mutex_unlock(&_cntr->mut);
		if (timeout >= 0) {
			start_ms = fi_gettime_ms();
			end_ms = start_ms + timeout;
		}
		while (atomic_get(&_cntr->value) < threshold) {
			sock_cntr_progress(_cntr);
			if (timeout >= 0 && fi_gettime_ms() >= end_ms) {
				/* Positive code here: negated at the common
				 * return below (-ret). */
				ret = FI_ETIMEDOUT;
				break;
			}
		}
		pthread_mutex_lock(&_cntr->mut);
	} else {
		/* Automatic progress: sleep on the condvar.
		 * NOTE(review): the predicate is not re-checked after wakeup,
		 * so a spurious wakeup returns before the threshold is
		 * reached — presumably tolerated by callers; verify.
		 * NOTE(review): fi_wait_cond appears to return a positive
		 * code (hence the -ret below) — confirm its convention. */
		ret = fi_wait_cond(&_cntr->cond, &_cntr->mut, timeout);
	}

	_cntr->is_waiting = 0;
	/* ~0 disarms the threshold so later increments don't signal. */
	atomic_set(&_cntr->threshold, ~0);
	pthread_mutex_unlock(&_cntr->mut);

	/* Re-run any triggered operations that may now be satisfied. */
	sock_cntr_check_trigger_list(_cntr);

	/* An error raised while we waited takes precedence over timeout. */
	return (_cntr->err_flag) ? -FI_EAVAIL : -ret;
out:
	pthread_mutex_unlock(&_cntr->mut);
	return ret;
}
/*
 * Block until the counter reaches 'threshold' or 'timeout' (ms) expires.
 * A negative timeout waits indefinitely.  Returns 0 on success or the
 * error code produced by fi_wait_cond() on timeout/failure.
 */
static int sock_cntr_wait(struct fid_cntr *cntr, uint64_t threshold, int timeout)
{
	struct sock_cntr *_cntr;
	uint64_t end_ms = 0;
	int ret = 0;

	_cntr = container_of(cntr, struct sock_cntr, cntr_fid);

	/* Fix: establish an absolute deadline once.  The original passed the
	 * full 'timeout' to every fi_wait_cond() call, so each wakeup that
	 * had not yet reached the threshold restarted the clock and the
	 * total wait could far exceed the requested timeout. */
	if (timeout >= 0)
		end_ms = fi_gettime_ms() + timeout;

	pthread_mutex_lock(&_cntr->mut);
	/* Publish the threshold so the progress path knows when to signal. */
	_cntr->threshold = threshold;
	while (_cntr->value < _cntr->threshold && !ret) {
		int wait_ms = timeout;	/* negative: wait forever */

		if (timeout >= 0) {
			uint64_t now = fi_gettime_ms();
			/* Pass only the remaining time; at/after the deadline
			 * pass 0 so fi_wait_cond() itself reports the timeout,
			 * preserving the original return-code convention. */
			wait_ms = (now >= end_ms) ? 0 : (int)(end_ms - now);
		}
		ret = fi_wait_cond(&_cntr->cond, &_cntr->mut, wait_ms);
	}
	/* ~0 disarms the threshold so later increments don't signal. */
	_cntr->threshold = ~0;
	pthread_mutex_unlock(&_cntr->mut);
	return ret;
}
static int sock_cntr_wait(struct fid_cntr *fid_cntr, uint64_t threshold, int timeout) { int last_read, ret = 0; uint64_t start_ms = 0, end_ms = 0, remaining_ms = 0; struct sock_cntr *cntr; cntr = container_of(fid_cntr, struct sock_cntr, cntr_fid); pthread_mutex_lock(&cntr->mut); if (cntr->err_flag) { ret = -FI_EAVAIL; goto out; } if (atomic_get(&cntr->value) >= threshold) { ret = 0; goto out; } atomic_inc(&cntr->num_waiting); if (timeout >= 0) { start_ms = fi_gettime_ms(); end_ms = start_ms + timeout; } last_read = atomic_get(&cntr->value); remaining_ms = timeout; while (!ret && last_read < threshold) { if (cntr->domain->progress_mode == FI_PROGRESS_MANUAL) { pthread_mutex_unlock(&cntr->mut); ret = sock_cntr_progress(cntr); pthread_mutex_lock(&cntr->mut); } else { ret = fi_wait_cond(&cntr->cond, &cntr->mut, remaining_ms); } uint64_t curr_ms = fi_gettime_ms(); if (timeout >= 0) { if (curr_ms >= end_ms) { ret = -FI_ETIMEDOUT; break; } else { remaining_ms = end_ms - curr_ms; } } last_read = atomic_get(&cntr->value); } atomic_set(&cntr->last_read_val, last_read); atomic_dec(&cntr->num_waiting); pthread_mutex_unlock(&cntr->mut); sock_cntr_check_trigger_list(cntr); return (cntr->err_flag) ? -FI_EAVAIL : ret; out: pthread_mutex_unlock(&cntr->mut); return ret; }