/*
 * Increment the counter value by one and wake up any blocked waiters.
 *
 * The value update and the wakeup are performed under cntr->mut so a
 * waiter cannot miss the broadcast between testing the value and
 * blocking on the condition variable.  Triggered operations queued on
 * this counter are re-checked after the lock is dropped.
 */
void sock_cntr_inc(struct sock_cntr *cntr)
{
	int waiters;

	pthread_mutex_lock(&cntr->mut);
	atomic_inc(&cntr->value);

	/* Only broadcast when at least one thread is blocked in a wait. */
	waiters = atomic_get(&cntr->num_waiting);
	if (waiters)
		pthread_cond_broadcast(&cntr->cond);
	pthread_mutex_unlock(&cntr->mut);

	/* The new value may satisfy a queued triggered operation. */
	sock_cntr_check_trigger_list(cntr);
}
static int sock_cntr_wait(struct fid_cntr *cntr, uint64_t threshold, int timeout) { int ret = 0; uint64_t start_ms = 0, end_ms = 0; struct sock_cntr *_cntr; _cntr = container_of(cntr, struct sock_cntr, cntr_fid); pthread_mutex_lock(&_cntr->mut); if (_cntr->err_flag) { ret = -FI_EAVAIL; goto out; } if (atomic_get(&_cntr->value) >= threshold) { ret = 0; goto out; } if (_cntr->is_waiting) { ret = -FI_EBUSY; goto out; } _cntr->is_waiting = 1; atomic_set(&_cntr->threshold, threshold); if (_cntr->domain->progress_mode == FI_PROGRESS_MANUAL) { pthread_mutex_unlock(&_cntr->mut); if (timeout >= 0) { start_ms = fi_gettime_ms(); end_ms = start_ms + timeout; } while (atomic_get(&_cntr->value) < threshold) { sock_cntr_progress(_cntr); if (timeout >= 0 && fi_gettime_ms() >= end_ms) { ret = FI_ETIMEDOUT; break; } } pthread_mutex_lock(&_cntr->mut); } else { ret = fi_wait_cond(&_cntr->cond, &_cntr->mut, timeout); } _cntr->is_waiting = 0; atomic_set(&_cntr->threshold, ~0); pthread_mutex_unlock(&_cntr->mut); sock_cntr_check_trigger_list(_cntr); return (_cntr->err_flag) ? -FI_EAVAIL : -ret; out: pthread_mutex_unlock(&_cntr->mut); return ret; }
/*
 * Increment the counter and signal the waiter once its published
 * threshold has been crossed.  Always returns 0.
 */
int sock_cntr_inc(struct sock_cntr *cntr)
{
	int cur, thresh;

	pthread_mutex_lock(&cntr->mut);
	atomic_inc(&cntr->value);

	cur = atomic_get(&cntr->value);
	thresh = atomic_get(&cntr->threshold);
	if (cur >= thresh)
		pthread_cond_signal(&cntr->cond);

	pthread_mutex_unlock(&cntr->mut);

	/* The new value may satisfy a queued triggered operation. */
	sock_cntr_check_trigger_list(cntr);
	return 0;
}
ssize_t sock_queue_atomic_op(struct fid_ep *ep, const struct fi_msg_atomic *msg, const struct fi_ioc *comparev, size_t compare_count, struct fi_ioc *resultv, size_t result_count, uint64_t flags, uint8_t op_type) { struct sock_cntr *cntr; struct sock_trigger *trigger; struct fi_triggered_context *trigger_context; struct fi_trigger_threshold *threshold; trigger_context = (struct fi_triggered_context *) msg->context; if ((flags & FI_INJECT) || !trigger_context || (trigger_context->event_type != FI_TRIGGER_THRESHOLD)) return -FI_EINVAL; threshold = &trigger_context->trigger.threshold; cntr = container_of(threshold->cntr, struct sock_cntr, cntr_fid); if (atomic_get(&cntr->value) >= threshold->threshold) return 1; trigger = calloc(1, sizeof(*trigger)); if (!trigger) return -FI_ENOMEM; trigger->threshold = threshold->threshold; memcpy(&trigger->op.atomic.msg, msg, sizeof(*msg)); trigger->op.atomic.msg.msg_iov = &trigger->op.atomic.msg_iov[0]; trigger->op.atomic.msg.rma_iov = &trigger->op.atomic.rma_iov[0]; memcpy(&trigger->op.atomic.msg_iov[0], &msg->msg_iov[0], msg->iov_count * sizeof(struct fi_ioc)); memcpy(&trigger->op.atomic.rma_iov[0], &msg->rma_iov[0], msg->iov_count * sizeof(struct fi_rma_ioc)); if (comparev) { memcpy(&trigger->op.atomic.comparev[0], &comparev[0], compare_count * sizeof(struct fi_ioc)); } if (resultv) { memcpy(&trigger->op.atomic.resultv[0], &resultv[0], result_count * sizeof(struct fi_ioc)); } trigger->op_type = op_type; trigger->ep = ep; trigger->flags = flags; fastlock_acquire(&cntr->trigger_lock); dlist_insert_tail(&trigger->entry, &cntr->trigger_list); fastlock_release(&cntr->trigger_lock); sock_cntr_check_trigger_list(cntr); return 0; }
/*
 * Set the counter to an absolute value, signaling the waiter if the new
 * value satisfies its published threshold.  Always returns 0.
 */
static int sock_cntr_set(struct fid_cntr *cntr, uint64_t value)
{
	struct sock_cntr *sc = container_of(cntr, struct sock_cntr, cntr_fid);
	int cur, thresh;

	pthread_mutex_lock(&sc->mut);
	atomic_set(&sc->value, value);

	cur = atomic_get(&sc->value);
	thresh = atomic_get(&sc->threshold);
	if (cur >= thresh)
		pthread_cond_signal(&sc->cond);

	pthread_mutex_unlock(&sc->mut);

	/* The new value may satisfy a queued triggered operation. */
	sock_cntr_check_trigger_list(sc);
	return 0;
}
/*
 * Set the counter to an absolute value and wake all blocked waiters.
 * The last-read value is refreshed to the value just stored.  Always
 * returns 0.
 */
static int sock_cntr_set(struct fid_cntr *cntr, uint64_t value)
{
	struct sock_cntr *sc = container_of(cntr, struct sock_cntr, cntr_fid);
	uint64_t stored;

	pthread_mutex_lock(&sc->mut);
	stored = atomic_set(&sc->value, value);
	atomic_set(&sc->last_read_val, stored);

	/* Broadcast only if someone is actually blocked in a wait. */
	if (atomic_get(&sc->num_waiting))
		pthread_cond_broadcast(&sc->cond);
	pthread_mutex_unlock(&sc->mut);

	/* The new value may satisfy a queued triggered operation. */
	sock_cntr_check_trigger_list(sc);
	return 0;
}
/*
 * Block until the counter value reaches 'threshold' or 'timeout'
 * milliseconds elapse; a negative timeout waits indefinitely.
 *
 * Returns 0 on success, -FI_EAVAIL if an error event is pending
 * (checked both on entry and after the wait), and -FI_ETIMEDOUT on
 * timeout.  Multiple concurrent waiters are supported via num_waiting;
 * the wakeup side is expected to broadcast on cntr->cond.
 *
 * NOTE(review): 'last_read' is an int compared against the uint64_t
 * 'threshold'; the usual arithmetic conversions make a negative
 * last_read compare as a huge unsigned value — presumably counter
 * values never go negative, but confirm atomic_get's range.
 *
 * NOTE(review): 'remaining_ms = timeout' stores a possibly-negative
 * int into a uint64_t; for timeout < 0 this wraps to a huge value,
 * which appears to rely on fi_wait_cond truncating it back to a
 * negative int meaning "wait forever" — TODO confirm.
 */
static int sock_cntr_wait(struct fid_cntr *fid_cntr, uint64_t threshold, int timeout)
{
	int last_read, ret = 0;
	uint64_t start_ms = 0, end_ms = 0, remaining_ms = 0;
	struct sock_cntr *cntr;

	cntr = container_of(fid_cntr, struct sock_cntr, cntr_fid);
	pthread_mutex_lock(&cntr->mut);

	/* Pending error event takes precedence over any wait. */
	if (cntr->err_flag) {
		ret = -FI_EAVAIL;
		goto out;
	}

	/* Fast path: threshold already satisfied. */
	if (atomic_get(&cntr->value) >= threshold) {
		ret = 0;
		goto out;
	}

	/* Register as a waiter so increments broadcast the condvar. */
	atomic_inc(&cntr->num_waiting);

	if (timeout >= 0) {
		start_ms = fi_gettime_ms();
		end_ms = start_ms + timeout;
	}

	last_read = atomic_get(&cntr->value);
	remaining_ms = timeout;

	/* Loop tolerates spurious condvar wakeups: re-read the value and
	 * re-derive the remaining timeout budget on every iteration. */
	while (!ret && last_read < threshold) {
		if (cntr->domain->progress_mode == FI_PROGRESS_MANUAL) {
			/* Manual progress: drop the lock and drive progress
			 * ourselves; a nonzero return aborts the wait. */
			pthread_mutex_unlock(&cntr->mut);
			ret = sock_cntr_progress(cntr);
			pthread_mutex_lock(&cntr->mut);
		} else {
			/* Auto progress: sleep until signaled or timed out. */
			ret = fi_wait_cond(&cntr->cond, &cntr->mut, remaining_ms);
		}
		uint64_t curr_ms = fi_gettime_ms();
		if (timeout >= 0) {
			if (curr_ms >= end_ms) {
				ret = -FI_ETIMEDOUT;
				break;
			} else {
				remaining_ms = end_ms - curr_ms;
			}
		}
		last_read = atomic_get(&cntr->value);
	}

	/* Record the value observed on exit for later delta reporting. */
	atomic_set(&cntr->last_read_val, last_read);
	atomic_dec(&cntr->num_waiting);
	pthread_mutex_unlock(&cntr->mut);

	/* The observed value may satisfy a queued triggered operation. */
	sock_cntr_check_trigger_list(cntr);
	/* An error raised while we waited overrides the wait result. */
	return (cntr->err_flag) ? -FI_EAVAIL : ret;

out:
	pthread_mutex_unlock(&cntr->mut);
	return ret;
}