Ejemplo n.º 1
0
/*
 * sock_cntr_read - return the current value of a socket-provider counter.
 *
 * Drives provider progress before sampling so the value reflects any
 * completions that are ready to be counted.
 */
static uint64_t sock_cntr_read(struct fid_cntr *cntr)
{
	struct sock_cntr *priv = container_of(cntr, struct sock_cntr,
					      cntr_fid);

	sock_cntr_progress(priv);
	return atomic_get(&priv->value);
}
Ejemplo n.º 2
0
/*
 * sock_cntr_readerr - return the counter's accumulated error count.
 *
 * Under FI_PROGRESS_MANUAL the application thread must drive progress
 * itself, so poke the progress engine before sampling err_cnt.
 */
static uint64_t sock_cntr_readerr(struct fid_cntr *cntr)
{
	struct sock_cntr *priv;

	priv = container_of(cntr, struct sock_cntr, cntr_fid);
	if (priv->domain->progress_mode == FI_PROGRESS_MANUAL)
		sock_cntr_progress(priv);

	return atomic_get(&priv->err_cnt);
}
Ejemplo n.º 3
0
/*
 * sock_cntr_wait - block until the counter reaches @threshold, an error is
 * flagged, or @timeout (milliseconds; negative = wait forever) expires.
 *
 * Returns 0 on success, -FI_EAVAIL if an error completion is pending,
 * -FI_EBUSY if another waiter is already parked on this counter, or a
 * negative timeout/wait error code.
 *
 * Fix: the FI_PROGRESS_AUTO path previously issued a single fi_wait_cond()
 * and treated its return as "threshold reached".  Condition waits may wake
 * spuriously (or on a signal for a lower value), so the counter must be
 * re-checked and the wait re-armed with the remaining time until the
 * deadline actually passes.
 */
static int sock_cntr_wait(struct fid_cntr *cntr, uint64_t threshold,
                          int timeout)
{
    int ret = 0;
    uint64_t start_ms = 0, end_ms = 0;
    struct sock_cntr *_cntr;

    _cntr = container_of(cntr, struct sock_cntr, cntr_fid);
    pthread_mutex_lock(&_cntr->mut);

    if (_cntr->err_flag) {
        ret = -FI_EAVAIL;
        goto out;
    }

    if (atomic_get(&_cntr->value) >= threshold) {
        ret = 0;
        goto out;
    }

    /* Only one thread may wait on a counter at a time. */
    if (_cntr->is_waiting) {
        ret = -FI_EBUSY;
        goto out;
    }

    _cntr->is_waiting = 1;
    atomic_set(&_cntr->threshold, threshold);

    /* Compute the absolute deadline once, shared by both progress modes. */
    if (timeout >= 0) {
        start_ms = fi_gettime_ms();
        end_ms = start_ms + timeout;
    }

    if (_cntr->domain->progress_mode == FI_PROGRESS_MANUAL) {
        /* Drop the lock while spinning; progress may need it. */
        pthread_mutex_unlock(&_cntr->mut);

        while (atomic_get(&_cntr->value) < threshold) {
            sock_cntr_progress(_cntr);
            if (timeout >= 0 && fi_gettime_ms() >= end_ms) {
                ret = FI_ETIMEDOUT;    /* negated on return below */
                break;
            }
        }
        pthread_mutex_lock(&_cntr->mut);
    } else {
        int remaining_ms = timeout;   /* negative => block indefinitely */

        /* Re-check the value after every wakeup: condition waits can
         * return spuriously before the threshold is reached. */
        while (!ret && atomic_get(&_cntr->value) < threshold) {
            ret = fi_wait_cond(&_cntr->cond, &_cntr->mut, remaining_ms);
            if (!ret && timeout >= 0) {
                uint64_t curr_ms = fi_gettime_ms();
                if (curr_ms >= end_ms) {
                    ret = FI_ETIMEDOUT;
                    break;
                }
                remaining_ms = (int) (end_ms - curr_ms);
            }
        }
    }

    _cntr->is_waiting = 0;
    atomic_set(&_cntr->threshold, ~0);
    pthread_mutex_unlock(&_cntr->mut);
    sock_cntr_check_trigger_list(_cntr);
    /* ret holds a positive errno-style code here; negate for fabric ABI. */
    return (_cntr->err_flag) ? -FI_EAVAIL : -ret;

out:
    pthread_mutex_unlock(&_cntr->mut);
    return ret;
}
Ejemplo n.º 4
0
/*
 * sock_poll_poll - scan the poll set's fid list and report which objects
 * are ready, storing each ready fid's context into @context (capacity
 * @count).  Returns the number of ready entries written.
 *
 * Fix: the counter's mutex (cntr->mut) is a pthread mutex — every other
 * counter path locks it with pthread_mutex_lock() — but this function was
 * using fastlock_acquire()/fastlock_release() on it, mismatching the lock
 * API.  CQ and EQ locks remain fastlocks, as declared by their owners.
 */
static int sock_poll_poll(struct fid_poll *pollset, void **context, int count)
{
	struct sock_poll *poll;
	struct sock_cq *cq;
	struct sock_eq *eq;
	struct sock_cntr *cntr;
	struct sock_fid_list *list_item;
	struct dlist_entry *p, *head;
	int ret_count = 0;

	poll = container_of(pollset, struct sock_poll, poll_fid.fid);
	head = &poll->fid_list;

	for (p = head->next; p != head && ret_count < count; p = p->next) {
		list_item = container_of(p, struct sock_fid_list, entry);
		switch (list_item->fid->fclass) {
		case FI_CLASS_CQ:
			cq = container_of(list_item->fid, struct sock_cq, cq_fid);
			sock_cq_progress(cq);
			fastlock_acquire(&cq->lock);
			/* Ready when the completion ring buffer has data. */
			if (rbfdused(&cq->cq_rbfd)) {
				*context++ = cq->cq_fid.fid.context;
				ret_count++;
			}
			fastlock_release(&cq->lock);
			break;

		case FI_CLASS_CNTR:
			cntr = container_of(list_item->fid, struct sock_cntr, cntr_fid);
			sock_cntr_progress(cntr);
			pthread_mutex_lock(&cntr->mut);
			/* Ready when the counter has met a waiter's threshold. */
			if (atomic_get(&cntr->value) >= atomic_get(&cntr->threshold)) {
				*context++ = cntr->cntr_fid.fid.context;
				ret_count++;
			}
			pthread_mutex_unlock(&cntr->mut);
			break;

		case FI_CLASS_EQ:
			eq = container_of(list_item->fid, struct sock_eq, eq);
			fastlock_acquire(&eq->lock);
			/* Ready when the event list is non-empty. */
			if (!dlistfd_empty(&eq->list)) {
				*context++ = eq->eq.fid.context;
				ret_count++;
			}
			fastlock_release(&eq->lock);
			break;

		default:
			/* Unknown fid classes are silently skipped. */
			break;
		}
	}

	return ret_count;
}
Ejemplo n.º 5
0
/*
 * sock_cntr_wait - block until the counter reaches @threshold, an error is
 * flagged, or @timeout (milliseconds; negative = wait forever) expires.
 *
 * Returns 0 on success, -FI_EAVAIL if an error completion is pending,
 * -FI_ETIMEDOUT on deadline expiry, or a progress/wait error code.
 *
 * Fix: remaining_ms was declared uint64_t and assigned a possibly
 * negative int timeout, turning "block forever" into a huge unsigned
 * value that was then narrowed on the call to fi_wait_cond().  It is now
 * a signed int so a negative timeout propagates unchanged.  The clock is
 * also sampled only when a deadline actually applies.
 */
static int sock_cntr_wait(struct fid_cntr *fid_cntr, uint64_t threshold,
			  int timeout)
{
	int last_read, remaining_ms, ret = 0;
	uint64_t start_ms = 0, end_ms = 0;
	struct sock_cntr *cntr;
	cntr = container_of(fid_cntr, struct sock_cntr, cntr_fid);

	pthread_mutex_lock(&cntr->mut);
	if (cntr->err_flag) {
		ret = -FI_EAVAIL;
		goto out;
	}

	if (atomic_get(&cntr->value) >= threshold) {
		ret = 0;
		goto out;
	}

	atomic_inc(&cntr->num_waiting);

	if (timeout >= 0) {
		start_ms = fi_gettime_ms();
		end_ms = start_ms + timeout;
	}

	last_read = atomic_get(&cntr->value);
	remaining_ms = timeout;	/* negative => block indefinitely */

	while (!ret && (uint64_t) last_read < threshold) {
		if (cntr->domain->progress_mode == FI_PROGRESS_MANUAL) {
			/* Drop the lock while driving progress ourselves. */
			pthread_mutex_unlock(&cntr->mut);
			ret = sock_cntr_progress(cntr);
			pthread_mutex_lock(&cntr->mut);
		} else {
			ret = fi_wait_cond(&cntr->cond, &cntr->mut, remaining_ms);
		}

		if (timeout >= 0) {
			uint64_t curr_ms = fi_gettime_ms();
			if (curr_ms >= end_ms) {
				ret = -FI_ETIMEDOUT;
				break;
			} else {
				remaining_ms = (int) (end_ms - curr_ms);
			}
		}

		last_read = atomic_get(&cntr->value);
	}

	/* Record the last value observed by a waiter for later readers. */
	atomic_set(&cntr->last_read_val, last_read);
	atomic_dec(&cntr->num_waiting);
	pthread_mutex_unlock(&cntr->mut);

	sock_cntr_check_trigger_list(cntr);
	return (cntr->err_flag) ? -FI_EAVAIL : ret;

out:
	pthread_mutex_unlock(&cntr->mut);
	return ret;
}