Example 1
static int sock_poll_poll(struct fid_poll *pollset, void **context, int count)
{
	struct sock_poll *poll;
	struct sock_cq *cq;
	struct sock_eq *eq;
	struct sock_cntr *cntr;
	struct sock_fid_list *list_item;
	struct dlist_entry *p, *head;
	int ret_count = 0;

	poll = container_of(pollset, struct sock_poll, poll_fid.fid);
	head = &poll->fid_list;

	/* Walk the poll set's member fids, reporting at most 'count'
	 * ready contexts back to the caller. */
	for (p = head->next; p != head && ret_count < count; p = p->next) {
		list_item = container_of(p, struct sock_fid_list, entry);
		switch (list_item->fid->fclass) {
		case FI_CLASS_CQ:
			cq = container_of(list_item->fid, struct sock_cq, cq_fid);
			sock_cq_progress(cq);
			fastlock_acquire(&cq->lock);
			if (rbfdused(&cq->cq_rbfd)) {
				*context++ = cq->cq_fid.fid.context;
				ret_count++;
			}
			fastlock_release(&cq->lock);
			break;

		case FI_CLASS_CNTR:
			cntr = container_of(list_item->fid, struct sock_cntr, cntr_fid);
			sock_cntr_progress(cntr);
			fastlock_acquire(&cntr->mut);
			if (atomic_get(&cntr->value) >= atomic_get(&cntr->threshold)) {
				*context++ = cntr->cntr_fid.fid.context;
				ret_count++;
			}
			fastlock_release(&cntr->mut);
			break;

		case FI_CLASS_EQ:
			eq = container_of(list_item->fid, struct sock_eq, eq);
			fastlock_acquire(&eq->lock);
			if (!dlistfd_empty(&eq->list)) {
				*context++ = eq->eq.fid.context;
				ret_count++;
			}
			fastlock_release(&eq->lock);
			break;

		default:
			break;
		}
	}

	return ret_count;
}
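For context, a minimal caller-side sketch of how an application reaches sock_poll_poll() through the public fi_poll() entry point. The helper name is hypothetical; only the standard <rdma/fi_domain.h> API is assumed:

#include <rdma/fabric.h>
#include <rdma/fi_domain.h>

/* Hypothetical helper: ask a poll set which members are ready. */
static int drain_poll_set(struct fid_poll *pollset)
{
	void *contexts[8];
	int i, n;

	/* fi_poll() dispatches to the provider's handler (sock_poll_poll()
	 * above) and returns how many ready contexts it wrote out. */
	n = fi_poll(pollset, contexts, 8);
	for (i = 0; i < n; i++)
		(void) contexts[i];	/* look up the owning CQ/cntr here */
	return n;
}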
Example 2
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
			fi_addr_t *src_addr, const void *cond, int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms = 0, end_ms = 0;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	/* Manual progress: state only advances when we poll, so loop on
	 * sock_cq_progress() until completions arrive or the timeout hits. */
	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		if (timeout >= 0) {
			start_ms = fi_gettime_ms();
			end_ms = start_ms + timeout;
		}

		do {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, avail / cq_entry_len),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
			if (ret == 0 && timeout >= 0) {
				if (fi_gettime_ms() >= end_ms)
					return -FI_EAGAIN;
			}
		} while (ret == 0);
	} else {
		/* Automatic progress: block on the ring buffer's fd instead
		 * of spinning. */
		ret = rbfdwait(&sock_cq->cq_rbfd, timeout);
		if (ret > 0) {
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, avail / cq_entry_len),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
		}
	}
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}
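A hedged caller sketch for the path above, assuming the public fi_cq_sread()/fi_cq_readerr() entry points (fi_cq_sread() reaches sock_cq_sreadfrom() with src_addr == NULL); wait_one_completion() is a hypothetical name:

#include <rdma/fabric.h>
#include <rdma/fi_eq.h>

/* Hypothetical helper: block up to timeout_ms for a single completion. */
static ssize_t wait_one_completion(struct fid_cq *cq, int timeout_ms)
{
	struct fi_cq_entry entry;
	struct fi_cq_err_entry err = {0};
	ssize_t ret;

	ret = fi_cq_sread(cq, &entry, 1, NULL, timeout_ms);
	if (ret == -FI_EAVAIL && fi_cq_readerr(cq, &err, 0) == 1)
		return -err.err;	/* error completion: negative fi_errno */
	return ret;			/* 1 on success, -FI_EAGAIN on timeout */
}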
Example 3
static ssize_t sock_cq_readerr(struct fid_cq *cq, struct fi_cq_err_entry *buf,
			uint64_t flags)
{
	struct sock_cq *sock_cq;
	ssize_t ret;
	
	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL)
		sock_cq_progress(sock_cq);

	fastlock_acquire(&sock_cq->lock);
	/* Error completions are staged as whole entries in cqerr_rb. */
	if (rbused(&sock_cq->cqerr_rb) >= sizeof(struct fi_cq_err_entry)) {
		rbread(&sock_cq->cqerr_rb, buf, sizeof(*buf));
		ret = 1;
	} else {
		ret = -FI_EAGAIN;
	}
	fastlock_release(&sock_cq->lock);
	return ret;
}
Example 4
static ssize_t sock_cq_readerr(struct fid_cq *cq, struct fi_cq_err_entry *buf,
			uint64_t flags)
{
	struct sock_cq *sock_cq;
	ssize_t ret;
	struct fi_cq_err_entry entry;
	uint32_t api_version;
	size_t err_data_size = 0;
	void *err_data = NULL;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL)
		sock_cq_progress(sock_cq);

	fastlock_acquire(&sock_cq->lock);
	if (ofi_rbused(&sock_cq->cqerr_rb) >= sizeof(struct fi_cq_err_entry)) {
		api_version = sock_cq->domain->fab->fab_fid.api_version;
		ofi_rbread(&sock_cq->cqerr_rb, &entry, sizeof(entry));

		/* From API 1.5 on the caller may supply an err_data buffer;
		 * keep their pointer and size across the struct copy below. */
		if ((FI_VERSION_GE(api_version, FI_VERSION(1, 5)))
			&& buf->err_data && buf->err_data_size) {
			err_data = buf->err_data;
			err_data_size = buf->err_data_size;
			*buf = entry;
			buf->err_data = err_data;

			/* Fill provided user's buffer */
			buf->err_data_size = MIN(entry.err_data_size, err_data_size);
			memcpy(buf->err_data, entry.err_data, buf->err_data_size);
		} else {
			*buf = entry;
		}

		ret = 1;
	} else {
		ret = -FI_EAGAIN;
	}
	fastlock_release(&sock_cq->lock);
	return ret;
}
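The versioned branch above matters to callers: from API 1.5 on an application can pass its own err_data buffer, and err_data_size comes back clamped to what the provider actually stored. A sketch of that calling pattern (helper name and buffer size are illustrative):

#include <stdio.h>
#include <rdma/fabric.h>
#include <rdma/fi_eq.h>

/* Hypothetical helper: drain one error completion with provider data. */
static void report_cq_error(struct fid_cq *cq)
{
	char err_buf[64];	/* arbitrary size for this sketch */
	struct fi_cq_err_entry err = {
		.err_data = err_buf,		/* honored when API >= 1.5 */
		.err_data_size = sizeof(err_buf),
	};

	if (fi_cq_readerr(cq, &err, 0) != 1)
		return;		/* nothing queued (-FI_EAGAIN) */

	/* err.err_data_size is now MIN(provider data, sizeof(err_buf)). */
	fprintf(stderr, "cq error %d: %s\n", err.err,
		fi_cq_strerror(cq, err.prov_errno, err.err_data, NULL, 0));
}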
Example 5
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
			fi_addr_t *src_addr, const void *cond, int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms, now;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (ofi_rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	start_ms = (timeout >= 0) ? fi_gettime_ms() : 0;

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		while (1) {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			}
			fastlock_release(&sock_cq->lock);
			if (ret)
				return ret;

			if (timeout >= 0) {
				/* Charge only the time since the last check so
				 * repeated iterations do not over-subtract. */
				now = fi_gettime_ms();
				timeout -= (int) (now - start_ms);
				start_ms = now;
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			/* Another thread called fi_cq_signal(); cancel the wait. */
			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
		}
	} else {
		do {
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			} else {
				ofi_rbfdreset(&sock_cq->cq_rbfd);
			}
			fastlock_release(&sock_cq->lock);
			if (ret && ret != -FI_EAGAIN)
				return ret;

			if (timeout >= 0) {
				/* Same incremental accounting as above. */
				now = fi_gettime_ms();
				timeout -= (int) (now - start_ms);
				start_ms = now;
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
			ret = ofi_rbfdwait(&sock_cq->cq_rbfd, timeout);
		} while (ret > 0);
	}

	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}
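The signaled check is the cancellation hook: another thread calls fi_cq_signal() to force a blocked reader out with -FI_ECANCELED. A hypothetical pthread-based shutdown pattern:

#include <pthread.h>
#include <rdma/fabric.h>
#include <rdma/fi_eq.h>

/* Hypothetical progress thread blocking in fi_cq_sread(). */
static void *progress_thread(void *arg)
{
	struct fid_cq *cq = arg;
	struct fi_cq_entry entry;
	ssize_t ret;

	for (;;) {
		ret = fi_cq_sread(cq, &entry, 1, NULL, -1 /* no timeout */);
		if (ret == -FI_ECANCELED)
			break;	/* woken by fi_cq_signal() below */
		/* ... handle the completion or -FI_EAVAIL ... */
	}
	return NULL;
}

/* Hypothetical shutdown: wake the blocked reader, then join it. */
static void stop_progress(struct fid_cq *cq, pthread_t tid)
{
	fi_cq_signal(cq);	/* sets sock_cq->signaled above */
	pthread_join(tid, NULL);
}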