Example 1
/*
 * Send data on the PE entry's connection, buffering small transfers.
 *
 * Transfers larger than the buffering threshold (cache_sz) bypass the
 * ring buffer: the buffered data is flushed first, and the payload is
 * sent directly on the socket only if the flush drained everything
 * previously buffered (preserving byte order on the wire).  Smaller
 * transfers are copied into the ring buffer, flushing first when there
 * is not enough room.
 *
 * Returns the number of bytes accepted (sent or buffered), or 0 when
 * no progress could be made.
 */
ssize_t sock_comm_send(struct sock_pe_entry *pe_entry,
		       const void *buf, size_t len)
{
	ssize_t ret, used;

	if (len > pe_entry->cache_sz) {
		used = ofi_rbused(&pe_entry->comm_buf);
		/* Direct send is only ordered correctly if the flush
		 * wrote out every byte that was buffered; otherwise
		 * report no progress and let the caller retry. */
		if (used == sock_comm_flush(pe_entry)) {
			return sock_comm_send_socket(pe_entry->conn, buf, len);
		} else {
			return 0;
		}
	}

	if (ofi_rbavail(&pe_entry->comm_buf) < len) {
		ret = sock_comm_flush(pe_entry);
		if (ret <= 0)
			return 0;
	}

	ret = MIN(ofi_rbavail(&pe_entry->comm_buf), len);
	ofi_rbwrite(&pe_entry->comm_buf, buf, ret);
	ofi_rbcommit(&pe_entry->comm_buf);
	/* %zd matches ssize_t; the previous %lu mismatched the argument
	 * type (undefined behavior in the printf family). */
	SOCK_LOG_DBG("buffered %zd\n", ret);
	return ret;
}
Example 2
/*
 * Flush buffered send data to the socket.
 *
 * The used region of the ring buffer may wrap, so it is written in at
 * most two segments: from the current read position to the physical end
 * of the buffer, then (if the first segment was fully accepted) from
 * the start of the buffer.  The read counter (rcnt) advances by however
 * many bytes the socket took, so a short send simply leaves the
 * remainder buffered for the next flush.
 *
 * Returns the total number of bytes flushed, or 0 if nothing was sent.
 */
ssize_t sock_comm_flush(struct sock_pe_entry *pe_entry)
{
	ssize_t ret1, ret2 = 0;
	size_t endlen, len, xfer_len;

	len = ofi_rbused(&pe_entry->comm_buf);
	/* Contiguous bytes available before the buffer wraps around. */
	endlen = pe_entry->comm_buf.size -
		(pe_entry->comm_buf.rcnt & pe_entry->comm_buf.size_mask);

	xfer_len = MIN(len, endlen);
	ret1 = sock_comm_send_socket(pe_entry->conn, (char*)pe_entry->comm_buf.buf +
				     (pe_entry->comm_buf.rcnt & pe_entry->comm_buf.size_mask),
				     xfer_len);
	if (ret1 > 0)
		pe_entry->comm_buf.rcnt += ret1;

	/* First segment fully sent and data wrapped past the end:
	 * send the second segment from the start of the buffer.
	 * NOTE(review): ret1 (ssize_t) is compared against xfer_len
	 * (size_t); a negative ret1 converts to a huge unsigned value
	 * and compares unequal, so the guard still behaves. */
	if (ret1 == xfer_len && xfer_len < len) {
		ret2 = sock_comm_send_socket(pe_entry->conn, (char*)pe_entry->comm_buf.buf +
					     (pe_entry->comm_buf.rcnt & pe_entry->comm_buf.size_mask),
					     len - xfer_len);
		if (ret2 > 0)
			pe_entry->comm_buf.rcnt += ret2;
		else
			ret2 = 0;
	}

	return (ret1 > 0) ? ret1 + ret2 : 0;
}
Example 3
/*
 * Receive up to len bytes for the PE entry, using the ring buffer for
 * small reads.
 *
 * When the buffer is empty: reads larger than the buffering threshold
 * (cache_sz) go straight to the socket; smaller reads first try to
 * refill the buffer from the socket.  Whatever is then buffered (up to
 * len bytes) is copied out to the caller.
 *
 * Returns the number of bytes delivered into buf (possibly 0).
 */
ssize_t sock_comm_recv(struct sock_pe_entry *pe_entry, void *buf, size_t len)
{
	ssize_t read_len;
	if (ofi_rbempty(&pe_entry->comm_buf)) {
		if (len <= pe_entry->cache_sz) {
			sock_comm_recv_buffer(pe_entry);
		} else {
			return sock_comm_recv_socket(pe_entry->conn, buf, len);
		}
	}

	read_len = MIN(len, ofi_rbused(&pe_entry->comm_buf));
	ofi_rbread(&pe_entry->comm_buf, buf, read_len);
	/* %zd matches ssize_t; the previous %lu mismatched the argument
	 * type (undefined behavior in the printf family). */
	SOCK_LOG_DBG("read from buffer: %zd\n", read_len);
	return read_len;
}
Example 4
/*
 * Read one completion-error entry from the CQ's error ring buffer.
 *
 * The CQ is progressed first when the domain uses manual progress.
 * For API versions >= 1.5, if the caller supplied an err_data buffer,
 * the entry's err_data is copied into it (truncated to the caller's
 * size) and buf->err_data / buf->err_data_size are left describing the
 * caller's buffer; otherwise the entry is copied verbatim.
 *
 * Returns 1 when an error entry was consumed, -FI_EAGAIN when none is
 * queued.  The `flags` argument is currently unused.
 */
static ssize_t sock_cq_readerr(struct fid_cq *cq, struct fi_cq_err_entry *buf,
			uint64_t flags)
{
	struct sock_cq *sock_cq;
	ssize_t ret;
	struct fi_cq_err_entry entry;
	uint32_t api_version;
	size_t err_data_size = 0;
	void *err_data = NULL;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL)
		sock_cq_progress(sock_cq);

	fastlock_acquire(&sock_cq->lock);
	if (ofi_rbused(&sock_cq->cqerr_rb) >= sizeof(struct fi_cq_err_entry)) {
		api_version = sock_cq->domain->fab->fab_fid.api_version;
		ofi_rbread(&sock_cq->cqerr_rb, &entry, sizeof(entry));

		if ((FI_VERSION_GE(api_version, FI_VERSION(1, 5)))
			&& buf->err_data && buf->err_data_size) {
			/* Save the caller's buffer before *buf is
			 * overwritten by the struct copy below. */
			err_data = buf->err_data;
			err_data_size = buf->err_data_size;
			*buf = entry;
			buf->err_data = err_data;

			/* Fill provided user's buffer */
			buf->err_data_size = MIN(entry.err_data_size, err_data_size);
			memcpy(buf->err_data, entry.err_data, buf->err_data_size);
		} else {
			*buf = entry;
		}

		ret = 1;
	} else {
		ret = -FI_EAGAIN;
	}
	fastlock_release(&sock_cq->lock);
	return ret;
}
Example 5
/*
 * Blocking (or timed) read of up to `count` completions from the CQ.
 *
 * Returns -FI_EAVAIL immediately if an error entry is pending.  With a
 * FI_CQ_COND_THRESHOLD wait condition, `cond` carries the minimum entry
 * count to wait for (capped at `count`).  A negative `timeout` means
 * wait indefinitely.
 *
 * Manual-progress domains poll the CQ in a busy loop; otherwise the
 * thread blocks on the completion ring buffer's fd until data arrives
 * or the timeout expires.
 *
 * Returns the number of entries read, -FI_EAGAIN when the timeout
 * expires with no data, or a negative error code.
 */
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
			fi_addr_t *src_addr, const void *cond, int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms = 0, end_ms = 0;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (ofi_rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	/* Absolute deadline; only meaningful for non-negative timeouts. */
	if (timeout >= 0) {
		start_ms = fi_gettime_ms();
		end_ms = start_ms + timeout;
	}

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		/* Poll: drive progress, then read whatever is queued. */
		do {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
			if (ret == 0 && timeout >= 0) {
				if (fi_gettime_ms() >= end_ms)
					return -FI_EAGAIN;
			}
		} while (ret == 0);
	} else {
		/* Block on the ring buffer's fd, then drain entries. */
		do {
			ret = ofi_rbfdwait(&sock_cq->cq_rbfd, timeout);
			if (ret <= 0)
				break;

			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			else /* No CQ entry available, read the fd */
				ofi_rbfdreset(&sock_cq->cq_rbfd);
			fastlock_release(&sock_cq->lock);

			/* Shrink the remaining timeout for the next wait. */
			if ((ret == -FI_EAGAIN || ret == 0) && timeout >= 0) {
				timeout = end_ms - fi_gettime_ms();
				if (timeout <= 0)
					break;
			}
		} while (ret == 0 || ret == -FI_EAGAIN);
	}
	/* Normalize "nothing read" outcomes to -FI_EAGAIN. */
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}
Example 6
/*
 * Blocking (or timed) read of up to `count` completions from the CQ.
 * (Alternate version of the function above; also supports cancellation
 * of the wait via the CQ's `signaled` atomic flag.)
 *
 * Returns -FI_EAVAIL immediately if an error entry is pending.  With a
 * FI_CQ_COND_THRESHOLD wait condition, `cond` carries the minimum entry
 * count to wait for (capped at `count`).  A negative `timeout` means
 * wait indefinitely.
 *
 * Returns the number of entries read, -FI_EAGAIN when the timeout
 * expires with no data, -FI_ECANCELED if the wait was signaled, or a
 * negative error code.
 */
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
			fi_addr_t *src_addr, const void *cond, int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (ofi_rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	start_ms = (timeout >= 0) ? fi_gettime_ms() : 0;

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		/* Poll: drive progress, read queued entries, check the
		 * deadline and the cancellation flag each iteration. */
		while (1) {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			}
			fastlock_release(&sock_cq->lock);
			if (ret)
				return ret;

			/* Decrement the remaining timeout by elapsed time. */
			if (timeout >= 0) {
				timeout -= (int) (fi_gettime_ms() - start_ms);
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			/* Consume a pending cancel request, if any. */
			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
		};
	} else {
		/* Drain entries, then block on the ring buffer's fd. */
		do {
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			} else {
				/* No CQ entry available; clear the fd so
				 * the next wait blocks. */
				ofi_rbfdreset(&sock_cq->cq_rbfd);
			}
			fastlock_release(&sock_cq->lock);
			if (ret && ret != -FI_EAGAIN)
				return ret;

			/* Decrement the remaining timeout by elapsed time. */
			if (timeout >= 0) {
				timeout -= (int) (fi_gettime_ms() - start_ms);
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			/* Consume a pending cancel request, if any. */
			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
			ret = ofi_rbfdwait(&sock_cq->cq_rbfd, timeout);
		} while (ret > 0);
	}

	/* Normalize "nothing read" outcomes to -FI_EAGAIN. */
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}