Example 1
ssize_t sock_comm_send(struct sock_conn *conn, const void *buf, size_t len)
{
	ssize_t ret, used;

	/* Large sends bypass the staging buffer: flush whatever is already
	 * buffered and, if the flush drained it completely, write the payload
	 * directly to the socket. */
	if (len >= SOCK_COMM_THRESHOLD) {
		used = rbused(&conn->outbuf);
		if (used == sock_comm_flush(conn)) {
			return sock_comm_send_socket(conn, buf, len);
		} else {
			return 0;
		}
	}

	/* Small sends are staged; flush first if the ring buffer is full. */
	if (rbavail(&conn->outbuf) < len) {
		ret = sock_comm_flush(conn);
		if (ret <= 0)
			return 0;
	}

	/* Buffer as much as fits and report how many bytes were accepted. */
	ret = MIN(rbavail(&conn->outbuf), len);
	rbwrite(&conn->outbuf, buf, ret);
	rbcommit(&conn->outbuf);
	SOCK_LOG_INFO("buffered %zd\n", ret);
	return ret;
}
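
The send path above stages small messages in conn->outbuf through the provider's rbavail()/rbwrite()/rbcommit() helpers: space is checked, data is written past the committed position, and a commit makes it visible to the flush path. The following is a simplified standalone sketch of that pattern, not the provider's actual ring-buffer implementation; all names in it are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified ring buffer: 'wpos' advances as data is staged,
 * 'wcmt' marks how much of it has been committed for the reader. */
struct ring {
	uint8_t  buf[1024];		/* capacity must be a power of two */
	size_t   size_mask;		/* capacity - 1 */
	uint64_t rpos, wpos, wcmt;
};

static size_t ring_used(struct ring *rb)  { return rb->wcmt - rb->rpos; }
static size_t ring_avail(struct ring *rb) { return sizeof(rb->buf) - (rb->wpos - rb->rpos); }

static void ring_write(struct ring *rb, const void *data, size_t len)
{
	const uint8_t *src = data;
	for (size_t i = 0; i < len; i++)
		rb->buf[(rb->wpos + i) & rb->size_mask] = src[i];
	rb->wpos += len;
}

static void ring_commit(struct ring *rb) { rb->wcmt = rb->wpos; }

int main(void)
{
	struct ring rb = { .size_mask = sizeof(rb.buf) - 1 };
	const char msg[] = "hello";

	if (ring_avail(&rb) >= sizeof(msg)) {
		ring_write(&rb, msg, sizeof(msg));
		ring_commit(&rb);	/* data becomes visible to the flush path */
	}
	printf("used=%zu avail=%zu\n", ring_used(&rb), ring_avail(&rb));
	return 0;
}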
Example 2
static ssize_t sock_tx_size_left(struct fid_ep *ep)
{
	struct sock_ep *sock_ep;
	struct sock_tx_ctx *tx_ctx;
	ssize_t num_left = 0;

	/* Resolve the TX context from either a regular endpoint or a
	 * standalone TX context fid. */
	switch (ep->fid.fclass) {
	case FI_CLASS_EP:
		sock_ep = container_of(ep, struct sock_ep, ep);
		tx_ctx = sock_ep->attr->tx_ctx;
		break;

	case FI_CLASS_TX_CTX:
		tx_ctx = container_of(ep, struct sock_tx_ctx, fid.ctx);
		break;

	default:
		SOCK_LOG_ERROR("Invalid EP type\n");
		return -FI_EINVAL;
	}

	/* Number of TX command entries that still fit in the ring buffer. */
	fastlock_acquire(&tx_ctx->wlock);
	num_left = rbavail(&tx_ctx->rb) / SOCK_EP_TX_ENTRY_SZ;
	fastlock_release(&tx_ctx->wlock);
	return num_left;
}
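
sock_tx_size_left() accepts either a plain endpoint or a standalone TX context and recovers the owning structure with container_of() keyed on fid.fclass. Below is a minimal standalone sketch of that dispatch idiom; the types and class constants are made up for illustration and are not libfabric definitions.

#include <stddef.h>
#include <stdio.h>

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))
#endif

/* Hypothetical object classes, mirroring the fid.fclass dispatch above. */
enum obj_class { CLASS_EP = 1, CLASS_TX_CTX = 2 };

struct fid_stub { enum obj_class fclass; };

struct my_ep     { int depth; struct fid_stub fid; };
struct my_tx_ctx { int depth; struct fid_stub fid; };

static int size_left(struct fid_stub *fid)
{
	switch (fid->fclass) {
	case CLASS_EP:
		return container_of(fid, struct my_ep, fid)->depth;
	case CLASS_TX_CTX:
		return container_of(fid, struct my_tx_ctx, fid)->depth;
	default:
		return -1;
	}
}

int main(void)
{
	struct my_tx_ctx tx = { .depth = 16, .fid = { CLASS_TX_CTX } };
	printf("%d\n", size_left(&tx.fid));	/* prints 16 */
	return 0;
}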
Example 3
int sock_cq_report_error(struct sock_cq *cq, struct sock_pe_entry *entry,
			 size_t olen, int err, int prov_errno, void *err_data)
{
	int ret;
	struct fi_cq_err_entry err_entry;

	fastlock_acquire(&cq->lock);
	/* The error ring buffer must have room for a complete error entry. */
	if (rbavail(&cq->cqerr_rb) < sizeof(err_entry)) {
		ret = -FI_ENOSPC;
		goto out;
	}

	err_entry.err = err;
	err_entry.olen = olen;
	err_entry.err_data = err_data;
	err_entry.len = entry->data_len;
	err_entry.prov_errno = prov_errno;
	err_entry.flags = entry->flags;
	err_entry.data = entry->data;
	err_entry.tag = entry->tag;
	err_entry.op_context = (void *) (uintptr_t) entry->context;

	/* Point at the user buffer of the failed operation: the first RX iov
	 * for receives, the first TX iov for transmits. */
	if (entry->type == SOCK_PE_RX)
		err_entry.buf = (void *) (uintptr_t) entry->pe.rx.rx_iov[0].iov.addr;
	else
		err_entry.buf = (void *) (uintptr_t) entry->pe.tx.tx_iov[0].src.iov.addr;

	rbwrite(&cq->cqerr_rb, &err_entry, sizeof(err_entry));
	rbcommit(&cq->cqerr_rb);
	ret = 0;

out:
	fastlock_release(&cq->lock);
	return ret;
}
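
sock_cq_report_error() only queues the fi_cq_err_entry on the CQ's error ring; an application pulls it back out through the public completion-queue API. A hedged usage sketch of that consumer side follows, assuming an already opened CQ and trimming most error handling.

#include <stdio.h>
#include <rdma/fabric.h>
#include <rdma/fi_errno.h>
#include <rdma/fi_eq.h>

/* Drain one error entry from a CQ after fi_cq_read() returned -FI_EAVAIL.
 * 'cq' is assumed to be an open completion queue. */
static void report_cq_error(struct fid_cq *cq)
{
	struct fi_cq_err_entry err = { 0 };
	ssize_t ret;

	ret = fi_cq_readerr(cq, &err, 0);
	if (ret < 0) {
		fprintf(stderr, "fi_cq_readerr failed: %zd\n", ret);
		return;
	}

	fprintf(stderr, "operation failed: err=%d (%s), prov_errno=%d, olen=%zu\n",
		err.err, fi_strerror(err.err), err.prov_errno, err.olen);
	/* fi_cq_strerror() can translate prov_errno/err_data into provider-
	 * specific text if more detail is needed. */
}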
Example 4
ssize_t sock_conn_send_src_addr(struct sock_ep *sock_ep, struct sock_tx_ctx *tx_ctx,
				struct sock_conn *conn)
{
	int ret;
	uint64_t total_len;
	struct sock_op tx_op;

	/* Queue a connection message carrying this endpoint's source address
	 * so the peer can map the incoming connection back to it. */
	memset(&tx_op, 0, sizeof(struct sock_op));
	tx_op.op = SOCK_OP_CONN_MSG;
	SOCK_LOG_DBG("New conn msg on TX: %p using conn: %p\n", tx_ctx, conn);

	tx_op.src_iov_len = sizeof(struct sockaddr_in);
	total_len = tx_op.src_iov_len + sizeof(struct sock_op_send);

	/* Reserve ring space for the command header plus the address. */
	sock_tx_ctx_start(tx_ctx);
	if (rbavail(&tx_ctx->rb) < total_len) {
		ret = -FI_EAGAIN;
		goto err;
	}

	sock_tx_ctx_write_op_send(tx_ctx, &tx_op, 0, (uintptr_t) NULL, 0, 0, sock_ep,
					conn);
	sock_tx_ctx_write(tx_ctx, sock_ep->src_addr, sizeof(struct sockaddr_in));
	sock_tx_ctx_commit(tx_ctx);
	conn->address_published = 1;
	return 0;

err:
	sock_tx_ctx_abort(tx_ctx);
	return ret;
}
Example 5
ssize_t sock_comm_recv_buffer(struct sock_conn *conn)
{
	int ret;
	size_t endlen;

	/* First read: fill the input ring up to the end of the underlying
	 * array, i.e. the contiguous region after the write position. */
	endlen = conn->inbuf.size - (conn->inbuf.wpos & conn->inbuf.size_mask);
	ret = sock_comm_recv_socket(conn, (char *) conn->inbuf.buf +
				    (conn->inbuf.wpos & conn->inbuf.size_mask),
				    endlen);
	if (ret <= 0)
		return 0;

	conn->inbuf.wpos += ret;
	rbcommit(&conn->inbuf);
	if (ret != endlen)
		return ret;

	/* The contiguous region filled completely: wrap around and read into
	 * the start of the buffer for the remaining free space. */
	ret = sock_comm_recv_socket(conn, conn->inbuf.buf, rbavail(&conn->inbuf));
	if (ret <= 0)
		return 0;

	conn->inbuf.wpos += ret;
	rbcommit(&conn->inbuf);
	return 0;
}
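
sock_comm_recv_buffer() fills the input ring in at most two recv calls: first the contiguous region up to the end of the underlying array, then, only if that region filled completely, the wrapped-around space at the start. The sketch below reproduces that wrap-around logic against a plain socket descriptor; the ring type and helper are hypothetical and assume a power-of-two capacity.

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Hypothetical receive ring: capacity is a power of two, 'mask' = capacity - 1. */
struct recv_ring {
	uint8_t  *buf;
	size_t    size, mask;
	uint64_t  rpos, wpos;
};

/* Read from 'fd' into the ring; returns the bytes received into the first,
 * contiguous segment (mirroring the example above, which stops early on a
 * short read). */
static ssize_t ring_fill(int fd, struct recv_ring *rb)
{
	size_t off = rb->wpos & rb->mask;
	size_t endlen = rb->size - off;		/* room up to the end of the array */
	ssize_t n;

	n = recv(fd, rb->buf + off, endlen, 0);
	if (n <= 0)
		return 0;
	rb->wpos += n;

	if ((size_t) n != endlen)		/* short read: nothing wrapped */
		return n;

	/* The tail filled completely: continue at the start of the array,
	 * limited by the free space left before the read position. */
	size_t avail = rb->size - (size_t) (rb->wpos - rb->rpos);
	if (avail) {
		n = recv(fd, rb->buf, avail, 0);
		if (n > 0)
			rb->wpos += n;
	}
	return (ssize_t) endlen;
}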
Example 6
static ssize_t _sock_cq_writeerr(struct sock_cq *cq,
				 struct fi_cq_err_entry *buf, size_t len)
{
	ssize_t ret;

	fastlock_acquire(&cq->lock);
	if (rbavail(&cq->cqerr_rb) < len) {
		ret = -FI_ENOSPC;
		goto out;
	}

	rbwrite(&cq->cqerr_rb, buf, len);
	rbcommit(&cq->cqerr_rb);
	ret = len;

out:
	fastlock_release(&cq->lock);
	return ret;
}
Example 7
ssize_t sock_ep_tsendmsg(struct fid_ep *ep,
                         const struct fi_msg_tagged *msg, uint64_t flags)
{
    int ret, i;
    uint64_t total_len;
    struct sock_op tx_op;
    union sock_iov tx_iov;
    struct sock_conn *conn;
    struct sock_tx_ctx *tx_ctx;
    struct sock_ep *sock_ep;

    switch (ep->fid.fclass) {
    case FI_CLASS_EP:
        sock_ep = container_of(ep, struct sock_ep, ep);
        tx_ctx = sock_ep->tx_ctx;
        break;
    case FI_CLASS_TX_CTX:
        tx_ctx = container_of(ep, struct sock_tx_ctx, fid.ctx);
        sock_ep = tx_ctx->ep;
        break;
    default:
        SOCK_LOG_ERROR("Invalid EP type\n");
        return -FI_EINVAL;
    }

#if ENABLE_DEBUG
    if (msg->iov_count > SOCK_EP_MAX_IOV_LIMIT)
        return -FI_EINVAL;
#endif

    if (!tx_ctx->enabled)
        return -FI_EOPBADSTATE;

    if (sock_drop_packet(sock_ep))
        return 0;

    ret = sock_ep_get_conn(sock_ep, tx_ctx, msg->addr, &conn);
    if (ret)
        return ret;

    SOCK_EP_SET_TX_OP_FLAGS(flags);
    if (flags & SOCK_USE_OP_FLAGS)
        flags |= tx_ctx->attr.op_flags;

    if (sock_ep_is_send_cq_low(&tx_ctx->comp, flags)) {
        SOCK_LOG_ERROR("CQ size low\n");
        return -FI_EAGAIN;
    }

    if (flags & FI_TRIGGER) {
        /* Triggered operation: sock_queue_tmsg_op() queues the send and
         * returns 0, or returns 1 when the trigger threshold is already met
         * and the send should proceed immediately. */
        ret = sock_queue_tmsg_op(ep, msg, flags, SOCK_OP_TSEND);
        if (ret != 1)
            return ret;
    }

    memset(&tx_op, 0, sizeof(tx_op));
    tx_op.op = SOCK_OP_TSEND;

    total_len = 0;
    if (flags & FI_INJECT) {
        /* Inject: the payload itself is copied into the command ring, so
         * account for the full data length (bounded by the inject size). */
        for (i = 0; i < msg->iov_count; i++)
            total_len += msg->msg_iov[i].iov_len;

        tx_op.src_iov_len = total_len;
        if (total_len > SOCK_EP_MAX_INJECT_SZ) {
            ret = -FI_EINVAL;
            goto err;
        }
    } else {
        /* Otherwise only the iovec descriptors are written to the ring. */
        total_len = msg->iov_count * sizeof(union sock_iov);
        tx_op.src_iov_len = msg->iov_count;
    }

    total_len += sizeof(struct sock_op_tsend);
    if (flags & FI_REMOTE_CQ_DATA)
        total_len += sizeof(uint64_t);

    /* Reserve space in the TX command ring; roll back and return -FI_EAGAIN
     * if it cannot hold the complete command. */
    sock_tx_ctx_start(tx_ctx);
    if (rbavail(&tx_ctx->rb) < total_len) {
        ret = -FI_EAGAIN;
        goto err;
    }

    sock_tx_ctx_write_op_tsend(tx_ctx, &tx_op, flags,
                               (uintptr_t) msg->context, msg->addr,
                               (uintptr_t) msg->msg_iov[0].iov_base,
                               sock_ep, conn, msg->tag);

    if (flags & FI_REMOTE_CQ_DATA)
        sock_tx_ctx_write(tx_ctx, &msg->data, sizeof(msg->data));

    if (flags & FI_INJECT) {
        for (i = 0; i < msg->iov_count; i++) {
            sock_tx_ctx_write(tx_ctx, msg->msg_iov[i].iov_base,
                              msg->msg_iov[i].iov_len);
        }
    } else {
        for (i = 0; i < msg->iov_count; i++) {
            tx_iov.iov.addr = (uintptr_t) msg->msg_iov[i].iov_base;
            tx_iov.iov.len = msg->msg_iov[i].iov_len;
            sock_tx_ctx_write(tx_ctx, &tx_iov, sizeof(tx_iov));
        }
    }

    sock_tx_ctx_commit(tx_ctx);
    return 0;

err:
    sock_tx_ctx_abort(tx_ctx);
    return ret;
}
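
On the application side, sock_ep_tsendmsg() is reached through the public fi_tsendmsg() call. The sketch below shows one way an application might post a single-iov tagged send; it assumes an endpoint and destination address already set up through the usual fi_getinfo()/fi_endpoint()/fi_av_insert() sequence, which is omitted here.

#include <string.h>
#include <sys/uio.h>
#include <rdma/fabric.h>
#include <rdma/fi_tagged.h>

/* Post a single-iov tagged send; 'ep' and 'dest' are assumed to come from
 * the usual endpoint and address-vector setup. */
static ssize_t post_tagged_send(struct fid_ep *ep, fi_addr_t dest,
				void *payload, size_t len, uint64_t tag,
				void *context)
{
	struct iovec iov = { .iov_base = payload, .iov_len = len };
	struct fi_msg_tagged msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.iov_count = 1;
	msg.addr = dest;
	msg.tag = tag;
	msg.context = context;

	/* FI_COMPLETION requests a completion on the TX CQ; a return of
	 * -FI_EAGAIN means the provider's command ring was full and the call
	 * should be retried after progressing the CQ. */
	return fi_tsendmsg(ep, &msg, FI_COMPLETION);
}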