Example #1
0
static ssize_t util_cq_readerr(struct fid_cq *cq_fid, struct fi_cq_err_entry *buf,
			       uint64_t flags)
{
	struct util_cq *cq;
	struct util_cq_err_entry *err;
	struct slist_entry *entry;
	ssize_t ret;

	cq = container_of(cq_fid, struct util_cq, cq_fid);
	fastlock_acquire(&cq->cq_lock);
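	/* Error completions sit at the head of the circular queue, flagged
	 * with UTIL_FLAG_ERROR; the full details live on err_list. */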
	if (!cirque_isempty(cq->cirq) &&
	    (cirque_head(cq->cirq)->flags & UTIL_FLAG_ERROR)) {
		cirque_discard(cq->cirq);
		entry = slist_remove_head(&cq->err_list);
		err = container_of(entry, struct util_cq_err_entry, list_entry);
		*buf = err->err_entry;
		free(err);
		ret = 0;
	} else {
		ret = -FI_EAGAIN;
	}
	fastlock_release(&cq->cq_lock);
	return ret;
}
Example #2
0
ssize_t rxm_send(struct fid_ep *ep_fid, const void *buf, size_t len, void *desc,
		fi_addr_t dest_addr, void *context)
{
	struct rxm_ep *rxm_ep;
	struct fid_ep *msg_ep;
	ssize_t ret;

	rxm_ep = container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);
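	/* The cmap lock protects the connection map while dest_addr is
	 * resolved to a connected msg endpoint. */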
	fastlock_acquire(&rxm_ep->cmap->lock);
	ret = rxm_get_msg_ep(rxm_ep, dest_addr, &msg_ep);
	if (ret)
		goto unlock;

	// TODO handle the case when send fails due to connection shutdown
	ret = fi_send(msg_ep, buf, len, desc, 0, context);
unlock:
	fastlock_release(&rxm_ep->cmap->lock);
	return ret;
}
void mlx_send_callback(void *request, ucs_status_t status)
{
	struct util_cq *cq;
	struct mlx_request *mlx_req = request;
	struct fi_cq_tagged_entry *t_entry;
	struct util_cq_err_entry *err;

	cq = mlx_req->cq;

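	/* Canceled requests complete without generating a CQ entry. */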
	if (status == UCS_ERR_CANCELED) {
		ucp_request_release(request);
		return;
	}

	fastlock_acquire(&cq->cq_lock);

	t_entry = cirque_tail(cq->cirq);
	*t_entry = mlx_req->completion.tagged;
	cirque_commit(cq->cirq);

	if (status != UCS_OK) {
		t_entry->flags |= UTIL_FLAG_ERROR;
		err = calloc(1, sizeof(struct util_cq_err_entry));
		if (!err) {
			FI_WARN(&mlx_prov, FI_LOG_CQ,
				"out of memory, cannot report CQ error\n");
			/* Must not return while still holding the CQ lock. */
			fastlock_release(&cq->cq_lock);
			ucp_request_release(request);
			return;
		}

		err->err_entry = mlx_req->completion.error;
		err->err_entry.prov_errno = (int)status;
		err->err_entry.err = MLX_TRANSLATE_ERRCODE(status);
		err->err_entry.olen = 0;
		slist_insert_tail(&err->list_entry, &cq->err_list);
	}

	mlx_req->type = MLX_FI_REQ_UNINITIALIZED;

	fastlock_release(&cq->cq_lock);
	ucp_request_release(request);
}
Example #4
0
int ofi_wait_fd_add(struct util_wait *wait, int fd, uint32_t events,
		    ofi_wait_fd_try_func wait_try, void *arg, void *context)
{
	struct ofi_wait_fd_entry *fd_entry;
	struct dlist_entry *entry;
	struct util_wait_fd *wait_fd = container_of(wait, struct util_wait_fd,
						    util_wait);
	int ret = 0;

	fastlock_acquire(&wait_fd->lock);
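	/* If this fd is already on the wait list, just take another reference. */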
	entry = dlist_find_first_match(&wait_fd->fd_list, ofi_wait_fd_match, &fd);
	if (entry) {
		FI_DBG(wait->prov, FI_LOG_EP_CTRL,
		       "Given fd (%d) already added to wait list - %p \n",
		       fd, wait_fd);
		fd_entry = container_of(entry, struct ofi_wait_fd_entry, entry);
		ofi_atomic_inc32(&fd_entry->ref);
		goto out;
	}

	ret = fi_epoll_add(wait_fd->epoll_fd, fd, events, context);
	if (ret) {
		FI_WARN(wait->prov, FI_LOG_FABRIC, "Unable to add fd to epoll\n");
		goto out;
	}

	fd_entry = calloc(1, sizeof *fd_entry);
	if (!fd_entry) {
		ret = -FI_ENOMEM;
		fi_epoll_del(wait_fd->epoll_fd, fd);
		goto out;
	}
	fd_entry->fd = fd;
	fd_entry->wait_try = wait_try;
	fd_entry->arg = arg;
	ofi_atomic_initialize32(&fd_entry->ref, 1);

	dlist_insert_tail(&fd_entry->entry, &wait_fd->fd_list);
out:
	fastlock_release(&wait_fd->lock);
	return ret;
}
Example #5
0
ssize_t sock_queue_rma_op(struct fid_ep *ep, const struct fi_msg_rma *msg,
			  uint64_t flags, uint8_t op_type)
{
	struct sock_cntr *cntr;
	struct sock_trigger *trigger;
	struct fi_triggered_context *trigger_context;
	struct fi_trigger_threshold *threshold;

	trigger_context = (struct fi_triggered_context *) msg->context;
	if ((flags & FI_INJECT) || !trigger_context ||
	     (trigger_context->event_type != FI_TRIGGER_THRESHOLD))
		return -FI_EINVAL;

	threshold = &trigger_context->trigger.threshold;
	cntr = container_of(threshold->cntr, struct sock_cntr, cntr_fid);
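	/* If the counter has already reached the threshold, report back so
	 * the caller issues the operation immediately instead of queueing. */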
	if (atomic_get(&cntr->value) >= threshold->threshold)
		return 1;

	trigger = calloc(1, sizeof(*trigger));
	if (!trigger)
		return -FI_ENOMEM;

	trigger->threshold = threshold->threshold;
	memcpy(&trigger->op.rma.msg, msg, sizeof(*msg));
	trigger->op.rma.msg.msg_iov = &trigger->op.rma.msg_iov[0];
	trigger->op.rma.msg.rma_iov = &trigger->op.rma.rma_iov[0];

	memcpy(&trigger->op.rma.msg_iov[0], &msg->msg_iov[0],
	       msg->iov_count * sizeof(struct iovec));
	memcpy(&trigger->op.rma.rma_iov[0], &msg->rma_iov[0],
	       msg->rma_iov_count * sizeof(struct fi_rma_iov));

	trigger->op_type = op_type;
	trigger->ep = ep;
	trigger->flags = flags;

	fastlock_acquire(&cntr->trigger_lock);
	dlist_insert_tail(&trigger->entry, &cntr->trigger_list);
	fastlock_release(&cntr->trigger_lock);
	sock_cntr_check_trigger_list(cntr);
	return 0;
}
DIRECT_FN int gnix_pep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	int ret = FI_SUCCESS;
	struct gnix_fid_pep  *pep;
	struct gnix_fid_eq *eq;

	if (!fid || !bfid)
		return -FI_EINVAL;

	pep = container_of(fid, struct gnix_fid_pep, pep_fid.fid);

	fastlock_acquire(&pep->lock);

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		eq = container_of(bfid, struct gnix_fid_eq, eq_fid.fid);
		if (pep->fabric != eq->fabric) {
			ret = -FI_EINVAL;
			break;
		}

		if (pep->eq) {
			ret = -FI_EINVAL;
			break;
		}

		pep->eq = eq;
		_gnix_eq_poll_obj_add(eq, &pep->pep_fid.fid);
		_gnix_ref_get(eq);

		GNIX_DEBUG(FI_LOG_EP_CTRL, "Bound EQ to PEP: %p, %p\n",
			   eq, pep);
		break;
	default:
		ret = -FI_ENOSYS;
		break;
	}

	fastlock_release(&pep->lock);

	return ret;
}
Example #7
0
static int rxd_av_remove(struct fid_av *av_fid, fi_addr_t *fi_addr, size_t count,
			uint64_t flags)
{
	int ret = 0;
	size_t i;
	fi_addr_t dg_fiaddr;
	struct rxd_av *av;

	av = container_of(av_fid, struct rxd_av, util_av.av_fid);
	fastlock_acquire(&av->util_av.lock);
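	/* Each rxd-level address maps to an entry in the underlying
	 * datagram AV; remove them one at a time. */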
	for (i = 0; i < count; i++) {
		dg_fiaddr = rxd_av_dg_addr(av, fi_addr[i]);
		ret = fi_av_remove(av->dg_av, &dg_fiaddr, 1, flags);
		if (ret)
			break;
		av->dg_av_used--;
	}
	fastlock_release(&av->util_av.lock);
	return ret;
}
Example #8
0
static ssize_t sock_cq_readerr(struct fid_cq *cq, struct fi_cq_err_entry *buf,
			uint64_t flags)
{
	struct sock_cq *sock_cq;
	ssize_t ret;
	
	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL)
		sock_cq_progress(sock_cq);

	fastlock_acquire(&sock_cq->lock);
	if (rbused(&sock_cq->cqerr_rb) >= sizeof(struct fi_cq_err_entry)) {
		rbread(&sock_cq->cqerr_rb, buf, sizeof(*buf));
		ret = 1;
	} else {
		ret = -FI_EAGAIN;
	}
	fastlock_release(&sock_cq->lock);
	return ret;
}
Example #9
0
static ssize_t _sock_cq_write(struct sock_cq *cq, fi_addr_t addr,
			      const void *buf, size_t len)
{
	ssize_t ret;
	struct sock_cq_overflow_entry_t *overflow_entry;

	fastlock_acquire(&cq->lock);
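	/* If the ring buffer is full, park the completion on the overflow
	 * list rather than dropping it. */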
	if (ofi_rbfdavail(&cq->cq_rbfd) < len) {
		SOCK_LOG_ERROR("Not enough space in CQ\n");
		overflow_entry = calloc(1, sizeof(*overflow_entry) + len);
		if (!overflow_entry) {
			ret = -FI_ENOSPC;
			goto out;
		}

		memcpy(&overflow_entry->cq_entry[0], buf, len);
		overflow_entry->len = len;
		overflow_entry->addr = addr;
		dlist_insert_tail(&overflow_entry->entry, &cq->overflow_list);
		ret = len;
		goto out;
	}
	ofi_rbwrite(&cq->addr_rb, &addr, sizeof(addr));
	ofi_rbcommit(&cq->addr_rb);

	ofi_rbfdwrite(&cq->cq_rbfd, buf, len);
	if (cq->domain->progress_mode == FI_PROGRESS_MANUAL)
		ofi_rbcommit(&cq->cq_rbfd.rb);
	else
		ofi_rbfdcommit(&cq->cq_rbfd);

	ret = len;

	if (cq->signal)
		sock_wait_signal(cq->waitset);
out:
	fastlock_release(&cq->lock);
	return ret;
}
Example #10
0
static int tcpx_ep_shutdown(struct fid_ep *ep, uint64_t flags)
{
	struct tcpx_ep *tcpx_ep;
	int ret;

	tcpx_ep = container_of(ep, struct tcpx_ep, util_ep.ep_fid);

	ret = ofi_shutdown(tcpx_ep->conn_fd, SHUT_RDWR);
	if (ret && ofi_sockerr() != ENOTCONN) {
		FI_WARN(&tcpx_prov, FI_LOG_EP_DATA, "ep shutdown unsuccessful\n");
	}

	fastlock_acquire(&tcpx_ep->lock);
	ret = tcpx_ep_shutdown_report(tcpx_ep, &ep->fid);
	fastlock_release(&tcpx_ep->lock);
	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_EP_DATA, "Error writing to EQ\n");
	}

	return ret;
}
Example #11
0
static int fi_ibv_signal_send(struct fi_ibv_msg_ep *ep, struct ibv_send_wr *wr)
{
	struct fi_ibv_msg_epe *epe;

	fastlock_acquire(&ep->scq->lock);
	if (VERBS_SIGNAL_SEND(ep)) {
		epe = util_buf_alloc(ep->scq->epe_pool);
		if (!epe) {
			fastlock_release(&ep->scq->lock);
			return -FI_ENOMEM;
		}
		memset(epe, 0, sizeof(*epe));
		wr->send_flags |= IBV_SEND_SIGNALED;
		wr->wr_id = ep->ep_id;
		epe->ep = ep;
		slist_insert_tail(&epe->entry, &ep->scq->ep_list);
		ofi_atomic_inc32(&ep->comp_pending);
	}
	fastlock_release(&ep->scq->lock);
	return 0;
}
Example #12
0
int ofi_monitor_add_cache(struct ofi_mem_monitor *monitor,
			  struct ofi_mr_cache *cache)
{
	int ret = 0;

	fastlock_acquire(&monitor->lock);
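	/* Start the monitor lazily when the first cache attaches. */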
	if (dlist_empty(&monitor->list)) {
		if (monitor == uffd_monitor)
			ret = ofi_uffd_init();
		else
			ret = -FI_ENOSYS;

		if (ret)
			goto out;
	}
	cache->monitor = monitor;
	dlist_insert_tail(&cache->notify_entry, &monitor->list);
out:
	fastlock_release(&monitor->lock);
	return ret;
}
Example #13
0
static ssize_t sock_cq_readerr(struct fid_cq *cq, struct fi_cq_err_entry *buf,
			uint64_t flags)
{
	struct sock_cq *sock_cq;
	ssize_t ret;
	struct fi_cq_err_entry entry;
	uint32_t api_version;
	size_t err_data_size = 0;
	void *err_data = NULL;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL)
		sock_cq_progress(sock_cq);

	fastlock_acquire(&sock_cq->lock);
	if (ofi_rbused(&sock_cq->cqerr_rb) >= sizeof(struct fi_cq_err_entry)) {
		api_version = sock_cq->domain->fab->fab_fid.api_version;
		ofi_rbread(&sock_cq->cqerr_rb, &entry, sizeof(entry));

		if (FI_VERSION_GE(api_version, FI_VERSION(1, 5)) &&
		    buf->err_data && buf->err_data_size) {
			err_data = buf->err_data;
			err_data_size = buf->err_data_size;
			*buf = entry;
			buf->err_data = err_data;

			/* Fill provided user's buffer */
			buf->err_data_size = MIN(entry.err_data_size, err_data_size);
			memcpy(buf->err_data, entry.err_data, buf->err_data_size);
		} else {
			*buf = entry;
		}

		ret = 1;
	} else {
		ret = -FI_EAGAIN;
	}
	fastlock_release(&sock_cq->lock);
	return ret;
}
Example #14
0
static int handle_poll_list(struct poll_fd_mgr *poll_mgr)
{
	struct poll_fd_info *poll_item;
	int ret = FI_SUCCESS;
	int id = 0;

	fastlock_acquire(&poll_mgr->lock);
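	/* Drain the add/delete requests queued for the polling thread. */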
	while (!dlist_empty(&poll_mgr->list)) {
		poll_item = container_of(poll_mgr->list.next,
					 struct poll_fd_info, entry);
		dlist_remove_init(&poll_item->entry);

		if (poll_item->flags & POLL_MGR_DEL) {
			id = poll_fds_find_dup(poll_mgr, poll_item);
			assert(id > 0);
			if (id <= 0) {
				ret = -FI_EINVAL;
				goto err;
			}

			poll_fds_swap_del_last(poll_mgr, id);
			poll_item->flags |= POLL_MGR_ACK;
		} else {
			assert(poll_fds_find_dup(poll_mgr, poll_item) < 0);
			ret = poll_fds_add_item(poll_mgr, poll_item);
			if (ret) {
				FI_WARN(&tcpx_prov, FI_LOG_EP_CTRL,
					"Failed to add fd to event polling\n");
			}
		}

		if (poll_item->flags & POLL_MGR_FREE)
			free(poll_item);
		else
			poll_item->flags |= POLL_MGR_ACK;
	}
err:
	fastlock_release(&poll_mgr->lock);
	return ret;
}
Example #15
0
/*
 * All EPs use the same underlying datagram provider, so pick any and use its
 * associated CQ.
 */
static const char *rxd_cq_strerror(struct fid_cq *cq_fid, int prov_errno,
		const void *err_data, char *buf, size_t len)
{
	struct fid_list_entry *fid_entry;
	struct util_ep *util_ep;
	struct rxd_cq *cq;
	struct rxd_ep *ep;
	const char *str;

	cq = container_of(cq_fid, struct rxd_cq, util_cq.cq_fid);

	fastlock_acquire(&cq->util_cq.ep_list_lock);
	assert(!dlist_empty(&cq->util_cq.ep_list));
	fid_entry = container_of(cq->util_cq.ep_list.next,
				struct fid_list_entry, entry);
	util_ep = container_of(fid_entry->fid, struct util_ep, ep_fid.fid);
	ep = container_of(util_ep, struct rxd_ep, util_ep);

	str = fi_cq_strerror(ep->dg_cq, prov_errno, err_data, buf, len);
	fastlock_release(&cq->util_cq.ep_list_lock);
	return str;
}
Example #16
0
static int psmx2_alloc_vlane(struct psmx2_fid_domain *domain, uint8_t *vl)
{
	int i;
	int id;

	fastlock_acquire(&domain->vl_lock);
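	/* Scan the bitmap circularly, starting just past the last allocation. */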
	for (i = 0; i < BITMAP_SIZE; i++) {
		id = (domain->vl_alloc + i) % BITMAP_SIZE;
		if (bitmap_test(domain->vl_map, id) == 0) {
			bitmap_set(domain->vl_map, id);
			domain->vl_alloc = id + 1;
			break;
		}
	}
	fastlock_release(&domain->vl_lock);

	if (i >= BITMAP_SIZE)
		return -FI_ENOSPC;

	*vl = (uint8_t)id;
	return 0;
}
Example #17
0
ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
			fi_addr_t *src_addr, const void *cond, int timeout)
{
	int ret;
	fi_addr_t addr;
	int64_t threshold;
	ssize_t i, bytes_read, num_read, cq_entry_len;
	struct sock_cq *sock_cq;
	
	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	cq_entry_len = sock_cq->cq_entry_size;

	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((int64_t)cond, count);
	else
		threshold = count;

	fastlock_acquire(&sock_cq->lock);
	bytes_read = rbfdsread(&sock_cq->cq_rbfd, buf,
			       cq_entry_len * threshold, timeout);

	if (bytes_read == 0) {
		ret = -FI_ETIMEDOUT;
		goto out;
	}

	num_read = bytes_read / cq_entry_len;
	for (i = 0; i < num_read; i++) {
		rbread(&sock_cq->addr_rb, &addr, sizeof(fi_addr_t));
		if (src_addr)
			src_addr[i] = addr;
	}
	ret = num_read;

out:
	fastlock_release(&sock_cq->lock);
	return ret;
}
Example #18
0
static int sock_mr_close(struct fid *fid)
{
	struct sock_domain *dom;
	struct sock_mr *mr;
	RbtIterator it;
	RbtStatus res;
	uint64_t mr_key;

	mr = container_of(fid, struct sock_mr, mr_fid.fid);
	dom = mr->domain;
	mr_key = mr->key;

	fastlock_acquire(&dom->lock);
	it = rbtFind(dom->mr_heap, &mr_key);
	if (!it || ((res = rbtErase(dom->mr_heap, it)) != RBT_STATUS_OK))
		SOCK_LOG_ERROR("Invalid mr\n");

	fastlock_release(&dom->lock);
	atomic_dec(&dom->ref);
	free(mr);
	return 0;
}
Example #19
0
static int util_wait_fd_try(struct util_wait *wait)
{
	struct ofi_wait_fd_entry *fd_entry;
	struct util_wait_fd *wait_fd;
	void *context;
	int ret;

	wait_fd = container_of(wait, struct util_wait_fd, util_wait);
	fd_signal_reset(&wait_fd->signal);
	fastlock_acquire(&wait_fd->lock);
	dlist_foreach_container(&wait_fd->fd_list, struct ofi_wait_fd_entry,
				fd_entry, entry) {
		ret = fd_entry->wait_try(fd_entry->arg);
		if (ret != FI_SUCCESS) {
			fastlock_release(&wait_fd->lock);
			return ret;
		}
	}
	fastlock_release(&wait_fd->lock);
	ret = fi_poll(&wait->pollset->poll_fid, &context, 1);
	return (ret > 0) ? -FI_EAGAIN : (ret == -FI_EAGAIN) ? FI_SUCCESS : ret;
}
Example #20
0
int sock_cq_report_error(struct sock_cq *cq, struct sock_pe_entry *entry,
			 size_t olen, int err, int prov_errno, void *err_data,
			 size_t err_data_size)
{
	int ret;
	struct fi_cq_err_entry err_entry;

	fastlock_acquire(&cq->lock);
	if (ofi_rbavail(&cq->cqerr_rb) < sizeof(err_entry)) {
		ret = -FI_ENOSPC;
		goto out;
	}

	err_entry.err = err;
	err_entry.olen = olen;
	err_entry.err_data = err_data;
	err_entry.err_data_size = err_data_size;
	err_entry.len = entry->data_len;
	err_entry.prov_errno = prov_errno;
	err_entry.flags = entry->flags;
	err_entry.data = entry->data;
	err_entry.tag = entry->tag;
	err_entry.op_context = (void *) (uintptr_t) entry->context;

	if (entry->type == SOCK_PE_RX)
		err_entry.buf = (void *) (uintptr_t) entry->pe.rx.rx_iov[0].iov.addr;
	else
		err_entry.buf = (void *) (uintptr_t) entry->pe.tx.tx_iov[0].src.iov.addr;

	ofi_rbwrite(&cq->cqerr_rb, &err_entry, sizeof(err_entry));
	ofi_rbcommit(&cq->cqerr_rb);
	ret = 0;

	ofi_rbfdsignal(&cq->cq_rbfd);

out:
	fastlock_release(&cq->lock);
	return ret;
}
Example #21
0
void rxm_cq_progress(struct util_cq *util_cq)
{
	ssize_t ret = 0;
	struct rxm_cq *rxm_cq;
	struct fi_cq_tagged_entry *comp;

	rxm_cq = container_of(util_cq, struct rxm_cq, util_cq);

	fastlock_acquire(&util_cq->cq_lock);
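	/* Drain the underlying msg CQ into the util CQ's circular queue. */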
	do {
		if (cirque_isfull(util_cq->cirq))
			goto out;

		comp = cirque_tail(util_cq->cirq);
		ret = rxm_msg_cq_read(util_cq, rxm_cq->msg_cq, comp);
		if (ret < 0)
			goto out;
		cirque_commit(util_cq->cirq);
	} while (ret > 0);
out:
	fastlock_release(&util_cq->cq_lock);
}
Example #22
0
static int fi_ibv_reap_comp(struct fi_ibv_msg_ep *ep)
{
	struct fi_ibv_wce *wce = NULL;
	int got_wc = 0;
	int ret = 0;

	fastlock_acquire(&ep->scq->lock);
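	/* Poll until this EP's pending signaled sends have completed,
	 * saving reaped completions on the CQ's work-completion queue. */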
	while (ofi_atomic_get32(&ep->comp_pending) > 0) {
		if (!wce) {
			wce = util_buf_alloc(ep->scq->wce_pool);
			if (!wce) {
				fastlock_release(&ep->scq->lock);
				return -FI_ENOMEM;
			}
			memset(wce, 0, sizeof(*wce));
		}
		ret = fi_ibv_poll_cq(ep->scq, &wce->wc);
		if (ret < 0) {
			VERBS_WARN(FI_LOG_EP_DATA,
				   "Failed to read completion for signaled send\n");
			util_buf_release(ep->scq->wce_pool, wce);
			fastlock_release(&ep->scq->lock);
			return ret;
		} else if (ret > 0) {
			slist_insert_tail(&wce->entry, &ep->scq->wcq);
			got_wc = 1;
			wce = NULL;
		}
	}
	if (wce)
		util_buf_release(ep->scq->wce_pool, wce);

	if (got_wc && ep->scq->channel)
		ret = fi_ibv_cq_signal(&ep->scq->cq_fid);

	fastlock_release(&ep->scq->lock);
	return ret;
}
Example #23
0
static int smr_ep_cancel_recv(struct smr_ep *ep, struct smr_queue *queue,
			      void *context)
{
	struct smr_ep_entry *recv_entry;
	struct dlist_entry *entry;
	int ret = 0;

	fastlock_acquire(&ep->util_ep.rx_cq->cq_lock);
	entry = dlist_remove_first_match(&queue->list, smr_match_recv_ctx,
					 context);
	if (entry) {
		recv_entry = container_of(entry, struct smr_ep_entry, entry);
		ret = ep->rx_comp(ep, (void *) recv_entry->context,
				  recv_entry->flags | FI_RECV, 0,
				  NULL, (void *) recv_entry->addr,
				  recv_entry->tag, 0, FI_ECANCELED);
		freestack_push(ep->recv_fs, recv_entry);
		ret = ret ? ret : 1;
	}

	fastlock_release(&ep->util_ep.rx_cq->cq_lock);
	return ret;
}
Example #24
0
static int tcpx_ep_msg_xfer_enable(struct tcpx_ep *ep)
{
	int ret;

	fastlock_acquire(&ep->lock);
	if (ep->cm_state != TCPX_EP_CONNECTING) {
		fastlock_release(&ep->lock);
		return -FI_EINVAL;
	}
	ep->progress_func = tcpx_ep_progress;
	ret = fi_fd_nonblock(ep->conn_fd);
	if (ret)
		goto unlock;

	ret = tcpx_cq_wait_ep_add(ep);
	if (ret)
		goto unlock;

	ep->cm_state = TCPX_EP_CONNECTED;
unlock:
	fastlock_release(&ep->lock);
	return ret;
}
Example #25
0
ssize_t sock_cq_readerr(struct fid_cq *cq, struct fi_cq_err_entry *buf,
			size_t len, uint64_t flags)
{
	ssize_t num_read;
	struct sock_cq *sock_cq;
	
	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (len < sizeof(struct fi_cq_err_entry))
		return -FI_ETOOSMALL;

	num_read = 0;
	fastlock_acquire(&sock_cq->lock);

	/* Copy out only as many error entries as the caller's buffer holds. */
	while (rbused(&sock_cq->cqerr_rb) >= sizeof(struct fi_cq_err_entry) &&
	       (size_t)(num_read + 1) * sizeof(struct fi_cq_err_entry) <= len) {
		rbread(&sock_cq->cqerr_rb,
		       (char *)buf + sizeof(struct fi_cq_err_entry) * num_read,
		       sizeof(struct fi_cq_err_entry));
		num_read++;
	}

	fastlock_release(&sock_cq->lock);
	return num_read;
}
Example #26
0
static ssize_t _sock_cq_write(struct sock_cq *cq, fi_addr_t addr,
			      const void *buf, size_t len)
{
	ssize_t ret;

	fastlock_acquire(&cq->lock);

	if (rbfdavail(&cq->cq_rbfd) < len) {
		ret = -FI_ENOSPC;
		goto out;
	}

	rbfdwrite(&cq->cq_rbfd, buf, len);
	rbfdcommit(&cq->cq_rbfd);
	ret = len;

	rbwrite(&cq->addr_rb, &addr, sizeof(fi_addr_t));
	rbcommit(&cq->addr_rb);

out:
	fastlock_release(&cq->lock);
	return ret;
}
Example #27
0
static int rxd_cq_close(struct fid *fid)
{
	int ret;
	struct rxd_cq *cq;

	cq = container_of(fid, struct rxd_cq, util_cq.cq_fid.fid);

	fastlock_acquire(&cq->domain->lock);
	dlist_remove(&cq->dom_entry);
	fastlock_release(&cq->domain->lock);
	fastlock_destroy(&cq->lock);

	ret = fi_close(&cq->dg_cq->fid);
	if (ret)
		return ret;

	ret = ofi_cq_cleanup(&cq->util_cq);
	if (ret)
		return ret;
	util_buf_pool_destroy(cq->unexp_pool);
	free(cq);
	return 0;
}
int _gnix_buddy_alloc(gnix_buddy_alloc_handle_t *alloc_handle, void **ptr,
		      uint32_t len)
{
	uint32_t block_size, i = 0;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (unlikely(!alloc_handle || !ptr || !len ||
		     len > alloc_handle->max)) {

		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Invalid parameter to _gnix_buddy_alloc.\n");
		return -FI_EINVAL;
	}

	block_size = BLOCK_SIZE(len, MIN_BLOCK_SIZE);
	i = (uint32_t) LIST_INDEX(block_size, MIN_BLOCK_SIZE);

	fastlock_acquire(&alloc_handle->lock);

	if (__gnix_buddy_find_block(alloc_handle, i, ptr)) {
		fastlock_release(&alloc_handle->lock);
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Could not allocate buddy block.\n");
		return -FI_ENOMEM;
	}

	fastlock_release(&alloc_handle->lock);

	_gnix_set_bit(&alloc_handle->bitmap,
		      __gnix_buddy_bitmap_index(*ptr, block_size,
						alloc_handle->base,
						alloc_handle->len,
						MIN_BLOCK_SIZE));

	return FI_SUCCESS;
}
Example #29
0
static int process_srx_entry(struct tcpx_xfer_entry *rx_entry)
{
	int ret;

	ret = tcpx_recv_msg_data(rx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return ret;

	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN,
			"msg recv Failed ret = %d\n", ret);

		tcpx_ep_shutdown_report(rx_entry->ep,
					&rx_entry->ep->util_ep.ep_fid.fid);
	}

	if ((ntohl(rx_entry->msg_hdr.hdr.flags) &
	     OFI_DELIVERY_COMPLETE) && !ret) {
		if (tcpx_prepare_rx_entry_resp(rx_entry))
			rx_entry->ep->cur_rx_proc_fn = tcpx_prepare_rx_entry_resp;

		return FI_SUCCESS;
	}

	tcpx_cq_report_completion(rx_entry->ep->util_ep.rx_cq,
				  rx_entry, -ret);

	/* release the shared entry */
	if (rx_entry->ep->cur_rx_entry == rx_entry) {
		rx_entry->ep->cur_rx_entry = NULL;
	}

	fastlock_acquire(&rx_entry->ep->srx_ctx->lock);
	util_buf_release(rx_entry->ep->srx_ctx->buf_pool, rx_entry);
	fastlock_release(&rx_entry->ep->srx_ctx->lock);
	return FI_SUCCESS;
}
Example #30
0
int sock_cq_progress(struct sock_cq *cq)
{
	struct sock_tx_ctx *tx_ctx;
	struct sock_rx_ctx *rx_ctx;
	struct dlist_entry *entry;

	if (cq->domain->progress_mode == FI_PROGRESS_AUTO)
		return 0;

	fastlock_acquire(&cq->list_lock);
	for (entry = cq->tx_list.next; entry != &cq->tx_list;
	     entry = entry->next) {
		tx_ctx = container_of(entry, struct sock_tx_ctx, cq_entry);
		if (!tx_ctx->enabled)
			continue;

		if (tx_ctx->use_shared)
			sock_pe_progress_tx_ctx(cq->domain->pe, tx_ctx->stx_ctx);
		else
			sock_pe_progress_tx_ctx(cq->domain->pe, tx_ctx);
	}

	for (entry = cq->rx_list.next; entry != &cq->rx_list;
	     entry = entry->next) {
		rx_ctx = container_of(entry, struct sock_rx_ctx, cq_entry);
		if (!rx_ctx->enabled)
			continue;

		if (rx_ctx->use_shared)
			sock_pe_progress_rx_ctx(cq->domain->pe, rx_ctx->srx_ctx);
		else
			sock_pe_progress_rx_ctx(cq->domain->pe, rx_ctx);
	}
	fastlock_release(&cq->list_lock);

	return 0;
}