Example #1
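Handling a peer timeout in the rxd provider: every transmit still queued for the peer is completed as an FI_ECONNREFUSED error entry on the TX CQ, unacked packets are released back to their buffer pool, and the peer is removed from the active-peer list.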
static void rxd_peer_timeout(struct rxd_ep *rxd_ep, struct rxd_peer *peer)
{
	struct fi_cq_err_entry err_entry;
	struct rxd_x_entry *tx_entry;
	struct rxd_pkt_entry *pkt_entry;
	int ret;

	while (!dlist_empty(&peer->tx_list)) {
		dlist_pop_front(&peer->tx_list, struct rxd_x_entry, tx_entry, entry);
		memset(&err_entry, 0, sizeof(struct fi_cq_err_entry));
		/* capture the completion fields before the entry is freed */
		err_entry.op_context = tx_entry->cq_entry.op_context;
		err_entry.flags = tx_entry->cq_entry.flags;
		err_entry.err = FI_ECONNREFUSED;
		err_entry.prov_errno = 0;
		rxd_tx_entry_free(rxd_ep, tx_entry);
		ret = ofi_cq_write_error(&rxd_ep_tx_cq(rxd_ep)->util_cq, &err_entry);
		if (ret)
			FI_WARN(&rxd_prov, FI_LOG_EP_CTRL, "could not write error entry\n");
	}

	while (!dlist_empty(&peer->unacked)) {
		dlist_pop_front(&peer->unacked, struct rxd_pkt_entry, pkt_entry,
				d_entry);
		ofi_buf_free(pkt_entry);
		peer->unacked_cnt--;
	}

	dlist_remove(&peer->entry);
}
Example #2
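Cancelling a posted receive in the rxd provider: under the endpoint lock, the entry matching the caller's context is looked up, an FI_ECANCELED error entry is written to the RX CQ, and the entry is flagged RXD_CANCELLED. Returns 1 if an entry was cancelled, 0 if no match was found.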
static ssize_t rxd_ep_cancel_recv(struct rxd_ep *ep, struct dlist_entry *list,
				  void *context)
{
	struct dlist_entry *entry;
	struct rxd_x_entry *rx_entry;
	struct fi_cq_err_entry err_entry;
	int ret = 0;

	fastlock_acquire(&ep->util_ep.lock);

	entry = dlist_find_first_match(list, &rxd_match_ctx, context);
	if (!entry)
		goto out;

	rx_entry = container_of(entry, struct rxd_x_entry, entry);
	memset(&err_entry, 0, sizeof(struct fi_cq_err_entry));
	err_entry.op_context = rx_entry->cq_entry.op_context;
	err_entry.flags = rx_entry->cq_entry.flags;
	err_entry.err = FI_ECANCELED;
	err_entry.prov_errno = 0;
	ret = ofi_cq_write_error(&rxd_ep_rx_cq(ep)->util_cq, &err_entry);
	if (ret) {
		FI_WARN(&rxd_prov, FI_LOG_EP_CTRL, "could not write error entry\n");
		goto out;
	}

	rx_entry->flags |= RXD_CANCELLED;

	ret = 1;
out:
	fastlock_release(&ep->util_ep.lock);
	return ret;
}
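
For context, here is a minimal application-side sketch of the path Example #2 serves, assuming a standard libfabric endpoint and receive CQ; ep, cq, and recv_ctx are hypothetical handles, not names from the example. fi_cancel() is what dispatches into a provider routine such as rxd_ep_cancel_recv(), and fi_cq_readerr() is how the application retrieves the FI_ECANCELED entry it writes.

#include <rdma/fabric.h>
#include <rdma/fi_endpoint.h>
#include <rdma/fi_eq.h>
#include <rdma/fi_errno.h>

/* Cancel a receive posted with recv_ctx and reap the FI_ECANCELED entry. */
static int cancel_posted_recv(struct fid_ep *ep, struct fid_cq *cq,
			      void *recv_ctx)
{
	struct fi_cq_err_entry err_entry = {0};
	ssize_t ret;

	/* Routed by the provider to a handler such as rxd_ep_cancel_recv(). */
	ret = fi_cancel(&ep->fid, recv_ctx);
	if (ret)
		return (int) ret;

	/* Cancellation completes asynchronously: poll until the error
	 * entry the provider wrote shows up on the receive CQ. */
	do {
		ret = fi_cq_readerr(cq, &err_entry, 0);
	} while (ret == -FI_EAGAIN);

	/* On success: err_entry.op_context == recv_ctx,
	 * err_entry.err == FI_ECANCELED. */
	return ret < 0 ? (int) ret : 0;
}
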
Example #3
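Reporting a transfer failure in the tcpx provider: every field of the fi_cq_err_entry is populated explicitly, remote CQ data is carried through when the wire header flags it, and the socket error is recorded as prov_errno. Entries posted without FI_COMPLETION are skipped.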
void tcpx_cq_report_error(struct util_cq *cq,
			  struct tcpx_xfer_entry *xfer_entry,
			  int err)
{
	struct fi_cq_err_entry err_entry;
	uint64_t data = 0;

	if (!(xfer_entry->flags & FI_COMPLETION))
		return;

	if (xfer_entry->hdr.base_hdr.flags & OFI_REMOTE_CQ_DATA) {
		xfer_entry->flags |= FI_REMOTE_CQ_DATA;
		data = xfer_entry->hdr.cq_data_hdr.cq_data;
	}

	err_entry.op_context = xfer_entry->context;
	err_entry.flags = xfer_entry->flags;
	err_entry.len = 0;
	err_entry.buf = NULL;
	err_entry.data = data;
	err_entry.tag = 0;
	err_entry.olen = 0;
	err_entry.err = err;
	err_entry.prov_errno = ofi_sockerr();
	err_entry.err_data = NULL;
	err_entry.err_data_size = 0;

	ofi_cq_write_error(cq, &err_entry);
}
Example #4
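A tcpx progress-engine completion path: depending on the operation type, the result is reported to the TX or RX CQ as either an error entry or a normal completion, and the matching counter is updated (adderr() on failure, add() on success).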
static void report_pe_entry_completion(struct tcpx_pe_entry *pe_entry, int err)
{
	struct fi_cq_err_entry err_entry;
	struct tcpx_ep *ep = pe_entry->ep;
	struct util_cq *cq = NULL;
	struct util_cntr *cntr = NULL;

	if (pe_entry->flags & TCPX_NO_COMPLETION) {
		return;
	}

	switch (pe_entry->msg_hdr.op_data) {
	case TCPX_OP_MSG_SEND:
		cq = ep->util_ep.tx_cq;
		cntr = ep->util_ep.tx_cntr;
		break;
	case TCPX_OP_MSG_RECV:
		cq = ep->util_ep.rx_cq;
		cntr = ep->util_ep.rx_cntr;
		break;
	default:
		return;
	}

	if (cq && err) {
		err_entry.op_context = pe_entry->context;
		err_entry.flags = pe_entry->flags;
		err_entry.len = 0;
		err_entry.buf = NULL;
		err_entry.data = pe_entry->msg_hdr.data;
		err_entry.tag = 0;
		err_entry.olen = 0;
		err_entry.err = err;
		err_entry.prov_errno = errno;
		err_entry.err_data = NULL;
		err_entry.err_data_size = 0;

		ofi_cq_write_error(cq, &err_entry);
	} else if (cq) {
		ofi_cq_write(cq, pe_entry->context,
			     pe_entry->flags, 0, NULL,
			     pe_entry->msg_hdr.data, 0);
	}

	if (cntr && err) {
		cntr->cntr_fid.ops->adderr(&cntr->cntr_fid, 1);
	} else if (cntr) {
		cntr->cntr_fid.ops->add(&cntr->cntr_fid, 1);
	}
}
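
All four examples feed the same consumer mechanism, so a minimal sketch of the reading side may help; it assumes a CQ opened with FI_CQ_FORMAT_DATA, and cq is a hypothetical handle. fi_cq_read() reports -FI_EAVAIL while an error entry written via ofi_cq_write_error() is pending, and fi_cq_readerr() retrieves it.

#include <stdio.h>
#include <rdma/fabric.h>
#include <rdma/fi_eq.h>
#include <rdma/fi_errno.h>

/* Drain one CQ; the comp layout matches FI_CQ_FORMAT_DATA. */
static void drain_cq(struct fid_cq *cq)
{
	struct fi_cq_data_entry comp;
	struct fi_cq_err_entry err_entry = {0};
	ssize_t ret;

	for (;;) {
		ret = fi_cq_read(cq, &comp, 1);
		if (ret == 1)
			continue;	/* normal completion (ofi_cq_write() path) */

		if (ret == -FI_EAVAIL) {
			/* An error entry (ofi_cq_write_error() path) is queued. */
			ret = fi_cq_readerr(cq, &err_entry, 0);
			if (ret == 1)
				fprintf(stderr, "op failed: %s (prov_errno %d)\n",
					fi_strerror(err_entry.err),
					err_entry.prov_errno);
			continue;
		}
		break;		/* -FI_EAGAIN: queue drained */
	}
}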