Example #1
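/* Drain the deferred AM send queue.  The queue lock is dropped around
 * psmx_am_process_send() so the handler can complete or re-queue
 * requests without deadlocking on the same lock. */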
int psmx_am_progress(struct psmx_fid_domain *domain)
{
	struct slist_entry *item;
	struct psmx_am_request *req;
	struct psmx_trigger *trigger;

#if PSMX_AM_USE_SEND_QUEUE
	pthread_mutex_lock(&domain->send_queue.lock);
	while (!slist_empty(&domain->send_queue.list)) {
		item = slist_remove_head(&domain->send_queue.list);
		req = container_of(item, struct psmx_am_request, list_entry);
		if (req->state == PSMX_AM_STATE_DONE) {
			free(req);
		}
		else {
			pthread_mutex_unlock(&domain->send_queue.lock);
			psmx_am_process_send(domain, req);
			pthread_mutex_lock(&domain->send_queue.lock);
		}
	}
	pthread_mutex_unlock(&domain->send_queue.lock);
#endif

	if (psmx_env.tagged_rma) {
		pthread_mutex_lock(&domain->rma_queue.lock);
		while (!slist_empty(&domain->rma_queue.list)) {
			item = slist_remove_head(&domain->rma_queue.list);
			req = container_of(item, struct psmx_am_request, list_entry);
			pthread_mutex_unlock(&domain->rma_queue.lock);
			psmx_am_process_rma(domain, req);
			pthread_mutex_lock(&domain->rma_queue.lock);
		}
		pthread_mutex_unlock(&domain->rma_queue.lock);
	}
Example #2
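/* Hand out an rx entry from a lazily created pool: the backing array
 * and its free list are allocated on first use, after which entries
 * are popped from the head of pool_list. */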
struct sock_rx_entry *sock_rx_new_entry(struct sock_rx_ctx *rx_ctx)
{
	struct sock_rx_entry *rx_entry;
	struct slist_entry *entry;
	int i;

	if (rx_ctx->rx_entry_pool == NULL) {
		rx_ctx->rx_entry_pool = calloc(rx_ctx->attr.size,
						sizeof(*rx_entry));
		if (!rx_ctx->rx_entry_pool)
			return NULL;

		slist_init(&rx_ctx->pool_list);

		for (i = 0; i < rx_ctx->attr.size; i++) {
			slist_insert_tail(&rx_ctx->rx_entry_pool[i].pool_entry,
					  &rx_ctx->pool_list);
			rx_ctx->rx_entry_pool[i].is_pool_entry = 1;
		}
	}

	if (!slist_empty(&rx_ctx->pool_list)) {
		entry = slist_remove_head(&rx_ctx->pool_list);
		rx_entry = container_of(entry, struct sock_rx_entry, pool_entry);
		rx_entry->rx_ctx = rx_ctx;
	} else {
Example #3
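/* Free every chunk backing the freelist; slist_remove_head() returns
 * NULL once the list is empty, which terminates the loop. */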
void _gnix_sfl_destroy(struct gnix_s_freelist *fl)
{
	assert(fl);

	struct slist_entry *chunk;

	for (chunk = slist_remove_head(&fl->chunks);
	     chunk != NULL;
	     chunk = slist_remove_head(&fl->chunks)) {
		free(chunk);
	}

	if (fl->ts)
		fastlock_destroy(&fl->lock);
}
Example #4
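/* Close an EQ: refuse while references remain, free any still-queued
 * events, then tear down the wait set, lock, and fabric reference. */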
static int util_eq_close(struct fid *fid)
{
	struct util_eq *eq;
	struct slist_entry *entry;
	struct util_event *event;

	eq = container_of(fid, struct util_eq, eq_fid.fid);
	if (ofi_atomic_get32(&eq->ref))
		return -FI_EBUSY;

	while (!slist_empty(&eq->list)) {
		entry = slist_remove_head(&eq->list);
		event = container_of(entry, struct util_event, entry);
		free(event);
	}

	if (eq->wait) {
		fi_poll_del(&eq->wait->pollset->poll_fid,
			    &eq->eq_fid.fid, 0);
		if (eq->internal_wait)
			fi_close(&eq->wait->wait_fid.fid);
	}

	fastlock_destroy(&eq->lock);
	ofi_atomic_dec32(&eq->fabric->ref);
	free(eq);
	return 0;
}
Example #5
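/* On a TCPX_OP_MSG_RESP header, complete the oldest entry on the tx
 * response-pending queue: report its completion, pop it, and release
 * it back to the tx CQ's buffer pool. */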
int tcpx_get_rx_entry_op_msg(struct tcpx_ep *tcpx_ep)
{
	struct tcpx_xfer_entry *rx_entry;
	struct tcpx_xfer_entry *tx_entry;
	struct slist_entry *entry;
	struct tcpx_cq *tcpx_cq;
	struct tcpx_rx_detect *rx_detect = &tcpx_ep->rx_detect;
	int ret;

	tcpx_cq = container_of(tcpx_ep->util_ep.rx_cq,
			       struct tcpx_cq, util_cq);

	if (rx_detect->hdr.hdr.op_data == TCPX_OP_MSG_RESP) {
		assert(!slist_empty(&tcpx_ep->tx_rsp_pend_queue));
		entry = tcpx_ep->tx_rsp_pend_queue.head;
		tx_entry = container_of(entry, struct tcpx_xfer_entry,
					entry);

		tcpx_cq = container_of(tcpx_ep->util_ep.tx_cq, struct tcpx_cq,
				       util_cq);
		tcpx_cq_report_completion(tx_entry->ep->util_ep.tx_cq,
					  tx_entry, 0);

		slist_remove_head(&tx_entry->ep->tx_rsp_pend_queue);
		tcpx_xfer_entry_release(tcpx_cq, tx_entry);
		rx_detect->done_len = 0;
		return -FI_EAGAIN;
	}
Example #6
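/* Transmit one queued entry.  A transient -FI_EAGAIN leaves it queued
 * for the next progress pass; otherwise the completion is reported and
 * the entry is either released or, if delivery/commit completion was
 * requested, parked on tx_rsp_pend_queue to await the response. */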
void process_tx_entry(struct tcpx_xfer_entry *tx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret;

	ret = tcpx_send_msg(tx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return;

	if (!ret)
		goto done;

	FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg send failed\n");

	if (ret == -FI_ENOTCONN)
		tcpx_ep_shutdown_report(tx_entry->ep,
					&tx_entry->ep->util_ep.ep_fid.fid);
done:
	tcpx_cq_report_completion(tx_entry->ep->util_ep.tx_cq,
				  tx_entry, -ret);
	slist_remove_head(&tx_entry->ep->tx_queue);

	if (ntohl(tx_entry->msg_hdr.hdr.flags) &
	    (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) {
		tx_entry->flags |= FI_COMPLETION;
		slist_insert_tail(&tx_entry->entry,
				  &tx_entry->ep->tx_rsp_pend_queue);
		return;
	}

	tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_release(tcpx_cq, tx_entry);
}
Example #7
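/* Queue a TCPX_OP_MSG_RESP acknowledgment back to the sender, then
 * complete the received message and release its rx entry. */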
static int tcpx_prepare_rx_entry_resp(struct tcpx_xfer_entry *rx_entry)
{
	struct tcpx_cq *tcpx_rx_cq, *tcpx_tx_cq;
	struct tcpx_xfer_entry *resp_entry;

	tcpx_tx_cq = container_of(rx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);

	resp_entry = tcpx_xfer_entry_alloc(tcpx_tx_cq, TCPX_OP_MSG_RESP);
	if (!resp_entry)
		return -FI_EAGAIN;

	resp_entry->msg_data.iov[0].iov_base = (void *) &resp_entry->msg_hdr;
	resp_entry->msg_data.iov[0].iov_len = sizeof(resp_entry->msg_hdr);
	resp_entry->msg_data.iov_cnt = 1;

	resp_entry->msg_hdr.hdr.op = ofi_op_msg;
	resp_entry->msg_hdr.hdr.size = htonll(sizeof(resp_entry->msg_hdr));

	resp_entry->flags = 0;
	resp_entry->context = NULL;
	resp_entry->done_len = 0;
	resp_entry->ep = rx_entry->ep;
	tcpx_tx_queue_insert(resp_entry->ep, resp_entry);

	tcpx_cq_report_completion(rx_entry->ep->util_ep.rx_cq,
				  rx_entry, 0);
	slist_remove_head(&rx_entry->ep->rx_queue);
	tcpx_rx_cq = container_of(rx_entry->ep->util_ep.rx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_release(tcpx_rx_cq, rx_entry);
	return FI_SUCCESS;
}
Example #8
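/* A second variant of process_tx_entry: the header is byte-swapped
 * back after the wire send, success and failure are reported through
 * separate CQ helpers, and only successful sends can wait on
 * tx_rsp_pend_queue. */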
static void process_tx_entry(struct tcpx_xfer_entry *tx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret;

	ret = tcpx_send_msg(tx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return;

	/* Keep this path below as a single pass path.*/
	tx_entry->ep->hdr_bswap(&tx_entry->hdr.base_hdr);
	slist_remove_head(&tx_entry->ep->tx_queue);

	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg send failed\n");
		tcpx_ep_shutdown_report(tx_entry->ep,
					&tx_entry->ep->util_ep.ep_fid.fid);
		tcpx_cq_report_error(tx_entry->ep->util_ep.tx_cq,
				     tx_entry, ret);
	} else {
		tcpx_cq_report_success(tx_entry->ep->util_ep.tx_cq,
				       tx_entry);

		if (tx_entry->hdr.base_hdr.flags &
		    (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) {
			tx_entry->flags |= FI_COMPLETION;
			slist_insert_tail(&tx_entry->entry,
					  &tx_entry->ep->tx_rsp_pend_queue);
			return;
		}
	}
	tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_release(tcpx_cq, tx_entry);
}
Example #9
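/* Progress a pending RMA read: receive the response data, report the
 * completion on the tx CQ (the read was initiated from the tx side),
 * and release the entry. */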
static int process_rx_read_entry(struct tcpx_xfer_entry *rx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret;

	ret = tcpx_recv_msg_data(rx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return ret;

	if (!ret)
		goto done;

	FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg recv Failed ret = %d\n", ret);

	if (ret == -FI_ENOTCONN)
		tcpx_ep_shutdown_report(rx_entry->ep,
					&rx_entry->ep->util_ep.ep_fid.fid);
done:
	tcpx_cq_report_completion(rx_entry->ep->util_ep.tx_cq,
				  rx_entry, -ret);
	slist_remove_head(&rx_entry->ep->rma_read_queue);
	tcpx_cq = container_of(rx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_release(tcpx_cq, rx_entry);
	return FI_SUCCESS;
}
Example #10
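/* Close a PSM2 endpoint: a secondary endpoint just drops its reference
 * to the base endpoint; a base endpoint is torn down only once its own
 * reference count reaches zero. */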
static int psmx2_ep_close(fid_t fid)
{
	struct psmx2_fid_ep *ep;
	struct slist_entry *entry;
	struct psmx2_context *item;

	ep = container_of(fid, struct psmx2_fid_ep, ep.fid);

	if (ep->base_ep) {
		atomic_dec(&ep->base_ep->ref);
		return 0;
	}

	if (atomic_get(&ep->ref))
		return -FI_EBUSY;

	ep->domain->eps[ep->vlane] = NULL;
	psmx2_free_vlane(ep->domain, ep->vlane);
	psmx2_domain_release(ep->domain);

	while (!slist_empty(&ep->free_context_list)) {
		entry = slist_remove_head(&ep->free_context_list);
		item = container_of(entry, struct psmx2_context, list_entry);
		free(item);
	}

	fastlock_destroy(&ep->context_lock);

	free(ep);

	return 0;
}
Example #11
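/* Read, or with FI_PEEK merely inspect, the event at the head of the
 * EQ.  Error entries must be retrieved with UTIL_FLAG_ERROR; a pending
 * error otherwise surfaces as -FI_EAVAIL. */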
ssize_t ofi_eq_read(struct fid_eq *eq_fid, uint32_t *event,
		    void *buf, size_t len, uint64_t flags)
{
	struct util_eq *eq;
	struct util_event *entry;
	void *user_err_data = NULL;
	ssize_t ret;

	eq = container_of(eq_fid, struct util_eq, eq_fid);

	fastlock_acquire(&eq->lock);
	if (slist_empty(&eq->list)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	entry = container_of(eq->list.head, struct util_event, entry);
	if (entry->err && !(flags & UTIL_FLAG_ERROR)) {
		ret = -FI_EAVAIL;
		goto out;
	} else if (!entry->err && (flags & UTIL_FLAG_ERROR)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	if (event)
		*event = entry->event;
	if (buf) {
		if (flags & UTIL_FLAG_ERROR) {
			if (eq->saved_err_data) {
				free(eq->saved_err_data);
				eq->saved_err_data = NULL;
			}
			assert((size_t)entry->size == sizeof(struct fi_eq_err_entry));
			user_err_data = ((struct fi_eq_err_entry *)buf)->err_data;
			ofi_eq_handle_err_entry(eq->fabric->fabric_fid.api_version,
						(struct fi_eq_err_entry *)entry->data,
						(struct fi_eq_err_entry *)buf);
			ret = (ssize_t)entry->size;
		} else {
			ret = MIN(len, (size_t)entry->size);
			memcpy(buf, entry->data, ret);
		}
	} else {
		ret = 0;
	}

	if (!(flags & FI_PEEK)) {
		if ((flags & UTIL_FLAG_ERROR) && !user_err_data &&
		    ((struct fi_eq_err_entry *)buf)->err_data) {
			eq->saved_err_data = ((struct fi_eq_err_entry *)buf)->err_data;
		}
		slist_remove_head(&eq->list);
		free(entry);
	}
out:
	fastlock_release(&eq->lock);
	return ret;
}
Example #12
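/* Flush all four transfer queues under the endpoint lock, returning
 * each entry to the pool of the CQ it was allocated from. */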
static void tcpx_ep_tx_rx_queues_release(struct tcpx_ep *ep)
{
	struct slist_entry *entry;
	struct tcpx_xfer_entry *xfer_entry;
	struct tcpx_cq *tcpx_cq;

	fastlock_acquire(&ep->lock);
	while (!slist_empty(&ep->tx_queue)) {
		entry = ep->tx_queue.head;
		xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry);
		slist_remove_head(&ep->tx_queue);
		tcpx_cq = container_of(xfer_entry->ep->util_ep.tx_cq,
				       struct tcpx_cq, util_cq);
		tcpx_xfer_entry_release(tcpx_cq, xfer_entry);
	}

	while (!slist_empty(&ep->rx_queue)) {
		entry = ep->rx_queue.head;
		xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry);
		slist_remove_head(&ep->rx_queue);
		tcpx_cq = container_of(xfer_entry->ep->util_ep.rx_cq,
				       struct tcpx_cq, util_cq);
		tcpx_xfer_entry_release(tcpx_cq, xfer_entry);
	}

	while (!slist_empty(&ep->rma_read_queue)) {
		entry = ep->rma_read_queue.head;
		xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry);
		slist_remove_head(&ep->rma_read_queue);
		tcpx_cq = container_of(xfer_entry->ep->util_ep.tx_cq,
				       struct tcpx_cq, util_cq);
		tcpx_xfer_entry_release(tcpx_cq, xfer_entry);
	}

	while (!slist_empty(&ep->tx_rsp_pend_queue)) {
		entry = ep->tx_rsp_pend_queue.head;
		xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry);
		slist_remove_head(&ep->tx_rsp_pend_queue);
		tcpx_cq = container_of(xfer_entry->ep->util_ep.tx_cq,
				       struct tcpx_cq, util_cq);
		tcpx_xfer_entry_release(tcpx_cq, xfer_entry);
	}

	fastlock_release(&ep->lock);
}
Example #13
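/* Read up to count completions: buffered entries on the wcq are
 * consumed first, then the verbs CQ is polled directly.  Flush errors
 * from an already-torn-down connection are skipped; any other error is
 * buffered on the wcq and reported as -FI_EAVAIL. */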
static ssize_t fi_ibv_cq_read(struct fid_cq *cq_fid, void *buf, size_t count)
{
	struct fi_ibv_cq *cq;
	struct fi_ibv_wce *wce;
	struct slist_entry *entry;
	struct ibv_wc wc;
	ssize_t ret = 0, i;

	cq = container_of(cq_fid, struct fi_ibv_cq, util_cq.cq_fid);

	cq->util_cq.cq_fastlock_acquire(&cq->util_cq.cq_lock);

	for (i = 0; i < count; i++) {
		if (!slist_empty(&cq->wcq)) {
			wce = container_of(cq->wcq.head, struct fi_ibv_wce, entry);
			if (wce->wc.status) {
				ret = -FI_EAVAIL;
				break;
			}
			entry = slist_remove_head(&cq->wcq);
			wce = container_of(entry, struct fi_ibv_wce, entry);
			cq->read_entry(&wce->wc, (char *)buf + i * cq->entry_size);
			util_buf_release(cq->wce_pool, wce);
			continue;
		}

		ret = fi_ibv_poll_cq(cq, &wc);
		if (ret <= 0)
			break;

		/* Insert error entry into wcq */
		if (OFI_UNLIKELY(wc.status)) {
			if (wc.status == IBV_WC_WR_FLUSH_ERR) {
				/* Handle case when remote side destroys
				 * the connection, but local side isn't aware
				 * about that yet */
				VERBS_DBG(FI_LOG_CQ,
					  "Ignoring WC with status "
					  "IBV_WC_WR_FLUSH_ERR(%d)\n",
					  wc.status);
				i--;
				continue;
			}
			wce = util_buf_alloc(cq->wce_pool);
			if (!wce) {
				cq->util_cq.cq_fastlock_release(&cq->util_cq.cq_lock);
				return -FI_ENOMEM;
			}
			memset(wce, 0, sizeof(*wce));
			memcpy(&wce->wc, &wc, sizeof wc);
			slist_insert_tail(&wce->entry, &cq->wcq);
			ret = -FI_EAVAIL;
			break;
		}

		cq->read_entry(&wc, (char *)buf + i * cq->entry_size);
	}
Example #14
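/* Pop a buffer from the pool's free list.  The entry is dereferenced
 * unconditionally, so the caller must ensure the list is non-empty,
 * e.g. by growing the pool first. */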
void *util_buf_get(struct util_buf_pool *pool)
{
	struct slist_entry *entry;
	struct util_buf_footer *buf_ftr;

	entry = slist_remove_head(&pool->buf_list);
	buf_ftr = (struct util_buf_footer *) ((char *) entry + pool->data_sz);
	buf_ftr->region->num_used++;
	return entry;
}
Example #15
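/* Destroy a mailbox allocator: refuse while any mailbox is still in
 * use, then free each slab and close the backing map file. */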
int _gnix_mbox_allocator_destroy(struct gnix_mbox_alloc_handle *alloc_handle)
{
	struct slist_entry *entry;
	struct gnix_slab *temp;
	char error_buf[256];
	int position;
	char *error;
	int ret = FI_SUCCESS;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (!alloc_handle) {
		GNIX_WARN(FI_LOG_EP_CTRL, "Invalid alloc handle.\n");
		return -FI_EINVAL;
	}

	position = __find_used(alloc_handle, &temp);
	if (position >= 0) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Can't destroy, not all mailboxes have been returned (pos = %d).\n",
			  position);
		return -FI_EBUSY;
	}

	while (!slist_empty(&alloc_handle->slab_list)) {
		entry = slist_remove_head(&alloc_handle->slab_list);

		temp = container_of(entry, struct gnix_slab, list_entry);

		ret = __destroy_slab(alloc_handle, temp);
		if (ret)
			GNIX_WARN(FI_LOG_EP_CTRL,
				  "Error destroying slab.\n");
	}

	if (alloc_handle->filename != NULL)
		free(alloc_handle->filename);

	if (alloc_handle->fd != -1)
		ret = close(alloc_handle->fd);

	if (ret) {
		error = strerror_r(errno, error_buf, sizeof(error_buf));
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Error closing map file: %s\n",
			  error);
	}

	fastlock_destroy(&alloc_handle->lock);

	free(alloc_handle);

	return FI_SUCCESS;
}
Example #16
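/* Dequeue the oldest CQ event without taking the CQ lock; contrast the
 * locked variant in Example #18. */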
static struct psmx_cq_event *psmx_cq_dequeue_event(struct psmx_fid_cq *cq)
{
	struct slist_entry *entry;

	if (slist_empty(&cq->event_queue))
		return NULL;

	entry = slist_remove_head(&cq->event_queue);
	cq->event_count--;

	return container_of(entry, struct psmx_cq_event, list_entry);
}
Example #17
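/* Dequeue the oldest error event with the EQ lock held across both the
 * empty check and the removal, so neither can race a concurrent
 * insert. */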
static struct psmx2_eq_event *psmx2_eq_dequeue_error(struct psmx2_fid_eq *eq)
{
	struct slist_entry *entry;

	fastlock_acquire(&eq->lock);
	if (slist_empty(&eq->error_queue)) {
		fastlock_release(&eq->lock);
		return NULL;
	}
	entry = slist_remove_head(&eq->error_queue);
	fastlock_release(&eq->lock);

	return container_of(entry, struct psmx2_eq_event, list_entry);
}
Example #18
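/* Locked counterpart of the dequeue in Example #16: the empty check,
 * removal, and counter update all happen under cq->lock. */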
static struct psmx_cq_event *psmx_cq_dequeue_event(struct psmx_fid_cq *cq)
{
	struct slist_entry *entry;

	fastlock_acquire(&cq->lock);
	if (slist_empty(&cq->event_queue)) {
		fastlock_release(&cq->lock);
		return NULL;
	}
	entry = slist_remove_head(&cq->event_queue);
	cq->event_count--;
	fastlock_release(&cq->lock);

	return container_of(entry, struct psmx_cq_event, list_entry);
}
Example #19
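/* Allocate a freelist entry, refilling the list when it runs dry.  The
 * refill size grows geometrically by growth_factor, capped at
 * max_refill_size. */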
int _gnix_sfe_alloc(struct slist_entry **e, struct gnix_s_freelist *fl)
{
	int ret = FI_SUCCESS;

	assert(fl);

	if (fl->ts)
		fastlock_acquire(&fl->lock);

	struct slist_entry *se = slist_remove_head(&fl->freelist);

	if (!se) {
		ret = __gnix_sfl_refill(fl, fl->refill_size);
		if (ret != FI_SUCCESS)
			goto err;
		if (fl->refill_size < fl->max_refill_size) {
			int ns = fl->refill_size * fl->growth_factor;

			fl->refill_size = (ns >= fl->max_refill_size ?
					   fl->max_refill_size :
					   ns);
		}
		se = slist_remove_head(&fl->freelist);
		if (!se) {
			/* Can't happen unless multithreaded */
			ret = -FI_EAGAIN;
			goto err;
		}
	}

	*e = se;
err:
	if (fl->ts)
		fastlock_release(&fl->lock);
	return ret;
}
Example #20
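/* Receives that arrived out of order wait on ooo_recv_queue; the head
 * is popped only once it carries the next expected sequence number,
 * preserving in-order delivery. */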
static
struct mrail_ooo_recv *mrail_get_next_recv(struct mrail_peer_info *peer_info)
{
	struct slist *queue = &peer_info->ooo_recv_queue;
	struct mrail_ooo_recv *ooo_recv;

	if (!slist_empty(queue)) {
		ooo_recv = container_of(queue->head, struct mrail_ooo_recv,
				entry);
		if (ooo_recv->seq_no == peer_info->expected_seq_no) {
			slist_remove_head(queue);
			peer_info->expected_seq_no++;
			return ooo_recv;
		}
	}
Example #21
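/* Read the error completion at the head of the wcq and translate the
 * verbs status into an fi_cq_err_entry.  From API version 1.5 on, the
 * vendor error is copied into the caller-supplied err_data buffer. */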
static ssize_t
fi_ibv_cq_readerr(struct fid_cq *cq_fid, struct fi_cq_err_entry *entry,
		  uint64_t flags)
{
	struct fi_ibv_cq *cq;
	struct fi_ibv_wce *wce;
	struct slist_entry *slist_entry;
	uint32_t api_version;

	cq = container_of(cq_fid, struct fi_ibv_cq, util_cq.cq_fid);

	cq->util_cq.cq_fastlock_acquire(&cq->util_cq.cq_lock);
	if (slist_empty(&cq->wcq))
		goto err;

	wce = container_of(cq->wcq.head, struct fi_ibv_wce, entry);
	if (!wce->wc.status)
		goto err;

	api_version = cq->util_cq.domain->fabric->fabric_fid.api_version;

	slist_entry = slist_remove_head(&cq->wcq);
	cq->util_cq.cq_fastlock_release(&cq->util_cq.cq_lock);

	wce = container_of(slist_entry, struct fi_ibv_wce, entry);

	entry->op_context = (void *)(uintptr_t)wce->wc.wr_id;
	entry->err = EIO;
	entry->prov_errno = wce->wc.status;
	fi_ibv_handle_wc(&wce->wc, &entry->flags, &entry->len, &entry->data);

	if ((FI_VERSION_GE(api_version, FI_VERSION(1, 5))) &&
		entry->err_data && entry->err_data_size) {
		entry->err_data_size = MIN(entry->err_data_size,
			sizeof(wce->wc.vendor_err));
		memcpy(entry->err_data, &wce->wc.vendor_err, entry->err_data_size);
	} else {
		memcpy(&entry->err_data, &wce->wc.vendor_err,
			sizeof(wce->wc.vendor_err));
	}

	util_buf_release(cq->wce_pool, wce);
	return 1;
err:
	cq->util_cq.cq_fastlock_release(&cq->util_cq.cq_lock);
	return -FI_EAGAIN;
}
Example #22
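/* A simpler variant of the verbs CQ read path: the same wcq-first
 * scheme as Example #13, but with a single fastlock and no special
 * handling of flush errors. */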
static ssize_t fi_ibv_cq_read(struct fid_cq *cq_fid, void *buf, size_t count)
{
	struct fi_ibv_cq *cq;
	struct fi_ibv_wce *wce;
	struct slist_entry *entry;
	struct ibv_wc wc;
	ssize_t ret = 0, i;

	cq = container_of(cq_fid, struct fi_ibv_cq, cq_fid);

	fastlock_acquire(&cq->lock);

	for (i = 0; i < count; i++) {
		if (!slist_empty(&cq->wcq)) {
			wce = container_of(cq->wcq.head, struct fi_ibv_wce, entry);
			if (wce->wc.status) {
				ret = -FI_EAVAIL;
				break;
			}
			entry = slist_remove_head(&cq->wcq);
			wce = container_of(entry, struct fi_ibv_wce, entry);
			cq->read_entry(&wce->wc, i, buf);
			util_buf_release(cq->domain->fab->wce_pool, wce);
			continue;
		}

		ret = fi_ibv_poll_cq(cq, &wc);
		if (ret <= 0)
			break;

		/* Insert error entry into wcq */
		if (wc.status) {
			wce = util_buf_alloc(cq->domain->fab->wce_pool);
			if (!wce) {
				fastlock_release(&cq->lock);
				return -FI_ENOMEM;
			}
			memset(wce, 0, sizeof(*wce));
			memcpy(&wce->wc, &wc, sizeof wc);
			slist_insert_tail(&wce->entry, &cq->wcq);
			ret = -FI_EAVAIL;
			break;
		}

		cq->read_entry(&wc, i, buf);
	}
Example #23
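/* Pop a tx descriptor from the NIC's error list.  Returns 1 with *txd
 * set when one was available, else 0; relies on slist_remove_head()
 * returning NULL for an empty list. */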
static int __gnix_nic_txd_err_get(struct gnix_nic *nic,
				  struct gnix_tx_descriptor **txd)
{
	struct slist_entry *list_entry;
	struct gnix_tx_descriptor *txd_p;

	list_entry = slist_remove_head(&nic->err_txds);
	if (list_entry) {
		txd_p = container_of(list_entry,
				     struct gnix_tx_descriptor,
				     err_list);
		*txd = txd_p;
		return 1;
	}

	return 0;
}
Example #24
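/* Send-queue drain guarded by a fastlock: as in Example #1, the lock
 * is released around psmx_am_process_send() and re-acquired before the
 * next empty check. */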
int psmx_am_progress(struct psmx_fid_domain *domain)
{
	struct slist_entry *item;
	struct psmx_am_request *req;
	struct psmx_trigger *trigger;

	if (psmx_env.am_msg) {
		fastlock_acquire(&domain->send_queue.lock);
		while (!slist_empty(&domain->send_queue.list)) {
			item = slist_remove_head(&domain->send_queue.list);
			req = container_of(item, struct psmx_am_request, list_entry);
			fastlock_release(&domain->send_queue.lock);
			psmx_am_process_send(domain, req);
			fastlock_acquire(&domain->send_queue.lock);
		}
		fastlock_release(&domain->send_queue.lock);
	}
Example #25
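/* PSM2 counterpart of the RMA-queue drain in Example #1, using the
 * provider's psmx2_lock()/psmx2_unlock() wrappers. */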
int psmx2_am_progress(struct psmx2_trx_ctxt *trx_ctxt)
{
	struct slist_entry *item;
	struct psmx2_am_request *req;
	struct psmx2_trigger *trigger;

	if (psmx2_env.tagged_rma) {
		psmx2_lock(&trx_ctxt->rma_queue.lock, 2);
		while (!slist_empty(&trx_ctxt->rma_queue.list)) {
			item = slist_remove_head(&trx_ctxt->rma_queue.list);
			req = container_of(item, struct psmx2_am_request, list_entry);
			psmx2_unlock(&trx_ctxt->rma_queue.lock, 2);
			psmx2_am_process_rma(trx_ctxt, req);
			psmx2_lock(&trx_ctxt->rma_queue.lock, 2);
		}
		psmx2_unlock(&trx_ctxt->rma_queue.lock, 2);
	}
Example #26
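/* Fail every transfer still awaiting a response: report each with the
 * given error and release it back to the tx CQ's pool. */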
static void tcpx_cq_report_xfer_fail(struct tcpx_ep *tcpx_ep, int err)
{
	struct slist_entry *entry;
	struct tcpx_xfer_entry *tx_entry;
	struct tcpx_cq *tcpx_cq;

	while (!slist_empty(&tcpx_ep->tx_rsp_pend_queue)) {
		entry = slist_remove_head(&tcpx_ep->tx_rsp_pend_queue);
		tx_entry = container_of(entry, struct tcpx_xfer_entry, entry);
		tcpx_cq_report_completion(tx_entry->ep->util_ep.tx_cq,
					  tx_entry, -err);

		tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq,
				       struct tcpx_cq, util_cq);
		tcpx_xfer_entry_release(tcpx_cq, tx_entry);
	}
}
Example #27
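/* Tear down a buffer pool region by region; debug builds assert that
 * no buffer from a region is still in use before it is freed. */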
void util_buf_pool_destroy(struct util_buf_pool *pool)
{
	struct slist_entry *entry;
	struct util_buf_region *buf_region;

	while (!slist_empty(&pool->region_list)) {
		entry = slist_remove_head(&pool->region_list);
		buf_region = container_of(entry, struct util_buf_region, entry);
#if ENABLE_DEBUG
		assert(buf_region->num_used == 0);
#endif
		if (pool->free_hndlr)
			pool->free_hndlr(pool->ctx, buf_region->context);
		ofi_freealign(buf_region->mem_region);
		free(buf_region);
	}
	free(pool);
}
Example #28
/* Process deferred request work on the VC. */
static int __gnix_vc_push_work_reqs(struct gnix_vc *vc)
{
	int ret, fi_rc = FI_SUCCESS;
	struct slist_entry *item;
	struct gnix_fab_req *req;

try_again:
	fastlock_acquire(&vc->work_queue_lock);

	item = slist_remove_head(&vc->work_queue);
	fastlock_release(&vc->work_queue_lock);
	if (item != NULL) {
		req = (struct gnix_fab_req *)container_of(item,
							  struct gnix_fab_req,
							  slist);
		ret = req->work_fn(req);
		if (ret == FI_SUCCESS) {
			GNIX_INFO(FI_LOG_EP_DATA,
				  "Request processed: %p\n", req);
			goto try_again;
		} else {
			/* Work failed.  Reschedule to put this VC back on the
			 * end of the list. */
			__gnix_vc_work_schedule(vc);

			fastlock_acquire(&vc->work_queue_lock);
			slist_insert_tail(item, &vc->work_queue);
			fastlock_release(&vc->work_queue_lock);

			/* FI_ENOSPC is reserved to indicate a lack of TXDs,
			 * which are shared by all VCs on the NIC.  Return
			 * error to stall processing of VCs in this case.  The
			 * other likely error is a lack of SMSG credits, which
			 * only halts this VC. */
			if (ret == -FI_ENOSPC) {
				fi_rc = -FI_EAGAIN;
			} else if (ret != -FI_EAGAIN) {
				/* TODO report error? */
				GNIX_WARN(FI_LOG_EP_DATA,
					  "Failed to push request %p: %s\n",
					  req, fi_strerror(-ret));
			} /* else return success to keep processing TX VCs */
		}
	}
Example #29
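/* Read a CQ error entry: errors are marked by a flagged slot at the
 * head of the completion ring, with the details kept on the separate
 * err_list. */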
static ssize_t util_cq_readerr(struct fid_cq *cq_fid, struct fi_cq_err_entry *buf,
			       uint64_t flags)
{
	struct util_cq *cq;
	struct util_cq_err_entry *err;
	struct slist_entry *entry;
	ssize_t ret;

	cq = container_of(cq_fid, struct util_cq, cq_fid);
	fastlock_acquire(&cq->cq_lock);
	if (!cirque_isempty(cq->cirq) &&
	    (cirque_head(cq->cirq)->flags & UTIL_FLAG_ERROR)) {
		cirque_discard(cq->cirq);
		entry = slist_remove_head(&cq->err_list);
		err = container_of(entry, struct util_cq_err_entry, list_entry);
		*buf = err->err_entry;
		free(err);
		ret = 0;
	} else {
Example #30
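/* Release the endpoint's tx/rx resources: close both receive queues,
 * free the tx entry freelist, return every rx buffer to its pool, and
 * destroy both buffer pools. */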
static void rxm_ep_txrx_res_close(struct rxm_ep *rxm_ep)
{
	struct slist_entry *entry;
	struct rxm_rx_buf *rx_buf;

	rxm_recv_queue_close(&rxm_ep->trecv_queue);
	rxm_recv_queue_close(&rxm_ep->recv_queue);

	if (rxm_ep->txe_fs)
		rxm_txe_fs_free(rxm_ep->txe_fs);

	while (!slist_empty(&rxm_ep->rx_buf_list)) {
		entry = slist_remove_head(&rxm_ep->rx_buf_list);
		rx_buf = container_of(entry, struct rxm_rx_buf, entry);
		util_buf_release(rxm_ep->rx_pool, rx_buf);
	}

	util_buf_pool_destroy(rxm_ep->rx_pool);
	util_buf_pool_destroy(rxm_ep->tx_pool);
}