/* UCX send completion callback: translate the UCP status into a libfabric
 * tagged completion (plus an error entry on failure) on the bound util CQ. */
void mlx_send_callback(void *request,
			ucs_status_t status)
{
	struct util_cq *cq;
	struct mlx_request *mlx_req = request;
	struct fi_cq_tagged_entry *t_entry;
	struct util_cq_err_entry *err;

	cq = mlx_req->cq;

	/* A canceled send generates no completion; just hand the request
	 * back to UCX. */
	if (status == UCS_ERR_CANCELED) {
		ucp_request_release(request);
		return;
	}

	fastlock_acquire(&cq->cq_lock);

	/* Reserve the next slot in the completion ring and commit the
	 * tagged entry. */
	t_entry = cirque_tail(cq->cirq);
	*t_entry = (mlx_req->completion.tagged);
	cirque_commit(cq->cirq);

	if (status != UCS_OK) {
		/* The entry is already committed; flag it and queue the
		 * detailed error record on the CQ's error list. */
		t_entry->flags |= UTIL_FLAG_ERROR;
		err = calloc(1, sizeof(struct util_cq_err_entry));
		if (!err) {
			FI_WARN(&mlx_prov, FI_LOG_CQ,
				"out of memory, cannot report CQ error\n");
			/* Don't leak the CQ lock or the UCX request on this
			 * early-return path. */
			fastlock_release(&cq->cq_lock);
			ucp_request_release(request);
			return;
		}

		err->err_entry = (mlx_req->completion.error);
		err->err_entry.prov_errno = (int)status;
		err->err_entry.err = MLX_TRANSLATE_ERRCODE(status);
		err->err_entry.olen = 0;
		slist_insert_tail(&err->list_entry, &cq->err_list);
	}

	/* Reset the request state before returning it to UCX. */
	mlx_req->type = MLX_FI_REQ_UNINITIALIZED;

	fastlock_release(&cq->cq_lock);
	ucp_request_release(request);
}
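
For context, a callback with this signature is what UCX invokes when a
non-blocking tagged send completes. Below is a minimal, hypothetical sketch of
the matching call site; "example_post_send", "ep", "buf", "len", and "tag" are
invented names for illustration, and the provider's actual send path may
differ.

/* Hypothetical call site: post a tagged send whose completion is delivered
 * to mlx_send_callback() above. Names here are illustrative only. */
static ssize_t example_post_send(ucp_ep_h ep, const void *buf, size_t len,
				 ucp_tag_t tag)
{
	ucs_status_ptr_t req;

	req = ucp_tag_send_nb(ep, buf, len, ucp_dt_make_contig(1), tag,
			      mlx_send_callback);
	if (UCS_PTR_IS_ERR(req)) {
		/* Failed synchronously; the callback will never run. */
		return MLX_TRANSLATE_ERRCODE(UCS_PTR_STATUS(req));
	}
	if (req == NULL) {
		/* Completed inline; UCX does not invoke the callback here. */
		return 0;
	}
	/* Otherwise the callback fires from worker progress. In the real
	 * provider the returned request is initialized (e.g. its cq pointer)
	 * before the callback can dereference it. */
	return 0;
}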
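
/* Progress function for the RxM provider: drain the underlying MSG CQ into
 * the util CQ's circular buffer until it is empty or the ring fills up. */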
void rxm_cq_progress(struct util_cq *util_cq)
{
	ssize_t ret = 0;
	struct rxm_cq *rxm_cq;
	struct fi_cq_tagged_entry *comp;

	rxm_cq = container_of(util_cq, struct rxm_cq, util_cq);

	fastlock_acquire(&util_cq->cq_lock);
	do {
		/* Stop when the util CQ ring is full; remaining completions
		 * stay in the MSG CQ until the next progress call. */
		if (cirque_isfull(util_cq->cirq))
			goto out;

		/* Read one completion from the underlying MSG CQ directly into
		 * the next free ring slot; a negative return (e.g. -FI_EAGAIN
		 * when the CQ is empty) ends the loop without committing. */
		comp = cirque_tail(util_cq->cirq);
		ret = rxm_msg_cq_read(util_cq, rxm_cq->msg_cq, comp);
		if (ret < 0)
			goto out;
		cirque_commit(util_cq->cirq);
	} while (ret > 0);
out:
	fastlock_release(&util_cq->cq_lock);
}
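
/* UCX tag-receive completion callback: record the sender tag and length from
 * the receive info, then either keep the request as an unexpected message or
 * post a completion (and any error entry) to the util CQ. */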
void mlx_recv_callback(void *request,
			ucs_status_t status,
			ucp_tag_recv_info_t *info)
{
	struct util_cq *cq;
	struct mlx_request *mlx_req;

	mlx_req = (struct mlx_request*)request;
	if (status == UCS_ERR_CANCELED) {
		ucp_request_release(request);
		return;
	}

	cq = mlx_req->cq;

	/* Everything below may touch the CQ; take the lock here to pair with
	 * the fastlock_release() at the end of the function. */
	fastlock_acquire(&cq->cq_lock);

	/* Capture the matched tag and received length for the completion. */
	mlx_req->completion.tagged.tag = info->sender_tag;
	mlx_req->completion.tagged.len = info->length;

	if (status != UCS_OK) {
		mlx_req->completion.error.prov_errno = (int)status;
		mlx_req->completion.error.err = MLX_TRANSLATE_ERRCODE(status);
	}

	/* The receive completed before the posting path initialized this
	 * request (an unexpected/inline completion); tag it and let that
	 * path finish reporting. */
	if (mlx_req->type == MLX_FI_REQ_UNINITIALIZED) {
		if (status != UCS_OK) {
			mlx_req->completion.error.olen = info->length;
			mlx_req->type = MLX_FI_REQ_UNEXPECTED_ERR;
		} else {
			mlx_req->type = MLX_FI_REQ_UNEXPECTED;
		}
	} else {
		/* A matching receive was posted: deliver the completion,
		 * reporting any truncation through the error entry. */
		if (status != UCS_OK) {
			mlx_req->completion.error.olen = info->length -
						mlx_req->completion.error.len;
		}

		struct fi_cq_tagged_entry *t_entry;
		t_entry = cirque_tail(cq->cirq);
		*t_entry = (mlx_req->completion.tagged);

		if (status != UCS_OK) {
			struct util_cq_err_entry* err;
			t_entry->flags |= UTIL_FLAG_ERROR;

			err = calloc(1, sizeof(struct util_cq_err_entry));
			if (!err) {
				FI_WARN(&mlx_prov, FI_LOG_CQ,
					"out of memory, cannot report CQ error\n");
				/* Don't leak the CQ lock or the UCX request on
				 * this early-return path. */
				fastlock_release(&cq->cq_lock);
				ucp_request_release(request);
				return;
			}

			err->err_entry = (mlx_req->completion.error);
			slist_insert_tail(&err->list_entry, &cq->err_list);
		}

		/* The provider cannot resolve the sender's address, so report
		 * FI_ADDR_NOTAVAIL for FI_SOURCE-enabled CQs. */
		if (cq->src) {
			cq->src[cirque_windex((struct mlx_comp_cirq *)(cq->cirq))] =
					FI_ADDR_NOTAVAIL;
		}

		/* Wake any thread blocked on the CQ's wait object. */
		if (cq->wait) {
			cq->wait->signal(cq->wait);
		}

		mlx_req->type = MLX_FI_REQ_UNINITIALIZED;
		cirque_commit(cq->cirq);
		ucp_request_release(request);
	}
	fastlock_release(&cq->cq_lock);
}
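
Similarly, a receive callback of this shape is what ucp_tag_recv_nb() takes. A
hedged sketch of posting such a receive follows; "example_post_recv", "worker",
"buf", "len", "tag", and "tag_mask" are invented names for illustration.

/* Hypothetical call site: post a tagged receive that completes through
 * mlx_recv_callback() above. Names here are illustrative only. */
static ssize_t example_post_recv(ucp_worker_h worker, void *buf, size_t len,
				 ucp_tag_t tag, ucp_tag_t tag_mask)
{
	ucs_status_ptr_t req;

	req = ucp_tag_recv_nb(worker, buf, len, ucp_dt_make_contig(1),
			      tag, tag_mask, mlx_recv_callback);
	if (UCS_PTR_IS_ERR(req))
		return MLX_TRANSLATE_ERRCODE(UCS_PTR_STATUS(req));
	/* Tag receives always return a request handle; the callback runs,
	 * possibly immediately, once the message is matched and received. */
	return 0;
}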