Example #1
static int sock_poll_add(struct fid_poll *pollset, struct fid *event_fid,
			 uint64_t flags)
{
	struct sock_poll *poll;
	struct sock_fid_list *list_item;
	struct sock_cq *cq;
	struct sock_cntr *cntr;

	poll = container_of(pollset, struct sock_poll, poll_fid.fid);
	list_item = calloc(1, sizeof(*list_item));
	if (!list_item)
		return -FI_ENOMEM;

	list_item->fid = event_fid;
	dlist_init(&list_item->entry);
	dlist_insert_after(&list_item->entry, &poll->fid_list);

	switch (list_item->fid->fclass) {
	case FI_CLASS_CQ:
		cq = container_of(list_item->fid, struct sock_cq, cq_fid);
		ofi_atomic_inc32(&cq->ref);
		break;
	case FI_CLASS_CNTR:
		cntr = container_of(list_item->fid, struct sock_cntr, cntr_fid);
		ofi_atomic_inc32(&cntr->ref);
		break;
	default:
		SOCK_LOG_ERROR("Invalid fid class\n");
		/* undo the insert above so the entry is neither leaked
		 * nor left dangling on the poll set's fid list */
		dlist_remove(&list_item->entry);
		free(list_item);
		return -FI_EINVAL;
	}
	return 0;
}
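
All of the examples on this page call ofi_atomic_inc32() to take a reference on a shared fabric object (a CQ, counter, EQ, domain, or fabric). As background, here is a minimal self-contained sketch of that reference-counting pattern in plain C11 atomics; the struct and function names are illustrative and not part of libfabric, which supplies its own ofi_atomic_* wrappers (typically built on the same C11 atomics). As Examples #10 and #26 below suggest, ofi_atomic_inc32() also returns the updated value.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative object with an atomic reference count. */
struct obj {
	atomic_int ref;
};

struct obj *obj_create(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (o)
		atomic_init(&o->ref, 1);	/* creator holds one reference */
	return o;
}

/* Analogous to ofi_atomic_inc32(&o->ref): increment and return the new count. */
int obj_get(struct obj *o)
{
	return atomic_fetch_add(&o->ref, 1) + 1;
}

/* Analogous to ofi_atomic_dec32(): release a reference, free on the last one. */
void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		free(o);
}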
Example #2
int ofi_ep_bind_cq(struct util_ep *ep, struct util_cq *cq, uint64_t flags)
{
	int ret;

	ret = ofi_check_bind_cq_flags(ep, cq, flags);
	if (ret)
		return ret;

	if (flags & FI_TRANSMIT) {
		ep->tx_cq = cq;
		if (!(flags & FI_SELECTIVE_COMPLETION)) {
			ep->tx_op_flags |= FI_COMPLETION;
			ep->tx_msg_flags = FI_COMPLETION;
		}
		ofi_atomic_inc32(&cq->ref);
	}

	if (flags & FI_RECV) {
		ep->rx_cq = cq;
		if (!(flags & FI_SELECTIVE_COMPLETION)) {
			ep->rx_op_flags |= FI_COMPLETION;
			ep->rx_msg_flags = FI_COMPLETION;
		}
		ofi_atomic_inc32(&cq->ref);
	}

	if (flags & (FI_TRANSMIT | FI_RECV)) {
		return fid_list_insert(&cq->ep_list,
				       &cq->ep_list_lock,
				       &ep->ep_fid.fid);
	}

	return FI_SUCCESS;
}
Example #3
static int mlx_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	struct mlx_ep *ep;
	struct util_cq *cq;
	int status = FI_SUCCESS;

	ep = container_of(fid, struct mlx_ep, ep.ep_fid.fid);

	switch (bfid->fclass) {
	case FI_CLASS_CQ:
		/* TODO: check rest flags for send/recv ECs */
		do {
			cq = container_of(bfid, struct util_cq, cq_fid.fid);

			if (((flags & FI_TRANSMIT) && ep->ep.tx_cq) ||
			    ((flags & FI_RECV) && ep->ep.rx_cq)) {
				FI_WARN(&mlx_prov, FI_LOG_EP_CTRL,
					"CQ already bound\n");
				status = -FI_EINVAL;
				break;
			}

			if (flags & FI_TRANSMIT) {
				ep->ep.tx_cq = cq;
				ofi_atomic_inc32(&cq->ref);
			}

			if (flags & FI_RECV) {
				ep->ep.rx_cq = cq;
				ofi_atomic_inc32(&cq->ref);
				status = fid_list_insert(&cq->ep_list,
							 &cq->ep_list_lock,
							 &ep->ep.ep_fid.fid);
				if (status)
					break;
			}

			if (flags & FI_SELECTIVE_COMPLETION) {
				ep->ep.flags |= FI_SELECTIVE_COMPLETION;
			}

		} while (0);
		break;
	case FI_CLASS_AV:
		if (ep->av) {
			FI_WARN(&mlx_prov, FI_LOG_EP_CTRL,
				"AV already bound\n");
			status = -FI_EINVAL;
			break;
		}
		ep->av = container_of(bfid, struct mlx_av, av.fid);
		ep->av->ep = ep;
		break;
	default:
		status = -FI_EINVAL;
		break;
	}
	return status;
}
Example #4
static int
usdf_domain_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	struct usdf_domain *udp;

	USDF_TRACE_SYS(DOMAIN, "\n");

	if (flags & FI_REG_MR) {
		USDF_WARN_SYS(DOMAIN,
			"FI_REG_MR for EQs is not supported by the usnic provider");
		return -FI_EOPNOTSUPP;
	}

	udp = dom_fidtou(fid);

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		if (udp->dom_eq != NULL) {
			return -FI_EINVAL;
		}
		udp->dom_eq = eq_fidtou(bfid);
		ofi_atomic_inc32(&udp->dom_eq->eq_refcnt);
		break;
	default:
		return -FI_EINVAL;
	}

	return 0;
}
Example #5
int fi_wait_init(struct util_fabric *fabric, struct fi_wait_attr *attr,
		 struct util_wait *wait)
{
	struct fid_poll *poll_fid;
	struct fi_poll_attr poll_attr;
	int ret;

	wait->prov = fabric->prov;
	ofi_atomic_initialize32(&wait->ref, 0);
	wait->wait_fid.fid.fclass = FI_CLASS_WAIT;

	switch (attr->wait_obj) {
	case FI_WAIT_UNSPEC:
	case FI_WAIT_FD:
		wait->wait_obj = FI_WAIT_FD;
		break;
	case FI_WAIT_MUTEX_COND:
		wait->wait_obj = FI_WAIT_MUTEX_COND;
		break;
	default:
		assert(0);
		return -FI_EINVAL;
	}

	memset(&poll_attr, 0, sizeof poll_attr);
	ret = fi_poll_create_(fabric->prov, NULL, &poll_attr, &poll_fid);
	if (ret)
		return ret;

	wait->pollset = container_of(poll_fid, struct util_poll, poll_fid);
	wait->fabric = fabric;
	ofi_atomic_inc32(&fabric->ref);
	return 0;
}
Example #6
int ofi_domain_init(struct fid_fabric *fabric_fid, const struct fi_info *info,
		   struct util_domain *domain, void *context)
{
	struct util_fabric *fabric;
	int ret;

	fabric = container_of(fabric_fid, struct util_fabric, fabric_fid);
	domain->fabric = fabric;
	domain->prov = fabric->prov;
	ret = util_domain_init(domain, info);
	if (ret) {
		free(domain);
		return ret;
	}

	domain->domain_fid.fid.fclass = FI_CLASS_DOMAIN;
	domain->domain_fid.fid.context = context;
	/*
	 * domain ops set by provider
	 */
	domain->domain_fid.mr = &util_domain_mr_ops;

	fastlock_acquire(&fabric->lock);
	dlist_insert_tail(&domain->list_entry, &fabric->domain_list);
	fastlock_release(&fabric->lock);

	ofi_atomic_inc32(&fabric->ref);
	return 0;
}
Example #7
int sock_stx_ctx(struct fid_domain *domain,
		 struct fi_tx_attr *attr, struct fid_stx **stx, void *context)
{
	struct sock_domain *dom;
	struct sock_tx_ctx *tx_ctx;

	if (attr && sock_verify_tx_attr(attr))
		return -FI_EINVAL;

	dom = container_of(domain, struct sock_domain, dom_fid);

	tx_ctx = sock_stx_ctx_alloc(attr ? attr : &sock_stx_attr, context);
	if (!tx_ctx)
		return -FI_ENOMEM;

	tx_ctx->domain = dom;
	if (tx_ctx->rx_ctrl_ctx && tx_ctx->rx_ctrl_ctx->is_ctrl_ctx)
		tx_ctx->rx_ctrl_ctx->domain = dom;

	tx_ctx->fid.stx.fid.ops = &sock_ctx_ops;
	tx_ctx->fid.stx.ops = &sock_ep_ops;
	ofi_atomic_inc32(&dom->ref);

	*stx = &tx_ctx->fid.stx;
	return 0;
}
Example #8
int sock_srx_ctx(struct fid_domain *domain,
		 struct fi_rx_attr *attr, struct fid_ep **srx, void *context)
{
	struct sock_domain *dom;
	struct sock_rx_ctx *rx_ctx;

	if (attr && sock_verify_rx_attr(attr))
		return -FI_EINVAL;

	dom = container_of(domain, struct sock_domain, dom_fid);
	rx_ctx = sock_rx_ctx_alloc(attr ? attr : &sock_srx_attr, context, 0);
	if (!rx_ctx)
		return -FI_ENOMEM;

	rx_ctx->domain = dom;
	rx_ctx->ctx.fid.fclass = FI_CLASS_SRX_CTX;

	rx_ctx->ctx.fid.ops = &sock_ctx_ops;
	rx_ctx->ctx.ops = &sock_ctx_ep_ops;
	rx_ctx->ctx.msg = &sock_ep_msg_ops;
	rx_ctx->ctx.tagged = &sock_ep_tagged;
	rx_ctx->enabled = 1;

	/* default config */
	rx_ctx->min_multi_recv = SOCK_EP_MIN_MULTI_RECV;
	*srx = &rx_ctx->ctx;
	ofi_atomic_inc32(&dom->ref);
	return 0;
}
Example #9
int ofi_endpoint_init(struct fid_domain *domain, const struct util_prov *util_prov,
		      struct fi_info *info, struct util_ep *ep, void *context,
		      ofi_ep_progress_func progress)
{
	struct util_domain *util_domain;
	int ret;

	util_domain = container_of(domain, struct util_domain, domain_fid);

	if (!info || !info->ep_attr || !info->rx_attr || !info->tx_attr)
		return -FI_EINVAL;

	ret = ofi_prov_check_info(util_prov,
				  util_domain->fabric->fabric_fid.api_version,
				  info);
	if (ret)
		return ret;

	ep->ep_fid.fid.fclass = FI_CLASS_EP;
	ep->ep_fid.fid.context = context;
	ep->domain = util_domain;
	ep->caps = info->caps;
	ep->progress = progress;
	ep->tx_op_flags = info->tx_attr->op_flags;
	ep->rx_op_flags = info->rx_attr->op_flags;
	ofi_atomic_inc32(&util_domain->ref);
	if (util_domain->eq)
		ofi_ep_bind_eq(ep, util_domain->eq);
	fastlock_init(&ep->lock);
	return 0;
}
Example #10
/**
 * Will attempt to find a directory in hugetlbfs using the given page size and
 * create a filename to use for backing an mmap.
 *
 * @param[in] page_size Page size to look for in the hugetlbfs
 * @param[out] filename Pointer containing filename after generation.
 *
 * @return FI_SUCCESS	On successfully finding a huge page and generating a
 * file name.
 *
 * @return -FI_EINVAL	if an invalid parameter was given
 * @return -FI_EIO	if an error occurred while opening the /proc/mounts
 * file. This is propagated from __find_huge_page.
 * @return -FI_ENOMEM	if an error occurred while allocating space for the
 * filename.
 */
static int __generate_file_name(size_t page_size, char **filename)
{
	static const char basename[] = "gnix_map";
	char *full_filename = NULL;
	char *huge_page = NULL;
	char *error;
	char error_buf[256];
	int my_file_id;
	int size;
	int ret;

	if (!filename) {
		GNIX_WARN(FI_LOG_EP_CTRL, "filename pointer is NULL.\n");
		ret = -FI_EINVAL;
		goto err_invalid;
	}

	ret = __find_huge_page(page_size, &huge_page);
	if (ret != FI_SUCCESS) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Find huge page returned error %s\n",
			  fi_strerror(-ret));
		goto err_invalid;
	}

	my_file_id = ofi_atomic_inc32(&file_id_counter);
	size = snprintf(NULL, 0, "%s/%s.%d.%d", huge_page, basename, getpid(),
			my_file_id);
	if (size < 0) {
		error = strerror_r(errno, error_buf, sizeof(error_buf));
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Error while gathering size for snprintf: %s\n",
			  error);
		/* report the failure rather than falling through with the
		 * stale FI_SUCCESS left over from __find_huge_page */
		ret = -FI_EINVAL;
		goto err_snprintf;
	}

	full_filename = malloc(size + 1);
	if (!full_filename) {
		error = strerror_r(errno, error_buf, sizeof(error_buf));
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Error allocating full_filename: %s\n",
			  error);
		ret = -FI_ENOMEM;
		goto err_snprintf;
	}

	sprintf(full_filename, "%s/%s.%d.%d", huge_page, basename, getpid(),
		my_file_id);

	GNIX_DEBUG(FI_LOG_EP_CTRL, "Generated filename: %s\n", full_filename);

	*filename = full_filename;

err_snprintf:
	free(huge_page);
err_invalid:
	return ret;
}
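
A hypothetical caller (necessarily in the same translation unit, since the function is static) could use it as sketched below; the 2 MB page size is only an assumption for illustration:

/* Sketch only: __generate_file_name() allocates the name on success,
 * so the caller owns the string and must free() it. */
char *fname = NULL;

if (__generate_file_name(2 * 1024 * 1024, &fname) == FI_SUCCESS) {
	/* open fname and back an mmap with it here */
	free(fname);
}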
Example #11
static int sock_ep_tx_ctx(struct fid_ep *ep, int index, struct fi_tx_attr *attr,
			  struct fid_ep **tx_ep, void *context)
{
	struct sock_ep *sock_ep;
	struct sock_tx_ctx *tx_ctx;

	sock_ep = container_of(ep, struct sock_ep, ep);
	if (sock_ep->attr->fclass != FI_CLASS_SEP ||
		index >= (int)sock_ep->attr->ep_attr.tx_ctx_cnt)
		return -FI_EINVAL;

	if (attr) {
		if (ofi_check_tx_attr(&sock_prov, sock_ep->attr->info.tx_attr,
				      attr, 0) ||
			ofi_check_attr_subset(&sock_prov,
				sock_ep->attr->info.tx_attr->caps, attr->caps))
			return -FI_ENODATA;
		tx_ctx = sock_tx_ctx_alloc(attr, context, 0);
	} else {
		tx_ctx = sock_tx_ctx_alloc(&sock_ep->tx_attr, context, 0);
	}
	if (!tx_ctx)
		return -FI_ENOMEM;

	tx_ctx->tx_id = index;
	tx_ctx->ep_attr = sock_ep->attr;
	tx_ctx->domain = sock_ep->attr->domain;
	if (tx_ctx->rx_ctrl_ctx && tx_ctx->rx_ctrl_ctx->is_ctrl_ctx)
		tx_ctx->rx_ctrl_ctx->domain = sock_ep->attr->domain;
	tx_ctx->av = sock_ep->attr->av;
	dlist_insert_tail(&sock_ep->attr->tx_ctx_entry, &tx_ctx->ep_list);

	tx_ctx->fid.ctx.fid.ops = &sock_ctx_ops;
	tx_ctx->fid.ctx.ops = &sock_ctx_ep_ops;
	tx_ctx->fid.ctx.msg = &sock_ep_msg_ops;
	tx_ctx->fid.ctx.tagged = &sock_ep_tagged;
	tx_ctx->fid.ctx.rma = &sock_ep_rma;
	tx_ctx->fid.ctx.atomic = &sock_ep_atomic;

	*tx_ep = &tx_ctx->fid.ctx;
	sock_ep->attr->tx_array[index] = tx_ctx;
	ofi_atomic_inc32(&sock_ep->attr->num_tx_ctx);
	ofi_atomic_inc32(&sock_ep->attr->domain->ref);
	return 0;
}
Example #12
int ofi_ep_bind_eq(struct util_ep *ep, struct util_eq *eq)
{
	if (ep->eq)
		ofi_atomic_dec32(&ep->eq->ref);

	ep->eq = eq;
	ofi_atomic_inc32(&eq->ref);
	return 0;
}
Example #13
static int smr_ep_bind_cq(struct smr_ep *ep, struct util_cq *cq, uint64_t flags)
{
	int ret = 0;

	if (flags & ~(FI_TRANSMIT | FI_RECV)) {
		FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
			"unsupported flags\n");
		return -FI_EBADFLAGS;
	}

	if (((flags & FI_TRANSMIT) && ep->util_ep.tx_cq) ||
	    ((flags & FI_RECV) && ep->util_ep.rx_cq)) {
		FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
			"duplicate CQ binding\n");
		return -FI_EINVAL;
	}

	if (flags & FI_TRANSMIT) {
		ep->util_ep.tx_cq = cq;
		ofi_atomic_inc32(&cq->ref);
		ep->tx_comp = cq->wait ? smr_tx_comp_signal : smr_tx_comp;
	}

	if (flags & FI_RECV) {
		ep->util_ep.rx_cq = cq;
		ofi_atomic_inc32(&cq->ref);

		if (cq->wait) {
			ep->rx_comp = (cq->domain->info_domain_caps & FI_SOURCE) ?
				      smr_rx_src_comp_signal :
				      smr_rx_comp_signal;
		} else {
			ep->rx_comp = (cq->domain->info_domain_caps & FI_SOURCE) ?
				      smr_rx_src_comp : smr_rx_comp;
		}
	}

	ret = fid_list_insert(&cq->ep_list,
			      &cq->ep_list_lock,
			      &ep->util_ep.ep_fid.fid);

	return ret;
}
Example #14
static int sock_ep_rx_ctx(struct fid_ep *ep, int index, struct fi_rx_attr *attr,
		    struct fid_ep **rx_ep, void *context)
{
	struct sock_ep *sock_ep;
	struct sock_rx_ctx *rx_ctx;

	sock_ep = container_of(ep, struct sock_ep, ep);
	if (sock_ep->attr->fclass != FI_CLASS_SEP ||
		index >= (int)sock_ep->attr->ep_attr.rx_ctx_cnt)
		return -FI_EINVAL;

	if (attr) {
		if (ofi_check_rx_attr(&sock_prov, &sock_ep->attr->info, attr, 0) ||
			ofi_check_attr_subset(&sock_prov, sock_ep->attr->info.rx_attr->caps,
				attr->caps))
			return -FI_ENODATA;
		rx_ctx = sock_rx_ctx_alloc(attr, context, 0);
	} else {
		rx_ctx = sock_rx_ctx_alloc(&sock_ep->rx_attr, context, 0);
	}
	if (!rx_ctx)
		return -FI_ENOMEM;

	rx_ctx->rx_id = index;
	rx_ctx->ep_attr = sock_ep->attr;
	rx_ctx->domain = sock_ep->attr->domain;
	rx_ctx->av = sock_ep->attr->av;
	dlist_insert_tail(&sock_ep->attr->rx_ctx_entry, &rx_ctx->ep_list);

	rx_ctx->ctx.fid.ops = &sock_ctx_ops;
	rx_ctx->ctx.ops = &sock_ctx_ep_ops;
	rx_ctx->ctx.msg = &sock_ep_msg_ops;
	rx_ctx->ctx.tagged = &sock_ep_tagged;

	rx_ctx->min_multi_recv = sock_ep->attr->min_multi_recv;
	*rx_ep = &rx_ctx->ctx;
	sock_ep->attr->rx_array[index] = rx_ctx;
	ofi_atomic_inc32(&sock_ep->attr->num_rx_ctx);
	ofi_atomic_inc32(&sock_ep->attr->domain->ref);
	return 0;
}
Example #15
void ofi_monitor_add_queue(struct ofi_mem_monitor *monitor,
			   struct ofi_notification_queue *nq)
{
	fastlock_init(&nq->lock);
	dlist_init(&nq->list);
	fastlock_acquire(&nq->lock);
	nq->refcnt = 0;
	fastlock_release(&nq->lock);

	nq->monitor = monitor;
	ofi_atomic_inc32(&monitor->refcnt);
}
Example #16
int ofi_domain_bind_eq(struct util_domain *domain, struct util_eq *eq)
{
	if (domain->eq) {
		FI_WARN(domain->prov, FI_LOG_DOMAIN,
			"duplicate EQ binding\n");
		return -FI_EINVAL;
	}

	domain->eq = eq;
	ofi_atomic_inc32(&eq->ref);
	return 0;
}
Example #17
int ofi_endpoint_init(struct fid_domain *domain, const struct util_prov *util_prov,
		      struct fi_info *info, struct util_ep *ep, void *context,
		      ofi_ep_progress_func progress)
{
	struct util_domain *util_domain;
	int ret;

	util_domain = container_of(domain, struct util_domain, domain_fid);

	if (!info || !info->ep_attr || !info->rx_attr || !info->tx_attr)
		return -FI_EINVAL;

	ret = ofi_prov_check_info(util_prov,
				  util_domain->fabric->fabric_fid.api_version,
				  info);
	if (ret)
		return ret;

	ep->ep_fid.fid.fclass = FI_CLASS_EP;
	ep->ep_fid.fid.context = context;
	ep->domain = util_domain;
	ep->caps = info->caps;
	ep->flags = 0;
	ep->progress = progress;
	ep->tx_op_flags = info->tx_attr->op_flags;
	ep->rx_op_flags = info->rx_attr->op_flags;
	ep->tx_msg_flags = 0;
	ep->rx_msg_flags = 0;
	ep->inject_op_flags =
		((info->tx_attr->op_flags &
		  ~(FI_COMPLETION | FI_INJECT_COMPLETE |
		    FI_TRANSMIT_COMPLETE | FI_DELIVERY_COMPLETE)) | FI_INJECT);
	ep->tx_cntr_inc 	= ofi_cntr_inc_noop;
	ep->rx_cntr_inc 	= ofi_cntr_inc_noop;
	ep->rd_cntr_inc 	= ofi_cntr_inc_noop;
	ep->wr_cntr_inc 	= ofi_cntr_inc_noop;
	ep->rem_rd_cntr_inc 	= ofi_cntr_inc_noop;
	ep->rem_wr_cntr_inc 	= ofi_cntr_inc_noop;
	ep->type = info->ep_attr->type;
	ofi_atomic_inc32(&util_domain->ref);
	if (util_domain->eq)
		ofi_ep_bind_eq(ep, util_domain->eq);
	fastlock_init(&ep->lock);
	if (ep->domain->threading != FI_THREAD_SAFE) {
		ep->lock_acquire = ofi_fastlock_acquire_noop;
		ep->lock_release = ofi_fastlock_release_noop;
	} else {
		ep->lock_acquire = ofi_fastlock_acquire;
		ep->lock_release = ofi_fastlock_release;
	}
	return 0;
}
Example #18
static int psmx2_ep_control(fid_t fid, int command, void *arg)
{
	struct fi_alias *alias;
	struct psmx2_fid_ep *ep, *new_ep;
	int err;

	ep = container_of(fid, struct psmx2_fid_ep, ep.fid);

	switch (command) {
	case FI_ALIAS:
		new_ep = (struct psmx2_fid_ep *) calloc(1, sizeof *ep);
		if (!new_ep)
			return -FI_ENOMEM;
		alias = arg;
		*new_ep = *ep;
		err = psmx2_ep_set_flags(new_ep, alias->flags);
		if (err) {
			free(new_ep);
			return err;
		}
		new_ep->base_ep = ep;
		ofi_atomic_inc32(&ep->ref);
		psmx2_ep_optimize_ops(new_ep);
		*alias->fid = &new_ep->ep.fid;
		break;

	case FI_SETOPSFLAG:
		err = psmx2_ep_set_flags(ep, *(uint64_t *)arg);
		if (err)
			return err;
		psmx2_ep_optimize_ops(ep);
		break;

	case FI_GETOPSFLAG:
		if (!arg)
			return -FI_EINVAL;
		err = psmx2_ep_get_flags(ep, arg);
		if (err)
			return err;
		break;

	case FI_ENABLE:
		ep->enabled = 1;
		return 0;

	default:
		return -FI_ENOSYS;
	}

	return 0;
}
Example #19
void sock_cq_add_tx_ctx(struct sock_cq *cq, struct sock_tx_ctx *tx_ctx)
{
	struct dlist_entry *entry;
	struct sock_tx_ctx *curr_ctx;

	fastlock_acquire(&cq->list_lock);
	for (entry = cq->tx_list.next; entry != &cq->tx_list;
	     entry = entry->next) {
		curr_ctx = container_of(entry, struct sock_tx_ctx, cq_entry);
		if (tx_ctx == curr_ctx)
			goto out;
	}
	dlist_insert_tail(&tx_ctx->cq_entry, &cq->tx_list);
	ofi_atomic_inc32(&cq->ref);
out:
	fastlock_release(&cq->list_lock);
}
Example #20
int ofi_ep_bind_cntr(struct util_ep *ep, struct util_cntr *cntr, uint64_t flags)
{
	if (flags & ~(FI_TRANSMIT | FI_RECV | FI_READ  | FI_WRITE |
		      FI_REMOTE_READ | FI_REMOTE_WRITE)) {
		FI_WARN(ep->domain->fabric->prov, FI_LOG_EP_CTRL,
			"Unsupported bind flags\n");
		return -FI_EBADFLAGS;
	}

	if (((flags & FI_TRANSMIT) && ep->tx_cntr) ||
	    ((flags & FI_RECV) && ep->rx_cntr) ||
	    ((flags & FI_READ) && ep->rd_cntr) ||
	    ((flags & FI_WRITE) && ep->wr_cntr) ||
	    ((flags & FI_REMOTE_READ) && ep->rem_rd_cntr) ||
	    ((flags & FI_REMOTE_WRITE) && ep->rem_wr_cntr)) {
		FI_WARN(ep->domain->fabric->prov, FI_LOG_EP_CTRL,
			"Duplicate counter binding\n");
		return -FI_EINVAL;
	}

	if (flags & FI_TRANSMIT) {
		ep->tx_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_RECV) {
		ep->rx_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_READ) {
		ep->rd_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_WRITE) {
		ep->wr_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_REMOTE_READ) {
		ep->rem_rd_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_REMOTE_WRITE) {
		ep->rem_wr_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	ep->flags |= OFI_CNTR_ENABLED;

	return fid_list_insert(&cntr->ep_list, &cntr->ep_list_lock,
			       &ep->ep_fid.fid);
}
Example #21
int ofi_ep_bind_av(struct util_ep *util_ep, struct util_av *av)
{
	if (util_ep->av) {
		FI_WARN(util_ep->av->prov, FI_LOG_EP_CTRL,
				"duplicate AV binding\n");
		return -FI_EINVAL;
	}
	util_ep->av = av;
	ofi_atomic_inc32(&av->ref);

	fastlock_acquire(&av->ep_list_lock);
	dlist_insert_tail(&util_ep->av_entry, &av->ep_list);
	fastlock_release(&av->ep_list_lock);

	return 0;
}
Example #22
int ofi_eq_create(struct fid_fabric *fabric_fid, struct fi_eq_attr *attr,
		 struct fid_eq **eq_fid, void *context)
{
	struct util_fabric *fabric;
	struct util_eq *eq;
	int ret;

	fabric = container_of(fabric_fid, struct util_fabric, fabric_fid);
	ret = util_verify_eq_attr(fabric->prov, attr);
	if (ret)
		return ret;

	eq = calloc(1, sizeof(*eq));
	if (!eq)
		return -FI_ENOMEM;

	eq->fabric = fabric;
	eq->prov = fabric->prov;
	ret = util_eq_init(fabric_fid, eq, attr);
	if (ret) {
		free(eq);
		return ret;
	}

	eq->eq_fid.fid.fclass = FI_CLASS_EQ;
	eq->eq_fid.fid.context = context;
	eq->eq_fid.fid.ops = &util_eq_fi_ops;
	eq->eq_fid.ops = &util_eq_ops;

	ofi_atomic_inc32(&fabric->ref);

	/* EQ must be fully operational before adding to wait set */
	if (eq->wait) {
		ret = fi_poll_add(&eq->wait->pollset->poll_fid,
				  &eq->eq_fid.fid, 0);
		if (ret) {
			util_eq_close(&eq->eq_fid.fid);
			return ret;
		}
	}

	*eq_fid = &eq->eq_fid;
	return 0;
}
Example #23
ssize_t fi_ibv_send(struct fi_ibv_msg_ep *ep, struct ibv_send_wr *wr, size_t len,
		    int count, void *context)
{
	struct ibv_send_wr *bad_wr;
	int ret;

	assert(ep->scq);
	wr->num_sge = count;
	wr->wr_id = (uintptr_t) context;

	if (wr->send_flags & IBV_SEND_SIGNALED) {
		assert((wr->wr_id & ep->scq->wr_id_mask) != ep->scq->send_signal_wr_id);
		ofi_atomic_set32(&ep->unsignaled_send_cnt, 0);
	} else {
		if (VERBS_SIGNAL_SEND(ep)) {
			ret = fi_ibv_signal_send(ep, wr);
			if (ret)
				return ret;
		} else {
			ofi_atomic_inc32(&ep->unsignaled_send_cnt);

			if (ofi_atomic_get32(&ep->unsignaled_send_cnt) >=
					VERBS_SEND_COMP_THRESH(ep)) {
				ret = fi_ibv_reap_comp(ep);
				if (ret)
					return ret;
			}
		}
	}

	ret = ibv_post_send(ep->id->qp, wr, &bad_wr);
	switch (ret) {
	case ENOMEM:
		return -FI_EAGAIN;
	case -1:
		/* Deal with non-compliant libibverbs drivers which set errno
		 * instead of directly returning the error value */
		return (errno == ENOMEM) ? -FI_EAGAIN : -errno;
	default:
		return -ret;
	}
}
Example #24
int ofi_wait_fd_add(struct util_wait *wait, int fd, uint32_t events,
		    ofi_wait_fd_try_func wait_try, void *arg, void *context)
{
	struct ofi_wait_fd_entry *fd_entry;
	struct dlist_entry *entry;
	struct util_wait_fd *wait_fd = container_of(wait, struct util_wait_fd,
						    util_wait);
	int ret = 0;

	fastlock_acquire(&wait_fd->lock);
	entry = dlist_find_first_match(&wait_fd->fd_list, ofi_wait_fd_match, &fd);
	if (entry) {
		FI_DBG(wait->prov, FI_LOG_EP_CTRL,
		       "Given fd (%d) already added to wait list - %p \n",
		       fd, wait_fd);
		fd_entry = container_of(entry, struct ofi_wait_fd_entry, entry);
		ofi_atomic_inc32(&fd_entry->ref);
		goto out;
	}

	ret = fi_epoll_add(wait_fd->epoll_fd, fd, events, context);
	if (ret) {
		FI_WARN(wait->prov, FI_LOG_FABRIC, "Unable to add fd to epoll\n");
		goto out;
	}

	fd_entry = calloc(1, sizeof *fd_entry);
	if (!fd_entry) {
		ret = -FI_ENOMEM;
		fi_epoll_del(wait_fd->epoll_fd, fd);
		goto out;
	}
	fd_entry->fd = fd;
	fd_entry->wait_try = wait_try;
	fd_entry->arg = arg;
	ofi_atomic_initialize32(&fd_entry->ref, 1);

	dlist_insert_tail(&fd_entry->entry, &wait_fd->fd_list);
out:
	fastlock_release(&wait_fd->lock);
	return ret;
}
Example #25
static inline int
fi_ibv_poll_events(struct fi_ibv_cq *_cq, int timeout)
{
	int ret, rc;
	void *context;
	struct pollfd fds[2];
	char data;

	fds[0].fd = _cq->channel->fd;
	fds[1].fd = _cq->signal_fd[0];

	fds[0].events = fds[1].events = POLLIN;

	rc = poll(fds, 2, timeout);
	if (rc == 0)
		return -FI_EAGAIN;
	else if (rc < 0)
		return -errno;

	if (fds[0].revents & POLLIN) {
		ret = ibv_get_cq_event(_cq->channel, &_cq->cq, &context);
		if (ret)
			return ret;

		ofi_atomic_inc32(&_cq->nevents);
		rc--;
	}
	if (fds[1].revents & POLLIN) {
		do {
			ret = read(fds[1].fd, &data, 1);
		} while (ret > 0);
		ret = -FI_EAGAIN;
		rc--;
	}
	if (rc) {
		VERBS_WARN(FI_LOG_CQ, "Unknown poll error: check revents\n");
		return -FI_EOTHER;
	}

	return ret;
}
Example #26
int _gnix_cm_nic_create_cdm_id(struct gnix_fid_domain *domain, uint32_t *id)
{
	uint32_t cdm_id;
	int v;

	if (*id != GNIX_CREATE_CDM_ID) {
		return FI_SUCCESS;
	}

	/*
	 * generate a cdm_id: the low 12 bits of the domain's cdm_id_seed
	 * are shifted into bits 12-23, and the low bits come from an
	 * atomic increment of a local counter.
	 */

	v = ofi_atomic_inc32(&gnix_id_counter);

	cdm_id = ((domain->cdm_id_seed & 0xFFF) << 12) | v;
	*id = cdm_id;
	return FI_SUCCESS;
}
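
Concretely, the packing keeps the seed in bits 12-23 and the counter in the low bits. With, say, cdm_id_seed = 0xABCD and a counter value v = 5:

	(0xABCD & 0xFFF) = 0xBCD
	0xBCD << 12      = 0xBCD000
	0xBCD000 | 5     = 0xBCD005

Note that v itself is not masked, so counter values above 0xFFF would spill into the seed bits.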
Example #27
static int fi_ibv_signal_send(struct fi_ibv_msg_ep *ep, struct ibv_send_wr *wr)
{
	struct fi_ibv_msg_epe *epe;

	fastlock_acquire(&ep->scq->lock);
	if (VERBS_SIGNAL_SEND(ep)) {
		epe = util_buf_alloc(ep->scq->epe_pool);
		if (!epe) {
			fastlock_release(&ep->scq->lock);
			return -FI_ENOMEM;
		}
		memset(epe, 0, sizeof(*epe));
		wr->send_flags |= IBV_SEND_SIGNALED;
		wr->wr_id = ep->ep_id;
		epe->ep = ep;
		slist_insert_tail(&epe->entry, &ep->scq->ep_list);
		ofi_atomic_inc32(&ep->comp_pending);
	}
	fastlock_release(&ep->scq->lock);
	return 0;
}
Example #28
int ofi_mr_cache_init(struct util_domain *domain,
		      struct ofi_mem_monitor *monitor,
		      struct ofi_mr_cache *cache)
{
	int ret;
	assert(cache->add_region && cache->delete_region);

	ret = ofi_mr_cache_init_storage(cache);
	if (ret)
		return ret;

	cache->domain = domain;
	ofi_atomic_inc32(&domain->ref);

	dlist_init(&cache->lru_list);
	cache->cached_cnt = 0;
	cache->cached_size = 0;
	if (!cache->max_cached_size)
		cache->max_cached_size = SIZE_MAX;
	cache->search_cnt = 0;
	cache->delete_cnt = 0;
	cache->hit_cnt = 0;
	ofi_monitor_add_queue(monitor, &cache->nq);

	ret = util_buf_pool_create(&cache->entry_pool,
				   sizeof(struct ofi_mr_entry) +
				   cache->entry_data_size,
				   16, 0, cache->max_cached_cnt);
	if (ret)
		goto err;

	return 0;
err:
	ofi_atomic_dec32(&cache->domain->ref);
	ofi_monitor_del_queue(&cache->nq);
	cache->mr_storage.destroy(&cache->mr_storage);
	return ret;
}
Example #29
static int
usdf_dom_rdc_alloc_data(struct usdf_domain *udp)
{
	struct usdf_rdm_connection *rdc;
	int ret;
	int i;

	udp->dom_rdc_hashtab = calloc(USDF_RDM_HASH_SIZE,
			sizeof(*udp->dom_rdc_hashtab));
	if (udp->dom_rdc_hashtab == NULL) {
		return -FI_ENOMEM;
	}
	SLIST_INIT(&udp->dom_rdc_free);
	ofi_atomic_initialize32(&udp->dom_rdc_free_cnt, 0);
	for (i = 0; i < USDF_RDM_FREE_BLOCK; ++i) {
		rdc = calloc(1, sizeof(*rdc));
		if (rdc == NULL) {
			return -FI_ENOMEM;
		}
		ret = usdf_timer_alloc(usdf_rdm_rdc_timeout, rdc,
				&rdc->dc_timer);
		if (ret != 0) {
			free(rdc);
			return ret;
		}
		rdc->dc_flags = USDF_DCS_UNCONNECTED | USDF_DCF_NEW_RX;
		rdc->dc_next_rx_seq = 0;
		rdc->dc_next_tx_seq = 0;
		rdc->dc_last_rx_ack = rdc->dc_next_tx_seq - 1;
		TAILQ_INIT(&rdc->dc_wqe_posted);
		TAILQ_INIT(&rdc->dc_wqe_sent);
		SLIST_INSERT_HEAD(&udp->dom_rdc_free, rdc, dc_addr_link);
		ofi_atomic_inc32(&udp->dom_rdc_free_cnt);
	}
	udp->dom_rdc_total = USDF_RDM_FREE_BLOCK;
	return 0;
}
Example #30
int ofi_ep_bind_cntr(struct util_ep *ep, struct util_cntr *cntr, uint64_t flags)
{
	int ret;

	ret = ofi_check_bind_cntr_flags(ep, cntr, flags);
	if (ret)
		return ret;

	if (flags & FI_TRANSMIT) {
		ep->tx_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_RECV) {
		ep->rx_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_READ) {
		ep->rd_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_WRITE) {
		ep->wr_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_REMOTE_READ) {
		ep->rem_rd_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	if (flags & FI_REMOTE_WRITE) {
		ep->rem_wr_cntr = cntr;
		ofi_atomic_inc32(&cntr->ref);
	}

	return fid_list_insert(&cntr->ep_list,
			       &cntr->ep_list_lock,
			       &ep->ep_fid.fid);
}