Example 1
static int fi_ibv_check_hints(uint32_t version, const struct fi_info *hints,
		const struct fi_info *info)
{
	int ret;
	uint64_t prov_mode;

	if (hints->caps & ~(info->caps)) {
		VERBS_INFO(FI_LOG_CORE, "Unsupported capabilities\n");
		FI_INFO_CHECK(&fi_ibv_prov, info, hints, caps, FI_TYPE_CAPS);
		return -FI_ENODATA;
	}

	prov_mode = ofi_mr_get_prov_mode(version, hints, info);

	if ((hints->mode & prov_mode) != prov_mode) {
		VERBS_INFO(FI_LOG_CORE, "needed mode not set\n");
		FI_INFO_MODE(&fi_ibv_prov, prov_mode, hints->mode);
		return -FI_ENODATA;
	}

	if (hints->fabric_attr) {
		ret = ofi_check_fabric_attr(&fi_ibv_prov, info->fabric_attr,
					    hints->fabric_attr);
		if (ret)
			return ret;
	}

	if (hints->domain_attr) {
		ret = ofi_check_domain_attr(&fi_ibv_prov, version, info->domain_attr,
					    hints->domain_attr);
		if (ret)
			return ret;
	}

	if (hints->ep_attr) {
		ret = fi_ibv_check_ep_attr(hints->ep_attr, info);
		if (ret)
			return ret;
	}

	if (hints->rx_attr) {
		ret = fi_ibv_check_rx_attr(hints->rx_attr, hints, info);
		if (ret)
			return ret;
	}

	if (hints->tx_attr) {
		ret = fi_ibv_check_tx_attr(hints->tx_attr, hints, info);
		if (ret)
			return ret;
	}

	return 0;
}
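
A check like this is typically run once per cached fi_info entry while servicing fi_getinfo(). The sketch below is a hedged illustration of such a caller, not the provider's actual code: it reuses the verbs_info list head from Example 3, and the function name itself is hypothetical.

/* Hedged caller sketch (hypothetical function name): filter the cached
 * verbs_info list through fi_ibv_check_hints() and return duplicates of
 * the entries that satisfy the caller's hints. */
static int sketch_getinfo_filter(uint32_t version, const struct fi_info *hints,
				 struct fi_info **info_out)
{
	struct fi_info *fi, *dup, *head = NULL, *tail = NULL;

	for (fi = verbs_info; fi; fi = fi->next) {
		if (hints && fi_ibv_check_hints(version, hints, fi))
			continue;	/* this entry cannot satisfy the hints */

		dup = fi_dupinfo(fi);	/* the caller owns the returned list */
		if (!dup) {
			fi_freeinfo(head);
			return -FI_ENOMEM;
		}
		if (!head)
			head = dup;
		else
			tail->next = dup;
		tail = dup;
	}

	*info_out = head;
	return head ? 0 : -FI_ENODATA;
}
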
Example 2
static int efa_check_hints(uint32_t version, const struct fi_info *hints,
			   const struct fi_info *info)
{
	uint64_t prov_mode;
	size_t size;
	int ret;

	if (hints->caps & ~(info->caps)) {
		EFA_INFO(FI_LOG_CORE, "Unsupported capabilities\n");
		FI_INFO_CHECK(&efa_prov, info, hints, caps, FI_TYPE_CAPS);
		return -FI_ENODATA;
	}

	prov_mode = ofi_mr_get_prov_mode(version, hints, info);

	if ((hints->mode & prov_mode) != prov_mode) {
		EFA_INFO(FI_LOG_CORE, "Required hints mode bits not set\n");
		FI_INFO_MODE(&efa_prov, prov_mode, hints->mode);
		return -FI_ENODATA;
	}

	if (hints->fabric_attr) {
		ret = ofi_check_fabric_attr(&efa_prov, info->fabric_attr,
					    hints->fabric_attr);

		if (ret)
			return ret;
	}

	switch (hints->addr_format) {
	case FI_FORMAT_UNSPEC:
	case FI_ADDR_EFA:
		size = EFA_EP_ADDR_LEN;
		break;
	default:
		EFA_INFO(FI_LOG_CORE,
			 "Address format not supported: hints[%u], supported[%u,%u]\n",
			 hints->addr_format, FI_FORMAT_UNSPEC, FI_ADDR_EFA);
		return -FI_ENODATA;
	}

	if (hints->src_addr && hints->src_addrlen < size)
		return -FI_ENODATA;

	if (hints->dest_addr && hints->dest_addrlen < size)
		return -FI_ENODATA;

	if (hints->domain_attr) {
		ret = ofi_check_domain_attr(&efa_prov, version, info->domain_attr, hints);
		if (ret)
			return ret;
	}

	if (hints->ep_attr) {
		ret = ofi_check_ep_attr(&efa_util_prov, info->fabric_attr->api_version, info, hints);
		if (ret)
			return ret;
	}

	if (hints->rx_attr) {
		ret = ofi_check_rx_attr(&efa_prov, info, hints->rx_attr, hints->mode);
		if (ret)
			return ret;
	}

	if (hints->tx_attr) {
		ret = ofi_check_tx_attr(&efa_prov, info->tx_attr, hints->tx_attr, hints->mode);
		if (ret)
			return ret;
	}

	return 0;
}
Example 3
static int
fi_ibv_mr_reg(struct fid *fid, const void *buf, size_t len,
	   uint64_t access, uint64_t offset, uint64_t requested_key,
	   uint64_t flags, struct fid_mr **mr, void *context)
{
	struct fi_ibv_mem_desc *md;
	int fi_ibv_access = 0;
	struct fid_domain *domain;

	if (flags)
		return -FI_EBADFLAGS;

	if (fid->fclass != FI_CLASS_DOMAIN) {
		return -FI_EINVAL;
	}
	domain = container_of(fid, struct fid_domain, fid);

	md = calloc(1, sizeof *md);
	if (!md)
		return -FI_ENOMEM;

	md->domain = container_of(domain, struct fi_ibv_domain, domain_fid);
	md->mr_fid.fid.fclass = FI_CLASS_MR;
	md->mr_fid.fid.context = context;
	md->mr_fid.fid.ops = &fi_ibv_mr_ops;

	/* Enable local write access by default for FI_EP_RDM, which hides local
	 * registration requirements. This avoids the need for buffering or
	 * double registration. */
	if (!(md->domain->info->caps & FI_LOCAL_MR))
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;

	/* Local read access to an MR is enabled by default in verbs */

	if (access & FI_RECV)
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;

	/* iWARP spec requires Remote Write access for an MR that is used
	 * as a data sink for a Remote Read */
	if (access & FI_READ) {
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;
		if (md->domain->verbs->device->transport_type == IBV_TRANSPORT_IWARP)
			fi_ibv_access |= IBV_ACCESS_REMOTE_WRITE;
	}

	if (access & FI_WRITE)
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;

	if (access & FI_REMOTE_READ)
		fi_ibv_access |= IBV_ACCESS_REMOTE_READ;

	/* Verbs requires Local Write access too for Remote Write access */
	if (access & FI_REMOTE_WRITE)
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE |
			IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_ATOMIC;

	md->mr = ibv_reg_mr(md->domain->pd, (void *) buf, len, fi_ibv_access);
	if (!md->mr)
		goto err;

	md->mr_fid.mem_desc = (void *) (uintptr_t) md->mr->lkey;
	md->mr_fid.key = md->mr->rkey;
	*mr = &md->mr_fid;
	if (md->domain->eq && (md->domain->eq_flags & FI_REG_MR)) {
		struct fi_eq_entry entry = {
			.fid = &md->mr_fid.fid,
			.context = context
		};
		fi_ibv_eq_write_event(md->domain->eq, FI_MR_COMPLETE,
			 	      &entry, sizeof(entry));
	}
	return 0;

err:
	free(md);
	return -errno;
}

static int fi_ibv_mr_regv(struct fid *fid, const struct iovec * iov,
		size_t count, uint64_t access, uint64_t offset, uint64_t requested_key,
		uint64_t flags, struct fid_mr **mr, void *context)
{
	if (count > VERBS_MR_IOV_LIMIT) {
		VERBS_WARN(FI_LOG_FABRIC,
			   "iov count > %d not supported\n",
			   VERBS_MR_IOV_LIMIT);
		return -FI_EINVAL;
	}
	return fi_ibv_mr_reg(fid, iov->iov_base, iov->iov_len, access, offset,
			requested_key, flags, mr, context);
}

static int fi_ibv_mr_regattr(struct fid *fid, const struct fi_mr_attr *attr,
		uint64_t flags, struct fid_mr **mr)
{
	return fi_ibv_mr_regv(fid, attr->mr_iov, attr->iov_count, attr->access,
			0, attr->requested_key, flags, mr, attr->context);
}
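
From the application side these registration entry points are reached through the public fi_mr_reg()/fi_mr_regattr() wrappers. A minimal, hedged usage sketch (error handling trimmed; the domain is assumed to be already open):

#include <rdma/fi_domain.h>

/* Hedged usage sketch: register a send/receive buffer on an open domain.
 * "domain" is assumed to have been created with fi_domain() beforehand. */
static int register_buffer(struct fid_domain *domain, void *buf, size_t len,
			   struct fid_mr **mr)
{
	int ret;

	ret = fi_mr_reg(domain, buf, len, FI_SEND | FI_RECV,
			0 /* offset */, 0 /* requested_key */, 0 /* flags */,
			mr, NULL /* context */);
	if (ret)
		return ret;

	/* fi_mr_desc()/fi_mr_key() expose the values the provider stored in
	 * mem_desc and key above (lkey and rkey for the verbs provider). */
	(void) fi_mr_desc(*mr);
	(void) fi_mr_key(*mr);
	return 0;
}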

static int fi_ibv_domain_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	struct fi_ibv_domain *domain;
	struct fi_ibv_eq *eq;

	domain = container_of(fid, struct fi_ibv_domain, domain_fid.fid);

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		eq = container_of(bfid, struct fi_ibv_eq, eq_fid);
		domain->eq = eq;
		domain->eq_flags = flags;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fi_ibv_domain_close(fid_t fid)
{
	struct fi_ibv_domain *domain;
	int ret;

	domain = container_of(fid, struct fi_ibv_domain, domain_fid.fid);

	if (domain->rdm) {
		rdma_destroy_ep(domain->rdm_cm->listener);
		free(domain->rdm_cm);
	}

	if (domain->pd) {
		ret = ibv_dealloc_pd(domain->pd);
		if (ret)
			return -ret;
		domain->pd = NULL;
	}

	fi_freeinfo(domain->info);
	free(domain);
	return 0;
}

static int fi_ibv_open_device_by_name(struct fi_ibv_domain *domain, const char *name)
{
	struct ibv_context **dev_list;
	int i, ret = -FI_ENODEV;

	if (!name)
		return -FI_EINVAL;

	dev_list = rdma_get_devices(NULL);
	if (!dev_list)
		return -errno;

	for (i = 0; dev_list[i] && ret; i++) {
		if (domain->rdm) {
			ret = strncmp(name, ibv_get_device_name(dev_list[i]->device),
				      strlen(name) - strlen(verbs_rdm_domain.suffix));

		} else {
			ret = strcmp(name, ibv_get_device_name(dev_list[i]->device));
		}

		if (!ret)
			domain->verbs = dev_list[i];
	}
	rdma_free_devices(dev_list);
	return ret;
}
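
The lookup above is driven by librdmacm's device enumeration. A standalone, hedged sketch of that enumeration, using only the calls already shown:

#include <stdio.h>
#include <rdma/rdma_cma.h>

/* Hedged sketch: print the verbs devices fi_ibv_open_device_by_name()
 * iterates over. */
static void list_verbs_devices(void)
{
	struct ibv_context **dev_list;
	int i, count = 0;

	dev_list = rdma_get_devices(&count);
	if (!dev_list)
		return;

	for (i = 0; i < count && dev_list[i]; i++)
		printf("device[%d]: %s\n", i,
		       ibv_get_device_name(dev_list[i]->device));

	rdma_free_devices(dev_list);
}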

static struct fi_ops fi_ibv_fid_ops = {
	.size = sizeof(struct fi_ops),
	.close = fi_ibv_domain_close,
	.bind = fi_ibv_domain_bind,
	.control = fi_no_control,
	.ops_open = fi_no_ops_open,
};

static struct fi_ops_mr fi_ibv_domain_mr_ops = {
	.size = sizeof(struct fi_ops_mr),
	.reg = fi_ibv_mr_reg,
	.regv = fi_ibv_mr_regv,
	.regattr = fi_ibv_mr_regattr,
};

static struct fi_ops_domain fi_ibv_domain_ops = {
	.size = sizeof(struct fi_ops_domain),
	.av_open = fi_no_av_open,
	.cq_open = fi_ibv_cq_open,
	.endpoint = fi_ibv_open_ep,
	.scalable_ep = fi_no_scalable_ep,
	.cntr_open = fi_no_cntr_open,
	.poll_open = fi_no_poll_open,
	.stx_ctx = fi_no_stx_context,
	.srx_ctx = fi_ibv_srq_context,
};

static struct fi_ops_domain fi_ibv_rdm_domain_ops = {
	.size = sizeof(struct fi_ops_domain),
	.av_open = fi_ibv_rdm_av_open,
	.cq_open = fi_ibv_rdm_cq_open,
	.endpoint = fi_ibv_rdm_open_ep,
	.scalable_ep = fi_no_scalable_ep,
	.cntr_open = fi_rbv_rdm_cntr_open,
	.poll_open = fi_no_poll_open,
	.stx_ctx = fi_no_stx_context,
	.srx_ctx = fi_no_srx_context,
};

static int
fi_ibv_domain(struct fid_fabric *fabric, struct fi_info *info,
	   struct fid_domain **domain, void *context)
{
	struct fi_ibv_domain *_domain;
	struct fi_ibv_fabric *fab;
	struct fi_info *fi;
	int ret;

	fi = fi_ibv_get_verbs_info(info->domain_attr->name);
	if (!fi)
		return -FI_EINVAL;

	fab = container_of(fabric, struct fi_ibv_fabric, util_fabric.fabric_fid);
	ret = ofi_check_domain_attr(&fi_ibv_prov, fabric->api_version,
				    fi->domain_attr, info->domain_attr);
	if (ret)
		return ret;

	_domain = calloc(1, sizeof *_domain);
	if (!_domain)
		return -FI_ENOMEM;

	_domain->info = fi_dupinfo(info);
	if (!_domain->info)
		goto err1;

	_domain->rdm = FI_IBV_EP_TYPE_IS_RDM(info);
	if (_domain->rdm) {
		_domain->rdm_cm = calloc(1, sizeof(*_domain->rdm_cm));
		if (!_domain->rdm_cm) {
			ret = -FI_ENOMEM;
			goto err2;
		}
	}
	ret = fi_ibv_open_device_by_name(_domain, info->domain_attr->name);
	if (ret)
		goto err2;

	_domain->pd = ibv_alloc_pd(_domain->verbs);
	if (!_domain->pd) {
		ret = -errno;
		goto err2;
	}

	_domain->domain_fid.fid.fclass = FI_CLASS_DOMAIN;
	_domain->domain_fid.fid.context = context;
	_domain->domain_fid.fid.ops = &fi_ibv_fid_ops;
	_domain->domain_fid.mr = &fi_ibv_domain_mr_ops;
	if (_domain->rdm) {
		_domain->domain_fid.ops = &fi_ibv_rdm_domain_ops;

		_domain->rdm_cm->ec = rdma_create_event_channel();

		if (!_domain->rdm_cm->ec) {
			VERBS_INFO(FI_LOG_EP_CTRL,
				"Failed to create listener event channel: %s\n",
				strerror(errno));
			ret = -FI_EOTHER;
			goto err2;
		}

		if (fi_fd_nonblock(_domain->rdm_cm->ec->fd) != 0) {
			VERBS_INFO_ERRNO(FI_LOG_EP_CTRL, "fcntl", errno);
			ret = -FI_EOTHER;
			goto err3;
		}

		if (rdma_create_id(_domain->rdm_cm->ec,
				   &_domain->rdm_cm->listener, NULL, RDMA_PS_TCP))
		{
			VERBS_INFO(FI_LOG_EP_CTRL, "Failed to create cm listener: %s\n",
				   strerror(errno));
			ret = -FI_EOTHER;
			goto err3;
		}
		_domain->rdm_cm->is_bound = 0;
	} else {
		_domain->domain_fid.ops = &fi_ibv_domain_ops;
	}
	_domain->fab = fab;

	*domain = &_domain->domain_fid;
	return 0;
err3:
	if (_domain->rdm)
		rdma_destroy_event_channel(_domain->rdm_cm->ec);
err2:
	if (_domain->rdm)
		free(_domain->rdm_cm);
	fi_freeinfo(_domain->info);
err1:
	free(_domain);
	return ret;
}
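
For context, fi_ibv_domain() is reached through the generic libfabric open sequence (fi_getinfo, fi_fabric, fi_domain). A hedged application-side sketch of that sequence; the hints values are illustrative only:

#include <rdma/fabric.h>
#include <rdma/fi_domain.h>

/* Hedged sketch of the standard open sequence that eventually lands in a
 * provider's domain constructor such as fi_ibv_domain(). */
static int open_fabric_and_domain(struct fid_fabric **fabric,
				  struct fid_domain **domain,
				  struct fi_info **info)
{
	struct fi_info *hints;
	int ret;

	hints = fi_allocinfo();
	if (!hints)
		return -FI_ENOMEM;
	hints->ep_attr->type = FI_EP_MSG;	/* illustrative values only */
	hints->caps = FI_MSG;

	ret = fi_getinfo(FI_VERSION(1, 5), NULL, NULL, 0, hints, info);
	fi_freeinfo(hints);
	if (ret)
		return ret;

	ret = fi_fabric((*info)->fabric_attr, fabric, NULL);
	if (ret)
		goto free_info;

	ret = fi_domain(*fabric, *info, domain, NULL);
	if (ret)
		goto close_fabric;
	return 0;

close_fabric:
	fi_close(&(*fabric)->fid);
free_info:
	fi_freeinfo(*info);
	return ret;
}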

static int fi_ibv_trywait(struct fid_fabric *fabric, struct fid **fids, int count)
{
	struct fi_ibv_cq *cq;
	int ret, i;

	for (i = 0; i < count; i++) {
		switch (fids[i]->fclass) {
		case FI_CLASS_CQ:
			cq = container_of(fids[i], struct fi_ibv_cq, cq_fid.fid);
			ret = cq->trywait(fids[i]);
			if (ret)
				return ret;
			break;
		case FI_CLASS_EQ:
			/* We are always ready to wait on an EQ since
			 * rdmacm EQ is based on an fd */
			continue;
		case FI_CLASS_CNTR:
		case FI_CLASS_WAIT:
			return -FI_ENOSYS;
		default:
			return -FI_EINVAL;
		}

	}
	return FI_SUCCESS;
}
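
fi_ibv_trywait() backs the generic fi_trywait() call, which lets an application decide whether it is safe to block on the native wait object. A hedged sketch of that pattern for a CQ opened with FI_WAIT_FD:

#include <errno.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <rdma/fabric.h>
#include <rdma/fi_eq.h>

/* Hedged sketch: block on a CQ's file descriptor only when fi_trywait()
 * reports that nothing is pending. */
static int wait_on_cq_fd(struct fid_fabric *fabric, struct fid_cq *cq,
			 int timeout_ms)
{
	struct fid *fids[1] = { &cq->fid };
	struct epoll_event ev = { .events = EPOLLIN };
	int fd, epfd, ret;

	ret = fi_control(&cq->fid, FI_GETWAIT, &fd);
	if (ret)
		return ret;

	epfd = epoll_create1(0);
	if (epfd < 0)
		return -errno;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
		ret = -errno;
		goto out;
	}

	/* Only block if the provider reports it is safe to do so. */
	if (fi_trywait(fabric, fids, 1) == FI_SUCCESS)
		epoll_wait(epfd, &ev, 1, timeout_ms);
	ret = 0;
out:
	close(epfd);
	return ret;
}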

static int fi_ibv_fabric_close(fid_t fid)
{
	struct fi_ibv_fabric *fab;
	int ret;

	fab = container_of(fid, struct fi_ibv_fabric, util_fabric.fabric_fid.fid);
	ret = ofi_fabric_close(&fab->util_fabric);
	if (ret)
		return ret;
	free(fab);

	return 0;
}

static struct fi_ops fi_ibv_fi_ops = {
	.size = sizeof(struct fi_ops),
	.close = fi_ibv_fabric_close,
	.bind = fi_no_bind,
	.control = fi_no_control,
	.ops_open = fi_no_ops_open,
};

static struct fi_ops_fabric fi_ibv_ops_fabric = {
	.size = sizeof(struct fi_ops_fabric),
	.domain = fi_ibv_domain,
	.passive_ep = fi_ibv_passive_ep,
	.eq_open = fi_ibv_eq_open,
	.wait_open = fi_no_wait_open,
	.trywait = fi_ibv_trywait
};

int fi_ibv_fabric(struct fi_fabric_attr *attr, struct fid_fabric **fabric,
		  void *context)
{
	struct fi_ibv_fabric *fab;
	struct fi_info *info;
	int ret;

	ret = fi_ibv_init_info();
	if (ret)
		return ret;

	fab = calloc(1, sizeof(*fab));
	if (!fab)
		return -FI_ENOMEM;

	for (info = verbs_info; info; info = info->next) {
		ret = ofi_fabric_init(&fi_ibv_prov, info->fabric_attr, attr,
				      &fab->util_fabric, context);
		if (ret != -FI_ENODATA)
			break;
	}
	if (ret) {
		free(fab);
		return ret;
	}

	*fabric = &fab->util_fabric.fabric_fid;
	(*fabric)->fid.ops = &fi_ibv_fi_ops;
	(*fabric)->ops = &fi_ibv_ops_fabric;

	return 0;
}
Example 4
static int _gnix_ep_getinfo(enum fi_ep_type ep_type, uint32_t version,
			    const char *node, const char *service,
			    uint64_t flags, struct fi_info *hints,
			    struct fi_info **info)
{
	uint64_t mode = GNIX_FAB_MODES;
	struct fi_info *gnix_info = NULL;
	int ret = -FI_ENODATA;

	GNIX_TRACE(FI_LOG_FABRIC, "\n");

	if ((hints && hints->ep_attr) &&
	    (hints->ep_attr->type != FI_EP_UNSPEC &&
	     hints->ep_attr->type != ep_type)) {
		return -FI_ENODATA;
	}

	gnix_info = _gnix_allocinfo();
	if (!gnix_info)
		return -FI_ENOMEM;

	gnix_info->ep_attr->type = ep_type;

	if (hints) {
		/* TODO: Add version check when we decide on how to do it */
		if (hints->addr_format == FI_ADDR_STR) {
			gnix_info->addr_format = FI_ADDR_STR;
		}

		if (hints->ep_attr) {
			/* Only support FI_PROTO_GNI protocol. */
			switch (hints->ep_attr->protocol) {
			case FI_PROTO_UNSPEC:
			case FI_PROTO_GNI:
				break;
			default:
				goto err;
			}

			if ((hints->ep_attr->tx_ctx_cnt > GNIX_SEP_MAX_CNT) &&
				(hints->ep_attr->tx_ctx_cnt !=
						FI_SHARED_CONTEXT)) {
				goto err;
			}

			if (hints->ep_attr->rx_ctx_cnt > GNIX_SEP_MAX_CNT)
				goto err;

			if (hints->ep_attr->tx_ctx_cnt)
				gnix_info->ep_attr->tx_ctx_cnt =
					hints->ep_attr->tx_ctx_cnt;
			if (hints->ep_attr->rx_ctx_cnt)
				gnix_info->ep_attr->rx_ctx_cnt =
					hints->ep_attr->rx_ctx_cnt;

			if (hints->ep_attr->max_msg_size > GNIX_MAX_MSG_SIZE)
				goto err;
		}

		GNIX_DEBUG(FI_LOG_FABRIC, "Passed EP attributes check\n");

		/*
		 * check the mode field
		 */
		if (hints->mode) {
			if ((hints->mode & GNIX_FAB_MODES) != GNIX_FAB_MODES) {
				goto err;
			}
			mode = hints->mode & ~GNIX_FAB_MODES_CLEAR;
		}

		GNIX_DEBUG(FI_LOG_FABRIC, "Passed mode check\n");

		if (hints->caps) {
			/* The provider must support all requested
			 * capabilities. */
			if ((hints->caps & GNIX_EP_CAPS_FULL) != hints->caps)
				goto err;

			/* The provider may silently enable secondary
			 * capabilities that do not introduce any overhead. */
			gnix_info->caps = hints->caps | GNIX_EP_SEC_CAPS;
		}

		GNIX_DEBUG(FI_LOG_FABRIC, "Passed caps check gnix_info->caps = 0x%016lx\n",
			   gnix_info->caps);

		if (hints->tx_attr) {
			if ((hints->tx_attr->op_flags & GNIX_EP_OP_FLAGS) !=
				hints->tx_attr->op_flags) {
				goto err;
			}
			if (hints->tx_attr->inject_size > GNIX_INJECT_SIZE) {
				goto err;
			}

			gnix_info->tx_attr->op_flags =
				hints->tx_attr->op_flags & GNIX_EP_OP_FLAGS;
		}

		GNIX_DEBUG(FI_LOG_FABRIC, "Passed TX attributes check\n");

		if (hints->rx_attr) {
			if ((hints->rx_attr->op_flags & GNIX_EP_OP_FLAGS) !=
					hints->rx_attr->op_flags) {
				goto err;
			}

			gnix_info->rx_attr->op_flags =
				hints->rx_attr->op_flags & GNIX_EP_OP_FLAGS;
		}

		if (hints->fabric_attr && hints->fabric_attr->name &&
		    strncmp(hints->fabric_attr->name, gnix_fab_name,
			    strlen(gnix_fab_name))) {
			goto err;
		}

		GNIX_DEBUG(FI_LOG_FABRIC, "Passed fabric name check\n");

		if (hints->domain_attr) {
			if (hints->domain_attr->name &&
			    strncmp(hints->domain_attr->name, gnix_dom_name,
				    strlen(gnix_dom_name))) {
				goto err;
			}

			if (hints->domain_attr->control_progress !=
				FI_PROGRESS_UNSPEC)
				gnix_info->domain_attr->control_progress =
					hints->domain_attr->control_progress;

			if (hints->domain_attr->data_progress !=
				FI_PROGRESS_UNSPEC)
				gnix_info->domain_attr->data_progress =
					hints->domain_attr->data_progress;


			switch (hints->domain_attr->mr_mode) {
			case FI_MR_UNSPEC:
			case FI_MR_BASIC:
				if (FI_VERSION_GE(version, FI_VERSION(1, 5))) {
					hints->domain_attr->mr_mode =
						OFI_MR_BASIC_MAP;
				}
				break;
			case FI_MR_SCALABLE:
				GNIX_WARN(FI_LOG_FABRIC,
						  "GNI provider doesn't currently support MR_SCALABLE\n");
				goto err;
			}

			switch (hints->domain_attr->threading) {
			case FI_THREAD_COMPLETION:
				gnix_info->domain_attr->threading =
					hints->domain_attr->threading;
				break;
			default:
				break;
			}

			if (hints->domain_attr->caps) {
				if (hints->domain_attr->caps & ~GNIX_DOM_CAPS) {
					GNIX_WARN(FI_LOG_FABRIC,
						  "Invalid domain caps\n");
					goto err;
				}

				gnix_info->domain_attr->caps =
					hints->domain_attr->caps;
			}

			ret = ofi_check_domain_attr(&gnix_prov, version,
						    gnix_info->domain_attr,
						    hints->domain_attr);
			if (ret) {
				GNIX_WARN(FI_LOG_FABRIC,
						  "GNI failed domain attributes check\n");
				goto err;
			}

			GNIX_DEBUG(FI_LOG_FABRIC, "Passed the domain attributes check\n");
		}
	}

	ret = __gnix_getinfo_resolve_node(node, service, flags, hints,
					  gnix_info);
	if (ret != FI_SUCCESS)
		goto err;

	gnix_info->mode = mode;
	gnix_info->fabric_attr->name = strdup(gnix_fab_name);
	gnix_info->tx_attr->caps = gnix_info->caps;
	gnix_info->tx_attr->mode = gnix_info->mode;
	gnix_info->rx_attr->caps = gnix_info->caps;
	gnix_info->rx_attr->mode = gnix_info->mode;

	*info = gnix_info;

	GNIX_DEBUG(FI_LOG_FABRIC, "Returning EP type: %s\n",
		   fi_tostr(&ep_type, FI_TYPE_EP_TYPE));
	return FI_SUCCESS;
err:
	fi_freeinfo(gnix_info);
	return ret;
}
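
The branches above only pass when the caller's hints stay inside the provider's limits. A hedged application-side sketch of hints this function would accept; the capability and mode values are illustrative, not the provider's required set:

#include <rdma/fabric.h>

/* Hedged sketch: build hints that satisfy the ep_type, caps, and mode
 * checks in _gnix_ep_getinfo(). */
static struct fi_info *build_rdm_hints(void)
{
	struct fi_info *hints = fi_allocinfo();

	if (!hints)
		return NULL;

	hints->ep_attr->type = FI_EP_RDM;	/* matches the ep_type filter */
	hints->caps = FI_MSG | FI_RMA;		/* illustrative; must stay within GNIX_EP_CAPS_FULL */
	hints->mode = ~0ULL;			/* accept every provider-required mode bit */
	return hints;
}
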
Example 5
int ofi_check_info(const struct util_prov *util_prov,
		  const struct fi_info *user_info,
		  enum fi_match_type type)
{
	const struct fi_info *prov_info = util_prov->info;
	const struct fi_provider *prov = util_prov->prov;
	int ret;

	if (!user_info)
		return 0;

	if (user_info->caps & ~(prov_info->caps)) {
		FI_INFO(prov, FI_LOG_CORE, "Unsupported capabilities\n");
		FI_INFO_CAPS(prov, prov_info, user_info, caps, FI_TYPE_CAPS);
		return -FI_ENODATA;
	}

	if ((user_info->mode & prov_info->mode) != prov_info->mode) {
		FI_INFO(prov, FI_LOG_CORE, "needed mode not set\n");
		FI_INFO_MODE(prov, prov_info, user_info);
		return -FI_ENODATA;
	}

	if (!fi_valid_addr_format(prov_info->addr_format,
				  user_info->addr_format)) {
		FI_INFO(prov, FI_LOG_CORE, "address format not supported\n");
		return -FI_ENODATA;
	}

	if (user_info->fabric_attr) {
		ret = ofi_check_fabric_attr(prov, prov_info->fabric_attr,
					   user_info->fabric_attr,
					   type);
		if (ret)
			return ret;
	}

	if (user_info->domain_attr) {
		ret = ofi_check_domain_attr(prov, prov_info->domain_attr,
				user_info->domain_attr,
				type);
		if (ret)
			return ret;
	}

	if (user_info->ep_attr) {
		ret = ofi_check_ep_attr(util_prov, user_info->ep_attr);
		if (ret)
			return ret;
	}

	if (user_info->rx_attr) {
		ret = ofi_check_rx_attr(prov, prov_info->rx_attr,
				user_info->rx_attr);
		if (ret)
			return ret;
	}

	if (user_info->tx_attr) {
		ret = ofi_check_tx_attr(prov, prov_info->tx_attr,
				user_info->tx_attr);
		if (ret)
			return ret;
	}

	return 0;
}
Example 6
/* Used if there is only a single fi_info in the provider */
int ofi_check_info(const struct util_prov *util_prov,
		   const struct fi_info *prov_info, uint32_t api_version,
		   const struct fi_info *user_info)
{
	const struct fi_provider *prov = util_prov->prov;
	uint64_t prov_mode;
	int ret;

	if (!user_info)
		return 0;

	/* Check the oft-used endpoint type attribute first to avoid any
	 * other unnecessary checks */
	if (user_info->ep_attr) {
		ret = ofi_check_ep_type(prov, prov_info->ep_attr,
					user_info->ep_attr);
		if (ret)
			return ret;
	}

	if (user_info->caps & ~(prov_info->caps)) {
		FI_INFO(prov, FI_LOG_CORE, "Unsupported capabilities\n");
		FI_INFO_CHECK(prov, prov_info, user_info, caps, FI_TYPE_CAPS);
		return -FI_ENODATA;
	}

	prov_mode = ofi_mr_get_prov_mode(api_version, user_info, prov_info);

	if ((user_info->mode & prov_mode) != prov_mode) {
		FI_INFO(prov, FI_LOG_CORE, "needed mode not set\n");
		FI_INFO_MODE(prov, prov_mode, user_info->mode);
		return -FI_ENODATA;
	}

	if (!fi_valid_addr_format(prov_info->addr_format,
				  user_info->addr_format)) {
		FI_INFO(prov, FI_LOG_CORE, "address format not supported\n");
		FI_INFO_CHECK(prov, prov_info, user_info, addr_format,
			      FI_TYPE_ADDR_FORMAT);
		return -FI_ENODATA;
	}

	if (user_info->fabric_attr) {
		ret = ofi_check_fabric_attr(prov, prov_info->fabric_attr,
					    user_info->fabric_attr);
		if (ret)
			return ret;
	}

	if (user_info->domain_attr) {
		ret = ofi_check_domain_attr(prov, api_version,
					    prov_info->domain_attr,
					    user_info);
		if (ret)
			return ret;
	}

	if (user_info->ep_attr) {
		ret = ofi_check_ep_attr(util_prov, api_version,
					prov_info, user_info);
		if (ret)
			return ret;
	}

	if (user_info->rx_attr) {
		ret = ofi_check_rx_attr(prov, prov_info,
					user_info->rx_attr, user_info->mode);
		if (ret)
			return ret;
	}

	if (user_info->tx_attr) {
		ret = ofi_check_tx_attr(prov, prov_info->tx_attr,
					user_info->tx_attr, user_info->mode);
		if (ret)
			return ret;
	}
	return 0;
}
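
Note the asymmetry between the two bitmask tests above: caps are requested by the application and must be a subset of what the provider offers, while mode bits are demanded by the provider and must all be accepted by the application. A small worked illustration with arbitrary values:

#include <assert.h>
#include <rdma/fabric.h>

/* Worked illustration of the caps and mode checks (values are arbitrary). */
static void caps_and_mode_demo(void)
{
	uint64_t prov_caps = FI_MSG | FI_RMA;
	uint64_t prov_mode = FI_CONTEXT | FI_RX_CQ_DATA;

	/* caps: requesting a bit the provider lacks fails the check. */
	assert((FI_MSG | FI_ATOMIC) & ~prov_caps);
	assert(((FI_MSG | FI_RMA) & ~prov_caps) == 0);

	/* mode: the application must accept every provider-required bit. */
	assert(((uint64_t) FI_CONTEXT & prov_mode) != prov_mode);
	assert((((uint64_t) FI_CONTEXT | FI_RX_CQ_DATA) & prov_mode) == prov_mode);
}
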
Example 7
int efa_domain_open(struct fid_fabric *fabric_fid, struct fi_info *info,
		    struct fid_domain **domain_fid, void *context)
{
	struct efa_domain *domain;
	struct efa_fabric *fabric;
	const struct fi_info *fi;
	int ret;

	fi = efa_get_efa_info(info->domain_attr->name);
	if (!fi)
		return -FI_EINVAL;

	fabric = container_of(fabric_fid, struct efa_fabric,
			      util_fabric.fabric_fid);
	ret = ofi_check_domain_attr(&efa_prov, fabric_fid->api_version,
				    fi->domain_attr, info);
	if (ret)
		return ret;

	domain = calloc(1, sizeof(*domain));
	if (!domain)
		return -FI_ENOMEM;

	ret = ofi_domain_init(fabric_fid, info, &domain->util_domain,
			      context);
	if (ret)
		goto err_free_domain;

	domain->info = fi_dupinfo(info);
	if (!domain->info) {
		ret = -FI_ENOMEM;
		goto err_close_domain;
	}

	domain->rdm = EFA_EP_TYPE_IS_RDM(info);

	ret = efa_open_device_by_name(domain, info->domain_attr->name);
	if (ret)
		goto err_free_info;

	domain->pd = efa_cmd_alloc_pd(domain->ctx);
	if (!domain->pd) {
		ret = -errno;
		goto err_free_info;
	}

	EFA_INFO(FI_LOG_DOMAIN, "Allocated pd[%u].\n", domain->pd->pdn);

	domain->util_domain.domain_fid.fid.ops = &efa_fid_ops;
	domain->util_domain.domain_fid.ops = &efa_domain_ops;

	domain->fab = fabric;

	*domain_fid = &domain->util_domain.domain_fid;

	if (efa_mr_cache_enable) {
		domain->notifier = efa_mem_notifier;
		if (!domain->notifier) {
			EFA_DBG(FI_LOG_MR,
				"efa_mem_notifier is not initialized.\n");
			ret = -FI_ENOMEM;
			goto err_free_pd;
		}
		domain->monitor.subscribe = efa_monitor_subscribe;
		domain->monitor.unsubscribe = efa_monitor_unsubscribe;
		ofi_monitor_init(&domain->monitor);

		domain->cache.max_cached_cnt = efa_mr_max_cached_count;
		domain->cache.max_cached_size = efa_mr_max_cached_size;
		domain->cache.merge_regions = efa_mr_cache_merge_regions;
		domain->cache.entry_data_size = sizeof(struct efa_mem_desc);
		domain->cache.add_region = efa_mr_cache_entry_reg;
		domain->cache.delete_region = efa_mr_cache_entry_dereg;
		ret = ofi_mr_cache_init(&domain->util_domain, &domain->monitor,
					&domain->cache);
		if (OFI_UNLIKELY(ret))
			goto err_cleanup_monitor;

		domain->util_domain.domain_fid.mr = &efa_domain_mr_cache_ops;
	} else {
		domain->util_domain.domain_fid.mr = &efa_domain_mr_ops;
	}

	return 0;

err_cleanup_monitor:
	ofi_monitor_cleanup(&domain->monitor);
err_free_pd:
	ret = efa_cmd_dealloc_pd(domain->pd);
	if (ret) {
		EFA_INFO_ERRNO(FI_LOG_DOMAIN, "efa_cmd_dealloc_pd", ret);
	}
	domain->pd = NULL;
err_free_info:
	fi_freeinfo(domain->info);
err_close_domain:
	ofi_domain_close(&domain->util_domain);
err_free_domain:
	free(domain);
	return ret;
}
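
With efa_mr_cache_enable set, the domain's mr ops point at the cached registration path, so repeated registrations of the same region can be served from the cache. A hedged application-side sketch of the call pattern the cache targets, using only public libfabric calls (whether the second call actually hits the cache is provider policy, not something this sketch guarantees):

#include <rdma/fi_domain.h>

/* Hedged sketch: register the same buffer twice and release both handles.
 * The application-visible sequence is identical whether or not the MR
 * cache is enabled. */
static int reregister_demo(struct fid_domain *domain, void *buf, size_t len)
{
	struct fid_mr *mr1, *mr2;
	int ret;

	ret = fi_mr_reg(domain, buf, len, FI_SEND | FI_RECV, 0, 0, 0, &mr1, NULL);
	if (ret)
		return ret;

	ret = fi_mr_reg(domain, buf, len, FI_SEND | FI_RECV, 0, 0, 0, &mr2, NULL);
	if (ret) {
		fi_close(&mr1->fid);
		return ret;
	}

	fi_close(&mr2->fid);
	fi_close(&mr1->fid);
	return 0;
}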