Example #1
void rdm_sr_setup_common(void)
{
	int ret = 0, i = 0;

	rdm_sr_setup_common_eps();

	for (i = 0; i < NUMEPS; i++) {
		ret = fi_mr_reg(dom[i], target, 3 * BUF_SZ,
				FI_REMOTE_WRITE, 0, 0, 0, rem_mr + i, &target);
		cr_assert_eq(ret, 0);

		ret = fi_mr_reg(dom[i], source, BUF_SZ,
				FI_REMOTE_WRITE, 0, 0, 0, loc_mr + i, &source);
		cr_assert_eq(ret, 0);

		ret = fi_mr_reg(dom[i], iov_dest_buf, IOV_CNT * BUF_SZ,
				FI_REMOTE_WRITE, 0, 0, 0, iov_dest_buf_mr + i,
				&iov_dest_buf);
		cr_assert_eq(ret, 0);

		ret = fi_mr_reg(dom[i], iov_src_buf, IOV_CNT * BUF_SZ,
				FI_REMOTE_WRITE, 0, 0, 0, iov_src_buf_mr + i,
				&iov_src_buf);
		cr_assert_eq(ret, 0);

		mr_key[i] = fi_mr_key(rem_mr[i]);
		iov_dest_buf_mr_key[i] = fi_mr_key(iov_dest_buf_mr[i]);
	}
}
Example #2
/*
 * rpmem_fip_init_memory -- (internal) initialize common memory resources
 */
static int
rpmem_fip_init_memory(struct rpmem_fip *fip)
{
	ASSERTne(Pagesize, 0);
	int ret;

	/*
	 * Register the local memory space. The local memory is used as the
	 * source of WRITE operations in rpmem_fip_persist(), hence the
	 * FI_WRITE access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->laddr, fip->size,
			FI_WRITE, 0, 0, 0, &fip->mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registrating memory");
		return ret;
	}

	/* get local memory descriptor */
	fip->mr_desc = fi_mr_desc(fip->mr);

	/* allocate buffer for read operation */
	ASSERT(IS_PAGE_ALIGNED(RPMEM_RD_BUFF_SIZE));
	errno = posix_memalign((void **)&fip->rd_buff, Pagesize,
			RPMEM_RD_BUFF_SIZE);
	if (errno) {
		RPMEM_LOG(ERR, "!allocating read buffer");
		ret = -1;
		goto err_malloc_rd_buff;
	}

	/*
	 * Register buffer for read operation.
	 * The read operation utilizes READ operation thus
	 * the FI_REMOTE_WRITE flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->rd_buff,
			RPMEM_RD_BUFF_SIZE, FI_REMOTE_WRITE,
			0, 0, 0, &fip->rd_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registrating read buffer");
		goto err_rd_mr;
	}

	/* get read buffer local memory descriptor */
	fip->rd_mr_desc = fi_mr_desc(fip->rd_mr);

	return 0;
err_rd_mr:
	free(fip->rd_buff);
err_malloc_rd_buff:
	RPMEM_FI_CLOSE(fip->mr, "unregistering memory");
	return ret;
}
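For context, a matching cleanup sketch (an assumption, not taken from the example above): the two registrations and the read buffer would be released in reverse order, reusing the RPMEM_FI_CLOSE helper already seen in the error path.

static void
rpmem_fip_fini_memory_sketch(struct rpmem_fip *fip)
{
	/* unregister the read buffer MR and free the buffer */
	RPMEM_FI_CLOSE(fip->rd_mr, "unregistering read buffer");
	free(fip->rd_buff);

	/* unregister the local memory region registered with FI_WRITE */
	RPMEM_FI_CLOSE(fip->mr, "unregistering memory");
}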
Example #3
static int alloc_ep_res(struct fi_info *fi)
{
	int ret;

	tx_size = MAX(FT_MAX_CTRL_MSG, opts.transfer_size);
	if (tx_size > fi->ep_attr->max_msg_size) {
		fprintf(stderr, "transfer size is larger than the maximum size "
				"of the data transfer supported by the provider\n");
		return -1;
	}

	tx_buf = malloc(tx_size);
	if (!tx_buf) {
		fprintf(stderr, "Cannot allocate tx_buf\n");
		return -1;
	}

	ret = fi_mr_reg(domain, tx_buf, tx_size, FI_SEND,
			0, FT_MR_KEY, 0, &mr, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", ret);
		return ret;
	}

	/* set the multi buffer size to be allocated */
	rx_size = MAX(tx_size, DEFAULT_MULTI_BUF_SIZE) * MULTI_BUF_SIZE_FACTOR;
	rx_buf = malloc(rx_size);
	if (!rx_buf) {
		fprintf(stderr, "Cannot allocate rx_buf\n");
		return -1;
	}

	ret = fi_mr_reg(domain, rx_buf, rx_size, FI_RECV, 0, FT_MR_KEY + 1, 0,
			&mr_multi_recv, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", ret);
		return ret;
	}

	/* Prevent memory registration by ft_alloc_active_res() -> ft_alloc_msgs() */
	ft_skip_mr = 1;

	ret = ft_alloc_active_res(fi);
	if (ret)
		return ret;

	return 0;
}
Example #4
static int ft_setup_xcontrol_bufs(struct ft_xcontrol *ctrl)
{
	size_t size;
	int i, ret;

	size = ft.size_array[ft.size_cnt - 1];
	if (!ctrl->buf) {
		ctrl->buf = calloc(1, size);
		if (!ctrl->buf)
			return -FI_ENOMEM;
	}

	if ((fabric_info->mode & FI_LOCAL_MR) && !ctrl->mr) {
		ret = fi_mr_reg(domain, ctrl->buf, size,
				0, 0, 0, 0, &ctrl->mr, NULL);
		if (ret) {
			FT_PRINTERR("fi_mr_reg", ret);
			return ret;
		}
		ctrl->memdesc = fi_mr_desc(ctrl->mr);
	}

	for (i = 0; i < ft.iov_cnt; i++)
		ctrl->iov_desc[i] = ctrl->memdesc;

	return 0;
}
Example #5
static int create_messages(struct cma_node *node)
{
	int ret;

	if (!hints->ep_attr->max_msg_size)
		hints->tx_attr->size = 0;

	if (!hints->tx_attr->size)
		return 0;

	node->mem = malloc(hints->ep_attr->max_msg_size);
	if (!node->mem) {
		printf("failed message allocation\n");
		return -1;
	}

	if (info->mode & FI_LOCAL_MR) {
		ret = fi_mr_reg(node->domain, node->mem, hints->ep_attr->max_msg_size,
				FI_SEND | FI_RECV, 0, 0, 0, &node->mr, NULL);
		if (ret) {
			FT_PRINTERR("fi_reg_mr", ret);
			goto err;
		}
		node->mrdesc = fi_mr_desc(node->mr);
	}

	ret = post_recvs(node);
	return ret;

err:
	free(node->mem);
	return -1;
}
Example #6
Test(domain, cache_flush_op)
{
	int i, ret;
	const int num_doms = 11;
	struct fid_domain *doms[num_doms];
	struct fi_gni_ops_domain *gni_domain_ops;
	struct fid_mr *mr;
	char *buf = calloc(1024, sizeof(char));

	cr_assert(buf);

	memset(doms, 0, num_doms*sizeof(struct fid_domain *));

	for (i = 0; i < num_doms; i++) {
		ret = fi_domain(fabric, fi, &doms[i], NULL);
		cr_assert(ret == FI_SUCCESS, "fi_domain");
		ret = fi_open_ops(&doms[i]->fid, FI_GNI_DOMAIN_OPS_1,
				  0, (void **) &gni_domain_ops, NULL);
		cr_assert(ret == FI_SUCCESS, "fi_open_ops");

		ret = fi_mr_reg(doms[i], buf, 1024, FI_READ, 0, 0, 0, &mr, NULL);
		cr_assert(ret == FI_SUCCESS, "fi_reg_mr");

		ret = fi_close(&mr->fid);
		cr_assert(ret == FI_SUCCESS, "fi_close mr");

		ret = gni_domain_ops->flush_cache(&doms[i]->fid);
		cr_assert(ret == FI_SUCCESS, "flush cache");

		ret = fi_close(&doms[i]->fid);
		cr_assert(ret == FI_SUCCESS, "fi_close domain");
	}

	free(buf);
}
Example #7
static int alloc_ep_res(struct fi_info *fi)
{
	uint64_t access_mode;
	int ret;

	ret = ft_alloc_bufs();
	if (ret)
		return ret;

	switch (op_type) {
	case FT_RMA_READ:
		access_mode = FI_REMOTE_READ;
		break;
	case FT_RMA_WRITE:
	case FT_RMA_WRITEDATA:
		access_mode = FI_REMOTE_WRITE;
		break;
	default:
		/* Impossible to reach here */
		FT_PRINTERR("invalid op_type", ret);
		exit(1);
	}
	ret = fi_mr_reg(domain, buf, buf_size,
			access_mode, 0, 0, 0, &mr, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", ret);
		return ret;
	}

	ret = ft_alloc_active_res(fi);
	if (ret)
		return ret;

	return 0;
}
Example #8
/* Common code will free allocated buffers and MR */
static int alloc_bufs(void)
{
	int ret;

	tx_size = opts.transfer_size + ft_tx_prefix_size();
	rx_size = opts.transfer_size + ft_rx_prefix_size();
	buf_size = (tx_size + rx_size) * concurrent_msgs;

	buf = malloc(buf_size);
	tx_ctx_arr = calloc(concurrent_msgs, sizeof(*tx_ctx_arr));
	rx_ctx_arr = calloc(concurrent_msgs, sizeof(*rx_ctx_arr));
	if (!buf || !tx_ctx_arr || !rx_ctx_arr)
		return -FI_ENOMEM;

	rx_buf = buf;
	tx_buf = (char *) buf + rx_size * concurrent_msgs;

	if (fi->domain_attr->mr_mode & FI_MR_LOCAL) {
		ret = fi_mr_reg(domain, buf, buf_size, FI_SEND | FI_RECV,
				 0, FT_MR_KEY, 0, &mr, NULL);
		if (ret)
			return ret;

		mr_desc = fi_mr_desc(mr);
	}

	return 0;
}
Example #9
FidMr register_memory_buffer(fid_domain& domain, void* buf, size_t size, uint64_t access) {
    fid_mr* mem_reg{};
    auto ret = fi_mr_reg(&domain, buf, size, access, 0, 0, 0, &mem_reg, nullptr);
    if (ret) {
        throw FabricException("fi_mr_reg error: " + fi_error_to_string(int(ret)), ret);
    }
    return FidMr{mem_reg};
}
Example #10
static int alloc_ep_res(struct fi_info *fi)
{
	struct fi_cq_attr cq_attr;
	struct fi_av_attr av_attr;
	int ret;

	buffer_size = !custom ? test_size[TEST_CNT - 1].size : transfer_size;
	buffer_size += prefix_len;
	buf = malloc(buffer_size);
	if (!buf) {
		perror("malloc");
		return -1;
	}
	buf_ptr = (char *)buf + prefix_len;

	memset(&cq_attr, 0, sizeof cq_attr);
	cq_attr.format = FI_CQ_FORMAT_CONTEXT;
	cq_attr.wait_obj = FI_WAIT_NONE;
	cq_attr.size = max_credits << 1;
	ret = fi_cq_open(dom, &cq_attr, &scq, NULL);
	if (ret) {
		printf("fi_cq_open send comp %s\n", fi_strerror(-ret));
		goto err1;
	}

	ret = fi_cq_open(dom, &cq_attr, &rcq, NULL);
	if (ret) {
		printf("fi_cq_open recv comp %s\n", fi_strerror(-ret));
		goto err2;
	}

	ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL);
	if (ret) {
		printf("fi_mr_reg %s\n", fi_strerror(-ret));
		goto err3;
	}

	memset(&av_attr, 0, sizeof av_attr);
	av_attr.type = FI_AV_MAP;
	av_attr.name = NULL;
	av_attr.flags = 0;
	ret = fi_av_open(dom, &av_attr, &av, NULL);
	if (ret) {
		printf("fi_av_open %s\n", fi_strerror(-ret));
		goto err4;
	}

	return 0;

err4:
	fi_close(&mr->fid);
err3:
	fi_close(&rcq->fid);
err2:
	fi_close(&scq->fid);
err1:
	free(buf);
	return ret;
}
Example #11
/*
 * Include FI_MSG_PREFIX space in the allocated buffer, and ensure that the
 * buffer is large enough for a control message used to exchange addressing
 * data.
 */
int ft_alloc_msgs(void)
{
	int ret;
	long alignment = 1;

	/* TODO: support multi-recv tests */
	if (fi->rx_attr->op_flags == FI_MULTI_RECV)
		return 0;

	tx_size = opts.options & FT_OPT_SIZE ?
		  opts.transfer_size : test_size[TEST_CNT - 1].size;
	if (tx_size > fi->ep_attr->max_msg_size)
		tx_size = fi->ep_attr->max_msg_size;
	rx_size = tx_size + ft_rx_prefix_size();
	tx_size += ft_tx_prefix_size();
	buf_size = MAX(tx_size, FT_MAX_CTRL_MSG) + MAX(rx_size, FT_MAX_CTRL_MSG);

	if (opts.options & FT_OPT_ALIGN) {
		alignment = sysconf(_SC_PAGESIZE);
		if (alignment < 0)
			return -errno;
		buf_size += alignment;

		ret = posix_memalign(&buf, (size_t) alignment, buf_size);
		if (ret) {
			FT_PRINTERR("posix_memalign", ret);
			return ret;
		}
	} else {
		buf = malloc(buf_size);
		if (!buf) {
			perror("malloc");
			return -FI_ENOMEM;
		}
	}
	memset(buf, 0, buf_size);
	rx_buf = buf;
	tx_buf = (char *) buf + MAX(rx_size, FT_MAX_CTRL_MSG);
	tx_buf = (void *) (((uintptr_t) tx_buf + alignment - 1) &
			   ~(alignment - 1));

	remote_cq_data = ft_init_cq_data(fi);

	if (!ft_skip_mr && ((fi->mode & FI_LOCAL_MR) ||
				(fi->caps & (FI_RMA | FI_ATOMIC)))) {
		ret = fi_mr_reg(domain, buf, buf_size, ft_caps_to_mr_access(fi->caps),
				0, FT_MR_KEY, 0, &mr, NULL);
		if (ret) {
			FT_PRINTERR("fi_mr_reg", ret);
			return ret;
		}
	} else {
		mr = &no_mr;
	}

	return 0;
}
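The access bits passed to fi_mr_reg() above come from ft_caps_to_mr_access(). A plausible mapping in that spirit is sketched below; this is an illustration under assumptions, not the actual fabtests helper.

static uint64_t caps_to_mr_access_sketch(uint64_t caps)
{
	uint64_t access = 0;

	/* message and tagged endpoints need local send/recv access */
	if (caps & (FI_MSG | FI_TAGGED))
		access |= FI_SEND | FI_RECV;

	/* RMA and atomics need both local and remote read/write access */
	if (caps & (FI_RMA | FI_ATOMIC))
		access |= FI_READ | FI_WRITE |
			  FI_REMOTE_READ | FI_REMOTE_WRITE;

	return access;
}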
Example #12
static int rxm_mr_buf_reg(void *pool_ctx, void *addr, size_t len, void **context)
{
	int ret;
	struct fid_mr *mr;
	struct fid_domain *msg_domain = (struct fid_domain *)pool_ctx;

	ret = fi_mr_reg(msg_domain, addr, len, FI_SEND | FI_RECV, 0, 0, 0, &mr, NULL);
	*context = mr;
	return ret;
}
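The registration is stored in *context so the buffer pool can hand it back later. A hypothetical counterpart that unregisters it might look like this (callback signature assumed, not from the rxm source):

static void rxm_mr_buf_close_sketch(void *pool_ctx, void *context)
{
	/* context holds the struct fid_mr * saved by rxm_mr_buf_reg() */
	struct fid_mr *mr = context;

	(void) pool_ctx;
	if (mr)
		fi_close(&mr->fid);
}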
Example #13
static int alloc_ep_res(struct fi_info *fi)
{
	struct fi_cq_attr cq_attr;
	int ret;

	buffer_size = !custom ? test_size[TEST_CNT - 1].size : transfer_size;
	if (buffer_size < MIN_BUF_SIZE) {
		buffer_size = MIN_BUF_SIZE;
	}
	buf = malloc(buffer_size);
	if (!buf) {
		perror("malloc");
		return -1;
	}

	memset(&cq_attr, 0, sizeof cq_attr);
	cq_attr.format = FI_CQ_FORMAT_CONTEXT;
	cq_attr.wait_obj = FI_WAIT_NONE;
	cq_attr.size = max_credits << 1;
	ret = fi_cq_open(dom, &cq_attr, &scq, NULL);
	if (ret) {
		printf("fi_eq_open send comp %s\n", fi_strerror(-ret));
		goto err1;
	}

	ret = fi_cq_open(dom, &cq_attr, &rcq, NULL);
	if (ret) {
		printf("fi_eq_open recv comp %s\n", fi_strerror(-ret));
		goto err2;
	}

	ret = fi_mr_reg(dom, buf, buffer_size, FI_REMOTE_WRITE, 0, 0, 0, &mr, NULL);
	if (ret) {
		printf("fi_mr_reg %s\n", fi_strerror(-ret));
		goto err3;
	}

	if (!cmeq) {
		ret = alloc_cm_res();
		if (ret)
			goto err4;
	}

	return 0;

err4:
	fi_close(&mr->fid);
err3:
	fi_close(&rcq->fid);
err2:
	fi_close(&scq->fid);
err1:
	free(buf);
	return ret;
}
Example #14
static void setup_mr(void)
{
	int ret;

	target = malloc(BUF_SZ);
	assert(target);

	source = malloc(BUF_SZ);
	assert(source);

	ret = fi_mr_reg(dom, target, BUF_SZ,
			FI_SEND | FI_RECV, 0, 0, 0, &rem_mr, &target);
	cr_assert_eq(ret, 0);

	ret = fi_mr_reg(dom, source, BUF_SZ,
			FI_SEND | FI_RECV, 0, 0, 0, &loc_mr, &source);
	cr_assert_eq(ret, 0);

	mr_key = fi_mr_key(rem_mr);
}
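A minimal teardown sketch matching setup_mr() above (assumed, not part of the original test): close both MRs, then free the buffers.

static void teardown_mr_sketch(void)
{
	int ret;

	/* unregister both memory regions before freeing the buffers */
	ret = fi_close(&loc_mr->fid);
	cr_assert_eq(ret, 0);

	ret = fi_close(&rem_mr->fid);
	cr_assert_eq(ret, 0);

	free(source);
	free(target);
}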
Example #15
static inline void cntr_setup_mr(void)
{
	int ret;

	target = malloc(BUF_SZ);
	assert(target);

	source = malloc(BUF_SZ);
	assert(source);

	ret = fi_mr_reg(dom, target, BUF_SZ,
			FI_REMOTE_WRITE, 0, 0, 0, &rem_mr, &target);
	cr_assert_eq(ret, 0);

	ret = fi_mr_reg(dom, source, BUF_SZ,
			FI_REMOTE_WRITE, 0, 0, 0, &loc_mr, &source);
	cr_assert_eq(ret, 0);

	mr_key = fi_mr_key(rem_mr);
}
Example #16
static int rxd_buf_region_alloc_fn(struct ofi_bufpool_region *region)
{
	struct rxd_domain *domain = region->pool->attr.context;
	struct fid_mr *mr;
	int ret;

	ret = fi_mr_reg(domain->dg_domain, region->mem_region,
			region->pool->region_size,
			FI_SEND | FI_RECV, 0, 0, 0, &mr, NULL);
	region->context = mr;
	return ret;
}
Example #17
static int alloc_ep_res(struct fi_info *fi)
{
	struct fi_cq_attr cq_attr;
	int ret;

	buffer_size = test_size[TEST_CNT - 1].size;
	buf = malloc(buffer_size);
	if (!buf) {
		perror("malloc");
		return -1;
	}

	memset(&cq_attr, 0, sizeof cq_attr);
	cq_attr.format = FI_CQ_FORMAT_DATA;
	cq_attr.wait_obj = FI_WAIT_NONE;
	cq_attr.size = rx_depth;
	ret = fi_cq_open(dom, &cq_attr, &rcq, NULL);
	if (ret) {
		printf("fi_cq_open send comp %s\n", fi_strerror(-ret));
		goto err1;
	}

	cq_attr.format = FI_CQ_FORMAT_CONTEXT;
	ret = fi_cq_open(dom, &cq_attr, &scq, NULL);
	if (ret) {
		printf("fi_cq_open recv comp %s\n", fi_strerror(-ret));
		goto err2;
	}

	ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL);
	if (ret) {
		printf("fi_mr_reg %s\n", fi_strerror(-ret));
		goto err3;
	}

	if (!cmeq) {
		ret = alloc_cm_res();
		if (ret)
			goto err4;
	}

	return 0;

err4:
	fi_close(&mr->fid);
err3:
	fi_close(&rcq->fid);
err2:
	fi_close(&scq->fid);
err1:
	free(buf);
	return ret;
}
Example #18
static void setup_mr(void)
{
	int i, ret;

	dest_iov = malloc(sizeof(struct iovec) * IOV_CNT);
	assert(dest_iov);

	target = malloc(BUF_SZ);
	assert(target);

	source = malloc(BUF_SZ);
	assert(source);

	src_iov = malloc(sizeof(struct iovec) * IOV_CNT);
	assert(src_iov);

	for (i = 0; i < IOV_CNT; i++) {
		src_iov[i].iov_base = malloc(BUF_SZ);
		assert(src_iov[i].iov_base != NULL);

		dest_iov[i].iov_base = malloc(BUF_SZ);
		assert(dest_iov[i].iov_base != NULL);
	}

	iov_src_buf = malloc(BUF_SZ * IOV_CNT);
	assert(iov_src_buf != NULL);

	iov_dest_buf = malloc(BUF_SZ * IOV_CNT);
	assert(iov_dest_buf != NULL);

	ret = fi_mr_reg(dom, target, BUF_SZ,
			FI_SEND | FI_RECV, 0, 0, 0, &rem_mr, &target);
	cr_assert_eq(ret, 0);

	ret = fi_mr_reg(dom, source, BUF_SZ,
			FI_SEND | FI_RECV, 0, 0, 0, &loc_mr, &source);
	cr_assert_eq(ret, 0);

	mr_key = fi_mr_key(rem_mr);
}
Example #19
static int rxm_mr_reg(struct fid *domain_fid, const void *buf, size_t len,
	   uint64_t access, uint64_t offset, uint64_t requested_key,
	   uint64_t flags, struct fid_mr **mr, void *context)
{
	struct rxm_domain *rxm_domain;
	struct rxm_mr *rxm_mr;
	int ret;

	rxm_domain = container_of(domain_fid, struct rxm_domain,
			util_domain.domain_fid.fid);

	if (!(rxm_mr = calloc(1, sizeof(*rxm_mr))))
		return -FI_ENOMEM;

	/* Additional flags to use RMA read for large message transfers */
	access |= FI_READ | FI_REMOTE_READ;

	if (rxm_domain->mr_local)
		access |= FI_WRITE;

	ret = fi_mr_reg(rxm_domain->msg_domain, buf, len, access, offset, requested_key,
			flags, &rxm_mr->msg_mr, context);
	if (ret) {
		FI_WARN(&rxm_prov, FI_LOG_DOMAIN, "Unable to register MSG MR\n");
		goto err;
	}

	rxm_mr->mr_fid.fid.fclass = FI_CLASS_MR;
	rxm_mr->mr_fid.fid.context = context;
	rxm_mr->mr_fid.fid.ops = &rxm_mr_ops;
	/* Store msg_mr as rxm_mr descriptor so that we can get its key when
	 * the app passes msg_mr as the descriptor in fi_send and friends.
	 * The key would be used in large message transfer protocol. */
	rxm_mr->mr_fid.mem_desc = rxm_mr->msg_mr;
	rxm_mr->mr_fid.key = fi_mr_key(rxm_mr->msg_mr);
	*mr = &rxm_mr->mr_fid;

	return 0;
err:
	free(rxm_mr);
	return ret;
}
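Because msg_mr is stored as the descriptor, later code that only sees the app-supplied descriptor can recover the MSG provider's key, as the comment above describes. An illustrative helper (an assumption, not part of the rxm source):

static uint64_t rxm_desc_to_msg_key_sketch(void *desc)
{
	/* desc is the rxm_mr->msg_mr pointer handed out above */
	return fi_mr_key((struct fid_mr *) desc);
}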
Example #20
/*
 * Tests:
 */
static int mr_reg(void)
{
	int i, j;
	int ret = 0;
	int testret = FAIL;
	struct fid_mr *mr;
	uint64_t access;
	uint64_t *access_combinations;
	int cnt;

	access = ft_info_to_mr_access(fi);
	ret = ft_alloc_bit_combo(0, access, &access_combinations, &cnt);
	if (ret) {
		FT_UNIT_STRERR(err_buf, "ft_alloc_bit_combo failed", ret);
		goto out;
	}

	for (i = 0; i < test_cnt; i++) {
		buf_size = test_size[i].size;
		for (j = 0; j < cnt; j++) {
			ret = fi_mr_reg(domain, buf, buf_size,
					access_combinations[j], 0,
					FT_MR_KEY, 0, &mr, NULL);
			if (ret) {
				FT_UNIT_STRERR(err_buf, "fi_mr_reg failed", ret);
				goto free;
			}

			ret = fi_close(&mr->fid);
			if (ret) {
				FT_UNIT_STRERR(err_buf, "fi_close failed", ret);
				goto free;
			}
		}
	}
	testret = PASS;
free:
	ft_free_bit_combo(access_combinations);
out:
	return TEST_RET_VAL(ret, testret);
}
Example #21
/*
 * rpmemd_fip_init_memory -- initialize memory pool's resources
 */
static int
rpmemd_fip_init_memory(struct rpmemd_fip *fip)
{
	int ret;

	/*
	 * Register memory region with appropriate access bits:
	 * - FI_REMOTE_READ  - remote peer can issue READ operation,
	 * - FI_REMOTE_WRITE - remote peer can issue WRITE operation,
	 */
	ret = fi_mr_reg(fip->domain, fip->addr, fip->size,
			FI_REMOTE_READ | FI_REMOTE_WRITE, 0, 0, 0,
			&fip->mr, NULL);
	if (ret) {
		RPMEMD_FI_ERR(ret, "registering memory");
		goto err_mr_reg;
	}

	return 0;
err_mr_reg:
	return -1;
}
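A matching cleanup sketch (assumed): the pool registration would be released with the RPMEMD_FI_CLOSE helper used elsewhere in rpmemd.

static void
rpmemd_fip_fini_memory_sketch(struct rpmemd_fip *fip)
{
	/* unregister the memory pool region */
	RPMEMD_FI_CLOSE(fip->mr, "unregistering memory");
}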
Example #22
/*
 * rpmemd_fip_init_gpspm -- initialize GPSPM resources
 */
static int
rpmemd_fip_init_gpspm(struct rpmemd_fip *fip)
{
	int ret;

	/* allocate persist message buffer */
	size_t msg_size = fip->nlanes * sizeof(struct rpmem_msg_persist);
	fip->pmsg = malloc(msg_size);
	if (!fip->pmsg) {
		RPMEMD_LOG(ERR, "!allocating GPSPM messages buffer");
		goto err_msg_malloc;
	}

	/* register persist message buffer */
	ret = fi_mr_reg(fip->domain, fip->pmsg, msg_size, FI_RECV,
			0, 0, 0, &fip->pmsg_mr, NULL);
	if (ret) {
		RPMEMD_FI_ERR(ret, "registering GPSPM messages buffer");
		goto err_mr_reg_msg;
	}

	/* get persist message buffer's local descriptor */
	fip->pmsg_mr_desc = fi_mr_desc(fip->pmsg_mr);

	/* allocate persist response message buffer */
	size_t msg_resp_size = fip->nlanes *
		sizeof(struct rpmem_msg_persist_resp);
	fip->pres = malloc(msg_resp_size);
	if (!fip->pres) {
		RPMEMD_FI_ERR(ret, "allocating GPSPM messages response buffer");
		goto err_msg_resp_malloc;
	}

	/* register persist response message buffer */
	ret = fi_mr_reg(fip->domain, fip->pres, msg_resp_size, FI_SEND,
			0, 0, 0, &fip->pres_mr, NULL);
	if (ret) {
		RPMEMD_FI_ERR(ret, "registering GPSPM messages "
				"response buffer");
		goto err_mr_reg_msg_resp;
	}

	/* get persist response message buffer's local descriptor */
	fip->pres_mr_desc = fi_mr_desc(fip->pres_mr);

	/* allocate lanes structures */
	fip->lanes = malloc(fip->nlanes * sizeof(*fip->lanes));
	if (!fip->lanes) {
		RPMEMD_LOG(ERR, "!allocating lanes");
		goto err_alloc_lanes;
	}

	/* initialize lanes */
	unsigned i;
	for (i = 0; i < fip->nlanes; i++) {
		struct rpmemd_fip_lane *lanep = &fip->lanes[i];

		/* initialize basic lane structure */
		ret = rpmem_fip_lane_init(&lanep->lane);
		if (ret) {
			RPMEMD_LOG(ERR, "!initializing lane");
			goto err_lane_init;
		}

		/* initialize RECV message */
		rpmem_fip_msg_init(&lanep->recv,
				fip->pmsg_mr_desc, 0,
				lanep,
				&fip->pmsg[i],
				sizeof(fip->pmsg[i]),
				FI_COMPLETION);

		/* initialize SEND message */
		rpmem_fip_msg_init(&lanep->send,
				fip->pres_mr_desc, 0,
				lanep,
				&fip->pres[i],
				sizeof(fip->pres[i]),
				FI_COMPLETION);
	}

	return 0;
err_lane_init:
	for (unsigned j = 0; j < i; j++)
		rpmem_fip_lane_fini(&fip->lanes[i].lane);
err_alloc_lanes:
	RPMEMD_FI_CLOSE(fip->pres_mr,
			"unregistering GPSPM messages response buffer");
err_mr_reg_msg_resp:
	free(fip->pres);
err_msg_resp_malloc:
	RPMEMD_FI_CLOSE(fip->pmsg_mr,
			"unregistering GPSPM messages buffer");
err_mr_reg_msg:
	free(fip->pmsg);
err_msg_malloc:
	return -1;
}
Example #23
void cancel_setup(void)
{
	int ret = 0;
	struct fi_av_attr attr;
	size_t addrlen = 0;
	int rem_requested_key, loc_requested_key;

	hints = fi_allocinfo();
	cr_assert(hints, "fi_allocinfo");

	hints->domain_attr->mr_mode = GNIX_DEFAULT_MR_MODE;
	hints->domain_attr->cq_data_size = 4;
	hints->mode = mode_bits;

	hints->fabric_attr->prov_name = strdup("gni");

	ret = fi_getinfo(fi_version(), NULL, 0, 0, hints, &fi);
	cr_assert(!ret, "fi_getinfo");

	ret = fi_fabric(fi->fabric_attr, &fab, NULL);
	cr_assert(!ret, "fi_fabric");

	ret = fi_domain(fab, fi, &dom, NULL);
	cr_assert(!ret, "fi_domain");

	memset(&attr, 0, sizeof(attr));
	attr.type = FI_AV_MAP;
	attr.count = 16;

	ret = fi_av_open(dom, &attr, &av, NULL);
	cr_assert(!ret, "fi_av_open");

	ret = fi_endpoint(dom, fi, &ep[0], NULL);
	cr_assert(!ret, "fi_endpoint");

	cq_attr.format = FI_CQ_FORMAT_CONTEXT;
	cq_attr.size = 1024;
	cq_attr.wait_obj = 0;

	ret = fi_cq_open(dom, &cq_attr, &msg_cq[0], 0);
	cr_assert(!ret, "fi_cq_open");

	ret = fi_cq_open(dom, &cq_attr, &msg_cq[1], 0);
	cr_assert(!ret, "fi_cq_open");

	ret = fi_ep_bind(ep[0], &msg_cq[0]->fid, FI_SEND | FI_RECV);
	cr_assert(!ret, "fi_ep_bind");

	ret = fi_getname(&ep[0]->fid, NULL, &addrlen);
	cr_assert(addrlen > 0);

	ep_name[0] = malloc(addrlen);
	cr_assert(ep_name[0] != NULL);

	ret = fi_getname(&ep[0]->fid, ep_name[0], &addrlen);
	cr_assert(ret == FI_SUCCESS);

	ret = fi_endpoint(dom, fi, &ep[1], NULL);
	cr_assert(!ret, "fi_endpoint");

	ret = fi_ep_bind(ep[1], &msg_cq[1]->fid, FI_SEND | FI_RECV);
	cr_assert(!ret, "fi_ep_bind");

	ep_name[1] = malloc(addrlen);
	cr_assert(ep_name[1] != NULL);

	ret = fi_getname(&ep[1]->fid, ep_name[1], &addrlen);
	cr_assert(ret == FI_SUCCESS);

	ret = fi_av_insert(av, ep_name[0], 1, &gni_addr[0], 0,
				NULL);
	cr_assert(ret == 1);

	ret = fi_av_insert(av, ep_name[1], 1, &gni_addr[1], 0,
				NULL);
	cr_assert(ret == 1);

	ret = fi_ep_bind(ep[0], &av->fid, 0);
	cr_assert(!ret, "fi_ep_bind");

	ret = fi_ep_bind(ep[1], &av->fid, 0);
	cr_assert(!ret, "fi_ep_bind");

	ret = fi_enable(ep[0]);
	cr_assert(!ret, "fi_ep_enable");

	ret = fi_enable(ep[1]);
	cr_assert(!ret, "fi_ep_enable");

	target_base = malloc(GNIT_ALIGN_LEN(BUF_SZ));
	assert(target_base);
	target = GNIT_ALIGN_BUFFER(char *, target_base);

	source_base = malloc(GNIT_ALIGN_LEN(BUF_SZ));
	assert(source_base);
	source = GNIT_ALIGN_BUFFER(char *, source_base);

	rem_requested_key = USING_SCALABLE(fi) ? 1 : 0;
	loc_requested_key = USING_SCALABLE(fi) ? 2 : 0;

	ret = fi_mr_reg(dom,
			  target,
			  BUF_SZ,
			  FI_REMOTE_WRITE,
			  0,
			  rem_requested_key,
			  0,
			  &rem_mr,
			  &target);
	cr_assert_eq(ret, 0);

	ret = fi_mr_reg(dom,
			  source,
			  BUF_SZ,
			  FI_REMOTE_WRITE,
			  0,
			  loc_requested_key,
			  0,
			  &loc_mr,
			  &source);
	cr_assert_eq(ret, 0);

	if (USING_SCALABLE(fi)) {
		MR_ENABLE(rem_mr, target, BUF_SZ);
		MR_ENABLE(loc_mr, source, BUF_SZ);
	}

	mr_key = fi_mr_key(rem_mr);
}
Example #24
static int alloc_ep_res(struct fi_info *fi)
{
	struct fi_cq_attr cq_attr;
	struct fi_av_attr av_attr;
	int ret;

	buffer_size = opts.user_options & FT_OPT_SIZE ?
			opts.transfer_size : test_size[TEST_CNT - 1].size;
	buf = malloc(MAX(buffer_size, sizeof(uint64_t)));
	if (!buf) {
		perror("malloc");
		return -1;
	}

	result = malloc(MAX(buffer_size, sizeof(uint64_t)));
	if (!result) {
		perror("malloc");
		return -1;
	}
	
	compare = malloc(MAX(buffer_size, sizeof(uint64_t)));
	if (!compare) {
		perror("malloc");
		return -1;
	}
	
	memset(&cq_attr, 0, sizeof cq_attr);
	cq_attr.format = FI_CQ_FORMAT_CONTEXT;
	cq_attr.wait_obj = FI_WAIT_NONE;
	cq_attr.size = 128;
	ret = fi_cq_open(dom, &cq_attr, &scq, NULL);
	if (ret) {
		FT_PRINTERR("fi_cq_open", ret);
		goto err1;
	}

	ret = fi_cq_open(dom, &cq_attr, &rcq, NULL);
	if (ret) {
		FT_PRINTERR("fi_cq_open", ret);
		goto err2;
	}
	
	// registers local data buffer buf that specifies
	// the first operand of the atomic operation
	ret = fi_mr_reg(dom, buf, MAX(buffer_size, sizeof(uint64_t)), 
		FI_REMOTE_READ | FI_REMOTE_WRITE, 0,
		get_mr_key(), 0, &mr, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", ret);
		goto err3;
	}

	// registers local data buffer that stores initial value of 
	// the remote buffer
	ret = fi_mr_reg(dom, result, MAX(buffer_size, sizeof(uint64_t)), 
		FI_REMOTE_READ | FI_REMOTE_WRITE, 0,
		get_mr_key(), 0, &mr_result, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", -ret);
		goto err4;
	}
	
	// registers local data buffer that contains comparison data
	ret = fi_mr_reg(dom, compare, MAX(buffer_size, sizeof(uint64_t)), 
		FI_REMOTE_READ | FI_REMOTE_WRITE, 0,
		get_mr_key(), 0, &mr_compare, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", ret);
		goto err5;
	}

	memset(&av_attr, 0, sizeof av_attr);
	av_attr.type = fi->domain_attr->av_type ?
			fi->domain_attr->av_type : FI_AV_MAP;
	av_attr.count = 1;
	av_attr.name = NULL;

	ret = fi_av_open(dom, &av_attr, &av, NULL);
	if (ret) {
		FT_PRINTERR("fi_av_open", ret);
		goto err6;
	}
	
	ret = fi_endpoint(dom, fi, &ep, NULL);
	if (ret) {
		FT_PRINTERR("fi_endpoint", ret);
		goto err7;
	}

	return 0;

err7:
	fi_close(&av->fid);
err6:
	fi_close(&mr_compare->fid);
err5:
	fi_close(&mr_result->fid);
err4:
	fi_close(&mr->fid);
err3:
	fi_close(&rcq->fid);
err2:
	fi_close(&scq->fid);
err1:
	free(buf);
	free(result);
	free(compare);
	
	return ret;
}
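For illustration (assumed, not from the original test), the three registrations map onto a compare-and-swap as follows: buf supplies the operand, compare the expected value, and result receives the prior remote value. The remote address and key are presumed to have been exchanged out of band.

static ssize_t do_compare_swap_sketch(fi_addr_t remote_addr, uint64_t remote_key)
{
	return fi_compare_atomic(ep,
			buf, 1, fi_mr_desc(mr),			/* operand */
			compare, fi_mr_desc(mr_compare),	/* expected value */
			result, fi_mr_desc(mr_result),		/* fetched old value */
			remote_addr, 0, remote_key,
			FI_UINT64, FI_CSWAP, NULL);
}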
Example #25
/*
 * rpmem_fip_init_lanes_gpspm -- (internal) initialize lanes for GPSPM
 */
static int
rpmem_fip_init_lanes_gpspm(struct rpmem_fip *fip)
{
	int ret = 0;

	/* allocate GPSPM lanes */
	fip->lanes.gpspm = calloc(1, fip->nlanes * sizeof(*fip->lanes.gpspm));
	if (!fip->lanes.gpspm) {
		RPMEM_LOG(ERR, "!allocating GPSPM lanes");
		ret = -1;
		goto err_malloc_lanes;
	}

	/* allocate persist messages buffer */
	size_t msg_size = fip->nlanes * sizeof(struct rpmem_msg_persist);
	fip->pmsg = malloc(msg_size);
	if (!fip->pmsg) {
		RPMEM_LOG(ERR, "!allocating messages buffer");
		ret = -1;
		goto err_malloc_pmsg;
	}

	/*
	 * Register persist messages buffer. The persist messages
	 * are sent to daemon thus the FI_SEND access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->pmsg, msg_size, FI_SEND,
			0, 0, 0, &fip->pmsg_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registering messages buffer");
		goto err_fi_mr_reg_pmsg;
	}

	/* get persist messages buffer local descriptor */
	fip->pmsg_mr_desc = fi_mr_desc(fip->pmsg_mr);

	/* allocate persist response messages buffer */
	size_t msg_resp_size = fip->nlanes *
				sizeof(struct rpmem_msg_persist_resp);
	fip->pres = malloc(msg_resp_size);
	if (!fip->pres) {
		RPMEM_LOG(ERR, "!allocating messages response buffer");
		ret = -1;
		goto err_malloc_pres;
	}

	/*
	 * Register persist messages response buffer. The persist response
	 * messages are received from daemon thus the FI_RECV access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->pres, msg_resp_size, FI_RECV,
			0, 0, 0, &fip->pres_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registering messages response buffer");
		goto err_fi_mr_reg_pres;
	}

	/* get persist response messages buffer local descriptor */
	fip->pres_mr_desc = fi_mr_desc(fip->pres_mr);

	/* allocate RECV structures for fi_recvmsg(3) */
	fip->recv = malloc(fip->nlanes * sizeof(*fip->recv));
	if (!fip->recv) {
		RPMEM_LOG(ERR, "!allocating response message iov buffer");
		ret = -1;
		goto err_malloc_recv;
	}

	/*
	 * Initialize all required structures for:
	 * WRITE, SEND and RECV operations.
	 *
	 * If the completion is required the FI_COMPLETION flag and
	 * appropriate context should be used.
	 *
	 * In GPSPM only the RECV and SEND completions are required.
	 *
	 * For RECV the context is RECV operation structure used for
	 * fi_recvmsg(3) function call.
	 *
	 * For SEND the context is lane structure.
	 *
	 * The received buffer contains a lane id which is used
	 * to obtain a lane which must be signaled that operation
	 * has been completed.
	 */
	unsigned i;
	for (i = 0; i < fip->nlanes; i++) {
		ret = rpmem_fip_lane_init(&fip->lanes.gpspm[i].lane);
		if (ret)
			goto err_lane_init;

		/* WRITE */
		rpmem_fip_rma_init(&fip->lanes.gpspm[i].write,
				fip->mr_desc, 0,
				fip->rkey,
				&fip->lanes.gpspm[i],
				0);

		/* SEND */
		rpmem_fip_msg_init(&fip->lanes.gpspm[i].send,
				fip->pmsg_mr_desc, 0,
				&fip->lanes.gpspm[i],
				&fip->pmsg[i],
				sizeof(fip->pmsg[i]),
				FI_COMPLETION);

		/* RECV */
		rpmem_fip_msg_init(&fip->recv[i],
				fip->pres_mr_desc, 0,
				&fip->recv[i],
				&fip->pres[i],
				sizeof(fip->pres[i]),
				FI_COMPLETION);
	}

	return 0;
err_lane_init:
	for (unsigned j = 0; j < i; j++)
		rpmem_fip_lane_fini(&fip->lanes.gpspm[i].lane);
err_malloc_recv:
	RPMEM_FI_CLOSE(fip->pres_mr, "unregistering messages "
			"response buffer");
err_fi_mr_reg_pres:
	free(fip->pres);
err_malloc_pres:
	RPMEM_FI_CLOSE(fip->pmsg_mr, "unregistering messages buffer");
err_fi_mr_reg_pmsg:
	free(fip->pmsg);
err_malloc_pmsg:
	free(fip->lanes.gpspm);
err_malloc_lanes:
	return ret;
}
Example #26
/*
 * rpmem_fip_init_lanes_apm -- (internal) initialize lanes for APM
 */
static int
rpmem_fip_init_lanes_apm(struct rpmem_fip *fip)
{
	int ret;

	/* allocate APM lanes */
	fip->lanes.apm = calloc(1, fip->nlanes * sizeof(*fip->lanes.apm));
	if (!fip->lanes.apm) {
		RPMEM_LOG(ERR, "!allocating APM lanes");
		goto err_malloc_lanes;
	}

	/* register read-after-write buffer */
	ret = fi_mr_reg(fip->domain, &fip->raw_buff, sizeof(fip->raw_buff),
			FI_REMOTE_WRITE, 0, 0, 0, &fip->raw_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registering APM read buffer");
		goto err_fi_raw_mr;
	}

	/* get read-after-write buffer local descriptor */
	fip->raw_mr_desc = fi_mr_desc(fip->raw_mr);

	/*
	 * Initialize all required structures for:
	 * WRITE and READ operations.
	 *
	 * If the completion is required the FI_COMPLETION flag and
	 * appropriate context should be used.
	 *
	 * In APM only the READ completion is required.
	 * The context is a lane structure.
	 */
	unsigned i;
	for (i = 0; i < fip->nlanes; i++) {
		ret = rpmem_fip_lane_init(&fip->lanes.apm[i].lane);
		if (ret)
			goto err_lane_init;

		/* WRITE */
		rpmem_fip_rma_init(&fip->lanes.apm[i].write,
				fip->mr_desc, 0,
				fip->rkey,
				&fip->lanes.apm[i],
				0);

		/* READ */
		rpmem_fip_rma_init(&fip->lanes.apm[i].read,
				fip->raw_mr_desc, 0,
				fip->rkey,
				&fip->lanes.apm[i],
				FI_COMPLETION);
	}

	return 0;
err_lane_init:
	for (unsigned j = 0; j < i; j++)
		rpmem_fip_lane_fini(&fip->lanes.apm[i].lane);
err_fi_raw_mr:
	free(fip->lanes.apm);
err_malloc_lanes:
	return -1;
}
Example #27
static int alloc_ep_res(struct fi_info *fi)
{
	struct fi_cq_attr cq_attr;
	struct fi_av_attr av_attr;
	int ret;

	buffer_size = opts.user_options & FT_OPT_SIZE ?
			opts.transfer_size : test_size[TEST_CNT - 1].size;
	if (max_msg_size > 0 && buffer_size > max_msg_size) {
		buffer_size = max_msg_size;
	}
	if (buffer_size < fi->src_addrlen) {
		buffer_size = fi->src_addrlen;
	}
	buffer_size += prefix_len;
	buf = malloc(buffer_size);
	if (!buf) {
		perror("malloc");
		return -1;
	}
	buf_ptr = (char *)buf + prefix_len;

	memset(&cq_attr, 0, sizeof cq_attr);
	cq_attr.format = FI_CQ_FORMAT_CONTEXT;
	cq_attr.wait_obj = FI_WAIT_NONE;
	cq_attr.size = max_credits << 1;
	ret = fi_cq_open(dom, &cq_attr, &scq, NULL);
	if (ret) {
		FT_PRINTERR("fi_cq_open", ret);
		goto err1;
	}

	ret = fi_cq_open(dom, &cq_attr, &rcq, NULL);
	if (ret) {
		FT_PRINTERR("fi_cq_open", ret);
		goto err2;
	}

	ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", ret);
		goto err3;
	}

	memset(&av_attr, 0, sizeof(av_attr));
	av_attr.type = fi->domain_attr->av_type ?
			fi->domain_attr->av_type : FI_AV_MAP;
	av_attr.name = NULL;
	av_attr.flags = 0;
	ret = fi_av_open(dom, &av_attr, &av, NULL);
	if (ret) {
		FT_PRINTERR("fi_av_open", ret);
		goto err4;
	}

	ret = fi_endpoint(dom, fi, &ep, NULL);
	if (ret) {
		FT_PRINTERR("fi_endpoint", ret);
		goto err5;
	}

	return 0;

err5:
	fi_close(&av->fid);
err4:
	fi_close(&mr->fid);
err3:
	fi_close(&rcq->fid);
err2:
	fi_close(&scq->fid);
err1:
	free(buf);
	return ret;
}
Example #28
static int alloc_ep_res(struct fi_info *fi)
{
	struct fi_cntr_attr cntr_attr;
	struct fi_av_attr av_attr;
	uint64_t flags = 0;
	int ret;

	buffer_size = MAX(sizeof(char *) * strlen(welcome_text), 
			sizeof(uint64_t));
	buf = malloc(buffer_size);
	if (!buf) {
		perror("malloc");
		return -1;
	}

	memset(&cntr_attr, 0, sizeof cntr_attr);
	cntr_attr.events = FI_CNTR_EVENTS_COMP;

	ret = fi_cntr_open(dom, &cntr_attr, &scntr, NULL);
	if (ret) {
		FT_PRINTERR("fi_cntr_open", ret);
		goto err1;
	}

	ret = fi_cntr_open(dom, &cntr_attr, &rcntr, NULL);
	if (ret) {
		FT_PRINTERR("fi_cntr_open", ret);
		goto err2;
	}
	
	/* Set FI_MR_KEY to associate the memory region with the specified key
	 * Set FI_MR_OFFSET to use specified offset as the base address */
	flags = FI_MR_KEY | FI_MR_OFFSET;
	ret = fi_mr_reg(dom, buf, buffer_size, FI_REMOTE_WRITE, 0, 
			user_defined_key, flags, &mr, NULL);
	if (ret) {
		FT_PRINTERR("fi_mr_reg", ret);
		goto err3;
	}

	memset(&av_attr, 0, sizeof av_attr);
	av_attr.type = fi->domain_attr->av_type ?
			fi->domain_attr->av_type : FI_AV_MAP;
	av_attr.count = 1;
	av_attr.name = NULL;

	ret = fi_av_open(dom, &av_attr, &av, NULL);
	if (ret) {
		FT_PRINTERR("fi_av_open", ret);
		goto err4;
	}

	ret = fi_endpoint(dom, fi, &ep, NULL);
	if (ret) {
		FT_PRINTERR("fi_endpoint", ret);
		goto err5;
	}

	return 0;

err5:
	fi_close(&av->fid);
err4:
	fi_close(&mr->fid);
err3:
	fi_close(&rcntr->fid);
err2:
	fi_close(&scntr->fid);
err1:
	free(buf);
	return ret;
}
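Since the region is registered with FI_MR_KEY | FI_MR_OFFSET, a peer addresses it by the agreed user_defined_key and an offset rather than a virtual address. An illustrative initiator-side call under those assumptions (not part of this example):

static ssize_t write_to_offset_mr_sketch(struct fid_ep *ep, const void *src,
					 size_t len, void *desc,
					 fi_addr_t remote_addr)
{
	/* offset 0 targets the start of the remote region registered above */
	return fi_write(ep, src, len, desc, remote_addr, 0,
			user_defined_key, NULL);
}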
Example #29
int do_test(void)
{
	struct fi_cq_msg_entry	comp;
	int			len = msg_len * post_depth;
	int			msg_cnt = num_msgs;
	int			tx_bufs_sent = 0;
	int			ret;
	char			*mp;
	u64			time_elap;
#if SREAD == 0
	int			eagain_cnt = EAGAIN_TRIES;
#endif

	print_trace("in\n");

	if (!ctx.buf) {
		ctx.buf = kmalloc(len, GFP_KERNEL);
		if (!ctx.buf) {
			print_err("kalloc failed!\n");
			return -ENOMEM;
		}

		ret = fi_mr_reg(ctx.domain, ctx.buf, len, 0, 0, 0, 0,
				&ctx.mr, NULL);
		if (ret) {
			print_err("fi_mr_reg returned %d\n", ret);
			kfree(ctx.buf);
			ctx.buf = ERR_PTR(-EFAULT);
			return ret;
		}
	} else if (IS_ERR(ctx.buf))
		return 0;

	print_msg("post_depth %d num_msgs %d msg_len %d SREAD[%d]\n",
		post_depth, num_msgs, msg_len, SREAD);

	print_dbg("ctx.buf %p '%s' len %ld msg_len %d\n",
		ctx.buf, ctx.buf, strlen(ctx.buf)+1, msg_len);

	time_elap = get_jiffies_64();

	for (mp = ctx.buf; msg_cnt > 0 && !kthread_should_stop(); ) {
		int post_cnt, cnt;

		post_cnt = (msg_cnt > post_depth ? post_depth : msg_cnt);

		for (cnt = 0, mp = ctx.buf; cnt < post_cnt;
			cnt++, mp += msg_len) {

			if (verify) {
				sprintf(mp, TEST_MESSAGE, tx_bufs_sent);
				tx_bufs_sent++;
			}

			ret = fi_send(ctx.ep, mp, msg_len, fi_mr_desc(ctx.mr),
					0, mp);
			if (ret) {
				print_err("fi_send returned %d '%s'\n",
					ret, fi_strerror(ret));
				return ret;
			}
			if (kthread_should_stop())
				return -EINTR;
		}

		/* reap completions */
		for (cnt = 0; cnt < post_cnt; cnt++) {
#if SREAD
			ret = fi_cq_sread(ctx.scq, &comp, 1, 0, TIMEOUT);
			if (ret == -ETIMEDOUT) {
				print_msg("%s(ETIMEDOUT) cnt %d post_cnt %d "
					"msg_cnt %d\n", "fi_cq_sread", cnt,
					post_cnt, msg_cnt);
			}
			if (kthread_should_stop())
				return -EINTR;
#else
			do {
				ret = fi_cq_read(ctx.scq, &comp, 1);
				if (ret == 0 || ret == -EAGAIN) {
					if (--eagain_cnt <= 0) {
						dprint(DEBUG_HIGH,
							"%s(resched %d) cnt "
							"%d post_cnt %d\n",
							"fi_cq_read", ret, cnt,
							post_cnt);
						eagain_cnt = EAGAIN_TRIES;
						schedule();
					}
				}
				if (kthread_should_stop())
					return -EINTR;
			} while (ret == 0 || ret == -EAGAIN);

#endif
			if (ret < 0) {
				struct fi_cq_err_entry cqe = { 0 };
				int rc;

				rc = fi_cq_readerr(ctx.scq, &cqe, 0);
				print_err("fi_cq_read returned %d '%s'\n",
					ret, fi_strerror(ret));
				if (rc) {
					char buf[64];

					print_err("fi_cq_readerr() err '%s'(%d)"
						"\n", fi_strerror(cqe.err),
						cqe.err);
					print_err("fi_cq_readerr() prov_err "
						"'%s'(%d)\n",
						fi_cq_strerror(ctx.scq,
							cqe.prov_errno,
							cqe.err_data, buf,
							sizeof(buf)),
						cqe.prov_errno);
				}
				return ret;
			}
			if (!ret)
				print_err("fi_cq_sread no completion? ret %d\n",
					ret);
#if 0
			if ((char *)comp.op_context < (char *)ctx.buf ||
				(char *)comp.op_context >= (char *)
						&ctx.buf[msg_len*post_depth]) {

				print_err("cq.op_context(%p) not in range "
					"[ctx.buf(%p) ... &ctx.buf[%d](%p)]\n",
						(void *)comp.op_context,
						(void *)ctx.buf,
						msg_len,
						(void *)&ctx.buf[msg_len]);
			}
#endif
			if (verify)
				print_msg("Tx '%s'\n",
					(char *) comp.op_context);
		}
		msg_cnt -= post_cnt;
	}
	time_elap = get_jiffies_64() - time_elap;

#define AGIG (1024UL*1024UL*1024UL)
#define AMEG (1024UL*1024UL)
#define AKILO (1024UL)
	{
		struct timeval	tv;
		ulong		rate, rate_mod, bytes, units_of;
		char		units;

		jiffies_to_timeval(time_elap, &tv);

		bytes = (ulong) num_msgs * (ulong) msg_len;

		if (bytes >= AKILO && tv.tv_sec > 0) {
			rate = bytes / tv.tv_sec;
			rate_mod = bytes % tv.tv_sec;
			if (rate >= AGIG) {
				units = 'G';
				units_of = AGIG;
			} else if (rate >= AMEG) {
				units = 'M';
				units_of = AMEG;
			} else {
				units = 'K';
				units_of = AKILO;
			}
			rate /=  units_of;
		} else {
			rate = rate_mod = 0UL;
			units = ' ';
			units_of = 1UL;
		}

		print_info("Tx %d msgs (%lu.%lu%cB) @ ~%lu.%lu %cB/sec (%ld sec %ld "
			"usec)\n",
				num_msgs, (bytes/units_of), (bytes % units_of),
				units, rate, rate_mod, units,
				tv.tv_sec, tv.tv_usec);
	}

	return 0;
}
Example #30
void rdm_api_setup_ep(void)
{
	int ret, i, j;
	struct fi_av_attr attr;
	size_t addrlen = 0;

	/* Get info about fabric services with the provided hints */
	for (i = 0; i < NUMEPS; i++) {
		ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hints[i],
				 &fi[i]);
		cr_assert(!ret, "fi_getinfo");
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = FI_AV_MAP;
	attr.count = NUMEPS;

	cq_attr.format = FI_CQ_FORMAT_TAGGED;
	cq_attr.size = 1024;
	cq_attr.wait_obj = 0;

	target = malloc(BUF_SZ * 3); /* 3x BUF_SZ for multi recv testing */
	assert(target);

	source = malloc(BUF_SZ);
	assert(source);

	uc_target = malloc(BUF_SZ);
	assert(uc_target);

	uc_source = malloc(BUF_SZ);
	assert(uc_source);

	ret = fi_fabric(fi[0]->fabric_attr, &fab, NULL);
	cr_assert(!ret, "fi_fabric");

	for (i = 0; i < NUMEPS; i++) {
		ret = fi_domain(fab, fi[i], dom + i, NULL);
		cr_assert(!ret, "fi_domain");

		ret = fi_open_ops(&dom[i]->fid, FI_GNI_DOMAIN_OPS_1,
				  0, (void **) (gni_domain_ops + i), NULL);

		ret = fi_av_open(dom[i], &attr, av + i, NULL);
		cr_assert(!ret, "fi_av_open");

		ret = fi_endpoint(dom[i], fi[i], ep + i, NULL);
		cr_assert(!ret, "fi_endpoint");

		ret = fi_cq_open(dom[i], &cq_attr, msg_cq + i, 0);
		cr_assert(!ret, "fi_cq_open");

		ret = fi_ep_bind(ep[i], &msg_cq[i]->fid, FI_SEND | FI_RECV);
		cr_assert(!ret, "fi_ep_bind");

		ret = fi_getname(&ep[i]->fid, NULL, &addrlen);
		cr_assert(addrlen > 0);

		ep_name[i] = malloc(addrlen);
		cr_assert(ep_name[i] != NULL);

		ret = fi_getname(&ep[i]->fid, ep_name[i], &addrlen);
		cr_assert(ret == FI_SUCCESS);
	}

	for (i = 0; i < NUMEPS; i++) {
		/* Insert all gni addresses into each av */
		for (j = 0; j < NUMEPS; j++) {
			ret = fi_av_insert(av[i], ep_name[j], 1, &gni_addr[j],
					   0, NULL);
			cr_assert(ret == 1);
		}

		ret = fi_ep_bind(ep[i], &av[i]->fid, 0);
		cr_assert(!ret, "fi_ep_bind");

		ret = fi_enable(ep[i]);
		cr_assert(!ret, "fi_ep_enable");

		ret = fi_cntr_open(dom[i], &cntr_attr, send_cntr + i, 0);
		cr_assert(!ret, "fi_cntr_open");

		ret = fi_ep_bind(ep[i], &send_cntr[i]->fid, FI_SEND);
		cr_assert(!ret, "fi_ep_bind");

		ret = fi_cntr_open(dom[i], &cntr_attr, recv_cntr + i, 0);
		cr_assert(!ret, "fi_cntr_open");

		ret = fi_ep_bind(ep[i], &recv_cntr[i]->fid, FI_RECV);
		cr_assert(!ret, "fi_ep_bind");
	}

	for (i = 0; i < NUMEPS; i++) {
		ret = fi_mr_reg(dom[i], target, 3 * BUF_SZ,
				FI_REMOTE_WRITE, 0, 0, 0, rem_mr + i, &target);
		cr_assert_eq(ret, 0);

		ret = fi_mr_reg(dom[i], source, BUF_SZ,
				FI_REMOTE_WRITE, 0, 0, 0, loc_mr + i, &source);
		cr_assert_eq(ret, 0);

		mr_key[i] = fi_mr_key(rem_mr[i]);
	}
}