Example #1
/*
 * rpmem_fip_fini_memory -- (internal) deinitialize common memory resources
 */
static void
rpmem_fip_fini_memory(struct rpmem_fip *fip)
{
	RPMEM_FI_CLOSE(fip->rd_mr, "unregistering read buffer");
	free(fip->rd_buff);
	RPMEM_FI_CLOSE(fip->mr, "unregistering memory");
}
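Every teardown step in these examples goes through RPMEM_FI_CLOSE. A minimal sketch of what that macro is assumed to do, expressed as a function over the public fi_close(3) call (the actual macro in librpmem may differ in how it logs):

#include <rdma/fabric.h>

/*
 * rpmem_fi_close_sketch -- hypothetical stand-in for RPMEM_FI_CLOSE:
 * close a libfabric object and log the supplied message on failure.
 * Callers would pass the embedded fid, e.g. &fip->rd_mr->fid.
 */
static inline int
rpmem_fi_close_sketch(struct fid *fid, const char *msg)
{
	int ret = fi_close(fid);
	if (ret)
		RPMEM_FI_ERR(ret, msg);	/* logging macro from librpmem */

	return ret;
}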
Example #2
/*
 * rpmem_fip_fini_lanes_gpspm -- (internal) deinitialize lanes for GPSPM
 */
static void
rpmem_fip_fini_lanes_gpspm(struct rpmem_fip *fip)
{
	RPMEM_FI_CLOSE(fip->pmsg_mr, "unregistering messages buffer");
	RPMEM_FI_CLOSE(fip->pres_mr, "unregistering messages "
			"response buffer");
	free(fip->pmsg);
	free(fip->pres);
	free(fip->recv);
	free(fip->lanes.gpspm);
}
Example #3
/*
 * rpmem_fip_init_fabric_res -- (internal) initialize common fabric resources
 */
static int
rpmem_fip_init_fabric_res(struct rpmem_fip *fip)
{
	int ret;
	ret = fi_fabric(fip->fi->fabric_attr, &fip->fabric, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "opening fabric domain");
		goto err_fi_fabric;
	}

	ret = fi_domain(fip->fabric, fip->fi, &fip->domain, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "opening fabric access domain");
		goto err_fi_domain;
	}

	struct fi_eq_attr eq_attr = {
		.size = 0, /* use default value */
		.flags = 0,
		.wait_obj = FI_WAIT_UNSPEC,
		.signaling_vector = 0,
		.wait_set = NULL,
	};

	ret = fi_eq_open(fip->fabric, &eq_attr, &fip->eq, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "opening event queue");
		goto err_eq_open;
	}

	return 0;
err_eq_open:
	RPMEM_FI_CLOSE(fip->domain, "closing fabric access domain");
err_fi_domain:
	RPMEM_FI_CLOSE(fip->fabric, "closing fabric domain");
err_fi_fabric:
	return ret;
}

/*
 * rpmem_fip_fini_fabric_res -- (internal) deinitialize common fabric resources
 */
static void
rpmem_fip_fini_fabric_res(struct rpmem_fip *fip)
{
	RPMEM_FI_CLOSE(fip->eq, "closing event queue");
	RPMEM_FI_CLOSE(fip->domain, "closing fabric access domain");
	RPMEM_FI_CLOSE(fip->fabric, "closing fabric domain");
}
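rpmem_fip_init_fabric_res() assumes fip->fi already holds a provider description obtained from fi_getinfo(3). A minimal sketch of how such a description could be acquired; the node/service parameters and the capability set are illustrative, not what librpmem actually requests:

#include <rdma/fabric.h>

/*
 * get_fi_sketch -- illustrative only: query libfabric for a provider
 * matching a connection-oriented, RMA-capable endpoint. The real
 * provider selection in librpmem is more involved.
 */
static int
get_fi_sketch(const char *node, const char *service, struct fi_info **fi)
{
	struct fi_info *hints = fi_allocinfo();
	if (!hints)
		return -1;

	hints->ep_attr->type = FI_EP_MSG;	/* connection-oriented */
	hints->caps = FI_MSG | FI_RMA;		/* messaging and RMA */

	int ret = fi_getinfo(FI_VERSION(1, 4), node, service, 0, hints, fi);
	fi_freeinfo(hints);

	return ret;
}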
Example #4
/*
 * rpmem_fip_fini_lanes_apm -- (internal) deinitialize lanes for APM
 */
static void
rpmem_fip_fini_lanes_apm(struct rpmem_fip *fip)
{
	RPMEM_FI_CLOSE(fip->raw_mr, "unregistering APM read buffer");
	free(fip->raw_buff);
	free(fip->lanes.apm);
}
Example #5
/*
 * rpmem_fip_init_memory -- (internal) initialize common memory resources
 */
static int
rpmem_fip_init_memory(struct rpmem_fip *fip)
{
	ASSERTne(Pagesize, 0);
	int ret;

	/*
	 * Register local memory space. The local memory will be used
	 * as the source of WRITE operations in the rpmem_fip_persist
	 * function, thus the FI_WRITE access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->laddr, fip->size,
			FI_WRITE, 0, 0, 0, &fip->mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registrating memory");
		return ret;
	}

	/* get local memory descriptor */
	fip->mr_desc = fi_mr_desc(fip->mr);

	/* allocate buffer for read operation */
	ASSERT(IS_PAGE_ALIGNED(RPMEM_RD_BUFF_SIZE));
	errno = posix_memalign((void **)&fip->rd_buff, Pagesize,
			RPMEM_RD_BUFF_SIZE);
	if (errno) {
		RPMEM_LOG(ERR, "!allocating read buffer");
		ret = -1;
		goto err_malloc_rd_buff;
	}

	/*
	 * Register the buffer for the read operation. The buffer is
	 * the target of RMA READ operations, which deposit data into
	 * local memory, thus the FI_REMOTE_WRITE access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->rd_buff,
			RPMEM_RD_BUFF_SIZE, FI_REMOTE_WRITE,
			0, 0, 0, &fip->rd_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registrating read buffer");
		goto err_rd_mr;
	}

	/* get read buffer local memory descriptor */
	fip->rd_mr_desc = fi_mr_desc(fip->rd_mr);

	return 0;
err_rd_mr:
	free(fip->rd_buff);
err_malloc_rd_buff:
	RPMEM_FI_CLOSE(fip->mr, "unregistering memory");
	return ret;
}
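The descriptors obtained from fi_mr_desc() above are later handed to the data-transfer calls. A hedged sketch of how the registered local memory might feed an RMA WRITE via fi_write(3); the remote address and key are placeholders for values assumed to be exchanged during the connection handshake:

#include <rdma/fi_rma.h>

/*
 * persist_write_sketch -- illustrative only: post an RMA WRITE from the
 * locally registered memory (fip->mr_desc) to the remote replica.
 * raddr and rkey stand in for values obtained at connect time.
 */
static ssize_t
persist_write_sketch(struct rpmem_fip *fip, size_t offset, size_t len,
		uint64_t raddr, uint64_t rkey)
{
	return fi_write(fip->ep, (char *)fip->laddr + offset, len,
			fip->mr_desc, 0 /* dest addr: connected EP */,
			raddr + offset, rkey, NULL /* no completion ctx */);
}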
Example #6
/*
 * rpmem_fip_init_lanes_gpspm -- (internal) initialize lanes for GPSPM
 */
static int
rpmem_fip_init_lanes_gpspm(struct rpmem_fip *fip)
{
	int ret = 0;

	/* allocate GPSPM lanes */
	fip->lanes.gpspm = calloc(fip->nlanes, sizeof(*fip->lanes.gpspm));
	if (!fip->lanes.gpspm) {
		RPMEM_LOG(ERR, "!allocating GPSPM lanes");
		ret = -1;
		goto err_malloc_lanes;
	}

	/* allocate persist messages buffer */
	size_t msg_size = fip->nlanes * sizeof(struct rpmem_msg_persist);
	fip->pmsg = malloc(msg_size);
	if (!fip->pmsg) {
		RPMEM_LOG(ERR, "!allocating messages buffer");
		ret = -1;
		goto err_malloc_pmsg;
	}

	/*
	 * Register the persist messages buffer. The persist messages
	 * are sent to the daemon, thus the FI_SEND access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->pmsg, msg_size, FI_SEND,
			0, 0, 0, &fip->pmsg_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registering messages buffer");
		goto err_fi_mr_reg_pmsg;
	}

	/* get persist messages buffer local descriptor */
	fip->pmsg_mr_desc = fi_mr_desc(fip->pmsg_mr);

	/* allocate persist response messages buffer */
	size_t msg_resp_size = fip->nlanes *
				sizeof(struct rpmem_msg_persist_resp);
	fip->pres = malloc(msg_resp_size);
	if (!fip->pres) {
		RPMEM_LOG(ERR, "!allocating messages response buffer");
		ret = -1;
		goto err_malloc_pres;
	}

	/*
	 * Register the persist messages response buffer. The persist
	 * response messages are received from the daemon, thus the
	 * FI_RECV access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->pres, msg_resp_size, FI_RECV,
			0, 0, 0, &fip->pres_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registering messages response buffer");
		goto err_fi_mr_reg_pres;
	}

	/* get persist response messages buffer local descriptor */
	fip->pres_mr_desc = fi_mr_desc(fip->pres_mr);

	/* allocate RECV structures for fi_recvmsg(3) */
	fip->recv = malloc(fip->nlanes * sizeof(*fip->recv));
	if (!fip->recv) {
		RPMEM_LOG(ERR, "!allocating response message iov buffer");
		ret = -1;
		goto err_malloc_recv;
	}

	/*
	 * Initialize all required structures for:
	 * WRITE, SEND and RECV operations.
	 *
	 * If the completion is required the FI_COMPLETION flag and
	 * appropriate context should be used.
	 *
	 * In GPSPM only the RECV and SEND completions are required.
	 *
	 * For RECV the context is RECV operation structure used for
	 * fi_recvmsg(3) function call.
	 *
	 * For SEND the context is lane structure.
	 *
	 * The received buffer contains a lane id, which is used to
	 * look up the lane that must be signaled that the operation
	 * has completed.
	 */
	unsigned i;
	for (i = 0; i < fip->nlanes; i++) {
		ret = rpmem_fip_lane_init(&fip->lanes.gpspm[i].lane);
		if (ret)
			goto err_lane_init;

		/* WRITE */
		rpmem_fip_rma_init(&fip->lanes.gpspm[i].write,
				fip->mr_desc, 0,
				fip->rkey,
				&fip->lanes.gpspm[i],
				0);

		/* SEND */
		rpmem_fip_msg_init(&fip->lanes.gpspm[i].send,
				fip->pmsg_mr_desc, 0,
				&fip->lanes.gpspm[i],
				&fip->pmsg[i],
				sizeof(fip->pmsg[i]),
				FI_COMPLETION);

		/* RECV */
		rpmem_fip_msg_init(&fip->recv[i],
				fip->pres_mr_desc, 0,
				&fip->recv[i],
				&fip->pres[i],
				sizeof(fip->pres[i]),
				FI_COMPLETION);
	}

	return 0;
err_lane_init:
	for (unsigned j = 0; j < i; j++)
		rpmem_fip_lane_fini(&fip->lanes.gpspm[j].lane);
	free(fip->recv);
err_malloc_recv:
	RPMEM_FI_CLOSE(fip->pres_mr, "unregistering messages "
			"response buffer");
err_fi_mr_reg_pres:
	free(fip->pres);
err_malloc_pres:
	RPMEM_FI_CLOSE(fip->pmsg_mr, "unregistering messages buffer");
err_fi_mr_reg_pmsg:
	free(fip->pmsg);
err_malloc_pmsg:
	free(fip->lanes.gpspm);
err_malloc_lanes:
	return ret;
}
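The comment above the lane loop explains that completion contexts identify the lane (for SEND) or the RECV structure (for RECV). A hedged sketch of the consuming side, reading FI_CQ_FORMAT_MSG entries from the completion queue set up in Example #8; the lane-signaling logic is left as comments since it is not shown in the source:

#include <rdma/fi_eq.h>
#include <rdma/fi_errno.h>

/*
 * process_cq_sketch -- illustrative only: drain one completion. With
 * FI_CQ_FORMAT_MSG the queue yields fi_cq_msg_entry records whose
 * op_context is whatever was passed when the operation was posted.
 */
static int
process_cq_sketch(struct rpmem_fip *fip)
{
	struct fi_cq_msg_entry entry;

	ssize_t ret = fi_cq_read(fip->cq, &entry, 1);
	if (ret == -FI_EAGAIN)
		return 0;	/* nothing completed yet */
	if (ret < 0)
		return (int)ret;

	if (entry.flags & FI_RECV) {
		/* entry.op_context is the posted RECV structure;
		 * decode the lane id from the response and signal
		 * the matching lane */
	} else if (entry.flags & FI_SEND) {
		/* entry.op_context is the sending lane */
	}

	return 0;
}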
Example #7
/*
 * rpmem_fip_fini_ep -- (internal) deinitialize endpoint
 */
static int
rpmem_fip_fini_ep(struct rpmem_fip *fip)
{
	return RPMEM_FI_CLOSE(fip->ep, "closing endpoint");
}
Example #8
/*
 * rpmem_fip_init_cq -- (internal) initialize completion queue(s)
 */
static int
rpmem_fip_init_cq(struct rpmem_fip *fip)
{
	int ret;

	struct fi_cq_attr cq_attr = {
		.size = fip->cq_size,
		.flags = 0,
		.format = FI_CQ_FORMAT_MSG,
		.wait_obj = FI_WAIT_UNSPEC,
		.signaling_vector = 0,
		.wait_cond = FI_CQ_COND_NONE,
		.wait_set = NULL,
	};

	ret = fi_cq_open(fip->domain, &cq_attr, &fip->cq, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "opening completion queue");
		goto err_cq_open;
	}

	return 0;
err_cq_open:
	return ret;
}

/*
 * rpmem_fip_fini_cq -- (internal) deinitialize completion queue(s)
 */
static int
rpmem_fip_fini_cq(struct rpmem_fip *fip)
{
	return RPMEM_FI_CLOSE(fip->cq, "closing completion queue");
}

/*
 * rpmem_fip_init_ep -- (internal) initialize endpoint
 */
static int
rpmem_fip_init_ep(struct rpmem_fip *fip)
{
	int ret;

	/* create an endpoint */
	ret = fi_endpoint(fip->domain, fip->fi, &fip->ep, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "allocating endpoint");
		goto err_endpoint;
	}

	/*
	 * Bind an event queue to an endpoint to get
	 * connection-related events for the endpoint.
	 */
	ret = fi_ep_bind(fip->ep, &fip->eq->fid, 0);
	if (ret) {
		RPMEM_FI_ERR(ret, "binding event queue to endpoint");
		goto err_ep_bind_eq;
	}

	/*
	 * Bind a completion queue to an endpoint to get completion
	 * events of specified inbound/outbound operations.
	 *
	 * FI_SELECTIVE_COMPLETION means inbound/outbound operations
	 * must explicitly request a completion event by specifying
	 * the FI_COMPLETION flag when they are posted.
	 *
	 * Which completion events are generated depends on the
	 * persistency method in use; they are configured during the
	 * lane initialization for that method.
	 */
	ret = fi_ep_bind(fip->ep, &fip->cq->fid,
			FI_RECV | FI_TRANSMIT | FI_SELECTIVE_COMPLETION);
	if (ret) {
		RPMEM_FI_ERR(ret, "binding completion queue to endpoint");
		goto err_ep_bind_cq;
	}

	/*
	 * Enable the endpoint so it is possible to post
	 * inbound/outbound operations when required.
	 */
	ret = fi_enable(fip->ep);
	if (ret) {
		RPMEM_FI_ERR(ret, "activating endpoint");
		goto err_fi_enable;
	}

	return 0;
err_fi_enable:
err_ep_bind_cq:
err_ep_bind_eq:
err_endpoint:
	return ret;
}
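Because the completion queue is bound with FI_SELECTIVE_COMPLETION, a posted operation reports to the queue only when it carries FI_COMPLETION. A minimal sketch of the distinction using fi_sendmsg(3); the buffer and descriptor parameters stand in for what rpmem_fip_msg_init() presumably packages:

#include <sys/uio.h>
#include <rdma/fi_endpoint.h>

/*
 * send_sketch -- illustrative only: under FI_SELECTIVE_COMPLETION the
 * send generates a CQ entry only when posted with FI_COMPLETION.
 */
static ssize_t
send_sketch(struct fid_ep *ep, void *buf, size_t len, void *desc,
		void *context, int want_completion)
{
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = len,
	};

	struct fi_msg msg = {
		.msg_iov = &iov,
		.desc = &desc,
		.iov_count = 1,
		.addr = 0,		/* connected endpoint */
		.context = context,	/* returned in the completion */
		.data = 0,
	};

	return fi_sendmsg(ep, &msg, want_completion ? FI_COMPLETION : 0);
}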