/*
 * Allocate and register the per-connection buffers used for RDMA messaging:
 * a send/receive message pair (local access only) plus the shared address
 * region (remotely writable) and data region (remotely readable).
 *
 * data_region / addr_region are file-scope buffers shared by all
 * connections; only the message buffers are allocated per connection.
 */
void register_memory(rdma_conn_t *conn) {
  /* Fail fast if the heap allocations fail, instead of passing NULL to
   * ibv_reg_mr() below. */
  TEST_Z(conn->send_msg = malloc(sizeof(rdma_msg_t)));
  TEST_Z(conn->recv_msg = malloc(sizeof(rdma_msg_t)));

  conn->data_region = data_region;
  conn->addr_region = addr_region;

  TEST_Z(conn->send_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->send_msg,
    sizeof(rdma_msg_t),
    IBV_ACCESS_LOCAL_WRITE));

  TEST_Z(conn->recv_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->recv_msg,
    sizeof(rdma_msg_t),
    IBV_ACCESS_LOCAL_WRITE));

  TEST_Z(conn->addr_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->addr_region,
    ADDR_REGION_SIZE,
    IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));

  TEST_Z(conn->data_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->data_region,
    DATA_REGION_SIZE,
    IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ));
}
/*
 * Allocate and register this connection's buffers:
 *  - send/recv data regions (recv is zeroed and registered with
 *    IBV_ACCESS_REMOTE_WRITE so the peer can RDMA-write into it)
 *  - a send/recv message pair for control traffic (recv remotely writable)
 */
void register_memory(struct connection *conn)
{
  /* Fail fast on allocation failure rather than handing NULL to ibv_reg_mr(). */
  TEST_Z(conn->send_region = malloc(send_buffer_size));
  TEST_Z(conn->recv_region = malloc(recv_buffer_size));
  memset(conn->recv_region, 0, recv_buffer_size);

  TEST_Z(conn->send_msg = malloc(sizeof(struct message)));
  TEST_Z(conn->recv_msg = malloc(sizeof(struct message)));

  TEST_Z(conn->send_region_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->send_region,
    send_buffer_size,
    IBV_ACCESS_LOCAL_WRITE));

  TEST_Z(conn->recv_region_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->recv_region,
    recv_buffer_size,
    IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));

  TEST_Z(conn->send_msg_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->send_msg,
    sizeof(struct message),
    IBV_ACCESS_LOCAL_WRITE));

  TEST_Z(conn->recv_msg_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->recv_msg,
    sizeof(struct message),
    IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
}
void InputChannelSender::on_addr_resolved(struct rdma_cm_id* id) { IBConnectionGroup<InputChannelConnection>::on_addr_resolved(id); if (!mr_data_) { // Register memory regions. mr_data_ = ibv_reg_mr( pd_, const_cast<uint8_t*>(data_source_.data_send_buffer().ptr()), data_source_.data_send_buffer().bytes(), IBV_ACCESS_LOCAL_WRITE); if (!mr_data_) { L_(error) << "ibv_reg_mr failed for mr_data: " << strerror(errno); throw InfinibandException("registration of memory region failed"); } mr_desc_ = ibv_reg_mr(pd_, const_cast<fles::MicrosliceDescriptor*>( data_source_.desc_send_buffer().ptr()), data_source_.desc_send_buffer().bytes(), IBV_ACCESS_LOCAL_WRITE); if (!mr_desc_) { L_(error) << "ibv_reg_mr failed for mr_desc: " << strerror(errno); throw InfinibandException("registration of memory region failed"); } if (true) { dump_mr(mr_desc_); dump_mr(mr_data_); } } }
/*
 * Allocate and register this connection's buffers.  Remote access on the
 * recv-message and remote-data MRs depends on the configured mode: remote
 * write in M_WRITE mode, remote read otherwise.
 */
void register_memory(struct connection *conn)
{
  /* Fail fast on allocation failure rather than handing NULL to ibv_reg_mr(). */
  TEST_Z(conn->send_msg = malloc(sizeof(struct message)));
  TEST_Z(conn->recv_msg = malloc(sizeof(struct message)));
  TEST_Z(conn->rdma_local_region = malloc(RDMA_BUFFER_SIZE));
  TEST_Z(conn->rdma_remote_region = malloc(RDMA_BUFFER_SIZE));

  TEST_Z(conn->send_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->send_msg,
    sizeof(struct message),
    IBV_ACCESS_LOCAL_WRITE));

  TEST_Z(conn->recv_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->recv_msg,
    sizeof(struct message),
    IBV_ACCESS_LOCAL_WRITE |
      ((s_mode == M_WRITE) ? IBV_ACCESS_REMOTE_WRITE : IBV_ACCESS_REMOTE_READ)));

  TEST_Z(conn->rdma_local_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->rdma_local_region,
    RDMA_BUFFER_SIZE,
    IBV_ACCESS_LOCAL_WRITE));

  TEST_Z(conn->rdma_remote_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->rdma_remote_region,
    RDMA_BUFFER_SIZE,
    IBV_ACCESS_LOCAL_WRITE |
      ((s_mode == M_WRITE) ? IBV_ACCESS_REMOTE_WRITE : IBV_ACCESS_REMOTE_READ)));
}
// Set up the per-connection buffers: a send/recv message pair plus the two
// RDMA data regions, each registered with the connection's protection domain.
void register_memory(IbvConnection *conn)
{
    // Control-message buffers.
    conn->send_msg = (message *)malloc(sizeof(struct message));
    conn->recv_msg = (message *)malloc(sizeof(struct message));

    // Zero-initialized data regions for RDMA transfers.
    conn->rdma_local_region  = (char *)calloc(RDMA_BUFFER_SIZE, 1);
    conn->rdma_remote_region = (char *)calloc(RDMA_BUFFER_SIZE, 1);

    // The send message and local region only need local access; the recv
    // message and remote region also get IBV_ACCESS_REMOTE_WRITE.
    TEST_Z(conn->send_mr = ibv_reg_mr(conn->pd, conn->send_msg,
                                      sizeof(struct message),
                                      IBV_ACCESS_LOCAL_WRITE));
    TEST_Z(conn->recv_mr = ibv_reg_mr(conn->pd, conn->recv_msg,
                                      sizeof(struct message),
                                      IBV_ACCESS_LOCAL_WRITE |
                                      IBV_ACCESS_REMOTE_WRITE));
    TEST_Z(conn->rdma_local_mr = ibv_reg_mr(conn->pd, conn->rdma_local_region,
                                            RDMA_BUFFER_SIZE,
                                            IBV_ACCESS_LOCAL_WRITE));
    TEST_Z(conn->rdma_remote_mr = ibv_reg_mr(conn->pd, conn->rdma_remote_region,
                                             RDMA_BUFFER_SIZE,
                                             IBV_ACCESS_LOCAL_WRITE |
                                             IBV_ACCESS_REMOTE_WRITE));
}
// Prepare this connection for an incoming/outgoing connect: tear down any
// previous state, then (under the spinlock) register the payload buffer and
// both message structs with the protection domain `pd`, all with local +
// remote write access.  On any registration failure the verbs errno is
// wrapped in a boost error_code and routed through HPX_IBVERBS_THROWS_IF.
void on_preconnect(rdma_cm_id * id, ibv_pd * pd, boost::system::error_code &ec)
{
    // Drop any previous registrations/connection state.
    close();
    {
        util::spinlock::scoped_lock lk(mtx_);
        HPX_ASSERT(buffer_);

        // Main payload buffer.
        buffer_mr_ =
            ibv_reg_mr(
                pd
              , buffer_
              , buffer_size_
              , IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE
            );
        if(!buffer_mr_)
        {
            // Capture errno before any other call can overwrite it.
            int verrno = errno;
            boost::system::error_code err(verrno, boost::system::system_category());
            HPX_IBVERBS_THROWS_IF(
                ec
              , err
            );
        }

        // Message struct written by the server side.
        server_msg_mr_ =
            ibv_reg_mr(
                pd
              , server_msg_
              , sizeof(hpx::parcelset::policies::ibverbs::message)
              , IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE
            );
        if(!server_msg_mr_)
        {
            int verrno = errno;
            boost::system::error_code err(verrno, boost::system::system_category());
            HPX_IBVERBS_THROWS_IF(
                ec
              , err
            );
        }

        // Message struct written by the client side.
        client_msg_mr_ =
            ibv_reg_mr(
                pd
              , client_msg_
              , sizeof(hpx::parcelset::policies::ibverbs::message)
              , IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE
            );
        if(!client_msg_mr_)
        {
            int verrno = errno;
            boost::system::error_code err(verrno, boost::system::system_category());
            HPX_IBVERBS_THROWS_IF(
                ec
              , err
            );
        }

        // Remember the CM id this registration set belongs to.
        id_ = id;
    }
    //post_receive();
}
// Register the context's two pieces of pinned state -- the memory-keys
// message and the lock-result word -- with full local/remote access so the
// peer can both read and write them.  Always returns 0 (TEST_Z aborts on
// registration failure).
int BClientContext::register_memory() {
	const int access_flags = IBV_ACCESS_LOCAL_WRITE
		| IBV_ACCESS_REMOTE_READ
		| IBV_ACCESS_REMOTE_WRITE;

	TEST_Z(recv_memory_mr = ibv_reg_mr(pd, &recv_memory_msg,
					   sizeof(struct BMemoryKeys),
					   access_flags));
	TEST_Z(lock_result_mr = ibv_reg_mr(pd, &lock_result,
					   sizeof(uint64_t),
					   access_flags));
	return 0;
}
/* Walk the global vbuf region list and register every region with each
 * HCA's protection domain, refreshing mem_handle[] per HCA.
 * NOTE(review): the "reregister" name suggests this runs after prior
 * registrations were invalidated (e.g. across fork) -- confirm with callers. */
void vbuf_reregister_all()
{
    int i = 0;
    vbuf_region *vr = vbuf_region_head;

    MPIDI_STATE_DECL(MPID_STATE_VBUF_REREGISTER_ALL);
    MPIDI_FUNC_ENTER(MPID_STATE_VBUF_REREGISTER_ALL);

    /* Snapshot the protection-domain handles, one per HCA. */
    for (; i < rdma_num_hcas; ++i)
    {
        ptag_save[i] = MPIDI_CH3I_RDMA_Process.ptag[i];
    }

    /* Register each region's whole backing allocation with every HCA. */
    while (vr)
    {
        for (i = 0; i < rdma_num_hcas; ++i)
        {
            vr->mem_handle[i] = ibv_reg_mr(
                ptag_save[i],
                vr->malloc_buf_start,
                vr->count * rdma_vbuf_total_size,
                IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);

            if (!vr->mem_handle[i])
            {
                /* Registration failure is treated as fatal. */
                ibv_error_abort(IBV_RETURN_ERR,"Cannot reregister vbuf region\n");
            }
        }

        vr = vr->next;
    }

    MPIDI_FUNC_EXIT(MPID_STATE_VBUF_REREGISTER_ALL);
}
/**
 * Sends a buffer's memory region so that it can be mapped to its remote end.
 *
 * The ibv_mr struct itself is registered as a temporary memory region so its
 * contents can travel in a SEND; that registration is torn down once the
 * send completes.  Busy-polls the send CQ, so this call blocks until the
 * work request finishes.
 */
void RDMAChannel::SendMR(ibv_mr* mr, int id) {
  // Map the memory region itself so that it can be sent.
  ibv_mr* init =
      ibv_reg_mr(adapter_.pd_, mr, sizeof(ibv_mr), IBV_ACCESS_LOCAL_WRITE);
  // Fix: the result was previously used unchecked -- a failed registration
  // would have crashed on init->lkey below.
  CHECK(init) << "Failed to register MR for exchange: " << strerror(errno);

  struct ibv_sge list;
  list.addr = (uint64_t) mr;
  list.length = sizeof(ibv_mr);
  list.lkey = init->lkey;

  struct ibv_send_wr wr;
  caffe_memset(sizeof(wr), 0, &wr);
  wr.wr_id = (uint64_t) init;  // used below to match the completion
  wr.sg_list = &list;
  wr.num_sge = 1;
  wr.opcode = IBV_WR_SEND_WITH_IMM;
  wr.send_flags = IBV_SEND_SIGNALED;
  wr.imm_data = id;  // receiver identifies the buffer by the immediate

  struct ibv_send_wr *bad_wr;
  CHECK(!ibv_post_send(qp_, &wr, &bad_wr));

  // Busy-poll the send CQ until our work request completes.
  for (;;) {
    ibv_wc wc;
    int ne = ibv_poll_cq(write_cq_, 1, &wc);
    CHECK_GE(ne, 0);
    if (ne && wc.wr_id == (uint64_t) init) {
      break;
    }
  }

  CHECK(!ibv_dereg_mr(init));
}
/*
 * Create the RDMA proxy work-completion (WC) ring for `m_qp`: a page-aligned,
 * zeroed pool of `entries` 64-byte-aligned mcm_wc_rx slots, registered with
 * the IB protection domain for local + remote write.  Publishes the MR's
 * address and rkey into m_qp->wrc.  Returns 0 on success, -1 on failure.
 */
int m_pi_create_wc_q(struct mcm_qp *m_qp, int entries)
{
	/* RDMA proxy WC pool, register with SCIF and IB,
	 * set pool and segm size with parameters */
	m_qp->wrc.wc_sz = ALIGN_64(sizeof(struct mcm_wc_rx));
	m_qp->wrc.wc_len = m_qp->wrc.wc_sz * entries; /* 64 byte aligned for signal_fence */
	m_qp->wrc.wc_end = entries - 1;
	m_qp->wc_hd_rem = 0;
	m_qp->wc_tl_rem = 0;

	if (posix_memalign((void **)&m_qp->wrc.wc_addr, 4096,
			   ALIGN_PAGE(m_qp->wrc.wc_len))) {
		mlog(0, "failed to allocate wc_rbuf, m_qp=%p, wc_len=%d, entries=%d\n",
		     m_qp, m_qp->wrc.wc_len, entries);
		return -1;
	}
	memset((void*)m_qp->wrc.wc_addr, 0, ALIGN_PAGE(m_qp->wrc.wc_len));

	mlog(4, " WC rbuf pool %p, LEN req=%d, act=%d\n",
	     m_qp->wrc.wc_addr, m_qp->wrc.wc_len, ALIGN_PAGE(m_qp->wrc.wc_len));

	m_qp->wc_rbuf_mr = ibv_reg_mr(m_qp->smd->md->pd,
				      (void*)m_qp->wrc.wc_addr,
				      m_qp->wrc.wc_len,
				      IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
	if (!m_qp->wc_rbuf_mr) {
		mlog(0, " IB_register addr=%p,%d failed %s\n",
		     m_qp->wrc.wc_addr, ALIGN_PAGE(m_qp->wrc.wc_len),
		     strerror(errno));
		/* Fix: the freshly allocated pool was previously leaked on
		 * this path. */
		free((void *)(uintptr_t)m_qp->wrc.wc_addr);
		m_qp->wrc.wc_addr = 0;
		return -1;
	}

	m_qp->wrc.wc_addr = (uint64_t)(uintptr_t)m_qp->wc_rbuf_mr->addr;
	m_qp->wrc.wc_rkey = m_qp->wc_rbuf_mr->rkey;

	mlog(4, " IB_mr for wc_buf addr %p, mr 0x%llx, len %d, entries %d rkey %x lkey %x\n",
	     m_qp->wrc.wc_addr, m_qp->wc_rbuf_mr->addr,
	     ALIGN_PAGE(m_qp->wrc.wc_len), entries,
	     m_qp->wc_rbuf_mr->rkey, m_qp->wc_rbuf_mr->lkey);

	return 0;
}
/* Allocate and register the node's message buffer.  A zero message_size
 * disables messaging entirely (message_count is forced to 0); with nothing
 * to send, the function succeeds without allocating.  Returns 0 on
 * success, -1 on allocation/registration failure. */
static int create_message(struct cmatest_node *node)
{
	if (!message_size)
		message_count = 0;
	if (!message_count)
		return 0;

	/* UD receives are preceded by a Global Routing Header, so the
	 * buffer needs room for it in front of the payload. */
	size_t length = message_size + sizeof(struct ibv_grh);

	node->mem = malloc(length);
	if (!node->mem) {
		printf("failed message allocation\n");
		return -1;
	}

	node->mr = ibv_reg_mr(node->pd, node->mem, length,
			      IBV_ACCESS_LOCAL_WRITE);
	if (node->mr)
		return 0;

	/* Registration failed: report and release the buffer. */
	printf("failed to reg MR\n");
	free(node->mem);
	return -1;
}
/* Allocate a page-aligned buffer of `size` bytes and register it with the
 * HCA's protection domain using `access_perm`.  On success fills mem_info
 * (ptr + mr) and returns 0; on failure returns -1 with mem_info reset. */
static int psoib_vapi_alloc(hca_info_t *hca_info, int size,
			    enum ibv_access_flags access_perm,
			    mem_info_t *mem_info)
{
    mem_info->mr = NULL;

    /* Region for buffers (page aligned, as required for registration). */
    mem_info->ptr = valloc(size);
    if (!mem_info->ptr) goto err_malloc;

//	printf("ibv_reg_mr(pd = %p, ptr = %p, size = %d, access_perm = 0x%x)\n",
//	       hca_info->pd, mem_info->ptr, size, access_perm);

    mem_info->mr = ibv_reg_mr(hca_info->pd, mem_info->ptr, size, access_perm);
    if (!mem_info->mr) goto err_reg_mr;

    return 0;
    /* --- */
 err_reg_mr:
    {
	/* Fix: save errno before free() -- free may clobber it, and the old
	 * code reported (and compared against ENOMEM) the value afterwards. */
	int reg_errno = errno;
	free(mem_info->ptr);
	mem_info->ptr = NULL;
	psoib_err_errno("ibv_reg_mr() failed", reg_errno);
	if (reg_errno == ENOMEM) print_mlock_help(size);
    }
    return -1;
    /* --- */
 err_malloc:
    psoib_err_errno("malloc() failed!", errno);
    return -1;
}
/* Connection-setup hook: allocate this connection's context with a
 * page-aligned data buffer and message struct, register both (remotely
 * writable), and post the initial receive. */
static void on_pre_conn(struct rdma_cm_id *id)
{
  struct conn_context *ctx;

  /* Fix: allocation results were previously unchecked. */
  TEST_Z(ctx = (struct conn_context *)malloc(sizeof(struct conn_context)));
  id->context = ctx;

  ctx->file_name[0] = '\0'; // take this to mean we don't have the file name

  /* posix_memalign returns 0 on success. */
  TEST_Z(posix_memalign((void **)&ctx->buffer, sysconf(_SC_PAGESIZE),
                        BUFFER_SIZE) == 0);
  TEST_Z(ctx->buffer_mr = ibv_reg_mr(rc_get_pd(), ctx->buffer, BUFFER_SIZE,
                                     IBV_ACCESS_LOCAL_WRITE |
                                     IBV_ACCESS_REMOTE_WRITE));

  TEST_Z(posix_memalign((void **)&ctx->msg, sysconf(_SC_PAGESIZE),
                        sizeof(*ctx->msg)) == 0);
  TEST_Z(ctx->msg_mr = ibv_reg_mr(rc_get_pd(), ctx->msg, sizeof(*ctx->msg),
                                  IBV_ACCESS_LOCAL_WRITE |
                                  IBV_ACCESS_REMOTE_WRITE));

  post_receive(id);
}
/**
 * Register mempool as a memory region.
 *
 * @param pd
 *   Pointer to protection domain.
 * @param mp
 *   Pointer to memory pool.
 *
 * @return
 *   Memory region pointer, NULL in case of error.
 */
struct ibv_mr *
mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	/* Start from the pool's own element range; only widened below. */
	uintptr_t start = mp->elt_va_start;
	uintptr_t end = mp->elt_va_end;
	unsigned int i;

	DEBUG("mempool %p area start=%p end=%p size=%zu",
	      (const void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Round start and end to page boundary if found in memory segments. */
	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
		uintptr_t addr = (uintptr_t)ms[i].addr;
		size_t len = ms[i].len;
		unsigned int align = ms[i].hugepage_sz;

		/* Strict '>'/'<': a boundary already on a segment edge is
		 * left untouched; one strictly inside a segment is expanded
		 * to that segment's hugepage alignment. */
		if ((start > addr) && (start < addr + len))
			start = RTE_ALIGN_FLOOR(start, align);
		if ((end > addr) && (end < addr + len))
			end = RTE_ALIGN_CEIL(end, align);
	}
	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
	      (const void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Register the (possibly widened) range for local + remote write. */
	return ibv_reg_mr(pd, (void *)start, end - start,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
}
/*
 * Register `handle->buf` (handle->local_size bytes) with the context's
 * protection domain for local write plus remote read/write, initialize the
 * handle's bookkeeping fields, and allocate storage for the peer's MR
 * description.  Returns 0 on success, -1 on failure (nothing registered).
 */
int xfer_rdma_register_buffer(struct xfer_context *ctx,
                              struct xfer_rdma_buf_handle_t *handle)
{
	/* We dont really want IBV_ACCESS_LOCAL_WRITE, but IB spec says:
	 * The Consumer is not allowed to assign Remote Write or Remote Atomic to
	 * a Memory Region that has not been assigned Local Write. */
	handle->local_mr = ibv_reg_mr(ctx->pd, handle->buf, handle->local_size,
	                              IBV_ACCESS_REMOTE_WRITE |
	                              IBV_ACCESS_LOCAL_WRITE |
	                              IBV_ACCESS_REMOTE_READ);

	if (!handle->local_mr) {
		fprintf(stderr, "%d:%s: Couldn't allocate MR\n", pid, __func__);
		return -1;
	}

	handle->id = (uintptr_t) handle->buf;
	handle->got_done = 0;
	handle->ctx = ctx;

	handle->remote_mr = malloc(sizeof(struct ibv_mr));
	if (!handle->remote_mr) {
		fprintf(stderr, "%d:%s: could not malloc remote_mr\n", pid, __func__);
		/* Fix: previously leaked the registration on this path. */
		ibv_dereg_mr(handle->local_mr);
		handle->local_mr = NULL;
		return -1;
	}

	return 0;
}
/* Allocate the connection's send/recv regions (BUFFER_SIZE each) and
 * register both for local and remote write access. */
void register_memory(struct connection *conn)
{
  const int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE;

  conn->send_region = malloc(BUFFER_SIZE);
  conn->recv_region = malloc(BUFFER_SIZE);

  TEST_Z(conn->send_mr =
             ibv_reg_mr(s_ctx->pd, conn->send_region, BUFFER_SIZE, access));
  TEST_Z(conn->recv_mr =
             ibv_reg_mr(s_ctx->pd, conn->recv_region, BUFFER_SIZE, access));
}
/* Benchmark driver: after PMI init and QP connection, rank 0 loops over
 * growing buffer sizes, registering a remotely-writable landing buffer,
 * sending its rkey/addr to the peer (big-endian packed into buf), and
 * waiting for a completion before deregistering.
 * NOTE(review): rc from resource_create()/connect_qp() is never checked;
 * `start`, `end` and `time` are declared but unused in the visible code.
 * The source is truncated below: the non-root (`else`) branch continues
 * beyond this excerpt. */
int main()
{
	int ib_port = 1;
	int gid_idx = 1;
	int rc;
	int rank, nprocs;
	struct ibv_sge sge_list;
	struct ibv_wc wc;
	struct ibv_send_wr *sr;
	unsigned long long start, end;
	float time;

	mypmiInit(&rank, &nprocs);
	fprintf(stderr, "[%d] nprocs(%d)\n", rank, nprocs);

	rc = resource_create(&res, ib_port, rank);
	gid_idx = rank; /* one GID index per rank */
	rc = connect_qp(&res, ib_port, gid_idx, rank);

	create_sge(&res, buf, SIZE, &sge_list);
	memset(&wc, 0, sizeof(struct ibv_wc));
	sr = malloc(sizeof(*sr));
	memset(sr, 0, sizeof(*sr));

	mypmiBarrier();
	fprintf(stderr, "[%d] START\n", rank);
	memset(buf, 0, SIZE);
	mypmiBarrier();

	if (rank == 0) {
		struct ibv_mr *mr;
		for (int size = RDMA_MIN_SIZE; size < RDMA_MAX_SIZE; size += STEP) {
			/* Fresh remotely-writable buffer for this size. */
			char *received = calloc(size, sizeof(char));
			mr = ibv_reg_mr(res.pd, received, size,
					IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE);
			/* Pack rkey and the 64-bit address big-endian. */
			INT_TO_BE(buf, mr->rkey);
			INT_TO_BE(buf + 4, (((intptr_t)mr->addr) >> 32));
			INT_TO_BE(buf + 8, (((intptr_t)mr->addr) & 0xffffffff));
			if (post_ibsend(&res, IBV_WR_SEND, &sge_list, sr, 1)) {
				fprintf(stderr, "[%d] failed to post SR\n", rank);
				goto end;
			}
			/* Busy-poll the send CQ until the advertisement is out. */
			while ((rc = poll_cq(&res, &wc, 1, SCQ_FLG)) == 0) {
			}
			/* printf("[%d] memory region is sent. key(%x) addr(%lx) rc(%d)\n", rank, mr->rkey, (intptr_t)mr->addr, rc); */
			/* wait for done */
			post_ibreceive(&res, &sge_list, 1);
			while (poll_cq(&res, &wc, 1, RCQ_FLG) == 0) {
			}
			/* printf("[%d] %d byte has received (opcode=%d)\n", rank, wc.byte_len, wc.opcode); */
			/* printf("[%d] Received message: %s\n", rank, buf); */
			/* display_received(received, size); */
			ibv_dereg_mr(mr);
			free(received);
		}
	} else {
/*
 * Allocate and register this connection's control-message pair: the send
 * message is local-only, the receive message is registered for remote
 * read/write as well.  (The former rdma_local/remote/msg region code that
 * was commented out here has been removed.)
 */
void register_memory(struct connection *conn)
{
  /* Fail fast on allocation failure rather than handing NULL to ibv_reg_mr(). */
  TEST_Z(conn->send_msg = malloc(sizeof(struct control_msg)));
  TEST_Z(conn->recv_msg = malloc(sizeof(struct control_msg)));

  TEST_Z(conn->send_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->send_msg,
    sizeof(struct control_msg),
    IBV_ACCESS_LOCAL_WRITE));

  TEST_Z(conn->recv_mr = ibv_reg_mr(
    s_ctx->pd,
    conn->recv_msg,
    sizeof(struct control_msg),
    IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ));
}
/*
 * Allocate and register the per-connection buffer set for an RDM endpoint:
 * send, receive and RMA staging pools (each ep->buff_len * ep->n_buffs
 * bytes) plus a small MR over the in-struct ack-status word, which is
 * registered remotely writable.  Returns FI_SUCCESS, or -FI_ENOMEM on any
 * allocation/registration failure.
 */
static ssize_t
fi_ibv_rdm_prepare_conn_memory(struct fi_ibv_rdm_ep *ep,
			       struct fi_ibv_rdm_tagged_conn *conn)
{
	/* Must not be called twice for the same connection. */
	assert(conn->s_mr == NULL);
	assert(conn->r_mr == NULL);

	const size_t size = ep->buff_len * ep->n_buffs;

	conn->s_mr =
		fi_ibv_rdm_alloc_and_reg(ep, (void **) &conn->sbuf_mem_reg, size);
	if (!conn->s_mr) {
		/* assert(NULL): fail loudly in debug builds before unwinding. */
		assert(conn->s_mr);
		goto s_err;
	}

	conn->r_mr =
		fi_ibv_rdm_alloc_and_reg(ep, (void **) &conn->rbuf_mem_reg, size);
	if (!conn->r_mr) {
		assert(conn->r_mr);
		goto r_err;
	}

	/* The ack word lives inside the connection struct, so it is
	 * registered directly rather than via alloc_and_reg. */
	conn->ack_mr = ibv_reg_mr(ep->domain->pd, &conn->sbuf_ack_status,
				  sizeof(conn->sbuf_ack_status),
				  IBV_ACCESS_LOCAL_WRITE |
				  IBV_ACCESS_REMOTE_WRITE);
	if (!conn->ack_mr) {
		assert(conn->ack_mr);
		goto ack_err;
	}

	conn->rma_mr =
		fi_ibv_rdm_alloc_and_reg(ep, (void **) &conn->rmabuf_mem_reg, size);
	if (!conn->rma_mr) {
		assert(conn->rma_mr);
		goto rma_err;
	}

	fi_ibv_rdm_buffer_lists_init(conn, ep);

	return FI_SUCCESS;

	/* Error handling.
	 * NOTE(review): buffers are freed below but already-registered MRs
	 * are not deregistered -- looks like a leak on partial failure;
	 * TODO confirm whether teardown happens elsewhere. */
rma_err:
	free(conn->rmabuf_mem_reg);
ack_err:
	/*
	 * Ack buffer is a part of connection structure, freeing is not needed
	 */
r_err:
	free(conn->rbuf_mem_reg);
s_err:
	free(conn->sbuf_mem_reg);
	/* There is a lack of host or HCA memory */
	return -FI_ENOMEM;
}
/*
 * Register each iovec entry with the protection domain (unless already
 * registered) and compute how the iov payload maps onto MTU-sized packets.
 * Outputs: *wr_countp = work requests needed, *sge_countp = total
 * scatter-gather entries, *data_lenp = total payload bytes.  Returns
 * ORTE_SUCCESS or ORTE_ERR_OUT_OF_RESOURCE on registration failure.
 */
int mca_oob_ud_register_iov (struct iovec *iov, int count, struct ibv_mr **ib_mr,
                             struct ibv_pd *ib_pd, unsigned int mtu,
                             int *sge_countp, int *wr_countp, int *data_lenp)
{
    int data_len, iov_index, sge_count;
    unsigned int packet_size = 0;

    opal_output_verbose (80, orte_oob_base_framework.framework_output,
                         "%s oob:ud:register_iov registering memory",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));

    *wr_countp  = 0;
    *data_lenp  = 0;
    *sge_countp = 0;

    for (iov_index = 0, data_len = 0, sge_count = 0 ; iov_index < count ; ++iov_index) {
        unsigned int iov_left = iov[iov_index].iov_len;

        data_len += iov_left;

        /* Each iov entry needs at least one SGE; one more is added every
         * time the entry crosses an MTU boundary (see loop below). */
        sge_count++;

        do {
            unsigned int to_trans = min (iov_left, mtu - packet_size);

            /* packet_size tracks the fill level of the current packet and
             * wraps to 0 when the packet is full. */
            packet_size = (to_trans < iov_left) ? 0 : packet_size + to_trans;
            iov_left -= to_trans;

            if (0 == packet_size && iov_left) {
                sge_count++;
            }
        } while (iov_left);

        /* register buffers */
        if (NULL == ib_mr[iov_index]) {
            ib_mr[iov_index] = ibv_reg_mr (ib_pd, iov[iov_index].iov_base,
                                           iov[iov_index].iov_len,
                                           IBV_ACCESS_LOCAL_WRITE |
                                           IBV_ACCESS_REMOTE_WRITE);
            if (NULL == ib_mr[iov_index]) {
                /* Ruh-roh */
                orte_show_help("help-oob-ud.txt", "reg-mr-failed", true,
                               orte_process_info.nodename,
                               iov[iov_index].iov_base,
                               iov[iov_index].iov_len,strerror(errno));
                return ORTE_ERR_OUT_OF_RESOURCE;
            }
        }
    }

    /* One work request per MTU-sized chunk of the total payload. */
    *wr_countp = (data_len + mtu - 1) / mtu;
    *sge_countp = sge_count;
    *data_lenp = data_len;

    return ORTE_SUCCESS;
}
/* Adopt a caller-provided buffer of `size` bytes as this connection's RDMA
 * message region and register it for local write access only. */
void build_msg_region(struct connection *conn, char* addr, int size)
{
  conn->rdma_msg_region = addr;

  TEST_Z(conn->rdma_msg_mr =
             ibv_reg_mr(s_ctx->pd, conn->rdma_msg_region, size,
                        IBV_ACCESS_LOCAL_WRITE));
}
/*
 * Allocate and register the CTS fragment buffer for `endpoint`: sized for
 * all openib headers, the footer and the credits-QP payload, registered for
 * local write + remote read/write, and wired into the endpoint's pre-built
 * receive fragment.  Returns OMPI_SUCCESS or OMPI_ERR_OUT_OF_RESOURCE.
 */
int ompi_btl_openib_connect_base_alloc_cts(mca_btl_base_endpoint_t *endpoint)
{
    ompi_free_list_item_t *fli;
    int length = sizeof(mca_btl_openib_header_t) +
        sizeof(mca_btl_openib_header_coalesced_t) +
        sizeof(mca_btl_openib_control_header_t) +
        sizeof(mca_btl_openib_footer_t) +
        mca_btl_openib_component.qp_infos[mca_btl_openib_component.credits_qp].size;

    /* Explicitly don't use the mpool registration */
    fli = &(endpoint->endpoint_cts_frag.super.super.base.super);
    fli->registration = NULL;
    fli->ptr = malloc(length);
    if (NULL == fli->ptr) {
        BTL_ERROR(("malloc failed"));
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    endpoint->endpoint_cts_mr =
        ibv_reg_mr(endpoint->endpoint_btl->device->ib_pd,
                   fli->ptr, length,
                   IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE |
                   IBV_ACCESS_REMOTE_READ);
    OPAL_OUTPUT((-1, "registered memory %p, length %d", fli->ptr, length));
    if (NULL == endpoint->endpoint_cts_mr) {
        free(fli->ptr);
        BTL_ERROR(("Failed to reg mr!"));
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    /* NOTE: We do not need to register this memory with the
       opal_memory subsystem, because this is OMPI-controlled memory -- we do
       not need to worry about this memory being freed out from underneath
       us. */

    /* Copy the lkey where it needs to go */
    endpoint->endpoint_cts_frag.super.sg_entry.lkey =
        endpoint->endpoint_cts_frag.super.super.segment.key =
        endpoint->endpoint_cts_mr->lkey;
    endpoint->endpoint_cts_frag.super.sg_entry.length = length;

    /* Construct the rest of the recv_frag_t */
    /* NOTE(review): OBJ_CONSTRUCT runs AFTER the sg_entry/segment fields
       were assigned above -- if construction re-initializes those fields
       this ordering clobbers them; TODO confirm OBJ_CONSTRUCT semantics. */
    OBJ_CONSTRUCT(&(endpoint->endpoint_cts_frag), mca_btl_openib_recv_frag_t);
    endpoint->endpoint_cts_frag.super.super.base.order =
        mca_btl_openib_component.credits_qp;
    endpoint->endpoint_cts_frag.super.endpoint = endpoint;

    OPAL_OUTPUT((-1, "Got a CTS frag for peer %s, addr %p, length %d, lkey %d",
                 (NULL == endpoint->endpoint_proc->proc_ompi->proc_hostname) ?
                 "unknown" : endpoint->endpoint_proc->proc_ompi->proc_hostname,
                 (void*) endpoint->endpoint_cts_frag.super.sg_entry.addr,
                 endpoint->endpoint_cts_frag.super.sg_entry.length,
                 endpoint->endpoint_cts_frag.super.sg_entry.lkey));
    return OMPI_SUCCESS;
}
// Register `buffer` with the protection domain, converting a verbs failure
// into a boost system error; on success the MR is owned by a shared_ptr
// that releases it via `deleter`.
ibverbs_mr(ibv_pd *pd, void * buffer, std::size_t size, int access)
{
    ibv_mr * region = ibv_reg_mr(pd, buffer, size, access);
    if(!region)
    {
        // Capture errno immediately, before any other call can change it.
        int verrno = errno;
        boost::system::error_code err(verrno, boost::system::system_category());
        HPX_IBVERBS_THROWS(err);
    }
    mr_ = boost::shared_ptr<ibv_mr>(region, deleter);
}
/*
 * Register the embedded control-message buffers (rd->recv_buf / rd->send_buf)
 * and pre-fill the matching receive/send work-request and SGE templates.
 * Returns 0 on success, 1 on registration failure (nothing left registered).
 */
static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;

	/* recv side must be locally writable so the HCA can deposit data. */
	rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf),
				 IBV_ACCESS_LOCAL_WRITE);
	if (rd->recv_mr == NULL) {
		log_err("fio: recv_buf reg_mr failed\n");
		return 1;
	}

	/* send side is registered with no access flags (local read only). */
	rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf),
				 0);
	if (rd->send_mr == NULL) {
		log_err("fio: send_buf reg_mr failed\n");
		ibv_dereg_mr(rd->recv_mr);
		return 1;
	}

	/* setup work request */
	/* recv wq */
	rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf;
	rd->recv_sgl.length = sizeof(rd->recv_buf);
	rd->recv_sgl.lkey = rd->recv_mr->lkey;
	rd->rq_wr.sg_list = &rd->recv_sgl;
	rd->rq_wr.num_sge = 1;
	/* wr_id == FIO_RDMA_MAX_IO_DEPTH -- presumably distinguishes control
	 * WRs from data WRs (which use smaller ids); verify against the
	 * completion handler. */
	rd->rq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	/* send wq */
	rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf;
	rd->send_sgl.length = sizeof(rd->send_buf);
	rd->send_sgl.lkey = rd->send_mr->lkey;
	rd->sq_wr.opcode = IBV_WR_SEND;
	rd->sq_wr.send_flags = IBV_SEND_SIGNALED;
	rd->sq_wr.sg_list = &rd->send_sgl;
	rd->sq_wr.num_sge = 1;
	rd->sq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	return 0;
}
/* Allocate a zeroed, FI_IBV_RDM_BUF_ALIGNMENT-aligned buffer of `size`
 * bytes (returned via *buf) and register it for local + remote write.
 * Returns the MR, or NULL if the allocation failed. */
static struct ibv_mr *
fi_ibv_rdm_alloc_and_reg(struct fi_ibv_rdm_ep *ep, void **buf, size_t size)
{
	*buf = memalign(FI_IBV_RDM_BUF_ALIGNMENT, size);
	if (!*buf) {
		return NULL;
	}

	memset(*buf, 0, size);
	return ibv_reg_mr(ep->domain->pd, *buf, size,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
}
// Allocate a `size`-byte buffer and register it with the protection domain;
// the returned smart pointer deregisters the MR and frees the buffer when
// the last owner goes away.  Throws std::runtime_error on registration
// failure (the buffer is released first).
static MrPtr make_mr(PdPtr pd, size_t size,
                     int access=IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE |
                                IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_ATOMIC)
{
    auto buf = new char[size];
    auto ptr = ibv_reg_mr(pd.get(), buf, size, access);
    if(!ptr) {
        delete[] buf;  // fix: the buffer was previously leaked on failure
        throw std::runtime_error("cannot create mr");
    }
    return MrPtr(ptr, [](ibv_mr *mr){
        // Fix: read mr->addr BEFORE deregistration -- ibv_dereg_mr frees the
        // ibv_mr struct, so the old `delete[] ...(ptr->addr)` afterwards was
        // a use-after-free.
        auto buffer = reinterpret_cast<char*>(mr->addr);
        ibv_dereg_mr(mr);
        delete[] buffer;
    });
}
// Wrap a caller-provided buffer as an RDMA-accessible region.  The buffer
// itself is not owned (deallocOnDelete = false), but the memory region
// registration is (deregOnDelete = true).
RdmaBuffer::RdmaBuffer(infinityverbs::core::Context* context, void* buffer,
		uint64_t sizeInBytes) {

	this->context = context;
	this->sizeInBytes = sizeInBytes;
	this->data = buffer;

	// Full access: the peer may both read and write this region.
	int accessFlags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE
			| IBV_ACCESS_REMOTE_READ;
	this->ibvMemoryRegion = ibv_reg_mr(
			this->context->getInfiniBandProtectionDomain(),
			this->data, this->sizeInBytes, accessFlags);

	deregOnDelete = true;
	deallocOnDelete = false;
}
/* Register [addr, addr+length) with the backend protection domain using the
 * given access flags.  Returns 0 on success or -EIO on failure (reported via
 * rdma_error_report). */
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access)
{
    struct ibv_mr *ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);

    if (!ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibmr = ibmr;
    mr->ibpd = pd->ibpd;
    return 0;
}
/* Allocate and register all per-connection buffers: a send/recv control
 * message pair plus the local/remote RDMA data regions.  The recv message
 * and remote region are registered remotely writable; the others are
 * local-only.  (The regions are deliberately not zeroed here.) */
void register_memory(struct connection *conn)
{
  conn->send_msg = malloc(sizeof(struct message));
  conn->recv_msg = malloc(sizeof(struct message));
  conn->rdma_local_region = malloc(RDMA_BUFFER_SIZE);
  conn->rdma_remote_region = malloc(RDMA_BUFFER_SIZE);

  TEST_Z(conn->send_mr = ibv_reg_mr(s_ctx->pd, conn->send_msg,
                                    sizeof(struct message),
                                    IBV_ACCESS_LOCAL_WRITE));
  TEST_Z(conn->recv_mr = ibv_reg_mr(s_ctx->pd, conn->recv_msg,
                                    sizeof(struct message),
                                    IBV_ACCESS_LOCAL_WRITE |
                                    IBV_ACCESS_REMOTE_WRITE));
  TEST_Z(conn->rdma_local_mr = ibv_reg_mr(s_ctx->pd, conn->rdma_local_region,
                                          RDMA_BUFFER_SIZE,
                                          IBV_ACCESS_LOCAL_WRITE));
  TEST_Z(conn->rdma_remote_mr = ibv_reg_mr(s_ctx->pd, conn->rdma_remote_region,
                                           RDMA_BUFFER_SIZE,
                                           IBV_ACCESS_LOCAL_WRITE |
                                           IBV_ACCESS_REMOTE_WRITE));
}
// Register `addr` as an RDMA buffer on `channel`, announce the resulting
// memory region to the peer, and wait for the peer's matching region.
RDMABuffer::RDMABuffer(RDMAChannel* channel, uint8_t* addr, size_t size)
    : channel_(channel), addr_(addr), size_(size) {
  // Local + remote write: the peer may RDMA-write directly into this buffer.
  self_ = ibv_reg_mr(channel_->adapter_.pd_, addr, size,
                     IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
  CHECK(self_) << "Failed to register memory region";
  // Buffer id is its index in the channel's buffer table.
  id_ = channel_->buffers_.size();
  channel_->buffers_.push_back(this);
  // Exchange MRs: send ours, then take the peer's from the incoming queue
  // (pop presumably blocks until it arrives -- verify queue semantics).
  channel_->SendMR(self_, id_);
  peer_ = channel_->memory_regions_queue_.pop();
}