void api_cntr_atomic(void)
{
    ssize_t sz;

    /* u64 */
    *((uint64_t *)source) = SOURCE_DATA;
    *((uint64_t *)target) = TARGET_DATA;
    sz = fi_atomic(ep[0], source, 1, loc_mr[0],
                   gni_addr[1], (uint64_t)target, mr_key[1],
                   FI_UINT64, FI_ATOMIC_WRITE, target);
    api_cntr_write_allowed(sz, cntr_bind_flags, "fi_atomic");

    *((uint64_t *)source) = SOURCE_DATA;
    *((uint64_t *)target) = TARGET_DATA;
    sz = fi_fetch_atomic(ep[0], source, 1, loc_mr[0],
                         source, loc_mr[0],
                         gni_addr[1], (uint64_t)target, mr_key[1],
                         FI_UINT64, FI_ATOMIC_WRITE, target);
    api_cntr_read_allowed(sz, cntr_bind_flags, "fi_atomic");

    sz = fi_inject_atomic(ep[0], source, 1,
                          gni_addr[1], (uint64_t)target, mr_key[1],
                          FI_INT64, FI_MIN);
    cr_assert_eq(sz, 0);
}

void sep_atomic(int index)
{
    int ret;
    ssize_t sz;
    struct fi_cq_tagged_entry cqe = { (void *) -1, UINT_MAX, UINT_MAX,
                                      (void *) -1, UINT_MAX, UINT_MAX };
    uint64_t w[NUMEPS] = {0}, r[NUMEPS] = {0}, w_e[NUMEPS] = {0};
    uint64_t r_e[NUMEPS] = {0};

    /* u64: post a 64-bit atomic write on the scalable-EP TX context */
    *((uint64_t *)source) = SOURCE_DATA;
    *((uint64_t *)target) = TARGET_DATA;
    sz = fi_atomic(tx_ep[0][index], source, 1, loc_mr[0],
                   rx_addr[index], (uint64_t)target, mr_key[1],
                   FI_UINT64, FI_ATOMIC_WRITE, target);
    cr_assert_eq(sz, 0);

    /* reap the completion from the matching per-context TX CQ */
    while ((ret = fi_cq_read(tx_cq[0][index], &cqe, 1)) == -FI_EAGAIN) {
        pthread_yield();
    }
    cr_assert_eq(ret, 1);
    sep_check_tcqe(&cqe, target, FI_ATOMIC | FI_WRITE, 0);

    /* one write expected; verify counters and the target contents */
    w[0] = 1;
    sep_check_cntrs(w, r, w_e, r_e);
    ret = *((uint64_t *)target) == SOURCE_DATA;
    cr_assert(ret, "Data mismatch");
}

int mca_btl_ofi_aop (struct mca_btl_base_module_t *btl, mca_btl_base_endpoint_t *endpoint,
                     uint64_t remote_address, mca_btl_base_registration_handle_t *remote_handle,
                     mca_btl_base_atomic_op_t op, uint64_t operand, int flags, int order,
                     mca_btl_base_rdma_completion_fn_t cbfunc, void *cbcontext, void *cbdata)
{
    int rc;
    int fi_datatype = FI_UINT64;
    int fi_op;

    mca_btl_ofi_module_t *ofi_btl = (mca_btl_ofi_module_t *) btl;
    mca_btl_ofi_endpoint_t *btl_endpoint = (mca_btl_ofi_endpoint_t*) endpoint;
    mca_btl_ofi_completion_t *comp = NULL;
    mca_btl_ofi_context_t *ofi_context;

    ofi_context = get_ofi_context(ofi_btl);

    if (flags & MCA_BTL_ATOMIC_FLAG_32BIT) {
        fi_datatype = FI_UINT32;
    }

    fi_op = to_fi_op(op);

    comp = mca_btl_ofi_completion_alloc(btl, endpoint, ofi_context,
                                        NULL, NULL,
                                        cbfunc, cbcontext, cbdata,
                                        MCA_BTL_OFI_TYPE_AOP);

    /* copy the operand because it might get freed from upper layer */
    comp->operand = (uint64_t) operand;

    remote_address = (remote_address - (uint64_t) remote_handle->base_addr);

    rc = fi_atomic(ofi_context->tx_ctx,
                   (void*) &comp->operand, 1, NULL,      /* operand */
                   btl_endpoint->peer_addr,              /* remote addr */
                   remote_address, remote_handle->rkey,  /* remote buffer */
                   fi_datatype, fi_op, comp);

    if (rc == -FI_EAGAIN) {
        return OPAL_ERR_OUT_OF_RESOURCE;
    } else if (rc < 0) {
        BTL_ERROR(("fi_atomic failed with rc=%d (%s)", rc, fi_strerror(-rc)));
        MCA_BTL_OFI_ABORT();
    }

    MCA_BTL_OFI_NUM_RDMA_INC(ofi_btl);

    return OPAL_SUCCESS;
}

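/*
 * For context: the AOP path above relies on to_fi_op() to translate BTL
 * atomic opcodes into libfabric fi_op values; that helper is not shown in
 * this snippet.  The sketch below only illustrates the kind of mapping
 * involved, assuming the usual MCA_BTL_ATOMIC_* opcodes and the standard
 * libfabric FI_* operation codes.  It is not the actual Open MPI code.
 */
static inline int to_fi_op_sketch(mca_btl_base_atomic_op_t op)
{
    switch (op) {
    case MCA_BTL_ATOMIC_ADD: return FI_SUM;   /* arithmetic add */
    case MCA_BTL_ATOMIC_AND: return FI_BAND;  /* bitwise and */
    case MCA_BTL_ATOMIC_OR:  return FI_BOR;   /* bitwise or */
    case MCA_BTL_ATOMIC_XOR: return FI_BXOR;  /* bitwise xor */
    case MCA_BTL_ATOMIC_MIN: return FI_MIN;
    case MCA_BTL_ATOMIC_MAX: return FI_MAX;
    default:
        /* unknown opcode: let the caller treat this as unsupported */
        return -1;
    }
}
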
static int execute_base_atomic_op(enum fi_op op)
{
    int ret;

    ret = fi_atomic(ep, buf, 1, fi_mr_desc(mr), remote_fi_addr, remote.addr,
                    remote.key, datatype, op, &fi_ctx_atomic);
    if (ret) {
        FT_PRINTERR("fi_atomic", ret);
    } else {
        ret = wait_for_completion(scq, 1);
    }

    return ret;
}

static int execute_base_atomic_op(enum fi_op op)
{
    int ret;

    ret = fi_atomic(ep, buf, 1, fi_mr_desc(mr), remote_fi_addr, remote.addr,
                    remote.key, datatype, op, &fi_ctx_atomic);
    if (ret) {
        fprintf(stderr, "fi_atomic %d (%s)\n", ret, fi_strerror(-ret));
    } else {
        ret = wait_for_completion(scq, 1);
    }

    return ret;
}

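/*
 * A compare-and-swap counterpart to execute_base_atomic_op() would follow
 * the same shape, just using fi_compare_atomic() with separate compare and
 * result buffers.  Minimal sketch, assuming compare_buf/result_buf and their
 * registered regions (mr_compare/mr_result) exist alongside buf/mr; those
 * names are hypothetical and not taken from the snippets above.
 */
static int execute_compare_atomic_op_sketch(enum fi_op op)
{
    int ret;

    ret = fi_compare_atomic(ep, buf, 1, fi_mr_desc(mr),
                            compare_buf, fi_mr_desc(mr_compare),
                            result_buf, fi_mr_desc(mr_result),
                            remote_fi_addr, remote.addr, remote.key,
                            datatype, op, &fi_ctx_atomic);
    if (ret) {
        FT_PRINTERR("fi_compare_atomic", ret);
    } else {
        /* one completion expected on the send CQ, as above */
        ret = wait_for_completion(scq, 1);
    }

    return ret;
}
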
void api_cq_atomic(void)
{
    ssize_t sz;

    /* u64 */
    *((uint64_t *)source) = SOURCE_DATA;
    *((uint64_t *)target) = TARGET_DATA;
    sz = fi_atomic(ep[0], source, 1, loc_mr[0],
                   gni_addr[1],
                   _REM_ADDR(fi[0], target, target), mr_key[1],
                   FI_UINT64, FI_ATOMIC_WRITE, target);
    api_cq_send_allowed(sz, cq_bind_flags, "fi_atomic");

    sz = fi_inject_atomic(ep[0], source, 1,
                          gni_addr[1],
                          _REM_ADDR(fi[0], target, target), mr_key[1],
                          FI_INT64, FI_MIN);
    cr_assert_eq(sz, 0);

    api_cq_wait1(msg_cq[0], cq_bind_flags & FI_SEND);
}

void do_atomic_write_fetch(void)
{
    int ret;
    ssize_t sz;
    uint64_t operand;
    struct fi_cq_tagged_entry cqe;
    struct fi_cq_err_entry err_cqe;

    /* u64: atomic write of SOURCE_DATA over the remote target */
    *((uint64_t *)source) = SOURCE_DATA;
    *((uint64_t *)target) = TARGET_DATA;
    sz = fi_atomic(ep[0], source, 1, loc_mr[0],
                   gni_addr[1], (uint64_t)target, mr_key[1],
                   FI_UINT64, FI_ATOMIC_WRITE, target);
    cr_assert_eq(sz, 0);

    /* spin until the write completes or an error entry is available */
    while ((ret = fi_cq_read(msg_cq[0], &cqe, 1)) == -FI_EAGAIN)
        pthread_yield();

    if (ret == -FI_EAVAIL) {
        fi_cq_readerr(msg_cq[0], &err_cqe, 0);
        dbg_printf("fi_cq_readerr err:%d\n", err_cqe.err);
    }

    /* the write may only succeed if both sides' caps permit atomic writes */
    if (write_allowed(FI_ATOMIC, fi[0]->caps, fi[1]->caps)) {
        cr_assert(ret == 1, "fi_atomic failed caps:0x%lx rcaps:0x%lx",
                  fi[0]->caps, fi[1]->caps);
    } else {
        cr_assert(err_cqe.err == FI_EOPNOTSUPP,
                  "fi_atomic should fail caps:0x%lx rcaps:0x%lx",
                  fi[0]->caps, fi[1]->caps);
    }

    /* u64: atomic read fetches the remote value into the local 'source' buffer */
    operand = SOURCE_DATA;
    *((uint64_t *)source) = FETCH_SOURCE_DATA;
    *((uint64_t *)target) = TARGET_DATA;
    sz = fi_fetch_atomic(ep[0], &operand, 1, NULL,
                         source, loc_mr[0],
                         gni_addr[1], (uint64_t)target, mr_key[1],
                         FI_UINT64, FI_ATOMIC_READ, target);
    cr_assert_eq(sz, 0);

    while ((ret = fi_cq_read(msg_cq[0], &cqe, 1)) == -FI_EAGAIN)
        pthread_yield();

    if (ret == -FI_EAVAIL) {
        fi_cq_readerr(msg_cq[0], &err_cqe, 0);
        dbg_printf("fi_cq_readerr err:%d\n", err_cqe.err);
    }

    /* likewise, the fetch may only succeed if atomic reads are permitted */
    if (read_allowed(FI_ATOMIC, fi[0]->caps, fi[1]->caps)) {
        cr_assert(ret == 1, "fi_fetch_atomic failed caps:0x%lx rcaps:0x%lx",
                  fi[0]->caps, fi[1]->caps);
    } else {
        cr_assert(err_cqe.err == FI_EOPNOTSUPP,
                  "fi_fetch_atomic should fail caps:0x%lx rcaps:0x%lx",
                  fi[0]->caps, fi[1]->caps);
    }
}
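
/*
 * The same probe pattern extends naturally to compare-and-swap.  Minimal
 * sketch, assuming the fixtures used above (source, target, loc_mr, gni_addr,
 * mr_key, msg_cq, SOURCE_DATA, TARGET_DATA); the local 'operand', 'compare'
 * and 'result' variables are hypothetical and only illustrate the extra
 * buffers fi_compare_atomic() requires.  It is not part of the test suite.
 */
void do_atomic_cswap_sketch(void)
{
    int ret;
    ssize_t sz;
    uint64_t operand = SOURCE_DATA;   /* value to install */
    uint64_t compare = TARGET_DATA;   /* expected current remote value */
    uint64_t result = 0;              /* receives the old remote value */
    struct fi_cq_tagged_entry cqe;

    *((uint64_t *)target) = TARGET_DATA;
    sz = fi_compare_atomic(ep[0], &operand, 1, NULL,
                           &compare, NULL,
                           &result, NULL,
                           gni_addr[1], (uint64_t)target, mr_key[1],
                           FI_UINT64, FI_CSWAP, target);
    cr_assert_eq(sz, 0);

    /* spin on the bound CQ until the swap completes */
    while ((ret = fi_cq_read(msg_cq[0], &cqe, 1)) == -FI_EAGAIN)
        pthread_yield();
    cr_assert_eq(ret, 1);

    cr_assert(result == TARGET_DATA, "old value mismatch");
    cr_assert(*((uint64_t *)target) == SOURCE_DATA, "swap did not land");
}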