void api_cntr_atomic(void) { ssize_t sz; /* u64 */ *((uint64_t *)source) = SOURCE_DATA; *((uint64_t *)target) = TARGET_DATA; sz = fi_atomic(ep[0], source, 1, loc_mr[0], gni_addr[1], (uint64_t)target, mr_key[1], FI_UINT64, FI_ATOMIC_WRITE, target); api_cntr_write_allowed(sz, cntr_bind_flags, "fi_atomic"); *((uint64_t *)source) = SOURCE_DATA; *((uint64_t *)target) = TARGET_DATA; sz = fi_fetch_atomic(ep[0], source, 1, loc_mr[0], source, loc_mr[0], gni_addr[1], (uint64_t)target, mr_key[1], FI_UINT64, FI_ATOMIC_WRITE, target); api_cntr_read_allowed(sz, cntr_bind_flags, "fi_atomic"); sz = fi_inject_atomic(ep[0], source, 1, gni_addr[1], (uint64_t)target, mr_key[1], FI_INT64, FI_MIN); cr_assert_eq(sz, 0); }
void sep_atomic_rw(int index) { int ret; ssize_t sz; struct fi_cq_tagged_entry cqe = { (void *) -1, UINT_MAX, UINT_MAX, (void *) -1, UINT_MAX, UINT_MAX }; uint64_t operand = SOURCE_DATA; uint64_t w[NUMEPS] = {0}, r[NUMEPS] = {0}, w_e[NUMEPS] = {0}; uint64_t r_e[NUMEPS] = {0}; /* u64 */ *((uint64_t *)source) = FETCH_SOURCE_DATA; *((uint64_t *)target) = TARGET_DATA; sz = fi_fetch_atomic(tx_ep[0][index], &operand, 1, NULL, source, loc_mr[0], rx_addr[index], (uint64_t)target, mr_key[1], FI_UINT64, FI_SUM, target); cr_assert_eq(sz, 0); while ((ret = fi_cq_read(tx_cq[0][index], &cqe, 1)) == -FI_EAGAIN) { pthread_yield(); } cr_assert_eq(ret, 1); sep_check_tcqe(&cqe, target, FI_ATOMIC | FI_READ, 0); r[0] = 1; sep_check_cntrs(w, r, w_e, r_e); ret = *((uint64_t *)target) == (SOURCE_DATA + TARGET_DATA); cr_assert(ret, "Data mismatch"); ret = *((uint64_t *)source) == TARGET_DATA; cr_assert(ret, "Fetch data mismatch"); }
/*
 * Initiate an atomic fetch-op (read-modify-write) on a remote address
 * via libfabric's fi_fetch_atomic().  The previous remote value is
 * delivered into local_address; cbfunc/cbcontext/cbdata are invoked on
 * completion via the allocated completion object.
 *
 * Returns OPAL_SUCCESS on successful initiation,
 * OPAL_ERR_OUT_OF_RESOURCE when the provider reports -FI_EAGAIN, and
 * aborts the process on any other libfabric error.
 */
int mca_btl_ofi_afop (struct mca_btl_base_module_t *btl, struct mca_btl_base_endpoint_t *endpoint, void *local_address, uint64_t remote_address, mca_btl_base_registration_handle_t *local_handle, mca_btl_base_registration_handle_t *remote_handle, mca_btl_base_atomic_op_t op, uint64_t operand, int flags, int order, mca_btl_base_rdma_completion_fn_t cbfunc, void *cbcontext, void *cbdata) { int rc; int fi_datatype = FI_UINT64; int fi_op; mca_btl_ofi_module_t *ofi_btl = (mca_btl_ofi_module_t *) btl; mca_btl_ofi_endpoint_t *btl_endpoint = (mca_btl_ofi_endpoint_t*) endpoint; mca_btl_ofi_completion_t *comp = NULL; mca_btl_ofi_context_t *ofi_context; ofi_context = get_ofi_context(ofi_btl); /* 64-bit unless the caller explicitly asked for a 32-bit op */ if (flags & MCA_BTL_ATOMIC_FLAG_32BIT) { fi_datatype = FI_UINT32; } fi_op = to_fi_op(op); comp = mca_btl_ofi_completion_alloc(btl, endpoint, ofi_context, local_address, local_handle, cbfunc, cbcontext, cbdata, MCA_BTL_OFI_TYPE_AFOP); /* copy the operand because it might get freed from upper layer */ comp->operand = (uint64_t) operand; /* convert to an offset relative to the registered region's base */ remote_address = (remote_address - (uint64_t) remote_handle->base_addr); rc = fi_fetch_atomic(ofi_context->tx_ctx, (void*) &comp->operand, 1, NULL, /* operand */ local_address, local_handle->desc, /* results */ btl_endpoint->peer_addr, /* remote addr */ remote_address, remote_handle->rkey, /* remote buffer */ fi_datatype, fi_op, comp); /* NOTE(review): comp allocated above is not released on this path — verify the completion object is recycled elsewhere or it leaks on every -FI_EAGAIN */ if (rc == -FI_EAGAIN) { return OPAL_ERR_OUT_OF_RESOURCE; } else if (rc < 0) { BTL_ERROR(("fi_fetch_atomic failed with rc=%d (%s)", rc, fi_strerror(-rc))); MCA_BTL_OFI_ABORT(); } MCA_BTL_OFI_NUM_RDMA_INC(ofi_btl); return OPAL_SUCCESS; }
static int execute_fetch_atomic_op(enum fi_op op) { int ret; ret = fi_fetch_atomic(ep, buf, 1, fi_mr_desc(mr), result, fi_mr_desc(mr_result), remote_fi_addr, remote.addr, remote.key, datatype, op, &fi_ctx_atomic); if (ret) { FT_PRINTERR("fi_fetch_atomic", ret); } else { ret = wait_for_completion(scq, 1); } return ret; }
static int execute_fetch_atomic_op(enum fi_op op) { int ret; ret = fi_fetch_atomic(ep, buf, 1, fi_mr_desc(mr), result, fi_mr_desc(mr_result), remote_fi_addr, remote.addr, remote.key, datatype, op, &fi_ctx_atomic); if (ret) { fprintf(stderr, "fi_fetch_atomic %d (%s)\n", ret, fi_strerror(-ret)); } else { ret = wait_for_completion(scq, 1); } return ret; }
void sep_invalid_fetch_atomic(enum fi_datatype dt, enum fi_op op) { ssize_t sz; size_t count; uint64_t operand; if (!supported_fetch_atomic_ops[op][dt]) { sz = fi_fetch_atomic(tx_ep[0][0], &operand, 1, NULL, source, loc_mr[0], rx_addr[0], (uint64_t)target, mr_key[1], dt, op, target); cr_assert(sz == -FI_ENOENT); sz = fi_fetch_atomicvalid(tx_ep[0][0], dt, op, &count); cr_assert(sz == -FI_ENOENT, "fi_atomicvalid() succeeded\n"); } else { sz = fi_fetch_atomicvalid(tx_ep[0][0], dt, op, &count); cr_assert(!sz, "fi_atomicvalid() failed\n"); cr_assert(count == 1, "fi_atomicvalid(): bad count\n"); } }
void do_atomic_write_fetch(void) { int ret; ssize_t sz; uint64_t operand; struct fi_cq_tagged_entry cqe; struct fi_cq_err_entry err_cqe; /* u64 */ *((uint64_t *)source) = SOURCE_DATA; *((uint64_t *)target) = TARGET_DATA; sz = fi_atomic(ep[0], source, 1, loc_mr[0], gni_addr[1], (uint64_t)target, mr_key[1], FI_UINT64, FI_ATOMIC_WRITE, target); cr_assert_eq(sz, 0); while ((ret = fi_cq_read(msg_cq[0], &cqe, 1)) == -FI_EAGAIN) pthread_yield(); if (ret == -FI_EAVAIL) { fi_cq_readerr(msg_cq[0], &err_cqe, 0); dbg_printf("fi_cq_readerr err:%d\n", err_cqe.err); } if (write_allowed(FI_ATOMIC, fi[0]->caps, fi[1]->caps)) { cr_assert(ret == 1, "fi_atomic failed caps:0x%lx rcaps:0x%lx", fi[0]->caps, fi[1]->caps); } else { cr_assert(err_cqe.err == FI_EOPNOTSUPP, "fi_atomic should fail caps:0x%lx rcaps:0x%lx", fi[0]->caps, fi[1]->caps); } /* u64 */ operand = SOURCE_DATA; *((uint64_t *)source) = FETCH_SOURCE_DATA; *((uint64_t *)target) = TARGET_DATA; sz = fi_fetch_atomic(ep[0], &operand, 1, NULL, source, loc_mr[0], gni_addr[1], (uint64_t)target, mr_key[1], FI_UINT64, FI_ATOMIC_READ, target); cr_assert_eq(sz, 0); while ((ret = fi_cq_read(msg_cq[0], &cqe, 1)) == -FI_EAGAIN) pthread_yield(); if (ret == -FI_EAVAIL) { fi_cq_readerr(msg_cq[0], &err_cqe, 0); dbg_printf("fi_cq_readerr err:%d\n", err_cqe.err); } if (read_allowed(FI_ATOMIC, fi[0]->caps, fi[1]->caps)) { cr_assert(ret == 1, "fi_fetch_atomic failed caps:0x%lx rcaps:0x%lx", fi[0]->caps, fi[1]->caps); } else { cr_assert(err_cqe.err == FI_EOPNOTSUPP, "fi_fetch_atomic should fail caps:0x%lx rcaps:0x%lx", fi[0]->caps, fi[1]->caps); } }