/*
 * Build the logical-to-physical rank mapping for a logically-addressed NI.
 *
 * For each proc, creates the peer/endpoint pair and fills one slot of a
 * temporary ptl_process_t table, which is then handed to PtlSetMap().
 *
 * portals4_btl - module whose NI receives the map
 * nprocs       - number of entries in procs/endpoint
 * procs        - peer process descriptors
 * endpoint     - output array, one endpoint per proc (filled in-place)
 *
 * Returns OPAL_SUCCESS, OPAL_ERR_OUT_OF_RESOURCE, OPAL_ERR_NOT_SUPPORTED,
 * or the error from create_peer_and_endpoint()/PtlSetMap().
 *
 * BUG FIX: the original leaked `maptable` on every error return (the
 * heterogeneous-arch check, a create_peer_and_endpoint failure, and a
 * PtlSetMap failure); only the success path freed it.
 */
static int create_maptable(struct mca_btl_portals4_module_t *portals4_btl,
                           size_t nprocs,
                           opal_proc_t **procs,
                           mca_btl_base_endpoint_t **endpoint)
{
    int ret;
    ptl_process_t *maptable;

    maptable = malloc(sizeof(ptl_process_t) * nprocs);
    if (NULL == maptable) {
        opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: malloc failed\n", __FILE__, __LINE__);
        return OPAL_ERR_OUT_OF_RESOURCE;
    }

    for (uint32_t i = 0 ; i < nprocs ; i++) {
        struct opal_proc_t *curr_proc;

        curr_proc = procs[i];

        /* portals doesn't support heterogeneous yet... */
        if (opal_proc_local_get()->proc_arch != curr_proc->proc_arch) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                                "Portals 4 BTL does not support heterogeneous operations.");
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                                "Proc %s architecture %x, mine %x.",
                                OPAL_NAME_PRINT(curr_proc->proc_name),
                                curr_proc->proc_arch, opal_proc_local_get()->proc_arch);
            free(maptable);                        /* BUG FIX: was leaked */
            return OPAL_ERR_NOT_SUPPORTED;
        }

        ret = create_peer_and_endpoint(portals4_btl->interface_num, curr_proc,
                                       &maptable[i], &endpoint[i]);
        if (OPAL_SUCCESS != ret) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                                "%s:%d: create_maptable::create_peer_and_endpoint failed: %d\n",
                                __FILE__, __LINE__, ret);
            free(maptable);                        /* BUG FIX: was leaked */
            return ret;
        }
    }

    ret = PtlSetMap(portals4_btl->portals_ni_h, nprocs, maptable);
    if (OPAL_SUCCESS != ret) {
        opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: logical mapping failed: %d\n",
                            __FILE__, __LINE__, ret);
        free(maptable);                            /* BUG FIX: was leaked */
        return ret;
    }
    opal_output_verbose(90, opal_btl_base_framework.framework_output,
                        "logical mapping OK\n");

    /* PtlSetMap copies the table; it is no longer needed */
    free(maptable);
    return OPAL_SUCCESS;
}
/*
 * Hot-potato latency benchmark over a logically-addressed Portals 4 NI.
 *
 * A single counter value (the "potato") is passed around the ring of
 * ranks LOOPS times; rank 0 launches it, times the full circulation, and
 * reports average per-hop latency.  ENTRY_T/HANDLE_T/APPEND/UNLINK and
 * NI_TYPE/OPTIONS are build-time macros selecting the matching or
 * non-matching interface (INTERFACE).
 */
int main(int argc, char *argv[])
{
    ptl_handle_ni_t ni_logical;
    ptl_pt_index_t logical_pt_index;
    ptl_process_t myself;
    struct timeval start, stop;
    int potato = 0;                       /* the payload passed around the ring */
    ENTRY_T potato_catcher;               /* LE/ME that receives the potato */
    HANDLE_T potato_catcher_handle;
    ptl_md_t potato_launcher;             /* MD used to Put the potato onward */
    ptl_handle_md_t potato_launcher_handle;
    int num_procs;

    CHECK_RETURNVAL(PtlInit());
    CHECK_RETURNVAL(libtest_init());
    num_procs = libtest_get_size();
    /* under "make check" keep the job small; 77 = automake SKIP code */
    if (NULL != getenv("MAKELEVEL") && num_procs > 2) {
        return 77;
    }
    CHECK_RETURNVAL(PtlNIInit
                        (PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL, PTL_PID_ANY,
                         NULL, NULL, &ni_logical));
    CHECK_RETURNVAL(PtlSetMap(ni_logical, num_procs,
                              libtest_get_mapping(ni_logical)));
    CHECK_RETURNVAL(PtlGetId(ni_logical, &myself));
    CHECK_RETURNVAL(PtlPTAlloc
                        (ni_logical, 0, PTL_EQ_NONE, PTL_PT_ANY,
                         &logical_pt_index));
    assert(logical_pt_index == 0);
    /* Now do the initial setup on ni_logical */
    potato_catcher.start = &potato;
    potato_catcher.length = sizeof(potato);
    potato_catcher.uid = PTL_UID_ANY;
    potato_catcher.options = OPTIONS;
#if INTERFACE == 1
    potato_catcher.match_id.rank = PTL_RANK_ANY;
    potato_catcher.match_bits = 1;
    potato_catcher.ignore_bits = ~potato_catcher.match_bits;
#endif
    CHECK_RETURNVAL(PtlCTAlloc(ni_logical, &potato_catcher.ct_handle));
    CHECK_RETURNVAL(APPEND
                        (ni_logical, logical_pt_index, &potato_catcher,
                         PTL_PRIORITY_LIST, NULL, &potato_catcher_handle));
    /* Now do a barrier (on ni_physical) to make sure that everyone has their
     * logical interface set up */
    libtest_barrier();
    /* now I can communicate between ranks with ni_logical */

    /* set up the potato launcher */
    potato_launcher.start = &potato;
    potato_launcher.length = sizeof(potato);
    potato_launcher.options = PTL_MD_EVENT_CT_ACK | PTL_MD_EVENT_CT_SEND;
    potato_launcher.eq_handle = PTL_EQ_NONE;    // i.e. don't queue send events
    CHECK_RETURNVAL(PtlCTAlloc(ni_logical, &potato_launcher.ct_handle));
    CHECK_RETURNVAL(PtlMDBind
                        (ni_logical, &potato_launcher, &potato_launcher_handle));

    /* rank 0 starts the potato going */
    if (myself.rank == 0) {
        ptl_process_t nextrank;
        nextrank.rank = myself.rank + 1;
        /* wrap to rank 0 past the last rank (multiply by 0 or 1) */
        nextrank.rank *= (nextrank.rank <= num_procs - 1);
        gettimeofday(&start, NULL);
        /* only request an ACK on the final circulation (here: LOOPS == 1) */
        CHECK_RETURNVAL(PtlPut(potato_launcher_handle, 0, potato_launcher.length,
                               (LOOPS == 1) ? PTL_OC_ACK_REQ : PTL_NO_ACK_REQ,
                               nextrank, logical_pt_index, 1, 0, NULL, 1));
    }

    {   /* the potato-passing loop */
        size_t waitfor;
        ptl_ct_event_t ctc;
        ptl_process_t nextrank;
        nextrank.rank = myself.rank + 1;
        nextrank.rank *= (nextrank.rank <= num_procs - 1);
        for (waitfor = 1; waitfor <= LOOPS; ++waitfor) {
            /* block until this rank has received the potato `waitfor` times */
            CHECK_RETURNVAL(PtlCTWait(potato_catcher.ct_handle, waitfor, &ctc));  // wait for potato
            assert(ctc.failure == 0);
            assert(ctc.success == waitfor);
            /* I have the potato! */
            ++potato;
            if (potato < LOOPS * (num_procs)) { // otherwise, the recipient may have exited
                /* Bomb's away! */
                if (myself.rank == 0) {
                    /* rank 0 already sent once before the loop, hence the
                     * LOOPS - 1 ACK condition here vs LOOPS below */
                    CHECK_RETURNVAL(PtlPut(potato_launcher_handle, 0,
                                           potato_launcher.length,
                                           (waitfor == (LOOPS - 1)) ? PTL_OC_ACK_REQ : PTL_NO_ACK_REQ,
                                           nextrank, logical_pt_index, 3, 0, NULL, 2));
                } else {
                    CHECK_RETURNVAL(PtlPut(potato_launcher_handle, 0,
                                           potato_launcher.length,
                                           (waitfor == LOOPS) ? PTL_OC_ACK_REQ : PTL_NO_ACK_REQ,
                                           nextrank, logical_pt_index, 3, 0, NULL, 2));
                }
            }
        }
        // make sure that last send completed before exiting
        /* NOTE(review): threshold LOOPS+1 counts LOOPS sends plus the final
         * ACK folded into the same counter — confirm against the CT_SEND /
         * CT_ACK accounting for ranks that skip the last Put */
        CHECK_RETURNVAL(PtlCTWait(potato_launcher.ct_handle, LOOPS+1, &ctc));
        assert(ctc.failure == 0);
    }

    if (myself.rank == 0) {
        double accumulate = 0.0;
        gettimeofday(&stop, NULL);
        accumulate =
            (stop.tv_sec + stop.tv_usec * 1e-6) -
            (start.tv_sec + start.tv_usec * 1e-6);
        /* calculate the average time waiting */
        printf("Total time: %g secs\n", accumulate);
        accumulate /= LOOPS;
        printf("Average time around the loop: %g microseconds\n",
               accumulate * 1e6);
        accumulate /= num_procs;
        printf("Average catch-to-toss latency: %g microseconds\n",
               accumulate * 1e6);
    }

    /* cleanup */
    CHECK_RETURNVAL(PtlMDRelease(potato_launcher_handle));
    CHECK_RETURNVAL(PtlCTFree(potato_launcher.ct_handle));
    CHECK_RETURNVAL(UNLINK(potato_catcher_handle));
    CHECK_RETURNVAL(PtlCTFree(potato_catcher.ct_handle));

    /* major cleanup */
    CHECK_RETURNVAL(PtlPTFree(ni_logical, logical_pt_index));
    CHECK_RETURNVAL(PtlNIFini(ni_logical));
    CHECK_RETURNVAL(libtest_fini());
    PtlFini();
    return 0;
}
/*
 * Flow-control stress test (EQ variant): rank 0 exposes a zero-length
 * entry on an under-sized, flow-controlled PT; all other ranks flood it
 * with Puts until the PT is disabled, then everyone verifies they saw
 * the PTL_EVENT_PT_DISABLED / PTL_NI_PT_DISABLED notifications.
 * Exit codes: 0 = pass, 1 = fail, 77 = skipped (automake convention).
 */
int main(int argc, char *argv[])
{
    ptl_handle_ni_t ni_handle;
    ptl_process_t *procs;
    int rank;
    ptl_pt_index_t pt_index, signal_pt_index;
    HANDLE_T signal_e_handle;
    HANDLE_T signal_e2_handle;
    int num_procs;
    ptl_handle_eq_t eq_handle;
    ptl_handle_ct_t ct_handle;
    ptl_handle_md_t md_handle;
    ptl_ni_limits_t limits_reqd, limits_actual;
    ENTRY_T value_e;

    /* request generous limits so the flood below exercises flow control,
     * not an unrelated resource cap */
    limits_reqd.max_entries = 1024;
    limits_reqd.max_unexpected_headers = ITERS*2;
    limits_reqd.max_mds = 1024;
    limits_reqd.max_eqs = 1024;
    limits_reqd.max_cts = 1024;
    limits_reqd.max_pt_index = 64;
    limits_reqd.max_iovecs = 1024;
    limits_reqd.max_list_size = 1024;
    limits_reqd.max_triggered_ops = 1024;
    limits_reqd.max_msg_size = 1048576;
    limits_reqd.max_atomic_size = 1048576;
    limits_reqd.max_fetch_atomic_size = 1048576;
    limits_reqd.max_waw_ordered_size = 1048576;
    limits_reqd.max_war_ordered_size = 1048576;
    limits_reqd.max_volatile_size = 1048576;
    limits_reqd.features = 0;

    CHECK_RETURNVAL(PtlInit());
    CHECK_RETURNVAL(libtest_init());
    rank = libtest_get_rank();
    num_procs = libtest_get_size();
    if (num_procs < 2) {
        fprintf(stderr, "test_flowctl_noeq requires at least two processes\n");
        return 77;
    }

    /* scale the iteration count with job size */
    int iters;
    if (num_procs < ITERS)
        iters = ITERS*2+1;
    else
        iters = ITERS;

    CHECK_RETURNVAL(PtlNIInit(PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL,
                              PTL_PID_ANY, &limits_reqd, &limits_actual,
                              &ni_handle));
    procs = libtest_get_mapping(ni_handle);
    CHECK_RETURNVAL(PtlSetMap(ni_handle, num_procs, procs));

    if (0 == rank) {
        /* create data PT space */
        CHECK_RETURNVAL(PtlEQAlloc(ni_handle, (num_procs - 1) * iters + 64,
                                   &eq_handle));
        CHECK_RETURNVAL(PtlPTAlloc(ni_handle, PTL_PT_FLOWCTRL, eq_handle, 5,
                                   &pt_index));

        /* create signal ME */
        CHECK_RETURNVAL(PtlCTAlloc(ni_handle, &ct_handle));
        CHECK_RETURNVAL(PtlPTAlloc(ni_handle, 1, eq_handle, 6,
                                   &signal_pt_index));
        value_e.start = NULL;          /* zero-length: count arrivals only */
        value_e.length = 0;
        value_e.ct_handle = ct_handle;
        value_e.uid = PTL_UID_ANY;
        value_e.options = OPTIONS | PTL_LE_EVENT_CT_COMM;
#if INTERFACE == 1
        value_e.match_id.rank = PTL_RANK_ANY;
        value_e.match_bits = 0;
        value_e.ignore_bits = 0;
#endif
        CHECK_RETURNVAL(APPEND(ni_handle, 5, &value_e, PTL_OVERFLOW_LIST,
                               NULL, &signal_e_handle));
    } else {
        ptl_md_t md;

        /* 16 extra just in case... */
        CHECK_RETURNVAL(PtlEQAlloc(ni_handle, iters*2 + 16, &eq_handle));
        md.start = NULL;               /* zero-length source MD for the flood */
        md.length = 0;
        md.options = 0;
        md.eq_handle = eq_handle;
        md.ct_handle = PTL_CT_NONE;
        CHECK_RETURNVAL(PtlMDBind(ni_handle, &md, &md_handle));
    }

    fprintf(stderr,"at barrier \n");
    libtest_barrier();

    if (0 == rank) {
        ptl_ct_event_t ct;
        ptl_event_t ev;
        int ret, count = 0, saw_flowctl = 0;

        fprintf(stderr,"begin ctwait \n");
        /* wait for signal counts */
        CHECK_RETURNVAL(PtlCTWait(ct_handle, iters / 2 , &ct));
        if (ct.success != iters / 2 || ct.failure != 0) {
            return 1;
        }
        fprintf(stderr,"done CT wait \n");
        /* wait for event entries; busy-poll until the PT gets disabled */
        while (1) {
            ret = PtlEQGet(eq_handle, &ev);
            if (PTL_OK == ret) {
                count++;
                fprintf(stderr, "found EQ value \n");
            } else if (ret == PTL_EQ_EMPTY) {
                continue;
            } else {
                fprintf(stderr, "0: Unexpected return code from EQGet: %d\n",
                        ret);
                return 1;
            }
            if (ev.type == PTL_EVENT_PT_DISABLED) {
                saw_flowctl++;
                break;
            }
        }
        fprintf(stderr, "0: Saw %d flowctl\n", saw_flowctl);
        if (saw_flowctl == 0) {
            return 1;
        }
        /* Now clear out all of the unexpected messages so we can clean up
         * everything: append to the priority list, then drain the EQ */
        CHECK_RETURNVAL(APPEND(ni_handle, 5, &value_e, PTL_PRIORITY_LIST,
                               NULL, &signal_e2_handle));
        ret = PTL_OK;
        while (ret != PTL_EQ_EMPTY)
            ret = PtlEQGet(eq_handle, &ev);
    } else {
        ptl_process_t target;
        ptl_event_t ev;
        int ret, count = 0, fails = 0;
        int i;

        target.rank = 0;
        printf("beginning puts \n");
        for (i = 0 ; i < iters ; ++i) {
            CHECK_RETURNVAL(PtlPut(md_handle, 0, 0, PTL_ACK_REQ, target, 5, 0,
                                   0, NULL, 0));
            usleep(100);   /* pace the flood slightly */
        }
        /* every Put must resolve to either an ACK or a PT_DISABLED fail */
        while (count < iters) {
            ret = PtlEQGet(eq_handle, &ev);
            if (PTL_EQ_EMPTY == ret) {
                continue;
            } else if (PTL_OK != ret) {
                fprintf(stderr, "%d: PtlEQGet returned %d\n", rank, ret);
                return 1;
            }
            if (ev.ni_fail_type == PTL_NI_OK) {
                if (ev.type == PTL_EVENT_SEND) {
                    continue;
                } else if (ev.type == PTL_EVENT_ACK) {
                    count++;
                } else {
                    fprintf(stderr, "%d: Unexpected event type %d\n", rank,
                            ev.type);
                }
            } else if (ev.ni_fail_type == PTL_NI_PT_DISABLED) {
                count++;
                fails++;
            /* NOTE(review): PTL_EQ_EMPTY / PTL_EQ_DROPPED are PtlEQGet
             * return codes, not ni_fail_type values — these two branches
             * look unreachable; confirm against the Portals 4 spec */
            } else if (ev.ni_fail_type == PTL_EQ_EMPTY) {
                continue;
            } else if (ev.ni_fail_type == PTL_EQ_DROPPED) {
                continue;
            } else {
                fprintf(stderr, "%d: Unexpected fail type: %d\n", rank,
                        ev.ni_fail_type);
                return 1;
            }
        }
        fprintf(stderr, "%d: Saw %d of %d ACKs as fails\n", rank, fails, count);
    }

    fprintf(stderr,"at final barrier \n");
    libtest_barrier();

    if (0 == rank) {
        CHECK_RETURNVAL(UNLINK(signal_e_handle));
        CHECK_RETURNVAL(UNLINK(signal_e2_handle));
        CHECK_RETURNVAL(PtlPTFree(ni_handle, signal_pt_index));
        CHECK_RETURNVAL(PtlCTFree(ct_handle));
        CHECK_RETURNVAL(PtlPTFree(ni_handle, pt_index));
        CHECK_RETURNVAL(PtlEQFree(eq_handle));
    } else {
        CHECK_RETURNVAL(PtlMDRelease(md_handle));
        CHECK_RETURNVAL(PtlEQFree(eq_handle));
    }

    fprintf(stderr,"final cleanup \n");
    CHECK_RETURNVAL(PtlNIFini(ni_handle));
    CHECK_RETURNVAL(libtest_fini());
    PtlFini();
    return 0;
}
int main(int argc, char *argv[]) { ptl_handle_ni_t ni_handle; ptl_process_t *procs; int rank; ptl_pt_index_t pt_index, signal_pt_index; HANDLE_T value_e_handle, signal_e_handle; int num_procs; ptl_handle_eq_t eq_handle; ptl_handle_ct_t ct_handle; ptl_handle_md_t md_handle; CHECK_RETURNVAL(PtlInit()); CHECK_RETURNVAL(libtest_init()); rank = libtest_get_rank(); num_procs = libtest_get_size(); if (num_procs < 2) { fprintf(stderr, "test_flowctl_noeq requires at least two processes\n"); return 77; } CHECK_RETURNVAL(PtlNIInit(PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL, PTL_PID_ANY, NULL, NULL, &ni_handle)); procs = libtest_get_mapping(ni_handle); CHECK_RETURNVAL(PtlSetMap(ni_handle, num_procs, procs)); if (0 == rank) { ENTRY_T value_e; /* create data ME */ CHECK_RETURNVAL(PtlEQAlloc(ni_handle, (num_procs - 1) * ITERS / 2, &eq_handle)); CHECK_RETURNVAL(PtlPTAlloc(ni_handle, PTL_PT_FLOWCTRL, eq_handle, 5, &pt_index)); value_e.start = NULL; value_e.length = 0; value_e.ct_handle = PTL_CT_NONE; value_e.uid = PTL_UID_ANY; value_e.options = OPTIONS; #if INTERFACE == 1 value_e.match_id.rank = PTL_RANK_ANY; value_e.match_bits = 0; value_e.ignore_bits = 0; #endif CHECK_RETURNVAL(APPEND(ni_handle, 5, &value_e, PTL_PRIORITY_LIST, NULL, &value_e_handle)); /* create signal ME */ CHECK_RETURNVAL(PtlCTAlloc(ni_handle, &ct_handle)); CHECK_RETURNVAL(PtlPTAlloc(ni_handle, 0, PTL_EQ_NONE, 6, &signal_pt_index)); value_e.start = NULL; value_e.length = 0; value_e.ct_handle = ct_handle; value_e.uid = PTL_UID_ANY; value_e.options = OPTIONS | PTL_LE_EVENT_SUCCESS_DISABLE | PTL_LE_EVENT_CT_COMM; #if INTERFACE == 1 value_e.match_id.rank = PTL_RANK_ANY; value_e.match_bits = 0; value_e.ignore_bits = 0; #endif CHECK_RETURNVAL(APPEND(ni_handle, 6, &value_e, PTL_PRIORITY_LIST, NULL, &signal_e_handle)); } else { ptl_md_t md; /* 16 extra just in case... 
*/ CHECK_RETURNVAL(PtlEQAlloc(ni_handle, ITERS * 2 + 16, &eq_handle)); md.start = NULL; md.length = 0; md.options = 0; md.eq_handle = eq_handle; md.ct_handle = PTL_CT_NONE; CHECK_RETURNVAL(PtlMDBind(ni_handle, &md, &md_handle)); } libtest_barrier(); if (0 == rank) { ptl_ct_event_t ct; ptl_event_t ev; int ret, count = 0, saw_dropped = 0, saw_flowctl = 0; /* wait for signal counts */ CHECK_RETURNVAL(PtlCTWait(ct_handle, num_procs - 1, &ct)); if (ct.success != num_procs - 1 || ct.failure != 0) { return 1; } /* wait for event entries */ while (count < ITERS * (num_procs - 1)) { ret = PtlEQWait(eq_handle, &ev); if (PTL_OK == ret) { ; } else if (PTL_EQ_DROPPED == ret) { saw_dropped++; if (ev.type == PTL_EVENT_PT_DISABLED){ saw_flowctl++; CHECK_RETURNVAL(PtlPTEnable(ni_handle, pt_index)); } break; } else { fprintf(stderr, "0: Unexpected return code from EQWait: %d\n", ret); return 1; } if (ev.type == PTL_EVENT_PT_DISABLED) { CHECK_RETURNVAL(PtlPTEnable(ni_handle, pt_index)); saw_flowctl++; } else { count++; } } fprintf(stderr, "0: Saw %d dropped, %d flowctl\n", saw_dropped, saw_flowctl); if (saw_flowctl == 0) { return 1; } } else { ptl_process_t target; ptl_event_t ev; int ret, count = 0, fails = 0; int i; int *fail_seen; fail_seen = malloc(sizeof(int) * ITERS); if (NULL == fail_seen) { fprintf(stderr, "%d: malloc failed\n", rank); return 1; } memset(fail_seen, 0, sizeof(int) * ITERS); target.rank = 0; for (i = 0 ; i < ITERS ; ++i) { CHECK_RETURNVAL(PtlPut(md_handle, 0, 0, PTL_ACK_REQ, target, 5, 0, 0, (void*)(size_t)i, 0)); usleep(100); } while (count < ITERS) { ret = PtlEQGet(eq_handle, &ev); if (PTL_EQ_EMPTY == ret) { continue; } else if (PTL_OK != ret) { fprintf(stderr, "%d: PtlEQGet returned %d\n", rank, ret); return 1; } if (ev.ni_fail_type == PTL_NI_OK) { if (ev.type == PTL_EVENT_SEND) { continue; } else if (ev.type == PTL_EVENT_ACK) { count++; } else { fprintf(stderr, "%d: Unexpected event type %d\n", rank, ev.type); } } else if (ev.ni_fail_type == 
PTL_NI_PT_DISABLED) { int iter = (size_t) ev.user_ptr; if (fail_seen[iter]++ > 0) { fprintf(stderr, "%d: Double report of PT_DISABLED for " "iteration %d\n", rank, iter); return 1; } count++; fails++; } else { fprintf(stderr, "%d: Unexpected fail type: %d\n", rank, ev.ni_fail_type); return 1; } } fprintf(stderr, "%d: Saw %d of %d events as fails\n", rank, fails, count); CHECK_RETURNVAL(PtlPut(md_handle, 0, 0, PTL_NO_ACK_REQ, target, 6, 0, 0, NULL, 0)); /* wait for the send event on the last put */ CHECK_RETURNVAL(PtlEQWait(eq_handle, &ev)); while (fails > 0) { CHECK_RETURNVAL(PtlPut(md_handle, 0, 0, PTL_ACK_REQ, target, 5, 0, 0, NULL, 0)); while (1) { ret = PtlEQWait(eq_handle, &ev); if (PTL_OK != ret) { fprintf(stderr, "%d: PtlEQWait returned %d\n", rank, ret); return 1; } if (ev.ni_fail_type == PTL_NI_OK) { if (ev.type == PTL_EVENT_SEND) { continue; } else if (ev.type == PTL_EVENT_ACK) { fails--; break; } else { fprintf(stderr, "%d: Unexpected event type %d\n", rank, ev.type); } } else if (ev.ni_fail_type == PTL_NI_PT_DISABLED) { break; } else { fprintf(stderr, "%d: Unexpected fail type: %d\n", rank, ev.ni_fail_type); return 1; } } } } libtest_barrier(); if (0 == rank) { CHECK_RETURNVAL(UNLINK(signal_e_handle)); CHECK_RETURNVAL(PtlPTFree(ni_handle, signal_pt_index)); CHECK_RETURNVAL(PtlCTFree(ct_handle)); CHECK_RETURNVAL(UNLINK(value_e_handle)); CHECK_RETURNVAL(PtlPTFree(ni_handle, pt_index)); CHECK_RETURNVAL(PtlEQFree(eq_handle)); } else { CHECK_RETURNVAL(PtlMDRelease(md_handle)); CHECK_RETURNVAL(PtlEQFree(eq_handle)); } CHECK_RETURNVAL(PtlNIFini(ni_handle)); CHECK_RETURNVAL(libtest_fini()); PtlFini(); return 0; }
int shmem_transport_startup(void) { int ret, i; ptl_process_t *desired = NULL; ptl_md_t md; ptl_le_t le; ptl_uid_t uid = PTL_UID_ANY; ptl_process_t my_id; #ifdef USE_ON_NODE_COMMS int num_on_node = 0; #endif #ifdef ENABLE_REMOTE_VIRTUAL_ADDRESSING /* Make sure the heap and data bases are actually symmetric */ { int peer; uint64_t bases[2]; peer = (shmem_internal_my_pe + 1) % shmem_internal_num_pes; ret = shmem_runtime_get(peer, "portals4-bases", bases, sizeof(uint64_t) * 2); if (0 != ret) { fprintf(stderr, "[%03d] ERROR: runtime_put failed: %d\n", shmem_internal_my_pe, ret); return ret; } if ((uintptr_t) shmem_internal_heap_base != bases[0]) { fprintf(stderr, "[%03d] ERROR: heap base address does not match with rank %03d and virtual addressing is enabled\n", shmem_internal_my_pe, peer); return -1; } if ((uintptr_t) shmem_internal_data_base != bases[1]) { fprintf(stderr, "[%03d] ERROR: data base address does not match with rank %03d and virtual addressing is enabled\n", shmem_internal_my_pe, peer); return -1; } } #endif desired = malloc(sizeof(ptl_process_t) * shmem_internal_num_pes); if (NULL == desired) { ret = 1; goto cleanup; } ret = PtlGetPhysId(shmem_transport_portals4_ni_h, &my_id); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlGetPhysId failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } for (i = 0 ; i < shmem_internal_num_pes; ++i) { ret = shmem_runtime_get(i, "portals4-procid", &desired[i], sizeof(ptl_process_t)); if (0 != ret) { fprintf(stderr, "[%03d] ERROR: runtime_get failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } #ifdef USE_ON_NODE_COMMS /* update the connectivity map... 
*/ if (desired[i].phys.nid == my_id.phys.nid) { SHMEM_SET_RANK_SAME_NODE(i, num_on_node++); if (num_on_node > 255) { fprintf(stderr, "[%03d] ERROR: Too many local ranks.\n", shmem_internal_my_pe); goto cleanup; } } #endif } ret = PtlSetMap(shmem_transport_portals4_ni_h, shmem_internal_num_pes, desired); if (PTL_OK != ret && PTL_IGNORED != ret) { fprintf(stderr, "[%03d] ERROR: PtlSetMap failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } ret = PtlGetUid(shmem_transport_portals4_ni_h, &uid); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlGetUid failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } shmem_transport_portals4_max_volatile_size = ni_limits.max_volatile_size; shmem_transport_portals4_max_atomic_size = ni_limits.max_atomic_size; shmem_transport_portals4_max_fetch_atomic_size = ni_limits.max_fetch_atomic_size; shmem_transport_portals4_max_msg_size = ni_limits.max_msg_size; if (shmem_transport_portals4_max_volatile_size < sizeof(long double complex)) { fprintf(stderr, "[%03d] ERROR: Max volatile size found to be %lu, too small to continue\n", shmem_internal_my_pe, (unsigned long) shmem_transport_portals4_max_volatile_size); goto cleanup; } if (shmem_transport_portals4_max_atomic_size < sizeof(long double complex)) { fprintf(stderr, "[%03d] ERROR: Max atomic size found to be %lu, too small to continue\n", shmem_internal_my_pe, (unsigned long) shmem_transport_portals4_max_atomic_size); goto cleanup; } if (shmem_transport_portals4_max_fetch_atomic_size < sizeof(long double complex)) { fprintf(stderr, "[%03d] ERROR: Max fetch atomic size found to be %lu, too small to continue\n", shmem_internal_my_pe, (unsigned long) shmem_transport_portals4_max_fetch_atomic_size); goto cleanup; } /* create portal table entries */ ret = PtlEQAlloc(shmem_transport_portals4_ni_h, shmem_transport_portals4_event_slots, &shmem_transport_portals4_eq_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlEQAlloc failed: %d\n", shmem_internal_my_pe, ret); goto 
cleanup; } #ifdef ENABLE_REMOTE_VIRTUAL_ADDRESSING ret = PtlPTAlloc(shmem_transport_portals4_ni_h, 0, shmem_transport_portals4_eq_h, shmem_transport_portals4_pt, &all_pt); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlPTAlloc of table entry failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } #else ret = PtlPTAlloc(shmem_transport_portals4_ni_h, 0, shmem_transport_portals4_eq_h, shmem_transport_portals4_data_pt, &data_pt); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlPTAlloc of data table failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } ret = PtlPTAlloc(shmem_transport_portals4_ni_h, 0, shmem_transport_portals4_eq_h, shmem_transport_portals4_heap_pt, &heap_pt); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlPTAlloc of heap table failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } #endif #ifndef ENABLE_HARD_POLLING /* target ct */ ret = PtlCTAlloc(shmem_transport_portals4_ni_h, &shmem_transport_portals4_target_ct_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlCTAlloc of target ct failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } le.ct_handle = shmem_transport_portals4_target_ct_h; #endif le.uid = uid; le.options = PTL_LE_OP_PUT | PTL_LE_OP_GET | PTL_LE_EVENT_LINK_DISABLE | PTL_LE_EVENT_SUCCESS_DISABLE; #if !defined(ENABLE_HARD_POLLING) le.options |= PTL_LE_EVENT_CT_COMM; #endif #ifdef ENABLE_REMOTE_VIRTUAL_ADDRESSING le.start = NULL; le.length = PTL_SIZE_MAX; ret = PtlLEAppend(shmem_transport_portals4_ni_h, shmem_transport_portals4_pt, &le, PTL_PRIORITY_LIST, NULL, &shmem_transport_portals4_le_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlLEAppend of all memory failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } #else /* Open LE to heap section */ le.start = shmem_internal_heap_base; le.length = shmem_internal_heap_length; ret = PtlLEAppend(shmem_transport_portals4_ni_h, shmem_transport_portals4_heap_pt, &le, PTL_PRIORITY_LIST, NULL, &shmem_transport_portals4_heap_le_h); if 
(PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlLEAppend of heap section failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } /* Open LE to data section */ le.start = shmem_internal_data_base; le.length = shmem_internal_data_length; ret = PtlLEAppend(shmem_transport_portals4_ni_h, shmem_transport_portals4_data_pt, &le, PTL_PRIORITY_LIST, NULL, &shmem_transport_portals4_data_le_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlLEAppend of data section failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } #endif /* Open MD to all memory */ ret = PtlCTAlloc(shmem_transport_portals4_ni_h, &shmem_transport_portals4_put_ct_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlCTAlloc of put ct failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } ret = PtlCTAlloc(shmem_transport_portals4_ni_h, &shmem_transport_portals4_get_ct_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlCTAlloc of get ct failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } md.start = 0; md.length = PTL_SIZE_MAX; md.options = PTL_MD_EVENT_CT_ACK; if (1 == PORTALS4_TOTAL_DATA_ORDERING) { md.options |= PTL_MD_UNORDERED; } md.eq_handle = shmem_transport_portals4_eq_h; md.ct_handle = shmem_transport_portals4_put_ct_h; ret = PtlMDBind(shmem_transport_portals4_ni_h, &md, &shmem_transport_portals4_put_event_md_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlMDBind of put MD failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } md.start = 0; md.length = PTL_SIZE_MAX; md.options = PTL_MD_EVENT_CT_ACK | PTL_MD_EVENT_SUCCESS_DISABLE | PTL_MD_VOLATILE; if (1 == PORTALS4_TOTAL_DATA_ORDERING) { md.options |= PTL_MD_UNORDERED; } md.eq_handle = shmem_transport_portals4_eq_h; md.ct_handle = shmem_transport_portals4_put_ct_h; ret = PtlMDBind(shmem_transport_portals4_ni_h, &md, &shmem_transport_portals4_put_volatile_md_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlMDBind of put MD failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } 
md.start = 0; md.length = PTL_SIZE_MAX; md.options = PTL_MD_EVENT_CT_ACK | PTL_MD_EVENT_SUCCESS_DISABLE; if (1 == PORTALS4_TOTAL_DATA_ORDERING) { md.options |= PTL_MD_UNORDERED; } md.eq_handle = shmem_transport_portals4_eq_h; md.ct_handle = shmem_transport_portals4_put_ct_h; ret = PtlMDBind(shmem_transport_portals4_ni_h, &md, &shmem_transport_portals4_put_cntr_md_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlMDBind of put cntr MD failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } md.start = 0; md.length = PTL_SIZE_MAX; md.options = PTL_MD_EVENT_CT_REPLY | PTL_MD_EVENT_SUCCESS_DISABLE; if (1 == PORTALS4_TOTAL_DATA_ORDERING) { md.options |= PTL_MD_UNORDERED; } md.eq_handle = shmem_transport_portals4_eq_h; md.ct_handle = shmem_transport_portals4_get_ct_h; ret = PtlMDBind(shmem_transport_portals4_ni_h, &md, &shmem_transport_portals4_get_md_h); if (PTL_OK != ret) { fprintf(stderr, "[%03d] ERROR: PtlMDBind of get MD failed: %d\n", shmem_internal_my_pe, ret); goto cleanup; } ret = 0; cleanup: if (NULL != desired) free(desired); return ret; }
int main(int argc, char *argv[]) { ptl_handle_ni_t ni_h; ptl_pt_index_t pt_index; uint64_t *buf; ENTRY_T entry; HANDLE_T entry_h; ptl_md_t md; ptl_handle_md_t md_h; int rank; int num_procs; int ret; ptl_process_t *procs; ptl_handle_eq_t eq_h; ptl_event_t ev; ptl_hdr_data_t rcvd = 0; ptl_hdr_data_t goal = 0; ptl_hdr_data_t hdr_data = 1; ptl_size_t offset = sizeof(uint64_t); uint32_t distance; int sends = 0; CHECK_RETURNVAL(PtlInit()); CHECK_RETURNVAL(libtest_init()); rank = libtest_get_rank(); num_procs = libtest_get_size(); /* This test only succeeds if we have more than one rank */ if (num_procs < 2) return 77; CHECK_RETURNVAL(PtlNIInit(PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL, PTL_PID_ANY, NULL, NULL, &ni_h)); procs = libtest_get_mapping(ni_h); CHECK_RETURNVAL(PtlSetMap(ni_h, num_procs, procs)); CHECK_RETURNVAL(PtlEQAlloc(ni_h, 1024, &eq_h)); CHECK_RETURNVAL(PtlPTAlloc(ni_h, 0, eq_h, 0, &pt_index)); assert(pt_index == 0); buf = malloc(sizeof(uint64_t) * num_procs); assert(NULL != buf); md.start = buf; md.length = sizeof(uint64_t) * num_procs; md.options = PTL_MD_UNORDERED; md.eq_handle = eq_h; md.ct_handle = PTL_CT_NONE; CHECK_RETURNVAL(PtlMDBind(ni_h, &md, &md_h)); entry.start = buf; entry.length = sizeof(uint64_t) * num_procs; entry.ct_handle = PTL_CT_NONE; entry.uid = PTL_UID_ANY; entry.options = OPTIONS; #if MATCHING == 1 entry.match_id.rank = PTL_RANK_ANY; entry.match_bits = 0; entry.ignore_bits = 0; entry.min_free = 0; #endif CHECK_RETURNVAL(APPEND(ni_h, pt_index, &entry, PTL_PRIORITY_LIST, NULL, &entry_h)); /* ensure ME is linked before the barrier */ CHECK_RETURNVAL(PtlEQWait(eq_h, &ev)); assert( ev.type == PTL_EVENT_LINK ); libtest_barrier(); /* Bruck's Concatenation Algorithm */ memcpy(buf, &rank, sizeof(uint64_t)); for (distance = 1; distance < num_procs; distance *= 2) { ptl_size_t to_xfer; int peer; ptl_process_t proc; if (rank >= distance) { peer = rank - distance; } else { peer = rank + (num_procs - distance); } to_xfer = sizeof(uint64_t) * 
MIN(distance, num_procs - distance); proc.rank = peer; CHECK_RETURNVAL(PtlPut(md_h, 0, to_xfer, PTL_NO_ACK_REQ, proc, 0, 0, offset, NULL, hdr_data)); sends += 1; /* wait for completion of the proper receive, and keep count of uncompleted sends. "rcvd" is an accumulator to deal with out-of-order receives, which are IDed by the hdr_data */ goal |= hdr_data; while ((rcvd & goal) != goal) { ret = PtlEQWait(eq_h, &ev); switch (ret) { case PTL_OK: if (ev.type == PTL_EVENT_SEND) { sends -= 1; } else { rcvd |= ev.hdr_data; assert(ev.type == PTL_EVENT_PUT); assert(ev.rlength == ev.mlength); assert((ev.rlength == to_xfer) || (ev.hdr_data != hdr_data)); } break; default: fprintf(stderr, "PtlEQWait failure: %d\n", ret); abort(); } } hdr_data <<= 1; offset += to_xfer; } /* wait for any SEND_END events not yet seen */ while (sends) { ret = PtlEQWait(eq_h, &ev); switch (ret) { case PTL_OK: assert( ev.type == PTL_EVENT_SEND ); sends -= 1; break; default: fprintf(stderr, "PtlEQWait failure: %d\n", ret); abort(); } } CHECK_RETURNVAL(UNLINK(entry_h)); CHECK_RETURNVAL(PtlMDRelease(md_h)); free(buf); libtest_barrier(); /* cleanup */ CHECK_RETURNVAL(PtlPTFree(ni_h, pt_index)); CHECK_RETURNVAL(PtlEQFree(eq_h)); CHECK_RETURNVAL(PtlNIFini(ni_h)); CHECK_RETURNVAL(libtest_fini()); PtlFini(); return 0; }
/*
 * Benchmark harness initialization: bring up Portals, install the
 * logical rank map, allocate the portal table indices used by the
 * benchmark (data transmit, int exchange, double exchange), and create
 * the persistent MDs/LEs/counters used for scalar exchanges.
 * Exits with -2 if fewer than two processes are available.
 *
 * BUG FIXES:
 *  - The "Need at least two processes" fprintf passed a stray _my_rank
 *    argument with no matching conversion specifier (a leftover from a
 *    lost "%d"); the argument is removed.
 *  - The three PtlPTAlloc return values were silently ignored while
 *    every other Portals call here is LIBTEST_CHECKed; they are now
 *    checked too.
 */
void Init(ArgStruct *p, int* pargc, char*** pargv)
{
    int rc;
    ptl_pt_index_t pt_handle;

    /* Initialize Portals and get some runtime info */
    rc= PtlInit();
    LIBTEST_CHECK(rc, "PtlInit");
    libtest_init();
    _my_rank= libtest_get_rank();
    _nprocs= libtest_get_size();

    if (_nprocs < 2)   {
        if (_my_rank == 0)   {
            fprintf(stderr, "Need at least two processes!\n");
        }
        exit(-2);
    }

    /*
    ** We need an ni to do barriers and allreduces on.
    ** It needs to be a non-matching ni.
    */
    rc= PtlNIInit(PTL_IFACE_DEFAULT, PTL_NI_NO_MATCHING | PTL_NI_LOGICAL,
                  PTL_PID_ANY, NULL, NULL, &ni_logical);
    LIBTEST_CHECK(rc, "PtlNIInit");

    rc= PtlSetMap(ni_logical, _nprocs, libtest_get_mapping(ni_logical));
    LIBTEST_CHECK(rc, "PtlSetMap");

    /* Initialize the barrier in the P4support library. */
    libtest_BarrierInit(ni_logical, _my_rank, _nprocs);

    /* Allocate a Portal Table Index entry for data transmission */
    rc= PtlPTAlloc(ni_logical, 0, PTL_EQ_NONE, PTL_XMIT_INDEX, &pt_handle);
    LIBTEST_CHECK(rc, "PtlPTAlloc (xmit)");

    /* Allocate a Portal Table Index entry to receive an int */
    rc= PtlPTAlloc(ni_logical, 0, PTL_EQ_NONE, PTL_SEND_INT_INDEX, &pt_handle);
    LIBTEST_CHECK(rc, "PtlPTAlloc (int)");

    /* Allocate a Portal Table Index entry to receive a double */
    rc= PtlPTAlloc(ni_logical, 0, PTL_EQ_NONE, PTL_SEND_DOUBLE_INDEX, &pt_handle);
    LIBTEST_CHECK(rc, "PtlPTAlloc (double)");

    /* Set up the MD to send a single int */
    send_int_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateMDCT(ni_logical, &send_int, sizeof(int),
                       &send_int_md_handle, &send_int_ct_handle);

    /* Set up the MD to send a single double */
    send_double_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateMDCT(ni_logical, &send_double, sizeof(double),
                       &send_double_md_handle, &send_double_ct_handle);

    /* Create a persistent LE to receive a single int */
    recv_int_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateLECT(ni_logical, PTL_SEND_INT_INDEX, &recv_int, sizeof(int),
                       &recv_int_le_handle, &recv_int_ct_handle);

    /* Create a persistent LE to receive a single double */
    recv_double_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateLECT(ni_logical, PTL_SEND_DOUBLE_INDEX, &recv_double,
                       sizeof(double), &recv_double_le_handle,
                       &recv_double_ct_handle);

    /*
    ** Initialize the benchmark data ct handles. Once allocated we'll
    ** reuse them, instead of reallocating them each time in
    ** AfterAlignmentInit()
    */
    send_ct_handle= PTL_INVALID_HANDLE;
    recv_ct_handle= PTL_INVALID_HANDLE;
    md_handle= PTL_INVALID_HANDLE;
    md_size= -1;
    md_buf= NULL;
    le_handle= PTL_INVALID_HANDLE;
    le_size= -1;
    le_buf= NULL;

    libtest_barrier();
}  /* end of Init() */
int ompi_mtl_portals4_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs, struct ompi_proc_t** procs) { int ret, me; size_t i; bool new_found = false; ptl_process_t *maptable; if (ompi_mtl_portals4.use_logical) { maptable = malloc(sizeof(ptl_process_t) * nprocs); if (NULL == maptable) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: malloc failed\n", __FILE__, __LINE__); return OMPI_ERR_OUT_OF_RESOURCE; } } /* Get the list of ptl_process_id_t from the runtime and copy into structure */ for (i = 0 ; i < nprocs ; ++i) { ptl_process_t *modex_id; size_t size; if( procs[i] == ompi_proc_local_proc ) { me = i; } if (procs[i]->super.proc_arch != ompi_proc_local()->super.proc_arch) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "Portals 4 MTL does not support heterogeneous operations."); opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "Proc %s architecture %x, mine %x.", OMPI_NAME_PRINT(&procs[i]->super.proc_name), procs[i]->super.proc_arch, ompi_proc_local()->super.proc_arch); return OMPI_ERR_NOT_SUPPORTED; } OPAL_MODEX_RECV(ret, &mca_mtl_portals4_component.mtl_version, &procs[i]->super.proc_name, (uint8_t**)&modex_id, &size); if (OMPI_SUCCESS != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: ompi_modex_recv failed: %d\n", __FILE__, __LINE__, ret); return ret; } else if (sizeof(ptl_process_t) != size) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: ompi_modex_recv failed: %d\n", __FILE__, __LINE__, ret); return OMPI_ERR_BAD_PARAM; } if (NULL == procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4]) { ptl_process_t *peer_id; peer_id = malloc(sizeof(ptl_process_t)); if (NULL == peer_id) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: malloc failed: %d\n", __FILE__, __LINE__, ret); return OMPI_ERR_OUT_OF_RESOURCE; } if (ompi_mtl_portals4.use_logical) { peer_id->rank = i; maptable[i].phys.pid = modex_id->phys.pid; 
maptable[i].phys.nid = modex_id->phys.nid; opal_output_verbose(50, ompi_mtl_base_framework.framework_output, "logical: global rank=%d pid=%d nid=%d\n", (int)i, maptable[i].phys.pid, maptable[i].phys.nid); } else { *peer_id = *modex_id; } procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4] = peer_id; new_found = true; } else { ptl_process_t *proc = (ptl_process_t*) procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4]; if (ompi_mtl_portals4.use_logical) { if ((size_t)proc->rank != i) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: existing peer and rank don't match\n", __FILE__, __LINE__); return OMPI_ERROR; } maptable[i].phys.pid = modex_id->phys.pid; maptable[i].phys.nid = modex_id->phys.nid; } else if (proc->phys.nid != modex_id->phys.nid || proc->phys.pid != modex_id->phys.pid) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: existing peer and modex peer don't match\n", __FILE__, __LINE__); return OMPI_ERROR; } } } if (ompi_mtl_portals4.use_logical) { ret = PtlSetMap(ompi_mtl_portals4.ni_h, nprocs, maptable); if (OMPI_SUCCESS != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: logical mapping failed: %d\n", __FILE__, __LINE__, ret); return ret; } opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "logical mapping OK\n"); free(maptable); } portals4_init_interface(); /* activate progress callback */ ret = opal_progress_register(ompi_mtl_portals4_progress); if (OMPI_SUCCESS != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: opal_progress_register failed: %d\n", __FILE__, __LINE__, ret); return ret; } #if OMPI_MTL_PORTALS4_FLOW_CONTROL if (new_found) { ret = ompi_mtl_portals4_flowctl_add_procs(me, nprocs, procs); if (OMPI_SUCCESS != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: flowctl_add_procs failed: %d\n", __FILE__, __LINE__, ret); return ret; } } #endif return OMPI_SUCCESS; }